/* $NetBSD: pmap.c,v 1.13 1997/03/06 00:04:18 gwr Exp $ */

/*-
 * Copyright (c) 1996, 1997 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jeremy Cooper.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * XXX These comments aren't quite accurate.  Need to change.
 * The sun3x uses the MC68851 Memory Management Unit, which is built
 * into the CPU.  The 68851 maps virtual to physical addresses using
 * a multi-level table lookup, which is stored in the very memory that
 * it maps.  The number of levels of lookup is configurable from one
 * to four.  In this implementation, we use three, named 'A' through 'C'.
 *
 * The MMU translates virtual addresses into physical addresses by
 * traversing these tables in a process called a 'table walk'.  The most
 * significant 7 bits of the Virtual Address ('VA') being translated are
 * used as an index into the level A table, whose base in physical memory
 * is stored in a special MMU register, the 'CPU Root Pointer' or CRP.  The
 * address found at that index in the A table is used as the base
 * address for the next table, the B table.  The next six bits of the VA are
 * used as an index into the B table, which in turn gives the base address
 * of the third and final C table.
 *
 * The next six bits of the VA are used as an index into the C table to
 * locate a Page Table Entry (PTE).  The PTE contains the physical base
 * address of a page of memory, to which the remaining 13 bits of the VA
 * are added as an offset, producing the mapped physical address.
 *
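 * As an illustrative sketch (hypothetical arithmetic, not code from
 * this file), the index extraction for this 7/6/6/13 split of a 32-bit
 * VA would be:
 *
 *      a_idx  = (va >> 25) & 0x7f;     bits 31..25: A table index
 *      b_idx  = (va >> 19) & 0x3f;     bits 24..19: B table index
 *      c_idx  = (va >> 13) & 0x3f;     bits 18..13: C table (PTE) index
 *      offset =  va        & 0x1fff;   bits 12..0:  offset in the page
 *
 * The code below uses the MMU_TIA(), MMU_TIB() and MMU_TIC() macros for
 * these extractions; the exact shift counts are implied by the table
 * sizes described above.
 *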
 * To map the entire memory space in this manner would require 2114296 bytes
 * of page tables per process - quite expensive.  Instead we will
 * allocate a fixed but considerably smaller space for the page tables at
 * the time the VM system is initialized.  When the pmap code is asked by
 * the kernel to map a VA to a PA, it allocates tables as needed from this
 * pool.  When there are no more tables in the pool, tables are stolen
 * from the oldest mapped entries in the tree.  This is only possible
 * because all memory mappings are stored in the kernel memory map
 * structures, independent of the pmap structures.  A VA which references
 * one of these invalidated maps will cause a page fault.  The kernel
 * will determine that the page fault was caused by a task using a valid
 * VA, but for some reason (which does not concern it), that address was
 * not mapped.  It will ask the pmap code to re-map the entry and then
 * it will resume executing the faulting task.
 *
 * In this manner the most efficient use of the page table space is
 * achieved.  Tasks which do not execute often will have their tables
 * stolen and reused by tasks which execute more frequently.  The best
 * size for the page table pool will probably be determined by
 * experimentation.
 *
 * You read all of the comments so far.  Good for you.
 * Now go play!
 */

/*** A Note About the 68851 Address Translation Cache
 * The MC68851 has a 64-entry cache, called the Address Translation Cache
 * or 'ATC'.  This cache stores the most recently used page descriptors
 * accessed by the MMU when it does translations.  Using a marker called a
 * 'task alias' the MMU can store the descriptors from 8 different table
 * spaces concurrently.  The task alias is associated with the base
 * address of the level A table of that address space.  When an address
 * space is currently active (the CRP currently points to its A table)
 * the only cached descriptors that will be obeyed are those whose task
 * alias matches that of the current space.
 *
 * Since the cache is always consulted before any table lookups are done,
 * it is important that it accurately reflect the state of the MMU tables.
 * Whenever a change has been made to a table that has been loaded into
 * the MMU, the code must be sure to flush any cached entries that are
 * affected by the change.  These instances are documented in the code at
 * various points.
 */
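/*
 * Where such flushes appear in this file they use, e.g., the TBIS()
 * macro (flush a single VA from the ATC).  A minimal sketch of the
 * usual pattern, assuming a PTE edit on a table the MMU is using:
 *
 *      pte->attr.raw = MMU_DT_INVALID;  -- invalidate the descriptor
 *      TBIS(va);                        -- drop any stale cached copy
 *
 * The real call sites decide whether a single-entry or a full flush
 * is required.
 */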
/*** A Note About the Note About the 68851 Address Translation Cache
 * 4 months into this code I discovered that the sun3x does not have
 * a MC68851 chip.  Instead, it has a version of this MMU that is part of
 * the 68030 CPU.
 * Although it behaves very similarly to the 68851, it has only one task
 * alias and a 22-entry cache.  So sadly (or happily), the first paragraph
 * of the previous note does not apply to the sun3x pmap.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/user.h>
#include <sys/queue.h>

#include <vm/vm.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>

#include <machine/cpu.h>
#include <machine/pmap.h>
#include <machine/pte.h>
#include <machine/machdep.h>
#include <machine/mon.h>

#include "pmap_pvt.h"

/* XXX - What headers declare these? */
extern struct pcb *curpcb;
extern int physmem;

extern void copypage __P((const void*, void*));
extern void zeropage __P((void*));

/* Defined in locore.s */
extern char kernel_text[];

/* Defined by the linker */
extern char etext[], edata[], end[];
extern char *esym;      /* DDB */

/*************************** DEBUGGING DEFINITIONS ***********************
 * Macros, preprocessor defines and variables used in debugging can make *
 * code hard to read.  Anything used exclusively for debugging purposes  *
 * is defined here to avoid having such mess scattered around the file.  *
 *************************************************************************/
#ifdef PMAP_DEBUG
/*
 * To aid the debugging process, macros should be expanded into smaller steps
 * that accomplish the same goal, yet provide convenient places for placing
 * breakpoints.  When this code is compiled with PMAP_DEBUG mode defined, the
 * 'INLINE' keyword is defined to an empty string.  This way, any function
 * defined to be a 'static INLINE' will become 'outlined' and compiled as
 * a separate function, which is much easier to debug.
 */
#define INLINE  /* nothing */

/*
 * It is sometimes convenient to watch the activity of a particular table
 * in the system.  The following variables are used for that purpose.
 */
a_tmgr_t *pmap_watch_atbl = 0;
b_tmgr_t *pmap_watch_btbl = 0;
c_tmgr_t *pmap_watch_ctbl = 0;

int pmap_debug = 0;
#define DPRINT(args) if (pmap_debug) printf args

#else   /********** Stuff below is defined if NOT debugging **************/

#define INLINE  inline
#define DPRINT(args)  /* nada */

#endif  /* PMAP_DEBUG */
/*********************** END OF DEBUGGING DEFINITIONS ********************/

/*** Management Structure - Memory Layout
 * For every MMU table in the sun3x pmap system there must be a way to
 * manage it; we must know which process is using it, what other tables
 * depend on it, and whether or not it contains any locked pages.  This
 * is solved by the creation of 'table management' or 'tmgr' structures,
 * one for each MMU table in the system.
 *
 *              MAP OF MEMORY USED BY THE PMAP SYSTEM
 *
 *                towards lower memory
 * kernAbase -> +-------------------------------------------------------+
 *              | Kernel     MMU A level table                          |
 * kernBbase -> +-------------------------------------------------------+
 *              | Kernel     MMU B level tables                         |
 * kernCbase -> +-------------------------------------------------------+
 *              |                                                       |
 *              | Kernel     MMU C level tables                         |
 *              |                                                       |
 * mmuCbase  -> +-------------------------------------------------------+
 *              | User       MMU C level tables                         |
 * mmuAbase  -> +-------------------------------------------------------+
 *              |                                                       |
 *              | User       MMU A level tables                         |
 *              |                                                       |
 * mmuBbase  -> +-------------------------------------------------------+
 *              | User       MMU B level tables                         |
 * tmgrAbase -> +-------------------------------------------------------+
 *              |  TMGR A level table structures                        |
 * tmgrBbase -> +-------------------------------------------------------+
 *              |  TMGR B level table structures                        |
 * tmgrCbase -> +-------------------------------------------------------+
 *              |  TMGR C level table structures                        |
 * pvbase    -> +-------------------------------------------------------+
 *              |  Physical to Virtual mapping table (list heads)       |
 * pvebase   -> +-------------------------------------------------------+
 *              |  Physical to Virtual mapping table (list elements)    |
 *              |                                                       |
 *              +-------------------------------------------------------+
 *                towards higher memory
 *
 * For every A table in the MMU A area, there will be a corresponding
 * a_tmgr structure in the TMGR A area.  The same will be true for
 * the B and C tables.  This arrangement will make it easy to find the
 * controlling tmgr structure for any table in the system by use of
 * (relatively) simple macros.
 */
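
/*
 * A sketch of the arithmetic the above layout allows (the real versions
 * are the mmuX2tmgr() functions defined later in this file):
 *
 *      idx = (tbl - mmuAbase) / MMU_A_TBL_SIZE;
 *      mgr = &Atmgrbase[idx];
 */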

/*
 * Global variables for storing the base addresses for the areas
 * labeled above.
 */
static vm_offset_t      kernAphys;
static mmu_long_dte_t   *kernAbase;
static mmu_short_dte_t  *kernBbase;
static mmu_short_pte_t  *kernCbase;
static mmu_long_dte_t   *mmuAbase;
static mmu_short_dte_t  *mmuBbase;
static mmu_short_pte_t  *mmuCbase;
static a_tmgr_t         *Atmgrbase;
static b_tmgr_t         *Btmgrbase;
static c_tmgr_t         *Ctmgrbase;
static pv_t             *pvbase;
static pv_elem_t        *pvebase;
struct pmap             kernel_pmap;

/*
 * This holds the CRP currently loaded into the MMU.
 */
struct mmu_rootptr kernel_crp;

/*
 * Just all around global variables.
 */
static TAILQ_HEAD(a_pool_head_struct, a_tmgr_struct) a_pool;
static TAILQ_HEAD(b_pool_head_struct, b_tmgr_struct) b_pool;
static TAILQ_HEAD(c_pool_head_struct, c_tmgr_struct) c_pool;


/*
 * Flags used to mark the safety/availability of certain operations or
 * resources.
 */
static boolean_t
        pv_initialized = FALSE,         /* PV system has been initialized. */
        tmp_vpages_inuse = FALSE,       /*
                                         * Temp. virtual pages are in use.
                                         * (see pmap_copy_page, et al.)
                                         */
        bootstrap_alloc_enabled = FALSE; /* Safe to use pmap_bootstrap_alloc(). */

/*
 * XXX: For now, retain the traditional variables that were
 * used in the old pmap/vm interface (without NONCONTIG).
 */
/* Kernel virtual address space available: */
vm_offset_t     virtual_avail, virtual_end;
/* Physical address space available: */
vm_offset_t     avail_start, avail_end;

/* This keeps track of the end of the contiguously mapped range. */
vm_offset_t     virtual_contig_end;

/* Physical address used by pmap_next_page() */
vm_offset_t     avail_next;

/* These are used by pmap_copy_page(), etc. */
vm_offset_t     tmp_vpages[2];

/*
 * The 3/80 is the only member of the sun3x family that has non-contiguous
 * physical memory.  Memory is divided into 4 banks which are physically
 * locatable on the system board.  Although the size of these banks varies
 * with the size of memory they contain, their base addresses are
 * permanently fixed.  The following structure, which describes these
 * banks, is initialized by pmap_bootstrap() after it reads from a similar
 * structure provided by the ROM Monitor.
 *
 * For the other machines in the sun3x architecture which do have contiguous
 * RAM, this list will have only one entry, which will describe the entire
 * range of available memory.
 */
struct pmap_physmem_struct avail_mem[SUN3X_80_MEM_BANKS];
u_int total_phys_mem;

/*************************************************************************/

/*
 * XXX - Should "tune" these based on statistics.
 *
 * My first guess about the relative numbers of these needed is
 * based on the fact that a "typical" process will have several
 * pages mapped at low virtual addresses (text, data, bss), then
 * some mapped shared libraries, and then some stack pages mapped
 * near the high end of the VA space.  Each process can use only
 * one A table, and most will use only two B tables (maybe three)
 * and probably about four C tables.  Therefore, the first guess
 * at the relative numbers of these needed is 1:2:4 -gwr
 *
 * The number of C tables needed is closely related to the amount
 * of physical memory available plus a certain amount attributable
 * to the use of double mappings.  With a few simulation statistics
 * we can find a reasonably good estimation of this unknown value.
 * Armed with that and the above ratios, we have a good idea of what
 * is needed at each level. -j
 *
 * Note: It is not physical memory size, but the total mapped
 * virtual space required by the combined working sets of all the
 * currently _runnable_ processes.  (Sleeping ones don't count.)
 * The amount of physical memory should be irrelevant. -gwr
 */
#define NUM_A_TABLES    16
#define NUM_B_TABLES    32
#define NUM_C_TABLES    64

/*
 * This determines our total virtual mapping capacity.
 * Yes, it is a FIXED value so we can pre-allocate.
 */
#define NUM_USER_PTES   (NUM_C_TABLES * MMU_C_TBL_SIZE)
#define NUM_KERN_PTES   (sun3x_btop(KERN_END - KERNBASE))
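
/*
 * For a sense of scale: assuming 64-entry C tables (MMU_C_TBL_SIZE == 64)
 * and 8KB pages, NUM_USER_PTES works out to 64 * 64 = 4096 PTEs, i.e.
 * roughly 32MB of user virtual space mapped simultaneously across all
 * resident user pmaps.
 */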

/*************************** MISCELLANEOUS MACROS *************************/
#define PMAP_LOCK()     ;       /* Nothing, for now */
#define PMAP_UNLOCK()   ;       /* same. */
#define NULL    0

static INLINE void *            mmu_ptov __P((vm_offset_t pa));
static INLINE vm_offset_t       mmu_vtop __P((void *va));

#if 0
static INLINE a_tmgr_t *        mmuA2tmgr __P((mmu_long_dte_t *));
#endif
static INLINE b_tmgr_t *        mmuB2tmgr __P((mmu_short_dte_t *));
static INLINE c_tmgr_t *        mmuC2tmgr __P((mmu_short_pte_t *));

static INLINE pv_t *            pa2pv __P((vm_offset_t pa));
static INLINE int               pteidx __P((mmu_short_pte_t *));
static INLINE pmap_t            current_pmap __P((void));

/*
 * We can always convert between virtual and physical addresses
 * for anything in the range [KERNBASE ... avail_start] because
 * that range is GUARANTEED to be mapped linearly.
 * We rely heavily upon this feature!
 */
static INLINE void *
mmu_ptov(pa)
        vm_offset_t pa;
{
        register vm_offset_t va;

        va = (pa + KERNBASE);
#ifdef PMAP_DEBUG
        if ((va < KERNBASE) || (va >= virtual_contig_end))
                panic("mmu_ptov");
#endif
        return ((void*)va);
}
static INLINE vm_offset_t
mmu_vtop(vva)
        void *vva;
{
        register vm_offset_t va;

        va = (vm_offset_t)vva;
#ifdef PMAP_DEBUG
        if ((va < KERNBASE) || (va >= virtual_contig_end))
                panic("mmu_vtop");
#endif
        return (va - KERNBASE);
}
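
/*
 * Example use (from pmap_bootstrap() below): kernAphys is computed as
 * mmu_vtop(kernAbase), relying on exactly this linear KERNBASE mapping
 * to hand the MMU a physical root pointer.
 */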

/*
 * These functions map MMU tables to their corresponding manager structures.
 * They are needed quite often because many of the pointers in the pmap
 * system reference MMU tables and not the structures that control them.
 * There needs to be a way to find one when given the other and these
 * functions do so by taking advantage of the memory layout described above.
 * Here's a quick step through the first of them, mmuA2tmgr():
 *
 * 1) find the offset of the given MMU A table from the base of its table
 *    pool (table - mmuAbase).
 * 2) convert this offset into a table index by dividing it by the
 *    size of one MMU 'A' table. (sizeof(mmu_long_dte_t) * MMU_A_TBL_SIZE)
 * 3) use this index to select the corresponding 'A' table manager
 *    structure from the 'A' table manager pool (Atmgrbase[index]).
 */
/* This function is not currently used. */
#if 0
static INLINE a_tmgr_t *
mmuA2tmgr(mmuAtbl)
        mmu_long_dte_t *mmuAtbl;
{
        register int idx;

        /* Which table is this in? */
        idx = (mmuAtbl - mmuAbase) / MMU_A_TBL_SIZE;
#ifdef PMAP_DEBUG
        if ((idx < 0) || (idx >= NUM_A_TABLES))
                panic("mmuA2tmgr");
#endif
        return (&Atmgrbase[idx]);
}
#endif /* 0 */

static INLINE b_tmgr_t *
mmuB2tmgr(mmuBtbl)
        mmu_short_dte_t *mmuBtbl;
{
        register int idx;

        /* Which table is this in? */
        idx = (mmuBtbl - mmuBbase) / MMU_B_TBL_SIZE;
#ifdef PMAP_DEBUG
        if ((idx < 0) || (idx >= NUM_B_TABLES))
                panic("mmuB2tmgr");
#endif
        return (&Btmgrbase[idx]);
}

/* mmuC2tmgr                    INTERNAL
 **
 * Given a pte known to belong to a C table, return the address of
 * that table's management structure.
 */
static INLINE c_tmgr_t *
mmuC2tmgr(mmuCtbl)
        mmu_short_pte_t *mmuCtbl;
{
        register int idx;

        /* Which table is this in? */
        idx = (mmuCtbl - mmuCbase) / MMU_C_TBL_SIZE;
#ifdef PMAP_DEBUG
        if ((idx < 0) || (idx >= NUM_C_TABLES))
                panic("mmuC2tmgr");
#endif
        return (&Ctmgrbase[idx]);
}

/* This is now a function call below.
 * #define pa2pv(pa) \
 *      (&pvbase[(unsigned long)\
 *              sun3x_btop(pa)\
 *      ])
 */

/* pa2pv                        INTERNAL
 **
 * Return the pv_list_head element which manages the given physical
 * address.
 */
static INLINE pv_t *
pa2pv(pa)
        vm_offset_t pa;
{
        register struct pmap_physmem_struct *bank;
        register int idx;

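        /*
         * Walk the bank list to find the bank containing pa.  The last
         * bank's pmem_next is NULL, so a pa beyond all managed memory
         * would crash here; callers are expected to verify the address
         * with is_managed() first, as pmap_remove_pte() does.
         */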
        bank = &avail_mem[0];
        while (pa >= bank->pmem_end)
                bank = bank->pmem_next;

        pa -= bank->pmem_start;
        idx = bank->pmem_pvbase + sun3x_btop(pa);
#ifdef PMAP_DEBUG
        if ((idx < 0) || (idx >= physmem))
                panic("pa2pv");
#endif
        return &pvbase[idx];
}

/* pteidx                       INTERNAL
 **
 * Return the index of the given PTE within the entire fixed table of
 * PTEs.
 */
static INLINE int
pteidx(pte)
        mmu_short_pte_t *pte;
{
        return (pte - kernCbase);
}

/*
 * This just offers a place to put some debugging checks,
 * and reduces the number of places "curproc" appears...
 */
static INLINE pmap_t
current_pmap()
{
        struct proc     *p;
        struct vmspace  *vm;
        vm_map_t        map;
        pmap_t          pmap;

        p = curproc;    /* XXX */
        if (p == NULL)
                pmap = &kernel_pmap;
        else {
                vm = p->p_vmspace;
                map = &vm->vm_map;
                pmap = vm_map_pmap(map);
        }

        return (pmap);
}


/*************************** FUNCTION DEFINITIONS ************************
 * These appear here merely for the compiler to enforce type checking on *
 * all function calls.                                                   *
 *************************************************************************/

/** External functions
 ** - functions used within this module but written elsewhere.
 **   All of these functions are in locore.s
 ** XXX - These functions were later replaced with their more cryptic
 **  hp300 counterparts.  They may be removed now.
 **/
#if 0 /* deprecated mmu */
void   mmu_seturp __P((vm_offset_t));
void   mmu_flush __P((int, vm_offset_t));
void   mmu_flusha __P((void));
#endif /* 0 */

/** Internal functions
 ** - all functions used only within this module are defined in
 **   pmap_pvt.h
 **/

/** Interface functions
 ** - functions required by the Mach VM Pmap interface, with MACHINE_CONTIG
 **   defined.
 **/
#ifdef INCLUDED_IN_PMAP_H
void   pmap_bootstrap __P((void));
void  *pmap_bootstrap_alloc __P((int));
void   pmap_enter __P((pmap_t, vm_offset_t, vm_offset_t, vm_prot_t, boolean_t));
pmap_t pmap_create __P((vm_size_t));
void   pmap_destroy __P((pmap_t));
void   pmap_reference __P((pmap_t));
boolean_t   pmap_is_referenced __P((vm_offset_t));
boolean_t   pmap_is_modified __P((vm_offset_t));
void   pmap_clear_modify __P((vm_offset_t));
vm_offset_t pmap_extract __P((pmap_t, vm_offset_t));
void   pmap_activate __P((pmap_t));
int    pmap_page_index __P((vm_offset_t));
u_int  pmap_free_pages __P((void));
#endif /* INCLUDED_IN_PMAP_H */

/********************************** CODE ********************************
 * Functions that are called from other parts of the kernel are labeled *
 * as 'INTERFACE' functions.  Functions that are only called from       *
 * within the pmap module are labeled as 'INTERNAL' functions.          *
 * Functions that are internal, but are not (currently) used at all are *
 * labeled 'INTERNAL_X'.                                                *
 ************************************************************************/

/* pmap_bootstrap                       INTERNAL
 **
 * Initializes the pmap system.  Called at boot time from sun3x_vm_init()
 * in _startup.c.
 *
 * Reminder: having a pmap_bootstrap_alloc() and also having the VM
 *           system implement pmap_steal_memory() is redundant.
 *           Don't release this code without removing one or the other!
 */
void
pmap_bootstrap(nextva)
        vm_offset_t nextva;
{
        struct physmemory *membank;
        struct pmap_physmem_struct *pmap_membank;
        vm_offset_t va, pa, eva;
        int b, c, i, j; /* running table counts */
        int size;

        /*
         * This function is called by __bootstrap after it has
         * determined the type of machine and made the appropriate
         * patches to the ROM vectors (XXX- I don't quite know what I meant
         * by that.)  It allocates and sets up enough of the pmap system
         * to manage the kernel's address space.
         */

        /*
         * Determine the range of kernel virtual and physical
         * space available.  Note that we ABSOLUTELY DEPEND on
         * the fact that the first bank of memory (4MB) is
         * mapped linearly to KERNBASE (which we guaranteed in
         * the first instructions of locore.s).
         * That is plenty for our bootstrap work.
         */
        virtual_avail = sun3x_round_page(nextva);
        virtual_contig_end = KERNBASE + 0x400000; /* +4MB */
        virtual_end = VM_MAX_KERNEL_ADDRESS;
        /* Don't need avail_start til later. */

        /* We may now call pmap_bootstrap_alloc(). */
        bootstrap_alloc_enabled = TRUE;

        /*
         * This is a somewhat unwrapped loop to deal with
         * copying the PROM's 'physmem' banks into the pmap's
         * banks.  The following is always assumed:
         * 1. There is always at least one bank of memory.
         * 2. There is always a last bank of memory, and its
         *    pmem_next member must be set to NULL.
         * XXX - Use: do { ... } while (membank->next) instead?
         * XXX - Why copy this stuff at all? -gwr
         *     - It is needed in pa2pv().
         */
        membank = romVectorPtr->v_physmemory;
        pmap_membank = avail_mem;
        total_phys_mem = 0;

        while (membank->next) {
                pmap_membank->pmem_start = membank->address;
                pmap_membank->pmem_end = membank->address + membank->size;
                total_phys_mem += membank->size;
                /*
                 * This silly syntax arises because pmap_membank
                 * is really a pre-allocated array, but it is put into
                 * use as a linked list.
                 */
                pmap_membank->pmem_next = pmap_membank + 1;
                pmap_membank = pmap_membank->pmem_next;
                membank = membank->next;
        }

        /*
         * XXX The last bank of memory should be reduced to exclude the
         * physical pages needed by the PROM monitor from being used
         * in the VM system.  XXX - See below - Fix!
         */
        pmap_membank->pmem_start = membank->address;
        pmap_membank->pmem_end = membank->address + membank->size;
        pmap_membank->pmem_next = NULL;

#if 0   /* XXX - Need to integrate this! */
        /*
         * The last few pages of physical memory are "owned" by
         * the PROM.  The total amount of memory we are allowed
         * to use is given by the romvec pointer. -gwr
         *
         * We should dedicate different variables for 'useable'
         * and 'physically available'.  Most users are used to the
         * kernel reporting the amount of memory 'physically available'
         * as opposed to 'useable by the kernel' at boot time. -j
         */
        total_phys_mem = *romVectorPtr->memoryAvail;
#endif  /* XXX */

        total_phys_mem += membank->size;        /* XXX see above */
        physmem = btoc(total_phys_mem);

        /*
         * Avail_end is set to the first byte of physical memory
         * after the end of the last bank.  We use this only to
         * determine if a physical address is "managed" memory.
         *
         * XXX - The setting of avail_end is a temporary ROM saving hack.
         */
        avail_end = pmap_membank->pmem_end -
                (total_phys_mem - *romVectorPtr->memoryAvail);
        avail_end = sun3x_trunc_page(avail_end);

        /*
         * The first step is to allocate MMU tables.
         * Note: All must be aligned on 256 byte boundaries.
         *
         * Start with the top level, or 'A' table.
         */
        size = sizeof(mmu_long_dte_t) * MMU_A_TBL_SIZE;
        kernAbase = pmap_bootstrap_alloc(size);
        bzero(kernAbase, size);

        /*
         * Allocate enough B tables to map from KERNBASE to
         * the end of VM.
         */
        size = sizeof(mmu_short_dte_t) *
                (MMU_A_TBL_SIZE - MMU_TIA(KERNBASE)) * MMU_B_TBL_SIZE;
        kernBbase = pmap_bootstrap_alloc(size);
        bzero(kernBbase, size);

        /*
         * Allocate enough C tables.
         * Note: In order for the PV system to work correctly, the kernel
         * and user-level C tables must be allocated contiguously.
         * Nothing should be allocated between here and the allocation of
         * mmuCbase below.  XXX: Should do this as one allocation, and
         * then compute a pointer for mmuCbase instead of this...
         */
        size = sizeof (mmu_short_pte_t) *
                (MMU_A_TBL_SIZE - MMU_TIA(KERNBASE))
                * MMU_B_TBL_SIZE * MMU_C_TBL_SIZE;
        kernCbase = pmap_bootstrap_alloc(size);
        bzero(kernCbase, size);

        /*
         * Allocate user MMU tables.
         * These must be aligned on 256 byte boundaries.
         *
         * As noted in the comment preceding the allocation of the kernel
         * C tables above, user-level C tables must be flush with (up
         * against) the kernel-level C tables.
         */
        mmuCbase = (mmu_short_pte_t *)
                pmap_bootstrap_alloc(sizeof(mmu_short_pte_t)
                * MMU_C_TBL_SIZE
                * NUM_C_TABLES);
        mmuAbase = (mmu_long_dte_t *)
                pmap_bootstrap_alloc(sizeof(mmu_long_dte_t)
                * MMU_A_TBL_SIZE
                * NUM_A_TABLES);
        mmuBbase = (mmu_short_dte_t *)
                pmap_bootstrap_alloc(sizeof(mmu_short_dte_t)
                * MMU_B_TBL_SIZE
                * NUM_B_TABLES);

        /*
         * Fill in the never-changing part of the kernel tables.
         * For simplicity, the kernel's mappings will be editable as a
         * flat array of page table entries at kernCbase.  The
         * higher level 'A' and 'B' tables must be initialized to point
         * to this lower one.
         */
        b = c = 0;

        /*
         * Invalidate all mappings below KERNBASE in the A table.
         * This area has already been zeroed out, but it is good
         * practice to explicitly show that we are interpreting
         * it as a list of A table descriptors.
         */
        for (i = 0; i < MMU_TIA(KERNBASE); i++) {
                kernAbase[i].addr.raw = 0;
        }

        /*
         * Set up the kernel A and B tables so that they will reference the
         * correct spots in the contiguous table of PTEs allocated for the
         * kernel's virtual memory space.
         */
        for (i = MMU_TIA(KERNBASE); i < MMU_A_TBL_SIZE; i++) {
                kernAbase[i].attr.raw =
                        MMU_LONG_DTE_LU | MMU_LONG_DTE_SUPV | MMU_DT_SHORT;
                kernAbase[i].addr.raw = mmu_vtop(&kernBbase[b]);

                for (j=0; j < MMU_B_TBL_SIZE; j++) {
                        kernBbase[b + j].attr.raw = mmu_vtop(&kernCbase[c])
                                | MMU_DT_SHORT;
                        c += MMU_C_TBL_SIZE;
                }
                b += MMU_B_TBL_SIZE;
        }
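
        /*
         * Every kernel VA at or above KERNBASE now resolves through
         * these A/B entries to a PTE in the flat kernCbase array, so
         * the PTE for a kernel va can also be found directly (a sketch
         * of the relation; pmap_bootstrap_copyprom() below does this):
         *
         *      pte = &kernCbase[sun3x_btop(va - KERNBASE)];
         */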

        /* XXX - Doing kernel_pmap a little further down. */

        pmap_alloc_usermmu();   /* Allocate user MMU tables.        */
        pmap_alloc_usertmgr();  /* Allocate user MMU table managers.*/
        pmap_alloc_pv();        /* Allocate physical->virtual map.  */

        /*
         * We are now done with pmap_bootstrap_alloc().  Round up
         * `virtual_avail' to the nearest page, and set the flag
         * to prevent use of pmap_bootstrap_alloc() hereafter.
         */
        pmap_bootstrap_aalign(NBPG);
        bootstrap_alloc_enabled = FALSE;

        /*
         * Now that we are done with pmap_bootstrap_alloc(), we
         * must save the virtual and physical addresses of the
         * end of the linearly mapped range, which are stored in
         * virtual_contig_end and avail_start, respectively.
         * These variables will never change after this point.
         */
        virtual_contig_end = virtual_avail;
        avail_start = virtual_avail - KERNBASE;

        /*
         * `avail_next' is a running pointer used by pmap_next_page() to
         * keep track of the next available physical page to be handed
         * to the VM system during its initialization, in which it
         * asks for physical pages, one at a time.
         */
        avail_next = avail_start;

        /*
         * Now allocate some virtual addresses, but not the physical pages
         * behind them.  Note that virtual_avail is already page-aligned.
         *
         * tmp_vpages[] is an array of two virtual pages used for temporary
         * kernel mappings in the pmap module to facilitate various physical
         * address-oriented operations.
         */
        tmp_vpages[0] = virtual_avail;
        virtual_avail += NBPG;
        tmp_vpages[1] = virtual_avail;
        virtual_avail += NBPG;

        /** Initialize the PV system **/
        pmap_init_pv();

        /*
         * Fill in the kernel_pmap structure and kernel_crp.
         */
        kernAphys = mmu_vtop(kernAbase);
        kernel_pmap.pm_a_tmgr = NULL;
        kernel_pmap.pm_a_phys = kernAphys;
        kernel_pmap.pm_refcount = 1; /* always in use */

        kernel_crp.rp_attr = MMU_LONG_DTE_LU | MMU_DT_LONG;
        kernel_crp.rp_addr = kernAphys;

        /*
         * Now pmap_enter_kernel() may be used safely and will be
         * the main interface used hereafter to modify the kernel's
         * virtual address space.  Note that since we are still running
         * under the PROM's address table, none of these table modifications
         * actually take effect until pmap_takeover_mmu() is called.
         *
         * Note: Our tables do NOT have the PROM linear mappings!
         * Only the mappings created here exist in our tables, so
         * remember to map anything we expect to use.
         */
        va = (vm_offset_t) KERNBASE;
        pa = 0;

        /*
         * The first page of the kernel virtual address space is the msgbuf
         * page.  The page attributes (data, non-cached) are set here, while
         * the address is assigned to this global pointer in cpu_startup().
         * XXX - Make it non-cached?
         */
        pmap_enter_kernel(va, pa|PMAP_NC, VM_PROT_ALL);
        va += NBPG; pa += NBPG;

        /* Next page is used as the temporary stack. */
        pmap_enter_kernel(va, pa, VM_PROT_ALL);
        va += NBPG; pa += NBPG;

        /*
         * Map all of the kernel's text segment as read-only and cacheable.
         * (Cacheable is implied by default).  Unfortunately, the last bytes
         * of kernel text and the first bytes of kernel data will often be
         * sharing the same page.  Therefore, the last page of kernel text
         * has to be mapped as read/write, to accommodate the data.
         */
        eva = sun3x_trunc_page((vm_offset_t)etext);
        for (; va < eva; va += NBPG, pa += NBPG)
                pmap_enter_kernel(va, pa, VM_PROT_READ|VM_PROT_EXECUTE);

        /*
         * Map all of the kernel's data as read/write and cacheable.
         * This includes: data, BSS, symbols, and everything in the
         * contiguous memory used by pmap_bootstrap_alloc()
         */
        for (; pa < avail_start; va += NBPG, pa += NBPG)
                pmap_enter_kernel(va, pa, VM_PROT_READ|VM_PROT_WRITE);

        /*
         * At this point we are almost ready to take over the MMU.  But first
         * we must save the PROM's address space in our map, as we call its
         * routines and make references to its data later in the kernel.
         */
        pmap_bootstrap_copyprom();
        pmap_takeover_mmu();
        pmap_bootstrap_setprom();

        /*
         * XXX - Todo: Fill in the PROM's level-A table for the VA range
         * KERNBASE ... 0xFE000000 so that the PROM monitor can see our
         * mappings.  This should make bouncing in/out of PROM easier.
         * XXX - Add (i.e.) pmap_setup_prommap();
         */

        /* Notify the VM system of our page size. */
        PAGE_SIZE = NBPG;
        vm_set_page_size();
}


/* pmap_alloc_usermmu                   INTERNAL
 **
 * Called from pmap_bootstrap() to allocate MMU tables that will
 * eventually be used for user mappings.
 */
void
pmap_alloc_usermmu()
{
        /* XXX: Moved into caller. */
}

/* pmap_alloc_pv                        INTERNAL
 **
 * Called from pmap_bootstrap() to allocate the physical
 * to virtual mapping list.  Each physical page of memory
 * in the system has a corresponding element in this list.
 */
void
pmap_alloc_pv()
{
        int     i;
        unsigned int    total_mem;

        /*
         * Allocate a pv_head structure for every page of physical
         * memory that will be managed by the system.  Since memory on
         * the 3/80 is non-contiguous, we cannot arrive at a total page
         * count by subtraction of the lowest available address from the
         * highest, but rather we have to step through each memory
         * bank and add the number of pages in each to the total.
         *
         * At this time we also initialize the offset of each bank's
         * starting pv_head within the pv_head list so that the physical
         * memory state routines (pmap_is_referenced(),
         * pmap_is_modified(), et al.) can quickly find corresponding
         * pv_heads in spite of the non-contiguity.
         */
        total_mem = 0;
        for (i = 0; i < SUN3X_80_MEM_BANKS; i++) {
                avail_mem[i].pmem_pvbase = sun3x_btop(total_mem);
                total_mem += avail_mem[i].pmem_end -
                        avail_mem[i].pmem_start;
                if (avail_mem[i].pmem_next == NULL)
                        break;
        }
#ifdef PMAP_DEBUG
        if (total_mem != total_phys_mem)
                panic("pmap_alloc_pv did not arrive at correct page count");
#endif

        pvbase = (pv_t *) pmap_bootstrap_alloc(sizeof(pv_t) *
                sun3x_btop(total_phys_mem));
}

/* pmap_alloc_usertmgr                  INTERNAL
 **
 * Called from pmap_bootstrap() to allocate the structures which
 * facilitate management of user MMU tables.  Each user MMU table
 * in the system has one such structure associated with it.
 */
void
pmap_alloc_usertmgr()
{
        /* Allocate user MMU table managers */
        /* It would be a lot simpler to just make these BSS, but */
        /* we may want to change their size at boot time... -j */
        Atmgrbase = (a_tmgr_t *) pmap_bootstrap_alloc(sizeof(a_tmgr_t)
                * NUM_A_TABLES);
        Btmgrbase = (b_tmgr_t *) pmap_bootstrap_alloc(sizeof(b_tmgr_t)
                * NUM_B_TABLES);
        Ctmgrbase = (c_tmgr_t *) pmap_bootstrap_alloc(sizeof(c_tmgr_t)
                * NUM_C_TABLES);

        /*
         * Allocate PV list elements for the physical to virtual
         * mapping system.
         */
        pvebase = (pv_elem_t *) pmap_bootstrap_alloc(
                sizeof(pv_elem_t) * (NUM_USER_PTES + NUM_KERN_PTES));
}

/* pmap_bootstrap_copyprom()            INTERNAL
 **
 * Copy the PROM mappings into our own tables.  Note, we
 * can use physical addresses until __bootstrap returns.
 */
void
pmap_bootstrap_copyprom()
{
        MachMonRomVector *romp;
        int *mon_ctbl;
        mmu_short_pte_t *kpte;
        int i, len;

        romp = romVectorPtr;

        /*
         * Copy the mappings in MON_KDB_START...MONEND
         * Note: mon_ctbl[0] maps MON_KDB_START
         */
        mon_ctbl = *romp->monptaddr;
        i = sun3x_btop(MON_KDB_START - KERNBASE);
        kpte = &kernCbase[i];
        len = sun3x_btop(MONEND - MON_KDB_START);

        for (i = 0; i < len; i++) {
                kpte[i].attr.raw = mon_ctbl[i];
        }

        /*
         * Copy the mappings at MON_DVMA_BASE (to the end).
         * Note, in here, mon_ctbl[0] maps MON_DVMA_BASE.
         * XXX - This does not appear to be necessary, but
         * I'm not sure yet if it is or not. -gwr
         */
        mon_ctbl = *romp->shadowpteaddr;
        i = sun3x_btop(MON_DVMA_BASE - KERNBASE);
        kpte = &kernCbase[i];
        len = sun3x_btop(MON_DVMA_SIZE);

        for (i = 0; i < len; i++) {
                kpte[i].attr.raw = mon_ctbl[i];
        }
}

/* pmap_takeover_mmu                    INTERNAL
 **
 * Called from pmap_bootstrap() after it has copied enough of the
 * PROM mappings into the kernel map so that we can use our own
 * MMU table.
 */
void
pmap_takeover_mmu()
{

        loadcrp(&kernel_crp);
}

/* pmap_bootstrap_setprom()             INTERNAL
 **
 * Set the PROM mappings so it can see kernel space.
 * Note that physical addresses are used here, which
 * we can get away with because this runs with the
 * low 1GB set for transparent translation.
 */
void
pmap_bootstrap_setprom()
{
        mmu_long_dte_t *mon_dte;
        extern struct mmu_rootptr mon_crp;
        int i;

        mon_dte = (mmu_long_dte_t *) mon_crp.rp_addr;
        for (i = MMU_TIA(KERNBASE); i < MMU_TIA(KERN_END); i++) {
                mon_dte[i].attr.raw = kernAbase[i].attr.raw;
                mon_dte[i].addr.raw = kernAbase[i].addr.raw;
        }
}


/* pmap_init                    INTERFACE
 **
 * Called at the end of vm_init() to set up the pmap system to go
 * into full time operation.  All initialization of kernel_pmap
 * should be already done by now, so this should just do things
 * needed for user-level pmaps to work.
 */
void
pmap_init()
{
        /** Initialize the manager pools **/
        TAILQ_INIT(&a_pool);
        TAILQ_INIT(&b_pool);
        TAILQ_INIT(&c_pool);

        /**************************************************************
         * Initialize all tmgr structures and MMU tables they manage. *
         **************************************************************/
        /** Initialize A tables **/
        pmap_init_a_tables();
        /** Initialize B tables **/
        pmap_init_b_tables();
        /** Initialize C tables **/
        pmap_init_c_tables();
}

/* pmap_init_a_tables()                 INTERNAL
 **
 * Initializes all A managers, their MMU A tables, and inserts
 * them into the A manager pool for use by the system.
 */
void
pmap_init_a_tables()
{
        int i;
        a_tmgr_t *a_tbl;

        for (i=0; i < NUM_A_TABLES; i++) {
                /* Select the next available A manager from the pool */
                a_tbl = &Atmgrbase[i];

                /*
                 * Clear its parent entry.  Set its wired and valid
                 * entry count to zero.
                 */
                a_tbl->at_parent = NULL;
                a_tbl->at_wcnt = a_tbl->at_ecnt = 0;

                /* Assign it the next available MMU A table from the pool */
                a_tbl->at_dtbl = &mmuAbase[i * MMU_A_TBL_SIZE];

                /*
                 * Initialize the MMU A table with the table in the `proc0',
                 * or kernel, mapping.  This ensures that every process has
                 * the kernel mapped in the top part of its address space.
                 */
                bcopy(kernAbase, a_tbl->at_dtbl, MMU_A_TBL_SIZE *
                        sizeof(mmu_long_dte_t));

                /*
                 * Finally, insert the manager into the A pool,
                 * making it ready to be used by the system.
                 */
                TAILQ_INSERT_TAIL(&a_pool, a_tbl, at_link);
        }
}

/* pmap_init_b_tables()                 INTERNAL
 **
 * Initializes all B table managers, their MMU B tables, and
 * inserts them into the B manager pool for use by the system.
 */
void
pmap_init_b_tables()
{
        int i,j;
        b_tmgr_t *b_tbl;

        for (i=0; i < NUM_B_TABLES; i++) {
                /* Select the next available B manager from the pool */
                b_tbl = &Btmgrbase[i];

                b_tbl->bt_parent = NULL;        /* clear its parent,  */
                b_tbl->bt_pidx = 0;             /* parent index,      */
                b_tbl->bt_wcnt = 0;             /* wired entry count, */
                b_tbl->bt_ecnt = 0;             /* valid entry count. */

                /* Assign it the next available MMU B table from the pool */
                b_tbl->bt_dtbl = &mmuBbase[i * MMU_B_TBL_SIZE];

                /* Invalidate every descriptor in the table */
                for (j=0; j < MMU_B_TBL_SIZE; j++)
                        b_tbl->bt_dtbl[j].attr.raw = MMU_DT_INVALID;

                /* Insert the manager into the B pool */
                TAILQ_INSERT_TAIL(&b_pool, b_tbl, bt_link);
        }
}

/* pmap_init_c_tables()                 INTERNAL
 **
 * Initializes all C table managers, their MMU C tables, and
 * inserts them into the C manager pool for use by the system.
 */
void
pmap_init_c_tables()
{
        int i,j;
        c_tmgr_t *c_tbl;

        for (i=0; i < NUM_C_TABLES; i++) {
                /* Select the next available C manager from the pool */
                c_tbl = &Ctmgrbase[i];

                c_tbl->ct_parent = NULL;        /* clear its parent,  */
                c_tbl->ct_pidx = 0;             /* parent index,      */
                c_tbl->ct_wcnt = 0;             /* wired entry count, */
                c_tbl->ct_ecnt = 0;             /* valid entry count. */

                /* Assign it the next available MMU C table from the pool */
                c_tbl->ct_dtbl = &mmuCbase[i * MMU_C_TBL_SIZE];

                for (j=0; j < MMU_C_TBL_SIZE; j++)
                        c_tbl->ct_dtbl[j].attr.raw = MMU_DT_INVALID;

                TAILQ_INSERT_TAIL(&c_pool, c_tbl, ct_link);
        }
}

/* pmap_init_pv()                       INTERNAL
 **
 * Initializes the Physical to Virtual mapping system.
 */
void
pmap_init_pv()
{
        int     i;

        /* Initialize every PV head. */
        for (i = 0; i < sun3x_btop(total_phys_mem); i++) {
                pvbase[i].pv_idx = PVE_EOL;     /* Indicate no mappings */
                pvbase[i].pv_flags = 0;         /* Zero out page flags  */
        }

        pv_initialized = TRUE;
}

/* get_a_table                  INTERNAL
 **
 * Retrieve and return a level A table for use in a user map.
 */
a_tmgr_t *
get_a_table()
{
        a_tmgr_t *tbl;
        pmap_t pmap;

        /* Get the top A table in the pool */
        tbl = a_pool.tqh_first;
        if (tbl == NULL) {
                /*
                 * XXX - Instead of panicking here and in other get_x_table
                 * functions, we do have the option of sleeping on the head of
                 * the table pool.  Any function which updates the table pool
                 * would then issue a wakeup() on the head, thus waking up any
                 * processes waiting for a table.
                 *
                 * Actually, the place to sleep would be when some process
                 * asks for a "wired" mapping that would run us short of
                 * mapping resources.  This design DEPENDS on always having
                 * some mapping resources in the pool for stealing, so we
                 * must make sure we NEVER let the pool become empty. -gwr
                 */
                panic("get_a_table: out of A tables.");
        }

        TAILQ_REMOVE(&a_pool, tbl, at_link);
        /*
         * If the table has a non-null parent pointer then it is in use.
         * Forcibly abduct it from its parent and clear its entries.
         * No re-entrancy worries here.  This table would not be in the
         * table pool unless it was available for use.
         *
         * Note that the second argument to free_a_table() is FALSE.  This
         * indicates that the table should not be relinked into the A table
         * pool.  That is a job for the function that called us.
         */
        if (tbl->at_parent) {
                pmap = tbl->at_parent;
                free_a_table(tbl, FALSE);
                pmap->pm_a_tmgr = NULL;
                pmap->pm_a_phys = kernAphys;
        }
#ifdef NON_REENTRANT
        /*
         * If the table isn't to be wired down, re-insert it at the
         * end of the pool.
         */
        if (!wired)
                /*
                 * Quandary - XXX
                 * Would it be better to let the calling function insert this
                 * table into the queue?  By inserting it here, we are allowing
                 * it to be stolen immediately.  The calling function is
                 * probably not expecting to use a table that it is not
                 * assured full control of.
                 * Answer - In the interest of re-entrancy, it is best to let
                 * the calling function determine when a table is available
                 * for use.  Therefore this code block is not used.
                 */
                TAILQ_INSERT_TAIL(&a_pool, tbl, at_link);
#endif /* NON_REENTRANT */
        return tbl;
}

/* get_b_table                  INTERNAL
 **
 * Return a level B table for use.
 */
b_tmgr_t *
get_b_table()
{
        b_tmgr_t *tbl;

        /* See 'get_a_table' for comments. */
        tbl = b_pool.tqh_first;
        if (tbl == NULL)
                panic("get_b_table: out of B tables.");
        TAILQ_REMOVE(&b_pool, tbl, bt_link);
        if (tbl->bt_parent) {
                tbl->bt_parent->at_dtbl[tbl->bt_pidx].attr.raw = MMU_DT_INVALID;
                tbl->bt_parent->at_ecnt--;
                free_b_table(tbl, FALSE);
        }
#ifdef NON_REENTRANT
        if (!wired)
                /* XXX see quandary in get_a_table */
                /* XXX start lock */
                TAILQ_INSERT_TAIL(&b_pool, tbl, bt_link);
                /* XXX end lock */
#endif /* NON_REENTRANT */
        return tbl;
}

/* get_c_table                  INTERNAL
 **
 * Return a level C table for use.
 */
c_tmgr_t *
get_c_table()
{
        c_tmgr_t *tbl;

        /* See 'get_a_table' for comments */
        tbl = c_pool.tqh_first;
        if (tbl == NULL)
                panic("get_c_table: out of C tables.");
        TAILQ_REMOVE(&c_pool, tbl, ct_link);
        if (tbl->ct_parent) {
                tbl->ct_parent->bt_dtbl[tbl->ct_pidx].attr.raw = MMU_DT_INVALID;
                tbl->ct_parent->bt_ecnt--;
                free_c_table(tbl, FALSE);
        }
#ifdef NON_REENTRANT
        if (!wired)
                /* XXX see quandary in get_a_table */
                /* XXX start lock */
                TAILQ_INSERT_TAIL(&c_pool, tbl, ct_link);
                /* XXX end lock */
#endif /* NON_REENTRANT */

        return tbl;
}

/*
 * The following 'free_table' and 'steal_table' functions are called to
 * detach tables from their current obligations (parents and children) and
 * prepare them for reuse in another mapping.
 *
 * Free_table is used when the calling function will handle the fate
 * of the parent table, such as returning it to the free pool when it has
 * no valid entries.  Functions that do not want to handle this should
 * call steal_table, in which the parent table's descriptors and entry
 * count are automatically modified when this table is removed.
 */

/* free_a_table                 INTERNAL
 **
 * Unmaps the given A table and all child tables from their current
 * mappings.  Returns the number of pages that were invalidated.
 * If 'relink' is true, the function will return the table to the head
 * of the available table pool.
 *
 * Cache note: The MC68851 will automatically flush all
 * descriptors derived from a given A table from its
 * Automatic Translation Cache (ATC) if we issue a
 * 'PFLUSHR' instruction with the base address of the
 * table.  This function should do so, and does.
 * Note note: We are using an MC68030 - there is no
 * PFLUSHR.
 */
int
free_a_table(a_tbl, relink)
        a_tmgr_t *a_tbl;
        boolean_t relink;
{
        int i, removed_cnt;
        mmu_long_dte_t  *dte;
        mmu_short_dte_t *dtbl;
        b_tmgr_t        *tmgr;

        /*
         * Flush the ATC cache of all cached descriptors derived
         * from this table.
         * XXX - Sun3x does not use 68851's cached table feature
         * flush_atc_crp(mmu_vtop(a_tbl->dte));
         */

        /*
         * Remove any pending cache flushes that were designated
         * for the pmap this A table belongs to.
         * a_tbl->parent->atc_flushq[0] = 0;
         * XXX - Not implemented in sun3x.
         */

        /*
         * All A tables in the system should retain a map for the
         * kernel.  If the table contains any valid descriptors
         * (other than those for the kernel area), invalidate them all,
         * stopping short of the kernel's entries.
         */
        removed_cnt = 0;
        if (a_tbl->at_ecnt) {
                dte = a_tbl->at_dtbl;
                for (i=0; i < MMU_TIA(KERNBASE); i++) {
                        /*
                         * If a table entry points to a valid B table, free
                         * it and its children.
                         */
                        if (MMU_VALID_DT(dte[i])) {
                                /*
                                 * The following block does several things,
                                 * from innermost expression to the
                                 * outermost:
                                 * 1) It extracts the base address of the
                                 *    B table pointed to in the A table
                                 *    entry dte[i].
                                 * 2) It converts this base address into
                                 *    the virtual address it can be
                                 *    accessed with. (all MMU tables point
                                 *    to physical addresses.)
                                 * 3) It finds the corresponding manager
                                 *    structure which manages this MMU table.
                                 * 4) It frees the manager structure.
                                 *    (This frees the MMU table and all
                                 *    child tables. See 'free_b_table' for
                                 *    details.)
                                 */
                                dtbl = mmu_ptov(dte[i].addr.raw);
                                tmgr = mmuB2tmgr(dtbl);
                                removed_cnt += free_b_table(tmgr, TRUE);
                                dte[i].attr.raw = MMU_DT_INVALID;
                        }
                }
                a_tbl->at_ecnt = 0;
        }
        if (relink) {
                a_tbl->at_parent = NULL;
                TAILQ_REMOVE(&a_pool, a_tbl, at_link);
                TAILQ_INSERT_HEAD(&a_pool, a_tbl, at_link);
        }
        return removed_cnt;
}

/* free_b_table                 INTERNAL
 **
 * Unmaps the given B table and all its children from their current
 * mappings.  Returns the number of pages that were invalidated.
 * (For comments, see 'free_a_table()').
 */
int
free_b_table(b_tbl, relink)
        b_tmgr_t *b_tbl;
        boolean_t relink;
{
        int i, removed_cnt;
        mmu_short_dte_t *dte;
        mmu_short_pte_t *dtbl;
        c_tmgr_t        *tmgr;

        removed_cnt = 0;
        if (b_tbl->bt_ecnt) {
                dte = b_tbl->bt_dtbl;
                for (i=0; i < MMU_B_TBL_SIZE; i++) {
                        if (MMU_VALID_DT(dte[i])) {
                                dtbl = mmu_ptov(MMU_DTE_PA(dte[i]));
                                tmgr = mmuC2tmgr(dtbl);
                                removed_cnt += free_c_table(tmgr, TRUE);
                                dte[i].attr.raw = MMU_DT_INVALID;
                        }
                }
                b_tbl->bt_ecnt = 0;
        }

        if (relink) {
                b_tbl->bt_parent = NULL;
                TAILQ_REMOVE(&b_pool, b_tbl, bt_link);
                TAILQ_INSERT_HEAD(&b_pool, b_tbl, bt_link);
        }
        return removed_cnt;
}

/* free_c_table                 INTERNAL
 **
 * Unmaps the given C table from use and returns it to the pool for
 * re-use.  Returns the number of pages that were invalidated.
 *
 * This function preserves any physical page modification information
 * contained in the page descriptors within the C table by calling
 * 'pmap_remove_pte().'
 */
int
free_c_table(c_tbl, relink)
        c_tmgr_t *c_tbl;
        boolean_t relink;
{
        int i, removed_cnt;

        removed_cnt = 0;
        if (c_tbl->ct_ecnt) {
                for (i=0; i < MMU_C_TBL_SIZE; i++) {
                        if (MMU_VALID_DT(c_tbl->ct_dtbl[i])) {
                                pmap_remove_pte(&c_tbl->ct_dtbl[i]);
                                removed_cnt++;
                        }
                }
                c_tbl->ct_ecnt = 0;
        }

        if (relink) {
                c_tbl->ct_parent = NULL;
                TAILQ_REMOVE(&c_pool, c_tbl, ct_link);
                TAILQ_INSERT_HEAD(&c_pool, c_tbl, ct_link);
        }
        return removed_cnt;
}

#if 0
/* free_c_table_novalid                 INTERNAL
 **
 * Frees the given C table manager without checking to see whether
 * or not it contains any valid page descriptors as it is assumed
 * that it does not.
 */
void
free_c_table_novalid(c_tbl)
        c_tmgr_t *c_tbl;
{
        TAILQ_REMOVE(&c_pool, c_tbl, ct_link);
        TAILQ_INSERT_HEAD(&c_pool, c_tbl, ct_link);
        c_tbl->ct_parent->bt_dtbl[c_tbl->ct_pidx].attr.raw = MMU_DT_INVALID;
        c_tbl->ct_parent->bt_ecnt--;
        /*
         * XXX - Should call equiv. of 'free_b_table_novalid' here if
         * we just removed the last entry of the parent B table.
         * But I want to ensure that this will not endanger pmap_enter()
         * with sudden removal of tables it is working with.
         *
         * We should probably add another field to each table, indicating
         * whether or not it is 'locked', ie. in the process of being
         * modified.
         */
        c_tbl->ct_parent = NULL;
}
#endif

/* pmap_remove_pte                      INTERNAL
 **
 * Unmap the given pte and preserve any page modification
 * information by transferring it to the pv head of the
 * physical page it maps to.  This function does not update
 * any reference counts because it is assumed that the calling
 * function will do so.
 */
void
pmap_remove_pte(pte)
        mmu_short_pte_t *pte;
{
        u_short     pv_idx, targ_idx;
        int         s;
        vm_offset_t pa;
        pv_t       *pv;

        pa = MMU_PTE_PA(*pte);
        if (is_managed(pa)) {
                pv = pa2pv(pa);
                targ_idx = pteidx(pte); /* Index of PTE being removed */

                /*
                 * If the PTE being removed is the first (or only) PTE in
                 * the list of PTEs currently mapped to this page, remove the
                 * PTE by changing the index found on the PV head.  Otherwise
                 * a linear search through the list will have to be executed
                 * in order to find the PVE which points to the PTE being
                 * removed, so that it may be modified to point to its new
                 * neighbor.
                 */
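                /*
                 * A sketch of the structure being edited (array indices,
                 * not pointers; PVE_EOL terminates each chain):
                 *
                 *      pv->pv_idx -> pvebase[i].pve_next -> ... -> PVE_EOL
                 */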
                s = splimp();
                pv_idx = pv->pv_idx;    /* Index of first PTE in PV list */
                if (pv_idx == targ_idx) {
                        pv->pv_idx = pvebase[targ_idx].pve_next;
                } else {
                        /*
                         * Find the PV element which points to the target
                         * element.
                         */
                        while (pvebase[pv_idx].pve_next != targ_idx) {
                                pv_idx = pvebase[pv_idx].pve_next;
#ifdef DIAGNOSTIC
                                if (pv_idx == PVE_EOL)
                                        panic("pmap_remove_pte: pv list end!");
#endif
                        }

                        /*
                         * At this point, pv_idx is the index of the PV
                         * element just before the target element in the list.
                         * Unlink the target.
                         */
                        pvebase[pv_idx].pve_next = pvebase[targ_idx].pve_next;
                }
                /*
                 * Save the mod/ref bits of the pte by simply
                 * ORing the entire pte onto the pv_flags member
                 * of the pv structure.
                 * There is no need to use a separate bit pattern
                 * for usage information on the pv head than that
                 * which is used on the MMU ptes.
                 */
                pv->pv_flags |= (u_short) pte->attr.raw;
                splx(s);
        }

        pte->attr.raw = MMU_DT_INVALID;
}
1612
1613 #if 0 /* XXX - I am eliminating this function. -j */
1614 /* pmap_dereference_pte INTERNAL
1615 **
1616 * Update the necessary reference counts in any tables and pmaps to
1617 * reflect the removal of the given pte. Only called when no knowledge of
1618 * the pte's associated pmap is unknown. This only occurs in the PV call
1619 * 'pmap_page_protect()' with a protection of VM_PROT_NONE, which means
1620 * that all references to a given physical page must be removed.
1621 */
1622 void
1623 pmap_dereference_pte(pte)
1624 mmu_short_pte_t *pte;
1625 {
1626 vm_offset_t va;
1627 c_tmgr_t *c_tbl;
1628 pmap_t pmap;
1629
1630 va = pmap_get_pteinfo(pte, &pmap, &c_tbl);
1631 /*
1632 * Flush the translation cache of the page mapped by the PTE, should
1633 * it prove to be in the current pmap. Kernel mappings appear in
1634 * all address spaces, so they always should be flushed
1635 */
1636 if (pmap == pmap_kernel() || pmap == current_pmap())
1637 TBIS(va);
1638
1639 /*
1640 * If the mapping belongs to a user map, update the necessary
1641 * reference counts in the table manager. XXX - It would be
1642 * much easier to keep the resident count in the c_tmgr_t -gwr
1643 */
1644 if (pmap != pmap_kernel()) {
1645 /*
1646 * Most of the situations in which pmap_dereference_pte() is
1647 * called are usually temporary removals of a mapping. Often
1648 * the mapping is reinserted shortly afterwards. If the parent
1649 * C table's valid entry count reaches zero as a result of
1650 * removing this mapping, we could return it to the free pool,
1651 * but we leave it alone because it is likely to be used as
1652 * stated above.
1653 */
1654 c_tbl->ct_ecnt--;
1655 pmap->pm_stats.resident_count--;
1656 }
1657 }
1658 #endif /* 0 - function elimination */
1659
1660 /* pmap_stroll INTERNAL
1661 **
1662 * Retrieve the addresses of all table managers involved in the mapping of
1663 * the given virtual address. If the table walk completed successfully,
1664 * return TRUE. If it was only partially successful, return FALSE.
1665 * The table walk performed by this function is important to many other
1666 * functions in this module.
1667 *
1668 * Note: This function ought to be easier to read.
1669 */
1670 boolean_t
1671 pmap_stroll(pmap, va, a_tbl, b_tbl, c_tbl, pte, a_idx, b_idx, pte_idx)
1672 pmap_t pmap;
1673 vm_offset_t va;
1674 a_tmgr_t **a_tbl;
1675 b_tmgr_t **b_tbl;
1676 c_tmgr_t **c_tbl;
1677 mmu_short_pte_t **pte;
1678 int *a_idx, *b_idx, *pte_idx;
1679 {
1680 mmu_long_dte_t *a_dte; /* A: long descriptor table */
1681 mmu_short_dte_t *b_dte; /* B: short descriptor table */
1682
1683 if (pmap == pmap_kernel())
1684 return FALSE;
1685
1686 /* Does the given pmap have its own A table? */
1687 *a_tbl = pmap->pm_a_tmgr;
1688 if (*a_tbl == NULL)
1689 return FALSE; /* No. Return unknown. */
1690 /* Does the A table have a valid B table
1691 * under the corresponding table entry?
1692 */
1693 *a_idx = MMU_TIA(va);
1694 a_dte = &((*a_tbl)->at_dtbl[*a_idx]);
1695 if (!MMU_VALID_DT(*a_dte))
1696 return FALSE; /* No. Return unknown. */
1697 /* Yes. Extract B table from the A table. */
1698 *b_tbl = mmuB2tmgr(mmu_ptov(a_dte->addr.raw));
1699 /* Does the B table have a valid C table
1700 * under the corresponding table entry?
1701 */
1702 *b_idx = MMU_TIB(va);
1703 b_dte = &((*b_tbl)->bt_dtbl[*b_idx]);
1704 if (!MMU_VALID_DT(*b_dte))
1705 return FALSE; /* No. Return unknown. */
1706 /* Yes. Extract C table from the B table. */
1707 *c_tbl = mmuC2tmgr(mmu_ptov(MMU_DTE_PA(*b_dte)));
1708 *pte_idx = MMU_TIC(va);
1709 *pte = &((*c_tbl)->ct_dtbl[*pte_idx]);
1710
1711 return TRUE;
1712 }
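/*
 * Editor's example (a sketch, not part of the original source): a
 * typical caller of pmap_stroll() hands in storage for every level of
 * the walk and only trusts the outputs when TRUE is returned. The
 * function name below is hypothetical; compare pmap_extract() later
 * in this file for a real caller.
 */
#if 0 /* illustrative only */
boolean_t
pmap_va_is_valid(pmap, va)
pmap_t pmap;
vm_offset_t va;
{
int a_idx, b_idx, pte_idx;
a_tmgr_t *a_tbl;
b_tmgr_t *b_tbl;
c_tmgr_t *c_tbl;
mmu_short_pte_t *pte;

/* FALSE means the walk stopped at a missing table. */
if (pmap_stroll(pmap, va, &a_tbl, &b_tbl, &c_tbl, &pte,
&a_idx, &b_idx, &pte_idx) == FALSE)
return FALSE;

/* The walk reached a PTE slot; the PTE itself may still be invalid. */
return MMU_VALID_DT(*pte) ? TRUE : FALSE;
}
#endif /* 0 */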
1713
1714 /* pmap_enter INTERFACE
1715 **
1716 * Called by the kernel to map a virtual address
1717 * to a physical address in the given process map.
1718 *
1719 * Note: this function should apply an exclusive lock
1720 * on the pmap system for its duration. (it certainly
1721 * would save my hair!!)
1722 * This function ought to be easier to read.
1723 */
1724 void
1725 pmap_enter(pmap, va, pa, prot, wired)
1726 pmap_t pmap;
1727 vm_offset_t va;
1728 vm_offset_t pa;
1729 vm_prot_t prot;
1730 boolean_t wired;
1731 {
1732 boolean_t insert, managed; /* Marks the need for PV insertion.*/
1733 u_short nidx; /* PV list index */
1734 int s; /* Used for splimp()/splx() */
1735 int flags; /* Mapping flags. eg. Cache inhibit */
1736 u_int a_idx, b_idx, pte_idx; /* table indices */
1737 a_tmgr_t *a_tbl; /* A: long descriptor table manager */
1738 b_tmgr_t *b_tbl; /* B: short descriptor table manager */
1739 c_tmgr_t *c_tbl; /* C: short page table manager */
1740 mmu_long_dte_t *a_dte; /* A: long descriptor table */
1741 mmu_short_dte_t *b_dte; /* B: short descriptor table */
1742 mmu_short_pte_t *c_pte; /* C: short page descriptor table */
1743 pv_t *pv; /* pv list head */
1744 enum {NONE, NEWA, NEWB, NEWC} llevel; /* used at end */
1745
1746 if (pmap == NULL)
1747 return;
1748 if (pmap == pmap_kernel()) {
1749 pmap_enter_kernel(va, pa, prot);
1750 return;
1751 }
1752
1753 flags = (pa & ~MMU_PAGE_MASK);
1754 pa &= MMU_PAGE_MASK;
1755
1756 /*
1757 * Determine if the physical address being mapped is managed.
1758 * If it isn't, the mapping should be cache inhibited. (This is
1759 * applied later in the function.) XXX - Why non-cached? -gwr
1760 */
1761 if ((managed = is_managed(pa)) == FALSE)
1762 flags |= PMAP_NC;
1763
1764 /*
1765 * For user mappings we walk along the MMU tables of the given
1766 * pmap, reaching a PTE which describes the virtual page being
1767 * mapped or changed. If any level of the walk ends in an invalid
1768 * entry, a table must be allocated and the entry must be updated
1769 * to point to it.
1770 * There is a bit of confusion as to whether this code must be
1771 * re-entrant. For now we will assume it is. To support
1772 * re-entrancy we must unlink tables from the table pool before
1773 * we assume we may use them. Tables are re-linked into the pool
1774 * when we are finished with them at the end of the function.
1775 * But I don't feel like doing that until we have proof that this
1776 * needs to be re-entrant.
1777 * 'llevel' records which tables need to be relinked.
1778 */
1779 llevel = NONE;
1780
1781 /*
1782 * Step 1 - Retrieve the A table from the pmap. If it has no
1783 * A table, allocate a new one from the available pool.
1784 */
1785
1786 a_tbl = pmap->pm_a_tmgr;
1787 if (a_tbl == NULL) {
1788 /*
1789 * This pmap does not currently have an A table. Allocate
1790 * a new one.
1791 */
1792 a_tbl = get_a_table();
1793 a_tbl->at_parent = pmap;
1794
1795 /*
1796 * Assign this new A table to the pmap, and calculate its
1797 * physical address so that loadcrp() can be used to make
1798 * the table active.
1799 */
1800 pmap->pm_a_tmgr = a_tbl;
1801 pmap->pm_a_phys = mmu_vtop(a_tbl->at_dtbl);
1802
1803 /*
1804 * If the process receiving a new A table is the current
1805 * process, we are responsible for setting the MMU so that
1806 * it becomes the current address space. This only adds
1807 * new mappings, so no need to flush anything.
1808 */
1809 if (pmap == current_pmap()) {
1810 kernel_crp.rp_addr = pmap->pm_a_phys;
1811 loadcrp(&kernel_crp);
1812 }
1813
1814 if (!wired)
1815 llevel = NEWA;
1816 } else {
1817 /*
1818 * Use the A table already allocated for this pmap.
1819 * Unlink it from the A table pool if necessary.
1820 */
1821 if (wired && !a_tbl->at_wcnt)
1822 TAILQ_REMOVE(&a_pool, a_tbl, at_link);
1823 }
1824
1825 /*
1826 * Step 2 - Walk into the B table. If there is no valid B table,
1827 * allocate one.
1828 */
1829
1830 a_idx = MMU_TIA(va); /* Calculate the TIA of the VA. */
1831 a_dte = &a_tbl->at_dtbl[a_idx]; /* Retrieve descriptor from table */
1832 if (MMU_VALID_DT(*a_dte)) { /* Is the descriptor valid? */
1833 /* The descriptor is valid. Use the B table it points to. */
1834 /*************************************
1835 * a_idx *
1836 * v *
1837 * a_tbl -> +-+-+-+-+-+-+-+-+-+-+-+- *
1838 * | | | | | | | | | | | | *
1839 * +-+-+-+-+-+-+-+-+-+-+-+- *
1840 * | *
1841 * \- b_tbl -> +-+- *
1842 * | | *
1843 * +-+- *
1844 *************************************/
1845 b_dte = mmu_ptov(a_dte->addr.raw);
1846 b_tbl = mmuB2tmgr(b_dte);
1847
1848 /*
1849 * If the requested mapping must be wired, but this table
1850 * being used to map it is not, the table must be removed
1851 * from the available pool and its wired entry count
1852 * incremented.
1853 */
1854 if (wired && !b_tbl->bt_wcnt) {
1855 TAILQ_REMOVE(&b_pool, b_tbl, bt_link);
1856 a_tbl->at_wcnt++;
1857 }
1858 } else {
1859 /* The descriptor is invalid. Allocate a new B table. */
1860 b_tbl = get_b_table();
1861
1862 /* Point the parent A table descriptor to this new B table. */
1863 a_dte->addr.raw = mmu_vtop(b_tbl->bt_dtbl);
1864 a_dte->attr.raw = MMU_LONG_DTE_LU | MMU_DT_SHORT;
1865 a_tbl->at_ecnt++; /* Update parent's valid entry count */
1866
1867 /* Create the necessary back references to the parent table */
1868 b_tbl->bt_parent = a_tbl;
1869 b_tbl->bt_pidx = a_idx;
1870
1871 /*
1872 * If this table is to be wired, make sure the parent A table
1873 * wired count is updated to reflect that it has another wired
1874 * entry.
1875 */
1876 if (wired)
1877 a_tbl->at_wcnt++;
1878 else if (llevel == NONE)
1879 llevel = NEWB;
1880 }
1881
1882 /*
1883 * Step 3 - Walk into the C table, if there is no valid C table,
1884 * allocate one.
1885 */
1886
1887 b_idx = MMU_TIB(va); /* Calculate the TIB of the VA */
1888 b_dte = &b_tbl->bt_dtbl[b_idx]; /* Retrieve descriptor from table */
1889 if (MMU_VALID_DT(*b_dte)) { /* Is the descriptor valid? */
1890 /* The descriptor is valid. Use the C table it points to. */
1891 /**************************************
1892 * c_idx *
1893 * | v *
1894 * \- b_tbl -> +-+-+-+-+-+-+-+-+-+-+- *
1895 * | | | | | | | | | | | *
1896 * +-+-+-+-+-+-+-+-+-+-+- *
1897 * | *
1898 * \- c_tbl -> +-+-- *
1899 * | | | *
1900 * +-+-- *
1901 **************************************/
1902 c_pte = mmu_ptov(MMU_PTE_PA(*b_dte));
1903 c_tbl = mmuC2tmgr(c_pte);
1904
1905 /* If the mapping is wired but this C table is not, wire the table. */
1906 if (wired && !c_tbl->ct_wcnt) {
1907 TAILQ_REMOVE(&c_pool, c_tbl, ct_link);
1908 b_tbl->bt_wcnt++;
1909 }
1910 } else {
1911 /* The descriptor is invalid. Allocate a new C table. */
1912 c_tbl = get_c_table();
1913
1914 /* Point the parent B table descriptor to this new C table. */
1915 b_dte->attr.raw = mmu_vtop(c_tbl->ct_dtbl);
1916 b_dte->attr.raw |= MMU_DT_SHORT;
1917 b_tbl->bt_ecnt++; /* Update parent's valid entry count */
1918
1919 /* Create the necessary back references to the parent table */
1920 c_tbl->ct_parent = b_tbl;
1921 c_tbl->ct_pidx = b_idx;
1922
1923 /*
1924 * If this table is to be wired, make sure the parent B table
1925 * wired count is updated to reflect that it has another wired
1926 * entry.
1927 */
1928 if (wired)
1929 b_tbl->bt_wcnt++;
1930 else if (llevel == NONE)
1931 llevel = NEWC;
1932 }
1933
1934 /*
1935 * Step 4 - Deposit a page descriptor (PTE) into the appropriate
1936 * slot of the C table, describing the PA to which the VA is mapped.
1937 */
1938
1939 pte_idx = MMU_TIC(va);
1940 c_pte = &c_tbl->ct_dtbl[pte_idx];
1941 if (MMU_VALID_DT(*c_pte)) { /* Is the entry currently valid? */
1942 /*
1943 * The PTE is currently valid. This particular call
1944 * is just a synonym for one (or more) of the following
1945 * operations:
1946 * change protection of a page
1947 * change wiring status of a page
1948 * remove the mapping of a page
1949 *
1950 * XXX - Semi critical: This code should unwire the PTE
1951 * and, possibly, associated parent tables if this is a
1952 * change wiring operation. Currently it does not.
1953 *
1954 * This may be ok if pmap_change_wiring() is the only
1955 * interface used to UNWIRE a page.
1956 */
1957
1958 /* First check if this is a wiring operation. */
1959 if (wired && (c_pte->attr.raw & MMU_SHORT_PTE_WIRED)) {
1960 /*
1961 * The PTE is already wired. To prevent it from being
1962 * counted as a new wiring operation, reset the 'wired'
1963 * variable.
1964 */
1965 wired = FALSE;
1966 }
1967
1968 /* Is the new address the same as the old? */
1969 if (MMU_PTE_PA(*c_pte) == pa) {
1970 /*
1971 * Yes, mark that it does not need to be reinserted
1972 * into the PV list.
1973 */
1974 insert = FALSE;
1975
1976 /*
1977 * Clear all but the modified, referenced and wired
1978 * bits on the PTE.
1979 */
1980 c_pte->attr.raw &= (MMU_SHORT_PTE_M
1981 | MMU_SHORT_PTE_USED | MMU_SHORT_PTE_WIRED);
1982 } else {
1983 /* No, remove the old entry */
1984 pmap_remove_pte(c_pte);
1985 insert = TRUE;
1986 }
1987
1988 /*
1989 * TLB flush is only necessary if modifying current map.
1990 * However, in pmap_enter(), the pmap almost always IS
1991 * the current pmap, so don't even bother to check.
1992 */
1993 TBIS(va);
1994 } else {
1995 /*
1996 * The PTE is invalid. Increment the valid entry count in
1997 * the C table manager to reflect the addition of a new entry.
1998 */
1999 c_tbl->ct_ecnt++;
2000
2001 /* XXX - temporarily make sure the PTE is cleared. */
2002 c_pte->attr.raw = 0;
2003
2004 /* It will also need to be inserted into the PV list. */
2005 insert = TRUE;
2006 }
2007
2008 /*
2009 * If the page is changing from unwired to wired status, set an unused bit
2010 * within the PTE to indicate that it is wired. Also increment the
2011 * wired entry count in the C table manager.
2012 */
2013 if (wired) {
2014 c_pte->attr.raw |= MMU_SHORT_PTE_WIRED;
2015 c_tbl->ct_wcnt++;
2016 }
2017
2018 /*
2019 * Map the page, being careful to preserve modify/reference/wired
2020 * bits. At this point it is assumed that the PTE either has no bits
2021 * set, or if there are set bits, they are only modified, referenced or
2022 * wired bits. If not, the following statement will cause erratic
2023 * behavior.
2024 */
2025 #ifdef PMAP_DEBUG
2026 if (c_pte->attr.raw & ~(MMU_SHORT_PTE_M |
2027 MMU_SHORT_PTE_USED | MMU_SHORT_PTE_WIRED)) {
2028 printf("pmap_enter: junk left in PTE at %p\n", c_pte);
2029 Debugger();
2030 }
2031 #endif
2032 c_pte->attr.raw |= ((u_long) pa | MMU_DT_PAGE);
2033
2034 /*
2035 * If the mapping should be read-only, set the write protect
2036 * bit in the PTE.
2037 */
2038 if (!(prot & VM_PROT_WRITE))
2039 c_pte->attr.raw |= MMU_SHORT_PTE_WP;
2040
2041 /*
2042 * If the mapping should be cache inhibited (indicated by the flag
2043 * bits found in the low-order bits of the physical address),
2044 * mark the PTE as a cache inhibited page.
2045 */
2046 if (flags & PMAP_NC)
2047 c_pte->attr.raw |= MMU_SHORT_PTE_CI;
2048
2049 /*
2050 * If the physical address being mapped is managed by the PV
2051 * system then link the pte into the list of pages mapped to that
2052 * address.
2053 */
2054 if (insert && managed) {
2055 pv = pa2pv(pa);
2056 nidx = pteidx(c_pte);
2057
2058 s = splimp();
2059 pvebase[nidx].pve_next = pv->pv_idx;
2060 pv->pv_idx = nidx;
2061 splx(s);
2062 }
2063
2064 /* Move any allocated tables back into the active pool. */
2065
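/*
 * Editor's note (added commentary): the fall-through below is
 * deliberate. A newly allocated A table implies newly allocated B
 * and C tables beneath it, so NEWA re-links all three tables, NEWB
 * the lower two, and NEWC only the C table.
 */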
2066 switch (llevel) {
2067 case NEWA:
2068 TAILQ_INSERT_TAIL(&a_pool, a_tbl, at_link);
2069 /* FALLTHROUGH */
2070 case NEWB:
2071 TAILQ_INSERT_TAIL(&b_pool, b_tbl, bt_link);
2072 /* FALLTHROUGH */
2073 case NEWC:
2074 TAILQ_INSERT_TAIL(&c_pool, c_tbl, ct_link);
2075 /* FALLTHROUGH */
2076 default:
2077 break;
2078 }
2079 }
2080
2081 /* pmap_enter_kernel INTERNAL
2082 **
2083 * Map the given virtual address to the given physical address within the
2084 * kernel address space. This function exists because the kernel map does
2085 * not do dynamic table allocation. It consists of a contiguous array of ptes
2086 * and can be edited directly without the need to walk through any tables.
2087 *
2088 * XXX: "Danger, Will Robinson!"
2089 * Note that the kernel should never take a fault on any page
2090 * between [ KERNBASE .. virtual_avail ] and this is checked in
2091 * trap.c for kernel-mode MMU faults. This means that mappings
2092 * created in that range must be implicitly wired. -gwr
2093 */
2094 void
2095 pmap_enter_kernel(va, pa, prot)
2096 vm_offset_t va;
2097 vm_offset_t pa;
2098 vm_prot_t prot;
2099 {
2100 boolean_t was_valid, insert;
2101 u_short pte_idx, pv_idx;
2102 int s, flags;
2103 mmu_short_pte_t *pte;
2104 pv_t *pv;
2105 vm_offset_t old_pa;
2106
2107 flags = (pa & ~MMU_PAGE_MASK);
2108 pa &= MMU_PAGE_MASK;
2109
2110 /*
2111 * Calculate the index of the PTE being modified.
2112 */
2113 pte_idx = (u_long) sun3x_btop(va - KERNBASE);
2114
2115 /* XXX - This array is traditionally named "Sysmap" */
2116 pte = &kernCbase[pte_idx];
2117
2118 s = splimp();
2119 if (MMU_VALID_DT(*pte)) {
2120 was_valid = TRUE;
2121 /*
2122 * If the PTE is already mapped to an address and it differs
2123 * from the address requested, unlink it from the PV list.
2124 *
2125 * This only applies to mappings within virtual_avail
2126 * and VM_MAX_KERNEL_ADDRESS. All others are not requests
2127 * from the VM system and should not be part of the PV system.
2128 */
2129 if ((va >= virtual_avail) && (va < VM_MAX_KERNEL_ADDRESS)) {
2130 old_pa = MMU_PTE_PA(*pte);
2131 if (pa != old_pa) {
2132 if (is_managed(old_pa)) {
2133 /* XXX - Make this into a function call? */
2134 pv = pa2pv(old_pa);
2135 pv_idx = pv->pv_idx;
2136 if (pv_idx == pte_idx) {
2137 pv->pv_idx = pvebase[pte_idx].pve_next;
2138 } else {
2139 while (pvebase[pv_idx].pve_next != pte_idx)
2140 pv_idx = pvebase[pv_idx].pve_next;
2141 pvebase[pv_idx].pve_next =
2142 pvebase[pte_idx].pve_next;
2143 }
2144 /* Save modified/reference bits */
2145 pv->pv_flags |= (u_short) pte->attr.raw;
2146 }
2147 if (is_managed(pa))
2148 insert = TRUE;
2149 else
2150 insert = FALSE;
2151 /*
2152 * Clear out any old bits in the PTE.
2153 */
2154 pte->attr.raw = MMU_DT_INVALID;
2155 } else {
2156 /*
2157 * Old PA and new PA are the same. No need to relink
2158 * the mapping within the PV list.
2159 */
2160 insert = FALSE;
2161
2162 /*
2163 * Save any mod/ref bits on the PTE.
2164 */
2165 pte->attr.raw &= (MMU_SHORT_PTE_USED|MMU_SHORT_PTE_M);
2166 }
2167 } else {
2168 /*
2169 * If the VA lies below virtual_avail or beyond
2170 * VM_MAX_KERNEL_ADDRESS, it is not a request by the VM
2171 * system and hence does not need to be linked into the PV
2172 * system.
2173 */
2174 insert = FALSE;
2175 pte->attr.raw = MMU_DT_INVALID;
2176 }
2177 } else {
2178 pte->attr.raw = MMU_DT_INVALID;
2179 was_valid = FALSE;
2180 if ((va >= virtual_avail) && (va < VM_MAX_KERNEL_ADDRESS)) {
2181 if (is_managed(pa))
2182 insert = TRUE;
2183 else
2184 insert = FALSE;
2185 } else
2186 insert = FALSE;
2187 }
2188
2189 /*
2190 * Map the page. Being careful to preserve modified/referenced bits
2191 * on the PTE.
2192 */
2193 pte->attr.raw |= (pa | MMU_DT_PAGE);
2194
2195 if (!(prot & VM_PROT_WRITE)) /* If access should be read-only */
2196 pte->attr.raw |= MMU_SHORT_PTE_WP;
2197 if (flags & PMAP_NC)
2198 pte->attr.raw |= MMU_SHORT_PTE_CI;
2199 if (was_valid)
2200 TBIS(va);
2201
2202 /*
2203 * Insert the PTE into the PV system, if need be.
2204 */
2205 if (insert) {
2206 pv = pa2pv(pa);
2207 pvebase[pte_idx].pve_next = pv->pv_idx;
2208 pv->pv_idx = pte_idx;
2209 }
2210 splx(s);
2211
2212 }
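/*
 * Editor's example (hedged): because the kernel map is the flat
 * kernCbase[] PTE array, the PTE for a kernel VA is located by pure
 * arithmetic rather than a table walk. Assuming the 8K page size
 * implied by the 13-bit page offset, va == KERNBASE + 0x4000 would
 * give pte_idx == 2 and select kernCbase[2].
 */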
2213
2214 /* pmap_protect INTERFACE
2215 **
2216 * Apply the given protection to the given virtual address range within
2217 * the given map.
2218 *
2219 * It is ok for the protection applied to be stronger than what is
2220 * specified. We use this to our advantage when the given map has no
2221 * mapping for the virtual address. By skipping a page when this
2222 * is discovered, we are effectively applying a protection of VM_PROT_NONE,
2223 * and therefore do not need to map the page just to apply a protection
2224 * code. Only pmap_enter() needs to create new mappings if they do not exist.
2225 *
2226 * XXX - This function could be sped up by using pmap_stroll() for initial
2227 * setup, and then walking through the tables manually in the for() loop.
2228 */
2229 void
2230 pmap_protect(pmap, startva, endva, prot)
2231 pmap_t pmap;
2232 vm_offset_t startva, endva;
2233 vm_prot_t prot;
2234 {
2235 boolean_t iscurpmap;
2236 int a_idx, b_idx, c_idx;
2237 a_tmgr_t *a_tbl;
2238 b_tmgr_t *b_tbl;
2239 c_tmgr_t *c_tbl;
2240 mmu_short_pte_t *pte;
2241
2242 if (pmap == NULL)
2243 return;
2244 if (pmap == pmap_kernel()) {
2245 pmap_protect_kernel(startva, endva, prot);
2246 return;
2247 }
2248
2249 /*
2250 * In this particular pmap implementation, there are only three
2251 * types of memory protection: 'all' (read/write/execute),
2252 * 'read-only' (read/execute) and 'none' (no mapping.)
2253 * It is not possible for us to treat 'executable' as a separate
2254 * protection type. Therefore, protection requests that seek to
2255 * remove execute permission while retaining read or write, and those
2256 * that make little sense (write-only for example) are ignored.
2257 */
2258 switch (prot) {
2259 case VM_PROT_NONE:
2260 /*
2261 * A request to apply the protection code of
2262 * 'VM_PROT_NONE' is a synonym for pmap_remove().
2263 */
2264 pmap_remove(pmap, startva, endva);
2265 return;
2266 case VM_PROT_EXECUTE:
2267 case VM_PROT_READ:
2268 case VM_PROT_READ|VM_PROT_EXECUTE:
2269 /* continue */
2270 break;
2271 case VM_PROT_WRITE:
2272 case VM_PROT_WRITE|VM_PROT_READ:
2273 case VM_PROT_WRITE|VM_PROT_EXECUTE:
2274 case VM_PROT_ALL:
2275 /* None of these should happen in a sane system. */
2276 return;
2277 }
2278
2279 /*
2280 * If the pmap has no A table, it has no mappings and therefore
2281 * there is nothing to protect.
2282 */
2283 if ((a_tbl = pmap->pm_a_tmgr) == NULL)
2284 return;
2285
2286 a_idx = MMU_TIA(startva);
2287 b_idx = MMU_TIB(startva);
2288 c_idx = MMU_TIC(startva);
2289 b_tbl = NULL; c_tbl = NULL;
2290
2291 iscurpmap = (pmap == current_pmap());
2292 while (startva < endva) {
2293 if (b_tbl || MMU_VALID_DT(a_tbl->at_dtbl[a_idx])) {
2294 if (b_tbl == NULL) {
2295 b_tbl = (b_tmgr_t *) a_tbl->at_dtbl[a_idx].addr.raw;
2296 b_tbl = mmu_ptov((vm_offset_t) b_tbl);
2297 b_tbl = mmuB2tmgr((mmu_short_dte_t *) b_tbl);
2298 }
2299 if (c_tbl || MMU_VALID_DT(b_tbl->bt_dtbl[b_idx])) {
2300 if (c_tbl == NULL) {
2301 c_tbl = (c_tmgr_t *) MMU_DTE_PA(b_tbl->bt_dtbl[b_idx]);
2302 c_tbl = mmu_ptov((vm_offset_t) c_tbl);
2303 c_tbl = mmuC2tmgr((mmu_short_pte_t *) c_tbl);
2304 }
2305 if (MMU_VALID_DT(c_tbl->ct_dtbl[c_idx])) {
2306 pte = &c_tbl->ct_dtbl[c_idx];
2307 /* make the mapping read-only */
2308 pte->attr.raw |= MMU_SHORT_PTE_WP;
2309 /*
2310 * If we just modified the current address space,
2311 * flush any translations for the modified page from
2312 * the translation cache and any data from it in the
2313 * data cache.
2314 */
2315 if (iscurpmap)
2316 TBIS(startva);
2317 }
2318 startva += NBPG;
2319
2320 if (++c_idx >= MMU_C_TBL_SIZE) { /* exceeded C table? */
2321 c_tbl = NULL;
2322 c_idx = 0;
2323 if (++b_idx >= MMU_B_TBL_SIZE) { /* exceeded B table? */
2324 b_tbl = NULL;
2325 b_idx = 0;
2326 }
2327 }
2328 } else { /* C table wasn't valid */
2329 c_tbl = NULL;
2330 c_idx = 0;
2331 startva += MMU_TIB_RANGE;
2332 if (++b_idx >= MMU_B_TBL_SIZE) { /* exceeded B table? */
2333 b_tbl = NULL;
2334 b_idx = 0;
2335 }
2336 } /* C table */
2337 } else { /* B table wasn't valid */
2338 b_tbl = NULL;
2339 b_idx = 0;
2340 startva += MMU_TIA_RANGE;
2341 a_idx++;
2342 } /* B table */
2343 }
2344 }
2345
2346 /* pmap_protect_kernel INTERNAL
2347 **
2348 * Apply the given protection code to a kernel address range.
2349 */
2350 void
2351 pmap_protect_kernel(startva, endva, prot)
2352 vm_offset_t startva, endva;
2353 vm_prot_t prot;
2354 {
2355 vm_offset_t va;
2356 mmu_short_pte_t *pte;
2357
2358 pte = &kernCbase[(unsigned long) sun3x_btop(startva - KERNBASE)];
2359 for (va = startva; va < endva; va += NBPG, pte++) {
2360 if (MMU_VALID_DT(*pte)) {
2361 switch (prot) {
2362 case VM_PROT_ALL:
2363 break;
2364 case VM_PROT_EXECUTE:
2365 case VM_PROT_READ:
2366 case VM_PROT_READ|VM_PROT_EXECUTE:
2367 pte->attr.raw |= MMU_SHORT_PTE_WP;
2368 break;
2369 case VM_PROT_NONE:
2370 /* this is an alias for 'pmap_remove_kernel' */
2371 pmap_remove_pte(pte);
2372 break;
2373 default:
2374 break;
2375 }
2376 /*
2377 * since this is the kernel, immediately flush any cached
2378 * descriptors for this address.
2379 */
2380 TBIS(va);
2381 }
2382 }
2383 }
2384
2385 /* pmap_change_wiring INTERFACE
2386 **
2387 * Changes the wiring of the specified page.
2388 *
2389 * This function is called from vm_fault.c to unwire
2390 * a mapping. It really should be called 'pmap_unwire'
2391 * because it is never asked to do anything but remove
2392 * wirings.
2393 */
2394 void
2395 pmap_change_wiring(pmap, va, wire)
2396 pmap_t pmap;
2397 vm_offset_t va;
2398 boolean_t wire;
2399 {
2400 int a_idx, b_idx, c_idx;
2401 a_tmgr_t *a_tbl;
2402 b_tmgr_t *b_tbl;
2403 c_tmgr_t *c_tbl;
2404 mmu_short_pte_t *pte;
2405
2406 /* Kernel mappings always remain wired. */
2407 if (pmap == pmap_kernel())
2408 return;
2409
2410 #ifdef PMAP_DEBUG
2411 if (wire == TRUE)
2412 panic("pmap_change_wiring: wire requested.");
2413 #endif
2414
2415 /*
2416 * Walk through the tables. If the walk terminates without
2417 * a valid PTE then the address wasn't wired in the first place.
2418 * Return immediately.
2419 */
2420 if (pmap_stroll(pmap, va, &a_tbl, &b_tbl, &c_tbl, &pte, &a_idx,
2421 &b_idx, &c_idx) == FALSE)
2422 return;
2423
2424
2425 /* Is the PTE wired? If not, return. */
2426 if (!(pte->attr.raw & MMU_SHORT_PTE_WIRED))
2427 return;
2428
2429 /* Remove the wiring bit. */
2430 pte->attr.raw &= ~(MMU_SHORT_PTE_WIRED);
2431
2432 /*
2433 * Decrement the wired entry count in the C table.
2434 * If it reaches zero the following things happen:
2435 * 1. The table no longer has any wired entries and is considered
2436 * unwired.
2437 * 2. It is placed on the available queue.
2438 * 3. The parent table's wired entry count is decremented.
2439 * 4. If it reaches zero, this process repeats at step 1 and
2440 * stops after reaching the A table.
2441 */
2442 if (--c_tbl->ct_wcnt == 0) {
2443 TAILQ_INSERT_TAIL(&c_pool, c_tbl, ct_link);
2444 if (--b_tbl->bt_wcnt == 0) {
2445 TAILQ_INSERT_TAIL(&b_pool, b_tbl, bt_link);
2446 if (--a_tbl->at_wcnt == 0) {
2447 TAILQ_INSERT_TAIL(&a_pool, a_tbl, at_link);
2448 }
2449 }
2450 }
2451 }
2452
2453 /* pmap_pageable INTERFACE
2454 **
2455 * Make the specified range of addresses within the given pmap
2456 * 'pageable' or 'not-pageable'. A non-pageable page must not cause
2457 * any faults when referenced; a pageable page may.
2458 *
2459 * This routine is only advisory. The VM system will call pmap_enter()
2460 * to wire or unwire pages that are going to be made pageable before calling
2461 * this function. By the time this routine is called, everything that needs
2462 * to be done has already been done.
2463 */
2464 void
2465 pmap_pageable(pmap, start, end, pageable)
2466 pmap_t pmap;
2467 vm_offset_t start, end;
2468 boolean_t pageable;
2469 {
2470 /* not implemented. */
2471 }
2472
2473 /* pmap_copy INTERFACE
2474 **
2475 * Copy the mappings of a range of addresses in one pmap, into
2476 * the destination address of another.
2477 *
2478 * This routine is advisory. Should we one day decide that MMU tables
2479 * may be shared by more than one pmap, this function should be used to
2480 * link them together. Until that day however, we do nothing.
2481 */
2482 void
2483 pmap_copy(pmap_a, pmap_b, dst, len, src)
2484 pmap_t pmap_a, pmap_b;
2485 vm_offset_t dst;
2486 vm_size_t len;
2487 vm_offset_t src;
2488 {
2489 /* not implemented. */
2490 }
2491
2492 /* pmap_copy_page INTERFACE
2493 **
2494 * Copy the contents of one physical page into another.
2495 *
2496 * This function makes use of two virtual pages allocated in pmap_bootstrap()
2497 * to map the two specified physical pages into the kernel address space. It
2498 * then uses bcopy() to copy one into the other.
2499 *
2500 * Note: We could use the transparent translation registers to make the
2501 * mappings. If we do so, be sure to disable interrupts before using them.
2502 */
2503 void
2504 pmap_copy_page(src, dst)
2505 vm_offset_t src, dst;
2506 {
2507 PMAP_LOCK();
2508 if (tmp_vpages_inuse)
2509 panic("pmap_copy_page: temporary vpages are in use.");
2510 tmp_vpages_inuse++;
2511
2512 /* XXX - Use non-cached mappings to avoid cache pollution? */
2513 pmap_enter_kernel(tmp_vpages[0], src, VM_PROT_READ);
2514 pmap_enter_kernel(tmp_vpages[1], dst, VM_PROT_READ|VM_PROT_WRITE);
2515 copypage((char *) tmp_vpages[0], (char *) tmp_vpages[1]);
2516
2517 tmp_vpages_inuse--;
2518 PMAP_UNLOCK();
2519 }
2520
2521 /* pmap_zero_page INTERFACE
2522 **
2523 * Zero the contents of the specified physical page.
2524 *
2525 * Uses one of the virtual pages allocated in pmap_bootstrap()
2526 * to map the specified page into the kernel address space. Then uses
2527 * bzero() to zero out the page.
2528 */
2529 void
2530 pmap_zero_page(pa)
2531 vm_offset_t pa;
2532 {
2533 PMAP_LOCK();
2534 if (tmp_vpages_inuse)
2535 panic("pmap_zero_page: temporary vpages are in use.");
2536 tmp_vpages_inuse++;
2537
2538 pmap_enter_kernel(tmp_vpages[0], pa, VM_PROT_READ|VM_PROT_WRITE);
2539 zeropage((char *) tmp_vpages[0]);
2540
2541 tmp_vpages_inuse--;
2542 PMAP_UNLOCK();
2543 }
2544
2545 /* pmap_collect INTERFACE
2546 **
2547 * Called from the VM system when we are about to swap out
2548 * the process using this pmap. This should give up any
2549 * resources held here, including all its MMU tables.
2550 */
2551 void
2552 pmap_collect(pmap)
2553 pmap_t pmap;
2554 {
2555 /* XXX - todo... */
2556 }
2557
2558 /* pmap_create INTERFACE
2559 **
2560 * Create and return a pmap structure.
2561 */
2562 pmap_t
2563 pmap_create(size)
2564 vm_size_t size;
2565 {
2566 pmap_t pmap;
2567
2568 if (size)
2569 return NULL;
2570
2571 pmap = (pmap_t) malloc(sizeof(struct pmap), M_VMPMAP, M_WAITOK);
2572 pmap_pinit(pmap);
2573
2574 return pmap;
2575 }
2576
2577 /* pmap_pinit INTERNAL
2578 **
2579 * Initialize a pmap structure.
2580 */
2581 void
2582 pmap_pinit(pmap)
2583 pmap_t pmap;
2584 {
2585 bzero(pmap, sizeof(struct pmap));
2586 pmap->pm_a_tmgr = NULL;
2587 pmap->pm_a_phys = kernAphys;
2588 }
2589
2590 /* pmap_release INTERFACE
2591 **
2592 * Release any resources held by the given pmap.
2593 *
2594 * This is the reverse analog to pmap_pinit. It does not
2595 * necessarily mean for the pmap structure to be deallocated,
2596 * as in pmap_destroy.
2597 */
2598 void
2599 pmap_release(pmap)
2600 pmap_t pmap;
2601 {
2602 /*
2603 * As long as the pmap contains no mappings,
2604 * which should always be the case whenever
2605 * this function is called, there really should
2606 * be nothing to do.
2607 *
2608 * XXX - This function is being called while there are
2609 * still valid mappings, so I guess the above must not
2610 * be true.
2611 * XXX - Unless the mappings persist due to a bug here...
2612 * + That's what was happening. The map had no mappings,
2613 * but it still had an A table. pmap_remove() was not
2614 * releasing tables when they were empty.
2615 */
2616 #ifdef PMAP_DEBUG
2617 if (pmap == NULL)
2618 return;
2619 if (pmap == pmap_kernel())
2620 panic("pmap_release: kernel pmap");
2621 #endif
2622 /*
2623 * XXX - If this pmap has an A table, give it back.
2624 * The pmap SHOULD be empty by now, and pmap_remove
2625 * should have already given back the A table...
2626 * However, I see: pmap->pm_a_tmgr->at_ecnt == 1
2627 * at this point, which means some mapping was not
2628 * removed when it should have been. -gwr
2629 */
2630 if (pmap->pm_a_tmgr != NULL) {
2631 /* First make sure we are not using it! */
2632 if (kernel_crp.rp_addr == pmap->pm_a_phys) {
2633 kernel_crp.rp_addr = kernAphys;
2634 loadcrp(&kernel_crp);
2635 }
2636 #ifdef PMAP_DEBUG /* XXX - todo! */
2637 /* XXX - Now complain... */
2638 printf("pmap_release: still have table\n");
2639 Debugger();
2640 #endif
2641 free_a_table(pmap->pm_a_tmgr, TRUE);
2642 pmap->pm_a_tmgr = NULL;
2643 pmap->pm_a_phys = kernAphys;
2644 }
2645 }
2646
2647 /* pmap_reference INTERFACE
2648 **
2649 * Increment the reference count of a pmap.
2650 */
2651 void
2652 pmap_reference(pmap)
2653 pmap_t pmap;
2654 {
2655 if (pmap == NULL)
2656 return;
2657
2658 /* pmap_lock(pmap); */
2659 pmap->pm_refcount++;
2660 /* pmap_unlock(pmap); */
2661 }
2662
2663 /* pmap_dereference INTERNAL
2664 **
2665 * Decrease the reference count on the given pmap
2666 * by one and return the current count.
2667 */
2668 int
2669 pmap_dereference(pmap)
2670 pmap_t pmap;
2671 {
2672 int rtn;
2673
2674 if (pmap == NULL)
2675 return 0;
2676
2677 /* pmap_lock(pmap); */
2678 rtn = --pmap->pm_refcount;
2679 /* pmap_unlock(pmap); */
2680
2681 return rtn;
2682 }
2683
2684 /* pmap_destroy INTERFACE
2685 **
2686 * Decrement a pmap's reference count and delete
2687 * the pmap if it becomes zero. Will be called
2688 * only after all mappings have been removed.
2689 */
2690 void
2691 pmap_destroy(pmap)
2692 pmap_t pmap;
2693 {
2694 if (pmap == NULL)
2695 return;
2696 if (pmap == &kernel_pmap)
2697 panic("pmap_destroy: kernel_pmap!");
2698 if (pmap_dereference(pmap) == 0) {
2699 pmap_release(pmap);
2700 free(pmap, M_VMPMAP);
2701 }
2702 }
2703
2704 /* pmap_is_referenced INTERFACE
2705 **
2706 * Determine if the given physical page has been
2707 * referenced (read from or written to.)
2708 */
2709 boolean_t
2710 pmap_is_referenced(pa)
2711 vm_offset_t pa;
2712 {
2713 pv_t *pv;
2714 int idx, s;
2715
2716 if (!pv_initialized)
2717 return FALSE;
2718 /* XXX - this may be unnecessary. */
2719 if (!is_managed(pa))
2720 return FALSE;
2721
2722 pv = pa2pv(pa);
2723 /*
2724 * Check the flags on the pv head. If they are set,
2725 * return immediately. Otherwise a search must be done.
2726 */
2727 if (pv->pv_flags & PV_FLAGS_USED)
2728 return TRUE;
2729 else {
2730 s = splimp();
2731 /*
2732 * Search through all pv elements pointing
2733 * to this page and query their reference bits
2734 */
2735 for (idx = pv->pv_idx; idx != PVE_EOL; idx =
2736 pvebase[idx].pve_next)
2737 if (MMU_PTE_USED(kernCbase[idx])) {
2738 splx(s);
2739 return TRUE;
2740 }
2741 splx(s);
2742 }
2743
2744 return FALSE;
2745 }
2746
2747 /* pmap_is_modified INTERFACE
2748 **
2749 * Determine if the given physical page has been
2750 * modified (written to.)
2751 */
2752 boolean_t
2753 pmap_is_modified(pa)
2754 vm_offset_t pa;
2755 {
2756 pv_t *pv;
2757 int idx, s;
2758
2759 if (!pv_initialized)
2760 return FALSE;
2761 /* XXX - this may be unnecessary. */
2762 if (!is_managed(pa))
2763 return FALSE;
2764
2765 /* see comments in pmap_is_referenced() */
2766 pv = pa2pv(pa);
2767 if (pv->pv_flags & PV_FLAGS_MDFY) {
2768 return TRUE;
2769 } else {
2770 s = splimp();
2771 for (idx = pv->pv_idx; idx != PVE_EOL; idx =
2772 pvebase[idx].pve_next)
2773 if (MMU_PTE_MODIFIED(kernCbase[idx])) {
2774 splx(s);
2775 return TRUE;
2776 }
2777 splx(s);
2778 }
2779
2780 return FALSE;
2781 }
2782
2783 /* pmap_page_protect INTERFACE
2784 **
2785 * Applies the given protection to all mappings to the given
2786 * physical page.
2787 */
2788 void
2789 pmap_page_protect(pa, prot)
2790 vm_offset_t pa;
2791 vm_prot_t prot;
2792 {
2793 pv_t *pv;
2794 int idx, s;
2795 vm_offset_t va;
2796 struct mmu_short_pte_struct *pte;
2797 c_tmgr_t *c_tbl;
2798 pmap_t pmap, curpmap;
2799
2800 if (!is_managed(pa))
2801 return;
2802
2803 curpmap = current_pmap();
2804 pv = pa2pv(pa);
2805 s = splimp();
2806 for (idx = pv->pv_idx; idx != PVE_EOL; idx = pvebase[idx].pve_next) {
2807 pte = &kernCbase[idx];
2808 switch (prot) {
2809 case VM_PROT_ALL:
2810 /* do nothing */
2811 break;
2812 case VM_PROT_EXECUTE:
2813 case VM_PROT_READ:
2814 case VM_PROT_READ|VM_PROT_EXECUTE:
2815 pte->attr.raw |= MMU_SHORT_PTE_WP;
2816
2817 /*
2818 * Determine the virtual address mapped by
2819 * the PTE and flush ATC entries if necessary.
2820 */
2821 va = pmap_get_pteinfo(idx, &pmap, &c_tbl);
2822 if (pmap == curpmap || pmap == pmap_kernel())
2823 TBIS(va);
2824 break;
2825 case VM_PROT_NONE:
2826 /* Save the mod/ref bits. */
2827 pv->pv_flags |= pte->attr.raw;
2828 /* Invalidate the PTE. */
2829 pte->attr.raw = MMU_DT_INVALID;
2830
2831 /*
2832 * Update table counts. And flush ATC entries
2833 * if necessary.
2834 */
2835 va = pmap_get_pteinfo(idx, &pmap, &c_tbl);
2836
2837 /*
2838 * If the PTE belongs to the kernel map,
2839 * be sure to flush the page it maps.
2840 */
2841 if (pmap == pmap_kernel()) {
2842 TBIS(va);
2843 } else {
2844 /*
2845 * The PTE belongs to a user map.
2846 * Update the entry count in the C
2847 * table to which it belongs and flush
2848 * the ATC if the mapping belongs to
2849 * the current pmap.
2850 */
2851 c_tbl->ct_ecnt--;
2852 if (pmap == curpmap)
2853 TBIS(va);
2854 }
2855 break;
2856 default:
2857 break;
2858 }
2859 }
2860
2861 /*
2862 * If the protection code indicates that all mappings to the page
2863 * be removed, truncate the PV list to zero entries.
2864 */
2865 if (prot == VM_PROT_NONE)
2866 pv->pv_idx = PVE_EOL;
2867 splx(s);
2868 }
2869
2870 /* pmap_get_pteinfo INTERNAL
2871 **
2872 * Called internally to find the pmap and virtual address within that
2873 * map to which the pte at the given index maps. Also returns the PTE's C
2874 * table manager.
2875 *
2876 * Returns the pmap in the argument provided, and the virtual address
2877 * by return value.
2878 */
2879 vm_offset_t
2880 pmap_get_pteinfo(idx, pmap, tbl)
2881 u_int idx;
2882 pmap_t *pmap;
2883 c_tmgr_t **tbl;
2884 {
2885 a_tmgr_t *a_tbl;
2886 b_tmgr_t *b_tbl;
2887 c_tmgr_t *c_tbl;
2888 vm_offset_t va = 0;
2889
2890 /*
2891 * Determine if the PTE is a kernel PTE or a user PTE.
2892 */
2893 if (idx >= NUM_KERN_PTES) {
2894 /*
2895 * The PTE belongs to a user mapping.
2896 * Find the virtual address by decoding table indices.
2897 * Each successive decode reveals the address in
2898 * least-to-most-significant-bit order.
2899 *
2900 * 31 0
2901 * +-------------------------------+
2902 * |AAAAAAABBBBBBCCCCCC............|
2903 * +-------------------------------+
2904 */
2905 /* XXX: c_tbl = mmuC2tmgr(pte); */
2906 /* XXX: Would like an inline for this to validate idx... */
2907 c_tbl = &Ctmgrbase[(idx - NUM_KERN_PTES) / MMU_C_TBL_SIZE];
2908 b_tbl = c_tbl->ct_parent;
2909 a_tbl = b_tbl->bt_parent;
2910 *pmap = a_tbl->at_parent;
2911 *tbl = c_tbl;
2912
2913 /* Start with the 'C' bits, then add B and A... */
2914 va |= ((idx % MMU_C_TBL_SIZE) << MMU_TIC_SHIFT);
2915 va |= (c_tbl->ct_pidx << MMU_TIB_SHIFT);
2916 va |= (b_tbl->bt_pidx << MMU_TIA_SHIFT);
2917 } else {
2918 /*
2919 * The PTE belongs to the kernel map.
2920 */
2921 *pmap = pmap_kernel();
2922
2923 va = sun3x_ptob(idx);
2924 va += KERNBASE;
2925 }
2926
2927 return va;
2928 }
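/*
 * Editor's example (hedged): assuming the 7/6/6-bit A/B/C index split
 * with a 13-bit page offset described at the top of this file (so
 * MMU_TIC_SHIFT == 13, MMU_TIB_SHIFT == 19, MMU_TIA_SHIFT == 25 and
 * MMU_C_TBL_SIZE == 64), a user PTE whose offset within its C table
 * is 3, with ct_pidx == 5 and bt_pidx == 2, decodes as:
 *
 *   va = (3 << 13) | (5 << 19) | (2 << 25)
 *      = 0x00006000 | 0x00280000 | 0x04000000
 *      = 0x04286000
 */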
2929
2930 #if 0 /* XXX - I am eliminating this function. */
2931 /* pmap_find_tic INTERNAL
2932 **
2933 * Given the address of a pte, find the TIC (level 'C' table index) for
2934 * the pte within its C table.
2935 */
2936 char
2937 pmap_find_tic(pte)
2938 mmu_short_pte_t *pte;
2939 {
2940 return ((pte - mmuCbase) % MMU_C_TBL_SIZE);
2941 }
2942 #endif /* 0 */
2943
2944
2945 /* pmap_clear_modify INTERFACE
2946 **
2947 * Clear the modification bit on the page at the specified
2948 * physical address.
2949 *
2950 */
2951 void
2952 pmap_clear_modify(pa)
2953 vm_offset_t pa;
2954 {
2955 pmap_clear_pv(pa, PV_FLAGS_MDFY);
2956 }
2957
2958 /* pmap_clear_reference INTERFACE
2959 **
2960 * Clear the referenced bit on the page at the specified
2961 * physical address.
2962 */
2963 void
2964 pmap_clear_reference(pa)
2965 vm_offset_t pa;
2966 {
2967 pmap_clear_pv(pa, PV_FLAGS_USED);
2968 }
2969
2970 /* pmap_clear_pv INTERNAL
2971 **
2972 * Clears the specified flag from the specified physical address.
2973 * (Used by pmap_clear_modify() and pmap_clear_reference().)
2974 *
2975 * Flag is one of:
2976 * PV_FLAGS_MDFY - Page modified bit.
2977 * PV_FLAGS_USED - Page used (referenced) bit.
2978 *
2979 * This routine must not only clear the flag on the pv list
2980 * head. It must also clear the bit on every pte in the pv
2981 * list associated with the address.
2982 */
2983 void
2984 pmap_clear_pv(pa, flag)
2985 vm_offset_t pa;
2986 int flag;
2987 {
2988 pv_t *pv;
2989 int idx, s;
2990 vm_offset_t va;
2991 pmap_t pmap;
2992 mmu_short_pte_t *pte;
2993 c_tmgr_t *c_tbl;
2994
2995 pv = pa2pv(pa);
2996
2997 s = splimp();
2998 pv->pv_flags &= ~(flag);
2999 for (idx = pv->pv_idx; idx != PVE_EOL; idx = pvebase[idx].pve_next) {
3000 pte = &kernCbase[idx];
3001 pte->attr.raw &= ~(flag);
3002 /*
3003 * The MC68030 MMU will not set the modified or
3004 * referenced bits on any MMU tables for which it has
3005 * a cached descriptor with its modify bit set. To ensure
3006 * that it will modify these bits on the PTE during the next
3007 * time it is written to or read from, we must flush it from
3008 * the ATC.
3009 *
3010 * Ordinarily it is only necessary to flush the descriptor
3011 * if it is used in the current address space. But since I
3012 * am not sure that there will always be a notion of
3013 * 'the current address space' when this function is called,
3014 * I will skip the test and always flush the address. It
3015 * does no harm.
3016 */
3017 va = pmap_get_pteinfo(idx, &pmap, &c_tbl);
3018 TBIS(va);
3019 }
3020 splx(s);
3021 }
3022
3023 /* pmap_extract INTERFACE
3024 **
3025 * Return the physical address mapped by the virtual address
3026 * in the specified pmap or 0 if it is not known.
3027 *
3028 * Note: this function should also apply an exclusive lock
3029 * on the pmap system during its duration.
3030 */
3031 vm_offset_t
3032 pmap_extract(pmap, va)
3033 pmap_t pmap;
3034 vm_offset_t va;
3035 {
3036 int a_idx, b_idx, pte_idx;
3037 a_tmgr_t *a_tbl;
3038 b_tmgr_t *b_tbl;
3039 c_tmgr_t *c_tbl;
3040 mmu_short_pte_t *c_pte;
3041
3042 if (pmap == pmap_kernel())
3043 return pmap_extract_kernel(va);
3044 if (pmap == NULL)
3045 return 0;
3046
3047 if (pmap_stroll(pmap, va, &a_tbl, &b_tbl, &c_tbl,
3048 &c_pte, &a_idx, &b_idx, &pte_idx) == FALSE)
3049 return 0;
3050
3051 if (!MMU_VALID_DT(*c_pte))
3052 return 0;
3053
3054 return (MMU_PTE_PA(*c_pte));
3055 }
3056
3057 /* pmap_extract_kernel INTERNAL
3058 **
3059 * Extract a translation from the kernel address space.
3060 */
3061 vm_offset_t
3062 pmap_extract_kernel(va)
3063 vm_offset_t va;
3064 {
3065 mmu_short_pte_t *pte;
3066
3067 pte = &kernCbase[(u_int) sun3x_btop(va - KERNBASE)];
3068 return MMU_PTE_PA(*pte);
3069 }
3070
3071 /* pmap_remove_kernel INTERNAL
3072 **
3073 * Remove the mapping of a range of virtual addresses from the kernel map.
3074 * The arguments are already page-aligned.
3075 */
3076 void
3077 pmap_remove_kernel(sva, eva)
3078 vm_offset_t sva;
3079 vm_offset_t eva;
3080 {
3081 int idx, eidx;
3082
3083 #ifdef PMAP_DEBUG
3084 if ((sva & PGOFSET) || (eva & PGOFSET))
3085 panic("pmap_remove_kernel: alignment");
3086 #endif
3087
3088 idx = sun3x_btop(sva - KERNBASE);
3089 eidx = sun3x_btop(eva - KERNBASE);
3090
3091 while (idx < eidx)
3092 pmap_remove_pte(&kernCbase[idx++]);
3093 /* Always flush the ATC when manipulating the kernel address space. */
3094 TBIAS();
3095 }
3096
3097 /* pmap_remove INTERFACE
3098 **
3099 * Remove the mapping of a range of virtual addresses from the given pmap.
3100 *
3101 * If the range contains any wired entries, this function will probably create
3102 * disaster.
3103 */
3104 void
3105 pmap_remove(pmap, start, end)
3106 pmap_t pmap;
3107 vm_offset_t start;
3108 vm_offset_t end;
3109 {
3110
3111 if (pmap == pmap_kernel()) {
3112 pmap_remove_kernel(start, end);
3113 return;
3114 }
3115
3116 /*
3117 * XXX - Temporary(?) statement to prevent panic caused
3118 * by vm_alloc_with_pager() handing us a software map (i.e. NULL)
3119 * to remove because it couldn't get backing store.
3120 * (I guess.)
3121 */
3122 if (pmap == NULL)
3123 return;
3124
3125 /*
3126 * If the pmap doesn't have an A table of its own, it has no mappings
3127 * that can be removed.
3128 */
3129 if (pmap->pm_a_tmgr == NULL)
3130 return;
3131
3132 /*
3133 * Remove the specified range from the pmap. If the function
3134 * returns true, the operation removed all the valid mappings
3135 * in the pmap and freed its A table. If this happened to the
3136 * currently loaded pmap, the MMU root pointer must be reloaded
3137 * with the default 'kernel' map.
3138 */
3139 if (pmap_remove_a(pmap->pm_a_tmgr, start, end)) {
3140 if (kernel_crp.rp_addr == pmap->pm_a_phys) {
3141 kernel_crp.rp_addr = kernAphys;
3142 loadcrp(&kernel_crp);
3143 /* will do TLB flush below */
3144 }
3145 pmap->pm_a_tmgr = NULL;
3146 pmap->pm_a_phys = kernAphys;
3147 }
3148
3149 /*
3150 * If we just modified the current address space,
3151 * make sure to flush the MMU cache.
3152 *
3153 * XXX - this could be an unnecessarily large flush.
3154 * XXX - Could decide, based on the size of the VA range
3155 * to be removed, whether to flush "by pages" or "all".
3156 */
3157 if (pmap == current_pmap())
3158 TBIAU();
3159 }
3160
3161 /* pmap_remove_a INTERNAL
3162 **
3163 * This is function number one in a set of three that removes a range
3164 * of memory in the most efficient manner by removing the highest possible
3165 * tables from the memory space. This particular function attempts to remove
3166 * as many B tables as it can, delegating the remaining fragmented ranges to
3167 * pmap_remove_b().
3168 *
3169 * If the removal operation results in an empty A table, the function returns
3170 * TRUE.
3171 *
3172 * It's ugly but will do for now.
3173 */
3174 boolean_t
3175 pmap_remove_a(a_tbl, start, end)
3176 a_tmgr_t *a_tbl;
3177 vm_offset_t start;
3178 vm_offset_t end;
3179 {
3180 boolean_t empty;
3181 int idx;
3182 vm_offset_t nstart, nend;
3183 b_tmgr_t *b_tbl;
3184 mmu_long_dte_t *a_dte;
3185 mmu_short_dte_t *b_dte;
3186
3187 /*
3188 * The following code works with what I call a 'granularity
3189 * reduction algorithm'. A range of addresses will always have
3190 * the following properties, which are classified according to
3191 * how the range relates to the size of the current granularity
3192 * - an A table entry:
3193 *
3194 * 1 2 3 4
3195 * -+---+---+---+---+---+---+---+-
3196 * -+---+---+---+---+---+---+---+-
3197 *
3198 * A range will always start on a granularity boundary, illustrated
3199 * by '+' signs in the table above, or it will start at some point
3200 * between granularity boundaries, as illustrated by point 1.
3201 * The first step in removing a range of addresses is to remove the
3202 * range between 1 and 2, the nearest granularity boundary. This
3203 * job is handled by the section of code governed by the
3204 * 'if (start < nstart)' statement.
3205 *
3206 * A range will always encompass zero or more integral granules,
3207 * illustrated by points 2 and 3. Integral granules are easy to
3208 * remove. The removal of these granules is the second step, and
3209 * is handled by the code block 'if (nstart < nend)'.
3210 *
3211 * Lastly, a range will always end on a granularity boundary,
3212 * illustrated by point 3, or it will fall just beyond one, as at
3213 * point 4. The last step involves removing this range and is handled by
3214 * the code block 'if (nend < end)'.
3215 */
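/*
 * Editor's example (hedged): with the 7-bit A index described at the
 * top of this file, MMU_TIA_RANGE would be 1 << 25 (32MB). Removing
 * [0x01000000, 0x05000000) then breaks down as:
 *
 *   start = 0x01000000, nstart = 0x02000000 (rounded up)
 *   end   = 0x05000000, nend   = 0x04000000 (rounded down)
 *
 * a partial head [start, nstart) for the first block below, one
 * integral granule [nstart, nend) for the second, and a partial
 * tail [nend, end) for the third.
 */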
3216 nstart = MMU_ROUND_UP_A(start);
3217 nend = MMU_ROUND_A(end);
3218
3219 if (start < nstart) {
3220 /*
3221 * This block is executed if the range starts between
3222 * a granularity boundary.
3223 *
3224 * First find the DTE which is responsible for mapping
3225 * the start of the range.
3226 */
3227 idx = MMU_TIA(start);
3228 a_dte = &a_tbl->at_dtbl[idx];
3229
3230 /*
3231 * If the DTE is valid then delegate the removal of the sub
3232 * range to pmap_remove_b(), which can remove addresses at
3233 * a finer granularity.
3234 */
3235 if (MMU_VALID_DT(*a_dte)) {
3236 b_dte = mmu_ptov(a_dte->addr.raw);
3237 b_tbl = mmuB2tmgr(b_dte);
3238
3239 /*
3240 * The sub range to be removed starts at the start
3241 * of the full range we were asked to remove, and ends
3242 * at the lesser of:
3243 * 1. The end of the full range, -or-
3244 * 2. The start of the full range, rounded up to the
3245 * nearest granularity boundary.
3246 */
3247 if (end < nstart)
3248 empty = pmap_remove_b(b_tbl, start, end);
3249 else
3250 empty = pmap_remove_b(b_tbl, start, nstart);
3251
3252 /*
3253 * If the removal resulted in an empty B table,
3254 * invalidate the DTE that points to it and decrement
3255 * the valid entry count of the A table.
3256 */
3257 if (empty) {
3258 a_dte->attr.raw = MMU_DT_INVALID;
3259 a_tbl->at_ecnt--;
3260 }
3261 }
3262 /*
3263 * If the DTE is invalid, the address range is already non-
3264 * existent and can simply be skipped.
3265 */
3266 }
3267 if (nstart < nend) {
3268 /*
3269 * This block is executed if the range spans a whole number
3270 * multiple of granules (A table entries.)
3271 *
3272 * First find the DTE which is responsible for mapping
3273 * the start of the first granule involved.
3274 */
3275 idx = MMU_TIA(nstart);
3276 a_dte = &a_tbl->at_dtbl[idx];
3277
3278 /*
3279 * Remove entire sub-granules (B tables) one at a time,
3280 * until reaching the end of the range.
3281 */
3282 for (; nstart < nend; a_dte++, nstart += MMU_TIA_RANGE)
3283 if (MMU_VALID_DT(*a_dte)) {
3284 /*
3285 * Find the B table manager for the
3286 * entry and free it.
3287 */
3288 b_dte = mmu_ptov(a_dte->addr.raw);
3289 b_tbl = mmuB2tmgr(b_dte);
3290 free_b_table(b_tbl, TRUE);
3291
3292 /*
3293 * Invalidate the DTE that points to the
3294 * B table and decrement the valid entry
3295 * count of the A table.
3296 */
3297 a_dte->attr.raw = MMU_DT_INVALID;
3298 a_tbl->at_ecnt--;
3299 }
3300 }
3301 if (nend < end) {
3302 /*
3303 * This block is executed if the range ends beyond a
3304 * granularity boundary.
3305 *
3306 * First find the DTE which is responsible for mapping
3307 * the start of the nearest (rounded down) granularity
3308 * boundary.
3309 */
3310 idx = MMU_TIA(nend);
3311 a_dte = &a_tbl->at_dtbl[idx];
3312
3313 /*
3314 * If the DTE is valid then delegate the removal of the sub
3315 * range to pmap_remove_b(), which can remove addresses at
3316 * a finer granularity.
3317 */
3318 if (MMU_VALID_DT(*a_dte)) {
3319 /*
3320 * Find the B table manager for the entry
3321 * and hand it to pmap_remove_b() along with
3322 * the sub range.
3323 */
3324 b_dte = mmu_ptov(a_dte->addr.raw);
3325 b_tbl = mmuB2tmgr(b_dte);
3326
3327 empty = pmap_remove_b(b_tbl, nend, end);
3328
3329 /*
3330 * If the removal resulted in an empty B table,
3331 * invalidate the DTE that points to it and decrement
3332 * the valid entry count of the A table.
3333 */
3334 if (empty) {
3335 a_dte->attr.raw = MMU_DT_INVALID;
3336 a_tbl->at_ecnt--;
3337 }
3338 }
3339 }
3340
3341 /*
3342 * If there are no more entries in the A table, release it
3343 * back to the available pool and return TRUE.
3344 */
3345 if (a_tbl->at_ecnt == 0) {
3346 a_tbl->at_parent = NULL;
3347 TAILQ_REMOVE(&a_pool, a_tbl, at_link);
3348 TAILQ_INSERT_HEAD(&a_pool, a_tbl, at_link);
3349 empty = TRUE;
3350 } else {
3351 empty = FALSE;
3352 }
3353
3354 return empty;
3355 }
3356
3357 /* pmap_remove_b INTERNAL
3358 **
3359 * Remove a range of addresses from an address space, trying to remove entire
3360 * C tables if possible.
3361 *
3362 * If the operation results in an empty B table, the function returns TRUE.
3363 */
3364 boolean_t
3365 pmap_remove_b(b_tbl, start, end)
3366 b_tmgr_t *b_tbl;
3367 vm_offset_t start;
3368 vm_offset_t end;
3369 {
3370 boolean_t empty;
3371 int idx;
3372 vm_offset_t nstart, nend, rstart;
3373 c_tmgr_t *c_tbl;
3374 mmu_short_dte_t *b_dte;
3375 mmu_short_pte_t *c_dte;
3376
3377
3378 nstart = MMU_ROUND_UP_B(start);
3379 nend = MMU_ROUND_B(end);
3380
3381 if (start < nstart) {
3382 idx = MMU_TIB(start);
3383 b_dte = &b_tbl->bt_dtbl[idx];
3384 if (MMU_VALID_DT(*b_dte)) {
3385 c_dte = mmu_ptov(MMU_DTE_PA(*b_dte));
3386 c_tbl = mmuC2tmgr(c_dte);
3387 if (end < nstart)
3388 empty = pmap_remove_c(c_tbl, start, end);
3389 else
3390 empty = pmap_remove_c(c_tbl, start, nstart);
3391 if (empty) {
3392 b_dte->attr.raw = MMU_DT_INVALID;
3393 b_tbl->bt_ecnt--;
3394 }
3395 }
3396 }
3397 if (nstart < nend) {
3398 idx = MMU_TIB(nstart);
3399 b_dte = &b_tbl->bt_dtbl[idx];
3400 rstart = nstart;
3401 while (rstart < nend) {
3402 if (MMU_VALID_DT(*b_dte)) {
3403 c_dte = mmu_ptov(MMU_DTE_PA(*b_dte));
3404 c_tbl = mmuC2tmgr(c_dte);
3405 free_c_table(c_tbl, TRUE);
3406 b_dte->attr.raw = MMU_DT_INVALID;
3407 b_tbl->bt_ecnt--;
3408 }
3409 b_dte++;
3410 rstart += MMU_TIB_RANGE;
3411 }
3412 }
3413 if (nend < end) {
3414 idx = MMU_TIB(nend);
3415 b_dte = &b_tbl->bt_dtbl[idx];
3416 if (MMU_VALID_DT(*b_dte)) {
3417 c_dte = mmu_ptov(MMU_DTE_PA(*b_dte));
3418 c_tbl = mmuC2tmgr(c_dte);
3419 empty = pmap_remove_c(c_tbl, nend, end);
3420 if (empty) {
3421 b_dte->attr.raw = MMU_DT_INVALID;
3422 b_tbl->bt_ecnt--;
3423 }
3424 }
3425 }
3426
3427 if (b_tbl->bt_ecnt == 0) {
3428 b_tbl->bt_parent = NULL;
3429 TAILQ_REMOVE(&b_pool, b_tbl, bt_link);
3430 TAILQ_INSERT_HEAD(&b_pool, b_tbl, bt_link);
3431 empty = TRUE;
3432 } else {
3433 empty = FALSE;
3434 }
3435
3436 return empty;
3437 }
3438
3439 /* pmap_remove_c INTERNAL
3440 **
3441 * Remove a range of addresses from the given C table.
3442 */
3443 boolean_t
3444 pmap_remove_c(c_tbl, start, end)
3445 c_tmgr_t *c_tbl;
3446 vm_offset_t start;
3447 vm_offset_t end;
3448 {
3449 boolean_t empty;
3450 int idx;
3451 mmu_short_pte_t *c_pte;
3452
3453 idx = MMU_TIC(start);
3454 c_pte = &c_tbl->ct_dtbl[idx];
3455 for (;start < end; start += MMU_PAGE_SIZE, c_pte++) {
3456 if (MMU_VALID_DT(*c_pte)) {
3457 pmap_remove_pte(c_pte);
3458 c_tbl->ct_ecnt--;
3459 }
3460 }
3461
3462 if (c_tbl->ct_ecnt == 0) {
3463 c_tbl->ct_parent = NULL;
3464 TAILQ_REMOVE(&c_pool, c_tbl, ct_link);
3465 TAILQ_INSERT_HEAD(&c_pool, c_tbl, ct_link);
3466 empty = TRUE;
3467 } else {
3468 empty = FALSE;
3469 }
3470
3471 return empty;
3472 }
3473
3474 /* is_managed INTERNAL
3475 **
3476 * Determine if the given physical address is managed by the PV system.
3477 * Note that this logic assumes that no one will ask for the status of
3478 * addresses which lie in between the memory banks on the 3/80. If they
3479 * do so, it will falsely report that the address is managed.
3480 *
3481 * Note: A "managed" address is one that was reported to the VM system as
3482 * a "usable page" during system startup. As such, the VM system expects the
3483 * pmap module to keep accurate track of the usage of those pages.
3484 * Any page not given to the VM system at startup does not exist (as far as
3485 * the VM system is concerned) and is therefore "unmanaged." Examples are
3486 * those pages which belong to the ROM monitor and the memory allocated before
3487 * the VM system was started.
3488 */
3489 boolean_t
3490 is_managed(pa)
3491 vm_offset_t pa;
3492 {
3493 if (pa >= avail_start && pa < avail_end)
3494 return TRUE;
3495 else
3496 return FALSE;
3497 }
3498
3499 /* pmap_bootstrap_alloc INTERNAL
3500 **
3501 * Used internally for memory allocation at startup when malloc is not
3502 * available. This code will fail once it crosses the first memory
3503 * bank boundary on the 3/80. Hopefully by then however, the VM system
3504 * will be in charge of allocation.
3505 */
3506 void *
3507 pmap_bootstrap_alloc(size)
3508 int size;
3509 {
3510 void *rtn;
3511
3512 #ifdef PMAP_DEBUG
3513 if (bootstrap_alloc_enabled == FALSE) {
3514 mon_printf("pmap_bootstrap_alloc: disabled\n");
3515 sunmon_abort();
3516 }
3517 #endif
3518
3519 rtn = (void *) virtual_avail;
3520 virtual_avail += size;
3521
3522 #ifdef PMAP_DEBUG
3523 if (virtual_avail > virtual_contig_end) {
3524 mon_printf("pmap_bootstrap_alloc: out of mem\n");
3525 sunmon_abort();
3526 }
3527 #endif
3528
3529 return rtn;
3530 }
3531
3532 /* pmap_bootstrap_aalign INTERNAL
3533 **
3534 * Used to ensure that the next call to pmap_bootstrap_alloc() will
3535 * return a chunk of memory aligned to the specified size.
3536 *
3537 * Note: This function will only support alignment sizes that are powers
3538 * of two.
3539 */
3540 void
3541 pmap_bootstrap_aalign(size)
3542 int size;
3543 {
3544 int off;
3545
3546 off = virtual_avail & (size - 1);
3547 if (off) {
3548 (void) pmap_bootstrap_alloc(size - off);
3549 }
3550 }
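/*
 * Editor's example (hedged): for a power-of-two size the mask
 * arithmetic above pads virtual_avail forward to the next multiple of
 * 'size'. E.g. with virtual_avail == 0xf0001234 and size == 0x1000,
 * off == 0x234, so 0x1000 - 0x234 == 0xdcc filler bytes are consumed
 * and the next pmap_bootstrap_alloc() returns 0xf0002000.
 */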
3551
3552 /* pmap_pa_exists
3553 **
3554 * Used by the /dev/mem driver to see if a given PA is memory
3555 * that can be mapped. (The PA is not in a hole.)
3556 */
3557 int
3558 pmap_pa_exists(pa)
3559 vm_offset_t pa;
3560 {
3561 /* XXX - NOTYET */
3562 return (0);
3563 }
3564
3565 /* pmap_activate INTERFACE
3566 **
3567 * This is called by locore.s:cpu_switch when we are switching to a
3568 * new process. This should load the MMU context for the new proc.
3569 * XXX - Later, this should be done directly in locore.s
3570 */
3571 void
3572 pmap_activate(pmap)
3573 pmap_t pmap;
3574 {
3575 u_long rootpa;
3576
3577 /* Only do reload/flush if we have to. */
3578 rootpa = pmap->pm_a_phys;
3579 if (kernel_crp.rp_addr != rootpa) {
3580 DPRINT(("pmap_activate(%p)\n", pmap));
3581 kernel_crp.rp_addr = rootpa;
3582 loadcrp(&kernel_crp);
3583 TBIAU();
3584 }
3585 }
3586
3587
3588 /* pmap_update
3589 **
3590 * Apply any delayed changes scheduled for all pmaps immediately.
3591 *
3592 * No delayed operations are currently done in this pmap.
3593 */
3594 void
3595 pmap_update()
3596 {
3597 /* not implemented. */
3598 }
3599
3600 /* pmap_virtual_space INTERFACE
3601 **
3602 * Return the current available range of virtual addresses in the
3603 * arguments provided. Only really called once.
3604 */
3605 void
3606 pmap_virtual_space(vstart, vend)
3607 vm_offset_t *vstart, *vend;
3608 {
3609 *vstart = virtual_avail;
3610 *vend = virtual_end;
3611 }
3612
3613 /* pmap_free_pages INTERFACE
3614 **
3615 * Return the number of physical pages still available.
3616 *
3617 * This is probably going to be a mess, but it's only called
3618 * once and it's the only function left that I have to implement!
3619 */
3620 u_int
3621 pmap_free_pages()
3622 {
3623 int i;
3624 u_int left;
3625 vm_offset_t avail;
3626
3627 avail = avail_next;
3628 left = 0;
3629 i = 0;
3630 while (avail >= avail_mem[i].pmem_end) {
3631 if (avail_mem[i].pmem_next == NULL)
3632 return 0;
3633 i++;
3634 }
3635 while (i < SUN3X_80_MEM_BANKS) {
3636 if (avail < avail_mem[i].pmem_start) {
3637 /* Avail is inside a hole, march it
3638 * up to the next bank.
3639 */
3640 avail = avail_mem[i].pmem_start;
3641 }
3642 left += sun3x_btop(avail_mem[i].pmem_end - avail);
3643 if (avail_mem[i].pmem_next == NULL)
3644 break;
3645 i++;
3646 }
3647
3648 return left;
3649 }

/* pmap_page_index INTERFACE
 **
 * Return the index of the given physical page in a list of usable
 * physical pages in the system. Holes in physical memory may be counted
 * if so desired. As long as pmap_free_pages() and pmap_page_index()
 * agree as to whether holes in memory do or do not count as valid pages,
 * it really doesn't matter. However, if you like to save a little
 * memory, don't count holes as valid pages. This is even more true when
 * the holes are large.
 *
 * We will not count holes as valid pages. We can generate page indices
 * that conform to this by using the memory bank structures initialized
 * in pmap_alloc_pv().
 */
int
pmap_page_index(pa)
	vm_offset_t pa;
{
	struct pmap_physmem_struct *bank = avail_mem;

	/* Search for the memory bank with this page. */
	/* XXX - What if it is not physical memory? */
	while (pa >= bank->pmem_end)	/* pmem_end is exclusive of the bank */
		bank = bank->pmem_next;
	pa -= bank->pmem_start;

	return (bank->pmem_pvbase + sun3x_btop(pa));
}
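
/*
 * For the above to skip holes, the pmem_pvbase fields are assumed to
 * be precomputed so that consecutive banks pack together with no
 * indices wasted on holes, roughly:
 */
#if 0
	int i;

	avail_mem[0].pmem_pvbase = 0;
	for (i = 1; i < SUN3X_80_MEM_BANKS; i++)
		avail_mem[i].pmem_pvbase = avail_mem[i - 1].pmem_pvbase +
		    sun3x_btop(avail_mem[i - 1].pmem_end -
		        avail_mem[i - 1].pmem_start);
#endif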

/* pmap_next_page INTERFACE
 **
 * Place the physical address of the next available page in the
 * argument given. Returns FALSE if there are no more pages left.
 *
 * This function must jump over any holes in physical memory.
 * Once this function is used, any use of pmap_bootstrap_alloc()
 * is a sin. Sinners will be punished with erratic behavior.
 */
boolean_t
pmap_next_page(pa)
	vm_offset_t *pa;
{
	static struct pmap_physmem_struct *curbank = avail_mem;

	/* XXX - temporary ROM saving hack. */
	if (avail_next >= avail_end)
		return FALSE;

	if (avail_next >= curbank->pmem_end) {
		if (curbank->pmem_next == NULL)
			return FALSE;
		else {
			curbank = curbank->pmem_next;
			avail_next = curbank->pmem_start;
		}
	}

	*pa = avail_next;
	avail_next += NBPG;
	return TRUE;
}
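
/*
 * Usage sketch (illustrative): the VM system typically enumerates all
 * managed pages with a loop of this shape, after which no further
 * pmap_bootstrap_alloc() calls may be made:
 */
#if 0
	vm_offset_t pa;

	while (pmap_next_page(&pa)) {
		/* hand 'pa' to the VM system's page list */
	}
#endif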

/* pmap_count INTERFACE
 **
 * Return the number of resident (valid) pages in the given pmap,
 * or the number of wired pages if 'type' is non-zero.
 *
 * Note: If this function is handed the kernel map, it will report
 * that it has no mappings. Hopefully the VM system won't ask for kernel
 * map statistics.
 */
segsz_t
pmap_count(pmap, type)
	pmap_t pmap;
	int type;
{
	u_int count;
	int a_idx, b_idx;
	a_tmgr_t *a_tbl;
	b_tmgr_t *b_tbl;
	c_tmgr_t *c_tbl;

	/*
	 * If the pmap does not have its own A table manager, it has no
	 * valid entries.
	 */
	if (pmap->pm_a_tmgr == NULL)
		return 0;

	a_tbl = pmap->pm_a_tmgr;

	count = 0;
	for (a_idx = 0; a_idx < MMU_TIA(KERNBASE); a_idx++) {
		if (MMU_VALID_DT(a_tbl->at_dtbl[a_idx])) {
			b_tbl = mmuB2tmgr(mmu_ptov(a_tbl->at_dtbl[a_idx].addr.raw));
			for (b_idx = 0; b_idx < MMU_B_TBL_SIZE; b_idx++) {
				if (MMU_VALID_DT(b_tbl->bt_dtbl[b_idx])) {
					c_tbl = mmuC2tmgr(
					    mmu_ptov(MMU_DTE_PA(b_tbl->bt_dtbl[b_idx])));
					if (type == 0)
						/*
						 * A resident entry count has been requested.
						 */
						count += c_tbl->ct_ecnt;
					else
						/*
						 * A wired entry count has been requested.
						 */
						count += c_tbl->ct_wcnt;
				}
			}
		}
	}

	return count;
}
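
/*
 * Sketch of the two uses of the 'type' argument above, given some
 * pmap_t 'pmap' (variable names hypothetical):
 */
#if 0
	segsz_t resident, wired;

	resident = pmap_count(pmap, 0);	/* count of valid (resident) PTEs */
	wired = pmap_count(pmap, 1);	/* count of wired PTEs only */
#endif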

/************************ SUN3 COMPATIBILITY ROUTINES ********************
 * The following routines are only used by DDB for tricky kernel text   *
 * operations in db_memrw.c. They are provided for sun3                 *
 * compatibility.                                                        *
 *************************************************************************/
/* get_pte INTERNAL
 **
 * Return the page descriptor that describes the kernel mapping
 * of the given virtual address.
 *
 * XXX - It might be nice if this worked outside of the MMU
 * structures we manage. (Could do it with ptest). -gwr
 */
#if 0 /* XXX old version - kernel only */
vm_offset_t
get_pte(va)
	vm_offset_t va;
{
	u_long idx;

	if (va < KERNBASE)
		return 0;

	idx = (u_long) sun3x_btop(va - KERNBASE);
	return (kernCbase[idx].attr.raw);
}
#else
extern u_long ptest_addr __P((u_long));	/* locore.s */

u_long
get_pte(va)
	vm_offset_t va;
{
	u_long pte_pa;
	mmu_short_pte_t *pte;

	/* Get the physical address of the PTE */
	pte_pa = ptest_addr(va & ~PGOFSET);

	/* Convert to a virtual address... */
	pte = (mmu_short_pte_t *) (KERNBASE + pte_pa);

	/* Make sure it is in our level-C tables... */
	if ((pte < kernCbase) ||
	    (pte >= &mmuCbase[NUM_USER_PTES]))
		return 0;

	/* ... and just return its contents. */
	return (pte->attr.raw);
}
#endif


/* set_pte INTERNAL
 **
 * Set the page descriptor that describes the kernel mapping
 * of the given virtual address.
 */
void
set_pte(va, pte)
	vm_offset_t va;
	vm_offset_t pte;
{
	u_long idx;

	if (va < KERNBASE)
		return;

	idx = (u_long) sun3x_btop(va - KERNBASE);
	kernCbase[idx].attr.raw = pte;
}
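
/*
 * Sketch of how db_memrw.c-style code might use the two routines above
 * to patch write-protected kernel text at some kernel VA 'va' (the
 * write-protect bit name is assumed here):
 */
#if 0
	u_long oldpte;

	oldpte = get_pte(va);
	set_pte(va, oldpte & ~MMU_SHORT_PTE_WP);	/* allow writes */
	/* ... store the new instruction at va ... */
	set_pte(va, oldpte);	/* restore the original descriptor */
#endif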

#ifdef PMAP_DEBUG
/************************** DEBUGGING ROUTINES **************************
 * The following routines are meant to be an aid to debugging the pmap  *
 * system. They are callable from the DDB command line and should be    *
 * prepared to be handed unstable or incomplete states of the system.   *
 ************************************************************************/

/* pv_list
 **
 * List all pages found on the pv list for the given physical page.
 * To avoid endless loops, the listing will stop at the end of the list
 * or after 'n' entries - whichever comes first.
 */
void
pv_list(pa, n)
	vm_offset_t pa;
	int n;
{
	int idx;
	vm_offset_t va;
	pv_t *pv;
	c_tmgr_t *c_tbl;
	pmap_t pmap;

	pv = pa2pv(pa);
	idx = pv->pv_idx;

	for (; idx != PVE_EOL && n > 0; idx = pvebase[idx].pve_next, n--) {
		va = pmap_get_pteinfo(idx, &pmap, &c_tbl);
		printf("idx %d, pmap 0x%x, va 0x%x, c_tbl 0x%x\n",
		    idx, (u_int) pmap, (u_int) va, (u_int) c_tbl);
	}
}
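
/*
 * Invoked from the DDB command line, e.g. (address hypothetical):
 *
 *	call pv_list(0x1234000, 10)
 *
 * which prints at most 10 mappings of the physical page at 0x1234000.
 */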
#endif /* PMAP_DEBUG */

#ifdef NOT_YET
/* and maybe not ever */
/************************** LOW-LEVEL ROUTINES **************************
 * These routines will eventually be re-written into assembly and placed*
 * in locore.s. They are here now as stubs so that the pmap module can  *
 * be linked as a standalone user program for testing.                  *
 ************************************************************************/
/* flush_atc_crp INTERNAL
 **
 * Flush all page descriptors derived from the given CPU Root Pointer
 * (CRP), or 'A' table as it is known here, from the 68851's automatic
 * cache.
 */
void
flush_atc_crp(a_tbl)
	mmu_long_dte_t *a_tbl;	/* base of the A table (type assumed) */
{
	mmu_long_rp_t rp;

	/* Create a temporary root table pointer that points to the
	 * given A table.
	 */
	rp.attr.raw = ~MMU_LONG_RP_LU;
	rp.addr.raw = (unsigned int) a_tbl;

	mmu_pflushr(&rp);
	/* mmu_pflushr:
	 *	movel	sp(4)@,a0
	 *	pflushr	a0@
	 *	rts
	 */
}
#endif /* NOT_YET */
