1 /* $NetBSD: pmap.c,v 1.56 2000/10/27 13:28:55 tsutsui Exp $ */
2
3 /*-
4 * Copyright (c) 1996, 1997 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Jeremy Cooper.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement:
20 * This product includes software developed by the NetBSD
21 * Foundation, Inc. and its contributors.
22 * 4. Neither the name of The NetBSD Foundation nor the names of its
23 * contributors may be used to endorse or promote products derived
24 * from this software without specific prior written permission.
25 *
26 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
27 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
30 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36 * POSSIBILITY OF SUCH DAMAGE.
37 */
38
39 /*
40 * XXX These comments aren't quite accurate. Need to change.
41 * The sun3x uses the MC68851 Memory Management Unit, which is built
42 * into the CPU. The 68851 maps virtual to physical addresses using
43 * a multi-level table lookup, which is stored in the very memory that
44 * it maps. The number of levels of lookup is configurable from one
45 * to four. In this implementation, we use three, named 'A' through 'C'.
46 *
47 * The MMU translates virtual addresses into physical addresses by
48 * traversing these tables in a process called a 'table walk'. The most
49 * significant 7 bits of the Virtual Address ('VA') being translated are
50 * used as an index into the level A table, whose base in physical memory
51 * is stored in a special MMU register, the 'CPU Root Pointer' or CRP. The
52 * address found at that index in the A table is used as the base
53 * address for the next table, the B table. The next six bits of the VA are
54 * used as an index into the B table, which in turn gives the base address
55 * of the third and final C table.
56 *
57 * The next six bits of the VA are used as an index into the C table to
58 * locate a Page Table Entry (PTE). The PTE contains the physical base
59 * address of a page of memory, to which the remaining 13 bits of the VA
60 * are added, producing the mapped physical address.
61 *
62 * To map the entire memory space in this manner would require 2114296 bytes
63 * of page tables per process - quite expensive. Instead we will
64 * allocate a fixed but considerably smaller space for the page tables at
65 * the time the VM system is initialized. When the pmap code is asked by
66 * the kernel to map a VA to a PA, it allocates tables as needed from this
67 * pool. When there are no more tables in the pool, tables are stolen
68 * from the oldest mapped entries in the tree. This is only possible
69 * because all memory mappings are stored in the kernel memory map
70 * structures, independent of the pmap structures. A VA which references
71 * one of these invalidated maps will cause a page fault. The kernel
72 * will determine that the page fault was caused by a task using a valid
73 * VA, but for some reason (which does not concern it), that address was
74 * not mapped. It will ask the pmap code to re-map the entry and then
75 * it will resume executing the faulting task.
76 *
77 * In this manner the most efficient use of the page table space is
78 * achieved. Tasks which do not execute often will have their tables
79 * stolen and reused by tasks which execute more frequently. The best
80 * size for the page table pool will probably be determined by
81 * experimentation.
82 *
83 * You read all of the comments so far. Good for you.
84 * Now go play!
85 */
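/*
 * A worked example of the table walk described above (illustrative
 * only; the 7/6/6/13 bit split is taken from the description above,
 * and the resulting shift amounts of 25, 19 and 13 are assumptions
 * about the MMU_TIA/TIB/TIC macros defined elsewhere):
 *
 *	VA 0x0D255EF0 = 0000110 100100 101010 1111011110000 (binary)
 *	                  TIA     TIB    TIC       offset
 *
 *	A index = VA >> 25          = 0x06
 *	B index = (VA >> 19) & 0x3f = 0x24
 *	C index = (VA >> 13) & 0x3f = 0x2a
 *	offset  = VA & 0x1fff       = 0x1ef0
 *
 * The 13-bit offset implies the 8KB (0x2000 byte) page size used
 * throughout this file.
 */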
86
87 /*** A Note About the 68851 Address Translation Cache
88 * The MC68851 has a 64 entry cache, called the Address Translation Cache
89 * or 'ATC'. This cache stores the most recently used page descriptors
90 * accessed by the MMU when it does translations. Using a marker called a
91 * 'task alias' the MMU can store the descriptors from 8 different table
92 * spaces concurrently. The task alias is associated with the base
93 * address of the level A table of that address space. When an address
94 * space is currently active (the CRP currently points to its A table)
95 * the only cached descriptors that will be obeyed are ones which have a
96 * matching task alias of the current space associated with them.
97 *
98 * Since the cache is always consulted before any table lookups are done,
99 * it is important that it accurately reflect the state of the MMU tables.
100 * Whenever a change has been made to a table that has been loaded into
101 * the MMU, the code must be sure to flush any cached entries that are
102 * affected by the change. These instances are documented in the code at
103 * various points.
104 */
105 /*** A Note About the Note About the 68851 Address Translation Cache
106 * 4 months into this code I discovered that the sun3x does not have
107 * an MC68851 chip. Instead, it has a version of this MMU that is part of
108 * the 68030 CPU.
109 * Although it behaves very similarly to the 68851, it only has 1 task
110 * alias and a 22 entry cache. So sadly (or happily), the first paragraph
111 * of the previous note does not apply to the sun3x pmap.
112 */
113
114 #include "opt_ddb.h"
115
116 #include <sys/param.h>
117 #include <sys/systm.h>
118 #include <sys/proc.h>
119 #include <sys/malloc.h>
120 #include <sys/pool.h>
121 #include <sys/user.h>
122 #include <sys/queue.h>
123 #include <sys/kcore.h>
124
125 #include <uvm/uvm.h>
126
127 #define PAGER_SVA (uvm.pager_sva)
128 #define PAGER_EVA (uvm.pager_eva)
129
130 #include <machine/cpu.h>
131 #include <machine/kcore.h>
132 #include <machine/mon.h>
133 #include <machine/pmap.h>
134 #include <machine/pte.h>
135 #include <machine/vmparam.h>
136
137 #include <sun3/sun3/cache.h>
138 #include <sun3/sun3/machdep.h>
139
140 #include "pmap_pvt.h"
141
142 /* XXX - What headers declare these? */
143 extern struct pcb *curpcb;
144 extern int physmem;
145
146 extern void copypage __P((const void*, void*));
147 extern void zeropage __P((void*));
148
149 /* Defined in locore.s */
150 extern char kernel_text[];
151
152 /* Defined by the linker */
153 extern char etext[], edata[], end[];
154 extern char *esym; /* DDB */
155
156 /*************************** DEBUGGING DEFINITIONS ***********************
157 * Macros, preprocessor defines and variables used in debugging can make *
158 * code hard to read. Anything used exclusively for debugging purposes *
159 * is defined here to avoid having such mess scattered around the file. *
160 *************************************************************************/
161 #ifdef PMAP_DEBUG
162 /*
163 * To aid the debugging process, macros should be expanded into smaller steps
164 * that accomplish the same goal, yet provide convenient places for placing
165 * breakpoints. When this code is compiled with PMAP_DEBUG mode defined, the
166 * 'INLINE' keyword is defined to an empty string. This way, any function
167 * defined to be a 'static INLINE' will become 'outlined' and compiled as
168 * a separate function, which is much easier to debug.
169 */
170 #define INLINE /* nothing */
171
172 /*
173 * It is sometimes convenient to watch the activity of a particular table
174 * in the system. The following variables are used for that purpose.
175 */
176 a_tmgr_t *pmap_watch_atbl = 0;
177 b_tmgr_t *pmap_watch_btbl = 0;
178 c_tmgr_t *pmap_watch_ctbl = 0;
179
180 int pmap_debug = 0;
181 #define DPRINT(args) if (pmap_debug) printf args
182
183 #else /********** Stuff below is defined if NOT debugging **************/
184
185 #define INLINE inline
186 #define DPRINT(args) /* nada */
187
188 #endif /* PMAP_DEBUG */
189 /*********************** END OF DEBUGGING DEFINITIONS ********************/
190
191 /*** Management Structure - Memory Layout
192 * For every MMU table in the sun3x pmap system there must be a way to
193 * manage it; we must know which process is using it, what other tables
194 * depend on it, and whether or not it contains any locked pages. This
195 * is solved by the creation of 'table management' or 'tmgr'
196 * structures, one for each MMU table in the system.
197 *
198 * MAP OF MEMORY USED BY THE PMAP SYSTEM
199 *
200 * towards lower memory
201 * kernAbase -> +-------------------------------------------------------+
202 * | Kernel MMU A level table |
203 * kernBbase -> +-------------------------------------------------------+
204 * | Kernel MMU B level tables |
205 * kernCbase -> +-------------------------------------------------------+
206 * | |
207 * | Kernel MMU C level tables |
208 * | |
209 * mmuCbase -> +-------------------------------------------------------+
210 * | User MMU C level tables |
211 * mmuBbase -> +-------------------------------------------------------+
212 * | |
213 * | User MMU B level tables |
214 * | |
215 * mmuAbase -> +-------------------------------------------------------+
216 * | User MMU A level tables |
217 * tmgrAbase -> +-------------------------------------------------------+
218 * | TMGR A level table structures |
219 * tmgrBbase -> +-------------------------------------------------------+
220 * | TMGR B level table structures |
221 * tmgrCbase -> +-------------------------------------------------------+
222 * | TMGR C level table structures |
223 * pvebase -> +-------------------------------------------------------+
224 * | Physical to Virtual mapping table (list elements) |
225 * pvbase -> +-------------------------------------------------------+
226 * | Physical to Virtual mapping table (list heads) |
227 * | |
228 * +-------------------------------------------------------+
229 * towards higher memory
230 *
231 * For every A table in the MMU A area, there will be a corresponding
232 * a_tmgr structure in the TMGR A area. The same will be true for
233 * the B and C tables. This arrangement will make it easy to find the
234 * controlling tmgr structure for any table in the system by use of
235 * (relatively) simple macros.
236 */
237
238 /*
239 * Global variables for storing the base addresses for the areas
240 * labeled above.
241 */
242 static vm_offset_t kernAphys;
243 static mmu_long_dte_t *kernAbase;
244 static mmu_short_dte_t *kernBbase;
245 static mmu_short_pte_t *kernCbase;
246 static mmu_short_pte_t *mmuCbase;
247 static mmu_short_dte_t *mmuBbase;
248 static mmu_long_dte_t *mmuAbase;
249 static a_tmgr_t *Atmgrbase;
250 static b_tmgr_t *Btmgrbase;
251 static c_tmgr_t *Ctmgrbase;
252 static pv_t *pvbase;
253 static pv_elem_t *pvebase;
254 struct pmap kernel_pmap;
255
256 /*
257 * This holds the CRP currently loaded into the MMU.
258 */
259 struct mmu_rootptr kernel_crp;
260
261 /*
262 * Just all around global variables.
263 */
264 static TAILQ_HEAD(a_pool_head_struct, a_tmgr_struct) a_pool;
265 static TAILQ_HEAD(b_pool_head_struct, b_tmgr_struct) b_pool;
266 static TAILQ_HEAD(c_pool_head_struct, c_tmgr_struct) c_pool;
267
268
269 /*
270 * Flags used to mark the safety/availability of certain operations or
271 * resources.
272 */
273 static boolean_t pv_initialized = FALSE, /* PV system has been initialized. */
274 bootstrap_alloc_enabled = FALSE; /*Safe to use pmap_bootstrap_alloc().*/
275 int tmp_vpages_inuse; /* Temporary virtual pages are in use */
276
277 /*
278 * XXX: For now, retain the traditional variables that were
279 * used in the old pmap/vm interface (without NONCONTIG).
280 */
281 /* Kernel virtual address space available: */
282 vm_offset_t virtual_avail, virtual_end;
283 /* Physical address space available: */
284 vm_offset_t avail_start, avail_end;
285
286 /* This keeps track of the end of the contiguously mapped range. */
287 vm_offset_t virtual_contig_end;
288
289 /* Physical address used by pmap_next_page() */
290 vm_offset_t avail_next;
291
292 /* These are used by pmap_copy_page(), etc. */
293 vm_offset_t tmp_vpages[2];
294
295 /* memory pool for pmap structures */
296 struct pool pmap_pmap_pool;
297
298 /*
299 * The 3/80 is the only member of the sun3x family that has non-contiguous
300 * physical memory. Memory is divided into 4 banks which are physically
301 * locatable on the system board. Although the size of these banks varies
302 * with the size of memory they contain, their base addresses are
303 * permanently fixed. The following structure, which describes these
304 * banks, is initialized by pmap_bootstrap() after it reads from a similar
305 * structure provided by the ROM Monitor.
306 *
307 * For the other machines in the sun3x architecture which do have contiguous
308 * RAM, this list will have only one entry, which will describe the entire
309 * range of available memory.
310 */
311 struct pmap_physmem_struct avail_mem[SUN3X_NPHYS_RAM_SEGS];
312 u_int total_phys_mem;
313
314 /*************************************************************************/
315
316 /*
317 * XXX - Should "tune" these based on statistics.
318 *
319 * My first guess about the relative numbers of these needed is
320 * based on the fact that a "typical" process will have several
321 * pages mapped at low virtual addresses (text, data, bss), then
322 * some mapped shared libraries, and then some stack pages mapped
323 * near the high end of the VA space. Each process can use only
324 * one A table, and most will use only two B tables (maybe three)
325 * and probably about four C tables. Therefore, the first guess
326 * at the relative numbers of these needed is 1:2:4 -gwr
327 *
328 * The number of C tables needed is closely related to the amount
329 * of physical memory available plus a certain amount attributable
330 * to the use of double mappings. With a few simulation statistics
331 * we can find a reasonably good estimation of this unknown value.
332 * Armed with that and the above ratios, we have a good idea of what
333 * is needed at each level. -j
334 *
335 * Note: It is not the physical memory size, but the total mapped
336 * virtual space required by the combined working sets of all the
337 * currently _runnable_ processes. (Sleeping ones don't count.)
338 * The amount of physical memory should be irrelevant. -gwr
339 */
340 #ifdef FIXED_NTABLES
341 #define NUM_A_TABLES 16
342 #define NUM_B_TABLES 32
343 #define NUM_C_TABLES 64
344 #else
345 unsigned int NUM_A_TABLES, NUM_B_TABLES, NUM_C_TABLES;
346 #endif /* FIXED_NTABLES */
347
348 /*
349 * This determines our total virtual mapping capacity.
350 * Yes, it is a FIXED value so we can pre-allocate.
351 */
352 #define NUM_USER_PTES (NUM_C_TABLES * MMU_C_TBL_SIZE)
353
354 /*
355 * The size of the Kernel Virtual Address Space (KVAS)
356 * for purposes of MMU table allocation is -KERNBASE
357 * (length from KERNBASE to 0xFFFFffff)
358 */
359 #define KVAS_SIZE (-KERNBASE)
360
361 /* Numbers of kernel MMU tables to support KVAS_SIZE. */
362 #define KERN_B_TABLES (KVAS_SIZE >> MMU_TIA_SHIFT)
363 #define KERN_C_TABLES (KVAS_SIZE >> MMU_TIB_SHIFT)
364 #define NUM_KERN_PTES (KVAS_SIZE >> MMU_TIC_SHIFT)
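/*
 * Worked example (illustrative only, not used by the code): assuming
 * KERNBASE is 0xF8000000 and the TIA/TIB/TIC shifts are 25/19/13 as
 * described at the top of this file, KVAS_SIZE is 0x08000000 (128MB)
 * and the macros above evaluate to:
 *
 *	KERN_B_TABLES = 0x08000000 >> 25 =     4
 *	KERN_C_TABLES = 0x08000000 >> 19 =   256
 *	NUM_KERN_PTES = 0x08000000 >> 13 = 16384
 *
 * i.e. 16384 PTEs of 8KB each cover the 128MB kernel address range.
 */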
365
366 /*************************** MISCELLANEOUS MACROS *************************/
367 #define pmap_lock(pmap) simple_lock(&pmap->pm_lock)
368 #define pmap_unlock(pmap) simple_unlock(&pmap->pm_lock)
369 #define pmap_add_ref(pmap) ++pmap->pm_refcount
370 #define pmap_del_ref(pmap) --pmap->pm_refcount
371 #define pmap_refcount(pmap) pmap->pm_refcount
372
373 static INLINE void * mmu_ptov __P((vm_offset_t pa));
374 static INLINE vm_offset_t mmu_vtop __P((void * va));
375
376 #if 0
377 static INLINE a_tmgr_t * mmuA2tmgr __P((mmu_long_dte_t *));
378 #endif /* 0 */
379 static INLINE b_tmgr_t * mmuB2tmgr __P((mmu_short_dte_t *));
380 static INLINE c_tmgr_t * mmuC2tmgr __P((mmu_short_pte_t *));
381
382 static INLINE pv_t *pa2pv __P((vm_offset_t pa));
383 static INLINE int pteidx __P((mmu_short_pte_t *));
384 static INLINE pmap_t current_pmap __P((void));
385
386 /*
387 * We can always convert between virtual and physical addresses
388 * for anything in the range [KERNBASE ... avail_start] because
389 * that range is GUARANTEED to be mapped linearly.
390 * We rely heavily upon this feature!
391 */
392 static INLINE void *
393 mmu_ptov(pa)
394 vm_offset_t pa;
395 {
396 register vm_offset_t va;
397
398 va = (pa + KERNBASE);
399 #ifdef PMAP_DEBUG
400 if ((va < KERNBASE) || (va >= virtual_contig_end))
401 panic("mmu_ptov");
402 #endif
403 return ((void*)va);
404 }
405 static INLINE vm_offset_t
406 mmu_vtop(vva)
407 void *vva;
408 {
409 register vm_offset_t va;
410
411 va = (vm_offset_t)vva;
412 #ifdef PMAP_DEBUG
413 if ((va < KERNBASE) || (va >= virtual_contig_end))
414 panic("mmu_vtop");
415 #endif
416 return (va - KERNBASE);
417 }
418
419 /*
420 * These macros map MMU tables to their corresponding manager structures.
421 * They are needed quite often because many of the pointers in the pmap
422 * system reference MMU tables and not the structures that control them.
423 * There needs to be a way to find one when given the other and these
424 * macros do so by taking advantage of the memory layout described above.
425 * Here's a quick step through the first macro, mmuA2tmgr():
426 *
427 * 1) find the offset of the given MMU A table from the base of its table
428 * pool (table - mmuAbase).
429 * 2) convert this offset into a table index by dividing it by the
430 * size of one MMU 'A' table. (sizeof(mmu_long_dte_t) * MMU_A_TBL_SIZE)
431 * 3) use this index to select the corresponding 'A' table manager
432 * structure from the 'A' table manager pool (Atmgrbase[index]).
433 */
434 /* This function is not currently used. */
435 #if 0
436 static INLINE a_tmgr_t *
437 mmuA2tmgr(mmuAtbl)
438 mmu_long_dte_t *mmuAtbl;
439 {
440 register int idx;
441
442 /* Which table is this in? */
443 idx = (mmuAtbl - mmuAbase) / MMU_A_TBL_SIZE;
444 #ifdef PMAP_DEBUG
445 if ((idx < 0) || (idx >= NUM_A_TABLES))
446 panic("mmuA2tmgr");
447 #endif
448 return (&Atmgrbase[idx]);
449 }
450 #endif /* 0 */
451
452 static INLINE b_tmgr_t *
453 mmuB2tmgr(mmuBtbl)
454 mmu_short_dte_t *mmuBtbl;
455 {
456 register int idx;
457
458 /* Which table is this in? */
459 idx = (mmuBtbl - mmuBbase) / MMU_B_TBL_SIZE;
460 #ifdef PMAP_DEBUG
461 if ((idx < 0) || (idx >= NUM_B_TABLES))
462 panic("mmuB2tmgr");
463 #endif
464 return (&Btmgrbase[idx]);
465 }
466
467 /* mmuC2tmgr INTERNAL
468 **
469 * Given a pte known to belong to a C table, return the address of
470 * that table's management structure.
471 */
472 static INLINE c_tmgr_t *
473 mmuC2tmgr(mmuCtbl)
474 mmu_short_pte_t *mmuCtbl;
475 {
476 register int idx;
477
478 /* Which table is this in? */
479 idx = (mmuCtbl - mmuCbase) / MMU_C_TBL_SIZE;
480 #ifdef PMAP_DEBUG
481 if ((idx < 0) || (idx >= NUM_C_TABLES))
482 panic("mmuC2tmgr");
483 #endif
484 return (&Ctmgrbase[idx]);
485 }
486
487 /* This is now a function call below.
488 * #define pa2pv(pa) \
489 * (&pvbase[(unsigned long)\
490 * m68k_btop(pa)\
491 * ])
492 */
493
494 /* pa2pv INTERNAL
495 **
496 * Return the pv_list_head element which manages the given physical
497 * address.
498 */
499 static INLINE pv_t *
500 pa2pv(pa)
501 vm_offset_t pa;
502 {
503 register struct pmap_physmem_struct *bank;
504 register int idx;
505
506 bank = &avail_mem[0];
507 while (pa >= bank->pmem_end)
508 bank = bank->pmem_next;
509
510 pa -= bank->pmem_start;
511 idx = bank->pmem_pvbase + m68k_btop(pa);
512 #ifdef PMAP_DEBUG
513 if ((idx < 0) || (idx >= physmem))
514 panic("pa2pv");
515 #endif
516 return &pvbase[idx];
517 }
518
519 /* pteidx INTERNAL
520 **
521 * Return the index of the given PTE within the entire fixed table of
522 * PTEs.
523 */
524 static INLINE int
525 pteidx(pte)
526 mmu_short_pte_t *pte;
527 {
528 return (pte - kernCbase);
529 }
530
531 /*
532 * This just offers a place to put some debugging checks,
533 * and reduces the number of places "curproc" appears...
534 */
535 static INLINE pmap_t
536 current_pmap()
537 {
538 struct proc *p;
539 struct vmspace *vm;
540 vm_map_t map;
541 pmap_t pmap;
542
543 p = curproc; /* XXX */
544 if (p == NULL)
545 pmap = &kernel_pmap;
546 else {
547 vm = p->p_vmspace;
548 map = &vm->vm_map;
549 pmap = vm_map_pmap(map);
550 }
551
552 return (pmap);
553 }
554
555
556 /*************************** FUNCTION DEFINITIONS ************************
557 * These appear here merely for the compiler to enforce type checking on *
558 * all function calls. *
559 *************************************************************************/
560
561 /** External functions
562 ** - functions used within this module but written elsewhere.
563 ** all of these functions are in locore.s
564 ** XXX - These functions were later replaced with their more cryptic
565 ** hp300 counterparts. They may be removed now.
566 **/
567 #if 0 /* deprecated mmu */
568 void mmu_seturp __P((vm_offset_t));
569 void mmu_flush __P((int, vm_offset_t));
570 void mmu_flusha __P((void));
571 #endif /* 0 */
572
573 /** Internal functions
574 ** Most functions used only within this module are defined in
575 ** pmap_pvt.h (why not here if used only here?)
576 **/
577 static void pmap_page_upload __P((void));
578
579 /** Interface functions
580 ** - functions required by the Mach VM Pmap interface, with MACHINE_CONTIG
581 ** defined.
582 **/
583 int pmap_page_index __P((vm_offset_t));
584 void pmap_pinit __P((pmap_t));
585 void pmap_release __P((pmap_t));
586
587 /********************************** CODE ********************************
588 * Functions that are called from other parts of the kernel are labeled *
589 * as 'INTERFACE' functions. Functions that are only called from *
590 * within the pmap module are labeled as 'INTERNAL' functions. *
591 * Functions that are internal, but are not (currently) used at all are *
592 * labeled 'INTERNAL_X'. *
593 ************************************************************************/
594
595 /* pmap_bootstrap INTERNAL
596 **
597 * Initializes the pmap system. Called at boot time from
598 * locore2.c:_vm_init()
599 *
600 * Reminder: having a pmap_bootstrap_alloc() and also having the VM
601 * system implement pmap_steal_memory() is redundant.
602 * Don't release this code without removing one or the other!
603 */
604 void
605 pmap_bootstrap(nextva)
606 vm_offset_t nextva;
607 {
608 struct physmemory *membank;
609 struct pmap_physmem_struct *pmap_membank;
610 vm_offset_t va, pa, eva;
611 int b, c, i, j; /* running table counts */
612 int size, resvmem;
613
614 /*
615 * This function is called by __bootstrap after it has
616 * determined the type of machine and made the appropriate
617 * patches to the ROM vectors (XXX- I don't quite know what I meant
618 * by that.) It allocates and sets up enough of the pmap system
619 * to manage the kernel's address space.
620 */
621
622 /*
623 * Determine the range of kernel virtual and physical
624 * space available. Note that we ABSOLUTELY DEPEND on
625 * the fact that the first bank of memory (4MB) is
626 * mapped linearly to KERNBASE (which we guaranteed in
627 * the first instructions of locore.s).
628 * That is plenty for our bootstrap work.
629 */
630 virtual_avail = m68k_round_page(nextva);
631 virtual_contig_end = KERNBASE + 0x400000; /* +4MB */
632 virtual_end = VM_MAX_KERNEL_ADDRESS;
633 /* Don't need avail_start til later. */
634
635 /* We may now call pmap_bootstrap_alloc(). */
636 bootstrap_alloc_enabled = TRUE;
637
638 /*
639 * This is a somewhat unwrapped loop to deal with
640 * copying the PROM's 'physmem' banks into the pmap's
641 * banks. The following is always assumed:
642 * 1. There is always at least one bank of memory.
643 * 2. There is always a last bank of memory, and its
644 * pmem_next member must be set to NULL.
645 */
646 membank = romVectorPtr->v_physmemory;
647 pmap_membank = avail_mem;
648 total_phys_mem = 0;
649
650 for (;;) { /* break on !membank */
651 pmap_membank->pmem_start = membank->address;
652 pmap_membank->pmem_end = membank->address + membank->size;
653 total_phys_mem += membank->size;
654 membank = membank->next;
655 if (!membank)
656 break;
657 /* This silly syntax arises because pmap_membank
658 * is really a pre-allocated array, but it is put into
659 * use as a linked list.
660 */
661 pmap_membank->pmem_next = pmap_membank + 1;
662 pmap_membank = pmap_membank->pmem_next;
663 }
664 /* This is the last element. */
665 pmap_membank->pmem_next = NULL;
666
667 /*
668 * Note: total_phys_mem, physmem represent
669 * actual physical memory, including that
670 * reserved for the PROM monitor.
671 */
672 physmem = btoc(total_phys_mem);
673
674 /*
675 * The last bank of memory should be reduced to prevent the
676 * physical pages needed by the PROM monitor from being used
677 * in the VM system.
678 */
679 resvmem = total_phys_mem - *(romVectorPtr->memoryAvail);
680 resvmem = m68k_round_page(resvmem);
681 pmap_membank->pmem_end -= resvmem;
682
683 /*
684 * Avail_end is set to the first byte of physical memory
685 * after the end of the last bank. We use this only to
686 * determine if a physical address is "managed" memory.
687 */
688 avail_end = pmap_membank->pmem_end;
689
690 /*
691 * First allocate enough kernel MMU tables to map all
692 * of kernel virtual space from KERNBASE to 0xFFFFFFFF.
693 * Note: All must be aligned on 256 byte boundaries.
694 * Start with the level-A table (one of those).
695 */
696 size = sizeof(mmu_long_dte_t) * MMU_A_TBL_SIZE;
697 kernAbase = pmap_bootstrap_alloc(size);
698 bzero(kernAbase, size);
699
700 /* Now the level-B kernel tables... */
701 size = sizeof(mmu_short_dte_t) * MMU_B_TBL_SIZE * KERN_B_TABLES;
702 kernBbase = pmap_bootstrap_alloc(size);
703 bzero(kernBbase, size);
704
705 /* Now the level-C kernel tables... */
706 size = sizeof(mmu_short_pte_t) * MMU_C_TBL_SIZE * KERN_C_TABLES;
707 kernCbase = pmap_bootstrap_alloc(size);
708 bzero(kernCbase, size);
709 /*
710 * Note: In order for the PV system to work correctly, the kernel
711 * and user-level C tables must be allocated contiguously.
712 * Nothing should be allocated between here and the allocation of
713 * mmuCbase below. XXX: Should do this as one allocation, and
714 * then compute a pointer for mmuCbase instead of this...
715 *
716 * Allocate user MMU tables.
717 * These must be contiguous with the preceding.
718 */
719
720 #ifndef FIXED_NTABLES
721 /*
722 * The number of user-level C tables that should be allocated is
723 * related to the size of physical memory. In general, there should
724 * be enough tables to map four times the amount of available RAM.
725 * The extra amount is needed because some table space is wasted by
726 * fragmentation.
727 */
728 NUM_C_TABLES = (total_phys_mem * 4) / (MMU_C_TBL_SIZE * MMU_PAGE_SIZE);
729 NUM_B_TABLES = NUM_C_TABLES / 2;
730 NUM_A_TABLES = NUM_B_TABLES / 2;
731 #endif /* !FIXED_NTABLES */
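	/*
	 * Worked example (illustrative arithmetic only, assuming the
	 * 8KB page size and 64-entry C tables described at the top of
	 * this file): on a machine with 16MB of RAM,
	 *
	 *	NUM_C_TABLES = (16MB * 4) / (64 * 8KB) = 128
	 *	NUM_B_TABLES = 128 / 2                 =  64
	 *	NUM_A_TABLES =  64 / 2                 =  32
	 *
	 * giving 128 * 64 = 8192 user PTEs, enough to map 64MB of user
	 * virtual space (four times the physical memory, as intended).
	 */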
732
733 size = sizeof(mmu_short_pte_t) * MMU_C_TBL_SIZE * NUM_C_TABLES;
734 mmuCbase = pmap_bootstrap_alloc(size);
735
736 size = sizeof(mmu_short_dte_t) * MMU_B_TBL_SIZE * NUM_B_TABLES;
737 mmuBbase = pmap_bootstrap_alloc(size);
738
739 size = sizeof(mmu_long_dte_t) * MMU_A_TBL_SIZE * NUM_A_TABLES;
740 mmuAbase = pmap_bootstrap_alloc(size);
741
742 /*
743 * Fill in the never-changing part of the kernel tables.
744 * For simplicity, the kernel's mappings will be editable as a
745 * flat array of page table entries at kernCbase. The
746 * higher level 'A' and 'B' tables must be initialized to point
747 * to this lower one.
748 */
749 b = c = 0;
750
751 /*
752 * Invalidate all mappings below KERNBASE in the A table.
753 * This area has already been zeroed out, but it is good
754 * practice to explicitly show that we are interpreting
755 * it as a list of A table descriptors.
756 */
757 for (i = 0; i < MMU_TIA(KERNBASE); i++) {
758 kernAbase[i].addr.raw = 0;
759 }
760
761 /*
762 * Set up the kernel A and B tables so that they will reference the
763 * correct spots in the contiguous table of PTEs allocated for the
764 * kernel's virtual memory space.
765 */
766 for (i = MMU_TIA(KERNBASE); i < MMU_A_TBL_SIZE; i++) {
767 kernAbase[i].attr.raw =
768 MMU_LONG_DTE_LU | MMU_LONG_DTE_SUPV | MMU_DT_SHORT;
769 kernAbase[i].addr.raw = mmu_vtop(&kernBbase[b]);
770
771 for (j=0; j < MMU_B_TBL_SIZE; j++) {
772 kernBbase[b + j].attr.raw = mmu_vtop(&kernCbase[c])
773 | MMU_DT_SHORT;
774 c += MMU_C_TBL_SIZE;
775 }
776 b += MMU_B_TBL_SIZE;
777 }
778
779 /* XXX - Doing kernel_pmap a little further down. */
780
781 pmap_alloc_usermmu(); /* Allocate user MMU tables. */
782 pmap_alloc_usertmgr(); /* Allocate user MMU table managers.*/
783 pmap_alloc_pv(); /* Allocate physical->virtual map. */
784
785 /*
786 * We are now done with pmap_bootstrap_alloc(). Round up
787 * `virtual_avail' to the nearest page, and set the flag
788 * to prevent use of pmap_bootstrap_alloc() hereafter.
789 */
790 pmap_bootstrap_aalign(NBPG);
791 bootstrap_alloc_enabled = FALSE;
792
793 /*
794 * Now that we are done with pmap_bootstrap_alloc(), we
795 * must save the virtual and physical addresses of the
796 * end of the linearly mapped range, which are stored in
797 * virtual_contig_end and avail_start, respectively.
798 * These variables will never change after this point.
799 */
800 virtual_contig_end = virtual_avail;
801 avail_start = virtual_avail - KERNBASE;
802
803 /*
804 * `avail_next' is a running pointer used by pmap_next_page() to
805 * keep track of the next available physical page to be handed
806 * to the VM system during its initialization, in which it
807 * asks for physical pages, one at a time.
808 */
809 avail_next = avail_start;
810
811 /*
812 * Now allocate some virtual addresses, but not the physical pages
813 * behind them. Note that virtual_avail is already page-aligned.
814 *
815 * tmp_vpages[] is an array of two virtual pages used for temporary
816 * kernel mappings in the pmap module to facilitate various physical
817 * address-oriented operations.
818 */
819 tmp_vpages[0] = virtual_avail;
820 virtual_avail += NBPG;
821 tmp_vpages[1] = virtual_avail;
822 virtual_avail += NBPG;
823
824 /** Initialize the PV system **/
825 pmap_init_pv();
826
827 /*
828 * Fill in the kernel_pmap structure and kernel_crp.
829 */
830 kernAphys = mmu_vtop(kernAbase);
831 kernel_pmap.pm_a_tmgr = NULL;
832 kernel_pmap.pm_a_phys = kernAphys;
833 kernel_pmap.pm_refcount = 1; /* always in use */
834 simple_lock_init(&kernel_pmap.pm_lock);
835
836 kernel_crp.rp_attr = MMU_LONG_DTE_LU | MMU_DT_LONG;
837 kernel_crp.rp_addr = kernAphys;
838
839 /*
840 * Now pmap_enter_kernel() may be used safely and will be
841 * the main interface used hereafter to modify the kernel's
842 * virtual address space. Note that since we are still running
843 * under the PROM's address table, none of these table modifications
844 * actually take effect until pmap_takeover_mmu() is called.
845 *
846 * Note: Our tables do NOT have the PROM linear mappings!
847 * Only the mappings created here exist in our tables, so
848 * remember to map anything we expect to use.
849 */
850 va = (vm_offset_t) KERNBASE;
851 pa = 0;
852
853 /*
854 * The first page of the kernel virtual address space is the msgbuf
855 * page. The page attributes (data, non-cached) are set here, while
856 * the address is assigned to this global pointer in cpu_startup().
857 * It is non-cached, mostly due to paranoia.
858 */
859 pmap_enter_kernel(va, pa|PMAP_NC, VM_PROT_ALL);
860 va += NBPG; pa += NBPG;
861
862 /* Next page is used as the temporary stack. */
863 pmap_enter_kernel(va, pa, VM_PROT_ALL);
864 va += NBPG; pa += NBPG;
865
866 /*
867 * Map all of the kernel's text segment as read-only and cacheable.
868 * (Cacheable is implied by default). Unfortunately, the last bytes
869 * of kernel text and the first bytes of kernel data will often be
870 * sharing the same page. Therefore, the last page of kernel text
871 * has to be mapped as read/write, to accommodate the data.
872 */
873 eva = m68k_trunc_page((vm_offset_t)etext);
874 for (; va < eva; va += NBPG, pa += NBPG)
875 pmap_enter_kernel(va, pa, VM_PROT_READ|VM_PROT_EXECUTE);
876
877 /*
878 * Map all of the kernel's data as read/write and cacheable.
879 * This includes: data, BSS, symbols, and everything in the
880 * contiguous memory used by pmap_bootstrap_alloc()
881 */
882 for (; pa < avail_start; va += NBPG, pa += NBPG)
883 pmap_enter_kernel(va, pa, VM_PROT_READ|VM_PROT_WRITE);
884
885 /*
886 * At this point we are almost ready to take over the MMU. But first
887 * we must save the PROM's address space in our map, as we call its
888 * routines and make references to its data later in the kernel.
889 */
890 pmap_bootstrap_copyprom();
891 pmap_takeover_mmu();
892 pmap_bootstrap_setprom();
893
894 /* Notify the VM system of our page size. */
895 PAGE_SIZE = NBPG;
896 uvm_setpagesize();
897
898 pmap_page_upload();
899 }
900
901
902 /* pmap_alloc_usermmu INTERNAL
903 **
904 * Called from pmap_bootstrap() to allocate MMU tables that will
905 * eventually be used for user mappings.
906 */
907 void
908 pmap_alloc_usermmu()
909 {
910 /* XXX: Moved into caller. */
911 }
912
913 /* pmap_alloc_pv INTERNAL
914 **
915 * Called from pmap_bootstrap() to allocate the physical
916 * to virtual mapping list. Each physical page of memory
917 * in the system has a corresponding element in this list.
918 */
919 void
920 pmap_alloc_pv()
921 {
922 int i;
923 unsigned int total_mem;
924
925 /*
926 * Allocate a pv_head structure for every page of physical
927 * memory that will be managed by the system. Since memory on
928 * the 3/80 is non-contiguous, we cannot arrive at a total page
929 * count by subtraction of the lowest available address from the
930 * highest, but rather we have to step through each memory
931 * bank and add the number of pages in each to the total.
932 *
933 * At this time we also initialize the offset of each bank's
934 * starting pv_head within the pv_head list so that the physical
935 * memory state routines (pmap_is_referenced(),
936 * pmap_is_modified(), et al.) can quickly find corresponding
937 * pv_heads in spite of the non-contiguity.
938 */
939 total_mem = 0;
940 for (i = 0; i < SUN3X_NPHYS_RAM_SEGS; i++) {
941 avail_mem[i].pmem_pvbase = m68k_btop(total_mem);
942 total_mem += avail_mem[i].pmem_end -
943 avail_mem[i].pmem_start;
944 if (avail_mem[i].pmem_next == NULL)
945 break;
946 }
947 pvbase = (pv_t *) pmap_bootstrap_alloc(sizeof(pv_t) *
948 m68k_btop(total_phys_mem));
949 }
950
951 /* pmap_alloc_usertmgr INTERNAL
952 **
953 * Called from pmap_bootstrap() to allocate the structures which
954 * facilitate management of user MMU tables. Each user MMU table
955 * in the system has one such structure associated with it.
956 */
957 void
958 pmap_alloc_usertmgr()
959 {
960 /* Allocate user MMU table managers */
961 /* It would be a lot simpler to just make these BSS, but */
962 /* we may want to change their size at boot time... -j */
963 Atmgrbase = (a_tmgr_t *) pmap_bootstrap_alloc(sizeof(a_tmgr_t)
964 * NUM_A_TABLES);
965 Btmgrbase = (b_tmgr_t *) pmap_bootstrap_alloc(sizeof(b_tmgr_t)
966 * NUM_B_TABLES);
967 Ctmgrbase = (c_tmgr_t *) pmap_bootstrap_alloc(sizeof(c_tmgr_t)
968 * NUM_C_TABLES);
969
970 /*
971 * Allocate PV list elements for the physical to virtual
972 * mapping system.
973 */
974 pvebase = (pv_elem_t *) pmap_bootstrap_alloc(
975 sizeof(pv_elem_t) * (NUM_USER_PTES + NUM_KERN_PTES));
976 }
977
978 /* pmap_bootstrap_copyprom() INTERNAL
979 **
980 * Copy the PROM mappings into our own tables. Note, we
981 * can use physical addresses until __bootstrap returns.
982 */
983 void
984 pmap_bootstrap_copyprom()
985 {
986 struct sunromvec *romp;
987 int *mon_ctbl;
988 mmu_short_pte_t *kpte;
989 int i, len;
990
991 romp = romVectorPtr;
992
993 /*
994 * Copy the mappings in SUN3X_MON_KDB_BASE...SUN3X_MONEND
995 * Note: mon_ctbl[0] maps SUN3X_MON_KDB_BASE
996 */
997 mon_ctbl = *romp->monptaddr;
998 i = m68k_btop(SUN3X_MON_KDB_BASE - KERNBASE);
999 kpte = &kernCbase[i];
1000 len = m68k_btop(SUN3X_MONEND - SUN3X_MON_KDB_BASE);
1001
1002 for (i = 0; i < len; i++) {
1003 kpte[i].attr.raw = mon_ctbl[i];
1004 }
1005
1006 /*
1007 * Copy the mappings at MON_DVMA_BASE (to the end).
1008 * Note, in here, mon_ctbl[0] maps MON_DVMA_BASE.
1009 * Actually, we only want the last page, which the
1010 * PROM has set up for use by the "ie" driver.
1011 * (The i82586 needs its SCP there.)
1012 * If we copy all the mappings, pmap_enter_kernel
1013 * may complain about finding valid PTEs that are
1014 * not recorded in our PV lists...
1015 */
1016 mon_ctbl = *romp->shadowpteaddr;
1017 i = m68k_btop(SUN3X_MON_DVMA_BASE - KERNBASE);
1018 kpte = &kernCbase[i];
1019 len = m68k_btop(SUN3X_MON_DVMA_SIZE);
1020 for (i = (len-1); i < len; i++) {
1021 kpte[i].attr.raw = mon_ctbl[i];
1022 }
1023 }
1024
1025 /* pmap_takeover_mmu INTERNAL
1026 **
1027 * Called from pmap_bootstrap() after it has copied enough of the
1028 * PROM mappings into the kernel map so that we can use our own
1029 * MMU table.
1030 */
1031 void
1032 pmap_takeover_mmu()
1033 {
1034
1035 loadcrp(&kernel_crp);
1036 }
1037
1038 /* pmap_bootstrap_setprom() INTERNAL
1039 **
1040 * Set the PROM mappings so it can see kernel space.
1041 * Note that physical addresses are used here, which
1042 * we can get away with because this runs with the
1043 * low 1GB set for transparent translation.
1044 */
1045 void
1046 pmap_bootstrap_setprom()
1047 {
1048 mmu_long_dte_t *mon_dte;
1049 extern struct mmu_rootptr mon_crp;
1050 int i;
1051
1052 mon_dte = (mmu_long_dte_t *) mon_crp.rp_addr;
1053 for (i = MMU_TIA(KERNBASE); i < MMU_TIA(KERN_END); i++) {
1054 mon_dte[i].attr.raw = kernAbase[i].attr.raw;
1055 mon_dte[i].addr.raw = kernAbase[i].addr.raw;
1056 }
1057 }
1058
1059
1060 /* pmap_init INTERFACE
1061 **
1062 * Called at the end of vm_init() to set up the pmap system to go
1063 * into full time operation. All initialization of kernel_pmap
1064 * should be already done by now, so this should just do things
1065 * needed for user-level pmaps to work.
1066 */
1067 void
1068 pmap_init()
1069 {
1070 /** Initialize the manager pools **/
1071 TAILQ_INIT(&a_pool);
1072 TAILQ_INIT(&b_pool);
1073 TAILQ_INIT(&c_pool);
1074
1075 /**************************************************************
1076 * Initialize all tmgr structures and MMU tables they manage. *
1077 **************************************************************/
1078 /** Initialize A tables **/
1079 pmap_init_a_tables();
1080 /** Initialize B tables **/
1081 pmap_init_b_tables();
1082 /** Initialize C tables **/
1083 pmap_init_c_tables();
1084
1085 /** Initialize the pmap pools **/
1086 pool_init(&pmap_pmap_pool, sizeof(struct pmap), 0, 0, 0, "pmappl",
1087 0, pool_page_alloc_nointr, pool_page_free_nointr, M_VMPMAP);
1088 }
1089
1090 /* pmap_init_a_tables() INTERNAL
1091 **
1092 * Initializes all A managers, their MMU A tables, and inserts
1093 * them into the A manager pool for use by the system.
1094 */
1095 void
1096 pmap_init_a_tables()
1097 {
1098 int i;
1099 a_tmgr_t *a_tbl;
1100
1101 for (i=0; i < NUM_A_TABLES; i++) {
1102 /* Select the next available A manager from the pool */
1103 a_tbl = &Atmgrbase[i];
1104
1105 /*
1106 * Clear its parent entry. Set its wired and valid
1107 * entry count to zero.
1108 */
1109 a_tbl->at_parent = NULL;
1110 a_tbl->at_wcnt = a_tbl->at_ecnt = 0;
1111
1112 /* Assign it the next available MMU A table from the pool */
1113 a_tbl->at_dtbl = &mmuAbase[i * MMU_A_TBL_SIZE];
1114
1115 /*
1116 * Initialize the MMU A table with the table in the `proc0',
1117 * or kernel, mapping. This ensures that every process has
1118 * the kernel mapped in the top part of its address space.
1119 */
1120 bcopy(kernAbase, a_tbl->at_dtbl, MMU_A_TBL_SIZE *
1121 sizeof(mmu_long_dte_t));
1122
1123 /*
1124 * Finally, insert the manager into the A pool,
1125 * making it ready to be used by the system.
1126 */
1127 TAILQ_INSERT_TAIL(&a_pool, a_tbl, at_link);
1128 }
1129 }
1130
1131 /* pmap_init_b_tables() INTERNAL
1132 **
1133 * Initializes all B table managers, their MMU B tables, and
1134 * inserts them into the B manager pool for use by the system.
1135 */
1136 void
1137 pmap_init_b_tables()
1138 {
1139 int i,j;
1140 b_tmgr_t *b_tbl;
1141
1142 for (i=0; i < NUM_B_TABLES; i++) {
1143 /* Select the next available B manager from the pool */
1144 b_tbl = &Btmgrbase[i];
1145
1146 b_tbl->bt_parent = NULL; /* clear its parent, */
1147 b_tbl->bt_pidx = 0; /* parent index, */
1148 b_tbl->bt_wcnt = 0; /* wired entry count, */
1149 b_tbl->bt_ecnt = 0; /* valid entry count. */
1150
1151 /* Assign it the next available MMU B table from the pool */
1152 b_tbl->bt_dtbl = &mmuBbase[i * MMU_B_TBL_SIZE];
1153
1154 /* Invalidate every descriptor in the table */
1155 for (j=0; j < MMU_B_TBL_SIZE; j++)
1156 b_tbl->bt_dtbl[j].attr.raw = MMU_DT_INVALID;
1157
1158 /* Insert the manager into the B pool */
1159 TAILQ_INSERT_TAIL(&b_pool, b_tbl, bt_link);
1160 }
1161 }
1162
1163 /* pmap_init_c_tables() INTERNAL
1164 **
1165 * Initializes all C table managers, their MMU C tables, and
1166 * inserts them into the C manager pool for use by the system.
1167 */
1168 void
1169 pmap_init_c_tables()
1170 {
1171 int i,j;
1172 c_tmgr_t *c_tbl;
1173
1174 for (i=0; i < NUM_C_TABLES; i++) {
1175 /* Select the next available C manager from the pool */
1176 c_tbl = &Ctmgrbase[i];
1177
1178 c_tbl->ct_parent = NULL; /* clear its parent, */
1179 c_tbl->ct_pidx = 0; /* parent index, */
1180 c_tbl->ct_wcnt = 0; /* wired entry count, */
1181 c_tbl->ct_ecnt = 0; /* valid entry count, */
1182 c_tbl->ct_pmap = NULL; /* parent pmap, */
1183 c_tbl->ct_va = 0; /* base of managed range */
1184
1185 /* Assign it the next available MMU C table from the pool */
1186 c_tbl->ct_dtbl = &mmuCbase[i * MMU_C_TBL_SIZE];
1187
1188 for (j=0; j < MMU_C_TBL_SIZE; j++)
1189 c_tbl->ct_dtbl[j].attr.raw = MMU_DT_INVALID;
1190
1191 TAILQ_INSERT_TAIL(&c_pool, c_tbl, ct_link);
1192 }
1193 }
1194
1195 /* pmap_init_pv() INTERNAL
1196 **
1197 * Initializes the Physical to Virtual mapping system.
1198 */
1199 void
1200 pmap_init_pv()
1201 {
1202 int i;
1203
1204 /* Initialize every PV head. */
1205 for (i = 0; i < m68k_btop(total_phys_mem); i++) {
1206 pvbase[i].pv_idx = PVE_EOL; /* Indicate no mappings */
1207 pvbase[i].pv_flags = 0; /* Zero out page flags */
1208 }
1209
1210 pv_initialized = TRUE;
1211 }
1212
1213 /* get_a_table INTERNAL
1214 **
1215 * Retrieve and return a level A table for use in a user map.
1216 */
1217 a_tmgr_t *
1218 get_a_table()
1219 {
1220 a_tmgr_t *tbl;
1221 pmap_t pmap;
1222
1223 /* Get the top A table in the pool */
1224 tbl = a_pool.tqh_first;
1225 if (tbl == NULL) {
1226 /*
1227 * XXX - Instead of panicking here and in other get_x_table
1228 * functions, we do have the option of sleeping on the head of
1229 * the table pool. Any function which updates the table pool
1230 * would then issue a wakeup() on the head, thus waking up any
1231 * processes waiting for a table.
1232 *
1233 * Actually, the place to sleep would be when some process
1234 * asks for a "wired" mapping that would run us short of
1235 * mapping resources. This design DEPENDS on always having
1236 * some mapping resources in the pool for stealing, so we
1237 * must make sure we NEVER let the pool become empty. -gwr
1238 */
1239 panic("get_a_table: out of A tables.");
1240 }
1241
1242 TAILQ_REMOVE(&a_pool, tbl, at_link);
1243 /*
1244 * If the table has a non-null parent pointer then it is in use.
1245 * Forcibly abduct it from its parent and clear its entries.
1246 * No re-entrancy worries here. This table would not be in the
1247 * table pool unless it was available for use.
1248 *
1249 * Note that the second argument to free_a_table() is FALSE. This
1250 * indicates that the table should not be relinked into the A table
1251 * pool. That is a job for the function that called us.
1252 */
1253 if (tbl->at_parent) {
1254 pmap = tbl->at_parent;
1255 free_a_table(tbl, FALSE);
1256 pmap->pm_a_tmgr = NULL;
1257 pmap->pm_a_phys = kernAphys;
1258 }
1259 #ifdef NON_REENTRANT
1260 /*
1261 * If the table isn't to be wired down, re-insert it at the
1262 * end of the pool.
1263 */
1264 if (!wired)
1265 /*
1266 * Quandary - XXX
1267 * Would it be better to let the calling function insert this
1268 * table into the queue? By inserting it here, we are allowing
1269 * it to be stolen immediately. The calling function is
1270 * probably not expecting to use a table that it is not
1271 * assured full control of.
1272 * Answer - In the interest of re-entrancy, it is best to let
1273 * the calling function determine when a table is available
1274 * for use. Therefore this code block is not used.
1275 */
1276 TAILQ_INSERT_TAIL(&a_pool, tbl, at_link);
1277 #endif /* NON_REENTRANT */
1278 return tbl;
1279 }
1280
1281 /* get_b_table INTERNAL
1282 **
1283 * Return a level B table for use.
1284 */
1285 b_tmgr_t *
1286 get_b_table()
1287 {
1288 b_tmgr_t *tbl;
1289
1290 /* See 'get_a_table' for comments. */
1291 tbl = b_pool.tqh_first;
1292 if (tbl == NULL)
1293 panic("get_b_table: out of B tables.");
1294 TAILQ_REMOVE(&b_pool, tbl, bt_link);
1295 if (tbl->bt_parent) {
1296 tbl->bt_parent->at_dtbl[tbl->bt_pidx].attr.raw = MMU_DT_INVALID;
1297 tbl->bt_parent->at_ecnt--;
1298 free_b_table(tbl, FALSE);
1299 }
1300 #ifdef NON_REENTRANT
1301 if (!wired)
1302 /* XXX see quandary in get_a_table */
1303 /* XXX start lock */
1304 TAILQ_INSERT_TAIL(&b_pool, tbl, bt_link);
1305 /* XXX end lock */
1306 #endif /* NON_REENTRANT */
1307 return tbl;
1308 }
1309
1310 /* get_c_table INTERNAL
1311 **
1312 * Return a level C table for use.
1313 */
1314 c_tmgr_t *
1315 get_c_table()
1316 {
1317 c_tmgr_t *tbl;
1318
1319 /* See 'get_a_table' for comments */
1320 tbl = c_pool.tqh_first;
1321 if (tbl == NULL)
1322 panic("get_c_table: out of C tables.");
1323 TAILQ_REMOVE(&c_pool, tbl, ct_link);
1324 if (tbl->ct_parent) {
1325 tbl->ct_parent->bt_dtbl[tbl->ct_pidx].attr.raw = MMU_DT_INVALID;
1326 tbl->ct_parent->bt_ecnt--;
1327 free_c_table(tbl, FALSE);
1328 }
1329 #ifdef NON_REENTRANT
1330 if (!wired)
1331 /* XXX See quandary in get_a_table */
1332 /* XXX start lock */
1333 TAILQ_INSERT_TAIL(&c_pool, tbl, ct_link);
1334 /* XXX end lock */
1335 #endif /* NON_REENTRANT */
1336
1337 return tbl;
1338 }
1339
1340 /*
1341 * The following 'free_table' and 'steal_table' functions are called to
1342 * detach tables from their current obligations (parents and children) and
1343 * prepare them for reuse in another mapping.
1344 *
1345 * Free_table is used when the calling function will handle the fate
1346 * of the parent table, such as returning it to the free pool when it has
1347 * no valid entries. Functions that do not want to handle this should
1348 * call steal_table, in which the parent table's descriptors and entry
1349 * count are automatically modified when this table is removed.
1350 */
1351
1352 /* free_a_table INTERNAL
1353 **
1354 * Unmaps the given A table and all child tables from their current
1355 * mappings. Returns the number of pages that were invalidated.
1356 * If 'relink' is true, the function will return the table to the head
1357 * of the available table pool.
1358 *
1359 * Cache note: The MC68851 will automatically flush all
1360 * descriptors derived from a given A table from its
1361 * Automatic Translation Cache (ATC) if we issue a
1362 * 'PFLUSHR' instruction with the base address of the
1363 * table. This function should do so, and does.
1364 * Note note: We are using an MC68030 - there is no
1365 * PFLUSHR.
1366 */
1367 int
1368 free_a_table(a_tbl, relink)
1369 a_tmgr_t *a_tbl;
1370 boolean_t relink;
1371 {
1372 int i, removed_cnt;
1373 mmu_long_dte_t *dte;
1374 mmu_short_dte_t *dtbl;
1375 b_tmgr_t *tmgr;
1376
1377 /*
1378 * Flush the ATC cache of all cached descriptors derived
1379 * from this table.
1380 * Sun3x does not use 68851's cached table feature
1381 * flush_atc_crp(mmu_vtop(a_tbl->dte));
1382 */
1383
1384 /*
1385 * Remove any pending cache flushes that were designated
1386 * for the pmap this A table belongs to.
1387 * a_tbl->parent->atc_flushq[0] = 0;
1388 * Not implemented in sun3x.
1389 */
1390
1391 /*
1392 * All A tables in the system should retain a map for the
1393 * kernel. If the table contains any valid descriptors
1394 * (other than those for the kernel area), invalidate them all,
1395 * stopping short of the kernel's entries.
1396 */
1397 removed_cnt = 0;
1398 if (a_tbl->at_ecnt) {
1399 dte = a_tbl->at_dtbl;
1400 for (i=0; i < MMU_TIA(KERNBASE); i++) {
1401 /*
1402 * If a table entry points to a valid B table, free
1403 * it and its children.
1404 */
1405 if (MMU_VALID_DT(dte[i])) {
1406 /*
1407 * The following block does several things,
1408 * from innermost expression to the
1409 * outermost:
1410 * 1) It extracts the base
1411 * address of the B table pointed
1412 * to in the A table entry dte[i].
1413 * 2) It converts this base address into
1414 * the virtual address it can be
1415 * accessed with. (all MMU tables point
1416 * to physical addresses.)
1417 * 3) It finds the corresponding manager
1418 * structure which manages this MMU table.
1419 * 4) It frees the manager structure.
1420 * (This frees the MMU table and all
1421 * child tables. See 'free_b_table' for
1422 * details.)
1423 */
1424 dtbl = mmu_ptov(dte[i].addr.raw);
1425 tmgr = mmuB2tmgr(dtbl);
1426 removed_cnt += free_b_table(tmgr, TRUE);
1427 dte[i].attr.raw = MMU_DT_INVALID;
1428 }
1429 }
1430 a_tbl->at_ecnt = 0;
1431 }
1432 if (relink) {
1433 a_tbl->at_parent = NULL;
1434 TAILQ_REMOVE(&a_pool, a_tbl, at_link);
1435 TAILQ_INSERT_HEAD(&a_pool, a_tbl, at_link);
1436 }
1437 return removed_cnt;
1438 }
1439
1440 /* free_b_table INTERNAL
1441 **
1442 * Unmaps the given B table and all its children from their current
1443 * mappings. Returns the number of pages that were invalidated.
1444 * (For comments, see 'free_a_table()').
1445 */
1446 int
1447 free_b_table(b_tbl, relink)
1448 b_tmgr_t *b_tbl;
1449 boolean_t relink;
1450 {
1451 int i, removed_cnt;
1452 mmu_short_dte_t *dte;
1453 mmu_short_pte_t *dtbl;
1454 c_tmgr_t *tmgr;
1455
1456 removed_cnt = 0;
1457 if (b_tbl->bt_ecnt) {
1458 dte = b_tbl->bt_dtbl;
1459 for (i=0; i < MMU_B_TBL_SIZE; i++) {
1460 if (MMU_VALID_DT(dte[i])) {
1461 dtbl = mmu_ptov(MMU_DTE_PA(dte[i]));
1462 tmgr = mmuC2tmgr(dtbl);
1463 removed_cnt += free_c_table(tmgr, TRUE);
1464 dte[i].attr.raw = MMU_DT_INVALID;
1465 }
1466 }
1467 b_tbl->bt_ecnt = 0;
1468 }
1469
1470 if (relink) {
1471 b_tbl->bt_parent = NULL;
1472 TAILQ_REMOVE(&b_pool, b_tbl, bt_link);
1473 TAILQ_INSERT_HEAD(&b_pool, b_tbl, bt_link);
1474 }
1475 return removed_cnt;
1476 }
1477
1478 /* free_c_table INTERNAL
1479 **
1480 * Unmaps the given C table from use and returns it to the pool for
1481 * re-use. Returns the number of pages that were invalidated.
1482 *
1483 * This function preserves any physical page modification information
1484 * contained in the page descriptors within the C table by calling
1485 * 'pmap_remove_pte().'
1486 */
1487 int
1488 free_c_table(c_tbl, relink)
1489 c_tmgr_t *c_tbl;
1490 boolean_t relink;
1491 {
1492 int i, removed_cnt;
1493
1494 removed_cnt = 0;
1495 if (c_tbl->ct_ecnt) {
1496 for (i=0; i < MMU_C_TBL_SIZE; i++) {
1497 if (MMU_VALID_DT(c_tbl->ct_dtbl[i])) {
1498 pmap_remove_pte(&c_tbl->ct_dtbl[i]);
1499 removed_cnt++;
1500 }
1501 }
1502 c_tbl->ct_ecnt = 0;
1503 }
1504
1505 if (relink) {
1506 c_tbl->ct_parent = NULL;
1507 TAILQ_REMOVE(&c_pool, c_tbl, ct_link);
1508 TAILQ_INSERT_HEAD(&c_pool, c_tbl, ct_link);
1509 }
1510 return removed_cnt;
1511 }
1512
1513 #if 0
1514 /* free_c_table_novalid INTERNAL
1515 **
1516 * Frees the given C table manager without checking to see whether
1517 * or not it contains any valid page descriptors as it is assumed
1518 * that it does not.
1519 */
1520 void
1521 free_c_table_novalid(c_tbl)
1522 c_tmgr_t *c_tbl;
1523 {
1524 TAILQ_REMOVE(&c_pool, c_tbl, ct_link);
1525 TAILQ_INSERT_HEAD(&c_pool, c_tbl, ct_link);
1526 c_tbl->ct_parent->bt_dtbl[c_tbl->ct_pidx].attr.raw = MMU_DT_INVALID;
1527 c_tbl->ct_parent->bt_ecnt--;
1528 /*
1529 * XXX - Should call equiv. of 'free_b_table_novalid' here if
1530 * we just removed the last entry of the parent B table.
1531 * But I want to ensure that this will not endanger pmap_enter()
1532 * with sudden removal of tables it is working with.
1533 *
1534 * We should probably add another field to each table, indicating
1535 * whether or not it is 'locked', ie. in the process of being
1536 * modified.
1537 */
1538 c_tbl->ct_parent = NULL;
1539 }
1540 #endif
1541
1542 /* pmap_remove_pte INTERNAL
1543 **
1544 * Unmap the given pte and preserve any page modification
1545 * information by transferring it to the pv head of the
1546 * physical page it maps to. This function does not update
1547 * any reference counts because it is assumed that the calling
1548 * function will do so.
1549 */
1550 void
1551 pmap_remove_pte(pte)
1552 mmu_short_pte_t *pte;
1553 {
1554 u_short pv_idx, targ_idx;
1555 int s;
1556 vm_offset_t pa;
1557 pv_t *pv;
1558
1559 pa = MMU_PTE_PA(*pte);
1560 if (is_managed(pa)) {
1561 pv = pa2pv(pa);
1562 targ_idx = pteidx(pte); /* Index of PTE being removed */
1563
1564 /*
1565 * If the PTE being removed is the first (or only) PTE in
1566 * the list of PTEs currently mapped to this page, remove the
1567 * PTE by changing the index found on the PV head. Otherwise
1568 * a linear search through the list will have to be executed
1569 * in order to find the PVE which points to the PTE being
1570 * removed, so that it may be modified to point to its new
1571 * neighbor.
1572 */
1573 s = splimp();
1574 pv_idx = pv->pv_idx; /* Index of first PTE in PV list */
1575 if (pv_idx == targ_idx) {
1576 pv->pv_idx = pvebase[targ_idx].pve_next;
1577 } else {
1578 /*
1579 * Find the PV element pointing to the target
1580 * element. Note: may have pv_idx==PVE_EOL
1581 */
1582 for (;;) {
1583 if (pv_idx == PVE_EOL) {
1584 #ifdef PMAP_DEBUG
1585 printf("pmap_remove_pte: PVE_EOL\n");
1586 Debugger();
1587 #endif
1588 goto pv_not_found;
1589 }
1590 if (pvebase[pv_idx].pve_next == targ_idx)
1591 break;
1592 pv_idx = pvebase[pv_idx].pve_next;
1593 }
1594 /*
1595 * At this point, pv_idx is the index of the PV
1596 * element just before the target element in the list.
1597 * Unlink the target.
1598 */
1599 pvebase[pv_idx].pve_next = pvebase[targ_idx].pve_next;
1600 pv_not_found: ;
1601 }
1602 /*
1603 * Save the mod/ref bits of the pte by simply
1604 * ORing the entire pte onto the pv_flags member
1605 * of the pv structure.
1606 * There is no need to use a separate bit pattern
1607 * for usage information on the pv head than that
1608 * which is used on the MMU ptes.
1609 */
1610 pv->pv_flags |= (u_short) pte->attr.raw;
1611 splx(s);
1612 }
1613
1614 pte->attr.raw = MMU_DT_INVALID;
1615 }
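/*
 * Illustrative sketch only (not compiled, and not called anywhere in
 * the pmap): walk the index-linked PV list for a managed physical
 * page and count its current mappings.  This mirrors the traversal
 * done in pmap_remove_pte() above; the helper name is hypothetical.
 */
#if 0
static int pmap_count_mappings __P((vm_offset_t));

static int
pmap_count_mappings(pa)
	vm_offset_t pa;
{
	pv_t *pv;
	u_short idx;
	int count;

	if (!is_managed(pa))
		return (0);

	count = 0;
	pv = pa2pv(pa);

	/* pv_idx heads a list of indices into pvebase[], ended by PVE_EOL. */
	for (idx = pv->pv_idx; idx != PVE_EOL; idx = pvebase[idx].pve_next)
		count++;

	return (count);
}
#endif /* 0 */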
1616
1617 /* pmap_stroll INTERNAL
1618 **
1619 * Retrieve the addresses of all table managers involved in the mapping of
1620 * the given virtual address. If the table walk completed successfully,
1621 * return TRUE. If it was only partially successful, return FALSE.
1622 * The table walk performed by this function is important to many other
1623 * functions in this module.
1624 *
1625 * Note: This function ought to be easier to read.
1626 */
1627 boolean_t
1628 pmap_stroll(pmap, va, a_tbl, b_tbl, c_tbl, pte, a_idx, b_idx, pte_idx)
1629 pmap_t pmap;
1630 vm_offset_t va;
1631 a_tmgr_t **a_tbl;
1632 b_tmgr_t **b_tbl;
1633 c_tmgr_t **c_tbl;
1634 mmu_short_pte_t **pte;
1635 int *a_idx, *b_idx, *pte_idx;
1636 {
1637 mmu_long_dte_t *a_dte; /* A: long descriptor table */
1638 mmu_short_dte_t *b_dte; /* B: short descriptor table */
1639
1640 if (pmap == pmap_kernel())
1641 return FALSE;
1642
1643 /* Does the given pmap have its own A table? */
1644 *a_tbl = pmap->pm_a_tmgr;
1645 if (*a_tbl == NULL)
1646 return FALSE; /* No. Return unknown. */
1647 /* Does the A table have a valid B table
1648 * under the corresponding table entry?
1649 */
1650 *a_idx = MMU_TIA(va);
1651 a_dte = &((*a_tbl)->at_dtbl[*a_idx]);
1652 if (!MMU_VALID_DT(*a_dte))
1653 return FALSE; /* No. Return unknown. */
1654 /* Yes. Extract B table from the A table. */
1655 *b_tbl = mmuB2tmgr(mmu_ptov(a_dte->addr.raw));
1656 /* Does the B table have a valid C table
1657 * under the corresponding table entry?
1658 */
1659 *b_idx = MMU_TIB(va);
1660 b_dte = &((*b_tbl)->bt_dtbl[*b_idx]);
1661 if (!MMU_VALID_DT(*b_dte))
1662 return FALSE; /* No. Return unknown. */
1663 /* Yes. Extract C table from the B table. */
1664 *c_tbl = mmuC2tmgr(mmu_ptov(MMU_DTE_PA(*b_dte)));
1665 *pte_idx = MMU_TIC(va);
1666 *pte = &((*c_tbl)->ct_dtbl[*pte_idx]);
1667
1668 return TRUE;
1669 }
1670
1671 /* pmap_enter INTERFACE
1672 **
1673 * Called by the kernel to map a virtual address
1674 * to a physical address in the given process map.
1675 *
1676 * Note: this function should apply an exclusive lock
1677 	 * on the pmap system for its duration. (It certainly
1678 * would save my hair!!)
1679 * This function ought to be easier to read.
1680 */
1681 int
1682 pmap_enter(pmap, va, pa, prot, flags)
1683 pmap_t pmap;
1684 vm_offset_t va;
1685 vm_offset_t pa;
1686 vm_prot_t prot;
1687 int flags;
1688 {
1689 boolean_t insert, managed; /* Marks the need for PV insertion.*/
1690 u_short nidx; /* PV list index */
1691 int s; /* Used for splimp()/splx() */
1692 int mapflags; /* Flags for the mapping (see NOTE1) */
1693 u_int a_idx, b_idx, pte_idx; /* table indices */
1694 a_tmgr_t *a_tbl; /* A: long descriptor table manager */
1695 b_tmgr_t *b_tbl; /* B: short descriptor table manager */
1696 c_tmgr_t *c_tbl; /* C: short page table manager */
1697 mmu_long_dte_t *a_dte; /* A: long descriptor table */
1698 mmu_short_dte_t *b_dte; /* B: short descriptor table */
1699 mmu_short_pte_t *c_pte; /* C: short page descriptor table */
1700 pv_t *pv; /* pv list head */
1701 boolean_t wired; /* is the mapping to be wired? */
1702 enum {NONE, NEWA, NEWB, NEWC} llevel; /* used at end */
1703
1704 if (pmap == NULL)
1705 return (KERN_SUCCESS);
1706 if (pmap == pmap_kernel()) {
1707 pmap_enter_kernel(va, pa, prot);
1708 return (KERN_SUCCESS);
1709 }
1710
1711 /*
1712 * Determine if the mapping should be wired.
1713 */
1714 wired = ((flags & PMAP_WIRED) != 0);
1715
1716 /*
1717 * NOTE1:
1718 *
1719 * On November 13, 1999, someone changed the pmap_enter() API such
1720 * that it now accepts a 'flags' argument. This new argument
1721 * contains bit-flags for the architecture-independent (UVM) system to
1722 * use in signalling certain mapping requirements to the architecture-
1723 * dependent (pmap) system. The argument it replaces, 'wired', is now
1724 * one of the flags within it.
1725 *
1726 * In addition to flags signaled by the architecture-independent
1727 * system, parts of the architecture-dependent section of the sun3x
1728 * kernel pass their own flags in the lower, unused bits of the
1729 * physical address supplied to this function. These flags are
1730 * extracted and stored in the temporary variable 'mapflags'.
1731 *
1732 * Extract sun3x specific flags from the physical address.
1733 */
1734 mapflags = (pa & ~MMU_PAGE_MASK);
1735 pa &= MMU_PAGE_MASK;
1736
1737 /*
1738 * Determine if the physical address being mapped is on-board RAM.
1739 * Any other area of the address space is likely to belong to a
1740 	 * device and hence it would be disastrous to cache its contents.
1741 */
1742 if ((managed = is_managed(pa)) == FALSE)
1743 mapflags |= PMAP_NC;
1744
1745 /*
1746 * For user mappings we walk along the MMU tables of the given
1747 * pmap, reaching a PTE which describes the virtual page being
1748 * mapped or changed. If any level of the walk ends in an invalid
1749 * entry, a table must be allocated and the entry must be updated
1750 * to point to it.
1751 * There is a bit of confusion as to whether this code must be
1752 * re-entrant. For now we will assume it is. To support
1753 * re-entrancy we must unlink tables from the table pool before
1754 * we assume we may use them. Tables are re-linked into the pool
1755 * when we are finished with them at the end of the function.
1756 * But I don't feel like doing that until we have proof that this
1757 * needs to be re-entrant.
1758 * 'llevel' records which tables need to be relinked.
1759 */
1760 llevel = NONE;
1761
1762 /*
1763 * Step 1 - Retrieve the A table from the pmap. If it has no
1764 * A table, allocate a new one from the available pool.
1765 */
1766
1767 a_tbl = pmap->pm_a_tmgr;
1768 if (a_tbl == NULL) {
1769 /*
1770 * This pmap does not currently have an A table. Allocate
1771 * a new one.
1772 */
1773 a_tbl = get_a_table();
1774 a_tbl->at_parent = pmap;
1775
1776 /*
1777 * Assign this new A table to the pmap, and calculate its
1778 * physical address so that loadcrp() can be used to make
1779 * the table active.
1780 */
1781 pmap->pm_a_tmgr = a_tbl;
1782 pmap->pm_a_phys = mmu_vtop(a_tbl->at_dtbl);
1783
1784 /*
1785 * If the process receiving a new A table is the current
1786 * process, we are responsible for setting the MMU so that
1787 * it becomes the current address space. This only adds
1788 * new mappings, so no need to flush anything.
1789 */
1790 if (pmap == current_pmap()) {
1791 kernel_crp.rp_addr = pmap->pm_a_phys;
1792 loadcrp(&kernel_crp);
1793 }
1794
1795 if (!wired)
1796 llevel = NEWA;
1797 } else {
1798 /*
1799 * Use the A table already allocated for this pmap.
1800 * Unlink it from the A table pool if necessary.
1801 */
1802 if (wired && !a_tbl->at_wcnt)
1803 TAILQ_REMOVE(&a_pool, a_tbl, at_link);
1804 }
1805
1806 /*
1807 * Step 2 - Walk into the B table. If there is no valid B table,
1808 * allocate one.
1809 */
1810
1811 a_idx = MMU_TIA(va); /* Calculate the TIA of the VA. */
1812 a_dte = &a_tbl->at_dtbl[a_idx]; /* Retrieve descriptor from table */
1813 if (MMU_VALID_DT(*a_dte)) { /* Is the descriptor valid? */
1814 /* The descriptor is valid. Use the B table it points to. */
1815 /*************************************
1816 * a_idx *
1817 * v *
1818 * a_tbl -> +-+-+-+-+-+-+-+-+-+-+-+- *
1819 * | | | | | | | | | | | | *
1820 * +-+-+-+-+-+-+-+-+-+-+-+- *
1821 * | *
1822 * \- b_tbl -> +-+- *
1823 * | | *
1824 * +-+- *
1825 *************************************/
1826 b_dte = mmu_ptov(a_dte->addr.raw);
1827 b_tbl = mmuB2tmgr(b_dte);
1828
1829 /*
1830 * If the requested mapping must be wired, but this table
1831 * being used to map it is not, the table must be removed
1832 * from the available pool and its wired entry count
1833 * incremented.
1834 */
1835 if (wired && !b_tbl->bt_wcnt) {
1836 TAILQ_REMOVE(&b_pool, b_tbl, bt_link);
1837 a_tbl->at_wcnt++;
1838 }
1839 } else {
1840 /* The descriptor is invalid. Allocate a new B table. */
1841 b_tbl = get_b_table();
1842
1843 /* Point the parent A table descriptor to this new B table. */
1844 a_dte->addr.raw = mmu_vtop(b_tbl->bt_dtbl);
1845 a_dte->attr.raw = MMU_LONG_DTE_LU | MMU_DT_SHORT;
1846 a_tbl->at_ecnt++; /* Update parent's valid entry count */
1847
1848 /* Create the necessary back references to the parent table */
1849 b_tbl->bt_parent = a_tbl;
1850 b_tbl->bt_pidx = a_idx;
1851
1852 /*
1853 * If this table is to be wired, make sure the parent A table
1854 * wired count is updated to reflect that it has another wired
1855 * entry.
1856 */
1857 if (wired)
1858 a_tbl->at_wcnt++;
1859 else if (llevel == NONE)
1860 llevel = NEWB;
1861 }
1862
1863 /*
1864 * Step 3 - Walk into the C table, if there is no valid C table,
1865 * allocate one.
1866 */
1867
1868 b_idx = MMU_TIB(va); /* Calculate the TIB of the VA */
1869 b_dte = &b_tbl->bt_dtbl[b_idx]; /* Retrieve descriptor from table */
1870 if (MMU_VALID_DT(*b_dte)) { /* Is the descriptor valid? */
1871 /* The descriptor is valid. Use the C table it points to. */
1872 /**************************************
1873 * c_idx *
1874 * | v *
1875 * \- b_tbl -> +-+-+-+-+-+-+-+-+-+-+- *
1876 * | | | | | | | | | | | *
1877 * +-+-+-+-+-+-+-+-+-+-+- *
1878 * | *
1879 * \- c_tbl -> +-+-- *
1880 * | | | *
1881 * +-+-- *
1882 **************************************/
1883 c_pte = mmu_ptov(MMU_PTE_PA(*b_dte));
1884 c_tbl = mmuC2tmgr(c_pte);
1885
1886 /* If mapping is wired and table is not */
1887 if (wired && !c_tbl->ct_wcnt) {
1888 TAILQ_REMOVE(&c_pool, c_tbl, ct_link);
1889 b_tbl->bt_wcnt++;
1890 }
1891 } else {
1892 /* The descriptor is invalid. Allocate a new C table. */
1893 c_tbl = get_c_table();
1894
1895 /* Point the parent B table descriptor to this new C table. */
1896 b_dte->attr.raw = mmu_vtop(c_tbl->ct_dtbl);
1897 b_dte->attr.raw |= MMU_DT_SHORT;
1898 b_tbl->bt_ecnt++; /* Update parent's valid entry count */
1899
1900 /* Create the necessary back references to the parent table */
1901 c_tbl->ct_parent = b_tbl;
1902 c_tbl->ct_pidx = b_idx;
1903 /*
1904 * Store the pmap and base virtual managed address for faster
1905 * retrieval in the PV functions.
1906 */
1907 c_tbl->ct_pmap = pmap;
1908 c_tbl->ct_va = (va & (MMU_TIA_MASK|MMU_TIB_MASK));
1909
1910 /*
1911 * If this table is to be wired, make sure the parent B table
1912 * wired count is updated to reflect that it has another wired
1913 * entry.
1914 */
1915 if (wired)
1916 b_tbl->bt_wcnt++;
1917 else if (llevel == NONE)
1918 llevel = NEWC;
1919 }
1920
1921 /*
1922 * Step 4 - Deposit a page descriptor (PTE) into the appropriate
1923 * slot of the C table, describing the PA to which the VA is mapped.
1924 */
1925
1926 pte_idx = MMU_TIC(va);
1927 c_pte = &c_tbl->ct_dtbl[pte_idx];
1928 if (MMU_VALID_DT(*c_pte)) { /* Is the entry currently valid? */
1929 /*
1930 * The PTE is currently valid. This particular call
1931 * is just a synonym for one (or more) of the following
1932 * operations:
1933 * change protection of a page
1934 * change wiring status of a page
1935 * remove the mapping of a page
1936 *
1937 * XXX - Semi critical: This code should unwire the PTE
1938 * and, possibly, associated parent tables if this is a
1939 * change wiring operation. Currently it does not.
1940 *
1941 * This may be ok if pmap_unwire() is the only
1942 * interface used to UNWIRE a page.
1943 */
1944
1945 /* First check if this is a wiring operation. */
1946 if (wired && (c_pte->attr.raw & MMU_SHORT_PTE_WIRED)) {
1947 /*
1948 * The PTE is already wired. To prevent it from being
1949 * counted as a new wiring operation, reset the 'wired'
1950 * variable.
1951 */
1952 wired = FALSE;
1953 }
1954
1955 /* Is the new address the same as the old? */
1956 if (MMU_PTE_PA(*c_pte) == pa) {
1957 /*
1958 * Yes, mark that it does not need to be reinserted
1959 * into the PV list.
1960 */
1961 insert = FALSE;
1962
1963 /*
1964 * Clear all but the modified, referenced and wired
1965 * bits on the PTE.
1966 */
1967 c_pte->attr.raw &= (MMU_SHORT_PTE_M
1968 | MMU_SHORT_PTE_USED | MMU_SHORT_PTE_WIRED);
1969 } else {
1970 /* No, remove the old entry */
1971 pmap_remove_pte(c_pte);
1972 insert = TRUE;
1973 }
1974
1975 /*
1976 * TLB flush is only necessary if modifying current map.
1977 * However, in pmap_enter(), the pmap almost always IS
1978 * the current pmap, so don't even bother to check.
1979 */
1980 TBIS(va);
1981 } else {
1982 /*
1983 * The PTE is invalid. Increment the valid entry count in
1984 * the C table manager to reflect the addition of a new entry.
1985 */
1986 c_tbl->ct_ecnt++;
1987
1988 /* XXX - temporarily make sure the PTE is cleared. */
1989 c_pte->attr.raw = 0;
1990
1991 /* It will also need to be inserted into the PV list. */
1992 insert = TRUE;
1993 }
1994
1995 /*
1996 * If page is changing from unwired to wired status, set an unused bit
1997 * within the PTE to indicate that it is wired. Also increment the
1998 * wired entry count in the C table manager.
1999 */
2000 if (wired) {
2001 c_pte->attr.raw |= MMU_SHORT_PTE_WIRED;
2002 c_tbl->ct_wcnt++;
2003 }
2004
2005 /*
2006 * Map the page, being careful to preserve modify/reference/wired
2007 * bits. At this point it is assumed that the PTE either has no bits
2008 * set, or if there are set bits, they are only modified, reference or
2009 * wired bits. If not, the following statement will cause erratic
2010 * behavior.
2011 */
2012 #ifdef PMAP_DEBUG
2013 if (c_pte->attr.raw & ~(MMU_SHORT_PTE_M |
2014 MMU_SHORT_PTE_USED | MMU_SHORT_PTE_WIRED)) {
2015 printf("pmap_enter: junk left in PTE at %p\n", c_pte);
2016 Debugger();
2017 }
2018 #endif
2019 c_pte->attr.raw |= ((u_long) pa | MMU_DT_PAGE);
2020
2021 /*
2022 * If the mapping should be read-only, set the write protect
2023 * bit in the PTE.
2024 */
2025 if (!(prot & VM_PROT_WRITE))
2026 c_pte->attr.raw |= MMU_SHORT_PTE_WP;
2027
2028 /*
2029 * If the mapping should be cache inhibited (indicated by the flag
2030 	 * bits found in the low-order bits of the physical address),
2031 	 * mark the PTE as a cache-inhibited page.
2032 */
2033 if (mapflags & PMAP_NC)
2034 c_pte->attr.raw |= MMU_SHORT_PTE_CI;
2035
2036 /*
2037 * If the physical address being mapped is managed by the PV
2038 * system then link the pte into the list of pages mapped to that
2039 * address.
2040 */
2041 if (insert && managed) {
2042 pv = pa2pv(pa);
2043 nidx = pteidx(c_pte);
2044
2045 s = splimp();
2046 pvebase[nidx].pve_next = pv->pv_idx;
2047 pv->pv_idx = nidx;
2048 splx(s);
2049 }
2050
2051 /* Move any allocated tables back into the active pool. */
2052
2053 switch (llevel) {
2054 case NEWA:
2055 TAILQ_INSERT_TAIL(&a_pool, a_tbl, at_link);
2056 /* FALLTHROUGH */
2057 case NEWB:
2058 TAILQ_INSERT_TAIL(&b_pool, b_tbl, bt_link);
2059 /* FALLTHROUGH */
2060 case NEWC:
2061 TAILQ_INSERT_TAIL(&c_pool, c_tbl, ct_link);
2062 /* FALLTHROUGH */
2063 default:
2064 break;
2065 }
2066
2067 return (KERN_SUCCESS);
2068 }
2069
2070 /* pmap_enter_kernel INTERNAL
2071 **
2072 * Map the given virtual address to the given physical address within the
2073 * kernel address space. This function exists because the kernel map does
2074 * not do dynamic table allocation. It consists of a contiguous array of ptes
2075 * and can be edited directly without the need to walk through any tables.
2076 *
2077 * XXX: "Danger, Will Robinson!"
2078 * Note that the kernel should never take a fault on any page
2079 * between [ KERNBASE .. virtual_avail ] and this is checked in
2080 * trap.c for kernel-mode MMU faults. This means that mappings
2081 	 * created in that range must be implicitly wired. -gwr
2082 */
2083 void
2084 pmap_enter_kernel(va, pa, prot)
2085 vm_offset_t va;
2086 vm_offset_t pa;
2087 vm_prot_t prot;
2088 {
2089 boolean_t was_valid, insert;
2090 u_short pte_idx;
2091 int s, flags;
2092 mmu_short_pte_t *pte;
2093 pv_t *pv;
2094 vm_offset_t old_pa;
2095
2096 flags = (pa & ~MMU_PAGE_MASK);
2097 pa &= MMU_PAGE_MASK;
2098
2099 if (is_managed(pa))
2100 insert = TRUE;
2101 else
2102 insert = FALSE;
2103
2104 /*
2105 * Calculate the index of the PTE being modified.
2106 */
2107 pte_idx = (u_long) m68k_btop(va - KERNBASE);
2108
2109 /* This array is traditionally named "Sysmap" */
2110 pte = &kernCbase[pte_idx];
2111
2112 s = splimp();
2113 if (MMU_VALID_DT(*pte)) {
2114 was_valid = TRUE;
2115 /*
2116 * If the PTE already maps a different
2117 		 * physical address, unmap and pv_unlink.
2118 */
2119 old_pa = MMU_PTE_PA(*pte);
2120 if (pa != old_pa)
2121 pmap_remove_pte(pte);
2122 else {
2123 /*
2124 * Old PA and new PA are the same. No need to
2125 * relink the mapping within the PV list.
2126 */
2127 insert = FALSE;
2128
2129 /*
2130 * Save any mod/ref bits on the PTE.
2131 */
2132 pte->attr.raw &= (MMU_SHORT_PTE_USED|MMU_SHORT_PTE_M);
2133 }
2134 } else {
2135 pte->attr.raw = MMU_DT_INVALID;
2136 was_valid = FALSE;
2137 }
2138
2139 /*
2140 	 * Map the page, being careful to preserve modified/referenced bits
2141 * on the PTE.
2142 */
2143 pte->attr.raw |= (pa | MMU_DT_PAGE);
2144
2145 if (!(prot & VM_PROT_WRITE)) /* If access should be read-only */
2146 pte->attr.raw |= MMU_SHORT_PTE_WP;
2147 if (flags & PMAP_NC)
2148 pte->attr.raw |= MMU_SHORT_PTE_CI;
2149 if (was_valid)
2150 TBIS(va);
2151
2152 /*
2153 * Insert the PTE into the PV system, if need be.
2154 */
2155 if (insert) {
2156 pv = pa2pv(pa);
2157 pvebase[pte_idx].pve_next = pv->pv_idx;
2158 pv->pv_idx = pte_idx;
2159 }
2160 splx(s);
2161
2162 }
2163
2164 void
2165 pmap_kenter_pa(va, pa, prot)
2166 vaddr_t va;
2167 paddr_t pa;
2168 vm_prot_t prot;
2169 {
2170 pmap_enter(pmap_kernel(), va, pa, prot, PMAP_WIRED);
2171 }
2172
2173 void
2174 pmap_kenter_pgs(va, pgs, npgs)
2175 vaddr_t va;
2176 struct vm_page **pgs;
2177 int npgs;
2178 {
2179 int i;
2180
2181 for (i = 0; i < npgs; i++, va += PAGE_SIZE) {
2182 pmap_enter(pmap_kernel(), va, VM_PAGE_TO_PHYS(pgs[i]),
2183 VM_PROT_READ|VM_PROT_WRITE, PMAP_WIRED);
2184 }
2185 }
2186
2187 void
2188 pmap_kremove(va, len)
2189 vaddr_t va;
2190 vsize_t len;
2191 {
2192 for (len >>= PAGE_SHIFT; len > 0; len--, va += PAGE_SIZE) {
2193 pmap_remove(pmap_kernel(), va, va + PAGE_SIZE);
2194 }
2195 }
2196
2197 /* pmap_map INTERNAL
2198 **
2199 * Map a contiguous range of physical memory into a contiguous range of
2200 * the kernel virtual address space.
2201 *
2202 * Used for device mappings and early mapping of the kernel text/data/bss.
2203 * Returns the first virtual address beyond the end of the range.
2204 */
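/*
 * A minimal usage sketch (the device addresses are hypothetical):
 *
 *	va = pmap_map(va, 0xFE000000, 0xFE004000,
 *	    VM_PROT_READ|VM_PROT_WRITE);
 *
 * This maps two 8K pages and leaves 'va' at the first virtual
 * address past the new mapping.
 */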
2205 vm_offset_t
2206 pmap_map(va, pa, endpa, prot)
2207 vm_offset_t va;
2208 vm_offset_t pa;
2209 vm_offset_t endpa;
2210 int prot;
2211 {
2212 int sz;
2213
2214 sz = endpa - pa;
2215 do {
2216 pmap_enter_kernel(va, pa, prot);
2217 va += NBPG;
2218 pa += NBPG;
2219 sz -= NBPG;
2220 } while (sz > 0);
2221 return(va);
2222 }
2223
2224 /* pmap_protect INTERFACE
2225 **
2226 * Apply the given protection to the given virtual address range within
2227 * the given map.
2228 *
2229 * It is ok for the protection applied to be stronger than what is
2230 * specified. We use this to our advantage when the given map has no
2231 * mapping for the virtual address. By skipping a page when this
2232 * is discovered, we are effectively applying a protection of VM_PROT_NONE,
2233 * and therefore do not need to map the page just to apply a protection
2234 * code. Only pmap_enter() needs to create new mappings if they do not exist.
2235 *
2236 * XXX - This function could be sped up by using pmap_stroll() for initial
2237 * setup, and then manual scrolling in the for() loop.
2238 */
2239 void
2240 pmap_protect(pmap, startva, endva, prot)
2241 pmap_t pmap;
2242 vm_offset_t startva, endva;
2243 vm_prot_t prot;
2244 {
2245 boolean_t iscurpmap;
2246 int a_idx, b_idx, c_idx;
2247 a_tmgr_t *a_tbl;
2248 b_tmgr_t *b_tbl;
2249 c_tmgr_t *c_tbl;
2250 mmu_short_pte_t *pte;
2251
2252 if (pmap == NULL)
2253 return;
2254 if (pmap == pmap_kernel()) {
2255 pmap_protect_kernel(startva, endva, prot);
2256 return;
2257 }
2258
2259 /*
2260 * In this particular pmap implementation, there are only three
2261 * types of memory protection: 'all' (read/write/execute),
2262 * 'read-only' (read/execute) and 'none' (no mapping.)
2263 * It is not possible for us to treat 'executable' as a separate
2264 * protection type. Therefore, protection requests that seek to
2265 * remove execute permission while retaining read or write, and those
2266 * that make little sense (write-only for example) are ignored.
2267 */
2268 switch (prot) {
2269 case VM_PROT_NONE:
2270 /*
2271 * A request to apply the protection code of
2272 * 'VM_PROT_NONE' is a synonym for pmap_remove().
2273 */
2274 pmap_remove(pmap, startva, endva);
2275 return;
2276 case VM_PROT_EXECUTE:
2277 case VM_PROT_READ:
2278 case VM_PROT_READ|VM_PROT_EXECUTE:
2279 /* continue */
2280 break;
2281 case VM_PROT_WRITE:
2282 case VM_PROT_WRITE|VM_PROT_READ:
2283 case VM_PROT_WRITE|VM_PROT_EXECUTE:
2284 case VM_PROT_ALL:
2285 /* None of these should happen in a sane system. */
2286 return;
2287 }
2288
2289 /*
2290 * If the pmap has no A table, it has no mappings and therefore
2291 * there is nothing to protect.
2292 */
2293 if ((a_tbl = pmap->pm_a_tmgr) == NULL)
2294 return;
2295
2296 a_idx = MMU_TIA(startva);
2297 b_idx = MMU_TIB(startva);
2298 c_idx = MMU_TIC(startva);
2299 	b_tbl = NULL; c_tbl = NULL;	/* split from a non-ISO lvalue-cast assignment */
2300
2301 iscurpmap = (pmap == current_pmap());
2302 while (startva < endva) {
2303 if (b_tbl || MMU_VALID_DT(a_tbl->at_dtbl[a_idx])) {
2304 if (b_tbl == NULL) {
2305 b_tbl = (b_tmgr_t *) a_tbl->at_dtbl[a_idx].addr.raw;
2306 b_tbl = mmu_ptov((vm_offset_t) b_tbl);
2307 b_tbl = mmuB2tmgr((mmu_short_dte_t *) b_tbl);
2308 }
2309 if (c_tbl || MMU_VALID_DT(b_tbl->bt_dtbl[b_idx])) {
2310 if (c_tbl == NULL) {
2311 c_tbl = (c_tmgr_t *) MMU_DTE_PA(b_tbl->bt_dtbl[b_idx]);
2312 c_tbl = mmu_ptov((vm_offset_t) c_tbl);
2313 c_tbl = mmuC2tmgr((mmu_short_pte_t *) c_tbl);
2314 }
2315 if (MMU_VALID_DT(c_tbl->ct_dtbl[c_idx])) {
2316 pte = &c_tbl->ct_dtbl[c_idx];
2317 /* make the mapping read-only */
2318 pte->attr.raw |= MMU_SHORT_PTE_WP;
2319 /*
2320 * If we just modified the current address space,
2321 * flush any translations for the modified page from
2322 * the translation cache and any data from it in the
2323 * data cache.
2324 */
2325 if (iscurpmap)
2326 TBIS(startva);
2327 }
2328 startva += NBPG;
2329
2330 if (++c_idx >= MMU_C_TBL_SIZE) { /* exceeded C table? */
2331 c_tbl = NULL;
2332 c_idx = 0;
2333 if (++b_idx >= MMU_B_TBL_SIZE) { /* exceeded B table? */
2334 b_tbl = NULL;
2335 b_idx = 0;
2336 }
2337 }
2338 } else { /* C table wasn't valid */
2339 c_tbl = NULL;
2340 c_idx = 0;
2341 startva += MMU_TIB_RANGE;
2342 if (++b_idx >= MMU_B_TBL_SIZE) { /* exceeded B table? */
2343 b_tbl = NULL;
2344 b_idx = 0;
2345 }
2346 } /* C table */
2347 } else { /* B table wasn't valid */
2348 b_tbl = NULL;
2349 b_idx = 0;
2350 startva += MMU_TIA_RANGE;
2351 a_idx++;
2352 } /* B table */
2353 }
2354 }
2355
2356 /* pmap_protect_kernel INTERNAL
2357 **
2358 * Apply the given protection code to a kernel address range.
2359 */
2360 void
2361 pmap_protect_kernel(startva, endva, prot)
2362 vm_offset_t startva, endva;
2363 vm_prot_t prot;
2364 {
2365 vm_offset_t va;
2366 mmu_short_pte_t *pte;
2367
2368 pte = &kernCbase[(unsigned long) m68k_btop(startva - KERNBASE)];
2369 for (va = startva; va < endva; va += NBPG, pte++) {
2370 if (MMU_VALID_DT(*pte)) {
2371 switch (prot) {
2372 case VM_PROT_ALL:
2373 break;
2374 case VM_PROT_EXECUTE:
2375 case VM_PROT_READ:
2376 case VM_PROT_READ|VM_PROT_EXECUTE:
2377 pte->attr.raw |= MMU_SHORT_PTE_WP;
2378 break;
2379 case VM_PROT_NONE:
2380 /* this is an alias for 'pmap_remove_kernel' */
2381 pmap_remove_pte(pte);
2382 break;
2383 default:
2384 break;
2385 }
2386 /*
2387 * since this is the kernel, immediately flush any cached
2388 * descriptors for this address.
2389 */
2390 TBIS(va);
2391 }
2392 }
2393 }
2394
2395 /* pmap_unwire INTERFACE
2396 **
2397 * Clear the wired attribute of the specified page.
2398 *
2399 * This function is called from vm_fault.c to unwire
2400 * a mapping.
2401 */
2402 void
2403 pmap_unwire(pmap, va)
2404 pmap_t pmap;
2405 vm_offset_t va;
2406 {
2407 int a_idx, b_idx, c_idx;
2408 a_tmgr_t *a_tbl;
2409 b_tmgr_t *b_tbl;
2410 c_tmgr_t *c_tbl;
2411 mmu_short_pte_t *pte;
2412
2413 /* Kernel mappings always remain wired. */
2414 if (pmap == pmap_kernel())
2415 return;
2416
2417 /*
2418 * Walk through the tables. If the walk terminates without
2419 * a valid PTE then the address wasn't wired in the first place.
2420 * Return immediately.
2421 */
2422 if (pmap_stroll(pmap, va, &a_tbl, &b_tbl, &c_tbl, &pte, &a_idx,
2423 &b_idx, &c_idx) == FALSE)
2424 return;
2425
2426
2427 /* Is the PTE wired? If not, return. */
2428 if (!(pte->attr.raw & MMU_SHORT_PTE_WIRED))
2429 return;
2430
2431 /* Remove the wiring bit. */
2432 pte->attr.raw &= ~(MMU_SHORT_PTE_WIRED);
2433
2434 /*
2435 * Decrement the wired entry count in the C table.
2436 * If it reaches zero the following things happen:
2437 * 1. The table no longer has any wired entries and is considered
2438 * unwired.
2439 * 2. It is placed on the available queue.
2440 * 3. The parent table's wired entry count is decremented.
2441 * 4. If it reaches zero, this process repeats at step 1 and
2442 	 * stops after reaching the A table.
2443 */
2444 if (--c_tbl->ct_wcnt == 0) {
2445 TAILQ_INSERT_TAIL(&c_pool, c_tbl, ct_link);
2446 if (--b_tbl->bt_wcnt == 0) {
2447 TAILQ_INSERT_TAIL(&b_pool, b_tbl, bt_link);
2448 if (--a_tbl->at_wcnt == 0) {
2449 TAILQ_INSERT_TAIL(&a_pool, a_tbl, at_link);
2450 }
2451 }
2452 }
2453 }
2454
2455 /* pmap_copy INTERFACE
2456 **
2457 * Copy the mappings of a range of addresses in one pmap, into
2458 * the destination address of another.
2459 *
2460 * This routine is advisory. Should we one day decide that MMU tables
2461 * may be shared by more than one pmap, this function should be used to
2462 * link them together. Until that day however, we do nothing.
2463 */
2464 void
2465 pmap_copy(pmap_a, pmap_b, dst, len, src)
2466 pmap_t pmap_a, pmap_b;
2467 vm_offset_t dst;
2468 vm_size_t len;
2469 vm_offset_t src;
2470 {
2471 /* not implemented. */
2472 }
2473
2474 /* pmap_copy_page INTERFACE
2475 **
2476 * Copy the contents of one physical page into another.
2477 *
2478 * This function makes use of two virtual pages allocated in pmap_bootstrap()
2479 * to map the two specified physical pages into the kernel address space.
2480 *
2481 * Note: We could use the transparent translation registers to make the
2482 * mappings. If we do so, be sure to disable interrupts before using them.
2483 */
2484 void
2485 pmap_copy_page(srcpa, dstpa)
2486 vm_offset_t srcpa, dstpa;
2487 {
2488 vm_offset_t srcva, dstva;
2489 int s;
2490
2491 srcva = tmp_vpages[0];
2492 dstva = tmp_vpages[1];
2493
2494 s = splimp();
2495 if (tmp_vpages_inuse++)
2496 panic("pmap_copy_page: temporary vpages are in use.");
2497
2498 	/* Map pages as non-cacheable to avoid cache pollution? */
2499 pmap_enter_kernel(srcva, srcpa, VM_PROT_READ);
2500 pmap_enter_kernel(dstva, dstpa, VM_PROT_READ|VM_PROT_WRITE);
2501
2502 /* Hand-optimized version of bcopy(src, dst, NBPG) */
2503 copypage((char *) srcva, (char *) dstva);
2504
2505 pmap_remove_kernel(srcva, srcva + NBPG);
2506 pmap_remove_kernel(dstva, dstva + NBPG);
2507
2508 --tmp_vpages_inuse;
2509 splx(s);
2510 }
2511
2512 /* pmap_zero_page INTERFACE
2513 **
2514 * Zero the contents of the specified physical page.
2515 *
2516 * Uses one of the virtual pages allocated in pmap_bootstrap()
2517 * to map the specified page into the kernel address space.
2518 */
2519 void
2520 pmap_zero_page(dstpa)
2521 vm_offset_t dstpa;
2522 {
2523 vm_offset_t dstva;
2524 int s;
2525
2526 dstva = tmp_vpages[1];
2527 s = splimp();
2528 if (tmp_vpages_inuse++)
2529 panic("pmap_zero_page: temporary vpages are in use.");
2530
2531 /* The comments in pmap_copy_page() above apply here also. */
2532 pmap_enter_kernel(dstva, dstpa, VM_PROT_READ|VM_PROT_WRITE);
2533
2534 /* Hand-optimized version of bzero(ptr, NBPG) */
2535 zeropage((char *) dstva);
2536
2537 pmap_remove_kernel(dstva, dstva + NBPG);
2538
2539 --tmp_vpages_inuse;
2540 splx(s);
2541 }
2542
2543 /* pmap_collect INTERFACE
2544 **
2545 * Called from the VM system when we are about to swap out
2546 * the process using this pmap. This should give up any
2547 * resources held here, including all its MMU tables.
2548 */
2549 void
2550 pmap_collect(pmap)
2551 pmap_t pmap;
2552 {
2553 /* XXX - todo... */
2554 }
2555
2556 /* pmap_create INTERFACE
2557 **
2558 * Create and return a pmap structure.
2559 */
2560 pmap_t
2561 pmap_create()
2562 {
2563 pmap_t pmap;
2564
2565 pmap = pool_get(&pmap_pmap_pool, PR_WAITOK);
2566 pmap_pinit(pmap);
2567 return pmap;
2568 }
2569
2570 /* pmap_pinit INTERNAL
2571 **
2572 * Initialize a pmap structure.
2573 */
2574 void
2575 pmap_pinit(pmap)
2576 pmap_t pmap;
2577 {
2578 bzero(pmap, sizeof(struct pmap));
2579 pmap->pm_a_tmgr = NULL;
2580 pmap->pm_a_phys = kernAphys;
2581 pmap->pm_refcount = 1;
2582 simple_lock_init(&pmap->pm_lock);
2583 }
2584
2585 /* pmap_release INTERFACE
2586 **
2587 * Release any resources held by the given pmap.
2588 *
2589 * This is the reverse analog to pmap_pinit. It does not
2590 * necessarily mean that the pmap structure will be deallocated,
2591 * as it is in pmap_destroy.
2592 */
2593 void
2594 pmap_release(pmap)
2595 pmap_t pmap;
2596 {
2597 /*
2598 * As long as the pmap contains no mappings,
2599 	 * which should always be the case whenever
2600 * this function is called, there really should
2601 * be nothing to do.
2602 */
2603 #ifdef PMAP_DEBUG
2604 if (pmap == NULL)
2605 return;
2606 if (pmap == pmap_kernel())
2607 panic("pmap_release: kernel pmap");
2608 #endif
2609 /*
2610 * XXX - If this pmap has an A table, give it back.
2611 * The pmap SHOULD be empty by now, and pmap_remove
2612 * should have already given back the A table...
2613 * However, I see: pmap->pm_a_tmgr->at_ecnt == 1
2614 * at this point, which means some mapping was not
2615 * removed when it should have been. -gwr
2616 */
2617 if (pmap->pm_a_tmgr != NULL) {
2618 /* First make sure we are not using it! */
2619 if (kernel_crp.rp_addr == pmap->pm_a_phys) {
2620 kernel_crp.rp_addr = kernAphys;
2621 loadcrp(&kernel_crp);
2622 }
2623 #ifdef PMAP_DEBUG /* XXX - todo! */
2624 /* XXX - Now complain... */
2625 printf("pmap_release: still have table\n");
2626 Debugger();
2627 #endif
2628 free_a_table(pmap->pm_a_tmgr, TRUE);
2629 pmap->pm_a_tmgr = NULL;
2630 pmap->pm_a_phys = kernAphys;
2631 }
2632 }
2633
2634 /* pmap_reference INTERFACE
2635 **
2636 * Increment the reference count of a pmap.
2637 */
2638 void
2639 pmap_reference(pmap)
2640 pmap_t pmap;
2641 {
2642 if (pmap == NULL)
2643 return;
2644
2645 pmap_lock(pmap);
2646 pmap_add_ref(pmap);
2647 pmap_unlock(pmap);
2648 }
2649
2650 /* pmap_dereference INTERNAL
2651 **
2652 * Decrease the reference count on the given pmap
2653 * by one and return the current count.
2654 */
2655 int
2656 pmap_dereference(pmap)
2657 pmap_t pmap;
2658 {
2659 int rtn;
2660
2661 if (pmap == NULL)
2662 return 0;
2663
2664 pmap_lock(pmap);
2665 rtn = pmap_del_ref(pmap);
2666 pmap_unlock(pmap);
2667
2668 return rtn;
2669 }
2670
2671 /* pmap_destroy INTERFACE
2672 **
2673 * Decrement a pmap's reference count and delete
2674 * the pmap if it becomes zero. Will be called
2675 * only after all mappings have been removed.
2676 */
2677 void
2678 pmap_destroy(pmap)
2679 pmap_t pmap;
2680 {
2681 if (pmap == NULL)
2682 return;
2683 if (pmap == &kernel_pmap)
2684 panic("pmap_destroy: kernel_pmap!");
2685 if (pmap_dereference(pmap) == 0) {
2686 pmap_release(pmap);
2687 pool_put(&pmap_pmap_pool, pmap);
2688 }
2689 }
2690
2691 /* pmap_is_referenced INTERFACE
2692 **
2693 * Determine if the given physical page has been
2694 * referenced (read from [or written to.])
2695 */
2696 boolean_t
2697 pmap_is_referenced(pg)
2698 struct vm_page *pg;
2699 {
2700 paddr_t pa = VM_PAGE_TO_PHYS(pg);
2701 pv_t *pv;
2702 int idx, s;
2703
2704 if (!pv_initialized)
2705 return FALSE;
2706 	/* XXX - this may be unnecessary. */
2707 if (!is_managed(pa))
2708 return FALSE;
2709
2710 pv = pa2pv(pa);
2711 /*
2712 * Check the flags on the pv head. If they are set,
2713 * return immediately. Otherwise a search must be done.
2714 */
2715 if (pv->pv_flags & PV_FLAGS_USED)
2716 return TRUE;
2717
2718 s = splimp();
2719 /*
2720 * Search through all pv elements pointing
2721 * to this page and query their reference bits
2722 */
2723 for (idx = pv->pv_idx;
2724 idx != PVE_EOL;
2725 idx = pvebase[idx].pve_next) {
2726
2727 if (MMU_PTE_USED(kernCbase[idx])) {
2728 splx(s);
2729 return TRUE;
2730 }
2731 }
2732 splx(s);
2733
2734 return FALSE;
2735 }
2736
2737 /* pmap_is_modified INTERFACE
2738 **
2739 * Determine if the given physical page has been
2740 * modified (written to.)
2741 */
2742 boolean_t
2743 pmap_is_modified(pg)
2744 struct vm_page *pg;
2745 {
2746 paddr_t pa = VM_PAGE_TO_PHYS(pg);
2747 pv_t *pv;
2748 int idx, s;
2749
2750 if (!pv_initialized)
2751 return FALSE;
2752 	/* XXX - this may be unnecessary. */
2753 if (!is_managed(pa))
2754 return FALSE;
2755
2756 /* see comments in pmap_is_referenced() */
2757 pv = pa2pv(pa);
2758 if (pv->pv_flags & PV_FLAGS_MDFY)
2759 return TRUE;
2760
2761 s = splimp();
2762 for (idx = pv->pv_idx;
2763 idx != PVE_EOL;
2764 idx = pvebase[idx].pve_next) {
2765
2766 if (MMU_PTE_MODIFIED(kernCbase[idx])) {
2767 splx(s);
2768 return TRUE;
2769 }
2770 }
2771 splx(s);
2772
2773 return FALSE;
2774 }
2775
2776 /* pmap_page_protect INTERFACE
2777 **
2778 * Applies the given protection to all mappings to the given
2779 * physical page.
2780 */
2781 void
2782 pmap_page_protect(pg, prot)
2783 struct vm_page *pg;
2784 vm_prot_t prot;
2785 {
2786 paddr_t pa = VM_PAGE_TO_PHYS(pg);
2787 pv_t *pv;
2788 int idx, s;
2789 vm_offset_t va;
2790 struct mmu_short_pte_struct *pte;
2791 c_tmgr_t *c_tbl;
2792 pmap_t pmap, curpmap;
2793
2794 if (!is_managed(pa))
2795 return;
2796
2797 curpmap = current_pmap();
2798 pv = pa2pv(pa);
2799 s = splimp();
2800
2801 for (idx = pv->pv_idx;
2802 idx != PVE_EOL;
2803 idx = pvebase[idx].pve_next) {
2804
2805 pte = &kernCbase[idx];
2806 switch (prot) {
2807 case VM_PROT_ALL:
2808 /* do nothing */
2809 break;
2810 case VM_PROT_EXECUTE:
2811 case VM_PROT_READ:
2812 case VM_PROT_READ|VM_PROT_EXECUTE:
2813 /*
2814 * Determine the virtual address mapped by
2815 * the PTE and flush ATC entries if necessary.
2816 */
2817 va = pmap_get_pteinfo(idx, &pmap, &c_tbl);
2818 /* XXX don't write protect pager mappings */
2819 if (va >= PAGER_SVA && va < PAGER_EVA) {
2820 #ifdef PMAP_DEBUG
2821 /* XXX - Does this actually happen? */
2822 printf("pmap_page_protect: in pager!\n");
2823 Debugger();
2824 #endif
2825 } else
2826 pte->attr.raw |= MMU_SHORT_PTE_WP;
2827 if (pmap == curpmap || pmap == pmap_kernel())
2828 TBIS(va);
2829 break;
2830 case VM_PROT_NONE:
2831 /* Save the mod/ref bits. */
2832 pv->pv_flags |= pte->attr.raw;
2833 /* Invalidate the PTE. */
2834 pte->attr.raw = MMU_DT_INVALID;
2835
2836 /*
2837 * Update table counts. And flush ATC entries
2838 * if necessary.
2839 */
2840 va = pmap_get_pteinfo(idx, &pmap, &c_tbl);
2841
2842 /*
2843 * If the PTE belongs to the kernel map,
2844 * be sure to flush the page it maps.
2845 */
2846 if (pmap == pmap_kernel()) {
2847 TBIS(va);
2848 } else {
2849 /*
2850 * The PTE belongs to a user map.
2851 				 * Update the entry count in the C
2852 * table to which it belongs and flush
2853 * the ATC if the mapping belongs to
2854 * the current pmap.
2855 */
2856 c_tbl->ct_ecnt--;
2857 if (pmap == curpmap)
2858 TBIS(va);
2859 }
2860 break;
2861 default:
2862 break;
2863 }
2864 }
2865
2866 /*
2867 * If the protection code indicates that all mappings to the page
2868 * be removed, truncate the PV list to zero entries.
2869 */
2870 if (prot == VM_PROT_NONE)
2871 pv->pv_idx = PVE_EOL;
2872 splx(s);
2873 }
2874
2875 /* pmap_get_pteinfo INTERNAL
2876 **
2877 * Called internally to find the pmap and virtual address within that
2878 * map to which the pte at the given index maps. Also includes the PTE's C
2879 * table manager.
2880 *
2881 * Returns the pmap in the argument provided, and the virtual address
2882 * by return value.
2883 */
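/*
 * A minimal usage sketch (cf. pmap_page_protect() for a real caller).
 * Note that the C table manager is only filled in for user PTEs,
 * i.e. when idx >= NUM_KERN_PTES:
 *
 *	pmap_t pmap;
 *	c_tmgr_t *c_tbl;
 *	vm_offset_t va;
 *
 *	va = pmap_get_pteinfo(idx, &pmap, &c_tbl);
 */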
2884 vm_offset_t
2885 pmap_get_pteinfo(idx, pmap, tbl)
2886 u_int idx;
2887 pmap_t *pmap;
2888 c_tmgr_t **tbl;
2889 {
2890 vm_offset_t va = 0;
2891
2892 /*
2893 * Determine if the PTE is a kernel PTE or a user PTE.
2894 */
2895 if (idx >= NUM_KERN_PTES) {
2896 /*
2897 * The PTE belongs to a user mapping.
2898 */
2899 /* XXX: Would like an inline for this to validate idx... */
2900 *tbl = &Ctmgrbase[(idx - NUM_KERN_PTES) / MMU_C_TBL_SIZE];
2901
2902 *pmap = (*tbl)->ct_pmap;
2903 /*
2904 * To find the va to which the PTE maps, we first take
2905 * the table's base virtual address mapping which is stored
2906 * in ct_va. We then increment this address by a page for
2907 * every slot skipped until we reach the PTE.
2908 */
2909 va = (*tbl)->ct_va;
2910 va += m68k_ptob(idx % MMU_C_TBL_SIZE);
2911 } else {
2912 /*
2913 * The PTE belongs to the kernel map.
2914 */
2915 *pmap = pmap_kernel();
2916
2917 va = m68k_ptob(idx);
2918 va += KERNBASE;
2919 }
2920
2921 return va;
2922 }
2923
2924 /* pmap_clear_modify INTERFACE
2925 **
2926 * Clear the modification bit on the page at the specified
2927 * physical address.
2928 *
2929 */
2930 boolean_t
2931 pmap_clear_modify(pg)
2932 struct vm_page *pg;
2933 {
2934 paddr_t pa = VM_PAGE_TO_PHYS(pg);
2935 boolean_t rv;
2936
2937 if (!is_managed(pa))
2938 return FALSE;
2939 rv = pmap_is_modified(pg);
2940 pmap_clear_pv(pa, PV_FLAGS_MDFY);
2941 return rv;
2942 }
2943
2944 /* pmap_clear_reference INTERFACE
2945 **
2946 * Clear the referenced bit on the page at the specified
2947 * physical address.
2948 */
2949 boolean_t
2950 pmap_clear_reference(pg)
2951 struct vm_page *pg;
2952 {
2953 paddr_t pa = VM_PAGE_TO_PHYS(pg);
2954 boolean_t rv;
2955
2956 if (!is_managed(pa))
2957 return FALSE;
2958 rv = pmap_is_referenced(pg);
2959 pmap_clear_pv(pa, PV_FLAGS_USED);
2960 return rv;
2961 }
2962
2963 /* pmap_clear_pv INTERNAL
2964 **
2965 * Clears the specified flag from the specified physical address.
2966 * (Used by pmap_clear_modify() and pmap_clear_reference().)
2967 *
2968 * Flag is one of:
2969 * PV_FLAGS_MDFY - Page modified bit.
2970 * PV_FLAGS_USED - Page used (referenced) bit.
2971 *
2972 * This routine must not only clear the flag on the pv list
2973 * head. It must also clear the bit on every pte in the pv
2974 * list associated with the address.
2975 */
2976 void
2977 pmap_clear_pv(pa, flag)
2978 vm_offset_t pa;
2979 int flag;
2980 {
2981 pv_t *pv;
2982 int idx, s;
2983 vm_offset_t va;
2984 pmap_t pmap;
2985 mmu_short_pte_t *pte;
2986 c_tmgr_t *c_tbl;
2987
2988 pv = pa2pv(pa);
2989
2990 s = splimp();
2991 pv->pv_flags &= ~(flag);
2992
2993 for (idx = pv->pv_idx;
2994 idx != PVE_EOL;
2995 idx = pvebase[idx].pve_next) {
2996
2997 pte = &kernCbase[idx];
2998 pte->attr.raw &= ~(flag);
2999 /*
3000 * The MC68030 MMU will not set the modified or
3001 * referenced bits on any MMU tables for which it has
3002 		 * a cached descriptor with its modify bit set. To ensure
3003 * that it will modify these bits on the PTE during the next
3004 * time it is written to or read from, we must flush it from
3005 * the ATC.
3006 *
3007 * Ordinarily it is only necessary to flush the descriptor
3008 * if it is used in the current address space. But since I
3009 * am not sure that there will always be a notion of
3010 * 'the current address space' when this function is called,
3011 * I will skip the test and always flush the address. It
3012 * does no harm.
3013 */
3014 va = pmap_get_pteinfo(idx, &pmap, &c_tbl);
3015 TBIS(va);
3016 }
3017 splx(s);
3018 }
3019
3020 /* pmap_extract INTERFACE
3021 **
3022 * Return the physical address mapped by the virtual address
3023 * in the specified pmap.
3024 *
3025 * Note: this function should also apply an exclusive lock
3026 * on the pmap system for its duration.
3027 */
3028 boolean_t
3029 pmap_extract(pmap, va, pap)
3030 pmap_t pmap;
3031 vaddr_t va;
3032 paddr_t *pap;
3033 {
3034 int a_idx, b_idx, pte_idx;
3035 a_tmgr_t *a_tbl;
3036 b_tmgr_t *b_tbl;
3037 c_tmgr_t *c_tbl;
3038 mmu_short_pte_t *c_pte;
3039
3040 if (pmap == pmap_kernel())
3041 return pmap_extract_kernel(va, pap);
3042 if (pmap == NULL)
3043 return FALSE;
3044
3045 if (pmap_stroll(pmap, va, &a_tbl, &b_tbl, &c_tbl,
3046 &c_pte, &a_idx, &b_idx, &pte_idx) == FALSE)
3047 return FALSE;
3048
3049 if (!MMU_VALID_DT(*c_pte))
3050 return FALSE;
3051
3052 if (pap != NULL)
3053 *pap = MMU_PTE_PA(*c_pte);
3054 return (TRUE);
3055 }
3056
3057 /* pmap_extract_kernel INTERNAL
3058 **
3059 * Extract a translation from the kernel address space.
3060 */
3061 boolean_t
3062 pmap_extract_kernel(va, pap)
3063 vaddr_t va;
3064 paddr_t *pap;
3065 {
3066 mmu_short_pte_t *pte;
3067
3068 pte = &kernCbase[(u_int) m68k_btop(va - KERNBASE)];
3069 if (!MMU_VALID_DT(*pte))
3070 return (FALSE);
3071 if (pap != NULL)
3072 *pap = MMU_PTE_PA(*pte);
3073 return (TRUE);
3074 }
3075
3076 /* pmap_remove_kernel INTERNAL
3077 **
3078 * Remove the mapping of a range of virtual addresses from the kernel map.
3079 * The arguments are already page-aligned.
3080 */
3081 void
3082 pmap_remove_kernel(sva, eva)
3083 vm_offset_t sva;
3084 vm_offset_t eva;
3085 {
3086 int idx, eidx;
3087
3088 #ifdef PMAP_DEBUG
3089 if ((sva & PGOFSET) || (eva & PGOFSET))
3090 panic("pmap_remove_kernel: alignment");
3091 #endif
3092
3093 idx = m68k_btop(sva - KERNBASE);
3094 eidx = m68k_btop(eva - KERNBASE);
3095
3096 while (idx < eidx) {
3097 pmap_remove_pte(&kernCbase[idx++]);
3098 TBIS(sva);
3099 sva += NBPG;
3100 }
3101 }
3102
3103 /* pmap_remove INTERFACE
3104 **
3105 * Remove the mapping of a range of virtual addresses from the given pmap.
3106 *
3107 * If the range contains any wired entries, this function will probably create
3108 * disaster.
3109 */
3110 void
3111 pmap_remove(pmap, start, end)
3112 pmap_t pmap;
3113 vm_offset_t start;
3114 vm_offset_t end;
3115 {
3116
3117 if (pmap == pmap_kernel()) {
3118 pmap_remove_kernel(start, end);
3119 return;
3120 }
3121
3122 /*
3123 * XXX - Temporary(?) statement to prevent panic caused
3124 	 * by vm_alloc_with_pager() handing us a software map (i.e. NULL)
3125 * to remove because it couldn't get backing store.
3126 * (I guess.)
3127 */
3128 if (pmap == NULL)
3129 return;
3130
3131 /*
3132 * If the pmap doesn't have an A table of its own, it has no mappings
3133 * that can be removed.
3134 */
3135 if (pmap->pm_a_tmgr == NULL)
3136 return;
3137
3138 /*
3139 * Remove the specified range from the pmap. If the function
3140 * returns true, the operation removed all the valid mappings
3141 * in the pmap and freed its A table. If this happened to the
3142 * currently loaded pmap, the MMU root pointer must be reloaded
3143 * with the default 'kernel' map.
3144 */
3145 if (pmap_remove_a(pmap->pm_a_tmgr, start, end)) {
3146 if (kernel_crp.rp_addr == pmap->pm_a_phys) {
3147 kernel_crp.rp_addr = kernAphys;
3148 loadcrp(&kernel_crp);
3149 /* will do TLB flush below */
3150 }
3151 pmap->pm_a_tmgr = NULL;
3152 pmap->pm_a_phys = kernAphys;
3153 }
3154
3155 /*
3156 * If we just modified the current address space,
3157 * make sure to flush the MMU cache.
3158 *
3159 	 * XXX - this could be an unnecessarily large flush.
3160 * XXX - Could decide, based on the size of the VA range
3161 * to be removed, whether to flush "by pages" or "all".
3162 */
3163 if (pmap == current_pmap())
3164 TBIAU();
3165 }
3166
3167 /* pmap_remove_a INTERNAL
3168 **
3169 * This is function number one in a set of three that removes a range
3170 * of memory in the most efficient manner by removing the highest possible
3171 * tables from the memory space. This particular function attempts to remove
3172 * as many B tables as it can, delegating the remaining fragmented ranges to
3173 * pmap_remove_b().
3174 *
3175 * If the removal operation results in an empty A table, the function returns
3176 * TRUE.
3177 *
3178 * It's ugly but will do for now.
3179 */
3180 boolean_t
3181 pmap_remove_a(a_tbl, start, end)
3182 a_tmgr_t *a_tbl;
3183 vm_offset_t start;
3184 vm_offset_t end;
3185 {
3186 boolean_t empty;
3187 int idx;
3188 vm_offset_t nstart, nend;
3189 b_tmgr_t *b_tbl;
3190 mmu_long_dte_t *a_dte;
3191 mmu_short_dte_t *b_dte;
3192
3193 /*
3194 * The following code works with what I call a 'granularity
3195 	 * reduction algorithm'. A range of addresses will always have
3196 * the following properties, which are classified according to
3197 * how the range relates to the size of the current granularity
3198 * - an A table entry:
3199 *
3200 * 1 2 3 4
3201 * -+---+---+---+---+---+---+---+-
3202 * -+---+---+---+---+---+---+---+-
3203 *
3204 * A range will always start on a granularity boundary, illustrated
3205 * by '+' signs in the table above, or it will start at some point
3206 	 * in between granularity boundaries, as illustrated by point 1.
3207 * The first step in removing a range of addresses is to remove the
3208 * range between 1 and 2, the nearest granularity boundary. This
3209 * job is handled by the section of code governed by the
3210 * 'if (start < nstart)' statement.
3211 *
3212 	 * A range will always encompass zero or more integral granules,
3213 * illustrated by points 2 and 3. Integral granules are easy to
3214 * remove. The removal of these granules is the second step, and
3215 * is handled by the code block 'if (nstart < nend)'.
3216 *
3217 * Lastly, a range will always end on a granularity boundary,
3218 	 * illustrated by point 3, or it will fall just beyond one, as at point
3219 * 4. The last step involves removing this range and is handled by
3220 * the code block 'if (nend < end)'.
3221 */
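	/*
	 * A worked example with hypothetical addresses, assuming the
	 * 32MB (0x2000000) A-table granularity implied by the 7-bit
	 * TIA index: removing [0x1F00000, 0x6100000) gives
	 *
	 *	nstart = 0x2000000, nend = 0x6000000
	 *
	 * so the head [0x1F00000, 0x2000000) and the tail
	 * [0x6000000, 0x6100000) are delegated to pmap_remove_b(),
	 * while the two whole granules in [0x2000000, 0x6000000) are
	 * removed as complete B tables.
	 */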
3222 nstart = MMU_ROUND_UP_A(start);
3223 nend = MMU_ROUND_A(end);
3224
3225 if (start < nstart) {
3226 /*
3227 * This block is executed if the range starts between
3228 		 * granularity boundaries.
3229 *
3230 * First find the DTE which is responsible for mapping
3231 * the start of the range.
3232 */
3233 idx = MMU_TIA(start);
3234 a_dte = &a_tbl->at_dtbl[idx];
3235
3236 /*
3237 * If the DTE is valid then delegate the removal of the sub
3238 * range to pmap_remove_b(), which can remove addresses at
3239 * a finer granularity.
3240 */
3241 if (MMU_VALID_DT(*a_dte)) {
3242 b_dte = mmu_ptov(a_dte->addr.raw);
3243 b_tbl = mmuB2tmgr(b_dte);
3244
3245 /*
3246 * The sub range to be removed starts at the start
3247 * of the full range we were asked to remove, and ends
3248 			 * at the lesser of:
3249 			 * 1. The end of the full range, -or-
3250 			 * 2. The start of the range, rounded up to the
3251 			 *    nearest granularity boundary.
3252 */
3253 if (end < nstart)
3254 empty = pmap_remove_b(b_tbl, start, end);
3255 else
3256 empty = pmap_remove_b(b_tbl, start, nstart);
3257
3258 /*
3259 * If the removal resulted in an empty B table,
3260 * invalidate the DTE that points to it and decrement
3261 * the valid entry count of the A table.
3262 */
3263 if (empty) {
3264 a_dte->attr.raw = MMU_DT_INVALID;
3265 a_tbl->at_ecnt--;
3266 }
3267 }
3268 /*
3269 		 * If the DTE is invalid, the address range is already
3270 		 * nonexistent and can simply be skipped.
3271 */
3272 }
3273 if (nstart < nend) {
3274 /*
3275 * This block is executed if the range spans a whole number
3276 * multiple of granules (A table entries.)
3277 *
3278 * First find the DTE which is responsible for mapping
3279 * the start of the first granule involved.
3280 */
3281 idx = MMU_TIA(nstart);
3282 a_dte = &a_tbl->at_dtbl[idx];
3283
3284 /*
3285 * Remove entire sub-granules (B tables) one at a time,
3286 * until reaching the end of the range.
3287 */
3288 for (; nstart < nend; a_dte++, nstart += MMU_TIA_RANGE)
3289 if (MMU_VALID_DT(*a_dte)) {
3290 /*
3291 * Find the B table manager for the
3292 * entry and free it.
3293 */
3294 b_dte = mmu_ptov(a_dte->addr.raw);
3295 b_tbl = mmuB2tmgr(b_dte);
3296 free_b_table(b_tbl, TRUE);
3297
3298 /*
3299 * Invalidate the DTE that points to the
3300 * B table and decrement the valid entry
3301 * count of the A table.
3302 */
3303 a_dte->attr.raw = MMU_DT_INVALID;
3304 a_tbl->at_ecnt--;
3305 }
3306 }
3307 if (nend < end) {
3308 /*
3309 * This block is executed if the range ends beyond a
3310 * granularity boundary.
3311 *
3312 * First find the DTE which is responsible for mapping
3313 * the start of the nearest (rounded down) granularity
3314 * boundary.
3315 */
3316 idx = MMU_TIA(nend);
3317 a_dte = &a_tbl->at_dtbl[idx];
3318
3319 /*
3320 * If the DTE is valid then delegate the removal of the sub
3321 * range to pmap_remove_b(), which can remove addresses at
3322 * a finer granularity.
3323 */
3324 if (MMU_VALID_DT(*a_dte)) {
3325 /*
3326 * Find the B table manager for the entry
3327 * and hand it to pmap_remove_b() along with
3328 * the sub range.
3329 */
3330 b_dte = mmu_ptov(a_dte->addr.raw);
3331 b_tbl = mmuB2tmgr(b_dte);
3332
3333 empty = pmap_remove_b(b_tbl, nend, end);
3334
3335 /*
3336 * If the removal resulted in an empty B table,
3337 * invalidate the DTE that points to it and decrement
3338 * the valid entry count of the A table.
3339 */
3340 if (empty) {
3341 a_dte->attr.raw = MMU_DT_INVALID;
3342 a_tbl->at_ecnt--;
3343 }
3344 }
3345 }
3346
3347 /*
3348 * If there are no more entries in the A table, release it
3349 * back to the available pool and return TRUE.
3350 */
3351 if (a_tbl->at_ecnt == 0) {
3352 a_tbl->at_parent = NULL;
3353 TAILQ_REMOVE(&a_pool, a_tbl, at_link);
3354 TAILQ_INSERT_HEAD(&a_pool, a_tbl, at_link);
3355 empty = TRUE;
3356 } else {
3357 empty = FALSE;
3358 }
3359
3360 return empty;
3361 }
3362
3363 /* pmap_remove_b INTERNAL
3364 **
3365 * Remove a range of addresses from an address space, trying to remove entire
3366 * C tables if possible.
3367 *
3368 * If the operation results in an empty B table, the function returns TRUE.
3369 */
3370 boolean_t
3371 pmap_remove_b(b_tbl, start, end)
3372 b_tmgr_t *b_tbl;
3373 vm_offset_t start;
3374 vm_offset_t end;
3375 {
3376 boolean_t empty;
3377 int idx;
3378 vm_offset_t nstart, nend, rstart;
3379 c_tmgr_t *c_tbl;
3380 mmu_short_dte_t *b_dte;
3381 mmu_short_pte_t *c_dte;
3382
3383
3384 nstart = MMU_ROUND_UP_B(start);
3385 nend = MMU_ROUND_B(end);
3386
3387 if (start < nstart) {
3388 idx = MMU_TIB(start);
3389 b_dte = &b_tbl->bt_dtbl[idx];
3390 if (MMU_VALID_DT(*b_dte)) {
3391 c_dte = mmu_ptov(MMU_DTE_PA(*b_dte));
3392 c_tbl = mmuC2tmgr(c_dte);
3393 if (end < nstart)
3394 empty = pmap_remove_c(c_tbl, start, end);
3395 else
3396 empty = pmap_remove_c(c_tbl, start, nstart);
3397 if (empty) {
3398 b_dte->attr.raw = MMU_DT_INVALID;
3399 b_tbl->bt_ecnt--;
3400 }
3401 }
3402 }
3403 if (nstart < nend) {
3404 idx = MMU_TIB(nstart);
3405 b_dte = &b_tbl->bt_dtbl[idx];
3406 rstart = nstart;
3407 while (rstart < nend) {
3408 if (MMU_VALID_DT(*b_dte)) {
3409 c_dte = mmu_ptov(MMU_DTE_PA(*b_dte));
3410 c_tbl = mmuC2tmgr(c_dte);
3411 free_c_table(c_tbl, TRUE);
3412 b_dte->attr.raw = MMU_DT_INVALID;
3413 b_tbl->bt_ecnt--;
3414 }
3415 b_dte++;
3416 rstart += MMU_TIB_RANGE;
3417 }
3418 }
3419 if (nend < end) {
3420 idx = MMU_TIB(nend);
3421 b_dte = &b_tbl->bt_dtbl[idx];
3422 if (MMU_VALID_DT(*b_dte)) {
3423 c_dte = mmu_ptov(MMU_DTE_PA(*b_dte));
3424 c_tbl = mmuC2tmgr(c_dte);
3425 empty = pmap_remove_c(c_tbl, nend, end);
3426 if (empty) {
3427 b_dte->attr.raw = MMU_DT_INVALID;
3428 b_tbl->bt_ecnt--;
3429 }
3430 }
3431 }
3432
3433 if (b_tbl->bt_ecnt == 0) {
3434 b_tbl->bt_parent = NULL;
3435 TAILQ_REMOVE(&b_pool, b_tbl, bt_link);
3436 TAILQ_INSERT_HEAD(&b_pool, b_tbl, bt_link);
3437 empty = TRUE;
3438 } else {
3439 empty = FALSE;
3440 }
3441
3442 return empty;
3443 }
3444
3445 /* pmap_remove_c INTERNAL
3446 **
3447 * Remove a range of addresses from the given C table.
3448 */
3449 boolean_t
3450 pmap_remove_c(c_tbl, start, end)
3451 c_tmgr_t *c_tbl;
3452 vm_offset_t start;
3453 vm_offset_t end;
3454 {
3455 boolean_t empty;
3456 int idx;
3457 mmu_short_pte_t *c_pte;
3458
3459 idx = MMU_TIC(start);
3460 c_pte = &c_tbl->ct_dtbl[idx];
3461 	for (; start < end; start += MMU_PAGE_SIZE, c_pte++) {
3462 if (MMU_VALID_DT(*c_pte)) {
3463 pmap_remove_pte(c_pte);
3464 c_tbl->ct_ecnt--;
3465 }
3466 }
3467
3468 if (c_tbl->ct_ecnt == 0) {
3469 c_tbl->ct_parent = NULL;
3470 TAILQ_REMOVE(&c_pool, c_tbl, ct_link);
3471 TAILQ_INSERT_HEAD(&c_pool, c_tbl, ct_link);
3472 empty = TRUE;
3473 } else {
3474 empty = FALSE;
3475 }
3476
3477 return empty;
3478 }
3479
3480 /* is_managed INTERNAL
3481 **
3482 * Determine if the given physical address is managed by the PV system.
3483 * Note that this logic assumes that no one will ask for the status of
3484 * addresses which lie in-between the memory banks on the 3/80. If they
3485 * do so, it will falsely report that it is managed.
3486 *
3487 * Note: A "managed" address is one that was reported to the VM system as
3488 * a "usable page" during system startup. As such, the VM system expects the
3489 * pmap module to keep accurate track of the usage of those pages.
3490 * Any page not given to the VM system at startup does not exist (as far as
3491 * the VM system is concerned) and is therefore "unmanaged." Examples are
3492 * those pages which belong to the ROM monitor and the memory allocated before
3493 * the VM system was started.
3494 */
3495 boolean_t
3496 is_managed(pa)
3497 vm_offset_t pa;
3498 {
3499 if (pa >= avail_start && pa < avail_end)
3500 return TRUE;
3501 else
3502 return FALSE;
3503 }
3504
3505 /* pmap_bootstrap_alloc INTERNAL
3506 **
3507 * Used internally for memory allocation at startup when malloc is not
3508 * available. This code will fail once it crosses the first memory
3509 * bank boundary on the 3/80. Hopefully by then however, the VM system
3510 * will be in charge of allocation.
3511 */
3512 void *
3513 pmap_bootstrap_alloc(size)
3514 int size;
3515 {
3516 void *rtn;
3517
3518 #ifdef PMAP_DEBUG
3519 if (bootstrap_alloc_enabled == FALSE) {
3520 mon_printf("pmap_bootstrap_alloc: disabled\n");
3521 sunmon_abort();
3522 }
3523 #endif
3524
3525 rtn = (void *) virtual_avail;
3526 virtual_avail += size;
3527
3528 #ifdef PMAP_DEBUG
3529 if (virtual_avail > virtual_contig_end) {
3530 mon_printf("pmap_bootstrap_alloc: out of mem\n");
3531 sunmon_abort();
3532 }
3533 #endif
3534
3535 return rtn;
3536 }
3537
3538 /* pmap_bootstrap_aalign INTERNAL
3539 **
3540 * Used to ensure that the next call to pmap_bootstrap_alloc() will
3541 * return a chunk of memory aligned to the specified size.
3542 *
3543 * Note: This function will only support alignment sizes that are powers
3544 * of two.
3545 */
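/*
 * A worked example with a hypothetical value: for an 8K (0x2000)
 * alignment request with virtual_avail == 0x40003100, off == 0x1100,
 * so 0xF00 bytes are discarded and the next allocation will start at
 * the aligned address 0x40004000.
 */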
3546 void
3547 pmap_bootstrap_aalign(size)
3548 int size;
3549 {
3550 int off;
3551
3552 off = virtual_avail & (size - 1);
3553 if (off) {
3554 (void) pmap_bootstrap_alloc(size - off);
3555 }
3556 }
3557
3558 /* pmap_pa_exists
3559 **
3560 * Used by the /dev/mem driver to see if a given PA is memory
3561 * that can be mapped. (The PA is not in a hole.)
3562 */
3563 int
3564 pmap_pa_exists(pa)
3565 vm_offset_t pa;
3566 {
3567 register int i;
3568
3569 for (i = 0; i < SUN3X_NPHYS_RAM_SEGS; i++) {
3570 if ((pa >= avail_mem[i].pmem_start) &&
3571 (pa < avail_mem[i].pmem_end))
3572 return (1);
3573 if (avail_mem[i].pmem_next == NULL)
3574 break;
3575 }
3576 return (0);
3577 }
3578
3579 /* Called only from locore.s and pmap.c */
3580 void _pmap_switch __P((pmap_t pmap));
3581
3582 /*
3583 * _pmap_switch INTERNAL
3584 *
3585 * This is called by locore.s:cpu_switch() when it is
3586 * switching to a new process. Load new translations.
3587 * Note: done in-line by locore.s unless PMAP_DEBUG
3588 *
3589 * Note that we do NOT allocate a context here, but
3590 * share the "kernel only" context until we really
3591 * need our own context for user-space mappings in
3592 * pmap_enter_user(). [ s/context/mmu A table/ ]
3593 */
3594 void
3595 _pmap_switch(pmap)
3596 pmap_t pmap;
3597 {
3598 u_long rootpa;
3599
3600 /*
3601 * Only do reload/flush if we have to.
3602 * Note that if the old and new process
3603 * were BOTH using the "null" context,
3604 * then this will NOT flush the TLB.
3605 */
3606 rootpa = pmap->pm_a_phys;
3607 if (kernel_crp.rp_addr != rootpa) {
3608 DPRINT(("pmap_activate(%p)\n", pmap));
3609 kernel_crp.rp_addr = rootpa;
3610 loadcrp(&kernel_crp);
3611 TBIAU();
3612 }
3613 }
3614
3615 /*
3616 * Exported version of pmap_activate(). This is called from the
3617 * machine-independent VM code when a process is given a new pmap.
3618 * If (p == curproc) do like cpu_switch would do; otherwise just
3619 * take this as notification that the process has a new pmap.
3620 */
3621 void
3622 pmap_activate(p)
3623 struct proc *p;
3624 {
3625 pmap_t pmap = p->p_vmspace->vm_map.pmap;
3626 int s;
3627
3628 if (p == curproc) {
3629 s = splimp();
3630 _pmap_switch(pmap);
3631 splx(s);
3632 }
3633 }
3634
3635 /*
3636 * pmap_deactivate INTERFACE
3637 **
3638 * This is called to deactivate the specified process's address space.
3639 * XXX The semantics of this function are currently not well-defined.
3640 */
3641 void
3642 pmap_deactivate(p)
3643 struct proc *p;
3644 {
3645 /* not implemented. */
3646 }
3647
3648 /* pmap_update
3649 **
3650 * Apply any delayed changes scheduled for all pmaps immediately.
3651 *
3652 * No delayed operations are currently done in this pmap.
3653 */
3654 void
3655 pmap_update()
3656 {
3657 /* not implemented. */
3658 }
3659
3660 /*
3661 * Fill in the sun3x-specific part of the kernel core header
3662 * for dumpsys(). (See machdep.c for the rest.)
3663 */
3664 void
3665 pmap_kcore_hdr(sh)
3666 struct sun3x_kcore_hdr *sh;
3667 {
3668 u_long spa, len;
3669 int i;
3670
3671 sh->pg_frame = MMU_SHORT_PTE_BASEADDR;
3672 sh->pg_valid = MMU_DT_PAGE;
3673 sh->contig_end = virtual_contig_end;
3674 sh->kernCbase = (u_long) kernCbase;
3675 for (i = 0; i < SUN3X_NPHYS_RAM_SEGS; i++) {
3676 spa = avail_mem[i].pmem_start;
3677 spa = m68k_trunc_page(spa);
3678 len = avail_mem[i].pmem_end - spa;
3679 len = m68k_round_page(len);
3680 sh->ram_segs[i].start = spa;
3681 sh->ram_segs[i].size = len;
3682 }
3683 }
3684
3685
3686 /* pmap_virtual_space INTERFACE
3687 **
3688 * Return the current available range of virtual addresses in the
3689 * arguments provided. Only really called once.
3690 */
3691 void
3692 pmap_virtual_space(vstart, vend)
3693 vm_offset_t *vstart, *vend;
3694 {
3695 *vstart = virtual_avail;
3696 *vend = virtual_end;
3697 }
3698
3699 /*
3700 * Provide memory to the VM system.
3701 *
3702 * Assume avail_start is always in the
3703 * first segment, as pmap_bootstrap does.
3704 */
3705 static void
3706 pmap_page_upload()
3707 {
3708 vm_offset_t a, b; /* memory range */
3709 int i;
3710
3711 /* Supply the memory in segments. */
3712 for (i = 0; i < SUN3X_NPHYS_RAM_SEGS; i++) {
3713 a = atop(avail_mem[i].pmem_start);
3714 b = atop(avail_mem[i].pmem_end);
3715 if (i == 0)
3716 a = atop(avail_start);
3717
3718 uvm_page_physload(a, b, a, b, VM_FREELIST_DEFAULT);
3719
3720 if (avail_mem[i].pmem_next == NULL)
3721 break;
3722 }
3723 }
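
/*
 * Worked example (values are illustrative only): if segment 0 covers
 * PA 0x0 - 0x400000 and the bootstrap allocator consumed memory up to
 * avail_start = 0x3a000, then uvm receives the page range
 * atop(0x3a000) .. atop(0x400000) for that segment; later segments
 * are handed over whole.
 */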
3724
3725 /* pmap_page_index INTERFACE
3726 **
3727 * Return the index of the given physical page in a list of usable
3728 * physical pages in the system. Holes in physical memory may be counted
3729 * if so desired. As long as pmap_free_pages() and pmap_page_index()
3730 * agree as to whether holes in memory do or do not count as valid pages,
3731 * it really doesn't matter. However, if you like to save a little
3732 * memory, don't count holes as valid pages. This is even more true when
3733 * the holes are large.
3734 *
3735 * We will not count holes as valid pages. We can generate page indices
3736 * that conform to this by using the memory bank structures initialized
3737 * in pmap_alloc_pv().
3738 */
3739 int
3740 pmap_page_index(pa)
3741 vm_offset_t pa;
3742 {
3743 struct pmap_physmem_struct *bank = avail_mem;
3744 vm_offset_t off;
3745
3746 /* Search for the memory bank with this page. */
3747 /* XXX - What if it is not physical memory? */
3748 while (pa > bank->pmem_end)
3749 bank = bank->pmem_next;
3750 off = pa - bank->pmem_start;
3751
3752 return (bank->pmem_pvbase + m68k_btop(off));
3753 }
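
/*
 * Worked example (values are illustrative only): if the second memory
 * bank starts at PA 0x400000 and was assigned pmem_pvbase = 512, then
 * PA 0x402000 gives off = 0x2000, m68k_btop(off) = 1 with 8K pages,
 * and the returned index is 512 + 1 = 513.
 */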
3754
3755 /* pmap_count INTERFACE
3756 **
3757 * Return the number of resident (valid) pages in the given pmap.
3758 *
3759 * Note: If this function is handed the kernel map, it will report
3760 * that it has no mappings. Hopefully the VM system won't ask for kernel
3761 * map statistics.
3762 */
3763 segsz_t
3764 pmap_count(pmap, type)
3765 pmap_t pmap;
3766 int type;
3767 {
3768 u_int count;
3769 int a_idx, b_idx;
3770 a_tmgr_t *a_tbl;
3771 b_tmgr_t *b_tbl;
3772 c_tmgr_t *c_tbl;
3773
3774 /*
3775 * If the pmap does not have its own A table manager, it has no
3776 * valid entries.
3777 */
3778 if (pmap->pm_a_tmgr == NULL)
3779 return 0;
3780
3781 a_tbl = pmap->pm_a_tmgr;
3782
3783 count = 0;
3784 for (a_idx = 0; a_idx < MMU_TIA(KERNBASE); a_idx++) {
3785 if (MMU_VALID_DT(a_tbl->at_dtbl[a_idx])) {
3786 b_tbl = mmuB2tmgr(mmu_ptov(a_tbl->at_dtbl[a_idx].addr.raw));
3787 for (b_idx = 0; b_idx < MMU_B_TBL_SIZE; b_idx++) {
3788 if (MMU_VALID_DT(b_tbl->bt_dtbl[b_idx])) {
3789 c_tbl = mmuC2tmgr(
3790 mmu_ptov(MMU_DTE_PA(b_tbl->bt_dtbl[b_idx])));
3791 if (type == 0)
3792 /*
3793 * A resident entry count has been requested.
3794 */
3795 count += c_tbl->ct_ecnt;
3796 else
3797 /*
3798 * A wired entry count has been requested.
3799 */
3800 count += c_tbl->ct_wcnt;
3801 }
3802 }
3803 }
3804 }
3805
3806 return count;
3807 }
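
/*
 * Usage note (illustrative, not part of this file): the 'type' argument
 * selects which count is returned, e.g.
 *
 *	resident = pmap_count(pmap, 0);		resident entry count
 *	wired    = pmap_count(pmap, 1);		wired entry count
 *
 * Because the A table scan stops at MMU_TIA(KERNBASE), kernel mappings
 * are never included, consistent with the note above.
 */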
3808
3809 /************************ SUN3 COMPATIBILITY ROUTINES ********************
3810 * The following routines are only used by DDB for tricky kernel text *
3811 * operations in db_memrw.c. They are provided for sun3      *
3812 * compatibility. *
3813 *************************************************************************/
3814 /* get_pte INTERNAL
3815 **
3816 * Return the page descriptor that describes the kernel mapping
3817 * of the given virtual address.
3818 */
3819 extern u_long ptest_addr __P((u_long)); /* XXX: locore.s */
3820 u_int
3821 get_pte(va)
3822 vm_offset_t va;
3823 {
3824 u_long pte_pa;
3825 mmu_short_pte_t *pte;
3826
3827 /* Get the physical address of the PTE */
3828 pte_pa = ptest_addr(va & ~PGOFSET);
3829
3830 /* Convert to a virtual address... */
3831 pte = (mmu_short_pte_t *) (KERNBASE + pte_pa);
3832
3833 /* Make sure it is in our level-C tables... */
3834 if ((pte < kernCbase) ||
3835 (pte >= &mmuCbase[NUM_USER_PTES]))
3836 return 0;
3837
3838 /* ... and just return its contents. */
3839 return (pte->attr.raw);
3840 }
3841
3842
3843 /* set_pte INTERNAL
3844 **
3845 * Set the page descriptor that describes the kernel mapping
3846 * of the given virtual address.
3847 */
3848 void
3849 set_pte(va, pte)
3850 vm_offset_t va;
3851 u_int pte;
3852 {
3853 u_long idx;
3854
3855 if (va < KERNBASE)
3856 return;
3857
3858 idx = (unsigned long) m68k_btop(va - KERNBASE);
3859 kernCbase[idx].attr.raw = pte;
3860 TBIS(va);
3861 }
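
/*
 * Illustrative sketch, not part of this file: db_memrw.c can use the
 * pair above to patch write-protected kernel text, assuming the
 * write-protect bit is MMU_SHORT_PTE_WP (the helper name
 * db_patch_byte() is hypothetical):
 *
 *	void
 *	db_patch_byte(va, val)
 *		vm_offset_t va;
 *		u_char val;
 *	{
 *		u_int oldpte;
 *
 *		oldpte = get_pte(va);
 *		if (oldpte & MMU_SHORT_PTE_WP)
 *			set_pte(va, oldpte & ~MMU_SHORT_PTE_WP);
 *		*(u_char *)va = val;
 *		set_pte(va, oldpte);
 *	}
 *
 * Note that set_pte() already does the TBIS() flush, so no additional
 * TLB maintenance is needed here.
 */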
3862
3863 /*
3864 * Routine: pmap_procwr
3865 *
3866 * Function:
3867 * Synchronize caches corresponding to [va, va+len) in process p.
3868 */
3869 void
3870 pmap_procwr(p, va, len)
3871 struct proc *p;
3872 vaddr_t va;
3873 size_t len;
3874 {
3875 (void)cachectl1(0x80000004, va, len, p);
3876 }
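
/*
 * Usage note (illustrative, not part of this file): MI code calls this
 * after writing instructions into a process' address space, e.g. when
 * the ptrace/procfs write path plants a breakpoint:
 *
 *	pmap_procwr(p, addr, sizeof(bpt_insn));
 *
 * The 0x80000004 flag passed to cachectl1() appears to correspond to
 * CC_EXTPURGE|CC_IPURGE, i.e. purge the instruction cache for the
 * affected range.
 */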
3877
3878
3879 #ifdef PMAP_DEBUG
3880 /************************** DEBUGGING ROUTINES **************************
3881 * The following routines are meant to be an aid to debugging the pmap *
3882 * system. They are callable from the DDB command line and should be *
3883 * prepared to be handed unstable or incomplete states of the system. *
3884 ************************************************************************/
3885
3886 /* pv_list
3887 **
3888 * List all mappings found on the pv list for the given physical page.
3889 * To avoid endless loops, the listing will stop at the end of the list
3890 * or after 'n' entries - whichever comes first.
3891 */
3892 void
3893 pv_list(pa, n)
3894 vm_offset_t pa;
3895 int n;
3896 {
3897 int idx;
3898 vm_offset_t va;
3899 pv_t *pv;
3900 c_tmgr_t *c_tbl;
3901 pmap_t pmap;
3902
3903 pv = pa2pv(pa);
3904 idx = pv->pv_idx;
3905
3906 for (; idx != PVE_EOL && n > 0;
3907 idx = pvebase[idx].pve_next, n--) {
3908
3909 va = pmap_get_pteinfo(idx, &pmap, &c_tbl);
3910 printf("idx %d, pmap 0x%x, va 0x%x, c_tbl %x\n",
3911 idx, (u_int) pmap, (u_int) va, (u_int) c_tbl);
3912 }
3913 }
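
/*
 * Usage note (illustrative): from the DDB prompt this can be invoked
 * with the "call" command, e.g.
 *
 *	db> call pv_list(0x3f56000, 8)
 *
 * which prints at most 8 mappings of the physical page at PA 0x3f56000
 * (addresses here are examples only).
 */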
3914 #endif /* PMAP_DEBUG */
3915
3916 #ifdef NOT_YET
3917 /* and maybe not ever */
3918 /************************** LOW-LEVEL ROUTINES **************************
3919 * These routines will eventually be rewritten into assembly and placed  *
3920 * in locore.s. They are here now as stubs so that the pmap module can *
3921 * be linked as a standalone user program for testing. *
3922 ************************************************************************/
3923 /* flush_atc_crp INTERNAL
3924 **
3925 * Flush all page descriptors derived from the given CPU Root Pointer
3926 * (CRP), or 'A' table as it is known here, from the 68851's address
3927 * translation cache (ATC).
3928 */
3929 void
3930 flush_atc_crp(a_tbl)
3931 {
3932 mmu_long_rp_t rp;
3933
3934 /* Create a temporary root table pointer that points to the
3935 * given A table.
3936 */
3937 rp.attr.raw = ~MMU_LONG_RP_LU;
3938 rp.addr.raw = (unsigned int) a_tbl;
3939
3940 mmu_pflushr(&rp);
3941 /* mmu_pflushr:
3942 * movel sp(4)@,a0
3943 * pflushr a0@
3944 * rts
3945 */
3946 }
3947 #endif /* NOT_YET */
3948