/*	$NetBSD: pmap.c,v 1.84.4.1 2005/06/08 11:34:17 tron Exp $	*/

/*-
 * Copyright (c) 1996, 1997 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jeremy Cooper.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * XXX These comments aren't quite accurate.  Need to change.
 * The sun3x uses the MC68851 Memory Management Unit, which is built
 * into the CPU.  The 68851 maps virtual to physical addresses using
 * a multi-level table lookup, which is stored in the very memory that
 * it maps.  The number of levels of lookup is configurable from one
 * to four.  In this implementation, we use three, named 'A' through 'C'.
 *
 * The MMU translates virtual addresses into physical addresses by
 * traversing these tables in a process called a 'table walk'.  The most
 * significant 7 bits of the Virtual Address ('VA') being translated are
 * used as an index into the level A table, whose base in physical memory
 * is stored in a special MMU register, the 'CPU Root Pointer' or CRP.  The
 * address found at that index in the A table is used as the base
 * address for the next table, the B table.  The next six bits of the VA are
 * used as an index into the B table, which in turn gives the base address
 * of the third and final C table.
 *
 * The next six bits of the VA are used as an index into the C table to
 * locate a Page Table Entry (PTE).  The PTE contains the physical base
 * address of a page, to which the remaining 13 bits of the VA are added,
 * producing the mapped physical address.
 *
 * To map the entire memory space in this manner would require 2114296 bytes
 * of page tables per process - quite expensive.  Instead we will
 * allocate a fixed but considerably smaller space for the page tables at
 * the time the VM system is initialized.  When the pmap code is asked by
 * the kernel to map a VA to a PA, it allocates tables as needed from this
 * pool.  When there are no more tables in the pool, tables are stolen
 * from the oldest mapped entries in the tree.  This is only possible
 * because all memory mappings are stored in the kernel memory map
 * structures, independent of the pmap structures.  A VA which references
 * one of these invalidated maps will cause a page fault.  The kernel
 * will determine that the page fault was caused by a task using a valid
 * VA, but for some reason (which does not concern it), that address was
 * not mapped.  It will ask the pmap code to re-map the entry and then
 * it will resume executing the faulting task.
 *
 * In this manner the most efficient use of the page table space is
 * achieved.  Tasks which do not execute often will have their tables
 * stolen and reused by tasks which execute more frequently.  The best
 * size for the page table pool will probably be determined by
 * experimentation.
 *
 * You read all of the comments so far.  Good for you.
 * Now go play!
 */
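
/*
 * An illustrative walk-through of the 7/6/6/13 split described above
 * (the example VA is arbitrary):
 *
 *	VA 0x12345678
 *	A index = VA >> 25          = 0x09
 *	B index = (VA >> 19) & 0x3f = 0x06
 *	C index = (VA >> 13) & 0x3f = 0x22
 *	offset  = VA & 0x1fff       = 0x1678
 *
 * Reassembling (0x09 << 25) | (0x06 << 19) | (0x22 << 13) | 0x1678
 * gives back 0x12345678, so the three indices plus the offset fully
 * describe the VA.  The PTE reached at C index 0x22 supplies the
 * physical page to which the 13-bit offset is added.
 */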

/*** A Note About the 68851 Address Translation Cache
 * The MC68851 has a 64 entry cache, called the Address Translation Cache
 * or 'ATC'.  This cache stores the most recently used page descriptors
 * accessed by the MMU when it does translations.  Using a marker called a
 * 'task alias' the MMU can store the descriptors from 8 different table
 * spaces concurrently.  The task alias is associated with the base
 * address of the level A table of that address space.  When an address
 * space is currently active (the CRP currently points to its A table)
 * the only cached descriptors that will be obeyed are ones which have a
 * matching task alias of the current space associated with them.
 *
 * Since the cache is always consulted before any table lookups are done,
 * it is important that it accurately reflect the state of the MMU tables.
 * Whenever a change has been made to a table that has been loaded into
 * the MMU, the code must be sure to flush any cached entries that are
 * affected by the change.  These instances are documented in the code at
 * various points.
 */
/*** A Note About the Note About the 68851 Address Translation Cache
 * 4 months into this code I discovered that the sun3x does not have
 * a MC68851 chip.  Instead, it has a version of this MMU that is part of
 * the 68030 CPU.
 * Although it behaves very similarly to the 68851, it only has 1 task
 * alias and a 22-entry cache.  So sadly (or happily), the first paragraph
 * of the previous note does not apply to the sun3x pmap.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.84.4.1 2005/06/08 11:34:17 tron Exp $");

#include "opt_ddb.h"
#include "opt_pmap_debug.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/user.h>
#include <sys/queue.h>
#include <sys/kcore.h>

#include <uvm/uvm.h>

#include <machine/cpu.h>
#include <machine/kcore.h>
#include <machine/mon.h>
#include <machine/pmap.h>
#include <machine/pte.h>
#include <machine/vmparam.h>
#include <m68k/cacheops.h>

#include <sun3/sun3/cache.h>
#include <sun3/sun3/machdep.h>

#include "pmap_pvt.h"

/* XXX - What headers declare these? */
extern struct pcb *curpcb;
extern int physmem;

/* Defined in locore.s */
extern char kernel_text[];

/* Defined by the linker */
extern char etext[], edata[], end[];
extern char *esym;	/* DDB */

/*************************** DEBUGGING DEFINITIONS ***********************
 * Macros, preprocessor defines and variables used in debugging can make *
 * code hard to read.  Anything used exclusively for debugging purposes  *
 * is defined here to avoid having such mess scattered around the file.  *
 *************************************************************************/
#ifdef	PMAP_DEBUG
/*
 * To aid the debugging process, macros should be expanded into smaller steps
 * that accomplish the same goal, yet provide convenient places for placing
 * breakpoints.  When this code is compiled with PMAP_DEBUG mode defined, the
 * 'INLINE' keyword is defined to an empty string.  This way, any function
 * defined to be a 'static INLINE' will become 'outlined' and compiled as
 * a separate function, which is much easier to debug.
 */
#define	INLINE	/* nothing */

/*
 * It is sometimes convenient to watch the activity of a particular table
 * in the system.  The following variables are used for that purpose.
 */
a_tmgr_t *pmap_watch_atbl = 0;
b_tmgr_t *pmap_watch_btbl = 0;
c_tmgr_t *pmap_watch_ctbl = 0;

int pmap_debug = 0;
#define DPRINT(args) if (pmap_debug) printf args

#else	/********** Stuff below is defined if NOT debugging **************/

#define	INLINE	inline
#define DPRINT(args)  /* nada */

#endif	/* PMAP_DEBUG */
/*********************** END OF DEBUGGING DEFINITIONS ********************/

/*** Management Structure - Memory Layout
 * For every MMU table in the sun3x pmap system there must be a way to
 * manage it; we must know which process is using it, what other tables
 * depend on it, and whether or not it contains any locked pages.  This
 * is solved by the creation of 'table management' or 'tmgr'
 * structures.  One for each MMU table in the system.
 *
 *              MAP OF MEMORY USED BY THE PMAP SYSTEM
 *
 *                towards lower memory
 * kernAbase -> +-------------------------------------------------------+
 *              | Kernel MMU A level table                              |
 * kernBbase -> +-------------------------------------------------------+
 *              | Kernel MMU B level tables                             |
 * kernCbase -> +-------------------------------------------------------+
 *              |                                                       |
 *              | Kernel MMU C level tables                             |
 *              |                                                       |
 * mmuCbase  -> +-------------------------------------------------------+
 *              | User MMU C level tables                               |
 * mmuAbase  -> +-------------------------------------------------------+
 *              |                                                       |
 *              | User MMU A level tables                               |
 *              |                                                       |
 * mmuBbase  -> +-------------------------------------------------------+
 *              | User MMU B level tables                               |
 * tmgrAbase -> +-------------------------------------------------------+
 *              | TMGR A level table structures                         |
 * tmgrBbase -> +-------------------------------------------------------+
 *              | TMGR B level table structures                         |
 * tmgrCbase -> +-------------------------------------------------------+
 *              | TMGR C level table structures                         |
 * pvbase    -> +-------------------------------------------------------+
 *              | Physical to Virtual mapping table (list heads)        |
 * pvebase   -> +-------------------------------------------------------+
 *              | Physical to Virtual mapping table (list elements)     |
 *              |                                                       |
 *              +-------------------------------------------------------+
 *                towards higher memory
 *
 * For every A table in the MMU A area, there will be a corresponding
 * a_tmgr structure in the TMGR A area.  The same will be true for
 * the B and C tables.  This arrangement will make it easy to find the
 * controlling tmgr structure for any table in the system by use of
 * (relatively) simple macros.
 */

/*
 * Global variables for storing the base addresses for the areas
 * labeled above.
 */
static vaddr_t		kernAphys;
static mmu_long_dte_t	*kernAbase;
static mmu_short_dte_t	*kernBbase;
static mmu_short_pte_t	*kernCbase;
static mmu_short_pte_t	*mmuCbase;
static mmu_short_dte_t	*mmuBbase;
static mmu_long_dte_t	*mmuAbase;
static a_tmgr_t		*Atmgrbase;
static b_tmgr_t		*Btmgrbase;
static c_tmgr_t		*Ctmgrbase;
static pv_t		*pvbase;
static pv_elem_t	*pvebase;
struct pmap		kernel_pmap;

/*
 * This holds the CRP currently loaded into the MMU.
 */
struct mmu_rootptr kernel_crp;

/*
 * Just all around global variables.
 */
static TAILQ_HEAD(a_pool_head_struct, a_tmgr_struct) a_pool;
static TAILQ_HEAD(b_pool_head_struct, b_tmgr_struct) b_pool;
static TAILQ_HEAD(c_pool_head_struct, c_tmgr_struct) c_pool;


/*
 * Flags used to mark the safety/availability of certain operations or
 * resources.
 */
static boolean_t bootstrap_alloc_enabled = FALSE; /* Safe to use pmap_bootstrap_alloc(). */
int tmp_vpages_inuse;	/* Temporary virtual pages are in use */

/*
 * XXX: For now, retain the traditional variables that were
 * used in the old pmap/vm interface (without NONCONTIG).
 */
/* Kernel virtual address space available: */
vaddr_t virtual_avail, virtual_end;
/* Physical address space available: */
paddr_t avail_start, avail_end;

/* This keeps track of the end of the contiguously mapped range. */
vaddr_t virtual_contig_end;

/* Physical address used by pmap_next_page() */
paddr_t avail_next;

/* These are used by pmap_copy_page(), etc. */
vaddr_t tmp_vpages[2];

/* memory pool for pmap structures */
struct pool	pmap_pmap_pool;

/*
 * The 3/80 is the only member of the sun3x family that has non-contiguous
 * physical memory.  Memory is divided into 4 banks which are physically
 * locatable on the system board.  Although the size of these banks varies
 * with the size of memory they contain, their base addresses are
 * permanently fixed.  The following structure, which describes these
 * banks, is initialized by pmap_bootstrap() after it reads from a similar
 * structure provided by the ROM Monitor.
 *
 * For the other machines in the sun3x architecture which do have contiguous
 * RAM, this list will have only one entry, which will describe the entire
 * range of available memory.
 */
struct pmap_physmem_struct avail_mem[SUN3X_NPHYS_RAM_SEGS];
u_int total_phys_mem;

/*************************************************************************/

/*
 * XXX - Should "tune" these based on statistics.
 *
 * My first guess about the relative numbers of these needed is
 * based on the fact that a "typical" process will have several
 * pages mapped at low virtual addresses (text, data, bss), then
 * some mapped shared libraries, and then some stack pages mapped
 * near the high end of the VA space.  Each process can use only
 * one A table, and most will use only two B tables (maybe three)
 * and probably about four C tables.  Therefore, the first guess
 * at the relative numbers of these needed is 1:2:4 -gwr
 *
 * The number of C tables needed is closely related to the amount
 * of physical memory available plus a certain amount attributable
 * to the use of double mappings.  With a few simulation statistics
 * we can find a reasonably good estimation of this unknown value.
 * Armed with that and the above ratios, we have a good idea of what
 * is needed at each level. -j
 *
 * Note: It is not physical memory size, but the total mapped
 * virtual space required by the combined working sets of all the
 * currently _runnable_ processes.  (Sleeping ones don't count.)
 * The amount of physical memory should be irrelevant. -gwr
 */
#ifdef	FIXED_NTABLES
#define NUM_A_TABLES	16
#define NUM_B_TABLES	32
#define NUM_C_TABLES	64
#else
unsigned int	NUM_A_TABLES, NUM_B_TABLES, NUM_C_TABLES;
#endif	/* FIXED_NTABLES */

/*
 * This determines our total virtual mapping capacity.
 * Yes, it is a FIXED value so we can pre-allocate.
 */
#define NUM_USER_PTES	(NUM_C_TABLES * MMU_C_TBL_SIZE)

/*
 * The size of the Kernel Virtual Address Space (KVAS)
 * for purposes of MMU table allocation is -KERNBASE
 * (length from KERNBASE to 0xFFFFffff)
 */
#define KVAS_SIZE	(-KERNBASE)

/* Numbers of kernel MMU tables to support KVAS_SIZE. */
#define KERN_B_TABLES	(KVAS_SIZE >> MMU_TIA_SHIFT)
#define KERN_C_TABLES	(KVAS_SIZE >> MMU_TIB_SHIFT)
#define	NUM_KERN_PTES	(KVAS_SIZE >> MMU_TIC_SHIFT)
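
/*
 * A sanity check of the macros above, assuming for illustration that
 * KERNBASE is 0xF8000000 and that the MMU shifts follow the 7/6/6/13
 * split described at the top of this file (TIA=25, TIB=19, TIC=13):
 *
 *	KVAS_SIZE     = -0xF8000000      = 0x08000000 (128MB)
 *	KERN_B_TABLES = 0x08000000 >> 25 = 4
 *	KERN_C_TABLES = 0x08000000 >> 19 = 256
 *	NUM_KERN_PTES = 0x08000000 >> 13 = 16384
 *
 * 16384 PTEs at 8KB per page map exactly 128MB, as expected.
 */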

/*************************** MISCELLANEOUS MACROS *************************/
#define pmap_lock(pmap) simple_lock(&pmap->pm_lock)
#define pmap_unlock(pmap) simple_unlock(&pmap->pm_lock)
#define pmap_add_ref(pmap) ++pmap->pm_refcount
#define pmap_del_ref(pmap) --pmap->pm_refcount
#define pmap_refcount(pmap) pmap->pm_refcount

void *pmap_bootstrap_alloc(int);

static INLINE void *mmu_ptov __P((paddr_t));
static INLINE paddr_t mmu_vtop __P((void *));

#if	0
static INLINE a_tmgr_t *mmuA2tmgr __P((mmu_long_dte_t *));
#endif /* 0 */
static INLINE b_tmgr_t *mmuB2tmgr __P((mmu_short_dte_t *));
static INLINE c_tmgr_t *mmuC2tmgr __P((mmu_short_pte_t *));

static INLINE pv_t *pa2pv __P((paddr_t));
static INLINE int pteidx __P((mmu_short_pte_t *));
static INLINE pmap_t current_pmap __P((void));

/*
 * We can always convert between virtual and physical addresses
 * for anything in the range [KERNBASE ... avail_start] because
 * that range is GUARANTEED to be mapped linearly.
 * We rely heavily upon this feature!
 */
static INLINE void *
mmu_ptov(pa)
	paddr_t pa;
{
	vaddr_t va;

	va = (pa + KERNBASE);
#ifdef	PMAP_DEBUG
	if ((va < KERNBASE) || (va >= virtual_contig_end))
		panic("mmu_ptov");
#endif
	return ((void*)va);
}

static INLINE paddr_t
mmu_vtop(vva)
	void *vva;
{
	vaddr_t va;

	va = (vaddr_t)vva;
#ifdef	PMAP_DEBUG
	if ((va < KERNBASE) || (va >= virtual_contig_end))
		panic("mmu_vtop");
#endif
	return (va - KERNBASE);
}

/*
 * These macros map MMU tables to their corresponding manager structures.
 * They are needed quite often because many of the pointers in the pmap
 * system reference MMU tables and not the structures that control them.
 * There needs to be a way to find one when given the other and these
 * macros do so by taking advantage of the memory layout described above.
 * Here's a quick step through the first macro, mmuA2tmgr():
 *
 * 1) find the offset of the given MMU A table from the base of its table
 *    pool (table - mmuAbase).
 * 2) convert this offset into a table index by dividing it by the
 *    size of one MMU 'A' table. (sizeof(mmu_long_dte_t) * MMU_A_TBL_SIZE)
 * 3) use this index to select the corresponding 'A' table manager
 *    structure from the 'A' table manager pool (Atmgrbase[index]).
 */
/* This function is not currently used. */
#if	0
static INLINE a_tmgr_t *
mmuA2tmgr(mmuAtbl)
	mmu_long_dte_t *mmuAtbl;
{
	int idx;

	/* Which table is this in? */
	idx = (mmuAtbl - mmuAbase) / MMU_A_TBL_SIZE;
#ifdef	PMAP_DEBUG
	if ((idx < 0) || (idx >= NUM_A_TABLES))
		panic("mmuA2tmgr");
#endif
	return (&Atmgrbase[idx]);
}
#endif	/* 0 */

static INLINE b_tmgr_t *
mmuB2tmgr(mmuBtbl)
	mmu_short_dte_t *mmuBtbl;
{
	int idx;

	/* Which table is this in? */
	idx = (mmuBtbl - mmuBbase) / MMU_B_TBL_SIZE;
#ifdef	PMAP_DEBUG
	if ((idx < 0) || (idx >= NUM_B_TABLES))
		panic("mmuB2tmgr");
#endif
	return (&Btmgrbase[idx]);
}

/* mmuC2tmgr INTERNAL
 **
 * Given a pte known to belong to a C table, return the address of
 * that table's management structure.
 */
static INLINE c_tmgr_t *
mmuC2tmgr(mmuCtbl)
	mmu_short_pte_t *mmuCtbl;
{
	int idx;

	/* Which table is this in? */
	idx = (mmuCtbl - mmuCbase) / MMU_C_TBL_SIZE;
#ifdef	PMAP_DEBUG
	if ((idx < 0) || (idx >= NUM_C_TABLES))
		panic("mmuC2tmgr");
#endif
	return (&Ctmgrbase[idx]);
}

/* This is now a function call below.
 * #define pa2pv(pa) \
 *	(&pvbase[(unsigned long)\
 *		m68k_btop(pa)\
 *	])
 */

/* pa2pv INTERNAL
 **
 * Return the pv_list_head element which manages the given physical
 * address.
 */
static INLINE pv_t *
pa2pv(pa)
	paddr_t pa;
{
	struct pmap_physmem_struct *bank;
	int idx;

	bank = &avail_mem[0];
	while (pa >= bank->pmem_end)
		bank = bank->pmem_next;

	pa -= bank->pmem_start;
	idx = bank->pmem_pvbase + m68k_btop(pa);
#ifdef	PMAP_DEBUG
	if ((idx < 0) || (idx >= physmem))
		panic("pa2pv");
#endif
	return &pvbase[idx];
}
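
/*
 * A hypothetical example of the bank walk above.  Suppose two banks:
 *
 *	bank 0: pmem_start 0x00000000, pmem_end 0x00400000, pmem_pvbase 0
 *	bank 1: pmem_start 0x10000000, pmem_end 0x10400000, pmem_pvbase 512
 *
 * (512 == m68k_btop(4MB), assuming 8KB pages.)  For pa == 0x10004000 the
 * loop steps past bank 0, then idx = 512 + m68k_btop(0x4000) = 514.
 * The pmem_pvbase offsets are what keep the pv_head array dense in
 * spite of the holes between banks.
 */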

/* pteidx INTERNAL
 **
 * Return the index of the given PTE within the entire fixed table of
 * PTEs.
 */
static INLINE int
pteidx(pte)
	mmu_short_pte_t *pte;
{
	return (pte - kernCbase);
}

/*
 * This just offers a place to put some debugging checks,
 * and reduces the number of places "curlwp" appears...
 */
static INLINE pmap_t
current_pmap()
{
	struct vmspace *vm;
	struct vm_map *map;
	pmap_t	pmap;

	if (curlwp == NULL)
		pmap = &kernel_pmap;
	else {
		vm = curproc->p_vmspace;
		map = &vm->vm_map;
		pmap = vm_map_pmap(map);
	}

	return (pmap);
}


/*************************** FUNCTION DEFINITIONS ************************
 * These appear here merely for the compiler to enforce type checking on *
 * all function calls.                                                   *
 *************************************************************************/

/** Internal functions
 ** Most functions used only within this module are defined in
 **   pmap_pvt.h (why not here if used only here?)
 **/
static void pmap_page_upload __P((void));

/** Interface functions
 ** - functions required by the Mach VM Pmap interface, with MACHINE_CONTIG
 **   defined.
 **/
void pmap_pinit __P((pmap_t));
void pmap_release __P((pmap_t));

/********************************** CODE ********************************
 * Functions that are called from other parts of the kernel are labeled *
 * as 'INTERFACE' functions.  Functions that are only called from       *
 * within the pmap module are labeled as 'INTERNAL' functions.          *
 * Functions that are internal, but are not (currently) used at all are *
 * labeled 'INTERNAL_X'.                                                *
 ************************************************************************/

/* pmap_bootstrap INTERNAL
 **
 * Initializes the pmap system.  Called at boot time from
 * locore2.c:_vm_init()
 *
 * Reminder: having a pmap_bootstrap_alloc() and also having the VM
 * system implement pmap_steal_memory() is redundant.
 * Don't release this code without removing one or the other!
 */
void
pmap_bootstrap(nextva)
	vaddr_t nextva;
{
	struct physmemory *membank;
	struct pmap_physmem_struct *pmap_membank;
	vaddr_t va, eva;
	paddr_t pa;
	int b, c, i, j;	/* running table counts */
	int size, resvmem;

	/*
	 * This function is called by __bootstrap after it has
	 * determined the type of machine and made the appropriate
	 * patches to the ROM vectors (XXX- I don't quite know what I meant
	 * by that.)  It allocates and sets up enough of the pmap system
	 * to manage the kernel's address space.
	 */

	/*
	 * Determine the range of kernel virtual and physical
	 * space available.  Note that we ABSOLUTELY DEPEND on
	 * the fact that the first bank of memory (4MB) is
	 * mapped linearly to KERNBASE (which we guaranteed in
	 * the first instructions of locore.s).
	 * That is plenty for our bootstrap work.
	 */
	virtual_avail = m68k_round_page(nextva);
	virtual_contig_end = KERNBASE + 0x400000; /* +4MB */
	virtual_end = VM_MAX_KERNEL_ADDRESS;
	/* Don't need avail_start til later. */

	/* We may now call pmap_bootstrap_alloc(). */
	bootstrap_alloc_enabled = TRUE;

	/*
	 * This is a somewhat unwrapped loop to deal with
	 * copying the PROM's 'physmem' banks into the pmap's
	 * banks.  The following is always assumed:
	 * 1. There is always at least one bank of memory.
	 * 2. There is always a last bank of memory, and its
	 *    pmem_next member must be set to NULL.
	 */
	membank = romVectorPtr->v_physmemory;
	pmap_membank = avail_mem;
	total_phys_mem = 0;

	for (;;) { /* break on !membank */
		pmap_membank->pmem_start = membank->address;
		pmap_membank->pmem_end = membank->address + membank->size;
		total_phys_mem += membank->size;
		membank = membank->next;
		if (!membank)
			break;
		/* This silly syntax arises because pmap_membank
		 * is really a pre-allocated array, but it is put into
		 * use as a linked list.
		 */
		pmap_membank->pmem_next = pmap_membank + 1;
		pmap_membank = pmap_membank->pmem_next;
	}
	/* This is the last element. */
	pmap_membank->pmem_next = NULL;

	/*
	 * Note: total_phys_mem, physmem represent
	 * actual physical memory, including that
	 * reserved for the PROM monitor.
	 */
	physmem = btoc(total_phys_mem);

	/*
	 * Avail_end is set to the first byte of physical memory
	 * after the end of the last bank.  We use this only to
	 * determine if a physical address is "managed" memory.
	 * This address range should be reduced to prevent the
	 * physical pages needed by the PROM monitor from being used
	 * in the VM system.
	 */
	resvmem = total_phys_mem - *(romVectorPtr->memoryAvail);
	resvmem = m68k_round_page(resvmem);
	avail_end = pmap_membank->pmem_end - resvmem;

	/*
	 * First allocate enough kernel MMU tables to map all
	 * of kernel virtual space from KERNBASE to 0xFFFFFFFF.
	 * Note: All must be aligned on 256 byte boundaries.
	 * Start with the level-A table (one of those).
	 */
	size = sizeof(mmu_long_dte_t) * MMU_A_TBL_SIZE;
	kernAbase = pmap_bootstrap_alloc(size);
	memset(kernAbase, 0, size);

	/* Now the level-B kernel tables... */
	size = sizeof(mmu_short_dte_t) * MMU_B_TBL_SIZE * KERN_B_TABLES;
	kernBbase = pmap_bootstrap_alloc(size);
	memset(kernBbase, 0, size);

	/* Now the level-C kernel tables... */
	size = sizeof(mmu_short_pte_t) * MMU_C_TBL_SIZE * KERN_C_TABLES;
	kernCbase = pmap_bootstrap_alloc(size);
	memset(kernCbase, 0, size);
	/*
	 * Note: In order for the PV system to work correctly, the kernel
	 * and user-level C tables must be allocated contiguously.
	 * Nothing should be allocated between here and the allocation of
	 * mmuCbase below.  XXX: Should do this as one allocation, and
	 * then compute a pointer for mmuCbase instead of this...
	 *
	 * Allocate user MMU tables.
	 * These must be contiguous with the preceding.
	 */

#ifndef	FIXED_NTABLES
	/*
	 * The number of user-level C tables that should be allocated is
	 * related to the size of physical memory.  In general, there should
	 * be enough tables to map four times the amount of available RAM.
	 * The extra amount is needed because some table space is wasted by
	 * fragmentation.
	 */
	NUM_C_TABLES = (total_phys_mem * 4) / (MMU_C_TBL_SIZE * MMU_PAGE_SIZE);
	NUM_B_TABLES = NUM_C_TABLES / 2;
	NUM_A_TABLES = NUM_B_TABLES / 2;
#endif	/* !FIXED_NTABLES */
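
	/*
	 * A worked example of the heuristic above, with sizes assumed
	 * for illustration only: given 16MB of RAM, 8KB pages and an
	 * MMU_C_TBL_SIZE of 64, one C table maps 64 * 8KB = 512KB, so
	 *
	 *	NUM_C_TABLES = (16MB * 4) / 512KB = 128
	 *	NUM_B_TABLES = 128 / 2            = 64
	 *	NUM_A_TABLES = 64 / 2             = 32
	 *
	 * i.e. enough A tables for 32 simultaneously resident maps.
	 */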

	size = sizeof(mmu_short_pte_t) * MMU_C_TBL_SIZE * NUM_C_TABLES;
	mmuCbase = pmap_bootstrap_alloc(size);

	size = sizeof(mmu_short_dte_t) * MMU_B_TBL_SIZE * NUM_B_TABLES;
	mmuBbase = pmap_bootstrap_alloc(size);

	size = sizeof(mmu_long_dte_t) * MMU_A_TBL_SIZE * NUM_A_TABLES;
	mmuAbase = pmap_bootstrap_alloc(size);

	/*
	 * Fill in the never-changing part of the kernel tables.
	 * For simplicity, the kernel's mappings will be editable as a
	 * flat array of page table entries at kernCbase.  The
	 * higher level 'A' and 'B' tables must be initialized to point
	 * to this lower one.
	 */
	b = c = 0;

	/*
	 * Invalidate all mappings below KERNBASE in the A table.
	 * This area has already been zeroed out, but it is good
	 * practice to explicitly show that we are interpreting
	 * it as a list of A table descriptors.
	 */
	for (i = 0; i < MMU_TIA(KERNBASE); i++) {
		kernAbase[i].addr.raw = 0;
	}

	/*
	 * Set up the kernel A and B tables so that they will reference the
	 * correct spots in the contiguous table of PTEs allocated for the
	 * kernel's virtual memory space.
	 */
	for (i = MMU_TIA(KERNBASE); i < MMU_A_TBL_SIZE; i++) {
		kernAbase[i].attr.raw =
		    MMU_LONG_DTE_LU | MMU_LONG_DTE_SUPV | MMU_DT_SHORT;
		kernAbase[i].addr.raw = mmu_vtop(&kernBbase[b]);

		for (j = 0; j < MMU_B_TBL_SIZE; j++) {
			kernBbase[b + j].attr.raw = mmu_vtop(&kernCbase[c])
			    | MMU_DT_SHORT;
			c += MMU_C_TBL_SIZE;
		}
		b += MMU_B_TBL_SIZE;
	}
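
	/*
	 * To illustrate what the loops above just built (again assuming
	 * KERNBASE == 0xF8000000 purely for the sake of example):
	 * MMU_TIA(KERNBASE) would be 124, so A entries 124..127 were
	 * linked to the 4 kernel B tables, their 4 * 64 = 256 B entries
	 * were linked to consecutive 64-PTE runs of kernCbase, and
	 * 256 * 64 = 16384 PTEs now cover the kernel's virtual space
	 * as one flat array.
	 */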

	pmap_alloc_usermmu();	/* Allocate user MMU tables.        */
	pmap_alloc_usertmgr();	/* Allocate user MMU table managers.*/
	pmap_alloc_pv();	/* Allocate physical->virtual map.  */

	/*
	 * We are now done with pmap_bootstrap_alloc().  Round up
	 * `virtual_avail' to the nearest page, and set the flag
	 * to prevent use of pmap_bootstrap_alloc() hereafter.
	 */
	pmap_bootstrap_aalign(PAGE_SIZE);
	bootstrap_alloc_enabled = FALSE;

	/*
	 * Now that we are done with pmap_bootstrap_alloc(), we
	 * must save the virtual and physical addresses of the
	 * end of the linearly mapped range, which are stored in
	 * virtual_contig_end and avail_start, respectively.
	 * These variables will never change after this point.
	 */
	virtual_contig_end = virtual_avail;
	avail_start = virtual_avail - KERNBASE;

	/*
	 * `avail_next' is a running pointer used by pmap_next_page() to
	 * keep track of the next available physical page to be handed
	 * to the VM system during its initialization, in which it
	 * asks for physical pages, one at a time.
	 */
	avail_next = avail_start;

	/*
	 * Now allocate some virtual addresses, but not the physical pages
	 * behind them.  Note that virtual_avail is already page-aligned.
	 *
	 * tmp_vpages[] is an array of two virtual pages used for temporary
	 * kernel mappings in the pmap module to facilitate various physical
	 * address-oriented operations.
	 */
	tmp_vpages[0] = virtual_avail;
	virtual_avail += PAGE_SIZE;
	tmp_vpages[1] = virtual_avail;
	virtual_avail += PAGE_SIZE;

	/** Initialize the PV system **/
	pmap_init_pv();

	/*
	 * Fill in the kernel_pmap structure and kernel_crp.
	 */
	kernAphys = mmu_vtop(kernAbase);
	kernel_pmap.pm_a_tmgr = NULL;
	kernel_pmap.pm_a_phys = kernAphys;
	kernel_pmap.pm_refcount = 1; /* always in use */
	simple_lock_init(&kernel_pmap.pm_lock);

	kernel_crp.rp_attr = MMU_LONG_DTE_LU | MMU_DT_LONG;
	kernel_crp.rp_addr = kernAphys;

	/*
	 * Now pmap_enter_kernel() may be used safely and will be
	 * the main interface used hereafter to modify the kernel's
	 * virtual address space.  Note that since we are still running
	 * under the PROM's address table, none of these table modifications
	 * actually take effect until pmap_takeover_mmu() is called.
	 *
	 * Note: Our tables do NOT have the PROM linear mappings!
	 * Only the mappings created here exist in our tables, so
	 * remember to map anything we expect to use.
	 */
	va = (vaddr_t)KERNBASE;
	pa = 0;

	/*
	 * The first page of the kernel virtual address space is the msgbuf
	 * page.  The page attributes (data, non-cached) are set here, while
	 * the address is assigned to this global pointer in cpu_startup().
	 * It is non-cached, mostly due to paranoia.
	 */
	pmap_enter_kernel(va, pa|PMAP_NC, VM_PROT_ALL);
	va += PAGE_SIZE; pa += PAGE_SIZE;

	/* Next page is used as the temporary stack. */
	pmap_enter_kernel(va, pa, VM_PROT_ALL);
	va += PAGE_SIZE; pa += PAGE_SIZE;

	/*
	 * Map all of the kernel's text segment as read-only and cacheable.
	 * (Cacheable is implied by default).  Unfortunately, the last bytes
	 * of kernel text and the first bytes of kernel data will often be
	 * sharing the same page.  Therefore, the last page of kernel text
	 * has to be mapped as read/write, to accommodate the data.
	 */
	eva = m68k_trunc_page((vaddr_t)etext);
	for (; va < eva; va += PAGE_SIZE, pa += PAGE_SIZE)
		pmap_enter_kernel(va, pa, VM_PROT_READ|VM_PROT_EXECUTE);

	/*
	 * Map all of the kernel's data as read/write and cacheable.
	 * This includes: data, BSS, symbols, and everything in the
	 * contiguous memory used by pmap_bootstrap_alloc()
	 */
	for (; pa < avail_start; va += PAGE_SIZE, pa += PAGE_SIZE)
		pmap_enter_kernel(va, pa, VM_PROT_READ|VM_PROT_WRITE);

	/*
	 * At this point we are almost ready to take over the MMU.  But first
	 * we must save the PROM's address space in our map, as we call its
	 * routines and make references to its data later in the kernel.
	 */
	pmap_bootstrap_copyprom();
	pmap_takeover_mmu();
	pmap_bootstrap_setprom();

	/* Notify the VM system of our page size. */
	uvmexp.pagesize = PAGE_SIZE;
	uvm_setpagesize();

	pmap_page_upload();
}


/* pmap_alloc_usermmu INTERNAL
 **
 * Called from pmap_bootstrap() to allocate MMU tables that will
 * eventually be used for user mappings.
 */
void
pmap_alloc_usermmu()
{
	/* XXX: Moved into caller. */
}

/* pmap_alloc_pv INTERNAL
 **
 * Called from pmap_bootstrap() to allocate the physical
 * to virtual mapping list.  Each physical page of memory
 * in the system has a corresponding element in this list.
 */
void
pmap_alloc_pv()
{
	int i;
	unsigned int total_mem;

	/*
	 * Allocate a pv_head structure for every page of physical
	 * memory that will be managed by the system.  Since memory on
	 * the 3/80 is non-contiguous, we cannot arrive at a total page
	 * count by subtraction of the lowest available address from the
	 * highest, but rather we have to step through each memory
	 * bank and add the number of pages in each to the total.
	 *
	 * At this time we also initialize the offset of each bank's
	 * starting pv_head within the pv_head list so that the physical
	 * memory state routines (pmap_is_referenced(),
	 * pmap_is_modified(), et al.) can quickly find corresponding
	 * pv_heads in spite of the non-contiguity.
	 */
	total_mem = 0;
	for (i = 0; i < SUN3X_NPHYS_RAM_SEGS; i++) {
		avail_mem[i].pmem_pvbase = m68k_btop(total_mem);
		total_mem += avail_mem[i].pmem_end -
		    avail_mem[i].pmem_start;
		if (avail_mem[i].pmem_next == NULL)
			break;
	}
	pvbase = (pv_t *)pmap_bootstrap_alloc(sizeof(pv_t) *
	    m68k_btop(total_phys_mem));
}

/* pmap_alloc_usertmgr INTERNAL
 **
 * Called from pmap_bootstrap() to allocate the structures which
 * facilitate management of user MMU tables.  Each user MMU table
 * in the system has one such structure associated with it.
 */
void
pmap_alloc_usertmgr()
{
	/* Allocate user MMU table managers */
	/* It would be a lot simpler to just make these BSS, but */
	/* we may want to change their size at boot time... -j */
	Atmgrbase = (a_tmgr_t *)pmap_bootstrap_alloc(sizeof(a_tmgr_t)
	    * NUM_A_TABLES);
	Btmgrbase = (b_tmgr_t *)pmap_bootstrap_alloc(sizeof(b_tmgr_t)
	    * NUM_B_TABLES);
	Ctmgrbase = (c_tmgr_t *)pmap_bootstrap_alloc(sizeof(c_tmgr_t)
	    * NUM_C_TABLES);

	/*
	 * Allocate PV list elements for the physical to virtual
	 * mapping system.
	 */
	pvebase = (pv_elem_t *)pmap_bootstrap_alloc(
	    sizeof(pv_elem_t) * (NUM_USER_PTES + NUM_KERN_PTES));
}

/* pmap_bootstrap_copyprom() INTERNAL
 **
 * Copy the PROM mappings into our own tables.  Note, we
 * can use physical addresses until __bootstrap returns.
 */
void
pmap_bootstrap_copyprom()
{
	struct sunromvec *romp;
	int *mon_ctbl;
	mmu_short_pte_t *kpte;
	int i, len;

	romp = romVectorPtr;

	/*
	 * Copy the mappings in SUN3X_MON_KDB_BASE...SUN3X_MONEND
	 * Note: mon_ctbl[0] maps SUN3X_MON_KDB_BASE
	 */
	mon_ctbl = *romp->monptaddr;
	i = m68k_btop(SUN3X_MON_KDB_BASE - KERNBASE);
	kpte = &kernCbase[i];
	len = m68k_btop(SUN3X_MONEND - SUN3X_MON_KDB_BASE);

	for (i = 0; i < len; i++) {
		kpte[i].attr.raw = mon_ctbl[i];
	}

	/*
	 * Copy the mappings at MON_DVMA_BASE (to the end).
	 * Note, in here, mon_ctbl[0] maps MON_DVMA_BASE.
	 * Actually, we only want the last page, which the
	 * PROM has set up for use by the "ie" driver.
	 * (The i82586 needs its SCP there.)
	 * If we copy all the mappings, pmap_enter_kernel
	 * may complain about finding valid PTEs that are
	 * not recorded in our PV lists...
	 */
	mon_ctbl = *romp->shadowpteaddr;
	i = m68k_btop(SUN3X_MON_DVMA_BASE - KERNBASE);
	kpte = &kernCbase[i];
	len = m68k_btop(SUN3X_MON_DVMA_SIZE);
	for (i = (len-1); i < len; i++) {
		kpte[i].attr.raw = mon_ctbl[i];
	}
}

/* pmap_takeover_mmu INTERNAL
 **
 * Called from pmap_bootstrap() after it has copied enough of the
 * PROM mappings into the kernel map so that we can use our own
 * MMU table.
 */
void
pmap_takeover_mmu()
{

	loadcrp(&kernel_crp);
}

/* pmap_bootstrap_setprom() INTERNAL
 **
 * Set the PROM mappings so it can see kernel space.
 * Note that physical addresses are used here, which
 * we can get away with because this runs with the
 * low 1GB set for transparent translation.
 */
void
pmap_bootstrap_setprom()
{
	mmu_long_dte_t *mon_dte;
	extern struct mmu_rootptr mon_crp;
	int i;

	mon_dte = (mmu_long_dte_t *)mon_crp.rp_addr;
	for (i = MMU_TIA(KERNBASE); i < MMU_TIA(KERN_END); i++) {
		mon_dte[i].attr.raw = kernAbase[i].attr.raw;
		mon_dte[i].addr.raw = kernAbase[i].addr.raw;
	}
}


/* pmap_init INTERFACE
 **
 * Called at the end of vm_init() to set up the pmap system to go
 * into full time operation.  All initialization of kernel_pmap
 * should be already done by now, so this should just do things
 * needed for user-level pmaps to work.
 */
void
pmap_init()
{
	/** Initialize the manager pools **/
	TAILQ_INIT(&a_pool);
	TAILQ_INIT(&b_pool);
	TAILQ_INIT(&c_pool);

	/**************************************************************
	 * Initialize all tmgr structures and MMU tables they manage. *
	 **************************************************************/
	/** Initialize A tables **/
	pmap_init_a_tables();
	/** Initialize B tables **/
	pmap_init_b_tables();
	/** Initialize C tables **/
	pmap_init_c_tables();

	/** Initialize the pmap pools **/
	pool_init(&pmap_pmap_pool, sizeof(struct pmap), 0, 0, 0, "pmappl",
	    &pool_allocator_nointr);
}

/* pmap_init_a_tables() INTERNAL
 **
 * Initializes all A managers, their MMU A tables, and inserts
 * them into the A manager pool for use by the system.
 */
void
pmap_init_a_tables()
{
	int i;
	a_tmgr_t *a_tbl;

	for (i = 0; i < NUM_A_TABLES; i++) {
		/* Select the next available A manager from the pool */
		a_tbl = &Atmgrbase[i];

		/*
		 * Clear its parent entry.  Set its wired and valid
		 * entry count to zero.
		 */
		a_tbl->at_parent = NULL;
		a_tbl->at_wcnt = a_tbl->at_ecnt = 0;

		/* Assign it the next available MMU A table from the pool */
		a_tbl->at_dtbl = &mmuAbase[i * MMU_A_TBL_SIZE];

		/*
		 * Initialize the MMU A table with the table in the `proc0',
		 * or kernel, mapping.  This ensures that every process has
		 * the kernel mapped in the top part of its address space.
		 */
		memcpy(a_tbl->at_dtbl, kernAbase, MMU_A_TBL_SIZE *
		    sizeof(mmu_long_dte_t));

		/*
		 * Finally, insert the manager into the A pool,
		 * making it ready to be used by the system.
		 */
		TAILQ_INSERT_TAIL(&a_pool, a_tbl, at_link);
	}
}

/* pmap_init_b_tables() INTERNAL
 **
 * Initializes all B table managers, their MMU B tables, and
 * inserts them into the B manager pool for use by the system.
 */
void
pmap_init_b_tables()
{
	int i, j;
	b_tmgr_t *b_tbl;

	for (i = 0; i < NUM_B_TABLES; i++) {
		/* Select the next available B manager from the pool */
		b_tbl = &Btmgrbase[i];

		b_tbl->bt_parent = NULL;	/* clear its parent,  */
		b_tbl->bt_pidx = 0;		/* parent index,      */
		b_tbl->bt_wcnt = 0;		/* wired entry count, */
		b_tbl->bt_ecnt = 0;		/* valid entry count. */

		/* Assign it the next available MMU B table from the pool */
		b_tbl->bt_dtbl = &mmuBbase[i * MMU_B_TBL_SIZE];

		/* Invalidate every descriptor in the table */
		for (j = 0; j < MMU_B_TBL_SIZE; j++)
			b_tbl->bt_dtbl[j].attr.raw = MMU_DT_INVALID;

		/* Insert the manager into the B pool */
		TAILQ_INSERT_TAIL(&b_pool, b_tbl, bt_link);
	}
}

/* pmap_init_c_tables() INTERNAL
 **
 * Initializes all C table managers, their MMU C tables, and
 * inserts them into the C manager pool for use by the system.
 */
void
pmap_init_c_tables()
{
	int i, j;
	c_tmgr_t *c_tbl;

	for (i = 0; i < NUM_C_TABLES; i++) {
		/* Select the next available C manager from the pool */
		c_tbl = &Ctmgrbase[i];

		c_tbl->ct_parent = NULL;	/* clear its parent,     */
		c_tbl->ct_pidx = 0;		/* parent index,         */
		c_tbl->ct_wcnt = 0;		/* wired entry count,    */
		c_tbl->ct_ecnt = 0;		/* valid entry count,    */
		c_tbl->ct_pmap = NULL;		/* parent pmap,          */
		c_tbl->ct_va = 0;		/* base of managed range */

		/* Assign it the next available MMU C table from the pool */
		c_tbl->ct_dtbl = &mmuCbase[i * MMU_C_TBL_SIZE];

		for (j = 0; j < MMU_C_TBL_SIZE; j++)
			c_tbl->ct_dtbl[j].attr.raw = MMU_DT_INVALID;

		TAILQ_INSERT_TAIL(&c_pool, c_tbl, ct_link);
	}
}

/* pmap_init_pv() INTERNAL
 **
 * Initializes the Physical to Virtual mapping system.
 */
void
pmap_init_pv()
{
	int i;

	/* Initialize every PV head. */
	for (i = 0; i < m68k_btop(total_phys_mem); i++) {
		pvbase[i].pv_idx = PVE_EOL;	/* Indicate no mappings */
		pvbase[i].pv_flags = 0;		/* Zero out page flags  */
	}
}

/* get_a_table INTERNAL
 **
 * Retrieve and return a level A table for use in a user map.
 */
a_tmgr_t *
get_a_table()
{
	a_tmgr_t *tbl;
	pmap_t pmap;

	/* Get the top A table in the pool */
	tbl = a_pool.tqh_first;
	if (tbl == NULL) {
		/*
		 * XXX - Instead of panicking here and in other get_x_table
		 * functions, we do have the option of sleeping on the head of
		 * the table pool.  Any function which updates the table pool
		 * would then issue a wakeup() on the head, thus waking up any
		 * processes waiting for a table.
		 *
		 * Actually, the place to sleep would be when some process
		 * asks for a "wired" mapping that would run us short of
		 * mapping resources.  This design DEPENDS on always having
		 * some mapping resources in the pool for stealing, so we
		 * must make sure we NEVER let the pool become empty. -gwr
		 */
		panic("get_a_table: out of A tables.");
	}

	TAILQ_REMOVE(&a_pool, tbl, at_link);
	/*
	 * If the table has a non-null parent pointer then it is in use.
	 * Forcibly abduct it from its parent and clear its entries.
	 * No re-entrancy worries here.  This table would not be in the
	 * table pool unless it was available for use.
	 *
	 * Note that the second argument to free_a_table() is FALSE.  This
	 * indicates that the table should not be relinked into the A table
	 * pool.  That is a job for the function that called us.
	 */
	if (tbl->at_parent) {
		pmap = tbl->at_parent;
		free_a_table(tbl, FALSE);
		pmap->pm_a_tmgr = NULL;
		pmap->pm_a_phys = kernAphys;
	}
	return tbl;
}

/* get_b_table INTERNAL
 **
 * Return a level B table for use.
 */
b_tmgr_t *
get_b_table()
{
	b_tmgr_t *tbl;

	/* See 'get_a_table' for comments. */
	tbl = b_pool.tqh_first;
	if (tbl == NULL)
		panic("get_b_table: out of B tables.");
	TAILQ_REMOVE(&b_pool, tbl, bt_link);
	if (tbl->bt_parent) {
		tbl->bt_parent->at_dtbl[tbl->bt_pidx].attr.raw = MMU_DT_INVALID;
		tbl->bt_parent->at_ecnt--;
		free_b_table(tbl, FALSE);
	}
	return tbl;
}

/* get_c_table INTERNAL
 **
 * Return a level C table for use.
 */
c_tmgr_t *
get_c_table()
{
	c_tmgr_t *tbl;

	/* See 'get_a_table' for comments */
	tbl = c_pool.tqh_first;
	if (tbl == NULL)
		panic("get_c_table: out of C tables.");
	TAILQ_REMOVE(&c_pool, tbl, ct_link);
	if (tbl->ct_parent) {
		tbl->ct_parent->bt_dtbl[tbl->ct_pidx].attr.raw = MMU_DT_INVALID;
		tbl->ct_parent->bt_ecnt--;
		free_c_table(tbl, FALSE);
	}
	return tbl;
}

/*
 * The following 'free_table' and 'steal_table' functions are called to
 * detach tables from their current obligations (parents and children) and
 * prepare them for reuse in another mapping.
 *
 * Free_table is used when the calling function will handle the fate
 * of the parent table, such as returning it to the free pool when it has
 * no valid entries.  Functions that do not want to handle this should
 * call steal_table, in which the parent table's descriptors and entry
 * count are automatically modified when this table is removed.
 */

/* free_a_table INTERNAL
 **
 * Unmaps the given A table and all child tables from their current
 * mappings.  Returns the number of pages that were invalidated.
 * If 'relink' is true, the function will return the table to the head
 * of the available table pool.
 *
 * Cache note: The MC68851 will automatically flush all
 * descriptors derived from a given A table from its
 * Automatic Translation Cache (ATC) if we issue a
 * 'PFLUSHR' instruction with the base address of the
 * table.  This function should do, and does so.
 * Note note: We are using an MC68030 - there is no
 * PFLUSHR.
 */
int
free_a_table(a_tbl, relink)
	a_tmgr_t *a_tbl;
	boolean_t relink;
{
	int i, removed_cnt;
	mmu_long_dte_t	*dte;
	mmu_short_dte_t *dtbl;
	b_tmgr_t	*tmgr;

	/*
	 * Flush the ATC cache of all cached descriptors derived
	 * from this table.
	 * Sun3x does not use 68851's cached table feature
	 * flush_atc_crp(mmu_vtop(a_tbl->dte));
	 */

	/*
	 * Remove any pending cache flushes that were designated
	 * for the pmap this A table belongs to.
	 * a_tbl->parent->atc_flushq[0] = 0;
	 * Not implemented in sun3x.
	 */

	/*
	 * All A tables in the system should retain a map for the
	 * kernel.  If the table contains any valid descriptors
	 * (other than those for the kernel area), invalidate them all,
	 * stopping short of the kernel's entries.
	 */
	removed_cnt = 0;
	if (a_tbl->at_ecnt) {
		dte = a_tbl->at_dtbl;
		for (i = 0; i < MMU_TIA(KERNBASE); i++) {
			/*
			 * If a table entry points to a valid B table, free
			 * it and its children.
			 */
			if (MMU_VALID_DT(dte[i])) {
				/*
				 * The following block does several things,
				 * from innermost expression to the
				 * outermost:
				 * 1) It extracts the base
				 *    address of the B table pointed
				 *    to in the A table entry dte[i].
				 * 2) It converts this base address into
				 *    the virtual address it can be
				 *    accessed with. (all MMU tables point
				 *    to physical addresses.)
				 * 3) It finds the corresponding manager
				 *    structure which manages this MMU table.
				 * 4) It frees the manager structure.
				 *    (This frees the MMU table and all
				 *    child tables. See 'free_b_table' for
				 *    details.)
				 */
				dtbl = mmu_ptov(dte[i].addr.raw);
				tmgr = mmuB2tmgr(dtbl);
				removed_cnt += free_b_table(tmgr, TRUE);
				dte[i].attr.raw = MMU_DT_INVALID;
			}
		}
		a_tbl->at_ecnt = 0;
	}
	if (relink) {
		a_tbl->at_parent = NULL;
		TAILQ_REMOVE(&a_pool, a_tbl, at_link);
		TAILQ_INSERT_HEAD(&a_pool, a_tbl, at_link);
	}
	return removed_cnt;
}

/* free_b_table INTERNAL
 **
 * Unmaps the given B table and all its children from their current
 * mappings.  Returns the number of pages that were invalidated.
 * (For comments, see 'free_a_table()').
 */
int
free_b_table(b_tbl, relink)
	b_tmgr_t *b_tbl;
	boolean_t relink;
{
	int i, removed_cnt;
	mmu_short_dte_t *dte;
	mmu_short_pte_t	*dtbl;
	c_tmgr_t	*tmgr;

	removed_cnt = 0;
	if (b_tbl->bt_ecnt) {
		dte = b_tbl->bt_dtbl;
		for (i = 0; i < MMU_B_TBL_SIZE; i++) {
			if (MMU_VALID_DT(dte[i])) {
				dtbl = mmu_ptov(MMU_DTE_PA(dte[i]));
				tmgr = mmuC2tmgr(dtbl);
				removed_cnt += free_c_table(tmgr, TRUE);
				dte[i].attr.raw = MMU_DT_INVALID;
			}
		}
		b_tbl->bt_ecnt = 0;
	}

	if (relink) {
		b_tbl->bt_parent = NULL;
		TAILQ_REMOVE(&b_pool, b_tbl, bt_link);
		TAILQ_INSERT_HEAD(&b_pool, b_tbl, bt_link);
	}
	return removed_cnt;
}

/* free_c_table INTERNAL
 **
 * Unmaps the given C table from use and returns it to the pool for
 * re-use.  Returns the number of pages that were invalidated.
 *
 * This function preserves any physical page modification information
 * contained in the page descriptors within the C table by calling
 * 'pmap_remove_pte().'
 */
int
free_c_table(c_tbl, relink)
	c_tmgr_t *c_tbl;
	boolean_t relink;
{
	int i, removed_cnt;

	removed_cnt = 0;
	if (c_tbl->ct_ecnt) {
		for (i = 0; i < MMU_C_TBL_SIZE; i++) {
			if (MMU_VALID_DT(c_tbl->ct_dtbl[i])) {
				pmap_remove_pte(&c_tbl->ct_dtbl[i]);
				removed_cnt++;
			}
		}
		c_tbl->ct_ecnt = 0;
	}

	if (relink) {
		c_tbl->ct_parent = NULL;
		TAILQ_REMOVE(&c_pool, c_tbl, ct_link);
		TAILQ_INSERT_HEAD(&c_pool, c_tbl, ct_link);
	}
	return removed_cnt;
}


/* pmap_remove_pte INTERNAL
 **
 * Unmap the given pte and preserve any page modification
 * information by transferring it to the pv head of the
 * physical page it maps to.  This function does not update
 * any reference counts because it is assumed that the calling
 * function will do so.
 */
void
pmap_remove_pte(pte)
	mmu_short_pte_t *pte;
{
	u_short pv_idx, targ_idx;
	paddr_t pa;
	pv_t *pv;

	pa = MMU_PTE_PA(*pte);
	if (is_managed(pa)) {
		pv = pa2pv(pa);
		targ_idx = pteidx(pte);	/* Index of PTE being removed */

		/*
		 * If the PTE being removed is the first (or only) PTE in
		 * the list of PTEs currently mapped to this page, remove the
		 * PTE by changing the index found on the PV head.  Otherwise
		 * a linear search through the list will have to be executed
		 * in order to find the PVE which points to the PTE being
		 * removed, so that it may be modified to point to its new
		 * neighbor.
		 */

		pv_idx = pv->pv_idx;	/* Index of first PTE in PV list */
		if (pv_idx == targ_idx) {
			pv->pv_idx = pvebase[targ_idx].pve_next;
		} else {

			/*
			 * Find the PV element pointing to the target
			 * element.  Note: may have pv_idx==PVE_EOL
			 */

			for (;;) {
				if (pv_idx == PVE_EOL) {
					goto pv_not_found;
				}
				if (pvebase[pv_idx].pve_next == targ_idx)
					break;
				pv_idx = pvebase[pv_idx].pve_next;
			}

			/*
			 * At this point, pv_idx is the index of the PV
			 * element just before the target element in the list.
			 * Unlink the target.
			 */

			pvebase[pv_idx].pve_next = pvebase[targ_idx].pve_next;
		}

		/*
		 * Save the mod/ref bits of the pte by simply
		 * ORing the entire pte onto the pv_flags member
		 * of the pv structure.
		 * There is no need to use a separate bit pattern
		 * for usage information on the pv head than that
		 * which is used on the MMU ptes.
		 */

pv_not_found:
		pv->pv_flags |= (u_short)pte->attr.raw;
	}
	pte->attr.raw = MMU_DT_INVALID;
}
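
/*
 * An illustrative pass through the unlink logic above, with indices
 * chosen arbitrarily.  Suppose the page's PV list is:
 *
 *	pv->pv_idx == 5
 *	pvebase[5].pve_next == 9
 *	pvebase[9].pve_next == PVE_EOL
 *
 * Removing PTE 5 is the head case: pv->pv_idx simply becomes 9.
 * Removing PTE 9 takes the linear search: it stops at index 5 (whose
 * pve_next is the target) and sets pvebase[5].pve_next = PVE_EOL.
 */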

/* pmap_stroll INTERNAL
 **
 * Retrieve the addresses of all table managers involved in the mapping of
 * the given virtual address.  If the table walk completed successfully,
 * return TRUE.  If it was only partially successful, return FALSE.
 * The table walk performed by this function is important to many other
 * functions in this module.
 *
 * Note: This function ought to be easier to read.
 */
boolean_t
pmap_stroll(pmap, va, a_tbl, b_tbl, c_tbl, pte, a_idx, b_idx, pte_idx)
	pmap_t pmap;
	vaddr_t va;
	a_tmgr_t **a_tbl;
	b_tmgr_t **b_tbl;
	c_tmgr_t **c_tbl;
	mmu_short_pte_t **pte;
	int *a_idx, *b_idx, *pte_idx;
{
	mmu_long_dte_t *a_dte;	/* A: long descriptor table */
	mmu_short_dte_t *b_dte;	/* B: short descriptor table */

	if (pmap == pmap_kernel())
		return FALSE;

	/* Does the given pmap have its own A table? */
	*a_tbl = pmap->pm_a_tmgr;
	if (*a_tbl == NULL)
		return FALSE;	/* No.  Return unknown. */
	/* Does the A table have a valid B table
	 * under the corresponding table entry?
	 */
	*a_idx = MMU_TIA(va);
	a_dte = &((*a_tbl)->at_dtbl[*a_idx]);
	if (!MMU_VALID_DT(*a_dte))
		return FALSE;	/* No. Return unknown. */
	/* Yes. Extract B table from the A table. */
	*b_tbl = mmuB2tmgr(mmu_ptov(a_dte->addr.raw));
	/* Does the B table have a valid C table
	 * under the corresponding table entry?
	 */
	*b_idx = MMU_TIB(va);
	b_dte = &((*b_tbl)->bt_dtbl[*b_idx]);
	if (!MMU_VALID_DT(*b_dte))
		return FALSE;	/* No. Return unknown. */
	/* Yes. Extract C table from the B table. */
	*c_tbl = mmuC2tmgr(mmu_ptov(MMU_DTE_PA(*b_dte)));
	*pte_idx = MMU_TIC(va);
	*pte = &((*c_tbl)->ct_dtbl[*pte_idx]);

	return TRUE;
}
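
/*
 * A sketch of a typical pmap_stroll() call as it might appear within
 * this module (the caller-side variable names are arbitrary):
 *
 *	a_tmgr_t *a_tbl;  b_tmgr_t *b_tbl;  c_tmgr_t *c_tbl;
 *	mmu_short_pte_t *pte;
 *	int a_idx, b_idx, pte_idx;
 *
 *	if (pmap_stroll(pmap, va, &a_tbl, &b_tbl, &c_tbl, &pte,
 *	    &a_idx, &b_idx, &pte_idx)) {
 *		... *pte is the PTE currently mapping va ...
 *	}
 *
 * A FALSE return means the walk stopped at an invalid level; only the
 * out-parameters filled in before that point reflect the partial walk.
 */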

/* pmap_enter INTERFACE
 **
 * Called by the kernel to map a virtual address
 * to a physical address in the given process map.
 *
 * Note: this function should apply an exclusive lock
 * on the pmap system for its duration.  (it certainly
 * would save my hair!!)
 * This function ought to be easier to read.
 */
int
pmap_enter(pmap, va, pa, prot, flags)
	pmap_t pmap;
	vaddr_t va;
	paddr_t pa;
	vm_prot_t prot;
	int flags;
{
	boolean_t insert, managed;	/* Marks the need for PV insertion.*/
	u_short nidx;			/* PV list index */
	int mapflags;			/* Flags for the mapping (see NOTE1) */
	u_int a_idx, b_idx, pte_idx;	/* table indices */
	a_tmgr_t *a_tbl;		/* A: long descriptor table manager */
	b_tmgr_t *b_tbl;		/* B: short descriptor table manager */
	c_tmgr_t *c_tbl;		/* C: short page table manager */
	mmu_long_dte_t *a_dte;		/* A: long descriptor table */
	mmu_short_dte_t *b_dte;		/* B: short descriptor table */
	mmu_short_pte_t *c_pte;		/* C: short page descriptor table */
	pv_t *pv;			/* pv list head */
	boolean_t wired;		/* is the mapping to be wired? */
	enum {NONE, NEWA, NEWB, NEWC} llevel; /* used at end */

	if (pmap == pmap_kernel()) {
		pmap_enter_kernel(va, pa, prot);
		return 0;
	}

	/*
	 * Determine if the mapping should be wired.
	 */
	wired = ((flags & PMAP_WIRED) != 0);

	/*
	 * NOTE1:
	 *
	 * On November 13, 1999, someone changed the pmap_enter() API such
	 * that it now accepts a 'flags' argument.  This new argument
	 * contains bit-flags for the architecture-independent (UVM) system to
	 * use in signalling certain mapping requirements to the architecture-
	 * dependent (pmap) system.  The argument it replaces, 'wired', is now
	 * one of the flags within it.
	 *
	 * In addition to flags signaled by the architecture-independent
	 * system, parts of the architecture-dependent section of the sun3x
	 * kernel pass their own flags in the lower, unused bits of the
	 * physical address supplied to this function.  These flags are
	 * extracted and stored in the temporary variable 'mapflags'.
	 *
	 * Extract sun3x specific flags from the physical address.
	 */
	mapflags = (pa & ~MMU_PAGE_MASK);
	pa &= MMU_PAGE_MASK;
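
	/*
	 * For illustration (8KB pages assumed, so that MMU_PAGE_MASK
	 * keeps the upper 19 bits): a caller passing
	 * pa == (0x00384000 | PMAP_NC) leaves mapflags holding PMAP_NC
	 * and pa holding 0x00384000 after the two lines above.
	 */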

	/*
	 * Determine if the physical address being mapped is on-board RAM.
	 * Any other area of the address space is likely to belong to a
1653 	 * device and hence it would be disastrous to cache its contents.
1654 */
1655 if ((managed = is_managed(pa)) == FALSE)
1656 mapflags |= PMAP_NC;
1657
1658 /*
1659 * For user mappings we walk along the MMU tables of the given
1660 * pmap, reaching a PTE which describes the virtual page being
1661 * mapped or changed. If any level of the walk ends in an invalid
1662 * entry, a table must be allocated and the entry must be updated
1663 * to point to it.
1664 * There is a bit of confusion as to whether this code must be
1665 * re-entrant. For now we will assume it is. To support
1666 * re-entrancy we must unlink tables from the table pool before
1667 * we assume we may use them. Tables are re-linked into the pool
1668 * when we are finished with them at the end of the function.
1669 * But I don't feel like doing that until we have proof that this
1670 * needs to be re-entrant.
1671 * 'llevel' records which tables need to be relinked.
1672 */
1673 llevel = NONE;
1674
1675 /*
1676 * Step 1 - Retrieve the A table from the pmap. If it has no
1677 * A table, allocate a new one from the available pool.
1678 */
1679
1680 a_tbl = pmap->pm_a_tmgr;
1681 if (a_tbl == NULL) {
1682 /*
1683 * This pmap does not currently have an A table. Allocate
1684 * a new one.
1685 */
1686 a_tbl = get_a_table();
1687 a_tbl->at_parent = pmap;
1688
1689 /*
1690 * Assign this new A table to the pmap, and calculate its
1691 * physical address so that loadcrp() can be used to make
1692 * the table active.
1693 */
1694 pmap->pm_a_tmgr = a_tbl;
1695 pmap->pm_a_phys = mmu_vtop(a_tbl->at_dtbl);
1696
1697 /*
1698 * If the process receiving a new A table is the current
1699 * process, we are responsible for setting the MMU so that
1700 * it becomes the current address space. This only adds
1701 * new mappings, so no need to flush anything.
1702 */
1703 if (pmap == current_pmap()) {
1704 kernel_crp.rp_addr = pmap->pm_a_phys;
1705 loadcrp(&kernel_crp);
1706 }
1707
1708 if (!wired)
1709 llevel = NEWA;
1710 } else {
1711 /*
1712 * Use the A table already allocated for this pmap.
1713 * Unlink it from the A table pool if necessary.
1714 */
1715 if (wired && !a_tbl->at_wcnt)
1716 TAILQ_REMOVE(&a_pool, a_tbl, at_link);
1717 }
1718
1719 /*
1720 * Step 2 - Walk into the B table. If there is no valid B table,
1721 * allocate one.
1722 */
1723
1724 a_idx = MMU_TIA(va); /* Calculate the TIA of the VA. */
1725 a_dte = &a_tbl->at_dtbl[a_idx]; /* Retrieve descriptor from table */
1726 if (MMU_VALID_DT(*a_dte)) { /* Is the descriptor valid? */
1727 /* The descriptor is valid. Use the B table it points to. */
1728 /*************************************
1729 * a_idx *
1730 * v *
1731 * a_tbl -> +-+-+-+-+-+-+-+-+-+-+-+- *
1732 * | | | | | | | | | | | | *
1733 * +-+-+-+-+-+-+-+-+-+-+-+- *
1734 * | *
1735 * \- b_tbl -> +-+- *
1736 * | | *
1737 * +-+- *
1738 *************************************/
1739 b_dte = mmu_ptov(a_dte->addr.raw);
1740 b_tbl = mmuB2tmgr(b_dte);
1741
1742 /*
1743 * If the requested mapping must be wired, but this table
1744 * being used to map it is not, the table must be removed
1745 * from the available pool and its wired entry count
1746 * incremented.
1747 */
1748 if (wired && !b_tbl->bt_wcnt) {
1749 TAILQ_REMOVE(&b_pool, b_tbl, bt_link);
1750 a_tbl->at_wcnt++;
1751 }
1752 } else {
1753 /* The descriptor is invalid. Allocate a new B table. */
1754 b_tbl = get_b_table();
1755
1756 /* Point the parent A table descriptor to this new B table. */
1757 a_dte->addr.raw = mmu_vtop(b_tbl->bt_dtbl);
1758 a_dte->attr.raw = MMU_LONG_DTE_LU | MMU_DT_SHORT;
1759 a_tbl->at_ecnt++; /* Update parent's valid entry count */
1760
1761 /* Create the necessary back references to the parent table */
1762 b_tbl->bt_parent = a_tbl;
1763 b_tbl->bt_pidx = a_idx;
1764
1765 /*
1766 * If this table is to be wired, make sure the parent A table
1767 * wired count is updated to reflect that it has another wired
1768 * entry.
1769 */
1770 if (wired)
1771 a_tbl->at_wcnt++;
1772 else if (llevel == NONE)
1773 llevel = NEWB;
1774 }
1775
1776 /*
1777 	 * Step 3 - Walk into the C table.  If there is no valid C table,
1778 * allocate one.
1779 */
1780
1781 b_idx = MMU_TIB(va); /* Calculate the TIB of the VA */
1782 b_dte = &b_tbl->bt_dtbl[b_idx]; /* Retrieve descriptor from table */
1783 if (MMU_VALID_DT(*b_dte)) { /* Is the descriptor valid? */
1784 /* The descriptor is valid. Use the C table it points to. */
1785 /**************************************
1786 * c_idx *
1787 * | v *
1788 * \- b_tbl -> +-+-+-+-+-+-+-+-+-+-+- *
1789 * | | | | | | | | | | | *
1790 * +-+-+-+-+-+-+-+-+-+-+- *
1791 * | *
1792 * \- c_tbl -> +-+-- *
1793 * | | | *
1794 * +-+-- *
1795 **************************************/
1796 c_pte = mmu_ptov(MMU_PTE_PA(*b_dte));
1797 c_tbl = mmuC2tmgr(c_pte);
1798
1799 		/* If the mapping is wired but the C table is not, wire it. */
1800 if (wired && !c_tbl->ct_wcnt) {
1801 TAILQ_REMOVE(&c_pool, c_tbl, ct_link);
1802 b_tbl->bt_wcnt++;
1803 }
1804 } else {
1805 /* The descriptor is invalid. Allocate a new C table. */
1806 c_tbl = get_c_table();
1807
1808 /* Point the parent B table descriptor to this new C table. */
1809 b_dte->attr.raw = mmu_vtop(c_tbl->ct_dtbl);
1810 b_dte->attr.raw |= MMU_DT_SHORT;
1811 b_tbl->bt_ecnt++; /* Update parent's valid entry count */
1812
1813 /* Create the necessary back references to the parent table */
1814 c_tbl->ct_parent = b_tbl;
1815 c_tbl->ct_pidx = b_idx;
1816 /*
1817 * Store the pmap and base virtual managed address for faster
1818 * retrieval in the PV functions.
1819 */
1820 c_tbl->ct_pmap = pmap;
1821 c_tbl->ct_va = (va & (MMU_TIA_MASK|MMU_TIB_MASK));
1822
1823 /*
1824 * If this table is to be wired, make sure the parent B table
1825 * wired count is updated to reflect that it has another wired
1826 * entry.
1827 */
1828 if (wired)
1829 b_tbl->bt_wcnt++;
1830 else if (llevel == NONE)
1831 llevel = NEWC;
1832 }
1833
1834 /*
1835 * Step 4 - Deposit a page descriptor (PTE) into the appropriate
1836 * slot of the C table, describing the PA to which the VA is mapped.
1837 */
1838
1839 pte_idx = MMU_TIC(va);
1840 c_pte = &c_tbl->ct_dtbl[pte_idx];
1841 if (MMU_VALID_DT(*c_pte)) { /* Is the entry currently valid? */
1842 /*
1843 * The PTE is currently valid. This particular call
1844 * is just a synonym for one (or more) of the following
1845 * operations:
1846 * change protection of a page
1847 * change wiring status of a page
1848 * remove the mapping of a page
1849 *
1850 * XXX - Semi critical: This code should unwire the PTE
1851 * and, possibly, associated parent tables if this is a
1852 * change wiring operation. Currently it does not.
1853 *
1854 * This may be ok if pmap_unwire() is the only
1855 * interface used to UNWIRE a page.
1856 */
1857
1858 /* First check if this is a wiring operation. */
1859 if (wired && (c_pte->attr.raw & MMU_SHORT_PTE_WIRED)) {
1860 /*
1861 * The PTE is already wired. To prevent it from being
1862 * counted as a new wiring operation, reset the 'wired'
1863 * variable.
1864 */
1865 wired = FALSE;
1866 }
1867
1868 /* Is the new address the same as the old? */
1869 if (MMU_PTE_PA(*c_pte) == pa) {
1870 /*
1871 * Yes, mark that it does not need to be reinserted
1872 * into the PV list.
1873 */
1874 insert = FALSE;
1875
1876 /*
1877 * Clear all but the modified, referenced and wired
1878 * bits on the PTE.
1879 */
1880 c_pte->attr.raw &= (MMU_SHORT_PTE_M
1881 | MMU_SHORT_PTE_USED | MMU_SHORT_PTE_WIRED);
1882 } else {
1883 /* No, remove the old entry */
1884 pmap_remove_pte(c_pte);
1885 insert = TRUE;
1886 }
1887
1888 /*
1889 * TLB flush is only necessary if modifying current map.
1890 * However, in pmap_enter(), the pmap almost always IS
1891 * the current pmap, so don't even bother to check.
1892 */
1893 TBIS(va);
1894 } else {
1895 /*
1896 * The PTE is invalid. Increment the valid entry count in
1897 * the C table manager to reflect the addition of a new entry.
1898 */
1899 c_tbl->ct_ecnt++;
1900
1901 /* XXX - temporarily make sure the PTE is cleared. */
1902 c_pte->attr.raw = 0;
1903
1904 /* It will also need to be inserted into the PV list. */
1905 insert = TRUE;
1906 }
1907
1908 /*
1909 * If page is changing from unwired to wired status, set an unused bit
1910 * within the PTE to indicate that it is wired. Also increment the
1911 * wired entry count in the C table manager.
1912 */
1913 if (wired) {
1914 c_pte->attr.raw |= MMU_SHORT_PTE_WIRED;
1915 c_tbl->ct_wcnt++;
1916 }
1917
1918 /*
1919 * Map the page, being careful to preserve modify/reference/wired
1920 * bits. At this point it is assumed that the PTE either has no bits
1921 	 * set, or if there are set bits, they are only modified, referenced or
1922 * wired bits. If not, the following statement will cause erratic
1923 * behavior.
1924 */
1925 #ifdef PMAP_DEBUG
1926 if (c_pte->attr.raw & ~(MMU_SHORT_PTE_M |
1927 MMU_SHORT_PTE_USED | MMU_SHORT_PTE_WIRED)) {
1928 printf("pmap_enter: junk left in PTE at %p\n", c_pte);
1929 Debugger();
1930 }
1931 #endif
1932 c_pte->attr.raw |= ((u_long) pa | MMU_DT_PAGE);
1933
1934 /*
1935 * If the mapping should be read-only, set the write protect
1936 * bit in the PTE.
1937 */
1938 if (!(prot & VM_PROT_WRITE))
1939 c_pte->attr.raw |= MMU_SHORT_PTE_WP;
1940
1941 /*
1942 * Mark the PTE as used and/or modified as specified by the flags arg.
1943 */
1944 if (flags & VM_PROT_ALL) {
1945 c_pte->attr.raw |= MMU_SHORT_PTE_USED;
1946 if (flags & VM_PROT_WRITE) {
1947 c_pte->attr.raw |= MMU_SHORT_PTE_M;
1948 }
1949 }
1950
1951 /*
1952 	 * If the mapping should be cache inhibited (indicated by flag bits
1953 	 * in the low-order bits of the physical address), mark the PTE
1954 	 * as a cache-inhibited page.
1955 */
1956 if (mapflags & PMAP_NC)
1957 c_pte->attr.raw |= MMU_SHORT_PTE_CI;
1958
1959 /*
1960 * If the physical address being mapped is managed by the PV
1961 * system then link the pte into the list of pages mapped to that
1962 * address.
1963 */
1964 if (insert && managed) {
1965 pv = pa2pv(pa);
1966 nidx = pteidx(c_pte);
1967
1968 pvebase[nidx].pve_next = pv->pv_idx;
1969 pv->pv_idx = nidx;
1970 }
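
	/*
	 * The PV list is index-linked: pv->pv_idx heads a chain of PTE
	 * indices threaded through pvebase[].pve_next, so the insertion
	 * above simply pushes this PTE onto the head of the page's list.
	 */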
1971
1972 /* Move any allocated tables back into the active pool. */
1973
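	/*
	 * Note the deliberate fall-through below: a newly allocated A
	 * table implies that the B and C tables beneath it are new as
	 * well, so each case relinks its own table and all those below it.
	 */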
1974 switch (llevel) {
1975 case NEWA:
1976 TAILQ_INSERT_TAIL(&a_pool, a_tbl, at_link);
1977 /* FALLTHROUGH */
1978 case NEWB:
1979 TAILQ_INSERT_TAIL(&b_pool, b_tbl, bt_link);
1980 /* FALLTHROUGH */
1981 case NEWC:
1982 TAILQ_INSERT_TAIL(&c_pool, c_tbl, ct_link);
1983 /* FALLTHROUGH */
1984 default:
1985 break;
1986 }
1987
1988 return 0;
1989 }
1990
1991 /* pmap_enter_kernel INTERNAL
1992 **
1993 * Map the given virtual address to the given physical address within the
1994 * kernel address space. This function exists because the kernel map does
1995 * not do dynamic table allocation. It consists of a contiguous array of ptes
1996 * and can be edited directly without the need to walk through any tables.
1997 *
1998 * XXX: "Danger, Will Robinson!"
1999 * Note that the kernel should never take a fault on any page
2000 * between [ KERNBASE .. virtual_avail ] and this is checked in
2001 * trap.c for kernel-mode MMU faults. This means that mappings
2002  * created in that range must be implicitly wired. -gwr
2003 */
2004 void
2005 pmap_enter_kernel(va, pa, prot)
2006 vaddr_t va;
2007 paddr_t pa;
2008 vm_prot_t prot;
2009 {
2010 boolean_t was_valid, insert;
2011 u_short pte_idx;
2012 int flags;
2013 mmu_short_pte_t *pte;
2014 pv_t *pv;
2015 paddr_t old_pa;
2016
2017 flags = (pa & ~MMU_PAGE_MASK);
2018 pa &= MMU_PAGE_MASK;
2019
2020 if (is_managed(pa))
2021 insert = TRUE;
2022 else
2023 insert = FALSE;
2024
2025 /*
2026 * Calculate the index of the PTE being modified.
2027 */
2028 pte_idx = (u_long) m68k_btop(va - KERNBASE);
2029
2030 /* This array is traditionally named "Sysmap" */
2031 pte = &kernCbase[pte_idx];
2032
2033 if (MMU_VALID_DT(*pte)) {
2034 was_valid = TRUE;
2035 /*
2036 * If the PTE already maps a different
2037 		 * physical address, unmap and pv_unlink.
2038 */
2039 old_pa = MMU_PTE_PA(*pte);
2040 if (pa != old_pa)
2041 pmap_remove_pte(pte);
2042 else {
2043 /*
2044 * Old PA and new PA are the same. No need to
2045 * relink the mapping within the PV list.
2046 */
2047 insert = FALSE;
2048
2049 /*
2050 * Save any mod/ref bits on the PTE.
2051 */
2052 pte->attr.raw &= (MMU_SHORT_PTE_USED|MMU_SHORT_PTE_M);
2053 }
2054 } else {
2055 pte->attr.raw = MMU_DT_INVALID;
2056 was_valid = FALSE;
2057 }
2058
2059 /*
2060 	 * Map the page, being careful to preserve the modified/referenced
2061 	 * bits on the PTE.
2062 */
2063 pte->attr.raw |= (pa | MMU_DT_PAGE);
2064
2065 if (!(prot & VM_PROT_WRITE)) /* If access should be read-only */
2066 pte->attr.raw |= MMU_SHORT_PTE_WP;
2067 if (flags & PMAP_NC)
2068 pte->attr.raw |= MMU_SHORT_PTE_CI;
2069 if (was_valid)
2070 TBIS(va);
2071
2072 /*
2073 * Insert the PTE into the PV system, if need be.
2074 */
2075 if (insert) {
2076 pv = pa2pv(pa);
2077 pvebase[pte_idx].pve_next = pv->pv_idx;
2078 pv->pv_idx = pte_idx;
2079 }
2080 }
2081
2082 void
2083 pmap_kenter_pa(va, pa, prot)
2084 vaddr_t va;
2085 paddr_t pa;
2086 vm_prot_t prot;
2087 {
2088 mmu_short_pte_t *pte;
2089
2090 /* This array is traditionally named "Sysmap" */
2091 pte = &kernCbase[(u_long)m68k_btop(va - KERNBASE)];
2092
2093 KASSERT(!MMU_VALID_DT(*pte));
2094 pte->attr.raw = MMU_DT_INVALID | MMU_DT_PAGE | (pa & MMU_PAGE_MASK);
2095 if (!(prot & VM_PROT_WRITE))
2096 pte->attr.raw |= MMU_SHORT_PTE_WP;
2097 }
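
/*
 * Note that pmap_kenter_pa() does no PV bookkeeping and no TLB flush;
 * such mappings must be undone with pmap_kremove().  A typical use,
 * as in pmap_copy_page() below:
 *
 *	pmap_kenter_pa(dstva, dstpa, VM_PROT_READ|VM_PROT_WRITE);
 *	...use the mapping...
 *	pmap_kremove(dstva, PAGE_SIZE);
 */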
2098
2099 void
2100 pmap_kremove(va, len)
2101 vaddr_t va;
2102 vsize_t len;
2103 {
2104 int idx, eidx;
2105
2106 #ifdef PMAP_DEBUG
2107 	if ((va & PGOFSET) || (len & PGOFSET))
2108 panic("pmap_kremove: alignment");
2109 #endif
2110
2111 idx = m68k_btop(va - KERNBASE);
2112 eidx = m68k_btop(va + len - KERNBASE);
2113
2114 while (idx < eidx) {
2115 kernCbase[idx++].attr.raw = MMU_DT_INVALID;
2116 TBIS(va);
2117 va += PAGE_SIZE;
2118 }
2119 }
2120
2121 /* pmap_map INTERNAL
2122 **
2123 * Map a contiguous range of physical memory into a contiguous range of
2124 * the kernel virtual address space.
2125 *
2126 * Used for device mappings and early mapping of the kernel text/data/bss.
2127 * Returns the first virtual address beyond the end of the range.
2128 */
2129 vaddr_t
2130 pmap_map(va, pa, endpa, prot)
2131 vaddr_t va;
2132 paddr_t pa;
2133 paddr_t endpa;
2134 int prot;
2135 {
2136 int sz;
2137
2138 sz = endpa - pa;
2139 do {
2140 pmap_enter_kernel(va, pa, prot);
2141 va += PAGE_SIZE;
2142 pa += PAGE_SIZE;
2143 sz -= PAGE_SIZE;
2144 } while (sz > 0);
2145 pmap_update(pmap_kernel());
2146 return(va);
2147 }
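
/*
 * Example (hypothetical addresses): map a 64KB device region during
 * bootstrap and advance the kernel VA allocation cursor:
 *
 *	va = pmap_map(va, 0xfe000000, 0xfe010000,
 *	    VM_PROT_READ|VM_PROT_WRITE);
 */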
2148
2149 /* pmap_protect INTERFACE
2150 **
2151 * Apply the given protection to the given virtual address range within
2152 * the given map.
2153 *
2154 * It is ok for the protection applied to be stronger than what is
2155 * specified. We use this to our advantage when the given map has no
2156 * mapping for the virtual address. By skipping a page when this
2157 * is discovered, we are effectively applying a protection of VM_PROT_NONE,
2158 * and therefore do not need to map the page just to apply a protection
2159 * code. Only pmap_enter() needs to create new mappings if they do not exist.
2160 *
2161  * XXX - This function could be sped up by using pmap_stroll() for initial
2162 * setup, and then manual scrolling in the for() loop.
2163 */
2164 void
2165 pmap_protect(pmap, startva, endva, prot)
2166 pmap_t pmap;
2167 vaddr_t startva, endva;
2168 vm_prot_t prot;
2169 {
2170 boolean_t iscurpmap;
2171 int a_idx, b_idx, c_idx;
2172 a_tmgr_t *a_tbl;
2173 b_tmgr_t *b_tbl;
2174 c_tmgr_t *c_tbl;
2175 mmu_short_pte_t *pte;
2176
2177 if (pmap == pmap_kernel()) {
2178 pmap_protect_kernel(startva, endva, prot);
2179 return;
2180 }
2181
2182 /*
2183 * In this particular pmap implementation, there are only three
2184 * types of memory protection: 'all' (read/write/execute),
2185 * 'read-only' (read/execute) and 'none' (no mapping.)
2186 * It is not possible for us to treat 'executable' as a separate
2187 * protection type. Therefore, protection requests that seek to
2188 * remove execute permission while retaining read or write, and those
2189 * that make little sense (write-only for example) are ignored.
2190 */
2191 switch (prot) {
2192 case VM_PROT_NONE:
2193 /*
2194 * A request to apply the protection code of
2195 * 'VM_PROT_NONE' is a synonym for pmap_remove().
2196 */
2197 pmap_remove(pmap, startva, endva);
2198 return;
2199 case VM_PROT_EXECUTE:
2200 case VM_PROT_READ:
2201 case VM_PROT_READ|VM_PROT_EXECUTE:
2202 /* continue */
2203 break;
2204 case VM_PROT_WRITE:
2205 case VM_PROT_WRITE|VM_PROT_READ:
2206 case VM_PROT_WRITE|VM_PROT_EXECUTE:
2207 case VM_PROT_ALL:
2208 /* None of these should happen in a sane system. */
2209 return;
2210 }
2211
2212 /*
2213 * If the pmap has no A table, it has no mappings and therefore
2214 * there is nothing to protect.
2215 */
2216 if ((a_tbl = pmap->pm_a_tmgr) == NULL)
2217 return;
2218
2219 a_idx = MMU_TIA(startva);
2220 b_idx = MMU_TIB(startva);
2221 c_idx = MMU_TIC(startva);
2222 	b_tbl = NULL;
	c_tbl = NULL;
2223
2224 iscurpmap = (pmap == current_pmap());
2225 while (startva < endva) {
2226 if (b_tbl || MMU_VALID_DT(a_tbl->at_dtbl[a_idx])) {
2227 if (b_tbl == NULL) {
2228 b_tbl = (b_tmgr_t *) a_tbl->at_dtbl[a_idx].addr.raw;
2229 b_tbl = mmu_ptov((vaddr_t)b_tbl);
2230 b_tbl = mmuB2tmgr((mmu_short_dte_t *)b_tbl);
2231 }
2232 if (c_tbl || MMU_VALID_DT(b_tbl->bt_dtbl[b_idx])) {
2233 if (c_tbl == NULL) {
2234 c_tbl = (c_tmgr_t *) MMU_DTE_PA(b_tbl->bt_dtbl[b_idx]);
2235 c_tbl = mmu_ptov((vaddr_t)c_tbl);
2236 c_tbl = mmuC2tmgr((mmu_short_pte_t *)c_tbl);
2237 }
2238 if (MMU_VALID_DT(c_tbl->ct_dtbl[c_idx])) {
2239 pte = &c_tbl->ct_dtbl[c_idx];
2240 /* make the mapping read-only */
2241 pte->attr.raw |= MMU_SHORT_PTE_WP;
2242 /*
2243 * If we just modified the current address space,
2244 * flush any translations for the modified page from
2245 * the translation cache and any data from it in the
2246 * data cache.
2247 */
2248 if (iscurpmap)
2249 TBIS(startva);
2250 }
2251 startva += PAGE_SIZE;
2252
2253 if (++c_idx >= MMU_C_TBL_SIZE) { /* exceeded C table? */
2254 c_tbl = NULL;
2255 c_idx = 0;
2256 if (++b_idx >= MMU_B_TBL_SIZE) { /* exceeded B table? */
2257 b_tbl = NULL;
2258 b_idx = 0;
2259 }
2260 }
2261 } else { /* C table wasn't valid */
2262 c_tbl = NULL;
2263 c_idx = 0;
2264 startva += MMU_TIB_RANGE;
2265 if (++b_idx >= MMU_B_TBL_SIZE) { /* exceeded B table? */
2266 b_tbl = NULL;
2267 b_idx = 0;
2268 }
2269 } /* C table */
2270 } else { /* B table wasn't valid */
2271 b_tbl = NULL;
2272 b_idx = 0;
2273 startva += MMU_TIA_RANGE;
2274 a_idx++;
2275 } /* B table */
2276 }
2277 }
2278
2279 /* pmap_protect_kernel INTERNAL
2280 **
2281 * Apply the given protection code to a kernel address range.
2282 */
2283 void
2284 pmap_protect_kernel(startva, endva, prot)
2285 vaddr_t startva, endva;
2286 vm_prot_t prot;
2287 {
2288 vaddr_t va;
2289 mmu_short_pte_t *pte;
2290
2291 pte = &kernCbase[(unsigned long) m68k_btop(startva - KERNBASE)];
2292 for (va = startva; va < endva; va += PAGE_SIZE, pte++) {
2293 if (MMU_VALID_DT(*pte)) {
2294 switch (prot) {
2295 case VM_PROT_ALL:
2296 break;
2297 case VM_PROT_EXECUTE:
2298 case VM_PROT_READ:
2299 case VM_PROT_READ|VM_PROT_EXECUTE:
2300 pte->attr.raw |= MMU_SHORT_PTE_WP;
2301 break;
2302 case VM_PROT_NONE:
2303 /* this is an alias for 'pmap_remove_kernel' */
2304 pmap_remove_pte(pte);
2305 break;
2306 default:
2307 break;
2308 }
2309 /*
2310 * since this is the kernel, immediately flush any cached
2311 * descriptors for this address.
2312 */
2313 TBIS(va);
2314 }
2315 }
2316 }
2317
2318 /* pmap_unwire INTERFACE
2319 **
2320 * Clear the wired attribute of the specified page.
2321 *
2322 * This function is called from vm_fault.c to unwire
2323 * a mapping.
2324 */
2325 void
2326 pmap_unwire(pmap, va)
2327 pmap_t pmap;
2328 vaddr_t va;
2329 {
2330 int a_idx, b_idx, c_idx;
2331 a_tmgr_t *a_tbl;
2332 b_tmgr_t *b_tbl;
2333 c_tmgr_t *c_tbl;
2334 mmu_short_pte_t *pte;
2335
2336 /* Kernel mappings always remain wired. */
2337 if (pmap == pmap_kernel())
2338 return;
2339
2340 /*
2341 * Walk through the tables. If the walk terminates without
2342 * a valid PTE then the address wasn't wired in the first place.
2343 * Return immediately.
2344 */
2345 if (pmap_stroll(pmap, va, &a_tbl, &b_tbl, &c_tbl, &pte, &a_idx,
2346 &b_idx, &c_idx) == FALSE)
2347 return;
2348
2349
2350 /* Is the PTE wired? If not, return. */
2351 if (!(pte->attr.raw & MMU_SHORT_PTE_WIRED))
2352 return;
2353
2354 /* Remove the wiring bit. */
2355 pte->attr.raw &= ~(MMU_SHORT_PTE_WIRED);
2356
2357 /*
2358 * Decrement the wired entry count in the C table.
2359 * If it reaches zero the following things happen:
2360 * 1. The table no longer has any wired entries and is considered
2361 * unwired.
2362 * 2. It is placed on the available queue.
2363 * 3. The parent table's wired entry count is decremented.
2364 * 4. If it reaches zero, this process repeats at step 1 and
2365 	 *    stops after reaching the A table.
2366 */
2367 if (--c_tbl->ct_wcnt == 0) {
2368 TAILQ_INSERT_TAIL(&c_pool, c_tbl, ct_link);
2369 if (--b_tbl->bt_wcnt == 0) {
2370 TAILQ_INSERT_TAIL(&b_pool, b_tbl, bt_link);
2371 if (--a_tbl->at_wcnt == 0) {
2372 TAILQ_INSERT_TAIL(&a_pool, a_tbl, at_link);
2373 }
2374 }
2375 }
2376 }
2377
2378 /* pmap_copy INTERFACE
2379 **
2380 * Copy the mappings of a range of addresses in one pmap, into
2381 * the destination address of another.
2382 *
2383 * This routine is advisory. Should we one day decide that MMU tables
2384 * may be shared by more than one pmap, this function should be used to
2385 * link them together. Until that day however, we do nothing.
2386 */
2387 void
2388 pmap_copy(pmap_a, pmap_b, dst, len, src)
2389 pmap_t pmap_a, pmap_b;
2390 vaddr_t dst;
2391 vsize_t len;
2392 vaddr_t src;
2393 {
2394 /* not implemented. */
2395 }
2396
2397 /* pmap_copy_page INTERFACE
2398 **
2399 * Copy the contents of one physical page into another.
2400 *
2401 * This function makes use of two virtual pages allocated in pmap_bootstrap()
2402 * to map the two specified physical pages into the kernel address space.
2403 *
2404 * Note: We could use the transparent translation registers to make the
2405 * mappings. If we do so, be sure to disable interrupts before using them.
2406 */
2407 void
2408 pmap_copy_page(srcpa, dstpa)
2409 paddr_t srcpa, dstpa;
2410 {
2411 vaddr_t srcva, dstva;
2412 int s;
2413
2414 srcva = tmp_vpages[0];
2415 dstva = tmp_vpages[1];
2416
2417 s = splvm();
2418 #ifdef DIAGNOSTIC
2419 if (tmp_vpages_inuse++)
2420 panic("pmap_copy_page: temporary vpages are in use.");
2421 #endif
2422
2423 	/* Map pages as non-cacheable to avoid cache pollution? */
2424 pmap_kenter_pa(srcva, srcpa, VM_PROT_READ);
2425 pmap_kenter_pa(dstva, dstpa, VM_PROT_READ|VM_PROT_WRITE);
2426
2427 /* Hand-optimized version of bcopy(src, dst, PAGE_SIZE) */
2428 copypage((char *) srcva, (char *) dstva);
2429
2430 pmap_kremove(srcva, PAGE_SIZE);
2431 pmap_kremove(dstva, PAGE_SIZE);
2432
2433 #ifdef DIAGNOSTIC
2434 --tmp_vpages_inuse;
2435 #endif
2436 splx(s);
2437 }
2438
2439 /* pmap_zero_page INTERFACE
2440 **
2441 * Zero the contents of the specified physical page.
2442 *
2443  * Uses one of the virtual pages allocated in pmap_bootstrap()
2444 * to map the specified page into the kernel address space.
2445 */
2446 void
2447 pmap_zero_page(dstpa)
2448 paddr_t dstpa;
2449 {
2450 vaddr_t dstva;
2451 int s;
2452
2453 dstva = tmp_vpages[1];
2454 s = splvm();
2455 #ifdef DIAGNOSTIC
2456 if (tmp_vpages_inuse++)
2457 panic("pmap_zero_page: temporary vpages are in use.");
2458 #endif
2459
2460 /* The comments in pmap_copy_page() above apply here also. */
2461 pmap_kenter_pa(dstva, dstpa, VM_PROT_READ|VM_PROT_WRITE);
2462
2463 /* Hand-optimized version of bzero(ptr, PAGE_SIZE) */
2464 zeropage((char *) dstva);
2465
2466 pmap_kremove(dstva, PAGE_SIZE);
2467 #ifdef DIAGNOSTIC
2468 --tmp_vpages_inuse;
2469 #endif
2470 splx(s);
2471 }
2472
2473 /* pmap_collect INTERFACE
2474 **
2475 * Called from the VM system when we are about to swap out
2476 * the process using this pmap. This should give up any
2477 * resources held here, including all its MMU tables.
2478 */
2479 void
2480 pmap_collect(pmap)
2481 pmap_t pmap;
2482 {
2483 /* XXX - todo... */
2484 }
2485
2486 /* pmap_create INTERFACE
2487 **
2488 * Create and return a pmap structure.
2489 */
2490 pmap_t
2491 pmap_create()
2492 {
2493 pmap_t pmap;
2494
2495 pmap = pool_get(&pmap_pmap_pool, PR_WAITOK);
2496 pmap_pinit(pmap);
2497 return pmap;
2498 }
2499
2500 /* pmap_pinit INTERNAL
2501 **
2502 * Initialize a pmap structure.
2503 */
2504 void
2505 pmap_pinit(pmap)
2506 pmap_t pmap;
2507 {
2508 memset(pmap, 0, sizeof(struct pmap));
2509 pmap->pm_a_tmgr = NULL;
2510 pmap->pm_a_phys = kernAphys;
2511 pmap->pm_refcount = 1;
2512 simple_lock_init(&pmap->pm_lock);
2513 }
2514
2515 /* pmap_release INTERFACE
2516 **
2517 * Release any resources held by the given pmap.
2518 *
2519 * This is the reverse analog to pmap_pinit. It does not
2520  * necessarily imply that the pmap structure will be deallocated,
2521 * as in pmap_destroy.
2522 */
2523 void
2524 pmap_release(pmap)
2525 pmap_t pmap;
2526 {
2527 /*
2528 * As long as the pmap contains no mappings,
2529 * which always should be the case whenever
2530 * this function is called, there really should
2531 * be nothing to do.
2532 */
2533 #ifdef PMAP_DEBUG
2534 if (pmap == pmap_kernel())
2535 panic("pmap_release: kernel pmap");
2536 #endif
2537 /*
2538 * XXX - If this pmap has an A table, give it back.
2539 * The pmap SHOULD be empty by now, and pmap_remove
2540 * should have already given back the A table...
2541 * However, I see: pmap->pm_a_tmgr->at_ecnt == 1
2542 * at this point, which means some mapping was not
2543 * removed when it should have been. -gwr
2544 */
2545 if (pmap->pm_a_tmgr != NULL) {
2546 /* First make sure we are not using it! */
2547 if (kernel_crp.rp_addr == pmap->pm_a_phys) {
2548 kernel_crp.rp_addr = kernAphys;
2549 loadcrp(&kernel_crp);
2550 }
2551 #ifdef PMAP_DEBUG /* XXX - todo! */
2552 /* XXX - Now complain... */
2553 printf("pmap_release: still have table\n");
2554 Debugger();
2555 #endif
2556 free_a_table(pmap->pm_a_tmgr, TRUE);
2557 pmap->pm_a_tmgr = NULL;
2558 pmap->pm_a_phys = kernAphys;
2559 }
2560 }
2561
2562 /* pmap_reference INTERFACE
2563 **
2564 * Increment the reference count of a pmap.
2565 */
2566 void
2567 pmap_reference(pmap)
2568 pmap_t pmap;
2569 {
2570 pmap_lock(pmap);
2571 pmap_add_ref(pmap);
2572 pmap_unlock(pmap);
2573 }
2574
2575 /* pmap_dereference INTERNAL
2576 **
2577 * Decrease the reference count on the given pmap
2578 * by one and return the current count.
2579 */
2580 int
2581 pmap_dereference(pmap)
2582 pmap_t pmap;
2583 {
2584 int rtn;
2585
2586 pmap_lock(pmap);
2587 rtn = pmap_del_ref(pmap);
2588 pmap_unlock(pmap);
2589
2590 return rtn;
2591 }
2592
2593 /* pmap_destroy INTERFACE
2594 **
2595 * Decrement a pmap's reference count and delete
2596 * the pmap if it becomes zero. Will be called
2597 * only after all mappings have been removed.
2598 */
2599 void
2600 pmap_destroy(pmap)
2601 pmap_t pmap;
2602 {
2603 if (pmap_dereference(pmap) == 0) {
2604 pmap_release(pmap);
2605 pool_put(&pmap_pmap_pool, pmap);
2606 }
2607 }
2608
2609 /* pmap_is_referenced INTERFACE
2610 **
2611 * Determine if the given physical page has been
2612  * referenced (read from or written to.)
2613 */
2614 boolean_t
2615 pmap_is_referenced(pg)
2616 struct vm_page *pg;
2617 {
2618 paddr_t pa = VM_PAGE_TO_PHYS(pg);
2619 pv_t *pv;
2620 int idx;
2621
2622 /*
2623 * Check the flags on the pv head. If they are set,
2624 * return immediately. Otherwise a search must be done.
2625 */
2626
2627 pv = pa2pv(pa);
2628 if (pv->pv_flags & PV_FLAGS_USED)
2629 return TRUE;
2630
2631 /*
2632 * Search through all pv elements pointing
2633 * to this page and query their reference bits
2634 */
2635
2636 for (idx = pv->pv_idx; idx != PVE_EOL; idx = pvebase[idx].pve_next) {
2637 if (MMU_PTE_USED(kernCbase[idx])) {
2638 return TRUE;
2639 }
2640 }
2641 return FALSE;
2642 }
2643
2644 /* pmap_is_modified INTERFACE
2645 **
2646 * Determine if the given physical page has been
2647 * modified (written to.)
2648 */
2649 boolean_t
2650 pmap_is_modified(pg)
2651 struct vm_page *pg;
2652 {
2653 paddr_t pa = VM_PAGE_TO_PHYS(pg);
2654 pv_t *pv;
2655 int idx;
2656
2657 /* see comments in pmap_is_referenced() */
2658 pv = pa2pv(pa);
2659 if (pv->pv_flags & PV_FLAGS_MDFY)
2660 return TRUE;
2661
2662 for (idx = pv->pv_idx;
2663 idx != PVE_EOL;
2664 idx = pvebase[idx].pve_next) {
2665
2666 if (MMU_PTE_MODIFIED(kernCbase[idx])) {
2667 return TRUE;
2668 }
2669 }
2670
2671 return FALSE;
2672 }
2673
2674 /* pmap_page_protect INTERFACE
2675 **
2676 * Applies the given protection to all mappings to the given
2677 * physical page.
2678 */
2679 void
2680 pmap_page_protect(pg, prot)
2681 struct vm_page *pg;
2682 vm_prot_t prot;
2683 {
2684 paddr_t pa = VM_PAGE_TO_PHYS(pg);
2685 pv_t *pv;
2686 int idx;
2687 vaddr_t va;
2688 struct mmu_short_pte_struct *pte;
2689 c_tmgr_t *c_tbl;
2690 pmap_t pmap, curpmap;
2691
2692 curpmap = current_pmap();
2693 pv = pa2pv(pa);
2694
2695 for (idx = pv->pv_idx; idx != PVE_EOL; idx = pvebase[idx].pve_next) {
2696 pte = &kernCbase[idx];
2697 switch (prot) {
2698 case VM_PROT_ALL:
2699 /* do nothing */
2700 break;
2701 case VM_PROT_EXECUTE:
2702 case VM_PROT_READ:
2703 case VM_PROT_READ|VM_PROT_EXECUTE:
2704 /*
2705 * Determine the virtual address mapped by
2706 * the PTE and flush ATC entries if necessary.
2707 */
2708 va = pmap_get_pteinfo(idx, &pmap, &c_tbl);
2709 pte->attr.raw |= MMU_SHORT_PTE_WP;
2710 if (pmap == curpmap || pmap == pmap_kernel())
2711 TBIS(va);
2712 break;
2713 case VM_PROT_NONE:
2714 /* Save the mod/ref bits. */
2715 pv->pv_flags |= pte->attr.raw;
2716 /* Invalidate the PTE. */
2717 pte->attr.raw = MMU_DT_INVALID;
2718
2719 /*
2720 			 * Update table counts, and flush ATC
2721 			 * entries if necessary.
2722 */
2723 va = pmap_get_pteinfo(idx, &pmap, &c_tbl);
2724
2725 /*
2726 * If the PTE belongs to the kernel map,
2727 * be sure to flush the page it maps.
2728 */
2729 if (pmap == pmap_kernel()) {
2730 TBIS(va);
2731 } else {
2732 /*
2733 * The PTE belongs to a user map.
2734 				 * Update the entry count in the C
2735 * table to which it belongs and flush
2736 * the ATC if the mapping belongs to
2737 * the current pmap.
2738 */
2739 c_tbl->ct_ecnt--;
2740 if (pmap == curpmap)
2741 TBIS(va);
2742 }
2743 break;
2744 default:
2745 break;
2746 }
2747 }
2748
2749 /*
2750 * If the protection code indicates that all mappings to the page
2751 	 * are to be removed, truncate the PV list to zero entries.
2752 */
2753 if (prot == VM_PROT_NONE)
2754 pv->pv_idx = PVE_EOL;
2755 }
2756
2757 /* pmap_get_pteinfo INTERNAL
2758 **
2759 * Called internally to find the pmap and virtual address within that
2760 * map to which the pte at the given index maps. Also includes the PTE's C
2761 * table manager.
2762 *
2763  * Returns the pmap and the C table manager through the arguments
2764  * provided, and the virtual address by return value.
2765 */
2766 vaddr_t
2767 pmap_get_pteinfo(idx, pmap, tbl)
2768 u_int idx;
2769 pmap_t *pmap;
2770 c_tmgr_t **tbl;
2771 {
2772 vaddr_t va = 0;
2773
2774 /*
2775 * Determine if the PTE is a kernel PTE or a user PTE.
2776 */
2777 if (idx >= NUM_KERN_PTES) {
2778 /*
2779 * The PTE belongs to a user mapping.
2780 */
2781 /* XXX: Would like an inline for this to validate idx... */
2782 *tbl = &Ctmgrbase[(idx - NUM_KERN_PTES) / MMU_C_TBL_SIZE];
2783
2784 *pmap = (*tbl)->ct_pmap;
2785 /*
2786 * To find the va to which the PTE maps, we first take
2787 * the table's base virtual address mapping which is stored
2788 * in ct_va. We then increment this address by a page for
2789 * every slot skipped until we reach the PTE.
2790 */
2791 va = (*tbl)->ct_va;
2792 va += m68k_ptob(idx % MMU_C_TBL_SIZE);
2793 } else {
2794 /*
2795 * The PTE belongs to the kernel map.
2796 */
2797 *pmap = pmap_kernel();
2798
2799 va = m68k_ptob(idx);
2800 va += KERNBASE;
2801 }
2802
2803 return va;
2804 }
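
/*
 * For example (hypothetical numbers, assuming NUM_KERN_PTES is a
 * multiple of MMU_C_TBL_SIZE and the latter is 64): a user PTE index
 * of NUM_KERN_PTES + 130 selects Ctmgrbase[2] (130 / 64) and returns
 * that table's ct_va plus m68k_ptob(2) (130 % 64).
 */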
2805
2806 /* pmap_clear_modify INTERFACE
2807 **
2808 * Clear the modification bit on the page at the specified
2809  * physical address.  Returns TRUE if the bit was previously set.
2811 */
2812 boolean_t
2813 pmap_clear_modify(pg)
2814 struct vm_page *pg;
2815 {
2816 paddr_t pa = VM_PAGE_TO_PHYS(pg);
2817 boolean_t rv;
2818
2819 rv = pmap_is_modified(pg);
2820 pmap_clear_pv(pa, PV_FLAGS_MDFY);
2821 return rv;
2822 }
2823
2824 /* pmap_clear_reference INTERFACE
2825 **
2826 * Clear the referenced bit on the page at the specified
2827  * physical address.  Returns TRUE if the bit was previously set.
2828 */
2829 boolean_t
2830 pmap_clear_reference(pg)
2831 struct vm_page *pg;
2832 {
2833 paddr_t pa = VM_PAGE_TO_PHYS(pg);
2834 boolean_t rv;
2835
2836 rv = pmap_is_referenced(pg);
2837 pmap_clear_pv(pa, PV_FLAGS_USED);
2838 return rv;
2839 }
2840
2841 /* pmap_clear_pv INTERNAL
2842 **
2843 * Clears the specified flag from the specified physical address.
2844 * (Used by pmap_clear_modify() and pmap_clear_reference().)
2845 *
2846 * Flag is one of:
2847 * PV_FLAGS_MDFY - Page modified bit.
2848 * PV_FLAGS_USED - Page used (referenced) bit.
2849 *
2850 * This routine must not only clear the flag on the pv list
2851 * head. It must also clear the bit on every pte in the pv
2852 * list associated with the address.
2853 */
2854 void
2855 pmap_clear_pv(pa, flag)
2856 paddr_t pa;
2857 int flag;
2858 {
2859 pv_t *pv;
2860 int idx;
2861 vaddr_t va;
2862 pmap_t pmap;
2863 mmu_short_pte_t *pte;
2864 c_tmgr_t *c_tbl;
2865
2866 pv = pa2pv(pa);
2867 pv->pv_flags &= ~(flag);
2868 for (idx = pv->pv_idx; idx != PVE_EOL; idx = pvebase[idx].pve_next) {
2869 pte = &kernCbase[idx];
2870 pte->attr.raw &= ~(flag);
2871
2872 /*
2873 * The MC68030 MMU will not set the modified or
2874 * referenced bits on any MMU tables for which it has
2875 	 * a cached descriptor with its modify bit set.  To ensure
2876 * that it will modify these bits on the PTE during the next
2877 * time it is written to or read from, we must flush it from
2878 * the ATC.
2879 *
2880 * Ordinarily it is only necessary to flush the descriptor
2881 * if it is used in the current address space. But since I
2882 * am not sure that there will always be a notion of
2883 * 'the current address space' when this function is called,
2884 * I will skip the test and always flush the address. It
2885 * does no harm.
2886 */
2887
2888 va = pmap_get_pteinfo(idx, &pmap, &c_tbl);
2889 TBIS(va);
2890 }
2891 }
2892
2893 /* pmap_extract INTERFACE
2894 **
2895 * Return the physical address mapped by the virtual address
2896 * in the specified pmap.
2897 *
2898 * Note: this function should also apply an exclusive lock
2899  * on the pmap system for its duration.
2900 */
2901 boolean_t
2902 pmap_extract(pmap, va, pap)
2903 pmap_t pmap;
2904 vaddr_t va;
2905 paddr_t *pap;
2906 {
2907 int a_idx, b_idx, pte_idx;
2908 a_tmgr_t *a_tbl;
2909 b_tmgr_t *b_tbl;
2910 c_tmgr_t *c_tbl;
2911 mmu_short_pte_t *c_pte;
2912
2913 if (pmap == pmap_kernel())
2914 return pmap_extract_kernel(va, pap);
2915
2916 if (pmap_stroll(pmap, va, &a_tbl, &b_tbl, &c_tbl,
2917 &c_pte, &a_idx, &b_idx, &pte_idx) == FALSE)
2918 return FALSE;
2919
2920 if (!MMU_VALID_DT(*c_pte))
2921 return FALSE;
2922
2923 if (pap != NULL)
2924 *pap = MMU_PTE_PA(*c_pte);
2925 return (TRUE);
2926 }
2927
2928 /* pmap_extract_kernel INTERNAL
2929 **
2930 * Extract a translation from the kernel address space.
2931 */
2932 boolean_t
2933 pmap_extract_kernel(va, pap)
2934 vaddr_t va;
2935 paddr_t *pap;
2936 {
2937 mmu_short_pte_t *pte;
2938
2939 pte = &kernCbase[(u_int) m68k_btop(va - KERNBASE)];
2940 if (!MMU_VALID_DT(*pte))
2941 return (FALSE);
2942 if (pap != NULL)
2943 *pap = MMU_PTE_PA(*pte);
2944 return (TRUE);
2945 }
2946
2947 /* pmap_remove_kernel INTERNAL
2948 **
2949 * Remove the mapping of a range of virtual addresses from the kernel map.
2950 * The arguments are already page-aligned.
2951 */
2952 void
2953 pmap_remove_kernel(sva, eva)
2954 vaddr_t sva;
2955 vaddr_t eva;
2956 {
2957 int idx, eidx;
2958
2959 #ifdef PMAP_DEBUG
2960 if ((sva & PGOFSET) || (eva & PGOFSET))
2961 panic("pmap_remove_kernel: alignment");
2962 #endif
2963
2964 idx = m68k_btop(sva - KERNBASE);
2965 eidx = m68k_btop(eva - KERNBASE);
2966
2967 while (idx < eidx) {
2968 pmap_remove_pte(&kernCbase[idx++]);
2969 TBIS(sva);
2970 sva += PAGE_SIZE;
2971 }
2972 }
2973
2974 /* pmap_remove INTERFACE
2975 **
2976 * Remove the mapping of a range of virtual addresses from the given pmap.
2977 *
2978 * If the range contains any wired entries, this function will probably create
2979 * disaster.
2980 */
2981 void
2982 pmap_remove(pmap, start, end)
2983 pmap_t pmap;
2984 vaddr_t start;
2985 vaddr_t end;
2986 {
2987
2988 if (pmap == pmap_kernel()) {
2989 pmap_remove_kernel(start, end);
2990 return;
2991 }
2992
2993 /*
2994 * If the pmap doesn't have an A table of its own, it has no mappings
2995 * that can be removed.
2996 */
2997 if (pmap->pm_a_tmgr == NULL)
2998 return;
2999
3000 /*
3001 * Remove the specified range from the pmap. If the function
3002 * returns true, the operation removed all the valid mappings
3003 * in the pmap and freed its A table. If this happened to the
3004 * currently loaded pmap, the MMU root pointer must be reloaded
3005 * with the default 'kernel' map.
3006 */
3007 if (pmap_remove_a(pmap->pm_a_tmgr, start, end)) {
3008 if (kernel_crp.rp_addr == pmap->pm_a_phys) {
3009 kernel_crp.rp_addr = kernAphys;
3010 loadcrp(&kernel_crp);
3011 /* will do TLB flush below */
3012 }
3013 pmap->pm_a_tmgr = NULL;
3014 pmap->pm_a_phys = kernAphys;
3015 }
3016
3017 /*
3018 * If we just modified the current address space,
3019 * make sure to flush the MMU cache.
3020 *
3021 	 * XXX - this could be an unnecessarily large flush.
3022 * XXX - Could decide, based on the size of the VA range
3023 * to be removed, whether to flush "by pages" or "all".
3024 */
3025 if (pmap == current_pmap())
3026 TBIAU();
3027 }
3028
3029 /* pmap_remove_a INTERNAL
3030 **
3031 * This is function number one in a set of three that removes a range
3032 * of memory in the most efficient manner by removing the highest possible
3033 * tables from the memory space. This particular function attempts to remove
3034 * as many B tables as it can, delegating the remaining fragmented ranges to
3035 * pmap_remove_b().
3036 *
3037 * If the removal operation results in an empty A table, the function returns
3038 * TRUE.
3039 *
3040 * It's ugly but will do for now.
3041 */
3042 boolean_t
3043 pmap_remove_a(a_tbl, start, end)
3044 a_tmgr_t *a_tbl;
3045 vaddr_t start;
3046 vaddr_t end;
3047 {
3048 boolean_t empty;
3049 int idx;
3050 vaddr_t nstart, nend;
3051 b_tmgr_t *b_tbl;
3052 mmu_long_dte_t *a_dte;
3053 mmu_short_dte_t *b_dte;
3054
3055 /*
3056 * The following code works with what I call a 'granularity
3057 	 * reduction algorithm'.  A range of addresses will always have
3058 * the following properties, which are classified according to
3059 * how the range relates to the size of the current granularity
3060 * - an A table entry:
3061 *
3062 * 1 2 3 4
3063 * -+---+---+---+---+---+---+---+-
3064 * -+---+---+---+---+---+---+---+-
3065 *
3066 * A range will always start on a granularity boundary, illustrated
3067 * by '+' signs in the table above, or it will start at some point
3068 	 * in between granularity boundaries, as illustrated by point 1.
3069 * The first step in removing a range of addresses is to remove the
3070 * range between 1 and 2, the nearest granularity boundary. This
3071 * job is handled by the section of code governed by the
3072 * 'if (start < nstart)' statement.
3073 *
3074 	 * A range will always encompass zero or more integral granules,
3075 * illustrated by points 2 and 3. Integral granules are easy to
3076 * remove. The removal of these granules is the second step, and
3077 * is handled by the code block 'if (nstart < nend)'.
3078 *
3079 * Lastly, a range will always end on a granularity boundary,
3080 	 * as illustrated by point 3, or it will fall just beyond one, as
3081 	 * illustrated by point 4.  The last step involves removing this
3082 	 * range and is handled by the code block 'if (nend < end)'.
3083 */
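	/*
	 * A worked example with hypothetical numbers, assuming an A table
	 * granule (MMU_TIA_RANGE) of 0x2000000 bytes: removing the range
	 * [0x1234000, 0x7654000) proceeds as
	 *
	 *	start..nstart	0x1234000..0x2000000	pmap_remove_b()
	 *	nstart..nend	0x2000000..0x6000000	whole B tables freed
	 *	nend..end	0x6000000..0x7654000	pmap_remove_b()
	 */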
3084 nstart = MMU_ROUND_UP_A(start);
3085 nend = MMU_ROUND_A(end);
3086
3087 if (start < nstart) {
3088 /*
3089 * This block is executed if the range starts between
3090 * a granularity boundary.
3091 *
3092 * First find the DTE which is responsible for mapping
3093 * the start of the range.
3094 */
3095 idx = MMU_TIA(start);
3096 a_dte = &a_tbl->at_dtbl[idx];
3097
3098 /*
3099 * If the DTE is valid then delegate the removal of the sub
3100 * range to pmap_remove_b(), which can remove addresses at
3101 * a finer granularity.
3102 */
3103 if (MMU_VALID_DT(*a_dte)) {
3104 b_dte = mmu_ptov(a_dte->addr.raw);
3105 b_tbl = mmuB2tmgr(b_dte);
3106
3107 /*
3108 * The sub range to be removed starts at the start
3109 * of the full range we were asked to remove, and ends
3110 			 * at the lesser of:
3111 			 * 1. The end of the full range, -or-
3112 			 * 2. The start of the range, rounded up to the
3113 			 *    nearest granularity boundary (nstart).
3114 */
3115 if (end < nstart)
3116 empty = pmap_remove_b(b_tbl, start, end);
3117 else
3118 empty = pmap_remove_b(b_tbl, start, nstart);
3119
3120 /*
3121 * If the removal resulted in an empty B table,
3122 * invalidate the DTE that points to it and decrement
3123 * the valid entry count of the A table.
3124 */
3125 if (empty) {
3126 a_dte->attr.raw = MMU_DT_INVALID;
3127 a_tbl->at_ecnt--;
3128 }
3129 }
3130 /*
3131 * If the DTE is invalid, the address range is already non-
3132 * existent and can simply be skipped.
3133 */
3134 }
3135 if (nstart < nend) {
3136 /*
3137 * This block is executed if the range spans a whole number
3138 * multiple of granules (A table entries.)
3139 *
3140 * First find the DTE which is responsible for mapping
3141 * the start of the first granule involved.
3142 */
3143 idx = MMU_TIA(nstart);
3144 a_dte = &a_tbl->at_dtbl[idx];
3145
3146 /*
3147 * Remove entire sub-granules (B tables) one at a time,
3148 * until reaching the end of the range.
3149 */
3150 for (; nstart < nend; a_dte++, nstart += MMU_TIA_RANGE)
3151 if (MMU_VALID_DT(*a_dte)) {
3152 /*
3153 * Find the B table manager for the
3154 * entry and free it.
3155 */
3156 b_dte = mmu_ptov(a_dte->addr.raw);
3157 b_tbl = mmuB2tmgr(b_dte);
3158 free_b_table(b_tbl, TRUE);
3159
3160 /*
3161 * Invalidate the DTE that points to the
3162 * B table and decrement the valid entry
3163 * count of the A table.
3164 */
3165 a_dte->attr.raw = MMU_DT_INVALID;
3166 a_tbl->at_ecnt--;
3167 }
3168 }
3169 if (nend < end) {
3170 /*
3171 * This block is executed if the range ends beyond a
3172 * granularity boundary.
3173 *
3174 * First find the DTE which is responsible for mapping
3175 * the start of the nearest (rounded down) granularity
3176 * boundary.
3177 */
3178 idx = MMU_TIA(nend);
3179 a_dte = &a_tbl->at_dtbl[idx];
3180
3181 /*
3182 * If the DTE is valid then delegate the removal of the sub
3183 * range to pmap_remove_b(), which can remove addresses at
3184 * a finer granularity.
3185 */
3186 if (MMU_VALID_DT(*a_dte)) {
3187 /*
3188 * Find the B table manager for the entry
3189 * and hand it to pmap_remove_b() along with
3190 * the sub range.
3191 */
3192 b_dte = mmu_ptov(a_dte->addr.raw);
3193 b_tbl = mmuB2tmgr(b_dte);
3194
3195 empty = pmap_remove_b(b_tbl, nend, end);
3196
3197 /*
3198 * If the removal resulted in an empty B table,
3199 * invalidate the DTE that points to it and decrement
3200 * the valid entry count of the A table.
3201 */
3202 if (empty) {
3203 a_dte->attr.raw = MMU_DT_INVALID;
3204 a_tbl->at_ecnt--;
3205 }
3206 }
3207 }
3208
3209 /*
3210 * If there are no more entries in the A table, release it
3211 * back to the available pool and return TRUE.
3212 */
3213 if (a_tbl->at_ecnt == 0) {
3214 a_tbl->at_parent = NULL;
3215 TAILQ_REMOVE(&a_pool, a_tbl, at_link);
3216 TAILQ_INSERT_HEAD(&a_pool, a_tbl, at_link);
3217 empty = TRUE;
3218 } else {
3219 empty = FALSE;
3220 }
3221
3222 return empty;
3223 }
3224
3225 /* pmap_remove_b INTERNAL
3226 **
3227 * Remove a range of addresses from an address space, trying to remove entire
3228 * C tables if possible.
3229 *
3230 * If the operation results in an empty B table, the function returns TRUE.
3231 */
3232 boolean_t
3233 pmap_remove_b(b_tbl, start, end)
3234 b_tmgr_t *b_tbl;
3235 vaddr_t start;
3236 vaddr_t end;
3237 {
3238 boolean_t empty;
3239 int idx;
3240 vaddr_t nstart, nend, rstart;
3241 c_tmgr_t *c_tbl;
3242 mmu_short_dte_t *b_dte;
3243 mmu_short_pte_t *c_dte;
3244
3245
3246 nstart = MMU_ROUND_UP_B(start);
3247 nend = MMU_ROUND_B(end);
3248
3249 if (start < nstart) {
3250 idx = MMU_TIB(start);
3251 b_dte = &b_tbl->bt_dtbl[idx];
3252 if (MMU_VALID_DT(*b_dte)) {
3253 c_dte = mmu_ptov(MMU_DTE_PA(*b_dte));
3254 c_tbl = mmuC2tmgr(c_dte);
3255 if (end < nstart)
3256 empty = pmap_remove_c(c_tbl, start, end);
3257 else
3258 empty = pmap_remove_c(c_tbl, start, nstart);
3259 if (empty) {
3260 b_dte->attr.raw = MMU_DT_INVALID;
3261 b_tbl->bt_ecnt--;
3262 }
3263 }
3264 }
3265 if (nstart < nend) {
3266 idx = MMU_TIB(nstart);
3267 b_dte = &b_tbl->bt_dtbl[idx];
3268 rstart = nstart;
3269 while (rstart < nend) {
3270 if (MMU_VALID_DT(*b_dte)) {
3271 c_dte = mmu_ptov(MMU_DTE_PA(*b_dte));
3272 c_tbl = mmuC2tmgr(c_dte);
3273 free_c_table(c_tbl, TRUE);
3274 b_dte->attr.raw = MMU_DT_INVALID;
3275 b_tbl->bt_ecnt--;
3276 }
3277 b_dte++;
3278 rstart += MMU_TIB_RANGE;
3279 }
3280 }
3281 if (nend < end) {
3282 idx = MMU_TIB(nend);
3283 b_dte = &b_tbl->bt_dtbl[idx];
3284 if (MMU_VALID_DT(*b_dte)) {
3285 c_dte = mmu_ptov(MMU_DTE_PA(*b_dte));
3286 c_tbl = mmuC2tmgr(c_dte);
3287 empty = pmap_remove_c(c_tbl, nend, end);
3288 if (empty) {
3289 b_dte->attr.raw = MMU_DT_INVALID;
3290 b_tbl->bt_ecnt--;
3291 }
3292 }
3293 }
3294
3295 if (b_tbl->bt_ecnt == 0) {
3296 b_tbl->bt_parent = NULL;
3297 TAILQ_REMOVE(&b_pool, b_tbl, bt_link);
3298 TAILQ_INSERT_HEAD(&b_pool, b_tbl, bt_link);
3299 empty = TRUE;
3300 } else {
3301 empty = FALSE;
3302 }
3303
3304 return empty;
3305 }
3306
3307 /* pmap_remove_c INTERNAL
3308 **
3309 * Remove a range of addresses from the given C table.
3310 */
3311 boolean_t
3312 pmap_remove_c(c_tbl, start, end)
3313 c_tmgr_t *c_tbl;
3314 vaddr_t start;
3315 vaddr_t end;
3316 {
3317 boolean_t empty;
3318 int idx;
3319 mmu_short_pte_t *c_pte;
3320
3321 idx = MMU_TIC(start);
3322 c_pte = &c_tbl->ct_dtbl[idx];
3323 for (;start < end; start += MMU_PAGE_SIZE, c_pte++) {
3324 if (MMU_VALID_DT(*c_pte)) {
3325 pmap_remove_pte(c_pte);
3326 c_tbl->ct_ecnt--;
3327 }
3328 }
3329
3330 if (c_tbl->ct_ecnt == 0) {
3331 c_tbl->ct_parent = NULL;
3332 TAILQ_REMOVE(&c_pool, c_tbl, ct_link);
3333 TAILQ_INSERT_HEAD(&c_pool, c_tbl, ct_link);
3334 empty = TRUE;
3335 } else {
3336 empty = FALSE;
3337 }
3338
3339 return empty;
3340 }
3341
3342 /* is_managed INTERNAL
3343 **
3344 * Determine if the given physical address is managed by the PV system.
3345 * Note that this logic assumes that no one will ask for the status of
3346 * addresses which lie in-between the memory banks on the 3/80. If they
3347 * do so, it will falsely report that it is managed.
3348 *
3349 * Note: A "managed" address is one that was reported to the VM system as
3350 * a "usable page" during system startup. As such, the VM system expects the
3351  * pmap module to keep an accurate track of the usage of those pages.
3352 * Any page not given to the VM system at startup does not exist (as far as
3353 * the VM system is concerned) and is therefore "unmanaged." Examples are
3354 * those pages which belong to the ROM monitor and the memory allocated before
3355 * the VM system was started.
3356 */
3357 boolean_t
3358 is_managed(pa)
3359 paddr_t pa;
3360 {
3361 if (pa >= avail_start && pa < avail_end)
3362 return TRUE;
3363 else
3364 return FALSE;
3365 }
3366
3367 /* pmap_bootstrap_alloc INTERNAL
3368 **
3369 * Used internally for memory allocation at startup when malloc is not
3370 * available. This code will fail once it crosses the first memory
3371 * bank boundary on the 3/80. Hopefully by then however, the VM system
3372 * will be in charge of allocation.
3373 */
3374 void *
3375 pmap_bootstrap_alloc(size)
3376 int size;
3377 {
3378 void *rtn;
3379
3380 #ifdef PMAP_DEBUG
3381 if (bootstrap_alloc_enabled == FALSE) {
3382 mon_printf("pmap_bootstrap_alloc: disabled\n");
3383 sunmon_abort();
3384 }
3385 #endif
3386
3387 rtn = (void *) virtual_avail;
3388 virtual_avail += size;
3389
3390 #ifdef PMAP_DEBUG
3391 if (virtual_avail > virtual_contig_end) {
3392 mon_printf("pmap_bootstrap_alloc: out of mem\n");
3393 sunmon_abort();
3394 }
3395 #endif
3396
3397 return rtn;
3398 }
3399
3400 /* pmap_bootstrap_aalign INTERNAL
3401 **
3402  * Used to ensure that the next call to pmap_bootstrap_alloc() will
3403 * return a chunk of memory aligned to the specified size.
3404 *
3405 * Note: This function will only support alignment sizes that are powers
3406 * of two.
3407 */
3408 void
3409 pmap_bootstrap_aalign(size)
3410 int size;
3411 {
3412 int off;
3413
3414 off = virtual_avail & (size - 1);
3415 if (off) {
3416 (void) pmap_bootstrap_alloc(size - off);
3417 }
3418 }
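
/*
 * For example (hypothetical value): if virtual_avail is 0x12345678 and
 * an alignment of 0x1000 is requested, off is 0x678, so 0x988 bytes are
 * discarded and virtual_avail becomes 0x12346000.
 */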
3419
3420 /* pmap_pa_exists
3421 **
3422 * Used by the /dev/mem driver to see if a given PA is memory
3423 * that can be mapped. (The PA is not in a hole.)
3424 */
3425 int
3426 pmap_pa_exists(pa)
3427 paddr_t pa;
3428 {
3429 int i;
3430
3431 for (i = 0; i < SUN3X_NPHYS_RAM_SEGS; i++) {
3432 if ((pa >= avail_mem[i].pmem_start) &&
3433 (pa < avail_mem[i].pmem_end))
3434 return (1);
3435 if (avail_mem[i].pmem_next == NULL)
3436 break;
3437 }
3438 return (0);
3439 }
3440
3441 /* Called only from locore.s and pmap.c */
3442 void _pmap_switch __P((pmap_t pmap));
3443
3444 /*
3445 * _pmap_switch INTERNAL
3446 *
3447 * This is called by locore.s:cpu_switch() when it is
3448 * switching to a new process. Load new translations.
3449 * Note: done in-line by locore.s unless PMAP_DEBUG
3450 *
3451 * Note that we do NOT allocate a context here, but
3452 * share the "kernel only" context until we really
3453 * need our own context for user-space mappings in
3454 * pmap_enter_user(). [ s/context/mmu A table/ ]
3455 */
3456 void
3457 _pmap_switch(pmap)
3458 pmap_t pmap;
3459 {
3460 u_long rootpa;
3461
3462 /*
3463 * Only do reload/flush if we have to.
3464 * Note that if the old and new process
3465 * were BOTH using the "null" context,
3466 * then this will NOT flush the TLB.
3467 */
3468 rootpa = pmap->pm_a_phys;
3469 if (kernel_crp.rp_addr != rootpa) {
3470 DPRINT(("pmap_activate(%p)\n", pmap));
3471 kernel_crp.rp_addr = rootpa;
3472 loadcrp(&kernel_crp);
3473 TBIAU();
3474 }
3475 }
3476
3477 /*
3478 * Exported version of pmap_activate(). This is called from the
3479 * machine-independent VM code when a process is given a new pmap.
3480 * If (p == curlwp) do like cpu_switch would do; otherwise just
3481 * take this as notification that the process has a new pmap.
3482 */
3483 void
3484 pmap_activate(l)
3485 struct lwp *l;
3486 {
3487 if (l->l_proc == curproc) {
3488 _pmap_switch(l->l_proc->p_vmspace->vm_map.pmap);
3489 }
3490 }
3491
3492 /*
3493 * pmap_deactivate INTERFACE
3494 **
3495 * This is called to deactivate the specified process's address space.
3496 */
3497 void
3498 pmap_deactivate(l)
3499 struct lwp *l;
3500 {
3501 /* Nothing to do. */
3502 }
3503
3504 /*
3505 * Fill in the sun3x-specific part of the kernel core header
3506 * for dumpsys(). (See machdep.c for the rest.)
3507 */
3508 void
3509 pmap_kcore_hdr(sh)
3510 struct sun3x_kcore_hdr *sh;
3511 {
3512 u_long spa, len;
3513 int i;
3514
3515 sh->pg_frame = MMU_SHORT_PTE_BASEADDR;
3516 sh->pg_valid = MMU_DT_PAGE;
3517 sh->contig_end = virtual_contig_end;
3518 sh->kernCbase = (u_long)kernCbase;
3519 for (i = 0; i < SUN3X_NPHYS_RAM_SEGS; i++) {
3520 spa = avail_mem[i].pmem_start;
3521 spa = m68k_trunc_page(spa);
3522 len = avail_mem[i].pmem_end - spa;
3523 len = m68k_round_page(len);
3524 sh->ram_segs[i].start = spa;
3525 sh->ram_segs[i].size = len;
3526 }
3527 }
3528
3529
3530 /* pmap_virtual_space INTERFACE
3531 **
3532 * Return the current available range of virtual addresses in the
3533  * arguments provided.  Only really called once.
3534 */
3535 void
3536 pmap_virtual_space(vstart, vend)
3537 vaddr_t *vstart, *vend;
3538 {
3539 *vstart = virtual_avail;
3540 *vend = virtual_end;
3541 }
3542
3543 /*
3544 * Provide memory to the VM system.
3545 *
3546 * Assume avail_start is always in the
3547 * first segment as pmap_bootstrap does.
3548 */
3549 static void
3550 pmap_page_upload()
3551 {
3552 paddr_t a, b; /* memory range */
3553 int i;
3554
3555 /* Supply the memory in segments. */
3556 for (i = 0; i < SUN3X_NPHYS_RAM_SEGS; i++) {
3557 a = atop(avail_mem[i].pmem_start);
3558 b = atop(avail_mem[i].pmem_end);
3559 if (i == 0)
3560 a = atop(avail_start);
3561 if (avail_mem[i].pmem_end > avail_end)
3562 b = atop(avail_end);
3563
3564 uvm_page_physload(a, b, a, b, VM_FREELIST_DEFAULT);
3565
3566 if (avail_mem[i].pmem_next == NULL)
3567 break;
3568 }
3569 }
3570
3571 /* pmap_count INTERFACE
3572 **
3573 * Return the number of resident (valid) pages in the given pmap.
3574 *
3575 * Note: If this function is handed the kernel map, it will report
3576 * that it has no mappings. Hopefully the VM system won't ask for kernel
3577 * map statistics.
3578 */
3579 segsz_t
3580 pmap_count(pmap, type)
3581 pmap_t pmap;
3582 int type;
3583 {
3584 u_int count;
3585 int a_idx, b_idx;
3586 a_tmgr_t *a_tbl;
3587 b_tmgr_t *b_tbl;
3588 c_tmgr_t *c_tbl;
3589
3590 /*
3591 * If the pmap does not have its own A table manager, it has no
3592 	 * valid entries.
3593 */
3594 if (pmap->pm_a_tmgr == NULL)
3595 return 0;
3596
3597 a_tbl = pmap->pm_a_tmgr;
3598
3599 count = 0;
3600 for (a_idx = 0; a_idx < MMU_TIA(KERNBASE); a_idx++) {
3601 if (MMU_VALID_DT(a_tbl->at_dtbl[a_idx])) {
3602 b_tbl = mmuB2tmgr(mmu_ptov(a_tbl->at_dtbl[a_idx].addr.raw));
3603 for (b_idx = 0; b_idx < MMU_B_TBL_SIZE; b_idx++) {
3604 if (MMU_VALID_DT(b_tbl->bt_dtbl[b_idx])) {
3605 c_tbl = mmuC2tmgr(
3606 mmu_ptov(MMU_DTE_PA(b_tbl->bt_dtbl[b_idx])));
3607 if (type == 0)
3608 /*
3609 * A resident entry count has been requested.
3610 */
3611 count += c_tbl->ct_ecnt;
3612 else
3613 /*
3614 * A wired entry count has been requested.
3615 */
3616 count += c_tbl->ct_wcnt;
3617 }
3618 }
3619 }
3620 }
3621
3622 return count;
3623 }
3624
3625 /************************ SUN3 COMPATIBILITY ROUTINES ********************
3626  * The following routines are only used by DDB for tricky kernel     *
3627  * text operations in db_memrw.c.  They are provided for sun3        *
3628 * compatibility. *
3629 *************************************************************************/
3630 /* get_pte INTERNAL
3631 **
3632  * Return the page descriptor that describes the kernel mapping
3633 * of the given virtual address.
3634 */
3635 extern u_long ptest_addr __P((u_long)); /* XXX: locore.s */
3636 u_int
3637 get_pte(va)
3638 vaddr_t va;
3639 {
3640 u_long pte_pa;
3641 mmu_short_pte_t *pte;
3642
3643 /* Get the physical address of the PTE */
3644 pte_pa = ptest_addr(va & ~PGOFSET);
3645
3646 /* Convert to a virtual address... */
3647 pte = (mmu_short_pte_t *) (KERNBASE + pte_pa);
3648
3649 /* Make sure it is in our level-C tables... */
3650 if ((pte < kernCbase) ||
3651 (pte >= &mmuCbase[NUM_USER_PTES]))
3652 return 0;
3653
3654 /* ... and just return its contents. */
3655 return (pte->attr.raw);
3656 }
3657
3658
3659 /* set_pte INTERNAL
3660 **
3661 * Set the page descriptor that describes the kernel mapping
3662 * of the given virtual address.
3663 */
3664 void
3665 set_pte(va, pte)
3666 vaddr_t va;
3667 u_int pte;
3668 {
3669 u_long idx;
3670
3671 if (va < KERNBASE)
3672 return;
3673
3674 idx = (unsigned long) m68k_btop(va - KERNBASE);
3675 kernCbase[idx].attr.raw = pte;
3676 TBIS(va);
3677 }
3678
3679 /*
3680 * Routine: pmap_procwr
3681 *
3682 * Function:
3683 * Synchronize caches corresponding to [addr, addr+len) in p.
3684 */
3685 void
3686 pmap_procwr(p, va, len)
3687 struct proc *p;
3688 vaddr_t va;
3689 size_t len;
3690 {
3691 (void)cachectl1(0x80000004, va, len, p);
3692 }
3693
3694
3695 #ifdef PMAP_DEBUG
3696 /************************** DEBUGGING ROUTINES **************************
3697 * The following routines are meant to be an aid to debugging the pmap *
3698 * system. They are callable from the DDB command line and should be *
3699 * prepared to be handed unstable or incomplete states of the system. *
3700 ************************************************************************/
3701
3702 /* pv_list
3703 **
3704 * List all pages found on the pv list for the given physical page.
3705 * To avoid endless loops, the listing will stop at the end of the list
3706 * or after 'n' entries - whichever comes first.
3707 */
3708 void
3709 pv_list(pa, n)
3710 paddr_t pa;
3711 int n;
3712 {
3713 int idx;
3714 vaddr_t va;
3715 pv_t *pv;
3716 c_tmgr_t *c_tbl;
3717 pmap_t pmap;
3718
3719 pv = pa2pv(pa);
3720 idx = pv->pv_idx;
3721 for (; idx != PVE_EOL && n > 0; idx = pvebase[idx].pve_next, n--) {
3722 va = pmap_get_pteinfo(idx, &pmap, &c_tbl);
3723 printf("idx %d, pmap 0x%x, va 0x%x, c_tbl %x\n",
3724 idx, (u_int) pmap, (u_int) va, (u_int) c_tbl);
3725 }
3726 }
3727 #endif /* PMAP_DEBUG */
3728
3729 #ifdef NOT_YET
3730 /* and maybe not ever */
3731 /************************** LOW-LEVEL ROUTINES **************************
3732 * These routines will eventually be re-written into assembly and placed*
3733 * in locore.s. They are here now as stubs so that the pmap module can *
3734 * be linked as a standalone user program for testing. *
3735 ************************************************************************/
3736 /* flush_atc_crp INTERNAL
3737 **
3738 * Flush all page descriptors derived from the given CPU Root Pointer
3739 * (CRP), or 'A' table as it is known here, from the 68851's automatic
3740 * cache.
3741 */
3742 void
3743 flush_atc_crp(a_tbl)
3744 {
3745 mmu_long_rp_t rp;
3746
3747 /* Create a temporary root table pointer that points to the
3748 * given A table.
3749 */
3750 rp.attr.raw = ~MMU_LONG_RP_LU;
3751 rp.addr.raw = (unsigned int) a_tbl;
3752
3753 mmu_pflushr(&rp);
3754 /* mmu_pflushr:
3755 * movel sp(4)@,a0
3756 * pflushr a0@
3757 * rts
3758 */
3759 }
3760 #endif /* NOT_YET */
3761