/*	$NetBSD: pmap.c,v 1.73 2001/09/10 21:19:28 chris Exp $	*/

/*-
 * Copyright (c) 1996, 1997 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jeremy Cooper.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * XXX These comments aren't quite accurate.  Need to change.
 * The sun3x uses the MC68851 Memory Management Unit, which is built
 * into the CPU.  The 68851 maps virtual to physical addresses using
 * a multi-level table lookup, which is stored in the very memory that
 * it maps.  The number of levels of lookup is configurable from one
 * to four.  In this implementation, we use three, named 'A' through 'C'.
 *
 * The MMU translates virtual addresses into physical addresses by
 * traversing these tables in a process called a 'table walk'.  The most
 * significant 7 bits of the Virtual Address ('VA') being translated are
 * used as an index into the level A table, whose base in physical memory
 * is stored in a special MMU register, the 'CPU Root Pointer' or CRP.  The
 * address found at that index in the A table is used as the base
 * address for the next table, the B table.  The next six bits of the VA are
 * used as an index into the B table, which in turn gives the base address
 * of the third and final C table.
 *
 * The next six bits of the VA are used as an index into the C table to
 * locate a Page Table Entry (PTE).  The PTE is a physical address in memory
 * to which the remaining 13 bits of the VA are added, producing the
 * mapped physical address.
 *
 * To map the entire memory space in this manner would require 2114296 bytes
 * of page tables per process - quite expensive.  Instead we will
 * allocate a fixed but considerably smaller space for the page tables at
 * the time the VM system is initialized.  When the pmap code is asked by
 * the kernel to map a VA to a PA, it allocates tables as needed from this
 * pool.  When there are no more tables in the pool, tables are stolen
 * from the oldest mapped entries in the tree.  This is only possible
 * because all memory mappings are stored in the kernel memory map
 * structures, independent of the pmap structures.  A VA which references
 * one of these invalidated maps will cause a page fault.  The kernel
 * will determine that the page fault was caused by a task using a valid
 * VA, but for some reason (which does not concern it), that address was
 * not mapped.  It will ask the pmap code to re-map the entry and then
 * it will resume executing the faulting task.
 *
 * In this manner the most efficient use of the page table space is
 * achieved.  Tasks which do not execute often will have their tables
 * stolen and reused by tasks which execute more frequently.  The best
 * size for the page table pool will probably be determined by
 * experimentation.
 *
 * You read all of the comments so far.  Good for you.
 * Now go play!
 */
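
/*
 * Illustrative sketch only (not part of the pmap code): how a 32-bit VA
 * splits under the 7/6/6-bit walk described above, with the remaining
 * 13 bits selecting a byte within an 8KB page.  The real code uses the
 * MMU_TIA/MMU_TIB/MMU_TIC macros from pte.h; the EX_* names here are
 * made up for this example.
 */
#if 0	/* example only */
#define	EX_TIA(va)	(((va) >> 25) & 0x7f)	/* bits 31..25: A table index */
#define	EX_TIB(va)	(((va) >> 19) & 0x3f)	/* bits 24..19: B table index */
#define	EX_TIC(va)	(((va) >> 13) & 0x3f)	/* bits 18..13: C table index */
#define	EX_POFF(va)	((va) & 0x1fff)		/* bits 12..0:  page offset   */
#endif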

/*** A Note About the 68851 Address Translation Cache
 * The MC68851 has a 64-entry cache, called the Address Translation Cache
 * or 'ATC'.  This cache stores the most recently used page descriptors
 * accessed by the MMU when it does translations.  Using a marker called a
 * 'task alias' the MMU can store the descriptors from 8 different table
 * spaces concurrently.  The task alias is associated with the base
 * address of the level A table of that address space.  When an address
 * space is currently active (the CRP currently points to its A table)
 * the only cached descriptors that will be obeyed are ones which have a
 * matching task alias of the current space associated with them.
 *
 * Since the cache is always consulted before any table lookups are done,
 * it is important that it accurately reflect the state of the MMU tables.
 * Whenever a change has been made to a table that has been loaded into
 * the MMU, the code must be sure to flush any cached entries that are
 * affected by the change.  These instances are documented in the code at
 * various points.
 */
/*** A Note About the Note About the 68851 Address Translation Cache
 * 4 months into this code I discovered that the sun3x does not have
 * a MC68851 chip.  Instead, it has a version of this MMU that is part
 * of the 68030 CPU.
 * Although it behaves very similarly to the 68851, it only has 1 task
 * alias and a 22-entry cache.  So sadly (or happily), the first paragraph
 * of the previous note does not apply to the sun3x pmap.
 */

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/user.h>
#include <sys/queue.h>
#include <sys/kcore.h>

#include <uvm/uvm.h>

#include <machine/cpu.h>
#include <machine/kcore.h>
#include <machine/mon.h>
#include <machine/pmap.h>
#include <machine/pte.h>
#include <machine/vmparam.h>

#include <sun3/sun3/cache.h>
#include <sun3/sun3/machdep.h>

#include "pmap_pvt.h"

/* XXX - What headers declare these? */
extern struct pcb *curpcb;
extern int physmem;

/* Defined in locore.s */
extern char kernel_text[];

/* Defined by the linker */
extern char etext[], edata[], end[];
extern char *esym;	/* DDB */

/*************************** DEBUGGING DEFINITIONS ***********************
 * Macros, preprocessor defines and variables used in debugging can make *
 * code hard to read.  Anything used exclusively for debugging purposes  *
 * is defined here to avoid having such mess scattered around the file.  *
 *************************************************************************/
#ifdef	PMAP_DEBUG
/*
 * To aid the debugging process, macros should be expanded into smaller steps
 * that accomplish the same goal, yet provide convenient places for placing
 * breakpoints.  When this code is compiled with PMAP_DEBUG mode defined, the
 * 'INLINE' keyword is defined to an empty string.  This way, any function
 * defined to be a 'static INLINE' will become 'outlined' and compiled as
 * a separate function, which is much easier to debug.
 */
#define	INLINE	/* nothing */

/*
 * It is sometimes convenient to watch the activity of a particular table
 * in the system.  The following variables are used for that purpose.
 */
a_tmgr_t *pmap_watch_atbl = 0;
b_tmgr_t *pmap_watch_btbl = 0;
c_tmgr_t *pmap_watch_ctbl = 0;

int pmap_debug = 0;
#define	DPRINT(args) if (pmap_debug) printf args

#else	/********** Stuff below is defined if NOT debugging **************/

#define	INLINE	inline
#define	DPRINT(args)	/* nada */

#endif	/* PMAP_DEBUG */
/*********************** END OF DEBUGGING DEFINITIONS ********************/

/*** Management Structure - Memory Layout
 * For every MMU table in the sun3x pmap system there must be a way to
 * manage it; we must know which process is using it, what other tables
 * depend on it, and whether or not it contains any locked pages.  This
 * is solved by the creation of 'table management' or 'tmgr'
 * structures.  One for each MMU table in the system.
 *
 *                        MAP OF MEMORY USED BY THE PMAP SYSTEM
 *
 *                towards lower memory
 * kernAbase -> +---------------------------------------------------+
 *              | Kernel MMU A level table                          |
 * kernBbase -> +---------------------------------------------------+
 *              | Kernel MMU B level tables                         |
 * kernCbase -> +---------------------------------------------------+
 *              |                                                   |
 *              | Kernel MMU C level tables                         |
 *              |                                                   |
 * mmuCbase  -> +---------------------------------------------------+
 *              | User MMU C level tables                           |
 * mmuAbase  -> +---------------------------------------------------+
 *              |                                                   |
 *              | User MMU A level tables                           |
 *              |                                                   |
 * mmuBbase  -> +---------------------------------------------------+
 *              | User MMU B level tables                           |
 * tmgrAbase -> +---------------------------------------------------+
 *              | TMGR A level table structures                     |
 * tmgrBbase -> +---------------------------------------------------+
 *              | TMGR B level table structures                     |
 * tmgrCbase -> +---------------------------------------------------+
 *              | TMGR C level table structures                     |
 * pvbase    -> +---------------------------------------------------+
 *              | Physical to Virtual mapping table (list heads)    |
 * pvebase   -> +---------------------------------------------------+
 *              | Physical to Virtual mapping table (list elements) |
 *              |                                                   |
 *              +---------------------------------------------------+
 *                towards higher memory
 *
 * For every A table in the MMU A area, there will be a corresponding
 * a_tmgr structure in the TMGR A area.  The same will be true for
 * the B and C tables.  This arrangement will make it easy to find the
 * controlling tmgr structure for any table in the system by use of
 * (relatively) simple macros.
 */
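
/*
 * A minimal sketch of the lookup this layout enables (the real versions
 * are the mmu*2tmgr() functions below): because each MMU table pool and
 * its manager pool are parallel arrays, a table pointer converts to its
 * manager with plain pointer arithmetic, e.g. for a B table:
 *
 *	idx  = (mmuBtbl - mmuBbase) / MMU_B_TBL_SIZE;
 *	tmgr = &Btmgrbase[idx];
 */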

/*
 * Global variables for storing the base addresses for the areas
 * labeled above.
 */
static vaddr_t		kernAphys;
static mmu_long_dte_t	*kernAbase;
static mmu_short_dte_t	*kernBbase;
static mmu_short_pte_t	*kernCbase;
static mmu_short_pte_t	*mmuCbase;
static mmu_short_dte_t	*mmuBbase;
static mmu_long_dte_t	*mmuAbase;
static a_tmgr_t		*Atmgrbase;
static b_tmgr_t		*Btmgrbase;
static c_tmgr_t		*Ctmgrbase;
static pv_t		*pvbase;
static pv_elem_t	*pvebase;
struct pmap		kernel_pmap;

/*
 * This holds the CRP currently loaded into the MMU.
 */
struct mmu_rootptr kernel_crp;

/*
 * Just all around global variables.
 */
static TAILQ_HEAD(a_pool_head_struct, a_tmgr_struct) a_pool;
static TAILQ_HEAD(b_pool_head_struct, b_tmgr_struct) b_pool;
static TAILQ_HEAD(c_pool_head_struct, c_tmgr_struct) c_pool;

/*
 * Flags used to mark the safety/availability of certain operations or
 * resources.
 */
static boolean_t bootstrap_alloc_enabled = FALSE; /* Safe to use pmap_bootstrap_alloc(). */
int tmp_vpages_inuse;	/* Temporary virtual pages are in use */

/*
 * XXX:  For now, retain the traditional variables that were
 * used in the old pmap/vm interface (without NONCONTIG).
 */
/* Kernel virtual address space available: */
vaddr_t	virtual_avail, virtual_end;
/* Physical address space available: */
paddr_t	avail_start, avail_end;

/* This keeps track of the end of the contiguously mapped range. */
vaddr_t virtual_contig_end;

/* Physical address used by pmap_next_page() */
paddr_t avail_next;

/* These are used by pmap_copy_page(), etc. */
vaddr_t tmp_vpages[2];

/* memory pool for pmap structures */
struct pool	pmap_pmap_pool;

/*
 * The 3/80 is the only member of the sun3x family that has non-contiguous
 * physical memory.  Memory is divided into 4 banks which are physically
 * locatable on the system board.  Although the size of these banks varies
 * with the size of memory they contain, their base addresses are
 * permanently fixed.  The following structure, which describes these
 * banks, is initialized by pmap_bootstrap() after it reads from a similar
 * structure provided by the ROM Monitor.
 *
 * For the other machines in the sun3x architecture which do have contiguous
 * RAM, this list will have only one entry, which will describe the entire
 * range of available memory.
 */
struct pmap_physmem_struct avail_mem[SUN3X_NPHYS_RAM_SEGS];
u_int total_phys_mem;

/*************************************************************************/

/*
 * XXX - Should "tune" these based on statistics.
 *
 * My first guess about the relative numbers of these needed is
 * based on the fact that a "typical" process will have several
 * pages mapped at low virtual addresses (text, data, bss), then
 * some mapped shared libraries, and then some stack pages mapped
 * near the high end of the VA space.  Each process can use only
 * one A table, and most will use only two B tables (maybe three)
 * and probably about four C tables.  Therefore, the first guess
 * at the relative numbers of these needed is 1:2:4 -gwr
 *
 * The number of C tables needed is closely related to the amount
 * of physical memory available plus a certain amount attributable
 * to the use of double mappings.  With a few simulation statistics
 * we can find a reasonably good estimation of this unknown value.
 * Armed with that and the above ratios, we have a good idea of what
 * is needed at each level. -j
 *
 * Note: It is not physical memory size, but the total mapped
 * virtual space required by the combined working sets of all the
 * currently _runnable_ processes.  (Sleeping ones don't count.)
 * The amount of physical memory should be irrelevant. -gwr
 */
#ifdef	FIXED_NTABLES
#define NUM_A_TABLES	16
#define NUM_B_TABLES	32
#define NUM_C_TABLES	64
#else
unsigned int	NUM_A_TABLES, NUM_B_TABLES, NUM_C_TABLES;
#endif	/* FIXED_NTABLES */

/*
 * This determines our total virtual mapping capacity.
 * Yes, it is a FIXED value so we can pre-allocate.
 */
#define NUM_USER_PTES	(NUM_C_TABLES * MMU_C_TBL_SIZE)

/*
 * The size of the Kernel Virtual Address Space (KVAS)
 * for purposes of MMU table allocation is -KERNBASE
 * (length from KERNBASE to 0xFFFFffff)
 */
#define KVAS_SIZE	(-KERNBASE)

/* Numbers of kernel MMU tables to support KVAS_SIZE. */
#define KERN_B_TABLES	(KVAS_SIZE >> MMU_TIA_SHIFT)
#define KERN_C_TABLES	(KVAS_SIZE >> MMU_TIB_SHIFT)
#define	NUM_KERN_PTES	(KVAS_SIZE >> MMU_TIC_SHIFT)
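
/*
 * Worked example (sketch; the actual values come from the MMU_TI*_SHIFT
 * constants in pte.h): assuming KERNBASE is 0xF8000000 and the shifts
 * are 25/19/13 to match the 7/6/6/13-bit VA split, KVAS_SIZE is
 * 0x08000000 (128MB) and the macros above evaluate to:
 *
 *	KERN_B_TABLES = 0x08000000 >> 25 = 4
 *	KERN_C_TABLES = 0x08000000 >> 19 = 256
 *	NUM_KERN_PTES = 0x08000000 >> 13 = 16384
 */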

/*************************** MISCELLANEOUS MACROS *************************/
#define pmap_lock(pmap)		simple_lock(&pmap->pm_lock)
#define pmap_unlock(pmap)	simple_unlock(&pmap->pm_lock)
#define pmap_add_ref(pmap)	++pmap->pm_refcount
#define pmap_del_ref(pmap)	--pmap->pm_refcount
#define pmap_refcount(pmap)	pmap->pm_refcount

void *pmap_bootstrap_alloc(int);

static INLINE void *mmu_ptov __P((paddr_t));
static INLINE paddr_t mmu_vtop __P((void *));

#if	0
static INLINE a_tmgr_t *mmuA2tmgr __P((mmu_long_dte_t *));
#endif /* 0 */
static INLINE b_tmgr_t *mmuB2tmgr __P((mmu_short_dte_t *));
static INLINE c_tmgr_t *mmuC2tmgr __P((mmu_short_pte_t *));

static INLINE pv_t *pa2pv __P((paddr_t));
static INLINE int  pteidx __P((mmu_short_pte_t *));
static INLINE pmap_t current_pmap __P((void));

/*
 * We can always convert between virtual and physical addresses
 * for anything in the range [KERNBASE ... avail_start] because
 * that range is GUARANTEED to be mapped linearly.
 * We rely heavily upon this feature!
 */
static INLINE void *
mmu_ptov(pa)
	paddr_t pa;
{
	vaddr_t va;

	va = (pa + KERNBASE);
#ifdef	PMAP_DEBUG
	if ((va < KERNBASE) || (va >= virtual_contig_end))
		panic("mmu_ptov");
#endif
	return ((void *)va);
}

static INLINE paddr_t
mmu_vtop(vva)
	void *vva;
{
	vaddr_t va;

	va = (vaddr_t)vva;
#ifdef	PMAP_DEBUG
	if ((va < KERNBASE) || (va >= virtual_contig_end))
		panic("mmu_vtop");
#endif
	return (va - KERNBASE);
}

/*
 * These macros map MMU tables to their corresponding manager structures.
 * They are needed quite often because many of the pointers in the pmap
 * system reference MMU tables and not the structures that control them.
 * There needs to be a way to find one when given the other and these
 * macros do so by taking advantage of the memory layout described above.
 * Here's a quick step through the first macro, mmuA2tmgr():
 *
 * 1) find the offset of the given MMU A table from the base of its table
 *    pool (table - mmuAbase).
 * 2) convert this offset into a table index by dividing it by the
 *    size of one MMU 'A' table. (sizeof(mmu_long_dte_t) * MMU_A_TBL_SIZE)
 * 3) use this index to select the corresponding 'A' table manager
 *    structure from the 'A' table manager pool (Atmgrbase[index]).
 */
/* This function is not currently used. */
#if	0
static INLINE a_tmgr_t *
mmuA2tmgr(mmuAtbl)
	mmu_long_dte_t *mmuAtbl;
{
	int idx;

	/* Which table is this in? */
	idx = (mmuAtbl - mmuAbase) / MMU_A_TBL_SIZE;
#ifdef	PMAP_DEBUG
	if ((idx < 0) || (idx >= NUM_A_TABLES))
		panic("mmuA2tmgr");
#endif
	return (&Atmgrbase[idx]);
}
#endif	/* 0 */

static INLINE b_tmgr_t *
mmuB2tmgr(mmuBtbl)
	mmu_short_dte_t *mmuBtbl;
{
	int idx;

	/* Which table is this in? */
	idx = (mmuBtbl - mmuBbase) / MMU_B_TBL_SIZE;
#ifdef	PMAP_DEBUG
	if ((idx < 0) || (idx >= NUM_B_TABLES))
		panic("mmuB2tmgr");
#endif
	return (&Btmgrbase[idx]);
}

/* mmuC2tmgr			INTERNAL
 **
 * Given a pte known to belong to a C table, return the address of
 * that table's management structure.
 */
static INLINE c_tmgr_t *
mmuC2tmgr(mmuCtbl)
	mmu_short_pte_t *mmuCtbl;
{
	int idx;

	/* Which table is this in? */
	idx = (mmuCtbl - mmuCbase) / MMU_C_TBL_SIZE;
#ifdef	PMAP_DEBUG
	if ((idx < 0) || (idx >= NUM_C_TABLES))
		panic("mmuC2tmgr");
#endif
	return (&Ctmgrbase[idx]);
}

/* This is now a function call below.
 * #define pa2pv(pa) \
 *	(&pvbase[(unsigned long)\
 *		m68k_btop(pa)\
 *	])
 */

/* pa2pv			INTERNAL
 **
 * Return the pv_list_head element which manages the given physical
 * address.
 */
static INLINE pv_t *
pa2pv(pa)
	paddr_t pa;
{
	struct pmap_physmem_struct *bank;
	int idx;

	bank = &avail_mem[0];
	while (pa >= bank->pmem_end)
		bank = bank->pmem_next;

	pa -= bank->pmem_start;
	idx = bank->pmem_pvbase + m68k_btop(pa);
#ifdef	PMAP_DEBUG
	if ((idx < 0) || (idx >= physmem))
		panic("pa2pv");
#endif
	return &pvbase[idx];
}
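
/*
 * Worked example for pa2pv() (hypothetical numbers): with 8KB pages, if
 * bank 0 covers physical [0, 0x400000) with pmem_pvbase 0, then
 * pa2pv(0x6000) computes idx = 0 + m68k_btop(0x6000) = 3 and returns
 * &pvbase[3].  A pa in a later bank adds that bank's pmem_pvbase
 * instead, which is how the non-contiguous banks share one flat pv
 * array.
 */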

/* pteidx			INTERNAL
 **
 * Return the index of the given PTE within the entire fixed table of
 * PTEs.
 */
static INLINE int
pteidx(pte)
	mmu_short_pte_t *pte;
{
	return (pte - kernCbase);
}

/*
 * This just offers a place to put some debugging checks,
 * and reduces the number of places "curproc" appears...
 */
static INLINE pmap_t
current_pmap()
{
	struct proc *p;
	struct vmspace *vm;
	struct vm_map *map;
	pmap_t	pmap;

	p = curproc;	/* XXX */
	if (p == NULL)
		pmap = &kernel_pmap;
	else {
		vm = p->p_vmspace;
		map = &vm->vm_map;
		pmap = vm_map_pmap(map);
	}

	return (pmap);
}


/*************************** FUNCTION DEFINITIONS ************************
 * These appear here merely for the compiler to enforce type checking on *
 * all function calls.                                                   *
 *************************************************************************/

/** Internal functions
 ** Most functions used only within this module are defined in
 **   pmap_pvt.h (why not here if used only here?)
 **/
static void pmap_page_upload __P((void));

/** Interface functions
 ** - functions required by the Mach VM Pmap interface, with MACHINE_CONTIG
 **   defined.
 **/
void pmap_pinit __P((pmap_t));
void pmap_release __P((pmap_t));

/********************************** CODE ********************************
 * Functions that are called from other parts of the kernel are labeled *
 * as 'INTERFACE' functions.  Functions that are only called from       *
 * within the pmap module are labeled as 'INTERNAL' functions.          *
 * Functions that are internal, but are not (currently) used at all are *
 * labeled 'INTERNAL_X'.                                                *
 ************************************************************************/

/* pmap_bootstrap			INTERNAL
 **
 * Initializes the pmap system.  Called at boot time from
 * locore2.c:_vm_init()
 *
 * Reminder: having a pmap_bootstrap_alloc() and also having the VM
 * system implement pmap_steal_memory() is redundant.
 * Don't release this code without removing one or the other!
 */
void
pmap_bootstrap(nextva)
	vaddr_t nextva;
{
	struct physmemory *membank;
	struct pmap_physmem_struct *pmap_membank;
	vaddr_t va, eva;
	paddr_t pa;
	int b, c, i, j;	/* running table counts */
	int size, resvmem;

	/*
	 * This function is called by __bootstrap after it has
	 * determined the type of machine and made the appropriate
	 * patches to the ROM vectors (XXX- I don't quite know what I meant
	 * by that.)  It allocates and sets up enough of the pmap system
	 * to manage the kernel's address space.
	 */

	/*
	 * Determine the range of kernel virtual and physical
	 * space available.  Note that we ABSOLUTELY DEPEND on
	 * the fact that the first bank of memory (4MB) is
	 * mapped linearly to KERNBASE (which we guaranteed in
	 * the first instructions of locore.s).
	 * That is plenty for our bootstrap work.
	 */
	virtual_avail = m68k_round_page(nextva);
	virtual_contig_end = KERNBASE + 0x400000; /* +4MB */
	virtual_end = VM_MAX_KERNEL_ADDRESS;
	/* Don't need avail_start til later. */

	/* We may now call pmap_bootstrap_alloc(). */
	bootstrap_alloc_enabled = TRUE;

	/*
	 * This is a somewhat unwrapped loop to deal with
	 * copying the PROM's 'physmem' banks into the pmap's
	 * banks.  The following is always assumed:
	 * 1. There is always at least one bank of memory.
	 * 2. There is always a last bank of memory, and its
	 *    pmem_next member must be set to NULL.
	 */
	membank = romVectorPtr->v_physmemory;
	pmap_membank = avail_mem;
	total_phys_mem = 0;

	for (;;) { /* break on !membank */
		pmap_membank->pmem_start = membank->address;
		pmap_membank->pmem_end = membank->address + membank->size;
		total_phys_mem += membank->size;
		membank = membank->next;
		if (!membank)
			break;
		/* This silly syntax arises because pmap_membank
		 * is really a pre-allocated array, but it is put into
		 * use as a linked list.
		 */
		pmap_membank->pmem_next = pmap_membank + 1;
		pmap_membank = pmap_membank->pmem_next;
	}
	/* This is the last element. */
	pmap_membank->pmem_next = NULL;

	/*
	 * Note: total_phys_mem, physmem represent
	 * actual physical memory, including that
	 * reserved for the PROM monitor.
	 */
	physmem = btoc(total_phys_mem);

	/*
	 * Avail_end is set to the first byte of physical memory
	 * after the end of the last bank.  We use this only to
	 * determine if a physical address is "managed" memory.
	 * This address range should be reduced to prevent the
	 * physical pages needed by the PROM monitor from being used
	 * in the VM system.
	 */
	resvmem = total_phys_mem - *(romVectorPtr->memoryAvail);
	resvmem = m68k_round_page(resvmem);
	avail_end = pmap_membank->pmem_end - resvmem;

	/*
	 * First allocate enough kernel MMU tables to map all
	 * of kernel virtual space from KERNBASE to 0xFFFFFFFF.
	 * Note: All must be aligned on 256 byte boundaries.
	 * Start with the level-A table (one of those).
	 */
	size = sizeof(mmu_long_dte_t) * MMU_A_TBL_SIZE;
	kernAbase = pmap_bootstrap_alloc(size);
	memset(kernAbase, 0, size);

	/* Now the level-B kernel tables... */
	size = sizeof(mmu_short_dte_t) * MMU_B_TBL_SIZE * KERN_B_TABLES;
	kernBbase = pmap_bootstrap_alloc(size);
	memset(kernBbase, 0, size);

	/* Now the level-C kernel tables... */
	size = sizeof(mmu_short_pte_t) * MMU_C_TBL_SIZE * KERN_C_TABLES;
	kernCbase = pmap_bootstrap_alloc(size);
	memset(kernCbase, 0, size);
	/*
	 * Note: In order for the PV system to work correctly, the kernel
	 * and user-level C tables must be allocated contiguously.
	 * Nothing should be allocated between here and the allocation of
	 * mmuCbase below.  XXX: Should do this as one allocation, and
	 * then compute a pointer for mmuCbase instead of this...
	 *
	 * Allocate user MMU tables.
	 * These must be contiguous with the preceding.
	 */

#ifndef	FIXED_NTABLES
	/*
	 * The number of user-level C tables that should be allocated is
	 * related to the size of physical memory.  In general, there should
	 * be enough tables to map four times the amount of available RAM.
	 * The extra amount is needed because some table space is wasted by
	 * fragmentation.
	 */
	NUM_C_TABLES = (total_phys_mem * 4) / (MMU_C_TBL_SIZE * MMU_PAGE_SIZE);
	NUM_B_TABLES = NUM_C_TABLES / 2;
	NUM_A_TABLES = NUM_B_TABLES / 2;
#endif	/* !FIXED_NTABLES */
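
	/*
	 * Worked example of the sizing above (hypothetical machine):
	 * with 32MB of RAM, 8KB pages and a MMU_C_TBL_SIZE of 64, one
	 * C table spans 64 * 8KB = 512KB, so NUM_C_TABLES =
	 * (32MB * 4) / 512KB = 256, and the ratios give
	 * NUM_B_TABLES = 128 and NUM_A_TABLES = 64.
	 */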

	size = sizeof(mmu_short_pte_t) * MMU_C_TBL_SIZE * NUM_C_TABLES;
	mmuCbase = pmap_bootstrap_alloc(size);

	size = sizeof(mmu_short_dte_t) * MMU_B_TBL_SIZE * NUM_B_TABLES;
	mmuBbase = pmap_bootstrap_alloc(size);

	size = sizeof(mmu_long_dte_t) * MMU_A_TBL_SIZE * NUM_A_TABLES;
	mmuAbase = pmap_bootstrap_alloc(size);

	/*
	 * Fill in the never-changing part of the kernel tables.
	 * For simplicity, the kernel's mappings will be editable as a
	 * flat array of page table entries at kernCbase.  The
	 * higher level 'A' and 'B' tables must be initialized to point
	 * to this lower one.
	 */
	b = c = 0;

	/*
	 * Invalidate all mappings below KERNBASE in the A table.
	 * This area has already been zeroed out, but it is good
	 * practice to explicitly show that we are interpreting
	 * it as a list of A table descriptors.
	 */
	for (i = 0; i < MMU_TIA(KERNBASE); i++) {
		kernAbase[i].addr.raw = 0;
	}

	/*
	 * Set up the kernel A and B tables so that they will reference the
	 * correct spots in the contiguous table of PTEs allocated for the
	 * kernel's virtual memory space.
	 */
	for (i = MMU_TIA(KERNBASE); i < MMU_A_TBL_SIZE; i++) {
		kernAbase[i].attr.raw =
		    MMU_LONG_DTE_LU | MMU_LONG_DTE_SUPV | MMU_DT_SHORT;
		kernAbase[i].addr.raw = mmu_vtop(&kernBbase[b]);

		for (j = 0; j < MMU_B_TBL_SIZE; j++) {
			kernBbase[b + j].attr.raw =
			    mmu_vtop(&kernCbase[c]) | MMU_DT_SHORT;
			c += MMU_C_TBL_SIZE;
		}
		b += MMU_B_TBL_SIZE;
	}

	pmap_alloc_usermmu();	/* Allocate user MMU tables.		*/
	pmap_alloc_usertmgr();	/* Allocate user MMU table managers.	*/
	pmap_alloc_pv();	/* Allocate physical->virtual map.	*/

	/*
	 * We are now done with pmap_bootstrap_alloc().  Round up
	 * `virtual_avail' to the nearest page, and set the flag
	 * to prevent use of pmap_bootstrap_alloc() hereafter.
	 */
	pmap_bootstrap_aalign(NBPG);
	bootstrap_alloc_enabled = FALSE;

	/*
	 * Now that we are done with pmap_bootstrap_alloc(), we
	 * must save the virtual and physical addresses of the
	 * end of the linearly mapped range, which are stored in
	 * virtual_contig_end and avail_start, respectively.
	 * These variables will never change after this point.
	 */
	virtual_contig_end = virtual_avail;
	avail_start = virtual_avail - KERNBASE;

	/*
	 * `avail_next' is a running pointer used by pmap_next_page() to
	 * keep track of the next available physical page to be handed
	 * to the VM system during its initialization, in which it
	 * asks for physical pages, one at a time.
	 */
	avail_next = avail_start;

	/*
	 * Now allocate some virtual addresses, but not the physical pages
	 * behind them.  Note that virtual_avail is already page-aligned.
	 *
	 * tmp_vpages[] is an array of two virtual pages used for temporary
	 * kernel mappings in the pmap module to facilitate various physical
	 * address-oriented operations.
	 */
	tmp_vpages[0] = virtual_avail;
	virtual_avail += NBPG;
	tmp_vpages[1] = virtual_avail;
	virtual_avail += NBPG;

	/** Initialize the PV system **/
	pmap_init_pv();

	/*
	 * Fill in the kernel_pmap structure and kernel_crp.
	 */
	kernAphys = mmu_vtop(kernAbase);
	kernel_pmap.pm_a_tmgr = NULL;
	kernel_pmap.pm_a_phys = kernAphys;
	kernel_pmap.pm_refcount = 1; /* always in use */
	simple_lock_init(&kernel_pmap.pm_lock);

	kernel_crp.rp_attr = MMU_LONG_DTE_LU | MMU_DT_LONG;
	kernel_crp.rp_addr = kernAphys;

	/*
	 * Now pmap_enter_kernel() may be used safely and will be
	 * the main interface used hereafter to modify the kernel's
	 * virtual address space.  Note that since we are still running
	 * under the PROM's address table, none of these table modifications
	 * actually take effect until pmap_takeover_mmu() is called.
	 *
	 * Note: Our tables do NOT have the PROM linear mappings!
	 * Only the mappings created here exist in our tables, so
	 * remember to map anything we expect to use.
	 */
	va = (vaddr_t)KERNBASE;
	pa = 0;

	/*
	 * The first page of the kernel virtual address space is the msgbuf
	 * page.  The page attributes (data, non-cached) are set here, while
	 * the address is assigned to this global pointer in cpu_startup().
	 * It is non-cached, mostly due to paranoia.
	 */
	pmap_enter_kernel(va, pa|PMAP_NC, VM_PROT_ALL);
	va += NBPG; pa += NBPG;

	/* Next page is used as the temporary stack. */
	pmap_enter_kernel(va, pa, VM_PROT_ALL);
	va += NBPG; pa += NBPG;

	/*
	 * Map all of the kernel's text segment as read-only and cacheable.
	 * (Cacheable is implied by default).  Unfortunately, the last bytes
	 * of kernel text and the first bytes of kernel data will often be
	 * sharing the same page.  Therefore, the last page of kernel text
	 * has to be mapped as read/write, to accommodate the data.
	 */
	eva = m68k_trunc_page((vaddr_t)etext);
	for (; va < eva; va += NBPG, pa += NBPG)
		pmap_enter_kernel(va, pa, VM_PROT_READ|VM_PROT_EXECUTE);

	/*
	 * Map all of the kernel's data as read/write and cacheable.
	 * This includes: data, BSS, symbols, and everything in the
	 * contiguous memory used by pmap_bootstrap_alloc()
	 */
	for (; pa < avail_start; va += NBPG, pa += NBPG)
		pmap_enter_kernel(va, pa, VM_PROT_READ|VM_PROT_WRITE);

	/*
	 * At this point we are almost ready to take over the MMU.  But first
	 * we must save the PROM's address space in our map, as we call its
	 * routines and make references to its data later in the kernel.
	 */
	pmap_bootstrap_copyprom();
	pmap_takeover_mmu();
	pmap_bootstrap_setprom();

	/* Notify the VM system of our page size. */
	uvmexp.pagesize = NBPG;
	uvm_setpagesize();

	pmap_page_upload();
}


/* pmap_alloc_usermmu			INTERNAL
 **
 * Called from pmap_bootstrap() to allocate MMU tables that will
 * eventually be used for user mappings.
 */
void
pmap_alloc_usermmu()
{
	/* XXX: Moved into caller. */
}

/* pmap_alloc_pv			INTERNAL
 **
 * Called from pmap_bootstrap() to allocate the physical
 * to virtual mapping list.  Each physical page of memory
 * in the system has a corresponding element in this list.
 */
void
pmap_alloc_pv()
{
	int i;
	unsigned int total_mem;

	/*
	 * Allocate a pv_head structure for every page of physical
	 * memory that will be managed by the system.  Since memory on
	 * the 3/80 is non-contiguous, we cannot arrive at a total page
	 * count by subtraction of the lowest available address from the
	 * highest, but rather we have to step through each memory
	 * bank and add the number of pages in each to the total.
	 *
	 * At this time we also initialize the offset of each bank's
	 * starting pv_head within the pv_head list so that the physical
	 * memory state routines (pmap_is_referenced(),
	 * pmap_is_modified(), et al.) can quickly find corresponding
	 * pv_heads in spite of the non-contiguity.
	 */
	total_mem = 0;
	for (i = 0; i < SUN3X_NPHYS_RAM_SEGS; i++) {
		avail_mem[i].pmem_pvbase = m68k_btop(total_mem);
		total_mem += avail_mem[i].pmem_end -
		    avail_mem[i].pmem_start;
		if (avail_mem[i].pmem_next == NULL)
			break;
	}
	pvbase = (pv_t *)pmap_bootstrap_alloc(sizeof(pv_t) *
	    m68k_btop(total_phys_mem));
}

/* pmap_alloc_usertmgr			INTERNAL
 **
 * Called from pmap_bootstrap() to allocate the structures which
 * facilitate management of user MMU tables.  Each user MMU table
 * in the system has one such structure associated with it.
 */
void
pmap_alloc_usertmgr()
{
	/* Allocate user MMU table managers */
	/* It would be a lot simpler to just make these BSS, but */
	/* we may want to change their size at boot time... -j */
	Atmgrbase = (a_tmgr_t *)pmap_bootstrap_alloc(sizeof(a_tmgr_t)
	    * NUM_A_TABLES);
	Btmgrbase = (b_tmgr_t *)pmap_bootstrap_alloc(sizeof(b_tmgr_t)
	    * NUM_B_TABLES);
	Ctmgrbase = (c_tmgr_t *)pmap_bootstrap_alloc(sizeof(c_tmgr_t)
	    * NUM_C_TABLES);

	/*
	 * Allocate PV list elements for the physical to virtual
	 * mapping system.
	 */
	pvebase = (pv_elem_t *)pmap_bootstrap_alloc(
	    sizeof(pv_elem_t) * (NUM_USER_PTES + NUM_KERN_PTES));
}

/* pmap_bootstrap_copyprom()			INTERNAL
 **
 * Copy the PROM mappings into our own tables.  Note, we
 * can use physical addresses until __bootstrap returns.
 */
void
pmap_bootstrap_copyprom()
{
	struct sunromvec *romp;
	int *mon_ctbl;
	mmu_short_pte_t *kpte;
	int i, len;

	romp = romVectorPtr;

	/*
	 * Copy the mappings in SUN3X_MON_KDB_BASE...SUN3X_MONEND
	 * Note: mon_ctbl[0] maps SUN3X_MON_KDB_BASE
	 */
	mon_ctbl = *romp->monptaddr;
	i = m68k_btop(SUN3X_MON_KDB_BASE - KERNBASE);
	kpte = &kernCbase[i];
	len = m68k_btop(SUN3X_MONEND - SUN3X_MON_KDB_BASE);

	for (i = 0; i < len; i++) {
		kpte[i].attr.raw = mon_ctbl[i];
	}

	/*
	 * Copy the mappings at MON_DVMA_BASE (to the end).
	 * Note, in here, mon_ctbl[0] maps MON_DVMA_BASE.
	 * Actually, we only want the last page, which the
	 * PROM has set up for use by the "ie" driver.
	 * (The i82586 needs its SCP there.)
	 * If we copy all the mappings, pmap_enter_kernel
	 * may complain about finding valid PTEs that are
	 * not recorded in our PV lists...
	 */
	mon_ctbl = *romp->shadowpteaddr;
	i = m68k_btop(SUN3X_MON_DVMA_BASE - KERNBASE);
	kpte = &kernCbase[i];
	len = m68k_btop(SUN3X_MON_DVMA_SIZE);
	for (i = (len - 1); i < len; i++) {
		kpte[i].attr.raw = mon_ctbl[i];
	}
}

/* pmap_takeover_mmu			INTERNAL
 **
 * Called from pmap_bootstrap() after it has copied enough of the
 * PROM mappings into the kernel map so that we can use our own
 * MMU table.
 */
void
pmap_takeover_mmu()
{

	loadcrp(&kernel_crp);
}

/* pmap_bootstrap_setprom()			INTERNAL
 **
 * Set the PROM mappings so it can see kernel space.
 * Note that physical addresses are used here, which
 * we can get away with because this runs with the
 * low 1GB set for transparent translation.
 */
void
pmap_bootstrap_setprom()
{
	mmu_long_dte_t *mon_dte;
	extern struct mmu_rootptr mon_crp;
	int i;

	mon_dte = (mmu_long_dte_t *)mon_crp.rp_addr;
	for (i = MMU_TIA(KERNBASE); i < MMU_TIA(KERN_END); i++) {
		mon_dte[i].attr.raw = kernAbase[i].attr.raw;
		mon_dte[i].addr.raw = kernAbase[i].addr.raw;
	}
}


/* pmap_init			INTERFACE
 **
 * Called at the end of vm_init() to set up the pmap system to go
 * into full time operation.  All initialization of kernel_pmap
 * should be already done by now, so this should just do things
 * needed for user-level pmaps to work.
 */
void
pmap_init()
{
	/** Initialize the manager pools **/
	TAILQ_INIT(&a_pool);
	TAILQ_INIT(&b_pool);
	TAILQ_INIT(&c_pool);

	/**************************************************************
	 * Initialize all tmgr structures and MMU tables they manage. *
	 **************************************************************/
	/** Initialize A tables **/
	pmap_init_a_tables();
	/** Initialize B tables **/
	pmap_init_b_tables();
	/** Initialize C tables **/
	pmap_init_c_tables();

	/** Initialize the pmap pools **/
	pool_init(&pmap_pmap_pool, sizeof(struct pmap), 0, 0, 0, "pmappl",
	    0, pool_page_alloc_nointr, pool_page_free_nointr, M_VMPMAP);
}

/* pmap_init_a_tables()			INTERNAL
 **
 * Initializes all A managers, their MMU A tables, and inserts
 * them into the A manager pool for use by the system.
 */
void
pmap_init_a_tables()
{
	int i;
	a_tmgr_t *a_tbl;

	for (i = 0; i < NUM_A_TABLES; i++) {
		/* Select the next available A manager from the pool */
		a_tbl = &Atmgrbase[i];

		/*
		 * Clear its parent entry.  Set its wired and valid
		 * entry count to zero.
		 */
		a_tbl->at_parent = NULL;
		a_tbl->at_wcnt = a_tbl->at_ecnt = 0;

		/* Assign it the next available MMU A table from the pool */
		a_tbl->at_dtbl = &mmuAbase[i * MMU_A_TBL_SIZE];

		/*
		 * Initialize the MMU A table with the table in the `proc0',
		 * or kernel, mapping.  This ensures that every process has
		 * the kernel mapped in the top part of its address space.
		 */
		memcpy(a_tbl->at_dtbl, kernAbase,
		    MMU_A_TBL_SIZE * sizeof(mmu_long_dte_t));

		/*
		 * Finally, insert the manager into the A pool,
		 * making it ready to be used by the system.
		 */
		TAILQ_INSERT_TAIL(&a_pool, a_tbl, at_link);
	}
}

/* pmap_init_b_tables()			INTERNAL
 **
 * Initializes all B table managers, their MMU B tables, and
 * inserts them into the B manager pool for use by the system.
 */
void
pmap_init_b_tables()
{
	int i, j;
	b_tmgr_t *b_tbl;

	for (i = 0; i < NUM_B_TABLES; i++) {
		/* Select the next available B manager from the pool */
		b_tbl = &Btmgrbase[i];

		b_tbl->bt_parent = NULL;	/* clear its parent,  */
		b_tbl->bt_pidx = 0;		/* parent index,      */
		b_tbl->bt_wcnt = 0;		/* wired entry count, */
		b_tbl->bt_ecnt = 0;		/* valid entry count. */

		/* Assign it the next available MMU B table from the pool */
		b_tbl->bt_dtbl = &mmuBbase[i * MMU_B_TBL_SIZE];

		/* Invalidate every descriptor in the table */
		for (j = 0; j < MMU_B_TBL_SIZE; j++)
			b_tbl->bt_dtbl[j].attr.raw = MMU_DT_INVALID;

		/* Insert the manager into the B pool */
		TAILQ_INSERT_TAIL(&b_pool, b_tbl, bt_link);
	}
}

/* pmap_init_c_tables()			INTERNAL
 **
 * Initializes all C table managers, their MMU C tables, and
 * inserts them into the C manager pool for use by the system.
 */
void
pmap_init_c_tables()
{
	int i, j;
	c_tmgr_t *c_tbl;

	for (i = 0; i < NUM_C_TABLES; i++) {
		/* Select the next available C manager from the pool */
		c_tbl = &Ctmgrbase[i];

		c_tbl->ct_parent = NULL;	/* clear its parent,	*/
		c_tbl->ct_pidx = 0;		/* parent index,	*/
		c_tbl->ct_wcnt = 0;		/* wired entry count,	*/
		c_tbl->ct_ecnt = 0;		/* valid entry count,	*/
		c_tbl->ct_pmap = NULL;		/* parent pmap,		*/
		c_tbl->ct_va = 0;		/* base of managed range */

		/* Assign it the next available MMU C table from the pool */
		c_tbl->ct_dtbl = &mmuCbase[i * MMU_C_TBL_SIZE];

		for (j = 0; j < MMU_C_TBL_SIZE; j++)
			c_tbl->ct_dtbl[j].attr.raw = MMU_DT_INVALID;

		TAILQ_INSERT_TAIL(&c_pool, c_tbl, ct_link);
	}
}

/* pmap_init_pv()			INTERNAL
 **
 * Initializes the Physical to Virtual mapping system.
 */
void
pmap_init_pv()
{
	int i;

	/* Initialize every PV head. */
	for (i = 0; i < m68k_btop(total_phys_mem); i++) {
		pvbase[i].pv_idx = PVE_EOL;	/* Indicate no mappings */
		pvbase[i].pv_flags = 0;		/* Zero out page flags  */
	}
}

/* get_a_table			INTERNAL
 **
 * Retrieve and return a level A table for use in a user map.
 */
a_tmgr_t *
get_a_table()
{
	a_tmgr_t *tbl;
	pmap_t pmap;

	/* Get the top A table in the pool */
	tbl = a_pool.tqh_first;
	if (tbl == NULL) {
		/*
		 * XXX - Instead of panicking here and in other get_x_table
		 * functions, we do have the option of sleeping on the head of
		 * the table pool.  Any function which updates the table pool
		 * would then issue a wakeup() on the head, thus waking up any
		 * processes waiting for a table.
		 *
		 * Actually, the place to sleep would be when some process
		 * asks for a "wired" mapping that would run us short of
		 * mapping resources.  This design DEPENDS on always having
		 * some mapping resources in the pool for stealing, so we
		 * must make sure we NEVER let the pool become empty. -gwr
		 */
		panic("get_a_table: out of A tables.");
	}

	TAILQ_REMOVE(&a_pool, tbl, at_link);
	/*
	 * If the table has a non-null parent pointer then it is in use.
	 * Forcibly abduct it from its parent and clear its entries.
	 * No re-entrancy worries here.  This table would not be in the
	 * table pool unless it was available for use.
	 *
	 * Note that the second argument to free_a_table() is FALSE.  This
	 * indicates that the table should not be relinked into the A table
	 * pool.  That is a job for the function that called us.
	 */
	if (tbl->at_parent) {
		pmap = tbl->at_parent;
		free_a_table(tbl, FALSE);
		pmap->pm_a_tmgr = NULL;
		pmap->pm_a_phys = kernAphys;
	}
	return tbl;
}

/* get_b_table			INTERNAL
 **
 * Return a level B table for use.
 */
b_tmgr_t *
get_b_table()
{
	b_tmgr_t *tbl;

	/* See 'get_a_table' for comments. */
	tbl = b_pool.tqh_first;
	if (tbl == NULL)
		panic("get_b_table: out of B tables.");
	TAILQ_REMOVE(&b_pool, tbl, bt_link);
	if (tbl->bt_parent) {
		tbl->bt_parent->at_dtbl[tbl->bt_pidx].attr.raw = MMU_DT_INVALID;
		tbl->bt_parent->at_ecnt--;
		free_b_table(tbl, FALSE);
	}
	return tbl;
}

/* get_c_table			INTERNAL
 **
 * Return a level C table for use.
 */
c_tmgr_t *
get_c_table()
{
	c_tmgr_t *tbl;

	/* See 'get_a_table' for comments */
	tbl = c_pool.tqh_first;
	if (tbl == NULL)
		panic("get_c_table: out of C tables.");
	TAILQ_REMOVE(&c_pool, tbl, ct_link);
	if (tbl->ct_parent) {
		tbl->ct_parent->bt_dtbl[tbl->ct_pidx].attr.raw = MMU_DT_INVALID;
		tbl->ct_parent->bt_ecnt--;
		free_c_table(tbl, FALSE);
	}
	return tbl;
}

/*
 * The following 'free_table' and 'steal_table' functions are called to
 * detach tables from their current obligations (parents and children) and
 * prepare them for reuse in another mapping.
 *
 * Free_table is used when the calling function will handle the fate
 * of the parent table, such as returning it to the free pool when it has
 * no valid entries.  Functions that do not want to handle this should
 * call steal_table, in which the parent table's descriptors and entry
 * count are automatically modified when this table is removed.
 */

/* free_a_table			INTERNAL
 **
 * Unmaps the given A table and all child tables from their current
 * mappings.  Returns the number of pages that were invalidated.
 * If 'relink' is true, the function will return the table to the head
 * of the available table pool.
 *
 * Cache note: The MC68851 will automatically flush all
 * descriptors derived from a given A table from its
 * Automatic Translation Cache (ATC) if we issue a
 * 'PFLUSHR' instruction with the base address of the
 * table.  This function should do so, and does.
 * Note note: We are using an MC68030 - there is no
 * PFLUSHR.
 */
int
free_a_table(a_tbl, relink)
	a_tmgr_t *a_tbl;
	boolean_t relink;
{
	int i, removed_cnt;
	mmu_long_dte_t	*dte;
	mmu_short_dte_t *dtbl;
	b_tmgr_t	*tmgr;

	/*
	 * Flush the ATC cache of all cached descriptors derived
	 * from this table.
	 * Sun3x does not use 68851's cached table feature
	 * flush_atc_crp(mmu_vtop(a_tbl->dte));
	 */

	/*
	 * Remove any pending cache flushes that were designated
	 * for the pmap this A table belongs to.
	 * a_tbl->parent->atc_flushq[0] = 0;
	 * Not implemented in sun3x.
	 */

	/*
	 * All A tables in the system should retain a map for the
	 * kernel.  If the table contains any valid descriptors
	 * (other than those for the kernel area), invalidate them all,
	 * stopping short of the kernel's entries.
	 */
	removed_cnt = 0;
	if (a_tbl->at_ecnt) {
		dte = a_tbl->at_dtbl;
		for (i = 0; i < MMU_TIA(KERNBASE); i++) {
			/*
			 * If a table entry points to a valid B table, free
			 * it and its children.
			 */
			if (MMU_VALID_DT(dte[i])) {
				/*
				 * The following block does several things,
				 * from innermost expression to the
				 * outermost:
				 * 1) It extracts the base address of the
				 *    B table pointed to in the A table
				 *    entry dte[i].
				 * 2) It converts this base address into
				 *    the virtual address it can be
				 *    accessed with. (all MMU tables point
				 *    to physical addresses.)
				 * 3) It finds the corresponding manager
				 *    structure which manages this MMU table.
				 * 4) It frees the manager structure.
				 *    (This frees the MMU table and all
				 *    child tables. See 'free_b_table' for
				 *    details.)
				 */
				dtbl = mmu_ptov(dte[i].addr.raw);
				tmgr = mmuB2tmgr(dtbl);
				removed_cnt += free_b_table(tmgr, TRUE);
				dte[i].attr.raw = MMU_DT_INVALID;
			}
		}
		a_tbl->at_ecnt = 0;
	}
	if (relink) {
		a_tbl->at_parent = NULL;
		TAILQ_REMOVE(&a_pool, a_tbl, at_link);
		TAILQ_INSERT_HEAD(&a_pool, a_tbl, at_link);
	}
	return removed_cnt;
}

/* free_b_table			INTERNAL
 **
 * Unmaps the given B table and all its children from their current
 * mappings.  Returns the number of pages that were invalidated.
 * (For comments, see 'free_a_table()').
 */
int
free_b_table(b_tbl, relink)
	b_tmgr_t *b_tbl;
	boolean_t relink;
{
	int i, removed_cnt;
	mmu_short_dte_t *dte;
	mmu_short_pte_t	*dtbl;
	c_tmgr_t	*tmgr;

	removed_cnt = 0;
	if (b_tbl->bt_ecnt) {
		dte = b_tbl->bt_dtbl;
		for (i = 0; i < MMU_B_TBL_SIZE; i++) {
			if (MMU_VALID_DT(dte[i])) {
				dtbl = mmu_ptov(MMU_DTE_PA(dte[i]));
				tmgr = mmuC2tmgr(dtbl);
				removed_cnt += free_c_table(tmgr, TRUE);
				dte[i].attr.raw = MMU_DT_INVALID;
			}
		}
		b_tbl->bt_ecnt = 0;
	}

	if (relink) {
		b_tbl->bt_parent = NULL;
		TAILQ_REMOVE(&b_pool, b_tbl, bt_link);
		TAILQ_INSERT_HEAD(&b_pool, b_tbl, bt_link);
	}
	return removed_cnt;
}

/* free_c_table			INTERNAL
 **
 * Unmaps the given C table from use and returns it to the pool for
 * re-use.  Returns the number of pages that were invalidated.
 *
 * This function preserves any physical page modification information
 * contained in the page descriptors within the C table by calling
 * 'pmap_remove_pte().'
 */
int
free_c_table(c_tbl, relink)
	c_tmgr_t *c_tbl;
	boolean_t relink;
{
	int i, removed_cnt;

	removed_cnt = 0;
	if (c_tbl->ct_ecnt) {
		for (i = 0; i < MMU_C_TBL_SIZE; i++) {
			if (MMU_VALID_DT(c_tbl->ct_dtbl[i])) {
				pmap_remove_pte(&c_tbl->ct_dtbl[i]);
				removed_cnt++;
			}
		}
		c_tbl->ct_ecnt = 0;
	}

	if (relink) {
		c_tbl->ct_parent = NULL;
		TAILQ_REMOVE(&c_pool, c_tbl, ct_link);
		TAILQ_INSERT_HEAD(&c_pool, c_tbl, ct_link);
	}
	return removed_cnt;
}


/* pmap_remove_pte			INTERNAL
 **
 * Unmap the given pte and preserve any page modification
 * information by transferring it to the pv head of the
 * physical page it maps to.  This function does not update
 * any reference counts because it is assumed that the calling
 * function will do so.
 */
void
pmap_remove_pte(pte)
	mmu_short_pte_t *pte;
{
	u_short pv_idx, targ_idx;
	paddr_t pa;
	pv_t *pv;

	pa = MMU_PTE_PA(*pte);
	if (is_managed(pa)) {
		pv = pa2pv(pa);
		targ_idx = pteidx(pte);	/* Index of PTE being removed    */

		/*
		 * If the PTE being removed is the first (or only) PTE in
		 * the list of PTEs currently mapped to this page, remove the
		 * PTE by changing the index found on the PV head.  Otherwise
		 * a linear search through the list will have to be executed
		 * in order to find the PVE which points to the PTE being
		 * removed, so that it may be modified to point to its new
		 * neighbor.
		 */

		pv_idx = pv->pv_idx;	/* Index of first PTE in PV list */
		if (pv_idx == targ_idx) {
			pv->pv_idx = pvebase[targ_idx].pve_next;
		} else {

			/*
			 * Find the PV element pointing to the target
			 * element.  Note: may have pv_idx==PVE_EOL
			 */

			for (;;) {
				if (pv_idx == PVE_EOL) {
					goto pv_not_found;
				}
				if (pvebase[pv_idx].pve_next == targ_idx)
					break;
				pv_idx = pvebase[pv_idx].pve_next;
			}

			/*
			 * At this point, pv_idx is the index of the PV
			 * element just before the target element in the list.
			 * Unlink the target.
			 */

			pvebase[pv_idx].pve_next = pvebase[targ_idx].pve_next;
		}

		/*
		 * Save the mod/ref bits of the pte by simply
		 * ORing the entire pte onto the pv_flags member
		 * of the pv structure.
		 * There is no need to use a separate bit pattern
		 * for usage information on the pv head than that
		 * which is used on the MMU ptes.
		 */

pv_not_found:
		pv->pv_flags |= (u_short)pte->attr.raw;
	}
	pte->attr.raw = MMU_DT_INVALID;
}

/* pmap_stroll			INTERNAL
 **
 * Retrieve the addresses of all table managers involved in the mapping of
 * the given virtual address.  If the table walk completed successfully,
 * return TRUE.  If it was only partially successful, return FALSE.
 * The table walk performed by this function is important to many other
 * functions in this module.
 *
 * Note: This function ought to be easier to read.
 */
boolean_t
pmap_stroll(pmap, va, a_tbl, b_tbl, c_tbl, pte, a_idx, b_idx, pte_idx)
	pmap_t pmap;
	vaddr_t va;
	a_tmgr_t **a_tbl;
	b_tmgr_t **b_tbl;
	c_tmgr_t **c_tbl;
	mmu_short_pte_t **pte;
	int *a_idx, *b_idx, *pte_idx;
{
	mmu_long_dte_t *a_dte;	/* A: long descriptor table	*/
	mmu_short_dte_t *b_dte;	/* B: short descriptor table	*/

	if (pmap == pmap_kernel())
		return FALSE;

	/* Does the given pmap have its own A table? */
	*a_tbl = pmap->pm_a_tmgr;
	if (*a_tbl == NULL)
		return FALSE; /* No.  Return unknown. */
	/* Does the A table have a valid B table
	 * under the corresponding table entry?
	 */
	*a_idx = MMU_TIA(va);
	a_dte = &((*a_tbl)->at_dtbl[*a_idx]);
	if (!MMU_VALID_DT(*a_dte))
		return FALSE; /* No. Return unknown. */
	/* Yes. Extract B table from the A table. */
	*b_tbl = mmuB2tmgr(mmu_ptov(a_dte->addr.raw));
	/* Does the B table have a valid C table
	 * under the corresponding table entry?
	 */
	*b_idx = MMU_TIB(va);
	b_dte = &((*b_tbl)->bt_dtbl[*b_idx]);
	if (!MMU_VALID_DT(*b_dte))
		return FALSE; /* No. Return unknown. */
	/* Yes. Extract C table from the B table. */
	*c_tbl = mmuC2tmgr(mmu_ptov(MMU_DTE_PA(*b_dte)));
	*pte_idx = MMU_TIC(va);
	*pte = &((*c_tbl)->ct_dtbl[*pte_idx]);

	return TRUE;
}
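
/*
 * Sketch of the typical pmap_stroll() caller pattern (illustrative
 * only; see later functions in this module for real uses):
 *
 *	if (pmap_stroll(pmap, va, &a_tbl, &b_tbl, &c_tbl, &c_pte,
 *	    &a_idx, &b_idx, &pte_idx) == FALSE)
 *		return;			(VA not mapped; nothing to do)
 *	pmap_remove_pte(c_pte);		(otherwise operate on the PTE)
 */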

/* pmap_enter			INTERFACE
 **
 * Called by the kernel to map a virtual address
 * to a physical address in the given process map.
 *
 * Note: this function should apply an exclusive lock
 * on the pmap system for its duration.  (it certainly
 * would save my hair!!)
 * This function ought to be easier to read.
 */
int
pmap_enter(pmap, va, pa, prot, flags)
	pmap_t pmap;
	vaddr_t va;
	paddr_t pa;
	vm_prot_t prot;
	int flags;
{
	boolean_t insert, managed; /* Marks the need for PV insertion.	*/
	u_short nidx;		/* PV list index			*/
	int mapflags;		/* Flags for the mapping (see NOTE1)	*/
	u_int a_idx, b_idx, pte_idx;	/* table indices		*/
	a_tmgr_t *a_tbl;	/* A: long descriptor table manager	*/
	b_tmgr_t *b_tbl;	/* B: short descriptor table manager	*/
	c_tmgr_t *c_tbl;	/* C: short page table manager		*/
	mmu_long_dte_t *a_dte;	/* A: long descriptor table		*/
	mmu_short_dte_t *b_dte;	/* B: short descriptor table		*/
	mmu_short_pte_t *c_pte;	/* C: short page descriptor table	*/
	pv_t *pv;		/* pv list head				*/
	boolean_t wired;	/* is the mapping to be wired?		*/
	enum {NONE, NEWA, NEWB, NEWC} llevel; /* used at end		*/

	if (pmap == pmap_kernel()) {
		pmap_enter_kernel(va, pa, prot);
		return 0;
	}

	/*
	 * Determine if the mapping should be wired.
	 */
	wired = ((flags & PMAP_WIRED) != 0);

	/*
	 * NOTE1:
	 *
	 * On November 13, 1999, someone changed the pmap_enter() API such
	 * that it now accepts a 'flags' argument.  This new argument
	 * contains bit-flags for the architecture-independent (UVM) system to
	 * use in signalling certain mapping requirements to the architecture-
	 * dependent (pmap) system.  The argument it replaces, 'wired', is now
	 * one of the flags within it.
	 *
	 * In addition to flags signaled by the architecture-independent
	 * system, parts of the architecture-dependent section of the sun3x
	 * kernel pass their own flags in the lower, unused bits of the
	 * physical address supplied to this function.  These flags are
	 * extracted and stored in the temporary variable 'mapflags'.
	 *
	 * Extract sun3x specific flags from the physical address.
	 */
	mapflags = (pa & ~MMU_PAGE_MASK);
	pa &= MMU_PAGE_MASK;

	/*
	 * Determine if the physical address being mapped is on-board RAM.
	 * Any other area of the address space is likely to belong to a
	 * device and hence it would be disastrous to cache its contents.
	 */
	if ((managed = is_managed(pa)) == FALSE)
		mapflags |= PMAP_NC;
1654
1655 /*
1656 * For user mappings we walk along the MMU tables of the given
1657 * pmap, reaching a PTE which describes the virtual page being
1658 * mapped or changed. If any level of the walk ends in an invalid
1659 * entry, a table must be allocated and the entry must be updated
1660 * to point to it.
1661 * There is a bit of confusion as to whether this code must be
1662 * re-entrant. For now we will assume it is. To support
1663 * re-entrancy we must unlink tables from the table pool before
1664 * we assume we may use them. Tables are re-linked into the pool
1665 * when we are finished with them at the end of the function.
1666 * But I don't feel like doing that until we have proof that this
1667 * needs to be re-entrant.
1668 * 'llevel' records which tables need to be relinked.
1669 */
1670 llevel = NONE;
1671
1672 /*
1673 * Step 1 - Retrieve the A table from the pmap. If it has no
1674 * A table, allocate a new one from the available pool.
1675 */
1676
1677 a_tbl = pmap->pm_a_tmgr;
1678 if (a_tbl == NULL) {
1679 /*
1680 * This pmap does not currently have an A table. Allocate
1681 * a new one.
1682 */
1683 a_tbl = get_a_table();
1684 a_tbl->at_parent = pmap;
1685
1686 /*
1687 * Assign this new A table to the pmap, and calculate its
1688 * physical address so that loadcrp() can be used to make
1689 * the table active.
1690 */
1691 pmap->pm_a_tmgr = a_tbl;
1692 pmap->pm_a_phys = mmu_vtop(a_tbl->at_dtbl);
1693
1694 /*
1695 * If the process receiving a new A table is the current
1696 * process, we are responsible for setting the MMU so that
1697 * it becomes the current address space. This only adds
1698 * new mappings, so no need to flush anything.
1699 */
1700 if (pmap == current_pmap()) {
1701 kernel_crp.rp_addr = pmap->pm_a_phys;
1702 loadcrp(&kernel_crp);
1703 }
1704
1705 if (!wired)
1706 llevel = NEWA;
1707 } else {
1708 /*
1709 * Use the A table already allocated for this pmap.
1710 * Unlink it from the A table pool if necessary.
1711 */
1712 if (wired && !a_tbl->at_wcnt)
1713 TAILQ_REMOVE(&a_pool, a_tbl, at_link);
1714 }
1715
1716 /*
1717 * Step 2 - Walk into the B table. If there is no valid B table,
1718 * allocate one.
1719 */
1720
1721 a_idx = MMU_TIA(va); /* Calculate the TIA of the VA. */
1722 a_dte = &a_tbl->at_dtbl[a_idx]; /* Retrieve descriptor from table */
1723 if (MMU_VALID_DT(*a_dte)) { /* Is the descriptor valid? */
1724 /* The descriptor is valid. Use the B table it points to. */
1725 /*************************************
1726 * a_idx *
1727 * v *
1728 * a_tbl -> +-+-+-+-+-+-+-+-+-+-+-+- *
1729 * | | | | | | | | | | | | *
1730 * +-+-+-+-+-+-+-+-+-+-+-+- *
1731 * | *
1732 * \- b_tbl -> +-+- *
1733 * | | *
1734 * +-+- *
1735 *************************************/
1736 b_dte = mmu_ptov(a_dte->addr.raw);
1737 b_tbl = mmuB2tmgr(b_dte);
1738
1739 /*
1740 * If the requested mapping must be wired, but the B table
1741 * used to map it is not, the table must be removed from
1742 * the available pool and the parent A table's wired entry
1743 * count incremented.
1744 */
1745 if (wired && !b_tbl->bt_wcnt) {
1746 TAILQ_REMOVE(&b_pool, b_tbl, bt_link);
1747 a_tbl->at_wcnt++;
1748 }
1749 } else {
1750 /* The descriptor is invalid. Allocate a new B table. */
1751 b_tbl = get_b_table();
1752
1753 /* Point the parent A table descriptor to this new B table. */
1754 a_dte->addr.raw = mmu_vtop(b_tbl->bt_dtbl);
1755 a_dte->attr.raw = MMU_LONG_DTE_LU | MMU_DT_SHORT;
1756 a_tbl->at_ecnt++; /* Update parent's valid entry count */
1757
1758 /* Create the necessary back references to the parent table */
1759 b_tbl->bt_parent = a_tbl;
1760 b_tbl->bt_pidx = a_idx;
1761
1762 /*
1763 * If this table is to be wired, make sure the parent A table
1764 * wired count is updated to reflect that it has another wired
1765 * entry.
1766 */
1767 if (wired)
1768 a_tbl->at_wcnt++;
1769 else if (llevel == NONE)
1770 llevel = NEWB;
1771 }
1772
1773 /*
1774 * Step 3 - Walk into the C table, if there is no valid C table,
1775 * allocate one.
1776 */
1777
1778 b_idx = MMU_TIB(va); /* Calculate the TIB of the VA */
1779 b_dte = &b_tbl->bt_dtbl[b_idx]; /* Retrieve descriptor from table */
1780 if (MMU_VALID_DT(*b_dte)) { /* Is the descriptor valid? */
1781 /* The descriptor is valid. Use the C table it points to. */
1782 /**************************************
1783 * c_idx *
1784 * | v *
1785 * \- b_tbl -> +-+-+-+-+-+-+-+-+-+-+- *
1786 * | | | | | | | | | | | *
1787 * +-+-+-+-+-+-+-+-+-+-+- *
1788 * | *
1789 * \- c_tbl -> +-+-- *
1790 * | | | *
1791 * +-+-- *
1792 **************************************/
1793 c_pte = mmu_ptov(MMU_PTE_PA(*b_dte));
1794 c_tbl = mmuC2tmgr(c_pte);
1795
1796 /* Wire the C table if the new mapping is wired but the table is not. */
1797 if (wired && !c_tbl->ct_wcnt) {
1798 TAILQ_REMOVE(&c_pool, c_tbl, ct_link);
1799 b_tbl->bt_wcnt++;
1800 }
1801 } else {
1802 /* The descriptor is invalid. Allocate a new C table. */
1803 c_tbl = get_c_table();
1804
1805 /* Point the parent B table descriptor to this new C table. */
1806 b_dte->attr.raw = mmu_vtop(c_tbl->ct_dtbl);
1807 b_dte->attr.raw |= MMU_DT_SHORT;
1808 b_tbl->bt_ecnt++; /* Update parent's valid entry count */
1809
1810 /* Create the necessary back references to the parent table */
1811 c_tbl->ct_parent = b_tbl;
1812 c_tbl->ct_pidx = b_idx;
1813 /*
1814 * Store the pmap and base virtual managed address for faster
1815 * retrieval in the PV functions.
1816 */
1817 c_tbl->ct_pmap = pmap;
1818 c_tbl->ct_va = (va & (MMU_TIA_MASK|MMU_TIB_MASK));
1819
1820 /*
1821 * If this table is to be wired, make sure the parent B table
1822 * wired count is updated to reflect that it has another wired
1823 * entry.
1824 */
1825 if (wired)
1826 b_tbl->bt_wcnt++;
1827 else if (llevel == NONE)
1828 llevel = NEWC;
1829 }
1830
1831 /*
1832 * Step 4 - Deposit a page descriptor (PTE) into the appropriate
1833 * slot of the C table, describing the PA to which the VA is mapped.
1834 */
1835
1836 pte_idx = MMU_TIC(va);
1837 c_pte = &c_tbl->ct_dtbl[pte_idx];
1838 if (MMU_VALID_DT(*c_pte)) { /* Is the entry currently valid? */
1839 /*
1840 * The PTE is currently valid. This particular call
1841 * is just a synonym for one (or more) of the following
1842 * operations:
1843 * change protection of a page
1844 * change wiring status of a page
1845 * remove the mapping of a page
1846 *
1847 * XXX - Semi critical: This code should unwire the PTE
1848 * and, possibly, associated parent tables if this is a
1849 * change wiring operation. Currently it does not.
1850 *
1851 * This may be ok if pmap_unwire() is the only
1852 * interface used to UNWIRE a page.
1853 */
1854
1855 /* First check if this is a wiring operation. */
1856 if (wired && (c_pte->attr.raw & MMU_SHORT_PTE_WIRED)) {
1857 /*
1858 * The PTE is already wired. To prevent it from being
1859 * counted as a new wiring operation, reset the 'wired'
1860 * variable.
1861 */
1862 wired = FALSE;
1863 }
1864
1865 /* Is the new address the same as the old? */
1866 if (MMU_PTE_PA(*c_pte) == pa) {
1867 /*
1868 * Yes, mark that it does not need to be reinserted
1869 * into the PV list.
1870 */
1871 insert = FALSE;
1872
1873 /*
1874 * Clear all but the modified, referenced and wired
1875 * bits on the PTE.
1876 */
1877 c_pte->attr.raw &= (MMU_SHORT_PTE_M
1878 | MMU_SHORT_PTE_USED | MMU_SHORT_PTE_WIRED);
1879 } else {
1880 /* No, remove the old entry */
1881 pmap_remove_pte(c_pte);
1882 insert = TRUE;
1883 }
1884
1885 /*
1886 * TLB flush is only necessary if modifying current map.
1887 * However, in pmap_enter(), the pmap almost always IS
1888 * the current pmap, so don't even bother to check.
1889 */
1890 TBIS(va);
1891 } else {
1892 /*
1893 * The PTE is invalid. Increment the valid entry count in
1894 * the C table manager to reflect the addition of a new entry.
1895 */
1896 c_tbl->ct_ecnt++;
1897
1898 /* XXX - temporarily make sure the PTE is cleared. */
1899 c_pte->attr.raw = 0;
1900
1901 /* It will also need to be inserted into the PV list. */
1902 insert = TRUE;
1903 }
1904
1905 /*
1906 * If page is changing from unwired to wired status, set an unused bit
1907 * within the PTE to indicate that it is wired. Also increment the
1908 * wired entry count in the C table manager.
1909 */
1910 if (wired) {
1911 c_pte->attr.raw |= MMU_SHORT_PTE_WIRED;
1912 c_tbl->ct_wcnt++;
1913 }
1914
1915 /*
1916 * Map the page, being careful to preserve modify/reference/wired
1917 * bits. At this point it is assumed that the PTE either has no bits
1918 * set, or if there are set bits, they are only modified, referenced, or
1919 * wired bits. If not, the following statement will cause erratic
1920 * behavior.
1921 */
1922 #ifdef PMAP_DEBUG
1923 if (c_pte->attr.raw & ~(MMU_SHORT_PTE_M |
1924 MMU_SHORT_PTE_USED | MMU_SHORT_PTE_WIRED)) {
1925 printf("pmap_enter: junk left in PTE at %p\n", c_pte);
1926 Debugger();
1927 }
1928 #endif
1929 c_pte->attr.raw |= ((u_long) pa | MMU_DT_PAGE);
1930
1931 /*
1932 * If the mapping should be read-only, set the write protect
1933 * bit in the PTE.
1934 */
1935 if (!(prot & VM_PROT_WRITE))
1936 c_pte->attr.raw |= MMU_SHORT_PTE_WP;
1937
1938 /*
1939 * If the mapping should be cache-inhibited (indicated by flag
1940 * bits passed in the lower-order bits of the physical address),
1941 * mark the PTE as a cache-inhibited page.
1942 */
1943 if (mapflags & PMAP_NC)
1944 c_pte->attr.raw |= MMU_SHORT_PTE_CI;
1945
1946 /*
1947 * If the physical address being mapped is managed by the PV
1948 * system then link the pte into the list of pages mapped to that
1949 * address.
1950 */
1951 if (insert && managed) {
1952 pv = pa2pv(pa);
1953 nidx = pteidx(c_pte);
1954
1955 pvebase[nidx].pve_next = pv->pv_idx;
1956 pv->pv_idx = nidx;
1957 }
1958
1959 /* Move any allocated tables back into the active pool. */
1960
1961 switch (llevel) {
1962 case NEWA:
1963 TAILQ_INSERT_TAIL(&a_pool, a_tbl, at_link);
1964 /* FALLTHROUGH */
1965 case NEWB:
1966 TAILQ_INSERT_TAIL(&b_pool, b_tbl, bt_link);
1967 /* FALLTHROUGH */
1968 case NEWC:
1969 TAILQ_INSERT_TAIL(&c_pool, c_tbl, ct_link);
1970 /* FALLTHROUGH */
1971 default:
1972 break;
1973 }
1974
1975 return 0;
1976 }
1977
1978 /* pmap_enter_kernel INTERNAL
1979 **
1980 * Map the given virtual address to the given physical address within the
1981 * kernel address space. This function exists because the kernel map does
1982 * not do dynamic table allocation. It consists of a contiguous array of ptes
1983 * and can be edited directly without the need to walk through any tables.
1984 *
1985 * XXX: "Danger, Will Robinson!"
1986 * Note that the kernel should never take a fault on any page
1987 * between [ KERNBASE .. virtual_avail ] and this is checked in
1988 * trap.c for kernel-mode MMU faults. This means that mappings
1989 * created in that range must be implicitly wired. -gwr
1990 */
1991 void
1992 pmap_enter_kernel(va, pa, prot)
1993 vaddr_t va;
1994 paddr_t pa;
1995 vm_prot_t prot;
1996 {
1997 boolean_t was_valid, insert;
1998 u_short pte_idx;
1999 int flags;
2000 mmu_short_pte_t *pte;
2001 pv_t *pv;
2002 paddr_t old_pa;
2003
2004 flags = (pa & ~MMU_PAGE_MASK);
2005 pa &= MMU_PAGE_MASK;
2006
2007 if (is_managed(pa))
2008 insert = TRUE;
2009 else
2010 insert = FALSE;
2011
2012 /*
2013 * Calculate the index of the PTE being modified.
2014 */
2015 pte_idx = (u_long) m68k_btop(va - KERNBASE);
2016
2017 /* This array is traditionally named "Sysmap" */
2018 pte = &kernCbase[pte_idx];
2019
2020 if (MMU_VALID_DT(*pte)) {
2021 was_valid = TRUE;
2022 /*
2023 * If the PTE already maps a different
2024 * physical address, unmap and pv_unlink.
2025 */
2026 old_pa = MMU_PTE_PA(*pte);
2027 if (pa != old_pa)
2028 pmap_remove_pte(pte);
2029 else {
2030 /*
2031 * Old PA and new PA are the same. No need to
2032 * relink the mapping within the PV list.
2033 */
2034 insert = FALSE;
2035
2036 /*
2037 * Save any mod/ref bits on the PTE.
2038 */
2039 pte->attr.raw &= (MMU_SHORT_PTE_USED|MMU_SHORT_PTE_M);
2040 }
2041 } else {
2042 pte->attr.raw = MMU_DT_INVALID;
2043 was_valid = FALSE;
2044 }
2045
2046 /*
2047 * Map the page. Being careful to preserve modified/referenced bits
2048 * on the PTE.
2049 */
2050 pte->attr.raw |= (pa | MMU_DT_PAGE);
2051
2052 if (!(prot & VM_PROT_WRITE)) /* If access should be read-only */
2053 pte->attr.raw |= MMU_SHORT_PTE_WP;
2054 if (flags & PMAP_NC)
2055 pte->attr.raw |= MMU_SHORT_PTE_CI;
2056 if (was_valid)
2057 TBIS(va);
2058
2059 /*
2060 * Insert the PTE into the PV system, if need be.
2061 */
2062 if (insert) {
2063 pv = pa2pv(pa);
2064 pvebase[pte_idx].pve_next = pv->pv_idx;
2065 pv->pv_idx = pte_idx;
2066 }
2067 }
2068
2069 void
2070 pmap_kenter_pa(va, pa, prot)
2071 vaddr_t va;
2072 paddr_t pa;
2073 vm_prot_t prot;
2074 {
2075 mmu_short_pte_t *pte;
2076
2077 /* This array is traditionally named "Sysmap" */
2078 pte = &kernCbase[(u_long)m68k_btop(va - KERNBASE)];
2079
2080 KASSERT(!MMU_VALID_DT(*pte));
2081 pte->attr.raw = MMU_DT_INVALID | MMU_DT_PAGE | (pa & MMU_PAGE_MASK);
2082 if (!(prot & VM_PROT_WRITE))
2083 pte->attr.raw |= MMU_SHORT_PTE_WP;
2084 }
2085
2086 void
2087 pmap_kremove(va, len)
2088 vaddr_t va;
2089 vsize_t len;
2090 {
2091 int idx, eidx;
2092
2093 #ifdef PMAP_DEBUG
2094 if ((va & PGOFSET) || (len & PGOFSET))
2095 panic("pmap_kremove: alignment");
2096 #endif
2097
2098 idx = m68k_btop(va - KERNBASE);
2099 eidx = m68k_btop(va + len - KERNBASE);
2100
2101 while (idx < eidx) {
2102 kernCbase[idx++].attr.raw = MMU_DT_INVALID;
2103 TBIS(va);
2104 va += NBPG;
2105 }
2106 }
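/*
 * A sketch of how the two functions above pair up (see pmap_copy_page()
 * below for a real caller): map a physical page at a reserved VA, use
 * it, then tear the mapping down. 'tmpva' here is hypothetical.
 *
 *	pmap_kenter_pa(tmpva, pa, VM_PROT_READ|VM_PROT_WRITE);
 *	... access the page through tmpva ...
 *	pmap_kremove(tmpva, NBPG);
 */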
2107
2108 /* pmap_map INTERNAL
2109 **
2110 * Map a contiguous range of physical memory into a contiguous range of
2111 * the kernel virtual address space.
2112 *
2113 * Used for device mappings and early mapping of the kernel text/data/bss.
2114 * Returns the first virtual address beyond the end of the range.
2115 */
2116 vaddr_t
2117 pmap_map(va, pa, endpa, prot)
2118 vaddr_t va;
2119 paddr_t pa;
2120 paddr_t endpa;
2121 int prot;
2122 {
2123 int sz;
2124
2125 sz = endpa - pa;
2126 do {
2127 pmap_enter_kernel(va, pa, prot);
2128 va += NBPG;
2129 pa += NBPG;
2130 sz -= NBPG;
2131 } while (sz > 0);
2132 pmap_update(pmap_kernel());
2133 return(va);
2134 }
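/*
 * Example (hypothetical device address 'devpa'): map 64KB of device
 * space read/write during early startup and receive the next free
 * virtual address back:
 *
 *	va = pmap_map(va, devpa, devpa + 0x10000,
 *	    VM_PROT_READ|VM_PROT_WRITE);
 */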
2135
2136 /* pmap_protect INTERFACE
2137 **
2138 * Apply the given protection to the given virtual address range within
2139 * the given map.
2140 *
2141 * It is ok for the protection applied to be stronger than what is
2142 * specified. We use this to our advantage when the given map has no
2143 * mapping for the virtual address. By skipping a page when this
2144 * is discovered, we are effectively applying a protection of VM_PROT_NONE,
2145 * and therefore do not need to map the page just to apply a protection
2146 * code. Only pmap_enter() needs to create new mappings if they do not exist.
2147 *
2148 * XXX - This function could be sped up by using pmap_stroll() for initial
2149 * setup, and then manually strolling through the tables in the while() loop.
2150 */
2151 void
2152 pmap_protect(pmap, startva, endva, prot)
2153 pmap_t pmap;
2154 vaddr_t startva, endva;
2155 vm_prot_t prot;
2156 {
2157 boolean_t iscurpmap;
2158 int a_idx, b_idx, c_idx;
2159 a_tmgr_t *a_tbl;
2160 b_tmgr_t *b_tbl;
2161 c_tmgr_t *c_tbl;
2162 mmu_short_pte_t *pte;
2163
2164 if (pmap == pmap_kernel()) {
2165 pmap_protect_kernel(startva, endva, prot);
2166 return;
2167 }
2168
2169 /*
2170 * In this particular pmap implementation, there are only three
2171 * types of memory protection: 'all' (read/write/execute),
2172 * 'read-only' (read/execute) and 'none' (no mapping.)
2173 * It is not possible for us to treat 'executable' as a separate
2174 * protection type. Therefore, protection requests that seek to
2175 * remove execute permission while retaining read or write, and those
2176 * that make little sense (write-only for example) are ignored.
2177 */
2178 switch (prot) {
2179 case VM_PROT_NONE:
2180 /*
2181 * A request to apply the protection code of
2182 * 'VM_PROT_NONE' is a synonym for pmap_remove().
2183 */
2184 pmap_remove(pmap, startva, endva);
2185 return;
2186 case VM_PROT_EXECUTE:
2187 case VM_PROT_READ:
2188 case VM_PROT_READ|VM_PROT_EXECUTE:
2189 /* continue */
2190 break;
2191 case VM_PROT_WRITE:
2192 case VM_PROT_WRITE|VM_PROT_READ:
2193 case VM_PROT_WRITE|VM_PROT_EXECUTE:
2194 case VM_PROT_ALL:
2195 /* None of these should happen in a sane system. */
2196 return;
2197 }
2198
2199 /*
2200 * If the pmap has no A table, it has no mappings and therefore
2201 * there is nothing to protect.
2202 */
2203 if ((a_tbl = pmap->pm_a_tmgr) == NULL)
2204 return;
2205
2206 a_idx = MMU_TIA(startva);
2207 b_idx = MMU_TIB(startva);
2208 c_idx = MMU_TIC(startva);
2209 b_tbl = NULL; c_tbl = NULL;
2210
2211 iscurpmap = (pmap == current_pmap());
2212 while (startva < endva) {
2213 if (b_tbl || MMU_VALID_DT(a_tbl->at_dtbl[a_idx])) {
2214 if (b_tbl == NULL) {
2215 b_tbl = (b_tmgr_t *) a_tbl->at_dtbl[a_idx].addr.raw;
2216 b_tbl = mmu_ptov((vaddr_t)b_tbl);
2217 b_tbl = mmuB2tmgr((mmu_short_dte_t *)b_tbl);
2218 }
2219 if (c_tbl || MMU_VALID_DT(b_tbl->bt_dtbl[b_idx])) {
2220 if (c_tbl == NULL) {
2221 c_tbl = (c_tmgr_t *) MMU_DTE_PA(b_tbl->bt_dtbl[b_idx]);
2222 c_tbl = mmu_ptov((vaddr_t)c_tbl);
2223 c_tbl = mmuC2tmgr((mmu_short_pte_t *)c_tbl);
2224 }
2225 if (MMU_VALID_DT(c_tbl->ct_dtbl[c_idx])) {
2226 pte = &c_tbl->ct_dtbl[c_idx];
2227 /* make the mapping read-only */
2228 pte->attr.raw |= MMU_SHORT_PTE_WP;
2229 /*
2230 * If we just modified the current address space,
2231 * flush any translations for the modified page from
2232 * the translation cache and any data from it in the
2233 * data cache.
2234 */
2235 if (iscurpmap)
2236 TBIS(startva);
2237 }
2238 startva += NBPG;
2239
2240 if (++c_idx >= MMU_C_TBL_SIZE) { /* exceeded C table? */
2241 c_tbl = NULL;
2242 c_idx = 0;
2243 if (++b_idx >= MMU_B_TBL_SIZE) { /* exceeded B table? */
2244 b_tbl = NULL;
2245 b_idx = 0;
2246 }
2247 }
2248 } else { /* C table wasn't valid */
2249 c_tbl = NULL;
2250 c_idx = 0;
2251 startva += MMU_TIB_RANGE;
2252 if (++b_idx >= MMU_B_TBL_SIZE) { /* exceeded B table? */
2253 b_tbl = NULL;
2254 b_idx = 0;
2255 }
2256 } /* C table */
2257 } else { /* B table wasn't valid */
2258 b_tbl = NULL;
2259 b_idx = 0;
2260 startva += MMU_TIA_RANGE;
2261 a_idx++;
2262 } /* B table */
2263 }
2264 }
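/*
 * Example (a sketch): write-protect a single page in a user map.
 *
 *	pmap_protect(pmap, va, va + NBPG, VM_PROT_READ);
 *
 * Per the switch statement above, VM_PROT_READ simply sets the WP bit
 * on the page's PTE; VM_PROT_NONE would remove the mapping entirely.
 */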
2265
2266 /* pmap_protect_kernel INTERNAL
2267 **
2268 * Apply the given protection code to a kernel address range.
2269 */
2270 void
2271 pmap_protect_kernel(startva, endva, prot)
2272 vaddr_t startva, endva;
2273 vm_prot_t prot;
2274 {
2275 vaddr_t va;
2276 mmu_short_pte_t *pte;
2277
2278 pte = &kernCbase[(unsigned long) m68k_btop(startva - KERNBASE)];
2279 for (va = startva; va < endva; va += NBPG, pte++) {
2280 if (MMU_VALID_DT(*pte)) {
2281 switch (prot) {
2282 case VM_PROT_ALL:
2283 break;
2284 case VM_PROT_EXECUTE:
2285 case VM_PROT_READ:
2286 case VM_PROT_READ|VM_PROT_EXECUTE:
2287 pte->attr.raw |= MMU_SHORT_PTE_WP;
2288 break;
2289 case VM_PROT_NONE:
2290 /* this is an alias for 'pmap_remove_kernel' */
2291 pmap_remove_pte(pte);
2292 break;
2293 default:
2294 break;
2295 }
2296 /*
2297 * since this is the kernel, immediately flush any cached
2298 * descriptors for this address.
2299 */
2300 TBIS(va);
2301 }
2302 }
2303 }
2304
2305 /* pmap_unwire INTERFACE
2306 **
2307 * Clear the wired attribute of the specified page.
2308 *
2309 * This function is called from vm_fault.c to unwire
2310 * a mapping.
2311 */
2312 void
2313 pmap_unwire(pmap, va)
2314 pmap_t pmap;
2315 vaddr_t va;
2316 {
2317 int a_idx, b_idx, c_idx;
2318 a_tmgr_t *a_tbl;
2319 b_tmgr_t *b_tbl;
2320 c_tmgr_t *c_tbl;
2321 mmu_short_pte_t *pte;
2322
2323 /* Kernel mappings always remain wired. */
2324 if (pmap == pmap_kernel())
2325 return;
2326
2327 /*
2328 * Walk through the tables. If the walk terminates without
2329 * a valid PTE then the address wasn't wired in the first place.
2330 * Return immediately.
2331 */
2332 if (pmap_stroll(pmap, va, &a_tbl, &b_tbl, &c_tbl, &pte, &a_idx,
2333 &b_idx, &c_idx) == FALSE)
2334 return;
2335
2336
2337 /* Is the PTE wired? If not, return. */
2338 if (!(pte->attr.raw & MMU_SHORT_PTE_WIRED))
2339 return;
2340
2341 /* Remove the wiring bit. */
2342 pte->attr.raw &= ~(MMU_SHORT_PTE_WIRED);
2343
2344 /*
2345 * Decrement the wired entry count in the C table.
2346 * If it reaches zero the following things happen:
2347 * 1. The table no longer has any wired entries and is considered
2348 * unwired.
2349 * 2. It is placed on the available queue.
2350 * 3. The parent table's wired entry count is decremented.
2351 * 4. If it reaches zero, this process repeats at step 1 and
2352 * stops after reaching the A table.
2353 */
2354 if (--c_tbl->ct_wcnt == 0) {
2355 TAILQ_INSERT_TAIL(&c_pool, c_tbl, ct_link);
2356 if (--b_tbl->bt_wcnt == 0) {
2357 TAILQ_INSERT_TAIL(&b_pool, b_tbl, bt_link);
2358 if (--a_tbl->at_wcnt == 0) {
2359 TAILQ_INSERT_TAIL(&a_pool, a_tbl, at_link);
2360 }
2361 }
2362 }
2363 }
2364
2365 /* pmap_copy INTERFACE
2366 **
2367 * Copy the mappings of a range of addresses in one pmap, into
2368 * the destination address of another.
2369 *
2370 * This routine is advisory. Should we one day decide that MMU tables
2371 * may be shared by more than one pmap, this function should be used to
2372 * link them together. Until that day however, we do nothing.
2373 */
2374 void
2375 pmap_copy(pmap_a, pmap_b, dst, len, src)
2376 pmap_t pmap_a, pmap_b;
2377 vaddr_t dst;
2378 vsize_t len;
2379 vaddr_t src;
2380 {
2381 /* not implemented. */
2382 }
2383
2384 /* pmap_copy_page INTERFACE
2385 **
2386 * Copy the contents of one physical page into another.
2387 *
2388 * This function makes use of two virtual pages allocated in pmap_bootstrap()
2389 * to map the two specified physical pages into the kernel address space.
2390 *
2391 * Note: We could use the transparent translation registers to make the
2392 * mappings. If we do so, be sure to disable interrupts before using them.
2393 */
2394 void
2395 pmap_copy_page(srcpa, dstpa)
2396 paddr_t srcpa, dstpa;
2397 {
2398 vaddr_t srcva, dstva;
2399 int s;
2400
2401 srcva = tmp_vpages[0];
2402 dstva = tmp_vpages[1];
2403
2404 s = splvm();
2405 #ifdef DIAGNOSTIC
2406 if (tmp_vpages_inuse++)
2407 panic("pmap_copy_page: temporary vpages are in use.");
2408 #endif
2409
2410 /* Map pages as non-cacheable to avoid cache pollution? */
2411 pmap_kenter_pa(srcva, srcpa, VM_PROT_READ);
2412 pmap_kenter_pa(dstva, dstpa, VM_PROT_READ|VM_PROT_WRITE);
2413
2414 /* Hand-optimized version of bcopy(src, dst, NBPG) */
2415 copypage((char *) srcva, (char *) dstva);
2416
2417 pmap_kremove(srcva, NBPG);
2418 pmap_kremove(dstva, NBPG);
2419
2420 #ifdef DIAGNOSTIC
2421 --tmp_vpages_inuse;
2422 #endif
2423 splx(s);
2424 }
2425
2426 /* pmap_zero_page INTERFACE
2427 **
2428 * Zero the contents of the specified physical page.
2429 *
2430 * Uses one of the virtual pages allocated in pmap_bootstrap()
2431 * to map the specified page into the kernel address space.
2432 */
2433 void
2434 pmap_zero_page(dstpa)
2435 paddr_t dstpa;
2436 {
2437 vaddr_t dstva;
2438 int s;
2439
2440 dstva = tmp_vpages[1];
2441 s = splvm();
2442 #ifdef DIAGNOSTIC
2443 if (tmp_vpages_inuse++)
2444 panic("pmap_zero_page: temporary vpages are in use.");
2445 #endif
2446
2447 /* The comments in pmap_copy_page() above apply here also. */
2448 pmap_kenter_pa(dstva, dstpa, VM_PROT_READ|VM_PROT_WRITE);
2449
2450 /* Hand-optimized version of bzero(ptr, NBPG) */
2451 zeropage((char *) dstva);
2452
2453 pmap_kremove(dstva, NBPG);
2454 #ifdef DIAGNOSTIC
2455 --tmp_vpages_inuse;
2456 #endif
2457 splx(s);
2458 }
2459
2460 /* pmap_collect INTERFACE
2461 **
2462 * Called from the VM system when we are about to swap out
2463 * the process using this pmap. This should give up any
2464 * resources held here, including all its MMU tables.
2465 */
2466 void
2467 pmap_collect(pmap)
2468 pmap_t pmap;
2469 {
2470 /* XXX - todo... */
2471 }
2472
2473 /* pmap_create INTERFACE
2474 **
2475 * Create and return a pmap structure.
2476 */
2477 pmap_t
2478 pmap_create()
2479 {
2480 pmap_t pmap;
2481
2482 pmap = pool_get(&pmap_pmap_pool, PR_WAITOK);
2483 pmap_pinit(pmap);
2484 return pmap;
2485 }
2486
2487 /* pmap_pinit INTERNAL
2488 **
2489 * Initialize a pmap structure.
2490 */
2491 void
2492 pmap_pinit(pmap)
2493 pmap_t pmap;
2494 {
2495 memset(pmap, 0, sizeof(struct pmap));
2496 pmap->pm_a_tmgr = NULL;
2497 pmap->pm_a_phys = kernAphys;
2498 pmap->pm_refcount = 1;
2499 simple_lock_init(&pmap->pm_lock);
2500 }
2501
2502 /* pmap_release INTERFACE
2503 **
2504 * Release any resources held by the given pmap.
2505 *
2506 * This is the reverse analog of pmap_pinit. It does not
2507 * necessarily deallocate the pmap structure itself;
2508 * that is done by pmap_destroy.
2509 */
2510 void
2511 pmap_release(pmap)
2512 pmap_t pmap;
2513 {
2514 /*
2515 * As long as the pmap contains no mappings,
2516 * which always should be the case whenever
2517 * this function is called, there really should
2518 * be nothing to do.
2519 */
2520 #ifdef PMAP_DEBUG
2521 if (pmap == pmap_kernel())
2522 panic("pmap_release: kernel pmap");
2523 #endif
2524 /*
2525 * XXX - If this pmap has an A table, give it back.
2526 * The pmap SHOULD be empty by now, and pmap_remove
2527 * should have already given back the A table...
2528 * However, I see: pmap->pm_a_tmgr->at_ecnt == 1
2529 * at this point, which means some mapping was not
2530 * removed when it should have been. -gwr
2531 */
2532 if (pmap->pm_a_tmgr != NULL) {
2533 /* First make sure we are not using it! */
2534 if (kernel_crp.rp_addr == pmap->pm_a_phys) {
2535 kernel_crp.rp_addr = kernAphys;
2536 loadcrp(&kernel_crp);
2537 }
2538 #ifdef PMAP_DEBUG /* XXX - todo! */
2539 /* XXX - Now complain... */
2540 printf("pmap_release: still have table\n");
2541 Debugger();
2542 #endif
2543 free_a_table(pmap->pm_a_tmgr, TRUE);
2544 pmap->pm_a_tmgr = NULL;
2545 pmap->pm_a_phys = kernAphys;
2546 }
2547 }
2548
2549 /* pmap_reference INTERFACE
2550 **
2551 * Increment the reference count of a pmap.
2552 */
2553 void
2554 pmap_reference(pmap)
2555 pmap_t pmap;
2556 {
2557 pmap_lock(pmap);
2558 pmap_add_ref(pmap);
2559 pmap_unlock(pmap);
2560 }
2561
2562 /* pmap_dereference INTERNAL
2563 **
2564 * Decrease the reference count on the given pmap
2565 * by one and return the current count.
2566 */
2567 int
2568 pmap_dereference(pmap)
2569 pmap_t pmap;
2570 {
2571 int rtn;
2572
2573 pmap_lock(pmap);
2574 rtn = pmap_del_ref(pmap);
2575 pmap_unlock(pmap);
2576
2577 return rtn;
2578 }
2579
2580 /* pmap_destroy INTERFACE
2581 **
2582 * Decrement a pmap's reference count and delete
2583 * the pmap if it becomes zero. Will be called
2584 * only after all mappings have been removed.
2585 */
2586 void
2587 pmap_destroy(pmap)
2588 pmap_t pmap;
2589 {
2590 if (pmap_dereference(pmap) == 0) {
2591 pmap_release(pmap);
2592 pool_put(&pmap_pmap_pool, pmap);
2593 }
2594 }
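/*
 * A sketch of the reference-count life cycle implemented by the
 * routines above:
 *
 *	pmap = pmap_create();	refcount is 1
 *	pmap_reference(pmap);	refcount is 2
 *	pmap_destroy(pmap);	refcount is 1; pmap survives
 *	pmap_destroy(pmap);	refcount is 0; pmap is released and freed
 */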
2595
2596 /* pmap_is_referenced INTERFACE
2597 **
2598 * Determine if the given physical page has been
2599 * referenced (read from [or written to.])
2600 */
2601 boolean_t
2602 pmap_is_referenced(pg)
2603 struct vm_page *pg;
2604 {
2605 paddr_t pa = VM_PAGE_TO_PHYS(pg);
2606 pv_t *pv;
2607 int idx;
2608
2609 /*
2610 * Check the flags on the pv head. If they are set,
2611 * return immediately. Otherwise a search must be done.
2612 */
2613
2614 pv = pa2pv(pa);
2615 if (pv->pv_flags & PV_FLAGS_USED)
2616 return TRUE;
2617
2618 /*
2619 * Search through all pv elements pointing
2620 * to this page and query their reference bits
2621 */
2622
2623 for (idx = pv->pv_idx; idx != PVE_EOL; idx = pvebase[idx].pve_next) {
2624 if (MMU_PTE_USED(kernCbase[idx])) {
2625 return TRUE;
2626 }
2627 }
2628 return FALSE;
2629 }
2630
2631 /* pmap_is_modified INTERFACE
2632 **
2633 * Determine if the given physical page has been
2634 * modified (written to.)
2635 */
2636 boolean_t
2637 pmap_is_modified(pg)
2638 struct vm_page *pg;
2639 {
2640 paddr_t pa = VM_PAGE_TO_PHYS(pg);
2641 pv_t *pv;
2642 int idx;
2643
2644 /* see comments in pmap_is_referenced() */
2645 pv = pa2pv(pa);
2646 if (pv->pv_flags & PV_FLAGS_MDFY)
2647 return TRUE;
2648
2649 for (idx = pv->pv_idx;
2650 idx != PVE_EOL;
2651 idx = pvebase[idx].pve_next) {
2652
2653 if (MMU_PTE_MODIFIED(kernCbase[idx])) {
2654 return TRUE;
2655 }
2656 }
2657
2658 return FALSE;
2659 }
2660
2661 /* pmap_page_protect INTERFACE
2662 **
2663 * Applies the given protection to all mappings to the given
2664 * physical page.
2665 */
2666 void
2667 pmap_page_protect(pg, prot)
2668 struct vm_page *pg;
2669 vm_prot_t prot;
2670 {
2671 paddr_t pa = VM_PAGE_TO_PHYS(pg);
2672 pv_t *pv;
2673 int idx;
2674 vaddr_t va;
2675 struct mmu_short_pte_struct *pte;
2676 c_tmgr_t *c_tbl;
2677 pmap_t pmap, curpmap;
2678
2679 curpmap = current_pmap();
2680 pv = pa2pv(pa);
2681
2682 for (idx = pv->pv_idx; idx != PVE_EOL; idx = pvebase[idx].pve_next) {
2683 pte = &kernCbase[idx];
2684 switch (prot) {
2685 case VM_PROT_ALL:
2686 /* do nothing */
2687 break;
2688 case VM_PROT_EXECUTE:
2689 case VM_PROT_READ:
2690 case VM_PROT_READ|VM_PROT_EXECUTE:
2691 /*
2692 * Determine the virtual address mapped by
2693 * the PTE and flush ATC entries if necessary.
2694 */
2695 va = pmap_get_pteinfo(idx, &pmap, &c_tbl);
2696 pte->attr.raw |= MMU_SHORT_PTE_WP;
2697 if (pmap == curpmap || pmap == pmap_kernel())
2698 TBIS(va);
2699 break;
2700 case VM_PROT_NONE:
2701 /* Save the mod/ref bits. */
2702 pv->pv_flags |= pte->attr.raw;
2703 /* Invalidate the PTE. */
2704 pte->attr.raw = MMU_DT_INVALID;
2705
2706 /*
2707 * Update table counts. And flush ATC entries
2708 * if necessary.
2709 */
2710 va = pmap_get_pteinfo(idx, &pmap, &c_tbl);
2711
2712 /*
2713 * If the PTE belongs to the kernel map,
2714 * be sure to flush the page it maps.
2715 */
2716 if (pmap == pmap_kernel()) {
2717 TBIS(va);
2718 } else {
2719 /*
2720 * The PTE belongs to a user map.
2721 * update the entry count in the C
2722 * table to which it belongs and flush
2723 * the ATC if the mapping belongs to
2724 * the current pmap.
2725 */
2726 c_tbl->ct_ecnt--;
2727 if (pmap == curpmap)
2728 TBIS(va);
2729 }
2730 break;
2731 default:
2732 break;
2733 }
2734 }
2735
2736 /*
2737 * If the protection code indicates that all mappings to the page
2738 * be removed, truncate the PV list to zero entries.
2739 */
2740 if (prot == VM_PROT_NONE)
2741 pv->pv_idx = PVE_EOL;
2742 }
2743
2744 /* pmap_get_pteinfo INTERNAL
2745 **
2746 * Called internally to find the pmap and virtual address within that
2747 * map to which the pte at the given index maps. It also returns
2748 * the PTE's C table manager through 'tbl'.
2749 *
2750 * Returns the pmap in the argument provided, and the virtual address
2751 * by return value.
2752 */
2753 vaddr_t
2754 pmap_get_pteinfo(idx, pmap, tbl)
2755 u_int idx;
2756 pmap_t *pmap;
2757 c_tmgr_t **tbl;
2758 {
2759 vaddr_t va = 0;
2760
2761 /*
2762 * Determine if the PTE is a kernel PTE or a user PTE.
2763 */
2764 if (idx >= NUM_KERN_PTES) {
2765 /*
2766 * The PTE belongs to a user mapping.
2767 */
2768 /* XXX: Would like an inline for this to validate idx... */
2769 *tbl = &Ctmgrbase[(idx - NUM_KERN_PTES) / MMU_C_TBL_SIZE];
2770
2771 *pmap = (*tbl)->ct_pmap;
2772 /*
2773 * To find the va to which the PTE maps, we first take
2774 * the table's base virtual address mapping which is stored
2775 * in ct_va. We then increment this address by a page for
2776 * every slot skipped until we reach the PTE.
2777 */
2778 va = (*tbl)->ct_va;
2779 va += m68k_ptob(idx % MMU_C_TBL_SIZE);
2780 } else {
2781 /*
2782 * The PTE belongs to the kernel map.
2783 */
2784 *pmap = pmap_kernel();
2785
2786 va = m68k_ptob(idx);
2787 va += KERNBASE;
2788 }
2789
2790 return va;
2791 }
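/*
 * Worked example (assuming 64-entry C tables and 8KB pages): a user
 * PTE at idx == NUM_KERN_PTES + 130 belongs to Ctmgrbase[2] (130 / 64)
 * in slot 2 (130 % 64), so the va returned is that table's ct_va plus
 * 2 * NBPG.
 */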
2792
2793 /* pmap_clear_modify INTERFACE
2794 **
2795 * Clear the modification bit on the page at the specified
2796 * physical address.
2797 *
2798 */
2799 boolean_t
2800 pmap_clear_modify(pg)
2801 struct vm_page *pg;
2802 {
2803 paddr_t pa = VM_PAGE_TO_PHYS(pg);
2804 boolean_t rv;
2805
2806 rv = pmap_is_modified(pg);
2807 pmap_clear_pv(pa, PV_FLAGS_MDFY);
2808 return rv;
2809 }
2810
2811 /* pmap_clear_reference INTERFACE
2812 **
2813 * Clear the referenced bit on the page at the specified
2814 * physical address.
2815 */
2816 boolean_t
2817 pmap_clear_reference(pg)
2818 struct vm_page *pg;
2819 {
2820 paddr_t pa = VM_PAGE_TO_PHYS(pg);
2821 boolean_t rv;
2822
2823 rv = pmap_is_referenced(pg);
2824 pmap_clear_pv(pa, PV_FLAGS_USED);
2825 return rv;
2826 }
2827
2828 /* pmap_clear_pv INTERNAL
2829 **
2830 * Clears the specified flag from the specified physical address.
2831 * (Used by pmap_clear_modify() and pmap_clear_reference().)
2832 *
2833 * Flag is one of:
2834 * PV_FLAGS_MDFY - Page modified bit.
2835 * PV_FLAGS_USED - Page used (referenced) bit.
2836 *
2837 * This routine must not only clear the flag on the pv list
2838 * head. It must also clear the bit on every pte in the pv
2839 * list associated with the address.
2840 */
2841 void
2842 pmap_clear_pv(pa, flag)
2843 paddr_t pa;
2844 int flag;
2845 {
2846 pv_t *pv;
2847 int idx;
2848 vaddr_t va;
2849 pmap_t pmap;
2850 mmu_short_pte_t *pte;
2851 c_tmgr_t *c_tbl;
2852
2853 pv = pa2pv(pa);
2854 pv->pv_flags &= ~(flag);
2855 for (idx = pv->pv_idx; idx != PVE_EOL; idx = pvebase[idx].pve_next) {
2856 pte = &kernCbase[idx];
2857 pte->attr.raw &= ~(flag);
2858
2859 /*
2860 * The MC68030 MMU will not set the modified or
2861 * referenced bits on any MMU tables for which it has
2862 * a cached descriptor with its modify bit set. To ensure
2863 * that it will modify these bits on the PTE during the next
2864 * time it is written to or read from, we must flush it from
2865 * the ATC.
2866 *
2867 * Ordinarily it is only necessary to flush the descriptor
2868 * if it is used in the current address space. But since I
2869 * am not sure that there will always be a notion of
2870 * 'the current address space' when this function is called,
2871 * I will skip the test and always flush the address. It
2872 * does no harm.
2873 */
2874
2875 va = pmap_get_pteinfo(idx, &pmap, &c_tbl);
2876 TBIS(va);
2877 }
2878 }
2879
2880 /* pmap_extract INTERFACE
2881 **
2882 * Return the physical address mapped by the virtual address
2883 * in the specified pmap.
2884 *
2885 * Note: this function should also apply an exclusive lock
2886 * on the pmap system for its duration.
2887 */
2888 boolean_t
2889 pmap_extract(pmap, va, pap)
2890 pmap_t pmap;
2891 vaddr_t va;
2892 paddr_t *pap;
2893 {
2894 int a_idx, b_idx, pte_idx;
2895 a_tmgr_t *a_tbl;
2896 b_tmgr_t *b_tbl;
2897 c_tmgr_t *c_tbl;
2898 mmu_short_pte_t *c_pte;
2899
2900 if (pmap == pmap_kernel())
2901 return pmap_extract_kernel(va, pap);
2902
2903 if (pmap_stroll(pmap, va, &a_tbl, &b_tbl, &c_tbl,
2904 &c_pte, &a_idx, &b_idx, &pte_idx) == FALSE)
2905 return FALSE;
2906
2907 if (!MMU_VALID_DT(*c_pte))
2908 return FALSE;
2909
2910 if (pap != NULL)
2911 *pap = MMU_PTE_PA(*c_pte);
2912 return (TRUE);
2913 }
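/*
 * Example use (a sketch): translate a virtual address in the current
 * map and catch unmapped addresses.
 *
 *	paddr_t pa;
 *
 *	if (pmap_extract(current_pmap(), va, &pa) == FALSE)
 *		printf("va 0x%lx is not mapped\n", va);
 */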
2914
2915 /* pmap_extract_kernel INTERNAL
2916 **
2917 * Extract a translation from the kernel address space.
2918 */
2919 boolean_t
2920 pmap_extract_kernel(va, pap)
2921 vaddr_t va;
2922 paddr_t *pap;
2923 {
2924 mmu_short_pte_t *pte;
2925
2926 pte = &kernCbase[(u_int) m68k_btop(va - KERNBASE)];
2927 if (!MMU_VALID_DT(*pte))
2928 return (FALSE);
2929 if (pap != NULL)
2930 *pap = MMU_PTE_PA(*pte);
2931 return (TRUE);
2932 }
2933
2934 /* pmap_remove_kernel INTERNAL
2935 **
2936 * Remove the mapping of a range of virtual addresses from the kernel map.
2937 * The arguments are already page-aligned.
2938 */
2939 void
2940 pmap_remove_kernel(sva, eva)
2941 vaddr_t sva;
2942 vaddr_t eva;
2943 {
2944 int idx, eidx;
2945
2946 #ifdef PMAP_DEBUG
2947 if ((sva & PGOFSET) || (eva & PGOFSET))
2948 panic("pmap_remove_kernel: alignment");
2949 #endif
2950
2951 idx = m68k_btop(sva - KERNBASE);
2952 eidx = m68k_btop(eva - KERNBASE);
2953
2954 while (idx < eidx) {
2955 pmap_remove_pte(&kernCbase[idx++]);
2956 TBIS(sva);
2957 sva += NBPG;
2958 }
2959 }
2960
2961 /* pmap_remove INTERFACE
2962 **
2963 * Remove the mapping of a range of virtual addresses from the given pmap.
2964 *
2965 * If the range contains any wired entries, this function will probably
2966 * cause a disaster.
2967 */
2968 void
2969 pmap_remove(pmap, start, end)
2970 pmap_t pmap;
2971 vaddr_t start;
2972 vaddr_t end;
2973 {
2974
2975 if (pmap == pmap_kernel()) {
2976 pmap_remove_kernel(start, end);
2977 return;
2978 }
2979
2980 /*
2981 * If the pmap doesn't have an A table of its own, it has no mappings
2982 * that can be removed.
2983 */
2984 if (pmap->pm_a_tmgr == NULL)
2985 return;
2986
2987 /*
2988 * Remove the specified range from the pmap. If the function
2989 * returns true, the operation removed all the valid mappings
2990 * in the pmap and freed its A table. If this happened to the
2991 * currently loaded pmap, the MMU root pointer must be reloaded
2992 * with the default 'kernel' map.
2993 */
2994 if (pmap_remove_a(pmap->pm_a_tmgr, start, end)) {
2995 if (kernel_crp.rp_addr == pmap->pm_a_phys) {
2996 kernel_crp.rp_addr = kernAphys;
2997 loadcrp(&kernel_crp);
2998 /* will do TLB flush below */
2999 }
3000 pmap->pm_a_tmgr = NULL;
3001 pmap->pm_a_phys = kernAphys;
3002 }
3003
3004 /*
3005 * If we just modified the current address space,
3006 * make sure to flush the MMU cache.
3007 *
3008 * XXX - this could be an unecessarily large flush.
3009 * XXX - Could decide, based on the size of the VA range
3010 * to be removed, whether to flush "by pages" or "all".
3011 */
3012 if (pmap == current_pmap())
3013 TBIAU();
3014 }
3015
3016 /* pmap_remove_a INTERNAL
3017 **
3018 * This is function number one in a set of three that removes a range
3019 * of memory in the most efficient manner by removing the highest possible
3020 * tables from the memory space. This particular function attempts to remove
3021 * as many B tables as it can, delegating the remaining fragmented ranges to
3022 * pmap_remove_b().
3023 *
3024 * If the removal operation results in an empty A table, the function returns
3025 * TRUE.
3026 *
3027 * It's ugly but will do for now.
3028 */
3029 boolean_t
3030 pmap_remove_a(a_tbl, start, end)
3031 a_tmgr_t *a_tbl;
3032 vaddr_t start;
3033 vaddr_t end;
3034 {
3035 boolean_t empty;
3036 int idx;
3037 vaddr_t nstart, nend;
3038 b_tmgr_t *b_tbl;
3039 mmu_long_dte_t *a_dte;
3040 mmu_short_dte_t *b_dte;
3041
3042 /*
3043 * The following code works with what I call a 'granularity
3044 * reduction algorithm'. A range of addresses will always have
3045 * the following properties, which are classified according to
3046 * how the range relates to the size of the current granularity
3047 * - an A table entry:
3048 *
3049 * 1 2 3 4
3050 * -+---+---+---+---+---+---+---+-
3051 * -+---+---+---+---+---+---+---+-
3052 *
3053 * A range will always start on a granularity boundary, illustrated
3054 * by '+' signs in the table above, or it will start at some point
3055 * in between granularity boundaries, as illustrated by point 1.
3056 * The first step in removing a range of addresses is to remove the
3057 * range between 1 and 2, the nearest granularity boundary. This
3058 * job is handled by the section of code governed by the
3059 * 'if (start < nstart)' statement.
3060 *
3061 * A range will always encompass zero or more integral granules,
3062 * illustrated by points 2 and 3. Integral granules are easy to
3063 * remove. The removal of these granules is the second step, and
3064 * is handled by the code block 'if (nstart < nend)'.
3065 *
3066 * Lastly, a range will always end on a granularity boundary,
3067 * illustrated by point 3, or it will fall just beyond one, as shown
3068 * by point 4. The last step involves removing this range and is handled by
3069 * the code block 'if (nend < end)'.
3070 */
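	/*
	 * Worked example (assuming each A table entry maps 32MB, i.e.
	 * MMU_TIA_RANGE == 0x02000000): removing 0x01f00000..0x04100000
	 * splits into
	 *	0x01f00000..0x02000000	head, handed to pmap_remove_b()
	 *	0x02000000..0x04000000	one whole B table, freed below
	 *	0x04000000..0x04100000	tail, handed to pmap_remove_b()
	 */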
3071 nstart = MMU_ROUND_UP_A(start);
3072 nend = MMU_ROUND_A(end);
3073
3074 if (start < nstart) {
3075 /*
3076 * This block is executed if the range starts between
3077 * a granularity boundary.
3078 *
3079 * First find the DTE which is responsible for mapping
3080 * the start of the range.
3081 */
3082 idx = MMU_TIA(start);
3083 a_dte = &a_tbl->at_dtbl[idx];
3084
3085 /*
3086 * If the DTE is valid then delegate the removal of the sub
3087 * range to pmap_remove_b(), which can remove addresses at
3088 * a finer granularity.
3089 */
3090 if (MMU_VALID_DT(*a_dte)) {
3091 b_dte = mmu_ptov(a_dte->addr.raw);
3092 b_tbl = mmuB2tmgr(b_dte);
3093
3094 /*
3095 * The sub range to be removed starts at the start
3096 * of the full range we were asked to remove, and ends
3097 * at the lesser of:
3098 * 1. The end of the full range, -or-
3099 * 2. The start of the full range, rounded up to the
3100 * nearest granularity boundary (nstart).
3101 */
3102 if (end < nstart)
3103 empty = pmap_remove_b(b_tbl, start, end);
3104 else
3105 empty = pmap_remove_b(b_tbl, start, nstart);
3106
3107 /*
3108 * If the removal resulted in an empty B table,
3109 * invalidate the DTE that points to it and decrement
3110 * the valid entry count of the A table.
3111 */
3112 if (empty) {
3113 a_dte->attr.raw = MMU_DT_INVALID;
3114 a_tbl->at_ecnt--;
3115 }
3116 }
3117 /*
3118 * If the DTE is invalid, the address range is already non-
3119 * existent and can simply be skipped.
3120 */
3121 }
3122 if (nstart < nend) {
3123 /*
3124 * This block is executed if the range spans a whole number
3125 * multiple of granules (A table entries.)
3126 *
3127 * First find the DTE which is responsible for mapping
3128 * the start of the first granule involved.
3129 */
3130 idx = MMU_TIA(nstart);
3131 a_dte = &a_tbl->at_dtbl[idx];
3132
3133 /*
3134 * Remove entire sub-granules (B tables) one at a time,
3135 * until reaching the end of the range.
3136 */
3137 for (; nstart < nend; a_dte++, nstart += MMU_TIA_RANGE)
3138 if (MMU_VALID_DT(*a_dte)) {
3139 /*
3140 * Find the B table manager for the
3141 * entry and free it.
3142 */
3143 b_dte = mmu_ptov(a_dte->addr.raw);
3144 b_tbl = mmuB2tmgr(b_dte);
3145 free_b_table(b_tbl, TRUE);
3146
3147 /*
3148 * Invalidate the DTE that points to the
3149 * B table and decrement the valid entry
3150 * count of the A table.
3151 */
3152 a_dte->attr.raw = MMU_DT_INVALID;
3153 a_tbl->at_ecnt--;
3154 }
3155 }
3156 if (nend < end) {
3157 /*
3158 * This block is executed if the range ends beyond a
3159 * granularity boundary.
3160 *
3161 * First find the DTE which is responsible for mapping
3162 * the start of the nearest (rounded down) granularity
3163 * boundary.
3164 */
3165 idx = MMU_TIA(nend);
3166 a_dte = &a_tbl->at_dtbl[idx];
3167
3168 /*
3169 * If the DTE is valid then delegate the removal of the sub
3170 * range to pmap_remove_b(), which can remove addresses at
3171 * a finer granularity.
3172 */
3173 if (MMU_VALID_DT(*a_dte)) {
3174 /*
3175 * Find the B table manager for the entry
3176 * and hand it to pmap_remove_b() along with
3177 * the sub range.
3178 */
3179 b_dte = mmu_ptov(a_dte->addr.raw);
3180 b_tbl = mmuB2tmgr(b_dte);
3181
3182 empty = pmap_remove_b(b_tbl, nend, end);
3183
3184 /*
3185 * If the removal resulted in an empty B table,
3186 * invalidate the DTE that points to it and decrement
3187 * the valid entry count of the A table.
3188 */
3189 if (empty) {
3190 a_dte->attr.raw = MMU_DT_INVALID;
3191 a_tbl->at_ecnt--;
3192 }
3193 }
3194 }
3195
3196 /*
3197 * If there are no more entries in the A table, release it
3198 * back to the available pool and return TRUE.
3199 */
3200 if (a_tbl->at_ecnt == 0) {
3201 a_tbl->at_parent = NULL;
3202 TAILQ_REMOVE(&a_pool, a_tbl, at_link);
3203 TAILQ_INSERT_HEAD(&a_pool, a_tbl, at_link);
3204 empty = TRUE;
3205 } else {
3206 empty = FALSE;
3207 }
3208
3209 return empty;
3210 }
3211
3212 /* pmap_remove_b INTERNAL
3213 **
3214 * Remove a range of addresses from an address space, trying to remove entire
3215 * C tables if possible.
3216 *
3217 * If the operation results in an empty B table, the function returns TRUE.
3218 */
3219 boolean_t
3220 pmap_remove_b(b_tbl, start, end)
3221 b_tmgr_t *b_tbl;
3222 vaddr_t start;
3223 vaddr_t end;
3224 {
3225 boolean_t empty;
3226 int idx;
3227 vaddr_t nstart, nend, rstart;
3228 c_tmgr_t *c_tbl;
3229 mmu_short_dte_t *b_dte;
3230 mmu_short_pte_t *c_dte;
3231
3232
3233 nstart = MMU_ROUND_UP_B(start);
3234 nend = MMU_ROUND_B(end);
3235
3236 if (start < nstart) {
3237 idx = MMU_TIB(start);
3238 b_dte = &b_tbl->bt_dtbl[idx];
3239 if (MMU_VALID_DT(*b_dte)) {
3240 c_dte = mmu_ptov(MMU_DTE_PA(*b_dte));
3241 c_tbl = mmuC2tmgr(c_dte);
3242 if (end < nstart)
3243 empty = pmap_remove_c(c_tbl, start, end);
3244 else
3245 empty = pmap_remove_c(c_tbl, start, nstart);
3246 if (empty) {
3247 b_dte->attr.raw = MMU_DT_INVALID;
3248 b_tbl->bt_ecnt--;
3249 }
3250 }
3251 }
3252 if (nstart < nend) {
3253 idx = MMU_TIB(nstart);
3254 b_dte = &b_tbl->bt_dtbl[idx];
3255 rstart = nstart;
3256 while (rstart < nend) {
3257 if (MMU_VALID_DT(*b_dte)) {
3258 c_dte = mmu_ptov(MMU_DTE_PA(*b_dte));
3259 c_tbl = mmuC2tmgr(c_dte);
3260 free_c_table(c_tbl, TRUE);
3261 b_dte->attr.raw = MMU_DT_INVALID;
3262 b_tbl->bt_ecnt--;
3263 }
3264 b_dte++;
3265 rstart += MMU_TIB_RANGE;
3266 }
3267 }
3268 if (nend < end) {
3269 idx = MMU_TIB(nend);
3270 b_dte = &b_tbl->bt_dtbl[idx];
3271 if (MMU_VALID_DT(*b_dte)) {
3272 c_dte = mmu_ptov(MMU_DTE_PA(*b_dte));
3273 c_tbl = mmuC2tmgr(c_dte);
3274 empty = pmap_remove_c(c_tbl, nend, end);
3275 if (empty) {
3276 b_dte->attr.raw = MMU_DT_INVALID;
3277 b_tbl->bt_ecnt--;
3278 }
3279 }
3280 }
3281
3282 if (b_tbl->bt_ecnt == 0) {
3283 b_tbl->bt_parent = NULL;
3284 TAILQ_REMOVE(&b_pool, b_tbl, bt_link);
3285 TAILQ_INSERT_HEAD(&b_pool, b_tbl, bt_link);
3286 empty = TRUE;
3287 } else {
3288 empty = FALSE;
3289 }
3290
3291 return empty;
3292 }
3293
3294 /* pmap_remove_c INTERNAL
3295 **
3296 * Remove a range of addresses from the given C table.
3297 */
3298 boolean_t
3299 pmap_remove_c(c_tbl, start, end)
3300 c_tmgr_t *c_tbl;
3301 vaddr_t start;
3302 vaddr_t end;
3303 {
3304 boolean_t empty;
3305 int idx;
3306 mmu_short_pte_t *c_pte;
3307
3308 idx = MMU_TIC(start);
3309 c_pte = &c_tbl->ct_dtbl[idx];
3310 for (;start < end; start += MMU_PAGE_SIZE, c_pte++) {
3311 if (MMU_VALID_DT(*c_pte)) {
3312 pmap_remove_pte(c_pte);
3313 c_tbl->ct_ecnt--;
3314 }
3315 }
3316
3317 if (c_tbl->ct_ecnt == 0) {
3318 c_tbl->ct_parent = NULL;
3319 TAILQ_REMOVE(&c_pool, c_tbl, ct_link);
3320 TAILQ_INSERT_HEAD(&c_pool, c_tbl, ct_link);
3321 empty = TRUE;
3322 } else {
3323 empty = FALSE;
3324 }
3325
3326 return empty;
3327 }
3328
3329 /* is_managed INTERNAL
3330 **
3331 * Determine if the given physical address is managed by the PV system.
3332 * Note that this logic assumes that no one will ask for the status of
3333 * addresses which lie in between the memory banks on the 3/80. If they
3334 * do so, it will falsely report that it is managed.
3335 *
3336 * Note: A "managed" address is one that was reported to the VM system as
3337 * a "usable page" during system startup. As such, the VM system expects the
3338 * pmap module to keep accurate track of the usage of those pages.
3339 * Any page not given to the VM system at startup does not exist (as far as
3340 * the VM system is concerned) and is therefore "unmanaged." Examples are
3341 * those pages which belong to the ROM monitor and the memory allocated before
3342 * the VM system was started.
3343 */
3344 boolean_t
3345 is_managed(pa)
3346 paddr_t pa;
3347 {
3348 if (pa >= avail_start && pa < avail_end)
3349 return TRUE;
3350 else
3351 return FALSE;
3352 }
3353
3354 /* pmap_bootstrap_alloc INTERNAL
3355 **
3356 * Used internally for memory allocation at startup when malloc is not
3357 * available. This code will fail once it crosses the first memory
3358 * bank boundary on the 3/80. Hopefully by then however, the VM system
3359 * will be in charge of allocation.
3360 */
3361 void *
3362 pmap_bootstrap_alloc(size)
3363 int size;
3364 {
3365 void *rtn;
3366
3367 #ifdef PMAP_DEBUG
3368 if (bootstrap_alloc_enabled == FALSE) {
3369 mon_printf("pmap_bootstrap_alloc: disabled\n");
3370 sunmon_abort();
3371 }
3372 #endif
3373
3374 rtn = (void *) virtual_avail;
3375 virtual_avail += size;
3376
3377 #ifdef PMAP_DEBUG
3378 if (virtual_avail > virtual_contig_end) {
3379 mon_printf("pmap_bootstrap_alloc: out of mem\n");
3380 sunmon_abort();
3381 }
3382 #endif
3383
3384 return rtn;
3385 }
3386
3387 /* pmap_bootstrap_aalign INTERNAL
3388 **
3389 * Used to ensure that the next call to pmap_bootstrap_alloc() will
3390 * return a chunk of memory aligned to the specified size.
3391 *
3392 * Note: This function will only support alignment sizes that are powers
3393 * of two.
3394 */
3395 void
3396 pmap_bootstrap_aalign(size)
3397 int size;
3398 {
3399 int off;
3400
3401 off = virtual_avail & (size - 1);
3402 if (off) {
3403 (void) pmap_bootstrap_alloc(size - off);
3404 }
3405 }
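/*
 * Example (a sketch): to guarantee a page-aligned chunk during
 * bootstrap,
 *
 *	pmap_bootstrap_aalign(NBPG);
 *	ptr = pmap_bootstrap_alloc(NBPG);
 */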
3406
3407 /* pmap_pa_exists
3408 **
3409 * Used by the /dev/mem driver to see if a given PA is memory
3410 * that can be mapped. (The PA is not in a hole.)
3411 */
3412 int
3413 pmap_pa_exists(pa)
3414 paddr_t pa;
3415 {
3416 int i;
3417
3418 for (i = 0; i < SUN3X_NPHYS_RAM_SEGS; i++) {
3419 if ((pa >= avail_mem[i].pmem_start) &&
3420 (pa < avail_mem[i].pmem_end))
3421 return (1);
3422 if (avail_mem[i].pmem_next == NULL)
3423 break;
3424 }
3425 return (0);
3426 }
3427
3428 /* Called only from locore.s and pmap.c */
3429 void _pmap_switch __P((pmap_t pmap));
3430
3431 /*
3432 * _pmap_switch INTERNAL
3433 *
3434 * This is called by locore.s:cpu_switch() when it is
3435 * switching to a new process. Load new translations.
3436 * Note: done in-line by locore.s unless PMAP_DEBUG
3437 *
3438 * Note that we do NOT allocate a context here, but
3439 * share the "kernel only" context until we really
3440 * need our own context for user-space mappings in
3441 * pmap_enter_user(). [ s/context/mmu A table/ ]
3442 */
3443 void
3444 _pmap_switch(pmap)
3445 pmap_t pmap;
3446 {
3447 u_long rootpa;
3448
3449 /*
3450 * Only do reload/flush if we have to.
3451 * Note that if the old and new process
3452 * were BOTH using the "null" context,
3453 * then this will NOT flush the TLB.
3454 */
3455 rootpa = pmap->pm_a_phys;
3456 if (kernel_crp.rp_addr != rootpa) {
3457 DPRINT(("pmap_activate(%p)\n", pmap));
3458 kernel_crp.rp_addr = rootpa;
3459 loadcrp(&kernel_crp);
3460 TBIAU();
3461 }
3462 }
3463
3464 /*
3465 * Exported version of pmap_activate(). This is called from the
3466 * machine-independent VM code when a process is given a new pmap.
3467 * If (p == curproc) do like cpu_switch would do; otherwise just
3468 * take this as notification that the process has a new pmap.
3469 */
3470 void
3471 pmap_activate(p)
3472 struct proc *p;
3473 {
3474 if (p == curproc) {
3475 _pmap_switch(p->p_vmspace->vm_map.pmap);
3476 }
3477 }
3478
3479 /*
3480 * pmap_deactivate INTERFACE
3481 **
3482 * This is called to deactivate the specified process's address space.
3483 */
3484 void
3485 pmap_deactivate(p)
3486 struct proc *p;
3487 {
3488 /* Nothing to do. */
3489 }
3490
3491 /*
3492 * Fill in the sun3x-specific part of the kernel core header
3493 * for dumpsys(). (See machdep.c for the rest.)
3494 */
3495 void
3496 pmap_kcore_hdr(sh)
3497 struct sun3x_kcore_hdr *sh;
3498 {
3499 u_long spa, len;
3500 int i;
3501
3502 sh->pg_frame = MMU_SHORT_PTE_BASEADDR;
3503 sh->pg_valid = MMU_DT_PAGE;
3504 sh->contig_end = virtual_contig_end;
3505 sh->kernCbase = (u_long)kernCbase;
3506 for (i = 0; i < SUN3X_NPHYS_RAM_SEGS; i++) {
3507 spa = avail_mem[i].pmem_start;
3508 spa = m68k_trunc_page(spa);
3509 len = avail_mem[i].pmem_end - spa;
3510 len = m68k_round_page(len);
3511 sh->ram_segs[i].start = spa;
3512 sh->ram_segs[i].size = len;
3513 }
3514 }
3515
3516
3517 /* pmap_virtual_space INTERFACE
3518 **
3519 * Return the current available range of virtual addresses in the
3520 * arguments provided. Only really called once.
3521 */
3522 void
3523 pmap_virtual_space(vstart, vend)
3524 vaddr_t *vstart, *vend;
3525 {
3526 *vstart = virtual_avail;
3527 *vend = virtual_end;
3528 }
3529
3530 /*
3531 * Provide memory to the VM system.
3532 *
3533 * Assume avail_start is always in the
3534 * first segment as pmap_bootstrap does.
3535 */
3536 static void
3537 pmap_page_upload()
3538 {
3539 paddr_t a, b; /* memory range */
3540 int i;
3541
3542 /* Supply the memory in segments. */
3543 for (i = 0; i < SUN3X_NPHYS_RAM_SEGS; i++) {
3544 a = atop(avail_mem[i].pmem_start);
3545 b = atop(avail_mem[i].pmem_end);
3546 if (i == 0)
3547 a = atop(avail_start);
3548 if (avail_mem[i].pmem_end > avail_end)
3549 b = atop(avail_end);
3550
3551 uvm_page_physload(a, b, a, b, VM_FREELIST_DEFAULT);
3552
3553 if (avail_mem[i].pmem_next == NULL)
3554 break;
3555 }
3556 }
3557
3558 /* pmap_count INTERFACE
3559 **
3560 * Return the number of resident (valid) pages in the given pmap.
3561 *
3562 * Note: If this function is handed the kernel map, it will report
3563 * that it has no mappings. Hopefully the VM system won't ask for kernel
3564 * map statistics.
3565 */
3566 segsz_t
3567 pmap_count(pmap, type)
3568 pmap_t pmap;
3569 int type;
3570 {
3571 u_int count;
3572 int a_idx, b_idx;
3573 a_tmgr_t *a_tbl;
3574 b_tmgr_t *b_tbl;
3575 c_tmgr_t *c_tbl;
3576
3577 /*
3578 * If the pmap does not have its own A table manager, it has no
3579 * valid entries.
3580 */
3581 if (pmap->pm_a_tmgr == NULL)
3582 return 0;
3583
3584 a_tbl = pmap->pm_a_tmgr;
3585
3586 count = 0;
3587 for (a_idx = 0; a_idx < MMU_TIA(KERNBASE); a_idx++) {
3588 if (MMU_VALID_DT(a_tbl->at_dtbl[a_idx])) {
3589 b_tbl = mmuB2tmgr(mmu_ptov(a_tbl->at_dtbl[a_idx].addr.raw));
3590 for (b_idx = 0; b_idx < MMU_B_TBL_SIZE; b_idx++) {
3591 if (MMU_VALID_DT(b_tbl->bt_dtbl[b_idx])) {
3592 c_tbl = mmuC2tmgr(
3593 mmu_ptov(MMU_DTE_PA(b_tbl->bt_dtbl[b_idx])));
3594 if (type == 0)
3595 /*
3596 * A resident entry count has been requested.
3597 */
3598 count += c_tbl->ct_ecnt;
3599 else
3600 /*
3601 * A wired entry count has been requested.
3602 */
3603 count += c_tbl->ct_wcnt;
3604 }
3605 }
3606 }
3607 }
3608
3609 return count;
3610 }
3611
3612 /************************ SUN3 COMPATIBILITY ROUTINES ********************
3613 * The following routines are only used by DDB for tricky kernel *
3614 * text operations in db_memrw.c. They are provided for sun3 *
3615 * compatibility. *
3616 *************************************************************************/
3617 /* get_pte INTERNAL
3618 **
3619 * Return the page descriptor that describes the kernel mapping
3620 * of the given virtual address.
3621 */
3622 extern u_long ptest_addr __P((u_long)); /* XXX: locore.s */
3623 u_int
3624 get_pte(va)
3625 vaddr_t va;
3626 {
3627 u_long pte_pa;
3628 mmu_short_pte_t *pte;
3629
3630 /* Get the physical address of the PTE */
3631 pte_pa = ptest_addr(va & ~PGOFSET);
3632
3633 /* Convert to a virtual address... */
3634 pte = (mmu_short_pte_t *) (KERNBASE + pte_pa);
3635
3636 /* Make sure it is in our level-C tables... */
3637 if ((pte < kernCbase) ||
3638 (pte >= &mmuCbase[NUM_USER_PTES]))
3639 return 0;
3640
3641 /* ... and just return its contents. */
3642 return (pte->attr.raw);
3643 }
3644
3645
3646 /* set_pte INTERNAL
3647 **
3648 * Set the page descriptor that describes the kernel mapping
3649 * of the given virtual address.
3650 */
3651 void
3652 set_pte(va, pte)
3653 vaddr_t va;
3654 u_int pte;
3655 {
3656 u_long idx;
3657
3658 if (va < KERNBASE)
3659 return;
3660
3661 idx = (unsigned long) m68k_btop(va - KERNBASE);
3662 kernCbase[idx].attr.raw = pte;
3663 TBIS(va);
3664 }
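/*
 * A sketch of how db_memrw.c might use the two helpers above to write
 * into otherwise write-protected kernel text:
 *
 *	u_int oldpte = get_pte(va);
 *	set_pte(va, oldpte & ~MMU_SHORT_PTE_WP);
 *	... modify the page ...
 *	set_pte(va, oldpte);
 */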
3665
3666 /*
3667 * Routine: pmap_procwr
3668 *
3669 * Function:
3670 * Synchronize caches corresponding to [addr, addr+len) in p.
3671 */
3672 void
3673 pmap_procwr(p, va, len)
3674 struct proc *p;
3675 vaddr_t va;
3676 size_t len;
3677 {
3678 (void)cachectl1(0x80000004, va, len, p);
3679 }
3680
3681
3682 #ifdef PMAP_DEBUG
3683 /************************** DEBUGGING ROUTINES **************************
3684 * The following routines are meant to be an aid to debugging the pmap *
3685 * system. They are callable from the DDB command line and should be *
3686 * prepared to be handed unstable or incomplete states of the system. *
3687 ************************************************************************/
3688
3689 /* pv_list
3690 **
3691 * List all pages found on the pv list for the given physical page.
3692 * To avoid endless loops, the listing will stop at the end of the list
3693 * or after 'n' entries - whichever comes first.
3694 */
3695 void
3696 pv_list(pa, n)
3697 paddr_t pa;
3698 int n;
3699 {
3700 int idx;
3701 vaddr_t va;
3702 pv_t *pv;
3703 c_tmgr_t *c_tbl;
3704 pmap_t pmap;
3705
3706 pv = pa2pv(pa);
3707 idx = pv->pv_idx;
3708 for (; idx != PVE_EOL && n > 0; idx = pvebase[idx].pve_next, n--) {
3709 va = pmap_get_pteinfo(idx, &pmap, &c_tbl);
3710 printf("idx %d, pmap 0x%x, va 0x%x, c_tbl %x\n",
3711 idx, (u_int) pmap, (u_int) va, (u_int) c_tbl);
3712 }
3713 }
3714 #endif /* PMAP_DEBUG */
3715
3716 #ifdef NOT_YET
3717 /* and maybe not ever */
3718 /************************** LOW-LEVEL ROUTINES **************************
3719 * These routines will eventually be re-written into assembly and placed *
3720 * in locore.s. They are here now as stubs so that the pmap module can *
3721 * be linked as a standalone user program for testing. *
3722 ************************************************************************/
3723 /* flush_atc_crp INTERNAL
3724 **
3725 * Flush all page descriptors derived from the given CPU Root Pointer
3726 * (CRP), or 'A' table as it is known here, from the 68851's automatic
3727 * cache.
3728 */
3729 void
3730 flush_atc_crp(a_tbl)
3731 {
3732 mmu_long_rp_t rp;
3733
3734 /* Create a temporary root table pointer that points to the
3735 * given A table.
3736 */
3737 rp.attr.raw = ~MMU_LONG_RP_LU;
3738 rp.addr.raw = (unsigned int) a_tbl;
3739
3740 mmu_pflushr(&rp);
3741 /* mmu_pflushr:
3742 * movel sp(4)@,a0
3743 * pflushr a0@
3744 * rts
3745 */
3746 }
3747 #endif /* NOT_YET */
3748