pmap.c revision 1.73.4.5 1 /* $NetBSD: pmap.c,v 1.73.4.5 2002/07/12 01:39:53 nathanw Exp $ */
2
3 /*-
4 * Copyright (c) 1996, 1997 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Jeremy Cooper.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement:
20 * This product includes software developed by the NetBSD
21 * Foundation, Inc. and its contributors.
22 * 4. Neither the name of The NetBSD Foundation nor the names of its
23 * contributors may be used to endorse or promote products derived
24 * from this software without specific prior written permission.
25 *
26 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
27 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
30 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36 * POSSIBILITY OF SUCH DAMAGE.
37 */
38
39 /*
40 * XXX These comments aren't quite accurate. Need to change.
41 * The sun3x uses the MC68851 Memory Management Unit, which is built
42 * into the CPU. The 68851 maps virtual to physical addresses using
43 * a multi-level table lookup, which is stored in the very memory that
44 * it maps. The number of levels of lookup is configurable from one
45 * to four. In this implementation, we use three, named 'A' through 'C'.
46 *
47 * The MMU translates virtual addresses into physical addresses by
48 * traversing these tables in a proccess called a 'table walk'. The most
49 * significant 7 bits of the Virtual Address ('VA') being translated are
50 * used as an index into the level A table, whose base in physical memory
51 * is stored in a special MMU register, the 'CPU Root Pointer' or CRP. The
52 * address found at that index in the A table is used as the base
53 * address for the next table, the B table. The next six bits of the VA are
54 * used as an index into the B table, which in turn gives the base address
55 * of the third and final C table.
56 *
57 * The next six bits of the VA are used as an index into the C table to
58 * locate a Page Table Entry (PTE). The PTE is a physical address in memory
59 * to which the remaining 13 bits of the VA are added, producing the
60 * mapped physical address.
61 *
62 * To map the entire memory space in this manner would require 2114296 bytes
63 * of page tables per process - quite expensive. Instead we will
64 * allocate a fixed but considerably smaller space for the page tables at
65 * the time the VM system is initialized. When the pmap code is asked by
66 * the kernel to map a VA to a PA, it allocates tables as needed from this
67 * pool. When there are no more tables in the pool, tables are stolen
68 * from the oldest mapped entries in the tree. This is only possible
69 * because all memory mappings are stored in the kernel memory map
70 * structures, independent of the pmap structures. A VA which references
71 * one of these invalidated maps will cause a page fault. The kernel
72 * will determine that the page fault was caused by a task using a valid
73 * VA, but for some reason (which does not concern it), that address was
74 * not mapped. It will ask the pmap code to re-map the entry and then
75 * it will resume executing the faulting task.
76 *
77 * In this manner the most efficient use of the page table space is
78 * achieved. Tasks which do not execute often will have their tables
79 * stolen and reused by tasks which execute more frequently. The best
80 * size for the page table pool will probably be determined by
81 * experimentation.
82 *
83 * You read all of the comments so far. Good for you.
84 * Now go play!
85 */
86
87 /*** A Note About the 68851 Address Translation Cache
88 * The MC68851 has a 64 entry cache, called the Address Translation Cache
89 * or 'ATC'. This cache stores the most recently used page descriptors
90 * accessed by the MMU when it does translations. Using a marker called a
91 * 'task alias' the MMU can store the descriptors from 8 different table
92 * spaces concurrently. The task alias is associated with the base
93 * address of the level A table of that address space. When an address
94 * space is currently active (the CRP currently points to its A table)
95 * the only cached descriptors that will be obeyed are ones which have a
96 * matching task alias of the current space associated with them.
97 *
98 * Since the cache is always consulted before any table lookups are done,
99 * it is important that it accurately reflect the state of the MMU tables.
100 * Whenever a change has been made to a table that has been loaded into
101 * the MMU, the code must be sure to flush any cached entries that are
102 * affected by the change. These instances are documented in the code at
103 * various points.
104 */
105 /*** A Note About the Note About the 68851 Address Translation Cache
106 * 4 months into this code I discovered that the sun3x does not have
107 * a MC68851 chip. Instead, it has a version of this MMU that is part of the
108 * the 68030 CPU.
109 * All though it behaves very similarly to the 68851, it only has 1 task
110 * alias and a 22 entry cache. So sadly (or happily), the first paragraph
111 * of the previous note does not apply to the sun3x pmap.
112 */
113
114 #include "opt_ddb.h"
115
116 #include <sys/param.h>
117 #include <sys/systm.h>
118 #include <sys/proc.h>
119 #include <sys/malloc.h>
120 #include <sys/pool.h>
121 #include <sys/user.h>
122 #include <sys/queue.h>
123 #include <sys/kcore.h>
124
125 #include <uvm/uvm.h>
126
127 #include <machine/cpu.h>
128 #include <machine/kcore.h>
129 #include <machine/mon.h>
130 #include <machine/pmap.h>
131 #include <machine/pte.h>
132 #include <machine/vmparam.h>
133
134 #include <sun3/sun3/cache.h>
135 #include <sun3/sun3/machdep.h>
136
137 #include "pmap_pvt.h"
138
139 /* XXX - What headers declare these? */
140 extern struct pcb *curpcb;
141 extern int physmem;
142
143 /* Defined in locore.s */
144 extern char kernel_text[];
145
146 /* Defined by the linker */
147 extern char etext[], edata[], end[];
148 extern char *esym; /* DDB */
149
150 /*************************** DEBUGGING DEFINITIONS ***********************
151 * Macros, preprocessor defines and variables used in debugging can make *
152 * code hard to read. Anything used exclusively for debugging purposes *
153 * is defined here to avoid having such mess scattered around the file. *
154 *************************************************************************/
155 #ifdef PMAP_DEBUG
156 /*
157 * To aid the debugging process, macros should be expanded into smaller steps
158 * that accomplish the same goal, yet provide convenient places for placing
159 * breakpoints. When this code is compiled with PMAP_DEBUG mode defined, the
160 * 'INLINE' keyword is defined to an empty string. This way, any function
161 * defined to be a 'static INLINE' will become 'outlined' and compiled as
162 * a separate function, which is much easier to debug.
163 */
164 #define INLINE /* nothing */
165
166 /*
167 * It is sometimes convenient to watch the activity of a particular table
168 * in the system. The following variables are used for that purpose.
169 */
170 a_tmgr_t *pmap_watch_atbl = 0;
171 b_tmgr_t *pmap_watch_btbl = 0;
172 c_tmgr_t *pmap_watch_ctbl = 0;
173
174 int pmap_debug = 0;
175 #define DPRINT(args) if (pmap_debug) printf args
176
177 #else /********** Stuff below is defined if NOT debugging **************/
178
179 #define INLINE inline
180 #define DPRINT(args) /* nada */
181
182 #endif /* PMAP_DEBUG */
183 /*********************** END OF DEBUGGING DEFINITIONS ********************/
184
185 /*** Management Structure - Memory Layout
186 * For every MMU table in the sun3x pmap system there must be a way to
187 * manage it; we must know which process is using it, what other tables
188 * depend on it, and whether or not it contains any locked pages. This
189 * is solved by the creation of 'table management' or 'tmgr'
190 * structures. One for each MMU table in the system.
191 *
192 * MAP OF MEMORY USED BY THE PMAP SYSTEM
193 *
194 * towards lower memory
195 * kernAbase -> +-------------------------------------------------------+
196 * | Kernel MMU A level table |
197 * kernBbase -> +-------------------------------------------------------+
198 * | Kernel MMU B level tables |
199 * kernCbase -> +-------------------------------------------------------+
200 * | |
201 * | Kernel MMU C level tables |
202 * | |
203 * mmuCbase -> +-------------------------------------------------------+
204 * | User MMU C level tables |
205 * mmuAbase -> +-------------------------------------------------------+
206 * | |
207 * | User MMU A level tables |
208 * | |
209 * mmuBbase -> +-------------------------------------------------------+
210 * | User MMU B level tables |
211 * tmgrAbase -> +-------------------------------------------------------+
212 * | TMGR A level table structures |
213 * tmgrBbase -> +-------------------------------------------------------+
214 * | TMGR B level table structures |
215 * tmgrCbase -> +-------------------------------------------------------+
216 * | TMGR C level table structures |
217 * pvbase -> +-------------------------------------------------------+
218 * | Physical to Virtual mapping table (list heads) |
219 * pvebase -> +-------------------------------------------------------+
220 * | Physical to Virtual mapping table (list elements) |
221 * | |
222 * +-------------------------------------------------------+
223 * towards higher memory
224 *
225 * For every A table in the MMU A area, there will be a corresponding
226 * a_tmgr structure in the TMGR A area. The same will be true for
227 * the B and C tables. This arrangement will make it easy to find the
228 * controling tmgr structure for any table in the system by use of
229 * (relatively) simple macros.
230 */
231
232 /*
233 * Global variables for storing the base addresses for the areas
234 * labeled above.
235 */
236 static vaddr_t kernAphys;
237 static mmu_long_dte_t *kernAbase;
238 static mmu_short_dte_t *kernBbase;
239 static mmu_short_pte_t *kernCbase;
240 static mmu_short_pte_t *mmuCbase;
241 static mmu_short_dte_t *mmuBbase;
242 static mmu_long_dte_t *mmuAbase;
243 static a_tmgr_t *Atmgrbase;
244 static b_tmgr_t *Btmgrbase;
245 static c_tmgr_t *Ctmgrbase;
246 static pv_t *pvbase;
247 static pv_elem_t *pvebase;
248 struct pmap kernel_pmap;
249
250 /*
251 * This holds the CRP currently loaded into the MMU.
252 */
253 struct mmu_rootptr kernel_crp;
254
255 /*
256 * Just all around global variables.
257 */
258 static TAILQ_HEAD(a_pool_head_struct, a_tmgr_struct) a_pool;
259 static TAILQ_HEAD(b_pool_head_struct, b_tmgr_struct) b_pool;
260 static TAILQ_HEAD(c_pool_head_struct, c_tmgr_struct) c_pool;
261
262
263 /*
264 * Flags used to mark the safety/availability of certain operations or
265 * resources.
266 */
267 static boolean_t bootstrap_alloc_enabled = FALSE; /*Safe to use pmap_bootstrap_alloc().*/
268 int tmp_vpages_inuse; /* Temporary virtual pages are in use */
269
270 /*
271 * XXX: For now, retain the traditional variables that were
272 * used in the old pmap/vm interface (without NONCONTIG).
273 */
274 /* Kernel virtual address space available: */
275 vaddr_t virtual_avail, virtual_end;
276 /* Physical address space available: */
277 paddr_t avail_start, avail_end;
278
279 /* This keep track of the end of the contiguously mapped range. */
280 vaddr_t virtual_contig_end;
281
282 /* Physical address used by pmap_next_page() */
283 paddr_t avail_next;
284
285 /* These are used by pmap_copy_page(), etc. */
286 vaddr_t tmp_vpages[2];
287
288 /* memory pool for pmap structures */
289 struct pool pmap_pmap_pool;
290
291 /*
292 * The 3/80 is the only member of the sun3x family that has non-contiguous
293 * physical memory. Memory is divided into 4 banks which are physically
294 * locatable on the system board. Although the size of these banks varies
295 * with the size of memory they contain, their base addresses are
296 * permenently fixed. The following structure, which describes these
297 * banks, is initialized by pmap_bootstrap() after it reads from a similar
298 * structure provided by the ROM Monitor.
299 *
300 * For the other machines in the sun3x architecture which do have contiguous
301 * RAM, this list will have only one entry, which will describe the entire
302 * range of available memory.
303 */
304 struct pmap_physmem_struct avail_mem[SUN3X_NPHYS_RAM_SEGS];
305 u_int total_phys_mem;
306
307 /*************************************************************************/
308
309 /*
310 * XXX - Should "tune" these based on statistics.
311 *
312 * My first guess about the relative numbers of these needed is
313 * based on the fact that a "typical" process will have several
314 * pages mapped at low virtual addresses (text, data, bss), then
315 * some mapped shared libraries, and then some stack pages mapped
316 * near the high end of the VA space. Each process can use only
317 * one A table, and most will use only two B tables (maybe three)
318 * and probably about four C tables. Therefore, the first guess
319 * at the relative numbers of these needed is 1:2:4 -gwr
320 *
321 * The number of C tables needed is closely related to the amount
322 * of physical memory available plus a certain amount attributable
323 * to the use of double mappings. With a few simulation statistics
324 * we can find a reasonably good estimation of this unknown value.
325 * Armed with that and the above ratios, we have a good idea of what
326 * is needed at each level. -j
327 *
328 * Note: It is not physical memory memory size, but the total mapped
329 * virtual space required by the combined working sets of all the
330 * currently _runnable_ processes. (Sleeping ones don't count.)
331 * The amount of physical memory should be irrelevant. -gwr
332 */
333 #ifdef FIXED_NTABLES
334 #define NUM_A_TABLES 16
335 #define NUM_B_TABLES 32
336 #define NUM_C_TABLES 64
337 #else
338 unsigned int NUM_A_TABLES, NUM_B_TABLES, NUM_C_TABLES;
339 #endif /* FIXED_NTABLES */
340
341 /*
342 * This determines our total virtual mapping capacity.
343 * Yes, it is a FIXED value so we can pre-allocate.
344 */
345 #define NUM_USER_PTES (NUM_C_TABLES * MMU_C_TBL_SIZE)
346
347 /*
348 * The size of the Kernel Virtual Address Space (KVAS)
349 * for purposes of MMU table allocation is -KERNBASE
350 * (length from KERNBASE to 0xFFFFffff)
351 */
352 #define KVAS_SIZE (-KERNBASE)
353
354 /* Numbers of kernel MMU tables to support KVAS_SIZE. */
355 #define KERN_B_TABLES (KVAS_SIZE >> MMU_TIA_SHIFT)
356 #define KERN_C_TABLES (KVAS_SIZE >> MMU_TIB_SHIFT)
357 #define NUM_KERN_PTES (KVAS_SIZE >> MMU_TIC_SHIFT)
358
359 /*************************** MISCELANEOUS MACROS *************************/
360 #define pmap_lock(pmap) simple_lock(&pmap->pm_lock)
361 #define pmap_unlock(pmap) simple_unlock(&pmap->pm_lock)
362 #define pmap_add_ref(pmap) ++pmap->pm_refcount
363 #define pmap_del_ref(pmap) --pmap->pm_refcount
364 #define pmap_refcount(pmap) pmap->pm_refcount
365
366 void *pmap_bootstrap_alloc(int);
367
368 static INLINE void *mmu_ptov __P((paddr_t));
369 static INLINE paddr_t mmu_vtop __P((void *));
370
371 #if 0
372 static INLINE a_tmgr_t * mmuA2tmgr __P((mmu_long_dte_t *));
373 #endif /* 0 */
374 static INLINE b_tmgr_t * mmuB2tmgr __P((mmu_short_dte_t *));
375 static INLINE c_tmgr_t * mmuC2tmgr __P((mmu_short_pte_t *));
376
377 static INLINE pv_t *pa2pv __P((paddr_t));
378 static INLINE int pteidx __P((mmu_short_pte_t *));
379 static INLINE pmap_t current_pmap __P((void));
380
381 /*
382 * We can always convert between virtual and physical addresses
383 * for anything in the range [KERNBASE ... avail_start] because
384 * that range is GUARANTEED to be mapped linearly.
385 * We rely heavily upon this feature!
386 */
387 static INLINE void *
388 mmu_ptov(pa)
389 paddr_t pa;
390 {
391 vaddr_t va;
392
393 va = (pa + KERNBASE);
394 #ifdef PMAP_DEBUG
395 if ((va < KERNBASE) || (va >= virtual_contig_end))
396 panic("mmu_ptov");
397 #endif
398 return ((void*)va);
399 }
400
401 static INLINE paddr_t
402 mmu_vtop(vva)
403 void *vva;
404 {
405 vaddr_t va;
406
407 va = (vaddr_t)vva;
408 #ifdef PMAP_DEBUG
409 if ((va < KERNBASE) || (va >= virtual_contig_end))
410 panic("mmu_vtop");
411 #endif
412 return (va - KERNBASE);
413 }
414
415 /*
416 * These macros map MMU tables to their corresponding manager structures.
417 * They are needed quite often because many of the pointers in the pmap
418 * system reference MMU tables and not the structures that control them.
419 * There needs to be a way to find one when given the other and these
420 * macros do so by taking advantage of the memory layout described above.
421 * Here's a quick step through the first macro, mmuA2tmgr():
422 *
423 * 1) find the offset of the given MMU A table from the base of its table
424 * pool (table - mmuAbase).
425 * 2) convert this offset into a table index by dividing it by the
426 * size of one MMU 'A' table. (sizeof(mmu_long_dte_t) * MMU_A_TBL_SIZE)
427 * 3) use this index to select the corresponding 'A' table manager
428 * structure from the 'A' table manager pool (Atmgrbase[index]).
429 */
430 /* This function is not currently used. */
431 #if 0
432 static INLINE a_tmgr_t *
433 mmuA2tmgr(mmuAtbl)
434 mmu_long_dte_t *mmuAtbl;
435 {
436 int idx;
437
438 /* Which table is this in? */
439 idx = (mmuAtbl - mmuAbase) / MMU_A_TBL_SIZE;
440 #ifdef PMAP_DEBUG
441 if ((idx < 0) || (idx >= NUM_A_TABLES))
442 panic("mmuA2tmgr");
443 #endif
444 return (&Atmgrbase[idx]);
445 }
446 #endif /* 0 */
447
448 static INLINE b_tmgr_t *
449 mmuB2tmgr(mmuBtbl)
450 mmu_short_dte_t *mmuBtbl;
451 {
452 int idx;
453
454 /* Which table is this in? */
455 idx = (mmuBtbl - mmuBbase) / MMU_B_TBL_SIZE;
456 #ifdef PMAP_DEBUG
457 if ((idx < 0) || (idx >= NUM_B_TABLES))
458 panic("mmuB2tmgr");
459 #endif
460 return (&Btmgrbase[idx]);
461 }
462
463 /* mmuC2tmgr INTERNAL
464 **
465 * Given a pte known to belong to a C table, return the address of
466 * that table's management structure.
467 */
468 static INLINE c_tmgr_t *
469 mmuC2tmgr(mmuCtbl)
470 mmu_short_pte_t *mmuCtbl;
471 {
472 int idx;
473
474 /* Which table is this in? */
475 idx = (mmuCtbl - mmuCbase) / MMU_C_TBL_SIZE;
476 #ifdef PMAP_DEBUG
477 if ((idx < 0) || (idx >= NUM_C_TABLES))
478 panic("mmuC2tmgr");
479 #endif
480 return (&Ctmgrbase[idx]);
481 }
482
483 /* This is now a function call below.
484 * #define pa2pv(pa) \
485 * (&pvbase[(unsigned long)\
486 * m68k_btop(pa)\
487 * ])
488 */
489
490 /* pa2pv INTERNAL
491 **
492 * Return the pv_list_head element which manages the given physical
493 * address.
494 */
495 static INLINE pv_t *
496 pa2pv(pa)
497 paddr_t pa;
498 {
499 struct pmap_physmem_struct *bank;
500 int idx;
501
502 bank = &avail_mem[0];
503 while (pa >= bank->pmem_end)
504 bank = bank->pmem_next;
505
506 pa -= bank->pmem_start;
507 idx = bank->pmem_pvbase + m68k_btop(pa);
508 #ifdef PMAP_DEBUG
509 if ((idx < 0) || (idx >= physmem))
510 panic("pa2pv");
511 #endif
512 return &pvbase[idx];
513 }
514
515 /* pteidx INTERNAL
516 **
517 * Return the index of the given PTE within the entire fixed table of
518 * PTEs.
519 */
520 static INLINE int
521 pteidx(pte)
522 mmu_short_pte_t *pte;
523 {
524 return (pte - kernCbase);
525 }
526
527 /*
528 * This just offers a place to put some debugging checks,
529 * and reduces the number of places "curlwp" appears...
530 */
531 static INLINE pmap_t
532 current_pmap()
533 {
534 struct vmspace *vm;
535 struct vm_map *map;
536 pmap_t pmap;
537
538 if (curlwp == NULL)
539 pmap = &kernel_pmap;
540 else {
541 vm = curproc->p_vmspace;
542 map = &vm->vm_map;
543 pmap = vm_map_pmap(map);
544 }
545
546 return (pmap);
547 }
548
549
550 /*************************** FUNCTION DEFINITIONS ************************
551 * These appear here merely for the compiler to enforce type checking on *
552 * all function calls. *
553 *************************************************************************/
554
555 /** Internal functions
556 ** Most functions used only within this module are defined in
557 ** pmap_pvt.h (why not here if used only here?)
558 **/
559 static void pmap_page_upload __P((void));
560
561 /** Interface functions
562 ** - functions required by the Mach VM Pmap interface, with MACHINE_CONTIG
563 ** defined.
564 **/
565 void pmap_pinit __P((pmap_t));
566 void pmap_release __P((pmap_t));
567
568 /********************************** CODE ********************************
569 * Functions that are called from other parts of the kernel are labeled *
570 * as 'INTERFACE' functions. Functions that are only called from *
571 * within the pmap module are labeled as 'INTERNAL' functions. *
572 * Functions that are internal, but are not (currently) used at all are *
573 * labeled 'INTERNAL_X'. *
574 ************************************************************************/
575
576 /* pmap_bootstrap INTERNAL
577 **
578 * Initializes the pmap system. Called at boot time from
579 * locore2.c:_vm_init()
580 *
581 * Reminder: having a pmap_bootstrap_alloc() and also having the VM
582 * system implement pmap_steal_memory() is redundant.
583 * Don't release this code without removing one or the other!
584 */
585 void
586 pmap_bootstrap(nextva)
587 vaddr_t nextva;
588 {
589 struct physmemory *membank;
590 struct pmap_physmem_struct *pmap_membank;
591 vaddr_t va, eva;
592 paddr_t pa;
593 int b, c, i, j; /* running table counts */
594 int size, resvmem;
595
596 /*
597 * This function is called by __bootstrap after it has
598 * determined the type of machine and made the appropriate
599 * patches to the ROM vectors (XXX- I don't quite know what I meant
600 * by that.) It allocates and sets up enough of the pmap system
601 * to manage the kernel's address space.
602 */
603
604 /*
605 * Determine the range of kernel virtual and physical
606 * space available. Note that we ABSOLUTELY DEPEND on
607 * the fact that the first bank of memory (4MB) is
608 * mapped linearly to KERNBASE (which we guaranteed in
609 * the first instructions of locore.s).
610 * That is plenty for our bootstrap work.
611 */
612 virtual_avail = m68k_round_page(nextva);
613 virtual_contig_end = KERNBASE + 0x400000; /* +4MB */
614 virtual_end = VM_MAX_KERNEL_ADDRESS;
615 /* Don't need avail_start til later. */
616
617 /* We may now call pmap_bootstrap_alloc(). */
618 bootstrap_alloc_enabled = TRUE;
619
620 /*
621 * This is a somewhat unwrapped loop to deal with
622 * copying the PROM's 'phsymem' banks into the pmap's
623 * banks. The following is always assumed:
624 * 1. There is always at least one bank of memory.
625 * 2. There is always a last bank of memory, and its
626 * pmem_next member must be set to NULL.
627 */
628 membank = romVectorPtr->v_physmemory;
629 pmap_membank = avail_mem;
630 total_phys_mem = 0;
631
632 for (;;) { /* break on !membank */
633 pmap_membank->pmem_start = membank->address;
634 pmap_membank->pmem_end = membank->address + membank->size;
635 total_phys_mem += membank->size;
636 membank = membank->next;
637 if (!membank)
638 break;
639 /* This silly syntax arises because pmap_membank
640 * is really a pre-allocated array, but it is put into
641 * use as a linked list.
642 */
643 pmap_membank->pmem_next = pmap_membank + 1;
644 pmap_membank = pmap_membank->pmem_next;
645 }
646 /* This is the last element. */
647 pmap_membank->pmem_next = NULL;
648
649 /*
650 * Note: total_phys_mem, physmem represent
651 * actual physical memory, including that
652 * reserved for the PROM monitor.
653 */
654 physmem = btoc(total_phys_mem);
655
656 /*
657 * Avail_end is set to the first byte of physical memory
658 * after the end of the last bank. We use this only to
659 * determine if a physical address is "managed" memory.
660 * This address range should be reduced to prevent the
661 * physical pages needed by the PROM monitor from being used
662 * in the VM system.
663 */
664 resvmem = total_phys_mem - *(romVectorPtr->memoryAvail);
665 resvmem = m68k_round_page(resvmem);
666 avail_end = pmap_membank->pmem_end - resvmem;
667
668 /*
669 * First allocate enough kernel MMU tables to map all
670 * of kernel virtual space from KERNBASE to 0xFFFFFFFF.
671 * Note: All must be aligned on 256 byte boundaries.
672 * Start with the level-A table (one of those).
673 */
674 size = sizeof(mmu_long_dte_t) * MMU_A_TBL_SIZE;
675 kernAbase = pmap_bootstrap_alloc(size);
676 memset(kernAbase, 0, size);
677
678 /* Now the level-B kernel tables... */
679 size = sizeof(mmu_short_dte_t) * MMU_B_TBL_SIZE * KERN_B_TABLES;
680 kernBbase = pmap_bootstrap_alloc(size);
681 memset(kernBbase, 0, size);
682
683 /* Now the level-C kernel tables... */
684 size = sizeof(mmu_short_pte_t) * MMU_C_TBL_SIZE * KERN_C_TABLES;
685 kernCbase = pmap_bootstrap_alloc(size);
686 memset(kernCbase, 0, size);
687 /*
688 * Note: In order for the PV system to work correctly, the kernel
689 * and user-level C tables must be allocated contiguously.
690 * Nothing should be allocated between here and the allocation of
691 * mmuCbase below. XXX: Should do this as one allocation, and
692 * then compute a pointer for mmuCbase instead of this...
693 *
694 * Allocate user MMU tables.
695 * These must be contiguous with the preceding.
696 */
697
698 #ifndef FIXED_NTABLES
699 /*
700 * The number of user-level C tables that should be allocated is
701 * related to the size of physical memory. In general, there should
702 * be enough tables to map four times the amount of available RAM.
703 * The extra amount is needed because some table space is wasted by
704 * fragmentation.
705 */
706 NUM_C_TABLES = (total_phys_mem * 4) / (MMU_C_TBL_SIZE * MMU_PAGE_SIZE);
707 NUM_B_TABLES = NUM_C_TABLES / 2;
708 NUM_A_TABLES = NUM_B_TABLES / 2;
709 #endif /* !FIXED_NTABLES */
710
711 size = sizeof(mmu_short_pte_t) * MMU_C_TBL_SIZE * NUM_C_TABLES;
712 mmuCbase = pmap_bootstrap_alloc(size);
713
714 size = sizeof(mmu_short_dte_t) * MMU_B_TBL_SIZE * NUM_B_TABLES;
715 mmuBbase = pmap_bootstrap_alloc(size);
716
717 size = sizeof(mmu_long_dte_t) * MMU_A_TBL_SIZE * NUM_A_TABLES;
718 mmuAbase = pmap_bootstrap_alloc(size);
719
720 /*
721 * Fill in the never-changing part of the kernel tables.
722 * For simplicity, the kernel's mappings will be editable as a
723 * flat array of page table entries at kernCbase. The
724 * higher level 'A' and 'B' tables must be initialized to point
725 * to this lower one.
726 */
727 b = c = 0;
728
729 /*
730 * Invalidate all mappings below KERNBASE in the A table.
731 * This area has already been zeroed out, but it is good
732 * practice to explicitly show that we are interpreting
733 * it as a list of A table descriptors.
734 */
735 for (i = 0; i < MMU_TIA(KERNBASE); i++) {
736 kernAbase[i].addr.raw = 0;
737 }
738
739 /*
740 * Set up the kernel A and B tables so that they will reference the
741 * correct spots in the contiguous table of PTEs allocated for the
742 * kernel's virtual memory space.
743 */
744 for (i = MMU_TIA(KERNBASE); i < MMU_A_TBL_SIZE; i++) {
745 kernAbase[i].attr.raw =
746 MMU_LONG_DTE_LU | MMU_LONG_DTE_SUPV | MMU_DT_SHORT;
747 kernAbase[i].addr.raw = mmu_vtop(&kernBbase[b]);
748
749 for (j=0; j < MMU_B_TBL_SIZE; j++) {
750 kernBbase[b + j].attr.raw = mmu_vtop(&kernCbase[c])
751 | MMU_DT_SHORT;
752 c += MMU_C_TBL_SIZE;
753 }
754 b += MMU_B_TBL_SIZE;
755 }
756
757 pmap_alloc_usermmu(); /* Allocate user MMU tables. */
758 pmap_alloc_usertmgr(); /* Allocate user MMU table managers.*/
759 pmap_alloc_pv(); /* Allocate physical->virtual map. */
760
761 /*
762 * We are now done with pmap_bootstrap_alloc(). Round up
763 * `virtual_avail' to the nearest page, and set the flag
764 * to prevent use of pmap_bootstrap_alloc() hereafter.
765 */
766 pmap_bootstrap_aalign(NBPG);
767 bootstrap_alloc_enabled = FALSE;
768
769 /*
770 * Now that we are done with pmap_bootstrap_alloc(), we
771 * must save the virtual and physical addresses of the
772 * end of the linearly mapped range, which are stored in
773 * virtual_contig_end and avail_start, respectively.
774 * These variables will never change after this point.
775 */
776 virtual_contig_end = virtual_avail;
777 avail_start = virtual_avail - KERNBASE;
778
779 /*
780 * `avail_next' is a running pointer used by pmap_next_page() to
781 * keep track of the next available physical page to be handed
782 * to the VM system during its initialization, in which it
783 * asks for physical pages, one at a time.
784 */
785 avail_next = avail_start;
786
787 /*
788 * Now allocate some virtual addresses, but not the physical pages
789 * behind them. Note that virtual_avail is already page-aligned.
790 *
791 * tmp_vpages[] is an array of two virtual pages used for temporary
792 * kernel mappings in the pmap module to facilitate various physical
793 * address-oritented operations.
794 */
795 tmp_vpages[0] = virtual_avail;
796 virtual_avail += NBPG;
797 tmp_vpages[1] = virtual_avail;
798 virtual_avail += NBPG;
799
800 /** Initialize the PV system **/
801 pmap_init_pv();
802
803 /*
804 * Fill in the kernel_pmap structure and kernel_crp.
805 */
806 kernAphys = mmu_vtop(kernAbase);
807 kernel_pmap.pm_a_tmgr = NULL;
808 kernel_pmap.pm_a_phys = kernAphys;
809 kernel_pmap.pm_refcount = 1; /* always in use */
810 simple_lock_init(&kernel_pmap.pm_lock);
811
812 kernel_crp.rp_attr = MMU_LONG_DTE_LU | MMU_DT_LONG;
813 kernel_crp.rp_addr = kernAphys;
814
815 /*
816 * Now pmap_enter_kernel() may be used safely and will be
817 * the main interface used hereafter to modify the kernel's
818 * virtual address space. Note that since we are still running
819 * under the PROM's address table, none of these table modifications
820 * actually take effect until pmap_takeover_mmu() is called.
821 *
822 * Note: Our tables do NOT have the PROM linear mappings!
823 * Only the mappings created here exist in our tables, so
824 * remember to map anything we expect to use.
825 */
826 va = (vaddr_t)KERNBASE;
827 pa = 0;
828
829 /*
830 * The first page of the kernel virtual address space is the msgbuf
831 * page. The page attributes (data, non-cached) are set here, while
832 * the address is assigned to this global pointer in cpu_startup().
833 * It is non-cached, mostly due to paranoia.
834 */
835 pmap_enter_kernel(va, pa|PMAP_NC, VM_PROT_ALL);
836 va += NBPG; pa += NBPG;
837
838 /* Next page is used as the temporary stack. */
839 pmap_enter_kernel(va, pa, VM_PROT_ALL);
840 va += NBPG; pa += NBPG;
841
842 /*
843 * Map all of the kernel's text segment as read-only and cacheable.
844 * (Cacheable is implied by default). Unfortunately, the last bytes
845 * of kernel text and the first bytes of kernel data will often be
846 * sharing the same page. Therefore, the last page of kernel text
847 * has to be mapped as read/write, to accomodate the data.
848 */
849 eva = m68k_trunc_page((vaddr_t)etext);
850 for (; va < eva; va += NBPG, pa += NBPG)
851 pmap_enter_kernel(va, pa, VM_PROT_READ|VM_PROT_EXECUTE);
852
853 /*
854 * Map all of the kernel's data as read/write and cacheable.
855 * This includes: data, BSS, symbols, and everything in the
856 * contiguous memory used by pmap_bootstrap_alloc()
857 */
858 for (; pa < avail_start; va += NBPG, pa += NBPG)
859 pmap_enter_kernel(va, pa, VM_PROT_READ|VM_PROT_WRITE);
860
861 /*
862 * At this point we are almost ready to take over the MMU. But first
863 * we must save the PROM's address space in our map, as we call its
864 * routines and make references to its data later in the kernel.
865 */
866 pmap_bootstrap_copyprom();
867 pmap_takeover_mmu();
868 pmap_bootstrap_setprom();
869
870 /* Notify the VM system of our page size. */
871 uvmexp.pagesize = NBPG;
872 uvm_setpagesize();
873
874 pmap_page_upload();
875 }
876
877
878 /* pmap_alloc_usermmu INTERNAL
879 **
880 * Called from pmap_bootstrap() to allocate MMU tables that will
881 * eventually be used for user mappings.
882 */
883 void
884 pmap_alloc_usermmu()
885 {
886 /* XXX: Moved into caller. */
887 }
888
889 /* pmap_alloc_pv INTERNAL
890 **
891 * Called from pmap_bootstrap() to allocate the physical
892 * to virtual mapping list. Each physical page of memory
893 * in the system has a corresponding element in this list.
894 */
895 void
896 pmap_alloc_pv()
897 {
898 int i;
899 unsigned int total_mem;
900
901 /*
902 * Allocate a pv_head structure for every page of physical
903 * memory that will be managed by the system. Since memory on
904 * the 3/80 is non-contiguous, we cannot arrive at a total page
905 * count by subtraction of the lowest available address from the
906 * highest, but rather we have to step through each memory
907 * bank and add the number of pages in each to the total.
908 *
909 * At this time we also initialize the offset of each bank's
910 * starting pv_head within the pv_head list so that the physical
911 * memory state routines (pmap_is_referenced(),
912 * pmap_is_modified(), et al.) can quickly find coresponding
913 * pv_heads in spite of the non-contiguity.
914 */
915 total_mem = 0;
916 for (i = 0; i < SUN3X_NPHYS_RAM_SEGS; i++) {
917 avail_mem[i].pmem_pvbase = m68k_btop(total_mem);
918 total_mem += avail_mem[i].pmem_end -
919 avail_mem[i].pmem_start;
920 if (avail_mem[i].pmem_next == NULL)
921 break;
922 }
923 pvbase = (pv_t *) pmap_bootstrap_alloc(sizeof(pv_t) *
924 m68k_btop(total_phys_mem));
925 }
926
927 /* pmap_alloc_usertmgr INTERNAL
928 **
929 * Called from pmap_bootstrap() to allocate the structures which
930 * facilitate management of user MMU tables. Each user MMU table
931 * in the system has one such structure associated with it.
932 */
933 void
934 pmap_alloc_usertmgr()
935 {
936 /* Allocate user MMU table managers */
937 /* It would be a lot simpler to just make these BSS, but */
938 /* we may want to change their size at boot time... -j */
939 Atmgrbase = (a_tmgr_t *) pmap_bootstrap_alloc(sizeof(a_tmgr_t)
940 * NUM_A_TABLES);
941 Btmgrbase = (b_tmgr_t *) pmap_bootstrap_alloc(sizeof(b_tmgr_t)
942 * NUM_B_TABLES);
943 Ctmgrbase = (c_tmgr_t *) pmap_bootstrap_alloc(sizeof(c_tmgr_t)
944 * NUM_C_TABLES);
945
946 /*
947 * Allocate PV list elements for the physical to virtual
948 * mapping system.
949 */
950 pvebase = (pv_elem_t *) pmap_bootstrap_alloc(
951 sizeof(pv_elem_t) * (NUM_USER_PTES + NUM_KERN_PTES));
952 }
953
954 /* pmap_bootstrap_copyprom() INTERNAL
955 **
956 * Copy the PROM mappings into our own tables. Note, we
957 * can use physical addresses until __bootstrap returns.
958 */
959 void
960 pmap_bootstrap_copyprom()
961 {
962 struct sunromvec *romp;
963 int *mon_ctbl;
964 mmu_short_pte_t *kpte;
965 int i, len;
966
967 romp = romVectorPtr;
968
969 /*
970 * Copy the mappings in SUN3X_MON_KDB_BASE...SUN3X_MONEND
971 * Note: mon_ctbl[0] maps SUN3X_MON_KDB_BASE
972 */
973 mon_ctbl = *romp->monptaddr;
974 i = m68k_btop(SUN3X_MON_KDB_BASE - KERNBASE);
975 kpte = &kernCbase[i];
976 len = m68k_btop(SUN3X_MONEND - SUN3X_MON_KDB_BASE);
977
978 for (i = 0; i < len; i++) {
979 kpte[i].attr.raw = mon_ctbl[i];
980 }
981
982 /*
983 * Copy the mappings at MON_DVMA_BASE (to the end).
984 * Note, in here, mon_ctbl[0] maps MON_DVMA_BASE.
985 * Actually, we only want the last page, which the
986 * PROM has set up for use by the "ie" driver.
987 * (The i82686 needs its SCP there.)
988 * If we copy all the mappings, pmap_enter_kernel
989 * may complain about finding valid PTEs that are
990 * not recorded in our PV lists...
991 */
992 mon_ctbl = *romp->shadowpteaddr;
993 i = m68k_btop(SUN3X_MON_DVMA_BASE - KERNBASE);
994 kpte = &kernCbase[i];
995 len = m68k_btop(SUN3X_MON_DVMA_SIZE);
996 for (i = (len-1); i < len; i++) {
997 kpte[i].attr.raw = mon_ctbl[i];
998 }
999 }
1000
1001 /* pmap_takeover_mmu INTERNAL
1002 **
1003 * Called from pmap_bootstrap() after it has copied enough of the
1004 * PROM mappings into the kernel map so that we can use our own
1005 * MMU table.
1006 */
1007 void
1008 pmap_takeover_mmu()
1009 {
1010
1011 loadcrp(&kernel_crp);
1012 }
1013
1014 /* pmap_bootstrap_setprom() INTERNAL
1015 **
1016 * Set the PROM mappings so it can see kernel space.
1017 * Note that physical addresses are used here, which
1018 * we can get away with because this runs with the
1019 * low 1GB set for transparent translation.
1020 */
1021 void
1022 pmap_bootstrap_setprom()
1023 {
1024 mmu_long_dte_t *mon_dte;
1025 extern struct mmu_rootptr mon_crp;
1026 int i;
1027
1028 mon_dte = (mmu_long_dte_t *) mon_crp.rp_addr;
1029 for (i = MMU_TIA(KERNBASE); i < MMU_TIA(KERN_END); i++) {
1030 mon_dte[i].attr.raw = kernAbase[i].attr.raw;
1031 mon_dte[i].addr.raw = kernAbase[i].addr.raw;
1032 }
1033 }
1034
1035
1036 /* pmap_init INTERFACE
1037 **
1038 * Called at the end of vm_init() to set up the pmap system to go
1039 * into full time operation. All initialization of kernel_pmap
1040 * should be already done by now, so this should just do things
1041 * needed for user-level pmaps to work.
1042 */
1043 void
1044 pmap_init()
1045 {
1046 /** Initialize the manager pools **/
1047 TAILQ_INIT(&a_pool);
1048 TAILQ_INIT(&b_pool);
1049 TAILQ_INIT(&c_pool);
1050
1051 /**************************************************************
1052 * Initialize all tmgr structures and MMU tables they manage. *
1053 **************************************************************/
1054 /** Initialize A tables **/
1055 pmap_init_a_tables();
1056 /** Initialize B tables **/
1057 pmap_init_b_tables();
1058 /** Initialize C tables **/
1059 pmap_init_c_tables();
1060
1061 /** Initialize the pmap pools **/
1062 pool_init(&pmap_pmap_pool, sizeof(struct pmap), 0, 0, 0, "pmappl",
1063 &pool_allocator_nointr);
1064 }
1065
1066 /* pmap_init_a_tables() INTERNAL
1067 **
1068 * Initializes all A managers, their MMU A tables, and inserts
1069 * them into the A manager pool for use by the system.
1070 */
1071 void
1072 pmap_init_a_tables()
1073 {
1074 int i;
1075 a_tmgr_t *a_tbl;
1076
1077 for (i=0; i < NUM_A_TABLES; i++) {
1078 /* Select the next available A manager from the pool */
1079 a_tbl = &Atmgrbase[i];
1080
1081 /*
1082 * Clear its parent entry. Set its wired and valid
1083 * entry count to zero.
1084 */
1085 a_tbl->at_parent = NULL;
1086 a_tbl->at_wcnt = a_tbl->at_ecnt = 0;
1087
1088 /* Assign it the next available MMU A table from the pool */
1089 a_tbl->at_dtbl = &mmuAbase[i * MMU_A_TBL_SIZE];
1090
1091 /*
1092 * Initialize the MMU A table with the table in the `proc0',
1093 * or kernel, mapping. This ensures that every process has
1094 * the kernel mapped in the top part of its address space.
1095 */
1096 memcpy(a_tbl->at_dtbl, kernAbase, MMU_A_TBL_SIZE *
1097 sizeof(mmu_long_dte_t));
1098
1099 /*
1100 * Finally, insert the manager into the A pool,
1101 * making it ready to be used by the system.
1102 */
1103 TAILQ_INSERT_TAIL(&a_pool, a_tbl, at_link);
1104 }
1105 }
1106
1107 /* pmap_init_b_tables() INTERNAL
1108 **
1109 * Initializes all B table managers, their MMU B tables, and
1110 * inserts them into the B manager pool for use by the system.
1111 */
1112 void
1113 pmap_init_b_tables()
1114 {
1115 int i,j;
1116 b_tmgr_t *b_tbl;
1117
1118 for (i=0; i < NUM_B_TABLES; i++) {
1119 /* Select the next available B manager from the pool */
1120 b_tbl = &Btmgrbase[i];
1121
1122 b_tbl->bt_parent = NULL; /* clear its parent, */
1123 b_tbl->bt_pidx = 0; /* parent index, */
1124 b_tbl->bt_wcnt = 0; /* wired entry count, */
1125 b_tbl->bt_ecnt = 0; /* valid entry count. */
1126
1127 /* Assign it the next available MMU B table from the pool */
1128 b_tbl->bt_dtbl = &mmuBbase[i * MMU_B_TBL_SIZE];
1129
1130 /* Invalidate every descriptor in the table */
1131 for (j=0; j < MMU_B_TBL_SIZE; j++)
1132 b_tbl->bt_dtbl[j].attr.raw = MMU_DT_INVALID;
1133
1134 /* Insert the manager into the B pool */
1135 TAILQ_INSERT_TAIL(&b_pool, b_tbl, bt_link);
1136 }
1137 }
1138
1139 /* pmap_init_c_tables() INTERNAL
1140 **
1141 * Initializes all C table managers, their MMU C tables, and
1142 * inserts them into the C manager pool for use by the system.
1143 */
1144 void
1145 pmap_init_c_tables()
1146 {
1147 int i,j;
1148 c_tmgr_t *c_tbl;
1149
1150 for (i=0; i < NUM_C_TABLES; i++) {
1151 /* Select the next available C manager from the pool */
1152 c_tbl = &Ctmgrbase[i];
1153
1154 c_tbl->ct_parent = NULL; /* clear its parent, */
1155 c_tbl->ct_pidx = 0; /* parent index, */
1156 c_tbl->ct_wcnt = 0; /* wired entry count, */
1157 c_tbl->ct_ecnt = 0; /* valid entry count, */
1158 c_tbl->ct_pmap = NULL; /* parent pmap, */
1159 c_tbl->ct_va = 0; /* base of managed range */
1160
1161 /* Assign it the next available MMU C table from the pool */
1162 c_tbl->ct_dtbl = &mmuCbase[i * MMU_C_TBL_SIZE];
1163
1164 for (j=0; j < MMU_C_TBL_SIZE; j++)
1165 c_tbl->ct_dtbl[j].attr.raw = MMU_DT_INVALID;
1166
1167 TAILQ_INSERT_TAIL(&c_pool, c_tbl, ct_link);
1168 }
1169 }
1170
1171 /* pmap_init_pv() INTERNAL
1172 **
1173 * Initializes the Physical to Virtual mapping system.
1174 */
1175 void
1176 pmap_init_pv()
1177 {
1178 int i;
1179
1180 /* Initialize every PV head. */
1181 for (i = 0; i < m68k_btop(total_phys_mem); i++) {
1182 pvbase[i].pv_idx = PVE_EOL; /* Indicate no mappings */
1183 pvbase[i].pv_flags = 0; /* Zero out page flags */
1184 }
1185 }
1186
1187 /* get_a_table INTERNAL
1188 **
1189 * Retrieve and return a level A table for use in a user map.
1190 */
1191 a_tmgr_t *
1192 get_a_table()
1193 {
1194 a_tmgr_t *tbl;
1195 pmap_t pmap;
1196
1197 /* Get the top A table in the pool */
1198 tbl = a_pool.tqh_first;
1199 if (tbl == NULL) {
1200 /*
1201 * XXX - Instead of panicing here and in other get_x_table
1202 * functions, we do have the option of sleeping on the head of
1203 * the table pool. Any function which updates the table pool
1204 * would then issue a wakeup() on the head, thus waking up any
1205 * processes waiting for a table.
1206 *
1207 * Actually, the place to sleep would be when some process
1208 * asks for a "wired" mapping that would run us short of
1209 * mapping resources. This design DEPENDS on always having
1210 * some mapping resources in the pool for stealing, so we
1211 * must make sure we NEVER let the pool become empty. -gwr
1212 */
1213 panic("get_a_table: out of A tables.");
1214 }
1215
1216 TAILQ_REMOVE(&a_pool, tbl, at_link);
1217 /*
1218 * If the table has a non-null parent pointer then it is in use.
1219 * Forcibly abduct it from its parent and clear its entries.
1220 * No re-entrancy worries here. This table would not be in the
1221 * table pool unless it was available for use.
1222 *
1223 * Note that the second argument to free_a_table() is FALSE. This
1224 * indicates that the table should not be relinked into the A table
1225 * pool. That is a job for the function that called us.
1226 */
1227 if (tbl->at_parent) {
1228 pmap = tbl->at_parent;
1229 free_a_table(tbl, FALSE);
1230 pmap->pm_a_tmgr = NULL;
1231 pmap->pm_a_phys = kernAphys;
1232 }
1233 return tbl;
1234 }
1235
1236 /* get_b_table INTERNAL
1237 **
1238 * Return a level B table for use.
1239 */
1240 b_tmgr_t *
1241 get_b_table()
1242 {
1243 b_tmgr_t *tbl;
1244
1245 /* See 'get_a_table' for comments. */
1246 tbl = b_pool.tqh_first;
1247 if (tbl == NULL)
1248 panic("get_b_table: out of B tables.");
1249 TAILQ_REMOVE(&b_pool, tbl, bt_link);
1250 if (tbl->bt_parent) {
1251 tbl->bt_parent->at_dtbl[tbl->bt_pidx].attr.raw = MMU_DT_INVALID;
1252 tbl->bt_parent->at_ecnt--;
1253 free_b_table(tbl, FALSE);
1254 }
1255 return tbl;
1256 }
1257
1258 /* get_c_table INTERNAL
1259 **
1260 * Return a level C table for use.
1261 */
1262 c_tmgr_t *
1263 get_c_table()
1264 {
1265 c_tmgr_t *tbl;
1266
1267 /* See 'get_a_table' for comments */
1268 tbl = c_pool.tqh_first;
1269 if (tbl == NULL)
1270 panic("get_c_table: out of C tables.");
1271 TAILQ_REMOVE(&c_pool, tbl, ct_link);
1272 if (tbl->ct_parent) {
1273 tbl->ct_parent->bt_dtbl[tbl->ct_pidx].attr.raw = MMU_DT_INVALID;
1274 tbl->ct_parent->bt_ecnt--;
1275 free_c_table(tbl, FALSE);
1276 }
1277 return tbl;
1278 }
1279
1280 /*
1281 * The following 'free_table' and 'steal_table' functions are called to
1282 * detach tables from their current obligations (parents and children) and
1283 * prepare them for reuse in another mapping.
1284 *
1285 * Free_table is used when the calling function will handle the fate
1286 * of the parent table, such as returning it to the free pool when it has
1287 * no valid entries. Functions that do not want to handle this should
1288 * call steal_table, in which the parent table's descriptors and entry
1289 * count are automatically modified when this table is removed.
1290 */
1291
1292 /* free_a_table INTERNAL
1293 **
1294 * Unmaps the given A table and all child tables from their current
1295 * mappings. Returns the number of pages that were invalidated.
1296 * If 'relink' is true, the function will return the table to the head
1297 * of the available table pool.
1298 *
1299 * Cache note: The MC68851 will automatically flush all
1300 * descriptors derived from a given A table from its
1301 * Automatic Translation Cache (ATC) if we issue a
1302 * 'PFLUSHR' instruction with the base address of the
1303 * table. This function should do, and does so.
1304 * Note note: We are using an MC68030 - there is no
1305 * PFLUSHR.
1306 */
1307 int
1308 free_a_table(a_tbl, relink)
1309 a_tmgr_t *a_tbl;
1310 boolean_t relink;
1311 {
1312 int i, removed_cnt;
1313 mmu_long_dte_t *dte;
1314 mmu_short_dte_t *dtbl;
1315 b_tmgr_t *tmgr;
1316
1317 /*
1318 * Flush the ATC cache of all cached descriptors derived
1319 * from this table.
1320 * Sun3x does not use 68851's cached table feature
1321 * flush_atc_crp(mmu_vtop(a_tbl->dte));
1322 */
1323
1324 /*
1325 * Remove any pending cache flushes that were designated
1326 * for the pmap this A table belongs to.
1327 * a_tbl->parent->atc_flushq[0] = 0;
1328 * Not implemented in sun3x.
1329 */
1330
1331 /*
1332 * All A tables in the system should retain a map for the
1333 * kernel. If the table contains any valid descriptors
1334 * (other than those for the kernel area), invalidate them all,
1335 * stopping short of the kernel's entries.
1336 */
1337 removed_cnt = 0;
1338 if (a_tbl->at_ecnt) {
1339 dte = a_tbl->at_dtbl;
1340 for (i=0; i < MMU_TIA(KERNBASE); i++) {
1341 /*
1342 * If a table entry points to a valid B table, free
1343 * it and its children.
1344 */
1345 if (MMU_VALID_DT(dte[i])) {
1346 /*
1347 * The following block does several things,
1348 * from innermost expression to the
1349 * outermost:
1350 * 1) It extracts the base (cc 1996)
1351 * address of the B table pointed
1352 * to in the A table entry dte[i].
1353 * 2) It converts this base address into
1354 * the virtual address it can be
1355 * accessed with. (all MMU tables point
1356 * to physical addresses.)
1357 * 3) It finds the corresponding manager
1358 * structure which manages this MMU table.
1359 * 4) It frees the manager structure.
1360 * (This frees the MMU table and all
1361 * child tables. See 'free_b_table' for
1362 * details.)
1363 */
1364 dtbl = mmu_ptov(dte[i].addr.raw);
1365 tmgr = mmuB2tmgr(dtbl);
1366 removed_cnt += free_b_table(tmgr, TRUE);
1367 dte[i].attr.raw = MMU_DT_INVALID;
1368 }
1369 }
1370 a_tbl->at_ecnt = 0;
1371 }
1372 if (relink) {
1373 a_tbl->at_parent = NULL;
1374 TAILQ_REMOVE(&a_pool, a_tbl, at_link);
1375 TAILQ_INSERT_HEAD(&a_pool, a_tbl, at_link);
1376 }
1377 return removed_cnt;
1378 }
1379
1380 /* free_b_table INTERNAL
1381 **
1382 * Unmaps the given B table and all its children from their current
1383 * mappings. Returns the number of pages that were invalidated.
1384 * (For comments, see 'free_a_table()').
1385 */
1386 int
1387 free_b_table(b_tbl, relink)
1388 b_tmgr_t *b_tbl;
1389 boolean_t relink;
1390 {
1391 int i, removed_cnt;
1392 mmu_short_dte_t *dte;
1393 mmu_short_pte_t *dtbl;
1394 c_tmgr_t *tmgr;
1395
1396 removed_cnt = 0;
1397 if (b_tbl->bt_ecnt) {
1398 dte = b_tbl->bt_dtbl;
1399 for (i=0; i < MMU_B_TBL_SIZE; i++) {
1400 if (MMU_VALID_DT(dte[i])) {
1401 dtbl = mmu_ptov(MMU_DTE_PA(dte[i]));
1402 tmgr = mmuC2tmgr(dtbl);
1403 removed_cnt += free_c_table(tmgr, TRUE);
1404 dte[i].attr.raw = MMU_DT_INVALID;
1405 }
1406 }
1407 b_tbl->bt_ecnt = 0;
1408 }
1409
1410 if (relink) {
1411 b_tbl->bt_parent = NULL;
1412 TAILQ_REMOVE(&b_pool, b_tbl, bt_link);
1413 TAILQ_INSERT_HEAD(&b_pool, b_tbl, bt_link);
1414 }
1415 return removed_cnt;
1416 }
1417
1418 /* free_c_table INTERNAL
1419 **
1420 * Unmaps the given C table from use and returns it to the pool for
1421 * re-use. Returns the number of pages that were invalidated.
1422 *
1423 * This function preserves any physical page modification information
1424 * contained in the page descriptors within the C table by calling
1425 * 'pmap_remove_pte().'
1426 */
1427 int
1428 free_c_table(c_tbl, relink)
1429 c_tmgr_t *c_tbl;
1430 boolean_t relink;
1431 {
1432 int i, removed_cnt;
1433
1434 removed_cnt = 0;
1435 if (c_tbl->ct_ecnt) {
1436 for (i=0; i < MMU_C_TBL_SIZE; i++) {
1437 if (MMU_VALID_DT(c_tbl->ct_dtbl[i])) {
1438 pmap_remove_pte(&c_tbl->ct_dtbl[i]);
1439 removed_cnt++;
1440 }
1441 }
1442 c_tbl->ct_ecnt = 0;
1443 }
1444
1445 if (relink) {
1446 c_tbl->ct_parent = NULL;
1447 TAILQ_REMOVE(&c_pool, c_tbl, ct_link);
1448 TAILQ_INSERT_HEAD(&c_pool, c_tbl, ct_link);
1449 }
1450 return removed_cnt;
1451 }
1452
1453
1454 /* pmap_remove_pte INTERNAL
1455 **
1456 * Unmap the given pte and preserve any page modification
1457 * information by transfering it to the pv head of the
1458 * physical page it maps to. This function does not update
1459 * any reference counts because it is assumed that the calling
1460 * function will do so.
1461 */
1462 void
1463 pmap_remove_pte(pte)
1464 mmu_short_pte_t *pte;
1465 {
1466 u_short pv_idx, targ_idx;
1467 paddr_t pa;
1468 pv_t *pv;
1469
1470 pa = MMU_PTE_PA(*pte);
1471 if (is_managed(pa)) {
1472 pv = pa2pv(pa);
1473 targ_idx = pteidx(pte); /* Index of PTE being removed */
1474
1475 /*
1476 * If the PTE being removed is the first (or only) PTE in
1477 * the list of PTEs currently mapped to this page, remove the
1478 * PTE by changing the index found on the PV head. Otherwise
1479 * a linear search through the list will have to be executed
1480 * in order to find the PVE which points to the PTE being
1481 * removed, so that it may be modified to point to its new
1482 * neighbor.
1483 */
1484
1485 pv_idx = pv->pv_idx; /* Index of first PTE in PV list */
1486 if (pv_idx == targ_idx) {
1487 pv->pv_idx = pvebase[targ_idx].pve_next;
1488 } else {
1489
1490 /*
1491 * Find the PV element pointing to the target
1492 * element. Note: may have pv_idx==PVE_EOL
1493 */
1494
1495 for (;;) {
1496 if (pv_idx == PVE_EOL) {
1497 goto pv_not_found;
1498 }
1499 if (pvebase[pv_idx].pve_next == targ_idx)
1500 break;
1501 pv_idx = pvebase[pv_idx].pve_next;
1502 }
1503
1504 /*
1505 * At this point, pv_idx is the index of the PV
1506 * element just before the target element in the list.
1507 * Unlink the target.
1508 */
1509
1510 pvebase[pv_idx].pve_next = pvebase[targ_idx].pve_next;
1511 }
1512
1513 /*
1514 * Save the mod/ref bits of the pte by simply
1515 * ORing the entire pte onto the pv_flags member
1516 * of the pv structure.
1517 * There is no need to use a separate bit pattern
1518 * for usage information on the pv head than that
1519 * which is used on the MMU ptes.
1520 */
1521
1522 pv_not_found:
1523 pv->pv_flags |= (u_short) pte->attr.raw;
1524 }
1525 pte->attr.raw = MMU_DT_INVALID;
1526 }
1527
1528 /* pmap_stroll INTERNAL
1529 **
1530 * Retrieve the addresses of all table managers involved in the mapping of
1531 * the given virtual address. If the table walk completed sucessfully,
1532 * return TRUE. If it was only partially sucessful, return FALSE.
1533 * The table walk performed by this function is important to many other
1534 * functions in this module.
1535 *
1536 * Note: This function ought to be easier to read.
1537 */
1538 boolean_t
1539 pmap_stroll(pmap, va, a_tbl, b_tbl, c_tbl, pte, a_idx, b_idx, pte_idx)
1540 pmap_t pmap;
1541 vaddr_t va;
1542 a_tmgr_t **a_tbl;
1543 b_tmgr_t **b_tbl;
1544 c_tmgr_t **c_tbl;
1545 mmu_short_pte_t **pte;
1546 int *a_idx, *b_idx, *pte_idx;
1547 {
1548 mmu_long_dte_t *a_dte; /* A: long descriptor table */
1549 mmu_short_dte_t *b_dte; /* B: short descriptor table */
1550
1551 if (pmap == pmap_kernel())
1552 return FALSE;
1553
1554 /* Does the given pmap have its own A table? */
1555 *a_tbl = pmap->pm_a_tmgr;
1556 if (*a_tbl == NULL)
1557 return FALSE; /* No. Return unknown. */
1558 /* Does the A table have a valid B table
1559 * under the corresponding table entry?
1560 */
1561 *a_idx = MMU_TIA(va);
1562 a_dte = &((*a_tbl)->at_dtbl[*a_idx]);
1563 if (!MMU_VALID_DT(*a_dte))
1564 return FALSE; /* No. Return unknown. */
1565 /* Yes. Extract B table from the A table. */
1566 *b_tbl = mmuB2tmgr(mmu_ptov(a_dte->addr.raw));
1567 /* Does the B table have a valid C table
1568 * under the corresponding table entry?
1569 */
1570 *b_idx = MMU_TIB(va);
1571 b_dte = &((*b_tbl)->bt_dtbl[*b_idx]);
1572 if (!MMU_VALID_DT(*b_dte))
1573 return FALSE; /* No. Return unknown. */
1574 /* Yes. Extract C table from the B table. */
1575 *c_tbl = mmuC2tmgr(mmu_ptov(MMU_DTE_PA(*b_dte)));
1576 *pte_idx = MMU_TIC(va);
1577 *pte = &((*c_tbl)->ct_dtbl[*pte_idx]);
1578
1579 return TRUE;
1580 }
1581
1582 /* pmap_enter INTERFACE
1583 **
1584 * Called by the kernel to map a virtual address
1585 * to a physical address in the given process map.
1586 *
1587 * Note: this function should apply an exclusive lock
1588 * on the pmap system for its duration. (it certainly
1589 * would save my hair!!)
1590 * This function ought to be easier to read.
1591 */
1592 int
1593 pmap_enter(pmap, va, pa, prot, flags)
1594 pmap_t pmap;
1595 vaddr_t va;
1596 paddr_t pa;
1597 vm_prot_t prot;
1598 int flags;
1599 {
1600 boolean_t insert, managed; /* Marks the need for PV insertion.*/
1601 u_short nidx; /* PV list index */
1602 int mapflags; /* Flags for the mapping (see NOTE1) */
1603 u_int a_idx, b_idx, pte_idx; /* table indices */
1604 a_tmgr_t *a_tbl; /* A: long descriptor table manager */
1605 b_tmgr_t *b_tbl; /* B: short descriptor table manager */
1606 c_tmgr_t *c_tbl; /* C: short page table manager */
1607 mmu_long_dte_t *a_dte; /* A: long descriptor table */
1608 mmu_short_dte_t *b_dte; /* B: short descriptor table */
1609 mmu_short_pte_t *c_pte; /* C: short page descriptor table */
1610 pv_t *pv; /* pv list head */
1611 boolean_t wired; /* is the mapping to be wired? */
1612 enum {NONE, NEWA, NEWB, NEWC} llevel; /* used at end */
1613
1614 if (pmap == pmap_kernel()) {
1615 pmap_enter_kernel(va, pa, prot);
1616 return 0;
1617 }
1618
1619 /*
1620 * Determine if the mapping should be wired.
1621 */
1622 wired = ((flags & PMAP_WIRED) != 0);
1623
1624 /*
1625 * NOTE1:
1626 *
1627 * On November 13, 1999, someone changed the pmap_enter() API such
1628 * that it now accepts a 'flags' argument. This new argument
1629 * contains bit-flags for the architecture-independent (UVM) system to
1630 * use in signalling certain mapping requirements to the architecture-
1631 * dependent (pmap) system. The argument it replaces, 'wired', is now
1632 * one of the flags within it.
1633 *
1634 * In addition to flags signaled by the architecture-independent
1635 * system, parts of the architecture-dependent section of the sun3x
1636 * kernel pass their own flags in the lower, unused bits of the
1637 * physical address supplied to this function. These flags are
1638 * extracted and stored in the temporary variable 'mapflags'.
1639 *
1640 * Extract sun3x specific flags from the physical address.
1641 */
1642 mapflags = (pa & ~MMU_PAGE_MASK);
1643 pa &= MMU_PAGE_MASK;
1644
1645 /*
1646 * Determine if the physical address being mapped is on-board RAM.
1647 * Any other area of the address space is likely to belong to a
1648 * device and hence it would be disasterous to cache its contents.
1649 */
1650 if ((managed = is_managed(pa)) == FALSE)
1651 mapflags |= PMAP_NC;
1652
1653 /*
1654 * For user mappings we walk along the MMU tables of the given
1655 * pmap, reaching a PTE which describes the virtual page being
1656 * mapped or changed. If any level of the walk ends in an invalid
1657 * entry, a table must be allocated and the entry must be updated
1658 * to point to it.
1659 * There is a bit of confusion as to whether this code must be
1660 * re-entrant. For now we will assume it is. To support
1661 * re-entrancy we must unlink tables from the table pool before
1662 * we assume we may use them. Tables are re-linked into the pool
1663 * when we are finished with them at the end of the function.
1664 * But I don't feel like doing that until we have proof that this
1665 * needs to be re-entrant.
1666 * 'llevel' records which tables need to be relinked.
1667 */
1668 llevel = NONE;
1669
1670 /*
1671 * Step 1 - Retrieve the A table from the pmap. If it has no
1672 * A table, allocate a new one from the available pool.
1673 */
1674
1675 a_tbl = pmap->pm_a_tmgr;
1676 if (a_tbl == NULL) {
1677 /*
1678 * This pmap does not currently have an A table. Allocate
1679 * a new one.
1680 */
1681 a_tbl = get_a_table();
1682 a_tbl->at_parent = pmap;
1683
1684 /*
1685 * Assign this new A table to the pmap, and calculate its
1686 * physical address so that loadcrp() can be used to make
1687 * the table active.
1688 */
1689 pmap->pm_a_tmgr = a_tbl;
1690 pmap->pm_a_phys = mmu_vtop(a_tbl->at_dtbl);
1691
1692 /*
1693 * If the process receiving a new A table is the current
1694 * process, we are responsible for setting the MMU so that
1695 * it becomes the current address space. This only adds
1696 * new mappings, so no need to flush anything.
1697 */
1698 if (pmap == current_pmap()) {
1699 kernel_crp.rp_addr = pmap->pm_a_phys;
1700 loadcrp(&kernel_crp);
1701 }
1702
1703 if (!wired)
1704 llevel = NEWA;
1705 } else {
1706 /*
1707 * Use the A table already allocated for this pmap.
1708 * Unlink it from the A table pool if necessary.
1709 */
1710 if (wired && !a_tbl->at_wcnt)
1711 TAILQ_REMOVE(&a_pool, a_tbl, at_link);
1712 }
1713
1714 /*
1715 * Step 2 - Walk into the B table. If there is no valid B table,
1716 * allocate one.
1717 */
1718
1719 a_idx = MMU_TIA(va); /* Calculate the TIA of the VA. */
1720 a_dte = &a_tbl->at_dtbl[a_idx]; /* Retrieve descriptor from table */
1721 if (MMU_VALID_DT(*a_dte)) { /* Is the descriptor valid? */
1722 /* The descriptor is valid. Use the B table it points to. */
1723 /*************************************
1724 * a_idx *
1725 * v *
1726 * a_tbl -> +-+-+-+-+-+-+-+-+-+-+-+- *
1727 * | | | | | | | | | | | | *
1728 * +-+-+-+-+-+-+-+-+-+-+-+- *
1729 * | *
1730 * \- b_tbl -> +-+- *
1731 * | | *
1732 * +-+- *
1733 *************************************/
1734 b_dte = mmu_ptov(a_dte->addr.raw);
1735 b_tbl = mmuB2tmgr(b_dte);
1736
1737 /*
1738 * If the requested mapping must be wired, but this table
1739 * being used to map it is not, the table must be removed
1740 * from the available pool and its wired entry count
1741 * incremented.
1742 */
1743 if (wired && !b_tbl->bt_wcnt) {
1744 TAILQ_REMOVE(&b_pool, b_tbl, bt_link);
1745 a_tbl->at_wcnt++;
1746 }
1747 } else {
1748 /* The descriptor is invalid. Allocate a new B table. */
1749 b_tbl = get_b_table();
1750
1751 /* Point the parent A table descriptor to this new B table. */
1752 a_dte->addr.raw = mmu_vtop(b_tbl->bt_dtbl);
1753 a_dte->attr.raw = MMU_LONG_DTE_LU | MMU_DT_SHORT;
1754 a_tbl->at_ecnt++; /* Update parent's valid entry count */
1755
1756 /* Create the necessary back references to the parent table */
1757 b_tbl->bt_parent = a_tbl;
1758 b_tbl->bt_pidx = a_idx;
1759
1760 /*
1761 * If this table is to be wired, make sure the parent A table
1762 * wired count is updated to reflect that it has another wired
1763 * entry.
1764 */
1765 if (wired)
1766 a_tbl->at_wcnt++;
1767 else if (llevel == NONE)
1768 llevel = NEWB;
1769 }
1770
1771 /*
1772 * Step 3 - Walk into the C table, if there is no valid C table,
1773 * allocate one.
1774 */
1775
1776 b_idx = MMU_TIB(va); /* Calculate the TIB of the VA */
1777 b_dte = &b_tbl->bt_dtbl[b_idx]; /* Retrieve descriptor from table */
1778 if (MMU_VALID_DT(*b_dte)) { /* Is the descriptor valid? */
1779 /* The descriptor is valid. Use the C table it points to. */
1780 /**************************************
1781 * c_idx *
1782 * | v *
1783 * \- b_tbl -> +-+-+-+-+-+-+-+-+-+-+- *
1784 * | | | | | | | | | | | *
1785 * +-+-+-+-+-+-+-+-+-+-+- *
1786 * | *
1787 * \- c_tbl -> +-+-- *
1788 * | | | *
1789 * +-+-- *
1790 **************************************/
1791 c_pte = mmu_ptov(MMU_PTE_PA(*b_dte));
1792 c_tbl = mmuC2tmgr(c_pte);
1793
1794 /* If mapping is wired and table is not */
1795 if (wired && !c_tbl->ct_wcnt) {
1796 TAILQ_REMOVE(&c_pool, c_tbl, ct_link);
1797 b_tbl->bt_wcnt++;
1798 }
1799 } else {
1800 /* The descriptor is invalid. Allocate a new C table. */
1801 c_tbl = get_c_table();
1802
1803 /* Point the parent B table descriptor to this new C table. */
1804 b_dte->attr.raw = mmu_vtop(c_tbl->ct_dtbl);
1805 b_dte->attr.raw |= MMU_DT_SHORT;
1806 b_tbl->bt_ecnt++; /* Update parent's valid entry count */
1807
1808 /* Create the necessary back references to the parent table */
1809 c_tbl->ct_parent = b_tbl;
1810 c_tbl->ct_pidx = b_idx;
1811 /*
1812 * Store the pmap and base virtual managed address for faster
1813 * retrieval in the PV functions.
1814 */
1815 c_tbl->ct_pmap = pmap;
1816 c_tbl->ct_va = (va & (MMU_TIA_MASK|MMU_TIB_MASK));
1817
1818 /*
1819 * If this table is to be wired, make sure the parent B table
1820 * wired count is updated to reflect that it has another wired
1821 * entry.
1822 */
1823 if (wired)
1824 b_tbl->bt_wcnt++;
1825 else if (llevel == NONE)
1826 llevel = NEWC;
1827 }
1828
1829 /*
1830 * Step 4 - Deposit a page descriptor (PTE) into the appropriate
1831 * slot of the C table, describing the PA to which the VA is mapped.
1832 */
1833
1834 pte_idx = MMU_TIC(va);
1835 c_pte = &c_tbl->ct_dtbl[pte_idx];
1836 if (MMU_VALID_DT(*c_pte)) { /* Is the entry currently valid? */
1837 /*
1838 * The PTE is currently valid. This particular call
1839 * is just a synonym for one (or more) of the following
1840 * operations:
1841 * change protection of a page
1842 * change wiring status of a page
1843 * remove the mapping of a page
1844 *
1845 * XXX - Semi critical: This code should unwire the PTE
1846 * and, possibly, associated parent tables if this is a
1847 * change wiring operation. Currently it does not.
1848 *
1849 * This may be ok if pmap_unwire() is the only
1850 * interface used to UNWIRE a page.
1851 */
1852
1853 /* First check if this is a wiring operation. */
1854 if (wired && (c_pte->attr.raw & MMU_SHORT_PTE_WIRED)) {
1855 /*
1856 * The PTE is already wired. To prevent it from being
1857 * counted as a new wiring operation, reset the 'wired'
1858 * variable.
1859 */
1860 wired = FALSE;
1861 }
1862
1863 /* Is the new address the same as the old? */
1864 if (MMU_PTE_PA(*c_pte) == pa) {
1865 /*
1866 * Yes, mark that it does not need to be reinserted
1867 * into the PV list.
1868 */
1869 insert = FALSE;
1870
1871 /*
1872 * Clear all but the modified, referenced and wired
1873 * bits on the PTE.
1874 */
1875 c_pte->attr.raw &= (MMU_SHORT_PTE_M
1876 | MMU_SHORT_PTE_USED | MMU_SHORT_PTE_WIRED);
1877 } else {
1878 /* No, remove the old entry */
1879 pmap_remove_pte(c_pte);
1880 insert = TRUE;
1881 }
1882
1883 /*
1884 * TLB flush is only necessary if modifying current map.
1885 * However, in pmap_enter(), the pmap almost always IS
1886 * the current pmap, so don't even bother to check.
1887 */
1888 TBIS(va);
1889 } else {
1890 /*
1891 * The PTE is invalid. Increment the valid entry count in
1892 * the C table manager to reflect the addition of a new entry.
1893 */
1894 c_tbl->ct_ecnt++;
1895
1896 /* XXX - temporarily make sure the PTE is cleared. */
1897 c_pte->attr.raw = 0;
1898
1899 /* It will also need to be inserted into the PV list. */
1900 insert = TRUE;
1901 }
1902
1903 /*
1904 * If page is changing from unwired to wired status, set an unused bit
1905 * within the PTE to indicate that it is wired. Also increment the
1906 * wired entry count in the C table manager.
1907 */
1908 if (wired) {
1909 c_pte->attr.raw |= MMU_SHORT_PTE_WIRED;
1910 c_tbl->ct_wcnt++;
1911 }
1912
1913 /*
1914 * Map the page, being careful to preserve modify/reference/wired
1915 * bits. At this point it is assumed that the PTE either has no bits
1916 * set, or if there are set bits, they are only modified, reference or
1917 * wired bits. If not, the following statement will cause erratic
1918 * behavior.
1919 */
1920 #ifdef PMAP_DEBUG
1921 if (c_pte->attr.raw & ~(MMU_SHORT_PTE_M |
1922 MMU_SHORT_PTE_USED | MMU_SHORT_PTE_WIRED)) {
1923 printf("pmap_enter: junk left in PTE at %p\n", c_pte);
1924 Debugger();
1925 }
1926 #endif
1927 c_pte->attr.raw |= ((u_long) pa | MMU_DT_PAGE);
1928
1929 /*
1930 * If the mapping should be read-only, set the write protect
1931 * bit in the PTE.
1932 */
1933 if (!(prot & VM_PROT_WRITE))
1934 c_pte->attr.raw |= MMU_SHORT_PTE_WP;
1935
1936 /*
1937 * If the mapping should be cache inhibited (indicated by the flag
1938 * bits found on the lower order of the physical address.)
1939 * mark the PTE as a cache inhibited page.
1940 */
1941 if (mapflags & PMAP_NC)
1942 c_pte->attr.raw |= MMU_SHORT_PTE_CI;
1943
1944 /*
1945 * If the physical address being mapped is managed by the PV
1946 * system then link the pte into the list of pages mapped to that
1947 * address.
1948 */
1949 if (insert && managed) {
1950 pv = pa2pv(pa);
1951 nidx = pteidx(c_pte);
1952
1953 pvebase[nidx].pve_next = pv->pv_idx;
1954 pv->pv_idx = nidx;
1955 }
1956
1957 /* Move any allocated tables back into the active pool. */
1958
1959 switch (llevel) {
1960 case NEWA:
1961 TAILQ_INSERT_TAIL(&a_pool, a_tbl, at_link);
1962 /* FALLTHROUGH */
1963 case NEWB:
1964 TAILQ_INSERT_TAIL(&b_pool, b_tbl, bt_link);
1965 /* FALLTHROUGH */
1966 case NEWC:
1967 TAILQ_INSERT_TAIL(&c_pool, c_tbl, ct_link);
1968 /* FALLTHROUGH */
1969 default:
1970 break;
1971 }
1972
1973 return 0;
1974 }
1975
1976 /* pmap_enter_kernel INTERNAL
1977 **
1978 * Map the given virtual address to the given physical address within the
1979 * kernel address space. This function exists because the kernel map does
1980 * not do dynamic table allocation. It consists of a contiguous array of ptes
1981 * and can be edited directly without the need to walk through any tables.
1982 *
1983 * XXX: "Danger, Will Robinson!"
1984 * Note that the kernel should never take a fault on any page
1985 * between [ KERNBASE .. virtual_avail ] and this is checked in
1986 * trap.c for kernel-mode MMU faults. This means that mappings
1987 * created in that range must be implicily wired. -gwr
1988 */
1989 void
1990 pmap_enter_kernel(va, pa, prot)
1991 vaddr_t va;
1992 paddr_t pa;
1993 vm_prot_t prot;
1994 {
1995 boolean_t was_valid, insert;
1996 u_short pte_idx;
1997 int flags;
1998 mmu_short_pte_t *pte;
1999 pv_t *pv;
2000 paddr_t old_pa;
2001
2002 flags = (pa & ~MMU_PAGE_MASK);
2003 pa &= MMU_PAGE_MASK;
2004
2005 if (is_managed(pa))
2006 insert = TRUE;
2007 else
2008 insert = FALSE;
2009
2010 /*
2011 * Calculate the index of the PTE being modified.
2012 */
2013 pte_idx = (u_long) m68k_btop(va - KERNBASE);
2014
2015 /* This array is traditionally named "Sysmap" */
2016 pte = &kernCbase[pte_idx];
2017
2018 if (MMU_VALID_DT(*pte)) {
2019 was_valid = TRUE;
2020 /*
2021 * If the PTE already maps a different
2022 * physical address, umap and pv_unlink.
2023 */
2024 old_pa = MMU_PTE_PA(*pte);
2025 if (pa != old_pa)
2026 pmap_remove_pte(pte);
2027 else {
2028 /*
2029 * Old PA and new PA are the same. No need to
2030 * relink the mapping within the PV list.
2031 */
2032 insert = FALSE;
2033
2034 /*
2035 * Save any mod/ref bits on the PTE.
2036 */
2037 pte->attr.raw &= (MMU_SHORT_PTE_USED|MMU_SHORT_PTE_M);
2038 }
2039 } else {
2040 pte->attr.raw = MMU_DT_INVALID;
2041 was_valid = FALSE;
2042 }
2043
2044 /*
2045 * Map the page. Being careful to preserve modified/referenced bits
2046 * on the PTE.
2047 */
2048 pte->attr.raw |= (pa | MMU_DT_PAGE);
2049
2050 if (!(prot & VM_PROT_WRITE)) /* If access should be read-only */
2051 pte->attr.raw |= MMU_SHORT_PTE_WP;
2052 if (flags & PMAP_NC)
2053 pte->attr.raw |= MMU_SHORT_PTE_CI;
2054 if (was_valid)
2055 TBIS(va);
2056
2057 /*
2058 * Insert the PTE into the PV system, if need be.
2059 */
2060 if (insert) {
2061 pv = pa2pv(pa);
2062 pvebase[pte_idx].pve_next = pv->pv_idx;
2063 pv->pv_idx = pte_idx;
2064 }
2065 }
2066
2067 void
2068 pmap_kenter_pa(va, pa, prot)
2069 vaddr_t va;
2070 paddr_t pa;
2071 vm_prot_t prot;
2072 {
2073 mmu_short_pte_t *pte;
2074
2075 /* This array is traditionally named "Sysmap" */
2076 pte = &kernCbase[(u_long)m68k_btop(va - KERNBASE)];
2077
2078 KASSERT(!MMU_VALID_DT(*pte));
2079 pte->attr.raw = MMU_DT_INVALID | MMU_DT_PAGE | (pa & MMU_PAGE_MASK);
2080 if (!(prot & VM_PROT_WRITE))
2081 pte->attr.raw |= MMU_SHORT_PTE_WP;
2082 }
2083
2084 void
2085 pmap_kremove(va, len)
2086 vaddr_t va;
2087 vsize_t len;
2088 {
2089 int idx, eidx;
2090
2091 #ifdef PMAP_DEBUG
2092 if ((sva & PGOFSET) || (eva & PGOFSET))
2093 panic("pmap_kremove: alignment");
2094 #endif
2095
2096 idx = m68k_btop(va - KERNBASE);
2097 eidx = m68k_btop(va + len - KERNBASE);
2098
2099 while (idx < eidx) {
2100 kernCbase[idx++].attr.raw = MMU_DT_INVALID;
2101 TBIS(va);
2102 va += NBPG;
2103 }
2104 }
2105
2106 /* pmap_map INTERNAL
2107 **
2108 * Map a contiguous range of physical memory into a contiguous range of
2109 * the kernel virtual address space.
2110 *
2111 * Used for device mappings and early mapping of the kernel text/data/bss.
2112 * Returns the first virtual address beyond the end of the range.
2113 */
2114 vaddr_t
2115 pmap_map(va, pa, endpa, prot)
2116 vaddr_t va;
2117 paddr_t pa;
2118 paddr_t endpa;
2119 int prot;
2120 {
2121 int sz;
2122
2123 sz = endpa - pa;
2124 do {
2125 pmap_enter_kernel(va, pa, prot);
2126 va += NBPG;
2127 pa += NBPG;
2128 sz -= NBPG;
2129 } while (sz > 0);
2130 pmap_update(pmap_kernel());
2131 return(va);
2132 }
2133
2134 /* pmap_protect INTERFACE
2135 **
2136 * Apply the given protection to the given virtual address range within
2137 * the given map.
2138 *
2139 * It is ok for the protection applied to be stronger than what is
2140 * specified. We use this to our advantage when the given map has no
2141 * mapping for the virtual address. By skipping a page when this
2142 * is discovered, we are effectively applying a protection of VM_PROT_NONE,
2143 * and therefore do not need to map the page just to apply a protection
2144 * code. Only pmap_enter() needs to create new mappings if they do not exist.
2145 *
2146 * XXX - This function could be speeded up by using pmap_stroll() for inital
2147 * setup, and then manual scrolling in the for() loop.
2148 */
2149 void
2150 pmap_protect(pmap, startva, endva, prot)
2151 pmap_t pmap;
2152 vaddr_t startva, endva;
2153 vm_prot_t prot;
2154 {
2155 boolean_t iscurpmap;
2156 int a_idx, b_idx, c_idx;
2157 a_tmgr_t *a_tbl;
2158 b_tmgr_t *b_tbl;
2159 c_tmgr_t *c_tbl;
2160 mmu_short_pte_t *pte;
2161
2162 if (pmap == pmap_kernel()) {
2163 pmap_protect_kernel(startva, endva, prot);
2164 return;
2165 }
2166
2167 /*
2168 * In this particular pmap implementation, there are only three
2169 * types of memory protection: 'all' (read/write/execute),
2170 * 'read-only' (read/execute) and 'none' (no mapping.)
2171 * It is not possible for us to treat 'executable' as a separate
2172 * protection type. Therefore, protection requests that seek to
2173 * remove execute permission while retaining read or write, and those
2174 * that make little sense (write-only for example) are ignored.
2175 */
2176 switch (prot) {
2177 case VM_PROT_NONE:
2178 /*
2179 * A request to apply the protection code of
2180 * 'VM_PROT_NONE' is a synonym for pmap_remove().
2181 */
2182 pmap_remove(pmap, startva, endva);
2183 return;
2184 case VM_PROT_EXECUTE:
2185 case VM_PROT_READ:
2186 case VM_PROT_READ|VM_PROT_EXECUTE:
2187 /* continue */
2188 break;
2189 case VM_PROT_WRITE:
2190 case VM_PROT_WRITE|VM_PROT_READ:
2191 case VM_PROT_WRITE|VM_PROT_EXECUTE:
2192 case VM_PROT_ALL:
2193 /* None of these should happen in a sane system. */
2194 return;
2195 }
2196
2197 /*
2198 * If the pmap has no A table, it has no mappings and therefore
2199 * there is nothing to protect.
2200 */
2201 if ((a_tbl = pmap->pm_a_tmgr) == NULL)
2202 return;
2203
2204 a_idx = MMU_TIA(startva);
2205 b_idx = MMU_TIB(startva);
2206 c_idx = MMU_TIC(startva);
2207 b_tbl = (b_tmgr_t *) c_tbl = NULL;
2208
2209 iscurpmap = (pmap == current_pmap());
2210 while (startva < endva) {
2211 if (b_tbl || MMU_VALID_DT(a_tbl->at_dtbl[a_idx])) {
2212 if (b_tbl == NULL) {
2213 b_tbl = (b_tmgr_t *) a_tbl->at_dtbl[a_idx].addr.raw;
2214 b_tbl = mmu_ptov((vaddr_t)b_tbl);
2215 b_tbl = mmuB2tmgr((mmu_short_dte_t *)b_tbl);
2216 }
2217 if (c_tbl || MMU_VALID_DT(b_tbl->bt_dtbl[b_idx])) {
2218 if (c_tbl == NULL) {
2219 c_tbl = (c_tmgr_t *) MMU_DTE_PA(b_tbl->bt_dtbl[b_idx]);
2220 c_tbl = mmu_ptov((vaddr_t)c_tbl);
2221 c_tbl = mmuC2tmgr((mmu_short_pte_t *)c_tbl);
2222 }
2223 if (MMU_VALID_DT(c_tbl->ct_dtbl[c_idx])) {
2224 pte = &c_tbl->ct_dtbl[c_idx];
2225 /* make the mapping read-only */
2226 pte->attr.raw |= MMU_SHORT_PTE_WP;
2227 /*
2228 * If we just modified the current address space,
2229 * flush any translations for the modified page from
2230 * the translation cache and any data from it in the
2231 * data cache.
2232 */
2233 if (iscurpmap)
2234 TBIS(startva);
2235 }
2236 startva += NBPG;
2237
2238 if (++c_idx >= MMU_C_TBL_SIZE) { /* exceeded C table? */
2239 c_tbl = NULL;
2240 c_idx = 0;
2241 if (++b_idx >= MMU_B_TBL_SIZE) { /* exceeded B table? */
2242 b_tbl = NULL;
2243 b_idx = 0;
2244 }
2245 }
2246 } else { /* C table wasn't valid */
2247 c_tbl = NULL;
2248 c_idx = 0;
2249 startva += MMU_TIB_RANGE;
2250 if (++b_idx >= MMU_B_TBL_SIZE) { /* exceeded B table? */
2251 b_tbl = NULL;
2252 b_idx = 0;
2253 }
2254 } /* C table */
2255 } else { /* B table wasn't valid */
2256 b_tbl = NULL;
2257 b_idx = 0;
2258 startva += MMU_TIA_RANGE;
2259 a_idx++;
2260 } /* B table */
2261 }
2262 }
2263
2264 /* pmap_protect_kernel INTERNAL
2265 **
2266 * Apply the given protection code to a kernel address range.
2267 */
2268 void
2269 pmap_protect_kernel(startva, endva, prot)
2270 vaddr_t startva, endva;
2271 vm_prot_t prot;
2272 {
2273 vaddr_t va;
2274 mmu_short_pte_t *pte;
2275
2276 pte = &kernCbase[(unsigned long) m68k_btop(startva - KERNBASE)];
2277 for (va = startva; va < endva; va += NBPG, pte++) {
2278 if (MMU_VALID_DT(*pte)) {
2279 switch (prot) {
2280 case VM_PROT_ALL:
2281 break;
2282 case VM_PROT_EXECUTE:
2283 case VM_PROT_READ:
2284 case VM_PROT_READ|VM_PROT_EXECUTE:
2285 pte->attr.raw |= MMU_SHORT_PTE_WP;
2286 break;
2287 case VM_PROT_NONE:
2288 /* this is an alias for 'pmap_remove_kernel' */
2289 pmap_remove_pte(pte);
2290 break;
2291 default:
2292 break;
2293 }
2294 /*
2295 * since this is the kernel, immediately flush any cached
2296 * descriptors for this address.
2297 */
2298 TBIS(va);
2299 }
2300 }
2301 }
2302
2303 /* pmap_unwire INTERFACE
2304 **
2305 * Clear the wired attribute of the specified page.
2306 *
2307 * This function is called from vm_fault.c to unwire
2308 * a mapping.
2309 */
2310 void
2311 pmap_unwire(pmap, va)
2312 pmap_t pmap;
2313 vaddr_t va;
2314 {
2315 int a_idx, b_idx, c_idx;
2316 a_tmgr_t *a_tbl;
2317 b_tmgr_t *b_tbl;
2318 c_tmgr_t *c_tbl;
2319 mmu_short_pte_t *pte;
2320
2321 /* Kernel mappings always remain wired. */
2322 if (pmap == pmap_kernel())
2323 return;
2324
2325 /*
2326 * Walk through the tables. If the walk terminates without
2327 * a valid PTE then the address wasn't wired in the first place.
2328 * Return immediately.
2329 */
2330 if (pmap_stroll(pmap, va, &a_tbl, &b_tbl, &c_tbl, &pte, &a_idx,
2331 &b_idx, &c_idx) == FALSE)
2332 return;
2333
2334
2335 /* Is the PTE wired? If not, return. */
2336 if (!(pte->attr.raw & MMU_SHORT_PTE_WIRED))
2337 return;
2338
2339 /* Remove the wiring bit. */
2340 pte->attr.raw &= ~(MMU_SHORT_PTE_WIRED);
2341
2342 /*
2343 * Decrement the wired entry count in the C table.
2344 * If it reaches zero the following things happen:
2345 * 1. The table no longer has any wired entries and is considered
2346 * unwired.
2347 * 2. It is placed on the available queue.
2348 * 3. The parent table's wired entry count is decremented.
2349 * 4. If it reaches zero, this process repeats at step 1 and
2350 * stops at after reaching the A table.
2351 */
2352 if (--c_tbl->ct_wcnt == 0) {
2353 TAILQ_INSERT_TAIL(&c_pool, c_tbl, ct_link);
2354 if (--b_tbl->bt_wcnt == 0) {
2355 TAILQ_INSERT_TAIL(&b_pool, b_tbl, bt_link);
2356 if (--a_tbl->at_wcnt == 0) {
2357 TAILQ_INSERT_TAIL(&a_pool, a_tbl, at_link);
2358 }
2359 }
2360 }
2361 }
2362
2363 /* pmap_copy INTERFACE
2364 **
2365 * Copy the mappings of a range of addresses in one pmap, into
2366 * the destination address of another.
2367 *
2368 * This routine is advisory. Should we one day decide that MMU tables
2369 * may be shared by more than one pmap, this function should be used to
2370 * link them together. Until that day however, we do nothing.
2371 */
2372 void
2373 pmap_copy(pmap_a, pmap_b, dst, len, src)
2374 pmap_t pmap_a, pmap_b;
2375 vaddr_t dst;
2376 vsize_t len;
2377 vaddr_t src;
2378 {
2379 /* not implemented. */
2380 }
2381
2382 /* pmap_copy_page INTERFACE
2383 **
2384 * Copy the contents of one physical page into another.
2385 *
2386 * This function makes use of two virtual pages allocated in pmap_bootstrap()
2387 * to map the two specified physical pages into the kernel address space.
2388 *
2389 * Note: We could use the transparent translation registers to make the
2390 * mappings. If we do so, be sure to disable interrupts before using them.
2391 */
2392 void
2393 pmap_copy_page(srcpa, dstpa)
2394 paddr_t srcpa, dstpa;
2395 {
2396 vaddr_t srcva, dstva;
2397 int s;
2398
2399 srcva = tmp_vpages[0];
2400 dstva = tmp_vpages[1];
2401
2402 s = splvm();
2403 #ifdef DIAGNOSTIC
2404 if (tmp_vpages_inuse++)
2405 panic("pmap_copy_page: temporary vpages are in use.");
2406 #endif
2407
2408 /* Map pages as non-cacheable to avoid cache polution? */
2409 pmap_kenter_pa(srcva, srcpa, VM_PROT_READ);
2410 pmap_kenter_pa(dstva, dstpa, VM_PROT_READ|VM_PROT_WRITE);
2411
2412 /* Hand-optimized version of bcopy(src, dst, NBPG) */
2413 copypage((char *) srcva, (char *) dstva);
2414
2415 pmap_kremove(srcva, NBPG);
2416 pmap_kremove(dstva, NBPG);
2417
2418 #ifdef DIAGNOSTIC
2419 --tmp_vpages_inuse;
2420 #endif
2421 splx(s);
2422 }
2423
2424 /* pmap_zero_page INTERFACE
2425 **
2426 * Zero the contents of the specified physical page.
2427 *
2428 * Uses one of the virtual pages allocated in pmap_boostrap()
2429 * to map the specified page into the kernel address space.
2430 */
2431 void
2432 pmap_zero_page(dstpa)
2433 paddr_t dstpa;
2434 {
2435 vaddr_t dstva;
2436 int s;
2437
2438 dstva = tmp_vpages[1];
2439 s = splvm();
2440 #ifdef DIAGNOSTIC
2441 if (tmp_vpages_inuse++)
2442 panic("pmap_zero_page: temporary vpages are in use.");
2443 #endif
2444
2445 /* The comments in pmap_copy_page() above apply here also. */
2446 pmap_kenter_pa(dstva, dstpa, VM_PROT_READ|VM_PROT_WRITE);
2447
2448 /* Hand-optimized version of bzero(ptr, NBPG) */
2449 zeropage((char *) dstva);
2450
2451 pmap_kremove(dstva, NBPG);
2452 #ifdef DIAGNOSTIC
2453 --tmp_vpages_inuse;
2454 #endif
2455 splx(s);
2456 }
2457
2458 /* pmap_collect INTERFACE
2459 **
2460 * Called from the VM system when we are about to swap out
2461 * the process using this pmap. This should give up any
2462 * resources held here, including all its MMU tables.
2463 */
2464 void
2465 pmap_collect(pmap)
2466 pmap_t pmap;
2467 {
2468 /* XXX - todo... */
2469 }
2470
2471 /* pmap_create INTERFACE
2472 **
2473 * Create and return a pmap structure.
2474 */
2475 pmap_t
2476 pmap_create()
2477 {
2478 pmap_t pmap;
2479
2480 pmap = pool_get(&pmap_pmap_pool, PR_WAITOK);
2481 pmap_pinit(pmap);
2482 return pmap;
2483 }
2484
2485 /* pmap_pinit INTERNAL
2486 **
2487 * Initialize a pmap structure.
2488 */
2489 void
2490 pmap_pinit(pmap)
2491 pmap_t pmap;
2492 {
2493 memset(pmap, 0, sizeof(struct pmap));
2494 pmap->pm_a_tmgr = NULL;
2495 pmap->pm_a_phys = kernAphys;
2496 pmap->pm_refcount = 1;
2497 simple_lock_init(&pmap->pm_lock);
2498 }
2499
2500 /* pmap_release INTERFACE
2501 **
2502 * Release any resources held by the given pmap.
2503 *
2504 * This is the reverse analog to pmap_pinit. It does not
2505 * necessarily mean for the pmap structure to be deallocated,
2506 * as in pmap_destroy.
2507 */
2508 void
2509 pmap_release(pmap)
2510 pmap_t pmap;
2511 {
2512 /*
2513 * As long as the pmap contains no mappings,
2514 * which always should be the case whenever
2515 * this function is called, there really should
2516 * be nothing to do.
2517 */
2518 #ifdef PMAP_DEBUG
2519 if (pmap == pmap_kernel())
2520 panic("pmap_release: kernel pmap");
2521 #endif
2522 /*
2523 * XXX - If this pmap has an A table, give it back.
2524 * The pmap SHOULD be empty by now, and pmap_remove
2525 * should have already given back the A table...
2526 * However, I see: pmap->pm_a_tmgr->at_ecnt == 1
2527 * at this point, which means some mapping was not
2528 * removed when it should have been. -gwr
2529 */
2530 if (pmap->pm_a_tmgr != NULL) {
2531 /* First make sure we are not using it! */
2532 if (kernel_crp.rp_addr == pmap->pm_a_phys) {
2533 kernel_crp.rp_addr = kernAphys;
2534 loadcrp(&kernel_crp);
2535 }
2536 #ifdef PMAP_DEBUG /* XXX - todo! */
2537 /* XXX - Now complain... */
2538 printf("pmap_release: still have table\n");
2539 Debugger();
2540 #endif
2541 free_a_table(pmap->pm_a_tmgr, TRUE);
2542 pmap->pm_a_tmgr = NULL;
2543 pmap->pm_a_phys = kernAphys;
2544 }
2545 }
2546
2547 /* pmap_reference INTERFACE
2548 **
2549 * Increment the reference count of a pmap.
2550 */
2551 void
2552 pmap_reference(pmap)
2553 pmap_t pmap;
2554 {
2555 pmap_lock(pmap);
2556 pmap_add_ref(pmap);
2557 pmap_unlock(pmap);
2558 }
2559
2560 /* pmap_dereference INTERNAL
2561 **
2562 * Decrease the reference count on the given pmap
2563 * by one and return the current count.
2564 */
2565 int
2566 pmap_dereference(pmap)
2567 pmap_t pmap;
2568 {
2569 int rtn;
2570
2571 pmap_lock(pmap);
2572 rtn = pmap_del_ref(pmap);
2573 pmap_unlock(pmap);
2574
2575 return rtn;
2576 }
2577
2578 /* pmap_destroy INTERFACE
2579 **
2580 * Decrement a pmap's reference count and delete
2581 * the pmap if it becomes zero. Will be called
2582 * only after all mappings have been removed.
2583 */
2584 void
2585 pmap_destroy(pmap)
2586 pmap_t pmap;
2587 {
2588 if (pmap_dereference(pmap) == 0) {
2589 pmap_release(pmap);
2590 pool_put(&pmap_pmap_pool, pmap);
2591 }
2592 }
2593
2594 /* pmap_is_referenced INTERFACE
2595 **
2596 * Determine if the given physical page has been
2597 * referenced (read from [or written to.])
2598 */
2599 boolean_t
2600 pmap_is_referenced(pg)
2601 struct vm_page *pg;
2602 {
2603 paddr_t pa = VM_PAGE_TO_PHYS(pg);
2604 pv_t *pv;
2605 int idx;
2606
2607 /*
2608 * Check the flags on the pv head. If they are set,
2609 * return immediately. Otherwise a search must be done.
2610 */
2611
2612 pv = pa2pv(pa);
2613 if (pv->pv_flags & PV_FLAGS_USED)
2614 return TRUE;
2615
2616 /*
2617 * Search through all pv elements pointing
2618 * to this page and query their reference bits
2619 */
2620
2621 for (idx = pv->pv_idx; idx != PVE_EOL; idx = pvebase[idx].pve_next) {
2622 if (MMU_PTE_USED(kernCbase[idx])) {
2623 return TRUE;
2624 }
2625 }
2626 return FALSE;
2627 }
2628
2629 /* pmap_is_modified INTERFACE
2630 **
2631 * Determine if the given physical page has been
2632 * modified (written to.)
2633 */
2634 boolean_t
2635 pmap_is_modified(pg)
2636 struct vm_page *pg;
2637 {
2638 paddr_t pa = VM_PAGE_TO_PHYS(pg);
2639 pv_t *pv;
2640 int idx;
2641
2642 /* see comments in pmap_is_referenced() */
2643 pv = pa2pv(pa);
2644 if (pv->pv_flags & PV_FLAGS_MDFY)
2645 return TRUE;
2646
2647 for (idx = pv->pv_idx;
2648 idx != PVE_EOL;
2649 idx = pvebase[idx].pve_next) {
2650
2651 if (MMU_PTE_MODIFIED(kernCbase[idx])) {
2652 return TRUE;
2653 }
2654 }
2655
2656 return FALSE;
2657 }
2658
2659 /* pmap_page_protect INTERFACE
2660 **
2661 * Applies the given protection to all mappings to the given
2662 * physical page.
2663 */
2664 void
2665 pmap_page_protect(pg, prot)
2666 struct vm_page *pg;
2667 vm_prot_t prot;
2668 {
2669 paddr_t pa = VM_PAGE_TO_PHYS(pg);
2670 pv_t *pv;
2671 int idx;
2672 vaddr_t va;
2673 struct mmu_short_pte_struct *pte;
2674 c_tmgr_t *c_tbl;
2675 pmap_t pmap, curpmap;
2676
2677 curpmap = current_pmap();
2678 pv = pa2pv(pa);
2679
2680 for (idx = pv->pv_idx; idx != PVE_EOL; idx = pvebase[idx].pve_next) {
2681 pte = &kernCbase[idx];
2682 switch (prot) {
2683 case VM_PROT_ALL:
2684 /* do nothing */
2685 break;
2686 case VM_PROT_EXECUTE:
2687 case VM_PROT_READ:
2688 case VM_PROT_READ|VM_PROT_EXECUTE:
2689 /*
2690 * Determine the virtual address mapped by
2691 * the PTE and flush ATC entries if necessary.
2692 */
2693 va = pmap_get_pteinfo(idx, &pmap, &c_tbl);
2694 pte->attr.raw |= MMU_SHORT_PTE_WP;
2695 if (pmap == curpmap || pmap == pmap_kernel())
2696 TBIS(va);
2697 break;
2698 case VM_PROT_NONE:
2699 /* Save the mod/ref bits. */
2700 pv->pv_flags |= pte->attr.raw;
2701 /* Invalidate the PTE. */
2702 pte->attr.raw = MMU_DT_INVALID;
2703
2704 /*
2705 * Update table counts. And flush ATC entries
2706 * if necessary.
2707 */
2708 va = pmap_get_pteinfo(idx, &pmap, &c_tbl);
2709
2710 /*
2711 * If the PTE belongs to the kernel map,
2712 * be sure to flush the page it maps.
2713 */
2714 if (pmap == pmap_kernel()) {
2715 TBIS(va);
2716 } else {
2717 /*
2718 * The PTE belongs to a user map.
2719 * update the entry count in the C
2720 * table to which it belongs and flush
2721 * the ATC if the mapping belongs to
2722 * the current pmap.
2723 */
2724 c_tbl->ct_ecnt--;
2725 if (pmap == curpmap)
2726 TBIS(va);
2727 }
2728 break;
2729 default:
2730 break;
2731 }
2732 }
2733
2734 /*
2735 * If the protection code indicates that all mappings to the page
2736 * be removed, truncate the PV list to zero entries.
2737 */
2738 if (prot == VM_PROT_NONE)
2739 pv->pv_idx = PVE_EOL;
2740 }
2741
2742 /* pmap_get_pteinfo INTERNAL
2743 **
2744 * Called internally to find the pmap and virtual address within that
2745 * map to which the pte at the given index maps. Also includes the PTE's C
2746 * table manager.
2747 *
2748 * Returns the pmap in the argument provided, and the virtual address
2749 * by return value.
2750 */
2751 vaddr_t
2752 pmap_get_pteinfo(idx, pmap, tbl)
2753 u_int idx;
2754 pmap_t *pmap;
2755 c_tmgr_t **tbl;
2756 {
2757 vaddr_t va = 0;
2758
2759 /*
2760 * Determine if the PTE is a kernel PTE or a user PTE.
2761 */
2762 if (idx >= NUM_KERN_PTES) {
2763 /*
2764 * The PTE belongs to a user mapping.
2765 */
2766 /* XXX: Would like an inline for this to validate idx... */
2767 *tbl = &Ctmgrbase[(idx - NUM_KERN_PTES) / MMU_C_TBL_SIZE];
2768
2769 *pmap = (*tbl)->ct_pmap;
2770 /*
2771 * To find the va to which the PTE maps, we first take
2772 * the table's base virtual address mapping which is stored
2773 * in ct_va. We then increment this address by a page for
2774 * every slot skipped until we reach the PTE.
2775 */
2776 va = (*tbl)->ct_va;
2777 va += m68k_ptob(idx % MMU_C_TBL_SIZE);
2778 } else {
2779 /*
2780 * The PTE belongs to the kernel map.
2781 */
2782 *pmap = pmap_kernel();
2783
2784 va = m68k_ptob(idx);
2785 va += KERNBASE;
2786 }
2787
2788 return va;
2789 }
2790
2791 /* pmap_clear_modify INTERFACE
2792 **
2793 * Clear the modification bit on the page at the specified
2794 * physical address.
2795 *
2796 */
2797 boolean_t
2798 pmap_clear_modify(pg)
2799 struct vm_page *pg;
2800 {
2801 paddr_t pa = VM_PAGE_TO_PHYS(pg);
2802 boolean_t rv;
2803
2804 rv = pmap_is_modified(pg);
2805 pmap_clear_pv(pa, PV_FLAGS_MDFY);
2806 return rv;
2807 }
2808
2809 /* pmap_clear_reference INTERFACE
2810 **
2811 * Clear the referenced bit on the page at the specified
2812 * physical address.
2813 */
2814 boolean_t
2815 pmap_clear_reference(pg)
2816 struct vm_page *pg;
2817 {
2818 paddr_t pa = VM_PAGE_TO_PHYS(pg);
2819 boolean_t rv;
2820
2821 rv = pmap_is_referenced(pg);
2822 pmap_clear_pv(pa, PV_FLAGS_USED);
2823 return rv;
2824 }
2825
2826 /* pmap_clear_pv INTERNAL
2827 **
2828 * Clears the specified flag from the specified physical address.
2829 * (Used by pmap_clear_modify() and pmap_clear_reference().)
2830 *
2831 * Flag is one of:
2832 * PV_FLAGS_MDFY - Page modified bit.
2833 * PV_FLAGS_USED - Page used (referenced) bit.
2834 *
2835 * This routine must not only clear the flag on the pv list
2836 * head. It must also clear the bit on every pte in the pv
2837 * list associated with the address.
2838 */
2839 void
2840 pmap_clear_pv(pa, flag)
2841 paddr_t pa;
2842 int flag;
2843 {
2844 pv_t *pv;
2845 int idx;
2846 vaddr_t va;
2847 pmap_t pmap;
2848 mmu_short_pte_t *pte;
2849 c_tmgr_t *c_tbl;
2850
2851 pv = pa2pv(pa);
2852 pv->pv_flags &= ~(flag);
2853 for (idx = pv->pv_idx; idx != PVE_EOL; idx = pvebase[idx].pve_next) {
2854 pte = &kernCbase[idx];
2855 pte->attr.raw &= ~(flag);
2856
2857 /*
2858 * The MC68030 MMU will not set the modified or
2859 * referenced bits on any MMU tables for which it has
2860 * a cached descriptor with its modify bit set. To insure
2861 * that it will modify these bits on the PTE during the next
2862 * time it is written to or read from, we must flush it from
2863 * the ATC.
2864 *
2865 * Ordinarily it is only necessary to flush the descriptor
2866 * if it is used in the current address space. But since I
2867 * am not sure that there will always be a notion of
2868 * 'the current address space' when this function is called,
2869 * I will skip the test and always flush the address. It
2870 * does no harm.
2871 */
2872
2873 va = pmap_get_pteinfo(idx, &pmap, &c_tbl);
2874 TBIS(va);
2875 }
2876 }
2877
2878 /* pmap_extract INTERFACE
2879 **
2880 * Return the physical address mapped by the virtual address
2881 * in the specified pmap.
2882 *
2883 * Note: this function should also apply an exclusive lock
2884 * on the pmap system during its duration.
2885 */
2886 boolean_t
2887 pmap_extract(pmap, va, pap)
2888 pmap_t pmap;
2889 vaddr_t va;
2890 paddr_t *pap;
2891 {
2892 int a_idx, b_idx, pte_idx;
2893 a_tmgr_t *a_tbl;
2894 b_tmgr_t *b_tbl;
2895 c_tmgr_t *c_tbl;
2896 mmu_short_pte_t *c_pte;
2897
2898 if (pmap == pmap_kernel())
2899 return pmap_extract_kernel(va, pap);
2900
2901 if (pmap_stroll(pmap, va, &a_tbl, &b_tbl, &c_tbl,
2902 &c_pte, &a_idx, &b_idx, &pte_idx) == FALSE)
2903 return FALSE;
2904
2905 if (!MMU_VALID_DT(*c_pte))
2906 return FALSE;
2907
2908 if (pap != NULL)
2909 *pap = MMU_PTE_PA(*c_pte);
2910 return (TRUE);
2911 }
2912
2913 /* pmap_extract_kernel INTERNAL
2914 **
2915 * Extract a translation from the kernel address space.
2916 */
2917 boolean_t
2918 pmap_extract_kernel(va, pap)
2919 vaddr_t va;
2920 paddr_t *pap;
2921 {
2922 mmu_short_pte_t *pte;
2923
2924 pte = &kernCbase[(u_int) m68k_btop(va - KERNBASE)];
2925 if (!MMU_VALID_DT(*pte))
2926 return (FALSE);
2927 if (pap != NULL)
2928 *pap = MMU_PTE_PA(*pte);
2929 return (TRUE);
2930 }
2931
2932 /* pmap_remove_kernel INTERNAL
2933 **
2934 * Remove the mapping of a range of virtual addresses from the kernel map.
2935 * The arguments are already page-aligned.
2936 */
2937 void
2938 pmap_remove_kernel(sva, eva)
2939 vaddr_t sva;
2940 vaddr_t eva;
2941 {
2942 int idx, eidx;
2943
2944 #ifdef PMAP_DEBUG
2945 if ((sva & PGOFSET) || (eva & PGOFSET))
2946 panic("pmap_remove_kernel: alignment");
2947 #endif
2948
2949 idx = m68k_btop(sva - KERNBASE);
2950 eidx = m68k_btop(eva - KERNBASE);
2951
2952 while (idx < eidx) {
2953 pmap_remove_pte(&kernCbase[idx++]);
2954 TBIS(sva);
2955 sva += NBPG;
2956 }
2957 }
2958
2959 /* pmap_remove INTERFACE
2960 **
2961 * Remove the mapping of a range of virtual addresses from the given pmap.
2962 *
2963 * If the range contains any wired entries, this function will probably create
2964 * disaster.
2965 */
2966 void
2967 pmap_remove(pmap, start, end)
2968 pmap_t pmap;
2969 vaddr_t start;
2970 vaddr_t end;
2971 {
2972
2973 if (pmap == pmap_kernel()) {
2974 pmap_remove_kernel(start, end);
2975 return;
2976 }
2977
2978 /*
2979 * If the pmap doesn't have an A table of its own, it has no mappings
2980 * that can be removed.
2981 */
2982 if (pmap->pm_a_tmgr == NULL)
2983 return;
2984
2985 /*
2986 * Remove the specified range from the pmap. If the function
2987 * returns true, the operation removed all the valid mappings
2988 * in the pmap and freed its A table. If this happened to the
2989 * currently loaded pmap, the MMU root pointer must be reloaded
2990 * with the default 'kernel' map.
2991 */
2992 if (pmap_remove_a(pmap->pm_a_tmgr, start, end)) {
2993 if (kernel_crp.rp_addr == pmap->pm_a_phys) {
2994 kernel_crp.rp_addr = kernAphys;
2995 loadcrp(&kernel_crp);
2996 /* will do TLB flush below */
2997 }
2998 pmap->pm_a_tmgr = NULL;
2999 pmap->pm_a_phys = kernAphys;
3000 }
3001
3002 /*
3003 * If we just modified the current address space,
3004 * make sure to flush the MMU cache.
3005 *
3006 * XXX - this could be an unecessarily large flush.
3007 * XXX - Could decide, based on the size of the VA range
3008 * to be removed, whether to flush "by pages" or "all".
3009 */
3010 if (pmap == current_pmap())
3011 TBIAU();
3012 }
3013
3014 /* pmap_remove_a INTERNAL
3015 **
3016 * This is function number one in a set of three that removes a range
3017 * of memory in the most efficient manner by removing the highest possible
3018 * tables from the memory space. This particular function attempts to remove
3019 * as many B tables as it can, delegating the remaining fragmented ranges to
3020 * pmap_remove_b().
3021 *
3022 * If the removal operation results in an empty A table, the function returns
3023 * TRUE.
3024 *
3025 * It's ugly but will do for now.
3026 */
3027 boolean_t
3028 pmap_remove_a(a_tbl, start, end)
3029 a_tmgr_t *a_tbl;
3030 vaddr_t start;
3031 vaddr_t end;
3032 {
3033 boolean_t empty;
3034 int idx;
3035 vaddr_t nstart, nend;
3036 b_tmgr_t *b_tbl;
3037 mmu_long_dte_t *a_dte;
3038 mmu_short_dte_t *b_dte;
3039
3040 /*
3041 * The following code works with what I call a 'granularity
3042 * reduction algorithim'. A range of addresses will always have
3043 * the following properties, which are classified according to
3044 * how the range relates to the size of the current granularity
3045 * - an A table entry:
3046 *
3047 * 1 2 3 4
3048 * -+---+---+---+---+---+---+---+-
3049 * -+---+---+---+---+---+---+---+-
3050 *
3051 * A range will always start on a granularity boundary, illustrated
3052 * by '+' signs in the table above, or it will start at some point
3053 * inbetween a granularity boundary, as illustrated by point 1.
3054 * The first step in removing a range of addresses is to remove the
3055 * range between 1 and 2, the nearest granularity boundary. This
3056 * job is handled by the section of code governed by the
3057 * 'if (start < nstart)' statement.
3058 *
3059 * A range will always encompass zero or more intergral granules,
3060 * illustrated by points 2 and 3. Integral granules are easy to
3061 * remove. The removal of these granules is the second step, and
3062 * is handled by the code block 'if (nstart < nend)'.
3063 *
3064 * Lastly, a range will always end on a granularity boundary,
3065 * ill. by point 3, or it will fall just beyond one, ill. by point
3066 * 4. The last step involves removing this range and is handled by
3067 * the code block 'if (nend < end)'.
3068 */
3069 nstart = MMU_ROUND_UP_A(start);
3070 nend = MMU_ROUND_A(end);
3071
3072 if (start < nstart) {
3073 /*
3074 * This block is executed if the range starts between
3075 * a granularity boundary.
3076 *
3077 * First find the DTE which is responsible for mapping
3078 * the start of the range.
3079 */
3080 idx = MMU_TIA(start);
3081 a_dte = &a_tbl->at_dtbl[idx];
3082
3083 /*
3084 * If the DTE is valid then delegate the removal of the sub
3085 * range to pmap_remove_b(), which can remove addresses at
3086 * a finer granularity.
3087 */
3088 if (MMU_VALID_DT(*a_dte)) {
3089 b_dte = mmu_ptov(a_dte->addr.raw);
3090 b_tbl = mmuB2tmgr(b_dte);
3091
3092 /*
3093 * The sub range to be removed starts at the start
3094 * of the full range we were asked to remove, and ends
3095 * at the greater of:
3096 * 1. The end of the full range, -or-
3097 * 2. The end of the full range, rounded down to the
3098 * nearest granularity boundary.
3099 */
3100 if (end < nstart)
3101 empty = pmap_remove_b(b_tbl, start, end);
3102 else
3103 empty = pmap_remove_b(b_tbl, start, nstart);
3104
3105 /*
3106 * If the removal resulted in an empty B table,
3107 * invalidate the DTE that points to it and decrement
3108 * the valid entry count of the A table.
3109 */
3110 if (empty) {
3111 a_dte->attr.raw = MMU_DT_INVALID;
3112 a_tbl->at_ecnt--;
3113 }
3114 }
3115 /*
3116 * If the DTE is invalid, the address range is already non-
3117 * existent and can simply be skipped.
3118 */
3119 }
3120 if (nstart < nend) {
3121 /*
3122 * This block is executed if the range spans a whole number
3123 * multiple of granules (A table entries.)
3124 *
3125 * First find the DTE which is responsible for mapping
3126 * the start of the first granule involved.
3127 */
3128 idx = MMU_TIA(nstart);
3129 a_dte = &a_tbl->at_dtbl[idx];
3130
3131 /*
3132 * Remove entire sub-granules (B tables) one at a time,
3133 * until reaching the end of the range.
3134 */
3135 for (; nstart < nend; a_dte++, nstart += MMU_TIA_RANGE)
3136 if (MMU_VALID_DT(*a_dte)) {
3137 /*
3138 * Find the B table manager for the
3139 * entry and free it.
3140 */
3141 b_dte = mmu_ptov(a_dte->addr.raw);
3142 b_tbl = mmuB2tmgr(b_dte);
3143 free_b_table(b_tbl, TRUE);
3144
3145 /*
3146 * Invalidate the DTE that points to the
3147 * B table and decrement the valid entry
3148 * count of the A table.
3149 */
3150 a_dte->attr.raw = MMU_DT_INVALID;
3151 a_tbl->at_ecnt--;
3152 }
3153 }
3154 if (nend < end) {
3155 /*
3156 * This block is executed if the range ends beyond a
3157 * granularity boundary.
3158 *
3159 * First find the DTE which is responsible for mapping
3160 * the start of the nearest (rounded down) granularity
3161 * boundary.
3162 */
3163 idx = MMU_TIA(nend);
3164 a_dte = &a_tbl->at_dtbl[idx];
3165
3166 /*
3167 * If the DTE is valid then delegate the removal of the sub
3168 * range to pmap_remove_b(), which can remove addresses at
3169 * a finer granularity.
3170 */
3171 if (MMU_VALID_DT(*a_dte)) {
3172 /*
3173 * Find the B table manager for the entry
3174 * and hand it to pmap_remove_b() along with
3175 * the sub range.
3176 */
3177 b_dte = mmu_ptov(a_dte->addr.raw);
3178 b_tbl = mmuB2tmgr(b_dte);
3179
3180 empty = pmap_remove_b(b_tbl, nend, end);
3181
3182 /*
3183 * If the removal resulted in an empty B table,
3184 * invalidate the DTE that points to it and decrement
3185 * the valid entry count of the A table.
3186 */
3187 if (empty) {
3188 a_dte->attr.raw = MMU_DT_INVALID;
3189 a_tbl->at_ecnt--;
3190 }
3191 }
3192 }
3193
3194 /*
3195 * If there are no more entries in the A table, release it
3196 * back to the available pool and return TRUE.
3197 */
3198 if (a_tbl->at_ecnt == 0) {
3199 a_tbl->at_parent = NULL;
3200 TAILQ_REMOVE(&a_pool, a_tbl, at_link);
3201 TAILQ_INSERT_HEAD(&a_pool, a_tbl, at_link);
3202 empty = TRUE;
3203 } else {
3204 empty = FALSE;
3205 }
3206
3207 return empty;
3208 }
3209
3210 /* pmap_remove_b INTERNAL
3211 **
3212 * Remove a range of addresses from an address space, trying to remove entire
3213 * C tables if possible.
3214 *
3215 * If the operation results in an empty B table, the function returns TRUE.
3216 */
3217 boolean_t
3218 pmap_remove_b(b_tbl, start, end)
3219 b_tmgr_t *b_tbl;
3220 vaddr_t start;
3221 vaddr_t end;
3222 {
3223 boolean_t empty;
3224 int idx;
3225 vaddr_t nstart, nend, rstart;
3226 c_tmgr_t *c_tbl;
3227 mmu_short_dte_t *b_dte;
3228 mmu_short_pte_t *c_dte;
3229
3230
3231 nstart = MMU_ROUND_UP_B(start);
3232 nend = MMU_ROUND_B(end);
3233
3234 if (start < nstart) {
3235 idx = MMU_TIB(start);
3236 b_dte = &b_tbl->bt_dtbl[idx];
3237 if (MMU_VALID_DT(*b_dte)) {
3238 c_dte = mmu_ptov(MMU_DTE_PA(*b_dte));
3239 c_tbl = mmuC2tmgr(c_dte);
3240 if (end < nstart)
3241 empty = pmap_remove_c(c_tbl, start, end);
3242 else
3243 empty = pmap_remove_c(c_tbl, start, nstart);
3244 if (empty) {
3245 b_dte->attr.raw = MMU_DT_INVALID;
3246 b_tbl->bt_ecnt--;
3247 }
3248 }
3249 }
3250 if (nstart < nend) {
3251 idx = MMU_TIB(nstart);
3252 b_dte = &b_tbl->bt_dtbl[idx];
3253 rstart = nstart;
3254 while (rstart < nend) {
3255 if (MMU_VALID_DT(*b_dte)) {
3256 c_dte = mmu_ptov(MMU_DTE_PA(*b_dte));
3257 c_tbl = mmuC2tmgr(c_dte);
3258 free_c_table(c_tbl, TRUE);
3259 b_dte->attr.raw = MMU_DT_INVALID;
3260 b_tbl->bt_ecnt--;
3261 }
3262 b_dte++;
3263 rstart += MMU_TIB_RANGE;
3264 }
3265 }
3266 if (nend < end) {
3267 idx = MMU_TIB(nend);
3268 b_dte = &b_tbl->bt_dtbl[idx];
3269 if (MMU_VALID_DT(*b_dte)) {
3270 c_dte = mmu_ptov(MMU_DTE_PA(*b_dte));
3271 c_tbl = mmuC2tmgr(c_dte);
3272 empty = pmap_remove_c(c_tbl, nend, end);
3273 if (empty) {
3274 b_dte->attr.raw = MMU_DT_INVALID;
3275 b_tbl->bt_ecnt--;
3276 }
3277 }
3278 }
3279
3280 if (b_tbl->bt_ecnt == 0) {
3281 b_tbl->bt_parent = NULL;
3282 TAILQ_REMOVE(&b_pool, b_tbl, bt_link);
3283 TAILQ_INSERT_HEAD(&b_pool, b_tbl, bt_link);
3284 empty = TRUE;
3285 } else {
3286 empty = FALSE;
3287 }
3288
3289 return empty;
3290 }
3291
3292 /* pmap_remove_c INTERNAL
3293 **
3294 * Remove a range of addresses from the given C table.
3295 */
3296 boolean_t
3297 pmap_remove_c(c_tbl, start, end)
3298 c_tmgr_t *c_tbl;
3299 vaddr_t start;
3300 vaddr_t end;
3301 {
3302 boolean_t empty;
3303 int idx;
3304 mmu_short_pte_t *c_pte;
3305
3306 idx = MMU_TIC(start);
3307 c_pte = &c_tbl->ct_dtbl[idx];
3308 for (;start < end; start += MMU_PAGE_SIZE, c_pte++) {
3309 if (MMU_VALID_DT(*c_pte)) {
3310 pmap_remove_pte(c_pte);
3311 c_tbl->ct_ecnt--;
3312 }
3313 }
3314
3315 if (c_tbl->ct_ecnt == 0) {
3316 c_tbl->ct_parent = NULL;
3317 TAILQ_REMOVE(&c_pool, c_tbl, ct_link);
3318 TAILQ_INSERT_HEAD(&c_pool, c_tbl, ct_link);
3319 empty = TRUE;
3320 } else {
3321 empty = FALSE;
3322 }
3323
3324 return empty;
3325 }
3326
3327 /* is_managed INTERNAL
3328 **
3329 * Determine if the given physical address is managed by the PV system.
3330 * Note that this logic assumes that no one will ask for the status of
3331 * addresses which lie in-between the memory banks on the 3/80. If they
3332 * do so, it will falsely report that it is managed.
3333 *
3334 * Note: A "managed" address is one that was reported to the VM system as
3335 * a "usable page" during system startup. As such, the VM system expects the
3336 * pmap module to keep an accurate track of the useage of those pages.
3337 * Any page not given to the VM system at startup does not exist (as far as
3338 * the VM system is concerned) and is therefore "unmanaged." Examples are
3339 * those pages which belong to the ROM monitor and the memory allocated before
3340 * the VM system was started.
3341 */
3342 boolean_t
3343 is_managed(pa)
3344 paddr_t pa;
3345 {
3346 if (pa >= avail_start && pa < avail_end)
3347 return TRUE;
3348 else
3349 return FALSE;
3350 }
3351
3352 /* pmap_bootstrap_alloc INTERNAL
3353 **
3354 * Used internally for memory allocation at startup when malloc is not
3355 * available. This code will fail once it crosses the first memory
3356 * bank boundary on the 3/80. Hopefully by then however, the VM system
3357 * will be in charge of allocation.
3358 */
3359 void *
3360 pmap_bootstrap_alloc(size)
3361 int size;
3362 {
3363 void *rtn;
3364
3365 #ifdef PMAP_DEBUG
3366 if (bootstrap_alloc_enabled == FALSE) {
3367 mon_printf("pmap_bootstrap_alloc: disabled\n");
3368 sunmon_abort();
3369 }
3370 #endif
3371
3372 rtn = (void *) virtual_avail;
3373 virtual_avail += size;
3374
3375 #ifdef PMAP_DEBUG
3376 if (virtual_avail > virtual_contig_end) {
3377 mon_printf("pmap_bootstrap_alloc: out of mem\n");
3378 sunmon_abort();
3379 }
3380 #endif
3381
3382 return rtn;
3383 }
3384
3385 /* pmap_bootstap_aalign INTERNAL
3386 **
3387 * Used to insure that the next call to pmap_bootstrap_alloc() will
3388 * return a chunk of memory aligned to the specified size.
3389 *
3390 * Note: This function will only support alignment sizes that are powers
3391 * of two.
3392 */
3393 void
3394 pmap_bootstrap_aalign(size)
3395 int size;
3396 {
3397 int off;
3398
3399 off = virtual_avail & (size - 1);
3400 if (off) {
3401 (void) pmap_bootstrap_alloc(size - off);
3402 }
3403 }
3404
3405 /* pmap_pa_exists
3406 **
3407 * Used by the /dev/mem driver to see if a given PA is memory
3408 * that can be mapped. (The PA is not in a hole.)
3409 */
3410 int
3411 pmap_pa_exists(pa)
3412 paddr_t pa;
3413 {
3414 int i;
3415
3416 for (i = 0; i < SUN3X_NPHYS_RAM_SEGS; i++) {
3417 if ((pa >= avail_mem[i].pmem_start) &&
3418 (pa < avail_mem[i].pmem_end))
3419 return (1);
3420 if (avail_mem[i].pmem_next == NULL)
3421 break;
3422 }
3423 return (0);
3424 }
3425
3426 /* Called only from locore.s and pmap.c */
3427 void _pmap_switch __P((pmap_t pmap));
3428
3429 /*
3430 * _pmap_switch INTERNAL
3431 *
3432 * This is called by locore.s:cpu_switch() when it is
3433 * switching to a new process. Load new translations.
3434 * Note: done in-line by locore.s unless PMAP_DEBUG
3435 *
3436 * Note that we do NOT allocate a context here, but
3437 * share the "kernel only" context until we really
3438 * need our own context for user-space mappings in
3439 * pmap_enter_user(). [ s/context/mmu A table/ ]
3440 */
3441 void
3442 _pmap_switch(pmap)
3443 pmap_t pmap;
3444 {
3445 u_long rootpa;
3446
3447 /*
3448 * Only do reload/flush if we have to.
3449 * Note that if the old and new process
3450 * were BOTH using the "null" context,
3451 * then this will NOT flush the TLB.
3452 */
3453 rootpa = pmap->pm_a_phys;
3454 if (kernel_crp.rp_addr != rootpa) {
3455 DPRINT(("pmap_activate(%p)\n", pmap));
3456 kernel_crp.rp_addr = rootpa;
3457 loadcrp(&kernel_crp);
3458 TBIAU();
3459 }
3460 }
3461
3462 /*
3463 * Exported version of pmap_activate(). This is called from the
3464 * machine-independent VM code when a process is given a new pmap.
3465 * If (p == curlwp) do like cpu_switch would do; otherwise just
3466 * take this as notification that the process has a new pmap.
3467 */
3468 void
3469 pmap_activate(l)
3470 struct lwp *l;
3471 {
3472 if (l->l_proc == curproc) {
3473 _pmap_switch(l->l_proc->p_vmspace->vm_map.pmap);
3474 }
3475 }
3476
3477 /*
3478 * pmap_deactivate INTERFACE
3479 **
3480 * This is called to deactivate the specified process's address space.
3481 */
3482 void
3483 pmap_deactivate(l)
3484 struct lwp *l;
3485 {
3486 /* Nothing to do. */
3487 }
3488
3489 /*
3490 * Fill in the sun3x-specific part of the kernel core header
3491 * for dumpsys(). (See machdep.c for the rest.)
3492 */
3493 void
3494 pmap_kcore_hdr(sh)
3495 struct sun3x_kcore_hdr *sh;
3496 {
3497 u_long spa, len;
3498 int i;
3499
3500 sh->pg_frame = MMU_SHORT_PTE_BASEADDR;
3501 sh->pg_valid = MMU_DT_PAGE;
3502 sh->contig_end = virtual_contig_end;
3503 sh->kernCbase = (u_long)kernCbase;
3504 for (i = 0; i < SUN3X_NPHYS_RAM_SEGS; i++) {
3505 spa = avail_mem[i].pmem_start;
3506 spa = m68k_trunc_page(spa);
3507 len = avail_mem[i].pmem_end - spa;
3508 len = m68k_round_page(len);
3509 sh->ram_segs[i].start = spa;
3510 sh->ram_segs[i].size = len;
3511 }
3512 }
3513
3514
3515 /* pmap_virtual_space INTERFACE
3516 **
3517 * Return the current available range of virtual addresses in the
3518 * arguuments provided. Only really called once.
3519 */
3520 void
3521 pmap_virtual_space(vstart, vend)
3522 vaddr_t *vstart, *vend;
3523 {
3524 *vstart = virtual_avail;
3525 *vend = virtual_end;
3526 }
3527
3528 /*
3529 * Provide memory to the VM system.
3530 *
3531 * Assume avail_start is always in the
3532 * first segment as pmap_bootstrap does.
3533 */
3534 static void
3535 pmap_page_upload()
3536 {
3537 paddr_t a, b; /* memory range */
3538 int i;
3539
3540 /* Supply the memory in segments. */
3541 for (i = 0; i < SUN3X_NPHYS_RAM_SEGS; i++) {
3542 a = atop(avail_mem[i].pmem_start);
3543 b = atop(avail_mem[i].pmem_end);
3544 if (i == 0)
3545 a = atop(avail_start);
3546 if (avail_mem[i].pmem_end > avail_end)
3547 b = atop(avail_end);
3548
3549 uvm_page_physload(a, b, a, b, VM_FREELIST_DEFAULT);
3550
3551 if (avail_mem[i].pmem_next == NULL)
3552 break;
3553 }
3554 }
3555
3556 /* pmap_count INTERFACE
3557 **
3558 * Return the number of resident (valid) pages in the given pmap.
3559 *
3560 * Note: If this function is handed the kernel map, it will report
3561 * that it has no mappings. Hopefully the VM system won't ask for kernel
3562 * map statistics.
3563 */
3564 segsz_t
3565 pmap_count(pmap, type)
3566 pmap_t pmap;
3567 int type;
3568 {
3569 u_int count;
3570 int a_idx, b_idx;
3571 a_tmgr_t *a_tbl;
3572 b_tmgr_t *b_tbl;
3573 c_tmgr_t *c_tbl;
3574
3575 /*
3576 * If the pmap does not have its own A table manager, it has no
3577 * valid entires.
3578 */
3579 if (pmap->pm_a_tmgr == NULL)
3580 return 0;
3581
3582 a_tbl = pmap->pm_a_tmgr;
3583
3584 count = 0;
3585 for (a_idx = 0; a_idx < MMU_TIA(KERNBASE); a_idx++) {
3586 if (MMU_VALID_DT(a_tbl->at_dtbl[a_idx])) {
3587 b_tbl = mmuB2tmgr(mmu_ptov(a_tbl->at_dtbl[a_idx].addr.raw));
3588 for (b_idx = 0; b_idx < MMU_B_TBL_SIZE; b_idx++) {
3589 if (MMU_VALID_DT(b_tbl->bt_dtbl[b_idx])) {
3590 c_tbl = mmuC2tmgr(
3591 mmu_ptov(MMU_DTE_PA(b_tbl->bt_dtbl[b_idx])));
3592 if (type == 0)
3593 /*
3594 * A resident entry count has been requested.
3595 */
3596 count += c_tbl->ct_ecnt;
3597 else
3598 /*
3599 * A wired entry count has been requested.
3600 */
3601 count += c_tbl->ct_wcnt;
3602 }
3603 }
3604 }
3605 }
3606
3607 return count;
3608 }
3609
3610 /************************ SUN3 COMPATIBILITY ROUTINES ********************
3611 * The following routines are only used by DDB for tricky kernel text *
3612 * text operations in db_memrw.c. They are provided for sun3 *
3613 * compatibility. *
3614 *************************************************************************/
3615 /* get_pte INTERNAL
3616 **
3617 * Return the page descriptor the describes the kernel mapping
3618 * of the given virtual address.
3619 */
3620 extern u_long ptest_addr __P((u_long)); /* XXX: locore.s */
3621 u_int
3622 get_pte(va)
3623 vaddr_t va;
3624 {
3625 u_long pte_pa;
3626 mmu_short_pte_t *pte;
3627
3628 /* Get the physical address of the PTE */
3629 pte_pa = ptest_addr(va & ~PGOFSET);
3630
3631 /* Convert to a virtual address... */
3632 pte = (mmu_short_pte_t *) (KERNBASE + pte_pa);
3633
3634 /* Make sure it is in our level-C tables... */
3635 if ((pte < kernCbase) ||
3636 (pte >= &mmuCbase[NUM_USER_PTES]))
3637 return 0;
3638
3639 /* ... and just return its contents. */
3640 return (pte->attr.raw);
3641 }
3642
3643
3644 /* set_pte INTERNAL
3645 **
3646 * Set the page descriptor that describes the kernel mapping
3647 * of the given virtual address.
3648 */
3649 void
3650 set_pte(va, pte)
3651 vaddr_t va;
3652 u_int pte;
3653 {
3654 u_long idx;
3655
3656 if (va < KERNBASE)
3657 return;
3658
3659 idx = (unsigned long) m68k_btop(va - KERNBASE);
3660 kernCbase[idx].attr.raw = pte;
3661 TBIS(va);
3662 }
3663
3664 /*
3665 * Routine: pmap_procwr
3666 *
3667 * Function:
3668 * Synchronize caches corresponding to [addr, addr+len) in p.
3669 */
3670 void
3671 pmap_procwr(p, va, len)
3672 struct proc *p;
3673 vaddr_t va;
3674 size_t len;
3675 {
3676 (void)cachectl1(0x80000004, va, len, p);
3677 }
3678
3679
3680 #ifdef PMAP_DEBUG
3681 /************************** DEBUGGING ROUTINES **************************
3682 * The following routines are meant to be an aid to debugging the pmap *
3683 * system. They are callable from the DDB command line and should be *
3684 * prepared to be handed unstable or incomplete states of the system. *
3685 ************************************************************************/
3686
3687 /* pv_list
3688 **
3689 * List all pages found on the pv list for the given physical page.
3690 * To avoid endless loops, the listing will stop at the end of the list
3691 * or after 'n' entries - whichever comes first.
3692 */
3693 void
3694 pv_list(pa, n)
3695 paddr_t pa;
3696 int n;
3697 {
3698 int idx;
3699 vaddr_t va;
3700 pv_t *pv;
3701 c_tmgr_t *c_tbl;
3702 pmap_t pmap;
3703
3704 pv = pa2pv(pa);
3705 idx = pv->pv_idx;
3706 for (; idx != PVE_EOL && n > 0; idx = pvebase[idx].pve_next, n--) {
3707 va = pmap_get_pteinfo(idx, &pmap, &c_tbl);
3708 printf("idx %d, pmap 0x%x, va 0x%x, c_tbl %x\n",
3709 idx, (u_int) pmap, (u_int) va, (u_int) c_tbl);
3710 }
3711 }
3712 #endif /* PMAP_DEBUG */
3713
3714 #ifdef NOT_YET
3715 /* and maybe not ever */
3716 /************************** LOW-LEVEL ROUTINES **************************
3717 * These routines will eventualy be re-written into assembly and placed *
3718 * in locore.s. They are here now as stubs so that the pmap module can *
3719 * be linked as a standalone user program for testing. *
3720 ************************************************************************/
3721 /* flush_atc_crp INTERNAL
3722 **
3723 * Flush all page descriptors derived from the given CPU Root Pointer
3724 * (CRP), or 'A' table as it is known here, from the 68851's automatic
3725 * cache.
3726 */
3727 void
3728 flush_atc_crp(a_tbl)
3729 {
3730 mmu_long_rp_t rp;
3731
3732 /* Create a temporary root table pointer that points to the
3733 * given A table.
3734 */
3735 rp.attr.raw = ~MMU_LONG_RP_LU;
3736 rp.addr.raw = (unsigned int) a_tbl;
3737
3738 mmu_pflushr(&rp);
3739 /* mmu_pflushr:
3740 * movel sp(4)@,a0
3741 * pflushr a0@
3742 * rts
3743 */
3744 }
3745 #endif /* NOT_YET */
3746