/*	$NetBSD: pmap.c,v 1.85 2004/05/16 15:44:10 wiz Exp $	*/

/*-
 * Copyright (c) 1996, 1997 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jeremy Cooper.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * XXX These comments aren't quite accurate.  Need to change.
 * The sun3x uses the MC68851 Memory Management Unit, which is built
 * into the CPU.  The 68851 maps virtual to physical addresses using
 * a multi-level table lookup, which is stored in the very memory that
 * it maps.  The number of levels of lookup is configurable from one
 * to four.  In this implementation, we use three, named 'A' through 'C'.
 *
 * The MMU translates virtual addresses into physical addresses by
 * traversing these tables in a process called a 'table walk'.  The most
 * significant 7 bits of the Virtual Address ('VA') being translated are
 * used as an index into the level A table, whose base in physical memory
 * is stored in a special MMU register, the 'CPU Root Pointer' or CRP.  The
 * address found at that index in the A table is used as the base
 * address for the next table, the B table.  The next six bits of the VA are
 * used as an index into the B table, which in turn gives the base address
 * of the third and final C table.
 *
 * The next six bits of the VA are used as an index into the C table to
 * locate a Page Table Entry (PTE).  The PTE is a physical address in memory
 * to which the remaining 13 bits of the VA are added, producing the
 * mapped physical address.
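 *
 * As a worked example (for illustration only, using the 7/6/6/13 bit
 * split described above): for VA 0x12345678,
 *	A index = VA >> 25          = 0x09
 *	B index = (VA >> 19) & 0x3f = 0x06
 *	C index = (VA >> 13) & 0x3f = 0x22
 *	offset  = VA & 0x1fff       = 0x1678
 * so the walk reads A[0x09] to find the B table, B[0x06] to find the
 * C table, and C[0x22] to find the PTE for the 8K page containing
 * the VA.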
 *
 * To map the entire memory space in this manner would require 2114296 bytes
 * of page tables per process - quite expensive.  Instead we will
 * allocate a fixed but considerably smaller space for the page tables at
 * the time the VM system is initialized.  When the pmap code is asked by
 * the kernel to map a VA to a PA, it allocates tables as needed from this
 * pool.  When there are no more tables in the pool, tables are stolen
 * from the oldest mapped entries in the tree.  This is only possible
 * because all memory mappings are stored in the kernel memory map
 * structures, independent of the pmap structures.  A VA which references
 * one of these invalidated maps will cause a page fault.  The kernel
 * will determine that the page fault was caused by a task using a valid
 * VA, but for some reason (which does not concern it), that address was
 * not mapped.  It will ask the pmap code to re-map the entry and then
 * it will resume executing the faulting task.
 *
 * In this manner the most efficient use of the page table space is
 * achieved.  Tasks which do not execute often will have their tables
 * stolen and reused by tasks which execute more frequently.  The best
 * size for the page table pool will probably be determined by
 * experimentation.
 *
 * You read all of the comments so far.  Good for you.
 * Now go play!
 */

/*** A Note About the 68851 Address Translation Cache
 * The MC68851 has a 64 entry cache, called the Address Translation Cache
 * or 'ATC'.  This cache stores the most recently used page descriptors
 * accessed by the MMU when it does translations.  Using a marker called a
 * 'task alias' the MMU can store the descriptors from 8 different table
 * spaces concurrently.  The task alias is associated with the base
 * address of the level A table of that address space.  When an address
 * space is currently active (the CRP currently points to its A table)
 * the only cached descriptors that will be obeyed are ones which have a
 * matching task alias of the current space associated with them.
 *
 * Since the cache is always consulted before any table lookups are done,
 * it is important that it accurately reflect the state of the MMU tables.
 * Whenever a change has been made to a table that has been loaded into
 * the MMU, the code must be sure to flush any cached entries that are
 * affected by the change.  These instances are documented in the code at
 * various points.
 */
/*** A Note About the Note About the 68851 Address Translation Cache
 * 4 months into this code I discovered that the sun3x does not have
 * a MC68851 chip.  Instead, it has a version of this MMU that is part of
 * the 68030 CPU.
 * Although it behaves very similarly to the 68851, it only has 1 task
 * alias and a 22 entry cache.  So sadly (or happily), the first paragraph
 * of the previous note does not apply to the sun3x pmap.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.85 2004/05/16 15:44:10 wiz Exp $");

#include "opt_ddb.h"
#include "opt_pmap_debug.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/user.h>
#include <sys/queue.h>
#include <sys/kcore.h>

#include <uvm/uvm.h>

#include <machine/cpu.h>
#include <machine/kcore.h>
#include <machine/mon.h>
#include <machine/pmap.h>
#include <machine/pte.h>
#include <machine/vmparam.h>
#include <m68k/cacheops.h>

#include <sun3/sun3/cache.h>
#include <sun3/sun3/machdep.h>

#include "pmap_pvt.h"

/* XXX - What headers declare these? */
extern struct pcb *curpcb;
extern int physmem;

/* Defined in locore.s */
extern char kernel_text[];

/* Defined by the linker */
extern char etext[], edata[], end[];
extern char *esym;	/* DDB */

/*************************** DEBUGGING DEFINITIONS ***********************
 * Macros, preprocessor defines and variables used in debugging can make *
 * code hard to read.  Anything used exclusively for debugging purposes  *
 * is defined here to avoid having such mess scattered around the file.  *
 *************************************************************************/
#ifdef	PMAP_DEBUG
/*
 * To aid the debugging process, macros should be expanded into smaller steps
 * that accomplish the same goal, yet provide convenient places for placing
 * breakpoints.  When this code is compiled with PMAP_DEBUG mode defined, the
 * 'INLINE' keyword is defined to an empty string.  This way, any function
 * defined to be a 'static INLINE' will become 'outlined' and compiled as
 * a separate function, which is much easier to debug.
 */
#define	INLINE	/* nothing */

/*
 * It is sometimes convenient to watch the activity of a particular table
 * in the system.  The following variables are used for that purpose.
 */
a_tmgr_t *pmap_watch_atbl = 0;
b_tmgr_t *pmap_watch_btbl = 0;
c_tmgr_t *pmap_watch_ctbl = 0;

int pmap_debug = 0;
#define	DPRINT(args) if (pmap_debug) printf args

#else	/********** Stuff below is defined if NOT debugging **************/

#define	INLINE	inline
#define	DPRINT(args)	/* nada */

#endif	/* PMAP_DEBUG */
/*********************** END OF DEBUGGING DEFINITIONS ********************/

/*** Management Structure - Memory Layout
 * For every MMU table in the sun3x pmap system there must be a way to
 * manage it; we must know which process is using it, what other tables
 * depend on it, and whether or not it contains any locked pages.  This
 * is solved by the creation of 'table management' or 'tmgr'
 * structures.  One for each MMU table in the system.
 *
 *		MAP OF MEMORY USED BY THE PMAP SYSTEM
 *
 *		towards lower memory
 * kernAbase -> +-------------------------------------------------------+
 *		| Kernel     MMU A level table                          |
 * kernBbase -> +-------------------------------------------------------+
 *		| Kernel     MMU B level tables                         |
 * kernCbase -> +-------------------------------------------------------+
 *		|                                                       |
 *		| Kernel     MMU C level tables                         |
 *		|                                                       |
 * mmuCbase  -> +-------------------------------------------------------+
 *		| User       MMU C level tables                         |
 * mmuAbase  -> +-------------------------------------------------------+
 *		|                                                       |
 *		| User       MMU A level tables                         |
 *		|                                                       |
 * mmuBbase  -> +-------------------------------------------------------+
 *		| User       MMU B level tables                         |
 * tmgrAbase -> +-------------------------------------------------------+
 *		| TMGR A level table structures                         |
 * tmgrBbase -> +-------------------------------------------------------+
 *		| TMGR B level table structures                         |
 * tmgrCbase -> +-------------------------------------------------------+
 *		| TMGR C level table structures                         |
 * pvbase    -> +-------------------------------------------------------+
 *		| Physical to Virtual mapping table (list heads)        |
 * pvebase   -> +-------------------------------------------------------+
 *		| Physical to Virtual mapping table (list elements)     |
 *		|                                                       |
 *		+-------------------------------------------------------+
 *		towards higher memory
 *
 * For every A table in the MMU A area, there will be a corresponding
 * a_tmgr structure in the TMGR A area.  The same will be true for
 * the B and C tables.  This arrangement will make it easy to find the
 * controlling tmgr structure for any table in the system by use of
 * (relatively) simple macros.
 */

/*
 * Global variables for storing the base addresses for the areas
 * labeled above.
 */
static vaddr_t		kernAphys;
static mmu_long_dte_t	*kernAbase;
static mmu_short_dte_t	*kernBbase;
static mmu_short_pte_t	*kernCbase;
static mmu_short_pte_t	*mmuCbase;
static mmu_short_dte_t	*mmuBbase;
static mmu_long_dte_t	*mmuAbase;
static a_tmgr_t		*Atmgrbase;
static b_tmgr_t		*Btmgrbase;
static c_tmgr_t		*Ctmgrbase;
static pv_t		*pvbase;
static pv_elem_t	*pvebase;
struct pmap		kernel_pmap;

/*
 * This holds the CRP currently loaded into the MMU.
 */
struct mmu_rootptr kernel_crp;

/*
 * Just all around global variables.
 */
static TAILQ_HEAD(a_pool_head_struct, a_tmgr_struct) a_pool;
static TAILQ_HEAD(b_pool_head_struct, b_tmgr_struct) b_pool;
static TAILQ_HEAD(c_pool_head_struct, c_tmgr_struct) c_pool;


/*
 * Flags used to mark the safety/availability of certain operations or
 * resources.
 */
static boolean_t bootstrap_alloc_enabled = FALSE; /* Safe to use pmap_bootstrap_alloc(). */
int tmp_vpages_inuse;	/* Temporary virtual pages are in use */

/*
 * XXX:  For now, retain the traditional variables that were
 * used in the old pmap/vm interface (without NONCONTIG).
 */
/* Kernel virtual address space available: */
vaddr_t	virtual_avail, virtual_end;
/* Physical address space available: */
paddr_t	avail_start, avail_end;

/* This keeps track of the end of the contiguously mapped range. */
vaddr_t virtual_contig_end;

/* Physical address used by pmap_next_page() */
paddr_t avail_next;

/* These are used by pmap_copy_page(), etc. */
vaddr_t tmp_vpages[2];

/* memory pool for pmap structures */
struct pool	pmap_pmap_pool;

/*
 * The 3/80 is the only member of the sun3x family that has non-contiguous
 * physical memory.  Memory is divided into 4 banks which are physically
 * locatable on the system board.  Although the size of these banks varies
 * with the size of memory they contain, their base addresses are
 * permanently fixed.  The following structure, which describes these
 * banks, is initialized by pmap_bootstrap() after it reads from a similar
 * structure provided by the ROM Monitor.
 *
 * For the other machines in the sun3x architecture which do have contiguous
 * RAM, this list will have only one entry, which will describe the entire
 * range of available memory.
 */
struct pmap_physmem_struct avail_mem[SUN3X_NPHYS_RAM_SEGS];
u_int total_phys_mem;

/*************************************************************************/

/*
 * XXX - Should "tune" these based on statistics.
 *
 * My first guess about the relative numbers of these needed is
 * based on the fact that a "typical" process will have several
 * pages mapped at low virtual addresses (text, data, bss), then
 * some mapped shared libraries, and then some stack pages mapped
 * near the high end of the VA space.  Each process can use only
 * one A table, and most will use only two B tables (maybe three)
 * and probably about four C tables.  Therefore, the first guess
 * at the relative numbers of these needed is 1:2:4 -gwr
 *
 * The number of C tables needed is closely related to the amount
 * of physical memory available plus a certain amount attributable
 * to the use of double mappings.  With a few simulation statistics
 * we can find a reasonably good estimation of this unknown value.
 * Armed with that and the above ratios, we have a good idea of what
 * is needed at each level. -j
 *
 * Note: It is not the physical memory size, but the total mapped
 * virtual space required by the combined working sets of all the
 * currently _runnable_ processes.  (Sleeping ones don't count.)
 * The amount of physical memory should be irrelevant. -gwr
 */
#ifdef	FIXED_NTABLES
#define NUM_A_TABLES	16
#define NUM_B_TABLES	32
#define NUM_C_TABLES	64
#else
unsigned int	NUM_A_TABLES, NUM_B_TABLES, NUM_C_TABLES;
#endif	/* FIXED_NTABLES */

/*
 * This determines our total virtual mapping capacity.
 * Yes, it is a FIXED value so we can pre-allocate.
 */
#define NUM_USER_PTES	(NUM_C_TABLES * MMU_C_TBL_SIZE)

/*
 * The size of the Kernel Virtual Address Space (KVAS)
 * for purposes of MMU table allocation is -KERNBASE
 * (length from KERNBASE to 0xFFFFffff)
 */
#define KVAS_SIZE	(-KERNBASE)

/* Numbers of kernel MMU tables to support KVAS_SIZE. */
#define KERN_B_TABLES	(KVAS_SIZE >> MMU_TIA_SHIFT)
#define KERN_C_TABLES	(KVAS_SIZE >> MMU_TIB_SHIFT)
#define	NUM_KERN_PTES	(KVAS_SIZE >> MMU_TIC_SHIFT)
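
/*
 * A worked example of the macros above (illustrative; it assumes
 * KERNBASE is 0xF8000000 and the 7/6/6/13 VA split described in the
 * opening comments, i.e. MMU_TIA_SHIFT/MMU_TIB_SHIFT/MMU_TIC_SHIFT
 * of 25/19/13):
 *	KVAS_SIZE     = -0xF8000000      = 0x08000000 (128MB)
 *	KERN_B_TABLES = 0x08000000 >> 25 = 4
 *	KERN_C_TABLES = 0x08000000 >> 19 = 256
 *	NUM_KERN_PTES = 0x08000000 >> 13 = 16384
 * which is self-consistent: 4 B tables * 64 entries = 256 C tables,
 * and 256 C tables * 64 entries = 16384 PTEs, each mapping one 8K
 * page of the 128MB kernel address space.
 */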

/*************************** MISCELLANEOUS MACROS *************************/
#define pmap_lock(pmap) simple_lock(&pmap->pm_lock)
#define pmap_unlock(pmap) simple_unlock(&pmap->pm_lock)
#define pmap_add_ref(pmap) ++pmap->pm_refcount
#define pmap_del_ref(pmap) --pmap->pm_refcount
#define pmap_refcount(pmap) pmap->pm_refcount

void *pmap_bootstrap_alloc(int);

static INLINE void *mmu_ptov __P((paddr_t));
static INLINE paddr_t mmu_vtop __P((void *));

#if	0
static INLINE a_tmgr_t *mmuA2tmgr __P((mmu_long_dte_t *));
#endif /* 0 */
static INLINE b_tmgr_t *mmuB2tmgr __P((mmu_short_dte_t *));
static INLINE c_tmgr_t *mmuC2tmgr __P((mmu_short_pte_t *));

static INLINE pv_t *pa2pv __P((paddr_t));
static INLINE int pteidx __P((mmu_short_pte_t *));
static INLINE pmap_t current_pmap __P((void));

/*
 * We can always convert between virtual and physical addresses
 * for anything in the range [KERNBASE ... avail_start] because
 * that range is GUARANTEED to be mapped linearly.
 * We rely heavily upon this feature!
 */
static INLINE void *
mmu_ptov(pa)
	paddr_t pa;
{
	vaddr_t va;

	va = (pa + KERNBASE);
#ifdef	PMAP_DEBUG
	if ((va < KERNBASE) || (va >= virtual_contig_end))
		panic("mmu_ptov");
#endif
	return ((void*)va);
}

static INLINE paddr_t
mmu_vtop(vva)
	void *vva;
{
	vaddr_t va;

	va = (vaddr_t)vva;
#ifdef	PMAP_DEBUG
	if ((va < KERNBASE) || (va >= virtual_contig_end))
		panic("mmu_vtop");
#endif
	return (va - KERNBASE);
}

/*
 * These macros map MMU tables to their corresponding manager structures.
 * They are needed quite often because many of the pointers in the pmap
 * system reference MMU tables and not the structures that control them.
 * There needs to be a way to find one when given the other and these
 * macros do so by taking advantage of the memory layout described above.
 * Here's a quick step through the first macro, mmuA2tmgr():
 *
 * 1) find the offset of the given MMU A table from the base of its table
 *    pool (table - mmuAbase).
 * 2) convert this offset into a table index by dividing it by the
 *    size of one MMU 'A' table. (sizeof(mmu_long_dte_t) * MMU_A_TBL_SIZE)
 * 3) use this index to select the corresponding 'A' table manager
 *    structure from the 'A' table manager pool (Atmgrbase[index]).
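 *
 * For example (illustrative): an A table starting at
 * &mmuAbase[2 * MMU_A_TBL_SIZE] is the third table in the pool, so
 * steps 1 and 2 yield index 2 and step 3 returns &Atmgrbase[2].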
 */
/* This function is not currently used. */
#if	0
static INLINE a_tmgr_t *
mmuA2tmgr(mmuAtbl)
	mmu_long_dte_t *mmuAtbl;
{
	int idx;

	/* Which table is this in? */
	idx = (mmuAtbl - mmuAbase) / MMU_A_TBL_SIZE;
#ifdef	PMAP_DEBUG
	if ((idx < 0) || (idx >= NUM_A_TABLES))
		panic("mmuA2tmgr");
#endif
	return (&Atmgrbase[idx]);
}
#endif	/* 0 */

static INLINE b_tmgr_t *
mmuB2tmgr(mmuBtbl)
	mmu_short_dte_t *mmuBtbl;
{
	int idx;

	/* Which table is this in? */
	idx = (mmuBtbl - mmuBbase) / MMU_B_TBL_SIZE;
#ifdef	PMAP_DEBUG
	if ((idx < 0) || (idx >= NUM_B_TABLES))
		panic("mmuB2tmgr");
#endif
	return (&Btmgrbase[idx]);
}

/* mmuC2tmgr			INTERNAL
 **
 * Given a pte known to belong to a C table, return the address of
 * that table's management structure.
 */
static INLINE c_tmgr_t *
mmuC2tmgr(mmuCtbl)
	mmu_short_pte_t *mmuCtbl;
{
	int idx;

	/* Which table is this in? */
	idx = (mmuCtbl - mmuCbase) / MMU_C_TBL_SIZE;
#ifdef	PMAP_DEBUG
	if ((idx < 0) || (idx >= NUM_C_TABLES))
		panic("mmuC2tmgr");
#endif
	return (&Ctmgrbase[idx]);
}

/* This is now a function call below.
 * #define pa2pv(pa) \
 *	(&pvbase[(unsigned long)\
 *		m68k_btop(pa)\
 *	])
 */

/* pa2pv			INTERNAL
 **
 * Return the pv_list_head element which manages the given physical
 * address.
 */
static INLINE pv_t *
pa2pv(pa)
	paddr_t pa;
{
	struct pmap_physmem_struct *bank;
	int idx;

	bank = &avail_mem[0];
	while (pa >= bank->pmem_end)
		bank = bank->pmem_next;

	pa -= bank->pmem_start;
	idx = bank->pmem_pvbase + m68k_btop(pa);
#ifdef	PMAP_DEBUG
	if ((idx < 0) || (idx >= physmem))
		panic("pa2pv");
#endif
	return &pvbase[idx];
}
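
/*
 * A worked example of the bank arithmetic in pa2pv() (hypothetical
 * bank layout, for illustration only): with 8K pages, a bank 0 of
 * [0, 0x400000) and a bank 1 of [0x10000000, 0x10400000), bank 1's
 * pmem_pvbase is m68k_btop(0x400000) = 512.  A lookup of
 * pa = 0x10004000 then walks to bank 1, computes the in-bank offset
 * 0x4000 (2 pages), and returns &pvbase[512 + 2].
 */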

/* pteidx			INTERNAL
 **
 * Return the index of the given PTE within the entire fixed table of
 * PTEs.
 */
static INLINE int
pteidx(pte)
	mmu_short_pte_t *pte;
{
	return (pte - kernCbase);
}

/*
 * This just offers a place to put some debugging checks,
 * and reduces the number of places "curlwp" appears...
 */
static INLINE pmap_t
current_pmap()
{
	struct vmspace *vm;
	struct vm_map *map;
	pmap_t	pmap;

	if (curlwp == NULL)
		pmap = &kernel_pmap;
	else {
		vm = curproc->p_vmspace;
		map = &vm->vm_map;
		pmap = vm_map_pmap(map);
	}

	return (pmap);
}


/*************************** FUNCTION DEFINITIONS ************************
 * These appear here merely for the compiler to enforce type checking on *
 * all function calls.                                                   *
 *************************************************************************/

/** Internal functions
 ** Most functions used only within this module are defined in
 ** pmap_pvt.h (why not here if used only here?)
 **/
static void pmap_page_upload __P((void));

/** Interface functions
 ** - functions required by the Mach VM Pmap interface, with MACHINE_CONTIG
 **   defined.
 **/
void pmap_pinit __P((pmap_t));
void pmap_release __P((pmap_t));

/********************************** CODE ********************************
 * Functions that are called from other parts of the kernel are labeled *
 * as 'INTERFACE' functions.  Functions that are only called from       *
 * within the pmap module are labeled as 'INTERNAL' functions.          *
 * Functions that are internal, but are not (currently) used at all are *
 * labeled 'INTERNAL_X'.                                                *
 ************************************************************************/

/* pmap_bootstrap			INTERNAL
 **
 * Initializes the pmap system.  Called at boot time from
 * locore2.c:_vm_init()
 *
 * Reminder: having a pmap_bootstrap_alloc() and also having the VM
 *           system implement pmap_steal_memory() is redundant.
 *           Don't release this code without removing one or the other!
 */
void
pmap_bootstrap(nextva)
	vaddr_t nextva;
{
	struct physmemory *membank;
	struct pmap_physmem_struct *pmap_membank;
	vaddr_t va, eva;
	paddr_t pa;
	int b, c, i, j;	/* running table counts */
	int size, resvmem;

	/*
	 * This function is called by __bootstrap after it has
	 * determined the type of machine and made the appropriate
	 * patches to the ROM vectors (XXX- I don't quite know what I meant
	 * by that.)  It allocates and sets up enough of the pmap system
	 * to manage the kernel's address space.
	 */

	/*
	 * Determine the range of kernel virtual and physical
	 * space available.  Note that we ABSOLUTELY DEPEND on
	 * the fact that the first bank of memory (4MB) is
	 * mapped linearly to KERNBASE (which we guaranteed in
	 * the first instructions of locore.s).
	 * That is plenty for our bootstrap work.
	 */
	virtual_avail = m68k_round_page(nextva);
	virtual_contig_end = KERNBASE + 0x400000; /* +4MB */
	virtual_end = VM_MAX_KERNEL_ADDRESS;
	/* Don't need avail_start til later. */

	/* We may now call pmap_bootstrap_alloc(). */
	bootstrap_alloc_enabled = TRUE;

	/*
	 * This is a somewhat unwrapped loop to deal with
	 * copying the PROM's 'physmem' banks into the pmap's
	 * banks.  The following is always assumed:
	 * 1. There is always at least one bank of memory.
	 * 2. There is always a last bank of memory, and its
	 *    pmem_next member must be set to NULL.
	 */
	membank = romVectorPtr->v_physmemory;
	pmap_membank = avail_mem;
	total_phys_mem = 0;

	for (;;) { /* break on !membank */
		pmap_membank->pmem_start = membank->address;
		pmap_membank->pmem_end = membank->address + membank->size;
		total_phys_mem += membank->size;
		membank = membank->next;
		if (!membank)
			break;
		/* This silly syntax arises because pmap_membank
		 * is really a pre-allocated array, but it is put into
		 * use as a linked list.
		 */
		pmap_membank->pmem_next = pmap_membank + 1;
		pmap_membank = pmap_membank->pmem_next;
	}
	/* This is the last element. */
	pmap_membank->pmem_next = NULL;

	/*
	 * Note: total_phys_mem, physmem represent
	 * actual physical memory, including that
	 * reserved for the PROM monitor.
	 */
	physmem = btoc(total_phys_mem);

	/*
	 * Avail_end is set to the first byte of physical memory
	 * after the end of the last bank.  We use this only to
	 * determine if a physical address is "managed" memory.
	 * This address range should be reduced to prevent the
	 * physical pages needed by the PROM monitor from being used
	 * in the VM system.
	 */
	resvmem = total_phys_mem - *(romVectorPtr->memoryAvail);
	resvmem = m68k_round_page(resvmem);
	avail_end = pmap_membank->pmem_end - resvmem;

	/*
	 * First allocate enough kernel MMU tables to map all
	 * of kernel virtual space from KERNBASE to 0xFFFFFFFF.
	 * Note: All must be aligned on 256 byte boundaries.
	 * Start with the level-A table (one of those).
	 */
	size = sizeof(mmu_long_dte_t) * MMU_A_TBL_SIZE;
	kernAbase = pmap_bootstrap_alloc(size);
	memset(kernAbase, 0, size);

	/* Now the level-B kernel tables... */
	size = sizeof(mmu_short_dte_t) * MMU_B_TBL_SIZE * KERN_B_TABLES;
	kernBbase = pmap_bootstrap_alloc(size);
	memset(kernBbase, 0, size);

	/* Now the level-C kernel tables... */
	size = sizeof(mmu_short_pte_t) * MMU_C_TBL_SIZE * KERN_C_TABLES;
	kernCbase = pmap_bootstrap_alloc(size);
	memset(kernCbase, 0, size);
	/*
	 * Note: In order for the PV system to work correctly, the kernel
	 * and user-level C tables must be allocated contiguously.
	 * Nothing should be allocated between here and the allocation of
	 * mmuCbase below.  XXX: Should do this as one allocation, and
	 * then compute a pointer for mmuCbase instead of this...
	 *
	 * Allocate user MMU tables.
	 * These must be contiguous with the preceding.
	 */

#ifndef	FIXED_NTABLES
	/*
	 * The number of user-level C tables that should be allocated is
	 * related to the size of physical memory.  In general, there should
	 * be enough tables to map four times the amount of available RAM.
	 * The extra amount is needed because some table space is wasted by
	 * fragmentation.
	 */
	NUM_C_TABLES = (total_phys_mem * 4) / (MMU_C_TBL_SIZE * MMU_PAGE_SIZE);
	NUM_B_TABLES = NUM_C_TABLES / 2;
	NUM_A_TABLES = NUM_B_TABLES / 2;
#endif	/* !FIXED_NTABLES */
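
	/*
	 * As a worked example of the sizing above (illustrative only,
	 * assuming 64-entry C tables and 8K pages): on a 16MB machine,
	 * NUM_C_TABLES = (16MB * 4) / (64 * 8192) = 64MB / 512KB = 128,
	 * so NUM_B_TABLES = 64 and NUM_A_TABLES = 32.
	 */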

	size = sizeof(mmu_short_pte_t) * MMU_C_TBL_SIZE * NUM_C_TABLES;
	mmuCbase = pmap_bootstrap_alloc(size);

	size = sizeof(mmu_short_dte_t) * MMU_B_TBL_SIZE * NUM_B_TABLES;
	mmuBbase = pmap_bootstrap_alloc(size);

	size = sizeof(mmu_long_dte_t) * MMU_A_TBL_SIZE * NUM_A_TABLES;
	mmuAbase = pmap_bootstrap_alloc(size);

	/*
	 * Fill in the never-changing part of the kernel tables.
	 * For simplicity, the kernel's mappings will be editable as a
	 * flat array of page table entries at kernCbase.  The
	 * higher level 'A' and 'B' tables must be initialized to point
	 * to this lower one.
	 */
	b = c = 0;

	/*
	 * Invalidate all mappings below KERNBASE in the A table.
	 * This area has already been zeroed out, but it is good
	 * practice to explicitly show that we are interpreting
	 * it as a list of A table descriptors.
	 */
	for (i = 0; i < MMU_TIA(KERNBASE); i++) {
		kernAbase[i].addr.raw = 0;
	}

	/*
	 * Set up the kernel A and B tables so that they will reference the
	 * correct spots in the contiguous table of PTEs allocated for the
	 * kernel's virtual memory space.
	 */
	for (i = MMU_TIA(KERNBASE); i < MMU_A_TBL_SIZE; i++) {
		kernAbase[i].attr.raw =
		    MMU_LONG_DTE_LU | MMU_LONG_DTE_SUPV | MMU_DT_SHORT;
		kernAbase[i].addr.raw = mmu_vtop(&kernBbase[b]);

		for (j=0; j < MMU_B_TBL_SIZE; j++) {
			kernBbase[b + j].attr.raw = mmu_vtop(&kernCbase[c])
			    | MMU_DT_SHORT;
			c += MMU_C_TBL_SIZE;
		}
		b += MMU_B_TBL_SIZE;
	}
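
	/*
	 * Coverage check for the loop above (illustrative, using the
	 * 7/6/6/13 VA split from the opening comments): each A entry
	 * spans 2^25 bytes (32MB) through one 64-entry B table, each B
	 * entry spans 2^19 bytes (512KB) through one 64-entry C table,
	 * and each PTE maps one 2^13 byte (8K) page.  Advancing c by
	 * MMU_C_TBL_SIZE per B entry and b by MMU_B_TBL_SIZE per A
	 * entry therefore walks the flat kernCbase PTE array exactly
	 * once, in step with the kernel VA range.
	 */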

	pmap_alloc_usermmu();	/* Allocate user MMU tables.	*/
	pmap_alloc_usertmgr();	/* Allocate user MMU table managers.*/
	pmap_alloc_pv();	/* Allocate physical->virtual map.	*/

	/*
	 * We are now done with pmap_bootstrap_alloc().  Round up
	 * `virtual_avail' to the nearest page, and set the flag
	 * to prevent use of pmap_bootstrap_alloc() hereafter.
	 */
	pmap_bootstrap_aalign(PAGE_SIZE);
	bootstrap_alloc_enabled = FALSE;

	/*
	 * Now that we are done with pmap_bootstrap_alloc(), we
	 * must save the virtual and physical addresses of the
	 * end of the linearly mapped range, which are stored in
	 * virtual_contig_end and avail_start, respectively.
	 * These variables will never change after this point.
	 */
	virtual_contig_end = virtual_avail;
	avail_start = virtual_avail - KERNBASE;

	/*
	 * `avail_next' is a running pointer used by pmap_next_page() to
	 * keep track of the next available physical page to be handed
	 * to the VM system during its initialization, in which it
	 * asks for physical pages, one at a time.
	 */
	avail_next = avail_start;

	/*
	 * Now allocate some virtual addresses, but not the physical pages
	 * behind them.  Note that virtual_avail is already page-aligned.
	 *
	 * tmp_vpages[] is an array of two virtual pages used for temporary
	 * kernel mappings in the pmap module to facilitate various physical
	 * address-oriented operations.
	 */
	tmp_vpages[0] = virtual_avail;
	virtual_avail += PAGE_SIZE;
	tmp_vpages[1] = virtual_avail;
	virtual_avail += PAGE_SIZE;

	/** Initialize the PV system **/
	pmap_init_pv();

	/*
	 * Fill in the kernel_pmap structure and kernel_crp.
	 */
	kernAphys = mmu_vtop(kernAbase);
	kernel_pmap.pm_a_tmgr = NULL;
	kernel_pmap.pm_a_phys = kernAphys;
	kernel_pmap.pm_refcount = 1; /* always in use */
	simple_lock_init(&kernel_pmap.pm_lock);

	kernel_crp.rp_attr = MMU_LONG_DTE_LU | MMU_DT_LONG;
	kernel_crp.rp_addr = kernAphys;

	/*
	 * Now pmap_enter_kernel() may be used safely and will be
	 * the main interface used hereafter to modify the kernel's
	 * virtual address space.  Note that since we are still running
	 * under the PROM's address table, none of these table modifications
	 * actually take effect until pmap_takeover_mmu() is called.
	 *
	 * Note: Our tables do NOT have the PROM linear mappings!
	 * Only the mappings created here exist in our tables, so
	 * remember to map anything we expect to use.
	 */
	va = (vaddr_t)KERNBASE;
	pa = 0;

	/*
	 * The first page of the kernel virtual address space is the msgbuf
	 * page.  The page attributes (data, non-cached) are set here, while
	 * the address is assigned to this global pointer in cpu_startup().
	 * It is non-cached, mostly due to paranoia.
	 */
	pmap_enter_kernel(va, pa|PMAP_NC, VM_PROT_ALL);
	va += PAGE_SIZE; pa += PAGE_SIZE;

	/* Next page is used as the temporary stack. */
	pmap_enter_kernel(va, pa, VM_PROT_ALL);
	va += PAGE_SIZE; pa += PAGE_SIZE;

	/*
	 * Map all of the kernel's text segment as read-only and cacheable.
	 * (Cacheable is implied by default).  Unfortunately, the last bytes
	 * of kernel text and the first bytes of kernel data will often be
	 * sharing the same page.  Therefore, the last page of kernel text
	 * has to be mapped as read/write, to accommodate the data.
	 */
	eva = m68k_trunc_page((vaddr_t)etext);
	for (; va < eva; va += PAGE_SIZE, pa += PAGE_SIZE)
		pmap_enter_kernel(va, pa, VM_PROT_READ|VM_PROT_EXECUTE);

	/*
	 * Map all of the kernel's data as read/write and cacheable.
	 * This includes: data, BSS, symbols, and everything in the
	 * contiguous memory used by pmap_bootstrap_alloc()
	 */
	for (; pa < avail_start; va += PAGE_SIZE, pa += PAGE_SIZE)
		pmap_enter_kernel(va, pa, VM_PROT_READ|VM_PROT_WRITE);

	/*
	 * At this point we are almost ready to take over the MMU.  But first
	 * we must save the PROM's address space in our map, as we call its
	 * routines and make references to its data later in the kernel.
	 */
	pmap_bootstrap_copyprom();
	pmap_takeover_mmu();
	pmap_bootstrap_setprom();

	/* Notify the VM system of our page size. */
	uvmexp.pagesize = PAGE_SIZE;
	uvm_setpagesize();

	pmap_page_upload();
}


/* pmap_alloc_usermmu			INTERNAL
 **
 * Called from pmap_bootstrap() to allocate MMU tables that will
 * eventually be used for user mappings.
 */
void
pmap_alloc_usermmu()
{
	/* XXX: Moved into caller. */
}

/* pmap_alloc_pv			INTERNAL
 **
 * Called from pmap_bootstrap() to allocate the physical
 * to virtual mapping list.  Each physical page of memory
 * in the system has a corresponding element in this list.
 */
void
pmap_alloc_pv()
{
	int i;
	unsigned int total_mem;

	/*
	 * Allocate a pv_head structure for every page of physical
	 * memory that will be managed by the system.  Since memory on
	 * the 3/80 is non-contiguous, we cannot arrive at a total page
	 * count by subtraction of the lowest available address from the
	 * highest, but rather we have to step through each memory
	 * bank and add the number of pages in each to the total.
	 *
	 * At this time we also initialize the offset of each bank's
	 * starting pv_head within the pv_head list so that the physical
	 * memory state routines (pmap_is_referenced(),
	 * pmap_is_modified(), et al.) can quickly find corresponding
	 * pv_heads in spite of the non-contiguity.
	 */
	total_mem = 0;
	for (i = 0; i < SUN3X_NPHYS_RAM_SEGS; i++) {
		avail_mem[i].pmem_pvbase = m68k_btop(total_mem);
		total_mem += avail_mem[i].pmem_end -
		    avail_mem[i].pmem_start;
		if (avail_mem[i].pmem_next == NULL)
			break;
	}
	pvbase = (pv_t *) pmap_bootstrap_alloc(sizeof(pv_t) *
	    m68k_btop(total_phys_mem));
}

/* pmap_alloc_usertmgr			INTERNAL
 **
 * Called from pmap_bootstrap() to allocate the structures which
 * facilitate management of user MMU tables.  Each user MMU table
 * in the system has one such structure associated with it.
 */
void
pmap_alloc_usertmgr()
{
	/* Allocate user MMU table managers */
	/* It would be a lot simpler to just make these BSS, but */
	/* we may want to change their size at boot time... -j */
	Atmgrbase = (a_tmgr_t *) pmap_bootstrap_alloc(sizeof(a_tmgr_t)
	    * NUM_A_TABLES);
	Btmgrbase = (b_tmgr_t *) pmap_bootstrap_alloc(sizeof(b_tmgr_t)
	    * NUM_B_TABLES);
	Ctmgrbase = (c_tmgr_t *) pmap_bootstrap_alloc(sizeof(c_tmgr_t)
	    * NUM_C_TABLES);

	/*
	 * Allocate PV list elements for the physical to virtual
	 * mapping system.
	 */
	pvebase = (pv_elem_t *) pmap_bootstrap_alloc(
	    sizeof(pv_elem_t) * (NUM_USER_PTES + NUM_KERN_PTES));
}

/* pmap_bootstrap_copyprom()			INTERNAL
 **
 * Copy the PROM mappings into our own tables.  Note, we
 * can use physical addresses until __bootstrap returns.
 */
void
pmap_bootstrap_copyprom()
{
	struct sunromvec *romp;
	int *mon_ctbl;
	mmu_short_pte_t *kpte;
	int i, len;

	romp = romVectorPtr;

	/*
	 * Copy the mappings in SUN3X_MON_KDB_BASE...SUN3X_MONEND
	 * Note: mon_ctbl[0] maps SUN3X_MON_KDB_BASE
	 */
	mon_ctbl = *romp->monptaddr;
	i = m68k_btop(SUN3X_MON_KDB_BASE - KERNBASE);
	kpte = &kernCbase[i];
	len = m68k_btop(SUN3X_MONEND - SUN3X_MON_KDB_BASE);

	for (i = 0; i < len; i++) {
		kpte[i].attr.raw = mon_ctbl[i];
	}

	/*
	 * Copy the mappings at MON_DVMA_BASE (to the end).
	 * Note, in here, mon_ctbl[0] maps MON_DVMA_BASE.
	 * Actually, we only want the last page, which the
	 * PROM has set up for use by the "ie" driver.
	 * (The i82586 needs its SCP there.)
	 * If we copy all the mappings, pmap_enter_kernel
	 * may complain about finding valid PTEs that are
	 * not recorded in our PV lists...
	 */
	mon_ctbl = *romp->shadowpteaddr;
	i = m68k_btop(SUN3X_MON_DVMA_BASE - KERNBASE);
	kpte = &kernCbase[i];
	len = m68k_btop(SUN3X_MON_DVMA_SIZE);
	for (i = (len-1); i < len; i++) {
		kpte[i].attr.raw = mon_ctbl[i];
	}
}

/* pmap_takeover_mmu			INTERNAL
 **
 * Called from pmap_bootstrap() after it has copied enough of the
 * PROM mappings into the kernel map so that we can use our own
 * MMU table.
 */
void
pmap_takeover_mmu()
{

	loadcrp(&kernel_crp);
}

/* pmap_bootstrap_setprom()			INTERNAL
 **
 * Set the PROM mappings so it can see kernel space.
 * Note that physical addresses are used here, which
 * we can get away with because this runs with the
 * low 1GB set for transparent translation.
 */
void
pmap_bootstrap_setprom()
{
	mmu_long_dte_t *mon_dte;
	extern struct mmu_rootptr mon_crp;
	int i;

	mon_dte = (mmu_long_dte_t *) mon_crp.rp_addr;
	for (i = MMU_TIA(KERNBASE); i < MMU_TIA(KERN_END); i++) {
		mon_dte[i].attr.raw = kernAbase[i].attr.raw;
		mon_dte[i].addr.raw = kernAbase[i].addr.raw;
	}
}


/* pmap_init			INTERFACE
 **
 * Called at the end of vm_init() to set up the pmap system to go
 * into full time operation.  All initialization of kernel_pmap
 * should be already done by now, so this should just do things
 * needed for user-level pmaps to work.
 */
void
pmap_init()
{
	/** Initialize the manager pools **/
	TAILQ_INIT(&a_pool);
	TAILQ_INIT(&b_pool);
	TAILQ_INIT(&c_pool);

	/**************************************************************
	 * Initialize all tmgr structures and MMU tables they manage. *
	 **************************************************************/
	/** Initialize A tables **/
	pmap_init_a_tables();
	/** Initialize B tables **/
	pmap_init_b_tables();
	/** Initialize C tables **/
	pmap_init_c_tables();

	/** Initialize the pmap pools **/
	pool_init(&pmap_pmap_pool, sizeof(struct pmap), 0, 0, 0, "pmappl",
	    &pool_allocator_nointr);
}

/* pmap_init_a_tables()			INTERNAL
 **
 * Initializes all A managers, their MMU A tables, and inserts
 * them into the A manager pool for use by the system.
 */
void
pmap_init_a_tables()
{
	int i;
	a_tmgr_t *a_tbl;

	for (i=0; i < NUM_A_TABLES; i++) {
		/* Select the next available A manager from the pool */
		a_tbl = &Atmgrbase[i];

		/*
		 * Clear its parent entry.  Set its wired and valid
		 * entry count to zero.
		 */
		a_tbl->at_parent = NULL;
		a_tbl->at_wcnt = a_tbl->at_ecnt = 0;

		/* Assign it the next available MMU A table from the pool */
		a_tbl->at_dtbl = &mmuAbase[i * MMU_A_TBL_SIZE];

		/*
		 * Initialize the MMU A table with the table in the `proc0',
		 * or kernel, mapping.  This ensures that every process has
		 * the kernel mapped in the top part of its address space.
		 */
		memcpy(a_tbl->at_dtbl, kernAbase, MMU_A_TBL_SIZE *
		    sizeof(mmu_long_dte_t));

		/*
		 * Finally, insert the manager into the A pool,
		 * making it ready to be used by the system.
		 */
		TAILQ_INSERT_TAIL(&a_pool, a_tbl, at_link);
	}
}

/* pmap_init_b_tables()			INTERNAL
 **
 * Initializes all B table managers, their MMU B tables, and
 * inserts them into the B manager pool for use by the system.
 */
void
pmap_init_b_tables()
{
	int i, j;
	b_tmgr_t *b_tbl;

	for (i=0; i < NUM_B_TABLES; i++) {
		/* Select the next available B manager from the pool */
		b_tbl = &Btmgrbase[i];

		b_tbl->bt_parent = NULL;	/* clear its parent,  */
		b_tbl->bt_pidx = 0;		/* parent index,      */
		b_tbl->bt_wcnt = 0;		/* wired entry count, */
		b_tbl->bt_ecnt = 0;		/* valid entry count. */

		/* Assign it the next available MMU B table from the pool */
		b_tbl->bt_dtbl = &mmuBbase[i * MMU_B_TBL_SIZE];

		/* Invalidate every descriptor in the table */
		for (j=0; j < MMU_B_TBL_SIZE; j++)
			b_tbl->bt_dtbl[j].attr.raw = MMU_DT_INVALID;

		/* Insert the manager into the B pool */
		TAILQ_INSERT_TAIL(&b_pool, b_tbl, bt_link);
	}
}

/* pmap_init_c_tables()			INTERNAL
 **
 * Initializes all C table managers, their MMU C tables, and
 * inserts them into the C manager pool for use by the system.
 */
void
pmap_init_c_tables()
{
	int i, j;
	c_tmgr_t *c_tbl;

	for (i=0; i < NUM_C_TABLES; i++) {
		/* Select the next available C manager from the pool */
		c_tbl = &Ctmgrbase[i];

		c_tbl->ct_parent = NULL;	/* clear its parent,    */
		c_tbl->ct_pidx = 0;		/* parent index,        */
		c_tbl->ct_wcnt = 0;		/* wired entry count,   */
		c_tbl->ct_ecnt = 0;		/* valid entry count,   */
		c_tbl->ct_pmap = NULL;		/* parent pmap,         */
		c_tbl->ct_va = 0;		/* base of managed range */

		/* Assign it the next available MMU C table from the pool */
		c_tbl->ct_dtbl = &mmuCbase[i * MMU_C_TBL_SIZE];

		for (j=0; j < MMU_C_TBL_SIZE; j++)
			c_tbl->ct_dtbl[j].attr.raw = MMU_DT_INVALID;

		TAILQ_INSERT_TAIL(&c_pool, c_tbl, ct_link);
	}
}

/* pmap_init_pv()			INTERNAL
 **
 * Initializes the Physical to Virtual mapping system.
 */
void
pmap_init_pv()
{
	int i;

	/* Initialize every PV head. */
	for (i = 0; i < m68k_btop(total_phys_mem); i++) {
		pvbase[i].pv_idx = PVE_EOL;	/* Indicate no mappings */
		pvbase[i].pv_flags = 0;		/* Zero out page flags  */
	}
}

/* get_a_table			INTERNAL
 **
 * Retrieve and return a level A table for use in a user map.
 */
a_tmgr_t *
get_a_table()
{
	a_tmgr_t *tbl;
	pmap_t pmap;

	/* Get the top A table in the pool */
	tbl = a_pool.tqh_first;
	if (tbl == NULL) {
		/*
		 * XXX - Instead of panicking here and in other get_x_table
		 * functions, we do have the option of sleeping on the head of
		 * the table pool.  Any function which updates the table pool
		 * would then issue a wakeup() on the head, thus waking up any
		 * processes waiting for a table.
		 *
		 * Actually, the place to sleep would be when some process
		 * asks for a "wired" mapping that would run us short of
		 * mapping resources.  This design DEPENDS on always having
		 * some mapping resources in the pool for stealing, so we
		 * must make sure we NEVER let the pool become empty. -gwr
		 */
		panic("get_a_table: out of A tables.");
	}

	TAILQ_REMOVE(&a_pool, tbl, at_link);
	/*
	 * If the table has a non-null parent pointer then it is in use.
	 * Forcibly abduct it from its parent and clear its entries.
	 * No re-entrancy worries here.  This table would not be in the
	 * table pool unless it was available for use.
	 *
	 * Note that the second argument to free_a_table() is FALSE.  This
	 * indicates that the table should not be relinked into the A table
	 * pool.  That is a job for the function that called us.
	 */
	if (tbl->at_parent) {
		pmap = tbl->at_parent;
		free_a_table(tbl, FALSE);
		pmap->pm_a_tmgr = NULL;
		pmap->pm_a_phys = kernAphys;
	}
	return tbl;
}

/* get_b_table			INTERNAL
 **
 * Return a level B table for use.
 */
b_tmgr_t *
get_b_table()
{
	b_tmgr_t *tbl;

	/* See 'get_a_table' for comments. */
	tbl = b_pool.tqh_first;
	if (tbl == NULL)
		panic("get_b_table: out of B tables.");
	TAILQ_REMOVE(&b_pool, tbl, bt_link);
	if (tbl->bt_parent) {
		tbl->bt_parent->at_dtbl[tbl->bt_pidx].attr.raw = MMU_DT_INVALID;
		tbl->bt_parent->at_ecnt--;
		free_b_table(tbl, FALSE);
	}
	return tbl;
}

/* get_c_table			INTERNAL
 **
 * Return a level C table for use.
 */
c_tmgr_t *
get_c_table()
{
	c_tmgr_t *tbl;

	/* See 'get_a_table' for comments */
	tbl = c_pool.tqh_first;
	if (tbl == NULL)
		panic("get_c_table: out of C tables.");
	TAILQ_REMOVE(&c_pool, tbl, ct_link);
	if (tbl->ct_parent) {
		tbl->ct_parent->bt_dtbl[tbl->ct_pidx].attr.raw = MMU_DT_INVALID;
		tbl->ct_parent->bt_ecnt--;
		free_c_table(tbl, FALSE);
	}
	return tbl;
}

/*
 * The following 'free_table' and 'steal_table' functions are called to
 * detach tables from their current obligations (parents and children) and
 * prepare them for reuse in another mapping.
 *
 * Free_table is used when the calling function will handle the fate
 * of the parent table, such as returning it to the free pool when it has
 * no valid entries.  Functions that do not want to handle this should
 * call steal_table, in which the parent table's descriptors and entry
 * count are automatically modified when this table is removed.
 */

/* free_a_table			INTERNAL
 **
 * Unmaps the given A table and all child tables from their current
 * mappings.  Returns the number of pages that were invalidated.
 * If 'relink' is true, the function will return the table to the head
 * of the available table pool.
 *
 * Cache note: The MC68851 will automatically flush all
 * descriptors derived from a given A table from its
 * Automatic Translation Cache (ATC) if we issue a
 * 'PFLUSHR' instruction with the base address of the
 * table.  This function should do, and does so.
 * Note note: We are using an MC68030 - there is no
 * PFLUSHR.
 */
int
free_a_table(a_tbl, relink)
	a_tmgr_t *a_tbl;
	boolean_t relink;
{
	int i, removed_cnt;
	mmu_long_dte_t	*dte;
	mmu_short_dte_t *dtbl;
	b_tmgr_t	*tmgr;

	/*
	 * Flush the ATC cache of all cached descriptors derived
	 * from this table.
	 * Sun3x does not use 68851's cached table feature
	 * flush_atc_crp(mmu_vtop(a_tbl->dte));
	 */

	/*
	 * Remove any pending cache flushes that were designated
	 * for the pmap this A table belongs to.
	 * a_tbl->parent->atc_flushq[0] = 0;
	 * Not implemented in sun3x.
	 */

	/*
	 * All A tables in the system should retain a map for the
	 * kernel.  If the table contains any valid descriptors
	 * (other than those for the kernel area), invalidate them all,
	 * stopping short of the kernel's entries.
	 */
	removed_cnt = 0;
	if (a_tbl->at_ecnt) {
		dte = a_tbl->at_dtbl;
		for (i=0; i < MMU_TIA(KERNBASE); i++) {
			/*
			 * If a table entry points to a valid B table, free
			 * it and its children.
			 */
			if (MMU_VALID_DT(dte[i])) {
				/*
				 * The following block does several things,
				 * from innermost expression to the
				 * outermost:
				 * 1) It extracts the base
				 *    address of the B table pointed
				 *    to in the A table entry dte[i].
				 * 2) It converts this base address into
				 *    the virtual address it can be
				 *    accessed with. (all MMU tables point
				 *    to physical addresses.)
				 * 3) It finds the corresponding manager
				 *    structure which manages this MMU table.
				 * 4) It frees the manager structure.
				 *    (This frees the MMU table and all
				 *    child tables. See 'free_b_table' for
				 *    details.)
				 */
				dtbl = mmu_ptov(dte[i].addr.raw);
				tmgr = mmuB2tmgr(dtbl);
				removed_cnt += free_b_table(tmgr, TRUE);
				dte[i].attr.raw = MMU_DT_INVALID;
			}
		}
		a_tbl->at_ecnt = 0;
	}
	if (relink) {
		a_tbl->at_parent = NULL;
		TAILQ_REMOVE(&a_pool, a_tbl, at_link);
		TAILQ_INSERT_HEAD(&a_pool, a_tbl, at_link);
	}
	return removed_cnt;
}

/* free_b_table			INTERNAL
 **
 * Unmaps the given B table and all its children from their current
 * mappings.  Returns the number of pages that were invalidated.
 * (For comments, see 'free_a_table()').
 */
int
free_b_table(b_tbl, relink)
	b_tmgr_t *b_tbl;
	boolean_t relink;
{
	int i, removed_cnt;
	mmu_short_dte_t *dte;
	mmu_short_pte_t	*dtbl;
	c_tmgr_t	*tmgr;

	removed_cnt = 0;
	if (b_tbl->bt_ecnt) {
		dte = b_tbl->bt_dtbl;
		for (i=0; i < MMU_B_TBL_SIZE; i++) {
			if (MMU_VALID_DT(dte[i])) {
				dtbl = mmu_ptov(MMU_DTE_PA(dte[i]));
				tmgr = mmuC2tmgr(dtbl);
				removed_cnt += free_c_table(tmgr, TRUE);
				dte[i].attr.raw = MMU_DT_INVALID;
			}
		}
		b_tbl->bt_ecnt = 0;
	}

	if (relink) {
		b_tbl->bt_parent = NULL;
		TAILQ_REMOVE(&b_pool, b_tbl, bt_link);
		TAILQ_INSERT_HEAD(&b_pool, b_tbl, bt_link);
	}
	return removed_cnt;
}

/* free_c_table			INTERNAL
 **
 * Unmaps the given C table from use and returns it to the pool for
 * re-use.  Returns the number of pages that were invalidated.
 *
 * This function preserves any physical page modification information
 * contained in the page descriptors within the C table by calling
 * 'pmap_remove_pte().'
 */
int
free_c_table(c_tbl, relink)
	c_tmgr_t *c_tbl;
	boolean_t relink;
{
	int i, removed_cnt;

	removed_cnt = 0;
	if (c_tbl->ct_ecnt) {
		for (i=0; i < MMU_C_TBL_SIZE; i++) {
			if (MMU_VALID_DT(c_tbl->ct_dtbl[i])) {
				pmap_remove_pte(&c_tbl->ct_dtbl[i]);
				removed_cnt++;
			}
		}
		c_tbl->ct_ecnt = 0;
	}

	if (relink) {
		c_tbl->ct_parent = NULL;
		TAILQ_REMOVE(&c_pool, c_tbl, ct_link);
		TAILQ_INSERT_HEAD(&c_pool, c_tbl, ct_link);
	}
	return removed_cnt;
}


/* pmap_remove_pte			INTERNAL
 **
 * Unmap the given pte and preserve any page modification
 * information by transferring it to the pv head of the
 * physical page it maps to.  This function does not update
 * any reference counts because it is assumed that the calling
 * function will do so.
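 *
 * A small worked example of the PV unlink performed below
 * (illustrative): with pv->pv_idx = 5 and the chain
 *	pv -> pvebase[5] -> pvebase[9] -> PVE_EOL
 * removing the PTE with index 9 sets pvebase[5].pve_next to
 * pvebase[9].pve_next (PVE_EOL), while removing index 5 instead
 * sets pv->pv_idx to pvebase[5].pve_next (9).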
 */
void
pmap_remove_pte(pte)
	mmu_short_pte_t *pte;
{
	u_short pv_idx, targ_idx;
	paddr_t pa;
	pv_t *pv;

	pa = MMU_PTE_PA(*pte);
	if (is_managed(pa)) {
		pv = pa2pv(pa);
		targ_idx = pteidx(pte);	/* Index of PTE being removed */

		/*
		 * If the PTE being removed is the first (or only) PTE in
		 * the list of PTEs currently mapped to this page, remove the
		 * PTE by changing the index found on the PV head.  Otherwise
		 * a linear search through the list will have to be executed
		 * in order to find the PVE which points to the PTE being
		 * removed, so that it may be modified to point to its new
		 * neighbor.
		 */

		pv_idx = pv->pv_idx;	/* Index of first PTE in PV list */
		if (pv_idx == targ_idx) {
			pv->pv_idx = pvebase[targ_idx].pve_next;
		} else {

			/*
			 * Find the PV element pointing to the target
			 * element.  Note: may have pv_idx==PVE_EOL
			 */

			for (;;) {
				if (pv_idx == PVE_EOL) {
					goto pv_not_found;
				}
				if (pvebase[pv_idx].pve_next == targ_idx)
					break;
				pv_idx = pvebase[pv_idx].pve_next;
			}

			/*
			 * At this point, pv_idx is the index of the PV
			 * element just before the target element in the list.
			 * Unlink the target.
			 */

			pvebase[pv_idx].pve_next = pvebase[targ_idx].pve_next;
		}

		/*
		 * Save the mod/ref bits of the pte by simply
		 * ORing the entire pte onto the pv_flags member
		 * of the pv structure.
		 * There is no need to use a separate bit pattern
		 * for usage information on the pv head than that
		 * which is used on the MMU ptes.
		 */

pv_not_found:
		pv->pv_flags |= (u_short) pte->attr.raw;
	}
	pte->attr.raw = MMU_DT_INVALID;
}

/* pmap_stroll			INTERNAL
 **
 * Retrieve the addresses of all table managers involved in the mapping of
 * the given virtual address.  If the table walk completed successfully,
 * return TRUE.  If it was only partially successful, return FALSE.
 * The table walk performed by this function is important to many other
 * functions in this module.
 *
 * Note: This function ought to be easier to read.
 */
boolean_t
pmap_stroll(pmap, va, a_tbl, b_tbl, c_tbl, pte, a_idx, b_idx, pte_idx)
	pmap_t pmap;
	vaddr_t va;
	a_tmgr_t **a_tbl;
	b_tmgr_t **b_tbl;
	c_tmgr_t **c_tbl;
	mmu_short_pte_t **pte;
	int *a_idx, *b_idx, *pte_idx;
{
	mmu_long_dte_t *a_dte;	/* A: long descriptor table */
	mmu_short_dte_t *b_dte;	/* B: short descriptor table */

	if (pmap == pmap_kernel())
		return FALSE;

	/* Does the given pmap have its own A table? */
	*a_tbl = pmap->pm_a_tmgr;
	if (*a_tbl == NULL)
		return FALSE;	/* No.  Return unknown. */
	/* Does the A table have a valid B table
	 * under the corresponding table entry?
	 */
	*a_idx = MMU_TIA(va);
	a_dte = &((*a_tbl)->at_dtbl[*a_idx]);
	if (!MMU_VALID_DT(*a_dte))
		return FALSE;	/* No. Return unknown. */
	/* Yes. Extract B table from the A table. */
	*b_tbl = mmuB2tmgr(mmu_ptov(a_dte->addr.raw));
	/* Does the B table have a valid C table
	 * under the corresponding table entry?
	 */
	*b_idx = MMU_TIB(va);
	b_dte = &((*b_tbl)->bt_dtbl[*b_idx]);
	if (!MMU_VALID_DT(*b_dte))
		return FALSE;	/* No. Return unknown. */
	/* Yes. Extract C table from the B table. */
	*c_tbl = mmuC2tmgr(mmu_ptov(MMU_DTE_PA(*b_dte)));
	*pte_idx = MMU_TIC(va);
	*pte = &((*c_tbl)->ct_dtbl[*pte_idx]);

	return TRUE;
}
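
/*
 * A minimal caller sketch for pmap_stroll() (not compiled; it follows
 * this file's '#if 0' convention).  The hypothetical locals show the
 * usual pattern: walk to the PTE for a user VA and bail out quietly
 * when any level of the walk is missing.
 */
#if	0
	a_tmgr_t *a_tbl;
	b_tmgr_t *b_tbl;
	c_tmgr_t *c_tbl;
	mmu_short_pte_t *pte;
	int a_idx, b_idx, pte_idx;

	if (pmap_stroll(pmap, va, &a_tbl, &b_tbl, &c_tbl, &pte,
	    &a_idx, &b_idx, &pte_idx) == FALSE)
		return;		/* VA not fully mapped; nothing to do. */
	/* ... examine or modify *pte here ... */
#endif	/* 0 */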

/* pmap_enter			INTERFACE
 **
 * Called by the kernel to map a virtual address
 * to a physical address in the given process map.
 *
 * Note: this function should apply an exclusive lock
 * on the pmap system for its duration.  (it certainly
 * would save my hair!!)
 * This function ought to be easier to read.
 */
int
pmap_enter(pmap, va, pa, prot, flags)
	pmap_t pmap;
	vaddr_t va;
	paddr_t pa;
	vm_prot_t prot;
	int flags;
{
	boolean_t insert, managed; /* Marks the need for PV insertion.*/
	u_short nidx;		/* PV list index */
	int mapflags;		/* Flags for the mapping (see NOTE1) */
	u_int a_idx, b_idx, pte_idx;	/* table indices */
	a_tmgr_t *a_tbl;	/* A: long descriptor table manager */
	b_tmgr_t *b_tbl;	/* B: short descriptor table manager */
	c_tmgr_t *c_tbl;	/* C: short page table manager */
	mmu_long_dte_t *a_dte;	/* A: long descriptor table */
	mmu_short_dte_t *b_dte;	/* B: short descriptor table */
	mmu_short_pte_t *c_pte;	/* C: short page descriptor table */
	pv_t *pv;		/* pv list head */
	boolean_t wired;	/* is the mapping to be wired? */
	enum {NONE, NEWA, NEWB, NEWC} llevel; /* used at end */

	if (pmap == pmap_kernel()) {
		pmap_enter_kernel(va, pa, prot);
		return 0;
	}

	/*
	 * Determine if the mapping should be wired.
	 */
	wired = ((flags & PMAP_WIRED) != 0);

	/*
	 * NOTE1:
	 *
	 * On November 13, 1999, someone changed the pmap_enter() API such
	 * that it now accepts a 'flags' argument.  This new argument
	 * contains bit-flags for the architecture-independent (UVM) system to
	 * use in signalling certain mapping requirements to the architecture-
	 * dependent (pmap) system.  The argument it replaces, 'wired', is now
	 * one of the flags within it.
	 *
	 * In addition to flags signaled by the architecture-independent
	 * system, parts of the architecture-dependent section of the sun3x
	 * kernel pass their own flags in the lower, unused bits of the
	 * physical address supplied to this function.  These flags are
	 * extracted and stored in the temporary variable 'mapflags'.
	 *
	 * Extract sun3x specific flags from the physical address.
	 */
	mapflags = (pa & ~MMU_PAGE_MASK);
	pa &= MMU_PAGE_MASK;
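
	/*
	 * For example, the same convention appears in pmap_bootstrap()
	 * above, which maps the msgbuf page by passing (pa | PMAP_NC)
	 * to pmap_enter_kernel(); the PMAP_NC bit rides in the low,
	 * unused bits of the physical address and is recovered into
	 * 'mapflags' here.
	 */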
1649
1650 /*
1651 * Determine if the physical address being mapped is on-board RAM.
1652 * Any other area of the address space is likely to belong to a
1653 * device and hence it would be disastrous to cache its contents.
1654 */
1655 if ((managed = is_managed(pa)) == FALSE)
1656 mapflags |= PMAP_NC;
1657
1658 /*
1659 * For user mappings we walk along the MMU tables of the given
1660 * pmap, reaching a PTE which describes the virtual page being
1661 * mapped or changed. If any level of the walk ends in an invalid
1662 * entry, a table must be allocated and the entry must be updated
1663 * to point to it.
1664 * There is a bit of confusion as to whether this code must be
1665 * re-entrant. For now we will assume it is. To support
1666 * re-entrancy we must unlink tables from the table pool before
1667 * we assume we may use them. Tables are re-linked into the pool
1668 * when we are finished with them at the end of the function.
1669 * But I don't feel like doing that until we have proof that this
1670 * needs to be re-entrant.
1671 * 'llevel' records which tables need to be relinked.
1672 */
1673 llevel = NONE;
1674
1675 /*
1676 * Step 1 - Retrieve the A table from the pmap. If it has no
1677 * A table, allocate a new one from the available pool.
1678 */
1679
1680 a_tbl = pmap->pm_a_tmgr;
1681 if (a_tbl == NULL) {
1682 /*
1683 * This pmap does not currently have an A table. Allocate
1684 * a new one.
1685 */
1686 a_tbl = get_a_table();
1687 a_tbl->at_parent = pmap;
1688
1689 /*
1690 * Assign this new A table to the pmap, and calculate its
1691 * physical address so that loadcrp() can be used to make
1692 * the table active.
1693 */
1694 pmap->pm_a_tmgr = a_tbl;
1695 pmap->pm_a_phys = mmu_vtop(a_tbl->at_dtbl);
1696
1697 /*
1698 * If the process receiving a new A table is the current
1699 * process, we are responsible for setting the MMU so that
1700 * it becomes the current address space. This only adds
1701 * new mappings, so no need to flush anything.
1702 */
1703 if (pmap == current_pmap()) {
1704 kernel_crp.rp_addr = pmap->pm_a_phys;
1705 loadcrp(&kernel_crp);
1706 }
1707
1708 if (!wired)
1709 llevel = NEWA;
1710 } else {
1711 /*
1712 * Use the A table already allocated for this pmap.
1713 * Unlink it from the A table pool if necessary.
1714 */
1715 if (wired && !a_tbl->at_wcnt)
1716 TAILQ_REMOVE(&a_pool, a_tbl, at_link);
1717 }
1718
1719 /*
1720 * Step 2 - Walk into the B table. If there is no valid B table,
1721 * allocate one.
1722 */
1723
1724 a_idx = MMU_TIA(va); /* Calculate the TIA of the VA. */
1725 a_dte = &a_tbl->at_dtbl[a_idx]; /* Retrieve descriptor from table */
1726 if (MMU_VALID_DT(*a_dte)) { /* Is the descriptor valid? */
1727 /* The descriptor is valid. Use the B table it points to. */
1728 /*************************************
1729 * a_idx *
1730 * v *
1731 * a_tbl -> +-+-+-+-+-+-+-+-+-+-+-+- *
1732 * | | | | | | | | | | | | *
1733 * +-+-+-+-+-+-+-+-+-+-+-+- *
1734 * | *
1735 * \- b_tbl -> +-+- *
1736 * | | *
1737 * +-+- *
1738 *************************************/
1739 b_dte = mmu_ptov(a_dte->addr.raw);
1740 b_tbl = mmuB2tmgr(b_dte);
1741
1742 /*
1743 * If the requested mapping must be wired, but this table
1744 * being used to map it is not, the table must be removed
1745 * from the available pool and its wired entry count
1746 * incremented.
1747 */
1748 if (wired && !b_tbl->bt_wcnt) {
1749 TAILQ_REMOVE(&b_pool, b_tbl, bt_link);
1750 a_tbl->at_wcnt++;
1751 }
1752 } else {
1753 /* The descriptor is invalid. Allocate a new B table. */
1754 b_tbl = get_b_table();
1755
1756 /* Point the parent A table descriptor to this new B table. */
1757 a_dte->addr.raw = mmu_vtop(b_tbl->bt_dtbl);
1758 a_dte->attr.raw = MMU_LONG_DTE_LU | MMU_DT_SHORT;
1759 a_tbl->at_ecnt++; /* Update parent's valid entry count */
1760
1761 /* Create the necessary back references to the parent table */
1762 b_tbl->bt_parent = a_tbl;
1763 b_tbl->bt_pidx = a_idx;
1764
1765 /*
1766 * If this table is to be wired, make sure the parent A table
1767 * wired count is updated to reflect that it has another wired
1768 * entry.
1769 */
1770 if (wired)
1771 a_tbl->at_wcnt++;
1772 else if (llevel == NONE)
1773 llevel = NEWB;
1774 }
1775
1776 /*
1777 * Step 3 - Walk into the C table, if there is no valid C table,
1778 * allocate one.
1779 */
1780
1781 b_idx = MMU_TIB(va); /* Calculate the TIB of the VA */
1782 b_dte = &b_tbl->bt_dtbl[b_idx]; /* Retrieve descriptor from table */
1783 if (MMU_VALID_DT(*b_dte)) { /* Is the descriptor valid? */
1784 /* The descriptor is valid. Use the C table it points to. */
1785 /**************************************
1786 * c_idx *
1787 * | v *
1788 * \- b_tbl -> +-+-+-+-+-+-+-+-+-+-+- *
1789 * | | | | | | | | | | | *
1790 * +-+-+-+-+-+-+-+-+-+-+- *
1791 * | *
1792 * \- c_tbl -> +-+-- *
1793 * | | | *
1794 * +-+-- *
1795 **************************************/
1796 c_pte = mmu_ptov(MMU_PTE_PA(*b_dte));
1797 c_tbl = mmuC2tmgr(c_pte);
1798
1799 /* If mapping is wired and table is not */
1800 if (wired && !c_tbl->ct_wcnt) {
1801 TAILQ_REMOVE(&c_pool, c_tbl, ct_link);
1802 b_tbl->bt_wcnt++;
1803 }
1804 } else {
1805 /* The descriptor is invalid. Allocate a new C table. */
1806 c_tbl = get_c_table();
1807
1808 /* Point the parent B table descriptor to this new C table. */
1809 b_dte->attr.raw = mmu_vtop(c_tbl->ct_dtbl);
1810 b_dte->attr.raw |= MMU_DT_SHORT;
1811 b_tbl->bt_ecnt++; /* Update parent's valid entry count */
1812
1813 /* Create the necessary back references to the parent table */
1814 c_tbl->ct_parent = b_tbl;
1815 c_tbl->ct_pidx = b_idx;
1816 /*
1817 * Store the pmap and base virtual managed address for faster
1818 * retrieval in the PV functions.
1819 */
1820 c_tbl->ct_pmap = pmap;
1821 c_tbl->ct_va = (va & (MMU_TIA_MASK|MMU_TIB_MASK));
1822
1823 /*
1824 * If this table is to be wired, make sure the parent B table
1825 * wired count is updated to reflect that it has another wired
1826 * entry.
1827 */
1828 if (wired)
1829 b_tbl->bt_wcnt++;
1830 else if (llevel == NONE)
1831 llevel = NEWC;
1832 }
1833
1834 /*
1835 * Step 4 - Deposit a page descriptor (PTE) into the appropriate
1836 * slot of the C table, describing the PA to which the VA is mapped.
1837 */
1838
1839 pte_idx = MMU_TIC(va);
1840 c_pte = &c_tbl->ct_dtbl[pte_idx];
1841 if (MMU_VALID_DT(*c_pte)) { /* Is the entry currently valid? */
1842 /*
1843 * The PTE is currently valid. This particular call
1844 * is just a synonym for one (or more) of the following
1845 * operations:
1846 * change protection of a page
1847 * change wiring status of a page
1848 * remove the mapping of a page
1849 *
1850 * XXX - Semi critical: This code should unwire the PTE
1851 * and, possibly, associated parent tables if this is a
1852 * change wiring operation. Currently it does not.
1853 *
1854 * This may be ok if pmap_unwire() is the only
1855 * interface used to UNWIRE a page.
1856 */
1857
1858 /* First check if this is a wiring operation. */
1859 if (wired && (c_pte->attr.raw & MMU_SHORT_PTE_WIRED)) {
1860 /*
1861 * The PTE is already wired. To prevent it from being
1862 * counted as a new wiring operation, reset the 'wired'
1863 * variable.
1864 */
1865 wired = FALSE;
1866 }
1867
1868 /* Is the new address the same as the old? */
1869 if (MMU_PTE_PA(*c_pte) == pa) {
1870 /*
1871 * Yes, mark that it does not need to be reinserted
1872 * into the PV list.
1873 */
1874 insert = FALSE;
1875
1876 /*
1877 * Clear all but the modified, referenced and wired
1878 * bits on the PTE.
1879 */
1880 c_pte->attr.raw &= (MMU_SHORT_PTE_M
1881 | MMU_SHORT_PTE_USED | MMU_SHORT_PTE_WIRED);
1882 } else {
1883 /* No, remove the old entry */
1884 pmap_remove_pte(c_pte);
1885 insert = TRUE;
1886 }
1887
1888 /*
1889 * TLB flush is only necessary if modifying current map.
1890 * However, in pmap_enter(), the pmap almost always IS
1891 * the current pmap, so don't even bother to check.
1892 */
1893 TBIS(va);
1894 } else {
1895 /*
1896 * The PTE is invalid. Increment the valid entry count in
1897 * the C table manager to reflect the addition of a new entry.
1898 */
1899 c_tbl->ct_ecnt++;
1900
1901 /* XXX - temporarily make sure the PTE is cleared. */
1902 c_pte->attr.raw = 0;
1903
1904 /* It will also need to be inserted into the PV list. */
1905 insert = TRUE;
1906 }
1907
1908 /*
1909 * If page is changing from unwired to wired status, set an unused bit
1910 * within the PTE to indicate that it is wired. Also increment the
1911 * wired entry count in the C table manager.
1912 */
1913 if (wired) {
1914 c_pte->attr.raw |= MMU_SHORT_PTE_WIRED;
1915 c_tbl->ct_wcnt++;
1916 }
1917
1918 /*
1919 * Map the page, being careful to preserve modify/reference/wired
1920 * bits. At this point it is assumed that the PTE either has no bits
1921 * set, or if there are set bits, they are only modified, referenced
1922 * or wired bits. If not, the following statement will cause erratic
1923 * behavior.
1924 */
1925 #ifdef PMAP_DEBUG
1926 if (c_pte->attr.raw & ~(MMU_SHORT_PTE_M |
1927 MMU_SHORT_PTE_USED | MMU_SHORT_PTE_WIRED)) {
1928 printf("pmap_enter: junk left in PTE at %p\n", c_pte);
1929 Debugger();
1930 }
1931 #endif
1932 c_pte->attr.raw |= ((u_long) pa | MMU_DT_PAGE);
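
	/*
	 * At this point the PTE holds the page frame address, a descriptor
	 * type of MMU_DT_PAGE, and whatever modified, referenced and wired
	 * bits survived from above; the write protect and cache inhibit
	 * bits may still be OR'd in below.
	 */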
1933
1934 /*
1935 * If the mapping should be read-only, set the write protect
1936 * bit in the PTE.
1937 */
1938 if (!(prot & VM_PROT_WRITE))
1939 c_pte->attr.raw |= MMU_SHORT_PTE_WP;
1940
1941 /*
1942 * If the mapping should be cache inhibited (indicated by flag bits
1943 * found in the low-order bits of the physical address), mark the
1944 * PTE as a cache-inhibited page.
1945 */
1946 if (mapflags & PMAP_NC)
1947 c_pte->attr.raw |= MMU_SHORT_PTE_CI;
1948
1949 /*
1950 * If the physical address being mapped is managed by the PV
1951 * system then link the pte into the list of pages mapped to that
1952 * address.
1953 */
1954 if (insert && managed) {
1955 pv = pa2pv(pa);
1956 nidx = pteidx(c_pte);
1957
1958 pvebase[nidx].pve_next = pv->pv_idx;
1959 pv->pv_idx = nidx;
1960 }
1961
1962 /* Move any allocated tables back into the active pool. */
1963
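	/*
	 * Note the deliberate fall-through below: a newly allocated A table
	 * implies newly allocated B and C tables beneath it, and a new B
	 * table implies a new C table, so each case relinks its own table
	 * and every table allocated below it.
	 */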
1964 switch (llevel) {
1965 case NEWA:
1966 TAILQ_INSERT_TAIL(&a_pool, a_tbl, at_link);
1967 /* FALLTHROUGH */
1968 case NEWB:
1969 TAILQ_INSERT_TAIL(&b_pool, b_tbl, bt_link);
1970 /* FALLTHROUGH */
1971 case NEWC:
1972 TAILQ_INSERT_TAIL(&c_pool, c_tbl, ct_link);
1973 /* FALLTHROUGH */
1974 default:
1975 break;
1976 }
1977
1978 return 0;
1979 }
1980
1981 /* pmap_enter_kernel INTERNAL
1982 **
1983 * Map the given virtual address to the given physical address within the
1984 * kernel address space. This function exists because the kernel map does
1985 * not do dynamic table allocation. It consists of a contiguous array of ptes
1986 * and can be edited directly without the need to walk through any tables.
1987 *
1988 * XXX: "Danger, Will Robinson!"
1989 * Note that the kernel should never take a fault on any page
1990 * between [ KERNBASE .. virtual_avail ] and this is checked in
1991 * trap.c for kernel-mode MMU faults. This means that mappings
1992 * created in that range must be implicitly wired. -gwr
1993 */
1994 void
1995 pmap_enter_kernel(va, pa, prot)
1996 vaddr_t va;
1997 paddr_t pa;
1998 vm_prot_t prot;
1999 {
2000 boolean_t was_valid, insert;
2001 u_short pte_idx;
2002 int flags;
2003 mmu_short_pte_t *pte;
2004 pv_t *pv;
2005 paddr_t old_pa;
2006
2007 flags = (pa & ~MMU_PAGE_MASK);
2008 pa &= MMU_PAGE_MASK;
2009
2010 if (is_managed(pa))
2011 insert = TRUE;
2012 else
2013 insert = FALSE;
2014
2015 /*
2016 * Calculate the index of the PTE being modified.
2017 */
2018 pte_idx = (u_long) m68k_btop(va - KERNBASE);
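
	/*
	 * E.g. (a sketch): for va == KERNBASE + 2 * PAGE_SIZE this yields
	 * index 2, the third PTE in kernCbase below.
	 */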
2019
2020 /* This array is traditionally named "Sysmap" */
2021 pte = &kernCbase[pte_idx];
2022
2023 if (MMU_VALID_DT(*pte)) {
2024 was_valid = TRUE;
2025 /*
2026 * If the PTE already maps a different physical
2027 * address, unmap it and unlink it from its PV list.
2028 */
2029 old_pa = MMU_PTE_PA(*pte);
2030 if (pa != old_pa)
2031 pmap_remove_pte(pte);
2032 else {
2033 /*
2034 * Old PA and new PA are the same. No need to
2035 * relink the mapping within the PV list.
2036 */
2037 insert = FALSE;
2038
2039 /*
2040 * Save any mod/ref bits on the PTE.
2041 */
2042 pte->attr.raw &= (MMU_SHORT_PTE_USED|MMU_SHORT_PTE_M);
2043 }
2044 } else {
2045 pte->attr.raw = MMU_DT_INVALID;
2046 was_valid = FALSE;
2047 }
2048
2049 /*
2050 * Map the page, being careful to preserve the modified/referenced
2051 * bits on the PTE.
2052 */
2053 pte->attr.raw |= (pa | MMU_DT_PAGE);
2054
2055 if (!(prot & VM_PROT_WRITE)) /* If access should be read-only */
2056 pte->attr.raw |= MMU_SHORT_PTE_WP;
2057 if (flags & PMAP_NC)
2058 pte->attr.raw |= MMU_SHORT_PTE_CI;
2059 if (was_valid)
2060 TBIS(va);
2061
2062 /*
2063 * Insert the PTE into the PV system, if need be.
2064 */
2065 if (insert) {
2066 pv = pa2pv(pa);
2067 pvebase[pte_idx].pve_next = pv->pv_idx;
2068 pv->pv_idx = pte_idx;
2069 }
2070 }
2071
2072 void
2073 pmap_kenter_pa(va, pa, prot)
2074 vaddr_t va;
2075 paddr_t pa;
2076 vm_prot_t prot;
2077 {
2078 mmu_short_pte_t *pte;
2079
2080 /* This array is traditionally named "Sysmap" */
2081 pte = &kernCbase[(u_long)m68k_btop(va - KERNBASE)];
2082
2083 KASSERT(!MMU_VALID_DT(*pte));
2084 pte->attr.raw = MMU_DT_INVALID | MMU_DT_PAGE | (pa & MMU_PAGE_MASK);
2085 if (!(prot & VM_PROT_WRITE))
2086 pte->attr.raw |= MMU_SHORT_PTE_WP;
2087 }
2088
2089 void
2090 pmap_kremove(va, len)
2091 vaddr_t va;
2092 vsize_t len;
2093 {
2094 int idx, eidx;
2095
2096 #ifdef PMAP_DEBUG
2097 if ((va & PGOFSET) || (len & PGOFSET))
2098 panic("pmap_kremove: alignment");
2099 #endif
2100
2101 idx = m68k_btop(va - KERNBASE);
2102 eidx = m68k_btop(va + len - KERNBASE);
2103
2104 while (idx < eidx) {
2105 kernCbase[idx++].attr.raw = MMU_DT_INVALID;
2106 TBIS(va);
2107 va += PAGE_SIZE;
2108 }
2109 }
2110
2111 /* pmap_map INTERNAL
2112 **
2113 * Map a contiguous range of physical memory into a contiguous range of
2114 * the kernel virtual address space.
2115 *
2116 * Used for device mappings and early mapping of the kernel text/data/bss.
2117 * Returns the first virtual address beyond the end of the range.
2118 */
2119 vaddr_t
2120 pmap_map(va, pa, endpa, prot)
2121 vaddr_t va;
2122 paddr_t pa;
2123 paddr_t endpa;
2124 int prot;
2125 {
2126 int sz;
2127
2128 sz = endpa - pa;
2129 do {
2130 pmap_enter_kernel(va, pa, prot);
2131 va += PAGE_SIZE;
2132 pa += PAGE_SIZE;
2133 sz -= PAGE_SIZE;
2134 } while (sz > 0);
2135 pmap_update(pmap_kernel());
2136 return(va);
2137 }
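
/*
 * Example usage (a sketch; 'obio_pa' is a hypothetical device address):
 *
 *	va = pmap_map(va, obio_pa, obio_pa + PAGE_SIZE,
 *	    VM_PROT_READ|VM_PROT_WRITE);
 *
 * maps one page of the device at 'va' and returns the next free virtual
 * address.
 */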
2138
2139 /* pmap_protect INTERFACE
2140 **
2141 * Apply the given protection to the given virtual address range within
2142 * the given map.
2143 *
2144 * It is ok for the protection applied to be stronger than what is
2145 * specified. We use this to our advantage when the given map has no
2146 * mapping for the virtual address. By skipping a page when this
2147 * is discovered, we are effectively applying a protection of VM_PROT_NONE,
2148 * and therefore do not need to map the page just to apply a protection
2149 * code. Only pmap_enter() needs to create new mappings if they do not exist.
2150 *
2151 * XXX - This function could be sped up by using pmap_stroll() for initial
2152 * setup, and then manually walking the tables in the main loop.
2153 */
2154 void
2155 pmap_protect(pmap, startva, endva, prot)
2156 pmap_t pmap;
2157 vaddr_t startva, endva;
2158 vm_prot_t prot;
2159 {
2160 boolean_t iscurpmap;
2161 int a_idx, b_idx, c_idx;
2162 a_tmgr_t *a_tbl;
2163 b_tmgr_t *b_tbl;
2164 c_tmgr_t *c_tbl;
2165 mmu_short_pte_t *pte;
2166
2167 if (pmap == pmap_kernel()) {
2168 pmap_protect_kernel(startva, endva, prot);
2169 return;
2170 }
2171
2172 /*
2173 * In this particular pmap implementation, there are only three
2174 * types of memory protection: 'all' (read/write/execute),
2175 * 'read-only' (read/execute) and 'none' (no mapping.)
2176 * It is not possible for us to treat 'executable' as a separate
2177 * protection type. Therefore, protection requests that seek to
2178 * remove execute permission while retaining read or write, and those
2179 * that make little sense (write-only for example) are ignored.
2180 */
2181 switch (prot) {
2182 case VM_PROT_NONE:
2183 /*
2184 * A request to apply the protection code of
2185 * 'VM_PROT_NONE' is a synonym for pmap_remove().
2186 */
2187 pmap_remove(pmap, startva, endva);
2188 return;
2189 case VM_PROT_EXECUTE:
2190 case VM_PROT_READ:
2191 case VM_PROT_READ|VM_PROT_EXECUTE:
2192 /* continue */
2193 break;
2194 case VM_PROT_WRITE:
2195 case VM_PROT_WRITE|VM_PROT_READ:
2196 case VM_PROT_WRITE|VM_PROT_EXECUTE:
2197 case VM_PROT_ALL:
2198 /* None of these should happen in a sane system. */
2199 return;
2200 }
2201
2202 /*
2203 * If the pmap has no A table, it has no mappings and therefore
2204 * there is nothing to protect.
2205 */
2206 if ((a_tbl = pmap->pm_a_tmgr) == NULL)
2207 return;
2208
2209 a_idx = MMU_TIA(startva);
2210 b_idx = MMU_TIB(startva);
2211 c_idx = MMU_TIC(startva);
2212 b_tbl = NULL; c_tbl = NULL;
2213
2214 iscurpmap = (pmap == current_pmap());
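
	/*
	 * The loop below walks the A/B/C tables in step with 'startva',
	 * caching the current B and C table pointers so each is looked up
	 * only once per granule. When an invalid descriptor is found, the
	 * walk skips the entire range that descriptor would have mapped
	 * (MMU_TIA_RANGE or MMU_TIB_RANGE bytes).
	 */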
2215 while (startva < endva) {
2216 if (b_tbl || MMU_VALID_DT(a_tbl->at_dtbl[a_idx])) {
2217 if (b_tbl == NULL) {
2218 b_tbl = (b_tmgr_t *) a_tbl->at_dtbl[a_idx].addr.raw;
2219 b_tbl = mmu_ptov((vaddr_t)b_tbl);
2220 b_tbl = mmuB2tmgr((mmu_short_dte_t *)b_tbl);
2221 }
2222 if (c_tbl || MMU_VALID_DT(b_tbl->bt_dtbl[b_idx])) {
2223 if (c_tbl == NULL) {
2224 c_tbl = (c_tmgr_t *) MMU_DTE_PA(b_tbl->bt_dtbl[b_idx]);
2225 c_tbl = mmu_ptov((vaddr_t)c_tbl);
2226 c_tbl = mmuC2tmgr((mmu_short_pte_t *)c_tbl);
2227 }
2228 if (MMU_VALID_DT(c_tbl->ct_dtbl[c_idx])) {
2229 pte = &c_tbl->ct_dtbl[c_idx];
2230 /* make the mapping read-only */
2231 pte->attr.raw |= MMU_SHORT_PTE_WP;
2232 /*
2233 * If we just modified the current address space,
2234 * flush any translations for the modified page from
2235 * the translation cache and any data from it in the
2236 * data cache.
2237 */
2238 if (iscurpmap)
2239 TBIS(startva);
2240 }
2241 startva += PAGE_SIZE;
2242
2243 if (++c_idx >= MMU_C_TBL_SIZE) { /* exceeded C table? */
2244 c_tbl = NULL;
2245 c_idx = 0;
2246 if (++b_idx >= MMU_B_TBL_SIZE) { /* exceeded B table? */
2247 b_tbl = NULL;
2248 b_idx = 0;
2249 }
2250 }
2251 } else { /* C table wasn't valid */
2252 c_tbl = NULL;
2253 c_idx = 0;
2254 startva += MMU_TIB_RANGE;
2255 if (++b_idx >= MMU_B_TBL_SIZE) { /* exceeded B table? */
2256 b_tbl = NULL;
2257 b_idx = 0;
2258 }
2259 } /* C table */
2260 } else { /* B table wasn't valid */
2261 b_tbl = NULL;
2262 b_idx = 0;
2263 startva += MMU_TIA_RANGE;
2264 a_idx++;
2265 } /* B table */
2266 }
2267 }
2268
2269 /* pmap_protect_kernel INTERNAL
2270 **
2271 * Apply the given protection code to a kernel address range.
2272 */
2273 void
2274 pmap_protect_kernel(startva, endva, prot)
2275 vaddr_t startva, endva;
2276 vm_prot_t prot;
2277 {
2278 vaddr_t va;
2279 mmu_short_pte_t *pte;
2280
2281 pte = &kernCbase[(unsigned long) m68k_btop(startva - KERNBASE)];
2282 for (va = startva; va < endva; va += PAGE_SIZE, pte++) {
2283 if (MMU_VALID_DT(*pte)) {
2284 switch (prot) {
2285 case VM_PROT_ALL:
2286 break;
2287 case VM_PROT_EXECUTE:
2288 case VM_PROT_READ:
2289 case VM_PROT_READ|VM_PROT_EXECUTE:
2290 pte->attr.raw |= MMU_SHORT_PTE_WP;
2291 break;
2292 case VM_PROT_NONE:
2293 /* this is an alias for 'pmap_remove_kernel' */
2294 pmap_remove_pte(pte);
2295 break;
2296 default:
2297 break;
2298 }
2299 /*
2300 * since this is the kernel, immediately flush any cached
2301 * descriptors for this address.
2302 */
2303 TBIS(va);
2304 }
2305 }
2306 }
2307
2308 /* pmap_unwire INTERFACE
2309 **
2310 * Clear the wired attribute of the specified page.
2311 *
2312 * This function is called from vm_fault.c to unwire
2313 * a mapping.
2314 */
2315 void
2316 pmap_unwire(pmap, va)
2317 pmap_t pmap;
2318 vaddr_t va;
2319 {
2320 int a_idx, b_idx, c_idx;
2321 a_tmgr_t *a_tbl;
2322 b_tmgr_t *b_tbl;
2323 c_tmgr_t *c_tbl;
2324 mmu_short_pte_t *pte;
2325
2326 /* Kernel mappings always remain wired. */
2327 if (pmap == pmap_kernel())
2328 return;
2329
2330 /*
2331 * Walk through the tables. If the walk terminates without
2332 * a valid PTE then the address wasn't wired in the first place.
2333 * Return immediately.
2334 */
2335 if (pmap_stroll(pmap, va, &a_tbl, &b_tbl, &c_tbl, &pte, &a_idx,
2336 &b_idx, &c_idx) == FALSE)
2337 return;
2338
2339
2340 /* Is the PTE wired? If not, return. */
2341 if (!(pte->attr.raw & MMU_SHORT_PTE_WIRED))
2342 return;
2343
2344 /* Remove the wiring bit. */
2345 pte->attr.raw &= ~(MMU_SHORT_PTE_WIRED);
2346
2347 /*
2348 * Decrement the wired entry count in the C table.
2349 * If it reaches zero the following things happen:
2350 * 1. The table no longer has any wired entries and is considered
2351 * unwired.
2352 * 2. It is placed on the available queue.
2353 * 3. The parent table's wired entry count is decremented.
2354 * 4. If it reaches zero, this process repeats at step 1 and
2355 * stops after reaching the A table.
2356 */
2357 if (--c_tbl->ct_wcnt == 0) {
2358 TAILQ_INSERT_TAIL(&c_pool, c_tbl, ct_link);
2359 if (--b_tbl->bt_wcnt == 0) {
2360 TAILQ_INSERT_TAIL(&b_pool, b_tbl, bt_link);
2361 if (--a_tbl->at_wcnt == 0) {
2362 TAILQ_INSERT_TAIL(&a_pool, a_tbl, at_link);
2363 }
2364 }
2365 }
2366 }
2367
2368 /* pmap_copy INTERFACE
2369 **
2370 * Copy the mappings of a range of addresses in one pmap, into
2371 * the destination address of another.
2372 *
2373 * This routine is advisory. Should we one day decide that MMU tables
2374 * may be shared by more than one pmap, this function should be used to
2375 * link them together. Until that day however, we do nothing.
2376 */
2377 void
2378 pmap_copy(pmap_a, pmap_b, dst, len, src)
2379 pmap_t pmap_a, pmap_b;
2380 vaddr_t dst;
2381 vsize_t len;
2382 vaddr_t src;
2383 {
2384 /* not implemented. */
2385 }
2386
2387 /* pmap_copy_page INTERFACE
2388 **
2389 * Copy the contents of one physical page into another.
2390 *
2391 * This function makes use of two virtual pages allocated in pmap_bootstrap()
2392 * to map the two specified physical pages into the kernel address space.
2393 *
2394 * Note: We could use the transparent translation registers to make the
2395 * mappings. If we do so, be sure to disable interrupts before using them.
2396 */
2397 void
2398 pmap_copy_page(srcpa, dstpa)
2399 paddr_t srcpa, dstpa;
2400 {
2401 vaddr_t srcva, dstva;
2402 int s;
2403
2404 srcva = tmp_vpages[0];
2405 dstva = tmp_vpages[1];
2406
2407 s = splvm();
2408 #ifdef DIAGNOSTIC
2409 if (tmp_vpages_inuse++)
2410 panic("pmap_copy_page: temporary vpages are in use.");
2411 #endif
2412
2413 /* Map pages as non-cacheable to avoid cache pollution? */
2414 pmap_kenter_pa(srcva, srcpa, VM_PROT_READ);
2415 pmap_kenter_pa(dstva, dstpa, VM_PROT_READ|VM_PROT_WRITE);
2416
2417 /* Hand-optimized version of bcopy(src, dst, PAGE_SIZE) */
2418 copypage((char *) srcva, (char *) dstva);
2419
2420 pmap_kremove(srcva, PAGE_SIZE);
2421 pmap_kremove(dstva, PAGE_SIZE);
2422
2423 #ifdef DIAGNOSTIC
2424 --tmp_vpages_inuse;
2425 #endif
2426 splx(s);
2427 }
2428
2429 /* pmap_zero_page INTERFACE
2430 **
2431 * Zero the contents of the specified physical page.
2432 *
2433 * Uses one of the virtual pages allocated in pmap_bootstrap()
2434 * to map the specified page into the kernel address space.
2435 */
2436 void
2437 pmap_zero_page(dstpa)
2438 paddr_t dstpa;
2439 {
2440 vaddr_t dstva;
2441 int s;
2442
2443 dstva = tmp_vpages[1];
2444 s = splvm();
2445 #ifdef DIAGNOSTIC
2446 if (tmp_vpages_inuse++)
2447 panic("pmap_zero_page: temporary vpages are in use.");
2448 #endif
2449
2450 /* The comments in pmap_copy_page() above apply here also. */
2451 pmap_kenter_pa(dstva, dstpa, VM_PROT_READ|VM_PROT_WRITE);
2452
2453 /* Hand-optimized version of bzero(ptr, PAGE_SIZE) */
2454 zeropage((char *) dstva);
2455
2456 pmap_kremove(dstva, PAGE_SIZE);
2457 #ifdef DIAGNOSTIC
2458 --tmp_vpages_inuse;
2459 #endif
2460 splx(s);
2461 }
2462
2463 /* pmap_collect INTERFACE
2464 **
2465 * Called from the VM system when we are about to swap out
2466 * the process using this pmap. This should give up any
2467 * resources held here, including all its MMU tables.
2468 */
2469 void
2470 pmap_collect(pmap)
2471 pmap_t pmap;
2472 {
2473 /* XXX - todo... */
2474 }
2475
2476 /* pmap_create INTERFACE
2477 **
2478 * Create and return a pmap structure.
2479 */
2480 pmap_t
2481 pmap_create()
2482 {
2483 pmap_t pmap;
2484
2485 pmap = pool_get(&pmap_pmap_pool, PR_WAITOK);
2486 pmap_pinit(pmap);
2487 return pmap;
2488 }
2489
2490 /* pmap_pinit INTERNAL
2491 **
2492 * Initialize a pmap structure.
2493 */
2494 void
2495 pmap_pinit(pmap)
2496 pmap_t pmap;
2497 {
2498 memset(pmap, 0, sizeof(struct pmap));
2499 pmap->pm_a_tmgr = NULL;
2500 pmap->pm_a_phys = kernAphys;
2501 pmap->pm_refcount = 1;
2502 simple_lock_init(&pmap->pm_lock);
2503 }
2504
2505 /* pmap_release INTERFACE
2506 **
2507 * Release any resources held by the given pmap.
2508 *
2509 * This is the reverse analog to pmap_pinit. It does not
2510 * necessarily mean that the pmap structure will be deallocated,
2511 * as it is in pmap_destroy.
2512 */
2513 void
2514 pmap_release(pmap)
2515 pmap_t pmap;
2516 {
2517 /*
2518 * As long as the pmap contains no mappings,
2519 * which always should be the case whenever
2520 * this function is called, there really should
2521 * be nothing to do.
2522 */
2523 #ifdef PMAP_DEBUG
2524 if (pmap == pmap_kernel())
2525 panic("pmap_release: kernel pmap");
2526 #endif
2527 /*
2528 * XXX - If this pmap has an A table, give it back.
2529 * The pmap SHOULD be empty by now, and pmap_remove
2530 * should have already given back the A table...
2531 * However, I see: pmap->pm_a_tmgr->at_ecnt == 1
2532 * at this point, which means some mapping was not
2533 * removed when it should have been. -gwr
2534 */
2535 if (pmap->pm_a_tmgr != NULL) {
2536 /* First make sure we are not using it! */
2537 if (kernel_crp.rp_addr == pmap->pm_a_phys) {
2538 kernel_crp.rp_addr = kernAphys;
2539 loadcrp(&kernel_crp);
2540 }
2541 #ifdef PMAP_DEBUG /* XXX - todo! */
2542 /* XXX - Now complain... */
2543 printf("pmap_release: still have table\n");
2544 Debugger();
2545 #endif
2546 free_a_table(pmap->pm_a_tmgr, TRUE);
2547 pmap->pm_a_tmgr = NULL;
2548 pmap->pm_a_phys = kernAphys;
2549 }
2550 }
2551
2552 /* pmap_reference INTERFACE
2553 **
2554 * Increment the reference count of a pmap.
2555 */
2556 void
2557 pmap_reference(pmap)
2558 pmap_t pmap;
2559 {
2560 pmap_lock(pmap);
2561 pmap_add_ref(pmap);
2562 pmap_unlock(pmap);
2563 }
2564
2565 /* pmap_dereference INTERNAL
2566 **
2567 * Decrease the reference count on the given pmap
2568 * by one and return the current count.
2569 */
2570 int
2571 pmap_dereference(pmap)
2572 pmap_t pmap;
2573 {
2574 int rtn;
2575
2576 pmap_lock(pmap);
2577 rtn = pmap_del_ref(pmap);
2578 pmap_unlock(pmap);
2579
2580 return rtn;
2581 }
2582
2583 /* pmap_destroy INTERFACE
2584 **
2585 * Decrement a pmap's reference count and delete
2586 * the pmap if it becomes zero. Will be called
2587 * only after all mappings have been removed.
2588 */
2589 void
2590 pmap_destroy(pmap)
2591 pmap_t pmap;
2592 {
2593 if (pmap_dereference(pmap) == 0) {
2594 pmap_release(pmap);
2595 pool_put(&pmap_pmap_pool, pmap);
2596 }
2597 }
2598
2599 /* pmap_is_referenced INTERFACE
2600 **
2601 * Determine if the given physical page has been
2602 * referenced (read from [or written to.])
2603 */
2604 boolean_t
2605 pmap_is_referenced(pg)
2606 struct vm_page *pg;
2607 {
2608 paddr_t pa = VM_PAGE_TO_PHYS(pg);
2609 pv_t *pv;
2610 int idx;
2611
2612 /*
2613 * Check the flags on the pv head. If they are set,
2614 * return immediately. Otherwise a search must be done.
2615 */
2616
2617 pv = pa2pv(pa);
2618 if (pv->pv_flags & PV_FLAGS_USED)
2619 return TRUE;
2620
2621 /*
2622 * Search through all pv elements pointing
2623 * to this page and query their reference bits
2624 */
2625
2626 for (idx = pv->pv_idx; idx != PVE_EOL; idx = pvebase[idx].pve_next) {
2627 if (MMU_PTE_USED(kernCbase[idx])) {
2628 return TRUE;
2629 }
2630 }
2631 return FALSE;
2632 }
2633
2634 /* pmap_is_modified INTERFACE
2635 **
2636 * Determine if the given physical page has been
2637 * modified (written to.)
2638 */
2639 boolean_t
2640 pmap_is_modified(pg)
2641 struct vm_page *pg;
2642 {
2643 paddr_t pa = VM_PAGE_TO_PHYS(pg);
2644 pv_t *pv;
2645 int idx;
2646
2647 /* see comments in pmap_is_referenced() */
2648 pv = pa2pv(pa);
2649 if (pv->pv_flags & PV_FLAGS_MDFY)
2650 return TRUE;
2651
2652 for (idx = pv->pv_idx;
2653 idx != PVE_EOL;
2654 idx = pvebase[idx].pve_next) {
2655
2656 if (MMU_PTE_MODIFIED(kernCbase[idx])) {
2657 return TRUE;
2658 }
2659 }
2660
2661 return FALSE;
2662 }
2663
2664 /* pmap_page_protect INTERFACE
2665 **
2666 * Applies the given protection to all mappings to the given
2667 * physical page.
2668 */
2669 void
2670 pmap_page_protect(pg, prot)
2671 struct vm_page *pg;
2672 vm_prot_t prot;
2673 {
2674 paddr_t pa = VM_PAGE_TO_PHYS(pg);
2675 pv_t *pv;
2676 int idx;
2677 vaddr_t va;
2678 struct mmu_short_pte_struct *pte;
2679 c_tmgr_t *c_tbl;
2680 pmap_t pmap, curpmap;
2681
2682 curpmap = current_pmap();
2683 pv = pa2pv(pa);
2684
2685 for (idx = pv->pv_idx; idx != PVE_EOL; idx = pvebase[idx].pve_next) {
2686 pte = &kernCbase[idx];
2687 switch (prot) {
2688 case VM_PROT_ALL:
2689 /* do nothing */
2690 break;
2691 case VM_PROT_EXECUTE:
2692 case VM_PROT_READ:
2693 case VM_PROT_READ|VM_PROT_EXECUTE:
2694 /*
2695 * Determine the virtual address mapped by
2696 * the PTE and flush ATC entries if necessary.
2697 */
2698 va = pmap_get_pteinfo(idx, &pmap, &c_tbl);
2699 pte->attr.raw |= MMU_SHORT_PTE_WP;
2700 if (pmap == curpmap || pmap == pmap_kernel())
2701 TBIS(va);
2702 break;
2703 case VM_PROT_NONE:
2704 /* Save the mod/ref bits. */
2705 pv->pv_flags |= pte->attr.raw;
2706 /* Invalidate the PTE. */
2707 pte->attr.raw = MMU_DT_INVALID;
2708
2709 /*
2710 * Update table counts. And flush ATC entries
2711 * if necessary.
2712 */
2713 va = pmap_get_pteinfo(idx, &pmap, &c_tbl);
2714
2715 /*
2716 * If the PTE belongs to the kernel map,
2717 * be sure to flush the page it maps.
2718 */
2719 if (pmap == pmap_kernel()) {
2720 TBIS(va);
2721 } else {
2722 /*
2723 * The PTE belongs to a user map.
2724 * update the entry count in the C
2725 * table to which it belongs and flush
2726 * the ATC if the mapping belongs to
2727 * the current pmap.
2728 */
2729 c_tbl->ct_ecnt--;
2730 if (pmap == curpmap)
2731 TBIS(va);
2732 }
2733 break;
2734 default:
2735 break;
2736 }
2737 }
2738
2739 /*
2740 * If the protection code indicates that all mappings to the page
2741 * be removed, truncate the PV list to zero entries.
2742 */
2743 if (prot == VM_PROT_NONE)
2744 pv->pv_idx = PVE_EOL;
2745 }
2746
2747 /* pmap_get_pteinfo INTERNAL
2748 **
2749 * Called internally to find the pmap and virtual address within that
2750 * map to which the PTE at the given index maps.
2751 *
2752 * Returns the pmap and the PTE's C table manager through the pointer
2753 * arguments provided, and the virtual address by return value.
2755 */
2756 vaddr_t
2757 pmap_get_pteinfo(idx, pmap, tbl)
2758 u_int idx;
2759 pmap_t *pmap;
2760 c_tmgr_t **tbl;
2761 {
2762 vaddr_t va = 0;
2763
2764 /*
2765 * Determine if the PTE is a kernel PTE or a user PTE.
2766 */
2767 if (idx >= NUM_KERN_PTES) {
2768 /*
2769 * The PTE belongs to a user mapping.
2770 */
2771 /* XXX: Would like an inline for this to validate idx... */
2772 *tbl = &Ctmgrbase[(idx - NUM_KERN_PTES) / MMU_C_TBL_SIZE];
2773
2774 *pmap = (*tbl)->ct_pmap;
2775 /*
2776 * To find the va to which the PTE maps, we first take
2777 * the table's base virtual address mapping which is stored
2778 * in ct_va. We then increment this address by a page for
2779 * every slot skipped until we reach the PTE.
2780 */
2781 va = (*tbl)->ct_va;
2782 va += m68k_ptob(idx % MMU_C_TBL_SIZE);
2783 } else {
2784 /*
2785 * The PTE belongs to the kernel map.
2786 */
2787 *pmap = pmap_kernel();
2788
2789 va = m68k_ptob(idx);
2790 va += KERNBASE;
2791 }
2792
2793 return va;
2794 }
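
/*
 * Example (a sketch with hypothetical numbers): if MMU_C_TBL_SIZE were 64
 * and idx == NUM_KERN_PTES + 70, the PTE would live in slot 6 of the second
 * user C table, and the routine would return ct_va + m68k_ptob(6).
 */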
2795
2796 /* pmap_clear_modify INTERFACE
2797 **
2798 * Clear the modification bit on the page at the specified
2799 * physical address.
2800 *
2801 */
2802 boolean_t
2803 pmap_clear_modify(pg)
2804 struct vm_page *pg;
2805 {
2806 paddr_t pa = VM_PAGE_TO_PHYS(pg);
2807 boolean_t rv;
2808
2809 rv = pmap_is_modified(pg);
2810 pmap_clear_pv(pa, PV_FLAGS_MDFY);
2811 return rv;
2812 }
2813
2814 /* pmap_clear_reference INTERFACE
2815 **
2816 * Clear the referenced bit on the page at the specified
2817 * physical address.
2818 */
2819 boolean_t
2820 pmap_clear_reference(pg)
2821 struct vm_page *pg;
2822 {
2823 paddr_t pa = VM_PAGE_TO_PHYS(pg);
2824 boolean_t rv;
2825
2826 rv = pmap_is_referenced(pg);
2827 pmap_clear_pv(pa, PV_FLAGS_USED);
2828 return rv;
2829 }
2830
2831 /* pmap_clear_pv INTERNAL
2832 **
2833 * Clears the specified flag from the specified physical address.
2834 * (Used by pmap_clear_modify() and pmap_clear_reference().)
2835 *
2836 * Flag is one of:
2837 * PV_FLAGS_MDFY - Page modified bit.
2838 * PV_FLAGS_USED - Page used (referenced) bit.
2839 *
2840 * This routine must not only clear the flag on the pv list
2841 * head. It must also clear the bit on every pte in the pv
2842 * list associated with the address.
2843 */
2844 void
2845 pmap_clear_pv(pa, flag)
2846 paddr_t pa;
2847 int flag;
2848 {
2849 pv_t *pv;
2850 int idx;
2851 vaddr_t va;
2852 pmap_t pmap;
2853 mmu_short_pte_t *pte;
2854 c_tmgr_t *c_tbl;
2855
2856 pv = pa2pv(pa);
2857 pv->pv_flags &= ~(flag);
2858 for (idx = pv->pv_idx; idx != PVE_EOL; idx = pvebase[idx].pve_next) {
2859 pte = &kernCbase[idx];
2860 pte->attr.raw &= ~(flag);
2861
2862 /*
2863 * The MC68030 MMU will not set the modified or
2864 * referenced bits on any MMU tables for which it has
2865 * a cached descriptor with its modify bit set. To ensure
2866 * that it will modify these bits on the PTE during the next
2867 * time it is written to or read from, we must flush it from
2868 * the ATC.
2869 *
2870 * Ordinarily it is only necessary to flush the descriptor
2871 * if it is used in the current address space. But since I
2872 * am not sure that there will always be a notion of
2873 * 'the current address space' when this function is called,
2874 * I will skip the test and always flush the address. It
2875 * does no harm.
2876 */
2877
2878 va = pmap_get_pteinfo(idx, &pmap, &c_tbl);
2879 TBIS(va);
2880 }
2881 }
2882
2883 /* pmap_extract INTERFACE
2884 **
2885 * Return the physical address mapped by the virtual address
2886 * in the specified pmap.
2887 *
2888 * Note: this function should also apply an exclusive lock
2889 * on the pmap system for its duration.
2890 */
2891 boolean_t
2892 pmap_extract(pmap, va, pap)
2893 pmap_t pmap;
2894 vaddr_t va;
2895 paddr_t *pap;
2896 {
2897 int a_idx, b_idx, pte_idx;
2898 a_tmgr_t *a_tbl;
2899 b_tmgr_t *b_tbl;
2900 c_tmgr_t *c_tbl;
2901 mmu_short_pte_t *c_pte;
2902
2903 if (pmap == pmap_kernel())
2904 return pmap_extract_kernel(va, pap);
2905
2906 if (pmap_stroll(pmap, va, &a_tbl, &b_tbl, &c_tbl,
2907 &c_pte, &a_idx, &b_idx, &pte_idx) == FALSE)
2908 return FALSE;
2909
2910 if (!MMU_VALID_DT(*c_pte))
2911 return FALSE;
2912
2913 if (pap != NULL)
2914 *pap = MMU_PTE_PA(*c_pte);
2915 return (TRUE);
2916 }
2917
2918 /* pmap_extract_kernel INTERNAL
2919 **
2920 * Extract a translation from the kernel address space.
2921 */
2922 boolean_t
2923 pmap_extract_kernel(va, pap)
2924 vaddr_t va;
2925 paddr_t *pap;
2926 {
2927 mmu_short_pte_t *pte;
2928
2929 pte = &kernCbase[(u_int) m68k_btop(va - KERNBASE)];
2930 if (!MMU_VALID_DT(*pte))
2931 return (FALSE);
2932 if (pap != NULL)
2933 *pap = MMU_PTE_PA(*pte);
2934 return (TRUE);
2935 }
2936
2937 /* pmap_remove_kernel INTERNAL
2938 **
2939 * Remove the mapping of a range of virtual addresses from the kernel map.
2940 * The arguments are already page-aligned.
2941 */
2942 void
2943 pmap_remove_kernel(sva, eva)
2944 vaddr_t sva;
2945 vaddr_t eva;
2946 {
2947 int idx, eidx;
2948
2949 #ifdef PMAP_DEBUG
2950 if ((sva & PGOFSET) || (eva & PGOFSET))
2951 panic("pmap_remove_kernel: alignment");
2952 #endif
2953
2954 idx = m68k_btop(sva - KERNBASE);
2955 eidx = m68k_btop(eva - KERNBASE);
2956
2957 while (idx < eidx) {
2958 pmap_remove_pte(&kernCbase[idx++]);
2959 TBIS(sva);
2960 sva += PAGE_SIZE;
2961 }
2962 }
2963
2964 /* pmap_remove INTERFACE
2965 **
2966 * Remove the mapping of a range of virtual addresses from the given pmap.
2967 *
2968 * If the range contains any wired entries, this function will probably create
2969 * disaster.
2970 */
2971 void
2972 pmap_remove(pmap, start, end)
2973 pmap_t pmap;
2974 vaddr_t start;
2975 vaddr_t end;
2976 {
2977
2978 if (pmap == pmap_kernel()) {
2979 pmap_remove_kernel(start, end);
2980 return;
2981 }
2982
2983 /*
2984 * If the pmap doesn't have an A table of its own, it has no mappings
2985 * that can be removed.
2986 */
2987 if (pmap->pm_a_tmgr == NULL)
2988 return;
2989
2990 /*
2991 * Remove the specified range from the pmap. If the function
2992 * returns true, the operation removed all the valid mappings
2993 * in the pmap and freed its A table. If this happened to the
2994 * currently loaded pmap, the MMU root pointer must be reloaded
2995 * with the default 'kernel' map.
2996 */
2997 if (pmap_remove_a(pmap->pm_a_tmgr, start, end)) {
2998 if (kernel_crp.rp_addr == pmap->pm_a_phys) {
2999 kernel_crp.rp_addr = kernAphys;
3000 loadcrp(&kernel_crp);
3001 /* will do TLB flush below */
3002 }
3003 pmap->pm_a_tmgr = NULL;
3004 pmap->pm_a_phys = kernAphys;
3005 }
3006
3007 /*
3008 * If we just modified the current address space,
3009 * make sure to flush the MMU cache.
3010 *
3011 * XXX - this could be an unnecessarily large flush.
3012 * XXX - Could decide, based on the size of the VA range
3013 * to be removed, whether to flush "by pages" or "all".
3014 */
3015 if (pmap == current_pmap())
3016 TBIAU();
3017 }
3018
3019 /* pmap_remove_a INTERNAL
3020 **
3021 * This is function number one in a set of three that removes a range
3022 * of memory in the most efficient manner by removing the highest possible
3023 * tables from the memory space. This particular function attempts to remove
3024 * as many B tables as it can, delegating the remaining fragmented ranges to
3025 * pmap_remove_b().
3026 *
3027 * If the removal operation results in an empty A table, the function returns
3028 * TRUE.
3029 *
3030 * It's ugly but will do for now.
3031 */
3032 boolean_t
3033 pmap_remove_a(a_tbl, start, end)
3034 a_tmgr_t *a_tbl;
3035 vaddr_t start;
3036 vaddr_t end;
3037 {
3038 boolean_t empty;
3039 int idx;
3040 vaddr_t nstart, nend;
3041 b_tmgr_t *b_tbl;
3042 mmu_long_dte_t *a_dte;
3043 mmu_short_dte_t *b_dte;
3044
3045 /*
3046 * The following code works with what I call a 'granularity
3047 * reduction algorithm'. A range of addresses will always have
3048 * the following properties, which are classified according to
3049 * how the range relates to the size of the current granularity
3050 * - an A table entry:
3051 *
3052 * 1 2 3 4
3053 * -+---+---+---+---+---+---+---+-
3054 * -+---+---+---+---+---+---+---+-
3055 *
3056 * A range will always start on a granularity boundary, illustrated
3057 * by '+' signs in the table above, or it will start at some point
3058 * in between granularity boundaries, as illustrated by point 1.
3059 * The first step in removing a range of addresses is to remove the
3060 * range between 1 and 2, the nearest granularity boundary. This
3061 * job is handled by the section of code governed by the
3062 * 'if (start < nstart)' statement.
3063 *
3064 * A range will always encompass zero or more integral granules,
3065 * illustrated by points 2 and 3. Integral granules are easy to
3066 * remove. The removal of these granules is the second step, and
3067 * is handled by the code block 'if (nstart < nend)'.
3068 *
3069 * Lastly, a range will always end on a granularity boundary,
3070 * illustrated by point 3, or it will fall just beyond one, as in
3071 * point 4. The last step involves removing this range and is handled by
3072 * the code block 'if (nend < end)'.
3073 */
3074 nstart = MMU_ROUND_UP_A(start);
3075 nend = MMU_ROUND_A(end);
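
	/*
	 * In other words (a sketch, where MMU_TIA_RANGE is the span of one
	 * A table entry):
	 *
	 *	[start, nstart)   head fragment  -> pmap_remove_b()
	 *	[nstart, nend)    whole granules -> free_b_table()
	 *	[nend, end)       tail fragment  -> pmap_remove_b()
	 *
	 * Any of the three sub-ranges may be empty.
	 */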
3076
3077 if (start < nstart) {
3078 /*
3079 * This block is executed if the range starts between
3080 * a granularity boundary.
3081 *
3082 * First find the DTE which is responsible for mapping
3083 * the start of the range.
3084 */
3085 idx = MMU_TIA(start);
3086 a_dte = &a_tbl->at_dtbl[idx];
3087
3088 /*
3089 * If the DTE is valid then delegate the removal of the sub
3090 * range to pmap_remove_b(), which can remove addresses at
3091 * a finer granularity.
3092 */
3093 if (MMU_VALID_DT(*a_dte)) {
3094 b_dte = mmu_ptov(a_dte->addr.raw);
3095 b_tbl = mmuB2tmgr(b_dte);
3096
3097 /*
3098 * The sub range to be removed starts at the start
3099 * of the full range we were asked to remove, and ends
3100 * at the lesser of:
3101 * 1. The end of the full range, -or-
3102 * 2. The start of the full range, rounded up to the
3103 * nearest granularity boundary (nstart).
3104 */
3105 if (end < nstart)
3106 empty = pmap_remove_b(b_tbl, start, end);
3107 else
3108 empty = pmap_remove_b(b_tbl, start, nstart);
3109
3110 /*
3111 * If the removal resulted in an empty B table,
3112 * invalidate the DTE that points to it and decrement
3113 * the valid entry count of the A table.
3114 */
3115 if (empty) {
3116 a_dte->attr.raw = MMU_DT_INVALID;
3117 a_tbl->at_ecnt--;
3118 }
3119 }
3120 /*
3121 * If the DTE is invalid, the address range is already non-
3122 * existent and can simply be skipped.
3123 */
3124 }
3125 if (nstart < nend) {
3126 /*
3127 * This block is executed if the range spans a whole number
3128 * multiple of granules (A table entries.)
3129 *
3130 * First find the DTE which is responsible for mapping
3131 * the start of the first granule involved.
3132 */
3133 idx = MMU_TIA(nstart);
3134 a_dte = &a_tbl->at_dtbl[idx];
3135
3136 /*
3137 * Remove entire sub-granules (B tables) one at a time,
3138 * until reaching the end of the range.
3139 */
3140 for (; nstart < nend; a_dte++, nstart += MMU_TIA_RANGE)
3141 if (MMU_VALID_DT(*a_dte)) {
3142 /*
3143 * Find the B table manager for the
3144 * entry and free it.
3145 */
3146 b_dte = mmu_ptov(a_dte->addr.raw);
3147 b_tbl = mmuB2tmgr(b_dte);
3148 free_b_table(b_tbl, TRUE);
3149
3150 /*
3151 * Invalidate the DTE that points to the
3152 * B table and decrement the valid entry
3153 * count of the A table.
3154 */
3155 a_dte->attr.raw = MMU_DT_INVALID;
3156 a_tbl->at_ecnt--;
3157 }
3158 }
3159 if (nend < end) {
3160 /*
3161 * This block is executed if the range ends beyond a
3162 * granularity boundary.
3163 *
3164 * First find the DTE which is responsible for mapping
3165 * the start of the nearest (rounded down) granularity
3166 * boundary.
3167 */
3168 idx = MMU_TIA(nend);
3169 a_dte = &a_tbl->at_dtbl[idx];
3170
3171 /*
3172 * If the DTE is valid then delegate the removal of the sub
3173 * range to pmap_remove_b(), which can remove addresses at
3174 * a finer granularity.
3175 */
3176 if (MMU_VALID_DT(*a_dte)) {
3177 /*
3178 * Find the B table manager for the entry
3179 * and hand it to pmap_remove_b() along with
3180 * the sub range.
3181 */
3182 b_dte = mmu_ptov(a_dte->addr.raw);
3183 b_tbl = mmuB2tmgr(b_dte);
3184
3185 empty = pmap_remove_b(b_tbl, nend, end);
3186
3187 /*
3188 * If the removal resulted in an empty B table,
3189 * invalidate the DTE that points to it and decrement
3190 * the valid entry count of the A table.
3191 */
3192 if (empty) {
3193 a_dte->attr.raw = MMU_DT_INVALID;
3194 a_tbl->at_ecnt--;
3195 }
3196 }
3197 }
3198
3199 /*
3200 * If there are no more entries in the A table, release it
3201 * back to the available pool and return TRUE.
3202 */
3203 if (a_tbl->at_ecnt == 0) {
3204 a_tbl->at_parent = NULL;
3205 TAILQ_REMOVE(&a_pool, a_tbl, at_link);
3206 TAILQ_INSERT_HEAD(&a_pool, a_tbl, at_link);
3207 empty = TRUE;
3208 } else {
3209 empty = FALSE;
3210 }
3211
3212 return empty;
3213 }
3214
3215 /* pmap_remove_b INTERNAL
3216 **
3217 * Remove a range of addresses from an address space, trying to remove entire
3218 * C tables if possible.
3219 *
3220 * If the operation results in an empty B table, the function returns TRUE.
3221 */
3222 boolean_t
3223 pmap_remove_b(b_tbl, start, end)
3224 b_tmgr_t *b_tbl;
3225 vaddr_t start;
3226 vaddr_t end;
3227 {
3228 boolean_t empty;
3229 int idx;
3230 vaddr_t nstart, nend, rstart;
3231 c_tmgr_t *c_tbl;
3232 mmu_short_dte_t *b_dte;
3233 mmu_short_pte_t *c_dte;
3234
3235
3236 nstart = MMU_ROUND_UP_B(start);
3237 nend = MMU_ROUND_B(end);
3238
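	/*
	 * The structure mirrors pmap_remove_a() above: head and tail
	 * fragments are delegated to pmap_remove_c(), while whole C-table
	 * granules in between are freed with free_c_table().
	 */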
3239 if (start < nstart) {
3240 idx = MMU_TIB(start);
3241 b_dte = &b_tbl->bt_dtbl[idx];
3242 if (MMU_VALID_DT(*b_dte)) {
3243 c_dte = mmu_ptov(MMU_DTE_PA(*b_dte));
3244 c_tbl = mmuC2tmgr(c_dte);
3245 if (end < nstart)
3246 empty = pmap_remove_c(c_tbl, start, end);
3247 else
3248 empty = pmap_remove_c(c_tbl, start, nstart);
3249 if (empty) {
3250 b_dte->attr.raw = MMU_DT_INVALID;
3251 b_tbl->bt_ecnt--;
3252 }
3253 }
3254 }
3255 if (nstart < nend) {
3256 idx = MMU_TIB(nstart);
3257 b_dte = &b_tbl->bt_dtbl[idx];
3258 rstart = nstart;
3259 while (rstart < nend) {
3260 if (MMU_VALID_DT(*b_dte)) {
3261 c_dte = mmu_ptov(MMU_DTE_PA(*b_dte));
3262 c_tbl = mmuC2tmgr(c_dte);
3263 free_c_table(c_tbl, TRUE);
3264 b_dte->attr.raw = MMU_DT_INVALID;
3265 b_tbl->bt_ecnt--;
3266 }
3267 b_dte++;
3268 rstart += MMU_TIB_RANGE;
3269 }
3270 }
3271 if (nend < end) {
3272 idx = MMU_TIB(nend);
3273 b_dte = &b_tbl->bt_dtbl[idx];
3274 if (MMU_VALID_DT(*b_dte)) {
3275 c_dte = mmu_ptov(MMU_DTE_PA(*b_dte));
3276 c_tbl = mmuC2tmgr(c_dte);
3277 empty = pmap_remove_c(c_tbl, nend, end);
3278 if (empty) {
3279 b_dte->attr.raw = MMU_DT_INVALID;
3280 b_tbl->bt_ecnt--;
3281 }
3282 }
3283 }
3284
3285 if (b_tbl->bt_ecnt == 0) {
3286 b_tbl->bt_parent = NULL;
3287 TAILQ_REMOVE(&b_pool, b_tbl, bt_link);
3288 TAILQ_INSERT_HEAD(&b_pool, b_tbl, bt_link);
3289 empty = TRUE;
3290 } else {
3291 empty = FALSE;
3292 }
3293
3294 return empty;
3295 }
3296
3297 /* pmap_remove_c INTERNAL
3298 **
3299 * Remove a range of addresses from the given C table.
3300 */
3301 boolean_t
3302 pmap_remove_c(c_tbl, start, end)
3303 c_tmgr_t *c_tbl;
3304 vaddr_t start;
3305 vaddr_t end;
3306 {
3307 boolean_t empty;
3308 int idx;
3309 mmu_short_pte_t *c_pte;
3310
3311 idx = MMU_TIC(start);
3312 c_pte = &c_tbl->ct_dtbl[idx];
3313 for (;start < end; start += MMU_PAGE_SIZE, c_pte++) {
3314 if (MMU_VALID_DT(*c_pte)) {
3315 pmap_remove_pte(c_pte);
3316 c_tbl->ct_ecnt--;
3317 }
3318 }
3319
3320 if (c_tbl->ct_ecnt == 0) {
3321 c_tbl->ct_parent = NULL;
3322 TAILQ_REMOVE(&c_pool, c_tbl, ct_link);
3323 TAILQ_INSERT_HEAD(&c_pool, c_tbl, ct_link);
3324 empty = TRUE;
3325 } else {
3326 empty = FALSE;
3327 }
3328
3329 return empty;
3330 }
3331
3332 /* is_managed INTERNAL
3333 **
3334 * Determine if the given physical address is managed by the PV system.
3335 * Note that this logic assumes that no one will ask for the status of
3336 * addresses which lie in between the memory banks on the 3/80. If they
3337 * do so, it will falsely report that it is managed.
3338 *
3339 * Note: A "managed" address is one that was reported to the VM system as
3340 * a "usable page" during system startup. As such, the VM system expects the
3341 * pmap module to keep accurate track of the usage of those pages.
3342 * Any page not given to the VM system at startup does not exist (as far as
3343 * the VM system is concerned) and is therefore "unmanaged." Examples are
3344 * those pages which belong to the ROM monitor and the memory allocated before
3345 * the VM system was started.
3346 */
3347 boolean_t
3348 is_managed(pa)
3349 paddr_t pa;
3350 {
3351 if (pa >= avail_start && pa < avail_end)
3352 return TRUE;
3353 else
3354 return FALSE;
3355 }
3356
3357 /* pmap_bootstrap_alloc INTERNAL
3358 **
3359 * Used internally for memory allocation at startup when malloc is not
3360 * available. This code will fail once it crosses the first memory
3361 * bank boundary on the 3/80. Hopefully by then however, the VM system
3362 * will be in charge of allocation.
3363 */
3364 void *
3365 pmap_bootstrap_alloc(size)
3366 int size;
3367 {
3368 void *rtn;
3369
3370 #ifdef PMAP_DEBUG
3371 if (bootstrap_alloc_enabled == FALSE) {
3372 mon_printf("pmap_bootstrap_alloc: disabled\n");
3373 sunmon_abort();
3374 }
3375 #endif
3376
3377 rtn = (void *) virtual_avail;
3378 virtual_avail += size;
3379
3380 #ifdef PMAP_DEBUG
3381 if (virtual_avail > virtual_contig_end) {
3382 mon_printf("pmap_bootstrap_alloc: out of mem\n");
3383 sunmon_abort();
3384 }
3385 #endif
3386
3387 return rtn;
3388 }
3389
3390 /* pmap_bootstrap_aalign INTERNAL
3391 **
3392 ** Used to ensure that the next call to pmap_bootstrap_alloc() will
3393 * return a chunk of memory aligned to the specified size.
3394 *
3395 * Note: This function will only support alignment sizes that are powers
3396 * of two.
3397 */
3398 void
3399 pmap_bootstrap_aalign(size)
3400 int size;
3401 {
3402 int off;
3403
3404 off = virtual_avail & (size - 1);
3405 if (off) {
3406 (void) pmap_bootstrap_alloc(size - off);
3407 }
3408 }
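
/*
 * Example (a sketch with hypothetical numbers): with virtual_avail at
 * 0xfe001234 and size 0x1000, off is 0x234, so 0xdcc filler bytes are
 * consumed and the next pmap_bootstrap_alloc() will return an address
 * on a 4KB (0x1000) boundary.
 */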
3409
3410 /* pmap_pa_exists
3411 **
3412 * Used by the /dev/mem driver to see if a given PA is memory
3413 * that can be mapped. (The PA is not in a hole.)
3414 */
3415 int
3416 pmap_pa_exists(pa)
3417 paddr_t pa;
3418 {
3419 int i;
3420
3421 for (i = 0; i < SUN3X_NPHYS_RAM_SEGS; i++) {
3422 if ((pa >= avail_mem[i].pmem_start) &&
3423 (pa < avail_mem[i].pmem_end))
3424 return (1);
3425 if (avail_mem[i].pmem_next == NULL)
3426 break;
3427 }
3428 return (0);
3429 }
3430
3431 /* Called only from locore.s and pmap.c */
3432 void _pmap_switch __P((pmap_t pmap));
3433
3434 /*
3435 * _pmap_switch INTERNAL
3436 *
3437 * This is called by locore.s:cpu_switch() when it is
3438 * switching to a new process. Load new translations.
3439 * Note: done in-line by locore.s unless PMAP_DEBUG
3440 *
3441 * Note that we do NOT allocate a context here, but
3442 * share the "kernel only" context until we really
3443 * need our own context for user-space mappings in
3444 * pmap_enter_user(). [ s/context/mmu A table/ ]
3445 */
3446 void
3447 _pmap_switch(pmap)
3448 pmap_t pmap;
3449 {
3450 u_long rootpa;
3451
3452 /*
3453 * Only do reload/flush if we have to.
3454 * Note that if the old and new process
3455 * were BOTH using the "null" context,
3456 * then this will NOT flush the TLB.
3457 */
3458 rootpa = pmap->pm_a_phys;
3459 if (kernel_crp.rp_addr != rootpa) {
3460 DPRINT(("pmap_activate(%p)\n", pmap));
3461 kernel_crp.rp_addr = rootpa;
3462 loadcrp(&kernel_crp);
3463 TBIAU();
3464 }
3465 }
3466
3467 /*
3468 * Exported version of pmap_activate(). This is called from the
3469 * machine-independent VM code when a process is given a new pmap.
3470 * If (l->l_proc == curproc) do like cpu_switch would do; otherwise just
3471 * take this as notification that the process has a new pmap.
3472 */
3473 void
3474 pmap_activate(l)
3475 struct lwp *l;
3476 {
3477 if (l->l_proc == curproc) {
3478 _pmap_switch(l->l_proc->p_vmspace->vm_map.pmap);
3479 }
3480 }
3481
3482 /*
3483 * pmap_deactivate INTERFACE
3484 **
3485 * This is called to deactivate the specified process's address space.
3486 */
3487 void
3488 pmap_deactivate(l)
3489 struct lwp *l;
3490 {
3491 /* Nothing to do. */
3492 }
3493
3494 /*
3495 * Fill in the sun3x-specific part of the kernel core header
3496 * for dumpsys(). (See machdep.c for the rest.)
3497 */
3498 void
3499 pmap_kcore_hdr(sh)
3500 struct sun3x_kcore_hdr *sh;
3501 {
3502 u_long spa, len;
3503 int i;
3504
3505 sh->pg_frame = MMU_SHORT_PTE_BASEADDR;
3506 sh->pg_valid = MMU_DT_PAGE;
3507 sh->contig_end = virtual_contig_end;
3508 sh->kernCbase = (u_long)kernCbase;
3509 for (i = 0; i < SUN3X_NPHYS_RAM_SEGS; i++) {
3510 spa = avail_mem[i].pmem_start;
3511 spa = m68k_trunc_page(spa);
3512 len = avail_mem[i].pmem_end - spa;
3513 len = m68k_round_page(len);
3514 sh->ram_segs[i].start = spa;
3515 sh->ram_segs[i].size = len;
3516 }
3517 }
3518
3519
3520 /* pmap_virtual_space INTERFACE
3521 **
3522 * Return the current available range of virtual addresses in the
3523 * arguments provided. Only really called once.
3524 */
3525 void
3526 pmap_virtual_space(vstart, vend)
3527 vaddr_t *vstart, *vend;
3528 {
3529 *vstart = virtual_avail;
3530 *vend = virtual_end;
3531 }
3532
3533 /*
3534 * Provide memory to the VM system.
3535 *
3536 * Assume avail_start is always in the
3537 * first segment as pmap_bootstrap does.
3538 */
3539 static void
3540 pmap_page_upload()
3541 {
3542 paddr_t a, b; /* memory range */
3543 int i;
3544
3545 /* Supply the memory in segments. */
3546 for (i = 0; i < SUN3X_NPHYS_RAM_SEGS; i++) {
3547 a = atop(avail_mem[i].pmem_start);
3548 b = atop(avail_mem[i].pmem_end);
3549 if (i == 0)
3550 a = atop(avail_start);
3551 if (avail_mem[i].pmem_end > avail_end)
3552 b = atop(avail_end);
3553
3554 uvm_page_physload(a, b, a, b, VM_FREELIST_DEFAULT);
3555
3556 if (avail_mem[i].pmem_next == NULL)
3557 break;
3558 }
3559 }
3560
3561 /* pmap_count INTERFACE
3562 **
3563 * Return the number of resident (valid) pages in the given pmap.
3564 *
3565 * Note: If this function is handed the kernel map, it will report
3566 * that it has no mappings. Hopefully the VM system won't ask for kernel
3567 * map statistics.
3568 */
3569 segsz_t
3570 pmap_count(pmap, type)
3571 pmap_t pmap;
3572 int type;
3573 {
3574 u_int count;
3575 int a_idx, b_idx;
3576 a_tmgr_t *a_tbl;
3577 b_tmgr_t *b_tbl;
3578 c_tmgr_t *c_tbl;
3579
3580 /*
3581 * If the pmap does not have its own A table manager, it has no
3582 * valid entries.
3583 */
3584 if (pmap->pm_a_tmgr == NULL)
3585 return 0;
3586
3587 a_tbl = pmap->pm_a_tmgr;
3588
3589 count = 0;
3590 for (a_idx = 0; a_idx < MMU_TIA(KERNBASE); a_idx++) {
3591 if (MMU_VALID_DT(a_tbl->at_dtbl[a_idx])) {
3592 b_tbl = mmuB2tmgr(mmu_ptov(a_tbl->at_dtbl[a_idx].addr.raw));
3593 for (b_idx = 0; b_idx < MMU_B_TBL_SIZE; b_idx++) {
3594 if (MMU_VALID_DT(b_tbl->bt_dtbl[b_idx])) {
3595 c_tbl = mmuC2tmgr(
3596 mmu_ptov(MMU_DTE_PA(b_tbl->bt_dtbl[b_idx])));
3597 if (type == 0)
3598 /*
3599 * A resident entry count has been requested.
3600 */
3601 count += c_tbl->ct_ecnt;
3602 else
3603 /*
3604 * A wired entry count has been requested.
3605 */
3606 count += c_tbl->ct_wcnt;
3607 }
3608 }
3609 }
3610 }
3611
3612 return count;
3613 }
3614
3615 /************************ SUN3 COMPATIBILITY ROUTINES ********************
3616 * The following routines are only used by DDB for tricky kernel text *
3617 * operations in db_memrw.c. They are provided for sun3 *
3618 * compatibility. *
3619 *************************************************************************/
3620 /* get_pte INTERNAL
3621 **
3622 * Return the page descriptor that describes the kernel mapping
3623 * of the given virtual address.
3624 */
3625 extern u_long ptest_addr __P((u_long)); /* XXX: locore.s */
3626 u_int
3627 get_pte(va)
3628 vaddr_t va;
3629 {
3630 u_long pte_pa;
3631 mmu_short_pte_t *pte;
3632
3633 /* Get the physical address of the PTE */
3634 pte_pa = ptest_addr(va & ~PGOFSET);
3635
3636 /* Convert to a virtual address... */
3637 pte = (mmu_short_pte_t *) (KERNBASE + pte_pa);
3638
3639 /* Make sure it is in our level-C tables... */
3640 if ((pte < kernCbase) ||
3641 (pte >= &mmuCbase[NUM_USER_PTES]))
3642 return 0;
3643
3644 /* ... and just return its contents. */
3645 return (pte->attr.raw);
3646 }
3647
3648
3649 /* set_pte INTERNAL
3650 **
3651 * Set the page descriptor that describes the kernel mapping
3652 * of the given virtual address.
3653 */
3654 void
3655 set_pte(va, pte)
3656 vaddr_t va;
3657 u_int pte;
3658 {
3659 u_long idx;
3660
3661 if (va < KERNBASE)
3662 return;
3663
3664 idx = (unsigned long) m68k_btop(va - KERNBASE);
3665 kernCbase[idx].attr.raw = pte;
3666 TBIS(va);
3667 }
3668
3669 /*
3670 * Routine: pmap_procwr
3671 *
3672 * Function:
3673 * Synchronize caches corresponding to [addr, addr+len) in p.
3674 */
3675 void
3676 pmap_procwr(p, va, len)
3677 struct proc *p;
3678 vaddr_t va;
3679 size_t len;
3680 {
3681 (void)cachectl1(0x80000004, va, len, p);
3682 }
3683
3684
3685 #ifdef PMAP_DEBUG
3686 /************************** DEBUGGING ROUTINES **************************
3687 * The following routines are meant to be an aid to debugging the pmap *
3688 * system. They are callable from the DDB command line and should be *
3689 * prepared to be handed unstable or incomplete states of the system. *
3690 ************************************************************************/
3691
3692 /* pv_list
3693 **
3694 * List all pages found on the pv list for the given physical page.
3695 * To avoid endless loops, the listing will stop at the end of the list
3696 * or after 'n' entries - whichever comes first.
3697 */
3698 void
3699 pv_list(pa, n)
3700 paddr_t pa;
3701 int n;
3702 {
3703 int idx;
3704 vaddr_t va;
3705 pv_t *pv;
3706 c_tmgr_t *c_tbl;
3707 pmap_t pmap;
3708
3709 pv = pa2pv(pa);
3710 idx = pv->pv_idx;
3711 for (; idx != PVE_EOL && n > 0; idx = pvebase[idx].pve_next, n--) {
3712 va = pmap_get_pteinfo(idx, &pmap, &c_tbl);
3713 printf("idx %d, pmap 0x%x, va 0x%x, c_tbl %x\n",
3714 idx, (u_int) pmap, (u_int) va, (u_int) c_tbl);
3715 }
3716 }
3717 #endif /* PMAP_DEBUG */
3718
3719 #ifdef NOT_YET
3720 /* and maybe not ever */
3721 /************************** LOW-LEVEL ROUTINES **************************
3722 * These routines will eventually be re-written into assembly and placed*
3723 * in locore.s. They are here now as stubs so that the pmap module can *
3724 * be linked as a standalone user program for testing. *
3725 ************************************************************************/
3726 /* flush_atc_crp INTERNAL
3727 **
3728 * Flush all page descriptors derived from the given CPU Root Pointer
3729 * (CRP), or 'A' table as it is known here, from the 68851's automatic
3730 * cache.
3731 */
3732 void
3733 flush_atc_crp(a_tbl)
3734 {
3735 mmu_long_rp_t rp;
3736
3737 /* Create a temporary root table pointer that points to the
3738 * given A table.
3739 */
3740 rp.attr.raw = ~MMU_LONG_RP_LU;
3741 rp.addr.raw = (unsigned int) a_tbl;
3742
3743 mmu_pflushr(&rp);
3744 /* mmu_pflushr:
3745 * movel sp(4)@,a0
3746 * pflushr a0@
3747 * rts
3748 */
3749 }
3750 #endif /* NOT_YET */
3751