/*	$NetBSD: pmap.c,v 1.88 2005/06/03 15:09:46 tsutsui Exp $	*/

/*-
 * Copyright (c) 1996, 1997 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jeremy Cooper.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * XXX These comments aren't quite accurate. Need to change.
 * The sun3x uses the MC68851 Memory Management Unit, which is built
 * into the CPU. The 68851 maps virtual to physical addresses using
 * a multi-level table lookup, which is stored in the very memory that
 * it maps. The number of levels of lookup is configurable from one
 * to four. In this implementation, we use three, named 'A' through 'C'.
 *
 * The MMU translates virtual addresses into physical addresses by
 * traversing these tables in a process called a 'table walk'. The most
 * significant 7 bits of the Virtual Address ('VA') being translated are
 * used as an index into the level A table, whose base in physical memory
 * is stored in a special MMU register, the 'CPU Root Pointer' or CRP. The
 * address found at that index in the A table is used as the base
 * address for the next table, the B table. The next six bits of the VA are
 * used as an index into the B table, which in turn gives the base address
 * of the third and final C table.
 *
 * The next six bits of the VA are used as an index into the C table to
 * locate a Page Table Entry (PTE). The PTE is a physical address in memory
 * to which the remaining 13 bits of the VA are added, producing the
 * mapped physical address.
 *
 * To map the entire memory space in this manner would require 2114296 bytes
 * of page tables per process - quite expensive. Instead we will
 * allocate a fixed but considerably smaller space for the page tables at
 * the time the VM system is initialized. When the pmap code is asked by
 * the kernel to map a VA to a PA, it allocates tables as needed from this
 * pool. When there are no more tables in the pool, tables are stolen
 * from the oldest mapped entries in the tree. This is only possible
 * because all memory mappings are stored in the kernel memory map
 * structures, independent of the pmap structures. A VA which references
 * one of these invalidated maps will cause a page fault. The kernel
 * will determine that the page fault was caused by a task using a valid
 * VA, but for some reason (which does not concern it), that address was
 * not mapped. It will ask the pmap code to re-map the entry and then
 * it will resume executing the faulting task.
 *
 * In this manner the most efficient use of the page table space is
 * achieved. Tasks which do not execute often will have their tables
 * stolen and reused by tasks which execute more frequently. The best
 * size for the page table pool will probably be determined by
 * experimentation.
 *
 * You read all of the comments so far. Good for you.
 * Now go play!
 */
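
/*
 * A minimal sketch (not compiled) of the table walk described above,
 * expressed with this file's own index macros. The 7/6/6/13 bit split
 * is taken from the comment above and is an assumption here; the MMU,
 * of course, performs this walk in hardware.
 */
#if 0
static paddr_t
example_table_walk(mmu_long_dte_t *a_tbl, vaddr_t va)
{
	mmu_short_dte_t *b_tbl;
	mmu_short_pte_t *c_tbl;

	/* The top 7 bits of the VA index the A table... */
	b_tbl = mmu_ptov(a_tbl[MMU_TIA(va)].addr.raw);
	/* ...the next 6 bits index the B table... */
	c_tbl = mmu_ptov(MMU_DTE_PA(b_tbl[MMU_TIB(va)]));
	/*
	 * ...the next 6 bits select a PTE in the C table, and the
	 * remaining 13 bits are the offset within the page.
	 */
	return MMU_PTE_PA(c_tbl[MMU_TIC(va)]) + (va & ~MMU_PAGE_MASK);
}
#endif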

/*** A Note About the 68851 Address Translation Cache
 * The MC68851 has a 64 entry cache, called the Address Translation Cache
 * or 'ATC'. This cache stores the most recently used page descriptors
 * accessed by the MMU when it does translations. Using a marker called a
 * 'task alias' the MMU can store the descriptors from 8 different table
 * spaces concurrently. The task alias is associated with the base
 * address of the level A table of that address space. When an address
 * space is currently active (the CRP currently points to its A table)
 * the only cached descriptors that will be obeyed are ones which have a
 * matching task alias of the current space associated with them.
 *
 * Since the cache is always consulted before any table lookups are done,
 * it is important that it accurately reflect the state of the MMU tables.
 * Whenever a change has been made to a table that has been loaded into
 * the MMU, the code must be sure to flush any cached entries that are
 * affected by the change. These instances are documented in the code at
 * various points.
 */
/*** A Note About the Note About the 68851 Address Translation Cache
 * 4 months into this code I discovered that the sun3x does not have
 * an MC68851 chip. Instead, it has a version of this MMU that is part
 * of the 68030 CPU.
 * Although it behaves very similarly to the 68851, it only has 1 task
 * alias and a 22 entry cache. So sadly (or happily), the first paragraph
 * of the previous note does not apply to the sun3x pmap.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.88 2005/06/03 15:09:46 tsutsui Exp $");

#include "opt_ddb.h"
#include "opt_pmap_debug.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/user.h>
#include <sys/queue.h>
#include <sys/kcore.h>

#include <uvm/uvm.h>

#include <machine/cpu.h>
#include <machine/kcore.h>
#include <machine/mon.h>
#include <machine/pmap.h>
#include <machine/pte.h>
#include <machine/vmparam.h>
#include <m68k/cacheops.h>

#include <sun3/sun3/cache.h>
#include <sun3/sun3/machdep.h>

#include "pmap_pvt.h"

/* XXX - What headers declare these? */
extern struct pcb *curpcb;
extern int physmem;

/* Defined in locore.s */
extern char kernel_text[];

/* Defined by the linker */
extern char etext[], edata[], end[];
extern char *esym;	/* DDB */

/*************************** DEBUGGING DEFINITIONS ***********************
 * Macros, preprocessor defines and variables used in debugging can make *
 * code hard to read. Anything used exclusively for debugging purposes   *
 * is defined here to avoid having such mess scattered around the file.  *
 *************************************************************************/
#ifdef	PMAP_DEBUG
/*
 * To aid the debugging process, macros should be expanded into smaller steps
 * that accomplish the same goal, yet provide convenient places for placing
 * breakpoints. When this code is compiled with PMAP_DEBUG mode defined, the
 * 'INLINE' keyword is defined to an empty string. This way, any function
 * defined to be a 'static INLINE' will become 'outlined' and compiled as
 * a separate function, which is much easier to debug.
 */
#define	INLINE	/* nothing */

/*
 * It is sometimes convenient to watch the activity of a particular table
 * in the system. The following variables are used for that purpose.
 */
a_tmgr_t *pmap_watch_atbl = 0;
b_tmgr_t *pmap_watch_btbl = 0;
c_tmgr_t *pmap_watch_ctbl = 0;

int pmap_debug = 0;
#define	DPRINT(args) if (pmap_debug) printf args

#else	/********** Stuff below is defined if NOT debugging **************/

#define	INLINE	inline
#define	DPRINT(args)	/* nada */

#endif	/* PMAP_DEBUG */
/*********************** END OF DEBUGGING DEFINITIONS ********************/

/*** Management Structure - Memory Layout
 * For every MMU table in the sun3x pmap system there must be a way to
 * manage it; we must know which process is using it, what other tables
 * depend on it, and whether or not it contains any locked pages. This
 * is solved by the creation of 'table management' or 'tmgr'
 * structures. One for each MMU table in the system.
 *
 *		MAP OF MEMORY USED BY THE PMAP SYSTEM
 *
 *		towards lower memory
 * kernAbase -> +-------------------------------------------------------+
 *		| Kernel MMU A level table                              |
 * kernBbase -> +-------------------------------------------------------+
 *		| Kernel MMU B level tables                             |
 * kernCbase -> +-------------------------------------------------------+
 *		|                                                       |
 *		| Kernel MMU C level tables                             |
 *		|                                                       |
 * mmuCbase  -> +-------------------------------------------------------+
 *		| User MMU C level tables                               |
 * mmuAbase  -> +-------------------------------------------------------+
 *		|                                                       |
 *		| User MMU A level tables                               |
 *		|                                                       |
 * mmuBbase  -> +-------------------------------------------------------+
 *		| User MMU B level tables                               |
 * tmgrAbase -> +-------------------------------------------------------+
 *		| TMGR A level table structures                         |
 * tmgrBbase -> +-------------------------------------------------------+
 *		| TMGR B level table structures                         |
 * tmgrCbase -> +-------------------------------------------------------+
 *		| TMGR C level table structures                         |
 * pvbase    -> +-------------------------------------------------------+
 *		| Physical to Virtual mapping table (list heads)        |
 * pvebase   -> +-------------------------------------------------------+
 *		| Physical to Virtual mapping table (list elements)     |
 *		|                                                       |
 *		+-------------------------------------------------------+
 *		towards higher memory
 *
 * For every A table in the MMU A area, there will be a corresponding
 * a_tmgr structure in the TMGR A area. The same will be true for
 * the B and C tables. This arrangement will make it easy to find the
 * controlling tmgr structure for any table in the system by use of
 * (relatively) simple macros.
 */

/*
 * Global variables for storing the base addresses for the areas
 * labeled above.
 */
static vaddr_t		kernAphys;
static mmu_long_dte_t	*kernAbase;
static mmu_short_dte_t	*kernBbase;
static mmu_short_pte_t	*kernCbase;
static mmu_short_pte_t	*mmuCbase;
static mmu_short_dte_t	*mmuBbase;
static mmu_long_dte_t	*mmuAbase;
static a_tmgr_t		*Atmgrbase;
static b_tmgr_t		*Btmgrbase;
static c_tmgr_t		*Ctmgrbase;
static pv_t		*pvbase;
static pv_elem_t	*pvebase;
struct pmap		kernel_pmap;

/*
 * This holds the CRP currently loaded into the MMU.
 */
struct mmu_rootptr kernel_crp;

/*
 * Just all around global variables.
 */
static TAILQ_HEAD(a_pool_head_struct, a_tmgr_struct) a_pool;
static TAILQ_HEAD(b_pool_head_struct, b_tmgr_struct) b_pool;
static TAILQ_HEAD(c_pool_head_struct, c_tmgr_struct) c_pool;


/*
 * Flags used to mark the safety/availability of certain operations or
 * resources.
 */
static boolean_t bootstrap_alloc_enabled = FALSE; /* Safe to use pmap_bootstrap_alloc(). */
int tmp_vpages_inuse;	/* Temporary virtual pages are in use */

/*
 * XXX: For now, retain the traditional variables that were
 * used in the old pmap/vm interface (without NONCONTIG).
 */
/* Kernel virtual address space available: */
vaddr_t	virtual_avail, virtual_end;
/* Physical address space available: */
paddr_t	avail_start, avail_end;

/* This keeps track of the end of the contiguously mapped range. */
vaddr_t virtual_contig_end;

/* Physical address used by pmap_next_page() */
paddr_t avail_next;

/* These are used by pmap_copy_page(), etc. */
vaddr_t tmp_vpages[2];

/* memory pool for pmap structures */
struct pool	pmap_pmap_pool;

/*
 * The 3/80 is the only member of the sun3x family that has non-contiguous
 * physical memory. Memory is divided into 4 banks which are physically
 * locatable on the system board. Although the size of these banks varies
 * with the size of memory they contain, their base addresses are
 * permanently fixed. The following structure, which describes these
 * banks, is initialized by pmap_bootstrap() after it reads from a similar
 * structure provided by the ROM Monitor.
 *
 * For the other machines in the sun3x architecture which do have contiguous
 * RAM, this list will have only one entry, which will describe the entire
 * range of available memory.
 */
struct pmap_physmem_struct avail_mem[SUN3X_NPHYS_RAM_SEGS];
u_int total_phys_mem;

/*************************************************************************/

/*
 * XXX - Should "tune" these based on statistics.
 *
 * My first guess about the relative numbers of these needed is
 * based on the fact that a "typical" process will have several
 * pages mapped at low virtual addresses (text, data, bss), then
 * some mapped shared libraries, and then some stack pages mapped
 * near the high end of the VA space. Each process can use only
 * one A table, and most will use only two B tables (maybe three)
 * and probably about four C tables. Therefore, the first guess
 * at the relative numbers of these needed is 1:2:4 -gwr
 *
 * The number of C tables needed is closely related to the amount
 * of physical memory available plus a certain amount attributable
 * to the use of double mappings. With a few simulation statistics
 * we can find a reasonably good estimation of this unknown value.
 * Armed with that and the above ratios, we have a good idea of what
 * is needed at each level. -j
 *
 * Note: It is not physical memory size, but the total mapped
 * virtual space required by the combined working sets of all the
 * currently _runnable_ processes. (Sleeping ones don't count.)
 * The amount of physical memory should be irrelevant. -gwr
 */
#ifdef	FIXED_NTABLES
#define	NUM_A_TABLES	16
#define	NUM_B_TABLES	32
#define	NUM_C_TABLES	64
#else
unsigned int	NUM_A_TABLES, NUM_B_TABLES, NUM_C_TABLES;
#endif	/* FIXED_NTABLES */

/*
 * This determines our total virtual mapping capacity.
 * Yes, it is a FIXED value so we can pre-allocate.
 */
#define	NUM_USER_PTES	(NUM_C_TABLES * MMU_C_TBL_SIZE)

/*
 * The size of the Kernel Virtual Address Space (KVAS)
 * for purposes of MMU table allocation is -KERNBASE
 * (length from KERNBASE to 0xFFFFffff)
 */
#define	KVAS_SIZE	(-KERNBASE)

/* Numbers of kernel MMU tables to support KVAS_SIZE. */
#define	KERN_B_TABLES	(KVAS_SIZE >> MMU_TIA_SHIFT)
#define	KERN_C_TABLES	(KVAS_SIZE >> MMU_TIB_SHIFT)
#define	NUM_KERN_PTES	(KVAS_SIZE >> MMU_TIC_SHIFT)
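
/*
 * A worked example of the above (illustration only; none of these
 * constants are asserted here, see <machine/vmparam.h> and
 * <machine/pte.h> for the real values): if KERNBASE were 0xF8000000
 * with TIA/TIB/TIC shifts of 25/19/13, then KVAS_SIZE = 0x08000000
 * (128MB), giving 128MB >> 25 = 4 kernel B tables,
 * 128MB >> 19 = 256 kernel C tables, and 128MB >> 13 = 16384 kernel
 * PTEs.
 */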

/*************************** MISCELLANEOUS MACROS *************************/
#define pmap_lock(pmap) simple_lock(&pmap->pm_lock)
#define pmap_unlock(pmap) simple_unlock(&pmap->pm_lock)
#define pmap_add_ref(pmap) ++pmap->pm_refcount
#define pmap_del_ref(pmap) --pmap->pm_refcount
#define pmap_refcount(pmap) pmap->pm_refcount

void *pmap_bootstrap_alloc(int);

static INLINE void *mmu_ptov(paddr_t);
static INLINE paddr_t mmu_vtop(void *);

#if	0
static INLINE a_tmgr_t *mmuA2tmgr(mmu_long_dte_t *);
#endif /* 0 */
static INLINE b_tmgr_t *mmuB2tmgr(mmu_short_dte_t *);
static INLINE c_tmgr_t *mmuC2tmgr(mmu_short_pte_t *);

static INLINE pv_t *pa2pv(paddr_t);
static INLINE int pteidx(mmu_short_pte_t *);
static INLINE pmap_t current_pmap(void);

/*
 * We can always convert between virtual and physical addresses
 * for anything in the range [KERNBASE ... avail_start] because
 * that range is GUARANTEED to be mapped linearly.
 * We rely heavily upon this feature!
 */
static INLINE void *
mmu_ptov(paddr_t pa)
{
	vaddr_t va;

	va = (pa + KERNBASE);
#ifdef	PMAP_DEBUG
	if ((va < KERNBASE) || (va >= virtual_contig_end))
		panic("mmu_ptov");
#endif
	return ((void *)va);
}

static INLINE paddr_t
mmu_vtop(void *vva)
{
	vaddr_t va;

	va = (vaddr_t)vva;
#ifdef	PMAP_DEBUG
	if ((va < KERNBASE) || (va >= virtual_contig_end))
		panic("mmu_vtop");
#endif
	return (va - KERNBASE);
}

/*
 * These macros map MMU tables to their corresponding manager structures.
 * They are needed quite often because many of the pointers in the pmap
 * system reference MMU tables and not the structures that control them.
 * There needs to be a way to find one when given the other and these
 * macros do so by taking advantage of the memory layout described above.
 * Here's a quick step through the first macro, mmuA2tmgr():
 *
 * 1) find the offset of the given MMU A table from the base of its table
 *    pool (table - mmuAbase).
 * 2) convert this offset into a table index by dividing it by the
 *    size of one MMU 'A' table. (sizeof(mmu_long_dte_t) * MMU_A_TBL_SIZE)
 * 3) use this index to select the corresponding 'A' table manager
 *    structure from the 'A' table manager pool (Atmgrbase[index]).
 */
/* This function is not currently used. */
#if	0
static INLINE a_tmgr_t *
mmuA2tmgr(mmu_long_dte_t *mmuAtbl)
{
	int idx;

	/* Which table is this in? */
	idx = (mmuAtbl - mmuAbase) / MMU_A_TBL_SIZE;
#ifdef	PMAP_DEBUG
	if ((idx < 0) || (idx >= NUM_A_TABLES))
		panic("mmuA2tmgr");
#endif
	return (&Atmgrbase[idx]);
}
#endif	/* 0 */

static INLINE b_tmgr_t *
mmuB2tmgr(mmu_short_dte_t *mmuBtbl)
{
	int idx;

	/* Which table is this in? */
	idx = (mmuBtbl - mmuBbase) / MMU_B_TBL_SIZE;
#ifdef	PMAP_DEBUG
	if ((idx < 0) || (idx >= NUM_B_TABLES))
		panic("mmuB2tmgr");
#endif
	return (&Btmgrbase[idx]);
}

/* mmuC2tmgr			INTERNAL
 **
 * Given a pte known to belong to a C table, return the address of
 * that table's management structure.
 */
static INLINE c_tmgr_t *
mmuC2tmgr(mmu_short_pte_t *mmuCtbl)
{
	int idx;

	/* Which table is this in? */
	idx = (mmuCtbl - mmuCbase) / MMU_C_TBL_SIZE;
#ifdef	PMAP_DEBUG
	if ((idx < 0) || (idx >= NUM_C_TABLES))
		panic("mmuC2tmgr");
#endif
	return (&Ctmgrbase[idx]);
}

/* This is now a function call below.
 * #define pa2pv(pa) \
 *	(&pvbase[(unsigned long)\
 *		m68k_btop(pa)\
 *	])
 */

/* pa2pv			INTERNAL
 **
 * Return the pv_list_head element which manages the given physical
 * address.
 */
static INLINE pv_t *
pa2pv(paddr_t pa)
{
	struct pmap_physmem_struct *bank;
	int idx;

	bank = &avail_mem[0];
	while (pa >= bank->pmem_end)
		bank = bank->pmem_next;

	pa -= bank->pmem_start;
	idx = bank->pmem_pvbase + m68k_btop(pa);
#ifdef	PMAP_DEBUG
	if ((idx < 0) || (idx >= physmem))
		panic("pa2pv");
#endif
	return &pvbase[idx];
}

/* pteidx			INTERNAL
 **
 * Return the index of the given PTE within the entire fixed table of
 * PTEs.
 */
static INLINE int
pteidx(mmu_short_pte_t *pte)
{
	return (pte - kernCbase);
}

/*
 * This just offers a place to put some debugging checks,
 * and reduces the number of places "curlwp" appears...
 */
static INLINE pmap_t
current_pmap(void)
{
	struct vmspace *vm;
	struct vm_map *map;
	pmap_t pmap;

	if (curlwp == NULL)
		pmap = &kernel_pmap;
	else {
		vm = curproc->p_vmspace;
		map = &vm->vm_map;
		pmap = vm_map_pmap(map);
	}

	return (pmap);
}


/*************************** FUNCTION DEFINITIONS ************************
 * These appear here merely for the compiler to enforce type checking on *
 * all function calls.                                                   *
 *************************************************************************/

/** Internal functions
 ** Most functions used only within this module are defined in
 **   pmap_pvt.h (why not here if used only here?)
 **/
static void pmap_page_upload(void);

/** Interface functions
 ** - functions required by the Mach VM Pmap interface, with MACHINE_CONTIG
 **   defined.
 **/
void pmap_pinit(pmap_t);
void pmap_release(pmap_t);

/********************************** CODE ********************************
 * Functions that are called from other parts of the kernel are labeled *
 * as 'INTERFACE' functions. Functions that are only called from        *
 * within the pmap module are labeled as 'INTERNAL' functions.          *
 * Functions that are internal, but are not (currently) used at all are *
 * labeled 'INTERNAL_X'.                                                *
 ************************************************************************/

/* pmap_bootstrap			INTERNAL
 **
 * Initializes the pmap system. Called at boot time from
 * locore2.c:_vm_init()
 *
 * Reminder: having a pmap_bootstrap_alloc() and also having the VM
 *           system implement pmap_steal_memory() is redundant.
 *           Don't release this code without removing one or the other!
 */
void
pmap_bootstrap(vaddr_t nextva)
{
	struct physmemory *membank;
	struct pmap_physmem_struct *pmap_membank;
	vaddr_t va, eva;
	paddr_t pa;
	int b, c, i, j;	/* running table counts */
	int size, resvmem;

	/*
	 * This function is called by __bootstrap after it has
	 * determined the type of machine and made the appropriate
	 * patches to the ROM vectors (XXX- I don't quite know what I meant
	 * by that.) It allocates and sets up enough of the pmap system
	 * to manage the kernel's address space.
	 */

	/*
	 * Determine the range of kernel virtual and physical
	 * space available. Note that we ABSOLUTELY DEPEND on
	 * the fact that the first bank of memory (4MB) is
	 * mapped linearly to KERNBASE (which we guaranteed in
	 * the first instructions of locore.s).
	 * That is plenty for our bootstrap work.
	 */
	virtual_avail = m68k_round_page(nextva);
	virtual_contig_end = KERNBASE + 0x400000; /* +4MB */
	virtual_end = VM_MAX_KERNEL_ADDRESS;
	/* Don't need avail_start til later. */

	/* We may now call pmap_bootstrap_alloc(). */
	bootstrap_alloc_enabled = TRUE;

	/*
	 * This is a somewhat unwrapped loop to deal with
	 * copying the PROM's 'physmem' banks into the pmap's
	 * banks. The following is always assumed:
	 * 1. There is always at least one bank of memory.
	 * 2. There is always a last bank of memory, and its
	 *    pmem_next member must be set to NULL.
	 */
	membank = romVectorPtr->v_physmemory;
	pmap_membank = avail_mem;
	total_phys_mem = 0;

	for (;;) { /* break on !membank */
		pmap_membank->pmem_start = membank->address;
		pmap_membank->pmem_end = membank->address + membank->size;
		total_phys_mem += membank->size;
		membank = membank->next;
		if (!membank)
			break;
		/* This silly syntax arises because pmap_membank
		 * is really a pre-allocated array, but it is put into
		 * use as a linked list.
		 */
		pmap_membank->pmem_next = pmap_membank + 1;
		pmap_membank = pmap_membank->pmem_next;
	}
	/* This is the last element. */
	pmap_membank->pmem_next = NULL;

	/*
	 * Note: total_phys_mem, physmem represent
	 * actual physical memory, including that
	 * reserved for the PROM monitor.
	 */
	physmem = btoc(total_phys_mem);

	/*
	 * Avail_end is set to the first byte of physical memory
	 * after the end of the last bank. We use this only to
	 * determine if a physical address is "managed" memory.
	 * This address range should be reduced to prevent the
	 * physical pages needed by the PROM monitor from being used
	 * in the VM system.
	 */
	resvmem = total_phys_mem - *(romVectorPtr->memoryAvail);
	resvmem = m68k_round_page(resvmem);
	avail_end = pmap_membank->pmem_end - resvmem;

	/*
	 * First allocate enough kernel MMU tables to map all
	 * of kernel virtual space from KERNBASE to 0xFFFFFFFF.
	 * Note: All must be aligned on 256 byte boundaries.
	 * Start with the level-A table (one of those).
	 */
	size = sizeof(mmu_long_dte_t) * MMU_A_TBL_SIZE;
	kernAbase = pmap_bootstrap_alloc(size);
	memset(kernAbase, 0, size);

	/* Now the level-B kernel tables... */
	size = sizeof(mmu_short_dte_t) * MMU_B_TBL_SIZE * KERN_B_TABLES;
	kernBbase = pmap_bootstrap_alloc(size);
	memset(kernBbase, 0, size);

	/* Now the level-C kernel tables... */
	size = sizeof(mmu_short_pte_t) * MMU_C_TBL_SIZE * KERN_C_TABLES;
	kernCbase = pmap_bootstrap_alloc(size);
	memset(kernCbase, 0, size);
	/*
	 * Note: In order for the PV system to work correctly, the kernel
	 * and user-level C tables must be allocated contiguously.
	 * Nothing should be allocated between here and the allocation of
	 * mmuCbase below. XXX: Should do this as one allocation, and
	 * then compute a pointer for mmuCbase instead of this...
	 *
	 * Allocate user MMU tables.
	 * These must be contiguous with the preceding.
	 */

#ifndef	FIXED_NTABLES
	/*
	 * The number of user-level C tables that should be allocated is
	 * related to the size of physical memory. In general, there should
	 * be enough tables to map four times the amount of available RAM.
	 * The extra amount is needed because some table space is wasted by
	 * fragmentation.
	 */
	NUM_C_TABLES = (total_phys_mem * 4) / (MMU_C_TBL_SIZE * MMU_PAGE_SIZE);
	NUM_B_TABLES = NUM_C_TABLES / 2;
	NUM_A_TABLES = NUM_B_TABLES / 2;
#endif	/* !FIXED_NTABLES */
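
	/*
	 * A worked example of the sizing heuristic above, assuming
	 * (for illustration only; the real values live in the MMU
	 * headers) 8KB pages for MMU_PAGE_SIZE and 64-entry C tables
	 * for MMU_C_TBL_SIZE: with 16MB of RAM,
	 * NUM_C_TABLES = (16MB * 4) / (64 * 8KB) = 128, giving
	 * NUM_B_TABLES = 64 and NUM_A_TABLES = 32.
	 */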

	size = sizeof(mmu_short_pte_t) * MMU_C_TBL_SIZE * NUM_C_TABLES;
	mmuCbase = pmap_bootstrap_alloc(size);

	size = sizeof(mmu_short_dte_t) * MMU_B_TBL_SIZE * NUM_B_TABLES;
	mmuBbase = pmap_bootstrap_alloc(size);

	size = sizeof(mmu_long_dte_t) * MMU_A_TBL_SIZE * NUM_A_TABLES;
	mmuAbase = pmap_bootstrap_alloc(size);

	/*
	 * Fill in the never-changing part of the kernel tables.
	 * For simplicity, the kernel's mappings will be editable as a
	 * flat array of page table entries at kernCbase. The
	 * higher level 'A' and 'B' tables must be initialized to point
	 * to this lower one.
	 */
	b = c = 0;

	/*
	 * Invalidate all mappings below KERNBASE in the A table.
	 * This area has already been zeroed out, but it is good
	 * practice to explicitly show that we are interpreting
	 * it as a list of A table descriptors.
	 */
	for (i = 0; i < MMU_TIA(KERNBASE); i++) {
		kernAbase[i].addr.raw = 0;
	}

	/*
	 * Set up the kernel A and B tables so that they will reference the
	 * correct spots in the contiguous table of PTEs allocated for the
	 * kernel's virtual memory space.
	 */
	for (i = MMU_TIA(KERNBASE); i < MMU_A_TBL_SIZE; i++) {
		kernAbase[i].attr.raw =
		    MMU_LONG_DTE_LU | MMU_LONG_DTE_SUPV | MMU_DT_SHORT;
		kernAbase[i].addr.raw = mmu_vtop(&kernBbase[b]);

		for (j = 0; j < MMU_B_TBL_SIZE; j++) {
			kernBbase[b + j].attr.raw = mmu_vtop(&kernCbase[c])
			    | MMU_DT_SHORT;
			c += MMU_C_TBL_SIZE;
		}
		b += MMU_B_TBL_SIZE;
	}

	pmap_alloc_usermmu();	/* Allocate user MMU tables.        */
	pmap_alloc_usertmgr();	/* Allocate user MMU table managers.*/
	pmap_alloc_pv();	/* Allocate physical->virtual map.  */

	/*
	 * We are now done with pmap_bootstrap_alloc(). Round up
	 * `virtual_avail' to the nearest page, and set the flag
	 * to prevent use of pmap_bootstrap_alloc() hereafter.
	 */
	pmap_bootstrap_aalign(PAGE_SIZE);
	bootstrap_alloc_enabled = FALSE;

	/*
	 * Now that we are done with pmap_bootstrap_alloc(), we
	 * must save the virtual and physical addresses of the
	 * end of the linearly mapped range, which are stored in
	 * virtual_contig_end and avail_start, respectively.
	 * These variables will never change after this point.
	 */
	virtual_contig_end = virtual_avail;
	avail_start = virtual_avail - KERNBASE;

	/*
	 * `avail_next' is a running pointer used by pmap_next_page() to
	 * keep track of the next available physical page to be handed
	 * to the VM system during its initialization, in which it
	 * asks for physical pages, one at a time.
	 */
	avail_next = avail_start;

	/*
	 * Now allocate some virtual addresses, but not the physical pages
	 * behind them. Note that virtual_avail is already page-aligned.
	 *
	 * tmp_vpages[] is an array of two virtual pages used for temporary
	 * kernel mappings in the pmap module to facilitate various physical
	 * address-oriented operations.
	 */
	tmp_vpages[0] = virtual_avail;
	virtual_avail += PAGE_SIZE;
	tmp_vpages[1] = virtual_avail;
	virtual_avail += PAGE_SIZE;

	/** Initialize the PV system **/
	pmap_init_pv();

	/*
	 * Fill in the kernel_pmap structure and kernel_crp.
	 */
	kernAphys = mmu_vtop(kernAbase);
	kernel_pmap.pm_a_tmgr = NULL;
	kernel_pmap.pm_a_phys = kernAphys;
	kernel_pmap.pm_refcount = 1; /* always in use */
	simple_lock_init(&kernel_pmap.pm_lock);

	kernel_crp.rp_attr = MMU_LONG_DTE_LU | MMU_DT_LONG;
	kernel_crp.rp_addr = kernAphys;

	/*
	 * Now pmap_enter_kernel() may be used safely and will be
	 * the main interface used hereafter to modify the kernel's
	 * virtual address space. Note that since we are still running
	 * under the PROM's address table, none of these table modifications
	 * actually take effect until pmap_takeover_mmu() is called.
	 *
	 * Note: Our tables do NOT have the PROM linear mappings!
	 * Only the mappings created here exist in our tables, so
	 * remember to map anything we expect to use.
	 */
	va = (vaddr_t)KERNBASE;
	pa = 0;

	/*
	 * The first page of the kernel virtual address space is the msgbuf
	 * page. The page attributes (data, non-cached) are set here, while
	 * the address is assigned to this global pointer in cpu_startup().
	 * It is non-cached, mostly due to paranoia.
	 */
	pmap_enter_kernel(va, pa | PMAP_NC, VM_PROT_ALL);
	va += PAGE_SIZE; pa += PAGE_SIZE;

	/* Next page is used as the temporary stack. */
	pmap_enter_kernel(va, pa, VM_PROT_ALL);
	va += PAGE_SIZE; pa += PAGE_SIZE;

	/*
	 * Map all of the kernel's text segment as read-only and cacheable.
	 * (Cacheable is implied by default). Unfortunately, the last bytes
	 * of kernel text and the first bytes of kernel data will often be
	 * sharing the same page. Therefore, the last page of kernel text
	 * has to be mapped as read/write, to accommodate the data.
	 */
	eva = m68k_trunc_page((vaddr_t)etext);
	for (; va < eva; va += PAGE_SIZE, pa += PAGE_SIZE)
		pmap_enter_kernel(va, pa, VM_PROT_READ|VM_PROT_EXECUTE);

	/*
	 * Map all of the kernel's data as read/write and cacheable.
	 * This includes: data, BSS, symbols, and everything in the
	 * contiguous memory used by pmap_bootstrap_alloc()
	 */
	for (; pa < avail_start; va += PAGE_SIZE, pa += PAGE_SIZE)
		pmap_enter_kernel(va, pa, VM_PROT_READ|VM_PROT_WRITE);

	/*
	 * At this point we are almost ready to take over the MMU. But first
	 * we must save the PROM's address space in our map, as we call its
	 * routines and make references to its data later in the kernel.
	 */
	pmap_bootstrap_copyprom();
	pmap_takeover_mmu();
	pmap_bootstrap_setprom();

	/* Notify the VM system of our page size. */
	uvmexp.pagesize = PAGE_SIZE;
	uvm_setpagesize();

	pmap_page_upload();
}


/* pmap_alloc_usermmu			INTERNAL
 **
 * Called from pmap_bootstrap() to allocate MMU tables that will
 * eventually be used for user mappings.
 */
void
pmap_alloc_usermmu(void)
{
	/* XXX: Moved into caller. */
}

/* pmap_alloc_pv			INTERNAL
 **
 * Called from pmap_bootstrap() to allocate the physical
 * to virtual mapping list. Each physical page of memory
 * in the system has a corresponding element in this list.
 */
void
pmap_alloc_pv(void)
{
	int i;
	unsigned int total_mem;

	/*
	 * Allocate a pv_head structure for every page of physical
	 * memory that will be managed by the system. Since memory on
	 * the 3/80 is non-contiguous, we cannot arrive at a total page
	 * count by subtraction of the lowest available address from the
	 * highest, but rather we have to step through each memory
	 * bank and add the number of pages in each to the total.
	 *
	 * At this time we also initialize the offset of each bank's
	 * starting pv_head within the pv_head list so that the physical
	 * memory state routines (pmap_is_referenced(),
	 * pmap_is_modified(), et al.) can quickly find corresponding
	 * pv_heads in spite of the non-contiguity.
	 */
	total_mem = 0;
	for (i = 0; i < SUN3X_NPHYS_RAM_SEGS; i++) {
		avail_mem[i].pmem_pvbase = m68k_btop(total_mem);
		total_mem += avail_mem[i].pmem_end -
		    avail_mem[i].pmem_start;
		if (avail_mem[i].pmem_next == NULL)
			break;
	}
	pvbase = (pv_t *)pmap_bootstrap_alloc(sizeof(pv_t) *
	    m68k_btop(total_phys_mem));
}

/* pmap_alloc_usertmgr			INTERNAL
 **
 * Called from pmap_bootstrap() to allocate the structures which
 * facilitate management of user MMU tables. Each user MMU table
 * in the system has one such structure associated with it.
 */
void
pmap_alloc_usertmgr(void)
{
	/* Allocate user MMU table managers */
	/* It would be a lot simpler to just make these BSS, but */
	/* we may want to change their size at boot time... -j */
	Atmgrbase = (a_tmgr_t *)pmap_bootstrap_alloc(sizeof(a_tmgr_t)
	    * NUM_A_TABLES);
	Btmgrbase = (b_tmgr_t *)pmap_bootstrap_alloc(sizeof(b_tmgr_t)
	    * NUM_B_TABLES);
	Ctmgrbase = (c_tmgr_t *)pmap_bootstrap_alloc(sizeof(c_tmgr_t)
	    * NUM_C_TABLES);

	/*
	 * Allocate PV list elements for the physical to virtual
	 * mapping system.
	 */
	pvebase = (pv_elem_t *)pmap_bootstrap_alloc(
	    sizeof(pv_elem_t) * (NUM_USER_PTES + NUM_KERN_PTES));
}

/* pmap_bootstrap_copyprom()			INTERNAL
 **
 * Copy the PROM mappings into our own tables. Note, we
 * can use physical addresses until __bootstrap returns.
 */
void
pmap_bootstrap_copyprom(void)
{
	struct sunromvec *romp;
	int *mon_ctbl;
	mmu_short_pte_t *kpte;
	int i, len;

	romp = romVectorPtr;

	/*
	 * Copy the mappings in SUN3X_MON_KDB_BASE...SUN3X_MONEND
	 * Note: mon_ctbl[0] maps SUN3X_MON_KDB_BASE
	 */
	mon_ctbl = *romp->monptaddr;
	i = m68k_btop(SUN3X_MON_KDB_BASE - KERNBASE);
	kpte = &kernCbase[i];
	len = m68k_btop(SUN3X_MONEND - SUN3X_MON_KDB_BASE);

	for (i = 0; i < len; i++) {
		kpte[i].attr.raw = mon_ctbl[i];
	}

	/*
	 * Copy the mappings at MON_DVMA_BASE (to the end).
	 * Note, in here, mon_ctbl[0] maps MON_DVMA_BASE.
	 * Actually, we only want the last page, which the
	 * PROM has set up for use by the "ie" driver.
	 * (The i82586 needs its SCP there.)
	 * If we copy all the mappings, pmap_enter_kernel
	 * may complain about finding valid PTEs that are
	 * not recorded in our PV lists...
	 */
	mon_ctbl = *romp->shadowpteaddr;
	i = m68k_btop(SUN3X_MON_DVMA_BASE - KERNBASE);
	kpte = &kernCbase[i];
	len = m68k_btop(SUN3X_MON_DVMA_SIZE);
	for (i = (len - 1); i < len; i++) {
		kpte[i].attr.raw = mon_ctbl[i];
	}
}

/* pmap_takeover_mmu			INTERNAL
 **
 * Called from pmap_bootstrap() after it has copied enough of the
 * PROM mappings into the kernel map so that we can use our own
 * MMU table.
 */
void
pmap_takeover_mmu(void)
{

	loadcrp(&kernel_crp);
}

/* pmap_bootstrap_setprom()			INTERNAL
 **
 * Set the PROM mappings so it can see kernel space.
 * Note that physical addresses are used here, which
 * we can get away with because this runs with the
 * low 1GB set for transparent translation.
 */
void
pmap_bootstrap_setprom(void)
{
	mmu_long_dte_t *mon_dte;
	extern struct mmu_rootptr mon_crp;
	int i;

	mon_dte = (mmu_long_dte_t *)mon_crp.rp_addr;
	for (i = MMU_TIA(KERNBASE); i < MMU_TIA(KERN_END); i++) {
		mon_dte[i].attr.raw = kernAbase[i].attr.raw;
		mon_dte[i].addr.raw = kernAbase[i].addr.raw;
	}
}


/* pmap_init			INTERFACE
 **
 * Called at the end of vm_init() to set up the pmap system to go
 * into full time operation. All initialization of kernel_pmap
 * should be already done by now, so this should just do things
 * needed for user-level pmaps to work.
 */
void
pmap_init(void)
{
	/** Initialize the manager pools **/
	TAILQ_INIT(&a_pool);
	TAILQ_INIT(&b_pool);
	TAILQ_INIT(&c_pool);

	/**************************************************************
	 * Initialize all tmgr structures and MMU tables they manage. *
	 **************************************************************/
	/** Initialize A tables **/
	pmap_init_a_tables();
	/** Initialize B tables **/
	pmap_init_b_tables();
	/** Initialize C tables **/
	pmap_init_c_tables();

	/** Initialize the pmap pools **/
	pool_init(&pmap_pmap_pool, sizeof(struct pmap), 0, 0, 0, "pmappl",
	    &pool_allocator_nointr);
}

/* pmap_init_a_tables()			INTERNAL
 **
 * Initializes all A managers, their MMU A tables, and inserts
 * them into the A manager pool for use by the system.
 */
void
pmap_init_a_tables(void)
{
	int i;
	a_tmgr_t *a_tbl;

	for (i = 0; i < NUM_A_TABLES; i++) {
		/* Select the next available A manager from the pool */
		a_tbl = &Atmgrbase[i];

		/*
		 * Clear its parent entry. Set its wired and valid
		 * entry count to zero.
		 */
		a_tbl->at_parent = NULL;
		a_tbl->at_wcnt = a_tbl->at_ecnt = 0;

		/* Assign it the next available MMU A table from the pool */
		a_tbl->at_dtbl = &mmuAbase[i * MMU_A_TBL_SIZE];

		/*
		 * Initialize the MMU A table with the table in the `proc0',
		 * or kernel, mapping. This ensures that every process has
		 * the kernel mapped in the top part of its address space.
		 */
		memcpy(a_tbl->at_dtbl, kernAbase,
		    MMU_A_TBL_SIZE * sizeof(mmu_long_dte_t));

		/*
		 * Finally, insert the manager into the A pool,
		 * making it ready to be used by the system.
		 */
		TAILQ_INSERT_TAIL(&a_pool, a_tbl, at_link);
	}
}

/* pmap_init_b_tables()			INTERNAL
 **
 * Initializes all B table managers, their MMU B tables, and
 * inserts them into the B manager pool for use by the system.
 */
void
pmap_init_b_tables(void)
{
	int i, j;
	b_tmgr_t *b_tbl;

	for (i = 0; i < NUM_B_TABLES; i++) {
		/* Select the next available B manager from the pool */
		b_tbl = &Btmgrbase[i];

		b_tbl->bt_parent = NULL;	/* clear its parent,  */
		b_tbl->bt_pidx = 0;		/* parent index,      */
		b_tbl->bt_wcnt = 0;		/* wired entry count, */
		b_tbl->bt_ecnt = 0;		/* valid entry count. */

		/* Assign it the next available MMU B table from the pool */
		b_tbl->bt_dtbl = &mmuBbase[i * MMU_B_TBL_SIZE];

		/* Invalidate every descriptor in the table */
		for (j = 0; j < MMU_B_TBL_SIZE; j++)
			b_tbl->bt_dtbl[j].attr.raw = MMU_DT_INVALID;

		/* Insert the manager into the B pool */
		TAILQ_INSERT_TAIL(&b_pool, b_tbl, bt_link);
	}
}

/* pmap_init_c_tables()			INTERNAL
 **
 * Initializes all C table managers, their MMU C tables, and
 * inserts them into the C manager pool for use by the system.
 */
void
pmap_init_c_tables(void)
{
	int i, j;
	c_tmgr_t *c_tbl;

	for (i = 0; i < NUM_C_TABLES; i++) {
		/* Select the next available C manager from the pool */
		c_tbl = &Ctmgrbase[i];

		c_tbl->ct_parent = NULL;	/* clear its parent,  */
		c_tbl->ct_pidx = 0;		/* parent index,      */
		c_tbl->ct_wcnt = 0;		/* wired entry count, */
		c_tbl->ct_ecnt = 0;		/* valid entry count, */
		c_tbl->ct_pmap = NULL;		/* parent pmap,       */
		c_tbl->ct_va = 0;		/* base of managed range */

		/* Assign it the next available MMU C table from the pool */
		c_tbl->ct_dtbl = &mmuCbase[i * MMU_C_TBL_SIZE];

		for (j = 0; j < MMU_C_TBL_SIZE; j++)
			c_tbl->ct_dtbl[j].attr.raw = MMU_DT_INVALID;

		TAILQ_INSERT_TAIL(&c_pool, c_tbl, ct_link);
	}
}

/* pmap_init_pv()			INTERNAL
 **
 * Initializes the Physical to Virtual mapping system.
 */
void
pmap_init_pv(void)
{
	int i;

	/* Initialize every PV head. */
	for (i = 0; i < m68k_btop(total_phys_mem); i++) {
		pvbase[i].pv_idx = PVE_EOL;	/* Indicate no mappings */
		pvbase[i].pv_flags = 0;		/* Zero out page flags  */
	}
}

/* get_a_table			INTERNAL
 **
 * Retrieve and return a level A table for use in a user map.
 */
a_tmgr_t *
get_a_table(void)
{
	a_tmgr_t *tbl;
	pmap_t pmap;

	/* Get the top A table in the pool */
	tbl = TAILQ_FIRST(&a_pool);
	if (tbl == NULL) {
		/*
		 * XXX - Instead of panicking here and in other get_x_table
		 * functions, we do have the option of sleeping on the head of
		 * the table pool. Any function which updates the table pool
		 * would then issue a wakeup() on the head, thus waking up any
		 * processes waiting for a table.
		 *
		 * Actually, the place to sleep would be when some process
		 * asks for a "wired" mapping that would run us short of
		 * mapping resources. This design DEPENDS on always having
		 * some mapping resources in the pool for stealing, so we
		 * must make sure we NEVER let the pool become empty. -gwr
		 */
		panic("get_a_table: out of A tables.");
	}

	TAILQ_REMOVE(&a_pool, tbl, at_link);
	/*
	 * If the table has a non-null parent pointer then it is in use.
	 * Forcibly abduct it from its parent and clear its entries.
	 * No re-entrancy worries here. This table would not be in the
	 * table pool unless it was available for use.
	 *
	 * Note that the second argument to free_a_table() is FALSE. This
	 * indicates that the table should not be relinked into the A table
	 * pool. That is a job for the function that called us.
	 */
	if (tbl->at_parent) {
		pmap = tbl->at_parent;
		free_a_table(tbl, FALSE);
		pmap->pm_a_tmgr = NULL;
		pmap->pm_a_phys = kernAphys;
	}
	return tbl;
}

/* get_b_table			INTERNAL
 **
 * Return a level B table for use.
 */
b_tmgr_t *
get_b_table(void)
{
	b_tmgr_t *tbl;

	/* See 'get_a_table' for comments. */
	tbl = TAILQ_FIRST(&b_pool);
	if (tbl == NULL)
		panic("get_b_table: out of B tables.");
	TAILQ_REMOVE(&b_pool, tbl, bt_link);
	if (tbl->bt_parent) {
		tbl->bt_parent->at_dtbl[tbl->bt_pidx].attr.raw = MMU_DT_INVALID;
		tbl->bt_parent->at_ecnt--;
		free_b_table(tbl, FALSE);
	}
	return tbl;
}

/* get_c_table			INTERNAL
 **
 * Return a level C table for use.
 */
c_tmgr_t *
get_c_table(void)
{
	c_tmgr_t *tbl;

	/* See 'get_a_table' for comments */
	tbl = TAILQ_FIRST(&c_pool);
	if (tbl == NULL)
		panic("get_c_table: out of C tables.");
	TAILQ_REMOVE(&c_pool, tbl, ct_link);
	if (tbl->ct_parent) {
		tbl->ct_parent->bt_dtbl[tbl->ct_pidx].attr.raw = MMU_DT_INVALID;
		tbl->ct_parent->bt_ecnt--;
		free_c_table(tbl, FALSE);
	}
	return tbl;
}

/*
 * The following 'free_table' and 'steal_table' functions are called to
 * detach tables from their current obligations (parents and children) and
 * prepare them for reuse in another mapping.
 *
 * Free_table is used when the calling function will handle the fate
 * of the parent table, such as returning it to the free pool when it has
 * no valid entries. Functions that do not want to handle this should
 * call steal_table, in which the parent table's descriptors and entry
 * count are automatically modified when this table is removed.
 */

/* free_a_table			INTERNAL
 **
 * Unmaps the given A table and all child tables from their current
 * mappings. Returns the number of pages that were invalidated.
 * If 'relink' is true, the function will return the table to the head
 * of the available table pool.
 *
 * Cache note: The MC68851 will automatically flush all
 * descriptors derived from a given A table from its
 * Automatic Translation Cache (ATC) if we issue a
 * 'PFLUSHR' instruction with the base address of the
 * table. This function should do so, and does.
 * Note note: We are using an MC68030 - there is no
 * PFLUSHR.
 */
int
free_a_table(a_tmgr_t *a_tbl, boolean_t relink)
{
	int i, removed_cnt;
	mmu_long_dte_t	*dte;
	mmu_short_dte_t *dtbl;
	b_tmgr_t	*tmgr;

	/*
	 * Flush the ATC cache of all cached descriptors derived
	 * from this table.
	 * Sun3x does not use 68851's cached table feature
	 * flush_atc_crp(mmu_vtop(a_tbl->dte));
	 */

	/*
	 * Remove any pending cache flushes that were designated
	 * for the pmap this A table belongs to.
	 * a_tbl->parent->atc_flushq[0] = 0;
	 * Not implemented in sun3x.
	 */

	/*
	 * All A tables in the system should retain a map for the
	 * kernel. If the table contains any valid descriptors
	 * (other than those for the kernel area), invalidate them all,
	 * stopping short of the kernel's entries.
	 */
	removed_cnt = 0;
	if (a_tbl->at_ecnt) {
		dte = a_tbl->at_dtbl;
		for (i = 0; i < MMU_TIA(KERNBASE); i++) {
			/*
			 * If a table entry points to a valid B table, free
			 * it and its children.
			 */
			if (MMU_VALID_DT(dte[i])) {
				/*
				 * The following block does several things,
				 * from innermost expression to the
				 * outermost:
				 * 1) It extracts the base address of the
				 *    B table pointed to in the A table
				 *    entry dte[i].
				 * 2) It converts this base address into
				 *    the virtual address it can be
				 *    accessed with. (all MMU tables point
				 *    to physical addresses.)
				 * 3) It finds the corresponding manager
				 *    structure which manages this MMU table.
				 * 4) It frees the manager structure.
				 *    (This frees the MMU table and all
				 *    child tables. See 'free_b_table' for
				 *    details.)
				 */
				dtbl = mmu_ptov(dte[i].addr.raw);
				tmgr = mmuB2tmgr(dtbl);
				removed_cnt += free_b_table(tmgr, TRUE);
				dte[i].attr.raw = MMU_DT_INVALID;
			}
		}
		a_tbl->at_ecnt = 0;
	}
	if (relink) {
		a_tbl->at_parent = NULL;
		TAILQ_REMOVE(&a_pool, a_tbl, at_link);
		TAILQ_INSERT_HEAD(&a_pool, a_tbl, at_link);
	}
	return removed_cnt;
}

/* free_b_table			INTERNAL
 **
 * Unmaps the given B table and all its children from their current
 * mappings. Returns the number of pages that were invalidated.
 * (For comments, see 'free_a_table()').
 */
int
free_b_table(b_tmgr_t *b_tbl, boolean_t relink)
{
	int i, removed_cnt;
	mmu_short_dte_t *dte;
	mmu_short_pte_t	*dtbl;
	c_tmgr_t	*tmgr;

	removed_cnt = 0;
	if (b_tbl->bt_ecnt) {
		dte = b_tbl->bt_dtbl;
		for (i = 0; i < MMU_B_TBL_SIZE; i++) {
			if (MMU_VALID_DT(dte[i])) {
				dtbl = mmu_ptov(MMU_DTE_PA(dte[i]));
				tmgr = mmuC2tmgr(dtbl);
				removed_cnt += free_c_table(tmgr, TRUE);
				dte[i].attr.raw = MMU_DT_INVALID;
			}
		}
		b_tbl->bt_ecnt = 0;
	}

	if (relink) {
		b_tbl->bt_parent = NULL;
		TAILQ_REMOVE(&b_pool, b_tbl, bt_link);
		TAILQ_INSERT_HEAD(&b_pool, b_tbl, bt_link);
	}
	return removed_cnt;
}

/* free_c_table			INTERNAL
 **
 * Unmaps the given C table from use and returns it to the pool for
 * re-use. Returns the number of pages that were invalidated.
 *
 * This function preserves any physical page modification information
 * contained in the page descriptors within the C table by calling
 * 'pmap_remove_pte().'
 */
int
free_c_table(c_tmgr_t *c_tbl, boolean_t relink)
{
	int i, removed_cnt;

	removed_cnt = 0;
	if (c_tbl->ct_ecnt) {
		for (i = 0; i < MMU_C_TBL_SIZE; i++) {
			if (MMU_VALID_DT(c_tbl->ct_dtbl[i])) {
				pmap_remove_pte(&c_tbl->ct_dtbl[i]);
				removed_cnt++;
			}
		}
		c_tbl->ct_ecnt = 0;
	}

	if (relink) {
		c_tbl->ct_parent = NULL;
		TAILQ_REMOVE(&c_pool, c_tbl, ct_link);
		TAILQ_INSERT_HEAD(&c_pool, c_tbl, ct_link);
	}
	return removed_cnt;
}


/* pmap_remove_pte			INTERNAL
 **
 * Unmap the given pte and preserve any page modification
 * information by transferring it to the pv head of the
 * physical page it maps to. This function does not update
 * any reference counts because it is assumed that the calling
 * function will do so.
 */
void
pmap_remove_pte(mmu_short_pte_t *pte)
{
	u_short pv_idx, targ_idx;
	paddr_t pa;
	pv_t *pv;

	pa = MMU_PTE_PA(*pte);
	if (is_managed(pa)) {
		pv = pa2pv(pa);
		targ_idx = pteidx(pte);	/* Index of PTE being removed */

		/*
		 * If the PTE being removed is the first (or only) PTE in
		 * the list of PTEs currently mapped to this page, remove the
		 * PTE by changing the index found on the PV head. Otherwise
		 * a linear search through the list will have to be executed
		 * in order to find the PVE which points to the PTE being
		 * removed, so that it may be modified to point to its new
		 * neighbor.
		 */

		pv_idx = pv->pv_idx;	/* Index of first PTE in PV list */
		if (pv_idx == targ_idx) {
			pv->pv_idx = pvebase[targ_idx].pve_next;
		} else {

			/*
			 * Find the PV element pointing to the target
			 * element. Note: may have pv_idx==PVE_EOL
			 */

			for (;;) {
				if (pv_idx == PVE_EOL) {
					goto pv_not_found;
				}
				if (pvebase[pv_idx].pve_next == targ_idx)
					break;
				pv_idx = pvebase[pv_idx].pve_next;
			}

			/*
			 * At this point, pv_idx is the index of the PV
			 * element just before the target element in the list.
			 * Unlink the target.
			 */

			pvebase[pv_idx].pve_next = pvebase[targ_idx].pve_next;
		}

		/*
		 * Save the mod/ref bits of the pte by simply
		 * ORing the entire pte onto the pv_flags member
		 * of the pv structure.
		 * There is no need to use a separate bit pattern
		 * for usage information on the pv head than that
		 * which is used on the MMU ptes.
		 */

pv_not_found:
		pv->pv_flags |= (u_short)pte->attr.raw;
	}
	pte->attr.raw = MMU_DT_INVALID;
}

/* pmap_stroll			INTERNAL
 **
 * Retrieve the addresses of all table managers involved in the mapping of
 * the given virtual address. If the table walk completed successfully,
 * return TRUE. If it was only partially successful, return FALSE.
 * The table walk performed by this function is important to many other
 * functions in this module.
 *
 * Note: This function ought to be easier to read.
 */
boolean_t
pmap_stroll(pmap_t pmap, vaddr_t va, a_tmgr_t **a_tbl, b_tmgr_t **b_tbl,
    c_tmgr_t **c_tbl, mmu_short_pte_t **pte, int *a_idx, int *b_idx,
    int *pte_idx)
{
	mmu_long_dte_t *a_dte;	/* A: long descriptor table */
	mmu_short_dte_t *b_dte;	/* B: short descriptor table */

	if (pmap == pmap_kernel())
		return FALSE;

	/* Does the given pmap have its own A table? */
	*a_tbl = pmap->pm_a_tmgr;
	if (*a_tbl == NULL)
		return FALSE;	/* No. Return unknown. */
	/* Does the A table have a valid B table
	 * under the corresponding table entry?
	 */
	*a_idx = MMU_TIA(va);
	a_dte = &((*a_tbl)->at_dtbl[*a_idx]);
	if (!MMU_VALID_DT(*a_dte))
		return FALSE;	/* No. Return unknown. */
	/* Yes. Extract B table from the A table. */
	*b_tbl = mmuB2tmgr(mmu_ptov(a_dte->addr.raw));
	/* Does the B table have a valid C table
	 * under the corresponding table entry?
	 */
	*b_idx = MMU_TIB(va);
	b_dte = &((*b_tbl)->bt_dtbl[*b_idx]);
	if (!MMU_VALID_DT(*b_dte))
		return FALSE;	/* No. Return unknown. */
	/* Yes. Extract C table from the B table. */
	*c_tbl = mmuC2tmgr(mmu_ptov(MMU_DTE_PA(*b_dte)));
	*pte_idx = MMU_TIC(va);
	*pte = &((*c_tbl)->ct_dtbl[*pte_idx]);

	return TRUE;
}
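
/*
 * A minimal usage sketch for pmap_stroll() (illustration only, not
 * compiled): a caller resolves all of the managers and the PTE for a
 * user VA in one call, then bails out if the walk was incomplete.
 */
#if 0
	a_tmgr_t *a_tbl;
	b_tmgr_t *b_tbl;
	c_tmgr_t *c_tbl;
	mmu_short_pte_t *pte;
	int a_idx, b_idx, pte_idx;

	if (pmap_stroll(pmap, va, &a_tbl, &b_tbl, &c_tbl, &pte,
	    &a_idx, &b_idx, &pte_idx) == FALSE)
		return;		/* no complete mapping for va */
	/* ... modify *pte and the tmgr entry counts here ... */
#endif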

/* pmap_enter			INTERFACE
 **
 * Called by the kernel to map a virtual address
 * to a physical address in the given process map.
 *
 * Note: this function should apply an exclusive lock
 * on the pmap system for its duration. (it certainly
 * would save my hair!!)
 * This function ought to be easier to read.
 */
int
pmap_enter(pmap_t pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags)
{
	boolean_t insert, managed;	/* Marks the need for PV insertion.*/
	u_short nidx;			/* PV list index */
	int mapflags;			/* Flags for the mapping (see NOTE1) */
	u_int a_idx, b_idx, pte_idx;	/* table indices */
	a_tmgr_t *a_tbl;		/* A: long descriptor table manager */
	b_tmgr_t *b_tbl;		/* B: short descriptor table manager */
	c_tmgr_t *c_tbl;		/* C: short page table manager */
	mmu_long_dte_t *a_dte;		/* A: long descriptor table */
	mmu_short_dte_t *b_dte;		/* B: short descriptor table */
	mmu_short_pte_t *c_pte;		/* C: short page descriptor table */
	pv_t *pv;			/* pv list head */
	boolean_t wired;		/* is the mapping to be wired? */
	enum {NONE, NEWA, NEWB, NEWC} llevel; /* used at end */

	if (pmap == pmap_kernel()) {
		pmap_enter_kernel(va, pa, prot);
		return 0;
	}

	/*
	 * Determine if the mapping should be wired.
	 */
	wired = ((flags & PMAP_WIRED) != 0);

	/*
	 * NOTE1:
	 *
	 * On November 13, 1999, someone changed the pmap_enter() API such
	 * that it now accepts a 'flags' argument. This new argument
	 * contains bit-flags for the architecture-independent (UVM) system to
	 * use in signalling certain mapping requirements to the architecture-
	 * dependent (pmap) system. The argument it replaces, 'wired', is now
	 * one of the flags within it.
	 *
	 * In addition to flags signaled by the architecture-independent
	 * system, parts of the architecture-dependent section of the sun3x
	 * kernel pass their own flags in the lower, unused bits of the
	 * physical address supplied to this function. These flags are
	 * extracted and stored in the temporary variable 'mapflags'.
	 *
	 * Extract sun3x specific flags from the physical address.
	 */
	mapflags = (pa & ~MMU_PAGE_MASK);
	pa &= MMU_PAGE_MASK;

	/*
	 * Determine if the physical address being mapped is on-board RAM.
	 * Any other area of the address space is likely to belong to a
	 * device and hence it would be disastrous to cache its contents.
	 */
	if ((managed = is_managed(pa)) == FALSE)
		mapflags |= PMAP_NC;

	/*
	 * For user mappings we walk along the MMU tables of the given
	 * pmap, reaching a PTE which describes the virtual page being
	 * mapped or changed. If any level of the walk ends in an invalid
	 * entry, a table must be allocated and the entry must be updated
	 * to point to it.
	 * There is a bit of confusion as to whether this code must be
	 * re-entrant. For now we will assume it is. To support
	 * re-entrancy we must unlink tables from the table pool before
	 * we assume we may use them. Tables are re-linked into the pool
	 * when we are finished with them at the end of the function.
	 * But I don't feel like doing that until we have proof that this
	 * needs to be re-entrant.
	 * 'llevel' records which tables need to be relinked.
	 */
	llevel = NONE;

	/*
	 * Step 1 - Retrieve the A table from the pmap. If it has no
	 * A table, allocate a new one from the available pool.
	 */

	a_tbl = pmap->pm_a_tmgr;
	if (a_tbl == NULL) {
		/*
		 * This pmap does not currently have an A table. Allocate
		 * a new one.
		 */
		a_tbl = get_a_table();
		a_tbl->at_parent = pmap;

		/*
		 * Assign this new A table to the pmap, and calculate its
		 * physical address so that loadcrp() can be used to make
		 * the table active.
		 */
		pmap->pm_a_tmgr = a_tbl;
		pmap->pm_a_phys = mmu_vtop(a_tbl->at_dtbl);

		/*
		 * If the process receiving a new A table is the current
		 * process, we are responsible for setting the MMU so that
		 * it becomes the current address space. This only adds
		 * new mappings, so no need to flush anything.
		 */
		if (pmap == current_pmap()) {
			kernel_crp.rp_addr = pmap->pm_a_phys;
			loadcrp(&kernel_crp);
		}

		if (!wired)
			llevel = NEWA;
	} else {
		/*
		 * Use the A table already allocated for this pmap.
		 * Unlink it from the A table pool if necessary.
		 */
		if (wired && !a_tbl->at_wcnt)
			TAILQ_REMOVE(&a_pool, a_tbl, at_link);
	}

	/*
	 * Step 2 - Walk into the B table. If there is no valid B table,
	 * allocate one.
	 */

	a_idx = MMU_TIA(va);		/* Calculate the TIA of the VA. */
	a_dte = &a_tbl->at_dtbl[a_idx];	/* Retrieve descriptor from table */
	if (MMU_VALID_DT(*a_dte)) {	/* Is the descriptor valid? */
		/* The descriptor is valid. Use the B table it points to. */
		/*************************************
		 *               a_idx               *
		 *                 v                 *
		 * a_tbl -> +-+-+-+-+-+-+-+-+-+-+-+- *
		 *          | | | | | | | | | | | |  *
		 *          +-+-+-+-+-+-+-+-+-+-+-+- *
		 *                 |                 *
		 *                 \- b_tbl -> +-+-  *
		 *                             | |   *
		 *                             +-+-  *
		 *************************************/
		b_dte = mmu_ptov(a_dte->addr.raw);
		b_tbl = mmuB2tmgr(b_dte);

		/*
		 * If the requested mapping must be wired, but this table
		 * being used to map it is not, the table must be removed
		 * from the available pool and its wired entry count
		 * incremented.
		 */
		if (wired && !b_tbl->bt_wcnt) {
			TAILQ_REMOVE(&b_pool, b_tbl, bt_link);
			a_tbl->at_wcnt++;
		}
	} else {
		/* The descriptor is invalid. Allocate a new B table. */
		b_tbl = get_b_table();

		/* Point the parent A table descriptor to this new B table. */
		a_dte->addr.raw = mmu_vtop(b_tbl->bt_dtbl);
		a_dte->attr.raw = MMU_LONG_DTE_LU | MMU_DT_SHORT;
		a_tbl->at_ecnt++;	/* Update parent's valid entry count */

		/* Create the necessary back references to the parent table */
		b_tbl->bt_parent = a_tbl;
		b_tbl->bt_pidx = a_idx;

		/*
1741 * If this table is to be wired, make sure the parent A table
1742 * wired count is updated to reflect that it has another wired
1743 * entry.
1744 */
1745 if (wired)
1746 a_tbl->at_wcnt++;
1747 else if (llevel == NONE)
1748 llevel = NEWB;
1749 }
1750
1751 /*
1752 * Step 3 - Walk into the C table, if there is no valid C table,
1753 * allocate one.
1754 */
1755
1756 b_idx = MMU_TIB(va); /* Calculate the TIB of the VA */
1757 b_dte = &b_tbl->bt_dtbl[b_idx]; /* Retrieve descriptor from table */
1758 if (MMU_VALID_DT(*b_dte)) { /* Is the descriptor valid? */
1759 /* The descriptor is valid. Use the C table it points to. */
1760 /**************************************
1761 * c_idx *
1762 * | v *
1763 * \- b_tbl -> +-+-+-+-+-+-+-+-+-+-+- *
1764 * | | | | | | | | | | | *
1765 * +-+-+-+-+-+-+-+-+-+-+- *
1766 * | *
1767 * \- c_tbl -> +-+-- *
1768 * | | | *
1769 * +-+-- *
1770 **************************************/
1771 c_pte = mmu_ptov(MMU_PTE_PA(*b_dte));
1772 c_tbl = mmuC2tmgr(c_pte);
1773
1774 /* If mapping is wired and table is not */
1775 if (wired && !c_tbl->ct_wcnt) {
1776 TAILQ_REMOVE(&c_pool, c_tbl, ct_link);
1777 b_tbl->bt_wcnt++;
1778 }
1779 } else {
1780 /* The descriptor is invalid. Allocate a new C table. */
1781 c_tbl = get_c_table();
1782
1783 /* Point the parent B table descriptor to this new C table. */
1784 b_dte->attr.raw = mmu_vtop(c_tbl->ct_dtbl);
1785 b_dte->attr.raw |= MMU_DT_SHORT;
1786 b_tbl->bt_ecnt++; /* Update parent's valid entry count */
1787
1788 /* Create the necessary back references to the parent table */
1789 c_tbl->ct_parent = b_tbl;
1790 c_tbl->ct_pidx = b_idx;
1791 /*
1792 * Store the pmap and base virtual managed address for faster
1793 * retrieval in the PV functions.
1794 */
1795 c_tbl->ct_pmap = pmap;
1796 c_tbl->ct_va = (va & (MMU_TIA_MASK|MMU_TIB_MASK));
1797
1798 /*
1799 * If this table is to be wired, make sure the parent B table
1800 * wired count is updated to reflect that it has another wired
1801 * entry.
1802 */
1803 if (wired)
1804 b_tbl->bt_wcnt++;
1805 else if (llevel == NONE)
1806 llevel = NEWC;
1807 }
1808
1809 /*
1810 * Step 4 - Deposit a page descriptor (PTE) into the appropriate
1811 * slot of the C table, describing the PA to which the VA is mapped.
1812 */
1813
1814 pte_idx = MMU_TIC(va);
1815 c_pte = &c_tbl->ct_dtbl[pte_idx];
1816 if (MMU_VALID_DT(*c_pte)) { /* Is the entry currently valid? */
1817 /*
1818 * The PTE is currently valid. This particular call
1819 * is just a synonym for one (or more) of the following
1820 * operations:
1821 * change protection of a page
1822 * change wiring status of a page
1823 * remove the mapping of a page
1824 *
1825 * XXX - Semi critical: This code should unwire the PTE
1826 * and, possibly, associated parent tables if this is a
1827 * change wiring operation. Currently it does not.
1828 *
1829 * This may be ok if pmap_unwire() is the only
1830 * interface used to UNWIRE a page.
1831 */
1832
1833 /* First check if this is a wiring operation. */
1834 if (wired && (c_pte->attr.raw & MMU_SHORT_PTE_WIRED)) {
1835 /*
1836 * The PTE is already wired. To prevent it from being
1837 * counted as a new wiring operation, reset the 'wired'
1838 * variable.
1839 */
1840 wired = FALSE;
1841 }
1842
1843 /* Is the new address the same as the old? */
1844 if (MMU_PTE_PA(*c_pte) == pa) {
1845 /*
1846 * Yes, mark that it does not need to be reinserted
1847 * into the PV list.
1848 */
1849 insert = FALSE;
1850
1851 /*
1852 * Clear all but the modified, referenced and wired
1853 * bits on the PTE.
1854 */
1855 c_pte->attr.raw &= (MMU_SHORT_PTE_M
1856 | MMU_SHORT_PTE_USED | MMU_SHORT_PTE_WIRED);
1857 } else {
1858 /* No, remove the old entry */
1859 pmap_remove_pte(c_pte);
1860 insert = TRUE;
1861 }
1862
1863 /*
1864 * TLB flush is only necessary if modifying current map.
1865 * However, in pmap_enter(), the pmap almost always IS
1866 * the current pmap, so don't even bother to check.
1867 */
1868 TBIS(va);
1869 } else {
1870 /*
1871 * The PTE is invalid. Increment the valid entry count in
1872 * the C table manager to reflect the addition of a new entry.
1873 */
1874 c_tbl->ct_ecnt++;
1875
1876 /* XXX - temporarily make sure the PTE is cleared. */
1877 c_pte->attr.raw = 0;
1878
1879 /* It will also need to be inserted into the PV list. */
1880 insert = TRUE;
1881 }
1882
1883 /*
1884 * If page is changing from unwired to wired status, set an unused bit
1885 * within the PTE to indicate that it is wired. Also increment the
1886 * wired entry count in the C table manager.
1887 */
1888 if (wired) {
1889 c_pte->attr.raw |= MMU_SHORT_PTE_WIRED;
1890 c_tbl->ct_wcnt++;
1891 }
1892
1893 /*
1894 * Map the page, being careful to preserve modify/reference/wired
1895 * bits. At this point it is assumed that the PTE either has no bits
1896 	 * set, or if there are set bits, they are only modified, referenced or
1897 * wired bits. If not, the following statement will cause erratic
1898 * behavior.
1899 */
1900 #ifdef PMAP_DEBUG
1901 if (c_pte->attr.raw & ~(MMU_SHORT_PTE_M |
1902 MMU_SHORT_PTE_USED | MMU_SHORT_PTE_WIRED)) {
1903 printf("pmap_enter: junk left in PTE at %p\n", c_pte);
1904 Debugger();
1905 }
1906 #endif
1907 c_pte->attr.raw |= ((u_long) pa | MMU_DT_PAGE);
1908
1909 /*
1910 * If the mapping should be read-only, set the write protect
1911 * bit in the PTE.
1912 */
1913 if (!(prot & VM_PROT_WRITE))
1914 c_pte->attr.raw |= MMU_SHORT_PTE_WP;
1915
1916 /*
1917 * Mark the PTE as used and/or modified as specified by the flags arg.
1918 */
1919 if (flags & VM_PROT_ALL) {
1920 c_pte->attr.raw |= MMU_SHORT_PTE_USED;
1921 if (flags & VM_PROT_WRITE) {
1922 c_pte->attr.raw |= MMU_SHORT_PTE_M;
1923 }
1924 }
1925
1926 /*
1927 	 * If the mapping should be cache inhibited (indicated by flag
1928 	 * bits in the low-order part of the physical address),
1929 	 * mark the PTE as a cache inhibited page.
1930 */
1931 if (mapflags & PMAP_NC)
1932 c_pte->attr.raw |= MMU_SHORT_PTE_CI;
1933
1934 /*
1935 * If the physical address being mapped is managed by the PV
1936 * system then link the pte into the list of pages mapped to that
1937 * address.
1938 */
1939 if (insert && managed) {
1940 pv = pa2pv(pa);
1941 nidx = pteidx(c_pte);
1942
1943 pvebase[nidx].pve_next = pv->pv_idx;
1944 pv->pv_idx = nidx;
1945 }
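	/*
	 * The two stores above are a head insertion into the PV list,
	 * which is linked by PTE indices rather than pointers. Sketch,
	 * with hypothetical index values:
	 *
	 *	before:	pv->pv_idx == 7,    pvebase[7].pve_next == PVE_EOL
	 *	after:	pv->pv_idx == nidx, pvebase[nidx].pve_next == 7
	 */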
1946
1947 /* Move any allocated tables back into the active pool. */
1948
1949 switch (llevel) {
1950 case NEWA:
1951 TAILQ_INSERT_TAIL(&a_pool, a_tbl, at_link);
1952 /* FALLTHROUGH */
1953 case NEWB:
1954 TAILQ_INSERT_TAIL(&b_pool, b_tbl, bt_link);
1955 /* FALLTHROUGH */
1956 case NEWC:
1957 TAILQ_INSERT_TAIL(&c_pool, c_tbl, ct_link);
1958 /* FALLTHROUGH */
1959 default:
1960 break;
1961 }
1962
1963 return 0;
1964 }
1965
1966 /* pmap_enter_kernel INTERNAL
1967 **
1968 * Map the given virtual address to the given physical address within the
1969 * kernel address space. This function exists because the kernel map does
1970 * not do dynamic table allocation. It consists of a contiguous array of ptes
1971 * and can be edited directly without the need to walk through any tables.
1972 *
1973 * XXX: "Danger, Will Robinson!"
1974 * Note that the kernel should never take a fault on any page
1975 * between [ KERNBASE .. virtual_avail ] and this is checked in
1976 * trap.c for kernel-mode MMU faults. This means that mappings
1977 	 * created in that range must be implicitly wired. -gwr
1978 */
1979 void
1980 pmap_enter_kernel(vaddr_t va, paddr_t pa, vm_prot_t prot)
1981 {
1982 boolean_t was_valid, insert;
1983 u_short pte_idx;
1984 int flags;
1985 mmu_short_pte_t *pte;
1986 pv_t *pv;
1987 paddr_t old_pa;
1988
1989 flags = (pa & ~MMU_PAGE_MASK);
1990 pa &= MMU_PAGE_MASK;
1991
1992 if (is_managed(pa))
1993 insert = TRUE;
1994 else
1995 insert = FALSE;
1996
1997 /*
1998 * Calculate the index of the PTE being modified.
1999 */
2000 pte_idx = (u_long) m68k_btop(va - KERNBASE);
2001
2002 /* This array is traditionally named "Sysmap" */
2003 pte = &kernCbase[pte_idx];
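	/*
	 * For example (assuming the 8KB page size used here):
	 * va == KERNBASE + 0x6000 gives pte_idx == 3, i.e. the fourth
	 * PTE in the contiguous kernel page table.
	 */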
2004
2005 if (MMU_VALID_DT(*pte)) {
2006 was_valid = TRUE;
2007 /*
2008 * If the PTE already maps a different
2009 		 * physical address, unmap and pv_unlink.
2010 */
2011 old_pa = MMU_PTE_PA(*pte);
2012 if (pa != old_pa)
2013 pmap_remove_pte(pte);
2014 else {
2015 /*
2016 * Old PA and new PA are the same. No need to
2017 * relink the mapping within the PV list.
2018 */
2019 insert = FALSE;
2020
2021 /*
2022 * Save any mod/ref bits on the PTE.
2023 */
2024 pte->attr.raw &= (MMU_SHORT_PTE_USED|MMU_SHORT_PTE_M);
2025 }
2026 } else {
2027 pte->attr.raw = MMU_DT_INVALID;
2028 was_valid = FALSE;
2029 }
2030
2031 /*
2032 	 * Map the page, being careful to preserve the modified/referenced
2033 	 * bits on the PTE.
2034 */
2035 pte->attr.raw |= (pa | MMU_DT_PAGE);
2036
2037 if (!(prot & VM_PROT_WRITE)) /* If access should be read-only */
2038 pte->attr.raw |= MMU_SHORT_PTE_WP;
2039 if (flags & PMAP_NC)
2040 pte->attr.raw |= MMU_SHORT_PTE_CI;
2041 if (was_valid)
2042 TBIS(va);
2043
2044 /*
2045 * Insert the PTE into the PV system, if need be.
2046 */
2047 if (insert) {
2048 pv = pa2pv(pa);
2049 pvebase[pte_idx].pve_next = pv->pv_idx;
2050 pv->pv_idx = pte_idx;
2051 }
2052 }
2053
2054 void
2055 pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot)
2056 {
2057 mmu_short_pte_t *pte;
2058
2059 /* This array is traditionally named "Sysmap" */
2060 pte = &kernCbase[(u_long)m68k_btop(va - KERNBASE)];
2061
2062 KASSERT(!MMU_VALID_DT(*pte));
2063 pte->attr.raw = MMU_DT_INVALID | MMU_DT_PAGE | (pa & MMU_PAGE_MASK);
2064 if (!(prot & VM_PROT_WRITE))
2065 pte->attr.raw |= MMU_SHORT_PTE_WP;
2066 }
2067
2068 void
2069 pmap_kremove(vaddr_t va, vsize_t len)
2070 {
2071 int idx, eidx;
2072
2073 #ifdef PMAP_DEBUG
2074 	if ((va & PGOFSET) || (len & PGOFSET))
2075 panic("pmap_kremove: alignment");
2076 #endif
2077
2078 idx = m68k_btop(va - KERNBASE);
2079 eidx = m68k_btop(va + len - KERNBASE);
2080
2081 while (idx < eidx) {
2082 kernCbase[idx++].attr.raw = MMU_DT_INVALID;
2083 TBIS(va);
2084 va += PAGE_SIZE;
2085 }
2086 }
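
/*
 * A minimal usage sketch for the pair above (hypothetical values):
 * enter a temporary, unmanaged mapping and tear it down again, much
 * as pmap_copy_page() below does with the tmp_vpages.
 *
 *	pmap_kenter_pa(tmpva, pa, VM_PROT_READ);
 *	...
 *	pmap_kremove(tmpva, PAGE_SIZE);
 */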
2087
2088 /* pmap_map INTERNAL
2089 **
2090 * Map a contiguous range of physical memory into a contiguous range of
2091 * the kernel virtual address space.
2092 *
2093 * Used for device mappings and early mapping of the kernel text/data/bss.
2094 * Returns the first virtual address beyond the end of the range.
2095 */
2096 vaddr_t
2097 pmap_map(vaddr_t va, paddr_t pa, paddr_t endpa, int prot)
2098 {
2099 int sz;
2100
2101 sz = endpa - pa;
2102 do {
2103 pmap_enter_kernel(va, pa, prot);
2104 va += PAGE_SIZE;
2105 pa += PAGE_SIZE;
2106 sz -= PAGE_SIZE;
2107 } while (sz > 0);
2108 pmap_update(pmap_kernel());
2109 return(va);
2110 }
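
/*
 * A typical (hypothetical) early-boot use of pmap_map(), wiring a
 * device's register pages into the kernel address space; the return
 * value is ready to serve as the start of the next mapping:
 *
 *	va = pmap_map(va, dev_pa, dev_pa + PAGE_SIZE,
 *	    VM_PROT_READ | VM_PROT_WRITE);
 */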
2111
2112 /* pmap_protect INTERFACE
2113 **
2114 * Apply the given protection to the given virtual address range within
2115 * the given map.
2116 *
2117 * It is ok for the protection applied to be stronger than what is
2118 * specified. We use this to our advantage when the given map has no
2119 * mapping for the virtual address. By skipping a page when this
2120 * is discovered, we are effectively applying a protection of VM_PROT_NONE,
2121 * and therefore do not need to map the page just to apply a protection
2122 * code. Only pmap_enter() needs to create new mappings if they do not exist.
2123 *
2124  * XXX - This function could be sped up by using pmap_stroll() for initial
2125  * setup, and then manual strolling in the for() loop.
2126 */
2127 void
2128 pmap_protect(pmap_t pmap, vaddr_t startva, vaddr_t endva, vm_prot_t prot)
2129 {
2130 boolean_t iscurpmap;
2131 int a_idx, b_idx, c_idx;
2132 a_tmgr_t *a_tbl;
2133 b_tmgr_t *b_tbl;
2134 c_tmgr_t *c_tbl;
2135 mmu_short_pte_t *pte;
2136
2137 if (pmap == pmap_kernel()) {
2138 pmap_protect_kernel(startva, endva, prot);
2139 return;
2140 }
2141
2142 /*
2143 * In this particular pmap implementation, there are only three
2144 * types of memory protection: 'all' (read/write/execute),
2145 * 'read-only' (read/execute) and 'none' (no mapping.)
2146 * It is not possible for us to treat 'executable' as a separate
2147 * protection type. Therefore, protection requests that seek to
2148 * remove execute permission while retaining read or write, and those
2149 * that make little sense (write-only for example) are ignored.
2150 */
2151 switch (prot) {
2152 case VM_PROT_NONE:
2153 /*
2154 * A request to apply the protection code of
2155 * 'VM_PROT_NONE' is a synonym for pmap_remove().
2156 */
2157 pmap_remove(pmap, startva, endva);
2158 return;
2159 case VM_PROT_EXECUTE:
2160 case VM_PROT_READ:
2161 case VM_PROT_READ|VM_PROT_EXECUTE:
2162 /* continue */
2163 break;
2164 case VM_PROT_WRITE:
2165 case VM_PROT_WRITE|VM_PROT_READ:
2166 case VM_PROT_WRITE|VM_PROT_EXECUTE:
2167 case VM_PROT_ALL:
2168 /* None of these should happen in a sane system. */
2169 return;
2170 }
2171
2172 /*
2173 * If the pmap has no A table, it has no mappings and therefore
2174 * there is nothing to protect.
2175 */
2176 if ((a_tbl = pmap->pm_a_tmgr) == NULL)
2177 return;
2178
2179 a_idx = MMU_TIA(startva);
2180 b_idx = MMU_TIB(startva);
2181 c_idx = MMU_TIC(startva);
2182 	b_tbl = NULL;
2183 	c_tbl = NULL;
2184 iscurpmap = (pmap == current_pmap());
2185 while (startva < endva) {
2186 if (b_tbl || MMU_VALID_DT(a_tbl->at_dtbl[a_idx])) {
2187 if (b_tbl == NULL) {
2188 b_tbl = (b_tmgr_t *) a_tbl->at_dtbl[a_idx].addr.raw;
2189 b_tbl = mmu_ptov((vaddr_t)b_tbl);
2190 b_tbl = mmuB2tmgr((mmu_short_dte_t *)b_tbl);
2191 }
2192 if (c_tbl || MMU_VALID_DT(b_tbl->bt_dtbl[b_idx])) {
2193 if (c_tbl == NULL) {
2194 c_tbl = (c_tmgr_t *) MMU_DTE_PA(b_tbl->bt_dtbl[b_idx]);
2195 c_tbl = mmu_ptov((vaddr_t)c_tbl);
2196 c_tbl = mmuC2tmgr((mmu_short_pte_t *)c_tbl);
2197 }
2198 if (MMU_VALID_DT(c_tbl->ct_dtbl[c_idx])) {
2199 pte = &c_tbl->ct_dtbl[c_idx];
2200 /* make the mapping read-only */
2201 pte->attr.raw |= MMU_SHORT_PTE_WP;
2202 /*
2203 * If we just modified the current address space,
2204 * flush any translations for the modified page from
2205 * the translation cache and any data from it in the
2206 * data cache.
2207 */
2208 if (iscurpmap)
2209 TBIS(startva);
2210 }
2211 startva += PAGE_SIZE;
2212
2213 if (++c_idx >= MMU_C_TBL_SIZE) { /* exceeded C table? */
2214 c_tbl = NULL;
2215 c_idx = 0;
2216 if (++b_idx >= MMU_B_TBL_SIZE) { /* exceeded B table? */
2217 b_tbl = NULL;
2218 b_idx = 0;
2219 }
2220 }
2221 } else { /* C table wasn't valid */
2222 c_tbl = NULL;
2223 c_idx = 0;
2224 startva += MMU_TIB_RANGE;
2225 if (++b_idx >= MMU_B_TBL_SIZE) { /* exceeded B table? */
2226 b_tbl = NULL;
2227 b_idx = 0;
2228 }
2229 } /* C table */
2230 } else { /* B table wasn't valid */
2231 b_tbl = NULL;
2232 b_idx = 0;
2233 startva += MMU_TIA_RANGE;
2234 a_idx++;
2235 } /* B table */
2236 }
2237 }
2238
2239 /* pmap_protect_kernel INTERNAL
2240 **
2241 * Apply the given protection code to a kernel address range.
2242 */
2243 void
2244 pmap_protect_kernel(vaddr_t startva, vaddr_t endva, vm_prot_t prot)
2245 {
2246 vaddr_t va;
2247 mmu_short_pte_t *pte;
2248
2249 pte = &kernCbase[(unsigned long) m68k_btop(startva - KERNBASE)];
2250 for (va = startva; va < endva; va += PAGE_SIZE, pte++) {
2251 if (MMU_VALID_DT(*pte)) {
2252 switch (prot) {
2253 case VM_PROT_ALL:
2254 break;
2255 case VM_PROT_EXECUTE:
2256 case VM_PROT_READ:
2257 case VM_PROT_READ|VM_PROT_EXECUTE:
2258 pte->attr.raw |= MMU_SHORT_PTE_WP;
2259 break;
2260 case VM_PROT_NONE:
2261 /* this is an alias for 'pmap_remove_kernel' */
2262 pmap_remove_pte(pte);
2263 break;
2264 default:
2265 break;
2266 }
2267 /*
2268 * since this is the kernel, immediately flush any cached
2269 * descriptors for this address.
2270 */
2271 TBIS(va);
2272 }
2273 }
2274 }
2275
2276 /* pmap_unwire INTERFACE
2277 **
2278 * Clear the wired attribute of the specified page.
2279 *
2280 * This function is called from vm_fault.c to unwire
2281 * a mapping.
2282 */
2283 void
2284 pmap_unwire(pmap_t pmap, vaddr_t va)
2285 {
2286 int a_idx, b_idx, c_idx;
2287 a_tmgr_t *a_tbl;
2288 b_tmgr_t *b_tbl;
2289 c_tmgr_t *c_tbl;
2290 mmu_short_pte_t *pte;
2291
2292 /* Kernel mappings always remain wired. */
2293 if (pmap == pmap_kernel())
2294 return;
2295
2296 /*
2297 * Walk through the tables. If the walk terminates without
2298 * a valid PTE then the address wasn't wired in the first place.
2299 * Return immediately.
2300 */
2301 if (pmap_stroll(pmap, va, &a_tbl, &b_tbl, &c_tbl, &pte, &a_idx,
2302 &b_idx, &c_idx) == FALSE)
2303 return;
2304
2305
2306 /* Is the PTE wired? If not, return. */
2307 if (!(pte->attr.raw & MMU_SHORT_PTE_WIRED))
2308 return;
2309
2310 /* Remove the wiring bit. */
2311 pte->attr.raw &= ~(MMU_SHORT_PTE_WIRED);
2312
2313 /*
2314 * Decrement the wired entry count in the C table.
2315 * If it reaches zero the following things happen:
2316 * 1. The table no longer has any wired entries and is considered
2317 * unwired.
2318 * 2. It is placed on the available queue.
2319 * 3. The parent table's wired entry count is decremented.
2320 * 4. If it reaches zero, this process repeats at step 1 and
2321 	 *    stops after reaching the A table.
2322 */
2323 if (--c_tbl->ct_wcnt == 0) {
2324 TAILQ_INSERT_TAIL(&c_pool, c_tbl, ct_link);
2325 if (--b_tbl->bt_wcnt == 0) {
2326 TAILQ_INSERT_TAIL(&b_pool, b_tbl, bt_link);
2327 if (--a_tbl->at_wcnt == 0) {
2328 TAILQ_INSERT_TAIL(&a_pool, a_tbl, at_link);
2329 }
2330 }
2331 }
2332 }
2333
2334 /* pmap_copy INTERFACE
2335 **
2336 * Copy the mappings of a range of addresses in one pmap, into
2337 * the destination address of another.
2338 *
2339 * This routine is advisory. Should we one day decide that MMU tables
2340 * may be shared by more than one pmap, this function should be used to
2341 * link them together. Until that day however, we do nothing.
2342 */
2343 void
2344 pmap_copy(pmap_t pmap_a, pmap_t pmap_b, vaddr_t dst, vsize_t len, vaddr_t src)
2345 {
2346 /* not implemented. */
2347 }
2348
2349 /* pmap_copy_page INTERFACE
2350 **
2351 * Copy the contents of one physical page into another.
2352 *
2353 * This function makes use of two virtual pages allocated in pmap_bootstrap()
2354 * to map the two specified physical pages into the kernel address space.
2355 *
2356 * Note: We could use the transparent translation registers to make the
2357 * mappings. If we do so, be sure to disable interrupts before using them.
2358 */
2359 void
2360 pmap_copy_page(paddr_t srcpa, paddr_t dstpa)
2361 {
2362 vaddr_t srcva, dstva;
2363 int s;
2364
2365 srcva = tmp_vpages[0];
2366 dstva = tmp_vpages[1];
2367
2368 s = splvm();
2369 #ifdef DIAGNOSTIC
2370 if (tmp_vpages_inuse++)
2371 panic("pmap_copy_page: temporary vpages are in use.");
2372 #endif
2373
2374 	/* Map pages as non-cacheable to avoid cache pollution? */
2375 pmap_kenter_pa(srcva, srcpa, VM_PROT_READ);
2376 pmap_kenter_pa(dstva, dstpa, VM_PROT_READ|VM_PROT_WRITE);
2377
2378 /* Hand-optimized version of bcopy(src, dst, PAGE_SIZE) */
2379 copypage((char *) srcva, (char *) dstva);
2380
2381 pmap_kremove(srcva, PAGE_SIZE);
2382 pmap_kremove(dstva, PAGE_SIZE);
2383
2384 #ifdef DIAGNOSTIC
2385 --tmp_vpages_inuse;
2386 #endif
2387 splx(s);
2388 }
2389
2390 /* pmap_zero_page INTERFACE
2391 **
2392 * Zero the contents of the specified physical page.
2393 *
2394  * Uses one of the virtual pages allocated in pmap_bootstrap()
2395 * to map the specified page into the kernel address space.
2396 */
2397 void
2398 pmap_zero_page(paddr_t dstpa)
2399 {
2400 vaddr_t dstva;
2401 int s;
2402
2403 dstva = tmp_vpages[1];
2404 s = splvm();
2405 #ifdef DIAGNOSTIC
2406 if (tmp_vpages_inuse++)
2407 panic("pmap_zero_page: temporary vpages are in use.");
2408 #endif
2409
2410 /* The comments in pmap_copy_page() above apply here also. */
2411 pmap_kenter_pa(dstva, dstpa, VM_PROT_READ|VM_PROT_WRITE);
2412
2413 /* Hand-optimized version of bzero(ptr, PAGE_SIZE) */
2414 zeropage((char *) dstva);
2415
2416 pmap_kremove(dstva, PAGE_SIZE);
2417 #ifdef DIAGNOSTIC
2418 --tmp_vpages_inuse;
2419 #endif
2420 splx(s);
2421 }
2422
2423 /* pmap_collect INTERFACE
2424 **
2425 * Called from the VM system when we are about to swap out
2426 * the process using this pmap. This should give up any
2427 * resources held here, including all its MMU tables.
2428 */
2429 void
2430 pmap_collect(pmap_t pmap)
2431 {
2432 /* XXX - todo... */
2433 }
2434
2435 /* pmap_create INTERFACE
2436 **
2437 * Create and return a pmap structure.
2438 */
2439 pmap_t
2440 pmap_create(void)
2441 {
2442 pmap_t pmap;
2443
2444 pmap = pool_get(&pmap_pmap_pool, PR_WAITOK);
2445 pmap_pinit(pmap);
2446 return pmap;
2447 }
2448
2449 /* pmap_pinit INTERNAL
2450 **
2451 * Initialize a pmap structure.
2452 */
2453 void
2454 pmap_pinit(pmap_t pmap)
2455 {
2456 memset(pmap, 0, sizeof(struct pmap));
2457 pmap->pm_a_tmgr = NULL;
2458 pmap->pm_a_phys = kernAphys;
2459 pmap->pm_refcount = 1;
2460 simple_lock_init(&pmap->pm_lock);
2461 }
2462
2463 /* pmap_release INTERFACE
2464 **
2465 * Release any resources held by the given pmap.
2466 *
2467 * This is the reverse analog to pmap_pinit. It does not
2468 * necessarily mean for the pmap structure to be deallocated,
2469 * as in pmap_destroy.
2470 */
2471 void
2472 pmap_release(pmap_t pmap)
2473 {
2474 /*
2475 * As long as the pmap contains no mappings,
2476 * which always should be the case whenever
2477 * this function is called, there really should
2478 * be nothing to do.
2479 */
2480 #ifdef PMAP_DEBUG
2481 if (pmap == pmap_kernel())
2482 panic("pmap_release: kernel pmap");
2483 #endif
2484 /*
2485 * XXX - If this pmap has an A table, give it back.
2486 * The pmap SHOULD be empty by now, and pmap_remove
2487 * should have already given back the A table...
2488 * However, I see: pmap->pm_a_tmgr->at_ecnt == 1
2489 * at this point, which means some mapping was not
2490 * removed when it should have been. -gwr
2491 */
2492 if (pmap->pm_a_tmgr != NULL) {
2493 /* First make sure we are not using it! */
2494 if (kernel_crp.rp_addr == pmap->pm_a_phys) {
2495 kernel_crp.rp_addr = kernAphys;
2496 loadcrp(&kernel_crp);
2497 }
2498 #ifdef PMAP_DEBUG /* XXX - todo! */
2499 /* XXX - Now complain... */
2500 printf("pmap_release: still have table\n");
2501 Debugger();
2502 #endif
2503 free_a_table(pmap->pm_a_tmgr, TRUE);
2504 pmap->pm_a_tmgr = NULL;
2505 pmap->pm_a_phys = kernAphys;
2506 }
2507 }
2508
2509 /* pmap_reference INTERFACE
2510 **
2511 * Increment the reference count of a pmap.
2512 */
2513 void
2514 pmap_reference(pmap_t pmap)
2515 {
2516 pmap_lock(pmap);
2517 pmap_add_ref(pmap);
2518 pmap_unlock(pmap);
2519 }
2520
2521 /* pmap_dereference INTERNAL
2522 **
2523 * Decrease the reference count on the given pmap
2524 * by one and return the current count.
2525 */
2526 int
2527 pmap_dereference(pmap_t pmap)
2528 {
2529 int rtn;
2530
2531 pmap_lock(pmap);
2532 rtn = pmap_del_ref(pmap);
2533 pmap_unlock(pmap);
2534
2535 return rtn;
2536 }
2537
2538 /* pmap_destroy INTERFACE
2539 **
2540 * Decrement a pmap's reference count and delete
2541 * the pmap if it becomes zero. Will be called
2542 * only after all mappings have been removed.
2543 */
2544 void
2545 pmap_destroy(pmap_t pmap)
2546 {
2547 if (pmap_dereference(pmap) == 0) {
2548 pmap_release(pmap);
2549 pool_put(&pmap_pmap_pool, pmap);
2550 }
2551 }
2552
2553 /* pmap_is_referenced INTERFACE
2554 **
2555 * Determine if the given physical page has been
2556 * referenced (read from [or written to.])
2557 */
2558 boolean_t
2559 pmap_is_referenced(struct vm_page *pg)
2560 {
2561 paddr_t pa = VM_PAGE_TO_PHYS(pg);
2562 pv_t *pv;
2563 int idx;
2564
2565 /*
2566 * Check the flags on the pv head. If they are set,
2567 * return immediately. Otherwise a search must be done.
2568 */
2569
2570 pv = pa2pv(pa);
2571 if (pv->pv_flags & PV_FLAGS_USED)
2572 return TRUE;
2573
2574 /*
2575 * Search through all pv elements pointing
2576 * to this page and query their reference bits
2577 */
2578
2579 for (idx = pv->pv_idx; idx != PVE_EOL; idx = pvebase[idx].pve_next) {
2580 if (MMU_PTE_USED(kernCbase[idx])) {
2581 return TRUE;
2582 }
2583 }
2584 return FALSE;
2585 }
2586
2587 /* pmap_is_modified INTERFACE
2588 **
2589 * Determine if the given physical page has been
2590 * modified (written to.)
2591 */
2592 boolean_t
2593 pmap_is_modified(struct vm_page *pg)
2594 {
2595 paddr_t pa = VM_PAGE_TO_PHYS(pg);
2596 pv_t *pv;
2597 int idx;
2598
2599 /* see comments in pmap_is_referenced() */
2600 pv = pa2pv(pa);
2601 if (pv->pv_flags & PV_FLAGS_MDFY)
2602 return TRUE;
2603
2604 for (idx = pv->pv_idx;
2605 idx != PVE_EOL;
2606 idx = pvebase[idx].pve_next) {
2607
2608 if (MMU_PTE_MODIFIED(kernCbase[idx])) {
2609 return TRUE;
2610 }
2611 }
2612
2613 return FALSE;
2614 }
2615
2616 /* pmap_page_protect INTERFACE
2617 **
2618 * Applies the given protection to all mappings to the given
2619 * physical page.
2620 */
2621 void
2622 pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
2623 {
2624 paddr_t pa = VM_PAGE_TO_PHYS(pg);
2625 pv_t *pv;
2626 int idx;
2627 vaddr_t va;
2628 struct mmu_short_pte_struct *pte;
2629 c_tmgr_t *c_tbl;
2630 pmap_t pmap, curpmap;
2631
2632 curpmap = current_pmap();
2633 pv = pa2pv(pa);
2634
2635 for (idx = pv->pv_idx; idx != PVE_EOL; idx = pvebase[idx].pve_next) {
2636 pte = &kernCbase[idx];
2637 switch (prot) {
2638 case VM_PROT_ALL:
2639 /* do nothing */
2640 break;
2641 case VM_PROT_EXECUTE:
2642 case VM_PROT_READ:
2643 case VM_PROT_READ|VM_PROT_EXECUTE:
2644 /*
2645 * Determine the virtual address mapped by
2646 * the PTE and flush ATC entries if necessary.
2647 */
2648 va = pmap_get_pteinfo(idx, &pmap, &c_tbl);
2649 pte->attr.raw |= MMU_SHORT_PTE_WP;
2650 if (pmap == curpmap || pmap == pmap_kernel())
2651 TBIS(va);
2652 break;
2653 case VM_PROT_NONE:
2654 /* Save the mod/ref bits. */
2655 pv->pv_flags |= pte->attr.raw;
2656 /* Invalidate the PTE. */
2657 pte->attr.raw = MMU_DT_INVALID;
2658
2659 /*
2660 			 * Update table counts, and flush ATC
2661 			 * entries if necessary.
2662 */
2663 va = pmap_get_pteinfo(idx, &pmap, &c_tbl);
2664
2665 /*
2666 * If the PTE belongs to the kernel map,
2667 * be sure to flush the page it maps.
2668 */
2669 if (pmap == pmap_kernel()) {
2670 TBIS(va);
2671 } else {
2672 /*
2673 * The PTE belongs to a user map.
2674 				 * Update the entry count in the C
2675 * table to which it belongs and flush
2676 * the ATC if the mapping belongs to
2677 * the current pmap.
2678 */
2679 c_tbl->ct_ecnt--;
2680 if (pmap == curpmap)
2681 TBIS(va);
2682 }
2683 break;
2684 default:
2685 break;
2686 }
2687 }
2688
2689 /*
2690 * If the protection code indicates that all mappings to the page
2691 * be removed, truncate the PV list to zero entries.
2692 */
2693 if (prot == VM_PROT_NONE)
2694 pv->pv_idx = PVE_EOL;
2695 }
2696
2697 /* pmap_get_pteinfo INTERNAL
2698 **
2699 * Called internally to find the pmap and virtual address within that
2700 * map to which the pte at the given index maps. Also includes the PTE's C
2701 * table manager.
2702 *
2703 * Returns the pmap in the argument provided, and the virtual address
2704 * by return value.
2705 */
2706 vaddr_t
2707 pmap_get_pteinfo(u_int idx, pmap_t *pmap, c_tmgr_t **tbl)
2708 {
2709 vaddr_t va = 0;
2710
2711 /*
2712 * Determine if the PTE is a kernel PTE or a user PTE.
2713 */
2714 if (idx >= NUM_KERN_PTES) {
2715 /*
2716 * The PTE belongs to a user mapping.
2717 */
2718 /* XXX: Would like an inline for this to validate idx... */
2719 *tbl = &Ctmgrbase[(idx - NUM_KERN_PTES) / MMU_C_TBL_SIZE];
2720
2721 *pmap = (*tbl)->ct_pmap;
2722 /*
2723 * To find the va to which the PTE maps, we first take
2724 * the table's base virtual address mapping which is stored
2725 * in ct_va. We then increment this address by a page for
2726 * every slot skipped until we reach the PTE.
2727 */
2728 va = (*tbl)->ct_va;
2729 va += m68k_ptob(idx % MMU_C_TBL_SIZE);
2730 } else {
2731 /*
2732 * The PTE belongs to the kernel map.
2733 */
2734 *pmap = pmap_kernel();
2735
2736 va = m68k_ptob(idx);
2737 va += KERNBASE;
2738 }
2739
2740 return va;
2741 }
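
/*
 * A worked example of the index arithmetic above, using hypothetical
 * numbers and assuming MMU_C_TBL_SIZE is 64: for
 * idx == NUM_KERN_PTES + 70, the PTE lives in user C table
 * 70 / 64 == 1, at slot 70 % 64 == 6, so the returned va is that
 * table's ct_va plus m68k_ptob(6).
 */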
2742
2743 /* pmap_clear_modify INTERFACE
2744 **
2745 * Clear the modification bit on the page at the specified
2746 * physical address.
2747 *
2748 */
2749 boolean_t
2750 pmap_clear_modify(struct vm_page *pg)
2751 {
2752 paddr_t pa = VM_PAGE_TO_PHYS(pg);
2753 boolean_t rv;
2754
2755 rv = pmap_is_modified(pg);
2756 pmap_clear_pv(pa, PV_FLAGS_MDFY);
2757 return rv;
2758 }
2759
2760 /* pmap_clear_reference INTERFACE
2761 **
2762 * Clear the referenced bit on the page at the specified
2763 * physical address.
2764 */
2765 boolean_t
2766 pmap_clear_reference(struct vm_page *pg)
2767 {
2768 paddr_t pa = VM_PAGE_TO_PHYS(pg);
2769 boolean_t rv;
2770
2771 rv = pmap_is_referenced(pg);
2772 pmap_clear_pv(pa, PV_FLAGS_USED);
2773 return rv;
2774 }
2775
2776 /* pmap_clear_pv INTERNAL
2777 **
2778 * Clears the specified flag from the specified physical address.
2779 * (Used by pmap_clear_modify() and pmap_clear_reference().)
2780 *
2781 * Flag is one of:
2782 * PV_FLAGS_MDFY - Page modified bit.
2783 * PV_FLAGS_USED - Page used (referenced) bit.
2784 *
2785 * This routine must not only clear the flag on the pv list
2786 * head. It must also clear the bit on every pte in the pv
2787 * list associated with the address.
2788 */
2789 void
2790 pmap_clear_pv(paddr_t pa, int flag)
2791 {
2792 pv_t *pv;
2793 int idx;
2794 vaddr_t va;
2795 pmap_t pmap;
2796 mmu_short_pte_t *pte;
2797 c_tmgr_t *c_tbl;
2798
2799 pv = pa2pv(pa);
2800 pv->pv_flags &= ~(flag);
2801 for (idx = pv->pv_idx; idx != PVE_EOL; idx = pvebase[idx].pve_next) {
2802 pte = &kernCbase[idx];
2803 pte->attr.raw &= ~(flag);
2804
2805 /*
2806 * The MC68030 MMU will not set the modified or
2807 * referenced bits on any MMU tables for which it has
2808 		 * a cached descriptor with its modify bit set. To ensure
2809 * that it will modify these bits on the PTE during the next
2810 * time it is written to or read from, we must flush it from
2811 * the ATC.
2812 *
2813 * Ordinarily it is only necessary to flush the descriptor
2814 * if it is used in the current address space. But since I
2815 * am not sure that there will always be a notion of
2816 * 'the current address space' when this function is called,
2817 * I will skip the test and always flush the address. It
2818 * does no harm.
2819 */
2820
2821 va = pmap_get_pteinfo(idx, &pmap, &c_tbl);
2822 TBIS(va);
2823 }
2824 }
2825
2826 /* pmap_extract INTERFACE
2827 **
2828 * Return the physical address mapped by the virtual address
2829 * in the specified pmap.
2830 *
2831 * Note: this function should also apply an exclusive lock
2832 * on the pmap system during its duration.
2833 */
2834 boolean_t
2835 pmap_extract(pmap_t pmap, vaddr_t va, paddr_t *pap)
2836 {
2837 int a_idx, b_idx, pte_idx;
2838 a_tmgr_t *a_tbl;
2839 b_tmgr_t *b_tbl;
2840 c_tmgr_t *c_tbl;
2841 mmu_short_pte_t *c_pte;
2842
2843 if (pmap == pmap_kernel())
2844 return pmap_extract_kernel(va, pap);
2845
2846 if (pmap_stroll(pmap, va, &a_tbl, &b_tbl, &c_tbl,
2847 &c_pte, &a_idx, &b_idx, &pte_idx) == FALSE)
2848 return FALSE;
2849
2850 if (!MMU_VALID_DT(*c_pte))
2851 return FALSE;
2852
2853 if (pap != NULL)
2854 *pap = MMU_PTE_PA(*c_pte);
2855 return (TRUE);
2856 }
2857
2858 /* pmap_extract_kernel INTERNAL
2859 **
2860 * Extract a translation from the kernel address space.
2861 */
2862 boolean_t
2863 pmap_extract_kernel(vaddr_t va, paddr_t *pap)
2864 {
2865 mmu_short_pte_t *pte;
2866
2867 pte = &kernCbase[(u_int) m68k_btop(va - KERNBASE)];
2868 if (!MMU_VALID_DT(*pte))
2869 return (FALSE);
2870 if (pap != NULL)
2871 *pap = MMU_PTE_PA(*pte);
2872 return (TRUE);
2873 }
2874
2875 /* pmap_remove_kernel INTERNAL
2876 **
2877 * Remove the mapping of a range of virtual addresses from the kernel map.
2878 * The arguments are already page-aligned.
2879 */
2880 void
2881 pmap_remove_kernel(vaddr_t sva, vaddr_t eva)
2882 {
2883 int idx, eidx;
2884
2885 #ifdef PMAP_DEBUG
2886 if ((sva & PGOFSET) || (eva & PGOFSET))
2887 panic("pmap_remove_kernel: alignment");
2888 #endif
2889
2890 idx = m68k_btop(sva - KERNBASE);
2891 eidx = m68k_btop(eva - KERNBASE);
2892
2893 while (idx < eidx) {
2894 pmap_remove_pte(&kernCbase[idx++]);
2895 TBIS(sva);
2896 sva += PAGE_SIZE;
2897 }
2898 }
2899
2900 /* pmap_remove INTERFACE
2901 **
2902 * Remove the mapping of a range of virtual addresses from the given pmap.
2903 *
2904 * If the range contains any wired entries, this function will probably create
2905 * disaster.
2906 */
2907 void
2908 pmap_remove(pmap_t pmap, vaddr_t sva, vaddr_t eva)
2909 {
2910
2911 if (pmap == pmap_kernel()) {
2912 pmap_remove_kernel(sva, eva);
2913 return;
2914 }
2915
2916 /*
2917 * If the pmap doesn't have an A table of its own, it has no mappings
2918 * that can be removed.
2919 */
2920 if (pmap->pm_a_tmgr == NULL)
2921 return;
2922
2923 /*
2924 * Remove the specified range from the pmap. If the function
2925 * returns true, the operation removed all the valid mappings
2926 * in the pmap and freed its A table. If this happened to the
2927 * currently loaded pmap, the MMU root pointer must be reloaded
2928 * with the default 'kernel' map.
2929 */
2930 if (pmap_remove_a(pmap->pm_a_tmgr, sva, eva)) {
2931 if (kernel_crp.rp_addr == pmap->pm_a_phys) {
2932 kernel_crp.rp_addr = kernAphys;
2933 loadcrp(&kernel_crp);
2934 /* will do TLB flush below */
2935 }
2936 pmap->pm_a_tmgr = NULL;
2937 pmap->pm_a_phys = kernAphys;
2938 }
2939
2940 /*
2941 * If we just modified the current address space,
2942 * make sure to flush the MMU cache.
2943 *
2944 	 * XXX - this could be an unnecessarily large flush.
2945 * XXX - Could decide, based on the size of the VA range
2946 * to be removed, whether to flush "by pages" or "all".
2947 */
2948 if (pmap == current_pmap())
2949 TBIAU();
2950 }
2951
2952 /* pmap_remove_a INTERNAL
2953 **
2954 * This is function number one in a set of three that removes a range
2955 * of memory in the most efficient manner by removing the highest possible
2956 * tables from the memory space. This particular function attempts to remove
2957 * as many B tables as it can, delegating the remaining fragmented ranges to
2958 * pmap_remove_b().
2959 *
2960 * If the removal operation results in an empty A table, the function returns
2961 * TRUE.
2962 *
2963 * It's ugly but will do for now.
2964 */
2965 boolean_t
2966 pmap_remove_a(a_tmgr_t *a_tbl, vaddr_t sva, vaddr_t eva)
2967 {
2968 boolean_t empty;
2969 int idx;
2970 vaddr_t nstart, nend;
2971 b_tmgr_t *b_tbl;
2972 mmu_long_dte_t *a_dte;
2973 mmu_short_dte_t *b_dte;
2974
2975 /*
2976 * The following code works with what I call a 'granularity
2977 	 * reduction algorithm'. A range of addresses will always have
2978 * the following properties, which are classified according to
2979 * how the range relates to the size of the current granularity
2980 * - an A table entry:
2981 *
2982 * 1 2 3 4
2983 * -+---+---+---+---+---+---+---+-
2984 * -+---+---+---+---+---+---+---+-
2985 *
2986 * A range will always start on a granularity boundary, illustrated
2987 * by '+' signs in the table above, or it will start at some point
2988 	 * in between granularity boundaries, as illustrated by point 1.
2989 * The first step in removing a range of addresses is to remove the
2990 * range between 1 and 2, the nearest granularity boundary. This
2991 * job is handled by the section of code governed by the
2992 	 * 'if (sva < nstart)' statement.
2993 *
2994 	 * A range will always encompass zero or more integral granules,
2995 * illustrated by points 2 and 3. Integral granules are easy to
2996 * remove. The removal of these granules is the second step, and
2997 * is handled by the code block 'if (nstart < nend)'.
2998 *
2999 * Lastly, a range will always end on a granularity boundary,
3000 	 * illustrated by point 3, or it will fall just beyond one, as
3001 	 * illustrated by point 4. The last step involves removing this range
3002 	 * and is handled by the code block 'if (nend < eva)'.
3003 */
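	/*
	 * A worked sketch with hypothetical addresses, where each A table
	 * entry spans MMU_TIA_RANGE bytes: removing [sva, eva) splits into
	 *
	 *	pmap_remove_b(..., sva, MMU_ROUND_UP_A(sva))	head fragment
	 *	free_b_table() for each whole entry in between	middle granules
	 *	pmap_remove_b(..., MMU_ROUND_A(eva), eva)	tail fragment
	 *
	 * where any of the three pieces may be empty.
	 */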
3004 nstart = MMU_ROUND_UP_A(sva);
3005 nend = MMU_ROUND_A(eva);
3006
3007 if (sva < nstart) {
3008 /*
3009 		 * This block is executed if the range starts in between
3010 		 * granularity boundaries.
3011 *
3012 * First find the DTE which is responsible for mapping
3013 * the start of the range.
3014 */
3015 idx = MMU_TIA(sva);
3016 a_dte = &a_tbl->at_dtbl[idx];
3017
3018 /*
3019 * If the DTE is valid then delegate the removal of the sub
3020 * range to pmap_remove_b(), which can remove addresses at
3021 * a finer granularity.
3022 */
3023 if (MMU_VALID_DT(*a_dte)) {
3024 b_dte = mmu_ptov(a_dte->addr.raw);
3025 b_tbl = mmuB2tmgr(b_dte);
3026
3027 /*
3028 * The sub range to be removed starts at the start
3029 * of the full range we were asked to remove, and ends
3030 			 * at the lesser of:
3031 			 * 1. The end of the full range, -or-
3032 			 * 2. The start of the full range, rounded up to the
3033 			 *    nearest granularity boundary.
3034 */
3035 if (eva < nstart)
3036 empty = pmap_remove_b(b_tbl, sva, eva);
3037 else
3038 empty = pmap_remove_b(b_tbl, sva, nstart);
3039
3040 /*
3041 * If the removal resulted in an empty B table,
3042 * invalidate the DTE that points to it and decrement
3043 * the valid entry count of the A table.
3044 */
3045 if (empty) {
3046 a_dte->attr.raw = MMU_DT_INVALID;
3047 a_tbl->at_ecnt--;
3048 }
3049 }
3050 /*
3051 * If the DTE is invalid, the address range is already non-
3052 * existent and can simply be skipped.
3053 */
3054 }
3055 if (nstart < nend) {
3056 /*
3057 * This block is executed if the range spans a whole number
3058 * multiple of granules (A table entries.)
3059 *
3060 * First find the DTE which is responsible for mapping
3061 * the start of the first granule involved.
3062 */
3063 idx = MMU_TIA(nstart);
3064 a_dte = &a_tbl->at_dtbl[idx];
3065
3066 /*
3067 * Remove entire sub-granules (B tables) one at a time,
3068 * until reaching the end of the range.
3069 */
3070 for (; nstart < nend; a_dte++, nstart += MMU_TIA_RANGE)
3071 if (MMU_VALID_DT(*a_dte)) {
3072 /*
3073 * Find the B table manager for the
3074 * entry and free it.
3075 */
3076 b_dte = mmu_ptov(a_dte->addr.raw);
3077 b_tbl = mmuB2tmgr(b_dte);
3078 free_b_table(b_tbl, TRUE);
3079
3080 /*
3081 * Invalidate the DTE that points to the
3082 * B table and decrement the valid entry
3083 * count of the A table.
3084 */
3085 a_dte->attr.raw = MMU_DT_INVALID;
3086 a_tbl->at_ecnt--;
3087 }
3088 }
3089 if (nend < eva) {
3090 /*
3091 * This block is executed if the range ends beyond a
3092 * granularity boundary.
3093 *
3094 * First find the DTE which is responsible for mapping
3095 * the start of the nearest (rounded down) granularity
3096 * boundary.
3097 */
3098 idx = MMU_TIA(nend);
3099 a_dte = &a_tbl->at_dtbl[idx];
3100
3101 /*
3102 * If the DTE is valid then delegate the removal of the sub
3103 * range to pmap_remove_b(), which can remove addresses at
3104 * a finer granularity.
3105 */
3106 if (MMU_VALID_DT(*a_dte)) {
3107 /*
3108 * Find the B table manager for the entry
3109 * and hand it to pmap_remove_b() along with
3110 * the sub range.
3111 */
3112 b_dte = mmu_ptov(a_dte->addr.raw);
3113 b_tbl = mmuB2tmgr(b_dte);
3114
3115 empty = pmap_remove_b(b_tbl, nend, eva);
3116
3117 /*
3118 * If the removal resulted in an empty B table,
3119 * invalidate the DTE that points to it and decrement
3120 * the valid entry count of the A table.
3121 */
3122 if (empty) {
3123 a_dte->attr.raw = MMU_DT_INVALID;
3124 a_tbl->at_ecnt--;
3125 }
3126 }
3127 }
3128
3129 /*
3130 * If there are no more entries in the A table, release it
3131 * back to the available pool and return TRUE.
3132 */
3133 if (a_tbl->at_ecnt == 0) {
3134 a_tbl->at_parent = NULL;
3135 TAILQ_REMOVE(&a_pool, a_tbl, at_link);
3136 TAILQ_INSERT_HEAD(&a_pool, a_tbl, at_link);
3137 empty = TRUE;
3138 } else {
3139 empty = FALSE;
3140 }
3141
3142 return empty;
3143 }
3144
3145 /* pmap_remove_b INTERNAL
3146 **
3147 * Remove a range of addresses from an address space, trying to remove entire
3148 * C tables if possible.
3149 *
3150 * If the operation results in an empty B table, the function returns TRUE.
3151 */
3152 boolean_t
3153 pmap_remove_b(b_tmgr_t *b_tbl, vaddr_t sva, vaddr_t eva)
3154 {
3155 boolean_t empty;
3156 int idx;
3157 vaddr_t nstart, nend, rstart;
3158 c_tmgr_t *c_tbl;
3159 mmu_short_dte_t *b_dte;
3160 mmu_short_pte_t *c_dte;
3161
3162
3163 nstart = MMU_ROUND_UP_B(sva);
3164 nend = MMU_ROUND_B(eva);
3165
3166 if (sva < nstart) {
3167 idx = MMU_TIB(sva);
3168 b_dte = &b_tbl->bt_dtbl[idx];
3169 if (MMU_VALID_DT(*b_dte)) {
3170 c_dte = mmu_ptov(MMU_DTE_PA(*b_dte));
3171 c_tbl = mmuC2tmgr(c_dte);
3172 if (eva < nstart)
3173 empty = pmap_remove_c(c_tbl, sva, eva);
3174 else
3175 empty = pmap_remove_c(c_tbl, sva, nstart);
3176 if (empty) {
3177 b_dte->attr.raw = MMU_DT_INVALID;
3178 b_tbl->bt_ecnt--;
3179 }
3180 }
3181 }
3182 if (nstart < nend) {
3183 idx = MMU_TIB(nstart);
3184 b_dte = &b_tbl->bt_dtbl[idx];
3185 rstart = nstart;
3186 while (rstart < nend) {
3187 if (MMU_VALID_DT(*b_dte)) {
3188 c_dte = mmu_ptov(MMU_DTE_PA(*b_dte));
3189 c_tbl = mmuC2tmgr(c_dte);
3190 free_c_table(c_tbl, TRUE);
3191 b_dte->attr.raw = MMU_DT_INVALID;
3192 b_tbl->bt_ecnt--;
3193 }
3194 b_dte++;
3195 rstart += MMU_TIB_RANGE;
3196 }
3197 }
3198 if (nend < eva) {
3199 idx = MMU_TIB(nend);
3200 b_dte = &b_tbl->bt_dtbl[idx];
3201 if (MMU_VALID_DT(*b_dte)) {
3202 c_dte = mmu_ptov(MMU_DTE_PA(*b_dte));
3203 c_tbl = mmuC2tmgr(c_dte);
3204 empty = pmap_remove_c(c_tbl, nend, eva);
3205 if (empty) {
3206 b_dte->attr.raw = MMU_DT_INVALID;
3207 b_tbl->bt_ecnt--;
3208 }
3209 }
3210 }
3211
3212 if (b_tbl->bt_ecnt == 0) {
3213 b_tbl->bt_parent = NULL;
3214 TAILQ_REMOVE(&b_pool, b_tbl, bt_link);
3215 TAILQ_INSERT_HEAD(&b_pool, b_tbl, bt_link);
3216 empty = TRUE;
3217 } else {
3218 empty = FALSE;
3219 }
3220
3221 return empty;
3222 }
3223
3224 /* pmap_remove_c INTERNAL
3225 **
3226 * Remove a range of addresses from the given C table.
3227 */
3228 boolean_t
3229 pmap_remove_c(c_tmgr_t *c_tbl, vaddr_t sva, vaddr_t eva)
3230 {
3231 boolean_t empty;
3232 int idx;
3233 mmu_short_pte_t *c_pte;
3234
3235 idx = MMU_TIC(sva);
3236 c_pte = &c_tbl->ct_dtbl[idx];
3237 for (;sva < eva; sva += MMU_PAGE_SIZE, c_pte++) {
3238 if (MMU_VALID_DT(*c_pte)) {
3239 pmap_remove_pte(c_pte);
3240 c_tbl->ct_ecnt--;
3241 }
3242 }
3243
3244 if (c_tbl->ct_ecnt == 0) {
3245 c_tbl->ct_parent = NULL;
3246 TAILQ_REMOVE(&c_pool, c_tbl, ct_link);
3247 TAILQ_INSERT_HEAD(&c_pool, c_tbl, ct_link);
3248 empty = TRUE;
3249 } else {
3250 empty = FALSE;
3251 }
3252
3253 return empty;
3254 }
3255
3256 /* is_managed INTERNAL
3257 **
3258 * Determine if the given physical address is managed by the PV system.
3259 * Note that this logic assumes that no one will ask for the status of
3260 * addresses which lie in-between the memory banks on the 3/80. If they
3261 * do so, it will falsely report that it is managed.
3262 *
3263 * Note: A "managed" address is one that was reported to the VM system as
3264 * a "usable page" during system startup. As such, the VM system expects the
3265  * pmap module to keep accurate track of the usage of those pages.
3266 * Any page not given to the VM system at startup does not exist (as far as
3267 * the VM system is concerned) and is therefore "unmanaged." Examples are
3268 * those pages which belong to the ROM monitor and the memory allocated before
3269 * the VM system was started.
3270 */
3271 boolean_t
3272 is_managed(paddr_t pa)
3273 {
3274 if (pa >= avail_start && pa < avail_end)
3275 return TRUE;
3276 else
3277 return FALSE;
3278 }
3279
3280 /* pmap_bootstrap_alloc INTERNAL
3281 **
3282 * Used internally for memory allocation at startup when malloc is not
3283 * available. This code will fail once it crosses the first memory
3284 * bank boundary on the 3/80. Hopefully by then however, the VM system
3285 * will be in charge of allocation.
3286 */
3287 void *
3288 pmap_bootstrap_alloc(int size)
3289 {
3290 void *rtn;
3291
3292 #ifdef PMAP_DEBUG
3293 if (bootstrap_alloc_enabled == FALSE) {
3294 mon_printf("pmap_bootstrap_alloc: disabled\n");
3295 sunmon_abort();
3296 }
3297 #endif
3298
3299 rtn = (void *) virtual_avail;
3300 virtual_avail += size;
3301
3302 #ifdef PMAP_DEBUG
3303 if (virtual_avail > virtual_contig_end) {
3304 mon_printf("pmap_bootstrap_alloc: out of mem\n");
3305 sunmon_abort();
3306 }
3307 #endif
3308
3309 return rtn;
3310 }
3311
3312 /* pmap_bootstrap_aalign INTERNAL
3313 **
3314  * Used to ensure that the next call to pmap_bootstrap_alloc() will
3315 * return a chunk of memory aligned to the specified size.
3316 *
3317 * Note: This function will only support alignment sizes that are powers
3318 * of two.
3319 */
3320 void
3321 pmap_bootstrap_aalign(int size)
3322 {
3323 int off;
3324
3325 off = virtual_avail & (size - 1);
3326 if (off) {
3327 (void) pmap_bootstrap_alloc(size - off);
3328 }
3329 }
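
/*
 * Example with hypothetical numbers: if virtual_avail is 0x7100,
 * pmap_bootstrap_aalign(0x1000) computes off = 0x100, throws away
 * 0x1000 - 0x100 = 0xf00 bytes and leaves virtual_avail at 0x8000.
 */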
3330
3331 /* pmap_pa_exists
3332 **
3333 * Used by the /dev/mem driver to see if a given PA is memory
3334 * that can be mapped. (The PA is not in a hole.)
3335 */
3336 int
3337 pmap_pa_exists(paddr_t pa)
3338 {
3339 int i;
3340
3341 for (i = 0; i < SUN3X_NPHYS_RAM_SEGS; i++) {
3342 if ((pa >= avail_mem[i].pmem_start) &&
3343 (pa < avail_mem[i].pmem_end))
3344 return (1);
3345 if (avail_mem[i].pmem_next == NULL)
3346 break;
3347 }
3348 return (0);
3349 }
3350
3351 /* Called only from locore.s and pmap.c */
3352 void _pmap_switch(pmap_t pmap);
3353
3354 /*
3355 * _pmap_switch INTERNAL
3356 *
3357 * This is called by locore.s:cpu_switch() when it is
3358 * switching to a new process. Load new translations.
3359 * Note: done in-line by locore.s unless PMAP_DEBUG
3360 *
3361 * Note that we do NOT allocate a context here, but
3362 * share the "kernel only" context until we really
3363 * need our own context for user-space mappings in
3364 * pmap_enter_user(). [ s/context/mmu A table/ ]
3365 */
3366 void
3367 _pmap_switch(pmap_t pmap)
3368 {
3369 u_long rootpa;
3370
3371 /*
3372 * Only do reload/flush if we have to.
3373 * Note that if the old and new process
3374 * were BOTH using the "null" context,
3375 * then this will NOT flush the TLB.
3376 */
3377 rootpa = pmap->pm_a_phys;
3378 if (kernel_crp.rp_addr != rootpa) {
3379 DPRINT(("pmap_activate(%p)\n", pmap));
3380 kernel_crp.rp_addr = rootpa;
3381 loadcrp(&kernel_crp);
3382 TBIAU();
3383 }
3384 }
3385
3386 /*
3387 * Exported version of pmap_activate(). This is called from the
3388 * machine-independent VM code when a process is given a new pmap.
3389 * If (p == curlwp) do like cpu_switch would do; otherwise just
3390 * take this as notification that the process has a new pmap.
3391 */
3392 void
3393 pmap_activate(struct lwp *l)
3394 {
3395 if (l->l_proc == curproc) {
3396 _pmap_switch(l->l_proc->p_vmspace->vm_map.pmap);
3397 }
3398 }
3399
3400 /*
3401 * pmap_deactivate INTERFACE
3402 **
3403 * This is called to deactivate the specified process's address space.
3404 */
3405 void
3406 pmap_deactivate(struct lwp *l)
3407 {
3408 /* Nothing to do. */
3409 }
3410
3411 /*
3412 * Fill in the sun3x-specific part of the kernel core header
3413 * for dumpsys(). (See machdep.c for the rest.)
3414 */
3415 void
3416 pmap_kcore_hdr(struct sun3x_kcore_hdr *sh)
3417 {
3418 u_long spa, len;
3419 int i;
3420
3421 sh->pg_frame = MMU_SHORT_PTE_BASEADDR;
3422 sh->pg_valid = MMU_DT_PAGE;
3423 sh->contig_end = virtual_contig_end;
3424 sh->kernCbase = (u_long)kernCbase;
3425 for (i = 0; i < SUN3X_NPHYS_RAM_SEGS; i++) {
3426 spa = avail_mem[i].pmem_start;
3427 spa = m68k_trunc_page(spa);
3428 len = avail_mem[i].pmem_end - spa;
3429 len = m68k_round_page(len);
3430 sh->ram_segs[i].start = spa;
3431 sh->ram_segs[i].size = len;
3432 }
3433 }
3434
3435
3436 /* pmap_virtual_space INTERFACE
3437 **
3438 * Return the current available range of virtual addresses in the
3439  * arguments provided. Only really called once.
3440 */
3441 void
3442 pmap_virtual_space(vaddr_t *vstart, vaddr_t *vend)
3443 {
3444 *vstart = virtual_avail;
3445 *vend = virtual_end;
3446 }
3447
3448 /*
3449 * Provide memory to the VM system.
3450 *
3451 * Assume avail_start is always in the
3452 * first segment as pmap_bootstrap does.
3453 */
3454 static void
3455 pmap_page_upload(void)
3456 {
3457 paddr_t a, b; /* memory range */
3458 int i;
3459
3460 /* Supply the memory in segments. */
3461 for (i = 0; i < SUN3X_NPHYS_RAM_SEGS; i++) {
3462 a = atop(avail_mem[i].pmem_start);
3463 b = atop(avail_mem[i].pmem_end);
3464 if (i == 0)
3465 a = atop(avail_start);
3466 if (avail_mem[i].pmem_end > avail_end)
3467 b = atop(avail_end);
3468
3469 uvm_page_physload(a, b, a, b, VM_FREELIST_DEFAULT);
3470
3471 if (avail_mem[i].pmem_next == NULL)
3472 break;
3473 }
3474 }
3475
3476 /* pmap_count INTERFACE
3477 **
3478 * Return the number of resident (valid) pages in the given pmap.
3479 *
3480 * Note: If this function is handed the kernel map, it will report
3481 * that it has no mappings. Hopefully the VM system won't ask for kernel
3482 * map statistics.
3483 */
3484 segsz_t
3485 pmap_count(pmap_t pmap, int type)
3486 {
3487 u_int count;
3488 int a_idx, b_idx;
3489 a_tmgr_t *a_tbl;
3490 b_tmgr_t *b_tbl;
3491 c_tmgr_t *c_tbl;
3492
3493 /*
3494 * If the pmap does not have its own A table manager, it has no
3495 	 * valid entries.
3496 */
3497 if (pmap->pm_a_tmgr == NULL)
3498 return 0;
3499
3500 a_tbl = pmap->pm_a_tmgr;
3501
3502 count = 0;
3503 for (a_idx = 0; a_idx < MMU_TIA(KERNBASE); a_idx++) {
3504 if (MMU_VALID_DT(a_tbl->at_dtbl[a_idx])) {
3505 b_tbl = mmuB2tmgr(mmu_ptov(a_tbl->at_dtbl[a_idx].addr.raw));
3506 for (b_idx = 0; b_idx < MMU_B_TBL_SIZE; b_idx++) {
3507 if (MMU_VALID_DT(b_tbl->bt_dtbl[b_idx])) {
3508 c_tbl = mmuC2tmgr(
3509 mmu_ptov(MMU_DTE_PA(b_tbl->bt_dtbl[b_idx])));
3510 if (type == 0)
3511 /*
3512 * A resident entry count has been requested.
3513 */
3514 count += c_tbl->ct_ecnt;
3515 else
3516 /*
3517 * A wired entry count has been requested.
3518 */
3519 count += c_tbl->ct_wcnt;
3520 }
3521 }
3522 }
3523 }
3524
3525 return count;
3526 }
3527
3528 /************************ SUN3 COMPATIBILITY ROUTINES ********************
3529  * The following routines are only used by DDB for tricky kernel text *
3530  * operations in db_memrw.c. They are provided for sun3               *
3531 * compatibility. *
3532 *************************************************************************/
3533 /* get_pte INTERNAL
3534 **
3535  * Return the page descriptor that describes the kernel mapping
3536 * of the given virtual address.
3537 */
3538 extern u_long ptest_addr(u_long); /* XXX: locore.s */
3539 u_int
3540 get_pte(vaddr_t va)
3541 {
3542 u_long pte_pa;
3543 mmu_short_pte_t *pte;
3544
3545 /* Get the physical address of the PTE */
3546 pte_pa = ptest_addr(va & ~PGOFSET);
3547
3548 /* Convert to a virtual address... */
3549 pte = (mmu_short_pte_t *) (KERNBASE + pte_pa);
3550
3551 /* Make sure it is in our level-C tables... */
3552 if ((pte < kernCbase) ||
3553 (pte >= &mmuCbase[NUM_USER_PTES]))
3554 return 0;
3555
3556 /* ... and just return its contents. */
3557 return (pte->attr.raw);
3558 }
3559
3560
3561 /* set_pte INTERNAL
3562 **
3563 * Set the page descriptor that describes the kernel mapping
3564 * of the given virtual address.
3565 */
3566 void
3567 set_pte(vaddr_t va, u_int pte)
3568 {
3569 u_long idx;
3570
3571 if (va < KERNBASE)
3572 return;
3573
3574 idx = (unsigned long) m68k_btop(va - KERNBASE);
3575 kernCbase[idx].attr.raw = pte;
3576 TBIS(va);
3577 }
3578
3579 /*
3580 * Routine: pmap_procwr
3581 *
3582 * Function:
3583  *	Synchronize caches corresponding to [va, va+len) in p.
3584 */
3585 void
3586 pmap_procwr(struct proc *p, vaddr_t va, size_t len)
3587 {
3588 (void)cachectl1(0x80000004, va, len, p);
3589 }
3590
3591
3592 #ifdef PMAP_DEBUG
3593 /************************** DEBUGGING ROUTINES **************************
3594 * The following routines are meant to be an aid to debugging the pmap *
3595 * system. They are callable from the DDB command line and should be *
3596 * prepared to be handed unstable or incomplete states of the system. *
3597 ************************************************************************/
3598
3599 /* pv_list
3600 **
3601 * List all pages found on the pv list for the given physical page.
3602 * To avoid endless loops, the listing will stop at the end of the list
3603 * or after 'n' entries - whichever comes first.
3604 */
3605 void
3606 pv_list(paddr_t pa, int n)
3607 {
3608 int idx;
3609 vaddr_t va;
3610 pv_t *pv;
3611 c_tmgr_t *c_tbl;
3612 pmap_t pmap;
3613
3614 pv = pa2pv(pa);
3615 idx = pv->pv_idx;
3616 for (; idx != PVE_EOL && n > 0; idx = pvebase[idx].pve_next, n--) {
3617 va = pmap_get_pteinfo(idx, &pmap, &c_tbl);
3618 		printf("idx %d, pmap 0x%x, va 0x%x, c_tbl 0x%x\n",
3619 idx, (u_int) pmap, (u_int) va, (u_int) c_tbl);
3620 }
3621 }
3622 #endif /* PMAP_DEBUG */
3623
3624 #ifdef NOT_YET
3625 /* and maybe not ever */
3626 /************************** LOW-LEVEL ROUTINES **************************
3627 * These routines will eventually be re-written into assembly and placed*
3628 * in locore.s. They are here now as stubs so that the pmap module can *
3629 * be linked as a standalone user program for testing. *
3630 ************************************************************************/
3631 /* flush_atc_crp INTERNAL
3632 **
3633 * Flush all page descriptors derived from the given CPU Root Pointer
3634 * (CRP), or 'A' table as it is known here, from the 68851's automatic
3635 * cache.
3636 */
3637 void
3638 flush_atc_crp(int a_tbl)
3639 {
3640 mmu_long_rp_t rp;
3641
3642 /* Create a temporary root table pointer that points to the
3643 * given A table.
3644 */
3645 rp.attr.raw = ~MMU_LONG_RP_LU;
3646 rp.addr.raw = (unsigned int) a_tbl;
3647
3648 mmu_pflushr(&rp);
3649 /* mmu_pflushr:
3650 * movel sp(4)@,a0
3651 * pflushr a0@
3652 * rts
3653 */
3654 }
3655 #endif /* NOT_YET */
3656