/*	$NetBSD: pmap.c,v 1.27 1997/09/19 13:55:44 leo Exp $	*/
2
3 /*-
4 * Copyright (c) 1996, 1997 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Jeremy Cooper.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement:
20 * This product includes software developed by the NetBSD
21 * Foundation, Inc. and its contributors.
22 * 4. Neither the name of The NetBSD Foundation nor the names of its
23 * contributors may be used to endorse or promote products derived
24 * from this software without specific prior written permission.
25 *
26 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
27 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
30 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36 * POSSIBILITY OF SUCH DAMAGE.
37 */
38
39 /*
40 * XXX These comments aren't quite accurate. Need to change.
41 * The sun3x uses the MC68851 Memory Management Unit, which is built
42 * into the CPU. The 68851 maps virtual to physical addresses using
43 * a multi-level table lookup, which is stored in the very memory that
44 * it maps. The number of levels of lookup is configurable from one
45 * to four. In this implementation, we use three, named 'A' through 'C'.
46 *
47 * The MMU translates virtual addresses into physical addresses by
 * traversing these tables in a process called a 'table walk'. The most
49 * significant 7 bits of the Virtual Address ('VA') being translated are
50 * used as an index into the level A table, whose base in physical memory
51 * is stored in a special MMU register, the 'CPU Root Pointer' or CRP. The
52 * address found at that index in the A table is used as the base
53 * address for the next table, the B table. The next six bits of the VA are
54 * used as an index into the B table, which in turn gives the base address
55 * of the third and final C table.
56 *
57 * The next six bits of the VA are used as an index into the C table to
 * locate a Page Table Entry (PTE). The PTE holds the physical base
 * address of a page, to which the remaining 13 bits of the VA are added,
 * producing the mapped physical address.
61 *
62 * To map the entire memory space in this manner would require 2114296 bytes
63 * of page tables per process - quite expensive. Instead we will
64 * allocate a fixed but considerably smaller space for the page tables at
65 * the time the VM system is initialized. When the pmap code is asked by
66 * the kernel to map a VA to a PA, it allocates tables as needed from this
67 * pool. When there are no more tables in the pool, tables are stolen
68 * from the oldest mapped entries in the tree. This is only possible
69 * because all memory mappings are stored in the kernel memory map
70 * structures, independent of the pmap structures. A VA which references
71 * one of these invalidated maps will cause a page fault. The kernel
72 * will determine that the page fault was caused by a task using a valid
73 * VA, but for some reason (which does not concern it), that address was
74 * not mapped. It will ask the pmap code to re-map the entry and then
75 * it will resume executing the faulting task.
76 *
77 * In this manner the most efficient use of the page table space is
78 * achieved. Tasks which do not execute often will have their tables
79 * stolen and reused by tasks which execute more frequently. The best
80 * size for the page table pool will probably be determined by
81 * experimentation.
82 *
83 * You read all of the comments so far. Good for you.
84 * Now go play!
85 */
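
/*
 * As a concrete sketch of the table walk described above (the real index
 * extraction is done by the MMU_TIA/MMU_TIB/MMU_TIC macros used below;
 * the literal shifts here simply restate the 7/6/6/13 bit split given in
 * the comment):
 *
 *	a_idx  = (va >> 25) & 0x7f;	-- A table index (7 bits)
 *	b_idx  = (va >> 19) & 0x3f;	-- B table index (6 bits)
 *	c_idx  = (va >> 13) & 0x3f;	-- C table index (6 bits)
 *	offset =  va & 0x1fff;		-- offset within the 8K page
 *
 * The physical address produced by the walk is the page address found in
 * the selected PTE plus 'offset'.
 */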
86
87 /*** A Note About the 68851 Address Translation Cache
88 * The MC68851 has a 64 entry cache, called the Address Translation Cache
89 * or 'ATC'. This cache stores the most recently used page descriptors
90 * accessed by the MMU when it does translations. Using a marker called a
91 * 'task alias' the MMU can store the descriptors from 8 different table
92 * spaces concurrently. The task alias is associated with the base
93 * address of the level A table of that address space. When an address
94 * space is currently active (the CRP currently points to its A table)
95 * the only cached descriptors that will be obeyed are ones which have a
96 * matching task alias of the current space associated with them.
97 *
98 * Since the cache is always consulted before any table lookups are done,
99 * it is important that it accurately reflect the state of the MMU tables.
100 * Whenever a change has been made to a table that has been loaded into
101 * the MMU, the code must be sure to flush any cached entries that are
102 * affected by the change. These instances are documented in the code at
103 * various points.
104 */
105 /*** A Note About the Note About the 68851 Address Translation Cache
 * 4 months into this code I discovered that the sun3x does not have
 * an MC68851 chip. Instead, it has a version of this MMU that is part of
 * the 68030 CPU.
 * Although it behaves very similarly to the 68851, it only has 1 task
 * alias and a 22-entry cache. So sadly (or happily), the first paragraph
111 * of the previous note does not apply to the sun3x pmap.
112 */
113
114 #include <sys/param.h>
115 #include <sys/systm.h>
116 #include <sys/proc.h>
117 #include <sys/malloc.h>
118 #include <sys/user.h>
119 #include <sys/queue.h>
120 #include <sys/kcore.h>
121
122 #include <vm/vm.h>
123 #include <vm/vm_kern.h>
124 #include <vm/vm_page.h>
125
126 #include <machine/cpu.h>
127 #include <machine/kcore.h>
128 #include <machine/pmap.h>
129 #include <machine/pte.h>
130 #include <machine/machdep.h>
131 #include <machine/mon.h>
132
133 #include "pmap_pvt.h"
134
135 /* XXX - What headers declare these? */
136 extern struct pcb *curpcb;
137 extern int physmem;
138
139 extern void copypage __P((const void*, void*));
140 extern void zeropage __P((void*));
141
142 /* Defined in locore.s */
143 extern char kernel_text[];
144
145 /* Defined by the linker */
146 extern char etext[], edata[], end[];
147 extern char *esym; /* DDB */
148
149 /*************************** DEBUGGING DEFINITIONS ***********************
150 * Macros, preprocessor defines and variables used in debugging can make *
151 * code hard to read. Anything used exclusively for debugging purposes *
152 * is defined here to avoid having such mess scattered around the file. *
153 *************************************************************************/
154 #ifdef PMAP_DEBUG
155 /*
156 * To aid the debugging process, macros should be expanded into smaller steps
157 * that accomplish the same goal, yet provide convenient places for placing
158 * breakpoints. When this code is compiled with PMAP_DEBUG mode defined, the
159 * 'INLINE' keyword is defined to an empty string. This way, any function
160 * defined to be a 'static INLINE' will become 'outlined' and compiled as
161 * a separate function, which is much easier to debug.
162 */
163 #define INLINE /* nothing */
164
165 /*
166 * It is sometimes convenient to watch the activity of a particular table
167 * in the system. The following variables are used for that purpose.
168 */
169 a_tmgr_t *pmap_watch_atbl = 0;
170 b_tmgr_t *pmap_watch_btbl = 0;
171 c_tmgr_t *pmap_watch_ctbl = 0;
172
173 int pmap_debug = 0;
174 #define DPRINT(args) if (pmap_debug) printf args
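
/*
 * Note that call sites need double parentheses, since the macro hands its
 * entire argument list straight to printf(). A typical (hypothetical) use:
 *
 *	DPRINT(("pmap_enter: va=0x%lx pa=0x%lx\n", va, pa));
 */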
175
176 #else /********** Stuff below is defined if NOT debugging **************/
177
178 #define INLINE inline
179 #define DPRINT(args) /* nada */
180
181 #endif /* PMAP_DEBUG */
182 /*********************** END OF DEBUGGING DEFINITIONS ********************/
183
184 /*** Management Structure - Memory Layout
185 * For every MMU table in the sun3x pmap system there must be a way to
186 * manage it; we must know which process is using it, what other tables
187 * depend on it, and whether or not it contains any locked pages. This
188 * is solved by the creation of 'table management' or 'tmgr'
189 * structures. One for each MMU table in the system.
190 *
191 * MAP OF MEMORY USED BY THE PMAP SYSTEM
192 *
193 * towards lower memory
194 * kernAbase -> +-------------------------------------------------------+
195 * | Kernel MMU A level table |
196 * kernBbase -> +-------------------------------------------------------+
197 * | Kernel MMU B level tables |
198 * kernCbase -> +-------------------------------------------------------+
199 * | |
200 * | Kernel MMU C level tables |
201 * | |
202 * mmuCbase -> +-------------------------------------------------------+
203 * | User MMU C level tables |
204 * mmuAbase -> +-------------------------------------------------------+
205 * | |
206 * | User MMU A level tables |
207 * | |
208 * mmuBbase -> +-------------------------------------------------------+
209 * | User MMU B level tables |
210 * tmgrAbase -> +-------------------------------------------------------+
211 * | TMGR A level table structures |
212 * tmgrBbase -> +-------------------------------------------------------+
213 * | TMGR B level table structures |
214 * tmgrCbase -> +-------------------------------------------------------+
215 * | TMGR C level table structures |
216 * pvbase -> +-------------------------------------------------------+
217 * | Physical to Virtual mapping table (list heads) |
218 * pvebase -> +-------------------------------------------------------+
219 * | Physical to Virtual mapping table (list elements) |
220 * | |
221 * +-------------------------------------------------------+
222 * towards higher memory
223 *
224 * For every A table in the MMU A area, there will be a corresponding
225 * a_tmgr structure in the TMGR A area. The same will be true for
226 * the B and C tables. This arrangement will make it easy to find the
 * controlling tmgr structure for any table in the system by use of
228 * (relatively) simple macros.
229 */
230
231 /*
232 * Global variables for storing the base addresses for the areas
233 * labeled above.
234 */
235 static vm_offset_t kernAphys;
236 static mmu_long_dte_t *kernAbase;
237 static mmu_short_dte_t *kernBbase;
238 static mmu_short_pte_t *kernCbase;
239 static mmu_short_pte_t *mmuCbase;
240 static mmu_short_dte_t *mmuBbase;
241 static mmu_long_dte_t *mmuAbase;
242 static a_tmgr_t *Atmgrbase;
243 static b_tmgr_t *Btmgrbase;
244 static c_tmgr_t *Ctmgrbase;
245 static pv_t *pvbase;
246 static pv_elem_t *pvebase;
247 struct pmap kernel_pmap;
248
249 /*
250 * This holds the CRP currently loaded into the MMU.
251 */
252 struct mmu_rootptr kernel_crp;
253
254 /*
255 * Just all around global variables.
256 */
257 static TAILQ_HEAD(a_pool_head_struct, a_tmgr_struct) a_pool;
258 static TAILQ_HEAD(b_pool_head_struct, b_tmgr_struct) b_pool;
259 static TAILQ_HEAD(c_pool_head_struct, c_tmgr_struct) c_pool;
260
261
262 /*
263 * Flags used to mark the safety/availability of certain operations or
264 * resources.
265 */
266 static boolean_t pv_initialized = FALSE, /* PV system has been initialized. */
267 bootstrap_alloc_enabled = FALSE; /*Safe to use pmap_bootstrap_alloc().*/
268 int tmp_vpages_inuse; /* Temporary virtual pages are in use */
269
270 /*
271 * XXX: For now, retain the traditional variables that were
272 * used in the old pmap/vm interface (without NONCONTIG).
273 */
274 /* Kernel virtual address space available: */
275 vm_offset_t virtual_avail, virtual_end;
276 /* Physical address space available: */
277 vm_offset_t avail_start, avail_end;
278
/* This keeps track of the end of the contiguously mapped range. */
280 vm_offset_t virtual_contig_end;
281
282 /* Physical address used by pmap_next_page() */
283 vm_offset_t avail_next;
284
285 /* These are used by pmap_copy_page(), etc. */
286 vm_offset_t tmp_vpages[2];
287
288 /*
289 * The 3/80 is the only member of the sun3x family that has non-contiguous
290 * physical memory. Memory is divided into 4 banks which are physically
291 * locatable on the system board. Although the size of these banks varies
292 * with the size of memory they contain, their base addresses are
 * permanently fixed. The following structure, which describes these
294 * banks, is initialized by pmap_bootstrap() after it reads from a similar
295 * structure provided by the ROM Monitor.
296 *
297 * For the other machines in the sun3x architecture which do have contiguous
298 * RAM, this list will have only one entry, which will describe the entire
299 * range of available memory.
300 */
301 struct pmap_physmem_struct avail_mem[SUN3X_NPHYS_RAM_SEGS];
302 u_int total_phys_mem;
303
304 /*************************************************************************/
305
306 /*
307 * XXX - Should "tune" these based on statistics.
308 *
309 * My first guess about the relative numbers of these needed is
310 * based on the fact that a "typical" process will have several
311 * pages mapped at low virtual addresses (text, data, bss), then
312 * some mapped shared libraries, and then some stack pages mapped
313 * near the high end of the VA space. Each process can use only
314 * one A table, and most will use only two B tables (maybe three)
315 * and probably about four C tables. Therefore, the first guess
316 * at the relative numbers of these needed is 1:2:4 -gwr
317 *
318 * The number of C tables needed is closely related to the amount
319 * of physical memory available plus a certain amount attributable
320 * to the use of double mappings. With a few simulation statistics
321 * we can find a reasonably good estimation of this unknown value.
322 * Armed with that and the above ratios, we have a good idea of what
323 * is needed at each level. -j
324 *
 * Note: It is not physical memory size, but the total mapped
326 * virtual space required by the combined working sets of all the
327 * currently _runnable_ processes. (Sleeping ones don't count.)
328 * The amount of physical memory should be irrelevant. -gwr
329 */
330 #ifdef FIXED_NTABLES
331 #define NUM_A_TABLES 16
332 #define NUM_B_TABLES 32
333 #define NUM_C_TABLES 64
334 #else
335 unsigned int NUM_A_TABLES, NUM_B_TABLES, NUM_C_TABLES;
336 #endif /* FIXED_NTABLES */
337
338 /*
339 * This determines our total virtual mapping capacity.
340 * Yes, it is a FIXED value so we can pre-allocate.
341 */
342 #define NUM_USER_PTES (NUM_C_TABLES * MMU_C_TBL_SIZE)
343
344 /*
345 * The size of the Kernel Virtual Address Space (KVAS)
346 * for purposes of MMU table allocation is -KERNBASE
347 * (length from KERNBASE to 0xFFFFffff)
348 */
349 #define KVAS_SIZE (-KERNBASE)
350
351 /* Numbers of kernel MMU tables to support KVAS_SIZE. */
352 #define KERN_B_TABLES (KVAS_SIZE >> MMU_TIA_SHIFT)
353 #define KERN_C_TABLES (KVAS_SIZE >> MMU_TIB_SHIFT)
354 #define NUM_KERN_PTES (KVAS_SIZE >> MMU_TIC_SHIFT)
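
/*
 * As a worked example (a sketch, assuming KERNBASE is 0xF8000000 on this
 * port and the usual 7/6/6/13 bit split): KVAS_SIZE is then 0x08000000
 * (128MB), so the macros above come to 4 B tables (one per 32MB A-table
 * entry), 256 C tables (one per 512KB B-table entry) and 16384 PTEs
 * (one per 8K page).
 */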
355
/*************************** MISCELLANEOUS MACROS ************************/
357 #define PMAP_LOCK() ; /* Nothing, for now */
358 #define PMAP_UNLOCK() ; /* same. */
359 #define NULL 0
360
361 static INLINE void * mmu_ptov __P((vm_offset_t pa));
362 static INLINE vm_offset_t mmu_vtop __P((void * va));
363
364 #if 0
365 static INLINE a_tmgr_t * mmuA2tmgr __P((mmu_long_dte_t *));
366 #endif /* 0 */
367 static INLINE b_tmgr_t * mmuB2tmgr __P((mmu_short_dte_t *));
368 static INLINE c_tmgr_t * mmuC2tmgr __P((mmu_short_pte_t *));
369
370 static INLINE pv_t *pa2pv __P((vm_offset_t pa));
371 static INLINE int pteidx __P((mmu_short_pte_t *));
372 static INLINE pmap_t current_pmap __P((void));
373
374 /*
375 * We can always convert between virtual and physical addresses
376 * for anything in the range [KERNBASE ... avail_start] because
377 * that range is GUARANTEED to be mapped linearly.
378 * We rely heavily upon this feature!
379 */
380 static INLINE void *
381 mmu_ptov(pa)
382 vm_offset_t pa;
383 {
384 register vm_offset_t va;
385
386 va = (pa + KERNBASE);
387 #ifdef PMAP_DEBUG
388 if ((va < KERNBASE) || (va >= virtual_contig_end))
389 panic("mmu_ptov");
390 #endif
391 return ((void*)va);
392 }
393 static INLINE vm_offset_t
394 mmu_vtop(vva)
395 void *vva;
396 {
397 register vm_offset_t va;
398
399 va = (vm_offset_t)vva;
400 #ifdef PMAP_DEBUG
401 if ((va < KERNBASE) || (va >= virtual_contig_end))
		panic("mmu_vtop");
403 #endif
404 return (va - KERNBASE);
405 }
406
407 /*
408 * These macros map MMU tables to their corresponding manager structures.
409 * They are needed quite often because many of the pointers in the pmap
410 * system reference MMU tables and not the structures that control them.
411 * There needs to be a way to find one when given the other and these
412 * macros do so by taking advantage of the memory layout described above.
413 * Here's a quick step through the first macro, mmuA2tmgr():
414 *
415 * 1) find the offset of the given MMU A table from the base of its table
416 * pool (table - mmuAbase).
417 * 2) convert this offset into a table index by dividing it by the
418 * size of one MMU 'A' table. (sizeof(mmu_long_dte_t) * MMU_A_TBL_SIZE)
419 * 3) use this index to select the corresponding 'A' table manager
420 * structure from the 'A' table manager pool (Atmgrbase[index]).
421 */
422 /* This function is not currently used. */
423 #if 0
424 static INLINE a_tmgr_t *
425 mmuA2tmgr(mmuAtbl)
426 mmu_long_dte_t *mmuAtbl;
427 {
428 register int idx;
429
430 /* Which table is this in? */
431 idx = (mmuAtbl - mmuAbase) / MMU_A_TBL_SIZE;
432 #ifdef PMAP_DEBUG
433 if ((idx < 0) || (idx >= NUM_A_TABLES))
434 panic("mmuA2tmgr");
435 #endif
436 return (&Atmgrbase[idx]);
437 }
438 #endif /* 0 */
439
440 static INLINE b_tmgr_t *
441 mmuB2tmgr(mmuBtbl)
442 mmu_short_dte_t *mmuBtbl;
443 {
444 register int idx;
445
446 /* Which table is this in? */
447 idx = (mmuBtbl - mmuBbase) / MMU_B_TBL_SIZE;
448 #ifdef PMAP_DEBUG
449 if ((idx < 0) || (idx >= NUM_B_TABLES))
450 panic("mmuB2tmgr");
451 #endif
452 return (&Btmgrbase[idx]);
453 }
454
455 /* mmuC2tmgr INTERNAL
456 **
457 * Given a pte known to belong to a C table, return the address of
458 * that table's management structure.
459 */
460 static INLINE c_tmgr_t *
461 mmuC2tmgr(mmuCtbl)
462 mmu_short_pte_t *mmuCtbl;
463 {
464 register int idx;
465
466 /* Which table is this in? */
467 idx = (mmuCtbl - mmuCbase) / MMU_C_TBL_SIZE;
468 #ifdef PMAP_DEBUG
469 if ((idx < 0) || (idx >= NUM_C_TABLES))
470 panic("mmuC2tmgr");
471 #endif
472 return (&Ctmgrbase[idx]);
473 }
474
475 /* This is now a function call below.
476 * #define pa2pv(pa) \
477 * (&pvbase[(unsigned long)\
478 * m68k_btop(pa)\
479 * ])
480 */
481
482 /* pa2pv INTERNAL
483 **
484 * Return the pv_list_head element which manages the given physical
485 * address.
486 */
487 static INLINE pv_t *
488 pa2pv(pa)
489 vm_offset_t pa;
490 {
491 register struct pmap_physmem_struct *bank;
492 register int idx;
493
494 bank = &avail_mem[0];
495 while (pa >= bank->pmem_end)
496 bank = bank->pmem_next;
497
498 pa -= bank->pmem_start;
499 idx = bank->pmem_pvbase + m68k_btop(pa);
500 #ifdef PMAP_DEBUG
501 if ((idx < 0) || (idx >= physmem))
502 panic("pa2pv");
503 #endif
504 return &pvbase[idx];
505 }
506
507 /* pteidx INTERNAL
508 **
509 * Return the index of the given PTE within the entire fixed table of
510 * PTEs.
511 */
512 static INLINE int
513 pteidx(pte)
514 mmu_short_pte_t *pte;
515 {
516 return (pte - kernCbase);
517 }
518
519 /*
520 * This just offers a place to put some debugging checks,
521 * and reduces the number of places "curproc" appears...
522 */
523 static INLINE pmap_t
524 current_pmap()
525 {
526 struct proc *p;
527 struct vmspace *vm;
528 vm_map_t map;
529 pmap_t pmap;
530
531 p = curproc; /* XXX */
532 if (p == NULL)
533 pmap = &kernel_pmap;
534 else {
535 vm = p->p_vmspace;
536 map = &vm->vm_map;
537 pmap = vm_map_pmap(map);
538 }
539
540 return (pmap);
541 }
542
543
544 /*************************** FUNCTION DEFINITIONS ************************
545 * These appear here merely for the compiler to enforce type checking on *
546 * all function calls. *
547 *************************************************************************/
548
549 /** External functions
550 ** - functions used within this module but written elsewhere.
551 ** both of these functions are in locore.s
552 ** XXX - These functions were later replaced with their more cryptic
553 ** hp300 counterparts. They may be removed now.
554 **/
555 #if 0 /* deprecated mmu */
556 void mmu_seturp __P((vm_offset_t));
557 void mmu_flush __P((int, vm_offset_t));
558 void mmu_flusha __P((void));
559 #endif /* 0 */
560
561 /** Internal functions
562 ** - all functions used only within this module are defined in
563 ** pmap_pvt.h
564 **/
565
566 /** Interface functions
567 ** - functions required by the Mach VM Pmap interface, with MACHINE_CONTIG
568 ** defined.
569 **/
570 #ifdef INCLUDED_IN_PMAP_H
571 void pmap_bootstrap __P((void));
572 void *pmap_bootstrap_alloc __P((int));
573 void pmap_enter __P((pmap_t, vm_offset_t, vm_offset_t, vm_prot_t, boolean_t));
574 pmap_t pmap_create __P((vm_size_t));
575 void pmap_destroy __P((pmap_t));
576 void pmap_reference __P((pmap_t));
577 boolean_t pmap_is_referenced __P((vm_offset_t));
578 boolean_t pmap_is_modified __P((vm_offset_t));
579 void pmap_clear_modify __P((vm_offset_t));
580 vm_offset_t pmap_extract __P((pmap_t, vm_offset_t));
581 void pmap_activate __P((pmap_t));
582 int pmap_page_index __P((vm_offset_t));
583 u_int pmap_free_pages __P((void));
584 #endif /* INCLUDED_IN_PMAP_H */
585
586 /********************************** CODE ********************************
587 * Functions that are called from other parts of the kernel are labeled *
588 * as 'INTERFACE' functions. Functions that are only called from *
589 * within the pmap module are labeled as 'INTERNAL' functions. *
590 * Functions that are internal, but are not (currently) used at all are *
591 * labeled 'INTERNAL_X'. *
592 ************************************************************************/
593
594 /* pmap_bootstrap INTERNAL
595 **
596 * Initializes the pmap system. Called at boot time from _vm_init()
597 * in _startup.c.
598 *
599 * Reminder: having a pmap_bootstrap_alloc() and also having the VM
600 * system implement pmap_steal_memory() is redundant.
601 * Don't release this code without removing one or the other!
602 */
603 void
604 pmap_bootstrap(nextva)
605 vm_offset_t nextva;
606 {
607 struct physmemory *membank;
608 struct pmap_physmem_struct *pmap_membank;
609 vm_offset_t va, pa, eva;
610 int b, c, i, j; /* running table counts */
611 int size;
612
613 /*
614 * This function is called by __bootstrap after it has
615 * determined the type of machine and made the appropriate
616 * patches to the ROM vectors (XXX- I don't quite know what I meant
617 * by that.) It allocates and sets up enough of the pmap system
618 * to manage the kernel's address space.
619 */
620
621 /*
622 * Determine the range of kernel virtual and physical
623 * space available. Note that we ABSOLUTELY DEPEND on
624 * the fact that the first bank of memory (4MB) is
625 * mapped linearly to KERNBASE (which we guaranteed in
626 * the first instructions of locore.s).
627 * That is plenty for our bootstrap work.
628 */
629 virtual_avail = m68k_round_page(nextva);
630 virtual_contig_end = KERNBASE + 0x400000; /* +4MB */
631 virtual_end = VM_MAX_KERNEL_ADDRESS;
632 /* Don't need avail_start til later. */
633
634 /* We may now call pmap_bootstrap_alloc(). */
635 bootstrap_alloc_enabled = TRUE;
636
637 /*
638 * This is a somewhat unwrapped loop to deal with
	 * copying the PROM's 'physmem' banks into the pmap's
640 * banks. The following is always assumed:
641 * 1. There is always at least one bank of memory.
642 * 2. There is always a last bank of memory, and its
643 * pmem_next member must be set to NULL.
644 * XXX - Use: do { ... } while (membank->next) instead?
645 * XXX - Why copy this stuff at all? -gwr
646 * - It is needed in pa2pv().
647 */
648 membank = romVectorPtr->v_physmemory;
649 pmap_membank = avail_mem;
650 total_phys_mem = 0;
651
652 while (membank->next) {
653 pmap_membank->pmem_start = membank->address;
654 pmap_membank->pmem_end = membank->address + membank->size;
655 total_phys_mem += membank->size;
656 /* This silly syntax arises because pmap_membank
657 * is really a pre-allocated array, but it is put into
658 * use as a linked list.
659 */
660 pmap_membank->pmem_next = pmap_membank + 1;
661 pmap_membank = pmap_membank->pmem_next;
662 membank = membank->next;
663 }
664
665 /*
666 * XXX The last bank of memory should be reduced to exclude the
667 * physical pages needed by the PROM monitor from being used
668 * in the VM system. XXX - See below - Fix!
669 */
670 pmap_membank->pmem_start = membank->address;
671 pmap_membank->pmem_end = membank->address + membank->size;
672 pmap_membank->pmem_next = NULL;
673
674 #if 0 /* XXX - Need to integrate this! */
675 /*
676 * The last few pages of physical memory are "owned" by
677 * the PROM. The total amount of memory we are allowed
678 * to use is given by the romvec pointer. -gwr
679 *
680 * We should dedicate different variables for 'useable'
681 * and 'physically available'. Most users are used to the
682 * kernel reporting the amount of memory 'physically available'
683 * as opposed to 'useable by the kernel' at boot time. -j
684 */
685 total_phys_mem = *romVectorPtr->memoryAvail;
686 #endif /* XXX */
687
688 total_phys_mem += membank->size; /* XXX see above */
689 physmem = btoc(total_phys_mem);
690
691 /*
692 * Avail_end is set to the first byte of physical memory
693 * after the end of the last bank. We use this only to
694 * determine if a physical address is "managed" memory.
695 *
696 * XXX - The setting of avail_end is a temporary ROM saving hack.
697 */
698 avail_end = pmap_membank->pmem_end -
699 (total_phys_mem - *romVectorPtr->memoryAvail);
700 avail_end = m68k_trunc_page(avail_end);
701
702 /*
703 * First allocate enough kernel MMU tables to map all
704 * of kernel virtual space from KERNBASE to 0xFFFFFFFF.
705 * Note: All must be aligned on 256 byte boundaries.
706 * Start with the level-A table (one of those).
707 */
708 size = sizeof(mmu_long_dte_t) * MMU_A_TBL_SIZE;
709 kernAbase = pmap_bootstrap_alloc(size);
710 bzero(kernAbase, size);
711
712 /* Now the level-B kernel tables... */
713 size = sizeof(mmu_short_dte_t) * MMU_B_TBL_SIZE * KERN_B_TABLES;
714 kernBbase = pmap_bootstrap_alloc(size);
715 bzero(kernBbase, size);
716
717 /* Now the level-C kernel tables... */
718 size = sizeof(mmu_short_pte_t) * MMU_C_TBL_SIZE * KERN_C_TABLES;
719 kernCbase = pmap_bootstrap_alloc(size);
720 bzero(kernCbase, size);
721 /*
722 * Note: In order for the PV system to work correctly, the kernel
723 * and user-level C tables must be allocated contiguously.
724 * Nothing should be allocated between here and the allocation of
725 * mmuCbase below. XXX: Should do this as one allocation, and
726 * then compute a pointer for mmuCbase instead of this...
727 *
728 * Allocate user MMU tables.
	 * These must be contiguous with the preceding.
730 */
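
	/*
	 * A sketch of the single-allocation alternative suggested above
	 * (not what is done here; NUM_C_TABLES would have to be known
	 * first): grab the kernel and user PTEs in one call and derive
	 * mmuCbase from kernCbase, so nothing can ever come between them:
	 *
	 *	size = sizeof(mmu_short_pte_t) *
	 *	    (NUM_KERN_PTES + NUM_USER_PTES);
	 *	kernCbase = pmap_bootstrap_alloc(size);
	 *	mmuCbase = kernCbase + NUM_KERN_PTES;
	 */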
731
732 #ifndef FIXED_NTABLES
733 /*
734 * The number of user-level C tables that should be allocated is
735 * related to the size of physical memory. In general, there should
736 * be enough tables to map four times the amount of available RAM.
737 * The extra amount is needed because some table space is wasted by
738 * fragmentation.
739 */
740 NUM_C_TABLES = (total_phys_mem * 4) / (MMU_C_TBL_SIZE * MMU_PAGE_SIZE);
741 NUM_B_TABLES = NUM_C_TABLES / 2;
742 NUM_A_TABLES = NUM_B_TABLES / 2;
743 #endif /* !FIXED_NTABLES */
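
	/*
	 * A rough worked example of the calculation above (assuming 8K
	 * pages and 64-entry C tables): a 16MB machine gets
	 * (16MB * 4) / (64 * 8K) = 128 C tables, and from that 64 B
	 * tables and 32 A tables, the 1:2:4 ratio discussed earlier.
	 */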
744
745 size = sizeof(mmu_short_pte_t) * MMU_C_TBL_SIZE * NUM_C_TABLES;
746 mmuCbase = pmap_bootstrap_alloc(size);
747
748 size = sizeof(mmu_short_dte_t) * MMU_B_TBL_SIZE * NUM_B_TABLES;
749 mmuBbase = pmap_bootstrap_alloc(size);
750
751 size = sizeof(mmu_long_dte_t) * MMU_A_TBL_SIZE * NUM_A_TABLES;
752 mmuAbase = pmap_bootstrap_alloc(size);
753
754 /*
755 * Fill in the never-changing part of the kernel tables.
756 * For simplicity, the kernel's mappings will be editable as a
757 * flat array of page table entries at kernCbase. The
758 * higher level 'A' and 'B' tables must be initialized to point
759 * to this lower one.
760 */
761 b = c = 0;
762
763 /*
764 * Invalidate all mappings below KERNBASE in the A table.
765 * This area has already been zeroed out, but it is good
766 * practice to explicitly show that we are interpreting
767 * it as a list of A table descriptors.
768 */
769 for (i = 0; i < MMU_TIA(KERNBASE); i++) {
770 kernAbase[i].addr.raw = 0;
771 }
772
773 /*
774 * Set up the kernel A and B tables so that they will reference the
775 * correct spots in the contiguous table of PTEs allocated for the
776 * kernel's virtual memory space.
777 */
778 for (i = MMU_TIA(KERNBASE); i < MMU_A_TBL_SIZE; i++) {
779 kernAbase[i].attr.raw =
780 MMU_LONG_DTE_LU | MMU_LONG_DTE_SUPV | MMU_DT_SHORT;
781 kernAbase[i].addr.raw = mmu_vtop(&kernBbase[b]);
782
783 for (j=0; j < MMU_B_TBL_SIZE; j++) {
784 kernBbase[b + j].attr.raw = mmu_vtop(&kernCbase[c])
785 | MMU_DT_SHORT;
786 c += MMU_C_TBL_SIZE;
787 }
788 b += MMU_B_TBL_SIZE;
789 }
790
791 /* XXX - Doing kernel_pmap a little further down. */
792
793 pmap_alloc_usermmu(); /* Allocate user MMU tables. */
794 pmap_alloc_usertmgr(); /* Allocate user MMU table managers.*/
795 pmap_alloc_pv(); /* Allocate physical->virtual map. */
796
797 /*
798 * We are now done with pmap_bootstrap_alloc(). Round up
799 * `virtual_avail' to the nearest page, and set the flag
800 * to prevent use of pmap_bootstrap_alloc() hereafter.
801 */
802 pmap_bootstrap_aalign(NBPG);
803 bootstrap_alloc_enabled = FALSE;
804
805 /*
806 * Now that we are done with pmap_bootstrap_alloc(), we
807 * must save the virtual and physical addresses of the
808 * end of the linearly mapped range, which are stored in
809 * virtual_contig_end and avail_start, respectively.
810 * These variables will never change after this point.
811 */
812 virtual_contig_end = virtual_avail;
813 avail_start = virtual_avail - KERNBASE;
814
815 /*
816 * `avail_next' is a running pointer used by pmap_next_page() to
817 * keep track of the next available physical page to be handed
818 * to the VM system during its initialization, in which it
819 * asks for physical pages, one at a time.
820 */
821 avail_next = avail_start;
822
823 /*
824 * Now allocate some virtual addresses, but not the physical pages
825 * behind them. Note that virtual_avail is already page-aligned.
826 *
827 * tmp_vpages[] is an array of two virtual pages used for temporary
828 * kernel mappings in the pmap module to facilitate various physical
	 * address-oriented operations.
830 */
831 tmp_vpages[0] = virtual_avail;
832 virtual_avail += NBPG;
833 tmp_vpages[1] = virtual_avail;
834 virtual_avail += NBPG;
835
836 /** Initialize the PV system **/
837 pmap_init_pv();
838
839 /*
840 * Fill in the kernel_pmap structure and kernel_crp.
841 */
842 kernAphys = mmu_vtop(kernAbase);
843 kernel_pmap.pm_a_tmgr = NULL;
844 kernel_pmap.pm_a_phys = kernAphys;
845 kernel_pmap.pm_refcount = 1; /* always in use */
846
847 kernel_crp.rp_attr = MMU_LONG_DTE_LU | MMU_DT_LONG;
848 kernel_crp.rp_addr = kernAphys;
849
850 /*
851 * Now pmap_enter_kernel() may be used safely and will be
852 * the main interface used hereafter to modify the kernel's
853 * virtual address space. Note that since we are still running
854 * under the PROM's address table, none of these table modifications
855 * actually take effect until pmap_takeover_mmu() is called.
856 *
857 * Note: Our tables do NOT have the PROM linear mappings!
858 * Only the mappings created here exist in our tables, so
859 * remember to map anything we expect to use.
860 */
861 va = (vm_offset_t) KERNBASE;
862 pa = 0;
863
864 /*
865 * The first page of the kernel virtual address space is the msgbuf
866 * page. The page attributes (data, non-cached) are set here, while
867 * the address is assigned to this global pointer in cpu_startup().
868 * XXX - Make it non-cached?
869 */
870 for (i = 0; i < btoc(MSGBUFSIZE); i++) {
871 pmap_enter_kernel(va, pa|PMAP_NC, VM_PROT_ALL);
872 va += NBPG; pa += NBPG;
873 }
874
875 /* Next page is used as the temporary stack. */
876 pmap_enter_kernel(va, pa, VM_PROT_ALL);
877 va += NBPG; pa += NBPG;
878
879 /*
880 * Map all of the kernel's text segment as read-only and cacheable.
881 * (Cacheable is implied by default). Unfortunately, the last bytes
882 * of kernel text and the first bytes of kernel data will often be
883 * sharing the same page. Therefore, the last page of kernel text
	 * has to be mapped as read/write, to accommodate the data.
885 */
886 eva = m68k_trunc_page((vm_offset_t)etext);
887 for (; va < eva; va += NBPG, pa += NBPG)
888 pmap_enter_kernel(va, pa, VM_PROT_READ|VM_PROT_EXECUTE);
889
890 /*
891 * Map all of the kernel's data as read/write and cacheable.
892 * This includes: data, BSS, symbols, and everything in the
893 * contiguous memory used by pmap_bootstrap_alloc()
894 */
895 for (; pa < avail_start; va += NBPG, pa += NBPG)
896 pmap_enter_kernel(va, pa, VM_PROT_READ|VM_PROT_WRITE);
897
898 /*
899 * At this point we are almost ready to take over the MMU. But first
900 * we must save the PROM's address space in our map, as we call its
901 * routines and make references to its data later in the kernel.
902 */
903 pmap_bootstrap_copyprom();
904 pmap_takeover_mmu();
905 pmap_bootstrap_setprom();
906
907 /* Notify the VM system of our page size. */
908 PAGE_SIZE = NBPG;
909 vm_set_page_size();
910 }
911
912
913 /* pmap_alloc_usermmu INTERNAL
914 **
915 * Called from pmap_bootstrap() to allocate MMU tables that will
916 * eventually be used for user mappings.
917 */
918 void
919 pmap_alloc_usermmu()
920 {
921 /* XXX: Moved into caller. */
922 }
923
924 /* pmap_alloc_pv INTERNAL
925 **
926 * Called from pmap_bootstrap() to allocate the physical
927 * to virtual mapping list. Each physical page of memory
928 * in the system has a corresponding element in this list.
929 */
930 void
931 pmap_alloc_pv()
932 {
933 int i;
934 unsigned int total_mem;
935
936 /*
937 * Allocate a pv_head structure for every page of physical
938 * memory that will be managed by the system. Since memory on
939 * the 3/80 is non-contiguous, we cannot arrive at a total page
940 * count by subtraction of the lowest available address from the
941 * highest, but rather we have to step through each memory
942 * bank and add the number of pages in each to the total.
943 *
944 * At this time we also initialize the offset of each bank's
945 * starting pv_head within the pv_head list so that the physical
946 * memory state routines (pmap_is_referenced(),
	 * pmap_is_modified(), et al.) can quickly find corresponding
948 * pv_heads in spite of the non-contiguity.
949 */
950 total_mem = 0;
951 for (i = 0; i < SUN3X_NPHYS_RAM_SEGS; i++) {
952 avail_mem[i].pmem_pvbase = m68k_btop(total_mem);
953 total_mem += avail_mem[i].pmem_end -
954 avail_mem[i].pmem_start;
955 if (avail_mem[i].pmem_next == NULL)
956 break;
957 }
958 #ifdef PMAP_DEBUG
959 if (total_mem != total_phys_mem)
960 panic("pmap_alloc_pv did not arrive at correct page count");
961 #endif
962
963 pvbase = (pv_t *) pmap_bootstrap_alloc(sizeof(pv_t) *
964 m68k_btop(total_phys_mem));
965 }
966
967 /* pmap_alloc_usertmgr INTERNAL
968 **
969 * Called from pmap_bootstrap() to allocate the structures which
970 * facilitate management of user MMU tables. Each user MMU table
971 * in the system has one such structure associated with it.
972 */
973 void
974 pmap_alloc_usertmgr()
975 {
976 /* Allocate user MMU table managers */
977 /* It would be a lot simpler to just make these BSS, but */
978 /* we may want to change their size at boot time... -j */
979 Atmgrbase = (a_tmgr_t *) pmap_bootstrap_alloc(sizeof(a_tmgr_t)
980 * NUM_A_TABLES);
981 Btmgrbase = (b_tmgr_t *) pmap_bootstrap_alloc(sizeof(b_tmgr_t)
982 * NUM_B_TABLES);
983 Ctmgrbase = (c_tmgr_t *) pmap_bootstrap_alloc(sizeof(c_tmgr_t)
984 * NUM_C_TABLES);
985
986 /*
987 * Allocate PV list elements for the physical to virtual
988 * mapping system.
989 */
990 pvebase = (pv_elem_t *) pmap_bootstrap_alloc(
991 sizeof(pv_elem_t) * (NUM_USER_PTES + NUM_KERN_PTES));
992 }
993
994 /* pmap_bootstrap_copyprom() INTERNAL
995 **
996 * Copy the PROM mappings into our own tables. Note, we
997 * can use physical addresses until __bootstrap returns.
998 */
999 void
1000 pmap_bootstrap_copyprom()
1001 {
1002 MachMonRomVector *romp;
1003 int *mon_ctbl;
1004 mmu_short_pte_t *kpte;
1005 int i, len;
1006
1007 romp = romVectorPtr;
1008
1009 /*
1010 * Copy the mappings in MON_KDB_START...MONEND
1011 * Note: mon_ctbl[0] maps MON_KDB_START
1012 */
1013 mon_ctbl = *romp->monptaddr;
1014 i = m68k_btop(MON_KDB_START - KERNBASE);
1015 kpte = &kernCbase[i];
1016 len = m68k_btop(MONEND - MON_KDB_START);
1017
1018 for (i = 0; i < len; i++) {
1019 kpte[i].attr.raw = mon_ctbl[i];
1020 }
1021
1022 /*
1023 * Copy the mappings at MON_DVMA_BASE (to the end).
1024 * Note, in here, mon_ctbl[0] maps MON_DVMA_BASE.
1025 * XXX - This does not appear to be necessary, but
1026 * I'm not sure yet if it is or not. -gwr
1027 */
1028 mon_ctbl = *romp->shadowpteaddr;
1029 i = m68k_btop(MON_DVMA_BASE - KERNBASE);
1030 kpte = &kernCbase[i];
1031 len = m68k_btop(MON_DVMA_SIZE);
1032
1033 for (i = 0; i < len; i++) {
1034 kpte[i].attr.raw = mon_ctbl[i];
1035 }
1036 }
1037
1038 /* pmap_takeover_mmu INTERNAL
1039 **
1040 * Called from pmap_bootstrap() after it has copied enough of the
1041 * PROM mappings into the kernel map so that we can use our own
1042 * MMU table.
1043 */
1044 void
1045 pmap_takeover_mmu()
1046 {
1047
1048 loadcrp(&kernel_crp);
1049 }
1050
1051 /* pmap_bootstrap_setprom() INTERNAL
1052 **
1053 * Set the PROM mappings so it can see kernel space.
1054 * Note that physical addresses are used here, which
1055 * we can get away with because this runs with the
1056 * low 1GB set for transparent translation.
1057 */
1058 void
1059 pmap_bootstrap_setprom()
1060 {
1061 mmu_long_dte_t *mon_dte;
1062 extern struct mmu_rootptr mon_crp;
1063 int i;
1064
1065 mon_dte = (mmu_long_dte_t *) mon_crp.rp_addr;
1066 for (i = MMU_TIA(KERNBASE); i < MMU_TIA(KERN_END); i++) {
1067 mon_dte[i].attr.raw = kernAbase[i].attr.raw;
1068 mon_dte[i].addr.raw = kernAbase[i].addr.raw;
1069 }
1070 }
1071
1072
1073 /* pmap_init INTERFACE
1074 **
1075 * Called at the end of vm_init() to set up the pmap system to go
1076 * into full time operation. All initialization of kernel_pmap
1077 * should be already done by now, so this should just do things
1078 * needed for user-level pmaps to work.
1079 */
1080 void
1081 pmap_init()
1082 {
1083 /** Initialize the manager pools **/
1084 TAILQ_INIT(&a_pool);
1085 TAILQ_INIT(&b_pool);
1086 TAILQ_INIT(&c_pool);
1087
1088 /**************************************************************
1089 * Initialize all tmgr structures and MMU tables they manage. *
1090 **************************************************************/
1091 /** Initialize A tables **/
1092 pmap_init_a_tables();
1093 /** Initialize B tables **/
1094 pmap_init_b_tables();
1095 /** Initialize C tables **/
1096 pmap_init_c_tables();
1097 }
1098
1099 /* pmap_init_a_tables() INTERNAL
1100 **
1101 * Initializes all A managers, their MMU A tables, and inserts
1102 * them into the A manager pool for use by the system.
1103 */
1104 void
1105 pmap_init_a_tables()
1106 {
1107 int i;
1108 a_tmgr_t *a_tbl;
1109
1110 for (i=0; i < NUM_A_TABLES; i++) {
1111 /* Select the next available A manager from the pool */
1112 a_tbl = &Atmgrbase[i];
1113
1114 /*
1115 * Clear its parent entry. Set its wired and valid
1116 * entry count to zero.
1117 */
1118 a_tbl->at_parent = NULL;
1119 a_tbl->at_wcnt = a_tbl->at_ecnt = 0;
1120
1121 /* Assign it the next available MMU A table from the pool */
1122 a_tbl->at_dtbl = &mmuAbase[i * MMU_A_TBL_SIZE];
1123
1124 /*
1125 * Initialize the MMU A table with the table in the `proc0',
1126 * or kernel, mapping. This ensures that every process has
1127 * the kernel mapped in the top part of its address space.
1128 */
1129 bcopy(kernAbase, a_tbl->at_dtbl, MMU_A_TBL_SIZE *
1130 sizeof(mmu_long_dte_t));
1131
1132 /*
1133 * Finally, insert the manager into the A pool,
1134 * making it ready to be used by the system.
1135 */
1136 TAILQ_INSERT_TAIL(&a_pool, a_tbl, at_link);
1137 }
1138 }
1139
1140 /* pmap_init_b_tables() INTERNAL
1141 **
1142 * Initializes all B table managers, their MMU B tables, and
1143 * inserts them into the B manager pool for use by the system.
1144 */
1145 void
1146 pmap_init_b_tables()
1147 {
1148 int i,j;
1149 b_tmgr_t *b_tbl;
1150
1151 for (i=0; i < NUM_B_TABLES; i++) {
1152 /* Select the next available B manager from the pool */
1153 b_tbl = &Btmgrbase[i];
1154
1155 b_tbl->bt_parent = NULL; /* clear its parent, */
1156 b_tbl->bt_pidx = 0; /* parent index, */
1157 b_tbl->bt_wcnt = 0; /* wired entry count, */
1158 b_tbl->bt_ecnt = 0; /* valid entry count. */
1159
1160 /* Assign it the next available MMU B table from the pool */
1161 b_tbl->bt_dtbl = &mmuBbase[i * MMU_B_TBL_SIZE];
1162
1163 /* Invalidate every descriptor in the table */
1164 for (j=0; j < MMU_B_TBL_SIZE; j++)
1165 b_tbl->bt_dtbl[j].attr.raw = MMU_DT_INVALID;
1166
1167 /* Insert the manager into the B pool */
1168 TAILQ_INSERT_TAIL(&b_pool, b_tbl, bt_link);
1169 }
1170 }
1171
1172 /* pmap_init_c_tables() INTERNAL
1173 **
1174 * Initializes all C table managers, their MMU C tables, and
1175 * inserts them into the C manager pool for use by the system.
1176 */
1177 void
1178 pmap_init_c_tables()
1179 {
1180 int i,j;
1181 c_tmgr_t *c_tbl;
1182
1183 for (i=0; i < NUM_C_TABLES; i++) {
1184 /* Select the next available C manager from the pool */
1185 c_tbl = &Ctmgrbase[i];
1186
1187 c_tbl->ct_parent = NULL; /* clear its parent, */
1188 c_tbl->ct_pidx = 0; /* parent index, */
1189 c_tbl->ct_wcnt = 0; /* wired entry count, */
1190 c_tbl->ct_ecnt = 0; /* valid entry count, */
1191 c_tbl->ct_pmap = NULL; /* parent pmap, */
1192 c_tbl->ct_va = 0; /* base of managed range */
1193
1194 /* Assign it the next available MMU C table from the pool */
1195 c_tbl->ct_dtbl = &mmuCbase[i * MMU_C_TBL_SIZE];
1196
1197 for (j=0; j < MMU_C_TBL_SIZE; j++)
1198 c_tbl->ct_dtbl[j].attr.raw = MMU_DT_INVALID;
1199
1200 TAILQ_INSERT_TAIL(&c_pool, c_tbl, ct_link);
1201 }
1202 }
1203
1204 /* pmap_init_pv() INTERNAL
1205 **
1206 * Initializes the Physical to Virtual mapping system.
1207 */
1208 void
1209 pmap_init_pv()
1210 {
1211 int i;
1212
1213 /* Initialize every PV head. */
1214 for (i = 0; i < m68k_btop(total_phys_mem); i++) {
1215 pvbase[i].pv_idx = PVE_EOL; /* Indicate no mappings */
1216 pvbase[i].pv_flags = 0; /* Zero out page flags */
1217 }
1218
1219 pv_initialized = TRUE;
1220 }
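
/*
 * A sketch of how the PV lists initialized above are threaded: the list
 * links are indices into the global PTE array rather than pointers, so
 * prepending a new mapping of physical page 'pa' through PTE 'c_pte'
 * (roughly what pmap_enter() does further on) amounts to:
 *
 *	pv = pa2pv(pa);
 *	idx = pteidx(c_pte);
 *	pvebase[idx].pve_next = pv->pv_idx;
 *	pv->pv_idx = idx;
 */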
1221
1222 /* get_a_table INTERNAL
1223 **
1224 * Retrieve and return a level A table for use in a user map.
1225 */
1226 a_tmgr_t *
1227 get_a_table()
1228 {
1229 a_tmgr_t *tbl;
1230 pmap_t pmap;
1231
1232 /* Get the top A table in the pool */
1233 tbl = a_pool.tqh_first;
1234 if (tbl == NULL) {
1235 /*
		 * XXX - Instead of panicking here and in other get_x_table
1237 * functions, we do have the option of sleeping on the head of
1238 * the table pool. Any function which updates the table pool
1239 * would then issue a wakeup() on the head, thus waking up any
1240 * processes waiting for a table.
1241 *
1242 * Actually, the place to sleep would be when some process
1243 * asks for a "wired" mapping that would run us short of
1244 * mapping resources. This design DEPENDS on always having
1245 * some mapping resources in the pool for stealing, so we
1246 * must make sure we NEVER let the pool become empty. -gwr
1247 */
1248 panic("get_a_table: out of A tables.");
1249 }
1250
1251 TAILQ_REMOVE(&a_pool, tbl, at_link);
1252 /*
1253 * If the table has a non-null parent pointer then it is in use.
1254 * Forcibly abduct it from its parent and clear its entries.
1255 * No re-entrancy worries here. This table would not be in the
1256 * table pool unless it was available for use.
1257 *
1258 * Note that the second argument to free_a_table() is FALSE. This
1259 * indicates that the table should not be relinked into the A table
1260 * pool. That is a job for the function that called us.
1261 */
1262 if (tbl->at_parent) {
1263 pmap = tbl->at_parent;
1264 free_a_table(tbl, FALSE);
1265 pmap->pm_a_tmgr = NULL;
1266 pmap->pm_a_phys = kernAphys;
1267 }
1268 #ifdef NON_REENTRANT
1269 /*
1270 * If the table isn't to be wired down, re-insert it at the
1271 * end of the pool.
1272 */
1273 if (!wired)
1274 /*
1275 * Quandary - XXX
1276 * Would it be better to let the calling function insert this
1277 * table into the queue? By inserting it here, we are allowing
1278 * it to be stolen immediately. The calling function is
1279 * probably not expecting to use a table that it is not
1280 * assured full control of.
		 * Answer - In the interest of re-entrancy, it is best to let
1282 * the calling function determine when a table is available
1283 * for use. Therefore this code block is not used.
1284 */
1285 TAILQ_INSERT_TAIL(&a_pool, tbl, at_link);
1286 #endif /* NON_REENTRANT */
1287 return tbl;
1288 }
1289
1290 /* get_b_table INTERNAL
1291 **
1292 * Return a level B table for use.
1293 */
1294 b_tmgr_t *
1295 get_b_table()
1296 {
1297 b_tmgr_t *tbl;
1298
1299 /* See 'get_a_table' for comments. */
1300 tbl = b_pool.tqh_first;
1301 if (tbl == NULL)
1302 panic("get_b_table: out of B tables.");
1303 TAILQ_REMOVE(&b_pool, tbl, bt_link);
1304 if (tbl->bt_parent) {
1305 tbl->bt_parent->at_dtbl[tbl->bt_pidx].attr.raw = MMU_DT_INVALID;
1306 tbl->bt_parent->at_ecnt--;
1307 free_b_table(tbl, FALSE);
1308 }
1309 #ifdef NON_REENTRANT
1310 if (!wired)
		/* XXX see quandary in get_a_table */
1312 /* XXX start lock */
1313 TAILQ_INSERT_TAIL(&b_pool, tbl, bt_link);
1314 /* XXX end lock */
1315 #endif /* NON_REENTRANT */
1316 return tbl;
1317 }
1318
1319 /* get_c_table INTERNAL
1320 **
1321 * Return a level C table for use.
1322 */
1323 c_tmgr_t *
1324 get_c_table()
1325 {
1326 c_tmgr_t *tbl;
1327
1328 /* See 'get_a_table' for comments */
1329 tbl = c_pool.tqh_first;
1330 if (tbl == NULL)
1331 panic("get_c_table: out of C tables.");
1332 TAILQ_REMOVE(&c_pool, tbl, ct_link);
1333 if (tbl->ct_parent) {
1334 tbl->ct_parent->bt_dtbl[tbl->ct_pidx].attr.raw = MMU_DT_INVALID;
1335 tbl->ct_parent->bt_ecnt--;
1336 free_c_table(tbl, FALSE);
1337 }
1338 #ifdef NON_REENTRANT
1339 if (!wired)
1340 /* XXX See quandary in get_a_table */
1341 /* XXX start lock */
		TAILQ_INSERT_TAIL(&c_pool, tbl, ct_link);
1343 /* XXX end lock */
1344 #endif /* NON_REENTRANT */
1345
1346 return tbl;
1347 }
1348
1349 /*
1350 * The following 'free_table' and 'steal_table' functions are called to
1351 * detach tables from their current obligations (parents and children) and
1352 * prepare them for reuse in another mapping.
1353 *
1354 * Free_table is used when the calling function will handle the fate
1355 * of the parent table, such as returning it to the free pool when it has
1356 * no valid entries. Functions that do not want to handle this should
1357 * call steal_table, in which the parent table's descriptors and entry
1358 * count are automatically modified when this table is removed.
1359 */
1360
1361 /* free_a_table INTERNAL
1362 **
1363 * Unmaps the given A table and all child tables from their current
1364 * mappings. Returns the number of pages that were invalidated.
1365 * If 'relink' is true, the function will return the table to the head
1366 * of the available table pool.
1367 *
1368 * Cache note: The MC68851 will automatically flush all
1369 * descriptors derived from a given A table from its
1370 * Automatic Translation Cache (ATC) if we issue a
1371 * 'PFLUSHR' instruction with the base address of the
 * table. This function should do so, and does.
1373 * Note note: We are using an MC68030 - there is no
1374 * PFLUSHR.
1375 */
1376 int
1377 free_a_table(a_tbl, relink)
1378 a_tmgr_t *a_tbl;
1379 boolean_t relink;
1380 {
1381 int i, removed_cnt;
1382 mmu_long_dte_t *dte;
1383 mmu_short_dte_t *dtbl;
1384 b_tmgr_t *tmgr;
1385
1386 /*
1387 * Flush the ATC cache of all cached descriptors derived
1388 * from this table.
1389 * Sun3x does not use 68851's cached table feature
1390 * flush_atc_crp(mmu_vtop(a_tbl->dte));
1391 */
1392
1393 /*
1394 * Remove any pending cache flushes that were designated
1395 * for the pmap this A table belongs to.
1396 * a_tbl->parent->atc_flushq[0] = 0;
1397 * Not implemented in sun3x.
1398 */
1399
1400 /*
1401 * All A tables in the system should retain a map for the
1402 * kernel. If the table contains any valid descriptors
1403 * (other than those for the kernel area), invalidate them all,
1404 * stopping short of the kernel's entries.
1405 */
1406 removed_cnt = 0;
1407 if (a_tbl->at_ecnt) {
1408 dte = a_tbl->at_dtbl;
1409 for (i=0; i < MMU_TIA(KERNBASE); i++) {
1410 /*
1411 * If a table entry points to a valid B table, free
1412 * it and its children.
1413 */
1414 if (MMU_VALID_DT(dte[i])) {
1415 /*
1416 * The following block does several things,
1417 * from innermost expression to the
1418 * outermost:
				 * 1) It extracts the base address of the
				 *    B table pointed to in the A table
				 *    entry dte[i].
1422 * 2) It converts this base address into
1423 * the virtual address it can be
1424 * accessed with. (all MMU tables point
1425 * to physical addresses.)
1426 * 3) It finds the corresponding manager
1427 * structure which manages this MMU table.
1428 * 4) It frees the manager structure.
1429 * (This frees the MMU table and all
1430 * child tables. See 'free_b_table' for
1431 * details.)
1432 */
1433 dtbl = mmu_ptov(dte[i].addr.raw);
1434 tmgr = mmuB2tmgr(dtbl);
1435 removed_cnt += free_b_table(tmgr, TRUE);
1436 dte[i].attr.raw = MMU_DT_INVALID;
1437 }
1438 }
1439 a_tbl->at_ecnt = 0;
1440 }
1441 if (relink) {
1442 a_tbl->at_parent = NULL;
1443 TAILQ_REMOVE(&a_pool, a_tbl, at_link);
1444 TAILQ_INSERT_HEAD(&a_pool, a_tbl, at_link);
1445 }
1446 return removed_cnt;
1447 }
1448
1449 /* free_b_table INTERNAL
1450 **
1451 * Unmaps the given B table and all its children from their current
1452 * mappings. Returns the number of pages that were invalidated.
1453 * (For comments, see 'free_a_table()').
1454 */
1455 int
1456 free_b_table(b_tbl, relink)
1457 b_tmgr_t *b_tbl;
1458 boolean_t relink;
1459 {
1460 int i, removed_cnt;
1461 mmu_short_dte_t *dte;
1462 mmu_short_pte_t *dtbl;
1463 c_tmgr_t *tmgr;
1464
1465 removed_cnt = 0;
1466 if (b_tbl->bt_ecnt) {
1467 dte = b_tbl->bt_dtbl;
1468 for (i=0; i < MMU_B_TBL_SIZE; i++) {
1469 if (MMU_VALID_DT(dte[i])) {
1470 dtbl = mmu_ptov(MMU_DTE_PA(dte[i]));
1471 tmgr = mmuC2tmgr(dtbl);
1472 removed_cnt += free_c_table(tmgr, TRUE);
1473 dte[i].attr.raw = MMU_DT_INVALID;
1474 }
1475 }
1476 b_tbl->bt_ecnt = 0;
1477 }
1478
1479 if (relink) {
1480 b_tbl->bt_parent = NULL;
1481 TAILQ_REMOVE(&b_pool, b_tbl, bt_link);
1482 TAILQ_INSERT_HEAD(&b_pool, b_tbl, bt_link);
1483 }
1484 return removed_cnt;
1485 }
1486
1487 /* free_c_table INTERNAL
1488 **
1489 * Unmaps the given C table from use and returns it to the pool for
1490 * re-use. Returns the number of pages that were invalidated.
1491 *
1492 * This function preserves any physical page modification information
1493 * contained in the page descriptors within the C table by calling
1494 * 'pmap_remove_pte().'
1495 */
1496 int
1497 free_c_table(c_tbl, relink)
1498 c_tmgr_t *c_tbl;
1499 boolean_t relink;
1500 {
1501 int i, removed_cnt;
1502
1503 removed_cnt = 0;
1504 if (c_tbl->ct_ecnt) {
1505 for (i=0; i < MMU_C_TBL_SIZE; i++) {
1506 if (MMU_VALID_DT(c_tbl->ct_dtbl[i])) {
1507 pmap_remove_pte(&c_tbl->ct_dtbl[i]);
1508 removed_cnt++;
1509 }
1510 }
1511 c_tbl->ct_ecnt = 0;
1512 }
1513
1514 if (relink) {
1515 c_tbl->ct_parent = NULL;
1516 TAILQ_REMOVE(&c_pool, c_tbl, ct_link);
1517 TAILQ_INSERT_HEAD(&c_pool, c_tbl, ct_link);
1518 }
1519 return removed_cnt;
1520 }
1521
1522 #if 0
1523 /* free_c_table_novalid INTERNAL
1524 **
1525 * Frees the given C table manager without checking to see whether
1526 * or not it contains any valid page descriptors as it is assumed
1527 * that it does not.
1528 */
1529 void
1530 free_c_table_novalid(c_tbl)
1531 c_tmgr_t *c_tbl;
1532 {
1533 TAILQ_REMOVE(&c_pool, c_tbl, ct_link);
1534 TAILQ_INSERT_HEAD(&c_pool, c_tbl, ct_link);
1535 c_tbl->ct_parent->bt_dtbl[c_tbl->ct_pidx].attr.raw = MMU_DT_INVALID;
1536 c_tbl->ct_parent->bt_ecnt--;
1537 /*
1538 * XXX - Should call equiv. of 'free_b_table_novalid' here if
1539 * we just removed the last entry of the parent B table.
	 * But I want to ensure that this will not endanger pmap_enter()
1541 * with sudden removal of tables it is working with.
1542 *
1543 * We should probably add another field to each table, indicating
1544 * whether or not it is 'locked', ie. in the process of being
1545 * modified.
1546 */
1547 c_tbl->ct_parent = NULL;
1548 }
1549 #endif
1550
1551 /* pmap_remove_pte INTERNAL
1552 **
1553 * Unmap the given pte and preserve any page modification
 * information by transferring it to the pv head of the
1555 * physical page it maps to. This function does not update
1556 * any reference counts because it is assumed that the calling
1557 * function will do so.
1558 */
1559 void
1560 pmap_remove_pte(pte)
1561 mmu_short_pte_t *pte;
1562 {
1563 u_short pv_idx, targ_idx;
1564 int s;
1565 vm_offset_t pa;
1566 pv_t *pv;
1567
1568 pa = MMU_PTE_PA(*pte);
1569 if (is_managed(pa)) {
1570 pv = pa2pv(pa);
1571 targ_idx = pteidx(pte); /* Index of PTE being removed */
1572
1573 /*
1574 * If the PTE being removed is the first (or only) PTE in
1575 * the list of PTEs currently mapped to this page, remove the
1576 * PTE by changing the index found on the PV head. Otherwise
1577 * a linear search through the list will have to be executed
1578 * in order to find the PVE which points to the PTE being
1579 * removed, so that it may be modified to point to its new
1580 * neighbor.
1581 */
1582 s = splimp();
1583 pv_idx = pv->pv_idx; /* Index of first PTE in PV list */
1584 if (pv_idx == targ_idx) {
1585 pv->pv_idx = pvebase[targ_idx].pve_next;
1586 } else {
1587 /*
1588 * Find the PV element which points to the target
1589 * element.
1590 */
1591 while (pvebase[pv_idx].pve_next != targ_idx) {
1592 pv_idx = pvebase[pv_idx].pve_next;
1593 #ifdef DIAGNOSTIC
1594 if (pv_idx == PVE_EOL)
1595 panic("pmap_remove_pte: pv list end!");
1596 #endif
1597 }
1598
1599 /*
1600 * At this point, pv_idx is the index of the PV
1601 * element just before the target element in the list.
1602 * Unlink the target.
1603 */
1604 pvebase[pv_idx].pve_next = pvebase[targ_idx].pve_next;
1605 }
1606 /*
1607 * Save the mod/ref bits of the pte by simply
1608 * ORing the entire pte onto the pv_flags member
1609 * of the pv structure.
1610 * There is no need to use a separate bit pattern
1611 * for usage information on the pv head than that
1612 * which is used on the MMU ptes.
1613 */
1614 pv->pv_flags |= (u_short) pte->attr.raw;
1615 splx(s);
1616 }
1617
1618 pte->attr.raw = MMU_DT_INVALID;
1619 }
1620
1621 /* pmap_stroll INTERNAL
1622 **
1623 * Retrieve the addresses of all table managers involved in the mapping of
 * the given virtual address. If the table walk completed successfully,
 * return TRUE. If it was only partially successful, return FALSE.
1626 * The table walk performed by this function is important to many other
1627 * functions in this module.
1628 *
1629 * Note: This function ought to be easier to read.
1630 */
1631 boolean_t
1632 pmap_stroll(pmap, va, a_tbl, b_tbl, c_tbl, pte, a_idx, b_idx, pte_idx)
1633 pmap_t pmap;
1634 vm_offset_t va;
1635 a_tmgr_t **a_tbl;
1636 b_tmgr_t **b_tbl;
1637 c_tmgr_t **c_tbl;
1638 mmu_short_pte_t **pte;
1639 int *a_idx, *b_idx, *pte_idx;
1640 {
1641 mmu_long_dte_t *a_dte; /* A: long descriptor table */
1642 mmu_short_dte_t *b_dte; /* B: short descriptor table */
1643
1644 if (pmap == pmap_kernel())
1645 return FALSE;
1646
1647 /* Does the given pmap have its own A table? */
1648 *a_tbl = pmap->pm_a_tmgr;
1649 if (*a_tbl == NULL)
1650 return FALSE; /* No. Return unknown. */
1651 /* Does the A table have a valid B table
1652 * under the corresponding table entry?
1653 */
1654 *a_idx = MMU_TIA(va);
1655 a_dte = &((*a_tbl)->at_dtbl[*a_idx]);
1656 if (!MMU_VALID_DT(*a_dte))
1657 return FALSE; /* No. Return unknown. */
1658 /* Yes. Extract B table from the A table. */
1659 *b_tbl = mmuB2tmgr(mmu_ptov(a_dte->addr.raw));
1660 /* Does the B table have a valid C table
1661 * under the corresponding table entry?
1662 */
1663 *b_idx = MMU_TIB(va);
1664 b_dte = &((*b_tbl)->bt_dtbl[*b_idx]);
1665 if (!MMU_VALID_DT(*b_dte))
1666 return FALSE; /* No. Return unknown. */
1667 /* Yes. Extract C table from the B table. */
1668 *c_tbl = mmuC2tmgr(mmu_ptov(MMU_DTE_PA(*b_dte)));
1669 *pte_idx = MMU_TIC(va);
1670 *pte = &((*c_tbl)->ct_dtbl[*pte_idx]);
1671
1672 return TRUE;
1673 }
1674
1675 /* pmap_enter INTERFACE
1676 **
1677 * Called by the kernel to map a virtual address
1678 * to a physical address in the given process map.
1679 *
1680 * Note: this function should apply an exclusive lock
1681 * on the pmap system for its duration. (it certainly
1682 * would save my hair!!)
1683 * This function ought to be easier to read.
1684 */
1685 void
1686 pmap_enter(pmap, va, pa, prot, wired)
1687 pmap_t pmap;
1688 vm_offset_t va;
1689 vm_offset_t pa;
1690 vm_prot_t prot;
1691 boolean_t wired;
1692 {
1693 boolean_t insert, managed; /* Marks the need for PV insertion.*/
1694 u_short nidx; /* PV list index */
1695 int s; /* Used for splimp()/splx() */
1696 int flags; /* Mapping flags. eg. Cache inhibit */
1697 u_int a_idx, b_idx, pte_idx; /* table indices */
1698 a_tmgr_t *a_tbl; /* A: long descriptor table manager */
1699 b_tmgr_t *b_tbl; /* B: short descriptor table manager */
1700 c_tmgr_t *c_tbl; /* C: short page table manager */
1701 mmu_long_dte_t *a_dte; /* A: long descriptor table */
1702 mmu_short_dte_t *b_dte; /* B: short descriptor table */
1703 mmu_short_pte_t *c_pte; /* C: short page descriptor table */
1704 pv_t *pv; /* pv list head */
1705 enum {NONE, NEWA, NEWB, NEWC} llevel; /* used at end */
1706
1707 if (pmap == NULL)
1708 return;
1709 if (pmap == pmap_kernel()) {
1710 pmap_enter_kernel(va, pa, prot);
1711 return;
1712 }
1713
1714 flags = (pa & ~MMU_PAGE_MASK);
1715 pa &= MMU_PAGE_MASK;
1716
1717 /*
1718 * Determine if the physical address being mapped is on-board RAM.
1719 * Any other area of the address space is likely to belong to a
1720 	 * device and hence it would be disastrous to cache its contents.
1721 */
1722 if ((managed = is_managed(pa)) == FALSE)
1723 flags |= PMAP_NC;
1724
1725 /*
1726 * For user mappings we walk along the MMU tables of the given
1727 * pmap, reaching a PTE which describes the virtual page being
1728 * mapped or changed. If any level of the walk ends in an invalid
1729 * entry, a table must be allocated and the entry must be updated
1730 * to point to it.
1731 * There is a bit of confusion as to whether this code must be
1732 * re-entrant. For now we will assume it is. To support
1733 * re-entrancy we must unlink tables from the table pool before
1734 * we assume we may use them. Tables are re-linked into the pool
1735 * when we are finished with them at the end of the function.
1736 * But I don't feel like doing that until we have proof that this
1737 * needs to be re-entrant.
1738 * 'llevel' records which tables need to be relinked.
1739 */
1740 llevel = NONE;
1741
1742 /*
1743 * Step 1 - Retrieve the A table from the pmap. If it has no
1744 * A table, allocate a new one from the available pool.
1745 */
1746
1747 a_tbl = pmap->pm_a_tmgr;
1748 if (a_tbl == NULL) {
1749 /*
1750 * This pmap does not currently have an A table. Allocate
1751 * a new one.
1752 */
1753 a_tbl = get_a_table();
1754 a_tbl->at_parent = pmap;
1755
1756 /*
1757 * Assign this new A table to the pmap, and calculate its
1758 * physical address so that loadcrp() can be used to make
1759 * the table active.
1760 */
1761 pmap->pm_a_tmgr = a_tbl;
1762 pmap->pm_a_phys = mmu_vtop(a_tbl->at_dtbl);
1763
1764 /*
1765 * If the process receiving a new A table is the current
1766 * process, we are responsible for setting the MMU so that
1767 * it becomes the current address space. This only adds
1768 * new mappings, so no need to flush anything.
1769 */
1770 if (pmap == current_pmap()) {
1771 kernel_crp.rp_addr = pmap->pm_a_phys;
1772 loadcrp(&kernel_crp);
1773 }
1774
1775 if (!wired)
1776 llevel = NEWA;
1777 } else {
1778 /*
1779 * Use the A table already allocated for this pmap.
1780 * Unlink it from the A table pool if necessary.
1781 */
1782 if (wired && !a_tbl->at_wcnt)
1783 TAILQ_REMOVE(&a_pool, a_tbl, at_link);
1784 }
1785
1786 /*
1787 * Step 2 - Walk into the B table. If there is no valid B table,
1788 * allocate one.
1789 */
1790
1791 a_idx = MMU_TIA(va); /* Calculate the TIA of the VA. */
1792 a_dte = &a_tbl->at_dtbl[a_idx]; /* Retrieve descriptor from table */
1793 if (MMU_VALID_DT(*a_dte)) { /* Is the descriptor valid? */
1794 /* The descriptor is valid. Use the B table it points to. */
1795 /*************************************
1796 * a_idx *
1797 * v *
1798 * a_tbl -> +-+-+-+-+-+-+-+-+-+-+-+- *
1799 * | | | | | | | | | | | | *
1800 * +-+-+-+-+-+-+-+-+-+-+-+- *
1801 * | *
1802 * \- b_tbl -> +-+- *
1803 * | | *
1804 * +-+- *
1805 *************************************/
1806 b_dte = mmu_ptov(a_dte->addr.raw);
1807 b_tbl = mmuB2tmgr(b_dte);
1808
1809 /*
1810 		 * If the requested mapping must be wired, but the table
1811 		 * being used to map it is not, the table must be removed
1812 		 * from the available pool and the wired entry count of its
1813 		 * parent table incremented.
1814 */
1815 if (wired && !b_tbl->bt_wcnt) {
1816 TAILQ_REMOVE(&b_pool, b_tbl, bt_link);
1817 a_tbl->at_wcnt++;
1818 }
1819 } else {
1820 /* The descriptor is invalid. Allocate a new B table. */
1821 b_tbl = get_b_table();
1822
1823 /* Point the parent A table descriptor to this new B table. */
1824 a_dte->addr.raw = mmu_vtop(b_tbl->bt_dtbl);
1825 a_dte->attr.raw = MMU_LONG_DTE_LU | MMU_DT_SHORT;
1826 a_tbl->at_ecnt++; /* Update parent's valid entry count */
1827
1828 /* Create the necessary back references to the parent table */
1829 b_tbl->bt_parent = a_tbl;
1830 b_tbl->bt_pidx = a_idx;
1831
1832 /*
1833 * If this table is to be wired, make sure the parent A table
1834 * wired count is updated to reflect that it has another wired
1835 * entry.
1836 */
1837 if (wired)
1838 a_tbl->at_wcnt++;
1839 else if (llevel == NONE)
1840 llevel = NEWB;
1841 }
1842
1843 /*
1844 	 * Step 3 - Walk into the C table. If there is no valid C table,
1845 * allocate one.
1846 */
1847
1848 b_idx = MMU_TIB(va); /* Calculate the TIB of the VA */
1849 b_dte = &b_tbl->bt_dtbl[b_idx]; /* Retrieve descriptor from table */
1850 if (MMU_VALID_DT(*b_dte)) { /* Is the descriptor valid? */
1851 /* The descriptor is valid. Use the C table it points to. */
1852 /**************************************
1853 * c_idx *
1854 * | v *
1855 * \- b_tbl -> +-+-+-+-+-+-+-+-+-+-+- *
1856 * | | | | | | | | | | | *
1857 * +-+-+-+-+-+-+-+-+-+-+- *
1858 * | *
1859 * \- c_tbl -> +-+-- *
1860 * | | | *
1861 * +-+-- *
1862 **************************************/
1863 c_pte = mmu_ptov(MMU_PTE_PA(*b_dte));
1864 c_tbl = mmuC2tmgr(c_pte);
1865
1866 /* If mapping is wired and table is not */
1867 if (wired && !c_tbl->ct_wcnt) {
1868 TAILQ_REMOVE(&c_pool, c_tbl, ct_link);
1869 b_tbl->bt_wcnt++;
1870 }
1871 } else {
1872 /* The descriptor is invalid. Allocate a new C table. */
1873 c_tbl = get_c_table();
1874
1875 /* Point the parent B table descriptor to this new C table. */
1876 b_dte->attr.raw = mmu_vtop(c_tbl->ct_dtbl);
1877 b_dte->attr.raw |= MMU_DT_SHORT;
1878 b_tbl->bt_ecnt++; /* Update parent's valid entry count */
1879
1880 /* Create the necessary back references to the parent table */
1881 c_tbl->ct_parent = b_tbl;
1882 c_tbl->ct_pidx = b_idx;
1883 /*
1884 * Store the pmap and base virtual managed address for faster
1885 * retrieval in the PV functions.
1886 */
1887 c_tbl->ct_pmap = pmap;
1888 c_tbl->ct_va = (va & (MMU_TIA_MASK|MMU_TIB_MASK));
1889
1890 /*
1891 * If this table is to be wired, make sure the parent B table
1892 * wired count is updated to reflect that it has another wired
1893 * entry.
1894 */
1895 if (wired)
1896 b_tbl->bt_wcnt++;
1897 else if (llevel == NONE)
1898 llevel = NEWC;
1899 }
1900
1901 /*
1902 * Step 4 - Deposit a page descriptor (PTE) into the appropriate
1903 * slot of the C table, describing the PA to which the VA is mapped.
1904 */
1905
1906 pte_idx = MMU_TIC(va);
1907 c_pte = &c_tbl->ct_dtbl[pte_idx];
1908 if (MMU_VALID_DT(*c_pte)) { /* Is the entry currently valid? */
1909 /*
1910 * The PTE is currently valid. This particular call
1911 * is just a synonym for one (or more) of the following
1912 * operations:
1913 * change protection of a page
1914 * change wiring status of a page
1915 * remove the mapping of a page
1916 *
1917 * XXX - Semi critical: This code should unwire the PTE
1918 * and, possibly, associated parent tables if this is a
1919 * change wiring operation. Currently it does not.
1920 *
1921 * This may be ok if pmap_change_wiring() is the only
1922 * interface used to UNWIRE a page.
1923 */
1924
1925 /* First check if this is a wiring operation. */
1926 if (wired && (c_pte->attr.raw & MMU_SHORT_PTE_WIRED)) {
1927 /*
1928 * The PTE is already wired. To prevent it from being
1929 * counted as a new wiring operation, reset the 'wired'
1930 * variable.
1931 */
1932 wired = FALSE;
1933 }
1934
1935 /* Is the new address the same as the old? */
1936 if (MMU_PTE_PA(*c_pte) == pa) {
1937 /*
1938 * Yes, mark that it does not need to be reinserted
1939 * into the PV list.
1940 */
1941 insert = FALSE;
1942
1943 /*
1944 * Clear all but the modified, referenced and wired
1945 * bits on the PTE.
1946 */
1947 c_pte->attr.raw &= (MMU_SHORT_PTE_M
1948 | MMU_SHORT_PTE_USED | MMU_SHORT_PTE_WIRED);
1949 } else {
1950 /* No, remove the old entry */
1951 pmap_remove_pte(c_pte);
1952 insert = TRUE;
1953 }
1954
1955 /*
1956 * TLB flush is only necessary if modifying current map.
1957 * However, in pmap_enter(), the pmap almost always IS
1958 * the current pmap, so don't even bother to check.
1959 */
1960 TBIS(va);
1961 } else {
1962 /*
1963 * The PTE is invalid. Increment the valid entry count in
1964 * the C table manager to reflect the addition of a new entry.
1965 */
1966 c_tbl->ct_ecnt++;
1967
1968 /* XXX - temporarily make sure the PTE is cleared. */
1969 c_pte->attr.raw = 0;
1970
1971 /* It will also need to be inserted into the PV list. */
1972 insert = TRUE;
1973 }
1974
1975 /*
1976 * If page is changing from unwired to wired status, set an unused bit
1977 * within the PTE to indicate that it is wired. Also increment the
1978 * wired entry count in the C table manager.
1979 */
1980 if (wired) {
1981 c_pte->attr.raw |= MMU_SHORT_PTE_WIRED;
1982 c_tbl->ct_wcnt++;
1983 }
1984
1985 /*
1986 * Map the page, being careful to preserve modify/reference/wired
1987 * bits. At this point it is assumed that the PTE either has no bits
1988 * set, or if there are set bits, they are only modified, reference or
1989 * wired bits. If not, the following statement will cause erratic
1990 * behavior.
1991 */
1992 #ifdef PMAP_DEBUG
1993 if (c_pte->attr.raw & ~(MMU_SHORT_PTE_M |
1994 MMU_SHORT_PTE_USED | MMU_SHORT_PTE_WIRED)) {
1995 printf("pmap_enter: junk left in PTE at %p\n", c_pte);
1996 Debugger();
1997 }
1998 #endif
1999 c_pte->attr.raw |= ((u_long) pa | MMU_DT_PAGE);
2000
2001 /*
2002 * If the mapping should be read-only, set the write protect
2003 * bit in the PTE.
2004 */
2005 if (!(prot & VM_PROT_WRITE))
2006 c_pte->attr.raw |= MMU_SHORT_PTE_WP;
2007
2008 /*
2009 	 * If the mapping should be cache inhibited (indicated by flag
2010 	 * bits in the low-order bits of the physical address),
2011 	 * mark the PTE as a cache-inhibited page.
2012 */
2013 if (flags & PMAP_NC)
2014 c_pte->attr.raw |= MMU_SHORT_PTE_CI;
2015
2016 /*
2017 * If the physical address being mapped is managed by the PV
2018 * system then link the pte into the list of pages mapped to that
2019 * address.
2020 */
2021 if (insert && managed) {
2022 pv = pa2pv(pa);
2023 nidx = pteidx(c_pte);
2024
2025 s = splimp();
2026 pvebase[nidx].pve_next = pv->pv_idx;
2027 pv->pv_idx = nidx;
2028 splx(s);
2029 }
2030
2031 /* Move any allocated tables back into the active pool. */
2032
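	/*
	 * The cases below intentionally fall through: if a new A table was
	 * allocated (NEWA), new B and C tables were necessarily allocated
	 * beneath it as well, so all three must be re-linked; likewise a
	 * new B table (NEWB) implies a new C table.
	 */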
2033 switch (llevel) {
2034 case NEWA:
2035 TAILQ_INSERT_TAIL(&a_pool, a_tbl, at_link);
2036 /* FALLTHROUGH */
2037 case NEWB:
2038 TAILQ_INSERT_TAIL(&b_pool, b_tbl, bt_link);
2039 /* FALLTHROUGH */
2040 case NEWC:
2041 TAILQ_INSERT_TAIL(&c_pool, c_tbl, ct_link);
2042 /* FALLTHROUGH */
2043 default:
2044 break;
2045 }
2046 }
2047
2048 /* pmap_enter_kernel INTERNAL
2049 **
2050 * Map the given virtual address to the given physical address within the
2051 * kernel address space. This function exists because the kernel map does
2052 * not do dynamic table allocation. It consists of a contiguous array of ptes
2053 * and can be edited directly without the need to walk through any tables.
2054 *
2055 * XXX: "Danger, Will Robinson!"
2056 * Note that the kernel should never take a fault on any page
2057 * between [ KERNBASE .. virtual_avail ] and this is checked in
2058 * trap.c for kernel-mode MMU faults. This means that mappings
2059  * created in that range must be implicitly wired. -gwr
2060 */
2061 void
2062 pmap_enter_kernel(va, pa, prot)
2063 vm_offset_t va;
2064 vm_offset_t pa;
2065 vm_prot_t prot;
2066 {
2067 boolean_t was_valid, insert;
2068 u_short pte_idx, pv_idx;
2069 int s, flags;
2070 mmu_short_pte_t *pte;
2071 pv_t *pv;
2072 vm_offset_t old_pa;
2073
2074 flags = (pa & ~MMU_PAGE_MASK);
2075 pa &= MMU_PAGE_MASK;
2076
2077 /*
2078 * Calculate the index of the PTE being modified.
2079 */
2080 pte_idx = (u_long) m68k_btop(va - KERNBASE);
2081
2082 /* This array is traditionally named "Sysmap" */
2083 pte = &kernCbase[pte_idx];
2084
2085 s = splimp();
2086 if (MMU_VALID_DT(*pte)) {
2087 was_valid = TRUE;
2088 /*
2089 * If the PTE is already mapped to an address and it differs
2090 * from the address requested, unlink it from the PV list.
2091 */
2092 old_pa = MMU_PTE_PA(*pte);
2093 if (pa != old_pa) {
2094 if (is_managed(old_pa)) {
2095 /* XXX - Make this into a function call? */
2096 pv = pa2pv(old_pa);
2097 pv_idx = pv->pv_idx;
2098 if (pv_idx == pte_idx) {
2099 pv->pv_idx = pvebase[pte_idx].pve_next;
2100 } else {
2101 while (pvebase[pv_idx].pve_next != pte_idx)
2102 pv_idx = pvebase[pv_idx].pve_next;
2103 pvebase[pv_idx].pve_next =
2104 pvebase[pte_idx].pve_next;
2105 }
2106 /* Save modified/reference bits */
2107 pv->pv_flags |= (u_short) pte->attr.raw;
2108 }
2109 if (is_managed(pa))
2110 insert = TRUE;
2111 else
2112 insert = FALSE;
2113 /*
2114 * Clear out any old bits in the PTE.
2115 */
2116 pte->attr.raw = MMU_DT_INVALID;
2117 } else {
2118 /*
2119 * Old PA and new PA are the same. No need to relink
2120 * the mapping within the PV list.
2121 */
2122 insert = FALSE;
2123
2124 /*
2125 * Save any mod/ref bits on the PTE.
2126 */
2127 pte->attr.raw &= (MMU_SHORT_PTE_USED|MMU_SHORT_PTE_M);
2128 }
2129 } else {
2130 pte->attr.raw = MMU_DT_INVALID;
2131 was_valid = FALSE;
2132 if (is_managed(pa))
2133 insert = TRUE;
2134 else
2135 insert = FALSE;
2136 }
2137
2138 /*
2139 	 * Map the page, being careful to preserve the modified/referenced
2140 	 * bits on the PTE.
2141 */
2142 pte->attr.raw |= (pa | MMU_DT_PAGE);
2143
2144 if (!(prot & VM_PROT_WRITE)) /* If access should be read-only */
2145 pte->attr.raw |= MMU_SHORT_PTE_WP;
2146 if (flags & PMAP_NC)
2147 pte->attr.raw |= MMU_SHORT_PTE_CI;
2148 if (was_valid)
2149 TBIS(va);
2150
2151 /*
2152 * Insert the PTE into the PV system, if need be.
2153 */
2154 if (insert) {
2155 pv = pa2pv(pa);
2156 pvebase[pte_idx].pve_next = pv->pv_idx;
2157 pv->pv_idx = pte_idx;
2158 }
2159 splx(s);
2160
2161 }
2162
2163 /* pmap_protect INTERFACE
2164 **
2165 * Apply the given protection to the given virtual address range within
2166 * the given map.
2167 *
2168 * It is ok for the protection applied to be stronger than what is
2169 * specified. We use this to our advantage when the given map has no
2170 * mapping for the virtual address. By skipping a page when this
2171 * is discovered, we are effectively applying a protection of VM_PROT_NONE,
2172 * and therefore do not need to map the page just to apply a protection
2173 * code. Only pmap_enter() needs to create new mappings if they do not exist.
2174 *
2175  * XXX - This function could be sped up by using pmap_stroll() for initial
2176  * setup, and then strolling through the tables manually in the loop below.
2177 */
2178 void
2179 pmap_protect(pmap, startva, endva, prot)
2180 pmap_t pmap;
2181 vm_offset_t startva, endva;
2182 vm_prot_t prot;
2183 {
2184 boolean_t iscurpmap;
2185 int a_idx, b_idx, c_idx;
2186 a_tmgr_t *a_tbl;
2187 b_tmgr_t *b_tbl;
2188 c_tmgr_t *c_tbl;
2189 mmu_short_pte_t *pte;
2190
2191 if (pmap == NULL)
2192 return;
2193 if (pmap == pmap_kernel()) {
2194 pmap_protect_kernel(startva, endva, prot);
2195 return;
2196 }
2197
2198 /*
2199 * In this particular pmap implementation, there are only three
2200 * types of memory protection: 'all' (read/write/execute),
2201 * 'read-only' (read/execute) and 'none' (no mapping.)
2202 * It is not possible for us to treat 'executable' as a separate
2203 * protection type. Therefore, protection requests that seek to
2204 * remove execute permission while retaining read or write, and those
2205 * that make little sense (write-only for example) are ignored.
2206 */
2207 switch (prot) {
2208 case VM_PROT_NONE:
2209 /*
2210 * A request to apply the protection code of
2211 * 'VM_PROT_NONE' is a synonym for pmap_remove().
2212 */
2213 pmap_remove(pmap, startva, endva);
2214 return;
2215 case VM_PROT_EXECUTE:
2216 case VM_PROT_READ:
2217 case VM_PROT_READ|VM_PROT_EXECUTE:
2218 /* continue */
2219 break;
2220 case VM_PROT_WRITE:
2221 case VM_PROT_WRITE|VM_PROT_READ:
2222 case VM_PROT_WRITE|VM_PROT_EXECUTE:
2223 case VM_PROT_ALL:
2224 /* None of these should happen in a sane system. */
2225 return;
2226 }
2227
2228 /*
2229 * If the pmap has no A table, it has no mappings and therefore
2230 * there is nothing to protect.
2231 */
2232 if ((a_tbl = pmap->pm_a_tmgr) == NULL)
2233 return;
2234
2235 a_idx = MMU_TIA(startva);
2236 b_idx = MMU_TIB(startva);
2237 c_idx = MMU_TIC(startva);
2238 	b_tbl = NULL; c_tbl = NULL;
2239
2240 iscurpmap = (pmap == current_pmap());
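	/*
	 * Walk the range, caching the B and C table managers so that the
	 * A and B tables are only consulted when an index rolls over.
	 * When a B or C table is missing, skip ahead by MMU_TIA_RANGE or
	 * MMU_TIB_RANGE respectively, since there is nothing to protect
	 * in that stretch.
	 */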
2241 while (startva < endva) {
2242 if (b_tbl || MMU_VALID_DT(a_tbl->at_dtbl[a_idx])) {
2243 if (b_tbl == NULL) {
2244 b_tbl = (b_tmgr_t *) a_tbl->at_dtbl[a_idx].addr.raw;
2245 b_tbl = mmu_ptov((vm_offset_t) b_tbl);
2246 b_tbl = mmuB2tmgr((mmu_short_dte_t *) b_tbl);
2247 }
2248 if (c_tbl || MMU_VALID_DT(b_tbl->bt_dtbl[b_idx])) {
2249 if (c_tbl == NULL) {
2250 c_tbl = (c_tmgr_t *) MMU_DTE_PA(b_tbl->bt_dtbl[b_idx]);
2251 c_tbl = mmu_ptov((vm_offset_t) c_tbl);
2252 c_tbl = mmuC2tmgr((mmu_short_pte_t *) c_tbl);
2253 }
2254 if (MMU_VALID_DT(c_tbl->ct_dtbl[c_idx])) {
2255 pte = &c_tbl->ct_dtbl[c_idx];
2256 /* make the mapping read-only */
2257 pte->attr.raw |= MMU_SHORT_PTE_WP;
2258 /*
2259 * If we just modified the current address space,
2260 * flush any translations for the modified page from
2261 * the translation cache and any data from it in the
2262 * data cache.
2263 */
2264 if (iscurpmap)
2265 TBIS(startva);
2266 }
2267 startva += NBPG;
2268
2269 if (++c_idx >= MMU_C_TBL_SIZE) { /* exceeded C table? */
2270 c_tbl = NULL;
2271 c_idx = 0;
2272 if (++b_idx >= MMU_B_TBL_SIZE) { /* exceeded B table? */
2273 b_tbl = NULL;
2274 b_idx = 0;
2275 }
2276 }
2277 } else { /* C table wasn't valid */
2278 c_tbl = NULL;
2279 c_idx = 0;
2280 startva += MMU_TIB_RANGE;
2281 if (++b_idx >= MMU_B_TBL_SIZE) { /* exceeded B table? */
2282 b_tbl = NULL;
2283 b_idx = 0;
2284 }
2285 } /* C table */
2286 } else { /* B table wasn't valid */
2287 b_tbl = NULL;
2288 b_idx = 0;
2289 startva += MMU_TIA_RANGE;
2290 a_idx++;
2291 } /* B table */
2292 }
2293 }
2294
2295 /* pmap_protect_kernel INTERNAL
2296 **
2297 * Apply the given protection code to a kernel address range.
2298 */
2299 void
2300 pmap_protect_kernel(startva, endva, prot)
2301 vm_offset_t startva, endva;
2302 vm_prot_t prot;
2303 {
2304 vm_offset_t va;
2305 mmu_short_pte_t *pte;
2306
2307 pte = &kernCbase[(unsigned long) m68k_btop(startva - KERNBASE)];
2308 for (va = startva; va < endva; va += NBPG, pte++) {
2309 if (MMU_VALID_DT(*pte)) {
2310 switch (prot) {
2311 case VM_PROT_ALL:
2312 break;
2313 case VM_PROT_EXECUTE:
2314 case VM_PROT_READ:
2315 case VM_PROT_READ|VM_PROT_EXECUTE:
2316 pte->attr.raw |= MMU_SHORT_PTE_WP;
2317 break;
2318 case VM_PROT_NONE:
2319 /* this is an alias for 'pmap_remove_kernel' */
2320 pmap_remove_pte(pte);
2321 break;
2322 default:
2323 break;
2324 }
2325 /*
2326 * since this is the kernel, immediately flush any cached
2327 * descriptors for this address.
2328 */
2329 TBIS(va);
2330 }
2331 }
2332 }
2333
2334 /* pmap_change_wiring INTERFACE
2335 **
2336 * Changes the wiring of the specified page.
2337 *
2338 * This function is called from vm_fault.c to unwire
2339 * a mapping. It really should be called 'pmap_unwire'
2340 * because it is never asked to do anything but remove
2341 * wirings.
2342 */
2343 void
2344 pmap_change_wiring(pmap, va, wire)
2345 pmap_t pmap;
2346 vm_offset_t va;
2347 boolean_t wire;
2348 {
2349 int a_idx, b_idx, c_idx;
2350 a_tmgr_t *a_tbl;
2351 b_tmgr_t *b_tbl;
2352 c_tmgr_t *c_tbl;
2353 mmu_short_pte_t *pte;
2354
2355 /* Kernel mappings always remain wired. */
2356 if (pmap == pmap_kernel())
2357 return;
2358
2359 #ifdef PMAP_DEBUG
2360 if (wire == TRUE)
2361 panic("pmap_change_wiring: wire requested.");
2362 #endif
2363
2364 /*
2365 * Walk through the tables. If the walk terminates without
2366 * a valid PTE then the address wasn't wired in the first place.
2367 * Return immediately.
2368 */
2369 if (pmap_stroll(pmap, va, &a_tbl, &b_tbl, &c_tbl, &pte, &a_idx,
2370 &b_idx, &c_idx) == FALSE)
2371 return;
2372
2373
2374 /* Is the PTE wired? If not, return. */
2375 if (!(pte->attr.raw & MMU_SHORT_PTE_WIRED))
2376 return;
2377
2378 /* Remove the wiring bit. */
2379 pte->attr.raw &= ~(MMU_SHORT_PTE_WIRED);
2380
2381 /*
2382 * Decrement the wired entry count in the C table.
2383 * If it reaches zero the following things happen:
2384 * 1. The table no longer has any wired entries and is considered
2385 * unwired.
2386 * 2. It is placed on the available queue.
2387 * 3. The parent table's wired entry count is decremented.
2388 * 4. If it reaches zero, this process repeats at step 1 and
2389 	 *    stops after reaching the A table.
2390 */
2391 if (--c_tbl->ct_wcnt == 0) {
2392 TAILQ_INSERT_TAIL(&c_pool, c_tbl, ct_link);
2393 if (--b_tbl->bt_wcnt == 0) {
2394 TAILQ_INSERT_TAIL(&b_pool, b_tbl, bt_link);
2395 if (--a_tbl->at_wcnt == 0) {
2396 TAILQ_INSERT_TAIL(&a_pool, a_tbl, at_link);
2397 }
2398 }
2399 }
2400 }
2401
2402 /* pmap_pageable INTERFACE
2403 **
2404  * Make the specified range of addresses within the given pmap
2405  * 'pageable' or 'not-pageable'. A non-pageable page must not cause
2406  * any faults when referenced; a pageable page may.
2407 *
2408 * This routine is only advisory. The VM system will call pmap_enter()
2409 * to wire or unwire pages that are going to be made pageable before calling
2410 * this function. By the time this routine is called, everything that needs
2411 * to be done has already been done.
2412 */
2413 void
2414 pmap_pageable(pmap, start, end, pageable)
2415 pmap_t pmap;
2416 vm_offset_t start, end;
2417 boolean_t pageable;
2418 {
2419 /* not implemented. */
2420 }
2421
2422 /* pmap_copy INTERFACE
2423 **
2424 * Copy the mappings of a range of addresses in one pmap, into
2425 * the destination address of another.
2426 *
2427 * This routine is advisory. Should we one day decide that MMU tables
2428 * may be shared by more than one pmap, this function should be used to
2429 * link them together. Until that day however, we do nothing.
2430 */
2431 void
2432 pmap_copy(pmap_a, pmap_b, dst, len, src)
2433 pmap_t pmap_a, pmap_b;
2434 vm_offset_t dst;
2435 vm_size_t len;
2436 vm_offset_t src;
2437 {
2438 /* not implemented. */
2439 }
2440
2441 /* pmap_copy_page INTERFACE
2442 **
2443 * Copy the contents of one physical page into another.
2444 *
2445 * This function makes use of two virtual pages allocated in pmap_bootstrap()
2446 * to map the two specified physical pages into the kernel address space.
2447 *
2448 * Note: We could use the transparent translation registers to make the
2449 * mappings. If we do so, be sure to disable interrupts before using them.
2450 */
2451 void
2452 pmap_copy_page(srcpa, dstpa)
2453 vm_offset_t srcpa, dstpa;
2454 {
2455 vm_offset_t srcva, dstva;
2456 int s;
2457
2458 srcva = tmp_vpages[0];
2459 dstva = tmp_vpages[1];
2460
2461 s = splimp();
2462 if (tmp_vpages_inuse++)
2463 panic("pmap_copy_page: temporary vpages are in use.");
2464
2465 	/* Map pages as non-cacheable to avoid cache pollution? */
2466 pmap_enter_kernel(srcva, srcpa, VM_PROT_READ);
2467 pmap_enter_kernel(dstva, dstpa, VM_PROT_READ|VM_PROT_WRITE);
2468
2469 /* Hand-optimized version of bcopy(src, dst, NBPG) */
2470 copypage((char *) srcva, (char *) dstva);
2471
2472 pmap_remove_kernel(srcva, srcva + NBPG);
2473 pmap_remove_kernel(dstva, dstva + NBPG);
2474
2475 --tmp_vpages_inuse;
2476 splx(s);
2477 }
2478
2479 /* pmap_zero_page INTERFACE
2480 **
2481 * Zero the contents of the specified physical page.
2482 *
2483  * Uses one of the virtual pages allocated in pmap_bootstrap()
2484 * to map the specified page into the kernel address space.
2485 */
2486 void
2487 pmap_zero_page(dstpa)
2488 vm_offset_t dstpa;
2489 {
2490 vm_offset_t dstva;
2491 int s;
2492
2493 dstva = tmp_vpages[1];
2494 s = splimp();
2495 if (tmp_vpages_inuse++)
2496 panic("pmap_zero_page: temporary vpages are in use.");
2497
2498 /* The comments in pmap_copy_page() above apply here also. */
2499 pmap_enter_kernel(dstva, dstpa, VM_PROT_READ|VM_PROT_WRITE);
2500
2501 /* Hand-optimized version of bzero(ptr, NBPG) */
2502 zeropage((char *) dstva);
2503
2504 pmap_remove_kernel(dstva, dstva + NBPG);
2505
2506 --tmp_vpages_inuse;
2507 splx(s);
2508 }
2509
2510 /* pmap_collect INTERFACE
2511 **
2512 * Called from the VM system when we are about to swap out
2513 * the process using this pmap. This should give up any
2514 * resources held here, including all its MMU tables.
2515 */
2516 void
2517 pmap_collect(pmap)
2518 pmap_t pmap;
2519 {
2520 /* XXX - todo... */
2521 }
2522
2523 /* pmap_create INTERFACE
2524 **
2525 * Create and return a pmap structure.
2526 */
2527 pmap_t
2528 pmap_create(size)
2529 vm_size_t size;
2530 {
2531 pmap_t pmap;
2532
2533 if (size)
2534 return NULL;
2535
2536 pmap = (pmap_t) malloc(sizeof(struct pmap), M_VMPMAP, M_WAITOK);
2537 pmap_pinit(pmap);
2538
2539 return pmap;
2540 }
2541
2542 /* pmap_pinit INTERNAL
2543 **
2544 * Initialize a pmap structure.
2545 */
2546 void
2547 pmap_pinit(pmap)
2548 pmap_t pmap;
2549 {
2550 bzero(pmap, sizeof(struct pmap));
2551 pmap->pm_a_tmgr = NULL;
2552 pmap->pm_a_phys = kernAphys;
2553 }
2554
2555 /* pmap_release INTERFACE
2556 **
2557 * Release any resources held by the given pmap.
2558 *
2559  * This is the reverse analog to pmap_pinit. It does not
2560  * necessarily mean that the pmap structure is to be deallocated,
2561  * as it is in pmap_destroy.
2562 */
2563 void
2564 pmap_release(pmap)
2565 pmap_t pmap;
2566 {
2567 /*
2568 * As long as the pmap contains no mappings,
2569 * which always should be the case whenever
2570 * this function is called, there really should
2571 * be nothing to do.
2572 */
2573 #ifdef PMAP_DEBUG
2574 if (pmap == NULL)
2575 return;
2576 if (pmap == pmap_kernel())
2577 panic("pmap_release: kernel pmap");
2578 #endif
2579 /*
2580 * XXX - If this pmap has an A table, give it back.
2581 * The pmap SHOULD be empty by now, and pmap_remove
2582 * should have already given back the A table...
2583 * However, I see: pmap->pm_a_tmgr->at_ecnt == 1
2584 * at this point, which means some mapping was not
2585 * removed when it should have been. -gwr
2586 */
2587 if (pmap->pm_a_tmgr != NULL) {
2588 /* First make sure we are not using it! */
2589 if (kernel_crp.rp_addr == pmap->pm_a_phys) {
2590 kernel_crp.rp_addr = kernAphys;
2591 loadcrp(&kernel_crp);
2592 }
2593 #ifdef PMAP_DEBUG /* XXX - todo! */
2594 /* XXX - Now complain... */
2595 printf("pmap_release: still have table\n");
2596 Debugger();
2597 #endif
2598 free_a_table(pmap->pm_a_tmgr, TRUE);
2599 pmap->pm_a_tmgr = NULL;
2600 pmap->pm_a_phys = kernAphys;
2601 }
2602 }
2603
2604 /* pmap_reference INTERFACE
2605 **
2606 * Increment the reference count of a pmap.
2607 */
2608 void
2609 pmap_reference(pmap)
2610 pmap_t pmap;
2611 {
2612 if (pmap == NULL)
2613 return;
2614
2615 /* pmap_lock(pmap); */
2616 pmap->pm_refcount++;
2617 /* pmap_unlock(pmap); */
2618 }
2619
2620 /* pmap_dereference INTERNAL
2621 **
2622 * Decrease the reference count on the given pmap
2623 * by one and return the current count.
2624 */
2625 int
2626 pmap_dereference(pmap)
2627 pmap_t pmap;
2628 {
2629 int rtn;
2630
2631 if (pmap == NULL)
2632 return 0;
2633
2634 /* pmap_lock(pmap); */
2635 rtn = --pmap->pm_refcount;
2636 /* pmap_unlock(pmap); */
2637
2638 return rtn;
2639 }
2640
2641 /* pmap_destroy INTERFACE
2642 **
2643 * Decrement a pmap's reference count and delete
2644 * the pmap if it becomes zero. Will be called
2645 * only after all mappings have been removed.
2646 */
2647 void
2648 pmap_destroy(pmap)
2649 pmap_t pmap;
2650 {
2651 if (pmap == NULL)
2652 return;
2653 if (pmap == &kernel_pmap)
2654 panic("pmap_destroy: kernel_pmap!");
2655 if (pmap_dereference(pmap) == 0) {
2656 pmap_release(pmap);
2657 free(pmap, M_VMPMAP);
2658 }
2659 }
2660
2661 /* pmap_is_referenced INTERFACE
2662 **
2663 * Determine if the given physical page has been
2664 * referenced (read from [or written to.])
2665 */
2666 boolean_t
2667 pmap_is_referenced(pa)
2668 vm_offset_t pa;
2669 {
2670 pv_t *pv;
2671 int idx, s;
2672
2673 if (!pv_initialized)
2674 return FALSE;
2675 	/* XXX - this may be unnecessary. */
2676 if (!is_managed(pa))
2677 return FALSE;
2678
2679 pv = pa2pv(pa);
2680 /*
2681 * Check the flags on the pv head. If they are set,
2682 * return immediately. Otherwise a search must be done.
2683 */
2684 if (pv->pv_flags & PV_FLAGS_USED)
2685 return TRUE;
2686 else {
2687 s = splimp();
2688 /*
2689 * Search through all pv elements pointing
2690 * to this page and query their reference bits
2691 */
2692 for (idx = pv->pv_idx; idx != PVE_EOL; idx =
2693 pvebase[idx].pve_next)
2694 if (MMU_PTE_USED(kernCbase[idx])) {
2695 splx(s);
2696 return TRUE;
2697 }
2698 splx(s);
2699 }
2700
2701 return FALSE;
2702 }
2703
2704 /* pmap_is_modified INTERFACE
2705 **
2706 * Determine if the given physical page has been
2707 * modified (written to.)
2708 */
2709 boolean_t
2710 pmap_is_modified(pa)
2711 vm_offset_t pa;
2712 {
2713 pv_t *pv;
2714 int idx, s;
2715
2716 if (!pv_initialized)
2717 return FALSE;
2718 	/* XXX - this may be unnecessary. */
2719 if (!is_managed(pa))
2720 return FALSE;
2721
2722 /* see comments in pmap_is_referenced() */
2723 pv = pa2pv(pa);
2724 if (pv->pv_flags & PV_FLAGS_MDFY) {
2725 return TRUE;
2726 } else {
2727 s = splimp();
2728 for (idx = pv->pv_idx; idx != PVE_EOL; idx =
2729 pvebase[idx].pve_next)
2730 if (MMU_PTE_MODIFIED(kernCbase[idx])) {
2731 splx(s);
2732 return TRUE;
2733 }
2734 splx(s);
2735 }
2736
2737 return FALSE;
2738 }
2739
2740 /* pmap_page_protect INTERFACE
2741 **
2742 * Applies the given protection to all mappings to the given
2743 * physical page.
2744 */
2745 void
2746 pmap_page_protect(pa, prot)
2747 vm_offset_t pa;
2748 vm_prot_t prot;
2749 {
2750 pv_t *pv;
2751 int idx, s;
2752 vm_offset_t va;
2753 struct mmu_short_pte_struct *pte;
2754 c_tmgr_t *c_tbl;
2755 pmap_t pmap, curpmap;
2756
2757 if (!is_managed(pa))
2758 return;
2759
2760 curpmap = current_pmap();
2761 pv = pa2pv(pa);
2762 s = splimp();
2763 for (idx = pv->pv_idx; idx != PVE_EOL; idx = pvebase[idx].pve_next) {
2764 pte = &kernCbase[idx];
2765 switch (prot) {
2766 case VM_PROT_ALL:
2767 /* do nothing */
2768 break;
2769 case VM_PROT_EXECUTE:
2770 case VM_PROT_READ:
2771 case VM_PROT_READ|VM_PROT_EXECUTE:
2772 pte->attr.raw |= MMU_SHORT_PTE_WP;
2773
2774 /*
2775 * Determine the virtual address mapped by
2776 * the PTE and flush ATC entries if necessary.
2777 */
2778 va = pmap_get_pteinfo(idx, &pmap, &c_tbl);
2779 if (pmap == curpmap || pmap == pmap_kernel())
2780 TBIS(va);
2781 break;
2782 case VM_PROT_NONE:
2783 /* Save the mod/ref bits. */
2784 pv->pv_flags |= pte->attr.raw;
2785 /* Invalidate the PTE. */
2786 pte->attr.raw = MMU_DT_INVALID;
2787
2788 /*
2789 			 * Update table counts and flush ATC entries
2790 			 * if necessary.
2791 */
2792 va = pmap_get_pteinfo(idx, &pmap, &c_tbl);
2793
2794 /*
2795 * If the PTE belongs to the kernel map,
2796 * be sure to flush the page it maps.
2797 */
2798 if (pmap == pmap_kernel()) {
2799 TBIS(va);
2800 } else {
2801 /*
2802 * The PTE belongs to a user map.
2803 * update the entry count in the C
2804 * table to which it belongs and flush
2805 * the ATC if the mapping belongs to
2806 * the current pmap.
2807 */
2808 c_tbl->ct_ecnt--;
2809 if (pmap == curpmap)
2810 TBIS(va);
2811 }
2812 break;
2813 default:
2814 break;
2815 }
2816 }
2817
2818 /*
2819 * If the protection code indicates that all mappings to the page
2820 * be removed, truncate the PV list to zero entries.
2821 */
2822 if (prot == VM_PROT_NONE)
2823 pv->pv_idx = PVE_EOL;
2824 splx(s);
2825 }
2826
2827 /* pmap_get_pteinfo INTERNAL
2828 **
2829  * Called internally to find the pmap, and the virtual address within that
2830  * map, to which the PTE at the given index maps. The PTE's C table
2831  * manager is also returned.
2832 *
2833 * Returns the pmap in the argument provided, and the virtual address
2834 * by return value.
2835 */
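/*
 * A worked example, assuming MMU_C_TBL_SIZE is 64 (a six-bit C table
 * index): for a user PTE with idx == NUM_KERN_PTES + 70, the owning
 * manager is Ctmgrbase[1] (70 / 64) and the returned address is that
 * table's ct_va plus m68k_ptob(6) (70 % 64), i.e. six pages past the
 * base address mapped by the C table.
 */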
2836 vm_offset_t
2837 pmap_get_pteinfo(idx, pmap, tbl)
2838 u_int idx;
2839 pmap_t *pmap;
2840 c_tmgr_t **tbl;
2841 {
2842 vm_offset_t va = 0;
2843
2844 /*
2845 * Determine if the PTE is a kernel PTE or a user PTE.
2846 */
2847 if (idx >= NUM_KERN_PTES) {
2848 /*
2849 * The PTE belongs to a user mapping.
2850 */
2851 /* XXX: Would like an inline for this to validate idx... */
2852 *tbl = &Ctmgrbase[(idx - NUM_KERN_PTES) / MMU_C_TBL_SIZE];
2853
2854 *pmap = (*tbl)->ct_pmap;
2855 /*
2856 * To find the va to which the PTE maps, we first take
2857 * the table's base virtual address mapping which is stored
2858 * in ct_va. We then increment this address by a page for
2859 * every slot skipped until we reach the PTE.
2860 */
2861 va = (*tbl)->ct_va;
2862 va += m68k_ptob(idx % MMU_C_TBL_SIZE);
2863 } else {
2864 /*
2865 * The PTE belongs to the kernel map.
2866 */
2867 *pmap = pmap_kernel();
2868
2869 va = m68k_ptob(idx);
2870 va += KERNBASE;
2871 }
2872
2873 return va;
2874 }
2875
2876 /* pmap_clear_modify INTERFACE
2877 **
2878 * Clear the modification bit on the page at the specified
2879 * physical address.
2880 *
2881 */
2882 void
2883 pmap_clear_modify(pa)
2884 vm_offset_t pa;
2885 {
2886 if (!is_managed(pa))
2887 return;
2888 pmap_clear_pv(pa, PV_FLAGS_MDFY);
2889 }
2890
2891 /* pmap_clear_reference INTERFACE
2892 **
2893 * Clear the referenced bit on the page at the specified
2894 * physical address.
2895 */
2896 void
2897 pmap_clear_reference(pa)
2898 vm_offset_t pa;
2899 {
2900 if (!is_managed(pa))
2901 return;
2902 pmap_clear_pv(pa, PV_FLAGS_USED);
2903 }
2904
2905 /* pmap_clear_pv INTERNAL
2906 **
2907 * Clears the specified flag from the specified physical address.
2908 * (Used by pmap_clear_modify() and pmap_clear_reference().)
2909 *
2910 * Flag is one of:
2911 * PV_FLAGS_MDFY - Page modified bit.
2912 * PV_FLAGS_USED - Page used (referenced) bit.
2913 *
2914 * This routine must not only clear the flag on the pv list
2915 * head. It must also clear the bit on every pte in the pv
2916 * list associated with the address.
2917 */
2918 void
2919 pmap_clear_pv(pa, flag)
2920 vm_offset_t pa;
2921 int flag;
2922 {
2923 pv_t *pv;
2924 int idx, s;
2925 vm_offset_t va;
2926 pmap_t pmap;
2927 mmu_short_pte_t *pte;
2928 c_tmgr_t *c_tbl;
2929
2930 pv = pa2pv(pa);
2931
2932 s = splimp();
2933 pv->pv_flags &= ~(flag);
2934 for (idx = pv->pv_idx; idx != PVE_EOL; idx = pvebase[idx].pve_next) {
2935 pte = &kernCbase[idx];
2936 pte->attr.raw &= ~(flag);
2937 /*
2938 * The MC68030 MMU will not set the modified or
2939 * referenced bits on any MMU tables for which it has
2940 		 * a cached descriptor with its modify bit set. To ensure
2941 * that it will modify these bits on the PTE during the next
2942 * time it is written to or read from, we must flush it from
2943 * the ATC.
2944 *
2945 * Ordinarily it is only necessary to flush the descriptor
2946 * if it is used in the current address space. But since I
2947 * am not sure that there will always be a notion of
2948 * 'the current address space' when this function is called,
2949 * I will skip the test and always flush the address. It
2950 * does no harm.
2951 */
2952 va = pmap_get_pteinfo(idx, &pmap, &c_tbl);
2953 TBIS(va);
2954 }
2955 splx(s);
2956 }
2957
2958 /* pmap_extract INTERFACE
2959 **
2960 * Return the physical address mapped by the virtual address
2961 * in the specified pmap or 0 if it is not known.
2962 *
2963 * Note: this function should also apply an exclusive lock
2964 * on the pmap system during its duration.
2965 */
2966 vm_offset_t
2967 pmap_extract(pmap, va)
2968 pmap_t pmap;
2969 vm_offset_t va;
2970 {
2971 int a_idx, b_idx, pte_idx;
2972 a_tmgr_t *a_tbl;
2973 b_tmgr_t *b_tbl;
2974 c_tmgr_t *c_tbl;
2975 mmu_short_pte_t *c_pte;
2976
2977 if (pmap == pmap_kernel())
2978 return pmap_extract_kernel(va);
2979 if (pmap == NULL)
2980 return 0;
2981
2982 if (pmap_stroll(pmap, va, &a_tbl, &b_tbl, &c_tbl,
2983 &c_pte, &a_idx, &b_idx, &pte_idx) == FALSE)
2984 return 0;
2985
2986 if (!MMU_VALID_DT(*c_pte))
2987 return 0;
2988
2989 return (MMU_PTE_PA(*c_pte));
2990 }
2991
2992 /* pmap_extract_kernel INTERNAL
2993 **
2994 * Extract a translation from the kernel address space.
2995 */
2996 vm_offset_t
2997 pmap_extract_kernel(va)
2998 vm_offset_t va;
2999 {
3000 mmu_short_pte_t *pte;
3001
3002 pte = &kernCbase[(u_int) m68k_btop(va - KERNBASE)];
3003 return MMU_PTE_PA(*pte);
3004 }
3005
3006 /* pmap_remove_kernel INTERNAL
3007 **
3008 * Remove the mapping of a range of virtual addresses from the kernel map.
3009 * The arguments are already page-aligned.
3010 */
3011 void
3012 pmap_remove_kernel(sva, eva)
3013 vm_offset_t sva;
3014 vm_offset_t eva;
3015 {
3016 int idx, eidx;
3017
3018 #ifdef PMAP_DEBUG
3019 if ((sva & PGOFSET) || (eva & PGOFSET))
3020 panic("pmap_remove_kernel: alignment");
3021 #endif
3022
3023 idx = m68k_btop(sva - KERNBASE);
3024 eidx = m68k_btop(eva - KERNBASE);
3025
3026 while (idx < eidx) {
3027 pmap_remove_pte(&kernCbase[idx++]);
3028 TBIS(sva);
3029 sva += NBPG;
3030 }
3031 }
3032
3033 /* pmap_remove INTERFACE
3034 **
3035 * Remove the mapping of a range of virtual addresses from the given pmap.
3036 *
3037 * If the range contains any wired entries, this function will probably create
3038 * disaster.
3039 */
3040 void
3041 pmap_remove(pmap, start, end)
3042 pmap_t pmap;
3043 vm_offset_t start;
3044 vm_offset_t end;
3045 {
3046
3047 if (pmap == pmap_kernel()) {
3048 pmap_remove_kernel(start, end);
3049 return;
3050 }
3051
3052 /*
3053 * XXX - Temporary(?) statement to prevent panic caused
3054 * by vm_alloc_with_pager() handing us a software map (ie NULL)
3055 * to remove because it couldn't get backing store.
3056 * (I guess.)
3057 */
3058 if (pmap == NULL)
3059 return;
3060
3061 /*
3062 * If the pmap doesn't have an A table of its own, it has no mappings
3063 * that can be removed.
3064 */
3065 if (pmap->pm_a_tmgr == NULL)
3066 return;
3067
3068 /*
3069 * Remove the specified range from the pmap. If the function
3070 * returns true, the operation removed all the valid mappings
3071 * in the pmap and freed its A table. If this happened to the
3072 * currently loaded pmap, the MMU root pointer must be reloaded
3073 * with the default 'kernel' map.
3074 */
3075 if (pmap_remove_a(pmap->pm_a_tmgr, start, end)) {
3076 if (kernel_crp.rp_addr == pmap->pm_a_phys) {
3077 kernel_crp.rp_addr = kernAphys;
3078 loadcrp(&kernel_crp);
3079 /* will do TLB flush below */
3080 }
3081 pmap->pm_a_tmgr = NULL;
3082 pmap->pm_a_phys = kernAphys;
3083 }
3084
3085 /*
3086 * If we just modified the current address space,
3087 * make sure to flush the MMU cache.
3088 *
3089 	 * XXX - this could be an unnecessarily large flush.
3090 * XXX - Could decide, based on the size of the VA range
3091 * to be removed, whether to flush "by pages" or "all".
3092 */
3093 if (pmap == current_pmap())
3094 TBIAU();
3095 }
3096
3097 /* pmap_remove_a INTERNAL
3098 **
3099 * This is function number one in a set of three that removes a range
3100 * of memory in the most efficient manner by removing the highest possible
3101 * tables from the memory space. This particular function attempts to remove
3102 * as many B tables as it can, delegating the remaining fragmented ranges to
3103 * pmap_remove_b().
3104 *
3105 * If the removal operation results in an empty A table, the function returns
3106 * TRUE.
3107 *
3108 * It's ugly but will do for now.
3109 */
3110 boolean_t
3111 pmap_remove_a(a_tbl, start, end)
3112 a_tmgr_t *a_tbl;
3113 vm_offset_t start;
3114 vm_offset_t end;
3115 {
3116 boolean_t empty;
3117 int idx;
3118 vm_offset_t nstart, nend;
3119 b_tmgr_t *b_tbl;
3120 mmu_long_dte_t *a_dte;
3121 mmu_short_dte_t *b_dte;
3122
3123 /*
3124 * The following code works with what I call a 'granularity
3125 	 * reduction algorithm'. A range of addresses will always have
3126 * the following properties, which are classified according to
3127 * how the range relates to the size of the current granularity
3128 * - an A table entry:
3129 *
3130 * 1 2 3 4
3131 * -+---+---+---+---+---+---+---+-
3132 * -+---+---+---+---+---+---+---+-
3133 *
3134 * A range will always start on a granularity boundary, illustrated
3135 * by '+' signs in the table above, or it will start at some point
3136 	 * in between granularity boundaries, as illustrated by point 1.
3137 * The first step in removing a range of addresses is to remove the
3138 * range between 1 and 2, the nearest granularity boundary. This
3139 * job is handled by the section of code governed by the
3140 * 'if (start < nstart)' statement.
3141 *
3142 	 * A range will always encompass zero or more integral granules,
3143 * illustrated by points 2 and 3. Integral granules are easy to
3144 * remove. The removal of these granules is the second step, and
3145 * is handled by the code block 'if (nstart < nend)'.
3146 *
3147 * Lastly, a range will always end on a granularity boundary,
3148 * ill. by point 3, or it will fall just beyond one, ill. by point
3149 * 4. The last step involves removing this range and is handled by
3150 * the code block 'if (nend < end)'.
3151 */
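	/*
	 * A hypothetical illustration: if MMU_TIA_RANGE were 0x40000 and
	 * the caller asked to remove [0x35000, 0xc3000), then nstart would
	 * be 0x40000 and nend 0xc0000.  The three blocks below would then
	 * remove [0x35000, 0x40000) through pmap_remove_b(), free the B
	 * tables for the whole granules [0x40000, 0xc0000), and finally
	 * remove the tail [0xc0000, 0xc3000) through pmap_remove_b() again.
	 */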
3152 nstart = MMU_ROUND_UP_A(start);
3153 nend = MMU_ROUND_A(end);
3154
3155 if (start < nstart) {
3156 /*
3157 * This block is executed if the range starts between
3158 * a granularity boundary.
3159 *
3160 * First find the DTE which is responsible for mapping
3161 * the start of the range.
3162 */
3163 idx = MMU_TIA(start);
3164 a_dte = &a_tbl->at_dtbl[idx];
3165
3166 /*
3167 * If the DTE is valid then delegate the removal of the sub
3168 * range to pmap_remove_b(), which can remove addresses at
3169 * a finer granularity.
3170 */
3171 if (MMU_VALID_DT(*a_dte)) {
3172 b_dte = mmu_ptov(a_dte->addr.raw);
3173 b_tbl = mmuB2tmgr(b_dte);
3174
3175 /*
3176 * The sub range to be removed starts at the start
3177 * of the full range we were asked to remove, and ends
3178 			 * at the lesser of:
3179 			 * 1. The end of the full range, -or-
3180 			 * 2. The start of the full range, rounded up to the
3181 			 *    nearest granularity boundary.
3182 */
3183 if (end < nstart)
3184 empty = pmap_remove_b(b_tbl, start, end);
3185 else
3186 empty = pmap_remove_b(b_tbl, start, nstart);
3187
3188 /*
3189 * If the removal resulted in an empty B table,
3190 * invalidate the DTE that points to it and decrement
3191 * the valid entry count of the A table.
3192 */
3193 if (empty) {
3194 a_dte->attr.raw = MMU_DT_INVALID;
3195 a_tbl->at_ecnt--;
3196 }
3197 }
3198 /*
3199 * If the DTE is invalid, the address range is already non-
3200 		 * existent and can simply be skipped.
3201 */
3202 }
3203 if (nstart < nend) {
3204 /*
3205 * This block is executed if the range spans a whole number
3206 * multiple of granules (A table entries.)
3207 *
3208 * First find the DTE which is responsible for mapping
3209 * the start of the first granule involved.
3210 */
3211 idx = MMU_TIA(nstart);
3212 a_dte = &a_tbl->at_dtbl[idx];
3213
3214 /*
3215 * Remove entire sub-granules (B tables) one at a time,
3216 * until reaching the end of the range.
3217 */
3218 for (; nstart < nend; a_dte++, nstart += MMU_TIA_RANGE)
3219 if (MMU_VALID_DT(*a_dte)) {
3220 /*
3221 * Find the B table manager for the
3222 * entry and free it.
3223 */
3224 b_dte = mmu_ptov(a_dte->addr.raw);
3225 b_tbl = mmuB2tmgr(b_dte);
3226 free_b_table(b_tbl, TRUE);
3227
3228 /*
3229 * Invalidate the DTE that points to the
3230 * B table and decrement the valid entry
3231 * count of the A table.
3232 */
3233 a_dte->attr.raw = MMU_DT_INVALID;
3234 a_tbl->at_ecnt--;
3235 }
3236 }
3237 if (nend < end) {
3238 /*
3239 * This block is executed if the range ends beyond a
3240 * granularity boundary.
3241 *
3242 * First find the DTE which is responsible for mapping
3243 * the start of the nearest (rounded down) granularity
3244 * boundary.
3245 */
3246 idx = MMU_TIA(nend);
3247 a_dte = &a_tbl->at_dtbl[idx];
3248
3249 /*
3250 * If the DTE is valid then delegate the removal of the sub
3251 * range to pmap_remove_b(), which can remove addresses at
3252 * a finer granularity.
3253 */
3254 if (MMU_VALID_DT(*a_dte)) {
3255 /*
3256 * Find the B table manager for the entry
3257 * and hand it to pmap_remove_b() along with
3258 * the sub range.
3259 */
3260 b_dte = mmu_ptov(a_dte->addr.raw);
3261 b_tbl = mmuB2tmgr(b_dte);
3262
3263 empty = pmap_remove_b(b_tbl, nend, end);
3264
3265 /*
3266 * If the removal resulted in an empty B table,
3267 * invalidate the DTE that points to it and decrement
3268 * the valid entry count of the A table.
3269 */
3270 if (empty) {
3271 a_dte->attr.raw = MMU_DT_INVALID;
3272 a_tbl->at_ecnt--;
3273 }
3274 }
3275 }
3276
3277 /*
3278 * If there are no more entries in the A table, release it
3279 * back to the available pool and return TRUE.
3280 */
3281 if (a_tbl->at_ecnt == 0) {
3282 a_tbl->at_parent = NULL;
3283 TAILQ_REMOVE(&a_pool, a_tbl, at_link);
3284 TAILQ_INSERT_HEAD(&a_pool, a_tbl, at_link);
3285 empty = TRUE;
3286 } else {
3287 empty = FALSE;
3288 }
3289
3290 return empty;
3291 }
3292
3293 /* pmap_remove_b INTERNAL
3294 **
3295 * Remove a range of addresses from an address space, trying to remove entire
3296 * C tables if possible.
3297 *
3298 * If the operation results in an empty B table, the function returns TRUE.
3299 */
3300 boolean_t
3301 pmap_remove_b(b_tbl, start, end)
3302 b_tmgr_t *b_tbl;
3303 vm_offset_t start;
3304 vm_offset_t end;
3305 {
3306 boolean_t empty;
3307 int idx;
3308 vm_offset_t nstart, nend, rstart;
3309 c_tmgr_t *c_tbl;
3310 mmu_short_dte_t *b_dte;
3311 mmu_short_pte_t *c_dte;
3312
3313
3314 nstart = MMU_ROUND_UP_B(start);
3315 nend = MMU_ROUND_B(end);
3316
3317 if (start < nstart) {
3318 idx = MMU_TIB(start);
3319 b_dte = &b_tbl->bt_dtbl[idx];
3320 if (MMU_VALID_DT(*b_dte)) {
3321 c_dte = mmu_ptov(MMU_DTE_PA(*b_dte));
3322 c_tbl = mmuC2tmgr(c_dte);
3323 if (end < nstart)
3324 empty = pmap_remove_c(c_tbl, start, end);
3325 else
3326 empty = pmap_remove_c(c_tbl, start, nstart);
3327 if (empty) {
3328 b_dte->attr.raw = MMU_DT_INVALID;
3329 b_tbl->bt_ecnt--;
3330 }
3331 }
3332 }
3333 if (nstart < nend) {
3334 idx = MMU_TIB(nstart);
3335 b_dte = &b_tbl->bt_dtbl[idx];
3336 rstart = nstart;
3337 while (rstart < nend) {
3338 if (MMU_VALID_DT(*b_dte)) {
3339 c_dte = mmu_ptov(MMU_DTE_PA(*b_dte));
3340 c_tbl = mmuC2tmgr(c_dte);
3341 free_c_table(c_tbl, TRUE);
3342 b_dte->attr.raw = MMU_DT_INVALID;
3343 b_tbl->bt_ecnt--;
3344 }
3345 b_dte++;
3346 rstart += MMU_TIB_RANGE;
3347 }
3348 }
3349 if (nend < end) {
3350 idx = MMU_TIB(nend);
3351 b_dte = &b_tbl->bt_dtbl[idx];
3352 if (MMU_VALID_DT(*b_dte)) {
3353 c_dte = mmu_ptov(MMU_DTE_PA(*b_dte));
3354 c_tbl = mmuC2tmgr(c_dte);
3355 empty = pmap_remove_c(c_tbl, nend, end);
3356 if (empty) {
3357 b_dte->attr.raw = MMU_DT_INVALID;
3358 b_tbl->bt_ecnt--;
3359 }
3360 }
3361 }
3362
3363 if (b_tbl->bt_ecnt == 0) {
3364 b_tbl->bt_parent = NULL;
3365 TAILQ_REMOVE(&b_pool, b_tbl, bt_link);
3366 TAILQ_INSERT_HEAD(&b_pool, b_tbl, bt_link);
3367 empty = TRUE;
3368 } else {
3369 empty = FALSE;
3370 }
3371
3372 return empty;
3373 }
3374
3375 /* pmap_remove_c INTERNAL
3376 **
3377 * Remove a range of addresses from the given C table.
3378 */
3379 boolean_t
3380 pmap_remove_c(c_tbl, start, end)
3381 c_tmgr_t *c_tbl;
3382 vm_offset_t start;
3383 vm_offset_t end;
3384 {
3385 boolean_t empty;
3386 int idx;
3387 mmu_short_pte_t *c_pte;
3388
3389 idx = MMU_TIC(start);
3390 c_pte = &c_tbl->ct_dtbl[idx];
3391 for (;start < end; start += MMU_PAGE_SIZE, c_pte++) {
3392 if (MMU_VALID_DT(*c_pte)) {
3393 pmap_remove_pte(c_pte);
3394 c_tbl->ct_ecnt--;
3395 }
3396 }
3397
3398 if (c_tbl->ct_ecnt == 0) {
3399 c_tbl->ct_parent = NULL;
3400 TAILQ_REMOVE(&c_pool, c_tbl, ct_link);
3401 TAILQ_INSERT_HEAD(&c_pool, c_tbl, ct_link);
3402 empty = TRUE;
3403 } else {
3404 empty = FALSE;
3405 }
3406
3407 return empty;
3408 }
3409
3410 /* is_managed INTERNAL
3411 **
3412 * Determine if the given physical address is managed by the PV system.
3413 * Note that this logic assumes that no one will ask for the status of
3414 * addresses which lie in-between the memory banks on the 3/80. If they
3415 * do so, it will falsely report that it is managed.
3416 *
3417 * Note: A "managed" address is one that was reported to the VM system as
3418 * a "usable page" during system startup. As such, the VM system expects the
3419  * pmap module to keep accurate track of the usage of those pages.
3420 * Any page not given to the VM system at startup does not exist (as far as
3421 * the VM system is concerned) and is therefore "unmanaged." Examples are
3422 * those pages which belong to the ROM monitor and the memory allocated before
3423 * the VM system was started.
3424 */
3425 boolean_t
3426 is_managed(pa)
3427 vm_offset_t pa;
3428 {
3429 if (pa >= avail_start && pa < avail_end)
3430 return TRUE;
3431 else
3432 return FALSE;
3433 }
3434
3435 /* pmap_bootstrap_alloc INTERNAL
3436 **
3437 * Used internally for memory allocation at startup when malloc is not
3438 * available. This code will fail once it crosses the first memory
3439 * bank boundary on the 3/80. Hopefully by then however, the VM system
3440 * will be in charge of allocation.
3441 */
3442 void *
3443 pmap_bootstrap_alloc(size)
3444 int size;
3445 {
3446 void *rtn;
3447
3448 #ifdef PMAP_DEBUG
3449 if (bootstrap_alloc_enabled == FALSE) {
3450 mon_printf("pmap_bootstrap_alloc: disabled\n");
3451 sunmon_abort();
3452 }
3453 #endif
3454
3455 rtn = (void *) virtual_avail;
3456 virtual_avail += size;
3457
3458 #ifdef PMAP_DEBUG
3459 if (virtual_avail > virtual_contig_end) {
3460 mon_printf("pmap_bootstrap_alloc: out of mem\n");
3461 sunmon_abort();
3462 }
3463 #endif
3464
3465 return rtn;
3466 }
3467
3468 /* pmap_bootstrap_aalign INTERNAL
3469 **
3470  * Used to ensure that the next call to pmap_bootstrap_alloc() will
3471 * return a chunk of memory aligned to the specified size.
3472 *
3473 * Note: This function will only support alignment sizes that are powers
3474 * of two.
3475 */
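/*
 * The power-of-two restriction is what makes the (size - 1) mask below
 * work: for example, with size == 0x2000, (size - 1) == 0x1fff, so 'off'
 * is the number of bytes by which virtual_avail overshoots the previous
 * 0x2000-byte boundary, and allocating (size - off) bytes pads it up to
 * the next such boundary.
 */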
3476 void
3477 pmap_bootstrap_aalign(size)
3478 int size;
3479 {
3480 int off;
3481
3482 off = virtual_avail & (size - 1);
3483 if (off) {
3484 (void) pmap_bootstrap_alloc(size - off);
3485 }
3486 }
3487
3488 /* pmap_pa_exists
3489 **
3490 * Used by the /dev/mem driver to see if a given PA is memory
3491 * that can be mapped. (The PA is not in a hole.)
3492 */
3493 int
3494 pmap_pa_exists(pa)
3495 vm_offset_t pa;
3496 {
3497 register int i;
3498
3499 for (i = 0; i < SUN3X_NPHYS_RAM_SEGS; i++) {
3500 if ((pa >= avail_mem[i].pmem_start) &&
3501 (pa < avail_mem[i].pmem_end))
3502 return (1);
3503 if (avail_mem[i].pmem_next == NULL)
3504 break;
3505 }
3506 return (0);
3507 }
3508
3509 /* pmap_activate INTERFACE
3510 **
3511 * This is called by locore.s:cpu_switch when we are switching to a
3512 * new process. This should load the MMU context for the new proc.
3513 *
3514 * Note: Only used when locore.s is compiled with PMAP_DEBUG.
3515 */
3516 void
3517 pmap_activate(pmap)
3518 pmap_t pmap;
3519 {
3520 u_long rootpa;
3521
3522 /* Only do reload/flush if we have to. */
3523 rootpa = pmap->pm_a_phys;
3524 if (kernel_crp.rp_addr != rootpa) {
3525 DPRINT(("pmap_activate(%p)\n", pmap));
3526 kernel_crp.rp_addr = rootpa;
3527 loadcrp(&kernel_crp);
3528 TBIAU();
3529 }
3530 }
3531
3532
3533 /* pmap_update
3534 **
3535 * Apply any delayed changes scheduled for all pmaps immediately.
3536 *
3537 * No delayed operations are currently done in this pmap.
3538 */
3539 void
3540 pmap_update()
3541 {
3542 /* not implemented. */
3543 }
3544
3545 /*
3546 * Fill in the cpu_kcore header for dumpsys()
3547 * (See machdep.c)
3548 */
3549 void
3550 pmap_set_kcore_hdr(chdr_p)
3551 cpu_kcore_hdr_t *chdr_p;
3552 {
3553 struct sun3x_kcore_hdr *sh = &chdr_p->un._sun3x;
3554 u_long spa, len;
3555 int i;
3556 extern char machine[];
3557
3558 /*
3559 * Fill in dispatch information.
3560 */
3561 strcpy(chdr_p->name, machine);
3562 chdr_p->page_size = NBPG;
3563 chdr_p->kernbase = KERNBASE;
3564
3565 sh->contig_end = virtual_contig_end;
3566 sh->kernCbase = (u_long) kernCbase;
3567 for (i = 0; i < SUN3X_NPHYS_RAM_SEGS; i++) {
3568 spa = avail_mem[i].pmem_start;
3569 spa = m68k_trunc_page(spa);
3570 len = avail_mem[i].pmem_end - spa;
3571 len = m68k_round_page(len);
3572 sh->ram_segs[i].start = spa;
3573 sh->ram_segs[i].size = len;
3574 }
3575 }
3576
3577
3578 /* pmap_virtual_space INTERFACE
3579 **
3580 * Return the current available range of virtual addresses in the
3581  * arguments provided. Only really called once.
3582 */
3583 void
3584 pmap_virtual_space(vstart, vend)
3585 vm_offset_t *vstart, *vend;
3586 {
3587 *vstart = virtual_avail;
3588 *vend = virtual_end;
3589 }
3590
3591 /* pmap_free_pages INTERFACE
3592 **
3593 * Return the number of physical pages still available.
3594 *
3595 * This is probably going to be a mess, but it's only called
3596 * once and it's the only function left that I have to implement!
3597 */
3598 u_int
3599 pmap_free_pages()
3600 {
3601 int i;
3602 u_int left;
3603 vm_offset_t avail;
3604
3605 avail = avail_next;
3606 left = 0;
3607 i = 0;
3608 while (avail >= avail_mem[i].pmem_end) {
3609 if (avail_mem[i].pmem_next == NULL)
3610 return 0;
3611 i++;
3612 }
3613 while (i < SUN3X_NPHYS_RAM_SEGS) {
3614 if (avail < avail_mem[i].pmem_start) {
3615 /* Avail is inside a hole, march it
3616 * up to the next bank.
3617 */
3618 avail = avail_mem[i].pmem_start;
3619 }
3620 left += m68k_btop(avail_mem[i].pmem_end - avail);
3621 if (avail_mem[i].pmem_next == NULL)
3622 break;
3623 i++;
3624 }
3625
3626 return left;
3627 }
3628
3629 /* pmap_page_index INTERFACE
3630 **
3631  * Return the index of the given physical page in a list of usable
3632 * physical pages in the system. Holes in physical memory may be counted
3633 * if so desired. As long as pmap_free_pages() and pmap_page_index()
3634 * agree as to whether holes in memory do or do not count as valid pages,
3635 * it really doesn't matter. However, if you like to save a little
3636 * memory, don't count holes as valid pages. This is even more true when
3637 * the holes are large.
3638 *
3639 * We will not count holes as valid pages. We can generate page indices
3640 * that conform to this by using the memory bank structures initialized
3641 * in pmap_alloc_pv().
3642 */
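/*
 * In other words (judging from the code below and pmap_alloc_pv()),
 * each bank's pmem_pvbase holds the page index at which that bank
 * begins, so a PA three pages into a bank yields pmem_pvbase + 3 and
 * the holes between banks consume no indices at all.
 */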
3643 int
3644 pmap_page_index(pa)
3645 vm_offset_t pa;
3646 {
3647 struct pmap_physmem_struct *bank = avail_mem;
3648
3649 /* Search for the memory bank with this page. */
3650 /* XXX - What if it is not physical memory? */
3651 while (pa > bank->pmem_end)
3652 bank = bank->pmem_next;
3653 pa -= bank->pmem_start;
3654
3655 return (bank->pmem_pvbase + m68k_btop(pa));
3656 }
3657
3658 /* pmap_next_page INTERFACE
3659 **
3660 * Place the physical address of the next available page in the
3661 * argument given. Returns FALSE if there are no more pages left.
3662 *
3663 * This function must jump over any holes in physical memory.
3664 * Once this function is used, any use of pmap_bootstrap_alloc()
3665 * is a sin. Sinners will be punished with erratic behavior.
3666 */
3667 boolean_t
3668 pmap_next_page(pa)
3669 vm_offset_t *pa;
3670 {
3671 static struct pmap_physmem_struct *curbank = avail_mem;
3672
3673 /* XXX - temporary ROM saving hack. */
3674 if (avail_next >= avail_end)
3675 return FALSE;
3676
	if (avail_next >= curbank->pmem_end) {
		if (curbank->pmem_next == NULL)
			return FALSE;
		curbank = curbank->pmem_next;
		avail_next = curbank->pmem_start;
	}
3684
3685 *pa = avail_next;
3686 avail_next += NBPG;
3687 return TRUE;
3688 }
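
#if 0	/* Illustrative sketch only - never compiled. */
/*
 * The calling convention for pmap_next_page() is to request pages one at
 * a time until it returns FALSE.  This hypothetical helper (not used by
 * the kernel) counts how many pages are still available through that
 * interface.  Note that calling it would actually consume the pages, so
 * it is shown purely for illustration.
 */
static u_int
pmap_count_remaining_pages()
{
	vm_offset_t pa;
	u_int n;

	n = 0;
	while (pmap_next_page(&pa))
		n++;
	return (n);
}
#endif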
3689
3690 /* pmap_count INTERFACE
3691 **
3692 * Return the number of resident (valid) pages in the given pmap.
3693 *
3694 * Note: If this function is handed the kernel map, it will report
3695 * that it has no mappings. Hopefully the VM system won't ask for kernel
3696 * map statistics.
3697 */
3698 segsz_t
3699 pmap_count(pmap, type)
3700 pmap_t pmap;
3701 int type;
3702 {
3703 u_int count;
3704 int a_idx, b_idx;
3705 a_tmgr_t *a_tbl;
3706 b_tmgr_t *b_tbl;
3707 c_tmgr_t *c_tbl;
3708
3709 /*
3710 * If the pmap does not have its own A table manager, it has no
 * valid entries.
3712 */
3713 if (pmap->pm_a_tmgr == NULL)
3714 return 0;
3715
3716 a_tbl = pmap->pm_a_tmgr;
3717
3718 count = 0;
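	/*
	 * Walk every user A table entry (indices below MMU_TIA(KERNBASE)).
	 * For each valid descriptor, locate the manager of the B table it
	 * points to, then do the same for each valid C table beneath it,
	 * summing the counts kept by the C table managers.
	 */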
3719 for (a_idx = 0; a_idx < MMU_TIA(KERNBASE); a_idx++) {
3720 if (MMU_VALID_DT(a_tbl->at_dtbl[a_idx])) {
3721 b_tbl = mmuB2tmgr(mmu_ptov(a_tbl->at_dtbl[a_idx].addr.raw));
3722 for (b_idx = 0; b_idx < MMU_B_TBL_SIZE; b_idx++) {
3723 if (MMU_VALID_DT(b_tbl->bt_dtbl[b_idx])) {
3724 c_tbl = mmuC2tmgr(
3725 mmu_ptov(MMU_DTE_PA(b_tbl->bt_dtbl[b_idx])));
3726 if (type == 0)
3727 /*
3728 * A resident entry count has been requested.
3729 */
3730 count += c_tbl->ct_ecnt;
3731 else
3732 /*
3733 * A wired entry count has been requested.
3734 */
3735 count += c_tbl->ct_wcnt;
3736 }
3737 }
3738 }
3739 }
3740
3741 return count;
3742 }
3743
3744 /************************ SUN3 COMPATIBILITY ROUTINES ********************
 * The following routines are only used by DDB for tricky kernel text    *
 * operations in db_memrw.c.  They are provided for sun3                 *
 * compatibility.                                                        *
3748 *************************************************************************/
3749 /* get_pte INTERNAL
3750 **
 * Return the page descriptor that describes the kernel mapping
3752 * of the given virtual address.
3753 */
3754 extern u_long ptest_addr __P((u_long)); /* XXX: locore.s */
3755 u_long
3756 get_pte(va)
3757 vm_offset_t va;
3758 {
3759 u_long pte_pa;
3760 mmu_short_pte_t *pte;
3761
3762 /* Get the physical address of the PTE */
3763 pte_pa = ptest_addr(va & ~PGOFSET);
3764
3765 /* Convert to a virtual address... */
3766 pte = (mmu_short_pte_t *) (KERNBASE + pte_pa);
3767
3768 /* Make sure it is in our level-C tables... */
3769 if ((pte < kernCbase) ||
3770 (pte >= &mmuCbase[NUM_USER_PTES]))
3771 return 0;
3772
3773 /* ... and just return its contents. */
3774 return (pte->attr.raw);
3775 }
3776
3777
3778 /* set_pte INTERNAL
3779 **
3780 * Set the page descriptor that describes the kernel mapping
3781 * of the given virtual address.
3782 */
3783 void
3784 set_pte(va, pte)
3785 vm_offset_t va;
3786 vm_offset_t pte;
3787 {
3788 u_long idx;
3789
3790 if (va < KERNBASE)
3791 return;
3792
3793 idx = (unsigned long) m68k_btop(va - KERNBASE);
3794 kernCbase[idx].attr.raw = pte;
3795 }
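
#if 0	/* Illustrative sketch only - never compiled. */
/*
 * db_memrw.c-style code uses get_pte()/set_pte() in a save/modify/restore
 * pattern when it needs to patch kernel text.  MMU_SHORT_PTE_WP is assumed
 * here to be the write-protect bit of a short page descriptor; a real
 * implementation would also flush the ATC and caches after each change.
 */
static void
db_patch_byte_sketch(va, val)
	vm_offset_t va;
	char val;
{
	u_long oldpte;

	oldpte = get_pte(va);				/* save current descriptor */
	set_pte(va, oldpte & ~MMU_SHORT_PTE_WP);	/* drop write protection   */
	*(char *) va = val;				/* patch the byte          */
	set_pte(va, oldpte);				/* restore original PTE    */
}
#endif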
3796
3797 #ifdef PMAP_DEBUG
3798 /************************** DEBUGGING ROUTINES **************************
3799 * The following routines are meant to be an aid to debugging the pmap *
3800 * system. They are callable from the DDB command line and should be *
3801 * prepared to be handed unstable or incomplete states of the system. *
3802 ************************************************************************/
3803
3804 /* pv_list
3805 **
3806 * List all pages found on the pv list for the given physical page.
3807 * To avoid endless loops, the listing will stop at the end of the list
3808 * or after 'n' entries - whichever comes first.
3809 */
3810 void
3811 pv_list(pa, n)
3812 vm_offset_t pa;
3813 int n;
3814 {
3815 int idx;
3816 vm_offset_t va;
3817 pv_t *pv;
3818 c_tmgr_t *c_tbl;
3819 pmap_t pmap;
3820
3821 pv = pa2pv(pa);
3822 idx = pv->pv_idx;
3823
3824 for (;idx != PVE_EOL && n > 0; idx=pvebase[idx].pve_next, n--) {
3825 va = pmap_get_pteinfo(idx, &pmap, &c_tbl);
		printf("idx %d, pmap 0x%x, va 0x%x, c_tbl 0x%x\n",
3827 idx, (u_int) pmap, (u_int) va, (u_int) c_tbl);
3828 }
3829 }
3830 #endif /* PMAP_DEBUG */
3831
3832 #ifdef NOT_YET
3833 /* and maybe not ever */
3834 /************************** LOW-LEVEL ROUTINES **************************
 * These routines will eventually be re-written into assembly and placed *
3836 * in locore.s. They are here now as stubs so that the pmap module can *
3837 * be linked as a standalone user program for testing. *
3838 ************************************************************************/
3839 /* flush_atc_crp INTERNAL
3840 **
3841 * Flush all page descriptors derived from the given CPU Root Pointer
3842 * (CRP), or 'A' table as it is known here, from the 68851's automatic
3843 * cache.
3844 */
void
flush_atc_crp(a_tbl)
	mmu_long_dte_t *a_tbl;	/* XXX: type assumed; parameter was left undeclared */
{
3848 mmu_long_rp_t rp;
3849
3850 /* Create a temporary root table pointer that points to the
3851 * given A table.
3852 */
3853 rp.attr.raw = ~MMU_LONG_RP_LU;
3854 rp.addr.raw = (unsigned int) a_tbl;
3855
3856 mmu_pflushr(&rp);
3857 /* mmu_pflushr:
3858 * movel sp(4)@,a0
3859 * pflushr a0@
3860 * rts
3861 */
3862 }
3863 #endif /* NOT_YET */
3864