/*	$NetBSD: pmap.c,v 1.44 1999/03/26 23:41:37 mycroft Exp $	*/

/*-
 * Copyright (c) 1996, 1997 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jeremy Cooper.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * XXX These comments aren't quite accurate.  Need to change.
 * The sun3x uses the MC68851 Memory Management Unit, which is built
 * into the CPU.  The 68851 maps virtual to physical addresses using
 * a multi-level table lookup, which is stored in the very memory that
 * it maps.  The number of levels of lookup is configurable from one
 * to four.  In this implementation, we use three, named 'A' through 'C'.
 *
 * The MMU translates virtual addresses into physical addresses by
 * traversing these tables in a process called a 'table walk'.  The most
 * significant 7 bits of the Virtual Address ('VA') being translated are
 * used as an index into the level A table, whose base in physical memory
 * is stored in a special MMU register, the 'CPU Root Pointer' or CRP.  The
 * address found at that index in the A table is used as the base
 * address for the next table, the B table.  The next six bits of the VA are
 * used as an index into the B table, which in turn gives the base address
 * of the third and final C table.
 *
 * The next six bits of the VA are used as an index into the C table to
 * locate a Page Table Entry (PTE).  The PTE contains the physical base
 * address of a page, to which the remaining 13 bits of the VA are added
 * as an offset, producing the mapped physical address.
 *
 * To map the entire memory space in this manner would require 2114296 bytes
 * of page tables per process - quite expensive.  Instead we will
 * allocate a fixed but considerably smaller space for the page tables at
 * the time the VM system is initialized.  When the pmap code is asked by
 * the kernel to map a VA to a PA, it allocates tables as needed from this
 * pool.  When there are no more tables in the pool, tables are stolen
 * from the oldest mapped entries in the tree.  This is only possible
 * because all memory mappings are stored in the kernel memory map
 * structures, independent of the pmap structures.  A VA which references
 * one of these invalidated maps will cause a page fault.  The kernel
 * will determine that the page fault was caused by a task using a valid
 * VA, but for some reason (which does not concern it), that address was
 * not mapped.  It will ask the pmap code to re-map the entry and then
 * it will resume executing the faulting task.
 *
 * In this manner the most efficient use of the page table space is
 * achieved.  Tasks which do not execute often will have their tables
 * stolen and reused by tasks which execute more frequently.  The best
 * size for the page table pool will probably be determined by
 * experimentation.
 *
 * You read all of the comments so far.  Good for you.
 * Now go play!
 */

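/*
 * As a concrete illustration of the 7/6/6/13 bit split described above,
 * the (unused) sketch below decodes a VA by hand.  The shift and mask
 * values here are derived only from the comment above, not from the
 * real MMU_TIA/MMU_TIB/MMU_TIC macros in pte.h, so treat them as an
 * assumption for illustration rather than authoritative values.
 */
#if 0	/* illustrative only */
static void
mmu_walk_example(va)
	vm_offset_t va;
{
	u_int a_idx, b_idx, c_idx, offset;

	a_idx  = (va >> 25) & 0x7f;	/* top 7 bits:  A table index      */
	b_idx  = (va >> 19) & 0x3f;	/* next 6 bits: B table index      */
	c_idx  = (va >> 13) & 0x3f;	/* next 6 bits: C table index      */
	offset =  va        & 0x1fff;	/* low 13 bits: offset within page */

	printf("A=%u B=%u C=%u off=0x%x\n", a_idx, b_idx, c_idx, offset);
}
#endif	/* 0 */
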
/*** A Note About the 68851 Address Translation Cache
 * The MC68851 has a 64 entry cache, called the Address Translation Cache
 * or 'ATC'.  This cache stores the most recently used page descriptors
 * accessed by the MMU when it does translations.  Using a marker called a
 * 'task alias' the MMU can store the descriptors from 8 different table
 * spaces concurrently.  The task alias is associated with the base
 * address of the level A table of that address space.  When an address
 * space is currently active (the CRP currently points to its A table)
 * the only cached descriptors that will be obeyed are ones which have a
 * matching task alias of the current space associated with them.
 *
 * Since the cache is always consulted before any table lookups are done,
 * it is important that it accurately reflect the state of the MMU tables.
 * Whenever a change has been made to a table that has been loaded into
 * the MMU, the code must be sure to flush any cached entries that are
 * affected by the change.  These instances are documented in the code at
 * various points.
 */
/*** A Note About the Note About the 68851 Address Translation Cache
 * 4 months into this code I discovered that the sun3x does not have
 * a MC68851 chip.  Instead, it has a version of this MMU that is part of
 * the 68030 CPU.
 * Although it behaves very similarly to the 68851, it only has 1 task
 * alias and a 22 entry cache.  So sadly (or happily), the first paragraph
 * of the previous note does not apply to the sun3x pmap.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/user.h>
#include <sys/queue.h>
#include <sys/kcore.h>

#include <vm/vm.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>

#include <uvm/uvm.h>

#define PAGER_SVA (uvm.pager_sva)
#define PAGER_EVA (uvm.pager_eva)

#include <machine/cpu.h>
#include <machine/kcore.h>
#include <machine/mon.h>
#include <machine/pmap.h>
#include <machine/pte.h>
#include <machine/vmparam.h>

#include <sun3/sun3/cache.h>
#include <sun3/sun3/machdep.h>

#include "pmap_pvt.h"

/* XXX - What headers declare these? */
extern struct pcb *curpcb;
extern int physmem;

extern void copypage __P((const void*, void*));
extern void zeropage __P((void*));

/* Defined in locore.s */
extern char kernel_text[];

/* Defined by the linker */
extern char etext[], edata[], end[];
extern char *esym;	/* DDB */

/*************************** DEBUGGING DEFINITIONS ***********************
 * Macros, preprocessor defines and variables used in debugging can make *
 * code hard to read.  Anything used exclusively for debugging purposes  *
 * is defined here to avoid having such mess scattered around the file.  *
 *************************************************************************/
#ifdef	PMAP_DEBUG
/*
 * To aid the debugging process, macros should be expanded into smaller steps
 * that accomplish the same goal, yet provide convenient places for placing
 * breakpoints.  When this code is compiled with PMAP_DEBUG mode defined, the
 * 'INLINE' keyword is defined to an empty string.  This way, any function
 * defined to be a 'static INLINE' will become 'outlined' and compiled as
 * a separate function, which is much easier to debug.
 */
#define	INLINE	/* nothing */

/*
 * It is sometimes convenient to watch the activity of a particular table
 * in the system.  The following variables are used for that purpose.
 */
a_tmgr_t *pmap_watch_atbl = 0;
b_tmgr_t *pmap_watch_btbl = 0;
c_tmgr_t *pmap_watch_ctbl = 0;

int pmap_debug = 0;
#define DPRINT(args) if (pmap_debug) printf args

#else	/********** Stuff below is defined if NOT debugging **************/

#define	INLINE	inline
#define DPRINT(args)	/* nada */

#endif	/* PMAP_DEBUG */
/*********************** END OF DEBUGGING DEFINITIONS ********************/

/*** Management Structure - Memory Layout
 * For every MMU table in the sun3x pmap system there must be a way to
 * manage it; we must know which process is using it, what other tables
 * depend on it, and whether or not it contains any locked pages.  This
 * is solved by the creation of 'table management' or 'tmgr'
 * structures.  One for each MMU table in the system.
 *
 *		MAP OF MEMORY USED BY THE PMAP SYSTEM
 *
 *		towards lower memory
 * kernAbase -> +-------------------------------------------------------+
 *		| Kernel     MMU A level table                          |
 * kernBbase -> +-------------------------------------------------------+
 *		| Kernel     MMU B level tables                         |
 * kernCbase -> +-------------------------------------------------------+
 *		|                                                       |
 *		| Kernel     MMU C level tables                         |
 *		|                                                       |
 * mmuCbase  -> +-------------------------------------------------------+
 *		| User       MMU C level tables                         |
 * mmuBbase  -> +-------------------------------------------------------+
 *		| User       MMU B level tables                         |
 * mmuAbase  -> +-------------------------------------------------------+
 *		|                                                       |
 *		| User       MMU A level tables                         |
 *		|                                                       |
 * tmgrAbase -> +-------------------------------------------------------+
 *		|  TMGR A level table structures                        |
 * tmgrBbase -> +-------------------------------------------------------+
 *		|  TMGR B level table structures                        |
 * tmgrCbase -> +-------------------------------------------------------+
 *		|  TMGR C level table structures                        |
 * pvbase    -> +-------------------------------------------------------+
 *		|  Physical to Virtual mapping table (list heads)       |
 * pvebase   -> +-------------------------------------------------------+
 *		|  Physical to Virtual mapping table (list elements)    |
 *		|                                                       |
 *		+-------------------------------------------------------+
 *		towards higher memory
 *
 * (The user areas appear in the order they are allocated in
 * pmap_bootstrap(): C, then B, then A tables.)
 *
 * For every A table in the MMU A area, there will be a corresponding
 * a_tmgr structure in the TMGR A area.  The same will be true for
 * the B and C tables.  This arrangement will make it easy to find the
 * controlling tmgr structure for any table in the system by use of
 * (relatively) simple macros.
 */

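/*
 * The tmgr structures themselves are defined in pmap_pvt.h.  As a
 * reading aid, the sketch below reconstructs the approximate shape of
 * a B table manager from the way its fields are used in this file;
 * the field types shown are inferred, not authoritative.
 *
 *	struct b_tmgr_struct {
 *		TAILQ_ENTRY(b_tmgr_struct) bt_link;  pool linkage
 *		a_tmgr_t        *bt_parent;  A manager above us
 *		u_int            bt_pidx;    index in parent's A table
 *		mmu_short_dte_t *bt_dtbl;    the MMU B table we manage
 *		u_int            bt_wcnt;    wired entry count
 *		u_int            bt_ecnt;    valid entry count
 *	};
 *
 * The A and C managers follow the same pattern; a C manager also
 * records its owning pmap (ct_pmap) and the base VA it maps (ct_va).
 */
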
/*
 * Global variables for storing the base addresses for the areas
 * labeled above.
 */
static vm_offset_t	kernAphys;
static mmu_long_dte_t	*kernAbase;
static mmu_short_dte_t	*kernBbase;
static mmu_short_pte_t	*kernCbase;
static mmu_short_pte_t	*mmuCbase;
static mmu_short_dte_t	*mmuBbase;
static mmu_long_dte_t	*mmuAbase;
static a_tmgr_t		*Atmgrbase;
static b_tmgr_t		*Btmgrbase;
static c_tmgr_t		*Ctmgrbase;
static pv_t		*pvbase;
static pv_elem_t	*pvebase;
struct pmap		kernel_pmap;

/*
 * This holds the CRP currently loaded into the MMU.
 */
struct mmu_rootptr kernel_crp;

/*
 * Just all around global variables.
 */
static TAILQ_HEAD(a_pool_head_struct, a_tmgr_struct) a_pool;
static TAILQ_HEAD(b_pool_head_struct, b_tmgr_struct) b_pool;
static TAILQ_HEAD(c_pool_head_struct, c_tmgr_struct) c_pool;


/*
 * Flags used to mark the safety/availability of certain operations or
 * resources.
 */
static boolean_t pv_initialized = FALSE, /* PV system has been initialized. */
	bootstrap_alloc_enabled = FALSE; /* Safe to use pmap_bootstrap_alloc(). */
int tmp_vpages_inuse;	/* Temporary virtual pages are in use */

/*
 * XXX: For now, retain the traditional variables that were
 * used in the old pmap/vm interface (without NONCONTIG).
 */
/* Kernel virtual address space available: */
vm_offset_t	virtual_avail, virtual_end;
/* Physical address space available: */
vm_offset_t	avail_start, avail_end;

/* This keeps track of the end of the contiguously mapped range. */
vm_offset_t virtual_contig_end;

/* Physical address used by pmap_next_page() */
vm_offset_t avail_next;

/* These are used by pmap_copy_page(), etc. */
vm_offset_t tmp_vpages[2];

/*
 * The 3/80 is the only member of the sun3x family that has non-contiguous
 * physical memory.  Memory is divided into 4 banks which are physically
 * locatable on the system board.  Although the size of these banks varies
 * with the size of memory they contain, their base addresses are
 * permanently fixed.  The following structure, which describes these
 * banks, is initialized by pmap_bootstrap() after it reads from a similar
 * structure provided by the ROM Monitor.
 *
 * For the other machines in the sun3x architecture which do have contiguous
 * RAM, this list will have only one entry, which will describe the entire
 * range of available memory.
 */
struct pmap_physmem_struct avail_mem[SUN3X_NPHYS_RAM_SEGS];
u_int total_phys_mem;

/*************************************************************************/

/*
 * XXX - Should "tune" these based on statistics.
 *
 * My first guess about the relative numbers of these needed is
 * based on the fact that a "typical" process will have several
 * pages mapped at low virtual addresses (text, data, bss), then
 * some mapped shared libraries, and then some stack pages mapped
 * near the high end of the VA space.  Each process can use only
 * one A table, and most will use only two B tables (maybe three)
 * and probably about four C tables.  Therefore, the first guess
 * at the relative numbers of these needed is 1:2:4 -gwr
 *
 * The number of C tables needed is closely related to the amount
 * of physical memory available plus a certain amount attributable
 * to the use of double mappings.  With a few simulation statistics
 * we can find a reasonably good estimation of this unknown value.
 * Armed with that and the above ratios, we have a good idea of what
 * is needed at each level. -j
 *
 * Note: It is not physical memory size, but the total mapped
 * virtual space required by the combined working sets of all the
 * currently _runnable_ processes.  (Sleeping ones don't count.)
 * The amount of physical memory should be irrelevant. -gwr
 */
#ifdef	FIXED_NTABLES
#define NUM_A_TABLES	16
#define NUM_B_TABLES	32
#define NUM_C_TABLES	64
#else
unsigned int	NUM_A_TABLES, NUM_B_TABLES, NUM_C_TABLES;
#endif	/* FIXED_NTABLES */

/*
 * This determines our total virtual mapping capacity.
 * Yes, it is a FIXED value so we can pre-allocate.
 */
#define NUM_USER_PTES	(NUM_C_TABLES * MMU_C_TBL_SIZE)

/*
 * The size of the Kernel Virtual Address Space (KVAS)
 * for purposes of MMU table allocation is -KERNBASE
 * (length from KERNBASE to 0xFFFFffff)
 */
#define KVAS_SIZE		(-KERNBASE)

/* Numbers of kernel MMU tables to support KVAS_SIZE. */
#define KERN_B_TABLES	(KVAS_SIZE >> MMU_TIA_SHIFT)
#define KERN_C_TABLES	(KVAS_SIZE >> MMU_TIB_SHIFT)
#define	NUM_KERN_PTES	(KVAS_SIZE >> MMU_TIC_SHIFT)

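/*
 * A worked example of the three macros above, assuming a hypothetical
 * KERNBASE of 0xF8000000 (the real value comes from vmparam.h):
 * KVAS_SIZE = 0x08000000, i.e. 128MB.  With the 7/6/6/13 index split
 * described at the top of this file, MMU_TIA_SHIFT would be 25,
 * MMU_TIB_SHIFT 19 and MMU_TIC_SHIFT 13, giving 4 kernel B tables,
 * 256 kernel C tables and 16384 kernel PTEs (16384 * 8KB = 128MB,
 * as expected).
 */
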
/*************************** MISCELLANEOUS MACROS *************************/
#define PMAP_LOCK()	;	/* Nothing, for now */
#define PMAP_UNLOCK()	;	/* same. */
#define	NULL	0

static INLINE void *		mmu_ptov __P((vm_offset_t pa));
static INLINE vm_offset_t	mmu_vtop __P((void *va));

#if	0
static INLINE a_tmgr_t *	mmuA2tmgr __P((mmu_long_dte_t *));
#endif /* 0 */
static INLINE b_tmgr_t *	mmuB2tmgr __P((mmu_short_dte_t *));
static INLINE c_tmgr_t *	mmuC2tmgr __P((mmu_short_pte_t *));

static INLINE pv_t *	pa2pv __P((vm_offset_t pa));
static INLINE int	pteidx __P((mmu_short_pte_t *));
static INLINE pmap_t	current_pmap __P((void));

/*
 * We can always convert between virtual and physical addresses
 * for anything in the range [KERNBASE ... virtual_contig_end)
 * because that range is GUARANTEED to be mapped linearly.
 * We rely heavily upon this feature!
 */
static INLINE void *
mmu_ptov(pa)
	vm_offset_t pa;
{
	register vm_offset_t va;

	va = (pa + KERNBASE);
#ifdef	PMAP_DEBUG
	if ((va < KERNBASE) || (va >= virtual_contig_end))
		panic("mmu_ptov");
#endif
	return ((void*)va);
}

static INLINE vm_offset_t
mmu_vtop(vva)
	void *vva;
{
	register vm_offset_t va;

	va = (vm_offset_t)vva;
#ifdef	PMAP_DEBUG
	if ((va < KERNBASE) || (va >= virtual_contig_end))
		panic("mmu_vtop");
#endif
	return (va - KERNBASE);
}

/*
 * These functions map MMU tables to their corresponding manager structures.
 * They are needed quite often because many of the pointers in the pmap
 * system reference MMU tables and not the structures that control them.
 * There needs to be a way to find one when given the other and these
 * functions do so by taking advantage of the memory layout described above.
 * Here's a quick step through the first one, mmuA2tmgr():
 *
 * 1) find the offset of the given MMU A table from the base of its table
 *    pool (table - mmuAbase).
 * 2) convert this offset into a table index by dividing it by the
 *    size of one MMU 'A' table. (sizeof(mmu_long_dte_t) * MMU_A_TBL_SIZE)
 * 3) use this index to select the corresponding 'A' table manager
 *    structure from the 'A' table manager pool (Atmgrbase[index]).
 */
/* This function is not currently used. */
#if	0
static INLINE a_tmgr_t *
mmuA2tmgr(mmuAtbl)
	mmu_long_dte_t *mmuAtbl;
{
	register int idx;

	/* Which table is this in? */
	idx = (mmuAtbl - mmuAbase) / MMU_A_TBL_SIZE;
#ifdef	PMAP_DEBUG
	if ((idx < 0) || (idx >= NUM_A_TABLES))
		panic("mmuA2tmgr");
#endif
	return (&Atmgrbase[idx]);
}
#endif	/* 0 */

static INLINE b_tmgr_t *
mmuB2tmgr(mmuBtbl)
	mmu_short_dte_t *mmuBtbl;
{
	register int idx;

	/* Which table is this in? */
	idx = (mmuBtbl - mmuBbase) / MMU_B_TBL_SIZE;
#ifdef	PMAP_DEBUG
	if ((idx < 0) || (idx >= NUM_B_TABLES))
		panic("mmuB2tmgr");
#endif
	return (&Btmgrbase[idx]);
}

/* mmuC2tmgr			INTERNAL
 **
 * Given a pte known to belong to a C table, return the address of
 * that table's management structure.
 */
static INLINE c_tmgr_t *
mmuC2tmgr(mmuCtbl)
	mmu_short_pte_t *mmuCtbl;
{
	register int idx;

	/* Which table is this in? */
	idx = (mmuCtbl - mmuCbase) / MMU_C_TBL_SIZE;
#ifdef	PMAP_DEBUG
	if ((idx < 0) || (idx >= NUM_C_TABLES))
		panic("mmuC2tmgr");
#endif
	return (&Ctmgrbase[idx]);
}

/* This is now a function call below.
 * #define pa2pv(pa) \
 *	(&pvbase[(unsigned long)\
 *		m68k_btop(pa)\
 *	])
 */

/* pa2pv			INTERNAL
 **
 * Return the pv_list_head element which manages the given physical
 * address.
 */
static INLINE pv_t *
pa2pv(pa)
	vm_offset_t pa;
{
	register struct pmap_physmem_struct *bank;
	register int idx;

	bank = &avail_mem[0];
	while (pa >= bank->pmem_end)
		bank = bank->pmem_next;

	pa -= bank->pmem_start;
	idx = bank->pmem_pvbase + m68k_btop(pa);
#ifdef	PMAP_DEBUG
	if ((idx < 0) || (idx >= physmem))
		panic("pa2pv");
#endif
	return &pvbase[idx];
}

/* pteidx			INTERNAL
 **
 * Return the index of the given PTE within the entire fixed table of
 * PTEs.
 */
static INLINE int
pteidx(pte)
	mmu_short_pte_t *pte;
{
	return (pte - kernCbase);
}

/*
 * This just offers a place to put some debugging checks,
 * and reduces the number of places "curproc" appears...
 */
static INLINE pmap_t
current_pmap()
{
	struct proc *p;
	struct vmspace *vm;
	vm_map_t map;
	pmap_t	pmap;

	p = curproc;	/* XXX */
	if (p == NULL)
		pmap = &kernel_pmap;
	else {
		vm = p->p_vmspace;
		map = &vm->vm_map;
		pmap = vm_map_pmap(map);
	}

	return (pmap);
}


/*************************** FUNCTION DECLARATIONS ***********************
 * These appear here merely for the compiler to enforce type checking on *
 * all function calls.                                                   *
 *************************************************************************/

/** External functions
 ** - functions used within this module but written elsewhere.
 ** These functions are in locore.s
 ** XXX - These functions were later replaced with their more cryptic
 ** hp300 counterparts.  They may be removed now.
 **/
#if 0	/* deprecated mmu */
void mmu_seturp __P((vm_offset_t));
void mmu_flush __P((int, vm_offset_t));
void mmu_flusha __P((void));
#endif	/* 0 */

/** Internal functions
 ** Most functions used only within this module are defined in
 ** pmap_pvt.h (why not here if used only here?)
 **/
static void pmap_page_upload __P((void));

/** Interface functions
 ** - functions required by the Mach VM Pmap interface, with MACHINE_CONTIG
 **   defined.
 **/
#ifdef INCLUDED_IN_PMAP_H
void pmap_bootstrap __P((void));
void *pmap_bootstrap_alloc __P((int));
void pmap_enter __P((pmap_t, vm_offset_t, vm_offset_t, vm_prot_t, boolean_t,
	vm_prot_t));
pmap_t pmap_create __P((vm_size_t));
void pmap_destroy __P((pmap_t));
void pmap_reference __P((pmap_t));
boolean_t pmap_is_referenced __P((vm_offset_t));
boolean_t pmap_is_modified __P((vm_offset_t));
void pmap_clear_modify __P((vm_offset_t));
vm_offset_t pmap_extract __P((pmap_t, vm_offset_t));
u_int pmap_free_pages __P((void));
#endif	/* INCLUDED_IN_PMAP_H */
int pmap_page_index __P((vm_offset_t));
void pmap_pinit __P((pmap_t));
void pmap_release __P((pmap_t));

/********************************** CODE ********************************
 * Functions that are called from other parts of the kernel are labeled *
 * as 'INTERFACE' functions.  Functions that are only called from       *
 * within the pmap module are labeled as 'INTERNAL' functions.          *
 * Functions that are internal, but are not (currently) used at all are *
 * labeled 'INTERNAL_X'.                                                *
 ************************************************************************/

/* pmap_bootstrap			INTERNAL
 **
 * Initializes the pmap system.  Called at boot time from
 * locore2.c:_vm_init()
 *
 * Reminder: having a pmap_bootstrap_alloc() and also having the VM
 *           system implement pmap_steal_memory() is redundant.
 *           Don't release this code without removing one or the other!
 */
void
pmap_bootstrap(nextva)
	vm_offset_t nextva;
{
	struct physmemory *membank;
	struct pmap_physmem_struct *pmap_membank;
	vm_offset_t va, pa, eva;
	int b, c, i, j;	/* running table counts */
	int size, resvmem;

	/*
	 * This function is called by __bootstrap after it has
	 * determined the type of machine and made the appropriate
	 * patches to the ROM vectors (XXX- I don't quite know what I meant
	 * by that.)  It allocates and sets up enough of the pmap system
	 * to manage the kernel's address space.
	 */

	/*
	 * Determine the range of kernel virtual and physical
	 * space available.  Note that we ABSOLUTELY DEPEND on
	 * the fact that the first bank of memory (4MB) is
	 * mapped linearly to KERNBASE (which we guaranteed in
	 * the first instructions of locore.s).
	 * That is plenty for our bootstrap work.
	 */
	virtual_avail = m68k_round_page(nextva);
	virtual_contig_end = KERNBASE + 0x400000; /* +4MB */
	virtual_end = VM_MAX_KERNEL_ADDRESS;
	/* Don't need avail_start till later. */

	/* We may now call pmap_bootstrap_alloc(). */
	bootstrap_alloc_enabled = TRUE;

	/*
	 * This is a somewhat unwrapped loop to deal with
	 * copying the PROM's 'physmem' banks into the pmap's
	 * banks.  The following is always assumed:
	 * 1. There is always at least one bank of memory.
	 * 2. There is always a last bank of memory, and its
	 *    pmem_next member must be set to NULL.
	 */
	membank = romVectorPtr->v_physmemory;
	pmap_membank = avail_mem;
	total_phys_mem = 0;

	for (;;) { /* break on !membank */
		pmap_membank->pmem_start = membank->address;
		pmap_membank->pmem_end = membank->address + membank->size;
		total_phys_mem += membank->size;
		membank = membank->next;
		if (!membank)
			break;
		/* This silly syntax arises because pmap_membank
		 * is really a pre-allocated array, but it is put into
		 * use as a linked list.
		 */
		pmap_membank->pmem_next = pmap_membank + 1;
		pmap_membank = pmap_membank->pmem_next;
	}
	/* This is the last element. */
	pmap_membank->pmem_next = NULL;

	/*
	 * Note: total_phys_mem, physmem represent
	 * actual physical memory, including that
	 * reserved for the PROM monitor.
	 */
	physmem = btoc(total_phys_mem);

	/*
	 * The last bank of memory should be reduced to prevent the
	 * physical pages needed by the PROM monitor from being used
	 * in the VM system.
	 */
	resvmem = total_phys_mem - *(romVectorPtr->memoryAvail);
	resvmem = m68k_round_page(resvmem);
	pmap_membank->pmem_end -= resvmem;

	/*
	 * Avail_end is set to the first byte of physical memory
	 * after the end of the last bank.  We use this only to
	 * determine if a physical address is "managed" memory.
	 */
	avail_end = pmap_membank->pmem_end;

	/*
	 * First allocate enough kernel MMU tables to map all
	 * of kernel virtual space from KERNBASE to 0xFFFFFFFF.
	 * Note: All must be aligned on 256 byte boundaries.
	 * Start with the level-A table (one of those).
	 */
	size = sizeof(mmu_long_dte_t) * MMU_A_TBL_SIZE;
	kernAbase = pmap_bootstrap_alloc(size);
	bzero(kernAbase, size);

	/* Now the level-B kernel tables... */
	size = sizeof(mmu_short_dte_t) * MMU_B_TBL_SIZE * KERN_B_TABLES;
	kernBbase = pmap_bootstrap_alloc(size);
	bzero(kernBbase, size);

	/* Now the level-C kernel tables... */
	size = sizeof(mmu_short_pte_t) * MMU_C_TBL_SIZE * KERN_C_TABLES;
	kernCbase = pmap_bootstrap_alloc(size);
	bzero(kernCbase, size);
	/*
	 * Note: In order for the PV system to work correctly, the kernel
	 * and user-level C tables must be allocated contiguously.
	 * Nothing should be allocated between here and the allocation of
	 * mmuCbase below.  XXX: Should do this as one allocation, and
	 * then compute a pointer for mmuCbase instead of this...
	 *
	 * Allocate user MMU tables.
	 * These must be contiguous with the preceding.
	 */

#ifndef	FIXED_NTABLES
	/*
	 * The number of user-level C tables that should be allocated is
	 * related to the size of physical memory.  In general, there should
	 * be enough tables to map four times the amount of available RAM.
	 * The extra amount is needed because some table space is wasted by
	 * fragmentation.
	 */
	NUM_C_TABLES = (total_phys_mem * 4) / (MMU_C_TBL_SIZE * MMU_PAGE_SIZE);
	NUM_B_TABLES = NUM_C_TABLES / 2;
	NUM_A_TABLES = NUM_B_TABLES / 2;
#endif	/* !FIXED_NTABLES */

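	/*
	 * A worked example of the sizing heuristic above: on a
	 * hypothetical 16MB machine, with 8KB pages and 64-entry C
	 * tables (the 13-bit page offset and 6-bit C index described
	 * at the top of this file), one C table maps 64 * 8KB = 512KB,
	 * so NUM_C_TABLES = (16MB * 4) / 512KB = 128, NUM_B_TABLES = 64
	 * and NUM_A_TABLES = 32.
	 */
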
	size = sizeof(mmu_short_pte_t) * MMU_C_TBL_SIZE * NUM_C_TABLES;
	mmuCbase = pmap_bootstrap_alloc(size);

	size = sizeof(mmu_short_dte_t) * MMU_B_TBL_SIZE * NUM_B_TABLES;
	mmuBbase = pmap_bootstrap_alloc(size);

	size = sizeof(mmu_long_dte_t) * MMU_A_TBL_SIZE * NUM_A_TABLES;
	mmuAbase = pmap_bootstrap_alloc(size);

	/*
	 * Fill in the never-changing part of the kernel tables.
	 * For simplicity, the kernel's mappings will be editable as a
	 * flat array of page table entries at kernCbase.  The
	 * higher level 'A' and 'B' tables must be initialized to point
	 * to this lower one.
	 */
	b = c = 0;

	/*
	 * Invalidate all mappings below KERNBASE in the A table.
	 * This area has already been zeroed out, but it is good
	 * practice to explicitly show that we are interpreting
	 * it as a list of A table descriptors.
	 */
	for (i = 0; i < MMU_TIA(KERNBASE); i++) {
		kernAbase[i].addr.raw = 0;
	}

	/*
	 * Set up the kernel A and B tables so that they will reference the
	 * correct spots in the contiguous table of PTEs allocated for the
	 * kernel's virtual memory space.
	 */
	for (i = MMU_TIA(KERNBASE); i < MMU_A_TBL_SIZE; i++) {
		kernAbase[i].attr.raw =
		    MMU_LONG_DTE_LU | MMU_LONG_DTE_SUPV | MMU_DT_SHORT;
		kernAbase[i].addr.raw = mmu_vtop(&kernBbase[b]);

		for (j = 0; j < MMU_B_TBL_SIZE; j++) {
			kernBbase[b + j].attr.raw = mmu_vtop(&kernCbase[c])
			    | MMU_DT_SHORT;
			c += MMU_C_TBL_SIZE;
		}
		b += MMU_B_TBL_SIZE;
	}

	/* XXX - Doing kernel_pmap a little further down. */

	pmap_alloc_usermmu();	/* Allocate user MMU tables.        */
	pmap_alloc_usertmgr();	/* Allocate user MMU table managers.*/
	pmap_alloc_pv();	/* Allocate physical->virtual map.  */

	/*
	 * We are now done with pmap_bootstrap_alloc().  Round up
	 * `virtual_avail' to the nearest page, and set the flag
	 * to prevent use of pmap_bootstrap_alloc() hereafter.
	 */
	pmap_bootstrap_aalign(NBPG);
	bootstrap_alloc_enabled = FALSE;

	/*
	 * Now that we are done with pmap_bootstrap_alloc(), we
	 * must save the virtual and physical addresses of the
	 * end of the linearly mapped range, which are stored in
	 * virtual_contig_end and avail_start, respectively.
	 * These variables will never change after this point.
	 */
	virtual_contig_end = virtual_avail;
	avail_start = virtual_avail - KERNBASE;

	/*
	 * `avail_next' is a running pointer used by pmap_next_page() to
	 * keep track of the next available physical page to be handed
	 * to the VM system during its initialization, in which it
	 * asks for physical pages, one at a time.
	 */
	avail_next = avail_start;

	/*
	 * Now allocate some virtual addresses, but not the physical pages
	 * behind them.  Note that virtual_avail is already page-aligned.
	 *
	 * tmp_vpages[] is an array of two virtual pages used for temporary
	 * kernel mappings in the pmap module to facilitate various physical
	 * address-oriented operations.
	 */
	tmp_vpages[0] = virtual_avail;
	virtual_avail += NBPG;
	tmp_vpages[1] = virtual_avail;
	virtual_avail += NBPG;

	/** Initialize the PV system **/
	pmap_init_pv();

	/*
	 * Fill in the kernel_pmap structure and kernel_crp.
	 */
	kernAphys = mmu_vtop(kernAbase);
	kernel_pmap.pm_a_tmgr = NULL;
	kernel_pmap.pm_a_phys = kernAphys;
	kernel_pmap.pm_refcount = 1; /* always in use */

	kernel_crp.rp_attr = MMU_LONG_DTE_LU | MMU_DT_LONG;
	kernel_crp.rp_addr = kernAphys;

	/*
	 * Now pmap_enter_kernel() may be used safely and will be
	 * the main interface used hereafter to modify the kernel's
	 * virtual address space.  Note that since we are still running
	 * under the PROM's address table, none of these table modifications
	 * actually take effect until pmap_takeover_mmu() is called.
	 *
	 * Note: Our tables do NOT have the PROM linear mappings!
	 * Only the mappings created here exist in our tables, so
	 * remember to map anything we expect to use.
	 */
	va = (vm_offset_t) KERNBASE;
	pa = 0;

	/*
	 * The first page of the kernel virtual address space is the msgbuf
	 * page.  The page attributes (data, non-cached) are set here, while
	 * the address is assigned to this global pointer in cpu_startup().
	 * It is non-cached, mostly due to paranoia.
	 */
	pmap_enter_kernel(va, pa|PMAP_NC, VM_PROT_ALL);
	va += NBPG; pa += NBPG;

	/* Next page is used as the temporary stack. */
	pmap_enter_kernel(va, pa, VM_PROT_ALL);
	va += NBPG; pa += NBPG;

	/*
	 * Map all of the kernel's text segment as read-only and cacheable.
	 * (Cacheable is implied by default).  Unfortunately, the last bytes
	 * of kernel text and the first bytes of kernel data will often be
	 * sharing the same page.  Therefore, the last page of kernel text
	 * has to be mapped as read/write, to accommodate the data.
	 */
	eva = m68k_trunc_page((vm_offset_t)etext);
	for (; va < eva; va += NBPG, pa += NBPG)
		pmap_enter_kernel(va, pa, VM_PROT_READ|VM_PROT_EXECUTE);

	/*
	 * Map all of the kernel's data as read/write and cacheable.
	 * This includes: data, BSS, symbols, and everything in the
	 * contiguous memory used by pmap_bootstrap_alloc()
	 */
	for (; pa < avail_start; va += NBPG, pa += NBPG)
		pmap_enter_kernel(va, pa, VM_PROT_READ|VM_PROT_WRITE);

	/*
	 * At this point we are almost ready to take over the MMU.  But first
	 * we must save the PROM's address space in our map, as we call its
	 * routines and make references to its data later in the kernel.
	 */
	pmap_bootstrap_copyprom();
	pmap_takeover_mmu();
	pmap_bootstrap_setprom();

	/* Notify the VM system of our page size. */
	PAGE_SIZE = NBPG;
	uvm_setpagesize();

	pmap_page_upload();
}


/* pmap_alloc_usermmu			INTERNAL
 **
 * Called from pmap_bootstrap() to allocate MMU tables that will
 * eventually be used for user mappings.
 */
void
pmap_alloc_usermmu()
{
	/* XXX: Moved into caller. */
}

/* pmap_alloc_pv			INTERNAL
 **
 * Called from pmap_bootstrap() to allocate the physical
 * to virtual mapping list.  Each physical page of memory
 * in the system has a corresponding element in this list.
 */
void
pmap_alloc_pv()
{
	int i;
	unsigned int total_mem;

	/*
	 * Allocate a pv_head structure for every page of physical
	 * memory that will be managed by the system.  Since memory on
	 * the 3/80 is non-contiguous, we cannot arrive at a total page
	 * count by subtraction of the lowest available address from the
	 * highest, but rather we have to step through each memory
	 * bank and add the number of pages in each to the total.
	 *
	 * At this time we also initialize the offset of each bank's
	 * starting pv_head within the pv_head list so that the physical
	 * memory state routines (pmap_is_referenced(),
	 * pmap_is_modified(), et al.) can quickly find corresponding
	 * pv_heads in spite of the non-contiguity.
	 */
	total_mem = 0;
	for (i = 0; i < SUN3X_NPHYS_RAM_SEGS; i++) {
		avail_mem[i].pmem_pvbase = m68k_btop(total_mem);
		total_mem += avail_mem[i].pmem_end -
		    avail_mem[i].pmem_start;
		if (avail_mem[i].pmem_next == NULL)
			break;
	}
	pvbase = (pv_t *) pmap_bootstrap_alloc(sizeof(pv_t) *
	    m68k_btop(total_phys_mem));
}

/* pmap_alloc_usertmgr			INTERNAL
 **
 * Called from pmap_bootstrap() to allocate the structures which
 * facilitate management of user MMU tables.  Each user MMU table
 * in the system has one such structure associated with it.
 */
void
pmap_alloc_usertmgr()
{
	/* Allocate user MMU table managers */
	/* It would be a lot simpler to just make these BSS, but */
	/* we may want to change their size at boot time... -j */
	Atmgrbase = (a_tmgr_t *) pmap_bootstrap_alloc(sizeof(a_tmgr_t)
	    * NUM_A_TABLES);
	Btmgrbase = (b_tmgr_t *) pmap_bootstrap_alloc(sizeof(b_tmgr_t)
	    * NUM_B_TABLES);
	Ctmgrbase = (c_tmgr_t *) pmap_bootstrap_alloc(sizeof(c_tmgr_t)
	    * NUM_C_TABLES);

	/*
	 * Allocate PV list elements for the physical to virtual
	 * mapping system.
	 */
	pvebase = (pv_elem_t *) pmap_bootstrap_alloc(
	    sizeof(pv_elem_t) * (NUM_USER_PTES + NUM_KERN_PTES));
}

/* pmap_bootstrap_copyprom()			INTERNAL
 **
 * Copy the PROM mappings into our own tables.  Note, we
 * can use physical addresses until __bootstrap returns.
 */
void
pmap_bootstrap_copyprom()
{
	struct sunromvec *romp;
	int *mon_ctbl;
	mmu_short_pte_t *kpte;
	int i, len;

	romp = romVectorPtr;

	/*
	 * Copy the mappings in SUN3X_MON_KDB_BASE...SUN3X_MONEND
	 * Note: mon_ctbl[0] maps SUN3X_MON_KDB_BASE
	 */
	mon_ctbl = *romp->monptaddr;
	i = m68k_btop(SUN3X_MON_KDB_BASE - KERNBASE);
	kpte = &kernCbase[i];
	len = m68k_btop(SUN3X_MONEND - SUN3X_MON_KDB_BASE);

	for (i = 0; i < len; i++) {
		kpte[i].attr.raw = mon_ctbl[i];
	}

	/*
	 * Copy the mappings at MON_DVMA_BASE (to the end).
	 * Note, in here, mon_ctbl[0] maps MON_DVMA_BASE.
	 * Actually, we only want the last page, which the
	 * PROM has set up for use by the "ie" driver.
	 * (The i82586 needs its SCP there.)
	 * If we copy all the mappings, pmap_enter_kernel
	 * may complain about finding valid PTEs that are
	 * not recorded in our PV lists...
	 */
	mon_ctbl = *romp->shadowpteaddr;
	i = m68k_btop(SUN3X_MON_DVMA_BASE - KERNBASE);
	kpte = &kernCbase[i];
	len = m68k_btop(SUN3X_MON_DVMA_SIZE);
	for (i = (len - 1); i < len; i++) {
		kpte[i].attr.raw = mon_ctbl[i];
	}
}

/* pmap_takeover_mmu			INTERNAL
 **
 * Called from pmap_bootstrap() after it has copied enough of the
 * PROM mappings into the kernel map so that we can use our own
 * MMU table.
 */
void
pmap_takeover_mmu()
{

	loadcrp(&kernel_crp);
}

/* pmap_bootstrap_setprom()			INTERNAL
 **
 * Set the PROM mappings so it can see kernel space.
 * Note that physical addresses are used here, which
 * we can get away with because this runs with the
 * low 1GB set for transparent translation.
 */
void
pmap_bootstrap_setprom()
{
	mmu_long_dte_t *mon_dte;
	extern struct mmu_rootptr mon_crp;
	int i;

	mon_dte = (mmu_long_dte_t *) mon_crp.rp_addr;
	for (i = MMU_TIA(KERNBASE); i < MMU_TIA(KERN_END); i++) {
		mon_dte[i].attr.raw = kernAbase[i].attr.raw;
		mon_dte[i].addr.raw = kernAbase[i].addr.raw;
	}
}


/* pmap_init			INTERFACE
 **
 * Called at the end of vm_init() to set up the pmap system to go
 * into full time operation.  All initialization of kernel_pmap
 * should be already done by now, so this should just do things
 * needed for user-level pmaps to work.
 */
void
pmap_init()
{
	/** Initialize the manager pools **/
	TAILQ_INIT(&a_pool);
	TAILQ_INIT(&b_pool);
	TAILQ_INIT(&c_pool);

	/**************************************************************
	 * Initialize all tmgr structures and MMU tables they manage. *
	 **************************************************************/
	/** Initialize A tables **/
	pmap_init_a_tables();
	/** Initialize B tables **/
	pmap_init_b_tables();
	/** Initialize C tables **/
	pmap_init_c_tables();
}

/* pmap_init_a_tables()			INTERNAL
 **
 * Initializes all A managers, their MMU A tables, and inserts
 * them into the A manager pool for use by the system.
 */
void
pmap_init_a_tables()
{
	int i;
	a_tmgr_t *a_tbl;

	for (i = 0; i < NUM_A_TABLES; i++) {
		/* Select the next available A manager from the pool */
		a_tbl = &Atmgrbase[i];

		/*
		 * Clear its parent entry.  Set its wired and valid
		 * entry count to zero.
		 */
		a_tbl->at_parent = NULL;
		a_tbl->at_wcnt = a_tbl->at_ecnt = 0;

		/* Assign it the next available MMU A table from the pool */
		a_tbl->at_dtbl = &mmuAbase[i * MMU_A_TBL_SIZE];

		/*
		 * Initialize the MMU A table with the table in the `proc0',
		 * or kernel, mapping.  This ensures that every process has
		 * the kernel mapped in the top part of its address space.
		 */
		bcopy(kernAbase, a_tbl->at_dtbl, MMU_A_TBL_SIZE *
		    sizeof(mmu_long_dte_t));

		/*
		 * Finally, insert the manager into the A pool,
		 * making it ready to be used by the system.
		 */
		TAILQ_INSERT_TAIL(&a_pool, a_tbl, at_link);
	}
}

/* pmap_init_b_tables()			INTERNAL
 **
 * Initializes all B table managers, their MMU B tables, and
 * inserts them into the B manager pool for use by the system.
 */
void
pmap_init_b_tables()
{
	int i, j;
	b_tmgr_t *b_tbl;

	for (i = 0; i < NUM_B_TABLES; i++) {
		/* Select the next available B manager from the pool */
		b_tbl = &Btmgrbase[i];

		b_tbl->bt_parent = NULL;	/* clear its parent,  */
		b_tbl->bt_pidx = 0;		/* parent index,      */
		b_tbl->bt_wcnt = 0;		/* wired entry count, */
		b_tbl->bt_ecnt = 0;		/* valid entry count. */

		/* Assign it the next available MMU B table from the pool */
		b_tbl->bt_dtbl = &mmuBbase[i * MMU_B_TBL_SIZE];

		/* Invalidate every descriptor in the table */
		for (j = 0; j < MMU_B_TBL_SIZE; j++)
			b_tbl->bt_dtbl[j].attr.raw = MMU_DT_INVALID;

		/* Insert the manager into the B pool */
		TAILQ_INSERT_TAIL(&b_pool, b_tbl, bt_link);
	}
}

/* pmap_init_c_tables()			INTERNAL
 **
 * Initializes all C table managers, their MMU C tables, and
 * inserts them into the C manager pool for use by the system.
 */
void
pmap_init_c_tables()
{
	int i, j;
	c_tmgr_t *c_tbl;

	for (i = 0; i < NUM_C_TABLES; i++) {
		/* Select the next available C manager from the pool */
		c_tbl = &Ctmgrbase[i];

		c_tbl->ct_parent = NULL;	/* clear its parent,     */
		c_tbl->ct_pidx = 0;		/* parent index,         */
		c_tbl->ct_wcnt = 0;		/* wired entry count,    */
		c_tbl->ct_ecnt = 0;		/* valid entry count,    */
		c_tbl->ct_pmap = NULL;		/* parent pmap,          */
		c_tbl->ct_va = 0;		/* base of managed range */

		/* Assign it the next available MMU C table from the pool */
		c_tbl->ct_dtbl = &mmuCbase[i * MMU_C_TBL_SIZE];

		for (j = 0; j < MMU_C_TBL_SIZE; j++)
			c_tbl->ct_dtbl[j].attr.raw = MMU_DT_INVALID;

		TAILQ_INSERT_TAIL(&c_pool, c_tbl, ct_link);
	}
}

/* pmap_init_pv()			INTERNAL
 **
 * Initializes the Physical to Virtual mapping system.
 */
void
pmap_init_pv()
{
	int i;

	/* Initialize every PV head. */
	for (i = 0; i < m68k_btop(total_phys_mem); i++) {
		pvbase[i].pv_idx = PVE_EOL;	/* Indicate no mappings */
		pvbase[i].pv_flags = 0;		/* Zero out page flags  */
	}

	pv_initialized = TRUE;
}

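/*
 * The PV system initialized above is an index-linked list: pvbase[]
 * holds one head per managed physical page, and pvebase[] holds one
 * element per PTE, in the same order as the fixed PTE pool, so the
 * same index names both a PTE and its PV element.  The (unused)
 * sketch below walks such a chain to count the mappings of one
 * physical page; it is a minimal illustration built only from the
 * structures used in this file.
 */
#if 0	/* illustrative only */
static int
pv_count_mappings(pa)
	vm_offset_t pa;
{
	pv_t *pv;
	u_short idx;
	int n;

	pv = pa2pv(pa);
	n = 0;
	/* Follow the chain of PTE indices until the end-of-list mark. */
	for (idx = pv->pv_idx; idx != PVE_EOL; idx = pvebase[idx].pve_next)
		n++;
	return (n);
}
#endif	/* 0 */
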
/* get_a_table			INTERNAL
 **
 * Retrieve and return a level A table for use in a user map.
 */
a_tmgr_t *
get_a_table()
{
	a_tmgr_t *tbl;
	pmap_t pmap;

	/* Get the top A table in the pool */
	tbl = a_pool.tqh_first;
	if (tbl == NULL) {
		/*
		 * XXX - Instead of panicking here and in other get_x_table
		 * functions, we do have the option of sleeping on the head of
		 * the table pool.  Any function which updates the table pool
		 * would then issue a wakeup() on the head, thus waking up any
		 * processes waiting for a table.
		 *
		 * Actually, the place to sleep would be when some process
		 * asks for a "wired" mapping that would run us short of
		 * mapping resources.  This design DEPENDS on always having
		 * some mapping resources in the pool for stealing, so we
		 * must make sure we NEVER let the pool become empty. -gwr
		 */
		panic("get_a_table: out of A tables.");
	}

	TAILQ_REMOVE(&a_pool, tbl, at_link);
	/*
	 * If the table has a non-null parent pointer then it is in use.
	 * Forcibly abduct it from its parent and clear its entries.
	 * No re-entrancy worries here.  This table would not be in the
	 * table pool unless it was available for use.
	 *
	 * Note that the second argument to free_a_table() is FALSE.  This
	 * indicates that the table should not be relinked into the A table
	 * pool.  That is a job for the function that called us.
	 */
	if (tbl->at_parent) {
		pmap = tbl->at_parent;
		free_a_table(tbl, FALSE);
		pmap->pm_a_tmgr = NULL;
		pmap->pm_a_phys = kernAphys;
	}
#ifdef	NON_REENTRANT
	/*
	 * If the table isn't to be wired down, re-insert it at the
	 * end of the pool.
	 */
	if (!wired)
		/*
		 * Quandary - XXX
		 * Would it be better to let the calling function insert this
		 * table into the queue?  By inserting it here, we are allowing
		 * it to be stolen immediately.  The calling function is
		 * probably not expecting to use a table that it is not
		 * assured full control of.
		 * Answer - In the interest of re-entrancy, it is best to let
		 * the calling function determine when a table is available
		 * for use.  Therefore this code block is not used.
		 */
		TAILQ_INSERT_TAIL(&a_pool, tbl, at_link);
#endif	/* NON_REENTRANT */
	return tbl;
}

/* get_b_table			INTERNAL
 **
 * Return a level B table for use.
 */
b_tmgr_t *
get_b_table()
{
	b_tmgr_t *tbl;

	/* See 'get_a_table' for comments. */
	tbl = b_pool.tqh_first;
	if (tbl == NULL)
		panic("get_b_table: out of B tables.");
	TAILQ_REMOVE(&b_pool, tbl, bt_link);
	if (tbl->bt_parent) {
		tbl->bt_parent->at_dtbl[tbl->bt_pidx].attr.raw = MMU_DT_INVALID;
		tbl->bt_parent->at_ecnt--;
		free_b_table(tbl, FALSE);
	}
#ifdef	NON_REENTRANT
	if (!wired)
		/* XXX see quandary in get_a_table */
		/* XXX start lock */
		TAILQ_INSERT_TAIL(&b_pool, tbl, bt_link);
		/* XXX end lock */
#endif	/* NON_REENTRANT */
	return tbl;
}

/* get_c_table			INTERNAL
 **
 * Return a level C table for use.
 */
c_tmgr_t *
get_c_table()
{
	c_tmgr_t *tbl;

	/* See 'get_a_table' for comments */
	tbl = c_pool.tqh_first;
	if (tbl == NULL)
		panic("get_c_table: out of C tables.");
	TAILQ_REMOVE(&c_pool, tbl, ct_link);
	if (tbl->ct_parent) {
		tbl->ct_parent->bt_dtbl[tbl->ct_pidx].attr.raw = MMU_DT_INVALID;
		tbl->ct_parent->bt_ecnt--;
		free_c_table(tbl, FALSE);
	}
#ifdef	NON_REENTRANT
	if (!wired)
		/* XXX See quandary in get_a_table */
		/* XXX start lock */
		TAILQ_INSERT_TAIL(&c_pool, tbl, ct_link);
		/* XXX end lock */
#endif	/* NON_REENTRANT */

	return tbl;
}

/*
 * The following 'free_table' and 'steal_table' functions are called to
 * detach tables from their current obligations (parents and children) and
 * prepare them for reuse in another mapping.
 *
 * Free_table is used when the calling function will handle the fate
 * of the parent table, such as returning it to the free pool when it has
 * no valid entries.  Functions that do not want to handle this should
 * call steal_table, in which the parent table's descriptors and entry
 * count are automatically modified when this table is removed.
 */

/* free_a_table			INTERNAL
 **
 * Unmaps the given A table and all child tables from their current
 * mappings.  Returns the number of pages that were invalidated.
 * If 'relink' is true, the function will return the table to the head
 * of the available table pool.
 *
 * Cache note: The MC68851 will automatically flush all
 * descriptors derived from a given A table from its
 * Automatic Translation Cache (ATC) if we issue a
 * 'PFLUSHR' instruction with the base address of the
 * table.  This function should do so, and does.
 * Note note: We are using an MC68030 - there is no
 * PFLUSHR.
 */
int
free_a_table(a_tbl, relink)
	a_tmgr_t *a_tbl;
	boolean_t relink;
{
	int i, removed_cnt;
	mmu_long_dte_t	*dte;
	mmu_short_dte_t *dtbl;
	b_tmgr_t	*tmgr;

	/*
	 * Flush the ATC cache of all cached descriptors derived
	 * from this table.
	 * Sun3x does not use 68851's cached table feature
	 * flush_atc_crp(mmu_vtop(a_tbl->dte));
	 */

	/*
	 * Remove any pending cache flushes that were designated
	 * for the pmap this A table belongs to.
	 * a_tbl->parent->atc_flushq[0] = 0;
	 * Not implemented in sun3x.
	 */

	/*
	 * All A tables in the system should retain a map for the
	 * kernel.  If the table contains any valid descriptors
	 * (other than those for the kernel area), invalidate them all,
	 * stopping short of the kernel's entries.
	 */
	removed_cnt = 0;
	if (a_tbl->at_ecnt) {
		dte = a_tbl->at_dtbl;
		for (i = 0; i < MMU_TIA(KERNBASE); i++) {
			/*
			 * If a table entry points to a valid B table, free
			 * it and its children.
			 */
			if (MMU_VALID_DT(dte[i])) {
				/*
				 * The following block does several things,
				 * from innermost expression to the
				 * outermost:
				 * 1) It extracts the base address of the
				 *    B table pointed to in the A table
				 *    entry dte[i].
				 * 2) It converts this base address into
				 *    the virtual address it can be
				 *    accessed with. (all MMU tables point
				 *    to physical addresses.)
				 * 3) It finds the corresponding manager
				 *    structure which manages this MMU table.
				 * 4) It frees the manager structure.
				 *    (This frees the MMU table and all
				 *    child tables. See 'free_b_table' for
				 *    details.)
				 */
				dtbl = mmu_ptov(dte[i].addr.raw);
				tmgr = mmuB2tmgr(dtbl);
				removed_cnt += free_b_table(tmgr, TRUE);
				dte[i].attr.raw = MMU_DT_INVALID;
			}
		}
		a_tbl->at_ecnt = 0;
	}
	if (relink) {
		a_tbl->at_parent = NULL;
		TAILQ_REMOVE(&a_pool, a_tbl, at_link);
		TAILQ_INSERT_HEAD(&a_pool, a_tbl, at_link);
	}
	return removed_cnt;
}

/* free_b_table			INTERNAL
 **
 * Unmaps the given B table and all its children from their current
 * mappings.  Returns the number of pages that were invalidated.
 * (For comments, see 'free_a_table()').
 */
int
free_b_table(b_tbl, relink)
	b_tmgr_t *b_tbl;
	boolean_t relink;
{
	int i, removed_cnt;
	mmu_short_dte_t *dte;
	mmu_short_pte_t	*dtbl;
	c_tmgr_t	*tmgr;

	removed_cnt = 0;
	if (b_tbl->bt_ecnt) {
		dte = b_tbl->bt_dtbl;
		for (i = 0; i < MMU_B_TBL_SIZE; i++) {
			if (MMU_VALID_DT(dte[i])) {
				dtbl = mmu_ptov(MMU_DTE_PA(dte[i]));
				tmgr = mmuC2tmgr(dtbl);
				removed_cnt += free_c_table(tmgr, TRUE);
				dte[i].attr.raw = MMU_DT_INVALID;
			}
		}
		b_tbl->bt_ecnt = 0;
	}

	if (relink) {
		b_tbl->bt_parent = NULL;
		TAILQ_REMOVE(&b_pool, b_tbl, bt_link);
		TAILQ_INSERT_HEAD(&b_pool, b_tbl, bt_link);
	}
	return removed_cnt;
}

/* free_c_table			INTERNAL
 **
 * Unmaps the given C table from use and returns it to the pool for
 * re-use.  Returns the number of pages that were invalidated.
 *
 * This function preserves any physical page modification information
 * contained in the page descriptors within the C table by calling
 * 'pmap_remove_pte().'
 */
int
free_c_table(c_tbl, relink)
	c_tmgr_t *c_tbl;
	boolean_t relink;
{
	int i, removed_cnt;

	removed_cnt = 0;
	if (c_tbl->ct_ecnt) {
		for (i = 0; i < MMU_C_TBL_SIZE; i++) {
			if (MMU_VALID_DT(c_tbl->ct_dtbl[i])) {
				pmap_remove_pte(&c_tbl->ct_dtbl[i]);
				removed_cnt++;
			}
		}
		c_tbl->ct_ecnt = 0;
	}

	if (relink) {
		c_tbl->ct_parent = NULL;
		TAILQ_REMOVE(&c_pool, c_tbl, ct_link);
		TAILQ_INSERT_HEAD(&c_pool, c_tbl, ct_link);
	}
	return removed_cnt;
}

#if	0
/* free_c_table_novalid			INTERNAL
 **
 * Frees the given C table manager without checking to see whether
 * or not it contains any valid page descriptors as it is assumed
 * that it does not.
 */
void
free_c_table_novalid(c_tbl)
	c_tmgr_t *c_tbl;
{
	TAILQ_REMOVE(&c_pool, c_tbl, ct_link);
	TAILQ_INSERT_HEAD(&c_pool, c_tbl, ct_link);
	c_tbl->ct_parent->bt_dtbl[c_tbl->ct_pidx].attr.raw = MMU_DT_INVALID;
	c_tbl->ct_parent->bt_ecnt--;
	/*
	 * XXX - Should call equiv. of 'free_b_table_novalid' here if
	 * we just removed the last entry of the parent B table.
	 * But I want to ensure that this will not endanger pmap_enter()
	 * with sudden removal of tables it is working with.
	 *
	 * We should probably add another field to each table, indicating
	 * whether or not it is 'locked', ie. in the process of being
	 * modified.
	 */
	c_tbl->ct_parent = NULL;
}
#endif

/* pmap_remove_pte			INTERNAL
 **
 * Unmap the given pte and preserve any page modification
 * information by transferring it to the pv head of the
 * physical page it maps to.  This function does not update
 * any reference counts because it is assumed that the calling
 * function will do so.
 */
void
pmap_remove_pte(pte)
	mmu_short_pte_t *pte;
{
	u_short     pv_idx, targ_idx;
	int         s;
	vm_offset_t pa;
	pv_t       *pv;

	pa = MMU_PTE_PA(*pte);
	if (is_managed(pa)) {
		pv = pa2pv(pa);
		targ_idx = pteidx(pte);	/* Index of PTE being removed */

		/*
		 * If the PTE being removed is the first (or only) PTE in
		 * the list of PTEs currently mapped to this page, remove the
		 * PTE by changing the index found on the PV head.  Otherwise
		 * a linear search through the list will have to be executed
		 * in order to find the PVE which points to the PTE being
		 * removed, so that it may be modified to point to its new
		 * neighbor.
		 */
		s = splimp();
		pv_idx = pv->pv_idx;	/* Index of first PTE in PV list */
		if (pv_idx == targ_idx) {
			pv->pv_idx = pvebase[targ_idx].pve_next;
		} else {
			/*
			 * Find the PV element pointing to the target
			 * element.  Note: may have pv_idx==PVE_EOL
			 */
			for (;;) {
				if (pv_idx == PVE_EOL) {
#ifdef	PMAP_DEBUG
					printf("pmap_remove_pte: PVE_EOL\n");
					Debugger();
#endif
					goto pv_not_found;
				}
				if (pvebase[pv_idx].pve_next == targ_idx)
					break;
				pv_idx = pvebase[pv_idx].pve_next;
			}
			/*
			 * At this point, pv_idx is the index of the PV
			 * element just before the target element in the list.
			 * Unlink the target.
			 */
			pvebase[pv_idx].pve_next = pvebase[targ_idx].pve_next;
		pv_not_found:
			;	/* a label must precede a statement */
		}
		/*
		 * Save the mod/ref bits of the pte by simply
		 * ORing the entire pte onto the pv_flags member
		 * of the pv structure.
		 * There is no need to use a separate bit pattern
		 * for usage information on the pv head than that
		 * which is used on the MMU ptes.
		 */
		pv->pv_flags |= (u_short) pte->attr.raw;
		splx(s);
	}

	pte->attr.raw = MMU_DT_INVALID;
}

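/*
 * A small worked example of the unlink above: suppose physical page P
 * is mapped by PTEs 5, 9 and 12, so pv->pv_idx == 5,
 * pvebase[5].pve_next == 9, pvebase[9].pve_next == 12, and
 * pvebase[12].pve_next == PVE_EOL.  Removing PTE 9 searches until
 * pvebase[5].pve_next == 9 and rewrites it to 12, leaving the chain
 * 5 -> 12.  Removing PTE 5 instead takes the short path and simply
 * sets pv->pv_idx = 9.  (The indices here are made up purely for
 * illustration.)
 */
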
1622 /* pmap_stroll INTERNAL
1623 **
1624 * Retrieve the addresses of all table managers involved in the mapping of
1625 * the given virtual address. If the table walk completed sucessfully,
1626 * return TRUE. If it was only partially sucessful, return FALSE.
1627 * The table walk performed by this function is important to many other
1628 * functions in this module.
1629 *
1630 * Note: This function ought to be easier to read.
1631 */
1632 boolean_t
1633 pmap_stroll(pmap, va, a_tbl, b_tbl, c_tbl, pte, a_idx, b_idx, pte_idx)
1634 pmap_t pmap;
1635 vm_offset_t va;
1636 a_tmgr_t **a_tbl;
1637 b_tmgr_t **b_tbl;
1638 c_tmgr_t **c_tbl;
1639 mmu_short_pte_t **pte;
1640 int *a_idx, *b_idx, *pte_idx;
1641 {
1642 mmu_long_dte_t *a_dte; /* A: long descriptor table */
1643 mmu_short_dte_t *b_dte; /* B: short descriptor table */
1644
1645 if (pmap == pmap_kernel())
1646 return FALSE;
1647
1648 /* Does the given pmap have its own A table? */
1649 *a_tbl = pmap->pm_a_tmgr;
1650 if (*a_tbl == NULL)
1651 return FALSE; /* No. Return unknown. */
1652 /* Does the A table have a valid B table
1653 * under the corresponding table entry?
1654 */
1655 *a_idx = MMU_TIA(va);
1656 a_dte = &((*a_tbl)->at_dtbl[*a_idx]);
1657 if (!MMU_VALID_DT(*a_dte))
1658 return FALSE; /* No. Return unknown. */
1659 /* Yes. Extract B table from the A table. */
1660 *b_tbl = mmuB2tmgr(mmu_ptov(a_dte->addr.raw));
1661 /* Does the B table have a valid C table
1662 * under the corresponding table entry?
1663 */
1664 *b_idx = MMU_TIB(va);
1665 b_dte = &((*b_tbl)->bt_dtbl[*b_idx]);
1666 if (!MMU_VALID_DT(*b_dte))
1667 return FALSE; /* No. Return unknown. */
1668 /* Yes. Extract C table from the B table. */
1669 *c_tbl = mmuC2tmgr(mmu_ptov(MMU_DTE_PA(*b_dte)));
1670 *pte_idx = MMU_TIC(va);
1671 *pte = &((*c_tbl)->ct_dtbl[*pte_idx]);
1672
1673 return TRUE;
1674 }
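/*
* A minimal sketch of a typical call, as made by pmap_change_wiring()
* and pmap_extract() below (all variables hypothetical):
*
*	if (pmap_stroll(pmap, va, &a_tbl, &b_tbl, &c_tbl, &pte,
*	    &a_idx, &b_idx, &pte_idx) == FALSE)
*		return;
*
* A FALSE return means the walk hit an invalid entry and 'va' is
* not mapped.
*/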
1675
1676 /* pmap_enter INTERFACE
1677 **
1678 * Called by the kernel to map a virtual address
1679 * to a physical address in the given process map.
1680 *
1681 * Note: this function should apply an exclusive lock
1682 * on the pmap system for its duration. (it certainly
1683 * would save my hair!!)
1684 * This function ought to be easier to read.
1685 */
1686 void
1687 pmap_enter(pmap, va, pa, prot, wired, access_type)
1688 pmap_t pmap;
1689 vm_offset_t va;
1690 vm_offset_t pa;
1691 vm_prot_t prot;
1692 boolean_t wired;
1693 vm_prot_t access_type;
1694 {
1695 boolean_t insert, managed; /* Marks the need for PV insertion.*/
1696 u_short nidx; /* PV list index */
1697 int s; /* Used for splimp()/splx() */
1698 int flags; /* Mapping flags, e.g. cache inhibit */
1699 u_int a_idx, b_idx, pte_idx; /* table indices */
1700 a_tmgr_t *a_tbl; /* A: long descriptor table manager */
1701 b_tmgr_t *b_tbl; /* B: short descriptor table manager */
1702 c_tmgr_t *c_tbl; /* C: short page table manager */
1703 mmu_long_dte_t *a_dte; /* A: long descriptor table */
1704 mmu_short_dte_t *b_dte; /* B: short descriptor table */
1705 mmu_short_pte_t *c_pte; /* C: short page descriptor table */
1706 pv_t *pv; /* pv list head */
1707 enum {NONE, NEWA, NEWB, NEWC} llevel; /* used at end */
1708
1709 if (pmap == NULL)
1710 return;
1711 if (pmap == pmap_kernel()) {
1712 pmap_enter_kernel(va, pa, prot);
1713 return;
1714 }
1715
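/*
* Split the mapping flags, which callers pass in the otherwise unused
* low-order bits of the physical address, from the page address itself.
*/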
1716 flags = (pa & ~MMU_PAGE_MASK);
1717 pa &= MMU_PAGE_MASK;
1718
1719 /*
1720 * Determine if the physical address being mapped is on-board RAM.
1721 * Any other area of the address space is likely to belong to a
1722 * device and hence it would be disastrous to cache its contents.
1723 */
1724 if ((managed = is_managed(pa)) == FALSE)
1725 flags |= PMAP_NC;
1726
1727 /*
1728 * For user mappings we walk along the MMU tables of the given
1729 * pmap, reaching a PTE which describes the virtual page being
1730 * mapped or changed. If any level of the walk ends in an invalid
1731 * entry, a table must be allocated and the entry must be updated
1732 * to point to it.
1733 * There is a bit of confusion as to whether this code must be
1734 * re-entrant. For now we will assume it is. To support
1735 * re-entrancy we must unlink tables from the table pool before
1736 * we assume we may use them. Tables are re-linked into the pool
1737 * when we are finished with them at the end of the function.
1738 * But I don't feel like doing that until we have proof that this
1739 * needs to be re-entrant.
1740 * 'llevel' records which tables need to be relinked.
1741 */
1742 llevel = NONE;
1743
1744 /*
1745 * Step 1 - Retrieve the A table from the pmap. If it has no
1746 * A table, allocate a new one from the available pool.
1747 */
1748
1749 a_tbl = pmap->pm_a_tmgr;
1750 if (a_tbl == NULL) {
1751 /*
1752 * This pmap does not currently have an A table. Allocate
1753 * a new one.
1754 */
1755 a_tbl = get_a_table();
1756 a_tbl->at_parent = pmap;
1757
1758 /*
1759 * Assign this new A table to the pmap, and calculate its
1760 * physical address so that loadcrp() can be used to make
1761 * the table active.
1762 */
1763 pmap->pm_a_tmgr = a_tbl;
1764 pmap->pm_a_phys = mmu_vtop(a_tbl->at_dtbl);
1765
1766 /*
1767 * If the process receiving a new A table is the current
1768 * process, we are responsible for setting the MMU so that
1769 * it becomes the current address space. This only adds
1770 * new mappings, so no need to flush anything.
1771 */
1772 if (pmap == current_pmap()) {
1773 kernel_crp.rp_addr = pmap->pm_a_phys;
1774 loadcrp(&kernel_crp);
1775 }
1776
1777 if (!wired)
1778 llevel = NEWA;
1779 } else {
1780 /*
1781 * Use the A table already allocated for this pmap.
1782 * Unlink it from the A table pool if necessary.
1783 */
1784 if (wired && !a_tbl->at_wcnt)
1785 TAILQ_REMOVE(&a_pool, a_tbl, at_link);
1786 }
1787
1788 /*
1789 * Step 2 - Walk into the B table. If there is no valid B table,
1790 * allocate one.
1791 */
1792
1793 a_idx = MMU_TIA(va); /* Calculate the TIA of the VA. */
1794 a_dte = &a_tbl->at_dtbl[a_idx]; /* Retrieve descriptor from table */
1795 if (MMU_VALID_DT(*a_dte)) { /* Is the descriptor valid? */
1796 /* The descriptor is valid. Use the B table it points to. */
1797 /*************************************
1798 * a_idx *
1799 * v *
1800 * a_tbl -> +-+-+-+-+-+-+-+-+-+-+-+- *
1801 * | | | | | | | | | | | | *
1802 * +-+-+-+-+-+-+-+-+-+-+-+- *
1803 * | *
1804 * \- b_tbl -> +-+- *
1805 * | | *
1806 * +-+- *
1807 *************************************/
1808 b_dte = mmu_ptov(a_dte->addr.raw);
1809 b_tbl = mmuB2tmgr(b_dte);
1810
1811 /*
1812 * If the requested mapping must be wired, but the table
1813 * being used to map it is not, the table must be removed
1814 * from the available pool and its parent's wired entry
1815 * count incremented.
1816 */
1817 if (wired && !b_tbl->bt_wcnt) {
1818 TAILQ_REMOVE(&b_pool, b_tbl, bt_link);
1819 a_tbl->at_wcnt++;
1820 }
1821 } else {
1822 /* The descriptor is invalid. Allocate a new B table. */
1823 b_tbl = get_b_table();
1824
1825 /* Point the parent A table descriptor to this new B table. */
1826 a_dte->addr.raw = mmu_vtop(b_tbl->bt_dtbl);
1827 a_dte->attr.raw = MMU_LONG_DTE_LU | MMU_DT_SHORT;
1828 a_tbl->at_ecnt++; /* Update parent's valid entry count */
1829
1830 /* Create the necessary back references to the parent table */
1831 b_tbl->bt_parent = a_tbl;
1832 b_tbl->bt_pidx = a_idx;
1833
1834 /*
1835 * If this table is to be wired, make sure the parent A table
1836 * wired count is updated to reflect that it has another wired
1837 * entry.
1838 */
1839 if (wired)
1840 a_tbl->at_wcnt++;
1841 else if (llevel == NONE)
1842 llevel = NEWB;
1843 }
1844
1845 /*
1846 * Step 3 - Walk into the C table, if there is no valid C table,
1847 * allocate one.
1848 */
1849
1850 b_idx = MMU_TIB(va); /* Calculate the TIB of the VA */
1851 b_dte = &b_tbl->bt_dtbl[b_idx]; /* Retrieve descriptor from table */
1852 if (MMU_VALID_DT(*b_dte)) { /* Is the descriptor valid? */
1853 /* The descriptor is valid. Use the C table it points to. */
1854 /**************************************
1855 * c_idx *
1856 * | v *
1857 * \- b_tbl -> +-+-+-+-+-+-+-+-+-+-+- *
1858 * | | | | | | | | | | | *
1859 * +-+-+-+-+-+-+-+-+-+-+- *
1860 * | *
1861 * \- c_tbl -> +-+-- *
1862 * | | | *
1863 * +-+-- *
1864 **************************************/
1865 c_pte = mmu_ptov(MMU_PTE_PA(*b_dte));
1866 c_tbl = mmuC2tmgr(c_pte);
1867
1868 /* If mapping is wired and table is not */
1869 if (wired && !c_tbl->ct_wcnt) {
1870 TAILQ_REMOVE(&c_pool, c_tbl, ct_link);
1871 b_tbl->bt_wcnt++;
1872 }
1873 } else {
1874 /* The descriptor is invalid. Allocate a new C table. */
1875 c_tbl = get_c_table();
1876
1877 /* Point the parent B table descriptor to this new C table. */
1878 b_dte->attr.raw = mmu_vtop(c_tbl->ct_dtbl);
1879 b_dte->attr.raw |= MMU_DT_SHORT;
1880 b_tbl->bt_ecnt++; /* Update parent's valid entry count */
1881
1882 /* Create the necessary back references to the parent table */
1883 c_tbl->ct_parent = b_tbl;
1884 c_tbl->ct_pidx = b_idx;
1885 /*
1886 * Store the pmap and base virtual managed address for faster
1887 * retrieval in the PV functions.
1888 */
1889 c_tbl->ct_pmap = pmap;
1890 c_tbl->ct_va = (va & (MMU_TIA_MASK|MMU_TIB_MASK));
1891
1892 /*
1893 * If this table is to be wired, make sure the parent B table
1894 * wired count is updated to reflect that it has another wired
1895 * entry.
1896 */
1897 if (wired)
1898 b_tbl->bt_wcnt++;
1899 else if (llevel == NONE)
1900 llevel = NEWC;
1901 }
1902
1903 /*
1904 * Step 4 - Deposit a page descriptor (PTE) into the appropriate
1905 * slot of the C table, describing the PA to which the VA is mapped.
1906 */
1907
1908 pte_idx = MMU_TIC(va);
1909 c_pte = &c_tbl->ct_dtbl[pte_idx];
1910 if (MMU_VALID_DT(*c_pte)) { /* Is the entry currently valid? */
1911 /*
1912 * The PTE is currently valid. This particular call
1913 * is just a synonym for one (or more) of the following
1914 * operations:
1915 * change protection of a page
1916 * change wiring status of a page
1917 * remove the mapping of a page
1918 *
1919 * XXX - Semi critical: This code should unwire the PTE
1920 * and, possibly, associated parent tables if this is a
1921 * change wiring operation. Currently it does not.
1922 *
1923 * This may be ok if pmap_change_wiring() is the only
1924 * interface used to UNWIRE a page.
1925 */
1926
1927 /* First check if this is a wiring operation. */
1928 if (wired && (c_pte->attr.raw & MMU_SHORT_PTE_WIRED)) {
1929 /*
1930 * The PTE is already wired. To prevent it from being
1931 * counted as a new wiring operation, reset the 'wired'
1932 * variable.
1933 */
1934 wired = FALSE;
1935 }
1936
1937 /* Is the new address the same as the old? */
1938 if (MMU_PTE_PA(*c_pte) == pa) {
1939 /*
1940 * Yes, mark that it does not need to be reinserted
1941 * into the PV list.
1942 */
1943 insert = FALSE;
1944
1945 /*
1946 * Clear all but the modified, referenced and wired
1947 * bits on the PTE.
1948 */
1949 c_pte->attr.raw &= (MMU_SHORT_PTE_M
1950 | MMU_SHORT_PTE_USED | MMU_SHORT_PTE_WIRED);
1951 } else {
1952 /* No, remove the old entry */
1953 pmap_remove_pte(c_pte);
1954 insert = TRUE;
1955 }
1956
1957 /*
1958 * TLB flush is only necessary if modifying current map.
1959 * However, in pmap_enter(), the pmap almost always IS
1960 * the current pmap, so don't even bother to check.
1961 */
1962 TBIS(va);
1963 } else {
1964 /*
1965 * The PTE is invalid. Increment the valid entry count in
1966 * the C table manager to reflect the addition of a new entry.
1967 */
1968 c_tbl->ct_ecnt++;
1969
1970 /* XXX - temporarily make sure the PTE is cleared. */
1971 c_pte->attr.raw = 0;
1972
1973 /* It will also need to be inserted into the PV list. */
1974 insert = TRUE;
1975 }
1976
1977 /*
1978 * If page is changing from unwired to wired status, set an unused bit
1979 * within the PTE to indicate that it is wired. Also increment the
1980 * wired entry count in the C table manager.
1981 */
1982 if (wired) {
1983 c_pte->attr.raw |= MMU_SHORT_PTE_WIRED;
1984 c_tbl->ct_wcnt++;
1985 }
1986
1987 /*
1988 * Map the page, being careful to preserve modify/reference/wired
1989 * bits. At this point it is assumed that the PTE either has no bits
1990 * set, or if there are set bits, they are only modified, reference or
1991 * wired bits. If not, the following statement will cause erratic
1992 * behavior.
1993 */
1994 #ifdef PMAP_DEBUG
1995 if (c_pte->attr.raw & ~(MMU_SHORT_PTE_M |
1996 MMU_SHORT_PTE_USED | MMU_SHORT_PTE_WIRED)) {
1997 printf("pmap_enter: junk left in PTE at %p\n", c_pte);
1998 Debugger();
1999 }
2000 #endif
2001 c_pte->attr.raw |= ((u_long) pa | MMU_DT_PAGE);
2002
2003 /*
2004 * If the mapping should be read-only, set the write protect
2005 * bit in the PTE.
2006 */
2007 if (!(prot & VM_PROT_WRITE))
2008 c_pte->attr.raw |= MMU_SHORT_PTE_WP;
2009
2010 /*
2011 * If the mapping should be cache inhibited (indicated by the flag
2012 * bits passed in the low-order bits of the physical address),
2013 * mark the PTE as a cache inhibited page.
2014 */
2015 if (flags & PMAP_NC)
2016 c_pte->attr.raw |= MMU_SHORT_PTE_CI;
2017
2018 /*
2019 * If the physical address being mapped is managed by the PV
2020 * system then link the pte into the list of ptes which map
2021 * that page.
2022 */
2023 if (insert && managed) {
2024 pv = pa2pv(pa);
2025 nidx = pteidx(c_pte);
2026
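/* Push the new PTE onto the head of the page's PV list. */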
2027 s = splimp();
2028 pvebase[nidx].pve_next = pv->pv_idx;
2029 pv->pv_idx = nidx;
2030 splx(s);
2031 }
2032
2033 /* Move any allocated tables back into the active pool. */
2034
2035 switch (llevel) {
2036 case NEWA:
2037 TAILQ_INSERT_TAIL(&a_pool, a_tbl, at_link);
2038 /* FALLTHROUGH */
2039 case NEWB:
2040 TAILQ_INSERT_TAIL(&b_pool, b_tbl, bt_link);
2041 /* FALLTHROUGH */
2042 case NEWC:
2043 TAILQ_INSERT_TAIL(&c_pool, c_tbl, ct_link);
2044 /* FALLTHROUGH */
2045 default:
2046 break;
2047 }
2048 }
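/*
* A hedged sketch of the caller's view of pmap_enter() (hypothetical
* values): map one unwired, writable page into a user pmap.
*
*	pmap_enter(pmap, va, pa, VM_PROT_READ|VM_PROT_WRITE,
*	    FALSE, VM_PROT_READ|VM_PROT_WRITE);
*
* A single such call may consume up to three fresh tables from the
* a_pool, b_pool and c_pool if the address was previously unmapped.
*/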
2049
2050 /* pmap_enter_kernel INTERNAL
2051 **
2052 * Map the given virtual address to the given physical address within the
2053 * kernel address space. This function exists because the kernel map does
2054 * not do dynamic table allocation. It consists of a contiguous array of ptes
2055 * and can be edited directly without the need to walk through any tables.
2056 *
2057 * XXX: "Danger, Will Robinson!"
2058 * Note that the kernel should never take a fault on any page
2059 * between [ KERNBASE .. virtual_avail ] and this is checked in
2060 * trap.c for kernel-mode MMU faults. This means that mappings
2061 * created in that range must be implicitly wired. -gwr
2062 */
2063 void
2064 pmap_enter_kernel(va, pa, prot)
2065 vm_offset_t va;
2066 vm_offset_t pa;
2067 vm_prot_t prot;
2068 {
2069 boolean_t was_valid, insert;
2070 u_short pte_idx;
2071 int s, flags;
2072 mmu_short_pte_t *pte;
2073 pv_t *pv;
2074 vm_offset_t old_pa;
2075
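/* As in pmap_enter(), split the mapping flags from the address. */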
2076 flags = (pa & ~MMU_PAGE_MASK);
2077 pa &= MMU_PAGE_MASK;
2078
2079 if (is_managed(pa))
2080 insert = TRUE;
2081 else
2082 insert = FALSE;
2083
2084 /*
2085 * Calculate the index of the PTE being modified.
2086 */
2087 pte_idx = (u_long) m68k_btop(va - KERNBASE);
2088
2089 /* This array is traditionally named "Sysmap" */
2090 pte = &kernCbase[pte_idx];
2091
2092 s = splimp();
2093 if (MMU_VALID_DT(*pte)) {
2094 was_valid = TRUE;
2095 /*
2096 * If the PTE already maps a different
2097 * physical address, unmap and pv_unlink.
2098 */
2099 old_pa = MMU_PTE_PA(*pte);
2100 if (pa != old_pa)
2101 pmap_remove_pte(pte);
2102 else {
2103 /*
2104 * Old PA and new PA are the same. No need to
2105 * relink the mapping within the PV list.
2106 */
2107 insert = FALSE;
2108
2109 /*
2110 * Save any mod/ref bits on the PTE.
2111 */
2112 pte->attr.raw &= (MMU_SHORT_PTE_USED|MMU_SHORT_PTE_M);
2113 }
2114 } else {
2115 pte->attr.raw = MMU_DT_INVALID;
2116 was_valid = FALSE;
2117 }
2118
2119 /*
2120 * Map the page, being careful to preserve the modified/referenced
2121 * bits on the PTE.
2122 */
2123 pte->attr.raw |= (pa | MMU_DT_PAGE);
2124
2125 if (!(prot & VM_PROT_WRITE)) /* If access should be read-only */
2126 pte->attr.raw |= MMU_SHORT_PTE_WP;
2127 if (flags & PMAP_NC)
2128 pte->attr.raw |= MMU_SHORT_PTE_CI;
2129 if (was_valid)
2130 TBIS(va);
2131
2132 /*
2133 * Insert the PTE into the PV system, if need be.
2134 */
2135 if (insert) {
2136 pv = pa2pv(pa);
2137 pvebase[pte_idx].pve_next = pv->pv_idx;
2138 pv->pv_idx = pte_idx;
2139 }
2140 splx(s);
2141
2142 }
2143
2144 /* pmap_map INTERNAL
2145 **
2146 * Map a contiguous range of physical memory into a contiguous range of
2147 * the kernel virtual address space.
2148 *
2149 * Used for device mappings and early mapping of the kernel text/data/bss.
2150 * Returns the first virtual address beyond the end of the range.
2151 */
2152 vm_offset_t
2153 pmap_map(va, pa, endpa, prot)
2154 vm_offset_t va;
2155 vm_offset_t pa;
2156 vm_offset_t endpa;
2157 int prot;
2158 {
2159 int sz;
2160
2161 sz = endpa - pa;
2162 while (sz > 0) {
2163 pmap_enter_kernel(va, pa, prot);
2164 va += NBPG;
2165 pa += NBPG;
2166 sz -= NBPG;
2167 }
2168 return(va);
2169 }
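/*
* Example use (hypothetical addresses): map a three-page device region
* read/write and note where the virtual range continues.
*
*	va = pmap_map(va, devpa, devpa + 3*NBPG,
*	    VM_PROT_READ|VM_PROT_WRITE);
*/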
2170
2171 /* pmap_protect INTERFACE
2172 **
2173 * Apply the given protection to the given virtual address range within
2174 * the given map.
2175 *
2176 * It is ok for the protection applied to be stronger than what is
2177 * specified. We use this to our advantage when the given map has no
2178 * mapping for the virtual address. By skipping a page when this
2179 * is discovered, we are effectively applying a protection of VM_PROT_NONE,
2180 * and therefore do not need to map the page just to apply a protection
2181 * code. Only pmap_enter() needs to create new mappings if they do not exist.
2182 *
2183 * XXX - This function could be sped up by using pmap_stroll() for initial
2184 * setup, and then manually walking the tables in the for() loop.
2185 */
2186 void
2187 pmap_protect(pmap, startva, endva, prot)
2188 pmap_t pmap;
2189 vm_offset_t startva, endva;
2190 vm_prot_t prot;
2191 {
2192 boolean_t iscurpmap;
2193 int a_idx, b_idx, c_idx;
2194 a_tmgr_t *a_tbl;
2195 b_tmgr_t *b_tbl;
2196 c_tmgr_t *c_tbl;
2197 mmu_short_pte_t *pte;
2198
2199 if (pmap == NULL)
2200 return;
2201 if (pmap == pmap_kernel()) {
2202 pmap_protect_kernel(startva, endva, prot);
2203 return;
2204 }
2205
2206 /*
2207 * In this particular pmap implementation, there are only three
2208 * types of memory protection: 'all' (read/write/execute),
2209 * 'read-only' (read/execute) and 'none' (no mapping.)
2210 * It is not possible for us to treat 'executable' as a separate
2211 * protection type. Therefore, protection requests that seek to
2212 * remove execute permission while retaining read or write, and those
2213 * that make little sense (write-only for example) are ignored.
2214 */
2215 switch (prot) {
2216 case VM_PROT_NONE:
2217 /*
2218 * A request to apply the protection code of
2219 * 'VM_PROT_NONE' is a synonym for pmap_remove().
2220 */
2221 pmap_remove(pmap, startva, endva);
2222 return;
2223 case VM_PROT_EXECUTE:
2224 case VM_PROT_READ:
2225 case VM_PROT_READ|VM_PROT_EXECUTE:
2226 /* continue */
2227 break;
2228 case VM_PROT_WRITE:
2229 case VM_PROT_WRITE|VM_PROT_READ:
2230 case VM_PROT_WRITE|VM_PROT_EXECUTE:
2231 case VM_PROT_ALL:
2232 /* None of these should happen in a sane system. */
2233 return;
2234 }
2235
2236 /*
2237 * If the pmap has no A table, it has no mappings and therefore
2238 * there is nothing to protect.
2239 */
2240 if ((a_tbl = pmap->pm_a_tmgr) == NULL)
2241 return;
2242
2243 a_idx = MMU_TIA(startva);
2244 b_idx = MMU_TIB(startva);
2245 c_idx = MMU_TIC(startva);
2246 b_tbl = NULL; c_tbl = NULL;
2247
2248 iscurpmap = (pmap == current_pmap());
2249 while (startva < endva) {
2250 if (b_tbl || MMU_VALID_DT(a_tbl->at_dtbl[a_idx])) {
2251 if (b_tbl == NULL) {
2252 b_tbl = (b_tmgr_t *) a_tbl->at_dtbl[a_idx].addr.raw;
2253 b_tbl = mmu_ptov((vm_offset_t) b_tbl);
2254 b_tbl = mmuB2tmgr((mmu_short_dte_t *) b_tbl);
2255 }
2256 if (c_tbl || MMU_VALID_DT(b_tbl->bt_dtbl[b_idx])) {
2257 if (c_tbl == NULL) {
2258 c_tbl = (c_tmgr_t *) MMU_DTE_PA(b_tbl->bt_dtbl[b_idx]);
2259 c_tbl = mmu_ptov((vm_offset_t) c_tbl);
2260 c_tbl = mmuC2tmgr((mmu_short_pte_t *) c_tbl);
2261 }
2262 if (MMU_VALID_DT(c_tbl->ct_dtbl[c_idx])) {
2263 pte = &c_tbl->ct_dtbl[c_idx];
2264 /* make the mapping read-only */
2265 pte->attr.raw |= MMU_SHORT_PTE_WP;
2266 /*
2267 * If we just modified the current address space,
2268 * flush any translations for the modified page from
2269 * the translation cache and any data from it in the
2270 * data cache.
2271 */
2272 if (iscurpmap)
2273 TBIS(startva);
2274 }
2275 startva += NBPG;
2276
2277 if (++c_idx >= MMU_C_TBL_SIZE) { /* exceeded C table? */
2278 c_tbl = NULL;
2279 c_idx = 0;
2280 if (++b_idx >= MMU_B_TBL_SIZE) { /* exceeded B table? */
2281 b_tbl = NULL;
2282 b_idx = 0;
2283 }
2284 }
2285 } else { /* C table wasn't valid */
2286 c_tbl = NULL;
2287 c_idx = 0;
2288 startva += MMU_TIB_RANGE;
2289 if (++b_idx >= MMU_B_TBL_SIZE) { /* exceeded B table? */
2290 b_tbl = NULL;
2291 b_idx = 0;
2292 }
2293 } /* C table */
2294 } else { /* B table wasn't valid */
2295 b_tbl = NULL;
2296 b_idx = 0;
2297 startva += MMU_TIA_RANGE;
2298 a_idx++;
2299 } /* B table */
2300 }
2301 }
2302
2303 /* pmap_protect_kernel INTERNAL
2304 **
2305 * Apply the given protection code to a kernel address range.
2306 */
2307 void
2308 pmap_protect_kernel(startva, endva, prot)
2309 vm_offset_t startva, endva;
2310 vm_prot_t prot;
2311 {
2312 vm_offset_t va;
2313 mmu_short_pte_t *pte;
2314
2315 pte = &kernCbase[(unsigned long) m68k_btop(startva - KERNBASE)];
2316 for (va = startva; va < endva; va += NBPG, pte++) {
2317 if (MMU_VALID_DT(*pte)) {
2318 switch (prot) {
2319 case VM_PROT_ALL:
2320 break;
2321 case VM_PROT_EXECUTE:
2322 case VM_PROT_READ:
2323 case VM_PROT_READ|VM_PROT_EXECUTE:
2324 pte->attr.raw |= MMU_SHORT_PTE_WP;
2325 break;
2326 case VM_PROT_NONE:
2327 /* this is an alias for 'pmap_remove_kernel' */
2328 pmap_remove_pte(pte);
2329 break;
2330 default:
2331 break;
2332 }
2333 /*
2334 * since this is the kernel, immediately flush any cached
2335 * descriptors for this address.
2336 */
2337 TBIS(va);
2338 }
2339 }
2340 }
2341
2342 /* pmap_change_wiring INTERFACE
2343 **
2344 * Changes the wiring of the specified page.
2345 *
2346 * This function is called from vm_fault.c to unwire
2347 * a mapping. It really should be called 'pmap_unwire'
2348 * because it is never asked to do anything but remove
2349 * wirings.
2350 */
2351 void
2352 pmap_change_wiring(pmap, va, wire)
2353 pmap_t pmap;
2354 vm_offset_t va;
2355 boolean_t wire;
2356 {
2357 int a_idx, b_idx, c_idx;
2358 a_tmgr_t *a_tbl;
2359 b_tmgr_t *b_tbl;
2360 c_tmgr_t *c_tbl;
2361 mmu_short_pte_t *pte;
2362
2363 /* Kernel mappings always remain wired. */
2364 if (pmap == pmap_kernel())
2365 return;
2366
2367 #ifdef PMAP_DEBUG
2368 if (wire == TRUE)
2369 panic("pmap_change_wiring: wire requested.");
2370 #endif
2371
2372 /*
2373 * Walk through the tables. If the walk terminates without
2374 * a valid PTE then the address wasn't wired in the first place.
2375 * Return immediately.
2376 */
2377 if (pmap_stroll(pmap, va, &a_tbl, &b_tbl, &c_tbl, &pte, &a_idx,
2378 &b_idx, &c_idx) == FALSE)
2379 return;
2380
2381
2382 /* Is the PTE wired? If not, return. */
2383 if (!(pte->attr.raw & MMU_SHORT_PTE_WIRED))
2384 return;
2385
2386 /* Remove the wiring bit. */
2387 pte->attr.raw &= ~(MMU_SHORT_PTE_WIRED);
2388
2389 /*
2390 * Decrement the wired entry count in the C table.
2391 * If it reaches zero the following things happen:
2392 * 1. The table no longer has any wired entries and is considered
2393 * unwired.
2394 * 2. It is placed on the available queue.
2395 * 3. The parent table's wired entry count is decremented.
2396 * 4. If it reaches zero, this process repeats at step 1 and
2397 * stops after reaching the A table.
2398 */
2399 if (--c_tbl->ct_wcnt == 0) {
2400 TAILQ_INSERT_TAIL(&c_pool, c_tbl, ct_link);
2401 if (--b_tbl->bt_wcnt == 0) {
2402 TAILQ_INSERT_TAIL(&b_pool, b_tbl, bt_link);
2403 if (--a_tbl->at_wcnt == 0) {
2404 TAILQ_INSERT_TAIL(&a_pool, a_tbl, at_link);
2405 }
2406 }
2407 }
2408 }
2409
2410 /* pmap_pageable INTERFACE
2411 **
2412 * Make the specified range of addresses within the given pmap,
2413 * 'pageable' or 'not-pageable'. A non-pageable (wired) page must not
2414 * cause any faults when referenced; a pageable page may.
2415 *
2416 * This routine is only advisory. The VM system will call pmap_enter()
2417 * to wire or unwire pages that are going to be made pageable before calling
2418 * this function. By the time this routine is called, everything that needs
2419 * to be done has already been done.
2420 */
2421 void
2422 pmap_pageable(pmap, start, end, pageable)
2423 pmap_t pmap;
2424 vm_offset_t start, end;
2425 boolean_t pageable;
2426 {
2427 /* not implemented. */
2428 }
2429
2430 /* pmap_copy INTERFACE
2431 **
2432 * Copy the mappings of a range of addresses in one pmap, into
2433 * the destination address of another.
2434 *
2435 * This routine is advisory. Should we one day decide that MMU tables
2436 * may be shared by more than one pmap, this function should be used to
2437 * link them together. Until that day, however, we do nothing.
2438 */
2439 void
2440 pmap_copy(pmap_a, pmap_b, dst, len, src)
2441 pmap_t pmap_a, pmap_b;
2442 vm_offset_t dst;
2443 vm_size_t len;
2444 vm_offset_t src;
2445 {
2446 /* not implemented. */
2447 }
2448
2449 /* pmap_copy_page INTERFACE
2450 **
2451 * Copy the contents of one physical page into another.
2452 *
2453 * This function makes use of two virtual pages allocated in pmap_bootstrap()
2454 * to map the two specified physical pages into the kernel address space.
2455 *
2456 * Note: We could use the transparent translation registers to make the
2457 * mappings. If we do so, be sure to disable interrupts before using them.
2458 */
2459 void
2460 pmap_copy_page(srcpa, dstpa)
2461 vm_offset_t srcpa, dstpa;
2462 {
2463 vm_offset_t srcva, dstva;
2464 int s;
2465
2466 srcva = tmp_vpages[0];
2467 dstva = tmp_vpages[1];
2468
2469 s = splimp();
2470 if (tmp_vpages_inuse++)
2471 panic("pmap_copy_page: temporary vpages are in use.");
2472
2473 /* Map pages as non-cacheable to avoid cache pollution? */
2474 pmap_enter_kernel(srcva, srcpa, VM_PROT_READ);
2475 pmap_enter_kernel(dstva, dstpa, VM_PROT_READ|VM_PROT_WRITE);
2476
2477 /* Hand-optimized version of bcopy(src, dst, NBPG) */
2478 copypage((char *) srcva, (char *) dstva);
2479
2480 pmap_remove_kernel(srcva, srcva + NBPG);
2481 pmap_remove_kernel(dstva, dstva + NBPG);
2482
2483 --tmp_vpages_inuse;
2484 splx(s);
2485 }
2486
2487 /* pmap_zero_page INTERFACE
2488 **
2489 * Zero the contents of the specified physical page.
2490 *
2491 * Uses one of the virtual pages allocated in pmap_bootstrap()
2492 * to map the specified page into the kernel address space.
2493 */
2494 void
2495 pmap_zero_page(dstpa)
2496 vm_offset_t dstpa;
2497 {
2498 vm_offset_t dstva;
2499 int s;
2500
2501 dstva = tmp_vpages[1];
2502 s = splimp();
2503 if (tmp_vpages_inuse++)
2504 panic("pmap_zero_page: temporary vpages are in use.");
2505
2506 /* The comments in pmap_copy_page() above apply here also. */
2507 pmap_enter_kernel(dstva, dstpa, VM_PROT_READ|VM_PROT_WRITE);
2508
2509 /* Hand-optimized version of bzero(ptr, NBPG) */
2510 zeropage((char *) dstva);
2511
2512 pmap_remove_kernel(dstva, dstva + NBPG);
2513
2514 --tmp_vpages_inuse;
2515 splx(s);
2516 }
2517
2518 /* pmap_collect INTERFACE
2519 **
2520 * Called from the VM system when we are about to swap out
2521 * the process using this pmap. This should give up any
2522 * resources held here, including all its MMU tables.
2523 */
2524 void
2525 pmap_collect(pmap)
2526 pmap_t pmap;
2527 {
2528 /* XXX - todo... */
2529 }
2530
2531 /* pmap_create INTERFACE
2532 **
2533 * Create and return a pmap structure.
2534 */
2535 pmap_t
2536 pmap_create(size)
2537 vm_size_t size;
2538 {
2539 pmap_t pmap;
2540
2541 if (size)
2542 return NULL;
2543
2544 pmap = (pmap_t) malloc(sizeof(struct pmap), M_VMPMAP, M_WAITOK);
2545 pmap_pinit(pmap);
2546
2547 return pmap;
2548 }
2549
2550 /* pmap_pinit INTERNAL
2551 **
2552 * Initialize a pmap structure.
2553 */
2554 void
2555 pmap_pinit(pmap)
2556 pmap_t pmap;
2557 {
2558 bzero(pmap, sizeof(struct pmap));
2559 pmap->pm_a_tmgr = NULL;
2560 pmap->pm_a_phys = kernAphys;
2561 }
2562
2563 /* pmap_release INTERFACE
2564 **
2565 * Release any resources held by the given pmap.
2566 *
2567 * This is the reverse analog to pmap_pinit. It does not
2568 * necessarily mean that the pmap structure will be deallocated,
2569 * as it is in pmap_destroy.
2570 */
2571 void
2572 pmap_release(pmap)
2573 pmap_t pmap;
2574 {
2575 /*
2576 * As long as the pmap contains no mappings,
2577 * which should always be the case whenever
2578 * this function is called, there really should
2579 * be nothing to do.
2580 */
2581 #ifdef PMAP_DEBUG
2582 if (pmap == NULL)
2583 return;
2584 if (pmap == pmap_kernel())
2585 panic("pmap_release: kernel pmap");
2586 #endif
2587 /*
2588 * XXX - If this pmap has an A table, give it back.
2589 * The pmap SHOULD be empty by now, and pmap_remove
2590 * should have already given back the A table...
2591 * However, I see: pmap->pm_a_tmgr->at_ecnt == 1
2592 * at this point, which means some mapping was not
2593 * removed when it should have been. -gwr
2594 */
2595 if (pmap->pm_a_tmgr != NULL) {
2596 /* First make sure we are not using it! */
2597 if (kernel_crp.rp_addr == pmap->pm_a_phys) {
2598 kernel_crp.rp_addr = kernAphys;
2599 loadcrp(&kernel_crp);
2600 }
2601 #ifdef PMAP_DEBUG /* XXX - todo! */
2602 /* XXX - Now complain... */
2603 printf("pmap_release: still have table\n");
2604 Debugger();
2605 #endif
2606 free_a_table(pmap->pm_a_tmgr, TRUE);
2607 pmap->pm_a_tmgr = NULL;
2608 pmap->pm_a_phys = kernAphys;
2609 }
2610 }
2611
2612 /* pmap_reference INTERFACE
2613 **
2614 * Increment the reference count of a pmap.
2615 */
2616 void
2617 pmap_reference(pmap)
2618 pmap_t pmap;
2619 {
2620 if (pmap == NULL)
2621 return;
2622
2623 /* pmap_lock(pmap); */
2624 pmap->pm_refcount++;
2625 /* pmap_unlock(pmap); */
2626 }
2627
2628 /* pmap_dereference INTERNAL
2629 **
2630 * Decrease the reference count on the given pmap
2631 * by one and return the current count.
2632 */
2633 int
2634 pmap_dereference(pmap)
2635 pmap_t pmap;
2636 {
2637 int rtn;
2638
2639 if (pmap == NULL)
2640 return 0;
2641
2642 /* pmap_lock(pmap); */
2643 rtn = --pmap->pm_refcount;
2644 /* pmap_unlock(pmap); */
2645
2646 return rtn;
2647 }
2648
2649 /* pmap_destroy INTERFACE
2650 **
2651 * Decrement a pmap's reference count and delete
2652 * the pmap if it becomes zero. Will be called
2653 * only after all mappings have been removed.
2654 */
2655 void
2656 pmap_destroy(pmap)
2657 pmap_t pmap;
2658 {
2659 if (pmap == NULL)
2660 return;
2661 if (pmap == &kernel_pmap)
2662 panic("pmap_destroy: kernel_pmap!");
2663 if (pmap_dereference(pmap) == 0) {
2664 pmap_release(pmap);
2665 free(pmap, M_VMPMAP);
2666 }
2667 }
2668
2669 /* pmap_is_referenced INTERFACE
2670 **
2671 * Determine if the given physical page has been
2672 * referenced (read from or written to).
2673 */
2674 boolean_t
2675 pmap_is_referenced(pa)
2676 vm_offset_t pa;
2677 {
2678 pv_t *pv;
2679 int idx, s;
2680
2681 if (!pv_initialized)
2682 return FALSE;
2683 /* XXX - this may be unnecessary. */
2684 if (!is_managed(pa))
2685 return FALSE;
2686
2687 pv = pa2pv(pa);
2688 /*
2689 * Check the flags on the pv head. If they are set,
2690 * return immediately. Otherwise a search must be done.
2691 */
2692 if (pv->pv_flags & PV_FLAGS_USED)
2693 return TRUE;
2694
2695 s = splimp();
2696 /*
2697 * Search through all pv elements pointing
2698 * to this page and query their reference bits
2699 */
2700 for (idx = pv->pv_idx;
2701 idx != PVE_EOL;
2702 idx = pvebase[idx].pve_next) {
2703
2704 if (MMU_PTE_USED(kernCbase[idx])) {
2705 splx(s);
2706 return TRUE;
2707 }
2708 }
2709 splx(s);
2710
2711 return FALSE;
2712 }
2713
2714 /* pmap_is_modified INTERFACE
2715 **
2716 * Determine if the given physical page has been
2717 * modified (written to.)
2718 */
2719 boolean_t
2720 pmap_is_modified(pa)
2721 vm_offset_t pa;
2722 {
2723 pv_t *pv;
2724 int idx, s;
2725
2726 if (!pv_initialized)
2727 return FALSE;
2728 /* XXX - this may be unnecessary. */
2729 if (!is_managed(pa))
2730 return FALSE;
2731
2732 /* see comments in pmap_is_referenced() */
2733 pv = pa2pv(pa);
2734 if (pv->pv_flags & PV_FLAGS_MDFY)
2735 return TRUE;
2736
2737 s = splimp();
2738 for (idx = pv->pv_idx;
2739 idx != PVE_EOL;
2740 idx = pvebase[idx].pve_next) {
2741
2742 if (MMU_PTE_MODIFIED(kernCbase[idx])) {
2743 splx(s);
2744 return TRUE;
2745 }
2746 }
2747 splx(s);
2748
2749 return FALSE;
2750 }
2751
2752 /* pmap_page_protect INTERFACE
2753 **
2754 * Applies the given protection to all mappings to the given
2755 * physical page.
2756 */
2757 void
2758 pmap_page_protect(pa, prot)
2759 vm_offset_t pa;
2760 vm_prot_t prot;
2761 {
2762 pv_t *pv;
2763 int idx, s;
2764 vm_offset_t va;
2765 struct mmu_short_pte_struct *pte;
2766 c_tmgr_t *c_tbl;
2767 pmap_t pmap, curpmap;
2768
2769 if (!is_managed(pa))
2770 return;
2771
2772 curpmap = current_pmap();
2773 pv = pa2pv(pa);
2774 s = splimp();
2775
2776 for (idx = pv->pv_idx;
2777 idx != PVE_EOL;
2778 idx = pvebase[idx].pve_next) {
2779
2780 pte = &kernCbase[idx];
2781 switch (prot) {
2782 case VM_PROT_ALL:
2783 /* do nothing */
2784 break;
2785 case VM_PROT_EXECUTE:
2786 case VM_PROT_READ:
2787 case VM_PROT_READ|VM_PROT_EXECUTE:
2788 /*
2789 * Determine the virtual address mapped by
2790 * the PTE and flush ATC entries if necessary.
2791 */
2792 va = pmap_get_pteinfo(idx, &pmap, &c_tbl);
2793 /* XXX don't write protect pager mappings */
2794 if (va >= PAGER_SVA && va < PAGER_EVA) {
2795 #ifdef PMAP_DEBUG
2796 /* XXX - Does this actually happen? */
2797 printf("pmap_page_protect: in pager!\n");
2798 Debugger();
2799 #endif
2800 } else
2801 pte->attr.raw |= MMU_SHORT_PTE_WP;
2802 if (pmap == curpmap || pmap == pmap_kernel())
2803 TBIS(va);
2804 break;
2805 case VM_PROT_NONE:
2806 /* Save the mod/ref bits. */
2807 pv->pv_flags |= pte->attr.raw;
2808 /* Invalidate the PTE. */
2809 pte->attr.raw = MMU_DT_INVALID;
2810
2811 /*
2812 * Update table counts and flush ATC entries
2813 * if necessary.
2814 */
2815 va = pmap_get_pteinfo(idx, &pmap, &c_tbl);
2816
2817 /*
2818 * If the PTE belongs to the kernel map,
2819 * be sure to flush the page it maps.
2820 */
2821 if (pmap == pmap_kernel()) {
2822 TBIS(va);
2823 } else {
2824 /*
2825 * The PTE belongs to a user map.
2826 * update the entry count in the C
2827 * table to which it belongs and flush
2828 * the ATC if the mapping belongs to
2829 * the current pmap.
2830 */
2831 c_tbl->ct_ecnt--;
2832 if (pmap == curpmap)
2833 TBIS(va);
2834 }
2835 break;
2836 default:
2837 break;
2838 }
2839 }
2840
2841 /*
2842 * If the protection code indicates that all mappings to the page
2843 * be removed, truncate the PV list to zero entries.
2844 */
2845 if (prot == VM_PROT_NONE)
2846 pv->pv_idx = PVE_EOL;
2847 splx(s);
2848 }
2849
2850 /* pmap_get_pteinfo INTERNAL
2851 **
2852 * Called internally to find the pmap and virtual address within that
2853 * map to which the pte at the given index maps. Also returns the PTE's C
2854 * table manager.
2855 *
2856 * Returns the pmap in the argument provided, and the virtual address
2857 * by return value.
2858 */
2859 vm_offset_t
2860 pmap_get_pteinfo(idx, pmap, tbl)
2861 u_int idx;
2862 pmap_t *pmap;
2863 c_tmgr_t **tbl;
2864 {
2865 vm_offset_t va = 0;
2866
2867 /*
2868 * Determine if the PTE is a kernel PTE or a user PTE.
2869 */
2870 if (idx >= NUM_KERN_PTES) {
2871 /*
2872 * The PTE belongs to a user mapping.
2873 */
2874 /* XXX: Would like an inline for this to validate idx... */
2875 *tbl = &Ctmgrbase[(idx - NUM_KERN_PTES) / MMU_C_TBL_SIZE];
2876
2877 *pmap = (*tbl)->ct_pmap;
2878 /*
2879 * To find the va to which the PTE maps, we first take
2880 * the table's base virtual address mapping which is stored
2881 * in ct_va. We then increment this address by a page for
2882 * every slot skipped until we reach the PTE.
2883 */
2884 va = (*tbl)->ct_va;
2885 va += m68k_ptob(idx % MMU_C_TBL_SIZE);
2886 } else {
2887 /*
2888 * The PTE belongs to the kernel map.
2889 */
2890 *pmap = pmap_kernel();
2891
2892 va = m68k_ptob(idx);
2893 va += KERNBASE;
2894 }
2895
2896 return va;
2897 }
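/*
* A worked example under assumed values: if MMU_C_TBL_SIZE is 64 and
* idx is NUM_KERN_PTES + 130, the PTE lives in Ctmgrbase[2]
* (130 / 64 == 2) and maps the page at ct_va + m68k_ptob(130 % 64),
* i.e. slot 2 of that C table.
*/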
2898
2899 /* pmap_clear_modify INTERFACE
2900 **
2901 * Clear the modification bit on the page at the specified
2902 * physical address.
2903 *
2904 */
2905 void
2906 pmap_clear_modify(pa)
2907 vm_offset_t pa;
2908 {
2909 if (!is_managed(pa))
2910 return;
2911 pmap_clear_pv(pa, PV_FLAGS_MDFY);
2912 }
2913
2914 /* pmap_clear_reference INTERFACE
2915 **
2916 * Clear the referenced bit on the page at the specified
2917 * physical address.
2918 */
2919 void
2920 pmap_clear_reference(pa)
2921 vm_offset_t pa;
2922 {
2923 if (!is_managed(pa))
2924 return;
2925 pmap_clear_pv(pa, PV_FLAGS_USED);
2926 }
2927
2928 /* pmap_clear_pv INTERNAL
2929 **
2930 * Clears the specified flag from the specified physical address.
2931 * (Used by pmap_clear_modify() and pmap_clear_reference().)
2932 *
2933 * Flag is one of:
2934 * PV_FLAGS_MDFY - Page modified bit.
2935 * PV_FLAGS_USED - Page used (referenced) bit.
2936 *
2937 * This routine must not only clear the flag on the pv list
2938 * head. It must also clear the bit on every pte in the pv
2939 * list associated with the address.
2940 */
2941 void
2942 pmap_clear_pv(pa, flag)
2943 vm_offset_t pa;
2944 int flag;
2945 {
2946 pv_t *pv;
2947 int idx, s;
2948 vm_offset_t va;
2949 pmap_t pmap;
2950 mmu_short_pte_t *pte;
2951 c_tmgr_t *c_tbl;
2952
2953 pv = pa2pv(pa);
2954
2955 s = splimp();
2956 pv->pv_flags &= ~(flag);
2957
2958 for (idx = pv->pv_idx;
2959 idx != PVE_EOL;
2960 idx = pvebase[idx].pve_next) {
2961
2962 pte = &kernCbase[idx];
2963 pte->attr.raw &= ~(flag);
2964 /*
2965 * The MC68030 MMU will not set the modified or
2966 * referenced bits on any MMU tables for which it has
2967 * a cached descriptor with its modify bit set. To ensure
2968 * that it will modify these bits on the PTE during the next
2969 * time it is written to or read from, we must flush it from
2970 * the ATC.
2971 *
2972 * Ordinarily it is only necessary to flush the descriptor
2973 * if it is used in the current address space. But since I
2974 * am not sure that there will always be a notion of
2975 * 'the current address space' when this function is called,
2976 * I will skip the test and always flush the address. It
2977 * does no harm.
2978 */
2979 va = pmap_get_pteinfo(idx, &pmap, &c_tbl);
2980 TBIS(va);
2981 }
2982 splx(s);
2983 }
2984
2985 /* pmap_extract INTERFACE
2986 **
2987 * Return the physical address mapped by the virtual address
2988 * in the specified pmap or 0 if it is not known.
2989 *
2990 * Note: this function should also apply an exclusive lock
2991 * on the pmap system during its duration.
2992 */
2993 vm_offset_t
2994 pmap_extract(pmap, va)
2995 pmap_t pmap;
2996 vm_offset_t va;
2997 {
2998 int a_idx, b_idx, pte_idx;
2999 a_tmgr_t *a_tbl;
3000 b_tmgr_t *b_tbl;
3001 c_tmgr_t *c_tbl;
3002 mmu_short_pte_t *c_pte;
3003
3004 if (pmap == pmap_kernel())
3005 return pmap_extract_kernel(va);
3006 if (pmap == NULL)
3007 return 0;
3008
3009 if (pmap_stroll(pmap, va, &a_tbl, &b_tbl, &c_tbl,
3010 &c_pte, &a_idx, &b_idx, &pte_idx) == FALSE)
3011 return 0;
3012
3013 if (!MMU_VALID_DT(*c_pte))
3014 return 0;
3015
3016 return (MMU_PTE_PA(*c_pte));
3017 }
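/*
* Sketch of a typical call (hypothetical variables), treating a zero
* return as "no translation exists":
*
*	pa = pmap_extract(pmap, va);
*	if (pa == 0)
*		...va has no translation in this pmap...
*/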
3018
3019 /* pmap_extract_kernel INTERNAL
3020 **
3021 * Extract a translation from the kernel address space.
3022 */
3023 vm_offset_t
3024 pmap_extract_kernel(va)
3025 vm_offset_t va;
3026 {
3027 mmu_short_pte_t *pte;
3028
3029 pte = &kernCbase[(u_int) m68k_btop(va - KERNBASE)];
3030 return MMU_PTE_PA(*pte);
3031 }
3032
3033 /* pmap_remove_kernel INTERNAL
3034 **
3035 * Remove the mapping of a range of virtual addresses from the kernel map.
3036 * The arguments are already page-aligned.
3037 */
3038 void
3039 pmap_remove_kernel(sva, eva)
3040 vm_offset_t sva;
3041 vm_offset_t eva;
3042 {
3043 int idx, eidx;
3044
3045 #ifdef PMAP_DEBUG
3046 if ((sva & PGOFSET) || (eva & PGOFSET))
3047 panic("pmap_remove_kernel: alignment");
3048 #endif
3049
3050 idx = m68k_btop(sva - KERNBASE);
3051 eidx = m68k_btop(eva - KERNBASE);
3052
3053 while (idx < eidx) {
3054 pmap_remove_pte(&kernCbase[idx++]);
3055 TBIS(sva);
3056 sva += NBPG;
3057 }
3058 }
3059
3060 /* pmap_remove INTERFACE
3061 **
3062 * Remove the mapping of a range of virtual addresses from the given pmap.
3063 *
3064 * If the range contains any wired entries, this function will probably create
3065 * disaster.
3066 */
3067 void
3068 pmap_remove(pmap, start, end)
3069 pmap_t pmap;
3070 vm_offset_t start;
3071 vm_offset_t end;
3072 {
3073
3074 if (pmap == pmap_kernel()) {
3075 pmap_remove_kernel(start, end);
3076 return;
3077 }
3078
3079 /*
3080 * XXX - Temporary(?) statement to prevent panic caused
3081 * by vm_alloc_with_pager() handing us a software map (ie NULL)
3082 * to remove because it couldn't get backing store.
3083 * (I guess.)
3084 */
3085 if (pmap == NULL)
3086 return;
3087
3088 /*
3089 * If the pmap doesn't have an A table of its own, it has no mappings
3090 * that can be removed.
3091 */
3092 if (pmap->pm_a_tmgr == NULL)
3093 return;
3094
3095 /*
3096 * Remove the specified range from the pmap. If the function
3097 * returns true, the operation removed all the valid mappings
3098 * in the pmap and freed its A table. If this happened to the
3099 * currently loaded pmap, the MMU root pointer must be reloaded
3100 * with the default 'kernel' map.
3101 */
3102 if (pmap_remove_a(pmap->pm_a_tmgr, start, end)) {
3103 if (kernel_crp.rp_addr == pmap->pm_a_phys) {
3104 kernel_crp.rp_addr = kernAphys;
3105 loadcrp(&kernel_crp);
3106 /* will do TLB flush below */
3107 }
3108 pmap->pm_a_tmgr = NULL;
3109 pmap->pm_a_phys = kernAphys;
3110 }
3111
3112 /*
3113 * If we just modified the current address space,
3114 * make sure to flush the MMU cache.
3115 *
3116 * XXX - this could be an unnecessarily large flush.
3117 * XXX - Could decide, based on the size of the VA range
3118 * to be removed, whether to flush "by pages" or "all".
3119 */
3120 if (pmap == current_pmap())
3121 TBIAU();
3122 }
3123
3124 /* pmap_remove_a INTERNAL
3125 **
3126 * This is function number one in a set of three that removes a range
3127 * of memory in the most efficient manner by removing the highest possible
3128 * tables from the memory space. This particular function attempts to remove
3129 * as many B tables as it can, delegating the remaining fragmented ranges to
3130 * pmap_remove_b().
3131 *
3132 * If the removal operation results in an empty A table, the function returns
3133 * TRUE.
3134 *
3135 * It's ugly but will do for now.
3136 */
3137 boolean_t
3138 pmap_remove_a(a_tbl, start, end)
3139 a_tmgr_t *a_tbl;
3140 vm_offset_t start;
3141 vm_offset_t end;
3142 {
3143 boolean_t empty;
3144 int idx;
3145 vm_offset_t nstart, nend;
3146 b_tmgr_t *b_tbl;
3147 mmu_long_dte_t *a_dte;
3148 mmu_short_dte_t *b_dte;
3149
3150 /*
3151 * The following code works with what I call a 'granularity
3152 * reduction algorithm'. A range of addresses will always have
3153 * the following properties, which are classified according to
3154 * how the range relates to the size of the current granularity
3155 * - an A table entry:
3156 *
3157 * 1 2 3 4
3158 * -+---+---+---+---+---+---+---+-
3159 * -+---+---+---+---+---+---+---+-
3160 *
3161 * A range will always start on a granularity boundary, illustrated
3162 * by '+' signs in the table above, or it will start at some point
3163 * in between granularity boundaries, as illustrated by point 1.
3164 * The first step in removing a range of addresses is to remove the
3165 * range between 1 and 2, the nearest granularity boundary. This
3166 * job is handled by the section of code governed by the
3167 * 'if (start < nstart)' statement.
3168 *
3169 * A range will always encompass zero or more integral granules,
3170 * illustrated by points 2 and 3. Integral granules are easy to
3171 * remove. The removal of these granules is the second step, and
3172 * is handled by the code block 'if (nstart < nend)'.
3173 *
3174 * Lastly, a range will always end on a granularity boundary,
3175 * illustrated by point 3, or it will fall just beyond one, as at
3176 * point 4. The last step involves removing this range and is handled by
3177 * the code block 'if (nend < end)'.
3178 */
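/*
* A worked example with assumed numbers: if MMU_TIA_RANGE were
* 0x2000000, removing [0x1800000, 0x6400000) would split into the
* head fragment [0x1800000, 0x2000000) delegated to pmap_remove_b(),
* the whole granules [0x2000000, 0x6000000) freed one B table at a
* time, and the tail fragment [0x6000000, 0x6400000) delegated to
* pmap_remove_b() again.
*/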
3179 nstart = MMU_ROUND_UP_A(start);
3180 nend = MMU_ROUND_A(end);
3181
3182 if (start < nstart) {
3183 /*
3184 * This block is executed if the range starts between
3185 * a granularity boundary.
3186 *
3187 * First find the DTE which is responsible for mapping
3188 * the start of the range.
3189 */
3190 idx = MMU_TIA(start);
3191 a_dte = &a_tbl->at_dtbl[idx];
3192
3193 /*
3194 * If the DTE is valid then delegate the removal of the sub
3195 * range to pmap_remove_b(), which can remove addresses at
3196 * a finer granularity.
3197 */
3198 if (MMU_VALID_DT(*a_dte)) {
3199 b_dte = mmu_ptov(a_dte->addr.raw);
3200 b_tbl = mmuB2tmgr(b_dte);
3201
3202 /*
3203 * The sub range to be removed starts at the start
3204 * of the full range we were asked to remove, and ends
3205 * at the lesser of:
3206 * 1. The end of the full range, -or-
3207 * 2. The start of the range, rounded up to the
3208 * nearest granularity boundary.
3209 */
3210 if (end < nstart)
3211 empty = pmap_remove_b(b_tbl, start, end);
3212 else
3213 empty = pmap_remove_b(b_tbl, start, nstart);
3214
3215 /*
3216 * If the removal resulted in an empty B table,
3217 * invalidate the DTE that points to it and decrement
3218 * the valid entry count of the A table.
3219 */
3220 if (empty) {
3221 a_dte->attr.raw = MMU_DT_INVALID;
3222 a_tbl->at_ecnt--;
3223 }
3224 }
3225 /*
3226 * If the DTE is invalid, the address range is already non-
3227 * existent and can simply be skipped.
3228 */
3229 }
3230 if (nstart < nend) {
3231 /*
3232 * This block is executed if the range spans one or more
3233 * whole granules (A table entries.)
3234 *
3235 * First find the DTE which is responsible for mapping
3236 * the start of the first granule involved.
3237 */
3238 idx = MMU_TIA(nstart);
3239 a_dte = &a_tbl->at_dtbl[idx];
3240
3241 /*
3242 * Remove entire sub-granules (B tables) one at a time,
3243 * until reaching the end of the range.
3244 */
3245 for (; nstart < nend; a_dte++, nstart += MMU_TIA_RANGE)
3246 if (MMU_VALID_DT(*a_dte)) {
3247 /*
3248 * Find the B table manager for the
3249 * entry and free it.
3250 */
3251 b_dte = mmu_ptov(a_dte->addr.raw);
3252 b_tbl = mmuB2tmgr(b_dte);
3253 free_b_table(b_tbl, TRUE);
3254
3255 /*
3256 * Invalidate the DTE that points to the
3257 * B table and decrement the valid entry
3258 * count of the A table.
3259 */
3260 a_dte->attr.raw = MMU_DT_INVALID;
3261 a_tbl->at_ecnt--;
3262 }
3263 }
3264 if (nend < end) {
3265 /*
3266 * This block is executed if the range ends beyond a
3267 * granularity boundary.
3268 *
3269 * First find the DTE which is responsible for mapping
3270 * the start of the nearest (rounded down) granularity
3271 * boundary.
3272 */
3273 idx = MMU_TIA(nend);
3274 a_dte = &a_tbl->at_dtbl[idx];
3275
3276 /*
3277 * If the DTE is valid then delegate the removal of the sub
3278 * range to pmap_remove_b(), which can remove addresses at
3279 * a finer granularity.
3280 */
3281 if (MMU_VALID_DT(*a_dte)) {
3282 /*
3283 * Find the B table manager for the entry
3284 * and hand it to pmap_remove_b() along with
3285 * the sub range.
3286 */
3287 b_dte = mmu_ptov(a_dte->addr.raw);
3288 b_tbl = mmuB2tmgr(b_dte);
3289
3290 empty = pmap_remove_b(b_tbl, nend, end);
3291
3292 /*
3293 * If the removal resulted in an empty B table,
3294 * invalidate the DTE that points to it and decrement
3295 * the valid entry count of the A table.
3296 */
3297 if (empty) {
3298 a_dte->attr.raw = MMU_DT_INVALID;
3299 a_tbl->at_ecnt--;
3300 }
3301 }
3302 }
3303
3304 /*
3305 * If there are no more entries in the A table, release it
3306 * back to the available pool and return TRUE.
3307 */
3308 if (a_tbl->at_ecnt == 0) {
3309 a_tbl->at_parent = NULL;
3310 TAILQ_REMOVE(&a_pool, a_tbl, at_link);
3311 TAILQ_INSERT_HEAD(&a_pool, a_tbl, at_link);
3312 empty = TRUE;
3313 } else {
3314 empty = FALSE;
3315 }
3316
3317 return empty;
3318 }
3319
3320 /* pmap_remove_b INTERNAL
3321 **
3322 * Remove a range of addresses from an address space, trying to remove entire
3323 * C tables if possible.
3324 *
3325 * If the operation results in an empty B table, the function returns TRUE.
3326 */
3327 boolean_t
3328 pmap_remove_b(b_tbl, start, end)
3329 b_tmgr_t *b_tbl;
3330 vm_offset_t start;
3331 vm_offset_t end;
3332 {
3333 boolean_t empty;
3334 int idx;
3335 vm_offset_t nstart, nend, rstart;
3336 c_tmgr_t *c_tbl;
3337 mmu_short_dte_t *b_dte;
3338 mmu_short_pte_t *c_dte;
3339
3340
3341 nstart = MMU_ROUND_UP_B(start);
3342 nend = MMU_ROUND_B(end);
3343
3344 if (start < nstart) {
3345 idx = MMU_TIB(start);
3346 b_dte = &b_tbl->bt_dtbl[idx];
3347 if (MMU_VALID_DT(*b_dte)) {
3348 c_dte = mmu_ptov(MMU_DTE_PA(*b_dte));
3349 c_tbl = mmuC2tmgr(c_dte);
3350 if (end < nstart)
3351 empty = pmap_remove_c(c_tbl, start, end);
3352 else
3353 empty = pmap_remove_c(c_tbl, start, nstart);
3354 if (empty) {
3355 b_dte->attr.raw = MMU_DT_INVALID;
3356 b_tbl->bt_ecnt--;
3357 }
3358 }
3359 }
3360 if (nstart < nend) {
3361 idx = MMU_TIB(nstart);
3362 b_dte = &b_tbl->bt_dtbl[idx];
3363 rstart = nstart;
3364 while (rstart < nend) {
3365 if (MMU_VALID_DT(*b_dte)) {
3366 c_dte = mmu_ptov(MMU_DTE_PA(*b_dte));
3367 c_tbl = mmuC2tmgr(c_dte);
3368 free_c_table(c_tbl, TRUE);
3369 b_dte->attr.raw = MMU_DT_INVALID;
3370 b_tbl->bt_ecnt--;
3371 }
3372 b_dte++;
3373 rstart += MMU_TIB_RANGE;
3374 }
3375 }
3376 if (nend < end) {
3377 idx = MMU_TIB(nend);
3378 b_dte = &b_tbl->bt_dtbl[idx];
3379 if (MMU_VALID_DT(*b_dte)) {
3380 c_dte = mmu_ptov(MMU_DTE_PA(*b_dte));
3381 c_tbl = mmuC2tmgr(c_dte);
3382 empty = pmap_remove_c(c_tbl, nend, end);
3383 if (empty) {
3384 b_dte->attr.raw = MMU_DT_INVALID;
3385 b_tbl->bt_ecnt--;
3386 }
3387 }
3388 }
3389
3390 if (b_tbl->bt_ecnt == 0) {
3391 b_tbl->bt_parent = NULL;
3392 TAILQ_REMOVE(&b_pool, b_tbl, bt_link);
3393 TAILQ_INSERT_HEAD(&b_pool, b_tbl, bt_link);
3394 empty = TRUE;
3395 } else {
3396 empty = FALSE;
3397 }
3398
3399 return empty;
3400 }
3401
3402 /* pmap_remove_c INTERNAL
3403 **
3404 * Remove a range of addresses from the given C table.
3405 */
3406 boolean_t
3407 pmap_remove_c(c_tbl, start, end)
3408 c_tmgr_t *c_tbl;
3409 vm_offset_t start;
3410 vm_offset_t end;
3411 {
3412 boolean_t empty;
3413 int idx;
3414 mmu_short_pte_t *c_pte;
3415
3416 idx = MMU_TIC(start);
3417 c_pte = &c_tbl->ct_dtbl[idx];
3418 for (;start < end; start += MMU_PAGE_SIZE, c_pte++) {
3419 if (MMU_VALID_DT(*c_pte)) {
3420 pmap_remove_pte(c_pte);
3421 c_tbl->ct_ecnt--;
3422 }
3423 }
3424
3425 if (c_tbl->ct_ecnt == 0) {
3426 c_tbl->ct_parent = NULL;
3427 TAILQ_REMOVE(&c_pool, c_tbl, ct_link);
3428 TAILQ_INSERT_HEAD(&c_pool, c_tbl, ct_link);
3429 empty = TRUE;
3430 } else {
3431 empty = FALSE;
3432 }
3433
3434 return empty;
3435 }
3436
3437 /* is_managed INTERNAL
3438 **
3439 * Determine if the given physical address is managed by the PV system.
3440 * Note that this logic assumes that no one will ask for the status of
3441 * addresses which lie between the memory banks on the 3/80. If they
3442 * do so, it will falsely report that it is managed.
3443 *
3444 * Note: A "managed" address is one that was reported to the VM system as
3445 * a "usable page" during system startup. As such, the VM system expects the
3446 * pmap module to keep accurate track of the usage of those pages.
3447 * Any page not given to the VM system at startup does not exist (as far as
3448 * the VM system is concerned) and is therefore "unmanaged." Examples are
3449 * those pages which belong to the ROM monitor and the memory allocated before
3450 * the VM system was started.
3451 */
3452 boolean_t
3453 is_managed(pa)
3454 vm_offset_t pa;
3455 {
3456 if (pa >= avail_start && pa < avail_end)
3457 return TRUE;
3458 else
3459 return FALSE;
3460 }
3461
3462 /* pmap_bootstrap_alloc INTERNAL
3463 **
3464 * Used internally for memory allocation at startup when malloc is not
3465 * available. This code will fail once it crosses the first memory
3466 * bank boundary on the 3/80. Hopefully by then however, the VM system
3467 * will be in charge of allocation.
3468 */
3469 void *
3470 pmap_bootstrap_alloc(size)
3471 int size;
3472 {
3473 void *rtn;
3474
3475 #ifdef PMAP_DEBUG
3476 if (bootstrap_alloc_enabled == FALSE) {
3477 mon_printf("pmap_bootstrap_alloc: disabled\n");
3478 sunmon_abort();
3479 }
3480 #endif
3481
3482 rtn = (void *) virtual_avail;
3483 virtual_avail += size;
3484
3485 #ifdef PMAP_DEBUG
3486 if (virtual_avail > virtual_contig_end) {
3487 mon_printf("pmap_bootstrap_alloc: out of mem\n");
3488 sunmon_abort();
3489 }
3490 #endif
3491
3492 return rtn;
3493 }
3494
3495 /* pmap_bootstrap_aalign INTERNAL
3496 **
3497 * Used to ensure that the next call to pmap_bootstrap_alloc() will
3498 * return a chunk of memory aligned to the specified size.
3499 *
3500 * Note: This function will only support alignment sizes that are powers
3501 * of two.
3502 */
3503 void
3504 pmap_bootstrap_aalign(size)
3505 int size;
3506 {
3507 int off;
3508
3509 off = virtual_avail & (size - 1);
3510 if (off) {
3511 (void) pmap_bootstrap_alloc(size - off);
3512 }
3513 }
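/*
* A worked example with assumed values: if virtual_avail is 0x3804 and
* the requested alignment is 0x1000, off is 0x804, so a throw-away
* allocation of 0x7fc bytes advances virtual_avail to 0x4000 and the
* next pmap_bootstrap_alloc() returns an aligned chunk.
*/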
3514
3515 /* pmap_pa_exists
3516 **
3517 * Used by the /dev/mem driver to see if a given PA is memory
3518 * that can be mapped. (The PA is not in a hole.)
3519 */
3520 int
3521 pmap_pa_exists(pa)
3522 vm_offset_t pa;
3523 {
3524 register int i;
3525
3526 for (i = 0; i < SUN3X_NPHYS_RAM_SEGS; i++) {
3527 if ((pa >= avail_mem[i].pmem_start) &&
3528 (pa < avail_mem[i].pmem_end))
3529 return (1);
3530 if (avail_mem[i].pmem_next == NULL)
3531 break;
3532 }
3533 return (0);
3534 }
3535
3536 /* Called only from locore.s and pmap.c */
3537 void _pmap_switch __P((pmap_t pmap));
3538
3539 /*
3540 * _pmap_switch INTERNAL
3541 *
3542 * This is called by locore.s:cpu_switch() when it is
3543 * switching to a new process. Load new translations.
3544 * Note: done in-line by locore.s unless PMAP_DEBUG
3545 *
3546 * Note that we do NOT allocate a context here, but
3547 * share the "kernel only" context until we really
3548 * need our own context for user-space mappings in
3549 * pmap_enter_user(). [ s/context/mmu A table/ ]
3550 */
3551 void
3552 _pmap_switch(pmap)
3553 pmap_t pmap;
3554 {
3555 u_long rootpa;
3556
3557 /*
3558 * Only do reload/flush if we have to.
3559 * Note that if the old and new process
3560 * were BOTH using the "null" context,
3561 * then this will NOT flush the TLB.
3562 */
3563 rootpa = pmap->pm_a_phys;
3564 if (kernel_crp.rp_addr != rootpa) {
3565 DPRINT(("pmap_activate(%p)\n", pmap));
3566 kernel_crp.rp_addr = rootpa;
3567 loadcrp(&kernel_crp);
3568 TBIAU();
3569 }
3570 }
3571
3572 /*
3573 * Exported version of pmap_activate(). This is called from the
3574 * machine-independent VM code when a process is given a new pmap.
3575 * If (p == curproc) do like cpu_switch would do; otherwise just
3576 * take this as notification that the process has a new pmap.
3577 */
3578 void
3579 pmap_activate(p)
3580 struct proc *p;
3581 {
3582 pmap_t pmap = p->p_vmspace->vm_map.pmap;
3583 int s;
3584
3585 if (p == curproc) {
3586 s = splimp();
3587 _pmap_switch(pmap);
3588 splx(s);
3589 }
3590 }
3591
3592 /* pmap_deactivate INTERFACE
3593 **
3594 * This is called to deactivate the specified process's
3595 * address space.
3596 * XXX The semantics of this function are currently not well-defined.
3597 */
3598 void
3599 pmap_deactivate(p)
3600 struct proc *p;
3601 {
3602 /* not implemented. */
3603 }
3604
3605 /* pmap_update
3606 **
3607 * Apply any delayed changes scheduled for all pmaps immediately.
3608 *
3609 * No delayed operations are currently done in this pmap.
3610 */
3611 void
3612 pmap_update()
3613 {
3614 /* not implemented. */
3615 }
3616
3617 /*
3618 * Fill in the sun3x-specific part of the kernel core header
3619 * for dumpsys(). (See machdep.c for the rest.)
3620 */
3621 void
3622 pmap_kcore_hdr(sh)
3623 struct sun3x_kcore_hdr *sh;
3624 {
3625 u_long spa, len;
3626 int i;
3627
3628 sh->pg_frame = MMU_SHORT_PTE_BASEADDR;
3629 sh->pg_valid = MMU_DT_PAGE;
3630 sh->contig_end = virtual_contig_end;
3631 sh->kernCbase = (u_long) kernCbase;
3632 for (i = 0; i < SUN3X_NPHYS_RAM_SEGS; i++) {
3633 spa = avail_mem[i].pmem_start;
3634 spa = m68k_trunc_page(spa);
3635 len = avail_mem[i].pmem_end - spa;
3636 len = m68k_round_page(len);
3637 sh->ram_segs[i].start = spa;
3638 sh->ram_segs[i].size = len;
3639 }
3640 }
3641
3642
3643 /* pmap_virtual_space INTERFACE
3644 **
3645 * Return the current available range of virtual addresses in the
3646 * arguments provided. Only really called once.
3647 */
3648 void
3649 pmap_virtual_space(vstart, vend)
3650 vm_offset_t *vstart, *vend;
3651 {
3652 *vstart = virtual_avail;
3653 *vend = virtual_end;
3654 }
3655
3656 /*
3657 * Provide memory to the VM system.
3658 *
3659 * Assume avail_start is always in the
3660 * first segment as pmap_bootstrap does.
3661 */
3662 static void
3663 pmap_page_upload()
3664 {
3665 vm_offset_t a, b; /* memory range */
3666 int i;
3667
3668 /* Supply the memory in segments. */
3669 for (i = 0; i < SUN3X_NPHYS_RAM_SEGS; i++) {
3670 a = atop(avail_mem[i].pmem_start);
3671 b = atop(avail_mem[i].pmem_end);
3672 if (i == 0)
3673 a = atop(avail_start);
3674
3675 uvm_page_physload(a, b, a, b, VM_FREELIST_DEFAULT);
3676
3677 if (avail_mem[i].pmem_next == NULL)
3678 break;
3679 }
3680 }
3681
3682 /* pmap_page_index INTERFACE
3683 **
3684 * Return the index of the given physical page in a list of usable
3685 * physical pages in the system. Holes in physical memory may be counted
3686 * if so desired. As long as pmap_free_pages() and pmap_page_index()
3687 * agree as to whether holes in memory do or do not count as valid pages,
3688 * it really doesn't matter. However, if you like to save a little
3689 * memory, don't count holes as valid pages. This is even more true when
3690 * the holes are large.
3691 *
3692 * We will not count holes as valid pages. We can generate page indices
3693 * that conform to this by using the memory bank structures initialized
3694 * in pmap_alloc_pv().
3695 */
3696 int
3697 pmap_page_index(pa)
3698 vm_offset_t pa;
3699 {
3700 struct pmap_physmem_struct *bank = avail_mem;
3701 vm_offset_t off;
3702
3703 /* Search for the memory bank with this page. */
3704 /* XXX - What if it is not physical memory? */
3705 while (pa > bank->pmem_end)
3706 bank = bank->pmem_next;
3707 off = pa - bank->pmem_start;
3708
3709 return (bank->pmem_pvbase + m68k_btop(off));
3710 }
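
/*
* Worked example (hypothetical banks): given
*
*	bank 0: pmem_start 0x000000, pmem_end 0x400000, pmem_pvbase 0
*	bank 1: pmem_start 0x800000, pmem_end 0xc00000, pmem_pvbase 512
*
* and 8K pages, pmap_page_index(0x802000) selects bank 1, computes
* off = 0x2000, and returns 512 + m68k_btop(0x2000) = 513. The hole
* between the banks contributes no indices.
*/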
3711
3712 /* pmap_count INTERFACE
3713 **
3714 * Return the number of resident (valid) pages in the given pmap.
3715 *
3716 * Note: If this function is handed the kernel map, it will report
3717 * that it has no mappings. Hopefully the VM system won't ask for kernel
3718 * map statistics.
3719 */
3720 segsz_t
3721 pmap_count(pmap, type)
3722 pmap_t pmap;
3723 int type;
3724 {
3725 u_int count;
3726 int a_idx, b_idx;
3727 a_tmgr_t *a_tbl;
3728 b_tmgr_t *b_tbl;
3729 c_tmgr_t *c_tbl;
3730
3731 /*
3732 * If the pmap does not have its own A table manager, it has no
3733 * valid entries.
3734 */
3735 if (pmap->pm_a_tmgr == NULL)
3736 return 0;
3737
3738 a_tbl = pmap->pm_a_tmgr;
3739
3740 count = 0;
3741 for (a_idx = 0; a_idx < MMU_TIA(KERNBASE); a_idx++) {
3742 if (MMU_VALID_DT(a_tbl->at_dtbl[a_idx])) {
3743 b_tbl = mmuB2tmgr(mmu_ptov(a_tbl->at_dtbl[a_idx].addr.raw));
3744 for (b_idx = 0; b_idx < MMU_B_TBL_SIZE; b_idx++) {
3745 if (MMU_VALID_DT(b_tbl->bt_dtbl[b_idx])) {
3746 c_tbl = mmuC2tmgr(
3747 mmu_ptov(MMU_DTE_PA(b_tbl->bt_dtbl[b_idx])));
3748 if (type == 0)
3749 /*
3750 * A resident entry count has been requested.
3751 */
3752 count += c_tbl->ct_ecnt;
3753 else
3754 /*
3755 * A wired entry count has been requested.
3756 */
3757 count += c_tbl->ct_wcnt;
3758 }
3759 }
3760 }
3761 }
3762
3763 return count;
3764 }
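
/*
* Usage sketch (hedged; the callers live in MI code): type 0 requests
* the resident count, any other value the wired count:
*
*	resident = pmap_count(pmap, 0);		sums ct_ecnt
*	wired = pmap_count(pmap, 1);		sums ct_wcnt
*/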
3765
3766 /************************ SUN3 COMPATIBILITY ROUTINES ********************
3767 * The following routines are only used by DDB for tricky kernel *
3768 * text operations in db_memrw.c. They are provided for sun3 *
3769 * compatibility. *
3770 *************************************************************************/
3771 /* get_pte INTERNAL
3772 **
3773 * Return the page descriptor that describes the kernel mapping
3774 * of the given virtual address.
3775 */
3776 extern u_long ptest_addr __P((u_long)); /* XXX: locore.s */
3777 u_int
3778 get_pte(va)
3779 vm_offset_t va;
3780 {
3781 u_long pte_pa;
3782 mmu_short_pte_t *pte;
3783
3784 /* Get the physical address of the PTE */
3785 pte_pa = ptest_addr(va & ~PGOFSET);
3786
3787 /* Convert to a virtual address... */
3788 pte = (mmu_short_pte_t *) (KERNBASE + pte_pa);
3789
3790 /* Make sure it is in our level-C tables... */
3791 if ((pte < kernCbase) ||
3792 (pte >= &mmuCbase[NUM_USER_PTES]))
3793 return 0;
3794
3795 /* ... and just return its contents. */
3796 return (pte->attr.raw);
3797 }
3798
3799
3800 /* set_pte INTERNAL
3801 **
3802 * Set the page descriptor that describes the kernel mapping
3803 * of the given virtual address.
3804 */
3805 void
3806 set_pte(va, pte)
3807 vm_offset_t va;
3808 u_int pte;
3809 {
3810 u_long idx;
3811
3812 if (va < KERNBASE)
3813 return;
3814
3815 idx = (unsigned long) m68k_btop(va - KERNBASE);
3816 kernCbase[idx].attr.raw = pte;
3817 TBIS(va);
3818 }
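
/*
* Sketch of the db_memrw.c-style use of these two routines (assumed
* sequence; see that file for the real code). To patch kernel text,
* temporarily lift the write protection on the page, store, restore:
*
*	oldpte = get_pte(va);
*	set_pte(va, oldpte & ~MMU_SHORT_PTE_WP);	make page writable
*	... store the new instruction at va ...
*	set_pte(va, oldpte);				restore protection
*/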
3819
3820 /*
3821 * Routine: pmap_procwr
3822 *
3823 * Function:
3824 * Synchronize caches corresponding to [addr, addr+len) in p.
3825 */
3826 void
3827 pmap_procwr(p, va, len)
3828 struct proc *p;
3829 vaddr_t va;
3830 size_t len;
3831 {
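/*
* The magic number is believed to be CC_EXTPURGE|CC_IPURGE: purge
* the on-chip instruction cache, and any external cache, for the
* range. (Assumed from the m68k cachectl1() flag values.)
*/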
3832 (void)cachectl1(0x80000004, va, len, p);
3833 }
3834
3835
3836 #ifdef PMAP_DEBUG
3837 /************************** DEBUGGING ROUTINES **************************
3838 * The following routines are meant to be an aid to debugging the pmap *
3839 * system. They are callable from the DDB command line and must be *
3840 * prepared to handle unstable or incomplete system states. *
3841 ************************************************************************/
3842
3843 /* pv_list
3844 **
3845 * List all mappings found on the pv list for the given physical page.
3846 * To avoid endless loops, the listing will stop at the end of the list
3847 * or after 'n' entries - whichever comes first.
3848 */
3849 void
3850 pv_list(pa, n)
3851 vm_offset_t pa;
3852 int n;
3853 {
3854 int idx;
3855 vm_offset_t va;
3856 pv_t *pv;
3857 c_tmgr_t *c_tbl;
3858 pmap_t pmap;
3859
3860 pv = pa2pv(pa);
3861 idx = pv->pv_idx;
3862
3863 for (; idx != PVE_EOL && n > 0;
3864 idx = pvebase[idx].pve_next, n--) {
3865
3866 va = pmap_get_pteinfo(idx, &pmap, &c_tbl);
3867 printf("idx %d, pmap 0x%x, va 0x%x, c_tbl %x\n",
3868 idx, (u_int) pmap, (u_int) va, (u_int) c_tbl);
3869 }
3870 }
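
/*
* Usage sketch: from the DDB prompt, something like
*
*	call pv_list(0x123000, 16)
*
* lists at most 16 mappings of physical page 0x123000 (a hypothetical
* address; any managed page will do).
*/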
3871 #endif /* PMAP_DEBUG */
3872
3873 #ifdef NOT_YET
3874 /* and maybe not ever */
3875 /************************** LOW-LEVEL ROUTINES **************************
3876 * These routines will eventually be re-written into assembly and placed *
3877 * in locore.s. They are here now as stubs so that the pmap module can *
3878 * be linked as a standalone user program for testing. *
3879 ************************************************************************/
3880 /* flush_atc_crp INTERNAL
3881 **
3882 * Flush all page descriptors derived from the given CPU Root Pointer
3883 * (CRP), or 'A' table as it is known here, from the 68851's
3884 * address translation cache (ATC).
3885 */
3886 void
3887 flush_atc_crp(a_tbl)
	mmu_long_dte_t *a_tbl;	/* XXX assumed type: address of the A table */
3888 {
3889 mmu_long_rp_t rp;
3890
3891 /* Create a temporary root table pointer that points to the
3892 * given A table.
3893 */
3894 rp.attr.raw = ~MMU_LONG_RP_LU;
3895 rp.addr.raw = (unsigned int) a_tbl;
3896
3897 mmu_pflushr(&rp);
3898 /* mmu_pflushr:
3899 * movel sp(4)@,a0
3900 * pflushr a0@
3901 * rts
3902 */
3903 }
3904 #endif /* NOT_YET */
3905