/* $NetBSD: pmap.c,v 1.36 1998/05/19 19:00:18 thorpej Exp $ */

/*-
 * Copyright (c) 1996, 1997 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jeremy Cooper.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * XXX These comments aren't quite accurate.  Need to change.
 * The sun3x uses the MC68851 Memory Management Unit, which is built
 * into the CPU.  The 68851 maps virtual to physical addresses using
 * a multi-level table lookup, which is stored in the very memory that
 * it maps.  The number of levels of lookup is configurable from one
 * to four.  In this implementation, we use three, named 'A' through 'C'.
 *
 * The MMU translates virtual addresses into physical addresses by
 * traversing these tables in a process called a 'table walk'.  The most
 * significant 7 bits of the Virtual Address ('VA') being translated are
 * used as an index into the level A table, whose base in physical memory
 * is stored in a special MMU register, the 'CPU Root Pointer' or CRP.  The
 * address found at that index in the A table is used as the base
 * address for the next table, the B table.  The next six bits of the VA are
 * used as an index into the B table, which in turn gives the base address
 * of the third and final C table.
 *
 * The next six bits of the VA are used as an index into the C table to
 * locate a Page Table Entry (PTE).  The PTE is a physical address in memory
 * to which the remaining 13 bits of the VA are added, producing the
 * mapped physical address.
 *
 * To map the entire memory space in this manner would require 2114296 bytes
 * of page tables per process - quite expensive.  Instead we will
 * allocate a fixed but considerably smaller space for the page tables at
 * the time the VM system is initialized.  When the pmap code is asked by
 * the kernel to map a VA to a PA, it allocates tables as needed from this
 * pool.  When there are no more tables in the pool, tables are stolen
 * from the oldest mapped entries in the tree.  This is only possible
 * because all memory mappings are stored in the kernel memory map
 * structures, independent of the pmap structures.  A VA which references
 * one of these invalidated maps will cause a page fault.  The kernel
 * will determine that the page fault was caused by a task using a valid
 * VA, but for some reason (which does not concern it), that address was
 * not mapped.  It will ask the pmap code to re-map the entry and then
 * it will resume executing the faulting task.
 *
 * In this manner the most efficient use of the page table space is
 * achieved.  Tasks which do not execute often will have their tables
 * stolen and reused by tasks which execute more frequently.  The best
 * size for the page table pool will probably be determined by
 * experimentation.
 *
 * You read all of the comments so far.  Good for you.
 * Now go play!
 */
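
/*
 * As a small illustration of the three-level walk described above, the
 * sketch below (not compiled) shows how a VA would be split into its
 * table indices and page offset.  It assumes the MMU_TIA/MMU_TIB/MMU_TIC
 * index-extraction macros and MMU_PAGE_MASK from pte.h behave as their
 * uses elsewhere in this file suggest; the function name is hypothetical
 * and the sketch is explanatory only.
 */
#if 0   /* illustrative sketch */
static void
pmap_show_va(va)
        vm_offset_t va;
{
        printf("A index: %d\n", MMU_TIA(va));   /* top 7 bits of the VA */
        printf("B index: %d\n", MMU_TIB(va));   /* next 6 bits */
        printf("C index: %d\n", MMU_TIC(va));   /* next 6 bits */
        printf("offset:  0x%lx\n", (u_long)(va & ~MMU_PAGE_MASK)); /* low 13 bits */
}
#endif  /* 0 */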

/*** A Note About the 68851 Address Translation Cache
 * The MC68851 has a 64 entry cache, called the Address Translation Cache
 * or 'ATC'.  This cache stores the most recently used page descriptors
 * accessed by the MMU when it does translations.  Using a marker called a
 * 'task alias' the MMU can store the descriptors from 8 different table
 * spaces concurrently.  The task alias is associated with the base
 * address of the level A table of that address space.  When an address
 * space is currently active (the CRP currently points to its A table)
 * the only cached descriptors that will be obeyed are ones which have a
 * matching task alias of the current space associated with them.
 *
 * Since the cache is always consulted before any table lookups are done,
 * it is important that it accurately reflect the state of the MMU tables.
 * Whenever a change has been made to a table that has been loaded into
 * the MMU, the code must be sure to flush any cached entries that are
 * affected by the change.  These instances are documented in the code at
 * various points.
 */
/*** A Note About the Note About the 68851 Address Translation Cache
 * 4 months into this code I discovered that the sun3x does not have
 * a MC68851 chip.  Instead, it has a version of this MMU that is part of
 * the 68030 CPU.
 * Although it behaves very similarly to the 68851, it only has 1 task
 * alias and a 22 entry cache.  So sadly (or happily), the first paragraph
 * of the previous note does not apply to the sun3x pmap.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/user.h>
#include <sys/queue.h>
#include <sys/kcore.h>

#include <vm/vm.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>

#include <machine/cpu.h>
#include <machine/kcore.h>
#include <machine/mon.h>
#include <machine/pmap.h>
#include <machine/pte.h>

#include <sun3/sun3/cache.h>
#include <sun3/sun3/machdep.h>

#include "pmap_pvt.h"

/* XXX - What headers declare these? */
extern struct pcb *curpcb;
extern int physmem;

extern void copypage __P((const void*, void*));
extern void zeropage __P((void*));

/* Defined in locore.s */
extern char kernel_text[];

/* Defined by the linker */
extern char etext[], edata[], end[];
extern char *esym;      /* DDB */

/*************************** DEBUGGING DEFINITIONS ***********************
 * Macros, preprocessor defines and variables used in debugging can make *
 * code hard to read.  Anything used exclusively for debugging purposes  *
 * is defined here to avoid having such mess scattered around the file.  *
 *************************************************************************/
#ifdef  PMAP_DEBUG
/*
 * To aid the debugging process, macros should be expanded into smaller steps
 * that accomplish the same goal, yet provide convenient places for placing
 * breakpoints.  When this code is compiled with PMAP_DEBUG mode defined, the
 * 'INLINE' keyword is defined to an empty string.  This way, any function
 * defined to be a 'static INLINE' will become 'outlined' and compiled as
 * a separate function, which is much easier to debug.
 */
#define INLINE  /* nothing */

/*
 * It is sometimes convenient to watch the activity of a particular table
 * in the system.  The following variables are used for that purpose.
 */
a_tmgr_t *pmap_watch_atbl = 0;
b_tmgr_t *pmap_watch_btbl = 0;
c_tmgr_t *pmap_watch_ctbl = 0;

int pmap_debug = 0;
#define DPRINT(args) if (pmap_debug) printf args

#else   /********** Stuff below is defined if NOT debugging **************/

#define INLINE  inline
#define DPRINT(args)  /* nada */

#endif  /* PMAP_DEBUG */
/*********************** END OF DEBUGGING DEFINITIONS ********************/

/*** Management Structure - Memory Layout
 * For every MMU table in the sun3x pmap system there must be a way to
 * manage it; we must know which process is using it, what other tables
 * depend on it, and whether or not it contains any locked pages.  This
 * is solved by the creation of 'table management' or 'tmgr'
 * structures.  One for each MMU table in the system.
 *
 *              MAP OF MEMORY USED BY THE PMAP SYSTEM
 *
 *                      towards lower memory
 * kernAbase -> +-------------------------------------------------------+
 *              | Kernel MMU A level table                              |
 * kernBbase -> +-------------------------------------------------------+
 *              | Kernel MMU B level tables                             |
 * kernCbase -> +-------------------------------------------------------+
 *              |                                                       |
 *              | Kernel MMU C level tables                             |
 *              |                                                       |
 * mmuCbase  -> +-------------------------------------------------------+
 *              | User MMU C level tables                               |
 * mmuAbase  -> +-------------------------------------------------------+
 *              |                                                       |
 *              | User MMU A level tables                               |
 *              |                                                       |
 * mmuBbase  -> +-------------------------------------------------------+
 *              | User MMU B level tables                               |
 * tmgrAbase -> +-------------------------------------------------------+
 *              | TMGR A level table structures                         |
 * tmgrBbase -> +-------------------------------------------------------+
 *              | TMGR B level table structures                         |
 * tmgrCbase -> +-------------------------------------------------------+
 *              | TMGR C level table structures                         |
 * pvbase    -> +-------------------------------------------------------+
 *              | Physical to Virtual mapping table (list heads)        |
 * pvebase   -> +-------------------------------------------------------+
 *              | Physical to Virtual mapping table (list elements)     |
 *              |                                                       |
 *              +-------------------------------------------------------+
 *                      towards higher memory
 *
 * For every A table in the MMU A area, there will be a corresponding
 * a_tmgr structure in the TMGR A area.  The same will be true for
 * the B and C tables.  This arrangement will make it easy to find the
 * controlling tmgr structure for any table in the system by use of
 * (relatively) simple macros.
 */

/*
 * Global variables for storing the base addresses for the areas
 * labeled above.
 */
static vm_offset_t      kernAphys;
static mmu_long_dte_t   *kernAbase;
static mmu_short_dte_t  *kernBbase;
static mmu_short_pte_t  *kernCbase;
static mmu_short_pte_t  *mmuCbase;
static mmu_short_dte_t  *mmuBbase;
static mmu_long_dte_t   *mmuAbase;
static a_tmgr_t         *Atmgrbase;
static b_tmgr_t         *Btmgrbase;
static c_tmgr_t         *Ctmgrbase;
static pv_t             *pvbase;
static pv_elem_t        *pvebase;
struct pmap             kernel_pmap;

/*
 * This holds the CRP currently loaded into the MMU.
 */
struct mmu_rootptr kernel_crp;

/*
 * Just all around global variables.
 */
static TAILQ_HEAD(a_pool_head_struct, a_tmgr_struct) a_pool;
static TAILQ_HEAD(b_pool_head_struct, b_tmgr_struct) b_pool;
static TAILQ_HEAD(c_pool_head_struct, c_tmgr_struct) c_pool;


/*
 * Flags used to mark the safety/availability of certain operations or
 * resources.
 */
static boolean_t pv_initialized = FALSE, /* PV system has been initialized. */
        bootstrap_alloc_enabled = FALSE; /* Safe to use pmap_bootstrap_alloc(). */
int tmp_vpages_inuse;   /* Temporary virtual pages are in use */

/*
 * XXX: For now, retain the traditional variables that were
 * used in the old pmap/vm interface (without NONCONTIG).
 */
/* Kernel virtual address space available: */
vm_offset_t virtual_avail, virtual_end;
/* Physical address space available: */
vm_offset_t avail_start, avail_end;

/* This keeps track of the end of the contiguously mapped range. */
vm_offset_t virtual_contig_end;

/* Physical address used by pmap_next_page() */
vm_offset_t avail_next;

/* These are used by pmap_copy_page(), etc. */
vm_offset_t tmp_vpages[2];

/*
 * The 3/80 is the only member of the sun3x family that has non-contiguous
 * physical memory.  Memory is divided into 4 banks which are physically
 * locatable on the system board.  Although the size of these banks varies
 * with the size of memory they contain, their base addresses are
 * permanently fixed.  The following structure, which describes these
 * banks, is initialized by pmap_bootstrap() after it reads from a similar
 * structure provided by the ROM Monitor.
 *
 * For the other machines in the sun3x architecture which do have contiguous
 * RAM, this list will have only one entry, which will describe the entire
 * range of available memory.
 */
struct pmap_physmem_struct avail_mem[SUN3X_NPHYS_RAM_SEGS];
u_int total_phys_mem;

/*************************************************************************/

/*
 * XXX - Should "tune" these based on statistics.
 *
 * My first guess about the relative numbers of these needed is
 * based on the fact that a "typical" process will have several
 * pages mapped at low virtual addresses (text, data, bss), then
 * some mapped shared libraries, and then some stack pages mapped
 * near the high end of the VA space.  Each process can use only
 * one A table, and most will use only two B tables (maybe three)
 * and probably about four C tables.  Therefore, the first guess
 * at the relative numbers of these needed is 1:2:4 -gwr
 *
 * The number of C tables needed is closely related to the amount
 * of physical memory available plus a certain amount attributable
 * to the use of double mappings.  With a few simulation statistics
 * we can find a reasonably good estimation of this unknown value.
 * Armed with that and the above ratios, we have a good idea of what
 * is needed at each level. -j
 *
 * Note: It is not physical memory size, but the total mapped
 * virtual space required by the combined working sets of all the
 * currently _runnable_ processes.  (Sleeping ones don't count.)
 * The amount of physical memory should be irrelevant. -gwr
 */
#ifdef  FIXED_NTABLES
#define NUM_A_TABLES    16
#define NUM_B_TABLES    32
#define NUM_C_TABLES    64
#else
unsigned int    NUM_A_TABLES, NUM_B_TABLES, NUM_C_TABLES;
#endif  /* FIXED_NTABLES */

/*
 * This determines our total virtual mapping capacity.
 * Yes, it is a FIXED value so we can pre-allocate.
 */
#define NUM_USER_PTES   (NUM_C_TABLES * MMU_C_TBL_SIZE)

/*
 * The size of the Kernel Virtual Address Space (KVAS)
 * for purposes of MMU table allocation is -KERNBASE
 * (length from KERNBASE to 0xFFFFffff)
 */
#define KVAS_SIZE       (-KERNBASE)

/* Numbers of kernel MMU tables to support KVAS_SIZE. */
#define KERN_B_TABLES   (KVAS_SIZE >> MMU_TIA_SHIFT)
#define KERN_C_TABLES   (KVAS_SIZE >> MMU_TIB_SHIFT)
#define NUM_KERN_PTES   (KVAS_SIZE >> MMU_TIC_SHIFT)
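
/*
 * A worked example of the above (not authoritative; it assumes the usual
 * sun3x value KERNBASE = 0xF8000000 and the 7/6/6/13-bit VA split
 * described at the top of this file, i.e. TIA/TIB/TIC shifts of 25, 19
 * and 13):
 *
 *      KVAS_SIZE     = -0xF8000000      = 0x08000000 (128MB)
 *      KERN_B_TABLES = 0x08000000 >> 25 =     4
 *      KERN_C_TABLES = 0x08000000 >> 19 =   256
 *      NUM_KERN_PTES = 0x08000000 >> 13 = 16384
 */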

/************************** MISCELLANEOUS MACROS *************************/
#define PMAP_LOCK()     ;       /* Nothing, for now */
#define PMAP_UNLOCK()   ;       /* same. */
#define NULL    0

static INLINE void *            mmu_ptov __P((vm_offset_t pa));
static INLINE vm_offset_t       mmu_vtop __P((void * va));

#if 0
static INLINE a_tmgr_t *        mmuA2tmgr __P((mmu_long_dte_t *));
#endif /* 0 */
static INLINE b_tmgr_t *        mmuB2tmgr __P((mmu_short_dte_t *));
static INLINE c_tmgr_t *        mmuC2tmgr __P((mmu_short_pte_t *));

static INLINE pv_t      *pa2pv __P((vm_offset_t pa));
static INLINE int       pteidx __P((mmu_short_pte_t *));
static INLINE pmap_t    current_pmap __P((void));

/*
 * We can always convert between virtual and physical addresses
 * for anything in the range [KERNBASE ... avail_start] because
 * that range is GUARANTEED to be mapped linearly.
 * We rely heavily upon this feature!
 */
static INLINE void *
mmu_ptov(pa)
        vm_offset_t pa;
{
        register vm_offset_t va;

        va = (pa + KERNBASE);
#ifdef  PMAP_DEBUG
        if ((va < KERNBASE) || (va >= virtual_contig_end))
                panic("mmu_ptov");
#endif
        return ((void*)va);
}

static INLINE vm_offset_t
mmu_vtop(vva)
        void *vva;
{
        register vm_offset_t va;

        va = (vm_offset_t)vva;
#ifdef  PMAP_DEBUG
        if ((va < KERNBASE) || (va >= virtual_contig_end))
                panic("mmu_vtop");
#endif
        return (va - KERNBASE);
}

/*
 * These macros map MMU tables to their corresponding manager structures.
 * They are needed quite often because many of the pointers in the pmap
 * system reference MMU tables and not the structures that control them.
 * There needs to be a way to find one when given the other and these
 * macros do so by taking advantage of the memory layout described above.
 * Here's a quick step through the first macro, mmuA2tmgr():
 *
 * 1) find the offset of the given MMU A table from the base of its table
 *    pool (table - mmuAbase).
 * 2) convert this offset into a table index by dividing it by the
 *    size of one MMU 'A' table. (sizeof(mmu_long_dte_t) * MMU_A_TBL_SIZE)
 * 3) use this index to select the corresponding 'A' table manager
 *    structure from the 'A' table manager pool (Atmgrbase[index]).
 */
/* This function is not currently used. */
#if 0
static INLINE a_tmgr_t *
mmuA2tmgr(mmuAtbl)
        mmu_long_dte_t *mmuAtbl;
{
        register int idx;

        /* Which table is this in? */
        idx = (mmuAtbl - mmuAbase) / MMU_A_TBL_SIZE;
#ifdef  PMAP_DEBUG
        if ((idx < 0) || (idx >= NUM_A_TABLES))
                panic("mmuA2tmgr");
#endif
        return (&Atmgrbase[idx]);
}
#endif  /* 0 */

static INLINE b_tmgr_t *
mmuB2tmgr(mmuBtbl)
        mmu_short_dte_t *mmuBtbl;
{
        register int idx;

        /* Which table is this in? */
        idx = (mmuBtbl - mmuBbase) / MMU_B_TBL_SIZE;
#ifdef  PMAP_DEBUG
        if ((idx < 0) || (idx >= NUM_B_TABLES))
                panic("mmuB2tmgr");
#endif
        return (&Btmgrbase[idx]);
}

/* mmuC2tmgr                    INTERNAL
 **
 * Given a pte known to belong to a C table, return the address of
 * that table's management structure.
 */
static INLINE c_tmgr_t *
mmuC2tmgr(mmuCtbl)
        mmu_short_pte_t *mmuCtbl;
{
        register int idx;

        /* Which table is this in? */
        idx = (mmuCtbl - mmuCbase) / MMU_C_TBL_SIZE;
#ifdef  PMAP_DEBUG
        if ((idx < 0) || (idx >= NUM_C_TABLES))
                panic("mmuC2tmgr");
#endif
        return (&Ctmgrbase[idx]);
}

/* This is now a function call below.
 * #define pa2pv(pa) \
 *      (&pvbase[(unsigned long)\
 *              m68k_btop(pa)\
 *      ])
 */

/* pa2pv                        INTERNAL
 **
 * Return the pv_list_head element which manages the given physical
 * address.
 */
static INLINE pv_t *
pa2pv(pa)
        vm_offset_t pa;
{
        register struct pmap_physmem_struct *bank;
        register int idx;

        bank = &avail_mem[0];
        while (pa >= bank->pmem_end)
                bank = bank->pmem_next;

        pa -= bank->pmem_start;
        idx = bank->pmem_pvbase + m68k_btop(pa);
#ifdef  PMAP_DEBUG
        if ((idx < 0) || (idx >= physmem))
                panic("pa2pv");
#endif
        return &pvbase[idx];
}

/* pteidx                       INTERNAL
 **
 * Return the index of the given PTE within the entire fixed table of
 * PTEs.
 */
static INLINE int
pteidx(pte)
        mmu_short_pte_t *pte;
{
        return (pte - kernCbase);
}

/*
 * This just offers a place to put some debugging checks,
 * and reduces the number of places "curproc" appears...
 */
static INLINE pmap_t
current_pmap()
{
        struct proc *p;
        struct vmspace *vm;
        vm_map_t map;
        pmap_t  pmap;

        p = curproc;    /* XXX */
        if (p == NULL)
                pmap = &kernel_pmap;
        else {
                vm = p->p_vmspace;
                map = &vm->vm_map;
                pmap = vm_map_pmap(map);
        }

        return (pmap);
}


/*************************** FUNCTION DEFINITIONS ************************
 * These appear here merely for the compiler to enforce type checking on *
 * all function calls.                                                   *
 *************************************************************************/

/** External functions
 ** - functions used within this module but written elsewhere.
 **   both of these functions are in locore.s
 ** XXX - These functions were later replaced with their more cryptic
 **   hp300 counterparts.  They may be removed now.
 **/
#if 0   /* deprecated mmu */
void mmu_seturp __P((vm_offset_t));
void mmu_flush __P((int, vm_offset_t));
void mmu_flusha __P((void));
#endif  /* 0 */

/** Internal functions
 ** - all functions used only within this module are defined in
 **   pmap_pvt.h
 **/

/** Interface functions
 ** - functions required by the Mach VM Pmap interface, with MACHINE_CONTIG
 **   defined.
 **/
#ifdef  INCLUDED_IN_PMAP_H
void pmap_bootstrap __P((void));
void *pmap_bootstrap_alloc __P((int));
void pmap_enter __P((pmap_t, vm_offset_t, vm_offset_t, vm_prot_t, boolean_t));
pmap_t pmap_create __P((vm_size_t));
void pmap_destroy __P((pmap_t));
void pmap_reference __P((pmap_t));
boolean_t pmap_is_referenced __P((vm_offset_t));
boolean_t pmap_is_modified __P((vm_offset_t));
void pmap_clear_modify __P((vm_offset_t));
vm_offset_t pmap_extract __P((pmap_t, vm_offset_t));
int pmap_page_index __P((vm_offset_t));
u_int pmap_free_pages __P((void));
#endif  /* INCLUDED_IN_PMAP_H */
void pmap_pinit __P((pmap_t));
void pmap_release __P((pmap_t));

/********************************** CODE ********************************
 * Functions that are called from other parts of the kernel are labeled *
 * as 'INTERFACE' functions.  Functions that are only called from       *
 * within the pmap module are labeled as 'INTERNAL' functions.          *
 * Functions that are internal, but are not (currently) used at all are *
 * labeled 'INTERNAL_X'.                                                *
 ************************************************************************/

/* pmap_bootstrap               INTERNAL
 **
 * Initializes the pmap system.  Called at boot time from
 * locore2.c:_vm_init()
 *
 * Reminder: having a pmap_bootstrap_alloc() and also having the VM
 * system implement pmap_steal_memory() is redundant.
 * Don't release this code without removing one or the other!
 */
void
pmap_bootstrap(nextva)
        vm_offset_t nextva;
{
        struct physmemory *membank;
        struct pmap_physmem_struct *pmap_membank;
        vm_offset_t va, pa, eva;
        int b, c, i, j; /* running table counts */
        int size;

        /*
         * This function is called by __bootstrap after it has
         * determined the type of machine and made the appropriate
         * patches to the ROM vectors (XXX- I don't quite know what I meant
         * by that.)  It allocates and sets up enough of the pmap system
         * to manage the kernel's address space.
         */

        /*
         * Determine the range of kernel virtual and physical
         * space available.  Note that we ABSOLUTELY DEPEND on
         * the fact that the first bank of memory (4MB) is
         * mapped linearly to KERNBASE (which we guaranteed in
         * the first instructions of locore.s).
         * That is plenty for our bootstrap work.
         */
        virtual_avail = m68k_round_page(nextva);
        virtual_contig_end = KERNBASE + 0x400000; /* +4MB */
        virtual_end = VM_MAX_KERNEL_ADDRESS;
        /* Don't need avail_start til later. */

        /* We may now call pmap_bootstrap_alloc(). */
        bootstrap_alloc_enabled = TRUE;

        /*
         * This is a somewhat unwrapped loop to deal with
         * copying the PROM's 'physmem' banks into the pmap's
         * banks.  The following is always assumed:
         * 1. There is always at least one bank of memory.
         * 2. There is always a last bank of memory, and its
         *    pmem_next member must be set to NULL.
         * XXX - Use: do { ... } while (membank->next) instead?
         * XXX - Why copy this stuff at all? -gwr
         *     - It is needed in pa2pv().
         */
        membank = romVectorPtr->v_physmemory;
        pmap_membank = avail_mem;
        total_phys_mem = 0;

        while (membank->next) {
                pmap_membank->pmem_start = membank->address;
                pmap_membank->pmem_end = membank->address + membank->size;
                total_phys_mem += membank->size;
                /*
                 * This silly syntax arises because pmap_membank
                 * is really a pre-allocated array, but it is put into
                 * use as a linked list.
                 */
                pmap_membank->pmem_next = pmap_membank + 1;
                pmap_membank = pmap_membank->pmem_next;
                membank = membank->next;
        }

        /*
         * XXX The last bank of memory should be reduced to exclude the
         * physical pages needed by the PROM monitor from being used
         * in the VM system.  XXX - See below - Fix!
         */
        pmap_membank->pmem_start = membank->address;
        pmap_membank->pmem_end = membank->address + membank->size;
        pmap_membank->pmem_next = NULL;

#if 0   /* XXX - Need to integrate this! */
        /*
         * The last few pages of physical memory are "owned" by
         * the PROM.  The total amount of memory we are allowed
         * to use is given by the romvec pointer. -gwr
         *
         * We should dedicate different variables for 'useable'
         * and 'physically available'.  Most users are used to the
         * kernel reporting the amount of memory 'physically available'
         * as opposed to 'useable by the kernel' at boot time. -j
         */
        total_phys_mem = *romVectorPtr->memoryAvail;
#endif  /* XXX */

        total_phys_mem += membank->size;        /* XXX see above */
        physmem = btoc(total_phys_mem);

        /*
         * Avail_end is set to the first byte of physical memory
         * after the end of the last bank.  We use this only to
         * determine if a physical address is "managed" memory.
         *
         * XXX - The setting of avail_end is a temporary ROM saving hack.
         */
        avail_end = pmap_membank->pmem_end -
            (total_phys_mem - *romVectorPtr->memoryAvail);
        avail_end = m68k_trunc_page(avail_end);

        /*
         * First allocate enough kernel MMU tables to map all
         * of kernel virtual space from KERNBASE to 0xFFFFFFFF.
         * Note: All must be aligned on 256 byte boundaries.
         * Start with the level-A table (one of those).
         */
        size = sizeof(mmu_long_dte_t) * MMU_A_TBL_SIZE;
        kernAbase = pmap_bootstrap_alloc(size);
        bzero(kernAbase, size);

        /* Now the level-B kernel tables... */
        size = sizeof(mmu_short_dte_t) * MMU_B_TBL_SIZE * KERN_B_TABLES;
        kernBbase = pmap_bootstrap_alloc(size);
        bzero(kernBbase, size);

        /* Now the level-C kernel tables... */
        size = sizeof(mmu_short_pte_t) * MMU_C_TBL_SIZE * KERN_C_TABLES;
        kernCbase = pmap_bootstrap_alloc(size);
        bzero(kernCbase, size);
        /*
         * Note: In order for the PV system to work correctly, the kernel
         * and user-level C tables must be allocated contiguously.
         * Nothing should be allocated between here and the allocation of
         * mmuCbase below.  XXX: Should do this as one allocation, and
         * then compute a pointer for mmuCbase instead of this...
         *
         * Allocate user MMU tables.
         * These must be contiguous with the preceding.
         */

#ifndef FIXED_NTABLES
        /*
         * The number of user-level C tables that should be allocated is
         * related to the size of physical memory.  In general, there should
         * be enough tables to map four times the amount of available RAM.
         * The extra amount is needed because some table space is wasted by
         * fragmentation.
         */
        NUM_C_TABLES = (total_phys_mem * 4) / (MMU_C_TBL_SIZE * MMU_PAGE_SIZE);
        NUM_B_TABLES = NUM_C_TABLES / 2;
        NUM_A_TABLES = NUM_B_TABLES / 2;
#endif  /* !FIXED_NTABLES */
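
        /*
         * A worked example of the sizing above, assuming the values
         * MMU_C_TBL_SIZE = 64 and MMU_PAGE_SIZE = 8192 (so one C table
         * maps 512KB), on a hypothetical 16MB machine:
         *
         *      NUM_C_TABLES = (16MB * 4) / 512KB = 128
         *      NUM_B_TABLES = 128 / 2            =  64
         *      NUM_A_TABLES =  64 / 2            =  32
         */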

        size = sizeof(mmu_short_pte_t) * MMU_C_TBL_SIZE * NUM_C_TABLES;
        mmuCbase = pmap_bootstrap_alloc(size);

        size = sizeof(mmu_short_dte_t) * MMU_B_TBL_SIZE * NUM_B_TABLES;
        mmuBbase = pmap_bootstrap_alloc(size);

        size = sizeof(mmu_long_dte_t) * MMU_A_TBL_SIZE * NUM_A_TABLES;
        mmuAbase = pmap_bootstrap_alloc(size);

        /*
         * Fill in the never-changing part of the kernel tables.
         * For simplicity, the kernel's mappings will be editable as a
         * flat array of page table entries at kernCbase.  The
         * higher level 'A' and 'B' tables must be initialized to point
         * to this lower one.
         */
        b = c = 0;

        /*
         * Invalidate all mappings below KERNBASE in the A table.
         * This area has already been zeroed out, but it is good
         * practice to explicitly show that we are interpreting
         * it as a list of A table descriptors.
         */
        for (i = 0; i < MMU_TIA(KERNBASE); i++) {
                kernAbase[i].addr.raw = 0;
        }

        /*
         * Set up the kernel A and B tables so that they will reference the
         * correct spots in the contiguous table of PTEs allocated for the
         * kernel's virtual memory space.
         */
        for (i = MMU_TIA(KERNBASE); i < MMU_A_TBL_SIZE; i++) {
                kernAbase[i].attr.raw =
                    MMU_LONG_DTE_LU | MMU_LONG_DTE_SUPV | MMU_DT_SHORT;
                kernAbase[i].addr.raw = mmu_vtop(&kernBbase[b]);

                for (j=0; j < MMU_B_TBL_SIZE; j++) {
                        kernBbase[b + j].attr.raw = mmu_vtop(&kernCbase[c])
                            | MMU_DT_SHORT;
                        c += MMU_C_TBL_SIZE;
                }
                b += MMU_B_TBL_SIZE;
        }

        /* XXX - Doing kernel_pmap a little further down. */

        pmap_alloc_usermmu();   /* Allocate user MMU tables.        */
        pmap_alloc_usertmgr();  /* Allocate user MMU table managers.*/
        pmap_alloc_pv();        /* Allocate physical->virtual map.  */

        /*
         * We are now done with pmap_bootstrap_alloc().  Round up
         * `virtual_avail' to the nearest page, and set the flag
         * to prevent use of pmap_bootstrap_alloc() hereafter.
         */
        pmap_bootstrap_aalign(NBPG);
        bootstrap_alloc_enabled = FALSE;

        /*
         * Now that we are done with pmap_bootstrap_alloc(), we
         * must save the virtual and physical addresses of the
         * end of the linearly mapped range, which are stored in
         * virtual_contig_end and avail_start, respectively.
         * These variables will never change after this point.
         */
        virtual_contig_end = virtual_avail;
        avail_start = virtual_avail - KERNBASE;

        /*
         * `avail_next' is a running pointer used by pmap_next_page() to
         * keep track of the next available physical page to be handed
         * to the VM system during its initialization, in which it
         * asks for physical pages, one at a time.
         */
        avail_next = avail_start;

        /*
         * Now allocate some virtual addresses, but not the physical pages
         * behind them.  Note that virtual_avail is already page-aligned.
         *
         * tmp_vpages[] is an array of two virtual pages used for temporary
         * kernel mappings in the pmap module to facilitate various physical
         * address-oriented operations.
         */
        tmp_vpages[0] = virtual_avail;
        virtual_avail += NBPG;
        tmp_vpages[1] = virtual_avail;
        virtual_avail += NBPG;

        /** Initialize the PV system **/
        pmap_init_pv();

        /*
         * Fill in the kernel_pmap structure and kernel_crp.
         */
        kernAphys = mmu_vtop(kernAbase);
        kernel_pmap.pm_a_tmgr = NULL;
        kernel_pmap.pm_a_phys = kernAphys;
        kernel_pmap.pm_refcount = 1; /* always in use */

        kernel_crp.rp_attr = MMU_LONG_DTE_LU | MMU_DT_LONG;
        kernel_crp.rp_addr = kernAphys;

        /*
         * Now pmap_enter_kernel() may be used safely and will be
         * the main interface used hereafter to modify the kernel's
         * virtual address space.  Note that since we are still running
         * under the PROM's address table, none of these table modifications
         * actually take effect until pmap_takeover_mmu() is called.
         *
         * Note: Our tables do NOT have the PROM linear mappings!
         * Only the mappings created here exist in our tables, so
         * remember to map anything we expect to use.
         */
        va = (vm_offset_t) KERNBASE;
        pa = 0;

        /*
         * The first page of the kernel virtual address space is the msgbuf
         * page.  The page attributes (data, non-cached) are set here, while
         * the address is assigned to this global pointer in cpu_startup().
         * It is non-cached, mostly due to paranoia.
         */
        pmap_enter_kernel(va, pa|PMAP_NC, VM_PROT_ALL);
        va += NBPG; pa += NBPG;

        /* Next page is used as the temporary stack. */
        pmap_enter_kernel(va, pa, VM_PROT_ALL);
        va += NBPG; pa += NBPG;

        /*
         * Map all of the kernel's text segment as read-only and cacheable.
         * (Cacheable is implied by default).  Unfortunately, the last bytes
         * of kernel text and the first bytes of kernel data will often be
         * sharing the same page.  Therefore, the last page of kernel text
         * has to be mapped as read/write, to accommodate the data.
         */
        eva = m68k_trunc_page((vm_offset_t)etext);
        for (; va < eva; va += NBPG, pa += NBPG)
                pmap_enter_kernel(va, pa, VM_PROT_READ|VM_PROT_EXECUTE);

        /*
         * Map all of the kernel's data as read/write and cacheable.
         * This includes: data, BSS, symbols, and everything in the
         * contiguous memory used by pmap_bootstrap_alloc()
         */
        for (; pa < avail_start; va += NBPG, pa += NBPG)
                pmap_enter_kernel(va, pa, VM_PROT_READ|VM_PROT_WRITE);

        /*
         * At this point we are almost ready to take over the MMU.  But first
         * we must save the PROM's address space in our map, as we call its
         * routines and make references to its data later in the kernel.
         */
        pmap_bootstrap_copyprom();
        pmap_takeover_mmu();
        pmap_bootstrap_setprom();

        /* Notify the VM system of our page size. */
        PAGE_SIZE = NBPG;
        vm_set_page_size();
}


/* pmap_alloc_usermmu            INTERNAL
 **
 * Called from pmap_bootstrap() to allocate MMU tables that will
 * eventually be used for user mappings.
 */
void
pmap_alloc_usermmu()
{
        /* XXX: Moved into caller. */
}

/* pmap_alloc_pv                 INTERNAL
 **
 * Called from pmap_bootstrap() to allocate the physical
 * to virtual mapping list.  Each physical page of memory
 * in the system has a corresponding element in this list.
 */
void
pmap_alloc_pv()
{
        int i;
        unsigned int total_mem;

        /*
         * Allocate a pv_head structure for every page of physical
         * memory that will be managed by the system.  Since memory on
         * the 3/80 is non-contiguous, we cannot arrive at a total page
         * count by subtraction of the lowest available address from the
         * highest, but rather we have to step through each memory
         * bank and add the number of pages in each to the total.
         *
         * At this time we also initialize the offset of each bank's
         * starting pv_head within the pv_head list so that the physical
         * memory state routines (pmap_is_referenced(),
         * pmap_is_modified(), et al.) can quickly find corresponding
         * pv_heads in spite of the non-contiguity.
         */
        total_mem = 0;
        for (i = 0; i < SUN3X_NPHYS_RAM_SEGS; i++) {
                avail_mem[i].pmem_pvbase = m68k_btop(total_mem);
                total_mem += avail_mem[i].pmem_end -
                    avail_mem[i].pmem_start;
                if (avail_mem[i].pmem_next == NULL)
                        break;
        }
#ifdef  PMAP_DEBUG
        if (total_mem != total_phys_mem)
                panic("pmap_alloc_pv did not arrive at correct page count");
#endif

        pvbase = (pv_t *) pmap_bootstrap_alloc(sizeof(pv_t) *
            m68k_btop(total_phys_mem));
}

/* pmap_alloc_usertmgr           INTERNAL
 **
 * Called from pmap_bootstrap() to allocate the structures which
 * facilitate management of user MMU tables.  Each user MMU table
 * in the system has one such structure associated with it.
 */
void
pmap_alloc_usertmgr()
{
        /* Allocate user MMU table managers */
        /* It would be a lot simpler to just make these BSS, but */
        /* we may want to change their size at boot time... -j */
        Atmgrbase = (a_tmgr_t *) pmap_bootstrap_alloc(sizeof(a_tmgr_t)
            * NUM_A_TABLES);
        Btmgrbase = (b_tmgr_t *) pmap_bootstrap_alloc(sizeof(b_tmgr_t)
            * NUM_B_TABLES);
        Ctmgrbase = (c_tmgr_t *) pmap_bootstrap_alloc(sizeof(c_tmgr_t)
            * NUM_C_TABLES);

        /*
         * Allocate PV list elements for the physical to virtual
         * mapping system.
         */
        pvebase = (pv_elem_t *) pmap_bootstrap_alloc(
            sizeof(pv_elem_t) * (NUM_USER_PTES + NUM_KERN_PTES));
}

/* pmap_bootstrap_copyprom()     INTERNAL
 **
 * Copy the PROM mappings into our own tables.  Note, we
 * can use physical addresses until __bootstrap returns.
 */
void
pmap_bootstrap_copyprom()
{
        struct sunromvec *romp;
        int *mon_ctbl;
        mmu_short_pte_t *kpte;
        int i, len;

        romp = romVectorPtr;

        /*
         * Copy the mappings in SUN3X_MON_KDB_BASE...SUN3X_MONEND
         * Note: mon_ctbl[0] maps SUN3X_MON_KDB_BASE
         */
        mon_ctbl = *romp->monptaddr;
        i = m68k_btop(SUN3X_MON_KDB_BASE - KERNBASE);
        kpte = &kernCbase[i];
        len = m68k_btop(SUN3X_MONEND - SUN3X_MON_KDB_BASE);

        for (i = 0; i < len; i++) {
                kpte[i].attr.raw = mon_ctbl[i];
        }

        /*
         * Copy the mappings at MON_DVMA_BASE (to the end).
         * Note, in here, mon_ctbl[0] maps MON_DVMA_BASE.
         * Actually, we only want the last page, which the
         * PROM has set up for use by the "ie" driver.
         * (The i82586 needs its SCP there.)
         * If we copy all the mappings, pmap_enter_kernel
         * may complain about finding valid PTEs that are
         * not recorded in our PV lists...
         */
        mon_ctbl = *romp->shadowpteaddr;
        i = m68k_btop(SUN3X_MON_DVMA_BASE - KERNBASE);
        kpte = &kernCbase[i];
        len = m68k_btop(SUN3X_MON_DVMA_SIZE);
        for (i = (len-1); i < len; i++) {
                kpte[i].attr.raw = mon_ctbl[i];
        }
}

/* pmap_takeover_mmu             INTERNAL
 **
 * Called from pmap_bootstrap() after it has copied enough of the
 * PROM mappings into the kernel map so that we can use our own
 * MMU table.
 */
void
pmap_takeover_mmu()
{

        loadcrp(&kernel_crp);
}

/* pmap_bootstrap_setprom()      INTERNAL
 **
 * Set the PROM mappings so it can see kernel space.
 * Note that physical addresses are used here, which
 * we can get away with because this runs with the
 * low 1GB set for transparent translation.
 */
void
pmap_bootstrap_setprom()
{
        mmu_long_dte_t *mon_dte;
        extern struct mmu_rootptr mon_crp;
        int i;

        mon_dte = (mmu_long_dte_t *) mon_crp.rp_addr;
        for (i = MMU_TIA(KERNBASE); i < MMU_TIA(KERN_END); i++) {
                mon_dte[i].attr.raw = kernAbase[i].attr.raw;
                mon_dte[i].addr.raw = kernAbase[i].addr.raw;
        }
}


/* pmap_init                     INTERFACE
 **
 * Called at the end of vm_init() to set up the pmap system to go
 * into full time operation.  All initialization of kernel_pmap
 * should be already done by now, so this should just do things
 * needed for user-level pmaps to work.
 */
void
pmap_init()
{
        /** Initialize the manager pools **/
        TAILQ_INIT(&a_pool);
        TAILQ_INIT(&b_pool);
        TAILQ_INIT(&c_pool);

        /**************************************************************
         * Initialize all tmgr structures and MMU tables they manage. *
         **************************************************************/
        /** Initialize A tables **/
        pmap_init_a_tables();
        /** Initialize B tables **/
        pmap_init_b_tables();
        /** Initialize C tables **/
        pmap_init_c_tables();
}

/* pmap_init_a_tables()          INTERNAL
 **
 * Initializes all A managers, their MMU A tables, and inserts
 * them into the A manager pool for use by the system.
 */
void
pmap_init_a_tables()
{
        int i;
        a_tmgr_t *a_tbl;

        for (i=0; i < NUM_A_TABLES; i++) {
                /* Select the next available A manager from the pool */
                a_tbl = &Atmgrbase[i];

                /*
                 * Clear its parent entry.  Set its wired and valid
                 * entry count to zero.
                 */
                a_tbl->at_parent = NULL;
                a_tbl->at_wcnt = a_tbl->at_ecnt = 0;

                /* Assign it the next available MMU A table from the pool */
                a_tbl->at_dtbl = &mmuAbase[i * MMU_A_TBL_SIZE];

                /*
                 * Initialize the MMU A table with the table in the `proc0',
                 * or kernel, mapping.  This ensures that every process has
                 * the kernel mapped in the top part of its address space.
                 */
                bcopy(kernAbase, a_tbl->at_dtbl, MMU_A_TBL_SIZE *
                    sizeof(mmu_long_dte_t));

                /*
                 * Finally, insert the manager into the A pool,
                 * making it ready to be used by the system.
                 */
                TAILQ_INSERT_TAIL(&a_pool, a_tbl, at_link);
        }
}

/* pmap_init_b_tables()          INTERNAL
 **
 * Initializes all B table managers, their MMU B tables, and
 * inserts them into the B manager pool for use by the system.
 */
void
pmap_init_b_tables()
{
        int i,j;
        b_tmgr_t *b_tbl;

        for (i=0; i < NUM_B_TABLES; i++) {
                /* Select the next available B manager from the pool */
                b_tbl = &Btmgrbase[i];

                b_tbl->bt_parent = NULL;        /* clear its parent,  */
                b_tbl->bt_pidx = 0;             /* parent index,      */
                b_tbl->bt_wcnt = 0;             /* wired entry count, */
                b_tbl->bt_ecnt = 0;             /* valid entry count. */

                /* Assign it the next available MMU B table from the pool */
                b_tbl->bt_dtbl = &mmuBbase[i * MMU_B_TBL_SIZE];

                /* Invalidate every descriptor in the table */
                for (j=0; j < MMU_B_TBL_SIZE; j++)
                        b_tbl->bt_dtbl[j].attr.raw = MMU_DT_INVALID;

                /* Insert the manager into the B pool */
                TAILQ_INSERT_TAIL(&b_pool, b_tbl, bt_link);
        }
}

/* pmap_init_c_tables()          INTERNAL
 **
 * Initializes all C table managers, their MMU C tables, and
 * inserts them into the C manager pool for use by the system.
 */
void
pmap_init_c_tables()
{
        int i,j;
        c_tmgr_t *c_tbl;

        for (i=0; i < NUM_C_TABLES; i++) {
                /* Select the next available C manager from the pool */
                c_tbl = &Ctmgrbase[i];

                c_tbl->ct_parent = NULL;        /* clear its parent,     */
                c_tbl->ct_pidx = 0;             /* parent index,         */
                c_tbl->ct_wcnt = 0;             /* wired entry count,    */
                c_tbl->ct_ecnt = 0;             /* valid entry count,    */
                c_tbl->ct_pmap = NULL;          /* parent pmap,          */
                c_tbl->ct_va = 0;               /* base of managed range */

                /* Assign it the next available MMU C table from the pool */
                c_tbl->ct_dtbl = &mmuCbase[i * MMU_C_TBL_SIZE];

                for (j=0; j < MMU_C_TBL_SIZE; j++)
                        c_tbl->ct_dtbl[j].attr.raw = MMU_DT_INVALID;

                TAILQ_INSERT_TAIL(&c_pool, c_tbl, ct_link);
        }
}

/* pmap_init_pv()                INTERNAL
 **
 * Initializes the Physical to Virtual mapping system.
 */
void
pmap_init_pv()
{
        int i;

        /* Initialize every PV head. */
        for (i = 0; i < m68k_btop(total_phys_mem); i++) {
                pvbase[i].pv_idx = PVE_EOL;     /* Indicate no mappings */
                pvbase[i].pv_flags = 0;         /* Zero out page flags  */
        }

        pv_initialized = TRUE;
}

/* get_a_table                   INTERNAL
 **
 * Retrieve and return a level A table for use in a user map.
 */
a_tmgr_t *
get_a_table()
{
        a_tmgr_t *tbl;
        pmap_t pmap;

        /* Get the top A table in the pool */
        tbl = a_pool.tqh_first;
        if (tbl == NULL) {
                /*
                 * XXX - Instead of panicking here and in other get_x_table
                 * functions, we do have the option of sleeping on the head of
                 * the table pool.  Any function which updates the table pool
                 * would then issue a wakeup() on the head, thus waking up any
                 * processes waiting for a table.
                 *
                 * Actually, the place to sleep would be when some process
                 * asks for a "wired" mapping that would run us short of
                 * mapping resources.  This design DEPENDS on always having
                 * some mapping resources in the pool for stealing, so we
                 * must make sure we NEVER let the pool become empty. -gwr
                 */
                panic("get_a_table: out of A tables.");
        }

        TAILQ_REMOVE(&a_pool, tbl, at_link);
        /*
         * If the table has a non-null parent pointer then it is in use.
         * Forcibly abduct it from its parent and clear its entries.
         * No re-entrancy worries here.  This table would not be in the
         * table pool unless it was available for use.
         *
         * Note that the second argument to free_a_table() is FALSE.  This
         * indicates that the table should not be relinked into the A table
         * pool.  That is a job for the function that called us.
         */
        if (tbl->at_parent) {
                pmap = tbl->at_parent;
                free_a_table(tbl, FALSE);
                pmap->pm_a_tmgr = NULL;
                pmap->pm_a_phys = kernAphys;
        }
#ifdef  NON_REENTRANT
        /*
         * If the table isn't to be wired down, re-insert it at the
         * end of the pool.
         */
        if (!wired)
                /*
                 * Quandary - XXX
                 * Would it be better to let the calling function insert this
                 * table into the queue?  By inserting it here, we are allowing
                 * it to be stolen immediately.  The calling function is
                 * probably not expecting to use a table that it is not
                 * assured full control of.
                 * Answer - In the interest of re-entrancy, it is best to let
                 * the calling function determine when a table is available
                 * for use.  Therefore this code block is not used.
                 */
                TAILQ_INSERT_TAIL(&a_pool, tbl, at_link);
#endif  /* NON_REENTRANT */
        return tbl;
}

/* get_b_table                   INTERNAL
 **
 * Return a level B table for use.
 */
b_tmgr_t *
get_b_table()
{
        b_tmgr_t *tbl;

        /* See 'get_a_table' for comments. */
        tbl = b_pool.tqh_first;
        if (tbl == NULL)
                panic("get_b_table: out of B tables.");
        TAILQ_REMOVE(&b_pool, tbl, bt_link);
        if (tbl->bt_parent) {
                tbl->bt_parent->at_dtbl[tbl->bt_pidx].attr.raw = MMU_DT_INVALID;
                tbl->bt_parent->at_ecnt--;
                free_b_table(tbl, FALSE);
        }
#ifdef  NON_REENTRANT
        if (!wired)
                /* XXX see quandary in get_a_table */
                /* XXX start lock */
                TAILQ_INSERT_TAIL(&b_pool, tbl, bt_link);
                /* XXX end lock */
#endif  /* NON_REENTRANT */
        return tbl;
}

/* get_c_table                   INTERNAL
 **
 * Return a level C table for use.
 */
c_tmgr_t *
get_c_table()
{
        c_tmgr_t *tbl;

        /* See 'get_a_table' for comments */
        tbl = c_pool.tqh_first;
        if (tbl == NULL)
                panic("get_c_table: out of C tables.");
        TAILQ_REMOVE(&c_pool, tbl, ct_link);
        if (tbl->ct_parent) {
                tbl->ct_parent->bt_dtbl[tbl->ct_pidx].attr.raw = MMU_DT_INVALID;
                tbl->ct_parent->bt_ecnt--;
                free_c_table(tbl, FALSE);
        }
#ifdef  NON_REENTRANT
        if (!wired)
                /* XXX See quandary in get_a_table */
                /* XXX start lock */
                TAILQ_INSERT_TAIL(&c_pool, tbl, ct_link);
                /* XXX end lock */
#endif  /* NON_REENTRANT */

        return tbl;
}

/*
 * The following 'free_table' and 'steal_table' functions are called to
 * detach tables from their current obligations (parents and children) and
 * prepare them for reuse in another mapping.
 *
 * Free_table is used when the calling function will handle the fate
 * of the parent table, such as returning it to the free pool when it has
 * no valid entries.  Functions that do not want to handle this should
 * call steal_table, in which the parent table's descriptors and entry
 * count are automatically modified when this table is removed.
 */

/* free_a_table                  INTERNAL
 **
 * Unmaps the given A table and all child tables from their current
 * mappings.  Returns the number of pages that were invalidated.
 * If 'relink' is true, the function will return the table to the head
 * of the available table pool.
 *
 * Cache note: The MC68851 will automatically flush all
 * descriptors derived from a given A table from its
 * Address Translation Cache (ATC) if we issue a
 * 'PFLUSHR' instruction with the base address of the
 * table.  This function should do so, and does.
 * Note note: We are using an MC68030 - there is no
 * PFLUSHR.
 */
int
free_a_table(a_tbl, relink)
        a_tmgr_t *a_tbl;
        boolean_t relink;
{
        int i, removed_cnt;
        mmu_long_dte_t  *dte;
        mmu_short_dte_t *dtbl;
        b_tmgr_t        *tmgr;

        /*
         * Flush the ATC cache of all cached descriptors derived
         * from this table.
         * Sun3x does not use 68851's cached table feature
         * flush_atc_crp(mmu_vtop(a_tbl->dte));
         */

        /*
         * Remove any pending cache flushes that were designated
         * for the pmap this A table belongs to.
         * a_tbl->parent->atc_flushq[0] = 0;
         * Not implemented in sun3x.
         */

        /*
         * All A tables in the system should retain a map for the
         * kernel.  If the table contains any valid descriptors
         * (other than those for the kernel area), invalidate them all,
         * stopping short of the kernel's entries.
         */
        removed_cnt = 0;
        if (a_tbl->at_ecnt) {
                dte = a_tbl->at_dtbl;
                for (i=0; i < MMU_TIA(KERNBASE); i++) {
                        /*
                         * If a table entry points to a valid B table, free
                         * it and its children.
                         */
                        if (MMU_VALID_DT(dte[i])) {
                                /*
                                 * The following block does several things,
                                 * from innermost expression to the
                                 * outermost:
                                 * 1) It extracts the base address of the
                                 *    B table pointed to in the A table
                                 *    entry dte[i].
                                 * 2) It converts this base address into
                                 *    the virtual address it can be
                                 *    accessed with. (all MMU tables point
                                 *    to physical addresses.)
                                 * 3) It finds the corresponding manager
                                 *    structure which manages this MMU table.
                                 * 4) It frees the manager structure.
                                 *    (This frees the MMU table and all
                                 *    child tables.  See 'free_b_table' for
                                 *    details.)
                                 */
                                dtbl = mmu_ptov(dte[i].addr.raw);
                                tmgr = mmuB2tmgr(dtbl);
                                removed_cnt += free_b_table(tmgr, TRUE);
                                dte[i].attr.raw = MMU_DT_INVALID;
                        }
                }
                a_tbl->at_ecnt = 0;
        }
        if (relink) {
                a_tbl->at_parent = NULL;
                TAILQ_REMOVE(&a_pool, a_tbl, at_link);
                TAILQ_INSERT_HEAD(&a_pool, a_tbl, at_link);
        }
        return removed_cnt;
}

/* free_b_table                  INTERNAL
 **
 * Unmaps the given B table and all its children from their current
 * mappings.  Returns the number of pages that were invalidated.
 * (For comments, see 'free_a_table()').
 */
int
free_b_table(b_tbl, relink)
        b_tmgr_t *b_tbl;
        boolean_t relink;
{
        int i, removed_cnt;
        mmu_short_dte_t *dte;
        mmu_short_pte_t *dtbl;
        c_tmgr_t        *tmgr;

        removed_cnt = 0;
        if (b_tbl->bt_ecnt) {
                dte = b_tbl->bt_dtbl;
                for (i=0; i < MMU_B_TBL_SIZE; i++) {
                        if (MMU_VALID_DT(dte[i])) {
                                dtbl = mmu_ptov(MMU_DTE_PA(dte[i]));
                                tmgr = mmuC2tmgr(dtbl);
                                removed_cnt += free_c_table(tmgr, TRUE);
                                dte[i].attr.raw = MMU_DT_INVALID;
                        }
                }
                b_tbl->bt_ecnt = 0;
        }

        if (relink) {
                b_tbl->bt_parent = NULL;
                TAILQ_REMOVE(&b_pool, b_tbl, bt_link);
                TAILQ_INSERT_HEAD(&b_pool, b_tbl, bt_link);
        }
        return removed_cnt;
}

/* free_c_table                  INTERNAL
 **
 * Unmaps the given C table from use and returns it to the pool for
 * re-use.  Returns the number of pages that were invalidated.
 *
 * This function preserves any physical page modification information
 * contained in the page descriptors within the C table by calling
 * 'pmap_remove_pte().'
 */
int
free_c_table(c_tbl, relink)
        c_tmgr_t *c_tbl;
        boolean_t relink;
{
        int i, removed_cnt;

        removed_cnt = 0;
        if (c_tbl->ct_ecnt) {
                for (i=0; i < MMU_C_TBL_SIZE; i++) {
                        if (MMU_VALID_DT(c_tbl->ct_dtbl[i])) {
                                pmap_remove_pte(&c_tbl->ct_dtbl[i]);
                                removed_cnt++;
                        }
                }
                c_tbl->ct_ecnt = 0;
        }

        if (relink) {
                c_tbl->ct_parent = NULL;
                TAILQ_REMOVE(&c_pool, c_tbl, ct_link);
                TAILQ_INSERT_HEAD(&c_pool, c_tbl, ct_link);
        }
        return removed_cnt;
}

#if 0
/* free_c_table_novalid          INTERNAL
 **
 * Frees the given C table manager without checking to see whether
 * or not it contains any valid page descriptors as it is assumed
 * that it does not.
 */
void
free_c_table_novalid(c_tbl)
        c_tmgr_t *c_tbl;
{
        TAILQ_REMOVE(&c_pool, c_tbl, ct_link);
        TAILQ_INSERT_HEAD(&c_pool, c_tbl, ct_link);
        c_tbl->ct_parent->bt_dtbl[c_tbl->ct_pidx].attr.raw = MMU_DT_INVALID;
        c_tbl->ct_parent->bt_ecnt--;
        /*
         * XXX - Should call equiv. of 'free_b_table_novalid' here if
         * we just removed the last entry of the parent B table.
         * But I want to ensure that this will not endanger pmap_enter()
         * with sudden removal of tables it is working with.
         *
         * We should probably add another field to each table, indicating
         * whether or not it is 'locked', ie. in the process of being
         * modified.
         */
        c_tbl->ct_parent = NULL;
}
#endif

/* pmap_remove_pte               INTERNAL
 **
 * Unmap the given pte and preserve any page modification
 * information by transferring it to the pv head of the
 * physical page it maps to.  This function does not update
 * any reference counts because it is assumed that the calling
 * function will do so.
 */
void
pmap_remove_pte(pte)
        mmu_short_pte_t *pte;
{
        u_short pv_idx, targ_idx;
        int s;
        vm_offset_t pa;
        pv_t *pv;

        pa = MMU_PTE_PA(*pte);
        if (is_managed(pa)) {
                pv = pa2pv(pa);
                targ_idx = pteidx(pte); /* Index of PTE being removed */

                /*
                 * If the PTE being removed is the first (or only) PTE in
                 * the list of PTEs currently mapped to this page, remove the
                 * PTE by changing the index found on the PV head.  Otherwise
                 * a linear search through the list will have to be executed
                 * in order to find the PVE which points to the PTE being
                 * removed, so that it may be modified to point to its new
                 * neighbor.
                 */
                s = splimp();
                pv_idx = pv->pv_idx;    /* Index of first PTE in PV list */
                if (pv_idx == targ_idx) {
                        pv->pv_idx = pvebase[targ_idx].pve_next;
                } else {
                        /*
                         * Find the PV element pointing to the target
                         * element.  Note: may have pv_idx==PVE_EOL
                         */
                        for (;;) {
                                if (pv_idx == PVE_EOL) {
#ifdef  PMAP_DEBUG
                                        printf("pmap_remove_pte: PVE_EOL\n");
                                        Debugger();
#endif
                                        goto pv_not_found;
                                }
                                if (pvebase[pv_idx].pve_next == targ_idx)
                                        break;
                                pv_idx = pvebase[pv_idx].pve_next;
                        }
                        /*
                         * At this point, pv_idx is the index of the PV
                         * element just before the target element in the list.
                         * Unlink the target.
                         */
                        pvebase[pv_idx].pve_next = pvebase[targ_idx].pve_next;
                pv_not_found: ;
                }
                /*
                 * Save the mod/ref bits of the pte by simply
                 * ORing the entire pte onto the pv_flags member
                 * of the pv structure.
                 * There is no need to use a separate bit pattern
                 * for usage information on the pv head than that
                 * which is used on the MMU ptes.
                 */
                pv->pv_flags |= (u_short) pte->attr.raw;
                splx(s);
        }

        pte->attr.raw = MMU_DT_INVALID;
}

/* pmap_stroll                   INTERNAL
 **
 * Retrieve the addresses of all table managers involved in the mapping of
 * the given virtual address.  If the table walk completed successfully,
 * return TRUE.  If it was only partially successful, return FALSE.
 * The table walk performed by this function is important to many other
 * functions in this module.
 *
 * Note: This function ought to be easier to read.
 */
boolean_t
pmap_stroll(pmap, va, a_tbl, b_tbl, c_tbl, pte, a_idx, b_idx, pte_idx)
        pmap_t pmap;
        vm_offset_t va;
        a_tmgr_t **a_tbl;
        b_tmgr_t **b_tbl;
        c_tmgr_t **c_tbl;
        mmu_short_pte_t **pte;
        int *a_idx, *b_idx, *pte_idx;
{
        mmu_long_dte_t *a_dte;  /* A: long descriptor table  */
        mmu_short_dte_t *b_dte; /* B: short descriptor table */

        if (pmap == pmap_kernel())
                return FALSE;

        /* Does the given pmap have its own A table? */
        *a_tbl = pmap->pm_a_tmgr;
        if (*a_tbl == NULL)
                return FALSE;   /* No.  Return unknown. */
        /*
         * Does the A table have a valid B table
         * under the corresponding table entry?
         */
        *a_idx = MMU_TIA(va);
        a_dte = &((*a_tbl)->at_dtbl[*a_idx]);
        if (!MMU_VALID_DT(*a_dte))
                return FALSE;   /* No.  Return unknown. */
        /* Yes.  Extract the B table from the A table. */
        *b_tbl = mmuB2tmgr(mmu_ptov(a_dte->addr.raw));
        /*
         * Does the B table have a valid C table
         * under the corresponding table entry?
         */
        *b_idx = MMU_TIB(va);
        b_dte = &((*b_tbl)->bt_dtbl[*b_idx]);
        if (!MMU_VALID_DT(*b_dte))
                return FALSE;   /* No.  Return unknown. */
        /* Yes.  Extract the C table from the B table. */
        *c_tbl = mmuC2tmgr(mmu_ptov(MMU_DTE_PA(*b_dte)));
        *pte_idx = MMU_TIC(va);
        *pte = &((*c_tbl)->ct_dtbl[*pte_idx]);

        return TRUE;
}
1683
1684 /* pmap_enter INTERFACE
1685 **
1686 * Called by the kernel to map a virtual address
1687 * to a physical address in the given process map.
1688 *
1689 * Note: this function should apply an exclusive lock
1690 * on the pmap system for its duration. (it certainly
1691 * would save my hair!!)
1692 * This function ought to be easier to read.
1693 */
1694 void
1695 pmap_enter(pmap, va, pa, prot, wired)
1696 pmap_t pmap;
1697 vm_offset_t va;
1698 vm_offset_t pa;
1699 vm_prot_t prot;
1700 boolean_t wired;
1701 {
1702 boolean_t insert, managed; /* Marks the need for PV insertion.*/
1703 u_short nidx; /* PV list index */
1704 int s; /* Used for splimp()/splx() */
	int flags; /* Mapping flags, e.g. cache inhibit */
1706 u_int a_idx, b_idx, pte_idx; /* table indices */
1707 a_tmgr_t *a_tbl; /* A: long descriptor table manager */
1708 b_tmgr_t *b_tbl; /* B: short descriptor table manager */
1709 c_tmgr_t *c_tbl; /* C: short page table manager */
1710 mmu_long_dte_t *a_dte; /* A: long descriptor table */
1711 mmu_short_dte_t *b_dte; /* B: short descriptor table */
1712 mmu_short_pte_t *c_pte; /* C: short page descriptor table */
1713 pv_t *pv; /* pv list head */
1714 enum {NONE, NEWA, NEWB, NEWC} llevel; /* used at end */
1715
1716 if (pmap == NULL)
1717 return;
1718 if (pmap == pmap_kernel()) {
1719 pmap_enter_kernel(va, pa, prot);
1720 return;
1721 }
1722
1723 flags = (pa & ~MMU_PAGE_MASK);
1724 pa &= MMU_PAGE_MASK;
1725
1726 /*
1727 * Determine if the physical address being mapped is on-board RAM.
1728 * Any other area of the address space is likely to belong to a
	 * device and hence it would be disastrous to cache its contents.
1730 */
1731 if ((managed = is_managed(pa)) == FALSE)
1732 flags |= PMAP_NC;
1733
1734 /*
1735 * For user mappings we walk along the MMU tables of the given
1736 * pmap, reaching a PTE which describes the virtual page being
1737 * mapped or changed. If any level of the walk ends in an invalid
1738 * entry, a table must be allocated and the entry must be updated
1739 * to point to it.
1740 * There is a bit of confusion as to whether this code must be
1741 * re-entrant. For now we will assume it is. To support
1742 * re-entrancy we must unlink tables from the table pool before
1743 * we assume we may use them. Tables are re-linked into the pool
1744 * when we are finished with them at the end of the function.
1745 * But I don't feel like doing that until we have proof that this
1746 * needs to be re-entrant.
1747 * 'llevel' records which tables need to be relinked.
1748 */
1749 llevel = NONE;
1750
1751 /*
1752 * Step 1 - Retrieve the A table from the pmap. If it has no
1753 * A table, allocate a new one from the available pool.
1754 */
1755
1756 a_tbl = pmap->pm_a_tmgr;
1757 if (a_tbl == NULL) {
1758 /*
1759 * This pmap does not currently have an A table. Allocate
1760 * a new one.
1761 */
1762 a_tbl = get_a_table();
1763 a_tbl->at_parent = pmap;
1764
1765 /*
1766 * Assign this new A table to the pmap, and calculate its
1767 * physical address so that loadcrp() can be used to make
1768 * the table active.
1769 */
1770 pmap->pm_a_tmgr = a_tbl;
1771 pmap->pm_a_phys = mmu_vtop(a_tbl->at_dtbl);
1772
1773 /*
1774 * If the process receiving a new A table is the current
1775 * process, we are responsible for setting the MMU so that
1776 * it becomes the current address space. This only adds
1777 * new mappings, so no need to flush anything.
1778 */
1779 if (pmap == current_pmap()) {
1780 kernel_crp.rp_addr = pmap->pm_a_phys;
1781 loadcrp(&kernel_crp);
1782 }
1783
1784 if (!wired)
1785 llevel = NEWA;
1786 } else {
1787 /*
1788 * Use the A table already allocated for this pmap.
1789 * Unlink it from the A table pool if necessary.
1790 */
1791 if (wired && !a_tbl->at_wcnt)
1792 TAILQ_REMOVE(&a_pool, a_tbl, at_link);
1793 }
1794
1795 /*
1796 * Step 2 - Walk into the B table. If there is no valid B table,
1797 * allocate one.
1798 */
1799
1800 a_idx = MMU_TIA(va); /* Calculate the TIA of the VA. */
1801 a_dte = &a_tbl->at_dtbl[a_idx]; /* Retrieve descriptor from table */
1802 if (MMU_VALID_DT(*a_dte)) { /* Is the descriptor valid? */
1803 /* The descriptor is valid. Use the B table it points to. */
1804 /*************************************
1805 * a_idx *
1806 * v *
1807 * a_tbl -> +-+-+-+-+-+-+-+-+-+-+-+- *
1808 * | | | | | | | | | | | | *
1809 * +-+-+-+-+-+-+-+-+-+-+-+- *
1810 * | *
1811 * \- b_tbl -> +-+- *
1812 * | | *
1813 * +-+- *
1814 *************************************/
1815 b_dte = mmu_ptov(a_dte->addr.raw);
1816 b_tbl = mmuB2tmgr(b_dte);
1817
1818 /*
1819 * If the requested mapping must be wired, but this table
1820 * being used to map it is not, the table must be removed
1821 * from the available pool and its wired entry count
1822 * incremented.
1823 */
1824 if (wired && !b_tbl->bt_wcnt) {
1825 TAILQ_REMOVE(&b_pool, b_tbl, bt_link);
1826 a_tbl->at_wcnt++;
1827 }
1828 } else {
1829 /* The descriptor is invalid. Allocate a new B table. */
1830 b_tbl = get_b_table();
1831
1832 /* Point the parent A table descriptor to this new B table. */
1833 a_dte->addr.raw = mmu_vtop(b_tbl->bt_dtbl);
1834 a_dte->attr.raw = MMU_LONG_DTE_LU | MMU_DT_SHORT;
1835 a_tbl->at_ecnt++; /* Update parent's valid entry count */
1836
1837 /* Create the necessary back references to the parent table */
1838 b_tbl->bt_parent = a_tbl;
1839 b_tbl->bt_pidx = a_idx;
1840
1841 /*
1842 * If this table is to be wired, make sure the parent A table
1843 * wired count is updated to reflect that it has another wired
1844 * entry.
1845 */
1846 if (wired)
1847 a_tbl->at_wcnt++;
1848 else if (llevel == NONE)
1849 llevel = NEWB;
1850 }
1851
1852 /*
1853 * Step 3 - Walk into the C table, if there is no valid C table,
1854 * allocate one.
1855 */
1856
1857 b_idx = MMU_TIB(va); /* Calculate the TIB of the VA */
1858 b_dte = &b_tbl->bt_dtbl[b_idx]; /* Retrieve descriptor from table */
1859 if (MMU_VALID_DT(*b_dte)) { /* Is the descriptor valid? */
1860 /* The descriptor is valid. Use the C table it points to. */
1861 /**************************************
1862 * c_idx *
1863 * | v *
1864 * \- b_tbl -> +-+-+-+-+-+-+-+-+-+-+- *
1865 * | | | | | | | | | | | *
1866 * +-+-+-+-+-+-+-+-+-+-+- *
1867 * | *
1868 * \- c_tbl -> +-+-- *
1869 * | | | *
1870 * +-+-- *
1871 **************************************/
1872 c_pte = mmu_ptov(MMU_PTE_PA(*b_dte));
1873 c_tbl = mmuC2tmgr(c_pte);
1874
1875 /* If mapping is wired and table is not */
1876 if (wired && !c_tbl->ct_wcnt) {
1877 TAILQ_REMOVE(&c_pool, c_tbl, ct_link);
1878 b_tbl->bt_wcnt++;
1879 }
1880 } else {
1881 /* The descriptor is invalid. Allocate a new C table. */
1882 c_tbl = get_c_table();
1883
1884 /* Point the parent B table descriptor to this new C table. */
1885 b_dte->attr.raw = mmu_vtop(c_tbl->ct_dtbl);
1886 b_dte->attr.raw |= MMU_DT_SHORT;
1887 b_tbl->bt_ecnt++; /* Update parent's valid entry count */
1888
1889 /* Create the necessary back references to the parent table */
1890 c_tbl->ct_parent = b_tbl;
1891 c_tbl->ct_pidx = b_idx;
1892 /*
1893 * Store the pmap and base virtual managed address for faster
1894 * retrieval in the PV functions.
1895 */
1896 c_tbl->ct_pmap = pmap;
1897 c_tbl->ct_va = (va & (MMU_TIA_MASK|MMU_TIB_MASK));
1898
1899 /*
1900 * If this table is to be wired, make sure the parent B table
1901 * wired count is updated to reflect that it has another wired
1902 * entry.
1903 */
1904 if (wired)
1905 b_tbl->bt_wcnt++;
1906 else if (llevel == NONE)
1907 llevel = NEWC;
1908 }
1909
1910 /*
1911 * Step 4 - Deposit a page descriptor (PTE) into the appropriate
1912 * slot of the C table, describing the PA to which the VA is mapped.
1913 */
1914
1915 pte_idx = MMU_TIC(va);
1916 c_pte = &c_tbl->ct_dtbl[pte_idx];
1917 if (MMU_VALID_DT(*c_pte)) { /* Is the entry currently valid? */
1918 /*
1919 * The PTE is currently valid. This particular call
1920 * is just a synonym for one (or more) of the following
1921 * operations:
1922 * change protection of a page
1923 * change wiring status of a page
1924 * remove the mapping of a page
1925 *
1926 * XXX - Semi critical: This code should unwire the PTE
1927 * and, possibly, associated parent tables if this is a
1928 * change wiring operation. Currently it does not.
1929 *
1930 * This may be ok if pmap_change_wiring() is the only
1931 * interface used to UNWIRE a page.
1932 */
1933
1934 /* First check if this is a wiring operation. */
1935 if (wired && (c_pte->attr.raw & MMU_SHORT_PTE_WIRED)) {
1936 /*
1937 * The PTE is already wired. To prevent it from being
1938 * counted as a new wiring operation, reset the 'wired'
1939 * variable.
1940 */
1941 wired = FALSE;
1942 }
1943
1944 /* Is the new address the same as the old? */
1945 if (MMU_PTE_PA(*c_pte) == pa) {
1946 /*
1947 * Yes, mark that it does not need to be reinserted
1948 * into the PV list.
1949 */
1950 insert = FALSE;
1951
1952 /*
1953 * Clear all but the modified, referenced and wired
1954 * bits on the PTE.
1955 */
1956 c_pte->attr.raw &= (MMU_SHORT_PTE_M
1957 | MMU_SHORT_PTE_USED | MMU_SHORT_PTE_WIRED);
1958 } else {
1959 /* No, remove the old entry */
1960 pmap_remove_pte(c_pte);
1961 insert = TRUE;
1962 }
1963
1964 /*
1965 * TLB flush is only necessary if modifying current map.
1966 * However, in pmap_enter(), the pmap almost always IS
1967 * the current pmap, so don't even bother to check.
1968 */
1969 TBIS(va);
1970 } else {
1971 /*
1972 * The PTE is invalid. Increment the valid entry count in
1973 * the C table manager to reflect the addition of a new entry.
1974 */
1975 c_tbl->ct_ecnt++;
1976
1977 /* XXX - temporarily make sure the PTE is cleared. */
1978 c_pte->attr.raw = 0;
1979
1980 /* It will also need to be inserted into the PV list. */
1981 insert = TRUE;
1982 }
1983
1984 /*
1985 * If page is changing from unwired to wired status, set an unused bit
1986 * within the PTE to indicate that it is wired. Also increment the
1987 * wired entry count in the C table manager.
1988 */
1989 if (wired) {
1990 c_pte->attr.raw |= MMU_SHORT_PTE_WIRED;
1991 c_tbl->ct_wcnt++;
1992 }
1993
1994 /*
1995 * Map the page, being careful to preserve modify/reference/wired
1996 * bits. At this point it is assumed that the PTE either has no bits
	 * set, or if there are set bits, they are only the modified,
	 * referenced or wired bits. If not, the following statement will
	 * cause erratic behavior.
2000 */
2001 #ifdef PMAP_DEBUG
2002 if (c_pte->attr.raw & ~(MMU_SHORT_PTE_M |
2003 MMU_SHORT_PTE_USED | MMU_SHORT_PTE_WIRED)) {
2004 printf("pmap_enter: junk left in PTE at %p\n", c_pte);
2005 Debugger();
2006 }
2007 #endif
2008 c_pte->attr.raw |= ((u_long) pa | MMU_DT_PAGE);
2009
2010 /*
2011 * If the mapping should be read-only, set the write protect
2012 * bit in the PTE.
2013 */
2014 if (!(prot & VM_PROT_WRITE))
2015 c_pte->attr.raw |= MMU_SHORT_PTE_WP;
2016
2017 /*
	 * If the mapping should be cache inhibited (indicated by flag bits
	 * passed in the low-order bits of the physical address), mark the
	 * PTE as a cache-inhibited page.
2021 */
2022 if (flags & PMAP_NC)
2023 c_pte->attr.raw |= MMU_SHORT_PTE_CI;
2024
2025 /*
2026 * If the physical address being mapped is managed by the PV
2027 * system then link the pte into the list of pages mapped to that
2028 * address.
2029 */
2030 if (insert && managed) {
2031 pv = pa2pv(pa);
2032 nidx = pteidx(c_pte);
2033
2034 s = splimp();
2035 pvebase[nidx].pve_next = pv->pv_idx;
2036 pv->pv_idx = nidx;
2037 splx(s);
2038 }
2039
2040 /* Move any allocated tables back into the active pool. */
2041
2042 switch (llevel) {
2043 case NEWA:
2044 TAILQ_INSERT_TAIL(&a_pool, a_tbl, at_link);
2045 /* FALLTHROUGH */
2046 case NEWB:
2047 TAILQ_INSERT_TAIL(&b_pool, b_tbl, bt_link);
2048 /* FALLTHROUGH */
2049 case NEWC:
2050 TAILQ_INSERT_TAIL(&c_pool, c_tbl, ct_link);
2051 /* FALLTHROUGH */
2052 default:
2053 break;
2054 }
2055 }
2056
2057 /* pmap_enter_kernel INTERNAL
2058 **
2059 * Map the given virtual address to the given physical address within the
2060 * kernel address space. This function exists because the kernel map does
2061 * not do dynamic table allocation. It consists of a contiguous array of ptes
2062 * and can be edited directly without the need to walk through any tables.
2063 *
2064 * XXX: "Danger, Will Robinson!"
2065 * Note that the kernel should never take a fault on any page
2066 * between [ KERNBASE .. virtual_avail ] and this is checked in
2067 * trap.c for kernel-mode MMU faults. This means that mappings
 * created in that range must be implicitly wired. -gwr
2069 */
2070 void
2071 pmap_enter_kernel(va, pa, prot)
2072 vm_offset_t va;
2073 vm_offset_t pa;
2074 vm_prot_t prot;
2075 {
2076 boolean_t was_valid, insert;
2077 u_short pte_idx;
2078 int s, flags;
2079 mmu_short_pte_t *pte;
2080 pv_t *pv;
2081 vm_offset_t old_pa;
2082
2083 flags = (pa & ~MMU_PAGE_MASK);
2084 pa &= MMU_PAGE_MASK;
2085
2086 if (is_managed(pa))
2087 insert = TRUE;
2088 else
2089 insert = FALSE;
2090
2091 /*
2092 * Calculate the index of the PTE being modified.
2093 */
2094 pte_idx = (u_long) m68k_btop(va - KERNBASE);
2095
2096 /* This array is traditionally named "Sysmap" */
2097 pte = &kernCbase[pte_idx];
2098
2099 s = splimp();
2100 if (MMU_VALID_DT(*pte)) {
2101 was_valid = TRUE;
2102 /*
2103 * If the PTE already maps a different
		 * physical address, unmap and pv_unlink.
2105 */
2106 old_pa = MMU_PTE_PA(*pte);
2107 if (pa != old_pa)
2108 pmap_remove_pte(pte);
2109 else {
2110 /*
2111 * Old PA and new PA are the same. No need to
2112 * relink the mapping within the PV list.
2113 */
2114 insert = FALSE;
2115
2116 /*
2117 * Save any mod/ref bits on the PTE.
2118 */
2119 pte->attr.raw &= (MMU_SHORT_PTE_USED|MMU_SHORT_PTE_M);
2120 }
2121 } else {
2122 pte->attr.raw = MMU_DT_INVALID;
2123 was_valid = FALSE;
2124 }
2125
2126 /*
	 * Map the page, being careful to preserve any modified/referenced
	 * bits already on the PTE.
2129 */
2130 pte->attr.raw |= (pa | MMU_DT_PAGE);
2131
2132 if (!(prot & VM_PROT_WRITE)) /* If access should be read-only */
2133 pte->attr.raw |= MMU_SHORT_PTE_WP;
2134 if (flags & PMAP_NC)
2135 pte->attr.raw |= MMU_SHORT_PTE_CI;
2136 if (was_valid)
2137 TBIS(va);
2138
2139 /*
2140 * Insert the PTE into the PV system, if need be.
2141 */
2142 if (insert) {
2143 pv = pa2pv(pa);
2144 pvebase[pte_idx].pve_next = pv->pv_idx;
2145 pv->pv_idx = pte_idx;
2146 }
2147 splx(s);
2148
2149 }
2150
2151 /* pmap_map INTERNAL
2152 **
2153 * Map a contiguous range of physical memory into a contiguous range of
2154 * the kernel virtual address space.
2155 *
2156 * Used for device mappings and early mapping of the kernel text/data/bss.
2157 * Returns the first virtual address beyond the end of the range.
2158 */
2159 vm_offset_t
2160 pmap_map(va, pa, endpa, prot)
2161 vm_offset_t va;
2162 vm_offset_t pa;
2163 vm_offset_t endpa;
2164 int prot;
2165 {
2166 int sz;
2167
2168 sz = endpa - pa;
2169 do {
2170 pmap_enter_kernel(va, pa, prot);
2171 va += NBPG;
2172 pa += NBPG;
2173 sz -= NBPG;
2174 } while (sz > 0);
2175 return(va);
2176 }
2177
2178 /* pmap_protect INTERFACE
2179 **
2180 * Apply the given protection to the given virtual address range within
2181 * the given map.
2182 *
2183 * It is ok for the protection applied to be stronger than what is
2184 * specified. We use this to our advantage when the given map has no
2185 * mapping for the virtual address. By skipping a page when this
2186 * is discovered, we are effectively applying a protection of VM_PROT_NONE,
2187 * and therefore do not need to map the page just to apply a protection
2188 * code. Only pmap_enter() needs to create new mappings if they do not exist.
2189 *
 * XXX - This function could be sped up by using pmap_stroll() for
 * initial setup, and then manually strolling in the main loop.
2192 */
2193 void
2194 pmap_protect(pmap, startva, endva, prot)
2195 pmap_t pmap;
2196 vm_offset_t startva, endva;
2197 vm_prot_t prot;
2198 {
2199 boolean_t iscurpmap;
2200 int a_idx, b_idx, c_idx;
2201 a_tmgr_t *a_tbl;
2202 b_tmgr_t *b_tbl;
2203 c_tmgr_t *c_tbl;
2204 mmu_short_pte_t *pte;
2205
2206 if (pmap == NULL)
2207 return;
2208 if (pmap == pmap_kernel()) {
2209 pmap_protect_kernel(startva, endva, prot);
2210 return;
2211 }
2212
2213 /*
2214 * In this particular pmap implementation, there are only three
2215 * types of memory protection: 'all' (read/write/execute),
2216 * 'read-only' (read/execute) and 'none' (no mapping.)
2217 * It is not possible for us to treat 'executable' as a separate
2218 * protection type. Therefore, protection requests that seek to
2219 * remove execute permission while retaining read or write, and those
2220 * that make little sense (write-only for example) are ignored.
2221 */
2222 switch (prot) {
2223 case VM_PROT_NONE:
2224 /*
2225 * A request to apply the protection code of
2226 * 'VM_PROT_NONE' is a synonym for pmap_remove().
2227 */
2228 pmap_remove(pmap, startva, endva);
2229 return;
2230 case VM_PROT_EXECUTE:
2231 case VM_PROT_READ:
2232 case VM_PROT_READ|VM_PROT_EXECUTE:
2233 /* continue */
2234 break;
2235 case VM_PROT_WRITE:
2236 case VM_PROT_WRITE|VM_PROT_READ:
2237 case VM_PROT_WRITE|VM_PROT_EXECUTE:
2238 case VM_PROT_ALL:
2239 /* None of these should happen in a sane system. */
2240 return;
2241 }
2242
2243 /*
2244 * If the pmap has no A table, it has no mappings and therefore
2245 * there is nothing to protect.
2246 */
2247 if ((a_tbl = pmap->pm_a_tmgr) == NULL)
2248 return;
2249
2250 a_idx = MMU_TIA(startva);
2251 b_idx = MMU_TIB(startva);
2252 c_idx = MMU_TIC(startva);
	b_tbl = NULL;
	c_tbl = NULL;
2254
2255 iscurpmap = (pmap == current_pmap());
2256 while (startva < endva) {
2257 if (b_tbl || MMU_VALID_DT(a_tbl->at_dtbl[a_idx])) {
2258 if (b_tbl == NULL) {
2259 b_tbl = (b_tmgr_t *) a_tbl->at_dtbl[a_idx].addr.raw;
2260 b_tbl = mmu_ptov((vm_offset_t) b_tbl);
2261 b_tbl = mmuB2tmgr((mmu_short_dte_t *) b_tbl);
2262 }
2263 if (c_tbl || MMU_VALID_DT(b_tbl->bt_dtbl[b_idx])) {
2264 if (c_tbl == NULL) {
2265 c_tbl = (c_tmgr_t *) MMU_DTE_PA(b_tbl->bt_dtbl[b_idx]);
2266 c_tbl = mmu_ptov((vm_offset_t) c_tbl);
2267 c_tbl = mmuC2tmgr((mmu_short_pte_t *) c_tbl);
2268 }
2269 if (MMU_VALID_DT(c_tbl->ct_dtbl[c_idx])) {
2270 pte = &c_tbl->ct_dtbl[c_idx];
2271 /* make the mapping read-only */
2272 pte->attr.raw |= MMU_SHORT_PTE_WP;
2273 /*
2274 * If we just modified the current address space,
2275 * flush any translations for the modified page from
2276 * the translation cache and any data from it in the
2277 * data cache.
2278 */
2279 if (iscurpmap)
2280 TBIS(startva);
2281 }
2282 startva += NBPG;
2283
2284 if (++c_idx >= MMU_C_TBL_SIZE) { /* exceeded C table? */
2285 c_tbl = NULL;
2286 c_idx = 0;
2287 if (++b_idx >= MMU_B_TBL_SIZE) { /* exceeded B table? */
2288 b_tbl = NULL;
2289 b_idx = 0;
2290 }
2291 }
2292 } else { /* C table wasn't valid */
2293 c_tbl = NULL;
2294 c_idx = 0;
2295 startva += MMU_TIB_RANGE;
2296 if (++b_idx >= MMU_B_TBL_SIZE) { /* exceeded B table? */
2297 b_tbl = NULL;
2298 b_idx = 0;
2299 }
2300 } /* C table */
2301 } else { /* B table wasn't valid */
2302 b_tbl = NULL;
2303 b_idx = 0;
2304 startva += MMU_TIA_RANGE;
2305 a_idx++;
2306 } /* B table */
2307 }
2308 }
2309
2310 /* pmap_protect_kernel INTERNAL
2311 **
2312 * Apply the given protection code to a kernel address range.
2313 */
2314 void
2315 pmap_protect_kernel(startva, endva, prot)
2316 vm_offset_t startva, endva;
2317 vm_prot_t prot;
2318 {
2319 vm_offset_t va;
2320 mmu_short_pte_t *pte;
2321
2322 pte = &kernCbase[(unsigned long) m68k_btop(startva - KERNBASE)];
2323 for (va = startva; va < endva; va += NBPG, pte++) {
2324 if (MMU_VALID_DT(*pte)) {
2325 switch (prot) {
2326 case VM_PROT_ALL:
2327 break;
2328 case VM_PROT_EXECUTE:
2329 case VM_PROT_READ:
2330 case VM_PROT_READ|VM_PROT_EXECUTE:
2331 pte->attr.raw |= MMU_SHORT_PTE_WP;
2332 break;
2333 case VM_PROT_NONE:
2334 /* this is an alias for 'pmap_remove_kernel' */
2335 pmap_remove_pte(pte);
2336 break;
2337 default:
2338 break;
2339 }
2340 /*
2341 * since this is the kernel, immediately flush any cached
2342 * descriptors for this address.
2343 */
2344 TBIS(va);
2345 }
2346 }
2347 }
2348
2349 /* pmap_change_wiring INTERFACE
2350 **
2351 * Changes the wiring of the specified page.
2352 *
2353 * This function is called from vm_fault.c to unwire
2354 * a mapping. It really should be called 'pmap_unwire'
2355 * because it is never asked to do anything but remove
2356 * wirings.
2357 */
2358 void
2359 pmap_change_wiring(pmap, va, wire)
2360 pmap_t pmap;
2361 vm_offset_t va;
2362 boolean_t wire;
2363 {
2364 int a_idx, b_idx, c_idx;
2365 a_tmgr_t *a_tbl;
2366 b_tmgr_t *b_tbl;
2367 c_tmgr_t *c_tbl;
2368 mmu_short_pte_t *pte;
2369
2370 /* Kernel mappings always remain wired. */
2371 if (pmap == pmap_kernel())
2372 return;
2373
2374 #ifdef PMAP_DEBUG
2375 if (wire == TRUE)
2376 panic("pmap_change_wiring: wire requested.");
2377 #endif
2378
2379 /*
2380 * Walk through the tables. If the walk terminates without
2381 * a valid PTE then the address wasn't wired in the first place.
2382 * Return immediately.
2383 */
2384 if (pmap_stroll(pmap, va, &a_tbl, &b_tbl, &c_tbl, &pte, &a_idx,
2385 &b_idx, &c_idx) == FALSE)
2386 return;
2387
2388
2389 /* Is the PTE wired? If not, return. */
2390 if (!(pte->attr.raw & MMU_SHORT_PTE_WIRED))
2391 return;
2392
2393 /* Remove the wiring bit. */
2394 pte->attr.raw &= ~(MMU_SHORT_PTE_WIRED);
2395
2396 /*
2397 * Decrement the wired entry count in the C table.
2398 * If it reaches zero the following things happen:
2399 * 1. The table no longer has any wired entries and is considered
2400 * unwired.
2401 * 2. It is placed on the available queue.
2402 * 3. The parent table's wired entry count is decremented.
2403 * 4. If it reaches zero, this process repeats at step 1 and
	 *    stops after reaching the A table.
2405 */
2406 if (--c_tbl->ct_wcnt == 0) {
2407 TAILQ_INSERT_TAIL(&c_pool, c_tbl, ct_link);
2408 if (--b_tbl->bt_wcnt == 0) {
2409 TAILQ_INSERT_TAIL(&b_pool, b_tbl, bt_link);
2410 if (--a_tbl->at_wcnt == 0) {
2411 TAILQ_INSERT_TAIL(&a_pool, a_tbl, at_link);
2412 }
2413 }
2414 }
2415 }
2416
2417 /* pmap_pageable INTERFACE
2418 **
2419 * Make the specified range of addresses within the given pmap,
 * 'pageable' or 'not-pageable'. A non-pageable (wired) page must not
 * cause any faults when referenced; a pageable page may.
2422 *
2423 * This routine is only advisory. The VM system will call pmap_enter()
2424 * to wire or unwire pages that are going to be made pageable before calling
2425 * this function. By the time this routine is called, everything that needs
2426 * to be done has already been done.
2427 */
2428 void
2429 pmap_pageable(pmap, start, end, pageable)
2430 pmap_t pmap;
2431 vm_offset_t start, end;
2432 boolean_t pageable;
2433 {
2434 /* not implemented. */
2435 }
2436
2437 /* pmap_copy INTERFACE
2438 **
2439 * Copy the mappings of a range of addresses in one pmap, into
2440 * the destination address of another.
2441 *
2442 * This routine is advisory. Should we one day decide that MMU tables
2443 * may be shared by more than one pmap, this function should be used to
2444 * link them together. Until that day however, we do nothing.
2445 */
2446 void
2447 pmap_copy(pmap_a, pmap_b, dst, len, src)
2448 pmap_t pmap_a, pmap_b;
2449 vm_offset_t dst;
2450 vm_size_t len;
2451 vm_offset_t src;
2452 {
2453 /* not implemented. */
2454 }
2455
2456 /* pmap_copy_page INTERFACE
2457 **
2458 * Copy the contents of one physical page into another.
2459 *
2460 * This function makes use of two virtual pages allocated in pmap_bootstrap()
2461 * to map the two specified physical pages into the kernel address space.
2462 *
2463 * Note: We could use the transparent translation registers to make the
2464 * mappings. If we do so, be sure to disable interrupts before using them.
2465 */
2466 void
2467 pmap_copy_page(srcpa, dstpa)
2468 vm_offset_t srcpa, dstpa;
2469 {
2470 vm_offset_t srcva, dstva;
2471 int s;
2472
2473 srcva = tmp_vpages[0];
2474 dstva = tmp_vpages[1];
2475
2476 s = splimp();
2477 if (tmp_vpages_inuse++)
2478 panic("pmap_copy_page: temporary vpages are in use.");
2479
	/* Map pages as non-cacheable to avoid cache pollution? */
2481 pmap_enter_kernel(srcva, srcpa, VM_PROT_READ);
2482 pmap_enter_kernel(dstva, dstpa, VM_PROT_READ|VM_PROT_WRITE);
2483
2484 /* Hand-optimized version of bcopy(src, dst, NBPG) */
2485 copypage((char *) srcva, (char *) dstva);
2486
2487 pmap_remove_kernel(srcva, srcva + NBPG);
2488 pmap_remove_kernel(dstva, dstva + NBPG);
2489
2490 --tmp_vpages_inuse;
2491 splx(s);
2492 }
2493
2494 /* pmap_zero_page INTERFACE
2495 **
2496 * Zero the contents of the specified physical page.
2497 *
 * Uses one of the virtual pages allocated in pmap_bootstrap()
2499 * to map the specified page into the kernel address space.
2500 */
2501 void
2502 pmap_zero_page(dstpa)
2503 vm_offset_t dstpa;
2504 {
2505 vm_offset_t dstva;
2506 int s;
2507
2508 dstva = tmp_vpages[1];
2509 s = splimp();
2510 if (tmp_vpages_inuse++)
2511 panic("pmap_zero_page: temporary vpages are in use.");
2512
2513 /* The comments in pmap_copy_page() above apply here also. */
2514 pmap_enter_kernel(dstva, dstpa, VM_PROT_READ|VM_PROT_WRITE);
2515
2516 /* Hand-optimized version of bzero(ptr, NBPG) */
2517 zeropage((char *) dstva);
2518
2519 pmap_remove_kernel(dstva, dstva + NBPG);
2520
2521 --tmp_vpages_inuse;
2522 splx(s);
2523 }
2524
2525 /* pmap_collect INTERFACE
2526 **
2527 * Called from the VM system when we are about to swap out
2528 * the process using this pmap. This should give up any
2529 * resources held here, including all its MMU tables.
2530 */
2531 void
2532 pmap_collect(pmap)
2533 pmap_t pmap;
2534 {
2535 /* XXX - todo... */
2536 }
2537
2538 /* pmap_create INTERFACE
2539 **
2540 * Create and return a pmap structure.
2541 */
2542 pmap_t
2543 pmap_create(size)
2544 vm_size_t size;
2545 {
2546 pmap_t pmap;
2547
	/*
	 * A non-zero size indicates a request for a software-only map,
	 * bounded by that size. We do not support software maps.
	 */
	if (size)
		return NULL;
2550
2551 pmap = (pmap_t) malloc(sizeof(struct pmap), M_VMPMAP, M_WAITOK);
2552 pmap_pinit(pmap);
2553
2554 return pmap;
2555 }
2556
2557 /* pmap_pinit INTERNAL
2558 **
2559 * Initialize a pmap structure.
2560 */
2561 void
2562 pmap_pinit(pmap)
2563 pmap_t pmap;
2564 {
2565 bzero(pmap, sizeof(struct pmap));
2566 pmap->pm_a_tmgr = NULL;
2567 pmap->pm_a_phys = kernAphys;
2568 }
2569
2570 /* pmap_release INTERFACE
2571 **
2572 * Release any resources held by the given pmap.
2573 *
 * This is the reverse analog of pmap_pinit. It does not
 * necessarily imply that the pmap structure is deallocated;
 * that is done in pmap_destroy.
2577 */
2578 void
2579 pmap_release(pmap)
2580 pmap_t pmap;
2581 {
2582 /*
2583 * As long as the pmap contains no mappings,
2584 * which always should be the case whenever
2585 * this function is called, there really should
2586 * be nothing to do.
2587 */
2588 #ifdef PMAP_DEBUG
2589 if (pmap == NULL)
2590 return;
2591 if (pmap == pmap_kernel())
2592 panic("pmap_release: kernel pmap");
2593 #endif
2594 /*
2595 * XXX - If this pmap has an A table, give it back.
2596 * The pmap SHOULD be empty by now, and pmap_remove
2597 * should have already given back the A table...
2598 * However, I see: pmap->pm_a_tmgr->at_ecnt == 1
2599 * at this point, which means some mapping was not
2600 * removed when it should have been. -gwr
2601 */
2602 if (pmap->pm_a_tmgr != NULL) {
2603 /* First make sure we are not using it! */
2604 if (kernel_crp.rp_addr == pmap->pm_a_phys) {
2605 kernel_crp.rp_addr = kernAphys;
2606 loadcrp(&kernel_crp);
2607 }
2608 #ifdef PMAP_DEBUG /* XXX - todo! */
2609 /* XXX - Now complain... */
2610 printf("pmap_release: still have table\n");
2611 Debugger();
2612 #endif
2613 free_a_table(pmap->pm_a_tmgr, TRUE);
2614 pmap->pm_a_tmgr = NULL;
2615 pmap->pm_a_phys = kernAphys;
2616 }
2617 }
2618
2619 /* pmap_reference INTERFACE
2620 **
2621 * Increment the reference count of a pmap.
2622 */
2623 void
2624 pmap_reference(pmap)
2625 pmap_t pmap;
2626 {
2627 if (pmap == NULL)
2628 return;
2629
2630 /* pmap_lock(pmap); */
2631 pmap->pm_refcount++;
2632 /* pmap_unlock(pmap); */
2633 }
2634
2635 /* pmap_dereference INTERNAL
2636 **
2637 * Decrease the reference count on the given pmap
2638 * by one and return the current count.
2639 */
2640 int
2641 pmap_dereference(pmap)
2642 pmap_t pmap;
2643 {
2644 int rtn;
2645
2646 if (pmap == NULL)
2647 return 0;
2648
2649 /* pmap_lock(pmap); */
2650 rtn = --pmap->pm_refcount;
2651 /* pmap_unlock(pmap); */
2652
2653 return rtn;
2654 }
2655
2656 /* pmap_destroy INTERFACE
2657 **
2658 * Decrement a pmap's reference count and delete
2659 * the pmap if it becomes zero. Will be called
2660 * only after all mappings have been removed.
2661 */
2662 void
2663 pmap_destroy(pmap)
2664 pmap_t pmap;
2665 {
2666 if (pmap == NULL)
2667 return;
2668 if (pmap == &kernel_pmap)
2669 panic("pmap_destroy: kernel_pmap!");
2670 if (pmap_dereference(pmap) == 0) {
2671 pmap_release(pmap);
2672 free(pmap, M_VMPMAP);
2673 }
2674 }
2675
2676 /* pmap_is_referenced INTERFACE
2677 **
2678 * Determine if the given physical page has been
 * referenced (read from or written to.)
2680 */
2681 boolean_t
2682 pmap_is_referenced(pa)
2683 vm_offset_t pa;
2684 {
2685 pv_t *pv;
2686 int idx, s;
2687
2688 if (!pv_initialized)
2689 return FALSE;
	/* XXX - this may be unnecessary. */
2691 if (!is_managed(pa))
2692 return FALSE;
2693
2694 pv = pa2pv(pa);
2695 /*
2696 * Check the flags on the pv head. If they are set,
2697 * return immediately. Otherwise a search must be done.
2698 */
2699 if (pv->pv_flags & PV_FLAGS_USED)
2700 return TRUE;
2701
2702 s = splimp();
2703 /*
2704 * Search through all pv elements pointing
2705 * to this page and query their reference bits
2706 */
2707 for (idx = pv->pv_idx;
2708 idx != PVE_EOL;
2709 idx = pvebase[idx].pve_next) {
2710
2711 if (MMU_PTE_USED(kernCbase[idx])) {
2712 splx(s);
2713 return TRUE;
2714 }
2715 }
2716 splx(s);
2717
2718 return FALSE;
2719 }
2720
2721 /* pmap_is_modified INTERFACE
2722 **
2723 * Determine if the given physical page has been
2724 * modified (written to.)
2725 */
2726 boolean_t
2727 pmap_is_modified(pa)
2728 vm_offset_t pa;
2729 {
2730 pv_t *pv;
2731 int idx, s;
2732
2733 if (!pv_initialized)
2734 return FALSE;
	/* XXX - this may be unnecessary. */
2736 if (!is_managed(pa))
2737 return FALSE;
2738
2739 /* see comments in pmap_is_referenced() */
2740 pv = pa2pv(pa);
2741 if (pv->pv_flags & PV_FLAGS_MDFY)
2742 return TRUE;
2743
2744 s = splimp();
2745 for (idx = pv->pv_idx;
2746 idx != PVE_EOL;
2747 idx = pvebase[idx].pve_next) {
2748
2749 if (MMU_PTE_MODIFIED(kernCbase[idx])) {
2750 splx(s);
2751 return TRUE;
2752 }
2753 }
2754 splx(s);
2755
2756 return FALSE;
2757 }
2758
2759 /* pmap_page_protect INTERFACE
2760 **
2761 * Applies the given protection to all mappings to the given
2762 * physical page.
2763 */
2764 void
2765 pmap_page_protect(pa, prot)
2766 vm_offset_t pa;
2767 vm_prot_t prot;
2768 {
2769 pv_t *pv;
2770 int idx, s;
2771 vm_offset_t va;
2772 struct mmu_short_pte_struct *pte;
2773 c_tmgr_t *c_tbl;
2774 pmap_t pmap, curpmap;
2775
2776 if (!is_managed(pa))
2777 return;
2778
2779 curpmap = current_pmap();
2780 pv = pa2pv(pa);
2781 s = splimp();
2782
2783 for (idx = pv->pv_idx;
2784 idx != PVE_EOL;
2785 idx = pvebase[idx].pve_next) {
2786
2787 pte = &kernCbase[idx];
2788 switch (prot) {
2789 case VM_PROT_ALL:
2790 /* do nothing */
2791 break;
2792 case VM_PROT_EXECUTE:
2793 case VM_PROT_READ:
2794 case VM_PROT_READ|VM_PROT_EXECUTE:
2795 pte->attr.raw |= MMU_SHORT_PTE_WP;
2796
2797 /*
2798 * Determine the virtual address mapped by
2799 * the PTE and flush ATC entries if necessary.
2800 */
2801 va = pmap_get_pteinfo(idx, &pmap, &c_tbl);
2802 if (pmap == curpmap || pmap == pmap_kernel())
2803 TBIS(va);
2804 break;
2805 case VM_PROT_NONE:
2806 /* Save the mod/ref bits. */
2807 pv->pv_flags |= pte->attr.raw;
2808 /* Invalidate the PTE. */
2809 pte->attr.raw = MMU_DT_INVALID;
2810
2811 /*
			 * Update table counts, and flush ATC entries
			 * if necessary.
2814 */
2815 va = pmap_get_pteinfo(idx, &pmap, &c_tbl);
2816
2817 /*
2818 * If the PTE belongs to the kernel map,
2819 * be sure to flush the page it maps.
2820 */
2821 if (pmap == pmap_kernel()) {
2822 TBIS(va);
2823 } else {
2824 /*
2825 * The PTE belongs to a user map.
2826 * update the entry count in the C
2827 * table to which it belongs and flush
2828 * the ATC if the mapping belongs to
2829 * the current pmap.
2830 */
2831 c_tbl->ct_ecnt--;
2832 if (pmap == curpmap)
2833 TBIS(va);
2834 }
2835 break;
2836 default:
2837 break;
2838 }
2839 }
2840
2841 /*
2842 * If the protection code indicates that all mappings to the page
	 * are to be removed, truncate the PV list to zero entries.
2844 */
2845 if (prot == VM_PROT_NONE)
2846 pv->pv_idx = PVE_EOL;
2847 splx(s);
2848 }
2849
2850 /* pmap_get_pteinfo INTERNAL
2851 **
2852 * Called internally to find the pmap and virtual address within that
 * map to which the pte at the given index maps. Also returns the
 * PTE's C table manager.
2855 *
2856 * Returns the pmap in the argument provided, and the virtual address
2857 * by return value.
2858 */
2859 vm_offset_t
2860 pmap_get_pteinfo(idx, pmap, tbl)
2861 u_int idx;
2862 pmap_t *pmap;
2863 c_tmgr_t **tbl;
2864 {
2865 vm_offset_t va = 0;
2866
2867 /*
2868 * Determine if the PTE is a kernel PTE or a user PTE.
2869 */
2870 if (idx >= NUM_KERN_PTES) {
2871 /*
2872 * The PTE belongs to a user mapping.
2873 */
2874 /* XXX: Would like an inline for this to validate idx... */
2875 *tbl = &Ctmgrbase[(idx - NUM_KERN_PTES) / MMU_C_TBL_SIZE];
2876
2877 *pmap = (*tbl)->ct_pmap;
2878 /*
2879 * To find the va to which the PTE maps, we first take
2880 * the table's base virtual address mapping which is stored
2881 * in ct_va. We then increment this address by a page for
2882 * every slot skipped until we reach the PTE.
2883 */
2884 va = (*tbl)->ct_va;
2885 va += m68k_ptob(idx % MMU_C_TBL_SIZE);
2886 } else {
2887 /*
2888 * The PTE belongs to the kernel map.
2889 */
2890 *pmap = pmap_kernel();
2891
2892 va = m68k_ptob(idx);
2893 va += KERNBASE;
2894 }
2895
2896 return va;
2897 }
2898
2899 /* pmap_clear_modify INTERFACE
2900 **
2901 * Clear the modification bit on the page at the specified
2902 * physical address.
2903 *
2904 */
2905 void
2906 pmap_clear_modify(pa)
2907 vm_offset_t pa;
2908 {
2909 if (!is_managed(pa))
2910 return;
2911 pmap_clear_pv(pa, PV_FLAGS_MDFY);
2912 }
2913
2914 /* pmap_clear_reference INTERFACE
2915 **
2916 * Clear the referenced bit on the page at the specified
2917 * physical address.
2918 */
2919 void
2920 pmap_clear_reference(pa)
2921 vm_offset_t pa;
2922 {
2923 if (!is_managed(pa))
2924 return;
2925 pmap_clear_pv(pa, PV_FLAGS_USED);
2926 }
2927
2928 /* pmap_clear_pv INTERNAL
2929 **
2930 * Clears the specified flag from the specified physical address.
2931 * (Used by pmap_clear_modify() and pmap_clear_reference().)
2932 *
2933 * Flag is one of:
2934 * PV_FLAGS_MDFY - Page modified bit.
2935 * PV_FLAGS_USED - Page used (referenced) bit.
2936 *
2937 * This routine must not only clear the flag on the pv list
2938 * head. It must also clear the bit on every pte in the pv
2939 * list associated with the address.
2940 */
2941 void
2942 pmap_clear_pv(pa, flag)
2943 vm_offset_t pa;
2944 int flag;
2945 {
2946 pv_t *pv;
2947 int idx, s;
2948 vm_offset_t va;
2949 pmap_t pmap;
2950 mmu_short_pte_t *pte;
2951 c_tmgr_t *c_tbl;
2952
2953 pv = pa2pv(pa);
2954
2955 s = splimp();
2956 pv->pv_flags &= ~(flag);
2957
2958 for (idx = pv->pv_idx;
2959 idx != PVE_EOL;
2960 idx = pvebase[idx].pve_next) {
2961
2962 pte = &kernCbase[idx];
2963 pte->attr.raw &= ~(flag);
2964 /*
2965 * The MC68030 MMU will not set the modified or
2966 * referenced bits on any MMU tables for which it has
		 * a cached descriptor with its modify bit set. To ensure
2968 * that it will modify these bits on the PTE during the next
2969 * time it is written to or read from, we must flush it from
2970 * the ATC.
2971 *
2972 * Ordinarily it is only necessary to flush the descriptor
2973 * if it is used in the current address space. But since I
2974 * am not sure that there will always be a notion of
2975 * 'the current address space' when this function is called,
2976 * I will skip the test and always flush the address. It
2977 * does no harm.
2978 */
2979 va = pmap_get_pteinfo(idx, &pmap, &c_tbl);
2980 TBIS(va);
2981 }
2982 splx(s);
2983 }
2984
2985 /* pmap_extract INTERFACE
2986 **
2987 * Return the physical address mapped by the virtual address
2988 * in the specified pmap or 0 if it is not known.
2989 *
2990 * Note: this function should also apply an exclusive lock
2991 * on the pmap system during its duration.
2992 */
2993 vm_offset_t
2994 pmap_extract(pmap, va)
2995 pmap_t pmap;
2996 vm_offset_t va;
2997 {
2998 int a_idx, b_idx, pte_idx;
2999 a_tmgr_t *a_tbl;
3000 b_tmgr_t *b_tbl;
3001 c_tmgr_t *c_tbl;
3002 mmu_short_pte_t *c_pte;
3003
3004 if (pmap == pmap_kernel())
3005 return pmap_extract_kernel(va);
3006 if (pmap == NULL)
3007 return 0;
3008
3009 if (pmap_stroll(pmap, va, &a_tbl, &b_tbl, &c_tbl,
3010 &c_pte, &a_idx, &b_idx, &pte_idx) == FALSE)
3011 return 0;
3012
3013 if (!MMU_VALID_DT(*c_pte))
3014 return 0;
3015
3016 return (MMU_PTE_PA(*c_pte));
3017 }
3018
3019 /* pmap_extract_kernel INTERNAL
3020 **
3021 * Extract a translation from the kernel address space.
3022 */
3023 vm_offset_t
3024 pmap_extract_kernel(va)
3025 vm_offset_t va;
3026 {
3027 mmu_short_pte_t *pte;
3028
3029 pte = &kernCbase[(u_int) m68k_btop(va - KERNBASE)];
3030 return MMU_PTE_PA(*pte);
3031 }
3032
3033 /* pmap_remove_kernel INTERNAL
3034 **
3035 * Remove the mapping of a range of virtual addresses from the kernel map.
3036 * The arguments are already page-aligned.
3037 */
3038 void
3039 pmap_remove_kernel(sva, eva)
3040 vm_offset_t sva;
3041 vm_offset_t eva;
3042 {
3043 int idx, eidx;
3044
3045 #ifdef PMAP_DEBUG
3046 if ((sva & PGOFSET) || (eva & PGOFSET))
3047 panic("pmap_remove_kernel: alignment");
3048 #endif
3049
3050 idx = m68k_btop(sva - KERNBASE);
3051 eidx = m68k_btop(eva - KERNBASE);
3052
3053 while (idx < eidx) {
3054 pmap_remove_pte(&kernCbase[idx++]);
3055 TBIS(sva);
3056 sva += NBPG;
3057 }
3058 }
3059
3060 /* pmap_remove INTERFACE
3061 **
3062 * Remove the mapping of a range of virtual addresses from the given pmap.
3063 *
3064 * If the range contains any wired entries, this function will probably create
3065 * disaster.
3066 */
3067 void
3068 pmap_remove(pmap, start, end)
3069 pmap_t pmap;
3070 vm_offset_t start;
3071 vm_offset_t end;
3072 {
3073
3074 if (pmap == pmap_kernel()) {
3075 pmap_remove_kernel(start, end);
3076 return;
3077 }
3078
3079 /*
3080 * XXX - Temporary(?) statement to prevent panic caused
	 * by vm_alloc_with_pager() handing us a software map (i.e. NULL)
3082 * to remove because it couldn't get backing store.
3083 * (I guess.)
3084 */
3085 if (pmap == NULL)
3086 return;
3087
3088 /*
3089 * If the pmap doesn't have an A table of its own, it has no mappings
3090 * that can be removed.
3091 */
3092 if (pmap->pm_a_tmgr == NULL)
3093 return;
3094
3095 /*
3096 * Remove the specified range from the pmap. If the function
3097 * returns true, the operation removed all the valid mappings
3098 * in the pmap and freed its A table. If this happened to the
3099 * currently loaded pmap, the MMU root pointer must be reloaded
3100 * with the default 'kernel' map.
3101 */
3102 if (pmap_remove_a(pmap->pm_a_tmgr, start, end)) {
3103 if (kernel_crp.rp_addr == pmap->pm_a_phys) {
3104 kernel_crp.rp_addr = kernAphys;
3105 loadcrp(&kernel_crp);
3106 /* will do TLB flush below */
3107 }
3108 pmap->pm_a_tmgr = NULL;
3109 pmap->pm_a_phys = kernAphys;
3110 }
3111
3112 /*
3113 * If we just modified the current address space,
3114 * make sure to flush the MMU cache.
3115 *
	 * XXX - this could be an unnecessarily large flush.
3117 * XXX - Could decide, based on the size of the VA range
3118 * to be removed, whether to flush "by pages" or "all".
3119 */
3120 if (pmap == current_pmap())
3121 TBIAU();
3122 }
3123
3124 /* pmap_remove_a INTERNAL
3125 **
3126 * This is function number one in a set of three that removes a range
 * of memory in the most efficient manner by removing the highest-
 * level tables possible. This particular function attempts to remove
3129 * as many B tables as it can, delegating the remaining fragmented ranges to
3130 * pmap_remove_b().
3131 *
3132 * If the removal operation results in an empty A table, the function returns
3133 * TRUE.
3134 *
3135 * It's ugly but will do for now.
3136 */
3137 boolean_t
3138 pmap_remove_a(a_tbl, start, end)
3139 a_tmgr_t *a_tbl;
3140 vm_offset_t start;
3141 vm_offset_t end;
3142 {
3143 boolean_t empty;
3144 int idx;
3145 vm_offset_t nstart, nend;
3146 b_tmgr_t *b_tbl;
3147 mmu_long_dte_t *a_dte;
3148 mmu_short_dte_t *b_dte;
3149
3150 /*
3151 * The following code works with what I call a 'granularity
	 * reduction algorithm'. A range of addresses will always have
3153 * the following properties, which are classified according to
3154 * how the range relates to the size of the current granularity
3155 * - an A table entry:
3156 *
3157 * 1 2 3 4
3158 * -+---+---+---+---+---+---+---+-
3159 * -+---+---+---+---+---+---+---+-
3160 *
3161 * A range will always start on a granularity boundary, illustrated
	 * by '+' signs in the diagram above, or it will start at some point
	 * in between granularity boundaries, as illustrated by point 1.
3164 * The first step in removing a range of addresses is to remove the
3165 * range between 1 and 2, the nearest granularity boundary. This
3166 * job is handled by the section of code governed by the
3167 * 'if (start < nstart)' statement.
3168 *
	 * A range will always encompass zero or more integral granules,
3170 * illustrated by points 2 and 3. Integral granules are easy to
3171 * remove. The removal of these granules is the second step, and
3172 * is handled by the code block 'if (nstart < nend)'.
3173 *
3174 * Lastly, a range will always end on a granularity boundary,
	 * illustrated by point 3, or it will fall just beyond one, as at point
3176 * 4. The last step involves removing this range and is handled by
3177 * the code block 'if (nend < end)'.
3178 */
3179 nstart = MMU_ROUND_UP_A(start);
3180 nend = MMU_ROUND_A(end);
3181
3182 if (start < nstart) {
3183 /*
3184 * This block is executed if the range starts between
3185 * a granularity boundary.
3186 *
3187 * First find the DTE which is responsible for mapping
3188 * the start of the range.
3189 */
3190 idx = MMU_TIA(start);
3191 a_dte = &a_tbl->at_dtbl[idx];
3192
3193 /*
3194 * If the DTE is valid then delegate the removal of the sub
3195 * range to pmap_remove_b(), which can remove addresses at
3196 * a finer granularity.
3197 */
3198 if (MMU_VALID_DT(*a_dte)) {
3199 b_dte = mmu_ptov(a_dte->addr.raw);
3200 b_tbl = mmuB2tmgr(b_dte);
3201
3202 /*
3203 * The sub range to be removed starts at the start
3204 * of the full range we were asked to remove, and ends
		 * at the lesser of:
		 * 1. The end of the full range, -or-
		 * 2. The start of the range rounded up to the next
		 *    granularity boundary (nstart).
3209 */
3210 if (end < nstart)
3211 empty = pmap_remove_b(b_tbl, start, end);
3212 else
3213 empty = pmap_remove_b(b_tbl, start, nstart);
3214
3215 /*
3216 * If the removal resulted in an empty B table,
3217 * invalidate the DTE that points to it and decrement
3218 * the valid entry count of the A table.
3219 */
3220 if (empty) {
3221 a_dte->attr.raw = MMU_DT_INVALID;
3222 a_tbl->at_ecnt--;
3223 }
3224 }
3225 /*
3226 * If the DTE is invalid, the address range is already non-
		 * existent and can simply be skipped.
3228 */
3229 }
3230 if (nstart < nend) {
3231 /*
3232 * This block is executed if the range spans a whole number
3233 * multiple of granules (A table entries.)
3234 *
3235 * First find the DTE which is responsible for mapping
3236 * the start of the first granule involved.
3237 */
3238 idx = MMU_TIA(nstart);
3239 a_dte = &a_tbl->at_dtbl[idx];
3240
3241 /*
3242 * Remove entire sub-granules (B tables) one at a time,
3243 * until reaching the end of the range.
3244 */
3245 for (; nstart < nend; a_dte++, nstart += MMU_TIA_RANGE)
3246 if (MMU_VALID_DT(*a_dte)) {
3247 /*
3248 * Find the B table manager for the
3249 * entry and free it.
3250 */
3251 b_dte = mmu_ptov(a_dte->addr.raw);
3252 b_tbl = mmuB2tmgr(b_dte);
3253 free_b_table(b_tbl, TRUE);
3254
3255 /*
3256 * Invalidate the DTE that points to the
3257 * B table and decrement the valid entry
3258 * count of the A table.
3259 */
3260 a_dte->attr.raw = MMU_DT_INVALID;
3261 a_tbl->at_ecnt--;
3262 }
3263 }
3264 if (nend < end) {
3265 /*
3266 * This block is executed if the range ends beyond a
3267 * granularity boundary.
3268 *
3269 * First find the DTE which is responsible for mapping
3270 * the start of the nearest (rounded down) granularity
3271 * boundary.
3272 */
3273 idx = MMU_TIA(nend);
3274 a_dte = &a_tbl->at_dtbl[idx];
3275
3276 /*
3277 * If the DTE is valid then delegate the removal of the sub
3278 * range to pmap_remove_b(), which can remove addresses at
3279 * a finer granularity.
3280 */
3281 if (MMU_VALID_DT(*a_dte)) {
3282 /*
3283 * Find the B table manager for the entry
3284 * and hand it to pmap_remove_b() along with
3285 * the sub range.
3286 */
3287 b_dte = mmu_ptov(a_dte->addr.raw);
3288 b_tbl = mmuB2tmgr(b_dte);
3289
3290 empty = pmap_remove_b(b_tbl, nend, end);
3291
3292 /*
3293 * If the removal resulted in an empty B table,
3294 * invalidate the DTE that points to it and decrement
3295 * the valid entry count of the A table.
3296 */
3297 if (empty) {
3298 a_dte->attr.raw = MMU_DT_INVALID;
3299 a_tbl->at_ecnt--;
3300 }
3301 }
3302 }
3303
3304 /*
3305 * If there are no more entries in the A table, release it
3306 * back to the available pool and return TRUE.
3307 */
3308 if (a_tbl->at_ecnt == 0) {
3309 a_tbl->at_parent = NULL;
3310 TAILQ_REMOVE(&a_pool, a_tbl, at_link);
3311 TAILQ_INSERT_HEAD(&a_pool, a_tbl, at_link);
3312 empty = TRUE;
3313 } else {
3314 empty = FALSE;
3315 }
3316
3317 return empty;
3318 }
3319
3320 /* pmap_remove_b INTERNAL
3321 **
3322 * Remove a range of addresses from an address space, trying to remove entire
3323 * C tables if possible.
3324 *
3325 * If the operation results in an empty B table, the function returns TRUE.
3326 */
3327 boolean_t
3328 pmap_remove_b(b_tbl, start, end)
3329 b_tmgr_t *b_tbl;
3330 vm_offset_t start;
3331 vm_offset_t end;
3332 {
3333 boolean_t empty;
3334 int idx;
3335 vm_offset_t nstart, nend, rstart;
3336 c_tmgr_t *c_tbl;
3337 mmu_short_dte_t *b_dte;
3338 mmu_short_pte_t *c_dte;
3339
3340
3341 nstart = MMU_ROUND_UP_B(start);
3342 nend = MMU_ROUND_B(end);
3343
3344 if (start < nstart) {
3345 idx = MMU_TIB(start);
3346 b_dte = &b_tbl->bt_dtbl[idx];
3347 if (MMU_VALID_DT(*b_dte)) {
3348 c_dte = mmu_ptov(MMU_DTE_PA(*b_dte));
3349 c_tbl = mmuC2tmgr(c_dte);
3350 if (end < nstart)
3351 empty = pmap_remove_c(c_tbl, start, end);
3352 else
3353 empty = pmap_remove_c(c_tbl, start, nstart);
3354 if (empty) {
3355 b_dte->attr.raw = MMU_DT_INVALID;
3356 b_tbl->bt_ecnt--;
3357 }
3358 }
3359 }
3360 if (nstart < nend) {
3361 idx = MMU_TIB(nstart);
3362 b_dte = &b_tbl->bt_dtbl[idx];
3363 rstart = nstart;
3364 while (rstart < nend) {
3365 if (MMU_VALID_DT(*b_dte)) {
3366 c_dte = mmu_ptov(MMU_DTE_PA(*b_dte));
3367 c_tbl = mmuC2tmgr(c_dte);
3368 free_c_table(c_tbl, TRUE);
3369 b_dte->attr.raw = MMU_DT_INVALID;
3370 b_tbl->bt_ecnt--;
3371 }
3372 b_dte++;
3373 rstart += MMU_TIB_RANGE;
3374 }
3375 }
3376 if (nend < end) {
3377 idx = MMU_TIB(nend);
3378 b_dte = &b_tbl->bt_dtbl[idx];
3379 if (MMU_VALID_DT(*b_dte)) {
3380 c_dte = mmu_ptov(MMU_DTE_PA(*b_dte));
3381 c_tbl = mmuC2tmgr(c_dte);
3382 empty = pmap_remove_c(c_tbl, nend, end);
3383 if (empty) {
3384 b_dte->attr.raw = MMU_DT_INVALID;
3385 b_tbl->bt_ecnt--;
3386 }
3387 }
3388 }
3389
3390 if (b_tbl->bt_ecnt == 0) {
3391 b_tbl->bt_parent = NULL;
3392 TAILQ_REMOVE(&b_pool, b_tbl, bt_link);
3393 TAILQ_INSERT_HEAD(&b_pool, b_tbl, bt_link);
3394 empty = TRUE;
3395 } else {
3396 empty = FALSE;
3397 }
3398
3399 return empty;
3400 }
3401
3402 /* pmap_remove_c INTERNAL
3403 **
3404 * Remove a range of addresses from the given C table.
3405 */
3406 boolean_t
3407 pmap_remove_c(c_tbl, start, end)
3408 c_tmgr_t *c_tbl;
3409 vm_offset_t start;
3410 vm_offset_t end;
3411 {
3412 boolean_t empty;
3413 int idx;
3414 mmu_short_pte_t *c_pte;
3415
3416 idx = MMU_TIC(start);
3417 c_pte = &c_tbl->ct_dtbl[idx];
3418 for (;start < end; start += MMU_PAGE_SIZE, c_pte++) {
3419 if (MMU_VALID_DT(*c_pte)) {
3420 pmap_remove_pte(c_pte);
3421 c_tbl->ct_ecnt--;
3422 }
3423 }
3424
3425 if (c_tbl->ct_ecnt == 0) {
3426 c_tbl->ct_parent = NULL;
3427 TAILQ_REMOVE(&c_pool, c_tbl, ct_link);
3428 TAILQ_INSERT_HEAD(&c_pool, c_tbl, ct_link);
3429 empty = TRUE;
3430 } else {
3431 empty = FALSE;
3432 }
3433
3434 return empty;
3435 }
3436
3437 /* is_managed INTERNAL
3438 **
3439 * Determine if the given physical address is managed by the PV system.
3440 * Note that this logic assumes that no one will ask for the status of
3441 * addresses which lie in-between the memory banks on the 3/80. If they
3442 * do so, it will falsely report that it is managed.
3443 *
3444 * Note: A "managed" address is one that was reported to the VM system as
3445 * a "usable page" during system startup. As such, the VM system expects the
 * pmap module to keep accurate track of the usage of those pages.
3447 * Any page not given to the VM system at startup does not exist (as far as
3448 * the VM system is concerned) and is therefore "unmanaged." Examples are
3449 * those pages which belong to the ROM monitor and the memory allocated before
3450 * the VM system was started.
3451 */
3452 boolean_t
3453 is_managed(pa)
3454 vm_offset_t pa;
3455 {
3456 if (pa >= avail_start && pa < avail_end)
3457 return TRUE;
3458 else
3459 return FALSE;
3460 }
3461
3462 /* pmap_bootstrap_alloc INTERNAL
3463 **
3464 * Used internally for memory allocation at startup when malloc is not
3465 * available. This code will fail once it crosses the first memory
3466 * bank boundary on the 3/80. Hopefully by then however, the VM system
3467 * will be in charge of allocation.
3468 */
3469 void *
3470 pmap_bootstrap_alloc(size)
3471 int size;
3472 {
3473 void *rtn;
3474
3475 #ifdef PMAP_DEBUG
3476 if (bootstrap_alloc_enabled == FALSE) {
3477 mon_printf("pmap_bootstrap_alloc: disabled\n");
3478 sunmon_abort();
3479 }
3480 #endif
3481
3482 rtn = (void *) virtual_avail;
3483 virtual_avail += size;
3484
3485 #ifdef PMAP_DEBUG
3486 if (virtual_avail > virtual_contig_end) {
3487 mon_printf("pmap_bootstrap_alloc: out of mem\n");
3488 sunmon_abort();
3489 }
3490 #endif
3491
3492 return rtn;
3493 }
3494
/* pmap_bootstrap_aalign INTERNAL
3496 **
 * Used to ensure that the next call to pmap_bootstrap_alloc() will
3498 * return a chunk of memory aligned to the specified size.
3499 *
3500 * Note: This function will only support alignment sizes that are powers
3501 * of two.
3502 */
3503 void
3504 pmap_bootstrap_aalign(size)
3505 int size;
3506 {
3507 int off;
3508
3509 off = virtual_avail & (size - 1);
3510 if (off) {
3511 (void) pmap_bootstrap_alloc(size - off);
3512 }
3513 }
3514
3515 /* pmap_pa_exists
3516 **
3517 * Used by the /dev/mem driver to see if a given PA is memory
3518 * that can be mapped. (The PA is not in a hole.)
3519 */
3520 int
3521 pmap_pa_exists(pa)
3522 vm_offset_t pa;
3523 {
3524 register int i;
3525
3526 for (i = 0; i < SUN3X_NPHYS_RAM_SEGS; i++) {
3527 if ((pa >= avail_mem[i].pmem_start) &&
3528 (pa < avail_mem[i].pmem_end))
3529 return (1);
3530 if (avail_mem[i].pmem_next == NULL)
3531 break;
3532 }
3533 return (0);
3534 }
3535
3536 /* Called only from locore.s and pmap.c */
3537 void _pmap_switch __P((pmap_t pmap));
3538
3539 /*
3540 * _pmap_switch INTERNAL
3541 *
3542 * This is called by locore.s:cpu_switch() when it is
3543 * switching to a new process. Load new translations.
3544 * Note: done in-line by locore.s unless PMAP_DEBUG
3545 *
3546 * Note that we do NOT allocate a context here, but
3547 * share the "kernel only" context until we really
3548 * need our own context for user-space mappings in
3549 * pmap_enter_user(). [ s/context/mmu A table/ ]
3550 */
3551 void
3552 _pmap_switch(pmap)
3553 pmap_t pmap;
3554 {
3555 u_long rootpa;
3556
3557 /*
3558 * Only do reload/flush if we have to.
3559 * Note that if the old and new process
3560 * were BOTH using the "null" context,
3561 * then this will NOT flush the TLB.
3562 */
3563 rootpa = pmap->pm_a_phys;
3564 if (kernel_crp.rp_addr != rootpa) {
3565 DPRINT(("pmap_activate(%p)\n", pmap));
3566 kernel_crp.rp_addr = rootpa;
3567 loadcrp(&kernel_crp);
3568 TBIAU();
3569 }
3570 }
3571
3572 /*
3573 * Exported version of pmap_activate(). This is called from the
3574 * machine-independent VM code when a process is given a new pmap.
3575 * If (p == curproc) do like cpu_switch would do; otherwise just
3576 * take this as notification that the process has a new pmap.
3577 */
3578 void
3579 pmap_activate(p)
3580 struct proc *p;
3581 {
3582 pmap_t pmap = p->p_vmspace->vm_map.pmap;
3583 int s;
3584
3585 if (p == curproc) {
3586 s = splimp();
3587 _pmap_switch(pmap);
3588 splx(s);
3589 }
3590 }
3591
3592 /*
3593 * pmap_deactivate INTERFACE
3594 **
3595 * This is called to deactivate the specified process's address space.
3596 * XXX The semantics of this function are currently not well-defined.
3597 */
3598 void
3599 pmap_deactivate(p)
3600 struct proc *p;
3601 {
3602 /* not implemented. */
3603 }
3604
3605 /* pmap_update
3606 **
3607 * Apply any delayed changes scheduled for all pmaps immediately.
3608 *
3609 * No delayed operations are currently done in this pmap.
3610 */
3611 void
3612 pmap_update()
3613 {
3614 /* not implemented. */
3615 }
3616
3617 /*
3618 * Fill in the sun3x-specific part of the kernel core header
3619 * for dumpsys(). (See machdep.c for the rest.)
3620 */
3621 void
3622 pmap_kcore_hdr(sh)
3623 struct sun3x_kcore_hdr *sh;
3624 {
3625 u_long spa, len;
3626 int i;
3627
3628 sh->pg_frame = MMU_SHORT_PTE_BASEADDR;
3629 sh->pg_valid = MMU_DT_PAGE;
3630 sh->contig_end = virtual_contig_end;
3631 sh->kernCbase = (u_long) kernCbase;
3632 for (i = 0; i < SUN3X_NPHYS_RAM_SEGS; i++) {
3633 spa = avail_mem[i].pmem_start;
3634 spa = m68k_trunc_page(spa);
3635 len = avail_mem[i].pmem_end - spa;
3636 len = m68k_round_page(len);
3637 sh->ram_segs[i].start = spa;
3638 sh->ram_segs[i].size = len;
3639 }
3640 }
3641
3642
3643 /* pmap_virtual_space INTERFACE
3644 **
3645 * Return the current available range of virtual addresses in the
3646 * arguuments provided. Only really called once.
3647 */
3648 void
3649 pmap_virtual_space(vstart, vend)
3650 vm_offset_t *vstart, *vend;
3651 {
3652 *vstart = virtual_avail;
3653 *vend = virtual_end;
3654 }
3655
3656 /* pmap_free_pages INTERFACE
3657 **
3658 * Return the number of physical pages still available.
3659 *
3660 * This is probably going to be a mess, but it's only called
3661 * once and it's the only function left that I have to implement!
3662 */
3663 u_int
3664 pmap_free_pages()
3665 {
3666 int i;
3667 u_int left;
3668 vm_offset_t avail;
3669
3670 avail = avail_next;
3671 left = 0;
3672 i = 0;
3673 while (avail >= avail_mem[i].pmem_end) {
3674 if (avail_mem[i].pmem_next == NULL)
3675 return 0;
3676 i++;
3677 }
3678 while (i < SUN3X_NPHYS_RAM_SEGS) {
3679 if (avail < avail_mem[i].pmem_start) {
3680 /* Avail is inside a hole, march it
3681 * up to the next bank.
3682 */
3683 avail = avail_mem[i].pmem_start;
3684 }
3685 left += m68k_btop(avail_mem[i].pmem_end - avail);
3686 if (avail_mem[i].pmem_next == NULL)
3687 break;
3688 i++;
3689 }
3690
3691 return left;
3692 }
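
/*
 * Worked example with hypothetical banks (8KB pages assumed): given
 * bank 0 at [0x0000000, 0x0400000) and bank 1 at [0x1000000, 0x1400000),
 * with avail_next at 0x0200000, the loops above yield
 * m68k_btop(0x200000) pages left in bank 0 plus m68k_btop(0x400000)
 * from bank 1; the hole between the banks contributes nothing.
 */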

/* pmap_page_index INTERFACE
 **
 * Return the index of the given physical page in a list of usable
 * physical pages in the system. Holes in physical memory may be counted
 * if so desired. As long as pmap_free_pages() and pmap_page_index()
 * agree as to whether holes in memory do or do not count as valid pages,
 * it really doesn't matter. However, if you like to save a little
 * memory, don't count holes as valid pages. This is even more true when
 * the holes are large.
 *
 * We will not count holes as valid pages. We can generate page indices
 * that conform to this by using the memory bank structures initialized
 * in pmap_alloc_pv().
 */
int
pmap_page_index(pa)
        vm_offset_t pa;
{
        struct pmap_physmem_struct *bank = avail_mem;

        /* Search for the memory bank with this page. */
        /* XXX - What if it is not physical memory? */
        while (pa >= bank->pmem_end)
                bank = bank->pmem_next;
        pa -= bank->pmem_start;

        return (bank->pmem_pvbase + m68k_btop(pa));
}
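
/*
 * Worked example with the same hypothetical banks as above (8KB pages):
 * bank 1 at [0x1000000, 0x1400000) would have been assigned pmem_pvbase
 * 0x200, the page count of bank 0.  Then pa == 0x1002000 yields
 * 0x200 + m68k_btop(0x2000) == 0x201, so the hole below bank 1 occupies
 * no pv slots, in agreement with pmap_free_pages().
 */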

/* pmap_next_page INTERFACE
 **
 * Place the physical address of the next available page in the
 * argument given. Returns FALSE if there are no more pages left.
 *
 * This function must jump over any holes in physical memory.
 * Once this function is used, any use of pmap_bootstrap_alloc()
 * is a sin. Sinners will be punished with erratic behavior.
 */
boolean_t
pmap_next_page(pa)
        vm_offset_t *pa;
{
        static struct pmap_physmem_struct *curbank = avail_mem;

        /* XXX - temporary ROM saving hack. */
        if (avail_next >= avail_end)
                return FALSE;

        /* If this bank is exhausted, advance to the next one. */
        if (avail_next >= curbank->pmem_end) {
                if (curbank->pmem_next == NULL)
                        return FALSE;
                curbank = curbank->pmem_next;
                avail_next = curbank->pmem_start;
        }

        *pa = avail_next;
        avail_next += NBPG;
        return TRUE;
}
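
/*
 * A sketch of the expected caller (an assumption about the MI VM
 * startup code, not a quote of it): physical pages are drained one
 * at a time until this function reports that none remain.
 *
 *      vm_offset_t pa;
 *
 *      while (pmap_next_page(&pa))
 *              [hand the page at pa to the VM free list];
 */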

/* pmap_count INTERFACE
 **
 * Return the number of resident (valid) pages in the given pmap.
 *
 * Note: If this function is handed the kernel map, it will report
 * that it has no mappings. Hopefully the VM system won't ask for kernel
 * map statistics.
 */
segsz_t
pmap_count(pmap, type)
        pmap_t pmap;
        int type;
{
        u_int count;
        int a_idx, b_idx;
        a_tmgr_t *a_tbl;
        b_tmgr_t *b_tbl;
        c_tmgr_t *c_tbl;

        /*
         * If the pmap does not have its own A table manager, it has no
         * valid entries.
         */
        if (pmap->pm_a_tmgr == NULL)
                return 0;

        a_tbl = pmap->pm_a_tmgr;

        count = 0;
        for (a_idx = 0; a_idx < MMU_TIA(KERNBASE); a_idx++) {
                if (MMU_VALID_DT(a_tbl->at_dtbl[a_idx])) {
                        b_tbl = mmuB2tmgr(mmu_ptov(a_tbl->at_dtbl[a_idx].addr.raw));
                        for (b_idx = 0; b_idx < MMU_B_TBL_SIZE; b_idx++) {
                                if (MMU_VALID_DT(b_tbl->bt_dtbl[b_idx])) {
                                        c_tbl = mmuC2tmgr(
                                            mmu_ptov(MMU_DTE_PA(b_tbl->bt_dtbl[b_idx])));
                                        if (type == 0)
                                                /*
                                                 * A resident entry count
                                                 * has been requested.
                                                 */
                                                count += c_tbl->ct_ecnt;
                                        else
                                                /*
                                                 * A wired entry count
                                                 * has been requested.
                                                 */
                                                count += c_tbl->ct_wcnt;
                                }
                        }
                }
        }

        return count;
}
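
/*
 * Usage note: type 0 requests the resident count; any other value
 * requests the wired count.  A caller filling in process statistics
 * might therefore do (sketch only):
 *
 *      resident = pmap_count(pmap, 0);
 *      wired = pmap_count(pmap, 1);
 */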

/************************ SUN3 COMPATIBILITY ROUTINES ********************
 * The following routines are only used by DDB for tricky kernel text   *
 * operations in db_memrw.c. They are provided for sun3 compatibility.  *
 *************************************************************************/

/* get_pte INTERNAL
 **
 * Return the page descriptor that describes the kernel mapping
 * of the given virtual address.
 */
extern u_long ptest_addr __P((u_long));        /* XXX: locore.s */
u_int
get_pte(va)
        vm_offset_t va;
{
        u_long pte_pa;
        mmu_short_pte_t *pte;

        /* Get the physical address of the PTE */
        pte_pa = ptest_addr(va & ~PGOFSET);

        /* Convert to a virtual address... */
        pte = (mmu_short_pte_t *) (KERNBASE + pte_pa);

        /* Make sure it is in our level-C tables... */
        if ((pte < kernCbase) ||
            (pte >= &mmuCbase[NUM_USER_PTES]))
                return 0;

        /* ... and just return its contents. */
        return (pte->attr.raw);
}

/* set_pte INTERNAL
 **
 * Set the page descriptor that describes the kernel mapping
 * of the given virtual address.
 */
void
set_pte(va, pte)
        vm_offset_t va;
        u_int pte;
{
        u_long idx;

        if (va < KERNBASE)
                return;

        idx = (unsigned long) m68k_btop(va - KERNBASE);
        kernCbase[idx].attr.raw = pte;
        TBIS(va);
}
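
/*
 * A minimal sketch of how db_memrw.c might use the pair above to poke
 * a byte into write-protected kernel text.  This is an assumption about
 * the caller, and MMU_SHORT_PTE_WP is assumed to be the write-protect
 * bit from the MMU headers:
 *
 *      u_int oldpte;
 *
 *      oldpte = get_pte(va);
 *      set_pte(va, oldpte & ~MMU_SHORT_PTE_WP);  [allow writes]
 *      [store the new byte at va]
 *      set_pte(va, oldpte);                      [restore protection]
 */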

#ifdef PMAP_DEBUG
/************************** DEBUGGING ROUTINES **************************
 * The following routines are meant to be an aid to debugging the pmap  *
 * system. They are callable from the DDB command line and should be    *
 * prepared to be handed unstable or incomplete states of the system.   *
 ************************************************************************/

/* pv_list
 **
 * List all pages found on the pv list for the given physical page.
 * To avoid endless loops, the listing will stop at the end of the list
 * or after 'n' entries - whichever comes first.
 */
void
pv_list(pa, n)
        vm_offset_t pa;
        int n;
{
        int idx;
        vm_offset_t va;
        pv_t *pv;
        c_tmgr_t *c_tbl;
        pmap_t pmap;

        pv = pa2pv(pa);
        idx = pv->pv_idx;

        for (; idx != PVE_EOL && n > 0; idx = pvebase[idx].pve_next, n--) {
                va = pmap_get_pteinfo(idx, &pmap, &c_tbl);
                printf("idx %d, pmap 0x%x, va 0x%x, c_tbl 0x%x\n",
                    idx, (u_int) pmap, (u_int) va, (u_int) c_tbl);
        }
}
#endif /* PMAP_DEBUG */

#ifdef NOT_YET
/* and maybe not ever */
/************************** LOW-LEVEL ROUTINES **************************
 * These routines will eventually be re-written into assembly and      *
 * placed in locore.s. They are here now as stubs so that the pmap     *
 * module can be linked as a standalone user program for testing.      *
 ************************************************************************/

/* flush_atc_crp INTERNAL
 **
 * Flush all page descriptors derived from the given CPU Root Pointer
 * (CRP), or 'A' table as it is known here, from the 68851's automatic
 * cache.
 */
void
flush_atc_crp(a_tbl)
        mmu_long_dte_t *a_tbl;        /* XXX: declaration was missing */
{
        mmu_long_rp_t rp;

        /* Create a temporary root table pointer that points to the
         * given A table.
         */
        rp.attr.raw = ~MMU_LONG_RP_LU;
        rp.addr.raw = (unsigned int) a_tbl;

        mmu_pflushr(&rp);
        /* mmu_pflushr:
         *      movel   sp(4)@,a0
         *      pflushr a0@
         *      rts
         */
}
#endif /* NOT_YET */