/*	$NetBSD: pmap.c,v 1.108 2009/11/07 07:27:48 cegger Exp $	*/

/*-
 * Copyright (c) 1996, 1997 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jeremy Cooper.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * XXX These comments aren't quite accurate. Need to change.
 * The sun3x uses the MC68851 Memory Management Unit, which is built
 * into the CPU. The 68851 maps virtual to physical addresses using
 * a multi-level table lookup, which is stored in the very memory that
 * it maps. The number of levels of lookup is configurable from one
 * to four. In this implementation, we use three, named 'A' through 'C'.
 *
 * The MMU translates virtual addresses into physical addresses by
 * traversing these tables in a process called a 'table walk'. The most
 * significant 7 bits of the Virtual Address ('VA') being translated are
 * used as an index into the level A table, whose base in physical memory
 * is stored in a special MMU register, the 'CPU Root Pointer' or CRP. The
 * address found at that index in the A table is used as the base
 * address for the next table, the B table. The next six bits of the VA are
 * used as an index into the B table, which in turn gives the base address
 * of the third and final C table.
 *
 * The next six bits of the VA are used as an index into the C table to
 * locate a Page Table Entry (PTE). The PTE contains the base physical
 * address of a page in memory, to which the remaining 13 bits of the VA
 * are added, producing the mapped physical address.
 *
 * To map the entire memory space in this manner would require 2114296 bytes
 * of page tables per process - quite expensive. Instead we will
 * allocate a fixed but considerably smaller space for the page tables at
 * the time the VM system is initialized. When the pmap code is asked by
 * the kernel to map a VA to a PA, it allocates tables as needed from this
 * pool. When there are no more tables in the pool, tables are stolen
 * from the oldest mapped entries in the tree. This is only possible
 * because all memory mappings are stored in the kernel memory map
 * structures, independent of the pmap structures. A VA which references
 * one of these invalidated maps will cause a page fault. The kernel
 * will determine that the page fault was caused by a task using a valid
 * VA, but for some reason (which does not concern it), that address was
 * not mapped. It will ask the pmap code to re-map the entry and then
 * it will resume executing the faulting task.
 *
 * In this manner the most efficient use of the page table space is
 * achieved. Tasks which do not execute often will have their tables
 * stolen and reused by tasks which execute more frequently. The best
 * size for the page table pool will probably be determined by
 * experimentation.
 *
 * You read all of the comments so far. Good for you.
 * Now go play!
 */
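
/*
 * An illustrative sketch (not part of the build) of how a 32-bit VA
 * splits across the three-level walk described above, using the
 * 7/6/6/13 bit split. The shift counts below are spelled out from that
 * split; the real code uses the MMU_TIA/MMU_TIB/MMU_TIC macros instead.
 *
 *	u_int a_idx = va >> 25;          |  7 bits: A table index (0..127)
 *	u_int b_idx = (va >> 19) & 0x3f; |  6 bits: B table index (0..63)
 *	u_int c_idx = (va >> 13) & 0x3f; |  6 bits: C table index (0..63)
 *	u_int off   = va & 0x1fff;       | 13 bits: offset in the 8KB page
 *
 * For example, va == 0xfe012abc decomposes into a_idx == 0x7f,
 * b_idx == 0x00, c_idx == 0x09 and off == 0xabc.
 */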

/*** A Note About the 68851 Address Translation Cache
 * The MC68851 has a 64-entry cache, called the Address Translation Cache
 * or 'ATC'. This cache stores the most recently used page descriptors
 * accessed by the MMU when it does translations. Using a marker called a
 * 'task alias' the MMU can store the descriptors from 8 different table
 * spaces concurrently. The task alias is associated with the base
 * address of the level A table of that address space. When an address
 * space is currently active (the CRP currently points to its A table)
 * the only cached descriptors that will be obeyed are ones which have a
 * matching task alias of the current space associated with them.
 *
 * Since the cache is always consulted before any table lookups are done,
 * it is important that it accurately reflect the state of the MMU tables.
 * Whenever a change has been made to a table that has been loaded into
 * the MMU, the code must be sure to flush any cached entries that are
 * affected by the change. These instances are documented in the code at
 * various points.
 */
/*** A Note About the Note About the 68851 Address Translation Cache
 * 4 months into this code I discovered that the sun3x does not have
 * a MC68851 chip. Instead, it has a version of this MMU that is part of
 * the 68030 CPU.
 * Although it behaves very similarly to the 68851, it only has 1 task
 * alias and a 22-entry cache. So sadly (or happily), the first paragraph
 * of the previous note does not apply to the sun3x pmap.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.108 2009/11/07 07:27:48 cegger Exp $");

#include "opt_ddb.h"
#include "opt_pmap_debug.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/user.h>
#include <sys/queue.h>
#include <sys/kcore.h>

#include <uvm/uvm.h>

#include <machine/cpu.h>
#include <machine/kcore.h>
#include <machine/mon.h>
#include <machine/pmap.h>
#include <machine/pte.h>
#include <machine/vmparam.h>
#include <m68k/cacheops.h>

#include <sun3/sun3/cache.h>
#include <sun3/sun3/machdep.h>

#include "pmap_pvt.h"

/* XXX - What headers declare these? */
extern struct pcb *curpcb;

/* Defined in locore.s */
extern char kernel_text[];

/* Defined by the linker */
extern char etext[], edata[], end[];
extern char *esym;	/* DDB */

/*************************** DEBUGGING DEFINITIONS ***********************
 * Macros, preprocessor defines and variables used in debugging can make *
 * code hard to read. Anything used exclusively for debugging purposes   *
 * is defined here to avoid having such mess scattered around the file.  *
 *************************************************************************/
#ifdef	PMAP_DEBUG
/*
 * To aid the debugging process, macros should be expanded into smaller steps
 * that accomplish the same goal, yet provide convenient places for placing
 * breakpoints. When this code is compiled with PMAP_DEBUG mode defined, the
 * 'INLINE' keyword is defined to an empty string. This way, any function
 * defined to be a 'static INLINE' will become 'outlined' and compiled as
 * a separate function, which is much easier to debug.
 */
#define	INLINE	/* nothing */

/*
 * It is sometimes convenient to watch the activity of a particular table
 * in the system. The following variables are used for that purpose.
 */
a_tmgr_t *pmap_watch_atbl = 0;
b_tmgr_t *pmap_watch_btbl = 0;
c_tmgr_t *pmap_watch_ctbl = 0;

int pmap_debug = 0;
#define	DPRINT(args) if (pmap_debug) printf args

#else	/********** Stuff below is defined if NOT debugging **************/

#define	INLINE	inline
#define	DPRINT(args)	/* nada */

#endif	/* PMAP_DEBUG */
/*********************** END OF DEBUGGING DEFINITIONS ********************/

/*** Management Structure - Memory Layout
 * For every MMU table in the sun3x pmap system there must be a way to
 * manage it; we must know which process is using it, what other tables
 * depend on it, and whether or not it contains any locked pages. This
 * is solved by the creation of 'table management' or 'tmgr'
 * structures, one for each MMU table in the system.
 *
 *		MAP OF MEMORY USED BY THE PMAP SYSTEM
 *
 *              towards lower memory
 * kernAbase -> +-------------------------------------------------------+
 *              | Kernel MMU A level table                              |
 * kernBbase -> +-------------------------------------------------------+
 *              | Kernel MMU B level tables                             |
 * kernCbase -> +-------------------------------------------------------+
 *              |                                                       |
 *              | Kernel MMU C level tables                             |
 *              |                                                       |
 * mmuCbase  -> +-------------------------------------------------------+
 *              | User MMU C level tables                               |
 * mmuBbase  -> +-------------------------------------------------------+
 *              | User MMU B level tables                               |
 * mmuAbase  -> +-------------------------------------------------------+
 *              |                                                       |
 *              | User MMU A level tables                               |
 *              |                                                       |
 * tmgrAbase -> +-------------------------------------------------------+
 *              | TMGR A level table structures                         |
 * tmgrBbase -> +-------------------------------------------------------+
 *              | TMGR B level table structures                         |
 * tmgrCbase -> +-------------------------------------------------------+
 *              | TMGR C level table structures                         |
 * pvebase   -> +-------------------------------------------------------+
 *              | Physical to Virtual mapping table (list elements)     |
 * pvbase    -> +-------------------------------------------------------+
 *              | Physical to Virtual mapping table (list heads)        |
 *              |                                                       |
 *              +-------------------------------------------------------+
 *              towards higher memory
 *
 * For every A table in the MMU A area, there will be a corresponding
 * a_tmgr structure in the TMGR A area. The same will be true for
 * the B and C tables. This arrangement will make it easy to find the
 * controlling tmgr structure for any table in the system by use of
 * (relatively) simple macros.
 */

/*
 * Global variables for storing the base addresses for the areas
 * labeled above.
 */
static vaddr_t		kernAphys;
static mmu_long_dte_t	*kernAbase;
static mmu_short_dte_t	*kernBbase;
static mmu_short_pte_t	*kernCbase;
static mmu_short_pte_t	*mmuCbase;
static mmu_short_dte_t	*mmuBbase;
static mmu_long_dte_t	*mmuAbase;
static a_tmgr_t		*Atmgrbase;
static b_tmgr_t		*Btmgrbase;
static c_tmgr_t		*Ctmgrbase;
static pv_t		*pvbase;
static pv_elem_t	*pvebase;
static struct pmap	kernel_pmap;
struct pmap *const kernel_pmap_ptr = &kernel_pmap;

/*
 * This holds the CRP currently loaded into the MMU.
 */
struct mmu_rootptr kernel_crp;

/*
 * Just all around global variables.
 */
static TAILQ_HEAD(a_pool_head_struct, a_tmgr_struct) a_pool;
static TAILQ_HEAD(b_pool_head_struct, b_tmgr_struct) b_pool;
static TAILQ_HEAD(c_pool_head_struct, c_tmgr_struct) c_pool;


/*
 * Flags used to mark the safety/availability of certain operations or
 * resources.
 */
/* Safe to use pmap_bootstrap_alloc(). */
static bool bootstrap_alloc_enabled = false;
/* Temporary virtual pages are in use */
int tmp_vpages_inuse;

/*
 * XXX: For now, retain the traditional variables that were
 * used in the old pmap/vm interface (without NONCONTIG).
 */
/* Kernel virtual address space available: */
vaddr_t virtual_avail, virtual_end;
/* Physical address space available: */
paddr_t avail_start, avail_end;

/* This keeps track of the end of the contiguously mapped range. */
vaddr_t virtual_contig_end;

/* Physical address used by pmap_next_page() */
paddr_t avail_next;

/* These are used by pmap_copy_page(), etc. */
vaddr_t tmp_vpages[2];

/* memory pool for pmap structures */
struct pool pmap_pmap_pool;

/*
 * The 3/80 is the only member of the sun3x family that has non-contiguous
 * physical memory. Memory is divided into 4 banks which are physically
 * locatable on the system board. Although the size of these banks varies
 * with the size of memory they contain, their base addresses are
 * permanently fixed. The following structure, which describes these
 * banks, is initialized by pmap_bootstrap() after it reads from a similar
 * structure provided by the ROM Monitor.
 *
 * For the other machines in the sun3x architecture which do have contiguous
 * RAM, this list will have only one entry, which will describe the entire
 * range of available memory.
 */
struct pmap_physmem_struct avail_mem[SUN3X_NPHYS_RAM_SEGS];
u_int total_phys_mem;

/*************************************************************************/

/*
 * XXX - Should "tune" these based on statistics.
 *
 * My first guess about the relative numbers of these needed is
 * based on the fact that a "typical" process will have several
 * pages mapped at low virtual addresses (text, data, bss), then
 * some mapped shared libraries, and then some stack pages mapped
 * near the high end of the VA space. Each process can use only
 * one A table, and most will use only two B tables (maybe three)
 * and probably about four C tables. Therefore, the first guess
 * at the relative numbers of these needed is 1:2:4 -gwr
 *
 * The number of C tables needed is closely related to the amount
 * of physical memory available plus a certain amount attributable
 * to the use of double mappings. With a few simulation statistics
 * we can find a reasonably good estimation of this unknown value.
 * Armed with that and the above ratios, we have a good idea of what
 * is needed at each level. -j
 *
 * Note: It is not the physical memory size, but the total mapped
 * virtual space required by the combined working sets of all the
 * currently _runnable_ processes. (Sleeping ones don't count.)
 * The amount of physical memory should be irrelevant. -gwr
 */
#ifdef	FIXED_NTABLES
#define NUM_A_TABLES	16
#define NUM_B_TABLES	32
#define NUM_C_TABLES	64
#else
unsigned int	NUM_A_TABLES, NUM_B_TABLES, NUM_C_TABLES;
#endif	/* FIXED_NTABLES */

/*
 * This determines our total virtual mapping capacity.
 * Yes, it is a FIXED value so we can pre-allocate.
 */
#define NUM_USER_PTES	(NUM_C_TABLES * MMU_C_TBL_SIZE)

/*
 * The size of the Kernel Virtual Address Space (KVAS)
 * for purposes of MMU table allocation is -KERNBASE
 * (length from KERNBASE to 0xFFFFFFFF)
 */
#define KVAS_SIZE	(-KERNBASE)

/* Numbers of kernel MMU tables to support KVAS_SIZE. */
#define KERN_B_TABLES	(KVAS_SIZE >> MMU_TIA_SHIFT)
#define KERN_C_TABLES	(KVAS_SIZE >> MMU_TIB_SHIFT)
#define NUM_KERN_PTES	(KVAS_SIZE >> MMU_TIC_SHIFT)
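
/*
 * A worked example of the arithmetic above, assuming the sun3x values
 * KERNBASE == 0xF8000000 (so KVAS_SIZE == 0x08000000, i.e. 128MB) and
 * the 7/6/6/13 bit split (MMU_TIA_SHIFT == 25, MMU_TIB_SHIFT == 19,
 * MMU_TIC_SHIFT == 13):
 *
 *	KERN_B_TABLES = 0x08000000 >> 25 =     4
 *	KERN_C_TABLES = 0x08000000 >> 19 =   256
 *	NUM_KERN_PTES = 0x08000000 >> 13 = 16384
 *
 * That is, 16384 PTEs of 8KB each cover the 128MB of kernel virtual
 * space.
 */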

/*************************** MISCELLANEOUS MACROS ************************/
#define pmap_lock(pmap)		simple_lock(&pmap->pm_lock)
#define pmap_unlock(pmap)	simple_unlock(&pmap->pm_lock)
#define pmap_add_ref(pmap)	++pmap->pm_refcount
#define pmap_del_ref(pmap)	--pmap->pm_refcount
#define pmap_refcount(pmap)	pmap->pm_refcount

void *pmap_bootstrap_alloc(int);

static INLINE void *mmu_ptov(paddr_t);
static INLINE paddr_t mmu_vtop(void *);

#if	0
static INLINE a_tmgr_t *mmuA2tmgr(mmu_long_dte_t *);
#endif /* 0 */
static INLINE b_tmgr_t *mmuB2tmgr(mmu_short_dte_t *);
static INLINE c_tmgr_t *mmuC2tmgr(mmu_short_pte_t *);

static INLINE pv_t *pa2pv(paddr_t);
static INLINE int pteidx(mmu_short_pte_t *);
static INLINE pmap_t current_pmap(void);

/*
 * We can always convert between virtual and physical addresses
 * for anything in the range [KERNBASE ... avail_start] because
 * that range is GUARANTEED to be mapped linearly.
 * We rely heavily upon this feature!
 */
static INLINE void *
mmu_ptov(paddr_t pa)
{
	vaddr_t va;

	va = (pa + KERNBASE);
#ifdef	PMAP_DEBUG
	if ((va < KERNBASE) || (va >= virtual_contig_end))
		panic("mmu_ptov");
#endif
	return (void *)va;
}

static INLINE paddr_t
mmu_vtop(void *vva)
{
	vaddr_t va;

	va = (vaddr_t)vva;
#ifdef	PMAP_DEBUG
	if ((va < KERNBASE) || (va >= virtual_contig_end))
		panic("mmu_vtop");
#endif
	return va - KERNBASE;
}

/*
 * These macros map MMU tables to their corresponding manager structures.
 * They are needed quite often because many of the pointers in the pmap
 * system reference MMU tables and not the structures that control them.
 * There needs to be a way to find one when given the other and these
 * macros do so by taking advantage of the memory layout described above.
 * Here's a quick step through the first macro, mmuA2tmgr():
 *
 * 1) find the offset of the given MMU A table from the base of its table
 *    pool (table - mmuAbase).
 * 2) convert this offset into a table index by dividing it by the
 *    size of one MMU 'A' table. (sizeof(mmu_long_dte_t) * MMU_A_TBL_SIZE)
 * 3) use this index to select the corresponding 'A' table manager
 *    structure from the 'A' table manager pool (Atmgrbase[index]).
 */
/* This function is not currently used. */
#if	0
static INLINE a_tmgr_t *
mmuA2tmgr(mmu_long_dte_t *mmuAtbl)
{
	int idx;

	/* Which table is this in? */
	idx = (mmuAtbl - mmuAbase) / MMU_A_TBL_SIZE;
#ifdef	PMAP_DEBUG
	if ((idx < 0) || (idx >= NUM_A_TABLES))
		panic("mmuA2tmgr");
#endif
	return &Atmgrbase[idx];
}
#endif	/* 0 */

static INLINE b_tmgr_t *
mmuB2tmgr(mmu_short_dte_t *mmuBtbl)
{
	int idx;

	/* Which table is this in? */
	idx = (mmuBtbl - mmuBbase) / MMU_B_TBL_SIZE;
#ifdef	PMAP_DEBUG
	if ((idx < 0) || (idx >= NUM_B_TABLES))
		panic("mmuB2tmgr");
#endif
	return &Btmgrbase[idx];
}

/* mmuC2tmgr			INTERNAL
 **
 * Given a pte known to belong to a C table, return the address of
 * that table's management structure.
 */
static INLINE c_tmgr_t *
mmuC2tmgr(mmu_short_pte_t *mmuCtbl)
{
	int idx;

	/* Which table is this in? */
	idx = (mmuCtbl - mmuCbase) / MMU_C_TBL_SIZE;
#ifdef	PMAP_DEBUG
	if ((idx < 0) || (idx >= NUM_C_TABLES))
		panic("mmuC2tmgr");
#endif
	return &Ctmgrbase[idx];
}

/* This is now a function call below.
 * #define pa2pv(pa) \
 *	(&pvbase[(unsigned long)\
 *		m68k_btop(pa)\
 *	])
 */

/* pa2pv			INTERNAL
 **
 * Return the pv_list_head element which manages the given physical
 * address.
 */
static INLINE pv_t *
pa2pv(paddr_t pa)
{
	struct pmap_physmem_struct *bank;
	int idx;

	bank = &avail_mem[0];
	while (pa >= bank->pmem_end)
		bank = bank->pmem_next;

	pa -= bank->pmem_start;
	idx = bank->pmem_pvbase + m68k_btop(pa);
#ifdef	PMAP_DEBUG
	if ((idx < 0) || (idx >= physmem))
		panic("pa2pv");
#endif
	return &pvbase[idx];
}

/* pteidx			INTERNAL
 **
 * Return the index of the given PTE within the entire fixed table of
 * PTEs.
 */
static INLINE int
pteidx(mmu_short_pte_t *pte)
{

	return pte - kernCbase;
}

/*
 * This just offers a place to put some debugging checks,
 * and reduces the number of places "curlwp" appears...
 */
static INLINE pmap_t
current_pmap(void)
{
	struct vmspace *vm;
	struct vm_map *map;
	pmap_t pmap;

	vm = curproc->p_vmspace;
	map = &vm->vm_map;
	pmap = vm_map_pmap(map);

	return pmap;
}


/*************************** FUNCTION DEFINITIONS ************************
 * These appear here merely for the compiler to enforce type checking on *
 * all function calls.                                                   *
 *************************************************************************/

/*
 * Internal functions
 */
a_tmgr_t *get_a_table(void);
b_tmgr_t *get_b_table(void);
c_tmgr_t *get_c_table(void);
int free_a_table(a_tmgr_t *, bool);
int free_b_table(b_tmgr_t *, bool);
int free_c_table(c_tmgr_t *, bool);

void pmap_bootstrap_aalign(int);
void pmap_alloc_usermmu(void);
void pmap_alloc_usertmgr(void);
void pmap_alloc_pv(void);
void pmap_init_a_tables(void);
void pmap_init_b_tables(void);
void pmap_init_c_tables(void);
void pmap_init_pv(void);
void pmap_clear_pv(paddr_t, int);
static INLINE bool is_managed(paddr_t);

bool pmap_remove_a(a_tmgr_t *, vaddr_t, vaddr_t);
bool pmap_remove_b(b_tmgr_t *, vaddr_t, vaddr_t);
bool pmap_remove_c(c_tmgr_t *, vaddr_t, vaddr_t);
void pmap_remove_pte(mmu_short_pte_t *);

void pmap_enter_kernel(vaddr_t, paddr_t, vm_prot_t);
static INLINE void pmap_remove_kernel(vaddr_t, vaddr_t);
static INLINE void pmap_protect_kernel(vaddr_t, vaddr_t, vm_prot_t);
static INLINE bool pmap_extract_kernel(vaddr_t, paddr_t *);
vaddr_t pmap_get_pteinfo(u_int, pmap_t *, c_tmgr_t **);
static INLINE int pmap_dereference(pmap_t);

bool pmap_stroll(pmap_t, vaddr_t, a_tmgr_t **, b_tmgr_t **, c_tmgr_t **,
    mmu_short_pte_t **, int *, int *, int *);
void pmap_bootstrap_copyprom(void);
void pmap_takeover_mmu(void);
void pmap_bootstrap_setprom(void);
static void pmap_page_upload(void);

#ifdef PMAP_DEBUG
/* Debugging function definitions */
void pv_list(paddr_t, int);
#endif	/* PMAP_DEBUG */

/** Interface functions
 ** - functions required by the Mach VM Pmap interface, with MACHINE_CONTIG
 **   defined.
 **   The new UVM doesn't require them, so they are now INTERNAL.
 **/
static INLINE void pmap_pinit(pmap_t);
static INLINE void pmap_release(pmap_t);

/********************************** CODE ********************************
 * Functions that are called from other parts of the kernel are labeled *
 * as 'INTERFACE' functions. Functions that are only called from        *
 * within the pmap module are labeled as 'INTERNAL' functions.          *
 * Functions that are internal, but are not (currently) used at all are *
 * labeled 'INTERNAL_X'.                                                *
 ************************************************************************/
605
606 /* pmap_bootstrap INTERNAL
607 **
608 * Initializes the pmap system. Called at boot time from
609 * locore2.c:_vm_init()
610 *
611 * Reminder: having a pmap_bootstrap_alloc() and also having the VM
612 * system implement pmap_steal_memory() is redundant.
613 * Don't release this code without removing one or the other!
614 */
615 void
616 pmap_bootstrap(vaddr_t nextva)
617 {
618 struct physmemory *membank;
619 struct pmap_physmem_struct *pmap_membank;
620 vaddr_t va, eva;
621 paddr_t pa;
622 int b, c, i, j; /* running table counts */
623 int size, resvmem;
624
625 /*
626 * This function is called by __bootstrap after it has
627 * determined the type of machine and made the appropriate
628 * patches to the ROM vectors (XXX- I don't quite know what I meant
629 * by that.) It allocates and sets up enough of the pmap system
630 * to manage the kernel's address space.
631 */
632
633 /*
634 * Determine the range of kernel virtual and physical
635 * space available. Note that we ABSOLUTELY DEPEND on
636 * the fact that the first bank of memory (4MB) is
637 * mapped linearly to KERNBASE (which we guaranteed in
638 * the first instructions of locore.s).
639 * That is plenty for our bootstrap work.
640 */
641 virtual_avail = m68k_round_page(nextva);
642 virtual_contig_end = KERNBASE + 0x400000; /* +4MB */
643 virtual_end = VM_MAX_KERNEL_ADDRESS;
644 /* Don't need avail_start til later. */
645
646 /* We may now call pmap_bootstrap_alloc(). */
647 bootstrap_alloc_enabled = true;
648
649 /*
650 * This is a somewhat unwrapped loop to deal with
651 * copying the PROM's 'phsymem' banks into the pmap's
652 * banks. The following is always assumed:
653 * 1. There is always at least one bank of memory.
654 * 2. There is always a last bank of memory, and its
655 * pmem_next member must be set to NULL.
656 */
657 membank = romVectorPtr->v_physmemory;
658 pmap_membank = avail_mem;
659 total_phys_mem = 0;
660
661 for (;;) { /* break on !membank */
662 pmap_membank->pmem_start = membank->address;
663 pmap_membank->pmem_end = membank->address + membank->size;
664 total_phys_mem += membank->size;
665 membank = membank->next;
666 if (!membank)
667 break;
668 /* This silly syntax arises because pmap_membank
669 * is really a pre-allocated array, but it is put into
670 * use as a linked list.
671 */
672 pmap_membank->pmem_next = pmap_membank + 1;
673 pmap_membank = pmap_membank->pmem_next;
674 }
	/* This is the last element. */
	pmap_membank->pmem_next = NULL;

	/*
	 * Note: total_phys_mem, physmem represent
	 * actual physical memory, including that
	 * reserved for the PROM monitor.
	 */
	physmem = btoc(total_phys_mem);

	/*
	 * Avail_end is set to the first byte of physical memory
	 * after the end of the last bank. We use this only to
	 * determine if a physical address is "managed" memory.
	 * This address range should be reduced to prevent the
	 * physical pages needed by the PROM monitor from being used
	 * in the VM system.
	 */
	resvmem = total_phys_mem - *(romVectorPtr->memoryAvail);
	resvmem = m68k_round_page(resvmem);
	avail_end = pmap_membank->pmem_end - resvmem;

	/*
	 * First allocate enough kernel MMU tables to map all
	 * of kernel virtual space from KERNBASE to 0xFFFFFFFF.
	 * Note: All must be aligned on 256-byte boundaries.
	 * Start with the level-A table (one of those).
	 */
	size = sizeof(mmu_long_dte_t) * MMU_A_TBL_SIZE;
	kernAbase = pmap_bootstrap_alloc(size);
	memset(kernAbase, 0, size);

	/* Now the level-B kernel tables... */
	size = sizeof(mmu_short_dte_t) * MMU_B_TBL_SIZE * KERN_B_TABLES;
	kernBbase = pmap_bootstrap_alloc(size);
	memset(kernBbase, 0, size);

	/* Now the level-C kernel tables... */
	size = sizeof(mmu_short_pte_t) * MMU_C_TBL_SIZE * KERN_C_TABLES;
	kernCbase = pmap_bootstrap_alloc(size);
	memset(kernCbase, 0, size);
	/*
	 * Note: In order for the PV system to work correctly, the kernel
	 * and user-level C tables must be allocated contiguously.
	 * Nothing should be allocated between here and the allocation of
	 * mmuCbase below. XXX: Should do this as one allocation, and
	 * then compute a pointer for mmuCbase instead of this...
	 *
	 * Allocate user MMU tables.
	 * These must be contiguous with the preceding.
	 */

#ifndef	FIXED_NTABLES
	/*
	 * The number of user-level C tables that should be allocated is
	 * related to the size of physical memory. In general, there should
	 * be enough tables to map four times the amount of available RAM.
	 * The extra amount is needed because some table space is wasted by
	 * fragmentation.
	 */
	NUM_C_TABLES = (total_phys_mem * 4) / (MMU_C_TBL_SIZE * MMU_PAGE_SIZE);
	NUM_B_TABLES = NUM_C_TABLES / 2;
	NUM_A_TABLES = NUM_B_TABLES / 2;
#endif	/* !FIXED_NTABLES */
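
	/*
	 * A hypothetical sizing, to make the ratios above concrete: one
	 * C table maps MMU_C_TBL_SIZE * MMU_PAGE_SIZE = 64 * 8KB = 512KB
	 * of virtual space (assuming those values). On a 32MB machine
	 * the formula above then yields:
	 *
	 *	NUM_C_TABLES = (32MB * 4) / 512KB = 256
	 *	NUM_B_TABLES = 256 / 2            = 128
	 *	NUM_A_TABLES = 128 / 2            =  64
	 */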

	size = sizeof(mmu_short_pte_t) * MMU_C_TBL_SIZE * NUM_C_TABLES;
	mmuCbase = pmap_bootstrap_alloc(size);

	size = sizeof(mmu_short_dte_t) * MMU_B_TBL_SIZE * NUM_B_TABLES;
	mmuBbase = pmap_bootstrap_alloc(size);

	size = sizeof(mmu_long_dte_t) * MMU_A_TBL_SIZE * NUM_A_TABLES;
	mmuAbase = pmap_bootstrap_alloc(size);

	/*
	 * Fill in the never-changing part of the kernel tables.
	 * For simplicity, the kernel's mappings will be editable as a
	 * flat array of page table entries at kernCbase. The
	 * higher level 'A' and 'B' tables must be initialized to point
	 * to this lower one.
	 */
	b = c = 0;

	/*
	 * Invalidate all mappings below KERNBASE in the A table.
	 * This area has already been zeroed out, but it is good
	 * practice to explicitly show that we are interpreting
	 * it as a list of A table descriptors.
	 */
	for (i = 0; i < MMU_TIA(KERNBASE); i++) {
		kernAbase[i].addr.raw = 0;
	}

	/*
	 * Set up the kernel A and B tables so that they will reference the
	 * correct spots in the contiguous table of PTEs allocated for the
	 * kernel's virtual memory space.
	 */
	for (i = MMU_TIA(KERNBASE); i < MMU_A_TBL_SIZE; i++) {
		kernAbase[i].attr.raw =
		    MMU_LONG_DTE_LU | MMU_LONG_DTE_SUPV | MMU_DT_SHORT;
		kernAbase[i].addr.raw = mmu_vtop(&kernBbase[b]);

		for (j = 0; j < MMU_B_TBL_SIZE; j++) {
			kernBbase[b + j].attr.raw =
			    mmu_vtop(&kernCbase[c]) | MMU_DT_SHORT;
			c += MMU_C_TBL_SIZE;
		}
		b += MMU_B_TBL_SIZE;
	}

	pmap_alloc_usermmu();	/* Allocate user MMU tables.        */
	pmap_alloc_usertmgr();	/* Allocate user MMU table managers.*/
	pmap_alloc_pv();	/* Allocate physical->virtual map.  */

	/*
	 * We are now done with pmap_bootstrap_alloc(). Round up
	 * `virtual_avail' to the nearest page, and set the flag
	 * to prevent use of pmap_bootstrap_alloc() hereafter.
	 */
	pmap_bootstrap_aalign(PAGE_SIZE);
	bootstrap_alloc_enabled = false;

	/*
	 * Now that we are done with pmap_bootstrap_alloc(), we
	 * must save the virtual and physical addresses of the
	 * end of the linearly mapped range, which are stored in
	 * virtual_contig_end and avail_start, respectively.
	 * These variables will never change after this point.
	 */
	virtual_contig_end = virtual_avail;
	avail_start = virtual_avail - KERNBASE;

	/*
	 * `avail_next' is a running pointer used by pmap_next_page() to
	 * keep track of the next available physical page to be handed
	 * to the VM system during its initialization, in which it
	 * asks for physical pages, one at a time.
	 */
	avail_next = avail_start;

	/*
	 * Now allocate some virtual addresses, but not the physical pages
	 * behind them. Note that virtual_avail is already page-aligned.
	 *
	 * tmp_vpages[] is an array of two virtual pages used for temporary
	 * kernel mappings in the pmap module to facilitate various physical
	 * address-oriented operations.
	 */
	tmp_vpages[0] = virtual_avail;
	virtual_avail += PAGE_SIZE;
	tmp_vpages[1] = virtual_avail;
	virtual_avail += PAGE_SIZE;

	/** Initialize the PV system **/
	pmap_init_pv();

	/*
	 * Fill in the kernel_pmap structure and kernel_crp.
	 */
	kernAphys = mmu_vtop(kernAbase);
	kernel_pmap.pm_a_tmgr = NULL;
	kernel_pmap.pm_a_phys = kernAphys;
	kernel_pmap.pm_refcount = 1; /* always in use */
	simple_lock_init(&kernel_pmap.pm_lock);

	kernel_crp.rp_attr = MMU_LONG_DTE_LU | MMU_DT_LONG;
	kernel_crp.rp_addr = kernAphys;

	/*
	 * Now pmap_enter_kernel() may be used safely and will be
	 * the main interface used hereafter to modify the kernel's
	 * virtual address space. Note that since we are still running
	 * under the PROM's address table, none of these table modifications
	 * actually take effect until pmap_takeover_mmu() is called.
	 *
	 * Note: Our tables do NOT have the PROM linear mappings!
	 * Only the mappings created here exist in our tables, so
	 * remember to map anything we expect to use.
	 */
	va = (vaddr_t)KERNBASE;
	pa = 0;

	/*
	 * The first page of the kernel virtual address space is the msgbuf
	 * page. The page attributes (data, non-cached) are set here, while
	 * the address is assigned to this global pointer in cpu_startup().
	 * It is non-cached, mostly due to paranoia.
	 */
	pmap_enter_kernel(va, pa|PMAP_NC, VM_PROT_ALL);
	va += PAGE_SIZE;
	pa += PAGE_SIZE;

	/* Next page is used as the temporary stack. */
	pmap_enter_kernel(va, pa, VM_PROT_ALL);
	va += PAGE_SIZE;
	pa += PAGE_SIZE;

	/*
	 * Map all of the kernel's text segment as read-only and cacheable.
	 * (Cacheable is implied by default). Unfortunately, the last bytes
	 * of kernel text and the first bytes of kernel data will often be
	 * sharing the same page. Therefore, the last page of kernel text
	 * has to be mapped as read/write, to accommodate the data.
	 */
	eva = m68k_trunc_page((vaddr_t)etext);
	for (; va < eva; va += PAGE_SIZE, pa += PAGE_SIZE)
		pmap_enter_kernel(va, pa, VM_PROT_READ|VM_PROT_EXECUTE);

	/*
	 * Map all of the kernel's data as read/write and cacheable.
	 * This includes: data, BSS, symbols, and everything in the
	 * contiguous memory used by pmap_bootstrap_alloc()
	 */
	for (; pa < avail_start; va += PAGE_SIZE, pa += PAGE_SIZE)
		pmap_enter_kernel(va, pa, VM_PROT_READ|VM_PROT_WRITE);

	/*
	 * At this point we are almost ready to take over the MMU. But first
	 * we must save the PROM's address space in our map, as we call its
	 * routines and make references to its data later in the kernel.
	 */
	pmap_bootstrap_copyprom();
	pmap_takeover_mmu();
	pmap_bootstrap_setprom();

	/* Notify the VM system of our page size. */
	uvmexp.pagesize = PAGE_SIZE;
	uvm_setpagesize();

	pmap_page_upload();
}


/* pmap_alloc_usermmu			INTERNAL
 **
 * Called from pmap_bootstrap() to allocate MMU tables that will
 * eventually be used for user mappings.
 */
void
pmap_alloc_usermmu(void)
{

	/* XXX: Moved into caller. */
}

/* pmap_alloc_pv			INTERNAL
 **
 * Called from pmap_bootstrap() to allocate the physical
 * to virtual mapping list. Each physical page of memory
 * in the system has a corresponding element in this list.
 */
void
pmap_alloc_pv(void)
{
	int i;
	unsigned int total_mem;

	/*
	 * Allocate a pv_head structure for every page of physical
	 * memory that will be managed by the system. Since memory on
	 * the 3/80 is non-contiguous, we cannot arrive at a total page
	 * count by subtraction of the lowest available address from the
	 * highest, but rather we have to step through each memory
	 * bank and add the number of pages in each to the total.
	 *
	 * At this time we also initialize the offset of each bank's
	 * starting pv_head within the pv_head list so that the physical
	 * memory state routines (pmap_is_referenced(),
	 * pmap_is_modified(), et al.) can quickly find corresponding
	 * pv_heads in spite of the non-contiguity.
	 */
	total_mem = 0;
	for (i = 0; i < SUN3X_NPHYS_RAM_SEGS; i++) {
		avail_mem[i].pmem_pvbase = m68k_btop(total_mem);
		total_mem += avail_mem[i].pmem_end - avail_mem[i].pmem_start;
		if (avail_mem[i].pmem_next == NULL)
			break;
	}
	pvbase = (pv_t *)pmap_bootstrap_alloc(sizeof(pv_t) *
	    m68k_btop(total_phys_mem));
}

/* pmap_alloc_usertmgr			INTERNAL
 **
 * Called from pmap_bootstrap() to allocate the structures which
 * facilitate management of user MMU tables. Each user MMU table
 * in the system has one such structure associated with it.
 */
void
pmap_alloc_usertmgr(void)
{
	/* Allocate user MMU table managers */
	/* It would be a lot simpler to just make these BSS, but */
	/* we may want to change their size at boot time... -j */
	Atmgrbase =
	    (a_tmgr_t *)pmap_bootstrap_alloc(sizeof(a_tmgr_t) * NUM_A_TABLES);
	Btmgrbase =
	    (b_tmgr_t *)pmap_bootstrap_alloc(sizeof(b_tmgr_t) * NUM_B_TABLES);
	Ctmgrbase =
	    (c_tmgr_t *)pmap_bootstrap_alloc(sizeof(c_tmgr_t) * NUM_C_TABLES);

	/*
	 * Allocate PV list elements for the physical to virtual
	 * mapping system.
	 */
	pvebase = (pv_elem_t *)pmap_bootstrap_alloc(sizeof(pv_elem_t) *
	    (NUM_USER_PTES + NUM_KERN_PTES));
}

/* pmap_bootstrap_copyprom()			INTERNAL
 **
 * Copy the PROM mappings into our own tables. Note, we
 * can use physical addresses until __bootstrap returns.
 */
void
pmap_bootstrap_copyprom(void)
{
	struct sunromvec *romp;
	int *mon_ctbl;
	mmu_short_pte_t *kpte;
	int i, len;

	romp = romVectorPtr;

	/*
	 * Copy the mappings in SUN3X_MON_KDB_BASE...SUN3X_MONEND
	 * Note: mon_ctbl[0] maps SUN3X_MON_KDB_BASE
	 */
	mon_ctbl = *romp->monptaddr;
	i = m68k_btop(SUN3X_MON_KDB_BASE - KERNBASE);
	kpte = &kernCbase[i];
	len = m68k_btop(SUN3X_MONEND - SUN3X_MON_KDB_BASE);

	for (i = 0; i < len; i++) {
		kpte[i].attr.raw = mon_ctbl[i];
	}

	/*
	 * Copy the mappings at MON_DVMA_BASE (to the end).
	 * Note, in here, mon_ctbl[0] maps MON_DVMA_BASE.
	 * Actually, we only want the last page, which the
	 * PROM has set up for use by the "ie" driver.
	 * (The i82586 needs its SCP there.)
	 * If we copy all the mappings, pmap_enter_kernel
	 * may complain about finding valid PTEs that are
	 * not recorded in our PV lists...
	 */
	mon_ctbl = *romp->shadowpteaddr;
	i = m68k_btop(SUN3X_MON_DVMA_BASE - KERNBASE);
	kpte = &kernCbase[i];
	len = m68k_btop(SUN3X_MON_DVMA_SIZE);
	for (i = (len - 1); i < len; i++) {
		kpte[i].attr.raw = mon_ctbl[i];
	}
}

/* pmap_takeover_mmu			INTERNAL
 **
 * Called from pmap_bootstrap() after it has copied enough of the
 * PROM mappings into the kernel map so that we can use our own
 * MMU table.
 */
void
pmap_takeover_mmu(void)
{

	loadcrp(&kernel_crp);
}

/* pmap_bootstrap_setprom()			INTERNAL
 **
 * Set the PROM mappings so it can see kernel space.
 * Note that physical addresses are used here, which
 * we can get away with because this runs with the
 * low 1GB set for transparent translation.
 */
void
pmap_bootstrap_setprom(void)
{
	mmu_long_dte_t *mon_dte;
	extern struct mmu_rootptr mon_crp;
	int i;

	mon_dte = (mmu_long_dte_t *)mon_crp.rp_addr;
	for (i = MMU_TIA(KERNBASE); i < MMU_TIA(KERN_END); i++) {
		mon_dte[i].attr.raw = kernAbase[i].attr.raw;
		mon_dte[i].addr.raw = kernAbase[i].addr.raw;
	}
}


/* pmap_init			INTERFACE
 **
 * Called at the end of vm_init() to set up the pmap system to go
 * into full time operation. All initialization of kernel_pmap
 * should be already done by now, so this should just do things
 * needed for user-level pmaps to work.
 */
void
pmap_init(void)
{

	/** Initialize the manager pools **/
	TAILQ_INIT(&a_pool);
	TAILQ_INIT(&b_pool);
	TAILQ_INIT(&c_pool);

	/**************************************************************
	 * Initialize all tmgr structures and MMU tables they manage. *
	 **************************************************************/
	/** Initialize A tables **/
	pmap_init_a_tables();
	/** Initialize B tables **/
	pmap_init_b_tables();
	/** Initialize C tables **/
	pmap_init_c_tables();

	/** Initialize the pmap pools **/
	pool_init(&pmap_pmap_pool, sizeof(struct pmap), 0, 0, 0, "pmappl",
	    &pool_allocator_nointr, IPL_NONE);
}

/* pmap_init_a_tables()			INTERNAL
 **
 * Initializes all A managers, their MMU A tables, and inserts
 * them into the A manager pool for use by the system.
 */
void
pmap_init_a_tables(void)
{
	int i;
	a_tmgr_t *a_tbl;

	for (i = 0; i < NUM_A_TABLES; i++) {
		/* Select the next available A manager from the pool */
		a_tbl = &Atmgrbase[i];

		/*
		 * Clear its parent entry. Set its wired and valid
		 * entry count to zero.
		 */
		a_tbl->at_parent = NULL;
		a_tbl->at_wcnt = a_tbl->at_ecnt = 0;

		/* Assign it the next available MMU A table from the pool */
		a_tbl->at_dtbl = &mmuAbase[i * MMU_A_TBL_SIZE];

		/*
		 * Initialize the MMU A table with the kernel's own A
		 * table (the `proc0', or kernel, mapping). This ensures
		 * that every process has the kernel mapped in the top
		 * part of its address space.
		 */
		memcpy(a_tbl->at_dtbl, kernAbase,
		    MMU_A_TBL_SIZE * sizeof(mmu_long_dte_t));

		/*
		 * Finally, insert the manager into the A pool,
		 * making it ready to be used by the system.
		 */
		TAILQ_INSERT_TAIL(&a_pool, a_tbl, at_link);
	}
}

/* pmap_init_b_tables()			INTERNAL
 **
 * Initializes all B table managers, their MMU B tables, and
 * inserts them into the B manager pool for use by the system.
 */
void
pmap_init_b_tables(void)
{
	int i, j;
	b_tmgr_t *b_tbl;

	for (i = 0; i < NUM_B_TABLES; i++) {
		/* Select the next available B manager from the pool */
		b_tbl = &Btmgrbase[i];

		b_tbl->bt_parent = NULL;	/* clear its parent,  */
		b_tbl->bt_pidx = 0;		/* parent index,      */
		b_tbl->bt_wcnt = 0;		/* wired entry count, */
		b_tbl->bt_ecnt = 0;		/* valid entry count. */

		/* Assign it the next available MMU B table from the pool */
		b_tbl->bt_dtbl = &mmuBbase[i * MMU_B_TBL_SIZE];

		/* Invalidate every descriptor in the table */
		for (j = 0; j < MMU_B_TBL_SIZE; j++)
			b_tbl->bt_dtbl[j].attr.raw = MMU_DT_INVALID;

		/* Insert the manager into the B pool */
		TAILQ_INSERT_TAIL(&b_pool, b_tbl, bt_link);
	}
}

/* pmap_init_c_tables()			INTERNAL
 **
 * Initializes all C table managers, their MMU C tables, and
 * inserts them into the C manager pool for use by the system.
 */
void
pmap_init_c_tables(void)
{
	int i, j;
	c_tmgr_t *c_tbl;

	for (i = 0; i < NUM_C_TABLES; i++) {
		/* Select the next available C manager from the pool */
		c_tbl = &Ctmgrbase[i];

		c_tbl->ct_parent = NULL;	/* clear its parent,     */
		c_tbl->ct_pidx = 0;		/* parent index,         */
		c_tbl->ct_wcnt = 0;		/* wired entry count,    */
		c_tbl->ct_ecnt = 0;		/* valid entry count,    */
		c_tbl->ct_pmap = NULL;		/* parent pmap,          */
		c_tbl->ct_va = 0;		/* base of managed range */

		/* Assign it the next available MMU C table from the pool */
		c_tbl->ct_dtbl = &mmuCbase[i * MMU_C_TBL_SIZE];

		for (j = 0; j < MMU_C_TBL_SIZE; j++)
			c_tbl->ct_dtbl[j].attr.raw = MMU_DT_INVALID;

		TAILQ_INSERT_TAIL(&c_pool, c_tbl, ct_link);
	}
}

/* pmap_init_pv()			INTERNAL
 **
 * Initializes the Physical to Virtual mapping system.
 */
void
pmap_init_pv(void)
{
	int i;

	/* Initialize every PV head. */
	for (i = 0; i < m68k_btop(total_phys_mem); i++) {
		pvbase[i].pv_idx = PVE_EOL;	/* Indicate no mappings */
		pvbase[i].pv_flags = 0;		/* Zero out page flags  */
	}
}

/* is_managed				INTERNAL
 **
 * Determine if the given physical address is managed by the PV system.
 * Note that this logic assumes that no one will ask for the status of
 * addresses which lie in-between the memory banks on the 3/80. If they
 * do so, it will falsely report that it is managed.
 *
 * Note: A "managed" address is one that was reported to the VM system as
 * a "usable page" during system startup. As such, the VM system expects the
 * pmap module to keep an accurate track of the usage of those pages.
 * Any page not given to the VM system at startup does not exist (as far as
 * the VM system is concerned) and is therefore "unmanaged." Examples are
 * those pages which belong to the ROM monitor and the memory allocated before
 * the VM system was started.
 */
static INLINE bool
is_managed(paddr_t pa)
{
	if (pa >= avail_start && pa < avail_end)
		return true;
	else
		return false;
}

/* get_a_table			INTERNAL
 **
 * Retrieve and return a level A table for use in a user map.
 */
a_tmgr_t *
get_a_table(void)
{
	a_tmgr_t *tbl;
	pmap_t pmap;

	/* Get the top A table in the pool */
	tbl = TAILQ_FIRST(&a_pool);
	if (tbl == NULL) {
		/*
		 * XXX - Instead of panicking here and in other get_x_table
		 * functions, we do have the option of sleeping on the head of
		 * the table pool. Any function which updates the table pool
		 * would then issue a wakeup() on the head, thus waking up any
		 * processes waiting for a table.
		 *
		 * Actually, the place to sleep would be when some process
		 * asks for a "wired" mapping that would run us short of
		 * mapping resources. This design DEPENDS on always having
		 * some mapping resources in the pool for stealing, so we
		 * must make sure we NEVER let the pool become empty. -gwr
		 */
		panic("get_a_table: out of A tables.");
	}

	TAILQ_REMOVE(&a_pool, tbl, at_link);
	/*
	 * If the table has a non-null parent pointer then it is in use.
	 * Forcibly abduct it from its parent and clear its entries.
	 * No re-entrancy worries here. This table would not be in the
	 * table pool unless it was available for use.
	 *
	 * Note that the second argument to free_a_table() is false. This
	 * indicates that the table should not be relinked into the A table
	 * pool. That is a job for the function that called us.
	 */
	if (tbl->at_parent) {
		KASSERT(tbl->at_wcnt == 0);
		pmap = tbl->at_parent;
		free_a_table(tbl, false);
		pmap->pm_a_tmgr = NULL;
		pmap->pm_a_phys = kernAphys;
	}
	return tbl;
}

/* get_b_table			INTERNAL
 **
 * Return a level B table for use.
 */
b_tmgr_t *
get_b_table(void)
{
	b_tmgr_t *tbl;

	/* See 'get_a_table' for comments. */
	tbl = TAILQ_FIRST(&b_pool);
	if (tbl == NULL)
		panic("get_b_table: out of B tables.");
	TAILQ_REMOVE(&b_pool, tbl, bt_link);
	if (tbl->bt_parent) {
		KASSERT(tbl->bt_wcnt == 0);
		tbl->bt_parent->at_dtbl[tbl->bt_pidx].attr.raw = MMU_DT_INVALID;
		tbl->bt_parent->at_ecnt--;
		free_b_table(tbl, false);
	}
	return tbl;
}

/* get_c_table			INTERNAL
 **
 * Return a level C table for use.
 */
c_tmgr_t *
get_c_table(void)
{
	c_tmgr_t *tbl;

	/* See 'get_a_table' for comments */
	tbl = TAILQ_FIRST(&c_pool);
	if (tbl == NULL)
		panic("get_c_table: out of C tables.");
	TAILQ_REMOVE(&c_pool, tbl, ct_link);
	if (tbl->ct_parent) {
		KASSERT(tbl->ct_wcnt == 0);
		tbl->ct_parent->bt_dtbl[tbl->ct_pidx].attr.raw = MMU_DT_INVALID;
		tbl->ct_parent->bt_ecnt--;
		free_c_table(tbl, false);
	}
	return tbl;
}

/*
 * The following 'free_table' and 'steal_table' functions are called to
 * detach tables from their current obligations (parents and children) and
 * prepare them for reuse in another mapping.
 *
 * Free_table is used when the calling function will handle the fate
 * of the parent table, such as returning it to the free pool when it has
 * no valid entries. Functions that do not want to handle this should
 * call steal_table, in which the parent table's descriptors and entry
 * count are automatically modified when this table is removed.
 */

/* free_a_table			INTERNAL
 **
 * Unmaps the given A table and all child tables from their current
 * mappings. Returns the number of pages that were invalidated.
 * If 'relink' is true, the function will return the table to the head
 * of the available table pool.
 *
 * Cache note: The MC68851 will automatically flush all
 * descriptors derived from a given A table from its
 * Automatic Translation Cache (ATC) if we issue a
 * 'PFLUSHR' instruction with the base address of the
 * table. This function should do so, and does.
 * Note note: We are using an MC68030 - there is no
 * PFLUSHR.
 */
int
free_a_table(a_tmgr_t *a_tbl, bool relink)
{
	int i, removed_cnt;
	mmu_long_dte_t	*dte;
	mmu_short_dte_t *dtbl;
	b_tmgr_t	*b_tbl;
	uint8_t at_wired, bt_wired;

	/*
	 * Flush the ATC cache of all cached descriptors derived
	 * from this table.
	 * Sun3x does not use 68851's cached table feature
	 * flush_atc_crp(mmu_vtop(a_tbl->dte));
	 */

	/*
	 * Remove any pending cache flushes that were designated
	 * for the pmap this A table belongs to.
	 * a_tbl->parent->atc_flushq[0] = 0;
	 * Not implemented in sun3x.
	 */

	/*
	 * All A tables in the system should retain a map for the
	 * kernel. If the table contains any valid descriptors
	 * (other than those for the kernel area), invalidate them all,
	 * stopping short of the kernel's entries.
	 */
	removed_cnt = 0;
	at_wired = a_tbl->at_wcnt;
	if (a_tbl->at_ecnt) {
		dte = a_tbl->at_dtbl;
		for (i = 0; i < MMU_TIA(KERNBASE); i++) {
			/*
			 * If a table entry points to a valid B table, free
			 * it and its children.
			 */
			if (MMU_VALID_DT(dte[i])) {
				/*
				 * The following block does several things,
				 * from innermost expression to the
				 * outermost:
				 * 1) It extracts the base address of the
				 *    B table pointed to in the A table
				 *    entry dte[i].
				 * 2) It converts this base address into
				 *    the virtual address it can be
				 *    accessed with. (all MMU tables point
				 *    to physical addresses.)
				 * 3) It finds the corresponding manager
				 *    structure which manages this MMU table.
				 * 4) It frees the manager structure.
				 *    (This frees the MMU table and all
				 *    child tables. See 'free_b_table' for
				 *    details.)
				 */
				dtbl = mmu_ptov(dte[i].addr.raw);
				b_tbl = mmuB2tmgr(dtbl);
				bt_wired = b_tbl->bt_wcnt;
				removed_cnt += free_b_table(b_tbl, true);
				if (bt_wired)
					a_tbl->at_wcnt--;
				dte[i].attr.raw = MMU_DT_INVALID;
			}
		}
		a_tbl->at_ecnt = 0;
	}
	KASSERT(a_tbl->at_wcnt == 0);

	if (relink) {
		a_tbl->at_parent = NULL;
		if (!at_wired)
			TAILQ_REMOVE(&a_pool, a_tbl, at_link);
		TAILQ_INSERT_HEAD(&a_pool, a_tbl, at_link);
	}
	return removed_cnt;
}

/* free_b_table			INTERNAL
 **
 * Unmaps the given B table and all its children from their current
 * mappings. Returns the number of pages that were invalidated.
 * (For comments, see 'free_a_table()').
 */
int
free_b_table(b_tmgr_t *b_tbl, bool relink)
{
	int i, removed_cnt;
	mmu_short_dte_t *dte;
	mmu_short_pte_t	*dtbl;
	c_tmgr_t	*c_tbl;
	uint8_t bt_wired, ct_wired;

	removed_cnt = 0;
	bt_wired = b_tbl->bt_wcnt;
	if (b_tbl->bt_ecnt) {
		dte = b_tbl->bt_dtbl;
		for (i = 0; i < MMU_B_TBL_SIZE; i++) {
			if (MMU_VALID_DT(dte[i])) {
				dtbl = mmu_ptov(MMU_DTE_PA(dte[i]));
				c_tbl = mmuC2tmgr(dtbl);
				ct_wired = c_tbl->ct_wcnt;
				removed_cnt += free_c_table(c_tbl, true);
				if (ct_wired)
					b_tbl->bt_wcnt--;
				dte[i].attr.raw = MMU_DT_INVALID;
			}
		}
		b_tbl->bt_ecnt = 0;
	}
	KASSERT(b_tbl->bt_wcnt == 0);

	if (relink) {
		b_tbl->bt_parent = NULL;
		if (!bt_wired)
			TAILQ_REMOVE(&b_pool, b_tbl, bt_link);
		TAILQ_INSERT_HEAD(&b_pool, b_tbl, bt_link);
	}
	return removed_cnt;
}

/* free_c_table			INTERNAL
 **
 * Unmaps the given C table from use and returns it to the pool for
 * re-use. Returns the number of pages that were invalidated.
 *
 * This function preserves any physical page modification information
 * contained in the page descriptors within the C table by calling
 * 'pmap_remove_pte().'
 */
int
free_c_table(c_tmgr_t *c_tbl, bool relink)
{
	mmu_short_pte_t *c_pte;
	int i, removed_cnt;
	uint8_t ct_wired;

	removed_cnt = 0;
	ct_wired = c_tbl->ct_wcnt;
	if (c_tbl->ct_ecnt) {
		for (i = 0; i < MMU_C_TBL_SIZE; i++) {
			c_pte = &c_tbl->ct_dtbl[i];
			if (MMU_VALID_DT(*c_pte)) {
				if (c_pte->attr.raw & MMU_SHORT_PTE_WIRED)
					c_tbl->ct_wcnt--;
				pmap_remove_pte(c_pte);
				removed_cnt++;
			}
		}
		c_tbl->ct_ecnt = 0;
	}
	KASSERT(c_tbl->ct_wcnt == 0);

	if (relink) {
		c_tbl->ct_parent = NULL;
		if (!ct_wired)
			TAILQ_REMOVE(&c_pool, c_tbl, ct_link);
		TAILQ_INSERT_HEAD(&c_pool, c_tbl, ct_link);
	}
	return removed_cnt;
}


/* pmap_remove_pte			INTERNAL
 **
 * Unmap the given pte and preserve any page modification
 * information by transferring it to the pv head of the
 * physical page it maps to. This function does not update
 * any reference counts because it is assumed that the calling
 * function will do so.
 */
void
pmap_remove_pte(mmu_short_pte_t *pte)
{
	u_short pv_idx, targ_idx;
	paddr_t pa;
	pv_t *pv;

	pa = MMU_PTE_PA(*pte);
	if (is_managed(pa)) {
		pv = pa2pv(pa);
		targ_idx = pteidx(pte);	/* Index of PTE being removed */

		/*
		 * If the PTE being removed is the first (or only) PTE in
		 * the list of PTEs currently mapped to this page, remove the
		 * PTE by changing the index found on the PV head. Otherwise
		 * a linear search through the list will have to be executed
		 * in order to find the PVE which points to the PTE being
		 * removed, so that it may be modified to point to its new
		 * neighbor.
		 */
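
		/*
		 * A sketch of the index-linked list being edited here
		 * (indices into pvebase[], terminated by PVE_EOL):
		 *
		 *	pv->pv_idx -> pvebase[i].pve_next -> ... -> PVE_EOL
		 *
		 * Removing the head is a single assignment to pv->pv_idx;
		 * removing any other element requires walking the chain
		 * to find its predecessor, as done below.
		 */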

		pv_idx = pv->pv_idx;	/* Index of first PTE in PV list */
		if (pv_idx == targ_idx) {
			pv->pv_idx = pvebase[targ_idx].pve_next;
		} else {

			/*
			 * Find the PV element pointing to the target
			 * element. Note: may have pv_idx==PVE_EOL
			 */

			for (;;) {
				if (pv_idx == PVE_EOL) {
					goto pv_not_found;
				}
				if (pvebase[pv_idx].pve_next == targ_idx)
					break;
				pv_idx = pvebase[pv_idx].pve_next;
			}

			/*
			 * At this point, pv_idx is the index of the PV
			 * element just before the target element in the list.
			 * Unlink the target.
			 */

			pvebase[pv_idx].pve_next = pvebase[targ_idx].pve_next;
		}

		/*
		 * Save the mod/ref bits of the pte by simply
		 * ORing the entire pte onto the pv_flags member
		 * of the pv structure.
		 * There is no need to use a separate bit pattern
		 * for usage information on the pv head than that
		 * which is used on the MMU ptes.
		 */

pv_not_found:
		pv->pv_flags |= (u_short) pte->attr.raw;
	}
	pte->attr.raw = MMU_DT_INVALID;
}
1602
1603 /* pmap_stroll INTERNAL
1604 **
1605 * Retrieve the addresses of all table managers involved in the mapping of
1606 * the given virtual address. If the table walk completed successfully,
1607 * return true. If it was only partially successful, return false.
1608 * The table walk performed by this function is important to many other
1609 * functions in this module.
1610 *
1611 * Note: This function ought to be easier to read.
1612 */
1613 bool
1614 pmap_stroll(pmap_t pmap, vaddr_t va, a_tmgr_t **a_tbl, b_tmgr_t **b_tbl,
1615 c_tmgr_t **c_tbl, mmu_short_pte_t **pte, int *a_idx, int *b_idx,
1616 int *pte_idx)
1617 {
1618 mmu_long_dte_t *a_dte; /* A: long descriptor table */
1619 mmu_short_dte_t *b_dte; /* B: short descriptor table */
1620
1621 if (pmap == pmap_kernel())
1622 return false;
1623
1624 /* Does the given pmap have its own A table? */
1625 *a_tbl = pmap->pm_a_tmgr;
1626 if (*a_tbl == NULL)
1627 return false; /* No. Return unknown. */
1628 /* Does the A table have a valid B table
1629 * under the corresponding table entry?
1630 */
1631 *a_idx = MMU_TIA(va);
1632 a_dte = &((*a_tbl)->at_dtbl[*a_idx]);
1633 if (!MMU_VALID_DT(*a_dte))
1634 return false; /* No. Return unknown. */
1635 /* Yes. Extract B table from the A table. */
1636 *b_tbl = mmuB2tmgr(mmu_ptov(a_dte->addr.raw));
1637 /*
1638 * Does the B table have a valid C table
1639 * under the corresponding table entry?
1640 */
1641 *b_idx = MMU_TIB(va);
1642 b_dte = &((*b_tbl)->bt_dtbl[*b_idx]);
1643 if (!MMU_VALID_DT(*b_dte))
1644 return false; /* No. Return unknown. */
1645 /* Yes. Extract C table from the B table. */
1646 *c_tbl = mmuC2tmgr(mmu_ptov(MMU_DTE_PA(*b_dte)));
1647 *pte_idx = MMU_TIC(va);
1648 *pte = &((*c_tbl)->ct_dtbl[*pte_idx]);
1649
1650 return true;
1651 }
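
/*
 * A minimal sketch of the calling pattern for pmap_stroll(), as used by
 * pmap_unwire() and pmap_extract() below; a false return means the walk
 * hit an invalid level and the VA has no mapping in this pmap:
 *
 *	a_tmgr_t *a_tbl; b_tmgr_t *b_tbl; c_tmgr_t *c_tbl;
 *	mmu_short_pte_t *pte;
 *	int a_idx, b_idx, pte_idx;
 *
 *	if (pmap_stroll(pmap, va, &a_tbl, &b_tbl, &c_tbl, &pte,
 *	    &a_idx, &b_idx, &pte_idx) == false)
 *		return;		(nothing mapped at 'va')
 *	(otherwise *pte is the PTE that maps 'va')
 */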
1652
1653 /* pmap_enter INTERFACE
1654 **
1655 * Called by the kernel to map a virtual address
1656 * to a physical address in the given process map.
1657 *
1658 * Note: this function should apply an exclusive lock
1659 * on the pmap system for its duration. (it certainly
1660 * would save my hair!!)
1661 * This function ought to be easier to read.
1662 */
1663 int
1664 pmap_enter(pmap_t pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags)
1665 {
1666 bool insert, managed; /* Marks the need for PV insertion.*/
1667 u_short nidx; /* PV list index */
1668 int mapflags; /* Flags for the mapping (see NOTE1) */
1669 u_int a_idx, b_idx, pte_idx; /* table indices */
1670 a_tmgr_t *a_tbl; /* A: long descriptor table manager */
1671 b_tmgr_t *b_tbl; /* B: short descriptor table manager */
1672 c_tmgr_t *c_tbl; /* C: short page table manager */
1673 mmu_long_dte_t *a_dte; /* A: long descriptor table */
1674 mmu_short_dte_t *b_dte; /* B: short descriptor table */
1675 mmu_short_pte_t *c_pte; /* C: short page descriptor table */
1676 pv_t *pv; /* pv list head */
1677 bool wired; /* is the mapping to be wired? */
1678 enum {NONE, NEWA, NEWB, NEWC} llevel; /* used at end */
1679
1680 if (pmap == pmap_kernel()) {
1681 pmap_enter_kernel(va, pa, prot);
1682 return 0;
1683 }
1684
1685 /*
1686 * Determine if the mapping should be wired.
1687 */
1688 wired = ((flags & PMAP_WIRED) != 0);
1689
1690 /*
1691 * NOTE1:
1692 *
1693 * On November 13, 1999, someone changed the pmap_enter() API such
1694 * that it now accepts a 'flags' argument. This new argument
1695 * contains bit-flags for the architecture-independent (UVM) system to
1696 * use in signalling certain mapping requirements to the architecture-
1697 * dependent (pmap) system. The argument it replaces, 'wired', is now
1698 * one of the flags within it.
1699 *
1700 * In addition to flags signaled by the architecture-independent
1701 * system, parts of the architecture-dependent section of the sun3x
1702 * kernel pass their own flags in the lower, unused bits of the
1703 * physical address supplied to this function. These flags are
1704 * extracted and stored in the temporary variable 'mapflags'.
1705 *
1706 * Extract sun3x specific flags from the physical address.
1707 */
1708 mapflags = (pa & ~MMU_PAGE_MASK);
1709 pa &= MMU_PAGE_MASK;
1710
1711 /*
1712 * Determine if the physical address being mapped is on-board RAM.
1713 * Any other area of the address space is likely to belong to a
1714 	 * device and hence it would be disastrous to cache its contents.
1715 */
1716 if ((managed = is_managed(pa)) == false)
1717 mapflags |= PMAP_NC;
1718
1719 /*
1720 * For user mappings we walk along the MMU tables of the given
1721 * pmap, reaching a PTE which describes the virtual page being
1722 * mapped or changed. If any level of the walk ends in an invalid
1723 * entry, a table must be allocated and the entry must be updated
1724 * to point to it.
1725 * There is a bit of confusion as to whether this code must be
1726 * re-entrant. For now we will assume it is. To support
1727 * re-entrancy we must unlink tables from the table pool before
1728 * we assume we may use them. Tables are re-linked into the pool
1729 * when we are finished with them at the end of the function.
1730 * But I don't feel like doing that until we have proof that this
1731 * needs to be re-entrant.
1732 * 'llevel' records which tables need to be relinked.
1733 */
1734 llevel = NONE;
1735
1736 /*
1737 * Step 1 - Retrieve the A table from the pmap. If it has no
1738 * A table, allocate a new one from the available pool.
1739 */
1740
1741 a_tbl = pmap->pm_a_tmgr;
1742 if (a_tbl == NULL) {
1743 /*
1744 * This pmap does not currently have an A table. Allocate
1745 * a new one.
1746 */
1747 a_tbl = get_a_table();
1748 a_tbl->at_parent = pmap;
1749
1750 /*
1751 * Assign this new A table to the pmap, and calculate its
1752 * physical address so that loadcrp() can be used to make
1753 * the table active.
1754 */
1755 pmap->pm_a_tmgr = a_tbl;
1756 pmap->pm_a_phys = mmu_vtop(a_tbl->at_dtbl);
1757
1758 /*
1759 * If the process receiving a new A table is the current
1760 * process, we are responsible for setting the MMU so that
1761 * it becomes the current address space. This only adds
1762 * new mappings, so no need to flush anything.
1763 */
1764 if (pmap == current_pmap()) {
1765 kernel_crp.rp_addr = pmap->pm_a_phys;
1766 loadcrp(&kernel_crp);
1767 }
1768
1769 if (!wired)
1770 llevel = NEWA;
1771 } else {
1772 /*
1773 * Use the A table already allocated for this pmap.
1774 * Unlink it from the A table pool if necessary.
1775 */
1776 if (wired && !a_tbl->at_wcnt)
1777 TAILQ_REMOVE(&a_pool, a_tbl, at_link);
1778 }
1779
1780 /*
1781 * Step 2 - Walk into the B table. If there is no valid B table,
1782 * allocate one.
1783 */
1784
1785 a_idx = MMU_TIA(va); /* Calculate the TIA of the VA. */
1786 a_dte = &a_tbl->at_dtbl[a_idx]; /* Retrieve descriptor from table */
1787 if (MMU_VALID_DT(*a_dte)) { /* Is the descriptor valid? */
1788 /* The descriptor is valid. Use the B table it points to. */
1789 /*************************************
1790 * a_idx *
1791 * v *
1792 * a_tbl -> +-+-+-+-+-+-+-+-+-+-+-+- *
1793 * | | | | | | | | | | | | *
1794 * +-+-+-+-+-+-+-+-+-+-+-+- *
1795 * | *
1796 * \- b_tbl -> +-+- *
1797 * | | *
1798 * +-+- *
1799 *************************************/
1800 b_dte = mmu_ptov(a_dte->addr.raw);
1801 b_tbl = mmuB2tmgr(b_dte);
1802
1803 /*
1804 * If the requested mapping must be wired, but this table
1805 * being used to map it is not, the table must be removed
1806 * from the available pool and its wired entry count
1807 * incremented.
1808 */
1809 if (wired && !b_tbl->bt_wcnt) {
1810 TAILQ_REMOVE(&b_pool, b_tbl, bt_link);
1811 a_tbl->at_wcnt++;
1812 }
1813 } else {
1814 /* The descriptor is invalid. Allocate a new B table. */
1815 b_tbl = get_b_table();
1816
1817 /* Point the parent A table descriptor to this new B table. */
1818 a_dte->addr.raw = mmu_vtop(b_tbl->bt_dtbl);
1819 a_dte->attr.raw = MMU_LONG_DTE_LU | MMU_DT_SHORT;
1820 a_tbl->at_ecnt++; /* Update parent's valid entry count */
1821
1822 /* Create the necessary back references to the parent table */
1823 b_tbl->bt_parent = a_tbl;
1824 b_tbl->bt_pidx = a_idx;
1825
1826 /*
1827 * If this table is to be wired, make sure the parent A table
1828 * wired count is updated to reflect that it has another wired
1829 * entry.
1830 */
1831 if (wired)
1832 a_tbl->at_wcnt++;
1833 else if (llevel == NONE)
1834 llevel = NEWB;
1835 }
1836
1837 /*
1838 * Step 3 - Walk into the C table, if there is no valid C table,
1839 * allocate one.
1840 */
1841
1842 b_idx = MMU_TIB(va); /* Calculate the TIB of the VA */
1843 b_dte = &b_tbl->bt_dtbl[b_idx]; /* Retrieve descriptor from table */
1844 if (MMU_VALID_DT(*b_dte)) { /* Is the descriptor valid? */
1845 /* The descriptor is valid. Use the C table it points to. */
1846 /**************************************
1847 * c_idx *
1848 * | v *
1849 * \- b_tbl -> +-+-+-+-+-+-+-+-+-+-+- *
1850 * | | | | | | | | | | | *
1851 * +-+-+-+-+-+-+-+-+-+-+- *
1852 * | *
1853 * \- c_tbl -> +-+-- *
1854 * | | | *
1855 * +-+-- *
1856 **************************************/
1857 c_pte = mmu_ptov(MMU_PTE_PA(*b_dte));
1858 c_tbl = mmuC2tmgr(c_pte);
1859
1860 /* If mapping is wired and table is not */
1861 if (wired && !c_tbl->ct_wcnt) {
1862 TAILQ_REMOVE(&c_pool, c_tbl, ct_link);
1863 b_tbl->bt_wcnt++;
1864 }
1865 } else {
1866 /* The descriptor is invalid. Allocate a new C table. */
1867 c_tbl = get_c_table();
1868
1869 /* Point the parent B table descriptor to this new C table. */
1870 b_dte->attr.raw = mmu_vtop(c_tbl->ct_dtbl);
1871 b_dte->attr.raw |= MMU_DT_SHORT;
1872 b_tbl->bt_ecnt++; /* Update parent's valid entry count */
1873
1874 /* Create the necessary back references to the parent table */
1875 c_tbl->ct_parent = b_tbl;
1876 c_tbl->ct_pidx = b_idx;
1877 /*
1878 * Store the pmap and base virtual managed address for faster
1879 * retrieval in the PV functions.
1880 */
1881 c_tbl->ct_pmap = pmap;
1882 c_tbl->ct_va = (va & (MMU_TIA_MASK|MMU_TIB_MASK));
1883
1884 /*
1885 * If this table is to be wired, make sure the parent B table
1886 * wired count is updated to reflect that it has another wired
1887 * entry.
1888 */
1889 if (wired)
1890 b_tbl->bt_wcnt++;
1891 else if (llevel == NONE)
1892 llevel = NEWC;
1893 }
1894
1895 /*
1896 * Step 4 - Deposit a page descriptor (PTE) into the appropriate
1897 * slot of the C table, describing the PA to which the VA is mapped.
1898 */
1899
1900 pte_idx = MMU_TIC(va);
1901 c_pte = &c_tbl->ct_dtbl[pte_idx];
1902 if (MMU_VALID_DT(*c_pte)) { /* Is the entry currently valid? */
1903 /*
1904 * The PTE is currently valid. This particular call
1905 * is just a synonym for one (or more) of the following
1906 * operations:
1907 * change protection of a page
1908 * change wiring status of a page
1909 * remove the mapping of a page
1910 */
1911
1912 /* First check if this is a wiring operation. */
1913 if (c_pte->attr.raw & MMU_SHORT_PTE_WIRED) {
1914 /*
1915 * The existing mapping is wired, so adjust wired
1916 * entry count here. If new mapping is still wired,
1917 * wired entry count will be incremented again later.
1918 */
1919 c_tbl->ct_wcnt--;
1920 if (!wired) {
1921 /*
1922 * The mapping of this PTE is being changed
1923 * from wired to unwired.
1924 * Adjust wired entry counts in each table and
1925 * set llevel flag to put unwired tables back
1926 * into the active pool.
1927 */
1928 if (c_tbl->ct_wcnt == 0) {
1929 llevel = NEWC;
1930 if (--b_tbl->bt_wcnt == 0) {
1931 llevel = NEWB;
1932 if (--a_tbl->at_wcnt == 0) {
1933 llevel = NEWA;
1934 }
1935 }
1936 }
1937 }
1938 }
1939
1940 /* Is the new address the same as the old? */
1941 if (MMU_PTE_PA(*c_pte) == pa) {
1942 /*
1943 * Yes, mark that it does not need to be reinserted
1944 * into the PV list.
1945 */
1946 insert = false;
1947
1948 /*
1949 * Clear all but the modified, referenced and wired
1950 * bits on the PTE.
1951 */
1952 c_pte->attr.raw &= (MMU_SHORT_PTE_M
1953 | MMU_SHORT_PTE_USED | MMU_SHORT_PTE_WIRED);
1954 } else {
1955 /* No, remove the old entry */
1956 pmap_remove_pte(c_pte);
1957 insert = true;
1958 }
1959
1960 /*
1961 * TLB flush is only necessary if modifying current map.
1962 * However, in pmap_enter(), the pmap almost always IS
1963 * the current pmap, so don't even bother to check.
1964 */
1965 TBIS(va);
1966 } else {
1967 /*
1968 * The PTE is invalid. Increment the valid entry count in
1969 * the C table manager to reflect the addition of a new entry.
1970 */
1971 c_tbl->ct_ecnt++;
1972
1973 /* XXX - temporarily make sure the PTE is cleared. */
1974 c_pte->attr.raw = 0;
1975
1976 /* It will also need to be inserted into the PV list. */
1977 insert = true;
1978 }
1979
1980 /*
1981 * If page is changing from unwired to wired status, set an unused bit
1982 * within the PTE to indicate that it is wired. Also increment the
1983 * wired entry count in the C table manager.
1984 */
1985 if (wired) {
1986 c_pte->attr.raw |= MMU_SHORT_PTE_WIRED;
1987 c_tbl->ct_wcnt++;
1988 }
1989
1990 /*
1991 * Map the page, being careful to preserve modify/reference/wired
1992 * bits. At this point it is assumed that the PTE either has no bits
1993 	 * set, or if there are set bits, they are only modified, referenced or
1994 * wired bits. If not, the following statement will cause erratic
1995 * behavior.
1996 */
1997 #ifdef PMAP_DEBUG
1998 if (c_pte->attr.raw & ~(MMU_SHORT_PTE_M |
1999 MMU_SHORT_PTE_USED | MMU_SHORT_PTE_WIRED)) {
2000 printf("pmap_enter: junk left in PTE at %p\n", c_pte);
2001 Debugger();
2002 }
2003 #endif
2004 c_pte->attr.raw |= ((u_long) pa | MMU_DT_PAGE);
2005
2006 /*
2007 * If the mapping should be read-only, set the write protect
2008 * bit in the PTE.
2009 */
2010 if (!(prot & VM_PROT_WRITE))
2011 c_pte->attr.raw |= MMU_SHORT_PTE_WP;
2012
2013 /*
2014 * Mark the PTE as used and/or modified as specified by the flags arg.
2015 */
2016 if (flags & VM_PROT_ALL) {
2017 c_pte->attr.raw |= MMU_SHORT_PTE_USED;
2018 if (flags & VM_PROT_WRITE) {
2019 c_pte->attr.raw |= MMU_SHORT_PTE_M;
2020 }
2021 }
2022
2023 /*
2024 	 * If the mapping should be cache inhibited (indicated by the flag
2025 	 * bits found in the low-order, unused bits of the physical address),
2026 	 * mark the PTE as a cache inhibited page.
2027 */
2028 if (mapflags & PMAP_NC)
2029 c_pte->attr.raw |= MMU_SHORT_PTE_CI;
2030
2031 /*
2032 * If the physical address being mapped is managed by the PV
2033 * system then link the pte into the list of pages mapped to that
2034 * address.
2035 */
2036 if (insert && managed) {
2037 pv = pa2pv(pa);
2038 nidx = pteidx(c_pte);
2039
2040 pvebase[nidx].pve_next = pv->pv_idx;
2041 pv->pv_idx = nidx;
2042 }
2043
2044 /* Move any allocated or unwired tables back into the active pool. */
2045
2046 switch (llevel) {
2047 case NEWA:
2048 TAILQ_INSERT_TAIL(&a_pool, a_tbl, at_link);
2049 /* FALLTHROUGH */
2050 case NEWB:
2051 TAILQ_INSERT_TAIL(&b_pool, b_tbl, bt_link);
2052 /* FALLTHROUGH */
2053 case NEWC:
2054 TAILQ_INSERT_TAIL(&c_pool, c_tbl, ct_link);
2055 /* FALLTHROUGH */
2056 default:
2057 break;
2058 }
2059
2060 return 0;
2061 }
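
/*
 * Illustration only: a typical call into pmap_enter() from the MI VM
 * system, mapping one page read/write and wired (the variables are
 * hypothetical).  The low bits of 'flags' carry the access type that
 * prompted the mapping, which is used above to preset the USED and
 * MODIFIED bits; PMAP_WIRED requests a wired mapping:
 *
 *	error = pmap_enter(pmap, va, pa,
 *	    VM_PROT_READ | VM_PROT_WRITE,
 *	    VM_PROT_READ | VM_PROT_WRITE | PMAP_WIRED);
 */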
2062
2063 /* pmap_enter_kernel INTERNAL
2064 **
2065 * Map the given virtual address to the given physical address within the
2066 * kernel address space. This function exists because the kernel map does
2067 * not do dynamic table allocation. It consists of a contiguous array of ptes
2068 * and can be edited directly without the need to walk through any tables.
2069 *
2070 * XXX: "Danger, Will Robinson!"
2071 * Note that the kernel should never take a fault on any page
2072 * between [ KERNBASE .. virtual_avail ] and this is checked in
2073 * trap.c for kernel-mode MMU faults. This means that mappings
2074 * created in that range must be implicitly wired. -gwr
2075 */
2076 void
2077 pmap_enter_kernel(vaddr_t va, paddr_t pa, vm_prot_t prot)
2078 {
2079 bool was_valid, insert;
2080 u_short pte_idx;
2081 int flags;
2082 mmu_short_pte_t *pte;
2083 pv_t *pv;
2084 paddr_t old_pa;
2085
2086 flags = (pa & ~MMU_PAGE_MASK);
2087 pa &= MMU_PAGE_MASK;
2088
2089 if (is_managed(pa))
2090 insert = true;
2091 else
2092 insert = false;
2093
2094 /*
2095 * Calculate the index of the PTE being modified.
2096 */
2097 pte_idx = (u_long)m68k_btop(va - KERNBASE);
2098
2099 /* This array is traditionally named "Sysmap" */
2100 pte = &kernCbase[pte_idx];
2101
2102 if (MMU_VALID_DT(*pte)) {
2103 was_valid = true;
2104 /*
2105 * If the PTE already maps a different
2106 		 * physical address, unmap and pv_unlink.
2107 */
2108 old_pa = MMU_PTE_PA(*pte);
2109 if (pa != old_pa)
2110 pmap_remove_pte(pte);
2111 else {
2112 /*
2113 * Old PA and new PA are the same. No need to
2114 * relink the mapping within the PV list.
2115 */
2116 insert = false;
2117
2118 /*
2119 * Save any mod/ref bits on the PTE.
2120 */
2121 pte->attr.raw &= (MMU_SHORT_PTE_USED|MMU_SHORT_PTE_M);
2122 }
2123 } else {
2124 pte->attr.raw = MMU_DT_INVALID;
2125 was_valid = false;
2126 }
2127
2128 /*
2129 	 * Map the page, being careful to preserve the modified/referenced
2130 	 * bits on the PTE.
2131 */
2132 pte->attr.raw |= (pa | MMU_DT_PAGE);
2133
2134 if (!(prot & VM_PROT_WRITE)) /* If access should be read-only */
2135 pte->attr.raw |= MMU_SHORT_PTE_WP;
2136 if (flags & PMAP_NC)
2137 pte->attr.raw |= MMU_SHORT_PTE_CI;
2138 if (was_valid)
2139 TBIS(va);
2140
2141 /*
2142 * Insert the PTE into the PV system, if need be.
2143 */
2144 if (insert) {
2145 pv = pa2pv(pa);
2146 pvebase[pte_idx].pve_next = pv->pv_idx;
2147 pv->pv_idx = pte_idx;
2148 }
2149 }
2150
2151 void
2152 pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags)
2153 {
2154 mmu_short_pte_t *pte;
2155
2156 /* This array is traditionally named "Sysmap" */
2157 pte = &kernCbase[(u_long)m68k_btop(va - KERNBASE)];
2158
2159 KASSERT(!MMU_VALID_DT(*pte));
2160 pte->attr.raw = MMU_DT_INVALID | MMU_DT_PAGE | (pa & MMU_PAGE_MASK);
2161 if (!(prot & VM_PROT_WRITE))
2162 pte->attr.raw |= MMU_SHORT_PTE_WP;
2163 }
2164
2165 void
2166 pmap_kremove(vaddr_t va, vsize_t len)
2167 {
2168 int idx, eidx;
2169
2170 #ifdef PMAP_DEBUG
2171 if ((va & PGOFSET) || (len & PGOFSET))
2172 panic("pmap_kremove: alignment");
2173 #endif
2174
2175 idx = m68k_btop(va - KERNBASE);
2176 eidx = m68k_btop(va + len - KERNBASE);
2177
2178 while (idx < eidx) {
2179 kernCbase[idx++].attr.raw = MMU_DT_INVALID;
2180 TBIS(va);
2181 va += PAGE_SIZE;
2182 }
2183 }
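
/*
 * A minimal sketch of how pmap_kenter_pa() and pmap_kremove() pair up,
 * as in pmap_copy_page() and pmap_zero_page() below.  Because
 * pmap_kenter_pa() asserts that the PTE is currently invalid, a
 * temporary mapping must be removed before its VA is reused:
 *
 *	pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE, 0);
 *	(access the page through 'va')
 *	pmap_kremove(va, PAGE_SIZE);
 */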
2184
2185 /* pmap_map INTERNAL
2186 **
2187 * Map a contiguous range of physical memory into a contiguous range of
2188 * the kernel virtual address space.
2189 *
2190 * Used for device mappings and early mapping of the kernel text/data/bss.
2191 * Returns the first virtual address beyond the end of the range.
2192 */
2193 vaddr_t
2194 pmap_map(vaddr_t va, paddr_t pa, paddr_t endpa, int prot)
2195 {
2196 int sz;
2197
2198 sz = endpa - pa;
2199 do {
2200 pmap_enter_kernel(va, pa, prot);
2201 va += PAGE_SIZE;
2202 pa += PAGE_SIZE;
2203 sz -= PAGE_SIZE;
2204 } while (sz > 0);
2205 pmap_update(pmap_kernel());
2206 return va;
2207 }
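
/*
 * Illustration only, with hypothetical symbols: early startup code can
 * chain calls to pmap_map() because it returns the first VA past the
 * range just mapped:
 *
 *	va = pmap_map(va, text_start, text_end,
 *	    VM_PROT_READ | VM_PROT_EXECUTE);
 *	va = pmap_map(va, data_start, data_end,
 *	    VM_PROT_READ | VM_PROT_WRITE);
 */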
2208
2209 /* pmap_protect_kernel INTERNAL
2210 **
2211 * Apply the given protection code to a kernel address range.
2212 */
2213 static INLINE void
2214 pmap_protect_kernel(vaddr_t startva, vaddr_t endva, vm_prot_t prot)
2215 {
2216 vaddr_t va;
2217 mmu_short_pte_t *pte;
2218
2219 pte = &kernCbase[(unsigned long) m68k_btop(startva - KERNBASE)];
2220 for (va = startva; va < endva; va += PAGE_SIZE, pte++) {
2221 if (MMU_VALID_DT(*pte)) {
2222 switch (prot) {
2223 case VM_PROT_ALL:
2224 break;
2225 case VM_PROT_EXECUTE:
2226 case VM_PROT_READ:
2227 case VM_PROT_READ|VM_PROT_EXECUTE:
2228 pte->attr.raw |= MMU_SHORT_PTE_WP;
2229 break;
2230 case VM_PROT_NONE:
2231 /* this is an alias for 'pmap_remove_kernel' */
2232 pmap_remove_pte(pte);
2233 break;
2234 default:
2235 break;
2236 }
2237 /*
2238 * since this is the kernel, immediately flush any cached
2239 * descriptors for this address.
2240 */
2241 TBIS(va);
2242 }
2243 }
2244 }
2245
2246 /* pmap_protect INTERFACE
2247 **
2248 * Apply the given protection to the given virtual address range within
2249 * the given map.
2250 *
2251 * It is ok for the protection applied to be stronger than what is
2252 * specified. We use this to our advantage when the given map has no
2253 * mapping for the virtual address. By skipping a page when this
2254 * is discovered, we are effectively applying a protection of VM_PROT_NONE,
2255 * and therefore do not need to map the page just to apply a protection
2256 * code. Only pmap_enter() needs to create new mappings if they do not exist.
2257 *
2258 * XXX - This function could be sped up by using pmap_stroll() for initial
2259 * setup, and then manual scrolling in the for() loop.
2260 */
2261 void
2262 pmap_protect(pmap_t pmap, vaddr_t startva, vaddr_t endva, vm_prot_t prot)
2263 {
2264 bool iscurpmap;
2265 int a_idx, b_idx, c_idx;
2266 a_tmgr_t *a_tbl;
2267 b_tmgr_t *b_tbl;
2268 c_tmgr_t *c_tbl;
2269 mmu_short_pte_t *pte;
2270
2271 if (pmap == pmap_kernel()) {
2272 pmap_protect_kernel(startva, endva, prot);
2273 return;
2274 }
2275
2276 /*
2277 * In this particular pmap implementation, there are only three
2278 * types of memory protection: 'all' (read/write/execute),
2279 * 'read-only' (read/execute) and 'none' (no mapping.)
2280 * It is not possible for us to treat 'executable' as a separate
2281 * protection type. Therefore, protection requests that seek to
2282 * remove execute permission while retaining read or write, and those
2283 * that make little sense (write-only for example) are ignored.
2284 */
2285 switch (prot) {
2286 case VM_PROT_NONE:
2287 /*
2288 * A request to apply the protection code of
2289 * 'VM_PROT_NONE' is a synonym for pmap_remove().
2290 */
2291 pmap_remove(pmap, startva, endva);
2292 return;
2293 case VM_PROT_EXECUTE:
2294 case VM_PROT_READ:
2295 case VM_PROT_READ|VM_PROT_EXECUTE:
2296 /* continue */
2297 break;
2298 case VM_PROT_WRITE:
2299 case VM_PROT_WRITE|VM_PROT_READ:
2300 case VM_PROT_WRITE|VM_PROT_EXECUTE:
2301 case VM_PROT_ALL:
2302 /* None of these should happen in a sane system. */
2303 return;
2304 }
2305
2306 /*
2307 * If the pmap has no A table, it has no mappings and therefore
2308 * there is nothing to protect.
2309 */
2310 if ((a_tbl = pmap->pm_a_tmgr) == NULL)
2311 return;
2312
2313 a_idx = MMU_TIA(startva);
2314 b_idx = MMU_TIB(startva);
2315 c_idx = MMU_TIC(startva);
2316 b_tbl = NULL;
2317 c_tbl = NULL;
2318
2319 iscurpmap = (pmap == current_pmap());
2320 while (startva < endva) {
2321 if (b_tbl || MMU_VALID_DT(a_tbl->at_dtbl[a_idx])) {
2322 if (b_tbl == NULL) {
2323 b_tbl = (b_tmgr_t *) a_tbl->at_dtbl[a_idx].addr.raw;
2324 b_tbl = mmu_ptov((vaddr_t)b_tbl);
2325 b_tbl = mmuB2tmgr((mmu_short_dte_t *)b_tbl);
2326 }
2327 if (c_tbl || MMU_VALID_DT(b_tbl->bt_dtbl[b_idx])) {
2328 if (c_tbl == NULL) {
2329 c_tbl = (c_tmgr_t *) MMU_DTE_PA(b_tbl->bt_dtbl[b_idx]);
2330 c_tbl = mmu_ptov((vaddr_t)c_tbl);
2331 c_tbl = mmuC2tmgr((mmu_short_pte_t *)c_tbl);
2332 }
2333 if (MMU_VALID_DT(c_tbl->ct_dtbl[c_idx])) {
2334 pte = &c_tbl->ct_dtbl[c_idx];
2335 /* make the mapping read-only */
2336 pte->attr.raw |= MMU_SHORT_PTE_WP;
2337 /*
2338 * If we just modified the current address space,
2339 * flush any translations for the modified page from
2340 * the translation cache and any data from it in the
2341 * data cache.
2342 */
2343 if (iscurpmap)
2344 TBIS(startva);
2345 }
2346 startva += PAGE_SIZE;
2347
2348 if (++c_idx >= MMU_C_TBL_SIZE) { /* exceeded C table? */
2349 c_tbl = NULL;
2350 c_idx = 0;
2351 				if (++b_idx >= MMU_B_TBL_SIZE) { /* exceeded B table? */
2352 					b_tbl = NULL;
2353 					b_idx = 0;
					a_idx++;	/* carry into the A table index */
2354 				}
2355 }
2356 } else { /* C table wasn't valid */
2357 c_tbl = NULL;
2358 c_idx = 0;
2359 startva += MMU_TIB_RANGE;
2360 				if (++b_idx >= MMU_B_TBL_SIZE) { /* exceeded B table? */
2361 					b_tbl = NULL;
2362 					b_idx = 0;
					a_idx++;	/* carry into the A table index */
2363 				}
2364 } /* C table */
2365 } else { /* B table wasn't valid */
2366 b_tbl = NULL;
2367 b_idx = 0;
2368 startva += MMU_TIA_RANGE;
2369 a_idx++;
2370 } /* B table */
2371 }
2372 }
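
/*
 * The loop above advances (a_idx, b_idx, c_idx) like an odometer:
 * c_idx counts pages within one C table and carries into b_idx, which
 * in turn carries into a_idx.  A sketch of one carry, assuming the
 * 64-entry B and C tables (6-bit indices) described at the top of
 * this file:
 *
 *	(a_idx, b_idx, c_idx) == (3, 63, 63)	last page of an A range
 *	startva += PAGE_SIZE;
 *	(a_idx, b_idx, c_idx) -> (4, 0, 0)	first page of the next
 *
 * Invalid B or C entries let the walk skip a whole MMU_TIA_RANGE or
 * MMU_TIB_RANGE at a time instead of visiting each page.
 */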
2373
2374 /* pmap_unwire INTERFACE
2375 **
2376 * Clear the wired attribute of the specified page.
2377 *
2378 * This function is called from vm_fault.c to unwire
2379 * a mapping.
2380 */
2381 void
2382 pmap_unwire(pmap_t pmap, vaddr_t va)
2383 {
2384 int a_idx, b_idx, c_idx;
2385 a_tmgr_t *a_tbl;
2386 b_tmgr_t *b_tbl;
2387 c_tmgr_t *c_tbl;
2388 mmu_short_pte_t *pte;
2389
2390 /* Kernel mappings always remain wired. */
2391 if (pmap == pmap_kernel())
2392 return;
2393
2394 /*
2395 * Walk through the tables. If the walk terminates without
2396 * a valid PTE then the address wasn't wired in the first place.
2397 * Return immediately.
2398 */
2399 if (pmap_stroll(pmap, va, &a_tbl, &b_tbl, &c_tbl, &pte, &a_idx,
2400 &b_idx, &c_idx) == false)
2401 return;
2402
2403
2404 /* Is the PTE wired? If not, return. */
2405 if (!(pte->attr.raw & MMU_SHORT_PTE_WIRED))
2406 return;
2407
2408 /* Remove the wiring bit. */
2409 pte->attr.raw &= ~(MMU_SHORT_PTE_WIRED);
2410
2411 /*
2412 * Decrement the wired entry count in the C table.
2413 * If it reaches zero the following things happen:
2414 * 1. The table no longer has any wired entries and is considered
2415 * unwired.
2416 * 2. It is placed on the available queue.
2417 * 3. The parent table's wired entry count is decremented.
2418 * 4. If it reaches zero, this process repeats at step 1 and
2419 	 *        stops after reaching the A table.
2420 */
2421 if (--c_tbl->ct_wcnt == 0) {
2422 TAILQ_INSERT_TAIL(&c_pool, c_tbl, ct_link);
2423 if (--b_tbl->bt_wcnt == 0) {
2424 TAILQ_INSERT_TAIL(&b_pool, b_tbl, bt_link);
2425 if (--a_tbl->at_wcnt == 0) {
2426 TAILQ_INSERT_TAIL(&a_pool, a_tbl, at_link);
2427 }
2428 }
2429 }
2430 }
2431
2432 /* pmap_copy INTERFACE
2433 **
2434 * Copy the mappings of a range of addresses in one pmap, into
2435 * the destination address of another.
2436 *
2437 * This routine is advisory. Should we one day decide that MMU tables
2438 * may be shared by more than one pmap, this function should be used to
2439 * link them together. Until that day however, we do nothing.
2440 */
2441 void
2442 pmap_copy(pmap_t pmap_a, pmap_t pmap_b, vaddr_t dst, vsize_t len, vaddr_t src)
2443 {
2444
2445 /* not implemented. */
2446 }
2447
2448 /* pmap_copy_page INTERFACE
2449 **
2450 * Copy the contents of one physical page into another.
2451 *
2452 * This function makes use of two virtual pages allocated in pmap_bootstrap()
2453 * to map the two specified physical pages into the kernel address space.
2454 *
2455 * Note: We could use the transparent translation registers to make the
2456 * mappings. If we do so, be sure to disable interrupts before using them.
2457 */
2458 void
2459 pmap_copy_page(paddr_t srcpa, paddr_t dstpa)
2460 {
2461 vaddr_t srcva, dstva;
2462 int s;
2463
2464 srcva = tmp_vpages[0];
2465 dstva = tmp_vpages[1];
2466
2467 s = splvm();
2468 #ifdef DIAGNOSTIC
2469 if (tmp_vpages_inuse++)
2470 panic("pmap_copy_page: temporary vpages are in use.");
2471 #endif
2472
2473 	/* Map pages as non-cacheable to avoid cache pollution? */
2474 pmap_kenter_pa(srcva, srcpa, VM_PROT_READ, 0);
2475 pmap_kenter_pa(dstva, dstpa, VM_PROT_READ | VM_PROT_WRITE, 0);
2476
2477 /* Hand-optimized version of memcpy(dst, src, PAGE_SIZE) */
2478 copypage((char *)srcva, (char *)dstva);
2479
2480 pmap_kremove(srcva, PAGE_SIZE);
2481 pmap_kremove(dstva, PAGE_SIZE);
2482
2483 #ifdef DIAGNOSTIC
2484 --tmp_vpages_inuse;
2485 #endif
2486 splx(s);
2487 }
2488
2489 /* pmap_zero_page INTERFACE
2490 **
2491 * Zero the contents of the specified physical page.
2492 *
2493 * Uses one of the virtual pages allocated in pmap_bootstrap()
2494 * to map the specified page into the kernel address space.
2495 */
2496 void
2497 pmap_zero_page(paddr_t dstpa)
2498 {
2499 vaddr_t dstva;
2500 int s;
2501
2502 dstva = tmp_vpages[1];
2503 s = splvm();
2504 #ifdef DIAGNOSTIC
2505 if (tmp_vpages_inuse++)
2506 panic("pmap_zero_page: temporary vpages are in use.");
2507 #endif
2508
2509 /* The comments in pmap_copy_page() above apply here also. */
2510 pmap_kenter_pa(dstva, dstpa, VM_PROT_READ | VM_PROT_WRITE, 0);
2511
2512 /* Hand-optimized version of memset(ptr, 0, PAGE_SIZE) */
2513 zeropage((char *)dstva);
2514
2515 pmap_kremove(dstva, PAGE_SIZE);
2516 #ifdef DIAGNOSTIC
2517 --tmp_vpages_inuse;
2518 #endif
2519 splx(s);
2520 }
2521
2522 /* pmap_pinit INTERNAL
2523 **
2524 * Initialize a pmap structure.
2525 */
2526 static INLINE void
2527 pmap_pinit(pmap_t pmap)
2528 {
2529
2530 memset(pmap, 0, sizeof(struct pmap));
2531 pmap->pm_a_tmgr = NULL;
2532 pmap->pm_a_phys = kernAphys;
2533 pmap->pm_refcount = 1;
2534 simple_lock_init(&pmap->pm_lock);
2535 }
2536
2537 /* pmap_create INTERFACE
2538 **
2539 * Create and return a pmap structure.
2540 */
2541 pmap_t
2542 pmap_create(void)
2543 {
2544 pmap_t pmap;
2545
2546 pmap = pool_get(&pmap_pmap_pool, PR_WAITOK);
2547 pmap_pinit(pmap);
2548 return pmap;
2549 }
2550
2551 /* pmap_release INTERNAL
2552 **
2553 * Release any resources held by the given pmap.
2554 *
2555 * This is the reverse analog of pmap_pinit. It does not
2556 * necessarily mean that the pmap structure will be deallocated,
2557 * as pmap_destroy does.
2558 */
2559 static INLINE void
2560 pmap_release(pmap_t pmap)
2561 {
2562
2563 /*
2564 * As long as the pmap contains no mappings,
2565 * which always should be the case whenever
2566 * this function is called, there really should
2567 * be nothing to do.
2568 */
2569 #ifdef PMAP_DEBUG
2570 if (pmap == pmap_kernel())
2571 panic("pmap_release: kernel pmap");
2572 #endif
2573 /*
2574 * XXX - If this pmap has an A table, give it back.
2575 * The pmap SHOULD be empty by now, and pmap_remove
2576 * should have already given back the A table...
2577 * However, I see: pmap->pm_a_tmgr->at_ecnt == 1
2578 * at this point, which means some mapping was not
2579 * removed when it should have been. -gwr
2580 */
2581 if (pmap->pm_a_tmgr != NULL) {
2582 /* First make sure we are not using it! */
2583 if (kernel_crp.rp_addr == pmap->pm_a_phys) {
2584 kernel_crp.rp_addr = kernAphys;
2585 loadcrp(&kernel_crp);
2586 }
2587 #ifdef PMAP_DEBUG /* XXX - todo! */
2588 /* XXX - Now complain... */
2589 printf("pmap_release: still have table\n");
2590 Debugger();
2591 #endif
2592 free_a_table(pmap->pm_a_tmgr, true);
2593 pmap->pm_a_tmgr = NULL;
2594 pmap->pm_a_phys = kernAphys;
2595 }
2596 }
2597
2598 /* pmap_reference INTERFACE
2599 **
2600 * Increment the reference count of a pmap.
2601 */
2602 void
2603 pmap_reference(pmap_t pmap)
2604 {
2605 pmap_lock(pmap);
2606 pmap_add_ref(pmap);
2607 pmap_unlock(pmap);
2608 }
2609
2610 /* pmap_dereference INTERNAL
2611 **
2612 * Decrease the reference count on the given pmap
2613 * by one and return the current count.
2614 */
2615 static INLINE int
2616 pmap_dereference(pmap_t pmap)
2617 {
2618 int rtn;
2619
2620 pmap_lock(pmap);
2621 rtn = pmap_del_ref(pmap);
2622 pmap_unlock(pmap);
2623
2624 return rtn;
2625 }
2626
2627 /* pmap_destroy INTERFACE
2628 **
2629 * Decrement a pmap's reference count and delete
2630 * the pmap if it becomes zero. Will be called
2631 * only after all mappings have been removed.
2632 */
2633 void
2634 pmap_destroy(pmap_t pmap)
2635 {
2636
2637 if (pmap_dereference(pmap) == 0) {
2638 pmap_release(pmap);
2639 pool_put(&pmap_pmap_pool, pmap);
2640 }
2641 }
2642
2643 /* pmap_is_referenced INTERFACE
2644 **
2645 * Determine if the given physical page has been
2646 * referenced (read from [or written to.])
2647 */
2648 bool
2649 pmap_is_referenced(struct vm_page *pg)
2650 {
2651 paddr_t pa = VM_PAGE_TO_PHYS(pg);
2652 pv_t *pv;
2653 int idx;
2654
2655 /*
2656 * Check the flags on the pv head. If they are set,
2657 * return immediately. Otherwise a search must be done.
2658 */
2659
2660 pv = pa2pv(pa);
2661 if (pv->pv_flags & PV_FLAGS_USED)
2662 return true;
2663
2664 /*
2665 * Search through all pv elements pointing
2666 * to this page and query their reference bits
2667 */
2668
2669 for (idx = pv->pv_idx; idx != PVE_EOL; idx = pvebase[idx].pve_next) {
2670 if (MMU_PTE_USED(kernCbase[idx])) {
2671 return true;
2672 }
2673 }
2674 return false;
2675 }
2676
2677 /* pmap_is_modified INTERFACE
2678 **
2679 * Determine if the given physical page has been
2680 * modified (written to.)
2681 */
2682 bool
2683 pmap_is_modified(struct vm_page *pg)
2684 {
2685 paddr_t pa = VM_PAGE_TO_PHYS(pg);
2686 pv_t *pv;
2687 int idx;
2688
2689 /* see comments in pmap_is_referenced() */
2690 pv = pa2pv(pa);
2691 if (pv->pv_flags & PV_FLAGS_MDFY)
2692 return true;
2693
2694 for (idx = pv->pv_idx;
2695 idx != PVE_EOL;
2696 idx = pvebase[idx].pve_next) {
2697
2698 if (MMU_PTE_MODIFIED(kernCbase[idx])) {
2699 return true;
2700 }
2701 }
2702
2703 return false;
2704 }
2705
2706 /* pmap_page_protect INTERFACE
2707 **
2708 * Applies the given protection to all mappings to the given
2709 * physical page.
2710 */
2711 void
2712 pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
2713 {
2714 paddr_t pa = VM_PAGE_TO_PHYS(pg);
2715 pv_t *pv;
2716 int idx;
2717 vaddr_t va;
2718 struct mmu_short_pte_struct *pte;
2719 c_tmgr_t *c_tbl;
2720 pmap_t pmap, curpmap;
2721
2722 curpmap = current_pmap();
2723 pv = pa2pv(pa);
2724
2725 for (idx = pv->pv_idx; idx != PVE_EOL; idx = pvebase[idx].pve_next) {
2726 pte = &kernCbase[idx];
2727 switch (prot) {
2728 case VM_PROT_ALL:
2729 /* do nothing */
2730 break;
2731 case VM_PROT_EXECUTE:
2732 case VM_PROT_READ:
2733 case VM_PROT_READ|VM_PROT_EXECUTE:
2734 /*
2735 * Determine the virtual address mapped by
2736 * the PTE and flush ATC entries if necessary.
2737 */
2738 va = pmap_get_pteinfo(idx, &pmap, &c_tbl);
2739 pte->attr.raw |= MMU_SHORT_PTE_WP;
2740 if (pmap == curpmap || pmap == pmap_kernel())
2741 TBIS(va);
2742 break;
2743 case VM_PROT_NONE:
2744 /* Save the mod/ref bits. */
2745 pv->pv_flags |= pte->attr.raw;
2746 /* Invalidate the PTE. */
2747 pte->attr.raw = MMU_DT_INVALID;
2748
2749 /*
2750 			 * Update table counts, and flush ATC entries
2751 			 * if necessary.
2752 */
2753 va = pmap_get_pteinfo(idx, &pmap, &c_tbl);
2754
2755 /*
2756 * If the PTE belongs to the kernel map,
2757 * be sure to flush the page it maps.
2758 */
2759 if (pmap == pmap_kernel()) {
2760 TBIS(va);
2761 } else {
2762 /*
2763 * The PTE belongs to a user map.
2764 * update the entry count in the C
2765 * table to which it belongs and flush
2766 * the ATC if the mapping belongs to
2767 * the current pmap.
2768 */
2769 c_tbl->ct_ecnt--;
2770 if (pmap == curpmap)
2771 TBIS(va);
2772 }
2773 break;
2774 default:
2775 break;
2776 }
2777 }
2778
2779 /*
2780 * If the protection code indicates that all mappings to the page
2781 * be removed, truncate the PV list to zero entries.
2782 */
2783 if (prot == VM_PROT_NONE)
2784 pv->pv_idx = PVE_EOL;
2785 }
2786
2787 /* pmap_get_pteinfo INTERNAL
2788 **
2789 * Called internally to find the pmap and virtual address within that
2790 * map to which the PTE at the given index maps, along with the PTE's
2791 * C table manager.
2792 *
2793 * Returns the pmap and the C table manager through the pointer
2794 * arguments provided, and the virtual address as the return value.
2795 */
2796 vaddr_t
2797 pmap_get_pteinfo(u_int idx, pmap_t *pmap, c_tmgr_t **tbl)
2798 {
2799 vaddr_t va = 0;
2800
2801 /*
2802 * Determine if the PTE is a kernel PTE or a user PTE.
2803 */
2804 if (idx >= NUM_KERN_PTES) {
2805 /*
2806 * The PTE belongs to a user mapping.
2807 */
2808 /* XXX: Would like an inline for this to validate idx... */
2809 *tbl = &Ctmgrbase[(idx - NUM_KERN_PTES) / MMU_C_TBL_SIZE];
2810
2811 *pmap = (*tbl)->ct_pmap;
2812 /*
2813 * To find the va to which the PTE maps, we first take
2814 * the table's base virtual address mapping which is stored
2815 * in ct_va. We then increment this address by a page for
2816 * every slot skipped until we reach the PTE.
2817 */
2818 va = (*tbl)->ct_va;
2819 va += m68k_ptob(idx % MMU_C_TBL_SIZE);
2820 } else {
2821 /*
2822 * The PTE belongs to the kernel map.
2823 */
2824 *pmap = pmap_kernel();
2825
2826 va = m68k_ptob(idx);
2827 va += KERNBASE;
2828 }
2829
2830 return va;
2831 }
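
/*
 * A worked example with a hypothetical index, assuming the 64-entry
 * C tables (6-bit C index) described at the top of this file and a
 * NUM_KERN_PTES that is a whole number of C tables: for
 * idx = NUM_KERN_PTES + 130, the PTE lives in
 * Ctmgrbase[130 / 64] == Ctmgrbase[2], and since 130 % 64 == 2 it maps
 * the third page of that table's VA range:
 *
 *	va = (*tbl)->ct_va + m68k_ptob(2);
 */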
2832
2833 /* pmap_clear_modify INTERFACE
2834 **
2835 * Clear the modification bit on the page at the specified
2836 * physical address.
2837 *
2838 */
2839 bool
2840 pmap_clear_modify(struct vm_page *pg)
2841 {
2842 paddr_t pa = VM_PAGE_TO_PHYS(pg);
2843 bool rv;
2844
2845 rv = pmap_is_modified(pg);
2846 pmap_clear_pv(pa, PV_FLAGS_MDFY);
2847 return rv;
2848 }
2849
2850 /* pmap_clear_reference INTERFACE
2851 **
2852 * Clear the referenced bit on the page at the specified
2853 * physical address.
2854 */
2855 bool
2856 pmap_clear_reference(struct vm_page *pg)
2857 {
2858 paddr_t pa = VM_PAGE_TO_PHYS(pg);
2859 bool rv;
2860
2861 rv = pmap_is_referenced(pg);
2862 pmap_clear_pv(pa, PV_FLAGS_USED);
2863 return rv;
2864 }
2865
2866 /* pmap_clear_pv INTERNAL
2867 **
2868 * Clears the specified flag from the specified physical address.
2869 * (Used by pmap_clear_modify() and pmap_clear_reference().)
2870 *
2871 * Flag is one of:
2872 * PV_FLAGS_MDFY - Page modified bit.
2873 * PV_FLAGS_USED - Page used (referenced) bit.
2874 *
2875 * This routine must not only clear the flag on the pv list
2876 * head. It must also clear the bit on every pte in the pv
2877 * list associated with the address.
2878 */
2879 void
2880 pmap_clear_pv(paddr_t pa, int flag)
2881 {
2882 pv_t *pv;
2883 int idx;
2884 vaddr_t va;
2885 pmap_t pmap;
2886 mmu_short_pte_t *pte;
2887 c_tmgr_t *c_tbl;
2888
2889 pv = pa2pv(pa);
2890 pv->pv_flags &= ~(flag);
2891 for (idx = pv->pv_idx; idx != PVE_EOL; idx = pvebase[idx].pve_next) {
2892 pte = &kernCbase[idx];
2893 pte->attr.raw &= ~(flag);
2894
2895 /*
2896 * The MC68030 MMU will not set the modified or
2897 * referenced bits on any MMU tables for which it has
2898 		 * a cached descriptor with its modify bit set. To ensure
2899 		 * that it will modify these bits on the PTE the next
2900 		 * time it is written to or read from, we must flush it from
2901 		 * the ATC.
2902 *
2903 * Ordinarily it is only necessary to flush the descriptor
2904 * if it is used in the current address space. But since I
2905 * am not sure that there will always be a notion of
2906 * 'the current address space' when this function is called,
2907 * I will skip the test and always flush the address. It
2908 * does no harm.
2909 */
2910
2911 va = pmap_get_pteinfo(idx, &pmap, &c_tbl);
2912 TBIS(va);
2913 }
2914 }
2915
2916 /* pmap_extract_kernel INTERNAL
2917 **
2918 * Extract a translation from the kernel address space.
2919 */
2920 static INLINE bool
2921 pmap_extract_kernel(vaddr_t va, paddr_t *pap)
2922 {
2923 mmu_short_pte_t *pte;
2924
2925 pte = &kernCbase[(u_int)m68k_btop(va - KERNBASE)];
2926 if (!MMU_VALID_DT(*pte))
2927 return false;
2928 if (pap != NULL)
2929 *pap = MMU_PTE_PA(*pte);
2930 return true;
2931 }
2932
2933 /* pmap_extract INTERFACE
2934 **
2935 * Return the physical address mapped by the virtual address
2936 * in the specified pmap.
2937 *
2938 * Note: this function should also apply an exclusive lock
2939 * on the pmap system during its duration.
2940 */
2941 bool
2942 pmap_extract(pmap_t pmap, vaddr_t va, paddr_t *pap)
2943 {
2944 int a_idx, b_idx, pte_idx;
2945 a_tmgr_t *a_tbl;
2946 b_tmgr_t *b_tbl;
2947 c_tmgr_t *c_tbl;
2948 mmu_short_pte_t *c_pte;
2949
2950 if (pmap == pmap_kernel())
2951 return pmap_extract_kernel(va, pap);
2952
2953 if (pmap_stroll(pmap, va, &a_tbl, &b_tbl, &c_tbl,
2954 &c_pte, &a_idx, &b_idx, &pte_idx) == false)
2955 return false;
2956
2957 if (!MMU_VALID_DT(*c_pte))
2958 return false;
2959
2960 if (pap != NULL)
2961 *pap = MMU_PTE_PA(*c_pte);
2962 return true;
2963 }
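
/*
 * A minimal usage sketch: callers treat a false return as "no
 * translation exists", and may pass a NULL 'pap' to merely test
 * whether a VA is mapped:
 *
 *	paddr_t pa;
 *
 *	if (pmap_extract(pmap, va, &pa) == false)
 *		(handle the unmapped VA)
 */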
2964
2965 /* pmap_remove_kernel INTERNAL
2966 **
2967 * Remove the mapping of a range of virtual addresses from the kernel map.
2968 * The arguments are already page-aligned.
2969 */
2970 static INLINE void
2971 pmap_remove_kernel(vaddr_t sva, vaddr_t eva)
2972 {
2973 int idx, eidx;
2974
2975 #ifdef PMAP_DEBUG
2976 if ((sva & PGOFSET) || (eva & PGOFSET))
2977 panic("pmap_remove_kernel: alignment");
2978 #endif
2979
2980 idx = m68k_btop(sva - KERNBASE);
2981 eidx = m68k_btop(eva - KERNBASE);
2982
2983 while (idx < eidx) {
2984 pmap_remove_pte(&kernCbase[idx++]);
2985 TBIS(sva);
2986 sva += PAGE_SIZE;
2987 }
2988 }
2989
2990 /* pmap_remove INTERFACE
2991 **
2992 * Remove the mapping of a range of virtual addresses from the given pmap.
2993 *
2994 */
2995 void
2996 pmap_remove(pmap_t pmap, vaddr_t sva, vaddr_t eva)
2997 {
2998
2999 if (pmap == pmap_kernel()) {
3000 pmap_remove_kernel(sva, eva);
3001 return;
3002 }
3003
3004 /*
3005 * If the pmap doesn't have an A table of its own, it has no mappings
3006 * that can be removed.
3007 */
3008 if (pmap->pm_a_tmgr == NULL)
3009 return;
3010
3011 /*
3012 * Remove the specified range from the pmap. If the function
3013 * returns true, the operation removed all the valid mappings
3014 * in the pmap and freed its A table. If this happened to the
3015 * currently loaded pmap, the MMU root pointer must be reloaded
3016 * with the default 'kernel' map.
3017 */
3018 if (pmap_remove_a(pmap->pm_a_tmgr, sva, eva)) {
3019 if (kernel_crp.rp_addr == pmap->pm_a_phys) {
3020 kernel_crp.rp_addr = kernAphys;
3021 loadcrp(&kernel_crp);
3022 /* will do TLB flush below */
3023 }
3024 pmap->pm_a_tmgr = NULL;
3025 pmap->pm_a_phys = kernAphys;
3026 }
3027
3028 /*
3029 * If we just modified the current address space,
3030 * make sure to flush the MMU cache.
3031 *
3032 * XXX - this could be an unecessarily large flush.
3033 * XXX - Could decide, based on the size of the VA range
3034 * to be removed, whether to flush "by pages" or "all".
3035 */
3036 if (pmap == current_pmap())
3037 TBIAU();
3038 }
3039
3040 /* pmap_remove_a INTERNAL
3041 **
3042 * This is function number one in a set of three that removes a range
3043 * of memory in the most efficient manner by removing the highest possible
3044 * tables from the memory space. This particular function attempts to remove
3045 * as many B tables as it can, delegating the remaining fragmented ranges to
3046 * pmap_remove_b().
3047 *
3048 * If the removal operation results in an empty A table, the function returns
3049 * true.
3050 *
3051 * It's ugly but will do for now.
3052 */
3053 bool
3054 pmap_remove_a(a_tmgr_t *a_tbl, vaddr_t sva, vaddr_t eva)
3055 {
3056 bool empty;
3057 int idx;
3058 vaddr_t nstart, nend;
3059 b_tmgr_t *b_tbl;
3060 mmu_long_dte_t *a_dte;
3061 mmu_short_dte_t *b_dte;
3062 uint8_t at_wired, bt_wired;
3063
3064 /*
3065 * The following code works with what I call a 'granularity
3066 	 * reduction algorithm'. A range of addresses will always have
3067 * the following properties, which are classified according to
3068 * how the range relates to the size of the current granularity
3069 * - an A table entry:
3070 *
3071 	 *    1 2                   3 4
3072 	 * -+---+---+---+---+---+---+---+-
3073 	 * -+---+---+---+---+---+---+---+-
3074 *
3075 * A range will always start on a granularity boundary, illustrated
3076 * by '+' signs in the table above, or it will start at some point
3077 	 * in between granularity boundaries, as illustrated by point 1.
3078 * The first step in removing a range of addresses is to remove the
3079 * range between 1 and 2, the nearest granularity boundary. This
3080 * job is handled by the section of code governed by the
3081 	 * 'if (sva < nstart)' statement.
3082 *
3083 	 * A range will always encompass zero or more integral granules,
3084 * illustrated by points 2 and 3. Integral granules are easy to
3085 * remove. The removal of these granules is the second step, and
3086 * is handled by the code block 'if (nstart < nend)'.
3087 *
3088 	 * Lastly, a range will always end on a granularity boundary,
3089 	 * illustrated by point 3, or it will fall just beyond one,
3090 	 * illustrated by point 4. The last step involves removing this
3091 	 * range and is handled by the code block 'if (nend < eva)'.
3092 */
3093 nstart = MMU_ROUND_UP_A(sva);
3094 nend = MMU_ROUND_A(eva);
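
	/*
	 * A worked example with hypothetical addresses, assuming an
	 * MMU_TIA_RANGE of 0x02000000 (32MB, implied by the 7-bit A
	 * index described at the top of this file): for
	 * sva = 0x01800000 and eva = 0x06400000 this computes
	 * nstart = 0x02000000 and nend = 0x06000000.  The three blocks
	 * below then hand [sva, nstart) to pmap_remove_b(), free the
	 * two whole B tables covering [nstart, nend), and hand the
	 * tail [nend, eva) to pmap_remove_b() again.
	 */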
3095
3096 at_wired = a_tbl->at_wcnt;
3097
3098 if (sva < nstart) {
3099 /*
3100 		 * This block is executed if the range starts in between
3101 		 * granularity boundaries.
3102 *
3103 * First find the DTE which is responsible for mapping
3104 * the start of the range.
3105 */
3106 idx = MMU_TIA(sva);
3107 a_dte = &a_tbl->at_dtbl[idx];
3108
3109 /*
3110 * If the DTE is valid then delegate the removal of the sub
3111 * range to pmap_remove_b(), which can remove addresses at
3112 * a finer granularity.
3113 */
3114 if (MMU_VALID_DT(*a_dte)) {
3115 b_dte = mmu_ptov(a_dte->addr.raw);
3116 b_tbl = mmuB2tmgr(b_dte);
3117 bt_wired = b_tbl->bt_wcnt;
3118
3119 /*
3120 * The sub range to be removed starts at the start
3121 * of the full range we were asked to remove, and ends
3122 			 * at the lesser of:
3123 			 * 1. The end of the full range, -or-
3124 			 * 2. The start of the full range, rounded up to the
3125 			 *    nearest granularity boundary (nstart).
3126 */
3127 if (eva < nstart)
3128 empty = pmap_remove_b(b_tbl, sva, eva);
3129 else
3130 empty = pmap_remove_b(b_tbl, sva, nstart);
3131
3132 /*
3133 * If the child table no longer has wired entries,
3134 * decrement wired entry count.
3135 */
3136 if (bt_wired && b_tbl->bt_wcnt == 0)
3137 a_tbl->at_wcnt--;
3138
3139 /*
3140 * If the removal resulted in an empty B table,
3141 * invalidate the DTE that points to it and decrement
3142 * the valid entry count of the A table.
3143 */
3144 if (empty) {
3145 a_dte->attr.raw = MMU_DT_INVALID;
3146 a_tbl->at_ecnt--;
3147 }
3148 }
3149 /*
3150 * If the DTE is invalid, the address range is already non-
3151 * existent and can simply be skipped.
3152 */
3153 }
3154 if (nstart < nend) {
3155 /*
3156 * This block is executed if the range spans a whole number
3157 * multiple of granules (A table entries.)
3158 *
3159 * First find the DTE which is responsible for mapping
3160 * the start of the first granule involved.
3161 */
3162 idx = MMU_TIA(nstart);
3163 a_dte = &a_tbl->at_dtbl[idx];
3164
3165 /*
3166 * Remove entire sub-granules (B tables) one at a time,
3167 * until reaching the end of the range.
3168 */
3169 for (; nstart < nend; a_dte++, nstart += MMU_TIA_RANGE)
3170 if (MMU_VALID_DT(*a_dte)) {
3171 /*
3172 * Find the B table manager for the
3173 * entry and free it.
3174 */
3175 b_dte = mmu_ptov(a_dte->addr.raw);
3176 b_tbl = mmuB2tmgr(b_dte);
3177 bt_wired = b_tbl->bt_wcnt;
3178
3179 free_b_table(b_tbl, true);
3180
3181 /*
3182 				 * All child entries have been removed.
3183 * If there were any wired entries in it,
3184 * decrement wired entry count.
3185 */
3186 if (bt_wired)
3187 a_tbl->at_wcnt--;
3188
3189 /*
3190 * Invalidate the DTE that points to the
3191 * B table and decrement the valid entry
3192 * count of the A table.
3193 */
3194 a_dte->attr.raw = MMU_DT_INVALID;
3195 a_tbl->at_ecnt--;
3196 }
3197 }
3198 if (nend < eva) {
3199 /*
3200 * This block is executed if the range ends beyond a
3201 * granularity boundary.
3202 *
3203 * First find the DTE which is responsible for mapping
3204 * the start of the nearest (rounded down) granularity
3205 * boundary.
3206 */
3207 idx = MMU_TIA(nend);
3208 a_dte = &a_tbl->at_dtbl[idx];
3209
3210 /*
3211 * If the DTE is valid then delegate the removal of the sub
3212 * range to pmap_remove_b(), which can remove addresses at
3213 * a finer granularity.
3214 */
3215 if (MMU_VALID_DT(*a_dte)) {
3216 /*
3217 * Find the B table manager for the entry
3218 * and hand it to pmap_remove_b() along with
3219 * the sub range.
3220 */
3221 b_dte = mmu_ptov(a_dte->addr.raw);
3222 b_tbl = mmuB2tmgr(b_dte);
3223 bt_wired = b_tbl->bt_wcnt;
3224
3225 empty = pmap_remove_b(b_tbl, nend, eva);
3226
3227 /*
3228 * If the child table no longer has wired entries,
3229 * decrement wired entry count.
3230 */
3231 if (bt_wired && b_tbl->bt_wcnt == 0)
3232 a_tbl->at_wcnt--;
3233 /*
3234 * If the removal resulted in an empty B table,
3235 * invalidate the DTE that points to it and decrement
3236 * the valid entry count of the A table.
3237 */
3238 if (empty) {
3239 a_dte->attr.raw = MMU_DT_INVALID;
3240 a_tbl->at_ecnt--;
3241 }
3242 }
3243 }
3244
3245 /*
3246 * If there are no more entries in the A table, release it
3247 * back to the available pool and return true.
3248 */
3249 if (a_tbl->at_ecnt == 0) {
3250 KASSERT(a_tbl->at_wcnt == 0);
3251 a_tbl->at_parent = NULL;
3252 if (!at_wired)
3253 TAILQ_REMOVE(&a_pool, a_tbl, at_link);
3254 TAILQ_INSERT_HEAD(&a_pool, a_tbl, at_link);
3255 empty = true;
3256 } else {
3257 /*
3258 * If the table doesn't have wired entries any longer
3259 * but still has unwired entries, put it back into
3260 * the available queue.
3261 */
3262 if (at_wired && a_tbl->at_wcnt == 0)
3263 TAILQ_INSERT_TAIL(&a_pool, a_tbl, at_link);
3264 empty = false;
3265 }
3266
3267 return empty;
3268 }
3269
3270 /* pmap_remove_b INTERNAL
3271 **
3272 * Remove a range of addresses from an address space, trying to remove entire
3273 * C tables if possible.
3274 *
3275 * If the operation results in an empty B table, the function returns true.
3276 */
3277 bool
3278 pmap_remove_b(b_tmgr_t *b_tbl, vaddr_t sva, vaddr_t eva)
3279 {
3280 bool empty;
3281 int idx;
3282 vaddr_t nstart, nend, rstart;
3283 c_tmgr_t *c_tbl;
3284 mmu_short_dte_t *b_dte;
3285 mmu_short_pte_t *c_dte;
3286 uint8_t bt_wired, ct_wired;
3287
3288 nstart = MMU_ROUND_UP_B(sva);
3289 nend = MMU_ROUND_B(eva);
3290
3291 bt_wired = b_tbl->bt_wcnt;
3292
3293 if (sva < nstart) {
3294 idx = MMU_TIB(sva);
3295 b_dte = &b_tbl->bt_dtbl[idx];
3296 if (MMU_VALID_DT(*b_dte)) {
3297 c_dte = mmu_ptov(MMU_DTE_PA(*b_dte));
3298 c_tbl = mmuC2tmgr(c_dte);
3299 ct_wired = c_tbl->ct_wcnt;
3300
3301 if (eva < nstart)
3302 empty = pmap_remove_c(c_tbl, sva, eva);
3303 else
3304 empty = pmap_remove_c(c_tbl, sva, nstart);
3305
3306 /*
3307 * If the child table no longer has wired entries,
3308 * decrement wired entry count.
3309 */
3310 if (ct_wired && c_tbl->ct_wcnt == 0)
3311 b_tbl->bt_wcnt--;
3312
3313 if (empty) {
3314 b_dte->attr.raw = MMU_DT_INVALID;
3315 b_tbl->bt_ecnt--;
3316 }
3317 }
3318 }
3319 if (nstart < nend) {
3320 idx = MMU_TIB(nstart);
3321 b_dte = &b_tbl->bt_dtbl[idx];
3322 rstart = nstart;
3323 while (rstart < nend) {
3324 if (MMU_VALID_DT(*b_dte)) {
3325 c_dte = mmu_ptov(MMU_DTE_PA(*b_dte));
3326 c_tbl = mmuC2tmgr(c_dte);
3327 ct_wired = c_tbl->ct_wcnt;
3328
3329 free_c_table(c_tbl, true);
3330
3331 /*
3332 				 * All child entries have been removed.
3333 * If there were any wired entries in it,
3334 * decrement wired entry count.
3335 */
3336 if (ct_wired)
3337 b_tbl->bt_wcnt--;
3338
3339 b_dte->attr.raw = MMU_DT_INVALID;
3340 b_tbl->bt_ecnt--;
3341 }
3342 b_dte++;
3343 rstart += MMU_TIB_RANGE;
3344 }
3345 }
3346 if (nend < eva) {
3347 idx = MMU_TIB(nend);
3348 b_dte = &b_tbl->bt_dtbl[idx];
3349 if (MMU_VALID_DT(*b_dte)) {
3350 c_dte = mmu_ptov(MMU_DTE_PA(*b_dte));
3351 c_tbl = mmuC2tmgr(c_dte);
3352 ct_wired = c_tbl->ct_wcnt;
3353 empty = pmap_remove_c(c_tbl, nend, eva);
3354
3355 /*
3356 * If the child table no longer has wired entries,
3357 * decrement wired entry count.
3358 */
3359 if (ct_wired && c_tbl->ct_wcnt == 0)
3360 b_tbl->bt_wcnt--;
3361
3362 if (empty) {
3363 b_dte->attr.raw = MMU_DT_INVALID;
3364 b_tbl->bt_ecnt--;
3365 }
3366 }
3367 }
3368
3369 if (b_tbl->bt_ecnt == 0) {
3370 KASSERT(b_tbl->bt_wcnt == 0);
3371 b_tbl->bt_parent = NULL;
3372 if (!bt_wired)
3373 TAILQ_REMOVE(&b_pool, b_tbl, bt_link);
3374 TAILQ_INSERT_HEAD(&b_pool, b_tbl, bt_link);
3375 empty = true;
3376 } else {
3377 /*
3378 * If the table doesn't have wired entries any longer
3379 * but still has unwired entries, put it back into
3380 * the available queue.
3381 */
3382 if (bt_wired && b_tbl->bt_wcnt == 0)
3383 TAILQ_INSERT_TAIL(&b_pool, b_tbl, bt_link);
3384
3385 empty = false;
3386 }
3387
3388 return empty;
3389 }
3390
3391 /* pmap_remove_c INTERNAL
3392 **
3393 * Remove a range of addresses from the given C table.
3394 */
3395 bool
3396 pmap_remove_c(c_tmgr_t *c_tbl, vaddr_t sva, vaddr_t eva)
3397 {
3398 bool empty;
3399 int idx;
3400 mmu_short_pte_t *c_pte;
3401 uint8_t ct_wired;
3402
3403 ct_wired = c_tbl->ct_wcnt;
3404
3405 idx = MMU_TIC(sva);
3406 c_pte = &c_tbl->ct_dtbl[idx];
3407 for (; sva < eva; sva += MMU_PAGE_SIZE, c_pte++) {
3408 if (MMU_VALID_DT(*c_pte)) {
3409 if (c_pte->attr.raw & MMU_SHORT_PTE_WIRED)
3410 c_tbl->ct_wcnt--;
3411 pmap_remove_pte(c_pte);
3412 c_tbl->ct_ecnt--;
3413 }
3414 }
3415
3416 if (c_tbl->ct_ecnt == 0) {
3417 KASSERT(c_tbl->ct_wcnt == 0);
3418 c_tbl->ct_parent = NULL;
3419 if (!ct_wired)
3420 TAILQ_REMOVE(&c_pool, c_tbl, ct_link);
3421 TAILQ_INSERT_HEAD(&c_pool, c_tbl, ct_link);
3422 empty = true;
3423 } else {
3424 /*
3425 * If the table doesn't have wired entries any longer
3426 * but still has unwired entries, put it back into
3427 * the available queue.
3428 */
3429 if (ct_wired && c_tbl->ct_wcnt == 0)
3430 TAILQ_INSERT_TAIL(&c_pool, c_tbl, ct_link);
3431 empty = false;
3432 }
3433
3434 return empty;
3435 }
3436
3437 /* pmap_bootstrap_alloc INTERNAL
3438 **
3439 * Used internally for memory allocation at startup when malloc is not
3440 * available. This code will fail once it crosses the first memory
3441 * bank boundary on the 3/80. Hopefully by then, however, the VM system
3442 * will be in charge of allocation.
3443 */
3444 void *
3445 pmap_bootstrap_alloc(int size)
3446 {
3447 void *rtn;
3448
3449 #ifdef PMAP_DEBUG
3450 if (bootstrap_alloc_enabled == false) {
3451 mon_printf("pmap_bootstrap_alloc: disabled\n");
3452 sunmon_abort();
3453 }
3454 #endif
3455
3456 rtn = (void *) virtual_avail;
3457 virtual_avail += size;
3458
3459 #ifdef PMAP_DEBUG
3460 if (virtual_avail > virtual_contig_end) {
3461 mon_printf("pmap_bootstrap_alloc: out of mem\n");
3462 sunmon_abort();
3463 }
3464 #endif
3465
3466 return rtn;
3467 }
3468
3469 /* pmap_bootstrap_aalign INTERNAL
3470 **
3471 * Used to ensure that the next call to pmap_bootstrap_alloc() will
3472 * return a chunk of memory aligned to the specified size.
3473 *
3474 * Note: This function will only support alignment sizes that are powers
3475 * of two.
3476 */
3477 void
3478 pmap_bootstrap_aalign(int size)
3479 {
3480 int off;
3481
3482 off = virtual_avail & (size - 1);
3483 if (off) {
3484 (void)pmap_bootstrap_alloc(size - off);
3485 }
3486 }
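
/*
 * A worked example with hypothetical numbers: if virtual_avail is
 * 0x00FFE234 and an 8192-byte (0x2000) alignment is requested, then
 * off = 0x00FFE234 & 0x1FFF = 0x0234, and the padding allocation of
 * 0x2000 - 0x0234 = 0x1DCC bytes advances virtual_avail to
 * 0x01000000, which is properly aligned.
 */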
3487
3488 /* pmap_pa_exists
3489 **
3490 * Used by the /dev/mem driver to see if a given PA is memory
3491 * that can be mapped. (The PA is not in a hole.)
3492 */
3493 int
3494 pmap_pa_exists(paddr_t pa)
3495 {
3496 int i;
3497
3498 for (i = 0; i < SUN3X_NPHYS_RAM_SEGS; i++) {
3499 if ((pa >= avail_mem[i].pmem_start) &&
3500 (pa < avail_mem[i].pmem_end))
3501 return 1;
3502 if (avail_mem[i].pmem_next == NULL)
3503 break;
3504 }
3505 return 0;
3506 }
3507
3508 /* Called only from locore.s and pmap.c */
3509 void _pmap_switch(pmap_t pmap);
3510
3511 /*
3512 * _pmap_switch INTERNAL
3513 *
3514 * This is called by locore.s:cpu_switch() when it is
3515 * switching to a new process. Load new translations.
3516 * Note: done in-line by locore.s unless PMAP_DEBUG
3517 *
3518 * Note that we do NOT allocate a context here, but
3519 * share the "kernel only" context until we really
3520 * need our own context for user-space mappings in
3521 * pmap_enter_user(). [ s/context/mmu A table/ ]
3522 */
3523 void
3524 _pmap_switch(pmap_t pmap)
3525 {
3526 u_long rootpa;
3527
3528 /*
3529 * Only do reload/flush if we have to.
3530 * Note that if the old and new process
3531 * were BOTH using the "null" context,
3532 * then this will NOT flush the TLB.
3533 */
3534 rootpa = pmap->pm_a_phys;
3535 if (kernel_crp.rp_addr != rootpa) {
3536 DPRINT(("pmap_activate(%p)\n", pmap));
3537 kernel_crp.rp_addr = rootpa;
3538 loadcrp(&kernel_crp);
3539 TBIAU();
3540 }
3541 }
3542
3543 /*
3544 * Exported version of pmap_activate(). This is called from the
3545 * machine-independent VM code when a process is given a new pmap.
3546 * If (l->l_proc == curproc) do what cpu_switch would do; otherwise just
3547 * take this as notification that the process has a new pmap.
3548 */
3549 void
3550 pmap_activate(struct lwp *l)
3551 {
3552
3553 if (l->l_proc == curproc) {
3554 _pmap_switch(l->l_proc->p_vmspace->vm_map.pmap);
3555 }
3556 }
3557
3558 /*
3559 * pmap_deactivate INTERFACE
3560 **
3561 * This is called to deactivate the specified process's address space.
3562 */
3563 void
3564 pmap_deactivate(struct lwp *l)
3565 {
3566
3567 /* Nothing to do. */
3568 }
3569
3570 /*
3571 * Fill in the sun3x-specific part of the kernel core header
3572 * for dumpsys(). (See machdep.c for the rest.)
3573 */
3574 void
3575 pmap_kcore_hdr(struct sun3x_kcore_hdr *sh)
3576 {
3577 u_long spa, len;
3578 int i;
3579
3580 sh->pg_frame = MMU_SHORT_PTE_BASEADDR;
3581 sh->pg_valid = MMU_DT_PAGE;
3582 sh->contig_end = virtual_contig_end;
3583 sh->kernCbase = (u_long)kernCbase;
3584 for (i = 0; i < SUN3X_NPHYS_RAM_SEGS; i++) {
3585 spa = avail_mem[i].pmem_start;
3586 spa = m68k_trunc_page(spa);
3587 len = avail_mem[i].pmem_end - spa;
3588 len = m68k_round_page(len);
3589 sh->ram_segs[i].start = spa;
3590 sh->ram_segs[i].size = len;
3591 }
3592 }
3593
3594
3595 /* pmap_virtual_space INTERFACE
3596 **
3597 * Return the current available range of virtual addresses in the
3598 * arguments provided. Only really called once.
3599 */
3600 void
3601 pmap_virtual_space(vaddr_t *vstart, vaddr_t *vend)
3602 {
3603
3604 *vstart = virtual_avail;
3605 *vend = virtual_end;
3606 }
3607
3608 /*
3609 * Provide memory to the VM system.
3610 *
3611 * Assume avail_start is always in the
3612 * first segment as pmap_bootstrap does.
3613 */
3614 static void
3615 pmap_page_upload(void)
3616 {
3617 paddr_t a, b; /* memory range */
3618 int i;
3619
3620 /* Supply the memory in segments. */
3621 for (i = 0; i < SUN3X_NPHYS_RAM_SEGS; i++) {
3622 a = atop(avail_mem[i].pmem_start);
3623 b = atop(avail_mem[i].pmem_end);
3624 if (i == 0)
3625 a = atop(avail_start);
3626 if (avail_mem[i].pmem_end > avail_end)
3627 b = atop(avail_end);
3628
3629 uvm_page_physload(a, b, a, b, VM_FREELIST_DEFAULT);
3630
3631 if (avail_mem[i].pmem_next == NULL)
3632 break;
3633 }
3634 }
3635
3636 /* pmap_count INTERFACE
3637 **
3638 * Return the number of resident (valid) pages in the given pmap.
3639 *
3640 * Note: If this function is handed the kernel map, it will report
3641 * that it has no mappings. Hopefully the VM system won't ask for kernel
3642 * map statistics.
3643 */
3644 segsz_t
3645 pmap_count(pmap_t pmap, int type)
3646 {
3647 u_int count;
3648 int a_idx, b_idx;
3649 a_tmgr_t *a_tbl;
3650 b_tmgr_t *b_tbl;
3651 c_tmgr_t *c_tbl;
3652
3653 /*
3654 * If the pmap does not have its own A table manager, it has no
3655 	 * valid entries.
3656 */
3657 if (pmap->pm_a_tmgr == NULL)
3658 return 0;
3659
3660 a_tbl = pmap->pm_a_tmgr;
3661
3662 count = 0;
3663 for (a_idx = 0; a_idx < MMU_TIA(KERNBASE); a_idx++) {
3664 if (MMU_VALID_DT(a_tbl->at_dtbl[a_idx])) {
3665 b_tbl = mmuB2tmgr(mmu_ptov(a_tbl->at_dtbl[a_idx].addr.raw));
3666 for (b_idx = 0; b_idx < MMU_B_TBL_SIZE; b_idx++) {
3667 if (MMU_VALID_DT(b_tbl->bt_dtbl[b_idx])) {
3668 c_tbl = mmuC2tmgr(
3669 mmu_ptov(MMU_DTE_PA(b_tbl->bt_dtbl[b_idx])));
3670 if (type == 0)
3671 /*
3672 * A resident entry count has been requested.
3673 */
3674 count += c_tbl->ct_ecnt;
3675 else
3676 /*
3677 * A wired entry count has been requested.
3678 */
3679 count += c_tbl->ct_wcnt;
3680 }
3681 }
3682 }
3683 }
3684
3685 return count;
3686 }
3687
3688 /************************ SUN3 COMPATIBILITY ROUTINES ********************
3689 * The following routines are only used by DDB for tricky kernel     *
3690 * text operations in db_memrw.c. They are provided for sun3         *
3691 * compatibility. *
3692 *************************************************************************/
3693 /* get_pte INTERNAL
3694 **
3695 * Return the page descriptor that describes the kernel mapping
3696 * of the given virtual address.
3697 */
3698 extern u_long ptest_addr(u_long); /* XXX: locore.s */
3699 u_int
3700 get_pte(vaddr_t va)
3701 {
3702 u_long pte_pa;
3703 mmu_short_pte_t *pte;
3704
3705 /* Get the physical address of the PTE */
3706 pte_pa = ptest_addr(va & ~PGOFSET);
3707
3708 /* Convert to a virtual address... */
3709 pte = (mmu_short_pte_t *) (KERNBASE + pte_pa);
3710
3711 /* Make sure it is in our level-C tables... */
3712 if ((pte < kernCbase) ||
3713 (pte >= &mmuCbase[NUM_USER_PTES]))
3714 return 0;
3715
3716 /* ... and just return its contents. */
3717 return (pte->attr.raw);
3718 }
3719
3720
3721 /* set_pte INTERNAL
3722 **
3723 * Set the page descriptor that describes the kernel mapping
3724 * of the given virtual address.
3725 */
3726 void
3727 set_pte(vaddr_t va, u_int pte)
3728 {
3729 u_long idx;
3730
3731 if (va < KERNBASE)
3732 return;
3733
3734 idx = (unsigned long) m68k_btop(va - KERNBASE);
3735 kernCbase[idx].attr.raw = pte;
3736 TBIS(va);
3737 }
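/*
 * A sketch of the db_memrw.c-style sequence these two support
 * (hypothetical; 'WP' stands in for the real write-protect bit):
 *
 *	u_int oldpte = get_pte(va);
 *	set_pte(va, oldpte & ~WP);	make the text page writable
 *	*(char *)va = newbyte;		patch the instruction
 *	set_pte(va, oldpte);		restore the old protection
 */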
3738
3739 /*
3740 * Routine: pmap_procwr
3741 *
3742 * Function:
3743 * Synchronize caches corresponding to [addr, addr+len) in p.
3744 */
3745 void
3746 pmap_procwr(struct proc *p, vaddr_t va, size_t len)
3747 {
3748
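	/*
	 * 0x80000004 is CC_EXTPURGE | CC_IPURGE, assuming the usual
	 * m68k cachectl flag values: purge the instruction cache
	 * (on- and off-chip) for the given range.
	 */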
3749 (void)cachectl1(0x80000004, va, len, p);
3750 }
3751
3752
3753 #ifdef PMAP_DEBUG
3754 /************************** DEBUGGING ROUTINES **************************
3755 * The following routines are meant to be an aid to debugging the pmap *
3756 * system. They are callable from the DDB command line and should be *
3757 * prepared to be handed unstable or incomplete states of the system. *
3758 ************************************************************************/
3759
3760 /* pv_list
3761 **
3762 * List all pages found on the pv list for the given physical page.
3763 * To avoid endless loops, the listing will stop at the end of the list
3764 * or after 'n' entries - whichever comes first.
3765 */
3766 void
3767 pv_list(paddr_t pa, int n)
3768 {
3769 int idx;
3770 vaddr_t va;
3771 pv_t *pv;
3772 c_tmgr_t *c_tbl;
3773 pmap_t pmap;
3774
3775 pv = pa2pv(pa);
3776 idx = pv->pv_idx;
3777 for (; idx != PVE_EOL && n > 0; idx = pvebase[idx].pve_next, n--) {
3778 va = pmap_get_pteinfo(idx, &pmap, &c_tbl);
3779 		printf("idx %d, pmap 0x%x, va 0x%x, c_tbl 0x%x\n",
3780 		    idx, (u_int) pmap, (u_int) va, (u_int) c_tbl);
3781 }
3782 }
3783 #endif /* PMAP_DEBUG */
3784
3785 #ifdef NOT_YET
3786 /* and maybe not ever */
3787 /************************** LOW-LEVEL ROUTINES **************************
3788 * These routines will eventually be re-written into assembly and placed*
3789 * in locore.s. They are here now as stubs so that the pmap module can *
3790 * be linked as a standalone user program for testing. *
3791 ************************************************************************/
3792 /* flush_atc_crp INTERNAL
3793 **
3794 * Flush all page descriptors derived from the given CPU Root Pointer
3795 * (CRP), or 'A' table as it is known here, from the 68851's Address
3796 * Translation Cache (ATC).
3797 */
3798 void
3799 flush_atc_crp(int a_tbl)
3800 {
3801 mmu_long_rp_t rp;
3802
3803 /* Create a temporary root table pointer that points to the
3804 * given A table.
3805 */
3806 rp.attr.raw = ~MMU_LONG_RP_LU;
3807 rp.addr.raw = (unsigned int) a_tbl;
3808
3809 mmu_pflushr(&rp);
3810 /* mmu_pflushr:
3811 * movel sp(4)@,a0
3812 * pflushr a0@
3813 * rts
3814 */
3815 }
3816 #endif /* NOT_YET */
3817