1 /* $NetBSD: pmap_motorola.c,v 1.101 2025/12/04 02:55:24 thorpej Exp $ */ 2 3 /*- 4 * Copyright (c) 1999 The NetBSD Foundation, Inc. 5 * All rights reserved. 6 * 7 * This code is derived from software contributed to The NetBSD Foundation 8 * by Jason R. Thorpe. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 29 * POSSIBILITY OF SUCH DAMAGE. 30 */ 31 32 /* 33 * Copyright (c) 1991, 1993 34 * The Regents of the University of California. All rights reserved. 35 * 36 * This code is derived from software contributed to Berkeley by 37 * the Systems Programming Group of the University of Utah Computer 38 * Science Department. 
39 * 40 * Redistribution and use in source and binary forms, with or without 41 * modification, are permitted provided that the following conditions 42 * are met: 43 * 1. Redistributions of source code must retain the above copyright 44 * notice, this list of conditions and the following disclaimer. 45 * 2. Redistributions in binary form must reproduce the above copyright 46 * notice, this list of conditions and the following disclaimer in the 47 * documentation and/or other materials provided with the distribution. 48 * 3. Neither the name of the University nor the names of its contributors 49 * may be used to endorse or promote products derived from this software 50 * without specific prior written permission. 51 * 52 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 53 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 54 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 55 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 56 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 57 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 58 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 59 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 60 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 61 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 62 * SUCH DAMAGE. 63 * 64 * @(#)pmap.c 8.6 (Berkeley) 5/27/94 65 */ 66 67 /* 68 * Motorola m68k-family physical map management code. 69 * 70 * Supports: 71 * 68020 with 68851 MMU 72 * 68020 with HP MMU 73 * 68030 with on-chip MMU 74 * 68040 with on-chip MMU 75 * 68060 with on-chip MMU 76 * 77 * Notes: 78 * Don't even pay lip service to multiprocessor support. 
79 * 80 * We assume TLB entries don't have process tags (except for the 81 * supervisor/user distinction) so we only invalidate TLB entries 82 * when changing mappings for the current (or kernel) pmap. This is 83 * technically not true for the 68851 but we flush the TLB on every 84 * context switch, so it effectively winds up that way. 85 * 86 * Bitwise and/or operations are significantly faster than bitfield 87 * references so we use them when accessing STE/PTEs in the pmap_pte_* 88 * macros. Note also that the two are not always equivalent; e.g.: 89 * (*pte & PG_PROT) [4] != pte->pg_prot [1] 90 * and a couple of routines that deal with protection and wiring take 91 * some shortcuts that assume the and/or definitions. 92 */ 93 94 /* 95 * Manages physical address maps. 96 * 97 * In addition to hardware address maps, this 98 * module is called upon to provide software-use-only 99 * maps which may or may not be stored in the same 100 * form as hardware maps. These pseudo-maps are 101 * used to store intermediate results from copy 102 * operations to and from address spaces. 103 * 104 * Since the information managed by this module is 105 * also stored by the logical address mapping module, 106 * this module may throw away valid virtual-to-physical 107 * mappings at almost any time. However, invalidations 108 * of virtual-to-physical mappings must be done as 109 * requested. 110 * 111 * In order to cope with hardware architectures which 112 * make virtual-to-physical map invalidates expensive, 113 * this module may delay invalidate or reduced protection 114 * operations until such time as they are actually 115 * necessary. This module is given full information as 116 * to which processors are currently using which maps, 117 * and to when physical maps must be made correct. 
118 */ 119 120 #include "opt_m68k_arch.h" 121 122 #include <sys/cdefs.h> 123 __KERNEL_RCSID(0, "$NetBSD: pmap_motorola.c,v 1.101 2025/12/04 02:55:24 thorpej Exp $"); 124 125 #include <sys/param.h> 126 #include <sys/systm.h> 127 #include <sys/proc.h> 128 #include <sys/pool.h> 129 #include <sys/cpu.h> 130 #ifndef __HAVE_M68K_BROKEN_RMC 131 #include <sys/atomic.h> 132 #endif 133 134 #include <machine/pcb.h> 135 136 #include <uvm/uvm.h> 137 #include <uvm/uvm_physseg.h> 138 139 #include <m68k/cacheops.h> 140 141 #if !defined(M68K_MMU_MOTOROLA) && !defined(M68K_MMU_HP) 142 #error Hit the road, Jack... 143 #endif 144 145 #ifdef DEBUG 146 #define PDB_FOLLOW 0x0001 147 #define PDB_INIT 0x0002 148 #define PDB_ENTER 0x0004 149 #define PDB_REMOVE 0x0008 150 #define PDB_CREATE 0x0010 151 #define PDB_PTPAGE 0x0020 152 #define PDB_CACHE 0x0040 153 #define PDB_BITS 0x0080 154 #define PDB_COLLECT 0x0100 155 #define PDB_PROTECT 0x0200 156 #define PDB_SEGTAB 0x0400 157 #define PDB_MULTIMAP 0x0800 158 #define PDB_PARANOIA 0x2000 159 #define PDB_WIRING 0x4000 160 #define PDB_PVDUMP 0x8000 161 162 int debugmap = 0; 163 int pmapdebug = PDB_PARANOIA; 164 165 #define PMAP_DPRINTF(l, x) if (pmapdebug & (l)) printf x 166 #else /* ! DEBUG */ 167 #define PMAP_DPRINTF(l, x) /* nothing */ 168 #endif /* DEBUG */ 169 170 /* 171 * Get STEs and PTEs for user/kernel address space 172 */ 173 #if defined(M68040) || defined(M68060) 174 #define pmap_ste1(m, v) \ 175 (&((m)->pm_stab[(vaddr_t)(v) >> SG4_SHIFT1])) 176 /* XXX assumes physically contiguous ST pages (if more than one) */ 177 #define pmap_ste2(m, v) \ 178 (&((m)->pm_stab[(st_entry_t *)(*(u_int *)pmap_ste1(m, v) & SG4_ADDR1) \ 179 - (m)->pm_stpa + (((v) & SG4_MASK2) >> SG4_SHIFT2)])) 180 #if defined(M68020) || defined(M68030) 181 #define pmap_ste(m, v) \ 182 (&((m)->pm_stab[(vaddr_t)(v) \ 183 >> (mmutype == MMU_68040 ? SG4_SHIFT1 : SG_ISHIFT)])) 184 #define pmap_ste_v(m, v) \ 185 (mmutype == MMU_68040 \ 186 ? 
((*pmap_ste1(m, v) & SG_V) && \ 187 (*pmap_ste2(m, v) & SG_V)) \ 188 : (*pmap_ste(m, v) & SG_V)) 189 #else 190 #define pmap_ste(m, v) \ 191 (&((m)->pm_stab[(vaddr_t)(v) >> SG4_SHIFT1])) 192 #define pmap_ste_v(m, v) \ 193 ((*pmap_ste1(m, v) & SG_V) && (*pmap_ste2(m, v) & SG_V)) 194 #endif 195 #else 196 #define pmap_ste(m, v) (&((m)->pm_stab[(vaddr_t)(v) >> SG_ISHIFT])) 197 #define pmap_ste_v(m, v) (*pmap_ste(m, v) & SG_V) 198 #endif 199 200 #define pmap_pte(m, v) (&((m)->pm_ptab[(vaddr_t)(v) >> PG_SHIFT])) 201 #define pmap_pte_pa(pte) (*(pte) & PG_FRAME) 202 #define pmap_pte_w(pte) (*(pte) & PG_W) 203 #define pmap_pte_ci(pte) (*(pte) & PG_CI) 204 #define pmap_pte_m(pte) (*(pte) & PG_M) 205 #define pmap_pte_u(pte) (*(pte) & PG_U) 206 #define pmap_pte_prot(pte) (*(pte) & PG_PROT) 207 #define pmap_pte_v(pte) (*(pte) & PG_V) 208 209 #define pmap_pte_set_w(pte, v) \ 210 if (v) *(pte) |= PG_W; else *(pte) &= ~PG_W 211 #define pmap_pte_set_prot(pte, v) \ 212 if (v) *(pte) |= PG_PROT; else *(pte) &= ~PG_PROT 213 #define pmap_pte_w_chg(pte, nw) ((nw) ^ pmap_pte_w(pte)) 214 #define pmap_pte_prot_chg(pte, np) ((np) ^ pmap_pte_prot(pte)) 215 216 /* 217 * Given a map and a machine independent protection code, 218 * convert to an m68k protection code. 219 */ 220 #define pte_prot(m, p) (protection_codes[p]) 221 static u_int protection_codes[8]; 222 223 /* 224 * Kernel page table page management. 225 */ 226 struct kpt_page { 227 struct kpt_page *kpt_next; /* link on either used or free list */ 228 vaddr_t kpt_va; /* always valid kernel VA */ 229 paddr_t kpt_pa; /* PA of this page (for speed) */ 230 }; 231 struct kpt_page *kpt_free_list, *kpt_used_list; 232 struct kpt_page *kpt_pages; 233 234 /* 235 * Kernel segment/page table and page table map. 236 * The page table map gives us a level of indirection we need to dynamically 237 * expand the page table. It is essentially a copy of the segment table 238 * with PTEs instead of STEs. All are initialized in locore at boot time. 
239 * Sysmap will initially contain VM_KERNEL_PT_PAGES pages of PTEs. 240 * Segtabzero is an empty segment table which all processes share til they 241 * reference something. 242 */ 243 paddr_t Sysseg_pa; 244 st_entry_t *Sysseg; 245 pt_entry_t *Sysmap, *Sysptmap; 246 st_entry_t *Segtabzero, *Segtabzeropa; 247 vsize_t Sysptsize = VM_KERNEL_PT_PAGES; 248 249 static struct pmap kernel_pmap_store; 250 struct pmap *const kernel_pmap_ptr = &kernel_pmap_store; 251 struct vm_map *st_map, *pt_map; 252 struct vm_map st_map_store, pt_map_store; 253 254 vaddr_t lwp0uarea; /* lwp0 u-area VA, initialized in bootstrap */ 255 256 paddr_t avail_start; /* PA of first available physical page */ 257 paddr_t avail_end; /* PA of last available physical page */ 258 vaddr_t virtual_avail; /* VA of first avail page (after kernel bss)*/ 259 vaddr_t virtual_end; /* VA of last avail page (end of kernel AS) */ 260 int page_cnt; /* number of pages managed by VM system */ 261 262 bool pmap_initialized = false; /* Has pmap_init completed? 
*/ 263 264 vaddr_t m68k_uptbase = M68K_PTBASE; 265 266 struct pv_header { 267 struct pv_entry pvh_first; /* first PV entry */ 268 uint32_t pvh_attrs; /* attributes: 269 bits 0-7: PTE bits 270 bits 8-15: flags */ 271 }; 272 273 #define PVH_CI 0x10 /* all entries are cache-inhibited */ 274 #define PVH_PTPAGE 0x20 /* entry maps a page table page */ 275 276 struct pv_header *pv_table; 277 TAILQ_HEAD(pv_page_list, pv_page) pv_page_freelist; 278 int pv_nfree; 279 280 #ifdef CACHE_HAVE_VAC 281 static u_int pmap_aliasmask; /* separation at which VA aliasing ok */ 282 #endif 283 #if defined(M68040) || defined(M68060) 284 u_int protostfree; /* prototype (default) free ST map */ 285 #endif 286 287 pt_entry_t *caddr1_pte; /* PTE for CADDR1 */ 288 pt_entry_t *caddr2_pte; /* PTE for CADDR2 */ 289 290 struct pool pmap_pmap_pool; /* memory pool for pmap structures */ 291 struct pool pmap_pv_pool; /* memory pool for pv entries */ 292 293 #define pmap_alloc_pv() pool_get(&pmap_pv_pool, PR_NOWAIT) 294 #define pmap_free_pv(pv) pool_put(&pmap_pv_pool, (pv)) 295 296 #define PAGE_IS_MANAGED(pa) (pmap_initialized && uvm_pageismanaged(pa)) 297 298 static inline struct pv_header * 299 pa_to_pvh(paddr_t pa) 300 { 301 uvm_physseg_t bank = 0; /* XXX gcc4 -Wuninitialized */ 302 psize_t pg = 0; 303 304 bank = uvm_physseg_find(atop((pa)), &pg); 305 return &uvm_physseg_get_pmseg(bank)->pvheader[pg]; 306 } 307 308 /* 309 * Internal routines 310 */ 311 void pmap_remove_mapping(pmap_t, vaddr_t, pt_entry_t *, int, 312 struct pv_entry **); 313 bool pmap_testbit(paddr_t, int); 314 bool pmap_changebit(paddr_t, pt_entry_t, pt_entry_t); 315 int pmap_enter_ptpage(pmap_t, vaddr_t, bool); 316 void pmap_ptpage_addref(vaddr_t); 317 int pmap_ptpage_delref(vaddr_t); 318 void pmap_pinit(pmap_t); 319 void pmap_release(pmap_t); 320 321 #ifdef DEBUG 322 void pmap_pvdump(paddr_t); 323 void pmap_check_wiring(const char *, vaddr_t); 324 #endif 325 326 /* pmap_remove_mapping flags */ 327 #define PRM_TFLUSH 0x01 328 
#define PRM_CFLUSH 0x02 329 #define PRM_KEEPPTPAGE 0x04 330 331 #define active_pmap(pm) \ 332 ((pm) == pmap_kernel() || (pm) == curproc->p_vmspace->vm_map.pmap) 333 334 #define active_user_pmap(pm) \ 335 (curproc && \ 336 (pm) != pmap_kernel() && (pm) == curproc->p_vmspace->vm_map.pmap) 337 338 static void (*pmap_load_urp_func)(paddr_t); 339 340 /* 341 * pmap_load_urp: 342 * 343 * Load the user root table into the MMU. 344 */ 345 static inline void 346 pmap_load_urp(paddr_t urp) 347 { 348 (*pmap_load_urp_func)(urp); 349 } 350 351 #ifdef CACHE_HAVE_VAC 352 /* 353 * pmap_init_vac: 354 * 355 * Set up virtually-addressed cache information. Only relevant 356 * for the HP MMU. 357 */ 358 void 359 pmap_init_vac(size_t vacsize) 360 { 361 KASSERT(pmap_aliasmask == 0); 362 KASSERT(powerof2(vacsize)); 363 pmap_aliasmask = vacsize - 1; 364 } 365 #endif /* CACHE_HAVE_VAC */ 366 367 /* 368 * pmap_bootstrap2: [ INTERFACE ] 369 * 370 * Phase 2 of pmap bootstrap. (Phase 1 is system-specific.) 371 * 372 * Initialize lwp0 uarea, curlwp, and curpcb after MMU is turned on, 373 * using lwp0uarea variable saved during pmap_bootstrap(). 374 */ 375 void * 376 pmap_bootstrap2(void) 377 { 378 379 uvmexp.pagesize = NBPG; 380 uvm_md_init(); 381 382 /* 383 * Initialize protection array. 384 * XXX: Could this have port specific values? Can't this be static? 385 */ 386 protection_codes[VM_PROT_NONE|VM_PROT_NONE|VM_PROT_NONE] = 0; 387 protection_codes[VM_PROT_READ|VM_PROT_NONE|VM_PROT_NONE] = PG_RO; 388 protection_codes[VM_PROT_READ|VM_PROT_NONE|VM_PROT_EXECUTE] = PG_RO; 389 protection_codes[VM_PROT_NONE|VM_PROT_NONE|VM_PROT_EXECUTE] = PG_RO; 390 protection_codes[VM_PROT_NONE|VM_PROT_WRITE|VM_PROT_NONE] = PG_RW; 391 protection_codes[VM_PROT_NONE|VM_PROT_WRITE|VM_PROT_EXECUTE] = PG_RW; 392 protection_codes[VM_PROT_READ|VM_PROT_WRITE|VM_PROT_NONE] = PG_RW; 393 protection_codes[VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE] = PG_RW; 394 395 /* 396 * Initialize pmap_kernel(). 
397 */ 398 pmap_kernel()->pm_stpa = (st_entry_t *)Sysseg_pa; 399 pmap_kernel()->pm_stab = Sysseg; 400 pmap_kernel()->pm_ptab = Sysmap; 401 #if defined(M68040) || defined(M68060) 402 if (mmutype == MMU_68040) 403 pmap_kernel()->pm_stfree = protostfree; 404 #endif 405 pmap_kernel()->pm_count = 1; 406 407 /* 408 * Initialize lwp0 uarea, curlwp, and curpcb. 409 */ 410 memset((void *)lwp0uarea, 0, USPACE); 411 uvm_lwp_setuarea(&lwp0, lwp0uarea); 412 curlwp = &lwp0; 413 curpcb = lwp_getpcb(&lwp0); 414 415 /* 416 * Initialize the source/destination control registers for 417 * movs. 418 */ 419 setsfc(FC_USERD); 420 setdfc(FC_USERD); 421 422 return (void *)lwp0uarea; 423 } 424 425 /* 426 * pmap_virtual_space: [ INTERFACE ] 427 * 428 * Report the range of available kernel virtual address 429 * space to the VM system during bootstrap. 430 * 431 * This is only an interface function if we do not use 432 * pmap_steal_memory()! 433 * 434 * Note: no locking is necessary in this function. 435 */ 436 void 437 pmap_virtual_space(vaddr_t *vstartp, vaddr_t *vendp) 438 { 439 440 *vstartp = virtual_avail; 441 *vendp = virtual_end; 442 } 443 444 /* 445 * pmap_init: [ INTERFACE ] 446 * 447 * Initialize the pmap module. Called by vm_init(), to initialize any 448 * structures that the pmap system needs to map virtual memory. 449 * 450 * Note: no locking is necessary in this function. 451 */ 452 void 453 pmap_init(void) 454 { 455 vaddr_t addr, addr2; 456 vsize_t s; 457 struct pv_header *pvh; 458 int rv; 459 int npages; 460 uvm_physseg_t bank; 461 462 PMAP_DPRINTF(PDB_FOLLOW, ("pmap_init()\n")); 463 464 /* 465 * Before we do anything else, initialize the PTE pointers 466 * used by pmap_zero_page() and pmap_copy_page(). 
467 */ 468 caddr1_pte = pmap_pte(pmap_kernel(), CADDR1); 469 caddr2_pte = pmap_pte(pmap_kernel(), CADDR2); 470 471 PMAP_DPRINTF(PDB_INIT, 472 ("pmap_init: Sysseg %p, Sysmap %p, Sysptmap %p\n", 473 Sysseg, Sysmap, Sysptmap)); 474 PMAP_DPRINTF(PDB_INIT, 475 (" pstart %lx, pend %lx, vstart %lx, vend %lx\n", 476 avail_start, avail_end, virtual_avail, virtual_end)); 477 478 /* 479 * Allocate memory for random pmap data structures. Includes the 480 * initial segment table, pv_head_table and pmap_attributes. 481 */ 482 for (page_cnt = 0, bank = uvm_physseg_get_first(); 483 uvm_physseg_valid_p(bank); 484 bank = uvm_physseg_get_next(bank)) 485 page_cnt += uvm_physseg_get_end(bank) - uvm_physseg_get_start(bank); 486 s = M68K_STSIZE; /* Segtabzero */ 487 s += page_cnt * sizeof(struct pv_header); /* pv table */ 488 s = round_page(s); 489 addr = uvm_km_alloc(kernel_map, s, 0, UVM_KMF_WIRED | UVM_KMF_ZERO); 490 if (addr == 0) 491 panic("pmap_init: can't allocate data structures"); 492 493 Segtabzero = (st_entry_t *)addr; 494 (void)pmap_extract(pmap_kernel(), addr, 495 (paddr_t *)(void *)&Segtabzeropa); 496 addr += M68K_STSIZE; 497 498 pv_table = (struct pv_header *) addr; 499 addr += page_cnt * sizeof(struct pv_header); 500 501 PMAP_DPRINTF(PDB_INIT, ("pmap_init: %lx bytes: page_cnt %x s0 %p(%p) " 502 "tbl %p\n", 503 s, page_cnt, Segtabzero, Segtabzeropa, 504 pv_table)); 505 506 /* 507 * Now that the pv and attribute tables have been allocated, 508 * assign them to the memory segments. 509 */ 510 pvh = pv_table; 511 for (bank = uvm_physseg_get_first(); 512 uvm_physseg_valid_p(bank); 513 bank = uvm_physseg_get_next(bank)) { 514 npages = uvm_physseg_get_end(bank) - uvm_physseg_get_start(bank); 515 uvm_physseg_get_pmseg(bank)->pvheader = pvh; 516 pvh += npages; 517 } 518 519 /* 520 * Allocate physical memory for kernel PT pages and their management. 521 * We need 1 PT page per possible task plus some slop. 
522 */ 523 npages = uimin(atop(M68K_MAX_KPTSIZE), maxproc+16); 524 s = ptoa(npages) + round_page(npages * sizeof(struct kpt_page)); 525 526 /* 527 * Verify that space will be allocated in region for which 528 * we already have kernel PT pages. 529 */ 530 addr = 0; 531 rv = uvm_map(kernel_map, &addr, s, NULL, UVM_UNKNOWN_OFFSET, 0, 532 UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE, 533 UVM_ADV_RANDOM, UVM_FLAG_NOMERGE)); 534 if (rv != 0 || (addr + s) >= (vaddr_t)Sysmap) 535 panic("pmap_init: kernel PT too small"); 536 uvm_unmap(kernel_map, addr, addr + s); 537 538 /* 539 * Now allocate the space and link the pages together to 540 * form the KPT free list. 541 */ 542 addr = uvm_km_alloc(kernel_map, s, 0, UVM_KMF_WIRED | UVM_KMF_ZERO); 543 if (addr == 0) 544 panic("pmap_init: cannot allocate KPT free list"); 545 s = ptoa(npages); 546 addr2 = addr + s; 547 kpt_pages = &((struct kpt_page *)addr2)[npages]; 548 kpt_free_list = NULL; 549 do { 550 addr2 -= PAGE_SIZE; 551 (--kpt_pages)->kpt_next = kpt_free_list; 552 kpt_free_list = kpt_pages; 553 kpt_pages->kpt_va = addr2; 554 (void) pmap_extract(pmap_kernel(), addr2, 555 (paddr_t *)&kpt_pages->kpt_pa); 556 } while (addr != addr2); 557 558 PMAP_DPRINTF(PDB_INIT, ("pmap_init: KPT: %ld pages from %lx to %lx\n", 559 atop(s), addr, addr + s)); 560 561 /* 562 * Allocate the segment table map and the page table map. 563 */ 564 s = maxproc * M68K_STSIZE; 565 st_map = uvm_km_suballoc(kernel_map, &addr, &addr2, s, 0, false, 566 &st_map_store); 567 568 addr = m68k_uptbase; 569 if ((M68K_PTMAXSIZE / M68K_MAX_PTSIZE) < maxproc) { 570 s = M68K_PTMAXSIZE; 571 /* 572 * XXX We don't want to hang when we run out of 573 * page tables, so we lower maxproc so that fork() 574 * will fail instead. Note that root could still raise 575 * this value via sysctl(3). 
576 */ 577 maxproc = (M68K_PTMAXSIZE / M68K_MAX_PTSIZE); 578 } else 579 s = (maxproc * M68K_MAX_PTSIZE); 580 pt_map = uvm_km_suballoc(kernel_map, &addr, &addr2, s, 0, 581 true, &pt_map_store); 582 583 #if defined(M68040) || defined(M68060) 584 if (mmutype == MMU_68040) { 585 protostfree = ~l2tobm(0); 586 for (rv = MAXUL2SIZE; rv < sizeof(protostfree)*NBBY; rv++) 587 protostfree &= ~l2tobm(rv); 588 } 589 #endif 590 591 /* 592 * Initialize the pmap pools. 593 */ 594 pool_init(&pmap_pmap_pool, sizeof(struct pmap), 0, 0, 0, "pmappl", 595 &pool_allocator_nointr, IPL_NONE); 596 597 /* 598 * Initialize the pv_entry pools. 599 */ 600 pool_init(&pmap_pv_pool, sizeof(struct pv_entry), 0, 0, 0, "pvpl", 601 &pool_allocator_meta, IPL_NONE); 602 603 /* 604 * Now that this is done, mark the pages shared with the 605 * hardware page table search as non-CCB (actually, as CI). 606 * 607 * XXX Hm. Given that this is in the kernel map, can't we just 608 * use the va's? 609 */ 610 #ifdef M68060 611 #if defined(M68020) || defined(M68030) || defined(M68040) 612 if (cputype == CPU_68060) 613 #endif 614 { 615 struct kpt_page *kptp = kpt_free_list; 616 paddr_t paddr; 617 618 while (kptp) { 619 pmap_changebit(kptp->kpt_pa, PG_CI, 620 (pt_entry_t)~PG_CCB); 621 kptp = kptp->kpt_next; 622 } 623 624 paddr = (paddr_t)Segtabzeropa; 625 while (paddr < (paddr_t)Segtabzeropa + M68K_STSIZE) { 626 pmap_changebit(paddr, PG_CI, 627 (pt_entry_t)~PG_CCB); 628 paddr += PAGE_SIZE; 629 } 630 631 DCIS(); 632 } 633 #endif 634 635 /* 636 * Set up the routine that loads the MMU root table pointer. 
637 */ 638 switch (cputype) { 639 #if defined(M68020) 640 case CPU_68020: 641 #ifdef M68K_MMU_MOTOROLA 642 if (mmutype == MMU_68851) { 643 protorp[0] = MMU51_CRP_BITS; 644 pmap_load_urp_func = mmu_load_urp51; 645 } 646 #endif 647 #ifdef M68K_MMU_HP 648 if (mmutype == MMU_HP) { 649 pmap_load_urp_func = mmu_load_urp20hp; 650 } 651 #endif 652 break; 653 #endif /* M68020 */ 654 #if defined(M68030) 655 case CPU_68030: 656 protorp[0] = MMU51_CRP_BITS; 657 pmap_load_urp_func = mmu_load_urp51; 658 break; 659 #endif /* M68030 */ 660 #if defined(M68040) 661 case CPU_68040: 662 pmap_load_urp_func = mmu_load_urp40; 663 break; 664 #endif /* M68040 */ 665 #if defined(M68060) 666 case CPU_68060: 667 pmap_load_urp_func = mmu_load_urp60; 668 break; 669 #endif /* M68060 */ 670 default: 671 break; 672 } 673 if (pmap_load_urp_func == NULL) { 674 panic("pmap_init: No mmu_load_*() for cpu=%d mmu=%d", 675 cputype, mmutype); 676 } 677 678 /* 679 * Now it is safe to enable pv_table recording. 680 */ 681 pmap_initialized = true; 682 } 683 684 /* 685 * pmap_create: [ INTERFACE ] 686 * 687 * Create and return a physical map. 688 * 689 * Note: no locking is necessary in this function. 690 */ 691 pmap_t 692 pmap_create(void) 693 { 694 struct pmap *pmap; 695 696 PMAP_DPRINTF(PDB_FOLLOW|PDB_CREATE, 697 ("pmap_create()\n")); 698 699 pmap = pool_get(&pmap_pmap_pool, PR_WAITOK); 700 memset(pmap, 0, sizeof(*pmap)); 701 pmap_pinit(pmap); 702 return pmap; 703 } 704 705 /* 706 * pmap_pinit: 707 * 708 * Initialize a preallocated and zeroed pmap structure. 709 * 710 * Note: THIS FUNCTION SHOULD BE MOVED INTO pmap_create()! 711 */ 712 void 713 pmap_pinit(struct pmap *pmap) 714 { 715 716 PMAP_DPRINTF(PDB_FOLLOW|PDB_CREATE, 717 ("pmap_pinit(%p)\n", pmap)); 718 719 /* 720 * No need to allocate page table space yet but we do need a 721 * valid segment table. Initially, we point everyone at the 722 * "null" segment table. On the first pmap_enter, a real 723 * segment table will be allocated. 
724 */ 725 pmap->pm_stab = Segtabzero; 726 pmap->pm_stpa = Segtabzeropa; 727 #if defined(M68040) || defined(M68060) 728 #if defined(M68020) || defined(M68030) 729 if (mmutype == MMU_68040) 730 #endif 731 pmap->pm_stfree = protostfree; 732 #endif 733 pmap->pm_count = 1; 734 } 735 736 /* 737 * pmap_destroy: [ INTERFACE ] 738 * 739 * Drop the reference count on the specified pmap, releasing 740 * all resources if the reference count drops to zero. 741 */ 742 void 743 pmap_destroy(pmap_t pmap) 744 { 745 int count; 746 747 PMAP_DPRINTF(PDB_FOLLOW, ("pmap_destroy(%p)\n", pmap)); 748 749 #ifdef __HAVE_M68K_BROKEN_RMC 750 count = --pmap->pm_count; 751 #else 752 count = atomic_dec_uint_nv(&pmap->pm_count); 753 #endif 754 if (count == 0) { 755 pmap_release(pmap); 756 pool_put(&pmap_pmap_pool, pmap); 757 } 758 } 759 760 /* 761 * pmap_release: 762 * 763 * Release the resources held by a pmap. 764 * 765 * Note: THIS FUNCTION SHOULD BE MOVED INTO pmap_destroy(). 766 */ 767 void 768 pmap_release(pmap_t pmap) 769 { 770 771 PMAP_DPRINTF(PDB_FOLLOW, ("pmap_release(%p)\n", pmap)); 772 773 #ifdef notdef /* DIAGNOSTIC */ 774 /* count would be 0 from pmap_destroy... */ 775 if (pmap->pm_count != 1) 776 panic("pmap_release count"); 777 #endif 778 779 if (pmap->pm_ptab) { 780 pmap_remove(pmap_kernel(), (vaddr_t)pmap->pm_ptab, 781 (vaddr_t)pmap->pm_ptab + M68K_MAX_PTSIZE); 782 uvm_km_pgremove((vaddr_t)pmap->pm_ptab, 783 (vaddr_t)pmap->pm_ptab + M68K_MAX_PTSIZE); 784 uvm_km_free(pt_map, (vaddr_t)pmap->pm_ptab, 785 M68K_MAX_PTSIZE, UVM_KMF_VAONLY); 786 } 787 KASSERT(pmap->pm_stab == Segtabzero); 788 } 789 790 /* 791 * pmap_reference: [ INTERFACE ] 792 * 793 * Add a reference to the specified pmap. 
794 */ 795 void 796 pmap_reference(pmap_t pmap) 797 { 798 PMAP_DPRINTF(PDB_FOLLOW, ("pmap_reference(%p)\n", pmap)); 799 800 #ifdef __HAVE_M68K_BROKEN_RMC 801 pmap->pm_count++; 802 #else 803 atomic_inc_uint(&pmap->pm_count); 804 #endif 805 } 806 807 /* 808 * pmap_activate: [ INTERFACE ] 809 * 810 * Activate the pmap used by the specified process. This includes 811 * reloading the MMU context if the current process, and marking 812 * the pmap in use by the processor. 813 * 814 * Note: we may only use spin locks here, since we are called 815 * by a critical section in cpu_switch()! 816 */ 817 void 818 pmap_activate(struct lwp *l) 819 { 820 pmap_t pmap = l->l_proc->p_vmspace->vm_map.pmap; 821 822 PMAP_DPRINTF(PDB_FOLLOW|PDB_SEGTAB, 823 ("pmap_activate(%p)\n", l)); 824 825 KASSERT(l == curlwp); 826 827 /* 828 * Because the kernel has a separate root pointer, we don't 829 * need to activate the kernel pmap. 830 */ 831 if (pmap != pmap_kernel()) { 832 pmap_load_urp((paddr_t)pmap->pm_stpa); 833 } 834 } 835 836 /* 837 * pmap_deactivate: [ INTERFACE ] 838 * 839 * Mark that the pmap used by the specified process is no longer 840 * in use by the processor. 841 * 842 * The comment above pmap_activate() wrt. locking applies here, 843 * as well. 844 */ 845 void 846 pmap_deactivate(struct lwp *l) 847 { 848 849 /* No action necessary in this pmap implementation. */ 850 } 851 852 /* 853 * pmap_remove: [ INTERFACE ] 854 * 855 * Remove the given range of addresses from the specified map. 856 * 857 * It is assumed that the start and end are properly 858 * rounded to the page size. 859 */ 860 void 861 pmap_remove(pmap_t pmap, vaddr_t sva, vaddr_t eva) 862 { 863 vaddr_t nssva; 864 pt_entry_t *pte; 865 int flags; 866 #ifdef CACHE_HAVE_VAC 867 bool firstpage = true, needcflush = false; 868 #endif 869 870 PMAP_DPRINTF(PDB_FOLLOW|PDB_REMOVE|PDB_PROTECT, 871 ("pmap_remove(%p, %lx, %lx)\n", pmap, sva, eva)); 872 873 flags = active_pmap(pmap) ? 
PRM_TFLUSH : 0; 874 while (sva < eva) { 875 nssva = m68k_trunc_seg(sva) + NBSEG; 876 if (nssva == 0 || nssva > eva) 877 nssva = eva; 878 879 /* 880 * Invalidate every valid mapping within this segment. 881 */ 882 883 pte = pmap_pte(pmap, sva); 884 while (sva < nssva) { 885 886 /* 887 * If this segment is unallocated, 888 * skip to the next segment boundary. 889 */ 890 891 if (!pmap_ste_v(pmap, sva)) { 892 sva = nssva; 893 break; 894 } 895 896 if (pmap_pte_v(pte)) { 897 #ifdef CACHE_HAVE_VAC 898 if (pmap_aliasmask) { 899 900 /* 901 * Purge kernel side of VAC to ensure 902 * we get the correct state of any 903 * hardware maintained bits. 904 */ 905 906 if (firstpage) { 907 DCIS(); 908 } 909 910 /* 911 * Remember if we may need to 912 * flush the VAC due to a non-CI 913 * mapping. 914 */ 915 916 if (!needcflush && !pmap_pte_ci(pte)) 917 needcflush = true; 918 919 } 920 firstpage = false; 921 #endif 922 pmap_remove_mapping(pmap, sva, pte, flags, NULL); 923 } 924 pte++; 925 sva += PAGE_SIZE; 926 } 927 } 928 929 #ifdef CACHE_HAVE_VAC 930 931 /* 932 * Didn't do anything, no need for cache flushes 933 */ 934 935 if (firstpage) 936 return; 937 938 /* 939 * In a couple of cases, we don't need to worry about flushing 940 * the VAC: 941 * 1. if this is a kernel mapping, 942 * we have already done it 943 * 2. if it is a user mapping not for the current process, 944 * it won't be there 945 */ 946 947 if (pmap_aliasmask && !active_user_pmap(pmap)) 948 needcflush = false; 949 if (needcflush) { 950 if (pmap == pmap_kernel()) { 951 DCIS(); 952 } else { 953 DCIU(); 954 } 955 } 956 #endif 957 } 958 959 /* 960 * pmap_page_protect: [ INTERFACE ] 961 * 962 * Lower the permission for all mappings to a given page to 963 * the permissions specified. 
964 */ 965 void 966 pmap_page_protect(struct vm_page *pg, vm_prot_t prot) 967 { 968 paddr_t pa = VM_PAGE_TO_PHYS(pg); 969 struct pv_header *pvh; 970 struct pv_entry *pv; 971 pt_entry_t *pte; 972 int s; 973 974 #ifdef DEBUG 975 if ((pmapdebug & (PDB_FOLLOW|PDB_PROTECT)) || 976 (prot == VM_PROT_NONE && (pmapdebug & PDB_REMOVE))) 977 printf("pmap_page_protect(%p, %x)\n", pg, prot); 978 #endif 979 980 switch (prot) { 981 case VM_PROT_READ|VM_PROT_WRITE: 982 case VM_PROT_ALL: 983 return; 984 985 /* copy_on_write */ 986 case VM_PROT_READ: 987 case VM_PROT_READ|VM_PROT_EXECUTE: 988 pmap_changebit(pa, PG_RO, ~0); 989 return; 990 991 /* remove_all */ 992 default: 993 break; 994 } 995 996 pvh = pa_to_pvh(pa); 997 pv = &pvh->pvh_first; 998 s = splvm(); 999 while (pv->pv_pmap != NULL) { 1000 1001 pte = pmap_pte(pv->pv_pmap, pv->pv_va); 1002 #ifdef DEBUG 1003 if (!pmap_ste_v(pv->pv_pmap, pv->pv_va) || 1004 pmap_pte_pa(pte) != pa) 1005 panic("pmap_page_protect: bad mapping"); 1006 #endif 1007 pmap_remove_mapping(pv->pv_pmap, pv->pv_va, 1008 pte, PRM_TFLUSH|PRM_CFLUSH, NULL); 1009 } 1010 splx(s); 1011 } 1012 1013 /* 1014 * pmap_protect: [ INTERFACE ] 1015 * 1016 * Set the physical protection on the specified range of this map 1017 * as requested. 
 */
void
pmap_protect(pmap_t pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
{
	vaddr_t nssva;
	pt_entry_t *pte;
	bool firstpage __unused, needtflush;
	int isro;

	PMAP_DPRINTF(PDB_FOLLOW|PDB_PROTECT,
	    ("pmap_protect(%p, %lx, %lx, %x)\n",
	    pmap, sva, eva, prot));

#ifdef PMAPSTATS
	protect_stats.calls++;
#endif
	/*
	 * Revoking all read permission reduces to removing the
	 * mappings entirely.
	 */
	if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
		pmap_remove(pmap, sva, eva);
		return;
	}
	isro = pte_prot(pmap, prot);
	/* Only flush the TLB if this pmap is currently loaded. */
	needtflush = active_pmap(pmap);
	firstpage = true;
	while (sva < eva) {
		/* Process at most one segment's worth of PTEs per pass. */
		nssva = m68k_trunc_seg(sva) + NBSEG;
		if (nssva == 0 || nssva > eva)
			nssva = eva;

		/*
		 * If VA belongs to an unallocated segment,
		 * skip to the next segment boundary.
		 */

		if (!pmap_ste_v(pmap, sva)) {
			sva = nssva;
			continue;
		}

		/*
		 * Change protection on mapping if it is valid and doesn't
		 * already have the correct protection.
		 */

		pte = pmap_pte(pmap, sva);
		while (sva < nssva) {
			if (pmap_pte_v(pte) && pmap_pte_prot_chg(pte, isro)) {
#ifdef CACHE_HAVE_VAC

				/*
				 * Purge kernel side of VAC to ensure we
				 * get the correct state of any hardware
				 * maintained bits.
				 *
				 * XXX do we need to clear the VAC in
				 * general to reflect the new protection?
				 */

				if (firstpage && pmap_aliasmask)
					DCIS();
#endif

#if defined(M68040) || defined(M68060)

				/*
				 * Clear caches if making RO (see section
				 * "7.3 Cache Coherency" in the manual).
				 */

#if defined(M68020) || defined(M68030)
				if (isro && mmutype == MMU_68040)
#else
				if (isro)
#endif
				{
					paddr_t pa = pmap_pte_pa(pte);

					DCFP(pa);
					ICPP(pa);
				}
#endif
				pmap_pte_set_prot(pte, isro);
				if (needtflush)
					TBIS(sva);
				firstpage = false;
			}
			pte++;
			sva += PAGE_SIZE;
		}
	}
}

/*
 * pmap_enter:		[ INTERFACE ]
 *
 *	Insert the given physical page (pa) at
 *	the specified virtual address (va) in the
 *	target physical map with the protection requested.
 *
 *	If specified, the page will be wired down, meaning
 *	that the related pte cannot be reclaimed.
 *
 *	Note: This is the only routine which MAY NOT lazy-evaluate
 *	or lose information.  That is, this routine must actually
 *	insert this page into the given map NOW.
 */
int
pmap_enter(pmap_t pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags)
{
	pt_entry_t *pte;
	struct pv_entry *opv = NULL;
	int npte;
	paddr_t opa;
	bool cacheable = true;
	bool checkpv = true;
	bool wired = (flags & PMAP_WIRED) != 0;
	bool can_fail = (flags & PMAP_CANFAIL) != 0;

	PMAP_DPRINTF(PDB_FOLLOW|PDB_ENTER,
	    ("pmap_enter(%p, %lx, %lx, %x, %x)\n",
	    pmap, va, pa, prot, wired));

#ifdef DIAGNOSTIC
	/*
	 * pmap_enter() should never be used for CADDR1 and CADDR2.
	 */
	if (pmap == pmap_kernel() &&
	    (va == (vaddr_t)CADDR1 || va == (vaddr_t)CADDR2))
		panic("pmap_enter: used for CADDR1 or CADDR2");
#endif

	/*
	 * For user mapping, allocate kernel VM resources if necessary.
	 */
	if (pmap->pm_ptab == NULL) {
		pmap->pm_ptab = (pt_entry_t *)
		    uvm_km_alloc(pt_map, M68K_MAX_PTSIZE, 0,
		    UVM_KMF_VAONLY |
		    (can_fail ? UVM_KMF_NOWAIT : UVM_KMF_WAITVA));
		if (pmap->pm_ptab == NULL)
			return ENOMEM;
	}

	/*
	 * Segment table entry not valid, we need a new PT page
	 */
	if (!pmap_ste_v(pmap, va)) {
		int err = pmap_enter_ptpage(pmap, va, can_fail);
		if (err)
			return err;
	}

	pa = m68k_trunc_page(pa);
	pte = pmap_pte(pmap, va);
	opa = pmap_pte_pa(pte);

	PMAP_DPRINTF(PDB_ENTER, ("enter: pte %p, *pte %x\n", pte, *pte));

	/*
	 * Mapping has not changed, must be protection or wiring change.
	 */
	if (opa == pa) {
		/*
		 * Wiring change, just update stats.
		 * We don't worry about wiring PT pages as they remain
		 * resident as long as there are valid mappings in them.
		 * Hence, if a user page is wired, the PT page will be also.
		 */
		if (pmap_pte_w_chg(pte, wired ? PG_W : 0)) {
			PMAP_DPRINTF(PDB_ENTER,
			    ("enter: wiring change -> %x\n", wired));
			if (wired)
				pmap->pm_stats.wired_count++;
			else
				pmap->pm_stats.wired_count--;
		}
		/*
		 * Retain cache inhibition status
		 */
		checkpv = false;
		if (pmap_pte_ci(pte))
			cacheable = false;
		goto validate;
	}

	/*
	 * Mapping has changed, invalidate old range and fall through to
	 * handle validating new mapping.
	 */
	if (opa) {
		PMAP_DPRINTF(PDB_ENTER,
		    ("enter: removing old mapping %lx\n", va));
		pmap_remove_mapping(pmap, va, pte,
		    PRM_TFLUSH|PRM_CFLUSH|PRM_KEEPPTPAGE, &opv);
	}

	/*
	 * If this is a new user mapping, increment the wiring count
	 * on this PT page.  PT pages are wired down as long as there
	 * is a valid mapping in the page.
	 */
	if (pmap != pmap_kernel())
		pmap_ptpage_addref(trunc_page((vaddr_t)pte));

	/*
	 * Enter on the PV list if part of our managed memory
	 * Note that we raise IPL while manipulating pv_table
	 * since pmap_enter can be called at interrupt time.
	 */
	if (PAGE_IS_MANAGED(pa)) {
		struct pv_header *pvh;
		struct pv_entry *pv, *npv;
		int s;

		pvh = pa_to_pvh(pa);
		pv = &pvh->pvh_first;
		s = splvm();

		PMAP_DPRINTF(PDB_ENTER,
		    ("enter: pv at %p: %lx/%p/%p\n",
		    pv, pv->pv_va, pv->pv_pmap, pv->pv_next));
		/*
		 * No entries yet, use header as the first entry
		 */
		if (pv->pv_pmap == NULL) {
			pv->pv_va = va;
			pv->pv_pmap = pmap;
			pv->pv_next = NULL;
			pv->pv_ptste = NULL;
			pv->pv_ptpmap = NULL;
			pvh->pvh_attrs = 0;
		}
		/*
		 * There is at least one other VA mapping this page.
		 * Place this entry after the header.
		 */
		else {
#ifdef DEBUG
			for (npv = pv; npv; npv = npv->pv_next)
				if (pmap == npv->pv_pmap && va == npv->pv_va)
					panic("pmap_enter: already in pv_tab");
#endif
			/*
			 * Reuse the pv_entry recycled from the old
			 * mapping (if any) before allocating a new one.
			 */
			if (opv != NULL) {
				npv = opv;
				opv = NULL;
			} else {
				npv = pmap_alloc_pv();
			}
			KASSERT(npv != NULL);
			npv->pv_va = va;
			npv->pv_pmap = pmap;
			npv->pv_next = pv->pv_next;
			npv->pv_ptste = NULL;
			npv->pv_ptpmap = NULL;
			pv->pv_next = npv;

#ifdef CACHE_HAVE_VAC

			/*
			 * Since there is another logical mapping for the
			 * same page we may need to cache-inhibit the
			 * descriptors on those CPUs with external VACs.
			 * We don't need to CI if:
			 *
			 * - No two mappings belong to the same user pmaps.
			 *   Since the cache is flushed on context switches
			 *   there is no problem between user processes.
			 *
			 * - Mappings within a single pmap are a certain
			 *   magic distance apart.  VAs at these appropriate
			 *   boundaries map to the same cache entries or
			 *   otherwise don't conflict.
			 *
			 * To keep it simple, we only check for these special
			 * cases if there are only two mappings, otherwise we
			 * punt and always CI.
			 *
			 * Note that there are no aliasing problems with the
			 * on-chip data-cache when the WA bit is set.
			 */

			if (pmap_aliasmask) {
				if (pvh->pvh_attrs & PVH_CI) {
					PMAP_DPRINTF(PDB_CACHE,
					    ("enter: pa %lx already CI'ed\n",
					    pa));
					checkpv = cacheable = false;
				} else if (npv->pv_next ||
					   ((pmap == pv->pv_pmap ||
					     pmap == pmap_kernel() ||
					     pv->pv_pmap == pmap_kernel()) &&
					    ((pv->pv_va & pmap_aliasmask) !=
					     (va & pmap_aliasmask)))) {
					PMAP_DPRINTF(PDB_CACHE,
					    ("enter: pa %lx CI'ing all\n",
					    pa));
					cacheable = false;
					pvh->pvh_attrs |= PVH_CI;
				}
			}
#endif
		}

		/*
		 * Speed pmap_is_referenced() or pmap_is_modified() based
		 * on the hint provided in access_type.
		 */
#ifdef DIAGNOSTIC
		if ((flags & VM_PROT_ALL) & ~prot)
			panic("pmap_enter: access_type exceeds prot");
#endif
		if (flags & VM_PROT_WRITE)
			pvh->pvh_attrs |= (PG_U|PG_M);
		else if (flags & VM_PROT_ALL)
			pvh->pvh_attrs |= PG_U;

		splx(s);
	}
	/*
	 * Assumption: if it is not part of our managed memory
	 * then it must be device memory which may be volatile.
	 */
	else if (pmap_initialized) {
		checkpv = cacheable = false;
	}

	/*
	 * Increment counters
	 */
	pmap->pm_stats.resident_count++;
	if (wired)
		pmap->pm_stats.wired_count++;

validate:
#ifdef CACHE_HAVE_VAC
	/*
	 * Purge kernel side of VAC to ensure we get correct state
	 * of HW bits so we don't clobber them.
	 */
	if (pmap_aliasmask)
		DCIS();
#endif

	/*
	 * Build the new PTE.
	 */

	npte = pa | pte_prot(pmap, prot) | (*pte & (PG_M|PG_U)) | PG_V;
	if (wired)
		npte |= PG_W;
	if (!checkpv && !cacheable)
#if defined(M68040) || defined(M68060)
#if defined(M68020) || defined(M68030)
		npte |= (mmutype == MMU_68040 ? PG_CIN : PG_CI);
#else
		npte |= PG_CIN;
#endif
#else
		npte |= PG_CI;
#endif
#if defined(M68040) || defined(M68060)
#if defined(M68020) || defined(M68030)
	else if (mmutype == MMU_68040 && (npte & (PG_PROT|PG_CI)) == PG_RW)
#else
	else if ((npte & (PG_PROT|PG_CI)) == PG_RW)
#endif
		npte |= PG_CCB;
#endif

	PMAP_DPRINTF(PDB_ENTER, ("enter: new pte value %x\n", npte));

	/*
	 * Remember if this was a wiring-only change.
	 * If so, we need not flush the TLB and caches.
	 */

	wired = ((*pte ^ npte) == PG_W);
#if defined(M68040) || defined(M68060)
#if defined(M68020) || defined(M68030)
	if (mmutype == MMU_68040 && !wired)
#else
	if (!wired)
#endif
	{
		DCFP(pa);
		ICPP(pa);
	}
#endif
	*pte = npte;
	if (!wired && active_pmap(pmap))
		TBIS(va);
#ifdef CACHE_HAVE_VAC
	/*
	 * The following is executed if we are entering a second
	 * (or greater) mapping for a physical page and the mappings
	 * may create an aliasing problem.  In this case we must
	 * cache inhibit the descriptors involved and flush any
	 * external VAC.
	 */
	if (checkpv && !cacheable) {
		pmap_changebit(pa, PG_CI, ~0);
		DCIA();
#ifdef DEBUG
		if ((pmapdebug & (PDB_CACHE|PDB_PVDUMP)) ==
		    (PDB_CACHE|PDB_PVDUMP))
			pmap_pvdump(pa);
#endif
	}
#endif
#ifdef DEBUG
	if ((pmapdebug & PDB_WIRING) && pmap != pmap_kernel())
		pmap_check_wiring("enter", trunc_page((vaddr_t)pte));
#endif

	/* Free the recycled pv_entry if it ended up unused. */
	if (opv != NULL)
		pmap_free_pv(opv);

	return 0;
}

/*
 * pmap_kenter_pa:	[ INTERFACE ]
 *
 *	Enter a wired kernel mapping for pa at va.  No PV list
 *	bookkeeping is performed, so the mapping must be removed
 *	with pmap_kremove().  The VA must not already be mapped.
 */
void
pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags)
{
	pmap_t pmap = pmap_kernel();
	pt_entry_t *pte;
	int s, npte;

	PMAP_DPRINTF(PDB_FOLLOW|PDB_ENTER,
	    ("pmap_kenter_pa(%lx, %lx, %x)\n", va, pa, prot));

	/*
	 * Segment table entry not valid, we need a new PT page
	 */

	if (!pmap_ste_v(pmap, va)) {
		s = splvm();
		pmap_enter_ptpage(pmap, va, false);
		splx(s);
	}

	pa = m68k_trunc_page(pa);
	pte = pmap_pte(pmap, va);

	PMAP_DPRINTF(PDB_ENTER, ("enter: pte %p, *pte %x\n", pte, *pte));
	KASSERT(!pmap_pte_v(pte));

	/*
	 * Increment counters
	 */

	pmap->pm_stats.resident_count++;
	pmap->pm_stats.wired_count++;

	/*
	 * Build the new PTE.
	 */

	npte = pa | pte_prot(pmap, prot) | PG_V | PG_W;
	if (flags & PMAP_NOCACHE) {
		npte |= PG_CI;
	}
#if defined(M68040) || defined(M68060)
#if defined(M68020) || defined(M68030)
	if (mmutype == MMU_68040 && (npte & (PG_PROT|PG_CI)) == PG_RW)
#else
	if ((npte & (PG_PROT|PG_CI)) == PG_RW)
#endif
		npte |= PG_CCB;

	if (mmutype == MMU_68040) {
		DCFP(pa);
		ICPP(pa);
	}
#endif

	*pte = npte;
	TBIS(va);
}

/*
 * pmap_kremove:	[ INTERFACE ]
 *
 *	Remove mappings entered with pmap_kenter_pa() in the range
 *	[va, va + size).  Counters are updated, but (as with
 *	pmap_kenter_pa()) no PV list bookkeeping is done.
 */
void
pmap_kremove(vaddr_t va, vsize_t size)
{
	pmap_t pmap = pmap_kernel();
	pt_entry_t *pte;
	vaddr_t nssva;
	vaddr_t eva = va + size;
#ifdef CACHE_HAVE_VAC
	bool firstpage, needcflush;
#endif

	PMAP_DPRINTF(PDB_FOLLOW|PDB_REMOVE|PDB_PROTECT,
	    ("pmap_kremove(%lx, %lx)\n", va, size));

#ifdef CACHE_HAVE_VAC
	firstpage = true;
	needcflush = false;
#endif
	while (va < eva) {
		nssva = m68k_trunc_seg(va) + NBSEG;
		if (nssva == 0 || nssva > eva)
			nssva = eva;

		/*
		 * If VA belongs to an unallocated segment,
		 * skip to the next segment boundary.
		 */

		if (!pmap_ste_v(pmap, va)) {
			va = nssva;
			continue;
		}

		/*
		 * Invalidate every valid mapping within this segment.
		 */

		pte = pmap_pte(pmap, va);
		while (va < nssva) {
			if (!pmap_pte_v(pte)) {
				pte++;
				va += PAGE_SIZE;
				continue;
			}
#ifdef CACHE_HAVE_VAC
			if (pmap_aliasmask) {

				/*
				 * Purge kernel side of VAC to ensure
				 * we get the correct state of any
				 * hardware maintained bits.
				 */

				if (firstpage) {
					DCIS();
					firstpage = false;
				}

				/*
				 * Remember if we may need to
				 * flush the VAC.
				 */

				needcflush = true;
			}
#endif
			pmap->pm_stats.wired_count--;
			pmap->pm_stats.resident_count--;
			*pte = PG_NV;
			TBIS(va);
			pte++;
			va += PAGE_SIZE;
		}
	}

#ifdef CACHE_HAVE_VAC

	/*
	 * In a couple of cases, we don't need to worry about flushing
	 * the VAC:
	 *	1. if this is a kernel mapping,
	 *	   we have already done it
	 *	2. if it is a user mapping not for the current process,
	 *	   it won't be there
	 */

	if (pmap_aliasmask && !active_user_pmap(pmap))
		needcflush = false;
	if (needcflush) {
		if (pmap == pmap_kernel()) {
			DCIS();
		} else {
			DCIU();
		}
	}
#endif
}

/*
 * pmap_unwire:		[ INTERFACE ]
 *
 *	Clear the wired attribute for a map/virtual-address pair.
 *
 *	The mapping must already exist in the pmap.
 */
void
pmap_unwire(pmap_t pmap, vaddr_t va)
{
	pt_entry_t *pte;

	PMAP_DPRINTF(PDB_FOLLOW,
	    ("pmap_unwire(%p, %lx)\n", pmap, va));

	pte = pmap_pte(pmap, va);

	/*
	 * If wiring actually changed (always?) clear the wire bit and
	 * update the wire count.  Note that wiring is not a hardware
	 * characteristic so there is no need to invalidate the TLB.
	 */

	if (pmap_pte_w_chg(pte, 0)) {
		pmap_pte_set_w(pte, false);
		pmap->pm_stats.wired_count--;
	}
}

/*
 * pmap_extract:	[ INTERFACE ]
 *
 *	Extract the physical address associated with the given
 *	pmap/virtual address pair.  Returns true and stores the
 *	address via pap (if non-NULL) when a valid mapping exists.
 */
bool
pmap_extract(pmap_t pmap, vaddr_t va, paddr_t *pap)
{
	paddr_t pa;
	u_int pte;

	PMAP_DPRINTF(PDB_FOLLOW,
	    ("pmap_extract(%p, %lx) -> ", pmap, va));

	if (pmap_ste_v(pmap, va)) {
		pte = *(u_int *)pmap_pte(pmap, va);
		if (pte) {
			/* Combine frame number with the in-page offset. */
			pa = (pte & PG_FRAME) | (va & ~PG_FRAME);
			if (pap != NULL)
				*pap = pa;
#ifdef DEBUG
			if (pmapdebug & PDB_FOLLOW)
				printf("%lx\n", pa);
#endif
			return true;
		}
	}
#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("failed\n");
#endif
	return false;
}

/*
 * vtophys:		[ INTERFACE-ish ]
 *
 *	Kernel virtual to physical.  Use with caution.
 *	Asserts (under DIAGNOSTIC) that the translation exists.
 */
paddr_t
vtophys(vaddr_t va)
{
	paddr_t pa;

	if (pmap_extract(pmap_kernel(), va, &pa))
		return pa;
	KASSERT(0);
	return (paddr_t) -1;
}

/*
 * pmap_copy:		[ INTERFACE ]
 *
 *	Copy the mapping range specified by src_addr/len
 *	from the source map to the range dst_addr/len
 *	in the destination map.
 *
 *	This routine is only advisory and need not do anything.
 */
void
pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vaddr_t dst_addr, vsize_t len,
    vaddr_t src_addr)
{

	PMAP_DPRINTF(PDB_FOLLOW,
	    ("pmap_copy(%p, %p, %lx, %lx, %lx)\n",
	    dst_pmap, src_pmap, dst_addr, len, src_addr));
}

/*
 * pmap_collect1():
 *
 *	Garbage-collect KPT pages.  Helper for the above (bogus)
 *	pmap_collect().
 *
 *	Note: THIS SHOULD GO AWAY, AND BE REPLACED WITH A BETTER
 *	WAY OF HANDLING PT PAGES!
 */
static inline void
pmap_collect1(pmap_t pmap, paddr_t startpa, paddr_t endpa)
{
	paddr_t pa;
	struct pv_header *pvh;
	struct pv_entry *pv;
	pt_entry_t *pte;
	paddr_t kpa;
#ifdef DEBUG
	st_entry_t *ste;
	int opmapdebug = 0;
#endif

	for (pa = startpa; pa < endpa; pa += PAGE_SIZE) {
		struct kpt_page *kpt, **pkpt;

		/*
		 * Locate physical pages which are being used as kernel
		 * page table pages.
		 */

		pvh = pa_to_pvh(pa);
		pv = &pvh->pvh_first;
		if (pv->pv_pmap != pmap_kernel() ||
		    !(pvh->pvh_attrs & PVH_PTPAGE))
			continue;
		do {
			if (pv->pv_ptste && pv->pv_ptpmap == pmap_kernel())
				break;
		} while ((pv = pv->pv_next));
		if (pv == NULL)
			continue;
#ifdef DEBUG
		if (pv->pv_va < (vaddr_t)Sysmap ||
		    pv->pv_va >= (vaddr_t)Sysmap + M68K_MAX_PTSIZE) {
			printf("collect: kernel PT VA out of range\n");
			pmap_pvdump(pa);
			continue;
		}
#endif
		/*
		 * Scan the PT page backwards; bail out (continue) as
		 * soon as any valid PTE is found.
		 */
		pte = (pt_entry_t *)(pv->pv_va + PAGE_SIZE);
		while (--pte >= (pt_entry_t *)pv->pv_va && *pte == PG_NV)
			;
		if (pte >= (pt_entry_t *)pv->pv_va)
			continue;

#ifdef DEBUG
		if (pmapdebug & (PDB_PTPAGE|PDB_COLLECT)) {
			printf("collect: freeing KPT page at %lx (ste %x@%p)\n",
			    pv->pv_va, *pv->pv_ptste, pv->pv_ptste);
			opmapdebug = pmapdebug;
			pmapdebug |= PDB_PTPAGE;
		}

		ste = pv->pv_ptste;
#endif
		/*
		 * If all entries were invalid we can remove the page.
		 * We call pmap_remove_entry to take care of invalidating
		 * ST and Sysptmap entries.
		 */

		if (!pmap_extract(pmap, pv->pv_va, &kpa)) {
			printf("collect: freeing KPT page at %lx (ste %x@%p)\n",
			    pv->pv_va, *pv->pv_ptste, pv->pv_ptste);
			panic("pmap_collect: mapping not found");
		}
		pmap_remove_mapping(pmap, pv->pv_va, NULL,
		    PRM_TFLUSH|PRM_CFLUSH, NULL);

		/*
		 * Use the physical address to locate the original
		 * (kmem_alloc assigned) address for the page and put
		 * that page back on the free list.
		 */

		for (pkpt = &kpt_used_list, kpt = *pkpt;
		     kpt != NULL;
		     pkpt = &kpt->kpt_next, kpt = *pkpt)
			if (kpt->kpt_pa == kpa)
				break;
#ifdef DEBUG
		if (kpt == NULL)
			panic("pmap_collect: lost a KPT page");
		if (pmapdebug & (PDB_PTPAGE|PDB_COLLECT))
			printf("collect: %lx (%lx) to free list\n",
			    kpt->kpt_va, kpa);
#endif
		*pkpt = kpt->kpt_next;
		kpt->kpt_next = kpt_free_list;
		kpt_free_list = kpt;
#ifdef DEBUG
		if (pmapdebug & (PDB_PTPAGE|PDB_COLLECT))
			pmapdebug = opmapdebug;

		if (*ste != SG_NV)
			printf("collect: kernel STE at %p still valid (%x)\n",
			    ste, *ste);
		ste = &Sysptmap[ste - pmap_ste(pmap_kernel(), 0)];
		if (*ste != SG_NV)
			printf("collect: kernel PTmap at %p still valid (%x)\n",
			    ste, *ste);
#endif
	}
}

/*
 * pmap_collect:
 *
 *	Helper for pmap_enter_ptpage().
 *
 *	Garbage collects the physical map system for pages which are no
 *	longer used.  Success need not be guaranteed -- that is, there
 *	may well be pages which are not referenced, but others may be
 *	collected.
 */
static void
pmap_collect(void)
{
	int s;
	uvm_physseg_t bank;

	/*
	 * XXX This is very bogus.  We should handle kernel PT
	 * XXX pages much differently.
	 */

	s = splvm();
	for (bank = uvm_physseg_get_first();
	     uvm_physseg_valid_p(bank);
	     bank = uvm_physseg_get_next(bank)) {
		pmap_collect1(pmap_kernel(), ptoa(uvm_physseg_get_start(bank)),
		    ptoa(uvm_physseg_get_end(bank)));
	}
	splx(s);
}

/*
 * pmap_zero_page:	[ INTERFACE ]
 *
 *	Zero the specified (machine independent) page by mapping the page
 *	into virtual memory and using memset to clear its contents, one
 *	machine dependent page at a time.
 *
 *	Note: WE DO NOT CURRENTLY LOCK THE TEMPORARY ADDRESSES!
 *	      (Actually, we go to splvm(), and since we don't
 *	      support multiple processors, this is sufficient.)
 */
void
pmap_zero_page(paddr_t phys)
{
	int npte;

	PMAP_DPRINTF(PDB_FOLLOW, ("pmap_zero_page(%lx)\n", phys));

	npte = phys | PG_V;
#ifdef CACHE_HAVE_VAC
	if (pmap_aliasmask) {

		/*
		 * Cache-inhibit the mapping on VAC machines, as we would
		 * be wasting the cache load.
		 */

		npte |= PG_CI;
	}
#endif

#if defined(M68040) || defined(M68060)
#if defined(M68020) || defined(M68030)
	if (mmutype == MMU_68040)
#endif
	{
		/*
		 * Set copyback caching on the page; this is required
		 * for cache consistency (since regular mappings are
		 * copyback as well).
		 */

		npte |= PG_CCB;
	}
#endif

	/* Map the page at the CADDR1 scratch VA and zero it. */
	*caddr1_pte = npte;
	TBIS((vaddr_t)CADDR1);

	zeropage(CADDR1);

#ifdef DEBUG
	*caddr1_pte = PG_NV;
	TBIS((vaddr_t)CADDR1);
#endif
}

/*
 * pmap_copy_page:	[ INTERFACE ]
 *
 *	Copy the specified (machine independent) page by mapping the page
 *	into virtual memory and using memcpy to copy the page, one machine
 *	dependent page at a time.
 *
 *	Note: WE DO NOT CURRENTLY LOCK THE TEMPORARY ADDRESSES!
 *	      (Actually, we go to splvm(), and since we don't
 *	      support multiple processors, this is sufficient.)
 */
void
pmap_copy_page(paddr_t src, paddr_t dst)
{
	pt_entry_t npte1, npte2;

	PMAP_DPRINTF(PDB_FOLLOW, ("pmap_copy_page(%lx, %lx)\n", src, dst));

	/* Source is mapped read-only, destination read/write. */
	npte1 = src | PG_RO | PG_V;
	npte2 = dst | PG_V;
#ifdef CACHE_HAVE_VAC
	if (pmap_aliasmask) {

		/*
		 * Cache-inhibit the mapping on VAC machines, as we would
		 * be wasting the cache load.
		 */

		npte1 |= PG_CI;
		npte2 |= PG_CI;
	}
#endif

#if defined(M68040) || defined(M68060)
#if defined(M68020) || defined(M68030)
	if (mmutype == MMU_68040)
#endif
	{
		/*
		 * Set copyback caching on the pages; this is required
		 * for cache consistency (since regular mappings are
		 * copyback as well).
		 */

		npte1 |= PG_CCB;
		npte2 |= PG_CCB;
	}
#endif

	*caddr1_pte = npte1;
	TBIS((vaddr_t)CADDR1);

	*caddr2_pte = npte2;
	TBIS((vaddr_t)CADDR2);

	copypage(CADDR1, CADDR2);

#ifdef DEBUG
	*caddr1_pte = PG_NV;
	TBIS((vaddr_t)CADDR1);

	*caddr2_pte = PG_NV;
	TBIS((vaddr_t)CADDR2);
#endif
}

/*
 * pmap_clear_modify:	[ INTERFACE ]
 *
 *	Clear the modify bits on the specified physical page.
 */
bool
pmap_clear_modify(struct vm_page *pg)
{
	paddr_t pa = VM_PAGE_TO_PHYS(pg);

	PMAP_DPRINTF(PDB_FOLLOW, ("pmap_clear_modify(%p)\n", pg));

	return pmap_changebit(pa, 0, (pt_entry_t)~PG_M);
}

/*
 * pmap_clear_reference:	[ INTERFACE ]
 *
 *	Clear the reference bit on the specified physical page.
 */
bool
pmap_clear_reference(struct vm_page *pg)
{
	paddr_t pa = VM_PAGE_TO_PHYS(pg);

	PMAP_DPRINTF(PDB_FOLLOW, ("pmap_clear_reference(%p)\n", pg));

	return pmap_changebit(pa, 0, (pt_entry_t)~PG_U);
}

/*
 * pmap_is_referenced:	[ INTERFACE ]
 *
 *	Return whether or not the specified physical page is referenced
 *	by any physical maps.
 */
bool
pmap_is_referenced(struct vm_page *pg)
{
	paddr_t pa = VM_PAGE_TO_PHYS(pg);

	return pmap_testbit(pa, PG_U);
}

/*
 * pmap_is_modified:	[ INTERFACE ]
 *
 *	Return whether or not the specified physical page is modified
 *	by any physical maps.
 */
bool
pmap_is_modified(struct vm_page *pg)
{
	paddr_t pa = VM_PAGE_TO_PHYS(pg);

	return pmap_testbit(pa, PG_M);
}

/*
 * pmap_phys_address:	[ INTERFACE ]
 *
 *	Return the physical address corresponding to the specified
 *	cookie.  Used by the device pager to decode a device driver's
 *	mmap entry point return value.
 *
 *	Note: no locking is necessary in this function.
 */
paddr_t
pmap_phys_address(paddr_t ppn)
{
	return m68k_ptob(ppn);
}

#ifdef CACHE_HAVE_VAC
/*
 * pmap_prefer:		[ INTERFACE ]
 *
 *	Find the first virtual address >= *vap that does not
 *	cause a virtually-addressed cache alias problem.
 */
void
pmap_prefer(vaddr_t foff, vaddr_t *vap)
{
	vaddr_t va;
	vsize_t d;

#ifdef M68K_MMU_MOTOROLA
	if (pmap_aliasmask)
#endif
	{
		/* Advance va so it is congruent to foff modulo the VAC size. */
		va = *vap;
		d = foff - va;
		d &= pmap_aliasmask;
		*vap = va + d;
	}
}
#endif /* CACHE_HAVE_VAC */

/*
 * Miscellaneous support routines follow
 */

/*
 * pmap_remove_mapping:
 *
 *	Invalidate a single page denoted by pmap/va.
 *
 *	If (pte != NULL), it is the already computed PTE for the page.
 *
 *	If (flags & PRM_TFLUSH), we must invalidate any TLB information.
 *
 *	If (flags & PRM_CFLUSH), we must flush/invalidate any cache
 *	information.
 *
 *	If (flags & PRM_KEEPPTPAGE), we don't free the page table page
 *	if the reference drops to zero.
 */
/* static */
void
pmap_remove_mapping(pmap_t pmap, vaddr_t va, pt_entry_t *pte, int flags,
    struct pv_entry **opvp)
{
	paddr_t pa;
	struct pv_header *pvh;
	struct pv_entry *pv, *npv, *opv = NULL;
	struct pmap *ptpmap;
	st_entry_t *ste;
	int s, bits;
#ifdef DEBUG
	pt_entry_t opte;
#endif

	PMAP_DPRINTF(PDB_FOLLOW|PDB_REMOVE|PDB_PROTECT,
	    ("pmap_remove_mapping(%p, %lx, %p, %x, %p)\n",
	    pmap, va, pte, flags, opvp));

	/*
	 * PTE not provided, compute it from pmap and va.
	 */

	if (pte == NULL) {
		pte = pmap_pte(pmap, va);
		if (*pte == PG_NV)
			return;
	}

#ifdef CACHE_HAVE_VAC
	if (pmap_aliasmask && (flags & PRM_CFLUSH)) {

		/*
		 * Purge kernel side of VAC to ensure we get the correct
		 * state of any hardware maintained bits.
		 */

		DCIS();

		/*
		 * If this is a non-CI user mapping for the current process,
		 * flush the VAC.  Note that the kernel side was flushed
		 * above so we don't worry about non-CI kernel mappings.
		 */

		if (active_user_pmap(pmap) && !pmap_pte_ci(pte)) {
			DCIU();
		}
	}
#endif

	pa = pmap_pte_pa(pte);
#ifdef DEBUG
	opte = *pte;
#endif

	/*
	 * Update statistics
	 */

	if (pmap_pte_w(pte))
		pmap->pm_stats.wired_count--;
	pmap->pm_stats.resident_count--;

#if defined(M68040) || defined(M68060)
#if defined(M68020) || defined(M68030)
	if (mmutype == MMU_68040)
#endif
	if ((flags & PRM_CFLUSH)) {
		DCFP(pa);
		ICPP(pa);
	}
#endif

	/*
	 * Invalidate the PTE after saving the reference modify info.
	 */

	PMAP_DPRINTF(PDB_REMOVE, ("remove: invalidating pte at %p\n", pte));
	bits = *pte & (PG_U|PG_M);
	*pte = PG_NV;
	if ((flags & PRM_TFLUSH) && active_pmap(pmap))
		TBIS(va);

	/*
	 * For user mappings decrement the wiring count on
	 * the PT page.
	 */

	if (pmap != pmap_kernel()) {
		vaddr_t ptpva = trunc_page((vaddr_t)pte);
		int refs = pmap_ptpage_delref(ptpva);
#ifdef DEBUG
		if (pmapdebug & PDB_WIRING)
			pmap_check_wiring("remove", ptpva);
#endif

		/*
		 * If reference count drops to 0, and we're not instructed
		 * to keep it around, free the PT page.
		 */

		if (refs == 0 && (flags & PRM_KEEPPTPAGE) == 0) {
#ifdef DIAGNOSTIC
			struct pv_header *ptppvh;
			struct pv_entry *ptppv;
#endif
			paddr_t ptppa;

			ptppa = pmap_pte_pa(pmap_pte(pmap_kernel(), ptpva));
#ifdef DIAGNOSTIC
			if (PAGE_IS_MANAGED(ptppa) == 0)
				panic("pmap_remove_mapping: unmanaged PT page");
			ptppvh = pa_to_pvh(ptppa);
			ptppv = &ptppvh->pvh_first;
			if (ptppv->pv_ptste == NULL)
				panic("pmap_remove_mapping: ptste == NULL");
			if (ptppv->pv_pmap != pmap_kernel() ||
			    ptppv->pv_va != ptpva ||
			    ptppv->pv_next != NULL)
				panic("pmap_remove_mapping: "
				    "bad PT page pmap %p, va 0x%lx, next %p",
				    ptppv->pv_pmap, ptppv->pv_va,
				    ptppv->pv_next);
#endif
			/*
			 * Recurse (once) to tear down the kernel mapping
			 * of the PT page itself, then free the page.
			 */
			pmap_remove_mapping(pmap_kernel(), ptpva,
			    NULL, PRM_TFLUSH|PRM_CFLUSH, NULL);
			rw_enter(uvm_kernel_object->vmobjlock, RW_WRITER);
			uvm_pagefree(PHYS_TO_VM_PAGE(ptppa));
			rw_exit(uvm_kernel_object->vmobjlock);
			PMAP_DPRINTF(PDB_REMOVE|PDB_PTPAGE,
			    ("remove: PT page 0x%lx (0x%lx) freed\n",
			    ptpva, ptppa));
		}
	}

	/*
	 * If this isn't a managed page, we are all done.
	 */

	if (PAGE_IS_MANAGED(pa) == 0)
		return;

	/*
	 * Otherwise remove it from the PV table
	 * (raise IPL since we may be called at interrupt time).
	 */

	pvh = pa_to_pvh(pa);
	pv = &pvh->pvh_first;
	ste = NULL;
	s = splvm();

	/*
	 * If it is the first entry on the list, it is actually
	 * in the header and we must copy the following entry up
	 * to the header.  Otherwise we must search the list for
	 * the entry.  In either case we free the now unused entry.
	 */

	if (pmap == pv->pv_pmap && va == pv->pv_va) {
		ste = pv->pv_ptste;
		ptpmap = pv->pv_ptpmap;
		npv = pv->pv_next;
		if (npv) {
			*pv = *npv;
			opv = npv;
		} else
			pv->pv_pmap = NULL;
	} else {
		for (npv = pv->pv_next; npv; npv = npv->pv_next) {
			if (pmap == npv->pv_pmap && va == npv->pv_va)
				break;
			pv = npv;
		}
#ifdef DEBUG
		if (npv == NULL)
			panic("pmap_remove: PA not in pv_tab");
#endif
		ste = npv->pv_ptste;
		ptpmap = npv->pv_ptpmap;
		pv->pv_next = npv->pv_next;
		opv = npv;
		pvh = pa_to_pvh(pa);
		pv = &pvh->pvh_first;
	}

#ifdef CACHE_HAVE_VAC

	/*
	 * If only one mapping left we no longer need to cache inhibit
	 */

	if (pmap_aliasmask &&
	    pv->pv_pmap && pv->pv_next == NULL && (pvh->pvh_attrs & PVH_CI)) {
		PMAP_DPRINTF(PDB_CACHE,
		    ("remove: clearing CI for pa %lx\n", pa));
		pvh->pvh_attrs &= ~PVH_CI;
		pmap_changebit(pa, 0, (pt_entry_t)~PG_CI);
#ifdef DEBUG
		if ((pmapdebug & (PDB_CACHE|PDB_PVDUMP)) ==
		    (PDB_CACHE|PDB_PVDUMP))
			pmap_pvdump(pa);
#endif
	}
#endif

	/*
	 * If this was a PT page we must also remove the
	 * mapping from the associated segment table.
	 */

	if (ste) {
		PMAP_DPRINTF(PDB_REMOVE|PDB_PTPAGE,
		    ("remove: ste was %x@%p pte was %x@%p\n",
		    *ste, ste, opte, pmap_pte(pmap, va)));
#if defined(M68040) || defined(M68060)
#if defined(M68020) || defined(M68030)
		if (mmutype == MMU_68040)
#endif
		{
			/*
			 * On the 040/060 a PT page is mapped by a run of
			 * level 3 descriptors; invalidate the whole run.
			 */
			st_entry_t *este = &ste[NPTEPG/SG4_LEV3SIZE];

			while (ste < este)
				*ste++ = SG_NV;
#ifdef DEBUG
			ste -= NPTEPG/SG4_LEV3SIZE;
#endif
		}
#if defined(M68020) || defined(M68030)
		else
#endif
#endif
#if defined(M68020) || defined(M68030)
		*ste = SG_NV;
#endif

		/*
		 * If it was a user PT page, we decrement the
		 * reference count on the segment table as well,
		 * freeing it if it is now empty.
		 */

		if (ptpmap != pmap_kernel()) {
			PMAP_DPRINTF(PDB_REMOVE|PDB_SEGTAB,
			    ("remove: stab %p, refcnt %d\n",
			    ptpmap->pm_stab, ptpmap->pm_sref - 1));
#ifdef DEBUG
			if ((pmapdebug & PDB_PARANOIA) &&
			    ptpmap->pm_stab !=
			     (st_entry_t *)trunc_page((vaddr_t)ste))
				panic("remove: bogus ste");
#endif
			if (--(ptpmap->pm_sref) == 0) {
				PMAP_DPRINTF(PDB_REMOVE|PDB_SEGTAB,
				    ("remove: free stab %p\n",
				    ptpmap->pm_stab));
				uvm_km_free(st_map, (vaddr_t)ptpmap->pm_stab,
				    M68K_STSIZE, UVM_KMF_WIRED);
				ptpmap->pm_stab = Segtabzero;
				ptpmap->pm_stpa = Segtabzeropa;
#if defined(M68040) || defined(M68060)
#if defined(M68020) || defined(M68030)
				if (mmutype == MMU_68040)
#endif
					ptpmap->pm_stfree = protostfree;
#endif
				/*
				 * Segment table has changed; reload the
				 * MMU if it's the active user pmap.
				 */
				if (active_user_pmap(ptpmap)) {
					pmap_load_urp((paddr_t)ptpmap->pm_stpa);
				}
			}
		}
		pvh->pvh_attrs &= ~PVH_PTPAGE;
		ptpmap->pm_ptpages--;
	}

	/*
	 * Update saved attributes for managed page
	 */

	pvh->pvh_attrs |= bits;
	splx(s);

	/*
	 * Hand the detached pv_entry back to the caller for reuse,
	 * or free it if the caller doesn't want it.
	 */
	if (opvp != NULL)
		*opvp = opv;
	else if (opv != NULL)
		pmap_free_pv(opv);
}

/*
 * pmap_testbit:
 *
 *	Test the modified/referenced bits of a physical page.
 */
/* static */
bool
pmap_testbit(paddr_t pa, int bit)
{
	struct pv_header *pvh;
	struct pv_entry *pv;
	pt_entry_t *pte;
	int s;

	pvh = pa_to_pvh(pa);
	pv = &pvh->pvh_first;
	s = splvm();

	/*
	 * Check saved info first
	 */

	if (pvh->pvh_attrs & bit) {
		splx(s);
		return true;
	}

#ifdef CACHE_HAVE_VAC

	/*
	 * Flush VAC to get correct state of any hardware maintained bits.
	 */

	if (pmap_aliasmask && (bit & (PG_U|PG_M)))
		DCIS();
#endif

	/*
	 * Not found.  Check current mappings, returning immediately if
	 * found.  Cache a hit to speed future lookups.
	 */

	if (pv->pv_pmap != NULL) {
		for (; pv; pv = pv->pv_next) {
			pte = pmap_pte(pv->pv_pmap, pv->pv_va);
			if (*pte & bit) {
				pvh->pvh_attrs |= bit;
				splx(s);
				return true;
			}
		}
	}
	splx(s);
	return false;
}

/*
 * pmap_changebit:
 *
 *	Change the modified/referenced bits, or other PTE bits,
 *	for a physical page.
 */
/*
 * set  - PTE bit(s) to OR into each mapping's PTE.
 * mask - AND-mask applied afterwards; PTE bits absent from `mask'
 *	  are cleared.  (npte = (*pte | set) & mask)
 *
 * Returns true if any cached attribute bit or any PTE was changed.
 */
/* static */
bool
pmap_changebit(paddr_t pa, pt_entry_t set, pt_entry_t mask)
{
	struct pv_header *pvh;
	struct pv_entry *pv;
	pt_entry_t *pte, npte;
	vaddr_t va;
	int s;
#if defined(CACHE_HAVE_VAC) || defined(M68040) || defined(M68060)
	/* Ensures the per-page cache flush below is done at most once. */
	bool firstpage = true;
#endif
	bool r;

	PMAP_DPRINTF(PDB_BITS,
	    ("pmap_changebit(%lx, %x, %x)\n", pa, set, mask));

	pvh = pa_to_pvh(pa);
	pv = &pvh->pvh_first;
	s = splvm();

	/*
	 * Clear saved attributes (modify, reference)
	 */

	/* Any attribute bits cleared here count as a change. */
	r = (pvh->pvh_attrs & ~mask) != 0;
	pvh->pvh_attrs &= mask;

	/*
	 * Loop over all current mappings setting/clearing as appropriate
	 * If setting RO do we need to clear the VAC?
	 */

	if (pv->pv_pmap != NULL) {
#ifdef DEBUG
		int toflush = 0;
#endif
		for (; pv; pv = pv->pv_next) {
#ifdef DEBUG
			toflush |= (pv->pv_pmap == pmap_kernel()) ? 2 : 1;
#endif
			va = pv->pv_va;
			pte = pmap_pte(pv->pv_pmap, va);
#ifdef CACHE_HAVE_VAC

			/*
			 * Flush VAC to ensure we get correct state of HW bits
			 * so we don't clobber them.
			 */

			if (firstpage && pmap_aliasmask) {
				firstpage = false;
				DCIS();
			}
#endif
			npte = (*pte | set) & mask;
			if (*pte != npte) {
				r = true;
#if defined(M68040) || defined(M68060)
				/*
				 * If we are changing caching status or
				 * protection make sure the caches are
				 * flushed (but only once).
				 */
				if (firstpage &&
#if defined(M68020) || defined(M68030)
				    (mmutype == MMU_68040) &&
#endif
				    ((set == PG_RO) ||
				     (set & PG_CMASK) ||
				     (mask & PG_CMASK) == 0)) {
					firstpage = false;
					/* Flush/purge this physical page
					 * from the 040/060 caches. */
					DCFP(pa);
					ICPP(pa);
				}
#endif
				*pte = npte;
				/* Only invalidate the TLB entry if the
				 * owning pmap is currently loaded. */
				if (active_pmap(pv->pv_pmap))
					TBIS(va);
			}
		}
	}
	splx(s);
	return r;
}

/*
 * pmap_enter_ptpage:
 *
 *	Allocate and map a PT page for the specified pmap/va pair.
 *
 *	Returns 0 on success, or ENOMEM when `can_fail' is true and no
 *	memory is available (otherwise the routine waits, or panics in
 *	the kernel-pmap path, rather than fail).
 */
/* static */
int
pmap_enter_ptpage(pmap_t pmap, vaddr_t va, bool can_fail)
{
	paddr_t ptpa;
	struct vm_page *pg;
	struct pv_header *pvh;
	struct pv_entry *pv;
	st_entry_t *ste;
	int s;

	PMAP_DPRINTF(PDB_FOLLOW|PDB_ENTER|PDB_PTPAGE,
	    ("pmap_enter_ptpage: pmap %p, va %lx\n", pmap, va));

	/*
	 * Allocate a segment table if necessary.  Note that it is allocated
	 * from a private map and not pt_map.  This keeps user page tables
	 * aligned on segment boundaries in the kernel address space.
	 * The segment table is wired down.  It will be freed whenever the
	 * reference count drops to zero.
	 */
	if (pmap->pm_stab == Segtabzero) {
		pmap->pm_stab = (st_entry_t *)
		    uvm_km_alloc(st_map, M68K_STSIZE, 0,
		    UVM_KMF_WIRED | UVM_KMF_ZERO |
		    (can_fail ? UVM_KMF_NOWAIT : 0));
		if (pmap->pm_stab == NULL) {
			/* NOWAIT allocation failed; restore the shared
			 * zero segment table and report ENOMEM. */
			pmap->pm_stab = Segtabzero;
			return ENOMEM;
		}
		(void) pmap_extract(pmap_kernel(), (vaddr_t)pmap->pm_stab,
		    (paddr_t *)&pmap->pm_stpa);
#if defined(M68040) || defined(M68060)
#if defined(M68020) || defined(M68030)
		if (mmutype == MMU_68040)
#endif
		{
			pt_entry_t *pte;

			/*
			 * Make the segment-table mapping cache-inhibited
			 * (PG_CI).  NOTE(review): presumably because the
			 * 040/060 hardware table walk is not coherent with
			 * copyback caching -- confirm against the 68040
			 * manual.
			 */
			pte = pmap_pte(pmap_kernel(), pmap->pm_stab);
			*pte = (*pte & ~PG_CMASK) | PG_CI;
			pmap->pm_stfree = protostfree;
		}
#endif
		/*
		 * Segment table has changed; reload the
		 * MMU if it's the active user pmap.
		 */
		if (active_user_pmap(pmap)) {
			pmap_load_urp((paddr_t)pmap->pm_stpa);
		}

		PMAP_DPRINTF(PDB_ENTER|PDB_PTPAGE|PDB_SEGTAB,
		    ("enter: pmap %p stab %p(%p)\n",
		    pmap, pmap->pm_stab, pmap->pm_stpa));
	}

	ste = pmap_ste(pmap, va);
#if defined(M68040) || defined(M68060)
	/*
	 * Allocate level 2 descriptor block if necessary
	 */
#if defined(M68020) || defined(M68030)
	if (mmutype == MMU_68040)
#endif
	{
		if (*ste == SG_NV) {
			int ix;
			void *addr;

			/* Grab a free level-2 block from the bitmap. */
			ix = bmtol2(pmap->pm_stfree);
			if (ix == -1)
				panic("enter: out of address space"); /* XXX */
			pmap->pm_stfree &= ~l2tobm(ix);
			addr = (void *)&pmap->pm_stab[ix*SG4_LEV2SIZE];
			memset(addr, 0, SG4_LEV2SIZE*sizeof(st_entry_t));
			addr = (void *)&pmap->pm_stpa[ix*SG4_LEV2SIZE];
			*ste = (u_int)addr | SG_RW | SG_U | SG_V;

			PMAP_DPRINTF(PDB_ENTER|PDB_PTPAGE|PDB_SEGTAB,
			    ("enter: alloc ste2 %d(%p)\n", ix, addr));
		}
		ste = pmap_ste2(pmap, va);
		/*
		 * Since a level 2 descriptor maps a block of SG4_LEV3SIZE
		 * level 3 descriptors, we need a chunk of NPTEPG/SG4_LEV3SIZE
		 * (16) such descriptors (PAGE_SIZE/SG4_LEV3SIZE bytes) to map a
		 * PT page--the unit of allocation.  We set `ste' to point
		 * to the first entry of that chunk which is validated in its
		 * entirety below.
		 */
		ste = (st_entry_t *)((int)ste & ~(PAGE_SIZE/SG4_LEV3SIZE-1));

		PMAP_DPRINTF(PDB_ENTER|PDB_PTPAGE|PDB_SEGTAB,
		    ("enter: ste2 %p (%p)\n", pmap_ste2(pmap, va), ste));
	}
#endif
	/* From here on, `va' is the kernel VA of the PT page itself. */
	va = trunc_page((vaddr_t)pmap_pte(pmap, va));

	/*
	 * In the kernel we allocate a page from the kernel PT page
	 * free list and map it into the kernel page table map (via
	 * pmap_enter).
	 */
	if (pmap == pmap_kernel()) {
		struct kpt_page *kpt;

		s = splvm();
		if ((kpt = kpt_free_list) == NULL) {
			/*
			 * No PT pages available.
			 * Try once to free up unused ones.
			 */
			PMAP_DPRINTF(PDB_COLLECT,
			    ("enter: no KPT pages, collecting...\n"));
			pmap_collect();
			if ((kpt = kpt_free_list) == NULL)
				panic("pmap_enter_ptpage: can't get KPT page");
		}
		/* Move the KPT page from the free to the used list. */
		kpt_free_list = kpt->kpt_next;
		kpt->kpt_next = kpt_used_list;
		kpt_used_list = kpt;
		ptpa = kpt->kpt_pa;
		memset((void *)kpt->kpt_va, 0, PAGE_SIZE);
		pmap_enter(pmap, va, ptpa, VM_PROT_READ | VM_PROT_WRITE,
		    VM_PROT_READ | VM_PROT_WRITE | PMAP_WIRED);
		pmap_update(pmap);
#ifdef DEBUG
		if (pmapdebug & (PDB_ENTER|PDB_PTPAGE)) {
			int ix = pmap_ste(pmap, va) - pmap_ste(pmap, 0);

			printf("enter: add &Sysptmap[%d]: %x (KPT page %lx)\n",
			    ix, Sysptmap[ix], kpt->kpt_va);
		}
#endif
		splx(s);
	} else {

		/*
		 * For user processes we just allocate a page from the
		 * VM system.  Note that we set the page "wired" count to 1,
		 * which is what we use to check if the page can be freed.
		 * See pmap_remove_mapping().
		 *
		 * Count the segment table reference first so that we won't
		 * lose the segment table when low on memory.
		 */

		pmap->pm_sref++;
		PMAP_DPRINTF(PDB_ENTER|PDB_PTPAGE,
		    ("enter: about to alloc UPT pg at %lx\n", va));
		rw_enter(uvm_kernel_object->vmobjlock, RW_WRITER);
		while ((pg = uvm_pagealloc(uvm_kernel_object,
					   va - vm_map_min(kernel_map),
					   NULL, UVM_PGA_ZERO)) == NULL) {
			rw_exit(uvm_kernel_object->vmobjlock);
			if (can_fail) {
				/* Undo the reference taken above. */
				pmap->pm_sref--;
				return ENOMEM;
			}
			/* Wait for memory, then retry the allocation. */
			uvm_wait("ptpage");
			rw_enter(uvm_kernel_object->vmobjlock, RW_WRITER);
		}
		rw_exit(uvm_kernel_object->vmobjlock);
		pg->flags &= ~(PG_BUSY|PG_FAKE);
		UVM_PAGE_OWN(pg, NULL);
		ptpa = VM_PAGE_TO_PHYS(pg);
		pmap_enter(pmap_kernel(), va, ptpa,
		    VM_PROT_READ | VM_PROT_WRITE,
		    VM_PROT_READ | VM_PROT_WRITE | PMAP_WIRED);
		pmap_update(pmap_kernel());
	}
#if defined(M68040) || defined(M68060)
	/*
	 * Turn off copyback caching of page table pages,
	 * could get ugly otherwise.
	 */
#if defined(M68020) || defined(M68030)
	if (mmutype == MMU_68040)
#endif
	{
#ifdef DEBUG
		pt_entry_t *pte = pmap_pte(pmap_kernel(), va);
		if ((pmapdebug & PDB_PARANOIA) && (*pte & PG_CCB) == 0)
			printf("%s PT no CCB: kva=%lx ptpa=%lx pte@%p=%x\n",
			    pmap == pmap_kernel() ? "Kernel" : "User",
			    va, ptpa, pte, *pte);
#endif
		if (pmap_changebit(ptpa, PG_CI, (pt_entry_t)~PG_CCB))
			DCIS();
	}
#endif
	/*
	 * Locate the PV entry in the kernel for this PT page and
	 * record the STE address.  This is so that we can invalidate
	 * the STE when we remove the mapping for the page.
	 */
	pvh = pa_to_pvh(ptpa);
	s = splvm();
	if (pvh) {
		pv = &pvh->pvh_first;
		pvh->pvh_attrs |= PVH_PTPAGE;
		do {
			if (pv->pv_pmap == pmap_kernel() && pv->pv_va == va)
				break;
		} while ((pv = pv->pv_next));
	} else {
		pv = NULL;
	}
#ifdef DEBUG
	if (pv == NULL)
		panic("pmap_enter_ptpage: PT page not entered");
#endif
	/*
	 * NOTE(review): in a non-DEBUG kernel a NULL `pv' here would be
	 * dereferenced below; the DEBUG-only panic suggests this is
	 * treated as a can't-happen condition.
	 */
	pv->pv_ptste = ste;
	pv->pv_ptpmap = pmap;

	PMAP_DPRINTF(PDB_ENTER|PDB_PTPAGE,
	    ("enter: new PT page at PA %lx, ste at %p\n", ptpa, ste));

	/*
	 * Map the new PT page into the segment table.
	 * Also increment the reference count on the segment table if this
	 * was a user page table page.  Note that we don't use vm_map_pageable
	 * to keep the count like we do for PT pages, this is mostly because
	 * it would be difficult to identify ST pages in pmap_pageable to
	 * release them.  We also avoid the overhead of vm_map_pageable.
	 */
#if defined(M68040) || defined(M68060)
#if defined(M68020) || defined(M68030)
	if (mmutype == MMU_68040)
#endif
	{
		st_entry_t *este;

		/*
		 * Validate the whole chunk of descriptors, pointing each
		 * one at successive SG4_LEV3SIZE-entry pieces of the new
		 * PT page.
		 */
		for (este = &ste[NPTEPG/SG4_LEV3SIZE]; ste < este; ste++) {
			*ste = ptpa | SG_U | SG_RW | SG_V;
			ptpa += SG4_LEV3SIZE * sizeof(st_entry_t);
		}
	}
#if defined(M68020) || defined(M68030)
	else
		*ste = (ptpa & SG_FRAME) | SG_RW | SG_V;
#endif
#else
	*ste = (ptpa & SG_FRAME) | SG_RW | SG_V;
#endif
	if (pmap != pmap_kernel()) {
		PMAP_DPRINTF(PDB_ENTER|PDB_PTPAGE|PDB_SEGTAB,
		    ("enter: stab %p refcnt %d\n",
		    pmap->pm_stab, pmap->pm_sref));
	}
	/*
	 * Flush stale TLB info.
	 */
	if (pmap == pmap_kernel())
		TBIAS();
	else
		TBIAU();
	pmap->pm_ptpages++;
	splx(s);

	return 0;
}

/*
 * pmap_ptpage_addref:
 *
 *	Add a reference to the specified PT page.
 */
/*
 * `ptpva' is the kernel VA of the PT page; the backing vm_page is
 * looked up in uvm_kernel_object and its wire count is used as the
 * reference count.
 */
void
pmap_ptpage_addref(vaddr_t ptpva)
{
	struct vm_page *pg;

	rw_enter(uvm_kernel_object->vmobjlock, RW_WRITER);
	pg = uvm_pagelookup(uvm_kernel_object, ptpva - vm_map_min(kernel_map));
	pg->wire_count++;
	PMAP_DPRINTF(PDB_ENTER|PDB_PTPAGE|PDB_SEGTAB,
	    ("ptpage addref: pg %p now %d\n",
	    pg, pg->wire_count));
	rw_exit(uvm_kernel_object->vmobjlock);
}

/*
 * pmap_ptpage_delref:
 *
 *	Delete a reference to the specified PT page.
 *
 *	Returns the remaining wire (reference) count so the caller can
 *	tell when the PT page becomes free; see pmap_remove_mapping().
 */
int
pmap_ptpage_delref(vaddr_t ptpva)
{
	struct vm_page *pg;
	int rv;

	rw_enter(uvm_kernel_object->vmobjlock, RW_WRITER);
	pg = uvm_pagelookup(uvm_kernel_object, ptpva - vm_map_min(kernel_map));
	rv = --pg->wire_count;
	PMAP_DPRINTF(PDB_ENTER|PDB_PTPAGE|PDB_SEGTAB,
	    ("ptpage delref: pg %p now %d\n",
	    pg, pg->wire_count));
	rw_exit(uvm_kernel_object->vmobjlock);
	return rv;
}

/*
 * Routine:	pmap_procwr
 *
 * Function:
 *	Synchronize caches corresponding to [addr, addr + len) in p.
 */
void
pmap_procwr(struct proc *p, vaddr_t va, size_t len)
{

	/*
	 * NOTE(review): 0x80000004 is the cachectl1() operation code
	 * selecting the cache sync to perform on the range -- confirm
	 * the magic value against cachectl1()'s definition.
	 */
	(void)cachectl1(0x80000004, va, len, p);
}

/*
 * _pmap_set_page_cacheable:
 *
 *	Make the page mapped at `va' in `pmap' cacheable: on a 68040/060
 *	set copyback caching (PG_CCB) and clear cache inhibit (PG_CI);
 *	on other MMUs just clear PG_CI.  No-op if `va' has no valid
 *	segment mapping.
 */
void
_pmap_set_page_cacheable(pmap_t pmap, vaddr_t va)
{

	if (!pmap_ste_v(pmap, va))
		return;

#if defined(M68040) || defined(M68060)
#if defined(M68020) || defined(M68030)
	if (mmutype == MMU_68040) {
#endif
	/* Flush the data cache only if a PTE actually changed. */
	if (pmap_changebit(pmap_pte_pa(pmap_pte(pmap, va)), PG_CCB,
	    (pt_entry_t)~PG_CI))
		DCIS();

#if defined(M68020) || defined(M68030)
	} else
		pmap_changebit(pmap_pte_pa(pmap_pte(pmap, va)), 0,
		    (pt_entry_t)~PG_CI);
#endif
#else
	pmap_changebit(pmap_pte_pa(pmap_pte(pmap, va)), 0,
	    (pt_entry_t)~PG_CI);
#endif
}

/*
 * _pmap_set_page_cacheinhibit:
 *
 *	Inverse of _pmap_set_page_cacheable(): set PG_CI (and on the
 *	68040/060 clear PG_CCB) for the page mapped at `va' in `pmap'.
 *	No-op if `va' has no valid segment mapping.
 */
void
_pmap_set_page_cacheinhibit(pmap_t pmap, vaddr_t va)
{

	if (!pmap_ste_v(pmap, va))
		return;

#if defined(M68040) || defined(M68060)
#if defined(M68020) || defined(M68030)
	if (mmutype == MMU_68040) {
#endif
	/* Flush the data cache only if a PTE actually changed. */
	if (pmap_changebit(pmap_pte_pa(pmap_pte(pmap, va)), PG_CI,
	    (pt_entry_t)~PG_CCB))
		DCIS();
#if defined(M68020) || defined(M68030)
	} else
		pmap_changebit(pmap_pte_pa(pmap_pte(pmap, va)), PG_CI, ~0);
#endif
#else
	pmap_changebit(pmap_pte_pa(pmap_pte(pmap, va)), PG_CI, ~0);
#endif
}

/*
 * _pmap_page_is_cacheable:
 *
 *	Return 1 if the page mapped at `va' in `pmap' does not have
 *	cache inhibit set in its PTE, 0 otherwise (including when `va'
 *	has no valid segment mapping).
 */
int
_pmap_page_is_cacheable(pmap_t pmap, vaddr_t va)
{

	if (!pmap_ste_v(pmap, va))
		return 0;

	return (pmap_pte_ci(pmap_pte(pmap, va)) == 0) ? 1 : 0;
}

/* Relocation offset: pa = (va - KERNBASE) + kernel_reloc_offset. */
vaddr_t kernel_reloc_offset;

/*
 * pmap_init_kcore_hdr:
 *
 *	Initialize the m68k kernel crash dump header with information
 *	necessary to perform KVA -> phys translations.
 *
 *	Returns a pointer to the crash dump RAM segment entries for
 *	machine-specific code to initialize.
 */
phys_ram_seg_t *
pmap_init_kcore_hdr(cpu_kcore_hdr_t *h)
{
	struct m68k_kcore_hdr *m = &h->un._m68k;
	extern char end[];	/* conventionally the linker end-of-kernel symbol */

	memset(h, 0, sizeof(*h));

	/*
	 * Initialize the `dispatcher' portion of the header.
	 */
	strcpy(h->name, machine);
	h->page_size = PAGE_SIZE;
	h->kernbase = KERNBASE;

	/*
	 * Fill in information about our MMU configuration so the
	 * dump reader can walk the page tables.
	 */
	m->mmutype = mmutype;
	m->sg_v = SG_V;
	m->sg_frame = SG_FRAME;
	m->sg_ishift = SG_ISHIFT;
	m->sg_pmask = SG_PMASK;
	m->sg40_shift1 = SG4_SHIFT1;
	m->sg40_mask2 = SG4_MASK2;
	m->sg40_shift2 = SG4_SHIFT2;
	m->sg40_mask3 = SG4_MASK3;
	m->sg40_shift3 = SG4_SHIFT3;
	m->sg40_addr1 = SG4_ADDR1;
	m->sg40_addr2 = SG4_ADDR2;
	m->pg_v = PG_V;
	m->pg_frame = PG_FRAME;

	/*
	 * Initialize pointer to kernel segment table.
	 */
	m->sysseg_pa = Sysseg_pa;

	/*
	 * Initialize relocation value such that:
	 *
	 *	pa = (va - KERNBASE) + reloc
	 */
	m->reloc = kernel_reloc_offset;

	/*
	 * Define the end of the relocatable range.
	 */
	m->relocend = (uint32_t)end;

	return m->ram_segs;
}

#ifdef DEBUG
/*
 * pmap_pvdump:
 *
 *	Dump the contents of the PV list for the specified physical page.
 */
void
pmap_pvdump(paddr_t pa)
{
	struct pv_header *pvh;
	struct pv_entry *pv;

	printf("pa %lx", pa);
	pvh = pa_to_pvh(pa);
	for (pv = &pvh->pvh_first; pv; pv = pv->pv_next)
		printf(" -> pmap %p, va %lx, ptste %p, ptpmap %p",
		    pv->pv_pmap, pv->pv_va, pv->pv_ptste, pv->pv_ptpmap);
	printf("\n");
}

/*
 * pmap_check_wiring:
 *
 *	Count the number of valid mappings in the specified PT page,
 *	and ensure that it is consistent with the number of wirings
 *	to that page that the VM system has.
2991 */ 2992 void 2993 pmap_check_wiring(const char *str, vaddr_t va) 2994 { 2995 pt_entry_t *pte; 2996 paddr_t pa; 2997 struct vm_page *pg; 2998 int count; 2999 3000 if (!pmap_ste_v(pmap_kernel(), va) || 3001 !pmap_pte_v(pmap_pte(pmap_kernel(), va))) 3002 return; 3003 3004 pa = pmap_pte_pa(pmap_pte(pmap_kernel(), va)); 3005 pg = PHYS_TO_VM_PAGE(pa); 3006 if (pg->wire_count > PAGE_SIZE / sizeof(pt_entry_t)) { 3007 panic("*%s*: 0x%lx: wire count %d", str, va, pg->wire_count); 3008 } 3009 3010 count = 0; 3011 for (pte = (pt_entry_t *)va; pte < (pt_entry_t *)(va + PAGE_SIZE); 3012 pte++) 3013 if (*pte) 3014 count++; 3015 if (pg->wire_count != count) 3016 panic("*%s*: 0x%lx: w%d/a%d", 3017 str, va, pg->wire_count, count); 3018 } 3019 #endif /* DEBUG */ 3020 3021 /* 3022 * XXX XXX XXX These are legacy remants and should go away XXX XXX XXX 3023 * (Cribbed from vm_machdep.c because they're tied to this pmap impl.) 3024 */ 3025 3026 /* 3027 * Map `size' bytes of physical memory starting at `paddr' into 3028 * kernel VA space at `vaddr'. Read/write and cache-inhibit status 3029 * are specified by `prot'. 3030 */ 3031 void 3032 physaccess(void *vaddr, void *paddr, int size, int prot) 3033 { 3034 pt_entry_t *pte; 3035 u_int page; 3036 3037 pte = kvtopte(vaddr); 3038 page = (u_int)paddr & PG_FRAME; 3039 for (size = btoc(size); size; size--) { 3040 *pte++ = PG_V | prot | page; 3041 page += PAGE_SIZE; 3042 } 3043 TBIAS(); 3044 } 3045 3046 void 3047 physunaccess(void *vaddr, int size) 3048 { 3049 pt_entry_t *pte; 3050 3051 pte = kvtopte(vaddr); 3052 for (size = btoc(size); size; size--) 3053 *pte++ = PG_NV; 3054 TBIAS(); 3055 } 3056 3057 /* 3058 * Convert kernel VA to physical address 3059 */ 3060 int 3061 kvtop(void *addr) 3062 { 3063 return (int)vtophys((vaddr_t)addr); 3064 } 3065