1 /* $NetBSD: pmap_motorola.c,v 1.94 2025/11/05 00:42:18 thorpej Exp $ */ 2 3 /*- 4 * Copyright (c) 1999 The NetBSD Foundation, Inc. 5 * All rights reserved. 6 * 7 * This code is derived from software contributed to The NetBSD Foundation 8 * by Jason R. Thorpe. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 29 * POSSIBILITY OF SUCH DAMAGE. 30 */ 31 32 /* 33 * Copyright (c) 1991, 1993 34 * The Regents of the University of California. All rights reserved. 35 * 36 * This code is derived from software contributed to Berkeley by 37 * the Systems Programming Group of the University of Utah Computer 38 * Science Department. 39 * 40 * Redistribution and use in source and binary forms, with or without 41 * modification, are permitted provided that the following conditions 42 * are met: 43 * 1. Redistributions of source code must retain the above copyright 44 * notice, this list of conditions and the following disclaimer. 45 * 2. Redistributions in binary form must reproduce the above copyright 46 * notice, this list of conditions and the following disclaimer in the 47 * documentation and/or other materials provided with the distribution. 48 * 3. Neither the name of the University nor the names of its contributors 49 * may be used to endorse or promote products derived from this software 50 * without specific prior written permission. 51 * 52 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 53 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 54 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 55 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 56 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 57 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 58 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 59 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 60 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 61 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 62 * SUCH DAMAGE. 63 * 64 * @(#)pmap.c 8.6 (Berkeley) 5/27/94 65 */ 66 67 /* 68 * Motorola m68k-family physical map management code. 69 * 70 * Supports: 71 * 68020 with 68851 MMU 72 * 68020 with HP MMU 73 * 68030 with on-chip MMU 74 * 68040 with on-chip MMU 75 * 68060 with on-chip MMU 76 * 77 * Notes: 78 * Don't even pay lip service to multiprocessor support. 79 * 80 * We assume TLB entries don't have process tags (except for the 81 * supervisor/user distinction) so we only invalidate TLB entries 82 * when changing mappings for the current (or kernel) pmap. This is 83 * technically not true for the 68851 but we flush the TLB on every 84 * context switch, so it effectively winds up that way. 85 * 86 * Bitwise and/or operations are significantly faster than bitfield 87 * references so we use them when accessing STE/PTEs in the pmap_pte_* 88 * macros. Note also that the two are not always equivalent; e.g.: 89 * (*pte & PG_PROT) [4] != pte->pg_prot [1] 90 * and a couple of routines that deal with protection and wiring take 91 * some shortcuts that assume the and/or definitions. 92 */ 93 94 /* 95 * Manages physical address maps. 96 * 97 * In addition to hardware address maps, this 98 * module is called upon to provide software-use-only 99 * maps which may or may not be stored in the same 100 * form as hardware maps. These pseudo-maps are 101 * used to store intermediate results from copy 102 * operations to and from address spaces. 103 * 104 * Since the information managed by this module is 105 * also stored by the logical address mapping module, 106 * this module may throw away valid virtual-to-physical 107 * mappings at almost any time. However, invalidations 108 * of virtual-to-physical mappings must be done as 109 * requested. 110 * 111 * In order to cope with hardware architectures which 112 * make virtual-to-physical map invalidates expensive, 113 * this module may delay invalidate or reduced protection 114 * operations until such time as they are actually 115 * necessary. This module is given full information as 116 * to which processors are currently using which maps, 117 * and to when physical maps must be made correct. 118 */ 119 120 #include "opt_m68k_arch.h" 121 122 #include <sys/cdefs.h> 123 __KERNEL_RCSID(0, "$NetBSD: pmap_motorola.c,v 1.94 2025/11/05 00:42:18 thorpej Exp $"); 124 125 #include <sys/param.h> 126 #include <sys/systm.h> 127 #include <sys/proc.h> 128 #include <sys/pool.h> 129 #include <sys/cpu.h> 130 #include <sys/atomic.h> 131 132 #include <machine/pte.h> 133 #include <machine/pcb.h> 134 135 #include <uvm/uvm.h> 136 #include <uvm/uvm_physseg.h> 137 138 #include <m68k/cacheops.h> 139 140 #if !defined(M68K_MMU_MOTOROLA) && !defined(M68K_MMU_HP) 141 #error Hit the road, Jack... 142 #endif 143 144 #ifdef DEBUG 145 #define PDB_FOLLOW 0x0001 146 #define PDB_INIT 0x0002 147 #define PDB_ENTER 0x0004 148 #define PDB_REMOVE 0x0008 149 #define PDB_CREATE 0x0010 150 #define PDB_PTPAGE 0x0020 151 #define PDB_CACHE 0x0040 152 #define PDB_BITS 0x0080 153 #define PDB_COLLECT 0x0100 154 #define PDB_PROTECT 0x0200 155 #define PDB_SEGTAB 0x0400 156 #define PDB_MULTIMAP 0x0800 157 #define PDB_PARANOIA 0x2000 158 #define PDB_WIRING 0x4000 159 #define PDB_PVDUMP 0x8000 160 161 int debugmap = 0; 162 int pmapdebug = PDB_PARANOIA; 163 164 #define PMAP_DPRINTF(l, x) if (pmapdebug & (l)) printf x 165 #else /* ! DEBUG */ 166 #define PMAP_DPRINTF(l, x) /* nothing */ 167 #endif /* DEBUG */ 168 169 /* 170 * Get STEs and PTEs for user/kernel address space 171 */ 172 #if defined(M68040) || defined(M68060) 173 #define pmap_ste1(m, v) \ 174 (&((m)->pm_stab[(vaddr_t)(v) >> SG4_SHIFT1])) 175 /* XXX assumes physically contiguous ST pages (if more than one) */ 176 #define pmap_ste2(m, v) \ 177 (&((m)->pm_stab[(st_entry_t *)(*(u_int *)pmap_ste1(m, v) & SG4_ADDR1) \ 178 - (m)->pm_stpa + (((v) & SG4_MASK2) >> SG4_SHIFT2)])) 179 #if defined(M68020) || defined(M68030) 180 #define pmap_ste(m, v) \ 181 (&((m)->pm_stab[(vaddr_t)(v) \ 182 >> (mmutype == MMU_68040 ? SG4_SHIFT1 : SG_ISHIFT)])) 183 #define pmap_ste_v(m, v) \ 184 (mmutype == MMU_68040 \ 185 ? ((*pmap_ste1(m, v) & SG_V) && \ 186 (*pmap_ste2(m, v) & SG_V)) \ 187 : (*pmap_ste(m, v) & SG_V)) 188 #else 189 #define pmap_ste(m, v) \ 190 (&((m)->pm_stab[(vaddr_t)(v) >> SG4_SHIFT1])) 191 #define pmap_ste_v(m, v) \ 192 ((*pmap_ste1(m, v) & SG_V) && (*pmap_ste2(m, v) & SG_V)) 193 #endif 194 #else 195 #define pmap_ste(m, v) (&((m)->pm_stab[(vaddr_t)(v) >> SG_ISHIFT])) 196 #define pmap_ste_v(m, v) (*pmap_ste(m, v) & SG_V) 197 #endif 198 199 #define pmap_pte(m, v) (&((m)->pm_ptab[(vaddr_t)(v) >> PG_SHIFT])) 200 #define pmap_pte_pa(pte) (*(pte) & PG_FRAME) 201 #define pmap_pte_w(pte) (*(pte) & PG_W) 202 #define pmap_pte_ci(pte) (*(pte) & PG_CI) 203 #define pmap_pte_m(pte) (*(pte) & PG_M) 204 #define pmap_pte_u(pte) (*(pte) & PG_U) 205 #define pmap_pte_prot(pte) (*(pte) & PG_PROT) 206 #define pmap_pte_v(pte) (*(pte) & PG_V) 207 208 #define pmap_pte_set_w(pte, v) \ 209 if (v) *(pte) |= PG_W; else *(pte) &= ~PG_W 210 #define pmap_pte_set_prot(pte, v) \ 211 if (v) *(pte) |= PG_PROT; else *(pte) &= ~PG_PROT 212 #define pmap_pte_w_chg(pte, nw) ((nw) ^ pmap_pte_w(pte)) 213 #define pmap_pte_prot_chg(pte, np) ((np) ^ pmap_pte_prot(pte)) 214 215 /* 216 * Given a map and a machine independent protection code, 217 * convert to an m68k protection code. 218 */ 219 #define pte_prot(m, p) (protection_codes[p]) 220 u_int protection_codes[8]; 221 222 /* 223 * Kernel page table page management. 224 */ 225 struct kpt_page { 226 struct kpt_page *kpt_next; /* link on either used or free list */ 227 vaddr_t kpt_va; /* always valid kernel VA */ 228 paddr_t kpt_pa; /* PA of this page (for speed) */ 229 }; 230 struct kpt_page *kpt_free_list, *kpt_used_list; 231 struct kpt_page *kpt_pages; 232 233 /* 234 * Kernel segment/page table and page table map. 235 * The page table map gives us a level of indirection we need to dynamically 236 * expand the page table. It is essentially a copy of the segment table 237 * with PTEs instead of STEs. All are initialized in locore at boot time. 238 * Sysmap will initially contain VM_KERNEL_PT_PAGES pages of PTEs. 239 * Segtabzero is an empty segment table which all processes share til they 240 * reference something. 241 */ 242 paddr_t Sysseg_pa; 243 st_entry_t *Sysseg; 244 pt_entry_t *Sysmap, *Sysptmap; 245 st_entry_t *Segtabzero, *Segtabzeropa; 246 vsize_t Sysptsize = VM_KERNEL_PT_PAGES; 247 248 static struct pmap kernel_pmap_store; 249 struct pmap *const kernel_pmap_ptr = &kernel_pmap_store; 250 struct vm_map *st_map, *pt_map; 251 struct vm_map st_map_store, pt_map_store; 252 253 vaddr_t lwp0uarea; /* lwp0 u-area VA, initialized in bootstrap */ 254 255 paddr_t avail_start; /* PA of first available physical page */ 256 paddr_t avail_end; /* PA of last available physical page */ 257 vsize_t mem_size; /* memory size in bytes */ 258 vaddr_t virtual_avail; /* VA of first avail page (after kernel bss)*/ 259 vaddr_t virtual_end; /* VA of last avail page (end of kernel AS) */ 260 int page_cnt; /* number of pages managed by VM system */ 261 262 bool pmap_initialized = false; /* Has pmap_init completed? */ 263 264 vaddr_t m68k_uptbase = M68K_PTBASE; 265 266 struct pv_header { 267 struct pv_entry pvh_first; /* first PV entry */ 268 uint32_t pvh_attrs; /* attributes: 269 bits 0-7: PTE bits 270 bits 8-15: flags */ 271 }; 272 273 #define PVH_CI 0x10 /* all entries are cache-inhibited */ 274 #define PVH_PTPAGE 0x20 /* entry maps a page table page */ 275 276 struct pv_header *pv_table; 277 TAILQ_HEAD(pv_page_list, pv_page) pv_page_freelist; 278 int pv_nfree; 279 280 #ifdef CACHE_HAVE_VAC 281 static u_int pmap_aliasmask; /* separation at which VA aliasing ok */ 282 #endif 283 #if defined(M68040) || defined(M68060) 284 u_int protostfree; /* prototype (default) free ST map */ 285 #endif 286 287 pt_entry_t *caddr1_pte; /* PTE for CADDR1 */ 288 pt_entry_t *caddr2_pte; /* PTE for CADDR2 */ 289 290 struct pool pmap_pmap_pool; /* memory pool for pmap structures */ 291 struct pool pmap_pv_pool; /* memory pool for pv entries */ 292 293 #define pmap_alloc_pv() pool_get(&pmap_pv_pool, PR_NOWAIT) 294 #define pmap_free_pv(pv) pool_put(&pmap_pv_pool, (pv)) 295 296 #define PAGE_IS_MANAGED(pa) (pmap_initialized && uvm_pageismanaged(pa)) 297 298 static inline struct pv_header * 299 pa_to_pvh(paddr_t pa) 300 { 301 uvm_physseg_t bank = 0; /* XXX gcc4 -Wuninitialized */ 302 psize_t pg = 0; 303 304 bank = uvm_physseg_find(atop((pa)), &pg); 305 return &uvm_physseg_get_pmseg(bank)->pvheader[pg]; 306 } 307 308 /* 309 * Internal routines 310 */ 311 void pmap_remove_mapping(pmap_t, vaddr_t, pt_entry_t *, int, 312 struct pv_entry **); 313 bool pmap_testbit(paddr_t, int); 314 bool pmap_changebit(paddr_t, pt_entry_t, pt_entry_t); 315 int pmap_enter_ptpage(pmap_t, vaddr_t, bool); 316 void pmap_ptpage_addref(vaddr_t); 317 int pmap_ptpage_delref(vaddr_t); 318 void pmap_pinit(pmap_t); 319 void pmap_release(pmap_t); 320 321 #ifdef DEBUG 322 void pmap_pvdump(paddr_t); 323 void pmap_check_wiring(const char *, vaddr_t); 324 #endif 325 326 /* pmap_remove_mapping flags */ 327 #define PRM_TFLUSH 0x01 328 #define PRM_CFLUSH 0x02 329 #define PRM_KEEPPTPAGE 0x04 330 331 #define active_pmap(pm) \ 332 ((pm) == pmap_kernel() || (pm) == curproc->p_vmspace->vm_map.pmap) 333 334 #define active_user_pmap(pm) \ 335 (curproc && \ 336 (pm) != pmap_kernel() && (pm) == curproc->p_vmspace->vm_map.pmap) 337 338 static void (*pmap_load_urp_func)(paddr_t); 339 340 /* 341 * pmap_load_urp: 342 * 343 * Load the user root table into the MMU. 344 */ 345 static inline void 346 pmap_load_urp(paddr_t urp) 347 { 348 (*pmap_load_urp_func)(urp); 349 } 350 351 #ifdef CACHE_HAVE_VAC 352 /* 353 * pmap_init_vac: 354 * 355 * Set up virtually-addressed cache information. Only relevant 356 * for the HP MMU. 357 */ 358 void 359 pmap_init_vac(size_t vacsize) 360 { 361 KASSERT(pmap_aliasmask == 0); 362 KASSERT(powerof2(vacsize)); 363 pmap_aliasmask = vacsize - 1; 364 } 365 #endif /* CACHE_HAVE_VAC */ 366 367 /* 368 * pmap_bootstrap2: [ INTERFACE ] 369 * 370 * Phase 2 of pmap bootstrap. (Phase 1 is system-specific.) 371 * 372 * Initialize lwp0 uarea, curlwp, and curpcb after MMU is turned on, 373 * using lwp0uarea variable saved during pmap_bootstrap(). 374 */ 375 void * 376 pmap_bootstrap2(void) 377 { 378 379 uvmexp.pagesize = NBPG; 380 uvm_md_init(); 381 382 /* 383 * Initialize protection array. 384 * XXX: Could this have port specific values? Can't this be static? 385 */ 386 protection_codes[VM_PROT_NONE|VM_PROT_NONE|VM_PROT_NONE] = 0; 387 protection_codes[VM_PROT_READ|VM_PROT_NONE|VM_PROT_NONE] = PG_RO; 388 protection_codes[VM_PROT_READ|VM_PROT_NONE|VM_PROT_EXECUTE] = PG_RO; 389 protection_codes[VM_PROT_NONE|VM_PROT_NONE|VM_PROT_EXECUTE] = PG_RO; 390 protection_codes[VM_PROT_NONE|VM_PROT_WRITE|VM_PROT_NONE] = PG_RW; 391 protection_codes[VM_PROT_NONE|VM_PROT_WRITE|VM_PROT_EXECUTE] = PG_RW; 392 protection_codes[VM_PROT_READ|VM_PROT_WRITE|VM_PROT_NONE] = PG_RW; 393 protection_codes[VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE] = PG_RW; 394 395 /* 396 * Initialize pmap_kernel(). 397 */ 398 pmap_kernel()->pm_stpa = (st_entry_t *)Sysseg_pa; 399 pmap_kernel()->pm_stab = Sysseg; 400 pmap_kernel()->pm_ptab = Sysmap; 401 #if defined(M68040) || defined(M68060) 402 if (mmutype == MMU_68040) 403 pmap_kernel()->pm_stfree = protostfree; 404 #endif 405 pmap_kernel()->pm_count = 1; 406 407 /* 408 * Initialize lwp0 uarea, curlwp, and curpcb. 409 */ 410 memset((void *)lwp0uarea, 0, USPACE); 411 uvm_lwp_setuarea(&lwp0, lwp0uarea); 412 curlwp = &lwp0; 413 curpcb = lwp_getpcb(&lwp0); 414 415 return (void *)lwp0uarea; 416 } 417 418 /* 419 * pmap_virtual_space: [ INTERFACE ] 420 * 421 * Report the range of available kernel virtual address 422 * space to the VM system during bootstrap. 423 * 424 * This is only an interface function if we do not use 425 * pmap_steal_memory()! 426 * 427 * Note: no locking is necessary in this function. 428 */ 429 void 430 pmap_virtual_space(vaddr_t *vstartp, vaddr_t *vendp) 431 { 432 433 *vstartp = virtual_avail; 434 *vendp = virtual_end; 435 } 436 437 /* 438 * pmap_init: [ INTERFACE ] 439 * 440 * Initialize the pmap module. Called by vm_init(), to initialize any 441 * structures that the pmap system needs to map virtual memory. 442 * 443 * Note: no locking is necessary in this function. 444 */ 445 void 446 pmap_init(void) 447 { 448 vaddr_t addr, addr2; 449 vsize_t s; 450 struct pv_header *pvh; 451 int rv; 452 int npages; 453 uvm_physseg_t bank; 454 455 PMAP_DPRINTF(PDB_FOLLOW, ("pmap_init()\n")); 456 457 /* 458 * Before we do anything else, initialize the PTE pointers 459 * used by pmap_zero_page() and pmap_copy_page(). 460 */ 461 caddr1_pte = pmap_pte(pmap_kernel(), CADDR1); 462 caddr2_pte = pmap_pte(pmap_kernel(), CADDR2); 463 464 PMAP_DPRINTF(PDB_INIT, 465 ("pmap_init: Sysseg %p, Sysmap %p, Sysptmap %p\n", 466 Sysseg, Sysmap, Sysptmap)); 467 PMAP_DPRINTF(PDB_INIT, 468 (" pstart %lx, pend %lx, vstart %lx, vend %lx\n", 469 avail_start, avail_end, virtual_avail, virtual_end)); 470 471 /* 472 * Allocate memory for random pmap data structures. Includes the 473 * initial segment table, pv_head_table and pmap_attributes. 474 */ 475 for (page_cnt = 0, bank = uvm_physseg_get_first(); 476 uvm_physseg_valid_p(bank); 477 bank = uvm_physseg_get_next(bank)) 478 page_cnt += uvm_physseg_get_end(bank) - uvm_physseg_get_start(bank); 479 s = M68K_STSIZE; /* Segtabzero */ 480 s += page_cnt * sizeof(struct pv_header); /* pv table */ 481 s = round_page(s); 482 addr = uvm_km_alloc(kernel_map, s, 0, UVM_KMF_WIRED | UVM_KMF_ZERO); 483 if (addr == 0) 484 panic("pmap_init: can't allocate data structures"); 485 486 Segtabzero = (st_entry_t *)addr; 487 (void)pmap_extract(pmap_kernel(), addr, 488 (paddr_t *)(void *)&Segtabzeropa); 489 addr += M68K_STSIZE; 490 491 pv_table = (struct pv_header *) addr; 492 addr += page_cnt * sizeof(struct pv_header); 493 494 PMAP_DPRINTF(PDB_INIT, ("pmap_init: %lx bytes: page_cnt %x s0 %p(%p) " 495 "tbl %p\n", 496 s, page_cnt, Segtabzero, Segtabzeropa, 497 pv_table)); 498 499 /* 500 * Now that the pv and attribute tables have been allocated, 501 * assign them to the memory segments. 502 */ 503 pvh = pv_table; 504 for (bank = uvm_physseg_get_first(); 505 uvm_physseg_valid_p(bank); 506 bank = uvm_physseg_get_next(bank)) { 507 npages = uvm_physseg_get_end(bank) - uvm_physseg_get_start(bank); 508 uvm_physseg_get_pmseg(bank)->pvheader = pvh; 509 pvh += npages; 510 } 511 512 /* 513 * Allocate physical memory for kernel PT pages and their management. 514 * We need 1 PT page per possible task plus some slop. 515 */ 516 npages = uimin(atop(M68K_MAX_KPTSIZE), maxproc+16); 517 s = ptoa(npages) + round_page(npages * sizeof(struct kpt_page)); 518 519 /* 520 * Verify that space will be allocated in region for which 521 * we already have kernel PT pages. 522 */ 523 addr = 0; 524 rv = uvm_map(kernel_map, &addr, s, NULL, UVM_UNKNOWN_OFFSET, 0, 525 UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE, 526 UVM_ADV_RANDOM, UVM_FLAG_NOMERGE)); 527 if (rv != 0 || (addr + s) >= (vaddr_t)Sysmap) 528 panic("pmap_init: kernel PT too small"); 529 uvm_unmap(kernel_map, addr, addr + s); 530 531 /* 532 * Now allocate the space and link the pages together to 533 * form the KPT free list. 534 */ 535 addr = uvm_km_alloc(kernel_map, s, 0, UVM_KMF_WIRED | UVM_KMF_ZERO); 536 if (addr == 0) 537 panic("pmap_init: cannot allocate KPT free list"); 538 s = ptoa(npages); 539 addr2 = addr + s; 540 kpt_pages = &((struct kpt_page *)addr2)[npages]; 541 kpt_free_list = NULL; 542 do { 543 addr2 -= PAGE_SIZE; 544 (--kpt_pages)->kpt_next = kpt_free_list; 545 kpt_free_list = kpt_pages; 546 kpt_pages->kpt_va = addr2; 547 (void) pmap_extract(pmap_kernel(), addr2, 548 (paddr_t *)&kpt_pages->kpt_pa); 549 } while (addr != addr2); 550 551 PMAP_DPRINTF(PDB_INIT, ("pmap_init: KPT: %ld pages from %lx to %lx\n", 552 atop(s), addr, addr + s)); 553 554 /* 555 * Allocate the segment table map and the page table map. 556 */ 557 s = maxproc * M68K_STSIZE; 558 st_map = uvm_km_suballoc(kernel_map, &addr, &addr2, s, 0, false, 559 &st_map_store); 560 561 addr = m68k_uptbase; 562 if ((M68K_PTMAXSIZE / M68K_MAX_PTSIZE) < maxproc) { 563 s = M68K_PTMAXSIZE; 564 /* 565 * XXX We don't want to hang when we run out of 566 * page tables, so we lower maxproc so that fork() 567 * will fail instead. Note that root could still raise 568 * this value via sysctl(3). 569 */ 570 maxproc = (M68K_PTMAXSIZE / M68K_MAX_PTSIZE); 571 } else 572 s = (maxproc * M68K_MAX_PTSIZE); 573 pt_map = uvm_km_suballoc(kernel_map, &addr, &addr2, s, 0, 574 true, &pt_map_store); 575 576 #if defined(M68040) || defined(M68060) 577 if (mmutype == MMU_68040) { 578 protostfree = ~l2tobm(0); 579 for (rv = MAXUL2SIZE; rv < sizeof(protostfree)*NBBY; rv++) 580 protostfree &= ~l2tobm(rv); 581 } 582 #endif 583 584 /* 585 * Initialize the pmap pools. 586 */ 587 pool_init(&pmap_pmap_pool, sizeof(struct pmap), 0, 0, 0, "pmappl", 588 &pool_allocator_nointr, IPL_NONE); 589 590 /* 591 * Initialize the pv_entry pools. 592 */ 593 pool_init(&pmap_pv_pool, sizeof(struct pv_entry), 0, 0, 0, "pvpl", 594 &pool_allocator_meta, IPL_NONE); 595 596 /* 597 * Now that this is done, mark the pages shared with the 598 * hardware page table search as non-CCB (actually, as CI). 599 * 600 * XXX Hm. Given that this is in the kernel map, can't we just 601 * use the va's? 602 */ 603 #ifdef M68060 604 #if defined(M68020) || defined(M68030) || defined(M68040) 605 if (cputype == CPU_68060) 606 #endif 607 { 608 struct kpt_page *kptp = kpt_free_list; 609 paddr_t paddr; 610 611 while (kptp) { 612 pmap_changebit(kptp->kpt_pa, PG_CI, 613 (pt_entry_t)~PG_CCB); 614 kptp = kptp->kpt_next; 615 } 616 617 paddr = (paddr_t)Segtabzeropa; 618 while (paddr < (paddr_t)Segtabzeropa + M68K_STSIZE) { 619 pmap_changebit(paddr, PG_CI, 620 (pt_entry_t)~PG_CCB); 621 paddr += PAGE_SIZE; 622 } 623 624 DCIS(); 625 } 626 #endif 627 628 /* 629 * Set up the routine that loads the MMU root table pointer. 630 */ 631 switch (cputype) { 632 #if defined(M68020) 633 case CPU_68020: 634 #ifdef M68K_MMU_MOTOROLA 635 if (mmutype == MMU_68851) { 636 protorp[0] = MMU51_CRP_BITS; 637 pmap_load_urp_func = mmu_load_urp51; 638 } 639 #endif 640 #ifdef M68K_MMU_HP 641 if (mmutype == MMU_HP) { 642 pmap_load_urp_func = mmu_load_urp20hp; 643 } 644 #endif 645 break; 646 #endif /* M68020 */ 647 #if defined(M68030) 648 case CPU_68030: 649 protorp[0] = MMU51_CRP_BITS; 650 pmap_load_urp_func = mmu_load_urp51; 651 break; 652 #endif /* M68030 */ 653 #if defined(M68040) 654 case CPU_68040: 655 pmap_load_urp_func = mmu_load_urp40; 656 break; 657 #endif /* M68040 */ 658 #if defined(M68060) 659 case CPU_68060: 660 pmap_load_urp_func = mmu_load_urp60; 661 break; 662 #endif /* M68060 */ 663 default: 664 break; 665 } 666 if (pmap_load_urp_func == NULL) { 667 panic("pmap_init: No mmu_load_*() for cpu=%d mmu=%d", 668 cputype, mmutype); 669 } 670 671 /* 672 * Now it is safe to enable pv_table recording. 673 */ 674 pmap_initialized = true; 675 } 676 677 /* 678 * pmap_create: [ INTERFACE ] 679 * 680 * Create and return a physical map. 681 * 682 * Note: no locking is necessary in this function. 683 */ 684 pmap_t 685 pmap_create(void) 686 { 687 struct pmap *pmap; 688 689 PMAP_DPRINTF(PDB_FOLLOW|PDB_CREATE, 690 ("pmap_create()\n")); 691 692 pmap = pool_get(&pmap_pmap_pool, PR_WAITOK); 693 memset(pmap, 0, sizeof(*pmap)); 694 pmap_pinit(pmap); 695 return pmap; 696 } 697 698 /* 699 * pmap_pinit: 700 * 701 * Initialize a preallocated and zeroed pmap structure. 702 * 703 * Note: THIS FUNCTION SHOULD BE MOVED INTO pmap_create()! 704 */ 705 void 706 pmap_pinit(struct pmap *pmap) 707 { 708 709 PMAP_DPRINTF(PDB_FOLLOW|PDB_CREATE, 710 ("pmap_pinit(%p)\n", pmap)); 711 712 /* 713 * No need to allocate page table space yet but we do need a 714 * valid segment table. Initially, we point everyone at the 715 * "null" segment table. On the first pmap_enter, a real 716 * segment table will be allocated. 717 */ 718 pmap->pm_stab = Segtabzero; 719 pmap->pm_stpa = Segtabzeropa; 720 #if defined(M68040) || defined(M68060) 721 #if defined(M68020) || defined(M68030) 722 if (mmutype == MMU_68040) 723 #endif 724 pmap->pm_stfree = protostfree; 725 #endif 726 pmap->pm_count = 1; 727 } 728 729 /* 730 * pmap_destroy: [ INTERFACE ] 731 * 732 * Drop the reference count on the specified pmap, releasing 733 * all resources if the reference count drops to zero. 734 */ 735 void 736 pmap_destroy(pmap_t pmap) 737 { 738 int count; 739 740 PMAP_DPRINTF(PDB_FOLLOW, ("pmap_destroy(%p)\n", pmap)); 741 742 count = atomic_dec_uint_nv(&pmap->pm_count); 743 if (count == 0) { 744 pmap_release(pmap); 745 pool_put(&pmap_pmap_pool, pmap); 746 } 747 } 748 749 /* 750 * pmap_release: 751 * 752 * Release the resources held by a pmap. 753 * 754 * Note: THIS FUNCTION SHOULD BE MOVED INTO pmap_destroy(). 755 */ 756 void 757 pmap_release(pmap_t pmap) 758 { 759 760 PMAP_DPRINTF(PDB_FOLLOW, ("pmap_release(%p)\n", pmap)); 761 762 #ifdef notdef /* DIAGNOSTIC */ 763 /* count would be 0 from pmap_destroy... */ 764 if (pmap->pm_count != 1) 765 panic("pmap_release count"); 766 #endif 767 768 if (pmap->pm_ptab) { 769 pmap_remove(pmap_kernel(), (vaddr_t)pmap->pm_ptab, 770 (vaddr_t)pmap->pm_ptab + M68K_MAX_PTSIZE); 771 uvm_km_pgremove((vaddr_t)pmap->pm_ptab, 772 (vaddr_t)pmap->pm_ptab + M68K_MAX_PTSIZE); 773 uvm_km_free(pt_map, (vaddr_t)pmap->pm_ptab, 774 M68K_MAX_PTSIZE, UVM_KMF_VAONLY); 775 } 776 KASSERT(pmap->pm_stab == Segtabzero); 777 } 778 779 /* 780 * pmap_reference: [ INTERFACE ] 781 * 782 * Add a reference to the specified pmap. 783 */ 784 void 785 pmap_reference(pmap_t pmap) 786 { 787 PMAP_DPRINTF(PDB_FOLLOW, ("pmap_reference(%p)\n", pmap)); 788 789 atomic_inc_uint(&pmap->pm_count); 790 } 791 792 /* 793 * pmap_activate: [ INTERFACE ] 794 * 795 * Activate the pmap used by the specified process. This includes 796 * reloading the MMU context if the current process, and marking 797 * the pmap in use by the processor. 798 * 799 * Note: we may only use spin locks here, since we are called 800 * by a critical section in cpu_switch()! 801 */ 802 void 803 pmap_activate(struct lwp *l) 804 { 805 pmap_t pmap = l->l_proc->p_vmspace->vm_map.pmap; 806 807 PMAP_DPRINTF(PDB_FOLLOW|PDB_SEGTAB, 808 ("pmap_activate(%p)\n", l)); 809 810 KASSERT(l == curlwp); 811 812 /* 813 * Because the kernel has a separate root pointer, we don't 814 * need to activate the kernel pmap. 815 */ 816 if (pmap != pmap_kernel()) { 817 pmap_load_urp((paddr_t)pmap->pm_stpa); 818 } 819 } 820 821 /* 822 * pmap_deactivate: [ INTERFACE ] 823 * 824 * Mark that the pmap used by the specified process is no longer 825 * in use by the processor. 826 * 827 * The comment above pmap_activate() wrt. locking applies here, 828 * as well. 829 */ 830 void 831 pmap_deactivate(struct lwp *l) 832 { 833 834 /* No action necessary in this pmap implementation. */ 835 } 836 837 /* 838 * pmap_remove: [ INTERFACE ] 839 * 840 * Remove the given range of addresses from the specified map. 841 * 842 * It is assumed that the start and end are properly 843 * rounded to the page size. 844 */ 845 void 846 pmap_remove(pmap_t pmap, vaddr_t sva, vaddr_t eva) 847 { 848 vaddr_t nssva; 849 pt_entry_t *pte; 850 int flags; 851 #ifdef CACHE_HAVE_VAC 852 bool firstpage = true, needcflush = false; 853 #endif 854 855 PMAP_DPRINTF(PDB_FOLLOW|PDB_REMOVE|PDB_PROTECT, 856 ("pmap_remove(%p, %lx, %lx)\n", pmap, sva, eva)); 857 858 flags = active_pmap(pmap) ? PRM_TFLUSH : 0; 859 while (sva < eva) { 860 nssva = m68k_trunc_seg(sva) + NBSEG; 861 if (nssva == 0 || nssva > eva) 862 nssva = eva; 863 864 /* 865 * Invalidate every valid mapping within this segment. 866 */ 867 868 pte = pmap_pte(pmap, sva); 869 while (sva < nssva) { 870 871 /* 872 * If this segment is unallocated, 873 * skip to the next segment boundary. 874 */ 875 876 if (!pmap_ste_v(pmap, sva)) { 877 sva = nssva; 878 break; 879 } 880 881 if (pmap_pte_v(pte)) { 882 #ifdef CACHE_HAVE_VAC 883 if (pmap_aliasmask) { 884 885 /* 886 * Purge kernel side of VAC to ensure 887 * we get the correct state of any 888 * hardware maintained bits. 889 */ 890 891 if (firstpage) { 892 DCIS(); 893 } 894 895 /* 896 * Remember if we may need to 897 * flush the VAC due to a non-CI 898 * mapping. 899 */ 900 901 if (!needcflush && !pmap_pte_ci(pte)) 902 needcflush = true; 903 904 } 905 firstpage = false; 906 #endif 907 pmap_remove_mapping(pmap, sva, pte, flags, NULL); 908 } 909 pte++; 910 sva += PAGE_SIZE; 911 } 912 } 913 914 #ifdef CACHE_HAVE_VAC 915 916 /* 917 * Didn't do anything, no need for cache flushes 918 */ 919 920 if (firstpage) 921 return; 922 923 /* 924 * In a couple of cases, we don't need to worry about flushing 925 * the VAC: 926 * 1. if this is a kernel mapping, 927 * we have already done it 928 * 2. if it is a user mapping not for the current process, 929 * it won't be there 930 */ 931 932 if (pmap_aliasmask && !active_user_pmap(pmap)) 933 needcflush = false; 934 if (needcflush) { 935 if (pmap == pmap_kernel()) { 936 DCIS(); 937 } else { 938 DCIU(); 939 } 940 } 941 #endif 942 } 943 944 /* 945 * pmap_page_protect: [ INTERFACE ] 946 * 947 * Lower the permission for all mappings to a given page to 948 * the permissions specified. 949 */ 950 void 951 pmap_page_protect(struct vm_page *pg, vm_prot_t prot) 952 { 953 paddr_t pa = VM_PAGE_TO_PHYS(pg); 954 struct pv_header *pvh; 955 struct pv_entry *pv; 956 pt_entry_t *pte; 957 int s; 958 959 #ifdef DEBUG 960 if ((pmapdebug & (PDB_FOLLOW|PDB_PROTECT)) || 961 (prot == VM_PROT_NONE && (pmapdebug & PDB_REMOVE))) 962 printf("pmap_page_protect(%p, %x)\n", pg, prot); 963 #endif 964 965 switch (prot) { 966 case VM_PROT_READ|VM_PROT_WRITE: 967 case VM_PROT_ALL: 968 return; 969 970 /* copy_on_write */ 971 case VM_PROT_READ: 972 case VM_PROT_READ|VM_PROT_EXECUTE: 973 pmap_changebit(pa, PG_RO, ~0); 974 return; 975 976 /* remove_all */ 977 default: 978 break; 979 } 980 981 pvh = pa_to_pvh(pa); 982 pv = &pvh->pvh_first; 983 s = splvm(); 984 while (pv->pv_pmap != NULL) { 985 986 pte = pmap_pte(pv->pv_pmap, pv->pv_va); 987 #ifdef DEBUG 988 if (!pmap_ste_v(pv->pv_pmap, pv->pv_va) || 989 pmap_pte_pa(pte) != pa) 990 panic("pmap_page_protect: bad mapping"); 991 #endif 992 pmap_remove_mapping(pv->pv_pmap, pv->pv_va, 993 pte, PRM_TFLUSH|PRM_CFLUSH, NULL); 994 } 995 splx(s); 996 } 997 998 /* 999 * pmap_protect: [ INTERFACE ] 1000 * 1001 * Set the physical protection on the specified range of this map 1002 * as requested. 1003 */ 1004 void 1005 pmap_protect(pmap_t pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot) 1006 { 1007 vaddr_t nssva; 1008 pt_entry_t *pte; 1009 bool firstpage __unused, needtflush; 1010 int isro; 1011 1012 PMAP_DPRINTF(PDB_FOLLOW|PDB_PROTECT, 1013 ("pmap_protect(%p, %lx, %lx, %x)\n", 1014 pmap, sva, eva, prot)); 1015 1016 #ifdef PMAPSTATS 1017 protect_stats.calls++; 1018 #endif 1019 if ((prot & VM_PROT_READ) == VM_PROT_NONE) { 1020 pmap_remove(pmap, sva, eva); 1021 return; 1022 } 1023 isro = pte_prot(pmap, prot); 1024 needtflush = active_pmap(pmap); 1025 firstpage = true; 1026 while (sva < eva) { 1027 nssva = m68k_trunc_seg(sva) + NBSEG; 1028 if (nssva == 0 || nssva > eva) 1029 nssva = eva; 1030 1031 /* 1032 * If VA belongs to an unallocated segment, 1033 * skip to the next segment boundary. 1034 */ 1035 1036 if (!pmap_ste_v(pmap, sva)) { 1037 sva = nssva; 1038 continue; 1039 } 1040 1041 /* 1042 * Change protection on mapping if it is valid and doesn't 1043 * already have the correct protection. 1044 */ 1045 1046 pte = pmap_pte(pmap, sva); 1047 while (sva < nssva) { 1048 if (pmap_pte_v(pte) && pmap_pte_prot_chg(pte, isro)) { 1049 #ifdef CACHE_HAVE_VAC 1050 1051 /* 1052 * Purge kernel side of VAC to ensure we 1053 * get the correct state of any hardware 1054 * maintained bits. 1055 * 1056 * XXX do we need to clear the VAC in 1057 * general to reflect the new protection? 1058 */ 1059 1060 if (firstpage && pmap_aliasmask) 1061 DCIS(); 1062 #endif 1063 1064 #if defined(M68040) || defined(M68060) 1065 1066 /* 1067 * Clear caches if making RO (see section 1068 * "7.3 Cache Coherency" in the manual). 1069 */ 1070 1071 #if defined(M68020) || defined(M68030) 1072 if (isro && mmutype == MMU_68040) 1073 #else 1074 if (isro) 1075 #endif 1076 { 1077 paddr_t pa = pmap_pte_pa(pte); 1078 1079 DCFP(pa); 1080 ICPP(pa); 1081 } 1082 #endif 1083 pmap_pte_set_prot(pte, isro); 1084 if (needtflush) 1085 TBIS(sva); 1086 firstpage = false; 1087 } 1088 pte++; 1089 sva += PAGE_SIZE; 1090 } 1091 } 1092 } 1093 1094 /* 1095 * pmap_enter: [ INTERFACE ] 1096 * 1097 * Insert the given physical page (pa) at 1098 * the specified virtual address (va) in the 1099 * target physical map with the protection requested. 1100 * 1101 * If specified, the page will be wired down, meaning 1102 * that the related pte cannot be reclaimed. 1103 * 1104 * Note: This is the only routine which MAY NOT lazy-evaluate 1105 * or lose information. Thatis, this routine must actually 1106 * insert this page into the given map NOW. 1107 */ 1108 int 1109 pmap_enter(pmap_t pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags) 1110 { 1111 pt_entry_t *pte; 1112 struct pv_entry *opv = NULL; 1113 int npte; 1114 paddr_t opa; 1115 bool cacheable = true; 1116 bool checkpv = true; 1117 bool wired = (flags & PMAP_WIRED) != 0; 1118 bool can_fail = (flags & PMAP_CANFAIL) != 0; 1119 1120 PMAP_DPRINTF(PDB_FOLLOW|PDB_ENTER, 1121 ("pmap_enter(%p, %lx, %lx, %x, %x)\n", 1122 pmap, va, pa, prot, wired)); 1123 1124 #ifdef DIAGNOSTIC 1125 /* 1126 * pmap_enter() should never be used for CADDR1 and CADDR2. 1127 */ 1128 if (pmap == pmap_kernel() && 1129 (va == (vaddr_t)CADDR1 || va == (vaddr_t)CADDR2)) 1130 panic("pmap_enter: used for CADDR1 or CADDR2"); 1131 #endif 1132 1133 /* 1134 * For user mapping, allocate kernel VM resources if necessary. 1135 */ 1136 if (pmap->pm_ptab == NULL) { 1137 pmap->pm_ptab = (pt_entry_t *) 1138 uvm_km_alloc(pt_map, M68K_MAX_PTSIZE, 0, 1139 UVM_KMF_VAONLY | 1140 (can_fail ? UVM_KMF_NOWAIT : UVM_KMF_WAITVA)); 1141 if (pmap->pm_ptab == NULL) 1142 return ENOMEM; 1143 } 1144 1145 /* 1146 * Segment table entry not valid, we need a new PT page 1147 */ 1148 if (!pmap_ste_v(pmap, va)) { 1149 int err = pmap_enter_ptpage(pmap, va, can_fail); 1150 if (err) 1151 return err; 1152 } 1153 1154 pa = m68k_trunc_page(pa); 1155 pte = pmap_pte(pmap, va); 1156 opa = pmap_pte_pa(pte); 1157 1158 PMAP_DPRINTF(PDB_ENTER, ("enter: pte %p, *pte %x\n", pte, *pte)); 1159 1160 /* 1161 * Mapping has not changed, must be protection or wiring change. 1162 */ 1163 if (opa == pa) { 1164 /* 1165 * Wiring change, just update stats. 1166 * We don't worry about wiring PT pages as they remain 1167 * resident as long as there are valid mappings in them. 1168 * Hence, if a user page is wired, the PT page will be also. 1169 */ 1170 if (pmap_pte_w_chg(pte, wired ? PG_W : 0)) { 1171 PMAP_DPRINTF(PDB_ENTER, 1172 ("enter: wiring change -> %x\n", wired)); 1173 if (wired) 1174 pmap->pm_stats.wired_count++; 1175 else 1176 pmap->pm_stats.wired_count--; 1177 } 1178 /* 1179 * Retain cache inhibition status 1180 */ 1181 checkpv = false; 1182 if (pmap_pte_ci(pte)) 1183 cacheable = false; 1184 goto validate; 1185 } 1186 1187 /* 1188 * Mapping has changed, invalidate old range and fall through to 1189 * handle validating new mapping. 1190 */ 1191 if (opa) { 1192 PMAP_DPRINTF(PDB_ENTER, 1193 ("enter: removing old mapping %lx\n", va)); 1194 pmap_remove_mapping(pmap, va, pte, 1195 PRM_TFLUSH|PRM_CFLUSH|PRM_KEEPPTPAGE, &opv); 1196 } 1197 1198 /* 1199 * If this is a new user mapping, increment the wiring count 1200 * on this PT page. PT pages are wired down as long as there 1201 * is a valid mapping in the page. 1202 */ 1203 if (pmap != pmap_kernel()) 1204 pmap_ptpage_addref(trunc_page((vaddr_t)pte)); 1205 1206 /* 1207 * Enter on the PV list if part of our managed memory 1208 * Note that we raise IPL while manipulating pv_table 1209 * since pmap_enter can be called at interrupt time. 1210 */ 1211 if (PAGE_IS_MANAGED(pa)) { 1212 struct pv_header *pvh; 1213 struct pv_entry *pv, *npv; 1214 int s; 1215 1216 pvh = pa_to_pvh(pa); 1217 pv = &pvh->pvh_first; 1218 s = splvm(); 1219 1220 PMAP_DPRINTF(PDB_ENTER, 1221 ("enter: pv at %p: %lx/%p/%p\n", 1222 pv, pv->pv_va, pv->pv_pmap, pv->pv_next)); 1223 /* 1224 * No entries yet, use header as the first entry 1225 */ 1226 if (pv->pv_pmap == NULL) { 1227 pv->pv_va = va; 1228 pv->pv_pmap = pmap; 1229 pv->pv_next = NULL; 1230 pv->pv_ptste = NULL; 1231 pv->pv_ptpmap = NULL; 1232 pvh->pvh_attrs = 0; 1233 } 1234 /* 1235 * There is at least one other VA mapping this page. 1236 * Place this entry after the header. 1237 */ 1238 else { 1239 #ifdef DEBUG 1240 for (npv = pv; npv; npv = npv->pv_next) 1241 if (pmap == npv->pv_pmap && va == npv->pv_va) 1242 panic("pmap_enter: already in pv_tab"); 1243 #endif 1244 if (opv != NULL) { 1245 npv = opv; 1246 opv = NULL; 1247 } else { 1248 npv = pmap_alloc_pv(); 1249 } 1250 KASSERT(npv != NULL); 1251 npv->pv_va = va; 1252 npv->pv_pmap = pmap; 1253 npv->pv_next = pv->pv_next; 1254 npv->pv_ptste = NULL; 1255 npv->pv_ptpmap = NULL; 1256 pv->pv_next = npv; 1257 1258 #ifdef CACHE_HAVE_VAC 1259 1260 /* 1261 * Since there is another logical mapping for the 1262 * same page we may need to cache-inhibit the 1263 * descriptors on those CPUs with external VACs. 1264 * We don't need to CI if: 1265 * 1266 * - No two mappings belong to the same user pmaps. 1267 * Since the cache is flushed on context switches 1268 * there is no problem between user processes. 1269 * 1270 * - Mappings within a single pmap are a certain 1271 * magic distance apart. VAs at these appropriate 1272 * boundaries map to the same cache entries or 1273 * otherwise don't conflict. 1274 * 1275 * To keep it simple, we only check for these special 1276 * cases if there are only two mappings, otherwise we 1277 * punt and always CI. 1278 * 1279 * Note that there are no aliasing problems with the 1280 * on-chip data-cache when the WA bit is set. 1281 */ 1282 1283 if (pmap_aliasmask) { 1284 if (pvh->pvh_attrs & PVH_CI) { 1285 PMAP_DPRINTF(PDB_CACHE, 1286 ("enter: pa %lx already CI'ed\n", 1287 pa)); 1288 checkpv = cacheable = false; 1289 } else if (npv->pv_next || 1290 ((pmap == pv->pv_pmap || 1291 pmap == pmap_kernel() || 1292 pv->pv_pmap == pmap_kernel()) && 1293 ((pv->pv_va & pmap_aliasmask) != 1294 (va & pmap_aliasmask)))) { 1295 PMAP_DPRINTF(PDB_CACHE, 1296 ("enter: pa %lx CI'ing all\n", 1297 pa)); 1298 cacheable = false; 1299 pvh->pvh_attrs |= PVH_CI; 1300 } 1301 } 1302 #endif 1303 } 1304 1305 /* 1306 * Speed pmap_is_referenced() or pmap_is_modified() based 1307 * on the hint provided in access_type. 1308 */ 1309 #ifdef DIAGNOSTIC 1310 if ((flags & VM_PROT_ALL) & ~prot) 1311 panic("pmap_enter: access_type exceeds prot"); 1312 #endif 1313 if (flags & VM_PROT_WRITE) 1314 pvh->pvh_attrs |= (PG_U|PG_M); 1315 else if (flags & VM_PROT_ALL) 1316 pvh->pvh_attrs |= PG_U; 1317 1318 splx(s); 1319 } 1320 /* 1321 * Assumption: if it is not part of our managed memory 1322 * then it must be device memory which may be volitile. 1323 */ 1324 else if (pmap_initialized) { 1325 checkpv = cacheable = false; 1326 } 1327 1328 /* 1329 * Increment counters 1330 */ 1331 pmap->pm_stats.resident_count++; 1332 if (wired) 1333 pmap->pm_stats.wired_count++; 1334 1335 validate: 1336 #ifdef CACHE_HAVE_VAC 1337 /* 1338 * Purge kernel side of VAC to ensure we get correct state 1339 * of HW bits so we don't clobber them. 1340 */ 1341 if (pmap_aliasmask) 1342 DCIS(); 1343 #endif 1344 1345 /* 1346 * Build the new PTE. 1347 */ 1348 1349 npte = pa | pte_prot(pmap, prot) | (*pte & (PG_M|PG_U)) | PG_V; 1350 if (wired) 1351 npte |= PG_W; 1352 if (!checkpv && !cacheable) 1353 #if defined(M68040) || defined(M68060) 1354 #if defined(M68020) || defined(M68030) 1355 npte |= (mmutype == MMU_68040 ? PG_CIN : PG_CI); 1356 #else 1357 npte |= PG_CIN; 1358 #endif 1359 #else 1360 npte |= PG_CI; 1361 #endif 1362 #if defined(M68040) || defined(M68060) 1363 #if defined(M68020) || defined(M68030) 1364 else if (mmutype == MMU_68040 && (npte & (PG_PROT|PG_CI)) == PG_RW) 1365 #else 1366 else if ((npte & (PG_PROT|PG_CI)) == PG_RW) 1367 #endif 1368 npte |= PG_CCB; 1369 #endif 1370 1371 PMAP_DPRINTF(PDB_ENTER, ("enter: new pte value %x\n", npte)); 1372 1373 /* 1374 * Remember if this was a wiring-only change. 1375 * If so, we need not flush the TLB and caches. 1376 */ 1377 1378 wired = ((*pte ^ npte) == PG_W); 1379 #if defined(M68040) || defined(M68060) 1380 #if defined(M68020) || defined(M68030) 1381 if (mmutype == MMU_68040 && !wired) 1382 #else 1383 if (!wired) 1384 #endif 1385 { 1386 DCFP(pa); 1387 ICPP(pa); 1388 } 1389 #endif 1390 *pte = npte; 1391 if (!wired && active_pmap(pmap)) 1392 TBIS(va); 1393 #ifdef CACHE_HAVE_VAC 1394 /* 1395 * The following is executed if we are entering a second 1396 * (or greater) mapping for a physical page and the mappings 1397 * may create an aliasing problem. In this case we must 1398 * cache inhibit the descriptors involved and flush any 1399 * external VAC. 1400 */ 1401 if (checkpv && !cacheable) { 1402 pmap_changebit(pa, PG_CI, ~0); 1403 DCIA(); 1404 #ifdef DEBUG 1405 if ((pmapdebug & (PDB_CACHE|PDB_PVDUMP)) == 1406 (PDB_CACHE|PDB_PVDUMP)) 1407 pmap_pvdump(pa); 1408 #endif 1409 } 1410 #endif 1411 #ifdef DEBUG 1412 if ((pmapdebug & PDB_WIRING) && pmap != pmap_kernel()) 1413 pmap_check_wiring("enter", trunc_page((vaddr_t)pte)); 1414 #endif 1415 1416 if (opv != NULL) 1417 pmap_free_pv(opv); 1418 1419 return 0; 1420 } 1421 1422 void 1423 pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags) 1424 { 1425 pmap_t pmap = pmap_kernel(); 1426 pt_entry_t *pte; 1427 int s, npte; 1428 1429 PMAP_DPRINTF(PDB_FOLLOW|PDB_ENTER, 1430 ("pmap_kenter_pa(%lx, %lx, %x)\n", va, pa, prot)); 1431 1432 /* 1433 * Segment table entry not valid, we need a new PT page 1434 */ 1435 1436 if (!pmap_ste_v(pmap, va)) { 1437 s = splvm(); 1438 pmap_enter_ptpage(pmap, va, false); 1439 splx(s); 1440 } 1441 1442 pa = m68k_trunc_page(pa); 1443 pte = pmap_pte(pmap, va); 1444 1445 PMAP_DPRINTF(PDB_ENTER, ("enter: pte %p, *pte %x\n", pte, *pte)); 1446 KASSERT(!pmap_pte_v(pte)); 1447 1448 /* 1449 * Increment counters 1450 */ 1451 1452 pmap->pm_stats.resident_count++; 1453 pmap->pm_stats.wired_count++; 1454 1455 /* 1456 * Build the new PTE. 1457 */ 1458 1459 npte = pa | pte_prot(pmap, prot) | PG_V | PG_W; 1460 #if defined(M68040) || defined(M68060) 1461 #if defined(M68020) || defined(M68030) 1462 if (mmutype == MMU_68040 && (npte & PG_PROT) == PG_RW) 1463 #else 1464 if ((npte & PG_PROT) == PG_RW) 1465 #endif 1466 npte |= PG_CCB; 1467 1468 if (mmutype == MMU_68040) { 1469 DCFP(pa); 1470 ICPP(pa); 1471 } 1472 #endif 1473 1474 *pte = npte; 1475 TBIS(va); 1476 } 1477 1478 void 1479 pmap_kremove(vaddr_t va, vsize_t size) 1480 { 1481 pmap_t pmap = pmap_kernel(); 1482 pt_entry_t *pte; 1483 vaddr_t nssva; 1484 vaddr_t eva = va + size; 1485 #ifdef CACHE_HAVE_VAC 1486 bool firstpage, needcflush; 1487 #endif 1488 1489 PMAP_DPRINTF(PDB_FOLLOW|PDB_REMOVE|PDB_PROTECT, 1490 ("pmap_kremove(%lx, %lx)\n", va, size)); 1491 1492 #ifdef CACHE_HAVE_VAC 1493 firstpage = true; 1494 needcflush = false; 1495 #endif 1496 while (va < eva) { 1497 nssva = m68k_trunc_seg(va) + NBSEG; 1498 if (nssva == 0 || nssva > eva) 1499 nssva = eva; 1500 1501 /* 1502 * If VA belongs to an unallocated segment, 1503 * skip to the next segment boundary. 1504 */ 1505 1506 if (!pmap_ste_v(pmap, va)) { 1507 va = nssva; 1508 continue; 1509 } 1510 1511 /* 1512 * Invalidate every valid mapping within this segment. 1513 */ 1514 1515 pte = pmap_pte(pmap, va); 1516 while (va < nssva) { 1517 if (!pmap_pte_v(pte)) { 1518 pte++; 1519 va += PAGE_SIZE; 1520 continue; 1521 } 1522 #ifdef CACHE_HAVE_VAC 1523 if (pmap_aliasmask) { 1524 1525 /* 1526 * Purge kernel side of VAC to ensure 1527 * we get the correct state of any 1528 * hardware maintained bits. 1529 */ 1530 1531 if (firstpage) { 1532 DCIS(); 1533 firstpage = false; 1534 } 1535 1536 /* 1537 * Remember if we may need to 1538 * flush the VAC. 1539 */ 1540 1541 needcflush = true; 1542 } 1543 #endif 1544 pmap->pm_stats.wired_count--; 1545 pmap->pm_stats.resident_count--; 1546 *pte = PG_NV; 1547 TBIS(va); 1548 pte++; 1549 va += PAGE_SIZE; 1550 } 1551 } 1552 1553 #ifdef CACHE_HAVE_VAC 1554 1555 /* 1556 * In a couple of cases, we don't need to worry about flushing 1557 * the VAC: 1558 * 1. if this is a kernel mapping, 1559 * we have already done it 1560 * 2. if it is a user mapping not for the current process, 1561 * it won't be there 1562 */ 1563 1564 if (pmap_aliasmask && !active_user_pmap(pmap)) 1565 needcflush = false; 1566 if (needcflush) { 1567 if (pmap == pmap_kernel()) { 1568 DCIS(); 1569 } else { 1570 DCIU(); 1571 } 1572 } 1573 #endif 1574 } 1575 1576 /* 1577 * pmap_unwire: [ INTERFACE ] 1578 * 1579 * Clear the wired attribute for a map/virtual-address pair. 1580 * 1581 * The mapping must already exist in the pmap. 1582 */ 1583 void 1584 pmap_unwire(pmap_t pmap, vaddr_t va) 1585 { 1586 pt_entry_t *pte; 1587 1588 PMAP_DPRINTF(PDB_FOLLOW, 1589 ("pmap_unwire(%p, %lx)\n", pmap, va)); 1590 1591 pte = pmap_pte(pmap, va); 1592 1593 /* 1594 * If wiring actually changed (always?) clear the wire bit and 1595 * update the wire count. Note that wiring is not a hardware 1596 * characteristic so there is no need to invalidate the TLB. 1597 */ 1598 1599 if (pmap_pte_w_chg(pte, 0)) { 1600 pmap_pte_set_w(pte, false); 1601 pmap->pm_stats.wired_count--; 1602 } 1603 } 1604 1605 /* 1606 * pmap_extract: [ INTERFACE ] 1607 * 1608 * Extract the physical address associated with the given 1609 * pmap/virtual address pair. 1610 */ 1611 bool 1612 pmap_extract(pmap_t pmap, vaddr_t va, paddr_t *pap) 1613 { 1614 paddr_t pa; 1615 u_int pte; 1616 1617 PMAP_DPRINTF(PDB_FOLLOW, 1618 ("pmap_extract(%p, %lx) -> ", pmap, va)); 1619 1620 if (pmap_ste_v(pmap, va)) { 1621 pte = *(u_int *)pmap_pte(pmap, va); 1622 if (pte) { 1623 pa = (pte & PG_FRAME) | (va & ~PG_FRAME); 1624 if (pap != NULL) 1625 *pap = pa; 1626 #ifdef DEBUG 1627 if (pmapdebug & PDB_FOLLOW) 1628 printf("%lx\n", pa); 1629 #endif 1630 return true; 1631 } 1632 } 1633 #ifdef DEBUG 1634 if (pmapdebug & PDB_FOLLOW) 1635 printf("failed\n"); 1636 #endif 1637 return false; 1638 } 1639 1640 /* 1641 * vtophys: [ INTERFACE-ish ] 1642 * 1643 * Kernel virtual to physical. Use with caution. 1644 */ 1645 paddr_t 1646 vtophys(vaddr_t va) 1647 { 1648 paddr_t pa; 1649 1650 if (pmap_extract(pmap_kernel(), va, &pa)) 1651 return pa; 1652 KASSERT(0); 1653 return (paddr_t) -1; 1654 } 1655 1656 /* 1657 * pmap_copy: [ INTERFACE ] 1658 * 1659 * Copy the mapping range specified by src_addr/len 1660 * from the source map to the range dst_addr/len 1661 * in the destination map. 1662 * 1663 * This routine is only advisory and need not do anything. 1664 */ 1665 void 1666 pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vaddr_t dst_addr, vsize_t len, 1667 vaddr_t src_addr) 1668 { 1669 1670 PMAP_DPRINTF(PDB_FOLLOW, 1671 ("pmap_copy(%p, %p, %lx, %lx, %lx)\n", 1672 dst_pmap, src_pmap, dst_addr, len, src_addr)); 1673 } 1674 1675 /* 1676 * pmap_collect1(): 1677 * 1678 * Garbage-collect KPT pages. Helper for the above (bogus) 1679 * pmap_collect(). 1680 * 1681 * Note: THIS SHOULD GO AWAY, AND BE REPLACED WITH A BETTER 1682 * WAY OF HANDLING PT PAGES! 1683 */ 1684 static inline void 1685 pmap_collect1(pmap_t pmap, paddr_t startpa, paddr_t endpa) 1686 { 1687 paddr_t pa; 1688 struct pv_header *pvh; 1689 struct pv_entry *pv; 1690 pt_entry_t *pte; 1691 paddr_t kpa; 1692 #ifdef DEBUG 1693 st_entry_t *ste; 1694 int opmapdebug = 0; 1695 #endif 1696 1697 for (pa = startpa; pa < endpa; pa += PAGE_SIZE) { 1698 struct kpt_page *kpt, **pkpt; 1699 1700 /* 1701 * Locate physical pages which are being used as kernel 1702 * page table pages. 1703 */ 1704 1705 pvh = pa_to_pvh(pa); 1706 pv = &pvh->pvh_first; 1707 if (pv->pv_pmap != pmap_kernel() || 1708 !(pvh->pvh_attrs & PVH_PTPAGE)) 1709 continue; 1710 do { 1711 if (pv->pv_ptste && pv->pv_ptpmap == pmap_kernel()) 1712 break; 1713 } while ((pv = pv->pv_next)); 1714 if (pv == NULL) 1715 continue; 1716 #ifdef DEBUG 1717 if (pv->pv_va < (vaddr_t)Sysmap || 1718 pv->pv_va >= (vaddr_t)Sysmap + M68K_MAX_PTSIZE) { 1719 printf("collect: kernel PT VA out of range\n"); 1720 pmap_pvdump(pa); 1721 continue; 1722 } 1723 #endif 1724 pte = (pt_entry_t *)(pv->pv_va + PAGE_SIZE); 1725 while (--pte >= (pt_entry_t *)pv->pv_va && *pte == PG_NV) 1726 ; 1727 if (pte >= (pt_entry_t *)pv->pv_va) 1728 continue; 1729 1730 #ifdef DEBUG 1731 if (pmapdebug & (PDB_PTPAGE|PDB_COLLECT)) { 1732 printf("collect: freeing KPT page at %lx (ste %x@%p)\n", 1733 pv->pv_va, *pv->pv_ptste, pv->pv_ptste); 1734 opmapdebug = pmapdebug; 1735 pmapdebug |= PDB_PTPAGE; 1736 } 1737 1738 ste = pv->pv_ptste; 1739 #endif 1740 /* 1741 * If all entries were invalid we can remove the page. 1742 * We call pmap_remove_entry to take care of invalidating 1743 * ST and Sysptmap entries. 1744 */ 1745 1746 if (!pmap_extract(pmap, pv->pv_va, &kpa)) { 1747 printf("collect: freeing KPT page at %lx (ste %x@%p)\n", 1748 pv->pv_va, *pv->pv_ptste, pv->pv_ptste); 1749 panic("pmap_collect: mapping not found"); 1750 } 1751 pmap_remove_mapping(pmap, pv->pv_va, NULL, 1752 PRM_TFLUSH|PRM_CFLUSH, NULL); 1753 1754 /* 1755 * Use the physical address to locate the original 1756 * (kmem_alloc assigned) address for the page and put 1757 * that page back on the free list. 1758 */ 1759 1760 for (pkpt = &kpt_used_list, kpt = *pkpt; 1761 kpt != NULL; 1762 pkpt = &kpt->kpt_next, kpt = *pkpt) 1763 if (kpt->kpt_pa == kpa) 1764 break; 1765 #ifdef DEBUG 1766 if (kpt == NULL) 1767 panic("pmap_collect: lost a KPT page"); 1768 if (pmapdebug & (PDB_PTPAGE|PDB_COLLECT)) 1769 printf("collect: %lx (%lx) to free list\n", 1770 kpt->kpt_va, kpa); 1771 #endif 1772 *pkpt = kpt->kpt_next; 1773 kpt->kpt_next = kpt_free_list; 1774 kpt_free_list = kpt; 1775 #ifdef DEBUG 1776 if (pmapdebug & (PDB_PTPAGE|PDB_COLLECT)) 1777 pmapdebug = opmapdebug; 1778 1779 if (*ste != SG_NV) 1780 printf("collect: kernel STE at %p still valid (%x)\n", 1781 ste, *ste); 1782 ste = &Sysptmap[ste - pmap_ste(pmap_kernel(), 0)]; 1783 if (*ste != SG_NV) 1784 printf("collect: kernel PTmap at %p still valid (%x)\n", 1785 ste, *ste); 1786 #endif 1787 } 1788 } 1789 1790 /* 1791 * pmap_collect: 1792 * 1793 * Helper for pmap_enter_ptpage(). 1794 * 1795 * Garbage collects the physical map system for pages which are no 1796 * longer used. Success need not be guaranteed -- that is, there 1797 * may well be pages which are not referenced, but others may be 1798 * collected. 1799 */ 1800 static void 1801 pmap_collect(void) 1802 { 1803 int s; 1804 uvm_physseg_t bank; 1805 1806 /* 1807 * XXX This is very bogus. We should handle kernel PT 1808 * XXX pages much differently. 1809 */ 1810 1811 s = splvm(); 1812 for (bank = uvm_physseg_get_first(); 1813 uvm_physseg_valid_p(bank); 1814 bank = uvm_physseg_get_next(bank)) { 1815 pmap_collect1(pmap_kernel(), ptoa(uvm_physseg_get_start(bank)), 1816 ptoa(uvm_physseg_get_end(bank))); 1817 } 1818 splx(s); 1819 } 1820 1821 /* 1822 * pmap_zero_page: [ INTERFACE ] 1823 * 1824 * Zero the specified (machine independent) page by mapping the page 1825 * into virtual memory and using memset to clear its contents, one 1826 * machine dependent page at a time. 1827 * 1828 * Note: WE DO NOT CURRENTLY LOCK THE TEMPORARY ADDRESSES! 1829 * (Actually, we go to splvm(), and since we don't 1830 * support multiple processors, this is sufficient.) 1831 */ 1832 void 1833 pmap_zero_page(paddr_t phys) 1834 { 1835 int npte; 1836 1837 PMAP_DPRINTF(PDB_FOLLOW, ("pmap_zero_page(%lx)\n", phys)); 1838 1839 npte = phys | PG_V; 1840 #ifdef CACHE_HAVE_VAC 1841 if (pmap_aliasmask) { 1842 1843 /* 1844 * Cache-inhibit the mapping on VAC machines, as we would 1845 * be wasting the cache load. 1846 */ 1847 1848 npte |= PG_CI; 1849 } 1850 #endif 1851 1852 #if defined(M68040) || defined(M68060) 1853 #if defined(M68020) || defined(M68030) 1854 if (mmutype == MMU_68040) 1855 #endif 1856 { 1857 /* 1858 * Set copyback caching on the page; this is required 1859 * for cache consistency (since regular mappings are 1860 * copyback as well). 1861 */ 1862 1863 npte |= PG_CCB; 1864 } 1865 #endif 1866 1867 *caddr1_pte = npte; 1868 TBIS((vaddr_t)CADDR1); 1869 1870 zeropage(CADDR1); 1871 1872 #ifdef DEBUG 1873 *caddr1_pte = PG_NV; 1874 TBIS((vaddr_t)CADDR1); 1875 #endif 1876 } 1877 1878 /* 1879 * pmap_copy_page: [ INTERFACE ] 1880 * 1881 * Copy the specified (machine independent) page by mapping the page 1882 * into virtual memory and using memcpy to copy the page, one machine 1883 * dependent page at a time. 1884 * 1885 * Note: WE DO NOT CURRENTLY LOCK THE TEMPORARY ADDRESSES! 1886 * (Actually, we go to splvm(), and since we don't 1887 * support multiple processors, this is sufficient.) 1888 */ 1889 void 1890 pmap_copy_page(paddr_t src, paddr_t dst) 1891 { 1892 pt_entry_t npte1, npte2; 1893 1894 PMAP_DPRINTF(PDB_FOLLOW, ("pmap_copy_page(%lx, %lx)\n", src, dst)); 1895 1896 npte1 = src | PG_RO | PG_V; 1897 npte2 = dst | PG_V; 1898 #ifdef CACHE_HAVE_VAC 1899 if (pmap_aliasmask) { 1900 1901 /* 1902 * Cache-inhibit the mapping on VAC machines, as we would 1903 * be wasting the cache load. 1904 */ 1905 1906 npte1 |= PG_CI; 1907 npte2 |= PG_CI; 1908 } 1909 #endif 1910 1911 #if defined(M68040) || defined(M68060) 1912 #if defined(M68020) || defined(M68030) 1913 if (mmutype == MMU_68040) 1914 #endif 1915 { 1916 /* 1917 * Set copyback caching on the pages; this is required 1918 * for cache consistency (since regular mappings are 1919 * copyback as well). 1920 */ 1921 1922 npte1 |= PG_CCB; 1923 npte2 |= PG_CCB; 1924 } 1925 #endif 1926 1927 *caddr1_pte = npte1; 1928 TBIS((vaddr_t)CADDR1); 1929 1930 *caddr2_pte = npte2; 1931 TBIS((vaddr_t)CADDR2); 1932 1933 copypage(CADDR1, CADDR2); 1934 1935 #ifdef DEBUG 1936 *caddr1_pte = PG_NV; 1937 TBIS((vaddr_t)CADDR1); 1938 1939 *caddr2_pte = PG_NV; 1940 TBIS((vaddr_t)CADDR2); 1941 #endif 1942 } 1943 1944 /* 1945 * pmap_clear_modify: [ INTERFACE ] 1946 * 1947 * Clear the modify bits on the specified physical page. 1948 */ 1949 bool 1950 pmap_clear_modify(struct vm_page *pg) 1951 { 1952 paddr_t pa = VM_PAGE_TO_PHYS(pg); 1953 1954 PMAP_DPRINTF(PDB_FOLLOW, ("pmap_clear_modify(%p)\n", pg)); 1955 1956 return pmap_changebit(pa, 0, (pt_entry_t)~PG_M); 1957 } 1958 1959 /* 1960 * pmap_clear_reference: [ INTERFACE ] 1961 * 1962 * Clear the reference bit on the specified physical page. 1963 */ 1964 bool 1965 pmap_clear_reference(struct vm_page *pg) 1966 { 1967 paddr_t pa = VM_PAGE_TO_PHYS(pg); 1968 1969 PMAP_DPRINTF(PDB_FOLLOW, ("pmap_clear_reference(%p)\n", pg)); 1970 1971 return pmap_changebit(pa, 0, (pt_entry_t)~PG_U); 1972 } 1973 1974 /* 1975 * pmap_is_referenced: [ INTERFACE ] 1976 * 1977 * Return whether or not the specified physical page is referenced 1978 * by any physical maps. 1979 */ 1980 bool 1981 pmap_is_referenced(struct vm_page *pg) 1982 { 1983 paddr_t pa = VM_PAGE_TO_PHYS(pg); 1984 1985 return pmap_testbit(pa, PG_U); 1986 } 1987 1988 /* 1989 * pmap_is_modified: [ INTERFACE ] 1990 * 1991 * Return whether or not the specified physical page is modified 1992 * by any physical maps. 1993 */ 1994 bool 1995 pmap_is_modified(struct vm_page *pg) 1996 { 1997 paddr_t pa = VM_PAGE_TO_PHYS(pg); 1998 1999 return pmap_testbit(pa, PG_M); 2000 } 2001 2002 /* 2003 * pmap_phys_address: [ INTERFACE ] 2004 * 2005 * Return the physical address corresponding to the specified 2006 * cookie. Used by the device pager to decode a device driver's 2007 * mmap entry point return value. 2008 * 2009 * Note: no locking is necessary in this function. 2010 */ 2011 paddr_t 2012 pmap_phys_address(paddr_t ppn) 2013 { 2014 return m68k_ptob(ppn); 2015 } 2016 2017 #ifdef CACHE_HAVE_VAC 2018 /* 2019 * pmap_prefer: [ INTERFACE ] 2020 * 2021 * Find the first virtual address >= *vap that does not 2022 * cause a virtually-addressed cache alias problem. 2023 */ 2024 void 2025 pmap_prefer(vaddr_t foff, vaddr_t *vap) 2026 { 2027 vaddr_t va; 2028 vsize_t d; 2029 2030 #ifdef M68K_MMU_MOTOROLA 2031 if (pmap_aliasmask) 2032 #endif 2033 { 2034 va = *vap; 2035 d = foff - va; 2036 d &= pmap_aliasmask; 2037 *vap = va + d; 2038 } 2039 } 2040 #endif /* CACHE_HAVE_VAC */ 2041 2042 /* 2043 * Miscellaneous support routines follow 2044 */ 2045 2046 /* 2047 * pmap_remove_mapping: 2048 * 2049 * Invalidate a single page denoted by pmap/va. 2050 * 2051 * If (pte != NULL), it is the already computed PTE for the page. 2052 * 2053 * If (flags & PRM_TFLUSH), we must invalidate any TLB information. 2054 * 2055 * If (flags & PRM_CFLUSH), we must flush/invalidate any cache 2056 * information. 2057 * 2058 * If (flags & PRM_KEEPPTPAGE), we don't free the page table page 2059 * if the reference drops to zero. 2060 */ 2061 /* static */ 2062 void 2063 pmap_remove_mapping(pmap_t pmap, vaddr_t va, pt_entry_t *pte, int flags, 2064 struct pv_entry **opvp) 2065 { 2066 paddr_t pa; 2067 struct pv_header *pvh; 2068 struct pv_entry *pv, *npv, *opv = NULL; 2069 struct pmap *ptpmap; 2070 st_entry_t *ste; 2071 int s, bits; 2072 #ifdef DEBUG 2073 pt_entry_t opte; 2074 #endif 2075 2076 PMAP_DPRINTF(PDB_FOLLOW|PDB_REMOVE|PDB_PROTECT, 2077 ("pmap_remove_mapping(%p, %lx, %p, %x, %p)\n", 2078 pmap, va, pte, flags, opvp)); 2079 2080 /* 2081 * PTE not provided, compute it from pmap and va. 2082 */ 2083 2084 if (pte == NULL) { 2085 pte = pmap_pte(pmap, va); 2086 if (*pte == PG_NV) 2087 return; 2088 } 2089 2090 #ifdef CACHE_HAVE_VAC 2091 if (pmap_aliasmask && (flags & PRM_CFLUSH)) { 2092 2093 /* 2094 * Purge kernel side of VAC to ensure we get the correct 2095 * state of any hardware maintained bits. 2096 */ 2097 2098 DCIS(); 2099 2100 /* 2101 * If this is a non-CI user mapping for the current process, 2102 * flush the VAC. Note that the kernel side was flushed 2103 * above so we don't worry about non-CI kernel mappings. 2104 */ 2105 2106 if (active_user_pmap(pmap) && !pmap_pte_ci(pte)) { 2107 DCIU(); 2108 } 2109 } 2110 #endif 2111 2112 pa = pmap_pte_pa(pte); 2113 #ifdef DEBUG 2114 opte = *pte; 2115 #endif 2116 2117 /* 2118 * Update statistics 2119 */ 2120 2121 if (pmap_pte_w(pte)) 2122 pmap->pm_stats.wired_count--; 2123 pmap->pm_stats.resident_count--; 2124 2125 #if defined(M68040) || defined(M68060) 2126 #if defined(M68020) || defined(M68030) 2127 if (mmutype == MMU_68040) 2128 #endif 2129 if ((flags & PRM_CFLUSH)) { 2130 DCFP(pa); 2131 ICPP(pa); 2132 } 2133 #endif 2134 2135 /* 2136 * Invalidate the PTE after saving the reference modify info. 2137 */ 2138 2139 PMAP_DPRINTF(PDB_REMOVE, ("remove: invalidating pte at %p\n", pte)); 2140 bits = *pte & (PG_U|PG_M); 2141 *pte = PG_NV; 2142 if ((flags & PRM_TFLUSH) && active_pmap(pmap)) 2143 TBIS(va); 2144 2145 /* 2146 * For user mappings decrement the wiring count on 2147 * the PT page. 2148 */ 2149 2150 if (pmap != pmap_kernel()) { 2151 vaddr_t ptpva = trunc_page((vaddr_t)pte); 2152 int refs = pmap_ptpage_delref(ptpva); 2153 #ifdef DEBUG 2154 if (pmapdebug & PDB_WIRING) 2155 pmap_check_wiring("remove", ptpva); 2156 #endif 2157 2158 /* 2159 * If reference count drops to 0, and we're not instructed 2160 * to keep it around, free the PT page. 2161 */ 2162 2163 if (refs == 0 && (flags & PRM_KEEPPTPAGE) == 0) { 2164 #ifdef DIAGNOSTIC 2165 struct pv_header *ptppvh; 2166 struct pv_entry *ptppv; 2167 #endif 2168 paddr_t ptppa; 2169 2170 ptppa = pmap_pte_pa(pmap_pte(pmap_kernel(), ptpva)); 2171 #ifdef DIAGNOSTIC 2172 if (PAGE_IS_MANAGED(ptppa) == 0) 2173 panic("pmap_remove_mapping: unmanaged PT page"); 2174 ptppvh = pa_to_pvh(ptppa); 2175 ptppv = &ptppvh->pvh_first; 2176 if (ptppv->pv_ptste == NULL) 2177 panic("pmap_remove_mapping: ptste == NULL"); 2178 if (ptppv->pv_pmap != pmap_kernel() || 2179 ptppv->pv_va != ptpva || 2180 ptppv->pv_next != NULL) 2181 panic("pmap_remove_mapping: " 2182 "bad PT page pmap %p, va 0x%lx, next %p", 2183 ptppv->pv_pmap, ptppv->pv_va, 2184 ptppv->pv_next); 2185 #endif 2186 pmap_remove_mapping(pmap_kernel(), ptpva, 2187 NULL, PRM_TFLUSH|PRM_CFLUSH, NULL); 2188 rw_enter(uvm_kernel_object->vmobjlock, RW_WRITER); 2189 uvm_pagefree(PHYS_TO_VM_PAGE(ptppa)); 2190 rw_exit(uvm_kernel_object->vmobjlock); 2191 PMAP_DPRINTF(PDB_REMOVE|PDB_PTPAGE, 2192 ("remove: PT page 0x%lx (0x%lx) freed\n", 2193 ptpva, ptppa)); 2194 } 2195 } 2196 2197 /* 2198 * If this isn't a managed page, we are all done. 2199 */ 2200 2201 if (PAGE_IS_MANAGED(pa) == 0) 2202 return; 2203 2204 /* 2205 * Otherwise remove it from the PV table 2206 * (raise IPL since we may be called at interrupt time). 2207 */ 2208 2209 pvh = pa_to_pvh(pa); 2210 pv = &pvh->pvh_first; 2211 ste = NULL; 2212 s = splvm(); 2213 2214 /* 2215 * If it is the first entry on the list, it is actually 2216 * in the header and we must copy the following entry up 2217 * to the header. Otherwise we must search the list for 2218 * the entry. In either case we free the now unused entry. 2219 */ 2220 2221 if (pmap == pv->pv_pmap && va == pv->pv_va) { 2222 ste = pv->pv_ptste; 2223 ptpmap = pv->pv_ptpmap; 2224 npv = pv->pv_next; 2225 if (npv) { 2226 *pv = *npv; 2227 opv = npv; 2228 } else 2229 pv->pv_pmap = NULL; 2230 } else { 2231 for (npv = pv->pv_next; npv; npv = npv->pv_next) { 2232 if (pmap == npv->pv_pmap && va == npv->pv_va) 2233 break; 2234 pv = npv; 2235 } 2236 #ifdef DEBUG 2237 if (npv == NULL) 2238 panic("pmap_remove: PA not in pv_tab"); 2239 #endif 2240 ste = npv->pv_ptste; 2241 ptpmap = npv->pv_ptpmap; 2242 pv->pv_next = npv->pv_next; 2243 opv = npv; 2244 pvh = pa_to_pvh(pa); 2245 pv = &pvh->pvh_first; 2246 } 2247 2248 #ifdef CACHE_HAVE_VAC 2249 2250 /* 2251 * If only one mapping left we no longer need to cache inhibit 2252 */ 2253 2254 if (pmap_aliasmask && 2255 pv->pv_pmap && pv->pv_next == NULL && (pvh->pvh_attrs & PVH_CI)) { 2256 PMAP_DPRINTF(PDB_CACHE, 2257 ("remove: clearing CI for pa %lx\n", pa)); 2258 pvh->pvh_attrs &= ~PVH_CI; 2259 pmap_changebit(pa, 0, (pt_entry_t)~PG_CI); 2260 #ifdef DEBUG 2261 if ((pmapdebug & (PDB_CACHE|PDB_PVDUMP)) == 2262 (PDB_CACHE|PDB_PVDUMP)) 2263 pmap_pvdump(pa); 2264 #endif 2265 } 2266 #endif 2267 2268 /* 2269 * If this was a PT page we must also remove the 2270 * mapping from the associated segment table. 2271 */ 2272 2273 if (ste) { 2274 PMAP_DPRINTF(PDB_REMOVE|PDB_PTPAGE, 2275 ("remove: ste was %x@%p pte was %x@%p\n", 2276 *ste, ste, opte, pmap_pte(pmap, va))); 2277 #if defined(M68040) || defined(M68060) 2278 #if defined(M68020) || defined(M68030) 2279 if (mmutype == MMU_68040) 2280 #endif 2281 { 2282 st_entry_t *este = &ste[NPTEPG/SG4_LEV3SIZE]; 2283 2284 while (ste < este) 2285 *ste++ = SG_NV; 2286 #ifdef DEBUG 2287 ste -= NPTEPG/SG4_LEV3SIZE; 2288 #endif 2289 } 2290 #if defined(M68020) || defined(M68030) 2291 else 2292 #endif 2293 #endif 2294 #if defined(M68020) || defined(M68030) 2295 *ste = SG_NV; 2296 #endif 2297 2298 /* 2299 * If it was a user PT page, we decrement the 2300 * reference count on the segment table as well, 2301 * freeing it if it is now empty. 2302 */ 2303 2304 if (ptpmap != pmap_kernel()) { 2305 PMAP_DPRINTF(PDB_REMOVE|PDB_SEGTAB, 2306 ("remove: stab %p, refcnt %d\n", 2307 ptpmap->pm_stab, ptpmap->pm_sref - 1)); 2308 #ifdef DEBUG 2309 if ((pmapdebug & PDB_PARANOIA) && 2310 ptpmap->pm_stab != 2311 (st_entry_t *)trunc_page((vaddr_t)ste)) 2312 panic("remove: bogus ste"); 2313 #endif 2314 if (--(ptpmap->pm_sref) == 0) { 2315 PMAP_DPRINTF(PDB_REMOVE|PDB_SEGTAB, 2316 ("remove: free stab %p\n", 2317 ptpmap->pm_stab)); 2318 uvm_km_free(st_map, (vaddr_t)ptpmap->pm_stab, 2319 M68K_STSIZE, UVM_KMF_WIRED); 2320 ptpmap->pm_stab = Segtabzero; 2321 ptpmap->pm_stpa = Segtabzeropa; 2322 #if defined(M68040) || defined(M68060) 2323 #if defined(M68020) || defined(M68030) 2324 if (mmutype == MMU_68040) 2325 #endif 2326 ptpmap->pm_stfree = protostfree; 2327 #endif 2328 /* 2329 * Segment table has changed; reload the 2330 * MMU if it's the active user pmap. 2331 */ 2332 if (active_user_pmap(ptpmap)) { 2333 pmap_load_urp((paddr_t)ptpmap->pm_stpa); 2334 } 2335 } 2336 } 2337 pvh->pvh_attrs &= ~PVH_PTPAGE; 2338 ptpmap->pm_ptpages--; 2339 } 2340 2341 /* 2342 * Update saved attributes for managed page 2343 */ 2344 2345 pvh->pvh_attrs |= bits; 2346 splx(s); 2347 2348 if (opvp != NULL) 2349 *opvp = opv; 2350 else if (opv != NULL) 2351 pmap_free_pv(opv); 2352 } 2353 2354 /* 2355 * pmap_testbit: 2356 * 2357 * Test the modified/referenced bits of a physical page. 2358 */ 2359 /* static */ 2360 bool 2361 pmap_testbit(paddr_t pa, int bit) 2362 { 2363 struct pv_header *pvh; 2364 struct pv_entry *pv; 2365 pt_entry_t *pte; 2366 int s; 2367 2368 pvh = pa_to_pvh(pa); 2369 pv = &pvh->pvh_first; 2370 s = splvm(); 2371 2372 /* 2373 * Check saved info first 2374 */ 2375 2376 if (pvh->pvh_attrs & bit) { 2377 splx(s); 2378 return true; 2379 } 2380 2381 #ifdef CACHE_HAVE_VAC 2382 2383 /* 2384 * Flush VAC to get correct state of any hardware maintained bits. 2385 */ 2386 2387 if (pmap_aliasmask && (bit & (PG_U|PG_M))) 2388 DCIS(); 2389 #endif 2390 2391 /* 2392 * Not found. Check current mappings, returning immediately if 2393 * found. Cache a hit to speed future lookups. 2394 */ 2395 2396 if (pv->pv_pmap != NULL) { 2397 for (; pv; pv = pv->pv_next) { 2398 pte = pmap_pte(pv->pv_pmap, pv->pv_va); 2399 if (*pte & bit) { 2400 pvh->pvh_attrs |= bit; 2401 splx(s); 2402 return true; 2403 } 2404 } 2405 } 2406 splx(s); 2407 return false; 2408 } 2409 2410 /* 2411 * pmap_changebit: 2412 * 2413 * Change the modified/referenced bits, or other PTE bits, 2414 * for a physical page. 2415 */ 2416 /* static */ 2417 bool 2418 pmap_changebit(paddr_t pa, pt_entry_t set, pt_entry_t mask) 2419 { 2420 struct pv_header *pvh; 2421 struct pv_entry *pv; 2422 pt_entry_t *pte, npte; 2423 vaddr_t va; 2424 int s; 2425 #if defined(CACHE_HAVE_VAC) || defined(M68040) || defined(M68060) 2426 bool firstpage = true; 2427 #endif 2428 bool r; 2429 2430 PMAP_DPRINTF(PDB_BITS, 2431 ("pmap_changebit(%lx, %x, %x)\n", pa, set, mask)); 2432 2433 pvh = pa_to_pvh(pa); 2434 pv = &pvh->pvh_first; 2435 s = splvm(); 2436 2437 /* 2438 * Clear saved attributes (modify, reference) 2439 */ 2440 2441 r = (pvh->pvh_attrs & ~mask) != 0; 2442 pvh->pvh_attrs &= mask; 2443 2444 /* 2445 * Loop over all current mappings setting/clearing as appropriate 2446 * If setting RO do we need to clear the VAC? 2447 */ 2448 2449 if (pv->pv_pmap != NULL) { 2450 #ifdef DEBUG 2451 int toflush = 0; 2452 #endif 2453 for (; pv; pv = pv->pv_next) { 2454 #ifdef DEBUG 2455 toflush |= (pv->pv_pmap == pmap_kernel()) ? 2 : 1; 2456 #endif 2457 va = pv->pv_va; 2458 pte = pmap_pte(pv->pv_pmap, va); 2459 #ifdef CACHE_HAVE_VAC 2460 2461 /* 2462 * Flush VAC to ensure we get correct state of HW bits 2463 * so we don't clobber them. 2464 */ 2465 2466 if (firstpage && pmap_aliasmask) { 2467 firstpage = false; 2468 DCIS(); 2469 } 2470 #endif 2471 npte = (*pte | set) & mask; 2472 if (*pte != npte) { 2473 r = true; 2474 #if defined(M68040) || defined(M68060) 2475 /* 2476 * If we are changing caching status or 2477 * protection make sure the caches are 2478 * flushed (but only once). 2479 */ 2480 if (firstpage && 2481 #if defined(M68020) || defined(M68030) 2482 (mmutype == MMU_68040) && 2483 #endif 2484 ((set == PG_RO) || 2485 (set & PG_CMASK) || 2486 (mask & PG_CMASK) == 0)) { 2487 firstpage = false; 2488 DCFP(pa); 2489 ICPP(pa); 2490 } 2491 #endif 2492 *pte = npte; 2493 if (active_pmap(pv->pv_pmap)) 2494 TBIS(va); 2495 } 2496 } 2497 } 2498 splx(s); 2499 return r; 2500 } 2501 2502 /* 2503 * pmap_enter_ptpage: 2504 * 2505 * Allocate and map a PT page for the specified pmap/va pair. 2506 */ 2507 /* static */ 2508 int 2509 pmap_enter_ptpage(pmap_t pmap, vaddr_t va, bool can_fail) 2510 { 2511 paddr_t ptpa; 2512 struct vm_page *pg; 2513 struct pv_header *pvh; 2514 struct pv_entry *pv; 2515 st_entry_t *ste; 2516 int s; 2517 2518 PMAP_DPRINTF(PDB_FOLLOW|PDB_ENTER|PDB_PTPAGE, 2519 ("pmap_enter_ptpage: pmap %p, va %lx\n", pmap, va)); 2520 2521 /* 2522 * Allocate a segment table if necessary. Note that it is allocated 2523 * from a private map and not pt_map. This keeps user page tables 2524 * aligned on segment boundaries in the kernel address space. 2525 * The segment table is wired down. It will be freed whenever the 2526 * reference count drops to zero. 2527 */ 2528 if (pmap->pm_stab == Segtabzero) { 2529 pmap->pm_stab = (st_entry_t *) 2530 uvm_km_alloc(st_map, M68K_STSIZE, 0, 2531 UVM_KMF_WIRED | UVM_KMF_ZERO | 2532 (can_fail ? UVM_KMF_NOWAIT : 0)); 2533 if (pmap->pm_stab == NULL) { 2534 pmap->pm_stab = Segtabzero; 2535 return ENOMEM; 2536 } 2537 (void) pmap_extract(pmap_kernel(), (vaddr_t)pmap->pm_stab, 2538 (paddr_t *)&pmap->pm_stpa); 2539 #if defined(M68040) || defined(M68060) 2540 #if defined(M68020) || defined(M68030) 2541 if (mmutype == MMU_68040) 2542 #endif 2543 { 2544 pt_entry_t *pte; 2545 2546 pte = pmap_pte(pmap_kernel(), pmap->pm_stab); 2547 *pte = (*pte & ~PG_CMASK) | PG_CI; 2548 pmap->pm_stfree = protostfree; 2549 } 2550 #endif 2551 /* 2552 * Segment table has changed; reload the 2553 * MMU if it's the active user pmap. 2554 */ 2555 if (active_user_pmap(pmap)) { 2556 pmap_load_urp((paddr_t)pmap->pm_stpa); 2557 } 2558 2559 PMAP_DPRINTF(PDB_ENTER|PDB_PTPAGE|PDB_SEGTAB, 2560 ("enter: pmap %p stab %p(%p)\n", 2561 pmap, pmap->pm_stab, pmap->pm_stpa)); 2562 } 2563 2564 ste = pmap_ste(pmap, va); 2565 #if defined(M68040) || defined(M68060) 2566 /* 2567 * Allocate level 2 descriptor block if necessary 2568 */ 2569 #if defined(M68020) || defined(M68030) 2570 if (mmutype == MMU_68040) 2571 #endif 2572 { 2573 if (*ste == SG_NV) { 2574 int ix; 2575 void *addr; 2576 2577 ix = bmtol2(pmap->pm_stfree); 2578 if (ix == -1) 2579 panic("enter: out of address space"); /* XXX */ 2580 pmap->pm_stfree &= ~l2tobm(ix); 2581 addr = (void *)&pmap->pm_stab[ix*SG4_LEV2SIZE]; 2582 memset(addr, 0, SG4_LEV2SIZE*sizeof(st_entry_t)); 2583 addr = (void *)&pmap->pm_stpa[ix*SG4_LEV2SIZE]; 2584 *ste = (u_int)addr | SG_RW | SG_U | SG_V; 2585 2586 PMAP_DPRINTF(PDB_ENTER|PDB_PTPAGE|PDB_SEGTAB, 2587 ("enter: alloc ste2 %d(%p)\n", ix, addr)); 2588 } 2589 ste = pmap_ste2(pmap, va); 2590 /* 2591 * Since a level 2 descriptor maps a block of SG4_LEV3SIZE 2592 * level 3 descriptors, we need a chunk of NPTEPG/SG4_LEV3SIZE 2593 * (16) such descriptors (PAGE_SIZE/SG4_LEV3SIZE bytes) to map a 2594 * PT page--the unit of allocation. We set `ste' to point 2595 * to the first entry of that chunk which is validated in its 2596 * entirety below. 2597 */ 2598 ste = (st_entry_t *)((int)ste & ~(PAGE_SIZE/SG4_LEV3SIZE-1)); 2599 2600 PMAP_DPRINTF(PDB_ENTER|PDB_PTPAGE|PDB_SEGTAB, 2601 ("enter: ste2 %p (%p)\n", pmap_ste2(pmap, va), ste)); 2602 } 2603 #endif 2604 va = trunc_page((vaddr_t)pmap_pte(pmap, va)); 2605 2606 /* 2607 * In the kernel we allocate a page from the kernel PT page 2608 * free list and map it into the kernel page table map (via 2609 * pmap_enter). 2610 */ 2611 if (pmap == pmap_kernel()) { 2612 struct kpt_page *kpt; 2613 2614 s = splvm(); 2615 if ((kpt = kpt_free_list) == NULL) { 2616 /* 2617 * No PT pages available. 2618 * Try once to free up unused ones. 2619 */ 2620 PMAP_DPRINTF(PDB_COLLECT, 2621 ("enter: no KPT pages, collecting...\n")); 2622 pmap_collect(); 2623 if ((kpt = kpt_free_list) == NULL) 2624 panic("pmap_enter_ptpage: can't get KPT page"); 2625 } 2626 kpt_free_list = kpt->kpt_next; 2627 kpt->kpt_next = kpt_used_list; 2628 kpt_used_list = kpt; 2629 ptpa = kpt->kpt_pa; 2630 memset((void *)kpt->kpt_va, 0, PAGE_SIZE); 2631 pmap_enter(pmap, va, ptpa, VM_PROT_READ | VM_PROT_WRITE, 2632 VM_PROT_READ | VM_PROT_WRITE | PMAP_WIRED); 2633 pmap_update(pmap); 2634 #ifdef DEBUG 2635 if (pmapdebug & (PDB_ENTER|PDB_PTPAGE)) { 2636 int ix = pmap_ste(pmap, va) - pmap_ste(pmap, 0); 2637 2638 printf("enter: add &Sysptmap[%d]: %x (KPT page %lx)\n", 2639 ix, Sysptmap[ix], kpt->kpt_va); 2640 } 2641 #endif 2642 splx(s); 2643 } else { 2644 2645 /* 2646 * For user processes we just allocate a page from the 2647 * VM system. Note that we set the page "wired" count to 1, 2648 * which is what we use to check if the page can be freed. 2649 * See pmap_remove_mapping(). 2650 * 2651 * Count the segment table reference first so that we won't 2652 * lose the segment table when low on memory. 2653 */ 2654 2655 pmap->pm_sref++; 2656 PMAP_DPRINTF(PDB_ENTER|PDB_PTPAGE, 2657 ("enter: about to alloc UPT pg at %lx\n", va)); 2658 rw_enter(uvm_kernel_object->vmobjlock, RW_WRITER); 2659 while ((pg = uvm_pagealloc(uvm_kernel_object, 2660 va - vm_map_min(kernel_map), 2661 NULL, UVM_PGA_ZERO)) == NULL) { 2662 rw_exit(uvm_kernel_object->vmobjlock); 2663 if (can_fail) { 2664 pmap->pm_sref--; 2665 return ENOMEM; 2666 } 2667 uvm_wait("ptpage"); 2668 rw_enter(uvm_kernel_object->vmobjlock, RW_WRITER); 2669 } 2670 rw_exit(uvm_kernel_object->vmobjlock); 2671 pg->flags &= ~(PG_BUSY|PG_FAKE); 2672 UVM_PAGE_OWN(pg, NULL); 2673 ptpa = VM_PAGE_TO_PHYS(pg); 2674 pmap_enter(pmap_kernel(), va, ptpa, 2675 VM_PROT_READ | VM_PROT_WRITE, 2676 VM_PROT_READ | VM_PROT_WRITE | PMAP_WIRED); 2677 pmap_update(pmap_kernel()); 2678 } 2679 #if defined(M68040) || defined(M68060) 2680 /* 2681 * Turn off copyback caching of page table pages, 2682 * could get ugly otherwise. 2683 */ 2684 #if defined(M68020) || defined(M68030) 2685 if (mmutype == MMU_68040) 2686 #endif 2687 { 2688 #ifdef DEBUG 2689 pt_entry_t *pte = pmap_pte(pmap_kernel(), va); 2690 if ((pmapdebug & PDB_PARANOIA) && (*pte & PG_CCB) == 0) 2691 printf("%s PT no CCB: kva=%lx ptpa=%lx pte@%p=%x\n", 2692 pmap == pmap_kernel() ? "Kernel" : "User", 2693 va, ptpa, pte, *pte); 2694 #endif 2695 if (pmap_changebit(ptpa, PG_CI, (pt_entry_t)~PG_CCB)) 2696 DCIS(); 2697 } 2698 #endif 2699 /* 2700 * Locate the PV entry in the kernel for this PT page and 2701 * record the STE address. This is so that we can invalidate 2702 * the STE when we remove the mapping for the page. 2703 */ 2704 pvh = pa_to_pvh(ptpa); 2705 s = splvm(); 2706 if (pvh) { 2707 pv = &pvh->pvh_first; 2708 pvh->pvh_attrs |= PVH_PTPAGE; 2709 do { 2710 if (pv->pv_pmap == pmap_kernel() && pv->pv_va == va) 2711 break; 2712 } while ((pv = pv->pv_next)); 2713 } else { 2714 pv = NULL; 2715 } 2716 #ifdef DEBUG 2717 if (pv == NULL) 2718 panic("pmap_enter_ptpage: PT page not entered"); 2719 #endif 2720 pv->pv_ptste = ste; 2721 pv->pv_ptpmap = pmap; 2722 2723 PMAP_DPRINTF(PDB_ENTER|PDB_PTPAGE, 2724 ("enter: new PT page at PA %lx, ste at %p\n", ptpa, ste)); 2725 2726 /* 2727 * Map the new PT page into the segment table. 2728 * Also increment the reference count on the segment table if this 2729 * was a user page table page. Note that we don't use vm_map_pageable 2730 * to keep the count like we do for PT pages, this is mostly because 2731 * it would be difficult to identify ST pages in pmap_pageable to 2732 * release them. We also avoid the overhead of vm_map_pageable. 2733 */ 2734 #if defined(M68040) || defined(M68060) 2735 #if defined(M68020) || defined(M68030) 2736 if (mmutype == MMU_68040) 2737 #endif 2738 { 2739 st_entry_t *este; 2740 2741 for (este = &ste[NPTEPG/SG4_LEV3SIZE]; ste < este; ste++) { 2742 *ste = ptpa | SG_U | SG_RW | SG_V; 2743 ptpa += SG4_LEV3SIZE * sizeof(st_entry_t); 2744 } 2745 } 2746 #if defined(M68020) || defined(M68030) 2747 else 2748 *ste = (ptpa & SG_FRAME) | SG_RW | SG_V; 2749 #endif 2750 #else 2751 *ste = (ptpa & SG_FRAME) | SG_RW | SG_V; 2752 #endif 2753 if (pmap != pmap_kernel()) { 2754 PMAP_DPRINTF(PDB_ENTER|PDB_PTPAGE|PDB_SEGTAB, 2755 ("enter: stab %p refcnt %d\n", 2756 pmap->pm_stab, pmap->pm_sref)); 2757 } 2758 /* 2759 * Flush stale TLB info. 2760 */ 2761 if (pmap == pmap_kernel()) 2762 TBIAS(); 2763 else 2764 TBIAU(); 2765 pmap->pm_ptpages++; 2766 splx(s); 2767 2768 return 0; 2769 } 2770 2771 /* 2772 * pmap_ptpage_addref: 2773 * 2774 * Add a reference to the specified PT page. 2775 */ 2776 void 2777 pmap_ptpage_addref(vaddr_t ptpva) 2778 { 2779 struct vm_page *pg; 2780 2781 rw_enter(uvm_kernel_object->vmobjlock, RW_WRITER); 2782 pg = uvm_pagelookup(uvm_kernel_object, ptpva - vm_map_min(kernel_map)); 2783 pg->wire_count++; 2784 PMAP_DPRINTF(PDB_ENTER|PDB_PTPAGE|PDB_SEGTAB, 2785 ("ptpage addref: pg %p now %d\n", 2786 pg, pg->wire_count)); 2787 rw_exit(uvm_kernel_object->vmobjlock); 2788 } 2789 2790 /* 2791 * pmap_ptpage_delref: 2792 * 2793 * Delete a reference to the specified PT page. 2794 */ 2795 int 2796 pmap_ptpage_delref(vaddr_t ptpva) 2797 { 2798 struct vm_page *pg; 2799 int rv; 2800 2801 rw_enter(uvm_kernel_object->vmobjlock, RW_WRITER); 2802 pg = uvm_pagelookup(uvm_kernel_object, ptpva - vm_map_min(kernel_map)); 2803 rv = --pg->wire_count; 2804 PMAP_DPRINTF(PDB_ENTER|PDB_PTPAGE|PDB_SEGTAB, 2805 ("ptpage delref: pg %p now %d\n", 2806 pg, pg->wire_count)); 2807 rw_exit(uvm_kernel_object->vmobjlock); 2808 return rv; 2809 } 2810 2811 /* 2812 * Routine: pmap_procwr 2813 * 2814 * Function: 2815 * Synchronize caches corresponding to [addr, addr + len) in p. 2816 */ 2817 void 2818 pmap_procwr(struct proc *p, vaddr_t va, size_t len) 2819 { 2820 2821 (void)cachectl1(0x80000004, va, len, p); 2822 } 2823 2824 void 2825 _pmap_set_page_cacheable(pmap_t pmap, vaddr_t va) 2826 { 2827 2828 if (!pmap_ste_v(pmap, va)) 2829 return; 2830 2831 #if defined(M68040) || defined(M68060) 2832 #if defined(M68020) || defined(M68030) 2833 if (mmutype == MMU_68040) { 2834 #endif 2835 if (pmap_changebit(pmap_pte_pa(pmap_pte(pmap, va)), PG_CCB, 2836 (pt_entry_t)~PG_CI)) 2837 DCIS(); 2838 2839 #if defined(M68020) || defined(M68030) 2840 } else 2841 pmap_changebit(pmap_pte_pa(pmap_pte(pmap, va)), 0, 2842 (pt_entry_t)~PG_CI); 2843 #endif 2844 #else 2845 pmap_changebit(pmap_pte_pa(pmap_pte(pmap, va)), 0, 2846 (pt_entry_t)~PG_CI); 2847 #endif 2848 } 2849 2850 void 2851 _pmap_set_page_cacheinhibit(pmap_t pmap, vaddr_t va) 2852 { 2853 2854 if (!pmap_ste_v(pmap, va)) 2855 return; 2856 2857 #if defined(M68040) || defined(M68060) 2858 #if defined(M68020) || defined(M68030) 2859 if (mmutype == MMU_68040) { 2860 #endif 2861 if (pmap_changebit(pmap_pte_pa(pmap_pte(pmap, va)), PG_CI, 2862 (pt_entry_t)~PG_CCB)) 2863 DCIS(); 2864 #if defined(M68020) || defined(M68030) 2865 } else 2866 pmap_changebit(pmap_pte_pa(pmap_pte(pmap, va)), PG_CI, ~0); 2867 #endif 2868 #else 2869 pmap_changebit(pmap_pte_pa(pmap_pte(pmap, va)), PG_CI, ~0); 2870 #endif 2871 } 2872 2873 int 2874 _pmap_page_is_cacheable(pmap_t pmap, vaddr_t va) 2875 { 2876 2877 if (!pmap_ste_v(pmap, va)) 2878 return 0; 2879 2880 return (pmap_pte_ci(pmap_pte(pmap, va)) == 0) ? 1 : 0; 2881 } 2882 2883 #ifdef DEBUG 2884 /* 2885 * pmap_pvdump: 2886 * 2887 * Dump the contents of the PV list for the specified physical page. 2888 */ 2889 void 2890 pmap_pvdump(paddr_t pa) 2891 { 2892 struct pv_header *pvh; 2893 struct pv_entry *pv; 2894 2895 printf("pa %lx", pa); 2896 pvh = pa_to_pvh(pa); 2897 for (pv = &pvh->pvh_first; pv; pv = pv->pv_next) 2898 printf(" -> pmap %p, va %lx, ptste %p, ptpmap %p", 2899 pv->pv_pmap, pv->pv_va, pv->pv_ptste, pv->pv_ptpmap); 2900 printf("\n"); 2901 } 2902 2903 /* 2904 * pmap_check_wiring: 2905 * 2906 * Count the number of valid mappings in the specified PT page, 2907 * and ensure that it is consistent with the number of wirings 2908 * to that page that the VM system has. 2909 */ 2910 void 2911 pmap_check_wiring(const char *str, vaddr_t va) 2912 { 2913 pt_entry_t *pte; 2914 paddr_t pa; 2915 struct vm_page *pg; 2916 int count; 2917 2918 if (!pmap_ste_v(pmap_kernel(), va) || 2919 !pmap_pte_v(pmap_pte(pmap_kernel(), va))) 2920 return; 2921 2922 pa = pmap_pte_pa(pmap_pte(pmap_kernel(), va)); 2923 pg = PHYS_TO_VM_PAGE(pa); 2924 if (pg->wire_count > PAGE_SIZE / sizeof(pt_entry_t)) { 2925 panic("*%s*: 0x%lx: wire count %d", str, va, pg->wire_count); 2926 } 2927 2928 count = 0; 2929 for (pte = (pt_entry_t *)va; pte < (pt_entry_t *)(va + PAGE_SIZE); 2930 pte++) 2931 if (*pte) 2932 count++; 2933 if (pg->wire_count != count) 2934 panic("*%s*: 0x%lx: w%d/a%d", 2935 str, va, pg->wire_count, count); 2936 } 2937 #endif /* DEBUG */ 2938 2939 /* 2940 * XXX XXX XXX These are legacy remants and should go away XXX XXX XXX 2941 * (Cribbed from vm_machdep.c because they're tied to this pmap impl.) 2942 */ 2943 2944 /* 2945 * Map `size' bytes of physical memory starting at `paddr' into 2946 * kernel VA space at `vaddr'. Read/write and cache-inhibit status 2947 * are specified by `prot'. 2948 */ 2949 void 2950 physaccess(void *vaddr, void *paddr, int size, int prot) 2951 { 2952 pt_entry_t *pte; 2953 u_int page; 2954 2955 pte = kvtopte(vaddr); 2956 page = (u_int)paddr & PG_FRAME; 2957 for (size = btoc(size); size; size--) { 2958 *pte++ = PG_V | prot | page; 2959 page += PAGE_SIZE; 2960 } 2961 TBIAS(); 2962 } 2963 2964 void 2965 physunaccess(void *vaddr, int size) 2966 { 2967 pt_entry_t *pte; 2968 2969 pte = kvtopte(vaddr); 2970 for (size = btoc(size); size; size--) 2971 *pte++ = PG_NV; 2972 TBIAS(); 2973 } 2974 2975 /* 2976 * Convert kernel VA to physical address 2977 */ 2978 int 2979 kvtop(void *addr) 2980 { 2981 return (int)vtophys((vaddr_t)addr); 2982 } 2983