/*	$NetBSD: pmap_bootstrap.c,v 1.97 2025/04/08 23:42:08 nat Exp $	*/

/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)pmap_bootstrap.c	8.1 (Berkeley) 6/10/93
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: pmap_bootstrap.c,v 1.97 2025/04/08 23:42:08 nat Exp $");

#include "audio.h"
#include "opt_ddb.h"
#include "opt_kgdb.h"
#include "opt_m68k_arch.h"

#include "zsc.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/reboot.h>

#include <uvm/uvm_extern.h>

#include <machine/cpu.h>
#include <machine/pte.h>
#include <machine/vmparam.h>
#include <machine/pmap.h>
#include <machine/autoconf.h>
#include <machine/video.h>

#include <mac68k/mac68k/macrom.h>

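/*
 * Note (informal): PA2VA() maps a physical address inside the loaded
 * kernel image (or the tables allocated just past it below) to the
 * address we use to access it while bootstrapping: its offset from
 * firstpa, the physical base the kernel was loaded at.  See the comment
 * above pmap_bootstrap() for the assumption that keeps this valid when
 * the MMU is still on.
 */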
#define PA2VA(v, t)	(t)((u_int)(v) - firstpa)

extern char *etext;
extern char *extiobase;

extern paddr_t avail_start, avail_end;

#if NZSC > 0
extern int zsinited;
#endif

/*
 * These are used to map the RAM:
 */
int	numranges;	/* = 0 == don't use the ranges */
u_long	low[8];
u_long	high[8];
u_long	maxaddr;	/* PA of the last physical page */
int	vidlen;
#define VIDMAPSIZE	btoc(vidlen)
static vaddr_t	newvideoaddr;

extern void * ROMBase;

/*
 * Special purpose kernel virtual addresses, used for mapping
 * physical pages for a variety of temporary or permanent purposes:
 *
 *	CADDR1, CADDR2:	pmap zero/copy operations
 *	vmmap:		/dev/mem, crash dumps, parity error checking
 *	msgbufaddr:	kernel message buffer
 */
void *CADDR1, *CADDR2;
char *vmmap;
void *msgbufaddr;

void pmap_bootstrap(paddr_t, paddr_t);
void bootstrap_mac68k(int);

/*
 * Bootstrap the VM system.
 *
 * This is called with the MMU either on or off.  If it's on, we assume
 * that it's mapped with the same PA <=> LA mapping that we eventually
 * want.  The page sizes and the protections will be wrong, anyway.
 *
 * nextpa is the first address following the loaded kernel.  On a IIsi
 * on 12 May 1996, that was 0xf9000 beyond firstpa.
 */
void
pmap_bootstrap(paddr_t nextpa, paddr_t firstpa)
{
	paddr_t lwp0upa, kstpa, kptmpa, kptpa;
	u_int nptpages, kstsize;
	paddr_t avail_next;
	int avail_remaining;
	int avail_range;
	int i;
	st_entry_t protoste, *ste, *este;
	pt_entry_t protopte, *pte, *epte;
	u_int stfree = 0;	/* XXX: gcc -Wuninitialized */
	extern char start[];

	vidlen = m68k_round_page(mac68k_video.mv_height *
	    mac68k_video.mv_stride + m68k_page_offset(mac68k_video.mv_phys));

	/*
	 * Calculate important physical addresses:
	 *
	 *	lwp0upa		lwp0 u-area		UPAGES pages
	 *
	 *	kstpa		kernel segment table	1 page (!040)
	 *						N pages (040)
	 *
	 *	kptmpa		kernel PT map		1 page
	 *
	 *	kptpa		statically allocated
	 *			kernel PT pages		Sysptsize+ pages
	 *
	 * [ Sysptsize is the number of pages of PT, and IIOMAPSIZE and
	 *   NBMAPSIZE are the number of PTEs, hence we need to round
	 *   the total to a page boundary with IO maps at the end. ]
	 *
	 */
	lwp0upa = nextpa;
	nextpa += USPACE;
	if (mmutype == MMU_68040)
		kstsize = MAXKL2SIZE / (NPTEPG/SG4_LEV2SIZE);
	else
		kstsize = 1;
	kstpa = nextpa;
	nextpa += kstsize * PAGE_SIZE;
	kptmpa = nextpa;
	nextpa += PAGE_SIZE;
	kptpa = nextpa;
	nptpages = Sysptsize +
	    (IIOMAPSIZE + ROMMAPSIZE + VIDMAPSIZE + NPTEPG - 1) / NPTEPG;
	/*
	 * New kmem arena is allocated prior to pmap_init(), so we need
	 * additional PT pages to account for that allocation, which is based
	 * on physical memory size.  Just sum up memory and add enough PT
	 * pages for that size.
	 */
	mem_size = 0;
	for (i = 0; i < numranges; i++)
		mem_size += high[i] - low[i];
	nptpages += howmany(m68k_btop(mem_size), NPTEPG);
	nptpages++;
	nextpa += nptpages * PAGE_SIZE;

	for (i = 0; i < numranges; i++)
		if (low[i] <= firstpa && firstpa < high[i])
			break;
	if (i >= numranges || nextpa > high[i]) {
		if (mac68k_machine.do_graybars) {
			printf("Failure in NetBSD boot; ");
			if (i < numranges)
				printf("nextpa=0x%lx, high[%d]=0x%lx.\n",
				    nextpa, i, high[i]);
			else
				printf("can't find kernel RAM segment.\n");
			printf("You're hosed!  Try booting with 32-bit ");
			printf("addressing enabled in the memory control ");
			printf("panel.\n");
			printf("Older machines may need Mode32 to get that ");
			printf("option.\n");
		}
		panic("Cannot work with the current memory mappings.");
	}

	/*
	 * Initialize segment table and kernel page table map.
	 *
	 * On 68030s and earlier MMUs the two are identical except for
	 * the valid bits so both are initialized with essentially the
	 * same values.  On the 68040, which has a mandatory 3-level
	 * structure, the segment table holds the level 1 table and part
	 * (or all) of the level 2 table and hence is considerably
	 * different.  Here the first level consists of 128 descriptors
	 * (512 bytes) each mapping 32mb of address space.  Each of these
	 * points to blocks of 128 second level descriptors (512 bytes)
	 * each mapping 256kb.  Note that there may be additional "segment
	 * table" pages depending on how large MAXKL2SIZE is.
	 *
	 * Portions of the last segment of KVA space (0xFFC00000 -
	 * 0xFFFFFFFF) are mapped for the kernel page tables.
	 *
	 * XXX cramming two levels of mapping into the single "segment"
	 * table on the 68040 is intended as a temporary hack to get things
	 * working.  The 224mb of address space that this allows will most
	 * likely be insufficient in the future (at least for the kernel).
	 */
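	/*
	 * Informally, the 68040 geometry described above works out as:
	 * one level 2 descriptor covers SG4_LEV3SIZE PTEs worth of
	 * address space (256kb), a block of 128 of them covers 32mb,
	 * and the 128 level 1 descriptors in the first 512 bytes of
	 * the segment table span the full 4gb (128 x 32mb) of address
	 * space.
	 */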
	if (mmutype == MMU_68040) {
		int nl1desc, nl2desc;

		/*
		 * First invalidate the entire "segment table" pages
		 * (levels 1 and 2 have the same "invalid" value).
		 */
		ste = PA2VA(kstpa, st_entry_t *);
		este = &ste[kstsize * NPTEPG];
		while (ste < este)
			*ste++ = SG_NV;
		/*
		 * Initialize level 2 descriptors (which immediately
		 * follow the level 1 table).  We need:
		 *	NPTEPG / SG4_LEV3SIZE
		 * level 2 descriptors to map each of the nptpages
		 * pages of PTEs.  Note that we set the "used" bit
		 * now to save the HW the expense of doing it.
		 */
		nl2desc = nptpages * (NPTEPG / SG4_LEV3SIZE);
		ste = PA2VA(kstpa, st_entry_t *);
		ste = &ste[SG4_LEV1SIZE];
		este = &ste[nl2desc];
		protoste = kptpa | SG_U | SG_RW | SG_V;
		while (ste < este) {
			*ste++ = protoste;
			protoste += (SG4_LEV3SIZE * sizeof(st_entry_t));
		}
		/*
		 * Initialize level 1 descriptors.  We need:
		 *	howmany(nl2desc, SG4_LEV2SIZE)
		 * level 1 descriptors to map the `nl2desc' level 2's.
		 */
		nl1desc = howmany(nl2desc, SG4_LEV2SIZE);
		ste = PA2VA(kstpa, u_int *);
		este = &ste[nl1desc];
		protoste = (paddr_t)&ste[SG4_LEV1SIZE] | SG_U | SG_RW | SG_V;
		while (ste < este) {
			*ste++ = protoste;
			protoste += (SG4_LEV2SIZE * sizeof(st_entry_t));
		}
		/*
		 * Initialize the final level 1 descriptor to map the next
		 * block of level 2 descriptors for Sysptmap.
		 */
		ste = PA2VA(kstpa, st_entry_t *);
		ste = &ste[SG4_LEV1SIZE - 1];
		*ste = protoste;
		/*
		 * Now initialize the final portion of that block of
		 * descriptors to map Sysmap.
		 */
		i = SG4_LEV1SIZE + (nl1desc * SG4_LEV2SIZE);
		ste = PA2VA(kstpa, st_entry_t *);
		ste = &ste[i + SG4_LEV2SIZE - (NPTEPG / SG4_LEV3SIZE)];
		este = &ste[NPTEPG / SG4_LEV3SIZE];
		protoste = kptmpa | SG_U | SG_RW | SG_V;
		while (ste < este) {
			*ste++ = protoste;
			protoste += (SG4_LEV3SIZE * sizeof(st_entry_t));
		}
		/*
		 * Calculate the free level 2 descriptor mask
		 * noting that we have used:
		 *	0:		level 1 table
		 *	1 to nl1desc:	map page tables
		 *	nl1desc + 1:	maps kptmpa and last-page page table
		 */
		/* mark an entry for level 1 table */
		stfree = ~l2tobm(0);
		/* mark entries for map page tables */
		for (i = 1; i <= nl1desc; i++)
			stfree &= ~l2tobm(i);
		/* mark an entry for kptmpa and lkptpa */
		stfree &= ~l2tobm(i);
		/* mark entries not available */
		for (i = MAXKL2SIZE; i < sizeof(stfree) * NBBY; i++)
			stfree &= ~l2tobm(i);

		/*
		 * Initialize Sysptmap
		 */
		pte = PA2VA(kptmpa, pt_entry_t *);
		epte = &pte[nptpages];
		protopte = kptpa | PG_RW | PG_CI | PG_V;
		while (pte < epte) {
			*pte++ = protopte;
			protopte += PAGE_SIZE;
		}
		/*
		 * Invalidate all remaining entries.
		 */
		epte = PA2VA(kptmpa, pt_entry_t *);
		epte = &epte[TIB_SIZE];
		while (pte < epte) {
			*pte++ = PG_NV;
		}
		/*
		 * Initialize the last one to point to Sysptmap.
		 */
		pte = PA2VA(kptmpa, pt_entry_t *);
		pte = &pte[SYSMAP_VA >> SEGSHIFT];
		*pte = kptmpa | PG_RW | PG_CI | PG_V;
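		/*
		 * I.e. the Sysptmap entry for SYSMAP_VA refers back to
		 * Sysptmap itself, so once these tables are live the
		 * kernel page tables appear as Sysmap at SYSMAP_VA (see
		 * the Sysmap assignment further down).
		 */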
	} else {
		/*
		 * Map the page table pages in both the HW segment table
		 * and the software Sysptmap.
		 */
		ste = PA2VA(kstpa, st_entry_t *);
		pte = PA2VA(kptmpa, pt_entry_t *);
		epte = &pte[nptpages];
		protoste = kptpa | SG_RW | SG_V;
		protopte = kptpa | PG_RW | PG_CI | PG_V;
		while (pte < epte) {
			*ste++ = protoste;
			*pte++ = protopte;
			protoste += PAGE_SIZE;
			protopte += PAGE_SIZE;
		}
		/*
		 * Invalidate all remaining entries in both.
		 */
		este = PA2VA(kstpa, st_entry_t *);
		este = &este[TIA_SIZE];
		while (ste < este)
			*ste++ = SG_NV;
		epte = PA2VA(kptmpa, pt_entry_t *);
		epte = &epte[TIB_SIZE];
		while (pte < epte)
			*pte++ = PG_NV;
		/*
		 * Initialize the last one to point to Sysptmap.
		 */
		ste = PA2VA(kstpa, st_entry_t *);
		ste = &ste[SYSMAP_VA >> SEGSHIFT];
		pte = PA2VA(kptmpa, pt_entry_t *);
		pte = &pte[SYSMAP_VA >> SEGSHIFT];
		*ste = kptmpa | SG_RW | SG_V;
		*pte = kptmpa | PG_RW | PG_CI | PG_V;
	}

	/*
	 * Initialize kernel page table.
	 * Start by invalidating the `nptpages' that we have allocated.
	 */
	pte = PA2VA(kptpa, pt_entry_t *);
	epte = &pte[nptpages * NPTEPG];
	while (pte < epte)
		*pte++ = PG_NV;
	/*
	 * Validate PTEs for kernel text (RO).
	 * Pages up to "start" (vectors and Mac OS global variable space)
	 * must be writable for the ROM.
	 */
	pte = PA2VA(kptpa, pt_entry_t *);
	pte = &pte[m68k_btop(KERNBASE)];
	epte = &pte[m68k_btop(m68k_round_page(start))];
	protopte = firstpa | PG_RW | PG_V;
	while (pte < epte) {
		*pte++ = protopte;
		protopte += PAGE_SIZE;
	}
	epte = &pte[m68k_btop(m68k_trunc_page(&etext))];
	protopte = (protopte & ~PG_PROT) | PG_RO;
	while (pte < epte) {
		*pte++ = protopte;
		protopte += PAGE_SIZE;
	}
	/*
	 * Validate PTEs for kernel data/bss, dynamic data allocated
	 * by us so far (kstpa - firstpa bytes), and pages for lwp0
	 * u-area and page table allocated below (RW).
	 */
	epte = PA2VA(kptpa, pt_entry_t *);
	epte = &epte[m68k_btop(kstpa - firstpa)];
	protopte = (protopte & ~PG_PROT) | PG_RW;
	/*
	 * Enable copy-back caching of data pages
	 */
	if (mmutype == MMU_68040)
		protopte |= PG_CCB;
	while (pte < epte) {
		*pte++ = protopte;
		protopte += PAGE_SIZE;
	}
	/*
	 * Map the kernel segment table cache invalidated for 68040/68060.
	 * (for the 68040 not strictly necessary, but recommended by Motorola;
	 *  for the 68060 mandatory)
	 */
	epte = PA2VA(kptpa, pt_entry_t *);
	epte = &epte[m68k_btop(nextpa - firstpa)];
	protopte = (protopte & ~PG_PROT) | PG_RW;
	if (mmutype == MMU_68040) {
		protopte &= ~PG_CCB;
		protopte |= PG_CIN;
	}
	while (pte < epte) {
		*pte++ = protopte;
		protopte += PAGE_SIZE;
	}

	/*
	 * Finally, validate the internal IO space PTEs (RW+CI).
	 */

#define	PTE2VA(pte)	m68k_ptob(pte - PA2VA(kptpa, pt_entry_t *))
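/*
 * Note (informal): PTE2VA() converts a pointer into the statically
 * allocated kernel page tables into the kernel virtual address that the
 * PTE will map, i.e. the PTE's index within the tables scaled back up to
 * a byte address.  It is used below to relocate IOBase, ROMBase and the
 * video address to the KVAs whose PTEs are being filled in.
 */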

	protopte = IOBase | PG_RW | PG_CI | PG_V;
	IOBase = PTE2VA(pte);
	epte = &pte[IIOMAPSIZE];
	while (pte < epte) {
		*pte++ = protopte;
		protopte += PAGE_SIZE;
	}

	protopte = (pt_entry_t)ROMBase | PG_RO | PG_V;
	ROMBase = (void *)PTE2VA(pte);
	epte = &pte[ROMMAPSIZE];
	while (pte < epte) {
		*pte++ = protopte;
		protopte += PAGE_SIZE;
	}

	if (vidlen) {
		protopte = m68k_trunc_page(mac68k_video.mv_phys) |
		    PG_RW | PG_V | PG_CI;
		newvideoaddr = PTE2VA(pte)
		    + m68k_page_offset(mac68k_video.mv_phys);
		epte = &pte[VIDMAPSIZE];
		while (pte < epte) {
			*pte++ = protopte;
			protopte += PAGE_SIZE;
		}
	}
	virtual_avail = PTE2VA(pte);

	/*
	 * Calculate important exported kernel addresses and related values.
	 */
	/*
	 * Sysseg: base of kernel segment table
	 */
	Sysseg = PA2VA(kstpa, st_entry_t *);
	Sysseg_pa = PA2VA(kstpa, paddr_t);
#if defined(M68040)
	if (mmutype == MMU_68040)
		protostfree = stfree;
#endif
	/*
	 * Sysptmap: base of kernel page table map
	 */
	Sysptmap = PA2VA(kptmpa, pt_entry_t *);
	/*
	 * Sysmap: kernel page table (as mapped through Sysptmap)
	 * Allocated at the end of KVA space.
	 */
	Sysmap = (pt_entry_t *)SYSMAP_VA;

	/*
	 * Remember the u-area address so it can be loaded in the lwp0
	 * via uvm_lwp_setuarea() later in pmap_bootstrap_finalize().
	 */
	lwp0uarea = PA2VA(lwp0upa, vaddr_t);

	/*
	 * VM data structures are now initialized, set up data for
	 * the pmap module.
	 *
	 * Note about avail_end: msgbuf is initialized just after
	 * avail_end in machdep.c.  Since the last page is used
	 * for rebooting the system (code is copied there and
	 * execution continues from copied code before the MMU
	 * is disabled), the msgbuf will get trounced between
	 * reboots if it's placed in the last physical page.
	 * To work around this, we move avail_end back one more
	 * page so the msgbuf can be preserved.
	 */
	avail_next = avail_start = m68k_round_page(nextpa);
	avail_remaining = 0;
	avail_range = -1;
	for (i = 0; i < numranges; i++) {
		if (low[i] <= avail_next && avail_next < high[i]) {
			avail_range = i;
			avail_remaining = high[i] - avail_next;
		} else if (avail_range != -1) {
			avail_remaining += (high[i] - low[i]);
		}
	}
	physmem = m68k_btop(avail_remaining + nextpa - firstpa);

	maxaddr = high[numranges - 1] - m68k_ptob(1);

#if NAUDIO > 0
	/*
	 * Reduce high by an extra 7 pages which are used by the EASC on some
	 * machines.  maxaddr is unchanged as the last page can still be
	 * safely used to reboot the system.
	 */
	high[numranges - 1] -= (m68k_round_page(MSGBUFSIZE) + m68k_ptob(8));
#else
	high[numranges - 1] -= (m68k_round_page(MSGBUFSIZE) + m68k_ptob(1));
#endif

	avail_end = high[numranges - 1];
	mem_size = m68k_ptob(physmem);
	virtual_end = VM_MAX_KERNEL_ADDRESS;

	/*
	 * Allocate some fixed, special purpose kernel virtual addresses
	 */
	{
		vaddr_t va = virtual_avail;

		CADDR1 = (void *)va;
		va += PAGE_SIZE;
		CADDR2 = (void *)va;
		va += PAGE_SIZE;
		vmmap = (void *)va;
		va += PAGE_SIZE;
		msgbufaddr = (void *)va;
		va += m68k_round_page(MSGBUFSIZE);
		virtual_avail = va;
	}
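	/*
	 * The fixed allocations above leave the KVA just past the
	 * statically mapped region laid out roughly as (one page each
	 * unless noted):
	 *
	 *	CADDR1, CADDR2	scratch pages for pmap zero/copy
	 *	vmmap		scratch page for /dev/mem and crash dumps
	 *	msgbufaddr	m68k_round_page(MSGBUFSIZE) bytes
	 *
	 * with virtual_avail advanced past them for the rest of the
	 * VM system.
	 */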
}

void
bootstrap_mac68k(int tc)
{
#if NZSC > 0
	extern void zs_init(void);
#endif
	extern int *esym;
	paddr_t nextpa;
	void *oldROMBase;
	char use_bootmem = 0;

#ifdef DJMEMCMAX
	if (mac68k_machine.machineid == MACH_MACC650 ||
	    mac68k_machine.machineid == MACH_MACQ650 ||
	    mac68k_machine.machineid == MACH_MACQ610 ||
	    mac68k_machine.machineid == MACH_MACC610 ||
	    mac68k_machine.machineid == MACH_MACQ800) {
		use_bootmem = 1;
	}
#endif

	if (mac68k_machine.do_graybars)
		printf("Bootstrapping NetBSD/mac68k.\n");

	oldROMBase = ROMBase;
	mac68k_video.mv_phys = mac68k_video.mv_kvaddr;

	if ((!use_bootmem) && (((tc & 0x80000000) && (mmutype == MMU_68030)) ||
	    ((tc & 0x8000) && (mmutype == MMU_68040)))) {
		if (mac68k_machine.do_graybars)
			printf("Getting mapping from MMU.\n");
		(void) get_mapping();
		if (mac68k_machine.do_graybars)
			printf("Done.\n");
	} else {
		/* MMU not enabled.  Fake up ranges. */
		numranges = 1;
		low[0] = 0;
		high[0] = mac68k_machine.mach_memsize * (1024 * 1024);
		if (mac68k_machine.do_graybars)
			printf("Faked range to byte 0x%lx.\n", high[0]);
	}
	nextpa = load_addr + m68k_round_page(esym);

	if (mac68k_machine.do_graybars)
		printf("Bootstrapping the pmap system.\n");

	pmap_bootstrap(nextpa, load_addr);

	if (mac68k_machine.do_graybars)
		printf("Pmap bootstrapped.\n");

	if (!vidlen)
		panic("Don't know how to relocate video!");

	if (mac68k_machine.do_graybars)
		printf("Moving ROMBase from %p to %p.\n", oldROMBase, ROMBase);

	mrg_fixupROMBase(oldROMBase, ROMBase);

	if (mac68k_machine.do_graybars)
		printf("Video address %p -> %p.\n",
		    (void *)mac68k_video.mv_kvaddr, (void *)newvideoaddr);

	mac68k_set_io_offsets(IOBase);

	/*
	 * If the serial ports are going (for console or 'echo'), then
	 * we need to make sure the IO change gets propagated properly.
	 * This resets the base addresses for the 8530 (serial) driver.
	 *
	 * WARNING!!! No printfs() (etc) BETWEEN zs_init() and the end
	 * of this function (where we start using the MMU, so the new
	 * address is correct).
	 */
#if NZSC > 0
	if (zsinited != 0)
		zs_init();
#endif

	mac68k_video.mv_kvaddr = newvideoaddr;
}