1 /* $NetBSD: atari_init.c,v 1.115 2025/11/28 21:52:53 thorpej Exp $ */ 2 3 /* 4 * Copyright (c) 1995 Leo Weppelman 5 * Copyright (c) 1994 Michael L. Hitch 6 * Copyright (c) 1993 Markus Wild 7 * All rights reserved. 8 * 9 * Redistribution and use in source and binary forms, with or without 10 * modification, are permitted provided that the following conditions 11 * are met: 12 * 1. Redistributions of source code must retain the above copyright 13 * notice, this list of conditions and the following disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in the 16 * documentation and/or other materials provided with the distribution. 17 * 3. All advertising materials mentioning features or use of this software 18 * must display the following acknowledgement: 19 * This product includes software developed by Markus Wild. 20 * 4. The name of the author may not be used to endorse or promote products 21 * derived from this software without specific prior written permission 22 * 23 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 24 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 25 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 26 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 27 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 28 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 29 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 30 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 31 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 32 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: atari_init.c,v 1.115 2025/11/28 21:52:53 thorpej Exp $");

#include "opt_ddb.h"
#include "opt_mbtype.h"
#include "opt_m060sp.h"
#include "opt_m68k_arch.h"
#include "opt_st_pool_size.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/ioctl.h>
#include <sys/select.h>
#include <sys/tty.h>
#include <sys/buf.h>
#include <sys/msgbuf.h>
#include <sys/mbuf.h>
#include <sys/protosw.h>
#include <sys/domain.h>
#include <sys/dkbad.h>
#include <sys/reboot.h>
#include <sys/exec.h>
#include <sys/exec_aout.h>
#include <sys/core.h>
#include <sys/kcore.h>
#include <sys/bus.h>

#include <uvm/uvm_extern.h>

#include <machine/vmparam.h>
#include <machine/pte.h>
#include <machine/cpu.h>
#include <machine/iomap.h>
#include <machine/mfp.h>
#include <machine/scu.h>
#include <machine/acia.h>
#include <machine/kcore.h>
#include <machine/intr.h>

#include <m68k/cpu.h>
#include <m68k/cacheops.h>

#include <atari/atari/stalloc.h>
#include <atari/dev/clockvar.h>
#include <atari/dev/ym2149reg.h>

#include "pci.h"

/* C-level entry point, called from locore.s with the MMU still off. */
void start_c(int, u_int, u_int, u_int, char *);
static void atari_hwinit(void);
static void cpu_init_kcorehdr(paddr_t, paddr_t);
static void initcpu(void);
static void mmu030_setup(paddr_t, u_int, paddr_t, psize_t, paddr_t, paddr_t);
static void map_io_areas(paddr_t, psize_t, u_int);
static void set_machtype(void);

#if defined(M68040) || defined(M68060)
static void mmu040_setup(paddr_t, u_int, paddr_t, psize_t, paddr_t, paddr_t);
#endif

#if defined(_MILANHW_)
static u_int milan_probe_bank_1(paddr_t paddr);
static u_int milan_probe_bank(paddr_t paddr);

/* The Milan has four SIMM slots, each with two banks (see start_c()). */
#define	NBANK	2
#define	NSLOT	4

/*
 * Size helpers and the per-size byte patterns written/read back by the
 * RAM bank probes.  Each magic value is unique per region size so that
 * address-wrap images of a smaller bank can be detected.
 */
#define	MB(n)		((n) * 1024 * 1024)
#define	MB_END(n)	(MB(n) - 1)		/* last byte of an n MB region */
#define	MAGIC_4M	(4 - 1)
#define	MAGIC_4M_INV	((uint8_t)~MAGIC_4M)
#define	MAGIC_8M	(8 - 1)
#define	MAGIC_16M	(16 - 1)
#define	MAGIC_32M	(32 - 1)
#define	MAGIC_64M	(64 - 1)
#endif

/*
 * All info needed to generate a panic dump. All fields are setup by
 * start_c().
 * XXX: Should check usage of phys_segs. There is some unwanted overlap
 *	here.... Also, the name is badly chosen. Phys_segs contains the
 *	segment descriptions _after_ reservations are made.
 * XXX: 'lowram' is obsoleted by the new panicdump format
 */
static cpu_kcore_hdr_t cpu_kcore_hdr;

extern u_int	lowram;
int		machineid, mmutype, cputype;

extern char	*esym;
extern struct pcb *curpcb;

/*
 * This is the virtual address of physical page 0. Used by 'do_boot()'.
 */
vaddr_t	page_zero;

/*
 * Simple support for allocation in ST-ram.
 * Currently 16 bit ST-ram is required to allocate DMA buffers for SCSI and
 * FDC transfers, and video memory for the XFree68 based Xservers.
 * The physical address is also returned because the video init needs it to
 * setup the controller at the time the vm-system is not yet operational so
 * 'kvtop()' cannot be used.
 */
#define	ST_POOL_SIZE_MIN	24	/* for DMA bounce buffers */
#ifndef ST_POOL_SIZE
#define	ST_POOL_SIZE		56	/* Xserver requires 320KB (40 pages) */
#endif

psize_t	st_pool_size = ST_POOL_SIZE * PAGE_SIZE; /* Patchable */
vaddr_t	st_pool_virt;				 /* KVA of the ST pool */
paddr_t	st_pool_phys;				 /* PA of the ST pool */

/*
 * Thresholds to restrict size of reserved ST memory to make sure
 * the kernel at least boot even on lower memory machines.
 * Nowadays we could assume most users have 4MB ST-RAM and 16MB TT-RAM.
 */
#define	STRAM_MINTHRESH		(2 * 1024 * 1024)
#define	TTRAM_MINTHRESH		(4 * 1024 * 1024)

/* I/O address space variables */
vaddr_t	stio_addr;		/* Where the st io-area is mapped	*/
vaddr_t	pci_conf_addr;		/* KVA base of PCI config space		*/
vaddr_t	pci_io_addr;		/* KVA base of PCI io-space		*/
vaddr_t	pci_mem_addr;		/* KVA base of PCI mem-space		*/
vaddr_t	pci_mem_uncached;	/* KVA base of an uncached PCI mem-page	*/

/*
 * Are we relocating the kernel to TT-Ram if possible? It is faster, but
 * it is also reported not to work on all TT's. So the default is NO.
 */
#ifndef RELOC_KERNEL
#define	RELOC_KERNEL	0
#endif
int	reloc_kernel = RELOC_KERNEL;		/* Patchable */

#define	RELOC_PA(base, pa)	((base) + (pa))	/* used to set up PTE etc. */

/*
 * this is the C-level entry function, it's called from locore.s.
 * Preconditions:
 *	Interrupts are disabled
 *	PA == VA, we don't have to relocate addresses before enabling
 *		the MMU
 *	Exec is no longer available (because we're loaded all over
 *		low memory, no ExecBase is available anymore)
 *
 * Its purpose is:
 *	Do the things that are done in locore.s in the hp300 version,
 *	this includes allocation of kernel maps and enabling the MMU.
 *
 * Some of the code in here is `stolen' from Amiga MACH, and was
 * written by Bryan Ford and Niklas Hallqvist.
 *
 * Very crude 68040 support by Michael L. Hitch.
 */
int kernel_copyback = 1;	/* map kernel data copyback on '040/'060 */

void
start_c(int id, u_int ttphystart, u_int ttphysize, u_int stphysize,
    char *esym_addr)
	/* id:			 Machine id			*/
	/* ttphystart, ttphysize: Start address and size of TT-ram */
	/* stphysize:		 Size of ST-ram			*/
	/* esym_addr:		 Address of kernel '_esym' symbol */
{
	extern char	end[];
	extern void	etext(void);
	paddr_t		pstart;		/* Next available physical address */
	vaddr_t		vstart;		/* Next available virtual address */
	vsize_t		avail;
	paddr_t		ptpa;
	psize_t		ptsize;
	u_int		ptextra;
	vaddr_t		kva;
	u_int		i;
	pt_entry_t	*pg, *epg;
	pt_entry_t	pg_proto;
	vaddr_t		end_loaded;
	paddr_t		kbase;
	u_int		kstsize;
	paddr_t		Sysptmap_pa;
#if defined(_MILANHW_)
	/*
	 * The Milan Lies about the presence of TT-RAM. If you insert
	 * 16MB it is split in 14MB ST starting at address 0 and 2MB TT RAM,
	 * starting at address 16MB as the BIOS remapping memory using MMU.
	 *
	 * Milan actually has four SIMM slots and each slot has two banks,
	 * so it could have up to 8 memory segment regions.
	 */
	const paddr_t simm_base[NBANK][NSLOT] = {
		/* slot 0-3, bank 0 */
		{ 0x00000000, 0x04000000, 0x08000000, 0x0c000000 },
		/* slot 0-3, bank 1 */
		{ 0x10000000, 0x14000000, 0x18000000, 0x1c000000 }
	};
	int slot, bank, seg;
	u_int mb;

	/* On Milan, all RAMs are fast 32 bit so no need to reloc kernel */
	reloc_kernel = 0;

	/* probe memory region in all SIMM slots and banks */
	seg = 0;
	ttphysize = 0;
	for (bank = 0; bank < 2; bank++) {
		for (slot = 0; slot < 4; slot++) {
			if (bank == 0 && slot == 0) {
				/*
				 * The first bank has at least 16MB because
				 * the Milan's ROM bootloader requires it
				 * to allocate ST RAM.
				 */
				mb = milan_probe_bank_1(simm_base[bank][slot]);
				boot_segs[0].start = 0;
				boot_segs[0].end = MB(mb);
				stphysize = MB(mb);
				seg++;
			} else {
				/*
				 * The rest banks could be empty or
				 * have 4, 8, 16, 32, or 64MB.
				 */
				mb = milan_probe_bank(simm_base[bank][slot]);
				if (mb > 0) {
					boot_segs[seg].start =
					    simm_base[bank][slot];
					boot_segs[seg].end =
					    simm_base[bank][slot] + MB(mb);
					ttphysize += MB(mb);
					seg++;
				}
			}
		}
	}
#else /* _MILANHW_ */
	boot_segs[0].start = 0;
	boot_segs[0].end = stphysize;
	boot_segs[1].start = ttphystart;
	boot_segs[1].end = ttphystart + ttphysize;
	boot_segs[2].start = boot_segs[2].end = 0; /* End of segments! */
#endif

	/*
	 * We do not know how much ST memory we really need until after
	 * configuration has finished, but typical users of ST memory
	 * are bounce buffers DMA against TT-RAM for SCSI and FDC,
	 * and video memory for the Xserver.
	 * If we have enough RAMs reserve ST memory including for the Xserver.
	 * Otherwise just allocate minimum one for SCSI and FDC.
	 *
	 * The round_page() call is meant to correct errors made by
	 * binpatching!
	 */
	if (st_pool_size > ST_POOL_SIZE_MIN * PAGE_SIZE &&
	    (stphysize <= STRAM_MINTHRESH || ttphysize <= TTRAM_MINTHRESH)) {
		st_pool_size = ST_POOL_SIZE_MIN * PAGE_SIZE;
	}
	st_pool_size = m68k_round_page(st_pool_size);
	/* Carve the ST pool out of the top of ST-RAM and hide it from VM. */
	st_pool_phys = stphysize - st_pool_size;
	stphysize = st_pool_phys;

	physmem = btoc(stphysize) + btoc(ttphysize);
	machineid = id;
	esym = esym_addr;

	/*
	 * the kernel ends at end() or esym.
	 */
	if (esym == NULL)
		end_loaded = (vaddr_t)&end;
	else
		end_loaded = (vaddr_t)esym;

	/*
	 * If we have enough fast-memory to put the kernel in and the
	 * RELOC_KERNEL option is set, do it!
	 */
	if ((reloc_kernel != 0) && (ttphysize >= end_loaded))
		kbase = ttphystart;
	else
		kbase = 0;

	/*
	 * Determine the type of machine we are running on. This needs
	 * to be done early (and before initcpu())!
	 */
	set_machtype();

	/*
	 * Initialize CPU specific stuff
	 */
	initcpu();

	/*
	 * We run the kernel from ST memory at the moment.
	 * The kernel segment table is put just behind the loaded image.
	 * pstart: start of usable ST memory
	 * avail : size of ST memory available.
	 */
	vstart = (vaddr_t)end_loaded;
	vstart = m68k_round_page(vstart);
	pstart = (paddr_t)vstart;	/* pre-reloc PA == kernel VA here */
	avail = stphysize - pstart;

	/*
	 * Save KVA of lwp0 uarea and allocate it.
	 */
	lwp0uarea = vstart;
	pstart += USPACE;
	vstart += USPACE;
	avail -= USPACE;

	/*
	 * Calculate the number of pages needed for Sysseg.
	 * For the 68030, we need 256 descriptors (segment-table-entries).
	 * This easily fits into one page.
	 * For the 68040, both the level-1 and level-2 descriptors are
	 * stored into Sysseg. We currently handle a maximum sum of MAXKL2SIZE
	 * level-1 & level-2 tables.
	 */
#if defined(M68040) || defined(M68060)
	if (mmutype == MMU_68040)
		kstsize = MAXKL2SIZE / (NPTEPG/SG4_LEV2SIZE);
	else
#endif
		kstsize = 1;
	/*
	 * allocate the kernel segment table
	 */
	Sysseg_pa = pstart;		/* pre-reloc PA to init STEs */
	Sysseg = (st_entry_t *)vstart;
	pstart += kstsize * PAGE_SIZE;
	vstart += kstsize * PAGE_SIZE;
	avail -= kstsize * PAGE_SIZE;

	/*
	 * allocate kernel page table map
	 */
	Sysptmap_pa = pstart;		/* pre-reloc PA to init PTEs */
	Sysptmap = (pt_entry_t *)vstart;
	pstart += PAGE_SIZE;
	vstart += PAGE_SIZE;
	avail -= PAGE_SIZE;

	/*
	 * Determine the number of pte's we need for extra's like
	 * ST I/O map's.
	 */
	ptextra = btoc(STIO_SIZE);

	/*
	 * If present, add pci areas
	 */
	if (machineid & ATARI_HADES)
		ptextra += btoc(PCI_CONFIG_SIZE + PCI_IO_SIZE + PCI_MEM_SIZE);
	if (machineid & ATARI_MILAN)
		ptextra += btoc(PCI_IO_SIZE + PCI_MEM_SIZE);
	ptextra += btoc(BOOTM_VA_POOL);
	/*
	 * now need to account for the kmem area, which is allocated
	 * before pmap_init() is called. It is roughly the size of physical
	 * memory.
	 */
	ptextra += physmem;

	/*
	 * The 'pt' (the initial kernel pagetable) has to map the kernel and
	 * the I/O areas. The various I/O areas are mapped (virtually) at
	 * the top of the address space mapped by 'pt' (ie. just below Sysmap).
	 */
	ptpa = pstart;	/* pre-reloc PA to init PTEs */
	ptsize = (Sysptsize + howmany(ptextra, NPTEPG)) << PGSHIFT;
	pstart += ptsize;
	vstart += ptsize;
	avail -= ptsize;

	/*
	 * Sysmap is now placed at the end of Supervisor virtual address space.
	 */
	Sysmap = (pt_entry_t *)SYSMAP_VA;

	/*
	 * Initialize segment tables
	 */
#if defined(M68040) || defined(M68060)
	if (mmutype == MMU_68040)
		mmu040_setup(Sysseg_pa, kstsize, ptpa, ptsize, Sysptmap_pa,
		    kbase);
	else
#endif /* defined(M68040) || defined(M68060) */
		mmu030_setup(Sysseg_pa, kstsize, ptpa, ptsize, Sysptmap_pa,
		    kbase);

	/*
	 * initialize kernel page table page(s).
	 * Assume load at VA 0.
	 *	- Text pages are RO
	 *	- Page zero is invalid
	 */
	pg_proto = RELOC_PA(kbase, 0) | PG_RO | PG_V;
	pg = (pt_entry_t *)ptpa;
	*pg++ = PG_NV;

	pg_proto += PAGE_SIZE;
	for (kva = PAGE_SIZE; kva < (vaddr_t)etext; kva += PAGE_SIZE) {
		*pg++ = pg_proto;
		pg_proto += PAGE_SIZE;
	}

	/*
	 * data, bss and dynamic tables are read/write
	 */
	pg_proto = (pg_proto & PG_FRAME) | PG_RW | PG_V;

#if defined(M68040) || defined(M68060)
	/*
	 * Map the kernel segment table cache invalidated for 68040/68060.
	 * (for the 68040 not strictly necessary, but recommended by Motorola;
	 *  for the 68060 mandatory)
	 */
	if (mmutype == MMU_68040) {

		if (kernel_copyback)
			pg_proto |= PG_CCB;

		/* data/bss up to the segment table: cacheable */
		for (; kva < (vaddr_t)Sysseg; kva += PAGE_SIZE) {
			*pg++ = pg_proto;
			pg_proto += PAGE_SIZE;
		}

		/* the segment table itself: cache inhibited */
		pg_proto = (pg_proto & ~PG_CCB) | PG_CI;
		for (; kva < (vaddr_t)Sysptmap; kva += PAGE_SIZE) {
			*pg++ = pg_proto;
			pg_proto += PAGE_SIZE;
		}

		pg_proto = (pg_proto & ~PG_CI);
		if (kernel_copyback)
			pg_proto |= PG_CCB;
	}
#endif /* defined(M68040) || defined(M68060) */

	/*
	 * go till end of data allocated so far
	 * plus lwp0 u-area (to be allocated)
	 */
	for (; kva < vstart; kva += PAGE_SIZE) {
		*pg++ = pg_proto;
		pg_proto += PAGE_SIZE;
	}

	/*
	 * invalidate remainder of kernel PT
	 */
	epg = (pt_entry_t *)ptpa;
	epg = &epg[ptsize / sizeof(pt_entry_t)];
	while (pg < epg)
		*pg++ = PG_NV;

	/*
	 * Map various I/O areas
	 */
	map_io_areas(ptpa, ptsize, ptextra);

	/*
	 * Map the allocated space in ST-ram now. In the contig-case, there
	 * is no need to make a distinction between virtual and physical
	 * addresses. But I make it anyway to be prepared.
	 * Physical space is already reserved!
	 */
	st_pool_virt = vstart;
	pg = (pt_entry_t *)ptpa;
	pg = &pg[vstart / PAGE_SIZE];
	pg_proto = st_pool_phys | PG_RW | PG_CI | PG_V;
	vstart += st_pool_size;
	/*
	 * Note: comparing the PTE prototype against a plain physical end
	 * address works because the PG_* flag bits all live below
	 * PAGE_SIZE, so they never push pg_proto past the page-aligned
	 * end boundary early.
	 */
	while (pg_proto < (st_pool_phys + st_pool_size)) {
		*pg++ = pg_proto;
		pg_proto += PAGE_SIZE;
	}

	/*
	 * Map physical page_zero and page-zero+1 (First ST-ram page). We need
	 * to reference it in the reboot code. Two pages are mapped, because
	 * we must make sure 'doboot()' is contained in it (see the tricky
	 * copying there....).
	 */
	page_zero = vstart;
	pg = (pt_entry_t *)ptpa;
	pg = &pg[vstart / PAGE_SIZE];
	*pg++ = PG_RW | PG_CI | PG_V;		/* PA 0 */
	vstart += PAGE_SIZE;
	*pg = PG_RW | PG_CI | PG_V | PAGE_SIZE;	/* PA PAGE_SIZE */
	vstart += PAGE_SIZE;

	/*
	 * All necessary STEs and PTEs have been initialized.
	 * Update Sysseg_pa and Sysptmap_pa to point relocated PA.
	 */
	if (kbase) {
		Sysseg_pa += kbase;
		Sysptmap_pa += kbase;
	}

	lowram = 0 >> PGSHIFT;	/* XXX */

	/*
	 * Fill in usable segments. The page indexes will be initialized
	 * later when all reservations are made.
	 */
	usable_segs[0].start = 0;
	usable_segs[0].end = stphysize;
	usable_segs[0].free_list = VM_FREELIST_STRAM;
#if defined(_MILANHW_)
	for (i = 1; i < seg; i++) {
		usable_segs[i].start = boot_segs[i].start;
		usable_segs[i].end = boot_segs[i].end;
		usable_segs[i].free_list = VM_FREELIST_TTRAM;
	}
	for (; i < NMEM_SEGS; i++) {
		usable_segs[i].start = usable_segs[i].end = 0;
	}
#else
	usable_segs[1].start = ttphystart;
	usable_segs[1].end = ttphystart + ttphysize;
	usable_segs[1].free_list = VM_FREELIST_TTRAM;
	usable_segs[2].start = usable_segs[2].end = 0; /* End of segments! */
#endif

	if (kbase) {
		/*
		 * First page of ST-ram is unusable, reserve the space
		 * for the kernel in the TT-ram segment.
		 * Note: Because physical page-zero is partially mapped to ROM
		 *       by hardware, it is unusable.
		 */
		usable_segs[0].start = PAGE_SIZE;
		usable_segs[1].start += pstart;
	} else
		usable_segs[0].start += pstart;

	/*
	 * As all segment sizes are now valid, calculate page indexes and
	 * available physical memory.
	 */
	usable_segs[0].first_page = 0;
	for (i = 1; i < NMEM_SEGS && usable_segs[i].start; i++) {
		usable_segs[i].first_page = usable_segs[i-1].first_page;
		usable_segs[i].first_page +=
		    (usable_segs[i-1].end - usable_segs[i-1].start) / PAGE_SIZE;
	}
	for (i = 0, physmem = 0; usable_segs[i].start; i++)
		physmem += usable_segs[i].end - usable_segs[i].start;
	physmem >>= PGSHIFT;

	/*
	 * get the pmap module in sync with reality.
	 */
	pmap_bootstrap(vstart);

	/*
	 * Prepare to enable the MMU.
	 * Setup and load SRP (see pmap.h)
	 */

	cpu_init_kcorehdr(kbase, Sysseg_pa);

	/*
	 * copy over the kernel (and all now initialized variables)
	 * to fastram. DONT use bcopy(), this beast is much larger
	 * than 128k !
	 */
	if (kbase) {
		register paddr_t *lp, *le, *fp;

		lp = (paddr_t *)0;
		le = (paddr_t *)pstart;
		fp = (paddr_t *)kbase;
		while (lp < le)
			*fp++ = *lp++;
	}
#if defined(M68040) || defined(M68060)
	if (mmutype == MMU_68040) {
		/*
		 * movel Sysseg_pa,a0;
		 * movec a0,SRP;
		 * pflusha;
		 * movel #$0xc000,d0;
		 * movec d0,TC
		 */
		if (cputype == CPU_68060) {
			/* XXX: Need the branch cache be cleared? */
			/*
			 * movec %cacr,%d0; set bit 22; movec %d0,%cacr
			 * (encoded as .word because the assembler may not
			 * know the '060 control registers).
			 */
			__asm volatile (".word 0x4e7a,0x0002;"
				"orl #0x400000,%%d0;"
				".word 0x4e7b,0x0002" : : : "d0");
		}
		__asm volatile ("movel %0,%%a0;"
			".word 0x4e7b,0x8807" : : "a" (Sysseg_pa) : "a0");
		__asm volatile (".word 0xf518" : : );	/* pflusha ('040) */
		__asm volatile ("movel #0xc000,%%d0;"
			".word 0x4e7b,0x0003" : : : "d0" );
	} else
#endif
	{
#if defined(M68030)
		protorp[1] = Sysseg_pa;		/* + segtable address */
		__asm volatile ("pmove %0@,%%srp" : : "a" (&protorp[0]));
		/*
		 * setup and load TC register.
		 * enable_cpr, enable_srp, pagesize=8k,
		 * A = 8 bits, B = 11 bits
		 */
		u_int tc = MMU51_TCR_BITS;
		__asm volatile ("pflusha" : : );
		__asm volatile ("pmove %0@,%%tc" : : "a" (&tc));
#endif /* M68030 */
	}

	/*
	 * Initialize the "u-area" pages etc.
	 */
	pmap_bootstrap2();

	/*
	 * Get the hardware into a defined state
	 */
	atari_hwinit();

	/*
	 * Initialize stmem allocator
	 */
	init_stmem();

	/*
	 * Initialize the iomem arena for bus_space(9) to manage address
	 * spaces and allocate the physical RAM from the extent map.
	 */
	atari_bus_space_arena_init(0x0, 0xffffffff);
	for (i = 0; i < NMEM_SEGS && boot_segs[i].end != 0; i++) {
		if (atari_bus_space_alloc_physmem(boot_segs[i].start,
		    boot_segs[i].end)) {
			/* XXX: Ahum, should not happen ;-) */
			printf("Warning: Cannot allocate boot memory from"
			    " extent map!?\n");
		}
	}

	/*
	 * Initialize interrupt mapping.
	 */
	intr_init();
}

#if defined(_MILANHW_)
/*
 * Probe and return available memory size in MB at specified address.
 * The first slot SIMM have at least 16MB, so check if it has 32 or 64 MB.
 *
 * Note it seems Milan does not generate bus errors on accesses against
 * address regions where memory doesn't exist, but it returns memory images
 * of lower address of the bank.
 */
static u_int
milan_probe_bank_1(paddr_t start_paddr)
{
	volatile uint8_t *base;
	u_int mb;
	uint8_t save_16, save_32, save_64;

	/* Assume that this bank has at least 16MB */
	mb = 16;

	base = (uint8_t *)start_paddr;

	/* save and write a MAGIC at the end of 16MB region */
	save_16 = base[MB_END(16)];
	base[MB_END(16)] = MAGIC_16M;

	/* check bus error at the end of 32MB region */
	if (badbaddr(__UNVOLATILE(base + MB_END(32)), sizeof(uint8_t))) {
		/* bus error; assume no memory there */
		goto out16;
	}

	/* check if the 32MB region is not image of the prior 16MB region */
	save_32 = base[MB_END(32)];
	base[MB_END(32)] = MAGIC_32M;
	if (base[MB_END(32)] != MAGIC_32M || base[MB_END(16)] != MAGIC_16M) {
		/*
		 * no memory or image at the 32MB region
		 *
		 * Skipping the save_32 restore at out32 is intentional:
		 * either nothing was actually stored at MB_END(32), or the
		 * write aliased onto MB_END(16), which the out16 restore
		 * below puts back.
		 */
		goto out16;
	}
	/* we have at least 32MB */
	mb = 32;

	/* check bus error at the end of 64MB region */
	if (badbaddr(__UNVOLATILE(base + MB_END(64)), sizeof(uint8_t))) {
		/* bus error; assume no memory there */
		goto out32;
	}

	/* check if the 64MB region is not image of the prior 32MB region */
	save_64 = base[MB_END(64)];
	base[MB_END(64)] = MAGIC_64M;
	if (base[MB_END(64)] != MAGIC_64M || base[MB_END(32)] != MAGIC_32M) {
		/* no memory or image at the 64MB region */
		goto out32;
	}
	/* we have 64MB */
	mb = 64;
	base[MB_END(64)] = save_64;
out32:
	base[MB_END(32)] = save_32;
out16:
	base[MB_END(16)] = save_16;

	return mb;
}

/*
 * Probe and return available memory size in MB at specified address.
 * The rest slot could be empty so check all possible size.
 */
static u_int
milan_probe_bank(paddr_t start_paddr)
{
	volatile uint8_t *base;
	u_int mb;
	uint8_t save_4, save_8, save_16;

	/* The rest banks might have no memory */
	mb = 0;

	base = (uint8_t *)start_paddr;

	/* check bus error at the end of 4MB region */
	if (badbaddr(__UNVOLATILE(base + MB_END(4)), sizeof(uint8_t))) {
		/* bus error; assume no memory there */
		goto out;
	}

	/*
	 * check if the 4MB region has memory
	 * (write and verify two complementary patterns to rule out
	 *  floating-bus reads)
	 */
	save_4 = base[MB_END(4)];
	base[MB_END(4)] = MAGIC_4M_INV;
	if (base[MB_END(4)] != MAGIC_4M_INV) {
		/* no memory */
		goto out;
	}
	base[MB_END(4)] = MAGIC_4M;
	if (base[MB_END(4)] != MAGIC_4M) {
		/* no memory */
		goto out;
	}
	/* we have at least 4MB */
	mb = 4;

	/* check bus error at the end of 8MB region */
	if (badbaddr(__UNVOLATILE(base + MB_END(8)), sizeof(uint8_t))) {
		/* bus error; assume no memory there */
		goto out4;
	}

	/* check if the 8MB region is not image of the prior 4MB region */
	save_8 = base[MB_END(8)];
	base[MB_END(8)] = MAGIC_8M;
	if (base[MB_END(8)] != MAGIC_8M || base[MB_END(4)] != MAGIC_4M) {
		/* no memory or image at the 8MB region */
		goto out4;
	}
	/* we have at least 8MB */
	mb = 8;

	/* check bus error at the end of 16MB region */
	if (badbaddr(__UNVOLATILE(base + MB_END(16)), sizeof(uint8_t))) {
		/* bus error; assume no memory there */
		goto out8;
	}

	/* check if the 16MB region is not image of the prior 8MB region */
	save_16 = base[MB_END(16)];
	base[MB_END(16)] = MAGIC_16M;
	if (base[MB_END(16)] != MAGIC_16M || base[MB_END(8)] != MAGIC_8M) {
		/* no memory or image at the 16MB region */
		goto out8;
	}
	/* we have at least 16MB, so check more region as the first bank */
	mb = milan_probe_bank_1(start_paddr);

	base[MB_END(16)] = save_16;
out8:
	base[MB_END(8)] = save_8;
out4:
	base[MB_END(4)] = save_4;
out:

	return mb;
}
#endif /* _MILANHW_ */

/*
 * Try to figure out on what type of machine we are running
 * Note: This module runs *before* the io-mapping is setup!
 */
static void
set_machtype(void)
{

#ifdef _MILANHW_
	machineid |= ATARI_MILAN;

#else
	stio_addr = 0xff8000;	/* XXX: For TT & Falcon only */
	if (badbaddr((void *)__UNVOLATILE(&MFP2->mf_gpip), sizeof(char))) {
		/*
		 * Watch out! We can also have a Hades with < 16Mb
		 * RAM here...
		 */
		if (!badbaddr((void *)__UNVOLATILE(&MFP->mf_gpip),
		    sizeof(char))) {
			machineid |= ATARI_FALCON;
			return;
		}
	}
	if (!badbaddr((void *)(PCI_CONFB_PHYS + PCI_CONFM_PHYS), sizeof(char)))
		machineid |= ATARI_HADES;
	else
		machineid |= ATARI_TT;
#endif /* _MILANHW_ */
}

/*
 * Get the hardware (sound chip, ACIAs, MFPs, SCU, delay timer, PCI bus)
 * into a defined state.  Called from start_c() once the MMU is enabled.
 */
static void
atari_hwinit(void)
{

#if defined(_ATARIHW_)
	/*
	 * Initialize the sound chip
	 */
	ym2149_init();

	/*
	 * Make sure that the midi acia will not generate an interrupt
	 * unless something attaches to it. We cannot do this for the
	 * keyboard acia because this breaks the '-d' option of the
	 * booter...
	 */
	MDI->ac_cs = 0;
#endif /* defined(_ATARIHW_) */

	/*
	 * Initialize both MFP chips (if both present!) to generate
	 * auto-vectored interrupts with EOI. The active-edge registers are
	 * set up. The interrupt enable registers are set to disable all
	 * interrupts.
	 */
	MFP->mf_iera = MFP->mf_ierb = 0;
	MFP->mf_imra = MFP->mf_imrb = 0;
	MFP->mf_aer = MFP->mf_ddr = 0;
	MFP->mf_vr = 0x40;

#if defined(_ATARIHW_)
	if (machineid & (ATARI_TT|ATARI_HADES)) {
		MFP2->mf_iera = MFP2->mf_ierb = 0;
		MFP2->mf_imra = MFP2->mf_imrb = 0;
		MFP2->mf_aer = 0x80;
		MFP2->mf_vr = 0x50;
	}

	if (machineid & ATARI_TT) {
		/*
		 * Initialize the SCU, to enable interrupts on the SCC (ipl5),
		 * MFP (ipl6) and softints (ipl1).
		 */
		SCU->sys_mask = SCU_SYS_SOFT;
		SCU->vme_mask = SCU_MFP | SCU_SCC;
#ifdef DDB
		/*
		 * This allows people with the correct hardware modification
		 * to drop into the debugger from an NMI.
		 */
		SCU->sys_mask |= SCU_IRQ7;
#endif
	}
#endif /* defined(_ATARIHW_) */

	/*
	 * Initialize a timer for delay(9).
	 */
	init_delay();

#if NPCI > 0
	if (machineid & (ATARI_HADES|ATARI_MILAN)) {
		/*
		 * Configure PCI-bus
		 */
		init_pci_bus();
	}
#endif

}

/*
 * Do the dull work of mapping the various I/O areas. They MUST be Cache
 * inhibited!
 * All I/O areas are virtually mapped at the end of the pt-table.
 */
static void
map_io_areas(paddr_t ptpa, psize_t ptsize, u_int ptextra)
	/* ptsize:	 Size of 'pt' in bytes	*/
	/* ptextra:	 #of additional I/O pte's	*/
{
	vaddr_t		ioaddr;
	pt_entry_t	*pt, *pg, *epg;
	pt_entry_t	pg_proto;
	u_long		mask;

	pt = (pt_entry_t *)ptpa;
	/* first virtual address of the I/O window at the top of 'pt' */
	ioaddr = ((ptsize / sizeof(pt_entry_t)) - ptextra) * PAGE_SIZE;

	/*
	 * Map ST-IO area
	 */
	stio_addr = ioaddr;
	ioaddr += STIO_SIZE;
	pg = &pt[stio_addr / PAGE_SIZE];
	epg = &pg[btoc(STIO_SIZE)];
#ifdef _MILANHW_
	/*
	 * Turn on byte swaps in the ST I/O area. On the Milan, the
	 * U0 signal of the MMU controls the BigEndian signal
	 * of the PLX9080. We use this setting so we can read/write the
	 * PLX registers (and PCI-config space) in big-endian mode.
	 */
	pg_proto = STIO_PHYS | PG_RW | PG_CI | PG_V | 0x100;
#else
	pg_proto = STIO_PHYS | PG_RW | PG_CI | PG_V;
#endif
	while (pg < epg) {
		*pg++ = pg_proto;
		pg_proto += PAGE_SIZE;
	}

	/*
	 * Map PCI areas
	 */
	if (machineid & ATARI_HADES) {
		/*
		 * Only Hades maps the PCI-config space!
		 */
		pci_conf_addr = ioaddr;
		ioaddr += PCI_CONFIG_SIZE;
		pg = &pt[pci_conf_addr / PAGE_SIZE];
		epg = &pg[btoc(PCI_CONFIG_SIZE)];
		/* each page selects one device: walk the IDSEL bit upward */
		mask = PCI_CONFM_PHYS;
		pg_proto = PCI_CONFB_PHYS | PG_RW | PG_CI | PG_V;
		for (; pg < epg; mask <<= 1)
			*pg++ = pg_proto | mask;
	} else
		pci_conf_addr = 0;	/* XXX: should crash */

	if (machineid & (ATARI_HADES|ATARI_MILAN)) {
		pci_io_addr = ioaddr;
		ioaddr += PCI_IO_SIZE;
		pg = &pt[pci_io_addr / PAGE_SIZE];
		epg = &pg[btoc(PCI_IO_SIZE)];
		pg_proto = PCI_IO_PHYS | PG_RW | PG_CI | PG_V;
		while (pg < epg) {
			*pg++ = pg_proto;
			pg_proto += PAGE_SIZE;
		}

		/* PCI memory space follows directly; 'pg' continues on. */
		pci_mem_addr = ioaddr;
		/* Provide an uncached PCI address for the MILAN */
		pci_mem_uncached = ioaddr;
		ioaddr += PCI_MEM_SIZE;
		epg = &pg[btoc(PCI_MEM_SIZE)];
		pg_proto = PCI_VGA_PHYS | PG_RW | PG_CI | PG_V;
		while (pg < epg) {
			*pg++ = pg_proto;
			pg_proto += PAGE_SIZE;
		}
	}

	bootm_init(ioaddr, pg, BOOTM_VA_POOL);
	/*
	 * ioaddr += BOOTM_VA_POOL;
	 * pg = &pg[btoc(BOOTM_VA_POOL)];
	 */
}

/*
 * Used by dumpconf() to get the size of the machine-dependent panic-dump
 * header in disk blocks.
 */

/* dump header: generic kcore segment header + the MD header, disk-block
 * aligned */
#define CHDRSIZE (ALIGN(sizeof(kcore_seg_t)) + ALIGN(sizeof(cpu_kcore_hdr_t)))
#define MDHDRSIZE roundup(CHDRSIZE, dbtob(1))

int
cpu_dumpsize(void)
{

	return btodb(MDHDRSIZE);
}

/*
 * Called by dumpsys() to dump the machine-dependent header.
 * XXX: Assumes that it will all fit in one diskblock.
 */
int
cpu_dump(int (*dump)(dev_t, daddr_t, void *, size_t), daddr_t *p_blkno)
{
	int buf[MDHDRSIZE/sizeof(int)];
	int error;
	kcore_seg_t *kseg_p;
	cpu_kcore_hdr_t *chdr_p;

	/* kcore segment header first, MD header ALIGN()ed behind it */
	kseg_p = (kcore_seg_t *)buf;
	chdr_p = (cpu_kcore_hdr_t *)&buf[ALIGN(sizeof(*kseg_p)) / sizeof(int)];

	/*
	 * Generate a segment header
	 */
	CORE_SETMAGIC(*kseg_p, KCORE_MAGIC, MID_MACHINE, CORE_CPU);
	kseg_p->c_size = MDHDRSIZE - ALIGN(sizeof(*kseg_p));

	/*
	 * Add the md header
	 */
	*chdr_p = cpu_kcore_hdr;
	error = dump(dumpdev, *p_blkno, (void *)buf, sizeof(buf));
	*p_blkno += btodb(sizeof(buf));
	return (error);
}

#if (M68K_NPHYS_RAM_SEGS < NMEM_SEGS)
#error "Configuration error: M68K_NPHYS_RAM_SEGS < NMEM_SEGS"
#endif
/*
 * Initialize the cpu_kcore_header.
 */
static void
cpu_init_kcorehdr(paddr_t kbase, paddr_t sysseg_pa)
{
	cpu_kcore_hdr_t *h = &cpu_kcore_hdr;
	struct m68k_kcore_hdr *m = &h->un._m68k;
	extern char end[];
	int i;

	memset(&cpu_kcore_hdr, 0, sizeof(cpu_kcore_hdr));

	/*
	 * Initialize the `dispatcher' portion of the header.
	 */
	strcpy(h->name, machine);
	h->page_size = PAGE_SIZE;
	h->kernbase = KERNBASE;

	/*
	 * Fill in information about our MMU configuration.
	 */
	m->mmutype = mmutype;
	m->sg_v = SG_V;
	m->sg_frame = SG_FRAME;
	m->sg_ishift = SG_ISHIFT;
	m->sg_pmask = SG_PMASK;
	m->sg40_shift1 = SG4_SHIFT1;
	m->sg40_mask2 = SG4_MASK2;
	m->sg40_shift2 = SG4_SHIFT2;
	m->sg40_mask3 = SG4_MASK3;
	m->sg40_shift3 = SG4_SHIFT3;
	m->sg40_addr1 = SG4_ADDR1;
	m->sg40_addr2 = SG4_ADDR2;
	m->pg_v = PG_V;
	m->pg_frame = PG_FRAME;

	/*
	 * Initialize pointer to kernel segment table.
	 */
	m->sysseg_pa = sysseg_pa;	/* PA after relocation */

	/*
	 * Initialize relocation value such that:
	 *
	 *	pa = (va - KERNBASE) + reloc
	 */
	m->reloc = kbase;

	/*
	 * Define the end of the relocatable range.
	 */
	m->relocend = (vaddr_t)end;

	/* Copy the boot-time physical RAM segment list into the header. */
	for (i = 0; i < NMEM_SEGS; i++) {
		m->ram_segs[i].start = boot_segs[i].start;
		m->ram_segs[i].size = boot_segs[i].end -
		    boot_segs[i].start;
	}
}

/*
 * Build the 68030 kernel segment table and Sysptmap for the initial
 * kernel page table at 'ptpa', using pre-relocation addresses (hence
 * the RELOC_PA() adjustments with 'kbase').
 */
void
mmu030_setup(paddr_t sysseg_pa, u_int kstsize, paddr_t ptpa, psize_t ptsize,
    paddr_t sysptmap_pa, paddr_t kbase)
	/* sysseg_pa:	 System segment table		*/
	/* kstsize:	 size of 'sysseg' in pages	*/
	/* ptpa:	 Kernel page table		*/
	/* ptsize:	 size of 'pt' in bytes		*/
	/* sysptmap_pa:	 System page table		*/
{
	st_entry_t sg_proto, *sg, *esg;
	pt_entry_t pg_proto, *pg, *epg;

	/*
	 * Map the page table pages in both the HW segment table
	 * and the software Sysptmap.
	 */
	sg = (st_entry_t *)sysseg_pa;
	pg = (pt_entry_t *)sysptmap_pa;
	epg = &pg[ptsize >> PGSHIFT];
	sg_proto = RELOC_PA(kbase, ptpa) | SG_RW | SG_V;
	pg_proto = RELOC_PA(kbase, ptpa) | PG_RW | PG_CI | PG_V;
	while (pg < epg) {
		*sg++ = sg_proto;
		*pg++ = pg_proto;
		sg_proto += PAGE_SIZE;
		pg_proto += PAGE_SIZE;
	}

	/*
	 * Invalidate the remainder of the tables.
	 */
	esg = (st_entry_t *)sysseg_pa;
	esg = &esg[TIA_SIZE];
	while (sg < esg)
		*sg++ = SG_NV;
	epg = (pt_entry_t *)sysptmap_pa;
	epg = &epg[TIB_SIZE];
	while (pg < epg)
		*pg++ = PG_NV;

	/*
	 * Initialize the PTE for the last one to point Sysptmap.
	 * (Sysptmap maps itself at SYSMAP_VA, giving the VA through
	 * which 'Sysmap' is accessed once the MMU is on.)
	 */
	sg = (st_entry_t *)sysseg_pa;
	sg = &sg[SYSMAP_VA >> SEGSHIFT];
	pg = (pt_entry_t *)sysptmap_pa;
	pg = &pg[SYSMAP_VA >> SEGSHIFT];
	*sg = RELOC_PA(kbase, sysptmap_pa) | SG_RW | SG_V;
	*pg = RELOC_PA(kbase, sysptmap_pa) | PG_RW | PG_CI | PG_V;
}

#if defined(M68040) || defined(M68060)
void
mmu040_setup(paddr_t sysseg_pa, u_int kstsize, paddr_t ptpa, psize_t ptsize,
    paddr_t sysptmap_pa, paddr_t kbase)
	/* sysseg_pa:	 System segment table		*/
	/* kstsize:	 size of 'sysseg' in pages	*/
	/* ptpa:	 Kernel page table		*/
	/* ptsize:	 size of 'pt' in bytes		*/
	/* sysptmap_pa:	 System page table		*/
{
	int nl1desc, nl2desc, i;
	st_entry_t sg_proto, *sg, *esg;
	pt_entry_t pg_proto, *pg, *epg;

	/*
	 * First invalidate the entire "segment table" pages
	 * (levels 1 and 2 have the same "invalid" values).
	 */
	sg = (st_entry_t *)sysseg_pa;
	esg = &sg[kstsize * NPTEPG];
	while (sg < esg)
		*sg++ = SG_NV;

	/*
	 * Initialize level 2 descriptors (which immediately
	 * follow the level 1 table).
	 * We need:
	 *	NPTEPG / SG4_LEV3SIZE
	 * level 2 descriptors to map each of the nptpages
	 * pages of PTEs.  Note that we set the "used" bit
	 * now to save the HW the expense of doing it.
1220 */ 1221 nl2desc = (ptsize >> PGSHIFT) * (NPTEPG / SG4_LEV3SIZE); 1222 sg = (st_entry_t *)sysseg_pa; 1223 sg = &sg[SG4_LEV1SIZE]; 1224 esg = &sg[nl2desc]; 1225 sg_proto = RELOC_PA(kbase, ptpa) | SG_U | SG_RW | SG_V; 1226 while (sg < esg) { 1227 *sg++ = sg_proto; 1228 sg_proto += (SG4_LEV3SIZE * sizeof(st_entry_t)); 1229 } 1230 1231 /* 1232 * Initialize level 1 descriptors. We need: 1233 * howmany(nl2desc, SG4_LEV2SIZE) 1234 * level 1 descriptors to map the 'nl2desc' level 2's. 1235 */ 1236 nl1desc = howmany(nl2desc, SG4_LEV2SIZE); 1237 sg = (st_entry_t *)sysseg_pa; 1238 esg = &sg[nl1desc]; 1239 sg_proto = RELOC_PA(kbase, (paddr_t)&sg[SG4_LEV1SIZE]) 1240 | SG_U | SG_RW | SG_V; 1241 while (sg < esg) { 1242 *sg++ = sg_proto; 1243 sg_proto += (SG4_LEV2SIZE * sizeof(st_entry_t)); 1244 } 1245 1246 /* Sysmap is last entry in level 1 */ 1247 sg = (st_entry_t *)sysseg_pa; 1248 sg = &sg[SG4_LEV1SIZE - 1]; 1249 *sg = sg_proto; 1250 1251 /* 1252 * Kernel segment table at end of next level 2 table 1253 */ 1254 i = SG4_LEV1SIZE + (nl1desc * SG4_LEV2SIZE); 1255 sg = (st_entry_t *)sysseg_pa; 1256 sg = &sg[i + SG4_LEV2SIZE - (NPTEPG / SG4_LEV3SIZE)]; 1257 esg = &sg[NPTEPG / SG4_LEV3SIZE]; 1258 sg_proto = RELOC_PA(kbase, sysptmap_pa) | SG_U | SG_RW | SG_V; 1259 while (sg < esg) { 1260 *sg++ = sg_proto; 1261 sg_proto += (SG4_LEV3SIZE * sizeof(st_entry_t)); 1262 } 1263 1264 /* Include additional level 2 table for Sysmap in protostfree */ 1265 protostfree = (~0 << (1 + nl1desc + 1)) /* & ~(~0 << MAXKL2SIZE) */; 1266 1267 /* 1268 * Initialize Sysptmap 1269 */ 1270 pg = (pt_entry_t *)sysptmap_pa; 1271 epg = &pg[ptsize >> PGSHIFT]; 1272 pg_proto = RELOC_PA(kbase, ptpa) | PG_RW | PG_CI | PG_V; 1273 while (pg < epg) { 1274 *pg++ = pg_proto; 1275 pg_proto += PAGE_SIZE; 1276 } 1277 1278 /* 1279 * Invalidate rest of Sysptmap page. 
1280 */ 1281 epg = (pt_entry_t *)sysptmap_pa; 1282 epg = &epg[TIB_SIZE]; 1283 while (pg < epg) 1284 *pg++ = PG_NV; 1285 1286 /* 1287 * Initialize the PTE for the last one to point Sysptmap. 1288 */ 1289 pg = (pt_entry_t *)sysptmap_pa; 1290 pg = &pg[SYSMAP_VA >> SEGSHIFT]; 1291 *pg = RELOC_PA(kbase, sysptmap_pa) | PG_RW | PG_CI | PG_V; 1292 } 1293 #endif /* M68040 */ 1294 1295 #if defined(M68060) 1296 int m68060_pcr_init = 0x21; /* make this patchable */ 1297 #endif 1298 1299 static void 1300 initcpu(void) 1301 { 1302 typedef void trapfun(void); 1303 1304 switch (cputype) { 1305 1306 #if defined(M68060) 1307 case CPU_68060: 1308 { 1309 extern trapfun *vectab[256]; 1310 extern trapfun buserr60, addrerr4060, fpfault; 1311 #if defined(M060SP) 1312 extern u_int8_t FP_CALL_TOP[], I_CALL_TOP[]; 1313 #else 1314 extern trapfun illinst; 1315 #endif 1316 1317 __asm volatile ("movl %0,%%d0; .word 0x4e7b,0x0808" : : 1318 "d"(m68060_pcr_init):"d0" ); 1319 1320 /* bus/addrerr vectors */ 1321 vectab[2] = buserr60; 1322 vectab[3] = addrerr4060; 1323 1324 #if defined(M060SP) 1325 /* integer support */ 1326 vectab[61] = (trapfun *)&I_CALL_TOP[128 + 0x00]; 1327 1328 /* floating point support */ 1329 /* 1330 * XXX maybe we really should run-time check for the 1331 * stack frame format here: 1332 */ 1333 vectab[11] = (trapfun *)&FP_CALL_TOP[128 + 0x30]; 1334 1335 vectab[55] = (trapfun *)&FP_CALL_TOP[128 + 0x38]; 1336 vectab[60] = (trapfun *)&FP_CALL_TOP[128 + 0x40]; 1337 1338 vectab[54] = (trapfun *)&FP_CALL_TOP[128 + 0x00]; 1339 vectab[52] = (trapfun *)&FP_CALL_TOP[128 + 0x08]; 1340 vectab[53] = (trapfun *)&FP_CALL_TOP[128 + 0x10]; 1341 vectab[51] = (trapfun *)&FP_CALL_TOP[128 + 0x18]; 1342 vectab[50] = (trapfun *)&FP_CALL_TOP[128 + 0x20]; 1343 vectab[49] = (trapfun *)&FP_CALL_TOP[128 + 0x28]; 1344 #else 1345 vectab[61] = illinst; 1346 #endif 1347 vectab[48] = fpfault; 1348 } 1349 break; 1350 #endif /* defined(M68060) */ 1351 #if defined(M68040) 1352 case CPU_68040: 1353 { 1354 extern 
trapfun *vectab[256]; 1355 extern trapfun buserr40, addrerr4060; 1356 1357 /* bus/addrerr vectors */ 1358 vectab[2] = buserr40; 1359 vectab[3] = addrerr4060; 1360 } 1361 break; 1362 #endif /* defined(M68040) */ 1363 #if defined(M68030) || defined(M68020) 1364 case CPU_68030: 1365 case CPU_68020: 1366 { 1367 extern trapfun *vectab[256]; 1368 extern trapfun buserr2030, addrerr2030; 1369 1370 /* bus/addrerr vectors */ 1371 vectab[2] = buserr2030; 1372 vectab[3] = addrerr2030; 1373 } 1374 break; 1375 #endif /* defined(M68030) || defined(M68020) */ 1376 } 1377 1378 DCIS(); 1379 } 1380 1381 #ifdef DEBUG 1382 void dump_segtable(u_int *); 1383 void dump_pagetable(u_int *, u_int, u_int); 1384 u_int vmtophys(u_int *, u_int); 1385 1386 void 1387 dump_segtable(u_int *stp) 1388 { 1389 u_int *s, *es; 1390 int shift, i; 1391 1392 s = stp; 1393 { 1394 es = s + (M68K_STSIZE >> 2); 1395 shift = SG_ISHIFT; 1396 } 1397 1398 /* 1399 * XXX need changes for 68040 1400 */ 1401 for (i = 0; s < es; s++, i++) 1402 if (*s & SG_V) 1403 printf("$%08x: $%08x\t", i << shift, *s & SG_FRAME); 1404 printf("\n"); 1405 } 1406 1407 void 1408 dump_pagetable(u_int *ptp, u_int i, u_int n) 1409 { 1410 u_int *p, *ep; 1411 1412 p = ptp + i; 1413 ep = p + n; 1414 for (; p < ep; p++, i++) 1415 if (*p & PG_V) 1416 printf("$%08x -> $%08x\t", i, *p & PG_FRAME); 1417 printf("\n"); 1418 } 1419 1420 u_int 1421 vmtophys(u_int *ste, u_int vm) 1422 { 1423 1424 ste = (u_int *)(*(ste + (vm >> SEGSHIFT)) & SG_FRAME); 1425 ste += (vm & SG_PMASK) >> PGSHIFT; 1426 return (*ste & -PAGE_SIZE) | (vm & (PAGE_SIZE - 1)); 1427 } 1428 1429 #endif 1430