1 /* $NetBSD: amiga_init.c,v 1.134 2025/05/06 09:25:19 rin Exp $ */ 2 3 /* 4 * Copyright (c) 1994 Michael L. Hitch 5 * Copyright (c) 1993 Markus Wild 6 * All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 3. All advertising materials mentioning features or use of this software 17 * must display the following acknowledgement: 18 * This product includes software developed by Markus Wild. 19 * 4. The name of the author may not be used to endorse or promote products 20 * derived from this software without specific prior written permission 21 * 22 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 23 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 24 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 25 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 26 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 27 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 31 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
32 */ 33 34 #include "opt_amigaccgrf.h" 35 #include "opt_p5ppc68kboard.h" 36 #include "opt_devreload.h" 37 #include "opt_m68k_arch.h" 38 #include "z3rambd.h" 39 #include "ser.h" 40 41 #include <sys/cdefs.h> 42 __KERNEL_RCSID(0, "$NetBSD: amiga_init.c,v 1.134 2025/05/06 09:25:19 rin Exp $"); 43 44 #include <sys/param.h> 45 #include <sys/systm.h> 46 #include <sys/ioctl.h> 47 #include <sys/select.h> 48 #include <sys/tty.h> 49 #include <sys/buf.h> 50 #include <sys/msgbuf.h> 51 #include <sys/mbuf.h> 52 #include <sys/protosw.h> 53 #include <sys/domain.h> 54 #include <sys/dkbad.h> 55 #include <sys/reboot.h> 56 #include <sys/exec.h> 57 58 #include <dev/mm.h> 59 #include <uvm/uvm_extern.h> 60 61 #include <machine/pte.h> 62 #include <machine/cpu.h> 63 #include <amiga/amiga/cc.h> 64 #include <amiga/amiga/cia.h> 65 #include <amiga/amiga/custom.h> 66 #include <amiga/amiga/cfdev.h> 67 #include <amiga/amiga/drcustom.h> 68 #include <amiga/amiga/gayle.h> 69 #include <amiga/amiga/memlist.h> 70 #include <amiga/dev/zbusvar.h> 71 #include <amiga/dev/z3rambdvar.h> 72 73 #define RELOC(v, t) *((t*)((u_int)&(v) + loadbase)) 74 75 extern u_int lowram; 76 extern u_int Umap; 77 extern u_long boot_partition; 78 extern vaddr_t m68k_uptbase; 79 80 #ifdef P5PPC68KBOARD 81 extern int p5ppc; 82 #endif 83 84 extern char *esym; 85 86 #ifdef GRF_AGA 87 extern u_long aga_enable; 88 #endif 89 90 #if NSER > 0 91 extern int serconsole; 92 #endif 93 94 extern u_long noncontig_enable; 95 96 /* 97 * some addresses used in locore 98 */ 99 vaddr_t INTREQRaddr; 100 vaddr_t INTREQWaddr; 101 102 /* 103 * these are used by the extended spl?() macros. 
 */
volatile unsigned short *amiga_intena_read, *amiga_intena_write;

/* KVA where chip memory is mapped, and the chip RAM allocator bounds */
vaddr_t CHIPMEMADDR;
vaddr_t chipmem_start;
vaddr_t chipmem_end;

/* Zorro II (16-bit) bounce-buffer memory range; see alloc_z2mem() below */
vaddr_t z2mem_start;		/* XXX */
static vaddr_t z2mem_end;	/* XXX */
int use_z2_mem = 1;		/* XXX */

/* boot loader handoff: fastmem physical start/size and chipmem size */
u_long boot_fphystart, boot_fphysize, boot_cphysize;
/* stashed by start_c() for use by start_c_finish()/pmap_bootstrap() */
static u_int start_c_fphystart;
static u_int start_c_pstart;

static u_long boot_flags;

/* memory segment list placed after the kernel image by the boot loader */
struct boot_memlist *memlist;

/* ConfigDev (AutoConfig board) list, likewise placed there by the loader */
struct cfdev *cfdev;
int ncfdev;

u_long scsi_nosync;
int shift_nosync;

void start_c(int, u_int, u_int, u_int, char *, u_int, u_long, u_long, u_int);
void rollcolor(int);
#ifdef DEVRELOAD
static int kernel_image_magic_size(void);
static void kernel_image_magic_copy(u_char *);
int kernel_reload_write(struct uio *);
extern void kernel_reload(char *, u_long, u_long, u_long, u_long,
	u_long, u_long, u_long, u_long, u_long, u_long);
#endif
extern void etext(void);
void start_c_finish(void);

/*
 * chipmem_steal:
 *
 *	Early-boot allocator: carve 'amount' bytes off the top of chip
 *	memory, rounding the returned address down to an even boundary,
 *	and lower chipmem_end accordingly.  Stealing from the top avoids
 *	colliding with the kernel image, which may still be sitting in
 *	low chip memory before it has been copied/mapped.  Panics if the
 *	request would underrun chipmem_start.
 */
void *
chipmem_steal(long amount)
{
	/*
	 * steal from top of chipmem, so we don't collide with
	 * the kernel loaded into chipmem in the not-yet-mapped state.
	 */
	vaddr_t p = chipmem_end - amount;
	if (p & 1)
		p = p - 1;	/* keep the returned address even */
	chipmem_end = p;
	if(chipmem_start > chipmem_end)
		panic("not enough chip memory");
	return((void *)p);
}

/*
 * XXX
 * used by certain drivers currently to allocate zorro II memory
 * for bounce buffers; if use_z2_mem is zero (or no suitable Z2 range
 * was found at startup), chipmem will be returned instead.
 * XXX
 */
void *
alloc_z2mem(long amount)
{
	if (use_z2_mem && z2mem_end && (z2mem_end - amount) >= z2mem_start) {
		z2mem_end -= amount;
		return ((void *)z2mem_end);
	}
	return (alloc_chipmem(amount));
}


/*
 * this is the C-level entry function, it's called from locore.s.
 * Preconditions:
 *	Interrupts are disabled
 *	PA may not be == VA, so we may have to relocate addresses
 *	before enabling the MMU
 *	Exec is no longer available (because we're loaded all over
 *	low memory, no ExecBase is available anymore)
 *
 * Its purpose is:
 *	Do the things that are done in locore.s in the hp300 version,
 *	this includes allocation of kernel maps and enabling the MMU.
 *
 * Some of the code in here is `stolen' from Amiga MACH, and was
 * written by Bryan Ford and Niklas Hallqvist.
 *
 * Very crude 68040 support by Michael L. Hitch.
 *
 */

/* nonzero: map kernel text/data copyback-cacheable on 68040/68060 (PG_CCB) */
int kernel_copyback = 1;

/*
 * NOTE: until the kernel has been copied to fastmem and the MMU turned
 * on, every global must be accessed through RELOC(), which rebases the
 * linked address by 'loadbase'.  start_c() runs with interrupts off and
 * possibly PA != VA, hence no_instrument_function.
 */
__attribute__ ((no_instrument_function))
void
start_c(int id, u_int fphystart, u_int fphysize, u_int cphysize,
	char *esym_addr, u_int flags, u_long inh_sync, u_long boot_part,
	u_int loadbase)
{
	extern char end[];
	struct cfdev *cd;
	paddr_t pstart, pend;
	vaddr_t vstart, vend;
	psize_t avail;
	paddr_t ptpa;
	psize_t ptsize;
	u_int ptextra, kstsize;
	paddr_t Sysptmap_pa;
	register st_entry_t sg_proto, *sg;
#if defined(M68040) || defined(M68060)
	register st_entry_t *esg;
#endif
	register pt_entry_t pg_proto, *pg, *epg;
	vaddr_t end_loaded;
	u_int ncd;
#if defined(M68040) || defined(M68060)
	u_int i, nl1desc, nl2desc;
#endif
	vaddr_t kva;
	struct boot_memlist *ml;

#ifdef DEBUG_KERNEL_START
	/* XXX this only is valid if Altais is in slot 0 */
	volatile u_int8_t *altaiscolpt = (u_int8_t *)0x200003c8;
	volatile u_int8_t *altaiscol = (u_int8_t *)0x200003c9;
#endif

#ifdef DEBUG_KERNEL_START
	/* DraCo (id>>24 == 0x7D): flash Altais DAC; Amiga: background RED */
	if ((id>>24)==0x7D) {
		*altaiscolpt = 0;
		*altaiscol = 40;
		*altaiscol = 0;
		*altaiscol = 0;
	} else
		((volatile struct Custom *)0xdff000)->color[0] = 0xa00;	/* RED */
#endif

#ifdef LIMITMEM
	/* artificially cap fast memory size for testing */
	if (fphysize > LIMITMEM*1024*1024)
		fphysize = LIMITMEM*1024*1024;
#endif

	/* record loader-supplied layout in the (relocated) globals */
	RELOC(boot_fphystart, u_long) = fphystart;
	RELOC(boot_fphysize, u_long) = fphysize;
	RELOC(boot_cphysize, u_long) = cphysize;

	RELOC(machineid, int) = id;
	RELOC(chipmem_end, vaddr_t) = cphysize;
	RELOC(esym, char *) = esym_addr;
	RELOC(boot_flags, u_long) = flags;
	RELOC(boot_partition, u_long) = boot_part;
#ifdef GRF_AGA
	if (flags & 1)
		RELOC(aga_enable, u_long) |= 1;
#endif
	if (flags & (3 << 1))
		RELOC(noncontig_enable, u_long) = (flags >> 1) & 3;
#if NSER > 0
	if (flags & (1 << 3))
		RELOC(serconsole, int) = 0;
#endif

	RELOC(scsi_nosync, u_long) = inh_sync;

	/*
	 * the kernel ends at end(), plus the cfdev and memlist structures
	 * we placed there in the loader.  Correct for this now.  Also,
	 * account for kernel symbols if they are present.
	 */
	if (esym_addr == NULL)
		end_loaded = (vaddr_t)&end;
	else
		end_loaded = (vaddr_t)esym_addr;
	RELOC(ncfdev, int) = *(int *)(&RELOC(*(u_int *)end_loaded, u_int));
	RELOC(cfdev, struct cfdev *) = (struct cfdev *) ((int)end_loaded + 4);
	end_loaded += 4 + RELOC(ncfdev, int) * sizeof(struct cfdev);

	RELOC(memlist, struct boot_memlist *) =
	    (struct boot_memlist *)end_loaded;
	ml = &RELOC(*(struct boot_memlist *)end_loaded, struct boot_memlist);
	end_loaded = (vaddr_t)&((RELOC(memlist, struct boot_memlist *))->
	    m_seg[ml->m_nseg]);

	/*
	 * Get ZorroII (16-bit) memory if there is any and it's not where the
	 * kernel is loaded.
	 */
	if (ml->m_nseg > 0 && ml->m_nseg < 16 && RELOC(use_z2_mem, int)) {
		struct boot_memseg *sp, *esp;

		sp = ml->m_seg;
		esp = sp + ml->m_nseg;
		for (; sp < esp; sp++) {
			/* want fast, 24-bit-DMA-capable segments only */
			if ((sp->ms_attrib & (MEMF_FAST | MEMF_24BITDMA))
			    != (MEMF_FAST|MEMF_24BITDMA))
				continue;
			if (sp->ms_start == fphystart)
				continue;
			/* take up to use_z2_mem*7 MAXPHYS buffers off the top */
			RELOC(z2mem_end, paddr_t) =
			    sp->ms_start + sp->ms_size;
			RELOC(z2mem_start, paddr_t) =
			    RELOC(z2mem_end, paddr_t) - MAXPHYS *
			    RELOC(use_z2_mem, int) * 7;
			RELOC(NZTWOMEMPG, u_int) =
			    (RELOC(z2mem_end, paddr_t) -
			    RELOC(z2mem_start, paddr_t)) / PAGE_SIZE;
			/* clamp to the segment size */
			if ((RELOC(z2mem_end, paddr_t) -
			    RELOC(z2mem_start, paddr_t)) > sp->ms_size) {
				RELOC(NZTWOMEMPG, u_int) = sp->ms_size /
				    PAGE_SIZE;
				RELOC(z2mem_start, paddr_t) =
				    RELOC(z2mem_end, paddr_t) - sp->ms_size;
			}
			break;
		}
	}

	/*
	 * Scan ConfigDev list and get size of Zorro I/O boards that are
	 * outside the Zorro II I/O area.
	 */
	for (RELOC(ZBUSAVAIL, u_int) = 0, cd =
	    &RELOC(*RELOC(cfdev, struct cfdev *),struct cfdev),
	    ncd = RELOC(ncfdev, int); ncd > 0; ncd--, cd++) {
		int bd_type = cd->rom.type & (ERT_TYPEMASK | ERTF_MEMLIST);

		/*
		 * Hack to support p5bus and p5pb on CyberStorm Mk-III / PPC
		 * and Blizzard PPC. XXX: this hack should only be active if
		 * non-autoconfiguring CyberVision PPC or BlizzardVision PPC
		 * was found.  (manid 8512 == Phase5/P5.)
		 */
		if (cd->rom.manid == 8512 &&
		    (cd->rom.prodid == 100 || cd->rom.prodid == 110))
			RELOC(ZBUSAVAIL, u_int) += m68k_round_page(0x1400000);
#if NZ3RAMBD > 0
		if (z3rambd_match_id(cd->rom.manid, cd->rom.prodid) > 0)
		{
			/* XXX: remove board from memlist */
		} else
#endif
		if (bd_type != ERT_ZORROIII &&
		    (bd_type != ERT_ZORROII || isztwopa(cd->addr)))
			continue;	/* It's not Z2 or Z3 I/O board */
		/*
		 * Hack to adjust board size for Zorro III boards that
		 * do not specify an extended size or subsize.  This is
		 * specifically for the GVP Spectrum and hopefully won't
		 * break with other boards that configure like this.
		 */
		if (bd_type == ERT_ZORROIII &&
		    !(cd->rom.flags & ERFF_EXTENDED) &&
		    (cd->rom.flags & ERT_Z3_SSMASK) == 0)
			cd->size = 0x10000 <<
			    ((cd->rom.type - 1) & ERT_MEMMASK);

		RELOC(ZBUSAVAIL, u_int) += m68k_round_page(cd->size);
	}

	/*
	 * assume KVA_MIN == 0.  We subtract the kernel code (and
	 * the configdev's and memlists) from the virtual and
	 * physical starts and ends.
	 */
	vend = fphysize;
	avail = vend;
	vstart = end_loaded;
	vstart = m68k_round_page(vstart);
	pstart = (paddr_t)vstart + fphystart;
	pend = vend + fphystart;
	avail -= vstart;

	/*
	 * save KVA of lwp0 u-area and allocate it.
	 */
	RELOC(lwp0uarea, vaddr_t) = vstart;
	pstart += USPACE;
	vstart += USPACE;
	avail -= USPACE;

	/* 68040 MMU needs a multi-page kernel segment table */
#if defined(M68040) || defined(M68060)
	if (RELOC(mmutype, int) == MMU_68040)
		kstsize = MAXKL2SIZE / (NPTEPG/SG4_LEV2SIZE);
	else
#endif
		kstsize = 1;

	/*
	 * allocate the kernel segment table
	 */
	RELOC(Sysseg_pa, u_int) = pstart;
	RELOC(Sysseg, u_int) = vstart;
	vstart += PAGE_SIZE * kstsize;
	pstart += PAGE_SIZE * kstsize;
	avail -= PAGE_SIZE * kstsize;

	/*
	 * allocate kernel page table map
	 */
	RELOC(Sysptmap, u_int) = vstart;
	Sysptmap_pa = pstart;
	vstart += PAGE_SIZE;
	pstart += PAGE_SIZE;
	avail -= PAGE_SIZE;

	/*
	 * allocate initial page table pages; 'ptextra' counts the extra
	 * PTEs needed for the device/IO mappings validated further down.
	 */
	ptpa = pstart;
#ifdef DRACO
	if ((id>>24)==0x7D) {
		ptextra = NDRCCPG
		    + RELOC(NZTWOMEMPG, u_int)
		    + btoc(RELOC(ZBUSAVAIL, u_int));
	} else
#endif
	ptextra = NCHIPMEMPG + NCIAPG + NZTWOROMPG + RELOC(NZTWOMEMPG, u_int) +
	    btoc(RELOC(ZBUSAVAIL, u_int)) + NPCMCIAPG;

	ptsize = (RELOC(Sysptsize, u_int) +
	    howmany(ptextra, NPTEPG)) << PGSHIFT;

	vstart += ptsize;
	pstart += ptsize;
	avail -= ptsize;

	/*
	 * Sysmap is now placed at the end of Supervisor virtual address space.
	 */
	RELOC(Sysmap, u_int *) = (u_int *)SYSMAP_VA;

	/*
	 * initialize segment table and page table map
	 */
#if defined(M68040) || defined(M68060)
	if (RELOC(mmutype, int) == MMU_68040) {
		/*
		 * First invalidate the entire "segment table" pages
		 * (levels 1 and 2 have the same "invalid" values).
		 */
		sg = (st_entry_t *)RELOC(Sysseg_pa, u_int);
		esg = &sg[kstsize * NPTEPG];
		while (sg < esg)
			*sg++ = SG_NV;
		/*
		 * Initialize level 2 descriptors (which immediately
		 * follow the level 1 table).  We need:
		 *	NPTEPG / SG4_LEV3SIZE
		 * level 2 descriptors to map each of the nptpages
		 * pages of PTEs.  Note that we set the "used" bit
		 * now to save the HW the expense of doing it.
		 */
		nl2desc = (ptsize >> PGSHIFT) * (NPTEPG / SG4_LEV3SIZE);
		sg = (st_entry_t *)RELOC(Sysseg_pa, u_int);
		sg = &sg[SG4_LEV1SIZE];
		esg = &sg[nl2desc];
		sg_proto = ptpa | SG_U | SG_RW | SG_V;
		while (sg < esg) {
			*sg++ = sg_proto;
			sg_proto += (SG4_LEV3SIZE * sizeof (st_entry_t));
		}

		/*
		 * Initialize level 1 descriptors.  We need:
		 *	howmany(nl2desc, SG4_LEV2SIZE)
		 * level 1 descriptors to map the 'nl2desc' level 2's.
		 */
		nl1desc = howmany(nl2desc, SG4_LEV2SIZE);
		sg = (st_entry_t *)RELOC(Sysseg_pa, u_int);
		esg = &sg[nl1desc];
		sg_proto = (paddr_t)&sg[SG4_LEV1SIZE] | SG_U | SG_RW | SG_V;
		while (sg < esg) {
			*sg++ = sg_proto;
			sg_proto += (SG4_LEV2SIZE * sizeof(st_entry_t));
		}

		/* Sysmap is last entry in level 1 */
		sg = (st_entry_t *)RELOC(Sysseg_pa, u_int);
		sg = &sg[SG4_LEV1SIZE - 1];
		*sg = sg_proto;

		/*
		 * Kernel segment table at end of next level 2 table
		 */
		i = SG4_LEV1SIZE + (nl1desc * SG4_LEV2SIZE);
		sg = (st_entry_t *)RELOC(Sysseg_pa, u_int);
		sg = &sg[i + SG4_LEV2SIZE - (NPTEPG / SG4_LEV3SIZE)];
		esg = &sg[NPTEPG / SG4_LEV3SIZE];
		sg_proto = Sysptmap_pa | SG_U | SG_RW | SG_V;
		while (sg < esg) {
			*sg++ = sg_proto;
			sg_proto += (SG4_LEV3SIZE * sizeof (st_entry_t));
		}

		/* Include additional level 2 table for Sysmap in protostfree */
		RELOC(protostfree, u_int) =
		    (~0 << (1 + nl1desc + 1)) /* & ~(~0 << MAXKL2SIZE) */;

		/*
		 * Initialize Sysptmap
		 */
		pg = (pt_entry_t *)Sysptmap_pa;
		epg = &pg[ptsize >> PGSHIFT];
		pg_proto = ptpa | PG_RW | PG_CI | PG_V;
		while (pg < epg) {
			*pg++ = pg_proto;
			pg_proto += PAGE_SIZE;
		}
		/*
		 * Invalidate rest of Sysptmap page
		 */
		epg = (pt_entry_t *)(Sysptmap_pa + PAGE_SIZE - sizeof(st_entry_t));
		while (pg < epg)
			*pg++ = SG_NV;
		pg = (pt_entry_t *)Sysptmap_pa;
		pg = &pg[SYSMAP_VA >> SEGSHIFT];
		*pg = Sysptmap_pa | PG_RW | PG_CI | PG_V;
	} else
#endif /* M68040 */
	{
		/*
		 * Map the page table pages in both the HW segment table
		 * and the software Sysptmap.
		 */
		sg = (st_entry_t *)RELOC(Sysseg_pa, u_int);
		pg = (pt_entry_t *)Sysptmap_pa;
		epg = &pg[ptsize >> PGSHIFT];
		sg_proto = ptpa | SG_RW | SG_V;
		pg_proto = ptpa | PG_RW | PG_CI | PG_V;
		while (pg < epg) {
			*sg++ = sg_proto;
			*pg++ = pg_proto;
			sg_proto += PAGE_SIZE;
			pg_proto += PAGE_SIZE;
		}
		/*
		 * invalidate the remainder of each table
		 */
		epg = (pt_entry_t *)Sysptmap_pa;
		epg = &epg[TIA_SIZE];
		while (pg < epg) {
			*sg++ = SG_NV;
			*pg++ = PG_NV;
		}
		sg = (st_entry_t *)RELOC(Sysseg_pa, u_int);
		sg = &sg[SYSMAP_VA >> SEGSHIFT];
		pg = (pt_entry_t *)Sysptmap_pa;
		pg = &pg[SYSMAP_VA >> SEGSHIFT];
		*sg = Sysptmap_pa | SG_RW | SG_V;
		*pg = Sysptmap_pa | PG_RW | PG_CI | PG_V;
		/* XXX zero out rest of page? */
	}

	/*
	 * initialize kernel page table page(s) (assume load at VA 0)
	 */
	pg_proto = fphystart | PG_RO | PG_V;	/* text pages are RO */
	pg = (pt_entry_t *)ptpa;
	*pg++ = PG_NV;				/* Make page 0 invalid */
	pg_proto += PAGE_SIZE;
	for (kva = PAGE_SIZE; kva < (vaddr_t)etext;
	    kva += PAGE_SIZE, pg_proto += PAGE_SIZE)
		*pg++ = pg_proto;

	/*
	 * data, bss and dynamic tables are read/write
	 */
	pg_proto = (pg_proto & PG_FRAME) | PG_RW | PG_V;

#if defined(M68040) || defined(M68060)
	/*
	 * Map the kernel segment table cache invalidated for 68040/68060.
	 * (for the 68040 not strictly necessary, but recommended by Motorola;
	 *  for the 68060 mandatory)
	 */
	if (RELOC(mmutype, int) == MMU_68040) {

		if (RELOC(kernel_copyback, int))
			pg_proto |= PG_CCB;

		/*
		 * ASSUME: segment table and statically allocated page tables
		 * of the kernel are contiguously allocated, start at
		 * Sysseg and end at the current value of vstart.
		 */
		for (; kva < RELOC(Sysseg, u_int);
		    kva += PAGE_SIZE, pg_proto += PAGE_SIZE)
			*pg++ = pg_proto;

		/* the segment/page tables themselves: cache-inhibited */
		pg_proto = (pg_proto & ~PG_CCB) | PG_CI;
		for (; kva < vstart; kva += PAGE_SIZE, pg_proto += PAGE_SIZE)
			*pg++ = pg_proto;

		/* back to (possibly copyback) cacheable for what follows */
		pg_proto = (pg_proto & ~PG_CI);
		if (RELOC(kernel_copyback, int))
			pg_proto |= PG_CCB;
	}
#endif
	/*
	 * go till end of data allocated so far
	 * plus lwp0 u-area (to be allocated)
	 */
	for (; kva < vstart; kva += PAGE_SIZE, pg_proto += PAGE_SIZE)
		*pg++ = pg_proto;
	/*
	 * invalidate remainder of kernel PT
	 */
	while (pg < (pt_entry_t *) (ptpa + ptsize))
		*pg++ = PG_NV;

	/*
	 * validate internal IO PTEs following current vstart
	 */
	pg = &((u_int *)ptpa)[vstart >> PGSHIFT];
#ifdef DRACO
	if ((id >> 24) == 0x7D) {
		RELOC(DRCCADDR, u_int) = vstart;
		RELOC(CIAADDR, vaddr_t) =
		    RELOC(DRCCADDR, u_int) + DRCIAPG * PAGE_SIZE;
		if (RELOC(z2mem_end, vaddr_t) == 0)
			RELOC(ZBUSADDR, vaddr_t) =
			    RELOC(DRCCADDR, u_int) + NDRCCPG * PAGE_SIZE;
		pg_proto = DRCCBASE | PG_RW | PG_CI | PG_V;
		while (pg_proto < DRZ2BASE) {
			*pg++ = pg_proto;
			pg_proto += DRCCSTRIDE;
			vstart += PAGE_SIZE;
		}

		/* NCR 53C710 chip */
		*pg++ = DRSCSIBASE | PG_RW | PG_CI | PG_V;
		vstart += PAGE_SIZE;

#ifdef DEBUG_KERNEL_START
		/*
		 * early rollcolor Altais mapping
		 * XXX (only works if in slot 0)
		 */
		*pg++ = 0x20000000 | PG_RW | PG_CI | PG_V;
		vstart += PAGE_SIZE;
#endif
	} else
#endif
	{
		RELOC(CHIPMEMADDR, vaddr_t) = vstart;
		pg_proto = CHIPMEMBASE | PG_RW | PG_CI | PG_V;
		/* CI needed here?? */
		while (pg_proto < CHIPMEMTOP) {
			*pg++ = pg_proto;
			pg_proto += PAGE_SIZE;
			vstart += PAGE_SIZE;
		}
	}
	if (RELOC(z2mem_end, paddr_t)) {		/* XXX */
		RELOC(ZTWOMEMADDR, vaddr_t) = vstart;
		RELOC(ZBUSADDR, vaddr_t) = RELOC(ZTWOMEMADDR, vaddr_t) +
		    RELOC(NZTWOMEMPG, u_int) * PAGE_SIZE;
		pg_proto = RELOC(z2mem_start, paddr_t) |	/* XXX */
		    PG_RW | PG_V;				/* XXX */
		while (pg_proto < RELOC(z2mem_end, paddr_t)) {	/* XXX */
			*pg++ = pg_proto;			/* XXX */
			pg_proto += PAGE_SIZE;			/* XXX */
			vstart += PAGE_SIZE;
		}						/* XXX */
	}							/* XXX */
#ifdef DRACO
	if ((id >> 24) != 0x7D)
#endif
	{
		RELOC(CIAADDR, vaddr_t) = vstart;
		pg_proto = CIABASE | PG_RW | PG_CI | PG_V;
		while (pg_proto < CIATOP) {
			*pg++ = pg_proto;
			pg_proto += PAGE_SIZE;
			vstart += PAGE_SIZE;
		}
		RELOC(ZTWOROMADDR, vaddr_t) = vstart;
		pg_proto = ZTWOROMBASE | PG_RW | PG_CI | PG_V;
		while (pg_proto < ZTWOROMTOP) {
			*pg++ = pg_proto;
			pg_proto += PAGE_SIZE;
			vstart += PAGE_SIZE;
		}
		RELOC(ZBUSADDR, vaddr_t) = vstart;
		/* not on 8k boundary :-( */
		RELOC(CIAADDR, vaddr_t) += PAGE_SIZE/2;
		RELOC(CUSTOMADDR, vaddr_t) =
		    RELOC(ZTWOROMADDR, vaddr_t) - ZTWOROMBASE + CUSTOMBASE;
	}

	/*
	 *[ following page tables MAY be allocated to ZORRO3 space,
	 * but they're then later mapped in autoconf.c ]
	 */
	vstart += RELOC(ZBUSAVAIL, u_int);

	/*
	 * init mem sizes
	 */
	RELOC(maxmem, u_int) = pend >> PGSHIFT;
	RELOC(lowram, u_int) = fphystart;
	RELOC(physmem, u_int) = fphysize >> PGSHIFT;

	RELOC(virtual_avail, u_int) = vstart;

	/*
	 * Put user page tables starting at next 16MB boundary, to make kernel
	 * dumps more readable, with guaranteed 16MB of headroom.
	 * XXX 16 MB instead of 256 MB should be enough, but...
	 * we need to fix the fastmem loading first.
	 * (see "assume KVA_MIN == 0" comment above)
	 */
	RELOC(m68k_uptbase, vaddr_t) =
	    roundup(vstart + 0x10000000, 0x10000000);

#if defined(M68020) || defined(M68030)
	/*
	 * set this before copying the kernel, so the variable is updated in
	 * the `real' place too. protorp[0] is already preset to the
	 * CRP setting.
	 */
	RELOC(protorp[1], u_int) = RELOC(Sysseg_pa, u_int);
#endif

	RELOC(start_c_fphystart, u_int) = fphystart;
	RELOC(start_c_pstart, u_int) = pstart;

	/*
	 * copy over the kernel (and all now initialized variables)
	 * to fastram.  DONT use bcopy(), this beast is much larger
	 * than 128k !
	 */
	if (loadbase == 0) {
		register paddr_t *lp, *le, *fp;

		lp = (paddr_t *)0;
		le = (paddr_t *)end_loaded;
		fp = (paddr_t *)fphystart;
		while (lp < le)
			*fp++ = *lp++;
	}

#ifdef DEBUG_KERNEL_START
	if ((id>>24)==0x7D) {
		*altaiscolpt = 0;
		*altaiscol = 40;
		*altaiscol = 40;
		*altaiscol = 0;
	} else
		((volatile struct Custom *)0xdff000)->color[0] = 0xAA0;	/* YELLOW */
#endif
	/*
	 * prepare to enable the MMU
	 */
#if defined(M68040) || defined(M68060)
	if (RELOC(mmutype, int) == MMU_68040) {
		if (id & AMIGA_68060) {
			/* do i need to clear the branch cache? */
			__asm volatile (".word 0x4e7a,0x0002;"
				"orl #0x400000,%%d0;"
				".word 0x4e7b,0x0002" : : : "d0");
		}

		/*
		 * movel Sysseg_pa,%a0;
		 * movec %a0,%srp;
		 */
		__asm volatile ("movel %0,%%a0; .word 0x4e7b,0x8807"
		    : : "a" (RELOC(Sysseg_pa, u_int)) : "a0");

#ifdef DEBUG_KERNEL_START
		if ((id>>24)==0x7D) {
			*altaiscolpt = 0;
			*altaiscol = 40;
			*altaiscol = 33;
			*altaiscol = 0;
		} else
			((volatile struct Custom *)0xdff000)->color[0] = 0xA70;	/* ORANGE */
#endif
		return;
	}
#endif /* M68040 || M68060 */
#if defined(M68020) || defined(M68030)
	/*
	 * setup and load SRP (see pmap.h)
	 */
	__asm volatile ("pmove %0@,%%srp":: "a" (&RELOC(protorp, u_int)));
#endif /* M68020 || M68030 */
}

/*
 * start_c_finish:
 *
 *	Second-stage startup, entered after the MMU has been enabled:
 *	run pmap_bootstrap()/pmap_bootstrap_finalize(), record the mapped
 *	CIA/custom-chip addresses for locore.s, rebase the chipmem and
 *	Zorro II allocator bounds to their mapped addresses, mask and
 *	clear all interrupts (leaving only the global enable set), keep
 *	the A3000 SuperKick ROM enabled, detect P5 PPC/68K boards, and
 *	choose a preliminary delay divisor from the CPU type.
 */
void
start_c_finish(void)
{
	extern u_int32_t delaydivisor;
#ifdef P5PPC68KBOARD
	struct cfdev *cdp, *ecdp;
#endif

#ifdef DEBUG_KERNEL_START
#ifdef DRACO
	if ((id >> 24) == 0x7D) { /* mapping on, is_draco() is valid */
		int i;
		/* XXX experimental Altais register mapping only */
		altaiscolpt = (volatile u_int8_t *)(DRCCADDR+PAGE_SIZE*9+0x3c8);
		altaiscol = altaiscolpt + 1;
		for (i=0; i<140000; i++) {
			*altaiscolpt = 0;
			*altaiscol = 0;
			*altaiscol = 40;
			*altaiscol = 0;
		}
	} else
#endif
		((volatile struct Custom *)CUSTOMADDR)->color[0] = 0x0a0; /* GREEN */
#endif

	pmap_bootstrap(start_c_pstart, start_c_fphystart);
	pmap_bootstrap_finalize();

	/*
	 * to make life easier in locore.s, set these addresses explicitly
	 */
	CIAAbase = CIAADDR + 0x1001;	/* CIA-A at odd addresses ! */
	CIABbase = CIAADDR;
	CUSTOMbase = CUSTOMADDR;
#ifdef DRACO
	if (is_draco()) {
		draco_intena = (volatile u_int8_t *)DRCCADDR+1;
		draco_intpen = draco_intena + PAGE_SIZE;
		draco_intfrc = draco_intpen + PAGE_SIZE;
		draco_misc = draco_intfrc + PAGE_SIZE;
		draco_ioct = (struct drioct *)(DRCCADDR + DRIOCTLPG*PAGE_SIZE);
	} else
#endif
	{
		INTREQRaddr = (vaddr_t)&custom.intreqr;
		INTREQWaddr = (vaddr_t)&custom.intreq;
	}
	/*
	 * Get our chip memory allocation system working
	 */
	chipmem_start += CHIPMEMADDR;
	chipmem_end += CHIPMEMADDR;

	/* XXX is: this MUST NOT BE DONE before the pmap_bootstrap() call */
	if (z2mem_end) {
		z2mem_end = ZTWOMEMADDR + NZTWOMEMPG * PAGE_SIZE;
		z2mem_start = ZTWOMEMADDR;
	}

	/*
	 * disable all interrupts but allow them to be enabled
	 * by specific driver code (global int enable bit)
	 */
#ifdef DRACO
	if (is_draco()) {
		/* XXX to be done. For now, just: */
		*draco_intena = 0;
		*draco_intpen = 0;
		*draco_intfrc = 0;
		ciaa.icr = 0x7f;			/* and keyboard */
		ciab.icr = 0x7f;			/* and again */

		draco_ioct->io_control &=
		    ~(DRCNTRL_KBDINTENA|DRCNTRL_FDCINTENA); /* and another */

		draco_ioct->io_status2 &=
		    ~(DRSTAT2_PARIRQENA|DRSTAT2_TMRINTENA); /* some more */

		*(volatile u_int8_t *)(DRCCADDR + 1 +
		    DRSUPIOPG*PAGE_SIZE + 4*(0x3F8 + 1)) = 0; /* and com0 */

		*(volatile u_int8_t *)(DRCCADDR + 1 +
		    DRSUPIOPG*PAGE_SIZE + 4*(0x2F8 + 1)) = 0; /* and com1 */

		draco_ioct->io_control |= DRCNTRL_WDOGDIS; /* stop Fido */
		*draco_misc &= ~1/*DRMISC_FASTZ2*/;

	} else
#endif
	{
		custom.intena = 0x7fff;			/* disable ints */
		custom.intena = INTF_SETCLR | INTF_INTEN;
							/* but allow them */
		custom.intreq = 0x7fff;			/* clear any current */
		ciaa.icr = 0x7f;			/* and keyboard */
		ciab.icr = 0x7f;			/* and again */

		/*
		 * remember address of read and write intena register for use
		 * by extended spl?() macros.
		 */
		amiga_intena_read = &custom.intenar;
		amiga_intena_write = &custom.intena;
	}

	/*
	 * This is needed for 3000's with superkick ROM's.  Bit 7 of
	 * 0xde0002 enables the ROM if set.  If this isn't set the machine
	 * has to be powercycled in order for it to boot again.  ICKA! RFH
	 */
	if (is_a3000()) {
		volatile unsigned char *a3000_magic_reset;

		a3000_magic_reset = (volatile unsigned char *)ztwomap(0xde0002);

		/* Turn SuperKick ROM (V36) back on */
		*a3000_magic_reset |= 0x80;
	}

#ifdef P5PPC68KBOARD
	/*
	 * Are we a P5 PPC/68K board? install different reset
	 * routine.
	 */
	for (cdp = cfdev, ecdp = &cfdev[ncfdev]; cdp < ecdp; cdp++) {
		if (cdp->rom.manid == 8512 &&
		    (cdp->rom.prodid == 100 || cdp->rom.prodid == 110)) {
			p5ppc = 1;
			break;
		}
	}
#endif
	/*
	 * preliminary delay divisor value
	 */
	if (machineid & AMIGA_68060)
		delaydivisor = (1024 * 1) / 80;	/* 80 MHz 68060 w. BTC */

	else if (machineid & AMIGA_68040)
		delaydivisor = (1024 * 3) / 40;	/* 40 MHz 68040 */

	else if (machineid & AMIGA_68030)
		delaydivisor = (1024 * 8) / 50;	/* 50 MHz 68030 */

	else
		delaydivisor = (1024 * 8) / 33;	/* 33 MHz 68020 */
}

/*
 * rollcolor:
 *
 *	Repeatedly write 'color' into the custom chip's color[0] register
 *	at splhigh -- a simple visual progress/debug indicator.
 */
void
rollcolor(int color)
{
	int s, i;

	s = splhigh();
	/*
	 * need to adjust count -
	 * too slow when cache off, too fast when cache on
	 */
	for (i = 0; i < 400000; i++)
		((volatile struct Custom *)CUSTOMbase)->color[0] = color;
	splx(s);
}

#ifdef DEVRELOAD
/*
 * Kernel reloading code
 */

static struct exec kernel_exec;		/* a.out header of the new kernel */
static u_char *kernel_image;		/* staging buffer (NULL = no load active) */
static u_long kernel_text_size, kernel_load_ofs;
static u_long kernel_load_phase;	/* state machine: see kernel_reload_write() */
static u_long kernel_load_endseg;
static u_long kernel_symbol_size, kernel_symbol_esym;

/* This supports the /dev/reload device, major 2, minor 20,
   hooked into mem.c.  Author: Bryan Ford.  */

/*
 * This is called below to find out how much magic storage
 * will be needed after a kernel image to be reloaded.
 */
static int
kernel_image_magic_size(void)
{
	int sz;

	/* 4 + cfdev's + Mem_Seg's + 4 */
	sz = 8 + ncfdev * sizeof(struct cfdev)
	    + memlist->m_nseg * sizeof(struct boot_memseg);
	return(sz);
}

/* This actually copies the magic information. */
static void
kernel_image_magic_copy(u_char *dest)
{
	*((int*)dest) = ncfdev;
	dest += 4;
	memcpy(dest, cfdev, ncfdev * sizeof(struct cfdev)
	    + memlist->m_nseg * sizeof(struct boot_memseg) + 4);
}

#undef AOUT_LDPGSZ
#define AOUT_LDPGSZ 8192 /* XXX ???
 */

/*
 * kernel_reload_write:
 *
 *	Write handler for the /dev/reload device: accepts an a.out kernel
 *	image streamed in over successive writes.  The first write must
 *	contain the complete exec header; it is used to size and allocate
 *	a staging buffer (text rounded to AOUT_LDPGSZ, plus data, bss, an
 *	estimated symbol area, and the "magic" boot info).  Subsequent
 *	writes fill the buffer, driven by kernel_load_phase:
 *	  0 = loading text, 1 = loading data, 3 = loading symbols,
 *	  2 = image complete.
 *	When complete, the boot parameters are appended via
 *	kernel_image_magic_copy() and kernel_reload() (locore.s) is
 *	called to start the new kernel; if that returns, the buffer is
 *	freed and ENODEV is returned.
 */
int
kernel_reload_write(struct uio *uio)
{
	extern int eclockfreq;
	struct iovec *iov;
	int error, c;

	iov = uio->uio_iov;

	if (kernel_image == 0) {
		/*
		 * We have to get at least the whole exec header
		 * in the first write.
		 */
		if (iov->iov_len < sizeof(kernel_exec))
			return ENOEXEC;		/* XXX */

		/*
		 * Pull in the exec header and check it.
		 */
		if ((error = uiomove((void *)&kernel_exec, sizeof(kernel_exec),
		     uio)) != 0)
			return(error);
		printf("loading kernel %ld+%ld+%ld+%ld\n", kernel_exec.a_text,
		    kernel_exec.a_data, kernel_exec.a_bss,
		    esym == NULL ? 0 : kernel_exec.a_syms);
		/*
		 * Looks good - allocate memory for a kernel image.
		 */
		kernel_text_size = (kernel_exec.a_text
		    + AOUT_LDPGSZ - 1) & (-AOUT_LDPGSZ);
		/*
		 * Estimate space needed for symbol names, since we don't
		 * know how big it really is.
		 */
		if (esym != NULL) {
			kernel_symbol_size = kernel_exec.a_syms;
			kernel_symbol_size += 16 * (kernel_symbol_size / 12);
		}
		/*
		 * XXX - should check that image will fit in CHIP memory
		 * XXX return an error if it doesn't
		 */
		if ((kernel_text_size + kernel_exec.a_data +
		    kernel_exec.a_bss + kernel_symbol_size +
		    kernel_image_magic_size()) > boot_cphysize)
			return (EFBIG);
		kernel_image = malloc(kernel_text_size + kernel_exec.a_data
		    + kernel_exec.a_bss
		    + kernel_symbol_size
		    + kernel_image_magic_size(),
		    M_TEMP, M_WAITOK);
		kernel_load_ofs = 0;
		kernel_load_phase = 0;
		kernel_load_endseg = kernel_exec.a_text;
		return(0);
	}
	/*
	 * Continue loading in the kernel image.
	 */
	c = uimin(iov->iov_len, kernel_load_endseg - kernel_load_ofs);
	c = uimin(c, MAXPHYS);
	if ((error = uiomove(kernel_image + kernel_load_ofs, (int)c, uio)) != 0)
		return(error);
	kernel_load_ofs += c;

	/*
	 * Fun and games to handle loading symbols - the length of the
	 * string table isn't known until after the symbol table has
	 * been loaded.  We have to load the kernel text, data, and
	 * the symbol table, then get the size of the strings.  A
	 * new kernel image is then allocated and the data currently
	 * loaded moved to the new image.  Then continue reading the
	 * string table.  This has problems if there isn't enough
	 * room to allocate space for the two copies of the kernel
	 * image.  So the approach I took is to guess at the size
	 * of the symbol strings.  If the guess is wrong, the symbol
	 * table is ignored.
	 */
	if (kernel_load_ofs != kernel_load_endseg)
		return(0);

	/* current segment fully loaded: advance the state machine */
	switch (kernel_load_phase) {
	case 0:		/* done loading kernel text */
		kernel_load_ofs = kernel_text_size;
		kernel_load_endseg = kernel_load_ofs + kernel_exec.a_data;
		kernel_load_phase = 1;
		break;
	case 1:		/* done loading kernel data */
		for(c = 0; c < kernel_exec.a_bss; c++)
			kernel_image[kernel_load_ofs + c] = 0;
		kernel_load_ofs += kernel_exec.a_bss;
		if (esym) {
			/* +8: symbol size word and string-table size word */
			kernel_load_endseg = kernel_load_ofs
			    + kernel_exec.a_syms + 8;
			*((u_long *)(kernel_image + kernel_load_ofs)) =
			    kernel_exec.a_syms;
			kernel_load_ofs += 4;
			kernel_load_phase = 3;
			break;
		}
		/*FALLTHROUGH*/
	case 2:		/* done loading kernel */

		/*
		 * Put the finishing touches on the kernel image.
		 */
		kernel_image_magic_copy(kernel_image + kernel_load_ofs);
		/*
		 * Start the new kernel with code in locore.s.
		 */
		kernel_reload(kernel_image,
		    kernel_load_ofs + kernel_image_magic_size(),
		    kernel_exec.a_entry, boot_fphystart, boot_fphysize,
		    boot_cphysize, kernel_symbol_esym, eclockfreq,
		    boot_flags, scsi_nosync, boot_partition);
		/*
		 * kernel_reload() now checks to see if the reload_code
		 * is at the same location in the new kernel.
		 * If it isn't, it will return and we will return
		 * an error.
		 */
		free(kernel_image, M_TEMP);
		kernel_image = NULL;
		return (ENODEV);	/* Say operation not supported */
	case 3:		/* done loading kernel symbol table */
		/* clamp string table size to the earlier estimate */
		c = *((u_long *)(kernel_image + kernel_load_ofs - 4));
		if (c > 16 * (kernel_exec.a_syms / 12))
			c = 16 * (kernel_exec.a_syms / 12);
		kernel_load_endseg += c - 4;
		kernel_symbol_esym = kernel_load_endseg;
#ifdef notyet
		kernel_image_copy = kernel_image;
		kernel_image = malloc(kernel_load_ofs + c
		    + kernel_image_magic_size(), M_TEMP, M_WAITOK);
		if (kernel_image == NULL)
			panic("kernel_reload failed second malloc");
		for (c = 0; c < kernel_load_ofs; c += MAXPHYS)
			memcpy(kernel_image + c, kernel_image_copy + c,
			    (kernel_load_ofs - c) > MAXPHYS ? MAXPHYS :
			    kernel_load_ofs - c);
#endif
		kernel_load_phase = 2;
	}
	return(0);
}
#endif

/*
 * mm_md_readwrite:
 *
 *	Machine-dependent read/write hook for the mem driver (see mm(4)):
 *	only DEV_RELOAD is handled here (writes feed kernel_reload_write,
 *	reads are a no-op); any other minor is ENXIO.
 */
int
mm_md_readwrite(dev_t dev, struct uio *uio)
{

	switch (minor(dev)) {
#ifdef DEVRELOAD
	case DEV_RELOAD:
		if (uio->uio_rw == UIO_READ)
			return 0;
		return kernel_reload_write(uio);
#endif
	default:
		return ENXIO;
	}
}