1 /* $NetBSD: machdep.c,v 1.25 2026/01/04 21:16:29 skrll Exp $ */ 2 3 /*- 4 * Copyright (c) 2001, 2002 The NetBSD Foundation, Inc. 5 * All rights reserved. 6 * 7 * This code is derived from software contributed to The NetBSD Foundation 8 * by Matthew Fredette. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 29 * POSSIBILITY OF SUCH DAMAGE. 30 */ 31 32 /* $OpenBSD: machdep.c,v 1.40 2001/09/19 20:50:56 mickey Exp $ */ 33 34 /* 35 * Copyright (c) 1999-2003 Michael Shalayeff 36 * All rights reserved. 37 * 38 * Redistribution and use in source and binary forms, with or without 39 * modification, are permitted provided that the following conditions 40 * are met: 41 * 1. Redistributions of source code must retain the above copyright 42 * notice, this list of conditions and the following disclaimer. 43 * 2. Redistributions in binary form must reproduce the above copyright 44 * notice, this list of conditions and the following disclaimer in the 45 * documentation and/or other materials provided with the distribution. 46 * 47 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 48 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 49 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 50 * IN NO EVENT SHALL THE AUTHOR OR HIS RELATIVES BE LIABLE FOR ANY DIRECT, 51 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 52 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 53 * SERVICES; LOSS OF MIND, USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 54 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, 55 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING 56 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 57 * THE POSSIBILITY OF SUCH DAMAGE. 
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: machdep.c,v 1.25 2026/01/04 21:16:29 skrll Exp $");

#include "opt_cputype.h"
#include "opt_ddb.h"
#include "opt_kgdb.h"
#include "opt_modular.h"
#include "opt_useleds.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/signalvar.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/cpu.h>
#include <sys/reboot.h>
#include <sys/device.h>
#include <sys/conf.h>
#include <sys/file.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/msgbuf.h>
#include <sys/ioctl.h>
#include <sys/tty.h>
#include <sys/exec.h>
#include <sys/exec_aout.h>		/* for MID_* */
#include <sys/sysctl.h>
#include <sys/core.h>
#include <sys/kcore.h>
#include <sys/module.h>
#include <sys/extent.h>
#include <sys/ksyms.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/syscallargs.h>

#include <uvm/uvm_page.h>
#include <uvm/uvm.h>

#include <dev/cons.h>
#include <dev/mm.h>

#include <machine/pdc.h>
#include <machine/iomod.h>
#include <machine/psl.h>
#include <machine/reg.h>
#include <machine/cpufunc.h>
#include <machine/autoconf.h>
#include <machine/bootinfo.h>
#include <machine/kcore.h>
#include <machine/pcb.h>

#ifdef KGDB
#include "com.h"
#endif

#ifdef DDB
#include <machine/db_machdep.h>
#include <ddb/db_access.h>
#include <ddb/db_sym.h>
#include <ddb/db_extern.h>
#endif

#include <hppa/hppa/machdep.h>
#include <hppa/hppa/pim.h>
#include <hppa/dev/cpudevs.h>

#include "ksyms.h"
#include "lcd.h"

#ifdef MACHDEPDEBUG

#define	DPRINTF(s)	do {		\
	if (machdepdebug)		\
		printf s;		\
} while(0)

#define	DPRINTFN(l,s)	do {		\
	if (machdepdebug >= (l))	\
		printf s;		\
} while(0)

int machdepdebug = 1;
#else
#define	DPRINTF(s)	/* */
#define	DPRINTFN(l,s)	/* */
#endif

/*
 * Different kinds of flags used throughout the kernel.
 */
void *msgbufaddr;

/* The primary (aka monarch) cpu HPA */
hppa_hpa_t hppa_mcpuhpa;

/*
 * cache configuration, for most machines is the same
 * numbers, so it makes sense to do defines w/ numbers depending
 * on configured CPU types in the kernel
 */
int icache_stride, icache_line_mask;
int dcache_stride, dcache_line_mask;

/*
 * things to not kill
 */
volatile uint8_t *machine_ledaddr;
int machine_ledword, machine_leds;

/*
 * This flag is nonzero iff page zero is mapped.
 * It is initialized to 1, because before we go
 * virtual, page zero *is* available.  It is set
 * to zero right before we go virtual.
 */
static int pagezero_mapped = 1;

/*
 * CPU params (should be the same for all cpus in the system)
 */
struct pdc_cache pdc_cache;
struct pdc_btlb pdc_btlb;
struct pdc_model pdc_model;

int usebtlb;

/*
 * The BTLB slots.
 */
static struct btlb_slot {

	/* The number associated with this slot. */
	int btlb_slot_number;

	/* The flags associated with this slot. */
	int btlb_slot_flags;
#define	BTLB_SLOT_IBTLB			(1 << 0)
#define	BTLB_SLOT_DBTLB			(1 << 1)
#define	BTLB_SLOT_CBTLB			(BTLB_SLOT_IBTLB | BTLB_SLOT_DBTLB)
#define	BTLB_SLOT_VARIABLE_RANGE	(1 << 2)

	/*
	 * The mapping information.
A mapping is free 205 * if its btlb_slot_frames member is zero. 206 */ 207 pa_space_t btlb_slot_va_space; 208 vaddr_t btlb_slot_va_frame; 209 paddr_t btlb_slot_pa_frame; 210 vsize_t btlb_slot_frames; 211 u_int btlb_slot_tlbprot; 212 } *btlb_slots; 213 int btlb_slots_count; 214 215 /* w/ a little deviation should be the same for all installed cpus */ 216 u_int cpu_ticksnum, cpu_ticksdenom, cpu_hzticks; 217 218 /* exported info */ 219 char machine[] = MACHINE; 220 const struct hppa_cpu_info *hppa_cpu_info; 221 enum hppa_cpu_type cpu_type; 222 int cpu_modelno; 223 int cpu_revision; 224 225 #if NLCD > 0 226 bool lcd_blink_p; 227 #endif 228 229 /* 230 * exported methods for cpus 231 */ 232 int (*cpu_desidhash)(void); 233 int (*cpu_hpt_init)(vaddr_t, vsize_t); 234 int (*cpu_ibtlb_ins)(int, pa_space_t, vaddr_t, paddr_t, vsize_t, u_int); 235 int (*cpu_dbtlb_ins)(int, pa_space_t, vaddr_t, paddr_t, vsize_t, u_int); 236 237 dev_t bootdev; 238 int totalphysmem; /* # pages in system */ 239 int availphysmem; /* # pages available to kernel */ 240 int esym; 241 paddr_t avail_end; 242 243 /* 244 * Our copy of the bootinfo struct passed to us by the boot loader. 245 */ 246 struct bootinfo bootinfo; 247 248 /* 249 * XXX note that 0x12000 is the old kernel text start 250 * address. Memory below this is assumed to belong 251 * to the firmware. This value is converted into pages 252 * by hppa_init and used as pages in pmap_bootstrap(). 253 */ 254 int resvmem = 0x12000; 255 int resvphysmem; 256 257 /* 258 * BTLB parameters, broken out for the MI hppa code. 259 */ 260 u_int hppa_btlb_size_min, hppa_btlb_size_max; 261 262 /* 263 * Things for MI glue to stick on. 264 */ 265 struct extent *hppa_io_extent; 266 static long hppa_io_extent_store[EXTENT_FIXED_STORAGE_SIZE(64) / sizeof(long)]; 267 268 struct pool hppa_fppl; 269 struct fpreg lwp0_fpregs; 270 271 /* Our exported CPU info */ 272 struct cpu_info cpus[HPPA_MAXCPUS] = { 273 #ifdef MULTIPROCESSOR 274 { 275 .ci_curlwp = &lwp0, 276 }, 277 #endif 278 }; 279 280 struct vm_map *phys_map = NULL; 281 282 void delay_init(void); 283 static inline void fall(int, int, int, int, int); 284 void dumpsys(void); 285 void cpuid(void); 286 enum hppa_cpu_type cpu_model_cpuid(int); 287 #if NLCD > 0 288 void blink_lcd_timeout(void *); 289 #endif 290 291 /* 292 * wide used hardware params 293 */ 294 struct pdc_hwtlb pdc_hwtlb; 295 struct pdc_coproc pdc_coproc; 296 struct pdc_coherence pdc_coherence; 297 struct pdc_spidb pdc_spidbits; 298 struct pdc_pim pdc_pim; 299 struct pdc_model pdc_model; 300 301 /* 302 * Debugger info. 
303 */ 304 int hppa_kgdb_attached; 305 306 /* 307 * Whatever CPU types we support 308 */ 309 extern const u_int itlb_x[], itlbna_x[], dtlb_x[], dtlbna_x[], tlbd_x[]; 310 extern const u_int itlb_s[], itlbna_s[], dtlb_s[], dtlbna_s[], tlbd_s[]; 311 extern const u_int itlb_t[], itlbna_t[], dtlb_t[], dtlbna_t[], tlbd_t[]; 312 extern const u_int itlb_l[], itlbna_l[], dtlb_l[], dtlbna_l[], tlbd_l[]; 313 extern const u_int itlb_u[], itlbna_u[], dtlb_u[], dtlbna_u[], tlbd_u[]; 314 315 int iibtlb_s(int, pa_space_t, vaddr_t, paddr_t, vsize_t, u_int); 316 int idbtlb_s(int, pa_space_t, vaddr_t, paddr_t, vsize_t, u_int); 317 int ibtlb_t(int, pa_space_t, vaddr_t, paddr_t, vsize_t, u_int); 318 int ibtlb_l(int, pa_space_t, vaddr_t, paddr_t, vsize_t, u_int); 319 int ibtlb_u(int, pa_space_t, vaddr_t, paddr_t, vsize_t, u_int); 320 int ibtlb_g(int, pa_space_t, vaddr_t, paddr_t, vsize_t, u_int); 321 int pbtlb_g(int); 322 int pbtlb_u(int); 323 int hpti_l(vaddr_t, vsize_t); 324 int hpti_u(vaddr_t, vsize_t); 325 int hpti_g(vaddr_t, vsize_t); 326 int desidhash_x(void); 327 int desidhash_s(void); 328 int desidhash_t(void); 329 int desidhash_l(void); 330 int desidhash_u(void); 331 332 const struct hppa_cpu_info cpu_types[] = { 333 #ifdef HP7000_CPU 334 { "PA7000", NULL, "PCX", 335 hpcx, 0, 336 0, "1.0", 337 desidhash_x, itlb_x, dtlb_x, itlbna_x, dtlbna_x, tlbd_x, 338 ibtlb_g, NULL, pbtlb_g, NULL }, /* XXXNH check */ 339 #endif 340 #ifdef HP7000_CPU 341 { "PA7000", NULL, "PCXS", 342 hpcxs, 0, 343 0, "1.1a", 344 desidhash_s, itlb_s, dtlb_s, itlbna_s, dtlbna_s, tlbd_s, 345 ibtlb_g, NULL, pbtlb_g, NULL }, 346 #endif 347 #ifdef HP7100_CPU 348 { "PA7100", "T-Bird", "PCXT", 349 hpcxt, 0, 350 HPPA_FTRS_BTLBU, "1.1b", 351 desidhash_t, itlb_t, dtlb_t, itlbna_t, dtlbna_t, tlbd_t, 352 ibtlb_g, NULL, pbtlb_g, NULL }, 353 #endif 354 #ifdef HP7100LC_CPU 355 { "PA7100LC", "Hummingbird", "PCXL", 356 hpcxl, HPPA_CPU_PCXL, 357 HPPA_FTRS_TLBU | HPPA_FTRS_BTLBU | HPPA_FTRS_HVT, "1.1c", 358 desidhash_l, itlb_l, dtlb_l, itlbna_l, dtlbna_l, tlbd_l, 359 ibtlb_g, NULL, pbtlb_g, hpti_g }, 360 #endif 361 #ifdef HP7200_CPU 362 { "PA7200", "T-Bird", "PCXT'", 363 hpcxtp, HPPA_CPU_PCXT2, 364 HPPA_FTRS_BTLBU, "1.1d", 365 desidhash_t, itlb_t, dtlb_t, itlbna_t, dtlbna_t, tlbd_t, 366 ibtlb_g, NULL, pbtlb_g, NULL }, 367 #endif 368 #ifdef HP7300LC_CPU 369 { "PA7300LC", "Velociraptor", "PCXL2", 370 hpcxl2, HPPA_CPU_PCXL2, 371 HPPA_FTRS_TLBU | HPPA_FTRS_BTLBU | HPPA_FTRS_HVT, "1.1e", 372 NULL, itlb_l, dtlb_l, itlbna_l, dtlbna_l, tlbd_l, 373 ibtlb_g, NULL, pbtlb_g, hpti_g }, 374 #endif 375 #ifdef HP8000_CPU 376 { "PA8000", "Onyx", "PCXU", 377 hpcxu, HPPA_CPU_PCXU, 378 HPPA_FTRS_W32B, "2.0", 379 desidhash_u, itlb_u, dtlb_u, itlbna_u, dtlbna_u, tlbd_u, 380 ibtlb_u, NULL, pbtlb_u, NULL }, 381 #endif 382 #ifdef HP8200_CPU 383 { "PA8200", "Vulcan", "PCXU+", 384 hpcxup, HPPA_CPU_PCXUP, 385 HPPA_FTRS_W32B, "2.0", 386 desidhash_u, itlb_u, dtlb_u, itlbna_u, dtlbna_u, tlbd_u, 387 ibtlb_u, NULL, pbtlb_u, NULL }, 388 #endif 389 #ifdef HP8500_CPU 390 { "PA8500", "Barra'Cuda", "PCXW", 391 hpcxw, HPPA_CPU_PCXW, 392 HPPA_FTRS_W32B, "2.0", 393 desidhash_u, itlb_u, dtlb_u, itlbna_u, dtlbna_u, tlbd_u, 394 ibtlb_u, NULL, pbtlb_u, NULL }, 395 #endif 396 #ifdef HP8600_CPU 397 { "PA8600", "Landshark", "PCXW+", 398 hpcxwp, HPPA_CPU_PCXWP, 399 HPPA_FTRS_W32B, "2.0", 400 desidhash_u, itlb_u, dtlb_u, itlbna_u, dtlbna_u, tlbd_u, 401 ibtlb_u, NULL, pbtlb_u, NULL }, 402 #endif 403 #ifdef HP8700_CPU 404 { "PA8700", "Piranha", "PCXW2", 405 hpcxw2, HPPA_CPU_PCXW2, 406 HPPA_FTRS_W32B, 
"2.0", 407 desidhash_u, itlb_u, dtlb_u, itlbna_u, dtlbna_u, tlbd_u, 408 ibtlb_u, NULL, pbtlb_u, NULL }, 409 #endif 410 #ifdef HP8800_CPU 411 { "PA8800", "Mako", "Make", 412 mako, HPPA_CPU_PCXW2, 413 HPPA_FTRS_W32B, "2.0", 414 desidhash_u, itlb_u, dtlb_u, itlbna_u, dtlbna_u, tlbd_u, 415 ibtlb_u, NULL, pbtlb_u, NULL }, 416 #endif 417 #ifdef HP8900_CPU 418 { "PA8900", "Shortfin", "Shortfin", 419 mako, HPPA_CPU_PCXW2, 420 HPPA_FTRS_W32B, "2.0", 421 desidhash_u, itlb_u, dtlb_u, itlbna_u, dtlbna_u, tlbd_u, 422 ibtlb_u, NULL, pbtlb_u, NULL }, 423 #endif 424 }; 425 426 void 427 hppa_init(paddr_t start, void *bi) 428 { 429 vaddr_t vstart; 430 vaddr_t v; 431 int error; 432 u_int *p, *q; 433 struct btlb_slot *btlb_slot; 434 int btlb_slot_i; 435 struct btinfo_symtab *bi_sym; 436 struct pcb *pcb0; 437 struct cpu_info *ci; 438 439 #ifdef KGDB 440 boothowto |= RB_KDB; /* go to kgdb early if compiled in. */ 441 #endif 442 /* Setup curlwp/curcpu early for LOCKDEBUG and spl* */ 443 #ifdef MULTIPROCESSOR 444 mtctl(&cpus[0], CR_CURCPU); 445 #else 446 mtctl(&lwp0, CR_CURLWP); 447 #endif 448 lwp0.l_cpu = &cpus[0]; 449 450 /* curcpu() is now valid */ 451 ci = curcpu(); 452 453 ci->ci_psw = 454 PSW_Q | /* Interrupt State Collection Enable */ 455 PSW_P | /* Protection Identifier Validation Enable */ 456 PSW_C | /* Instruction Address Translation Enable */ 457 PSW_D; /* Data Address Translation Enable */ 458 459 /* Copy bootinfo */ 460 if (bi != NULL) 461 memcpy(&bootinfo, bi, sizeof(struct bootinfo)); 462 463 /* init PDC iface, so we can call em easy */ 464 pdc_init(); 465 466 cpu_hzticks = (PAGE0->mem_10msec * 100) / hz; 467 468 /* calculate CPU clock ratio */ 469 delay_init(); 470 471 /* fetch the monarch/"default" cpu hpa */ 472 error = pdcproc_hpa_processor(&hppa_mcpuhpa); 473 if (error < 0) 474 panic("%s: PDC_HPA failed", __func__); 475 476 /* cache parameters */ 477 error = pdcproc_cache(&pdc_cache); 478 if (error < 0) { 479 DPRINTF(("WARNING: PDC_CACHE error %d\n", error)); 480 } 481 482 dcache_line_mask = pdc_cache.dc_conf.cc_line * 16 - 1; 483 dcache_stride = pdc_cache.dc_stride; 484 icache_line_mask = pdc_cache.ic_conf.cc_line * 16 - 1; 485 icache_stride = pdc_cache.ic_stride; 486 487 error = pdcproc_cache_spidbits(&pdc_spidbits); 488 DPRINTF(("SPID bits: 0x%x, error = %d\n", pdc_spidbits.spidbits, error)); 489 490 /* Calculate the OS_HPMC handler checksums. */ 491 p = os_hpmc; 492 if (pdcproc_instr(p)) 493 *p = 0x08000240; 494 p[7] = ((char *) &os_hpmc_cont_end) - ((char *) &os_hpmc_cont); 495 p[6] = (u_int) &os_hpmc_cont; 496 p[5] = -(p[0] + p[1] + p[2] + p[3] + p[4] + p[6] + p[7]); 497 p = &os_hpmc_cont; 498 q = os_hpmc_checksum; 499 for (*q = 0; p < q; *q -= *(p++)); 500 501 /* Calculate the OS_TOC handler checksum. */ 502 p = (u_int *) &os_toc; 503 q = os_toc_checksum; 504 for (*q = 0; p < q; *q -= *(p++)); 505 506 /* Install the OS_TOC handler. 
*/ 507 PAGE0->ivec_toc = os_toc; 508 PAGE0->ivec_toclen = ((char *) &os_toc_end) - ((char *) &os_toc); 509 510 cpuid(); 511 ptlball(); 512 fcacheall(); 513 514 avail_end = trunc_page(PAGE0->imm_max_mem); 515 totalphysmem = atop(avail_end); 516 if (avail_end > SYSCALLGATE) 517 avail_end = SYSCALLGATE; 518 physmem = atop(avail_end); 519 resvmem = atop(resvmem); /* XXXNH */ 520 521 /* we hope this won't fail */ 522 hppa_io_extent = extent_create("io", 523 HPPA_IOSPACE, 0xffffffff, 524 (void *)hppa_io_extent_store, sizeof(hppa_io_extent_store), 525 EX_NOCOALESCE|EX_NOWAIT); 526 527 vstart = round_page(start); 528 529 /* 530 * Now allocate kernel dynamic variables 531 */ 532 533 /* Allocate the msgbuf. */ 534 msgbufaddr = (void *) vstart; 535 vstart += MSGBUFSIZE; 536 vstart = round_page(vstart); 537 538 if (usebtlb) { 539 /* Allocate and initialize the BTLB slots array. */ 540 btlb_slots = (struct btlb_slot *) ALIGN(vstart); 541 btlb_slot = btlb_slots; 542 #define BTLB_SLOTS(count, flags) \ 543 do { \ 544 for (btlb_slot_i = 0; \ 545 btlb_slot_i < pdc_btlb.count; \ 546 btlb_slot_i++) { \ 547 btlb_slot->btlb_slot_number = (btlb_slot - btlb_slots); \ 548 btlb_slot->btlb_slot_flags = flags; \ 549 btlb_slot->btlb_slot_frames = 0; \ 550 btlb_slot++; \ 551 } \ 552 } while (/* CONSTCOND */ 0) 553 554 BTLB_SLOTS(finfo.num_i, BTLB_SLOT_IBTLB); 555 BTLB_SLOTS(finfo.num_d, BTLB_SLOT_DBTLB); 556 BTLB_SLOTS(finfo.num_c, BTLB_SLOT_CBTLB); 557 BTLB_SLOTS(vinfo.num_i, BTLB_SLOT_IBTLB | BTLB_SLOT_VARIABLE_RANGE); 558 BTLB_SLOTS(vinfo.num_d, BTLB_SLOT_DBTLB | BTLB_SLOT_VARIABLE_RANGE); 559 BTLB_SLOTS(vinfo.num_c, BTLB_SLOT_CBTLB | BTLB_SLOT_VARIABLE_RANGE); 560 #undef BTLB_SLOTS 561 562 btlb_slots_count = (btlb_slot - btlb_slots); 563 vstart = round_page((vaddr_t) btlb_slot); 564 } 565 566 v = vstart; 567 568 /* sets resvphysmem */ 569 pmap_bootstrap(v); 570 571 /* 572 * BELOW THIS LINE REFERENCING PAGE0 AND OTHER LOW MEMORY 573 * LOCATIONS, AND WRITING THE KERNEL TEXT ARE PROHIBITED 574 * WITHOUT TAKING SPECIAL MEASURES. 575 */ 576 577 DPRINTF(("%s: PDC_CHASSIS\n", __func__)); 578 579 /* they say PDC_COPROC might turn fault light on */ 580 pdcproc_chassis_display(PDC_OSTAT(PDC_OSTAT_RUN) | 0xCEC0); 581 582 DPRINTF(("%s: intr bootstrap\n", __func__)); 583 /* Bootstrap interrupt masking and dispatching. */ 584 hppa_intr_initialise(ci); 585 586 /* 587 * Initialize any debugger. 588 */ 589 #ifdef KGDB 590 /* 591 * XXX note that we're not virtual yet, yet these 592 * KGDB attach functions will be using bus_space(9) 593 * to map and manipulate their devices. This only 594 * works because, currently, the mainbus.c bus_space 595 * implementation directly-maps things in I/O space. 596 */ 597 hppa_kgdb_attached = false; 598 #if NCOM > 0 599 if (!strcmp(KGDB_DEVNAME, "com")) { 600 int com_gsc_kgdb_attach(void); 601 if (com_gsc_kgdb_attach() == 0) 602 hppa_kgdb_attached = true; 603 } 604 #endif /* NCOM > 0 */ 605 #endif /* KGDB */ 606 607 #if NKSYMS || defined(DDB) || defined(MODULAR) 608 if ((bi_sym = lookup_bootinfo(BTINFO_SYMTAB)) != NULL) 609 ksyms_addsyms_elf(bi_sym->nsym, (int *)bi_sym->ssym, 610 (int *)bi_sym->esym); 611 else { 612 extern int end; 613 614 ksyms_addsyms_elf(esym - (int)&end, &end, (int*)esym); 615 } 616 #endif 617 618 /* We will shortly go virtual. 
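	 * Page zero stops being freely addressable once translation is
	 * enabled; later users must go through hppa_pagezero_map(), and
	 * the fcacheall() below flushes both caches before the switch.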
*/ 619 pagezero_mapped = 0; 620 fcacheall(); 621 622 pcb0 = lwp_getpcb(&lwp0); 623 pcb0->pcb_fpregs = &lwp0_fpregs; 624 memset(&lwp0_fpregs, 0, sizeof(struct fpreg)); 625 626 pool_init(&hppa_fppl, sizeof(struct fpreg), 16, 0, 0, "fppl", NULL, 627 IPL_NONE); 628 } 629 630 void 631 cpuid(void) 632 { 633 /* 634 * XXX fredette - much of this TLB trap handler setup should 635 * probably be moved here to hppa/hppa/hppa_machdep.c, seeing 636 * that there's related code already in hppa/hppa/trap.S. 637 */ 638 639 /* 640 * Ptrs to various tlb handlers, to be filled 641 * based on CPU features. 642 * from locore.S 643 */ 644 extern u_int trap_ep_T_TLB_DIRTY[]; 645 extern u_int trap_ep_T_DTLBMISS[]; 646 extern u_int trap_ep_T_DTLBMISSNA[]; 647 extern u_int trap_ep_T_ITLBMISS[]; 648 extern u_int trap_ep_T_ITLBMISSNA[]; 649 650 struct pdc_cpuid pdc_cpuid; 651 const struct hppa_cpu_info *p = NULL; 652 const char *model; 653 u_int cpu_version, cpu_features; 654 int error, i; 655 656 /* may the scientific guessing begin */ 657 cpu_type = hpc_unknown; 658 cpu_features = 0; 659 cpu_version = 0; 660 661 /* identify system type */ 662 error = pdcproc_model_info(&pdc_model); 663 if (error < 0) { 664 DPRINTF(("WARNING: PDC_MODEL_INFO error %d\n", error)); 665 666 pdc_model.hwmodel = 0; 667 pdc_model.hv = 0; 668 } else { 669 DPRINTF(("pdc_model.hwmodel/hv %x/%x\n", pdc_model.hwmodel, 670 pdc_model.hv)); 671 } 672 cpu_modelno = pdc_model.hwmodel; 673 model = hppa_mod_info(HPPA_TYPE_BOARD, cpu_modelno); 674 675 DPRINTF(("%s: model %s\n", __func__, model)); 676 677 pdc_settype(cpu_modelno); 678 679 memset(&pdc_cpuid, 0, sizeof(pdc_cpuid)); 680 error = pdcproc_model_cpuid(&pdc_cpuid); 681 if (error < 0) { 682 DPRINTF(("WARNING: PDC_MODEL_CPUID error %d. " 683 "Using cpu_modelno (%#x) based cpu_type.\n", error, cpu_modelno)); 684 685 cpu_type = cpu_model_cpuid(cpu_modelno); 686 if (cpu_type == hpc_unknown) { 687 printf("WARNING: Unknown cpu_type for cpu_modelno %x\n", 688 cpu_modelno); 689 } 690 } else { 691 DPRINTF(("%s: cpuid.version = %x\n", __func__, 692 pdc_cpuid.version)); 693 DPRINTF(("%s: cpuid.revision = %x\n", __func__, 694 pdc_cpuid.revision)); 695 696 cpu_version = pdc_cpuid.version; 697 698 /* XXXNH why? 
	 */
		/* patch for old 8200 */
		if (pdc_cpuid.version == HPPA_CPU_PCXU &&
		    pdc_cpuid.revision > 0x0d)
			cpu_version = HPPA_CPU_PCXUP;
	}

	/* locate coprocessors and SFUs */
	memset(&pdc_coproc, 0, sizeof(pdc_coproc));
	error = pdcproc_coproc(&pdc_coproc);
	if (error < 0) {
		DPRINTF(("WARNING: PDC_COPROC error %d\n", error));
		pdc_coproc.ccr_enable = 0;
	} else {
		DPRINTF(("pdc_coproc: 0x%x, 0x%x; model %x rev %x\n",
		    pdc_coproc.ccr_enable, pdc_coproc.ccr_present,
		    pdc_coproc.fpu_model, pdc_coproc.fpu_revision));

		/* a kludge to detect PCXW */
		if (pdc_coproc.fpu_model == HPPA_FPU_PCXW)
			cpu_version = HPPA_CPU_PCXW;
	}
	mtctl(pdc_coproc.ccr_enable & CCR_MASK, CR_CCR);
	DPRINTF(("%s: bootstrap fpu\n", __func__));

	usebtlb = 0;
	if (cpu_version == HPPA_CPU_PCXW || cpu_version > HPPA_CPU_PCXL2) {
		DPRINTF(("WARNING: BTLB not supported on cpu %d\n", cpu_version));
	} else {

		/* BTLB params */
		error = pdcproc_block_tlb(&pdc_btlb);
		if (error < 0) {
			DPRINTF(("WARNING: PDC_BTLB error %d\n", error));
		} else {
			DPRINTFN(10, ("btlb info: minsz=%d, maxsz=%d\n",
			    pdc_btlb.min_size, pdc_btlb.max_size));
			DPRINTFN(10, ("btlb fixed: i=%d, d=%d, c=%d\n",
			    pdc_btlb.finfo.num_i,
			    pdc_btlb.finfo.num_d,
			    pdc_btlb.finfo.num_c));
			DPRINTFN(10, ("btlb varbl: i=%d, d=%d, c=%d\n",
			    pdc_btlb.vinfo.num_i,
			    pdc_btlb.vinfo.num_d,
			    pdc_btlb.vinfo.num_c));

			/* purge TLBs and caches */
			if (pdcproc_btlb_purgeall() < 0)
				DPRINTFN(10, ("WARNING: BTLB purge failed\n"));

			hppa_btlb_size_min = pdc_btlb.min_size;
			hppa_btlb_size_max = pdc_btlb.max_size;

			DPRINTF(("hppa_btlb_size_min 0x%x\n", hppa_btlb_size_min));
			DPRINTF(("hppa_btlb_size_max 0x%x\n", hppa_btlb_size_max));

			if (pdc_btlb.finfo.num_c)
				cpu_features |= HPPA_FTRS_BTLBU;
			usebtlb = 1;
		}
	}
	usebtlb = 0;

	error = pdcproc_tlb_info(&pdc_hwtlb);
	if (error == 0 && pdc_hwtlb.min_size != 0 && pdc_hwtlb.max_size != 0) {
		cpu_features |= HPPA_FTRS_HVT;
		if (pmap_hptsize > pdc_hwtlb.max_size)
			pmap_hptsize = pdc_hwtlb.max_size;
		else if (pmap_hptsize && pmap_hptsize < pdc_hwtlb.min_size)
			pmap_hptsize = pdc_hwtlb.min_size;

		DPRINTF(("%s: pmap_hptsize 0x%x\n", __func__, pmap_hptsize));
	} else {
		DPRINTF(("WARNING: no HPT support, fine!\n"));

		pmap_hptsize = 0;
	}

	bool cpu_found = false;
	if (cpu_version) {
		DPRINTF(("%s: looking for cpu_version %x\n", __func__,
		    cpu_version));
		for (i = 0, p = cpu_types; i < __arraycount(cpu_types);
		     i++, p++) {
			if (p->hci_cpuversion == cpu_version) {
				cpu_found = true;
				break;
			}
		}
	} else if (cpu_type != hpc_unknown) {
		DPRINTF(("%s: looking for cpu_type %d\n", __func__,
		    cpu_type));
		for (i = 0, p = cpu_types; i < __arraycount(cpu_types);
		     i++, p++) {
			if (p->hci_cputype == cpu_type) {
				cpu_found = true;
				break;
			}
		}
	}

	if (!cpu_found) {
		panic("CPU detection failed. Please report the problem.
" 801 "CPU version %#x/type %#x", cpu_version, cpu_type); 802 } 803 804 hppa_cpu_info = p; 805 806 if (hppa_cpu_info->hci_chip_name == NULL) 807 panic("bad model string for 0x%x", pdc_model.hwmodel); 808 809 /* 810 * TODO: HPT on 7200 is not currently supported 811 */ 812 if (pmap_hptsize && p->hci_cputype != hpcxl && p->hci_cputype != hpcxl2) 813 pmap_hptsize = 0; 814 815 cpu_type = hppa_cpu_info->hci_cputype; 816 cpu_ibtlb_ins = hppa_cpu_info->ibtlbins; 817 cpu_dbtlb_ins = hppa_cpu_info->dbtlbins; 818 cpu_hpt_init = hppa_cpu_info->hptinit; 819 cpu_desidhash = hppa_cpu_info->desidhash; 820 821 if (cpu_desidhash) 822 cpu_revision = (*cpu_desidhash)(); 823 else 824 cpu_revision = 0; 825 826 /* force strong ordering for now */ 827 if (hppa_cpu_ispa20_p()) 828 curcpu()->ci_psw |= PSW_O; 829 830 cpu_setmodel("HP9000/%s", model); 831 832 #define LDILDO(t,f) ((t)[0] = (f)[0], (t)[1] = (f)[1]); 833 LDILDO(trap_ep_T_TLB_DIRTY , hppa_cpu_info->tlbdh); 834 LDILDO(trap_ep_T_DTLBMISS , hppa_cpu_info->dtlbh); 835 LDILDO(trap_ep_T_DTLBMISSNA, hppa_cpu_info->dtlbnah); 836 LDILDO(trap_ep_T_ITLBMISS , hppa_cpu_info->itlbh); 837 LDILDO(trap_ep_T_ITLBMISSNA, hppa_cpu_info->itlbnah); 838 #undef LDILDO 839 840 /* Bootstrap any FPU. */ 841 hppa_fpu_bootstrap(pdc_coproc.ccr_enable); 842 } 843 844 enum hppa_cpu_type 845 cpu_model_cpuid(int modelno) 846 { 847 switch (modelno) { 848 /* no supported HP8xx/9xx models with pcx */ 849 case HPPA_BOARD_HP720: 850 case HPPA_BOARD_HP750_66: 851 case HPPA_BOARD_HP730_66: 852 case HPPA_BOARD_HP710: 853 case HPPA_BOARD_HP705: 854 return hpcxs; 855 856 case HPPA_BOARD_HPE23: 857 case HPPA_BOARD_HPE25: 858 case HPPA_BOARD_HPE35: 859 case HPPA_BOARD_HPE45: 860 case HPPA_BOARD_HP712_60: 861 case HPPA_BOARD_HP712_80: 862 case HPPA_BOARD_HP712_100: 863 case HPPA_BOARD_HP715_80: 864 case HPPA_BOARD_HP715_64: 865 case HPPA_BOARD_HP715_100: 866 case HPPA_BOARD_HP715_100XC: 867 case HPPA_BOARD_HP715_100L: 868 case HPPA_BOARD_HP715_120L: 869 case HPPA_BOARD_HP715_80M: 870 return hpcxl; 871 872 case HPPA_BOARD_HP735_99: 873 case HPPA_BOARD_HP755_99: 874 case HPPA_BOARD_HP755_125: 875 case HPPA_BOARD_HP735_130: 876 case HPPA_BOARD_HP715_50: 877 case HPPA_BOARD_HP715_33: 878 case HPPA_BOARD_HP715S_50: 879 case HPPA_BOARD_HP715S_33: 880 case HPPA_BOARD_HP715T_50: 881 case HPPA_BOARD_HP715T_33: 882 case HPPA_BOARD_HP715_75: 883 case HPPA_BOARD_HP715_99: 884 case HPPA_BOARD_HP725_50: 885 case HPPA_BOARD_HP725_75: 886 case HPPA_BOARD_HP725_99: 887 case HPPA_BOARD_HP745I_50: 888 return hpcxt; 889 } 890 return hpc_unknown; 891 } 892 893 void 894 cpu_startup(void) 895 { 896 vaddr_t minaddr, maxaddr; 897 char pbuf[3][9]; 898 899 /* Initialize the message buffer. */ 900 initmsgbuf(msgbufaddr, MSGBUFSIZE); 901 902 /* 903 * i won't understand a friend of mine, 904 * who sat in a room full of artificial ice, 905 * fogging the air w/ humid cries -- 906 * WELCOME TO SUMMER! 907 */ 908 printf("%s%s", copyright, version); 909 910 /* identify system type */ 911 printf("%s\n", cpu_getmodel()); 912 913 /* Display some memory usage information. 
*/ 914 format_bytes(pbuf[0], sizeof(pbuf[0]), ptoa(physmem)); 915 format_bytes(pbuf[1], sizeof(pbuf[1]), ptoa(resvmem)); 916 format_bytes(pbuf[2], sizeof(pbuf[2]), ptoa(availphysmem)); 917 printf("real mem = %s (%s reserved for PROM, %s used by NetBSD)\n", 918 pbuf[0], pbuf[1], pbuf[2]); 919 920 #ifdef DEBUG 921 if (totalphysmem > physmem) { 922 format_bytes(pbuf[0], sizeof(pbuf[0]), ptoa(totalphysmem - physmem)); 923 DPRINTF(("lost mem = %s\n", pbuf[0])); 924 } 925 #endif 926 927 minaddr = 0; 928 929 /* 930 * Allocate a submap for physio 931 */ 932 phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr, 933 VM_PHYS_SIZE, 0, false, NULL); 934 935 format_bytes(pbuf[0], sizeof(pbuf[0]), ptoa(uvm_availmem(false))); 936 printf("avail mem = %s\n", pbuf[0]); 937 } 938 939 /* 940 * compute CPU clock ratio such as: 941 * cpu_ticksnum / cpu_ticksdenom = t + delta 942 * delta -> 0 943 */ 944 void 945 delay_init(void) 946 { 947 u_int num, denom, delta, mdelta; 948 949 mdelta = UINT_MAX; 950 for (denom = 1; denom < 1000; denom++) { 951 num = (PAGE0->mem_10msec * denom) / 10000; 952 delta = num * 10000 / denom - PAGE0->mem_10msec; 953 if (!delta) { 954 cpu_ticksdenom = denom; 955 cpu_ticksnum = num; 956 break; 957 } else if (delta < mdelta) { 958 cpu_ticksdenom = denom; 959 cpu_ticksnum = num; 960 mdelta = delta; 961 } 962 } 963 } 964 965 void 966 delay(u_int us) 967 { 968 u_int start, end, n; 969 970 mfctl(CR_ITMR, start); 971 while (us) { 972 n = uimin(1000, us); 973 end = start + n * cpu_ticksnum / cpu_ticksdenom; 974 975 /* N.B. Interval Timer may wrap around */ 976 if (end < start) { 977 do { 978 mfctl(CR_ITMR, start); 979 } while (start > end); 980 } 981 982 do 983 mfctl(CR_ITMR, start); 984 while (start < end); 985 986 us -= n; 987 mfctl(CR_ITMR, start); 988 } 989 } 990 991 static inline void 992 fall(int c_base, int c_count, int c_loop, int c_stride, int data) 993 { 994 int loop; 995 996 for (; c_count--; c_base += c_stride) 997 for (loop = c_loop; loop--; ) 998 if (data) 999 fdce(0, c_base); 1000 else 1001 fice(0, c_base); 1002 } 1003 1004 void 1005 fcacheall(void) 1006 { 1007 /* 1008 * Flush the instruction, then data cache. 
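	 * fall() walks each cache over the base/count/loop/stride
	 * geometry reported by PDC_CACHE, issuing fice (instruction) or
	 * fdce (data) for every entry, with sync_caches() in between.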
	 */
	fall(pdc_cache.ic_base, pdc_cache.ic_count, pdc_cache.ic_loop,
	    pdc_cache.ic_stride, 0);
	sync_caches();
	fall(pdc_cache.dc_base, pdc_cache.dc_count, pdc_cache.dc_loop,
	    pdc_cache.dc_stride, 1);
	sync_caches();
}

void
ptlball(void)
{
	pa_space_t sp;
	int i, j, k;

	/* instruction TLB */
	sp = pdc_cache.it_sp_base;
	for (i = 0; i < pdc_cache.it_sp_count; i++) {
		vaddr_t off = pdc_cache.it_off_base;
		for (j = 0; j < pdc_cache.it_off_count; j++) {
			for (k = 0; k < pdc_cache.it_loop; k++)
				pitlbe(sp, off);
			off += pdc_cache.it_off_stride;
		}
		sp += pdc_cache.it_sp_stride;
	}

	/* data TLB */
	sp = pdc_cache.dt_sp_base;
	for (i = 0; i < pdc_cache.dt_sp_count; i++) {
		vaddr_t off = pdc_cache.dt_off_base;
		for (j = 0; j < pdc_cache.dt_off_count; j++) {
			for (k = 0; k < pdc_cache.dt_loop; k++)
				pdtlbe(sp, off);
			off += pdc_cache.dt_off_stride;
		}
		sp += pdc_cache.dt_sp_stride;
	}
}

int
hpti_g(vaddr_t hpt, vsize_t hptsize)
{

	return pdcproc_tlb_config(&pdc_hwtlb, hpt, hptsize, PDC_TLB_CURRPDE);
}

int
pbtlb_g(int i)
{
	return -1;
}

int
ibtlb_g(int i, pa_space_t sp, vaddr_t va, paddr_t pa, vsize_t sz, u_int prot)
{
	int error;

	error = pdcproc_btlb_insert(sp, va, pa, sz, prot, i);
	if (error < 0) {
#ifdef BTLBDEBUG
		DPRINTF(("WARNING: BTLB insert failed (%d)\n", error));
#endif
	}
	return error;
}


/*
 * This inserts a recorded BTLB slot.
 */
static int _hppa_btlb_insert(struct btlb_slot *);
static int
_hppa_btlb_insert(struct btlb_slot *btlb_slot)
{
	int error;
#ifdef MACHDEPDEBUG
	const char *prot;

	/* Display the protection like a file protection. */
	switch (btlb_slot->btlb_slot_tlbprot & TLB_AR_MASK) {
	case TLB_AR_NA:		prot = "------"; break;
	case TLB_AR_R:		prot = "r-----"; break;
	case TLB_AR_RW:		prot = "rw----"; break;
	case TLB_AR_RX:		prot = "r-x---"; break;
	case TLB_AR_RWX:	prot = "rwx---"; break;
	case TLB_AR_R | TLB_USER:	prot = "r--r--"; break;
	case TLB_AR_RW | TLB_USER:	prot = "rw-rw-"; break;
	case TLB_AR_RX | TLB_USER:	prot = "r--r-x"; break;
	case TLB_AR_RWX | TLB_USER:	prot = "rw-rwx"; break;
	default:		prot = "??????"; break;
	}

	DPRINTFN(10, (
	    " [ BTLB %d: %s 0x%08x @ 0x%x:0x%08x len 0x%08x prot 0x%08x] ",
	    btlb_slot->btlb_slot_number,
	    prot,
	    (u_int)btlb_slot->btlb_slot_pa_frame << PGSHIFT,
	    btlb_slot->btlb_slot_va_space,
	    (u_int)btlb_slot->btlb_slot_va_frame << PGSHIFT,
	    (u_int)btlb_slot->btlb_slot_frames << PGSHIFT,
	    btlb_slot->btlb_slot_tlbprot));

	/*
	 * Non-I/O space mappings are entered by the pmap,
	 * so we do print a newline to make things look better.
	 */
	if (btlb_slot->btlb_slot_pa_frame < (HPPA_IOSPACE >> PGSHIFT))
		DPRINTFN(10, ("\n"));
#endif

	/* Insert this mapping. */
	error = pdcproc_btlb_insert(
	    btlb_slot->btlb_slot_va_space,
	    btlb_slot->btlb_slot_va_frame,
	    btlb_slot->btlb_slot_pa_frame,
	    btlb_slot->btlb_slot_frames,
	    btlb_slot->btlb_slot_tlbprot,
	    btlb_slot->btlb_slot_number);
	if (error < 0) {
#ifdef BTLBDEBUG
		DPRINTF(("WARNING: BTLB insert failed (%d)\n", error));
#endif
	}
	return (error ? EINVAL : 0);
}

/*
 * This records and inserts a new BTLB entry.
 */
int
hppa_btlb_insert(pa_space_t space, vaddr_t va, paddr_t pa, vsize_t *sizep,
    u_int tlbprot)
{
	struct btlb_slot *btlb_slot, *btlb_slot_best, *btlb_slot_end;
	vsize_t frames;
	int error;
	int need_dbtlb, need_ibtlb, need_variable_range;
	int btlb_slot_score, btlb_slot_best_score;
	vsize_t slot_mapped_frames, total_mapped_frames;

	/*
	 * All entries need data translation.  Those that
	 * allow execution also need instruction translation.
	 */
	switch (tlbprot & TLB_AR_MASK) {
	case TLB_AR_R:
	case TLB_AR_RW:
	case TLB_AR_R | TLB_USER:
	case TLB_AR_RW | TLB_USER:
		need_dbtlb = true;
		need_ibtlb = false;
		break;
	case TLB_AR_RX:
	case TLB_AR_RWX:
	case TLB_AR_RX | TLB_USER:
	case TLB_AR_RWX | TLB_USER:
		need_dbtlb = true;
		need_ibtlb = true;
		break;
	default:
		panic("btlb_insert: bad tlbprot");
	}

	/*
	 * If this entry isn't aligned to the size required
	 * for a fixed-range slot, it requires a variable-range
	 * slot.  This also converts pa and va to page frame
	 * numbers.
	 */
	frames = pdc_btlb.min_size << PGSHIFT;
	while (frames < *sizep)
		frames <<= 1;
	frames >>= PGSHIFT;
	if (frames > pdc_btlb.max_size) {
#ifdef BTLBDEBUG
		DPRINTF(("btlb_insert: too big (%u < %u < %u)\n",
		    pdc_btlb.min_size, (u_int) frames, pdc_btlb.max_size));
#endif
		return -(ENOMEM);
	}
	pa >>= PGSHIFT;
	va >>= PGSHIFT;
	need_variable_range =
	    ((pa & (frames - 1)) != 0 || (va & (frames - 1)) != 0);

	/* I/O space must be mapped uncached. */
	if (pa >= HPPA_IOBEGIN)
		tlbprot |= TLB_UNCACHEABLE;

	/*
	 * Loop while we still need slots.
	 */
	btlb_slot_end = btlb_slots + btlb_slots_count;
	total_mapped_frames = 0;
	btlb_slot_best_score = 0;
	while (need_dbtlb || need_ibtlb) {

		/*
		 * Find an applicable slot.
		 */
		btlb_slot_best = NULL;
		for (btlb_slot = btlb_slots;
		     btlb_slot < btlb_slot_end;
		     btlb_slot++) {

			/*
			 * Skip this slot if it's in use, or if we need a
			 * variable-range slot and this isn't one.
			 */
			if (btlb_slot->btlb_slot_frames != 0 ||
			    (need_variable_range &&
			     !(btlb_slot->btlb_slot_flags &
			       BTLB_SLOT_VARIABLE_RANGE)))
				continue;

			/*
			 * Score this slot.
			 */
			btlb_slot_score = 0;
			if (need_dbtlb &&
			    (btlb_slot->btlb_slot_flags & BTLB_SLOT_DBTLB))
				btlb_slot_score++;
			if (need_ibtlb &&
			    (btlb_slot->btlb_slot_flags & BTLB_SLOT_IBTLB))
				btlb_slot_score++;

			/*
			 * Update the best slot.
			 */
			if (btlb_slot_score > 0 &&
			    (btlb_slot_best == NULL ||
			     btlb_slot_score > btlb_slot_best_score)) {
				btlb_slot_best = btlb_slot;
				btlb_slot_best_score = btlb_slot_score;
			}
		}

		/*
		 * If there were no applicable slots.
		 */
		if (btlb_slot_best == NULL) {
			DPRINTFN(10, ("BTLB full\n"));
			return -(ENOMEM);
		}

		/*
		 * Now fill this BTLB slot record and insert the entry.
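		 * A variable-range slot can map the request exactly, so it
		 * is charged only for the pages actually asked for; a
		 * fixed-range slot maps the power-of-two frame count
		 * computed above.  The largest mapping is reported back to
		 * the caller through *sizep.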
		 */
		btlb_slot = btlb_slot_best;
		if (btlb_slot->btlb_slot_flags & BTLB_SLOT_VARIABLE_RANGE)
			slot_mapped_frames = ((*sizep + PGOFSET) >> PGSHIFT);
		else
			slot_mapped_frames = frames;
		if (slot_mapped_frames > total_mapped_frames)
			total_mapped_frames = slot_mapped_frames;
		btlb_slot->btlb_slot_va_space = space;
		btlb_slot->btlb_slot_va_frame = va;
		btlb_slot->btlb_slot_pa_frame = pa;
		btlb_slot->btlb_slot_tlbprot = tlbprot;
		btlb_slot->btlb_slot_frames = slot_mapped_frames;
		error = _hppa_btlb_insert(btlb_slot);
		if (error)
			return -error;
		/*
		 * Note what slots we no longer need.
		 */
		if (btlb_slot->btlb_slot_flags & BTLB_SLOT_DBTLB)
			need_dbtlb = false;
		if (btlb_slot->btlb_slot_flags & BTLB_SLOT_IBTLB)
			need_ibtlb = false;
	}

	/* Success. */
	*sizep = (total_mapped_frames << PGSHIFT);
	return 0;
}

/*
 * This reloads the BTLB in the event that it becomes invalidated.
 */
int
hppa_btlb_reload(void)
{
	struct btlb_slot *btlb_slot, *btlb_slot_end;
	int error;

	/* Insert all recorded BTLB entries. */
	btlb_slot = btlb_slots;
	btlb_slot_end = btlb_slots + btlb_slots_count;
	error = 0;
	while (error == 0 && btlb_slot < btlb_slot_end) {
		if (btlb_slot->btlb_slot_frames != 0)
			error = _hppa_btlb_insert(btlb_slot);
		btlb_slot++;
	}
	DPRINTF(("\n"));
	return (error);
}

/*
 * This purges a BTLB entry.
 */
int
hppa_btlb_purge(pa_space_t space, vaddr_t va, vsize_t *sizep)
{
	struct btlb_slot *btlb_slot, *btlb_slot_end;
	int error;

	/*
	 * Purge all slots that map this virtual address.
	 */
	error = ENOENT;
	va >>= PGSHIFT;
	btlb_slot_end = btlb_slots + btlb_slots_count;
	for (btlb_slot = btlb_slots;
	     btlb_slot < btlb_slot_end;
	     btlb_slot++) {
		if (btlb_slot->btlb_slot_frames != 0 &&
		    btlb_slot->btlb_slot_va_space == space &&
		    btlb_slot->btlb_slot_va_frame == va) {
			error = pdcproc_btlb_purge(
			    btlb_slot->btlb_slot_va_space,
			    btlb_slot->btlb_slot_va_frame,
			    btlb_slot->btlb_slot_number,
			    btlb_slot->btlb_slot_frames);
			if (error < 0) {
				DPRINTFN(10, ("WARNING: BTLB purge failed (%d)\n",
				    error));

				return (error);
			}

			/*
			 * Tell our caller how many bytes were mapped
			 * by this slot, then free the slot.
			 */
			*sizep = (btlb_slot->btlb_slot_frames << PGSHIFT);
			btlb_slot->btlb_slot_frames = 0;
		}
	}
	return (error);
}

/*
 * This maps page zero if it isn't already mapped, and
 * returns a cookie for hppa_pagezero_unmap.
 */
int
hppa_pagezero_map(void)
{
	int was_mapped_before;
	int s;

	was_mapped_before = pagezero_mapped;
	if (!was_mapped_before) {
		s = splhigh();
		pmap_kenter_pa(0, 0, VM_PROT_ALL, 0);
		pagezero_mapped = 1;
		splx(s);
	}
	return (was_mapped_before);
}

/*
 * This unmaps page zero, given a cookie previously returned
 * by hppa_pagezero_map.
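 * A nonzero cookie means page zero was already mapped before the
 * matching hppa_pagezero_map() call, so it is left in place here.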
 */
void
hppa_pagezero_unmap(int was_mapped_before)
{
	int s;

	if (!was_mapped_before) {
		s = splhigh();
		pmap_kremove(0, PAGE_SIZE);
		pagezero_mapped = 0;
		splx(s);
	}
}

int waittime = -1;

__dead void
cpu_reboot(int howto, char *user_boot_string)
{
	boothowto = howto | (boothowto & RB_HALT);

	if (!(howto & RB_NOSYNC) && waittime < 0) {
		waittime = 0;
		vfs_shutdown();
	}

	/* XXX probably save howto into stable storage */

	/* Disable interrupts. */
	splhigh();

	/* Make a crash dump. */
	if (howto & RB_DUMP)
		dumpsys();

	/* Run any shutdown hooks. */
	doshutdownhooks();

	pmf_system_shutdown(boothowto);

	/* in case we came on powerfail interrupt */
	if (cold_hook)
		(*cold_hook)(HPPA_COLD_COLD);

	hppa_led_ctl(0xf, 0, 0);

	if (howto & RB_HALT) {
		if ((howto & RB_POWERDOWN) == RB_POWERDOWN && cold_hook) {
			printf("Powering off...");
			DELAY(1000000);
			(*cold_hook)(HPPA_COLD_OFF);
			DELAY(1000000);
		}

		printf("System halted!\n");
		DELAY(1000000);
		__asm volatile("stwas %0, 0(%1)"
		    :: "r" (CMD_STOP), "r" (LBCAST_ADDR + iomod_command));
	} else {
		printf("rebooting...");
		DELAY(1000000);
		__asm volatile("stwas %0, 0(%1)"
		    :: "r" (CMD_RESET), "r" (LBCAST_ADDR + iomod_command));

		/* ask firmware to reset */
		pdcproc_doreset();
		/* forcibly reset module if that fails */
		__asm __volatile("stwas %0, 0(%1)"
		    :: "r" (CMD_RESET), "r" (HPPA_LBCAST + iomod_command));
	}

	for (;;) {
		/*
		 * loop while bus reset is coming up. This NOP instruction
		 * is used by qemu to detect the 'death loop'.
		 */
		__asm volatile("or %%r31, %%r31, %%r31" ::: "memory");
	}
	/* NOTREACHED */
}

uint32_t dumpmag = 0x8fca0101;	/* magic number */
int	dumpsize = 0;		/* pages */
long	dumplo = 0;		/* blocks */

/*
 * cpu_dumpsize: calculate size of machine-dependent kernel core dump headers.
 */
int
cpu_dumpsize(void)
{
	int size;

	size = ALIGN(sizeof(kcore_seg_t)) + ALIGN(sizeof(cpu_kcore_hdr_t));
	if (roundup(size, dbtob(1)) != dbtob(1))
		return -1;

	return 1;
}

/*
 * This handles a machine check. This can be either an HPMC,
 * an LPMC, or a TOC. The check type is passed in as a trap
 * type, one of T_HPMC, T_LPMC, or T_INTERRUPT (for TOC).
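 * (HPMC and LPMC are the high- and low-priority machine checks; TOC is
 * a transfer of control.)  The in_check flag below limits us to a
 * single panic: a check taken while one is already being handled goes
 * straight to the reboot path.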
1480 */ 1481 static char in_check = 0; 1482 1483 #define PIM_WORD(name, word, bits) \ 1484 do { \ 1485 snprintb(bitmask_buffer, sizeof(bitmask_buffer),\ 1486 bits, word); \ 1487 printf("%s %s", name, bitmask_buffer); \ 1488 } while (/* CONSTCOND */ 0) 1489 1490 1491 static inline void 1492 hppa_pim_dump(int check_type, void *data, size_t size) 1493 { 1494 struct hppa_pim_hpmc *hpmc; 1495 struct hppa_pim_lpmc *lpmc; 1496 struct hppa_pim_toc *toc; 1497 struct hppa_pim_regs *regs; 1498 struct hppa_pim_checks *checks; 1499 u_int *regarray; 1500 int reg_i, reg_j, reg_k; 1501 char bitmask_buffer[64]; 1502 const char *name; 1503 1504 regs = NULL; 1505 checks = NULL; 1506 switch (check_type) { 1507 case T_HPMC: 1508 hpmc = (struct hppa_pim_hpmc *) data; 1509 regs = &hpmc->pim_hpmc_regs; 1510 checks = &hpmc->pim_hpmc_checks; 1511 break; 1512 case T_LPMC: 1513 lpmc = (struct hppa_pim_lpmc *) data; 1514 checks = &lpmc->pim_lpmc_checks; 1515 break; 1516 case T_INTERRUPT: 1517 toc = (struct hppa_pim_toc *) data; 1518 regs = &toc->pim_toc_regs; 1519 break; 1520 default: 1521 panic("unknown machine check type"); 1522 /* NOTREACHED */ 1523 } 1524 1525 /* If we have register arrays, display them. */ 1526 if (regs != NULL) { 1527 for (reg_i = 0; reg_i < 3; reg_i++) { 1528 if (reg_i == 0) { 1529 name = "General"; 1530 regarray = ®s->pim_regs_r0; 1531 reg_j = 32; 1532 } else if (reg_i == 1) { 1533 name = "Control"; 1534 regarray = ®s->pim_regs_cr0; 1535 reg_j = 32; 1536 } else { 1537 name = "Space"; 1538 regarray = ®s->pim_regs_sr0; 1539 reg_j = 8; 1540 } 1541 printf("\n\n\t%s Registers:", name); 1542 for (reg_k = 0; reg_k < reg_j; reg_k++) 1543 printf("%s0x%08x", 1544 (reg_k & 3) ? " " : "\n", 1545 regarray[reg_k]); 1546 } 1547 1548 /* Print out some interesting registers. */ 1549 printf("\n\n\tIIA head 0x%x:0x%08x\n" 1550 "\tIIA tail 0x%x:0x%08x", 1551 regs->pim_regs_cr17, regs->pim_regs_cr18, 1552 regs->pim_regs_iisq_tail, regs->pim_regs_iioq_tail); 1553 PIM_WORD("\n\tIPSW", regs->pim_regs_cr22, PSW_BITS); 1554 printf("\n\tSP 0x%x:0x%08x FP 0x%x:0x%08x", 1555 regs->pim_regs_sr0, regs->pim_regs_r30, 1556 regs->pim_regs_sr0, regs->pim_regs_r3); 1557 } 1558 1559 /* If we have check words, display them. 
*/ 1560 if (checks != NULL) { 1561 PIM_WORD("\n\n\tCheck Type", checks->pim_check_type, 1562 PIM_CHECK_BITS); 1563 PIM_WORD("\n\tCPU State", checks->pim_check_cpu_state, 1564 PIM_CPU_HPMC_BITS); 1565 PIM_WORD("\n\tCache Check", checks->pim_check_cache, 1566 PIM_CACHE_BITS); 1567 PIM_WORD("\n\tTLB Check", checks->pim_check_tlb, 1568 PIM_TLB_BITS); 1569 PIM_WORD("\n\tBus Check", checks->pim_check_bus, 1570 PIM_BUS_BITS); 1571 PIM_WORD("\n\tAssist Check", checks->pim_check_assist, 1572 PIM_ASSIST_BITS); 1573 printf("\tAssist State %u", checks->pim_check_assist_state); 1574 printf("\n\tSystem Responder 0x%08x", 1575 checks->pim_check_responder); 1576 printf("\n\tSystem Requestor 0x%08x", 1577 checks->pim_check_requestor); 1578 printf("\n\tPath Info 0x%08x", 1579 checks->pim_check_path_info); 1580 } 1581 } 1582 1583 static inline void 1584 hppa_pim64_dump(int check_type, void *data, size_t size) 1585 { 1586 struct hppa_pim64_hpmc *hpmc; 1587 struct hppa_pim64_lpmc *lpmc; 1588 struct hppa_pim64_toc *toc; 1589 struct hppa_pim64_regs *regs; 1590 struct hppa_pim64_checks *checks; 1591 int reg_i, reg_j, reg_k; 1592 uint64_t *regarray; 1593 char bitmask_buffer[64]; 1594 const char *name; 1595 1596 regs = NULL; 1597 checks = NULL; 1598 switch (check_type) { 1599 case T_HPMC: 1600 hpmc = (struct hppa_pim64_hpmc *) data; 1601 regs = &hpmc->pim_hpmc_regs; 1602 checks = &hpmc->pim_hpmc_checks; 1603 break; 1604 case T_LPMC: 1605 lpmc = (struct hppa_pim64_lpmc *) data; 1606 checks = &lpmc->pim_lpmc_checks; 1607 break; 1608 case T_INTERRUPT: 1609 toc = (struct hppa_pim64_toc *) data; 1610 regs = &toc->pim_toc_regs; 1611 break; 1612 default: 1613 panic("unknown machine check type"); 1614 /* NOTREACHED */ 1615 } 1616 1617 /* If we have register arrays, display them. */ 1618 if (regs != NULL) { 1619 for (reg_i = 0; reg_i < 3; reg_i++) { 1620 if (reg_i == 0) { 1621 name = "General"; 1622 regarray = ®s->pim_regs_r0; 1623 reg_j = 32; 1624 } else if (reg_i == 1) { 1625 name = "Control"; 1626 regarray = ®s->pim_regs_cr0; 1627 reg_j = 32; 1628 } else { 1629 name = "Space"; 1630 regarray = ®s->pim_regs_sr0; 1631 reg_j = 8; 1632 } 1633 printf("\n\n%s Registers:", name); 1634 for (reg_k = 0; reg_k < reg_j; reg_k++) 1635 printf("%s0x%016lx", 1636 (reg_k & 3) ? " " : "\n", 1637 (unsigned long)regarray[reg_k]); 1638 } 1639 1640 /* Print out some interesting registers. */ 1641 printf("\n\nIIA head 0x%lx:0x%016lx\n" 1642 "IIA tail 0x%lx:0x%016lx", 1643 (unsigned long)regs->pim_regs_cr17, 1644 (unsigned long)regs->pim_regs_cr18, 1645 (unsigned long)regs->pim_regs_iisq_tail, 1646 (unsigned long)regs->pim_regs_iioq_tail); 1647 PIM_WORD("\nIPSW", regs->pim_regs_cr22, PSW_BITS); 1648 printf("\nSP 0x%lx:0x%016lx\nFP 0x%lx:0x%016lx", 1649 (unsigned long)regs->pim_regs_sr0, 1650 (unsigned long)regs->pim_regs_r30, 1651 (unsigned long)regs->pim_regs_sr0, 1652 (unsigned long)regs->pim_regs_r3); 1653 } 1654 1655 /* If we have check words, display them. 
*/ 1656 if (checks != NULL) { 1657 PIM_WORD("\n\nCheck Type", checks->pim_check_type, 1658 PIM_CHECK_BITS); 1659 PIM_WORD("\nCPU State", checks->pim_check_cpu_state, 1660 PIM_CPU_BITS PIM_CPU_HPMC_BITS); 1661 PIM_WORD("\nCache Check", checks->pim_check_cache, 1662 PIM_CACHE_BITS); 1663 PIM_WORD("\nTLB Check", checks->pim_check_tlb, 1664 PIM_TLB_BITS); 1665 PIM_WORD("\nBus Check", checks->pim_check_bus, 1666 PIM_BUS_BITS); 1667 PIM_WORD("\nAssist Check", checks->pim_check_assist, 1668 PIM_ASSIST_BITS); 1669 printf("\nAssist State %u", checks->pim_check_assist_state); 1670 printf("\nSystem Responder 0x%016lx", 1671 (unsigned long)checks->pim_check_responder); 1672 printf("\nSystem Requestor 0x%016lx", 1673 (unsigned long)checks->pim_check_requestor); 1674 printf("\nPath Info 0x%08x", 1675 checks->pim_check_path_info); 1676 } 1677 } 1678 1679 void 1680 hppa_machine_check(int check_type) 1681 { 1682 int pdc_pim_type; 1683 const char *name; 1684 int pimerror, error; 1685 void *data; 1686 size_t size; 1687 1688 /* Do an fcacheall(). */ 1689 fcacheall(); 1690 1691 /* Dispatch on the check type. */ 1692 switch (check_type) { 1693 case T_HPMC: 1694 name = "HPMC"; 1695 pdc_pim_type = PDC_PIM_HPMC; 1696 break; 1697 case T_LPMC: 1698 name = "LPMC"; 1699 pdc_pim_type = PDC_PIM_LPMC; 1700 break; 1701 case T_INTERRUPT: 1702 name = "TOC"; 1703 pdc_pim_type = PDC_PIM_TOC; 1704 break; 1705 default: 1706 panic("unknown machine check type"); 1707 /* NOTREACHED */ 1708 } 1709 1710 pimerror = pdcproc_pim(pdc_pim_type, &pdc_pim, &data, &size); 1711 1712 KASSERT(pdc_pim.count <= size); 1713 1714 /* 1715 * Reset IO and log errors. 1716 * 1717 * This seems to be needed in order to output to the console 1718 * if we take a HPMC interrupt. This PDC procedure may not be 1719 * implemented by some machines. 1720 */ 1721 error = pdcproc_ioclrerrors(); 1722 if (error != PDC_ERR_OK && error != PDC_ERR_NOPROC) 1723 /* This seems futile if we can't print to the console. */ 1724 panic("PDC_IO failed"); 1725 1726 printf("\nmachine check: %s", name); 1727 1728 if (pimerror < 0) { 1729 printf(" - WARNING: could not transfer PIM info (%d)", pimerror); 1730 } else { 1731 if (hppa_cpu_ispa20_p()) 1732 hppa_pim64_dump(check_type, data, size); 1733 else 1734 hppa_pim_dump(check_type, data, size); 1735 } 1736 1737 printf("\n"); 1738 1739 /* If this is our first check, panic. */ 1740 if (in_check == 0) { 1741 in_check = 1; 1742 DELAY(250000); 1743 panic("machine check"); 1744 } 1745 1746 /* Reboot the machine. */ 1747 printf("Rebooting...\n"); 1748 cpu_die(); 1749 } 1750 1751 int 1752 cpu_dump(void) 1753 { 1754 long buf[dbtob(1) / sizeof (long)]; 1755 kcore_seg_t *segp; 1756 cpu_kcore_hdr_t *cpuhdrp __unused; 1757 const struct bdevsw *bdev; 1758 1759 segp = (kcore_seg_t *)buf; 1760 cpuhdrp = (cpu_kcore_hdr_t *)&buf[ALIGN(sizeof(*segp)) / sizeof (long)]; 1761 1762 /* 1763 * Generate a segment header. 1764 */ 1765 CORE_SETMAGIC(*segp, KCORE_MAGIC, MID_MACHINE, CORE_CPU); 1766 segp->c_size = dbtob(1) - ALIGN(sizeof(*segp)); 1767 1768 /* 1769 * Add the machine-dependent header info 1770 */ 1771 /* nothing for now */ 1772 1773 bdev = bdevsw_lookup(dumpdev); 1774 if (bdev == NULL) 1775 return (-1); 1776 1777 return (*bdev->d_dump)(dumpdev, dumplo, (void *)buf, dbtob(1)); 1778 } 1779 1780 /* 1781 * Dump the kernel's image to the swap partition. 
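 * dumpsys() writes the cpu_dump() header and then physical memory in
 * BYTES_PER_DUMP (one page) chunks through the dump device's d_dump
 * routine, starting at the block offset set up by cpu_dumpconf().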
1782 */ 1783 #define BYTES_PER_DUMP PAGE_SIZE 1784 1785 void 1786 dumpsys(void) 1787 { 1788 const struct bdevsw *bdev; 1789 int psize, bytes, i, n; 1790 char *maddr; 1791 daddr_t blkno; 1792 int (*dump)(dev_t, daddr_t, void *, size_t); 1793 int error; 1794 1795 if (dumpdev == NODEV) 1796 return; 1797 bdev = bdevsw_lookup(dumpdev); 1798 if (bdev == NULL) 1799 return; 1800 1801 /* Save registers 1802 savectx(&dumppcb); */ 1803 1804 if (dumpsize == 0) 1805 cpu_dumpconf(); 1806 if (dumplo <= 0) { 1807 printf("\ndump to dev %u,%u not possible\n", 1808 major(dumpdev), minor(dumpdev)); 1809 return; 1810 } 1811 printf("\ndumping to dev %u,%u offset %ld\n", 1812 major(dumpdev), minor(dumpdev), dumplo); 1813 1814 psize = bdev_size(dumpdev); 1815 printf("dump "); 1816 if (psize == -1) { 1817 printf("area unavailable\n"); 1818 return; 1819 } 1820 1821 if (!(error = cpu_dump())) { 1822 1823 /* XXX fredette - this is way broken: */ 1824 bytes = ctob(physmem); 1825 maddr = NULL; 1826 blkno = dumplo + cpu_dumpsize(); 1827 dump = bdev->d_dump; 1828 /* TODO block map the whole physical memory */ 1829 for (i = 0; i < bytes; i += n) { 1830 1831 /* Print out how many MBs we are to go. */ 1832 n = bytes - i; 1833 if (n && (n % (1024*1024)) == 0) 1834 printf_nolog("%d ", n / (1024 * 1024)); 1835 1836 /* Limit size for next transfer. */ 1837 1838 if (n > BYTES_PER_DUMP) 1839 n = BYTES_PER_DUMP; 1840 1841 if ((error = (*dump)(dumpdev, blkno, maddr, n))) 1842 break; 1843 maddr += n; 1844 blkno += btodb(n); 1845 } 1846 } 1847 1848 switch (error) { 1849 case ENXIO: printf("device bad\n"); break; 1850 case EFAULT: printf("device not ready\n"); break; 1851 case EINVAL: printf("area improper\n"); break; 1852 case EIO: printf("i/o error\n"); break; 1853 case EINTR: printf("aborted from console\n"); break; 1854 case 0: printf("succeeded\n"); break; 1855 default: printf("error %d\n", error); break; 1856 } 1857 } 1858 1859 void 1860 hppa_setvmspace(struct lwp *l) 1861 { 1862 struct proc *p = l->l_proc; 1863 struct trapframe *tf = l->l_md.md_regs; 1864 pmap_t pmap = p->p_vmspace->vm_map.pmap; 1865 pa_space_t space = pmap->pm_space; 1866 1867 if (p->p_md.md_flags & MDP_OLDSPACE) { 1868 tf->tf_sr7 = HPPA_SID_KERNEL; 1869 } else { 1870 tf->tf_sr7 = space; 1871 } 1872 1873 tf->tf_sr2 = HPPA_SID_KERNEL; 1874 1875 /* Load all of the user's space registers. */ 1876 tf->tf_sr0 = tf->tf_sr1 = tf->tf_sr3 = 1877 tf->tf_sr4 = tf->tf_sr5 = tf->tf_sr6 = 1878 tf->tf_iisq_head = tf->tf_iisq_tail = space; 1879 1880 /* Load the protection registers. */ 1881 tf->tf_pidr1 = tf->tf_pidr2 = pmap->pm_pid; 1882 } 1883 1884 /* 1885 * Set registers on exec. 1886 */ 1887 void 1888 setregs(struct lwp *l, struct exec_package *pack, vaddr_t stack) 1889 { 1890 struct proc *p = l->l_proc; 1891 struct trapframe *tf = l->l_md.md_regs; 1892 struct pcb *pcb = lwp_getpcb(l); 1893 1894 memset(tf, 0, sizeof(*tf)); 1895 1896 /* 1897 * Initialize the External Interrupt Enable Mask, Processor 1898 * Status Word, and NetBSD's floating-point register area 1899 * pointer to the correct defaults for a user process. 1900 * 1901 * XXXMPSAFE If curcpu()->ci_eiem can vary from CPU to CPU, we 1902 * have bigger problems here -- if the lwp is migrated from one 1903 * CPU to another CPU between when the trapframe is saved and 1904 * when the trapframe is restored, it might be invalidated. 1905 */ 1906 tf->tf_eiem = curcpu()->ci_eiem; 1907 tf->tf_ipsw = PSW_MBS | (hppa_cpu_ispa20_p() ? 
PSW_O : 0); 1908 tf->tf_cr30 = (u_int)pcb->pcb_fpregs; 1909 1910 tf->tf_flags = TFF_SYS|TFF_LAST; 1911 tf->tf_iioq_tail = 4 + 1912 (tf->tf_iioq_head = pack->ep_entry | HPPA_PC_PRIV_USER); 1913 tf->tf_rp = 0; 1914 tf->tf_arg0 = p->p_psstrp; 1915 tf->tf_arg1 = tf->tf_arg2 = 0; /* XXX dynload stuff */ 1916 1917 if (pack->ep_osversion < 699003600) { 1918 p->p_md.md_flags |= MDP_OLDSPACE; 1919 } else { 1920 p->p_md.md_flags = 0; 1921 } 1922 1923 hppa_setvmspace(l); 1924 1925 /* reset any of the pending FPU exceptions */ 1926 hppa_fpu_flush(l); 1927 memset(pcb->pcb_fpregs, 0, sizeof(*pcb->pcb_fpregs)); 1928 pcb->pcb_fpregs->fpr_regs[0] = ((uint64_t)HPPA_FPU_INIT) << 32; 1929 pcb->pcb_fpregs->fpr_regs[1] = 0; 1930 pcb->pcb_fpregs->fpr_regs[2] = 0; 1931 pcb->pcb_fpregs->fpr_regs[3] = 0; 1932 1933 l->l_md.md_bpva = 0; 1934 1935 /* setup terminal stack frame */ 1936 stack = (u_long)STACK_ALIGN(stack, 63); 1937 tf->tf_r3 = stack; 1938 ustore_long((void *)(stack), 0); 1939 stack += HPPA_FRAME_SIZE; 1940 ustore_long((void *)(stack + HPPA_FRAME_PSP), 0); 1941 tf->tf_sp = stack; 1942 } 1943 1944 /* 1945 * machine dependent system variables. 1946 */ 1947 static int 1948 sysctl_machdep_boot(SYSCTLFN_ARGS) 1949 { 1950 struct sysctlnode node = *rnode; 1951 struct btinfo_kernelfile *bi_file; 1952 const char *cp = NULL; 1953 1954 switch (node.sysctl_num) { 1955 case CPU_BOOTED_KERNEL: 1956 if ((bi_file = lookup_bootinfo(BTINFO_KERNELFILE)) != NULL) 1957 cp = bi_file->name; 1958 if (cp != NULL && cp[0] == '\0') 1959 cp = "netbsd"; 1960 break; 1961 default: 1962 return (EINVAL); 1963 } 1964 1965 if (cp == NULL || cp[0] == '\0') 1966 return (ENOENT); 1967 1968 node.sysctl_data = __UNCONST(cp); 1969 node.sysctl_size = strlen(cp) + 1; 1970 return (sysctl_lookup(SYSCTLFN_CALL(&node))); 1971 } 1972 1973 #if NLCD > 0 1974 static int 1975 sysctl_machdep_heartbeat(SYSCTLFN_ARGS) 1976 { 1977 int error; 1978 bool oldval; 1979 struct sysctlnode node = *rnode; 1980 1981 oldval = lcd_blink_p; 1982 /* 1983 * If we were false and are now true, start the timer. 1984 */ 1985 error = sysctl_lookup(SYSCTLFN_CALL(&node)); 1986 1987 if (error || newp == NULL) 1988 return (error); 1989 1990 if (!oldval && lcd_blink_p) 1991 blink_lcd_timeout(NULL); 1992 1993 return 0; 1994 } 1995 #endif 1996 1997 /* 1998 * machine dependent system variables. 1999 */ 2000 SYSCTL_SETUP(sysctl_machdep_setup, "sysctl machdep subtree setup") 2001 { 2002 2003 sysctl_createv(clog, 0, NULL, NULL, 2004 CTLFLAG_PERMANENT, 2005 CTLTYPE_NODE, "machdep", NULL, 2006 NULL, 0, NULL, 0, 2007 CTL_MACHDEP, CTL_EOL); 2008 2009 sysctl_createv(clog, 0, NULL, NULL, 2010 CTLFLAG_PERMANENT, 2011 CTLTYPE_STRUCT, "console_device", NULL, 2012 sysctl_consdev, 0, NULL, sizeof(dev_t), 2013 CTL_MACHDEP, CPU_CONSDEV, CTL_EOL); 2014 2015 sysctl_createv(clog, 0, NULL, NULL, 2016 CTLFLAG_PERMANENT, 2017 CTLTYPE_STRING, "booted_kernel", NULL, 2018 sysctl_machdep_boot, 0, NULL, 0, 2019 CTL_MACHDEP, CPU_BOOTED_KERNEL, CTL_EOL); 2020 #if NLCD > 0 2021 sysctl_createv(clog, 0, NULL, NULL, 2022 CTLFLAG_PERMANENT|CTLFLAG_READWRITE, 2023 CTLTYPE_BOOL, "lcd_blink", "Display heartbeat on the LCD display", 2024 sysctl_machdep_heartbeat, 0, &lcd_blink_p, 0, 2025 CTL_MACHDEP, CPU_LCD_BLINK, CTL_EOL); 2026 #endif 2027 } 2028 2029 /* 2030 * Given the type of a bootinfo entry, looks for a matching item inside 2031 * the bootinfo structure. If found, returns a pointer to it (which must 2032 * then be casted to the appropriate bootinfo_* type); otherwise, returns 2033 * NULL. 
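 * Entries are packed back to back in bootinfo.bi_data; the walk below
 * advances by each entry's common "len" field and compares its "type"
 * field, so unknown entry types are simply skipped.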
2034 */ 2035 void * 2036 lookup_bootinfo(int type) 2037 { 2038 struct btinfo_common *bic; 2039 int i; 2040 2041 bic = (struct btinfo_common *)(&bootinfo.bi_data[0]); 2042 for (i = 0; i < bootinfo.bi_nentries; i++) 2043 if (bic->type == type) 2044 return bic; 2045 else 2046 bic = (struct btinfo_common *) 2047 ((uint8_t *)bic + bic->len); 2048 2049 return NULL; 2050 } 2051 2052 /* 2053 * consinit: 2054 * initialize the system console. 2055 */ 2056 void 2057 consinit(void) 2058 { 2059 static int initted = 0; 2060 2061 if (!initted) { 2062 initted++; 2063 cninit(); 2064 } 2065 } 2066 2067 #if NLCD > 0 2068 struct blink_lcd_softc { 2069 SLIST_HEAD(, blink_lcd) bls_head; 2070 int bls_on; 2071 struct callout bls_to; 2072 } blink_sc = { 2073 .bls_head = SLIST_HEAD_INITIALIZER(bls_head) 2074 }; 2075 2076 void 2077 blink_lcd_register(struct blink_lcd *l) 2078 { 2079 if (SLIST_EMPTY(&blink_sc.bls_head)) { 2080 callout_init(&blink_sc.bls_to, 0); 2081 callout_setfunc(&blink_sc.bls_to, blink_lcd_timeout, &blink_sc); 2082 blink_sc.bls_on = 0; 2083 if (lcd_blink_p) 2084 callout_schedule(&blink_sc.bls_to, 1); 2085 } 2086 SLIST_INSERT_HEAD(&blink_sc.bls_head, l, bl_next); 2087 } 2088 2089 void 2090 blink_lcd_timeout(void *vsc) 2091 { 2092 struct blink_lcd_softc *sc = &blink_sc; 2093 struct blink_lcd *l; 2094 int t; 2095 2096 if (SLIST_EMPTY(&sc->bls_head)) 2097 return; 2098 2099 SLIST_FOREACH(l, &sc->bls_head, bl_next) { 2100 (*l->bl_func)(l->bl_arg, sc->bls_on); 2101 } 2102 sc->bls_on = !sc->bls_on; 2103 2104 if (!lcd_blink_p) 2105 return; 2106 2107 /* 2108 * Blink rate is: 2109 * full cycle every second if completely idle (loadav = 0) 2110 * full cycle every 2 seconds if loadav = 1 2111 * full cycle every 3 seconds if loadav = 2 2112 * etc. 2113 */ 2114 t = (((averunnable.ldavg[0] + FSCALE) * hz) >> (FSHIFT + 1)); 2115 callout_schedule(&sc->bls_to, t); 2116 } 2117 #endif 2118 2119 #ifdef MODULAR 2120 /* 2121 * Push any modules loaded by the boot loader. 2122 */ 2123 void 2124 module_init_md(void) 2125 { 2126 } 2127 #endif /* MODULAR */ 2128 2129 bool 2130 mm_md_direct_mapped_phys(paddr_t paddr, vaddr_t *vaddr) 2131 { 2132 2133 if (atop(paddr) > physmem) { 2134 return false; 2135 } 2136 *vaddr = paddr; 2137 2138 return true; 2139 } 2140 2141 int 2142 mm_md_physacc(paddr_t pa, vm_prot_t prot) 2143 { 2144 2145 return (atop(pa) > physmem) ? EFAULT : 0; 2146 } 2147 2148 int 2149 mm_md_kernacc(void *ptr, vm_prot_t prot, bool *handled) 2150 { 2151 extern int kernel_text; 2152 extern int __data_start; 2153 extern int end; 2154 2155 const vaddr_t ksro = (vaddr_t) &kernel_text; 2156 const vaddr_t ksrw = (vaddr_t) &__data_start; 2157 const vaddr_t kend = (vaddr_t) end; 2158 const vaddr_t v = (vaddr_t)ptr; 2159 2160 *handled = false; 2161 if (v >= ksro && v < kend) { 2162 *handled = true; 2163 if (v < ksrw && (prot & VM_PROT_WRITE)) { 2164 return EFAULT; 2165 } 2166 } else if (v >= kend && atop((paddr_t)v) < physmem) { 2167 *handled = true; 2168 } 2169 2170 return 0; 2171 } 2172