1 /* $NetBSD: machdep.c,v 1.22 2025/03/16 15:34:59 riastradh Exp $ */ 2 3 /*- 4 * Copyright (c) 2001, 2002 The NetBSD Foundation, Inc. 5 * All rights reserved. 6 * 7 * This code is derived from software contributed to The NetBSD Foundation 8 * by Matthew Fredette. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 29 * POSSIBILITY OF SUCH DAMAGE. 30 */ 31 32 /* $OpenBSD: machdep.c,v 1.40 2001/09/19 20:50:56 mickey Exp $ */ 33 34 /* 35 * Copyright (c) 1999-2003 Michael Shalayeff 36 * All rights reserved. 37 * 38 * Redistribution and use in source and binary forms, with or without 39 * modification, are permitted provided that the following conditions 40 * are met: 41 * 1. Redistributions of source code must retain the above copyright 42 * notice, this list of conditions and the following disclaimer. 43 * 2. Redistributions in binary form must reproduce the above copyright 44 * notice, this list of conditions and the following disclaimer in the 45 * documentation and/or other materials provided with the distribution. 46 * 47 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 48 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 49 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 50 * IN NO EVENT SHALL THE AUTHOR OR HIS RELATIVES BE LIABLE FOR ANY DIRECT, 51 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 52 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 53 * SERVICES; LOSS OF MIND, USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 54 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, 55 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING 56 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 57 * THE POSSIBILITY OF SUCH DAMAGE. 
58 */ 59 60 #include <sys/cdefs.h> 61 __KERNEL_RCSID(0, "$NetBSD: machdep.c,v 1.22 2025/03/16 15:34:59 riastradh Exp $"); 62 63 #include "opt_cputype.h" 64 #include "opt_ddb.h" 65 #include "opt_kgdb.h" 66 #include "opt_modular.h" 67 #include "opt_useleds.h" 68 69 #include <sys/param.h> 70 #include <sys/systm.h> 71 #include <sys/signalvar.h> 72 #include <sys/kernel.h> 73 #include <sys/proc.h> 74 #include <sys/buf.h> 75 #include <sys/cpu.h> 76 #include <sys/reboot.h> 77 #include <sys/device.h> 78 #include <sys/conf.h> 79 #include <sys/file.h> 80 #include <sys/callout.h> 81 #include <sys/mbuf.h> 82 #include <sys/msgbuf.h> 83 #include <sys/ioctl.h> 84 #include <sys/tty.h> 85 #include <sys/exec.h> 86 #include <sys/exec_aout.h> /* for MID_* */ 87 #include <sys/sysctl.h> 88 #include <sys/core.h> 89 #include <sys/kcore.h> 90 #include <sys/module.h> 91 #include <sys/extent.h> 92 #include <sys/ksyms.h> 93 #include <sys/mount.h> 94 #include <sys/mutex.h> 95 #include <sys/syscallargs.h> 96 97 #include <uvm/uvm_page.h> 98 #include <uvm/uvm.h> 99 100 #include <dev/cons.h> 101 #include <dev/mm.h> 102 103 #include <machine/pdc.h> 104 #include <machine/iomod.h> 105 #include <machine/psl.h> 106 #include <machine/reg.h> 107 #include <machine/cpufunc.h> 108 #include <machine/autoconf.h> 109 #include <machine/bootinfo.h> 110 #include <machine/kcore.h> 111 #include <machine/pcb.h> 112 113 #ifdef KGDB 114 #include "com.h" 115 #endif 116 117 #ifdef DDB 118 #include <machine/db_machdep.h> 119 #include <ddb/db_access.h> 120 #include <ddb/db_sym.h> 121 #include <ddb/db_extern.h> 122 #endif 123 124 #include <hppa/hppa/machdep.h> 125 #include <hppa/hppa/pim.h> 126 #include <hppa/dev/cpudevs.h> 127 128 #include "ksyms.h" 129 #include "lcd.h" 130 131 #ifdef MACHDEPDEBUG 132 133 #define DPRINTF(s) do { \ 134 if (machdepdebug) \ 135 printf s; \ 136 } while(0) 137 138 #define DPRINTFN(l,s) do { \ 139 if (machdepdebug >= (1)) \ 140 printf s; \ 141 } while(0) 142 143 int machdepdebug = 1; 144 #else 145 #define DPRINTF(s) /* */ 146 #define DPRINTFN(l,s) /* */ 147 #endif 148 149 /* 150 * Different kinds of flags used throughout the kernel. 151 */ 152 void *msgbufaddr; 153 154 /* The primary (aka monarch) cpu HPA */ 155 hppa_hpa_t hppa_mcpuhpa; 156 157 /* 158 * cache configuration, for most machines is the same 159 * numbers, so it makes sense to do defines w/ numbers depending 160 * on configured CPU types in the kernel 161 */ 162 int icache_stride, icache_line_mask; 163 int dcache_stride, dcache_line_mask; 164 165 /* 166 * things to not kill 167 */ 168 volatile uint8_t *machine_ledaddr; 169 int machine_ledword, machine_leds; 170 171 /* 172 * This flag is nonzero iff page zero is mapped. 173 * It is initialized to 1, because before we go 174 * virtual, page zero *is* available. It is set 175 * to zero right before we go virtual. 176 */ 177 static int pagezero_mapped = 1; 178 179 /* 180 * CPU params (should be the same for all cpus in the system) 181 */ 182 struct pdc_cache pdc_cache; 183 struct pdc_btlb pdc_btlb; 184 struct pdc_model pdc_model; 185 186 int usebtlb; 187 188 /* 189 * The BTLB slots. 190 */ 191 static struct btlb_slot { 192 193 /* The number associated with this slot. */ 194 int btlb_slot_number; 195 196 /* The flags associated with this slot. */ 197 int btlb_slot_flags; 198 #define BTLB_SLOT_IBTLB (1 << 0) 199 #define BTLB_SLOT_DBTLB (1 << 1) 200 #define BTLB_SLOT_CBTLB (BTLB_SLOT_IBTLB | BTLB_SLOT_DBTLB) 201 #define BTLB_SLOT_VARIABLE_RANGE (1 << 2) 202 203 /* 204 * The mapping information. 
A mapping is free 205 * if its btlb_slot_frames member is zero. 206 */ 207 pa_space_t btlb_slot_va_space; 208 vaddr_t btlb_slot_va_frame; 209 paddr_t btlb_slot_pa_frame; 210 vsize_t btlb_slot_frames; 211 u_int btlb_slot_tlbprot; 212 } *btlb_slots; 213 int btlb_slots_count; 214 215 /* w/ a little deviation should be the same for all installed cpus */ 216 u_int cpu_ticksnum, cpu_ticksdenom, cpu_hzticks; 217 218 /* exported info */ 219 char machine[] = MACHINE; 220 const struct hppa_cpu_info *hppa_cpu_info; 221 enum hppa_cpu_type cpu_type; 222 int cpu_modelno; 223 int cpu_revision; 224 225 #if NLCD > 0 226 bool lcd_blink_p; 227 #endif 228 229 /* 230 * exported methods for cpus 231 */ 232 int (*cpu_desidhash)(void); 233 int (*cpu_hpt_init)(vaddr_t, vsize_t); 234 int (*cpu_ibtlb_ins)(int, pa_space_t, vaddr_t, paddr_t, vsize_t, u_int); 235 int (*cpu_dbtlb_ins)(int, pa_space_t, vaddr_t, paddr_t, vsize_t, u_int); 236 237 dev_t bootdev; 238 int totalphysmem; /* # pages in system */ 239 int availphysmem; /* # pages available to kernel */ 240 int esym; 241 paddr_t avail_end; 242 243 /* 244 * Our copy of the bootinfo struct passed to us by the boot loader. 245 */ 246 struct bootinfo bootinfo; 247 248 /* 249 * XXX note that 0x12000 is the old kernel text start 250 * address. Memory below this is assumed to belong 251 * to the firmware. This value is converted into pages 252 * by hppa_init and used as pages in pmap_bootstrap(). 253 */ 254 int resvmem = 0x12000; 255 int resvphysmem; 256 257 /* 258 * BTLB parameters, broken out for the MI hppa code. 259 */ 260 u_int hppa_btlb_size_min, hppa_btlb_size_max; 261 262 /* 263 * Things for MI glue to stick on. 264 */ 265 struct extent *hppa_io_extent; 266 static long hppa_io_extent_store[EXTENT_FIXED_STORAGE_SIZE(64) / sizeof(long)]; 267 268 struct pool hppa_fppl; 269 struct fpreg lwp0_fpregs; 270 271 /* Our exported CPU info */ 272 struct cpu_info cpus[HPPA_MAXCPUS] = { 273 #ifdef MULTIPROCESSOR 274 { 275 .ci_curlwp = &lwp0, 276 }, 277 #endif 278 }; 279 280 struct vm_map *phys_map = NULL; 281 282 void delay_init(void); 283 static inline void fall(int, int, int, int, int); 284 void dumpsys(void); 285 void cpuid(void); 286 enum hppa_cpu_type cpu_model_cpuid(int); 287 #if NLCD > 0 288 void blink_lcd_timeout(void *); 289 #endif 290 291 /* 292 * wide used hardware params 293 */ 294 struct pdc_hwtlb pdc_hwtlb; 295 struct pdc_coproc pdc_coproc; 296 struct pdc_coherence pdc_coherence; 297 struct pdc_spidb pdc_spidbits; 298 struct pdc_pim pdc_pim; 299 struct pdc_model pdc_model; 300 301 /* 302 * Debugger info. 
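 *
 * hppa_kgdb_attached records whether a KGDB console was successfully
 * attached; it is set in hppa_init() below, and only when the kernel
 * is built with KGDB and (for a "com" debug port) com_gsc_kgdb_attach()
 * succeeds.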
303 */ 304 int hppa_kgdb_attached; 305 306 /* 307 * Whatever CPU types we support 308 */ 309 extern const u_int itlb_x[], itlbna_x[], dtlb_x[], dtlbna_x[], tlbd_x[]; 310 extern const u_int itlb_s[], itlbna_s[], dtlb_s[], dtlbna_s[], tlbd_s[]; 311 extern const u_int itlb_t[], itlbna_t[], dtlb_t[], dtlbna_t[], tlbd_t[]; 312 extern const u_int itlb_l[], itlbna_l[], dtlb_l[], dtlbna_l[], tlbd_l[]; 313 extern const u_int itlb_u[], itlbna_u[], dtlb_u[], dtlbna_u[], tlbd_u[]; 314 315 int iibtlb_s(int, pa_space_t, vaddr_t, paddr_t, vsize_t, u_int); 316 int idbtlb_s(int, pa_space_t, vaddr_t, paddr_t, vsize_t, u_int); 317 int ibtlb_t(int, pa_space_t, vaddr_t, paddr_t, vsize_t, u_int); 318 int ibtlb_l(int, pa_space_t, vaddr_t, paddr_t, vsize_t, u_int); 319 int ibtlb_u(int, pa_space_t, vaddr_t, paddr_t, vsize_t, u_int); 320 int ibtlb_g(int, pa_space_t, vaddr_t, paddr_t, vsize_t, u_int); 321 int pbtlb_g(int); 322 int pbtlb_u(int); 323 int hpti_l(vaddr_t, vsize_t); 324 int hpti_u(vaddr_t, vsize_t); 325 int hpti_g(vaddr_t, vsize_t); 326 int desidhash_x(void); 327 int desidhash_s(void); 328 int desidhash_t(void); 329 int desidhash_l(void); 330 int desidhash_u(void); 331 332 const struct hppa_cpu_info cpu_types[] = { 333 #ifdef HP7000_CPU 334 { "PA7000", NULL, "PCX", 335 hpcx, 0, 336 0, "1.0", 337 desidhash_x, itlb_x, dtlb_x, itlbna_x, dtlbna_x, tlbd_x, 338 ibtlb_g, NULL, pbtlb_g, NULL }, /* XXXNH check */ 339 #endif 340 #ifdef HP7000_CPU 341 { "PA7000", NULL, "PCXS", 342 hpcxs, 0, 343 0, "1.1a", 344 desidhash_s, itlb_s, dtlb_s, itlbna_s, dtlbna_s, tlbd_s, 345 ibtlb_g, NULL, pbtlb_g, NULL }, 346 #endif 347 #ifdef HP7100_CPU 348 { "PA7100", "T-Bird", "PCXT", 349 hpcxt, 0, 350 HPPA_FTRS_BTLBU, "1.1b", 351 desidhash_t, itlb_t, dtlb_t, itlbna_t, dtlbna_t, tlbd_t, 352 ibtlb_g, NULL, pbtlb_g, NULL }, 353 #endif 354 #ifdef HP7100LC_CPU 355 { "PA7100LC", "Hummingbird", "PCXL", 356 hpcxl, HPPA_CPU_PCXL, 357 HPPA_FTRS_TLBU | HPPA_FTRS_BTLBU | HPPA_FTRS_HVT, "1.1c", 358 desidhash_l, itlb_l, dtlb_l, itlbna_l, dtlbna_l, tlbd_l, 359 ibtlb_g, NULL, pbtlb_g, hpti_g }, 360 #endif 361 #ifdef HP7200_CPU 362 { "PA7200", "T-Bird", "PCXT'", 363 hpcxtp, HPPA_CPU_PCXT2, 364 HPPA_FTRS_BTLBU, "1.1d", 365 desidhash_t, itlb_t, dtlb_t, itlbna_t, dtlbna_t, tlbd_t, 366 ibtlb_g, NULL, pbtlb_g, NULL }, 367 #endif 368 #ifdef HP7300LC_CPU 369 { "PA7300LC", "Velociraptor", "PCXL2", 370 hpcxl2, HPPA_CPU_PCXL2, 371 HPPA_FTRS_TLBU | HPPA_FTRS_BTLBU | HPPA_FTRS_HVT, "1.1e", 372 NULL, itlb_l, dtlb_l, itlbna_l, dtlbna_l, tlbd_l, 373 ibtlb_g, NULL, pbtlb_g, hpti_g }, 374 #endif 375 #ifdef HP8000_CPU 376 { "PA8000", "Onyx", "PCXU", 377 hpcxu, HPPA_CPU_PCXU, 378 HPPA_FTRS_W32B, "2.0", 379 desidhash_u, itlb_u, dtlb_u, itlbna_u, dtlbna_u, tlbd_u, 380 ibtlb_u, NULL, pbtlb_u, NULL }, 381 #endif 382 #ifdef HP8200_CPU 383 { "PA8200", "Vulcan", "PCXU+", 384 hpcxup, HPPA_CPU_PCXUP, 385 HPPA_FTRS_W32B, "2.0", 386 desidhash_u, itlb_u, dtlb_u, itlbna_u, dtlbna_u, tlbd_u, 387 ibtlb_u, NULL, pbtlb_u, NULL }, 388 #endif 389 #ifdef HP8500_CPU 390 { "PA8500", "Barra'Cuda", "PCXW", 391 hpcxw, HPPA_CPU_PCXW, 392 HPPA_FTRS_W32B, "2.0", 393 desidhash_u, itlb_u, dtlb_u, itlbna_u, dtlbna_u, tlbd_u, 394 ibtlb_u, NULL, pbtlb_u, NULL }, 395 #endif 396 #ifdef HP8600_CPU 397 { "PA8600", "Landshark", "PCXW+", 398 hpcxwp, HPPA_CPU_PCXWP, 399 HPPA_FTRS_W32B, "2.0", 400 desidhash_u, itlb_u, dtlb_u, itlbna_u, dtlbna_u, tlbd_u, 401 ibtlb_u, NULL, pbtlb_u, NULL }, 402 #endif 403 #ifdef HP8700_CPU 404 { "PA8700", "Piranha", "PCXW2", 405 hpcxw2, HPPA_CPU_PCXW2, 406 HPPA_FTRS_W32B, 
"2.0", 407 desidhash_u, itlb_u, dtlb_u, itlbna_u, dtlbna_u, tlbd_u, 408 ibtlb_u, NULL, pbtlb_u, NULL }, 409 #endif 410 #ifdef HP8800_CPU 411 { "PA8800", "Mako", "Make", 412 mako, HPPA_CPU_PCXW2, 413 HPPA_FTRS_W32B, "2.0", 414 desidhash_u, itlb_u, dtlb_u, itlbna_u, dtlbna_u, tlbd_u, 415 ibtlb_u, NULL, pbtlb_u, NULL }, 416 #endif 417 #ifdef HP8900_CPU 418 { "PA8900", "Shortfin", "Shortfin", 419 mako, HPPA_CPU_PCXW2, 420 HPPA_FTRS_W32B, "2.0", 421 desidhash_u, itlb_u, dtlb_u, itlbna_u, dtlbna_u, tlbd_u, 422 ibtlb_u, NULL, pbtlb_u, NULL }, 423 #endif 424 }; 425 426 void 427 hppa_init(paddr_t start, void *bi) 428 { 429 vaddr_t vstart; 430 vaddr_t v; 431 int error; 432 u_int *p, *q; 433 struct btlb_slot *btlb_slot; 434 int btlb_slot_i; 435 struct btinfo_symtab *bi_sym; 436 struct pcb *pcb0; 437 struct cpu_info *ci; 438 439 #ifdef KGDB 440 boothowto |= RB_KDB; /* go to kgdb early if compiled in. */ 441 #endif 442 /* Setup curlwp/curcpu early for LOCKDEBUG and spl* */ 443 #ifdef MULTIPROCESSOR 444 mtctl(&cpus[0], CR_CURCPU); 445 #else 446 mtctl(&lwp0, CR_CURLWP); 447 #endif 448 lwp0.l_cpu = &cpus[0]; 449 450 /* curcpu() is now valid */ 451 ci = curcpu(); 452 453 ci->ci_psw = 454 PSW_Q | /* Interrupt State Collection Enable */ 455 PSW_P | /* Protection Identifier Validation Enable */ 456 PSW_C | /* Instruction Address Translation Enable */ 457 PSW_D; /* Data Address Translation Enable */ 458 459 /* Copy bootinfo */ 460 if (bi != NULL) 461 memcpy(&bootinfo, bi, sizeof(struct bootinfo)); 462 463 /* init PDC iface, so we can call em easy */ 464 pdc_init(); 465 466 cpu_hzticks = (PAGE0->mem_10msec * 100) / hz; 467 468 /* calculate CPU clock ratio */ 469 delay_init(); 470 471 /* fetch the monarch/"default" cpu hpa */ 472 error = pdcproc_hpa_processor(&hppa_mcpuhpa); 473 if (error < 0) 474 panic("%s: PDC_HPA failed", __func__); 475 476 /* cache parameters */ 477 error = pdcproc_cache(&pdc_cache); 478 if (error < 0) { 479 DPRINTF(("WARNING: PDC_CACHE error %d\n", error)); 480 } 481 482 dcache_line_mask = pdc_cache.dc_conf.cc_line * 16 - 1; 483 dcache_stride = pdc_cache.dc_stride; 484 icache_line_mask = pdc_cache.ic_conf.cc_line * 16 - 1; 485 icache_stride = pdc_cache.ic_stride; 486 487 error = pdcproc_cache_spidbits(&pdc_spidbits); 488 DPRINTF(("SPID bits: 0x%x, error = %d\n", pdc_spidbits.spidbits, error)); 489 490 /* Calculate the OS_HPMC handler checksums. */ 491 p = os_hpmc; 492 if (pdcproc_instr(p)) 493 *p = 0x08000240; 494 p[7] = ((char *) &os_hpmc_cont_end) - ((char *) &os_hpmc_cont); 495 p[6] = (u_int) &os_hpmc_cont; 496 p[5] = -(p[0] + p[1] + p[2] + p[3] + p[4] + p[6] + p[7]); 497 p = &os_hpmc_cont; 498 q = os_hpmc_checksum; 499 for (*q = 0; p < q; *q -= *(p++)); 500 501 /* Calculate the OS_TOC handler checksum. */ 502 p = (u_int *) &os_toc; 503 q = os_toc_checksum; 504 for (*q = 0; p < q; *q -= *(p++)); 505 506 /* Install the OS_TOC handler. 
*/ 507 PAGE0->ivec_toc = os_toc; 508 PAGE0->ivec_toclen = ((char *) &os_toc_end) - ((char *) &os_toc); 509 510 cpuid(); 511 ptlball(); 512 fcacheall(); 513 514 avail_end = trunc_page(PAGE0->imm_max_mem); 515 totalphysmem = atop(avail_end); 516 if (avail_end > SYSCALLGATE) 517 avail_end = SYSCALLGATE; 518 physmem = atop(avail_end); 519 resvmem = atop(resvmem); /* XXXNH */ 520 521 /* we hope this won't fail */ 522 hppa_io_extent = extent_create("io", 523 HPPA_IOSPACE, 0xffffffff, 524 (void *)hppa_io_extent_store, sizeof(hppa_io_extent_store), 525 EX_NOCOALESCE|EX_NOWAIT); 526 527 vstart = round_page(start); 528 529 /* 530 * Now allocate kernel dynamic variables 531 */ 532 533 /* Allocate the msgbuf. */ 534 msgbufaddr = (void *) vstart; 535 vstart += MSGBUFSIZE; 536 vstart = round_page(vstart); 537 538 if (usebtlb) { 539 /* Allocate and initialize the BTLB slots array. */ 540 btlb_slots = (struct btlb_slot *) ALIGN(vstart); 541 btlb_slot = btlb_slots; 542 #define BTLB_SLOTS(count, flags) \ 543 do { \ 544 for (btlb_slot_i = 0; \ 545 btlb_slot_i < pdc_btlb.count; \ 546 btlb_slot_i++) { \ 547 btlb_slot->btlb_slot_number = (btlb_slot - btlb_slots); \ 548 btlb_slot->btlb_slot_flags = flags; \ 549 btlb_slot->btlb_slot_frames = 0; \ 550 btlb_slot++; \ 551 } \ 552 } while (/* CONSTCOND */ 0) 553 554 BTLB_SLOTS(finfo.num_i, BTLB_SLOT_IBTLB); 555 BTLB_SLOTS(finfo.num_d, BTLB_SLOT_DBTLB); 556 BTLB_SLOTS(finfo.num_c, BTLB_SLOT_CBTLB); 557 BTLB_SLOTS(vinfo.num_i, BTLB_SLOT_IBTLB | BTLB_SLOT_VARIABLE_RANGE); 558 BTLB_SLOTS(vinfo.num_d, BTLB_SLOT_DBTLB | BTLB_SLOT_VARIABLE_RANGE); 559 BTLB_SLOTS(vinfo.num_c, BTLB_SLOT_CBTLB | BTLB_SLOT_VARIABLE_RANGE); 560 #undef BTLB_SLOTS 561 562 btlb_slots_count = (btlb_slot - btlb_slots); 563 vstart = round_page((vaddr_t) btlb_slot); 564 } 565 566 v = vstart; 567 568 /* sets resvphysmem */ 569 pmap_bootstrap(v); 570 571 /* 572 * BELOW THIS LINE REFERENCING PAGE0 AND OTHER LOW MEMORY 573 * LOCATIONS, AND WRITING THE KERNEL TEXT ARE PROHIBITED 574 * WITHOUT TAKING SPECIAL MEASURES. 575 */ 576 577 DPRINTF(("%s: PDC_CHASSIS\n", __func__)); 578 579 /* they say PDC_COPROC might turn fault light on */ 580 pdcproc_chassis_display(PDC_OSTAT(PDC_OSTAT_RUN) | 0xCEC0); 581 582 DPRINTF(("%s: intr bootstrap\n", __func__)); 583 /* Bootstrap interrupt masking and dispatching. */ 584 hppa_intr_initialise(ci); 585 586 /* 587 * Initialize any debugger. 588 */ 589 #ifdef KGDB 590 /* 591 * XXX note that we're not virtual yet, yet these 592 * KGDB attach functions will be using bus_space(9) 593 * to map and manipulate their devices. This only 594 * works because, currently, the mainbus.c bus_space 595 * implementation directly-maps things in I/O space. 596 */ 597 hppa_kgdb_attached = false; 598 #if NCOM > 0 599 if (!strcmp(KGDB_DEVNAME, "com")) { 600 int com_gsc_kgdb_attach(void); 601 if (com_gsc_kgdb_attach() == 0) 602 hppa_kgdb_attached = true; 603 } 604 #endif /* NCOM > 0 */ 605 #endif /* KGDB */ 606 607 #if NKSYMS || defined(DDB) || defined(MODULAR) 608 if ((bi_sym = lookup_bootinfo(BTINFO_SYMTAB)) != NULL) 609 ksyms_addsyms_elf(bi_sym->nsym, (int *)bi_sym->ssym, 610 (int *)bi_sym->esym); 611 else { 612 extern int end; 613 614 ksyms_addsyms_elf(esym - (int)&end, &end, (int*)esym); 615 } 616 #endif 617 618 /* We will shortly go virtual. 
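 * Clearing pagezero_mapped below records that, with translation
 * enabled, page zero is no longer freely accessible; any later code
 * that must touch PAGE0 again has to bracket the access with
 * hppa_pagezero_map()/hppa_pagezero_unmap(), defined further down in
 * this file.  fcacheall() then flushes both caches before the switch.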
*/ 619 pagezero_mapped = 0; 620 fcacheall(); 621 622 pcb0 = lwp_getpcb(&lwp0); 623 pcb0->pcb_fpregs = &lwp0_fpregs; 624 memset(&lwp0_fpregs, 0, sizeof(struct fpreg)); 625 626 pool_init(&hppa_fppl, sizeof(struct fpreg), 16, 0, 0, "fppl", NULL, 627 IPL_NONE); 628 } 629 630 void 631 cpuid(void) 632 { 633 /* 634 * XXX fredette - much of this TLB trap handler setup should 635 * probably be moved here to hppa/hppa/hppa_machdep.c, seeing 636 * that there's related code already in hppa/hppa/trap.S. 637 */ 638 639 /* 640 * Ptrs to various tlb handlers, to be filled 641 * based on CPU features. 642 * from locore.S 643 */ 644 extern u_int trap_ep_T_TLB_DIRTY[]; 645 extern u_int trap_ep_T_DTLBMISS[]; 646 extern u_int trap_ep_T_DTLBMISSNA[]; 647 extern u_int trap_ep_T_ITLBMISS[]; 648 extern u_int trap_ep_T_ITLBMISSNA[]; 649 650 struct pdc_cpuid pdc_cpuid; 651 const struct hppa_cpu_info *p = NULL; 652 const char *model; 653 u_int cpu_version, cpu_features; 654 int error, i; 655 656 /* may the scientific guessing begin */ 657 cpu_type = hpc_unknown; 658 cpu_features = 0; 659 cpu_version = 0; 660 661 /* identify system type */ 662 error = pdcproc_model_info(&pdc_model); 663 if (error < 0) { 664 DPRINTF(("WARNING: PDC_MODEL_INFO error %d\n", error)); 665 666 pdc_model.hwmodel = 0; 667 pdc_model.hv = 0; 668 } else { 669 DPRINTF(("pdc_model.hwmodel/hv %x/%x\n", pdc_model.hwmodel, 670 pdc_model.hv)); 671 } 672 cpu_modelno = pdc_model.hwmodel; 673 model = hppa_mod_info(HPPA_TYPE_BOARD, cpu_modelno); 674 675 DPRINTF(("%s: model %s\n", __func__, model)); 676 677 pdc_settype(cpu_modelno); 678 679 memset(&pdc_cpuid, 0, sizeof(pdc_cpuid)); 680 error = pdcproc_model_cpuid(&pdc_cpuid); 681 if (error < 0) { 682 DPRINTF(("WARNING: PDC_MODEL_CPUID error %d. " 683 "Using cpu_modelno (%#x) based cpu_type.\n", error, cpu_modelno)); 684 685 cpu_type = cpu_model_cpuid(cpu_modelno); 686 if (cpu_type == hpc_unknown) { 687 printf("WARNING: Unknown cpu_type for cpu_modelno %x\n", 688 cpu_modelno); 689 } 690 } else { 691 DPRINTF(("%s: cpuid.version = %x\n", __func__, 692 pdc_cpuid.version)); 693 DPRINTF(("%s: cpuid.revision = %x\n", __func__, 694 pdc_cpuid.revision)); 695 696 cpu_version = pdc_cpuid.version; 697 698 /* XXXNH why? 
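 *
 * (Presumably because early PA8200 firmware reports the PA8000/PCXU
 * CPU version: a PCXU report with a revision above 0x0d is taken to
 * really be a PCXU+, so the cpu_types[] search further down selects
 * the PA8200 entry rather than the PA8000 one.)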
*/ 699 /* patch for old 8200 */ 700 if (pdc_cpuid.version == HPPA_CPU_PCXU && 701 pdc_cpuid.revision > 0x0d) 702 cpu_version = HPPA_CPU_PCXUP; 703 } 704 705 /* locate coprocessors and SFUs */ 706 memset(&pdc_coproc, 0, sizeof(pdc_coproc)); 707 error = pdcproc_coproc(&pdc_coproc); 708 if (error < 0) { 709 DPRINTF(("WARNING: PDC_COPROC error %d\n", error)); 710 pdc_coproc.ccr_enable = 0; 711 } else { 712 DPRINTF(("pdc_coproc: 0x%x, 0x%x; model %x rev %x\n", 713 pdc_coproc.ccr_enable, pdc_coproc.ccr_present, 714 pdc_coproc.fpu_model, pdc_coproc.fpu_revision)); 715 716 /* a kludge to detect PCXW */ 717 if (pdc_coproc.fpu_model == HPPA_FPU_PCXW) 718 cpu_version = HPPA_CPU_PCXW; 719 } 720 mtctl(pdc_coproc.ccr_enable & CCR_MASK, CR_CCR); 721 DPRINTF(("%s: bootstrap fpu\n", __func__)); 722 723 usebtlb = 0; 724 if (cpu_version == HPPA_CPU_PCXW || cpu_version > HPPA_CPU_PCXL2) { 725 DPRINTF(("WARNING: BTLB no supported on cpu %d\n", cpu_version)); 726 } else { 727 728 /* BTLB params */ 729 error = pdcproc_block_tlb(&pdc_btlb); 730 if (error < 0) { 731 DPRINTF(("WARNING: PDC_BTLB error %d\n", error)); 732 } else { 733 DPRINTFN(10, ("btlb info: minsz=%d, maxsz=%d\n", 734 pdc_btlb.min_size, pdc_btlb.max_size)); 735 DPRINTFN(10, ("btlb fixed: i=%d, d=%d, c=%d\n", 736 pdc_btlb.finfo.num_i, 737 pdc_btlb.finfo.num_d, 738 pdc_btlb.finfo.num_c)); 739 DPRINTFN(10, ("btlb varbl: i=%d, d=%d, c=%d\n", 740 pdc_btlb.vinfo.num_i, 741 pdc_btlb.vinfo.num_d, 742 pdc_btlb.vinfo.num_c)); 743 744 /* purge TLBs and caches */ 745 if (pdcproc_btlb_purgeall() < 0) 746 DPRINTFN(10, ("WARNING: BTLB purge failed\n")); 747 748 hppa_btlb_size_min = pdc_btlb.min_size; 749 hppa_btlb_size_max = pdc_btlb.max_size; 750 751 DPRINTF(("hppa_btlb_size_min 0x%x\n", hppa_btlb_size_min)); 752 DPRINTF(("hppa_btlb_size_max 0x%x\n", hppa_btlb_size_max)); 753 754 if (pdc_btlb.finfo.num_c) 755 cpu_features |= HPPA_FTRS_BTLBU; 756 usebtlb = 1; 757 } 758 } 759 usebtlb = 0; 760 761 error = pdcproc_tlb_info(&pdc_hwtlb); 762 if (error == 0 && pdc_hwtlb.min_size != 0 && pdc_hwtlb.max_size != 0) { 763 cpu_features |= HPPA_FTRS_HVT; 764 if (pmap_hptsize > pdc_hwtlb.max_size) 765 pmap_hptsize = pdc_hwtlb.max_size; 766 else if (pmap_hptsize && pmap_hptsize < pdc_hwtlb.min_size) 767 pmap_hptsize = pdc_hwtlb.min_size; 768 769 DPRINTF(("%s: pmap_hptsize 0x%x\n", __func__, pmap_hptsize)); 770 } else { 771 DPRINTF(("WARNING: no HPT support, fine!\n")); 772 773 pmap_hptsize = 0; 774 } 775 776 bool cpu_found = false; 777 if (cpu_version) { 778 DPRINTF(("%s: looking for cpu_version %x\n", __func__, 779 cpu_version)); 780 for (i = 0, p = cpu_types; i < __arraycount(cpu_types); 781 i++, p++) { 782 if (p->hci_cpuversion == cpu_version) { 783 cpu_found = true; 784 break; 785 } 786 } 787 } else if (cpu_type != hpc_unknown) { 788 DPRINTF(("%s: looking for cpu_type %d\n", __func__, 789 cpu_type)); 790 for (i = 0, p = cpu_types; i < __arraycount(cpu_types); 791 i++, p++) { 792 if (p->hci_cputype == cpu_type) { 793 cpu_found = true; 794 break; 795 } 796 } 797 } 798 799 if (!cpu_found) { 800 panic("CPU detection failed. 
Please report the problem."); 801 } 802 803 hppa_cpu_info = p; 804 805 if (hppa_cpu_info->hci_chip_name == NULL) 806 panic("bad model string for 0x%x", pdc_model.hwmodel); 807 808 /* 809 * TODO: HPT on 7200 is not currently supported 810 */ 811 if (pmap_hptsize && p->hci_cputype != hpcxl && p->hci_cputype != hpcxl2) 812 pmap_hptsize = 0; 813 814 cpu_type = hppa_cpu_info->hci_cputype; 815 cpu_ibtlb_ins = hppa_cpu_info->ibtlbins; 816 cpu_dbtlb_ins = hppa_cpu_info->dbtlbins; 817 cpu_hpt_init = hppa_cpu_info->hptinit; 818 cpu_desidhash = hppa_cpu_info->desidhash; 819 820 if (cpu_desidhash) 821 cpu_revision = (*cpu_desidhash)(); 822 else 823 cpu_revision = 0; 824 825 /* force strong ordering for now */ 826 if (hppa_cpu_ispa20_p()) 827 curcpu()->ci_psw |= PSW_O; 828 829 cpu_setmodel("HP9000/%s", model); 830 831 #define LDILDO(t,f) ((t)[0] = (f)[0], (t)[1] = (f)[1]); 832 LDILDO(trap_ep_T_TLB_DIRTY , hppa_cpu_info->tlbdh); 833 LDILDO(trap_ep_T_DTLBMISS , hppa_cpu_info->dtlbh); 834 LDILDO(trap_ep_T_DTLBMISSNA, hppa_cpu_info->dtlbnah); 835 LDILDO(trap_ep_T_ITLBMISS , hppa_cpu_info->itlbh); 836 LDILDO(trap_ep_T_ITLBMISSNA, hppa_cpu_info->itlbnah); 837 #undef LDILDO 838 839 /* Bootstrap any FPU. */ 840 hppa_fpu_bootstrap(pdc_coproc.ccr_enable); 841 } 842 843 enum hppa_cpu_type 844 cpu_model_cpuid(int modelno) 845 { 846 switch (modelno) { 847 /* no supported HP8xx/9xx models with pcx */ 848 case HPPA_BOARD_HP720: 849 case HPPA_BOARD_HP750_66: 850 case HPPA_BOARD_HP730_66: 851 case HPPA_BOARD_HP710: 852 case HPPA_BOARD_HP705: 853 return hpcxs; 854 855 case HPPA_BOARD_HPE23: 856 case HPPA_BOARD_HPE25: 857 case HPPA_BOARD_HPE35: 858 case HPPA_BOARD_HPE45: 859 case HPPA_BOARD_HP712_60: 860 case HPPA_BOARD_HP712_80: 861 case HPPA_BOARD_HP712_100: 862 case HPPA_BOARD_HP715_80: 863 case HPPA_BOARD_HP715_64: 864 case HPPA_BOARD_HP715_100: 865 case HPPA_BOARD_HP715_100XC: 866 case HPPA_BOARD_HP715_100L: 867 case HPPA_BOARD_HP715_120L: 868 case HPPA_BOARD_HP715_80M: 869 return hpcxl; 870 871 case HPPA_BOARD_HP735_99: 872 case HPPA_BOARD_HP755_99: 873 case HPPA_BOARD_HP755_125: 874 case HPPA_BOARD_HP735_130: 875 case HPPA_BOARD_HP715_50: 876 case HPPA_BOARD_HP715_33: 877 case HPPA_BOARD_HP715S_50: 878 case HPPA_BOARD_HP715S_33: 879 case HPPA_BOARD_HP715T_50: 880 case HPPA_BOARD_HP715T_33: 881 case HPPA_BOARD_HP715_75: 882 case HPPA_BOARD_HP715_99: 883 case HPPA_BOARD_HP725_50: 884 case HPPA_BOARD_HP725_75: 885 case HPPA_BOARD_HP725_99: 886 return hpcxt; 887 } 888 return hpc_unknown; 889 } 890 891 void 892 cpu_startup(void) 893 { 894 vaddr_t minaddr, maxaddr; 895 char pbuf[3][9]; 896 897 /* Initialize the message buffer. */ 898 initmsgbuf(msgbufaddr, MSGBUFSIZE); 899 900 /* 901 * i won't understand a friend of mine, 902 * who sat in a room full of artificial ice, 903 * fogging the air w/ humid cries -- 904 * WELCOME TO SUMMER! 905 */ 906 printf("%s%s", copyright, version); 907 908 /* identify system type */ 909 printf("%s\n", cpu_getmodel()); 910 911 /* Display some memory usage information. 
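 *
 * physmem, resvmem and availphysmem are page counts, so ptoa() first
 * converts each to bytes; format_bytes(9) then renders the byte count
 * as a human-readable string (e.g. "128 MB") into pbuf[].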
*/ 912 format_bytes(pbuf[0], sizeof(pbuf[0]), ptoa(physmem)); 913 format_bytes(pbuf[1], sizeof(pbuf[1]), ptoa(resvmem)); 914 format_bytes(pbuf[2], sizeof(pbuf[2]), ptoa(availphysmem)); 915 printf("real mem = %s (%s reserved for PROM, %s used by NetBSD)\n", 916 pbuf[0], pbuf[1], pbuf[2]); 917 918 #ifdef DEBUG 919 if (totalphysmem > physmem) { 920 format_bytes(pbuf[0], sizeof(pbuf[0]), ptoa(totalphysmem - physmem)); 921 DPRINTF(("lost mem = %s\n", pbuf[0])); 922 } 923 #endif 924 925 minaddr = 0; 926 927 /* 928 * Allocate a submap for physio 929 */ 930 phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr, 931 VM_PHYS_SIZE, 0, false, NULL); 932 933 format_bytes(pbuf[0], sizeof(pbuf[0]), ptoa(uvm_availmem(false))); 934 printf("avail mem = %s\n", pbuf[0]); 935 } 936 937 /* 938 * compute CPU clock ratio such as: 939 * cpu_ticksnum / cpu_ticksdenom = t + delta 940 * delta -> 0 941 */ 942 void 943 delay_init(void) 944 { 945 u_int num, denom, delta, mdelta; 946 947 mdelta = UINT_MAX; 948 for (denom = 1; denom < 1000; denom++) { 949 num = (PAGE0->mem_10msec * denom) / 10000; 950 delta = num * 10000 / denom - PAGE0->mem_10msec; 951 if (!delta) { 952 cpu_ticksdenom = denom; 953 cpu_ticksnum = num; 954 break; 955 } else if (delta < mdelta) { 956 cpu_ticksdenom = denom; 957 cpu_ticksnum = num; 958 mdelta = delta; 959 } 960 } 961 } 962 963 void 964 delay(u_int us) 965 { 966 u_int start, end, n; 967 968 mfctl(CR_ITMR, start); 969 while (us) { 970 n = uimin(1000, us); 971 end = start + n * cpu_ticksnum / cpu_ticksdenom; 972 973 /* N.B. Interval Timer may wrap around */ 974 if (end < start) { 975 do { 976 mfctl(CR_ITMR, start); 977 } while (start > end); 978 } 979 980 do 981 mfctl(CR_ITMR, start); 982 while (start < end); 983 984 us -= n; 985 mfctl(CR_ITMR, start); 986 } 987 } 988 989 static inline void 990 fall(int c_base, int c_count, int c_loop, int c_stride, int data) 991 { 992 int loop; 993 994 for (; c_count--; c_base += c_stride) 995 for (loop = c_loop; loop--; ) 996 if (data) 997 fdce(0, c_base); 998 else 999 fice(0, c_base); 1000 } 1001 1002 void 1003 fcacheall(void) 1004 { 1005 /* 1006 * Flush the instruction, then data cache. 
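 *
 * Both passes walk the cache geometry PDC reported in pdc_cache:
 * fall() issues `loop' flush operations at each of `count' addresses
 * spaced `stride' bytes apart, starting at `base'.  Roughly, as a
 * sketch of the loops in fall() above:
 *
 *	for (addr = base; count-- > 0; addr += stride)
 *		for (i = loop; i-- > 0; )
 *			fice(0, addr);	(fdce(0, addr) for the data cache)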
1007 */ 1008 fall(pdc_cache.ic_base, pdc_cache.ic_count, pdc_cache.ic_loop, 1009 pdc_cache.ic_stride, 0); 1010 sync_caches(); 1011 fall(pdc_cache.dc_base, pdc_cache.dc_count, pdc_cache.dc_loop, 1012 pdc_cache.dc_stride, 1); 1013 sync_caches(); 1014 } 1015 1016 void 1017 ptlball(void) 1018 { 1019 pa_space_t sp; 1020 int i, j, k; 1021 1022 /* instruction TLB */ 1023 sp = pdc_cache.it_sp_base; 1024 for (i = 0; i < pdc_cache.it_sp_count; i++) { 1025 vaddr_t off = pdc_cache.it_off_base; 1026 for (j = 0; j < pdc_cache.it_off_count; j++) { 1027 for (k = 0; k < pdc_cache.it_loop; k++) 1028 pitlbe(sp, off); 1029 off += pdc_cache.it_off_stride; 1030 } 1031 sp += pdc_cache.it_sp_stride; 1032 } 1033 1034 /* data TLB */ 1035 sp = pdc_cache.dt_sp_base; 1036 for (i = 0; i < pdc_cache.dt_sp_count; i++) { 1037 vaddr_t off = pdc_cache.dt_off_base; 1038 for (j = 0; j < pdc_cache.dt_off_count; j++) { 1039 for (k = 0; k < pdc_cache.dt_loop; k++) 1040 pdtlbe(sp, off); 1041 off += pdc_cache.dt_off_stride; 1042 } 1043 sp += pdc_cache.dt_sp_stride; 1044 } 1045 } 1046 1047 int 1048 hpti_g(vaddr_t hpt, vsize_t hptsize) 1049 { 1050 1051 return pdcproc_tlb_config(&pdc_hwtlb, hpt, hptsize, PDC_TLB_CURRPDE); 1052 } 1053 1054 int 1055 pbtlb_g(int i) 1056 { 1057 return -1; 1058 } 1059 1060 int 1061 ibtlb_g(int i, pa_space_t sp, vaddr_t va, paddr_t pa, vsize_t sz, u_int prot) 1062 { 1063 int error; 1064 1065 error = pdcproc_btlb_insert(sp, va, pa, sz, prot, i); 1066 if (error < 0) { 1067 #ifdef BTLBDEBUG 1068 DPRINTF(("WARNING: BTLB insert failed (%d)\n", error)); 1069 #endif 1070 } 1071 return error; 1072 } 1073 1074 1075 /* 1076 * This inserts a recorded BTLB slot. 1077 */ 1078 static int _hppa_btlb_insert(struct btlb_slot *); 1079 static int 1080 _hppa_btlb_insert(struct btlb_slot *btlb_slot) 1081 { 1082 int error; 1083 #ifdef MACHDEPDEBUG 1084 const char *prot; 1085 1086 /* Display the protection like a file protection. */ 1087 switch (btlb_slot->btlb_slot_tlbprot & TLB_AR_MASK) { 1088 case TLB_AR_NA: prot = "------"; break; 1089 case TLB_AR_R: prot = "r-----"; break; 1090 case TLB_AR_RW: prot = "rw----"; break; 1091 case TLB_AR_RX: prot = "r-x---"; break; 1092 case TLB_AR_RWX: prot = "rwx---"; break; 1093 case TLB_AR_R | TLB_USER: prot = "r--r--"; break; 1094 case TLB_AR_RW | TLB_USER: prot = "rw-rw-"; break; 1095 case TLB_AR_RX | TLB_USER: prot = "r--r-x"; break; 1096 case TLB_AR_RWX | TLB_USER: prot = "rw-rwx"; break; 1097 default: prot = "??????"; break; 1098 } 1099 1100 DPRINTFN(10, ( 1101 " [ BTLB %d: %s 0x%08x @ 0x%x:0x%08x len 0x%08x prot 0x%08x] ", 1102 btlb_slot->btlb_slot_number, 1103 prot, 1104 (u_int)btlb_slot->btlb_slot_pa_frame << PGSHIFT, 1105 btlb_slot->btlb_slot_va_space, 1106 (u_int)btlb_slot->btlb_slot_va_frame << PGSHIFT, 1107 (u_int)btlb_slot->btlb_slot_frames << PGSHIFT, 1108 btlb_slot->btlb_slot_tlbprot)); 1109 1110 /* 1111 * Non-I/O space mappings are entered by the pmap, 1112 * so we do print a newline to make things look better. 1113 */ 1114 if (btlb_slot->btlb_slot_pa_frame < (HPPA_IOSPACE >> PGSHIFT)) 1115 DPRINTFN(10, ("\n")); 1116 #endif 1117 1118 /* Insert this mapping. */ 1119 error = pdcproc_btlb_insert( 1120 btlb_slot->btlb_slot_va_space, 1121 btlb_slot->btlb_slot_va_frame, 1122 btlb_slot->btlb_slot_pa_frame, 1123 btlb_slot->btlb_slot_frames, 1124 btlb_slot->btlb_slot_tlbprot, 1125 btlb_slot->btlb_slot_number); 1126 if (error < 0) { 1127 #ifdef BTLBDEBUG 1128 DPRINTF(("WARNING: BTLB insert failed (%d)\n", error); 1129 #endif 1130 } 1131 return (error ? 
EINVAL : 0); 1132 } 1133 1134 /* 1135 * This records and inserts a new BTLB entry. 1136 */ 1137 int 1138 hppa_btlb_insert(pa_space_t space, vaddr_t va, paddr_t pa, vsize_t *sizep, 1139 u_int tlbprot) 1140 { 1141 struct btlb_slot *btlb_slot, *btlb_slot_best, *btlb_slot_end; 1142 vsize_t frames; 1143 int error; 1144 int need_dbtlb, need_ibtlb, need_variable_range; 1145 int btlb_slot_score, btlb_slot_best_score; 1146 vsize_t slot_mapped_frames, total_mapped_frames; 1147 1148 /* 1149 * All entries need data translation. Those that 1150 * allow execution also need instruction translation. 1151 */ 1152 switch (tlbprot & TLB_AR_MASK) { 1153 case TLB_AR_R: 1154 case TLB_AR_RW: 1155 case TLB_AR_R | TLB_USER: 1156 case TLB_AR_RW | TLB_USER: 1157 need_dbtlb = true; 1158 need_ibtlb = false; 1159 break; 1160 case TLB_AR_RX: 1161 case TLB_AR_RWX: 1162 case TLB_AR_RX | TLB_USER: 1163 case TLB_AR_RWX | TLB_USER: 1164 need_dbtlb = true; 1165 need_ibtlb = true; 1166 break; 1167 default: 1168 panic("btlb_insert: bad tlbprot"); 1169 } 1170 1171 /* 1172 * If this entry isn't aligned to the size required 1173 * for a fixed-range slot, it requires a variable-range 1174 * slot. This also converts pa and va to page frame 1175 * numbers. 1176 */ 1177 frames = pdc_btlb.min_size << PGSHIFT; 1178 while (frames < *sizep) 1179 frames <<= 1; 1180 frames >>= PGSHIFT; 1181 if (frames > pdc_btlb.max_size) { 1182 #ifdef BTLBDEBUG 1183 DPRINTF(("btlb_insert: too big (%u < %u < %u)\n", 1184 pdc_btlb.min_size, (u_int) frames, pdc_btlb.max_size); 1185 #endif 1186 return -(ENOMEM); 1187 } 1188 pa >>= PGSHIFT; 1189 va >>= PGSHIFT; 1190 need_variable_range = 1191 ((pa & (frames - 1)) != 0 || (va & (frames - 1)) != 0); 1192 1193 /* I/O space must be mapped uncached. */ 1194 if (pa >= HPPA_IOBEGIN) 1195 tlbprot |= TLB_UNCACHEABLE; 1196 1197 /* 1198 * Loop while we still need slots. 1199 */ 1200 btlb_slot_end = btlb_slots + btlb_slots_count; 1201 total_mapped_frames = 0; 1202 btlb_slot_best_score = 0; 1203 while (need_dbtlb || need_ibtlb) { 1204 1205 /* 1206 * Find an applicable slot. 1207 */ 1208 btlb_slot_best = NULL; 1209 for (btlb_slot = btlb_slots; 1210 btlb_slot < btlb_slot_end; 1211 btlb_slot++) { 1212 1213 /* 1214 * Skip this slot if it's in use, or if we need a 1215 * variable-range slot and this isn't one. 1216 */ 1217 if (btlb_slot->btlb_slot_frames != 0 || 1218 (need_variable_range && 1219 !(btlb_slot->btlb_slot_flags & 1220 BTLB_SLOT_VARIABLE_RANGE))) 1221 continue; 1222 1223 /* 1224 * Score this slot. 1225 */ 1226 btlb_slot_score = 0; 1227 if (need_dbtlb && 1228 (btlb_slot->btlb_slot_flags & BTLB_SLOT_DBTLB)) 1229 btlb_slot_score++; 1230 if (need_ibtlb && 1231 (btlb_slot->btlb_slot_flags & BTLB_SLOT_IBTLB)) 1232 btlb_slot_score++; 1233 1234 /* 1235 * Update the best slot. 1236 */ 1237 if (btlb_slot_score > 0 && 1238 (btlb_slot_best == NULL || 1239 btlb_slot_score > btlb_slot_best_score)) { 1240 btlb_slot_best = btlb_slot; 1241 btlb_slot_best_score = btlb_slot_score; 1242 } 1243 } 1244 1245 /* 1246 * If there were no applicable slots. 1247 */ 1248 if (btlb_slot_best == NULL) { 1249 DPRINTFN(10, ("BTLB full\n")); 1250 return -(ENOMEM); 1251 } 1252 1253 /* 1254 * Now fill this BTLB slot record and insert the entry. 
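 * (btlb_slot_best was picked by the scoring loop above: a free slot
 * scores one point for each still-needed translation type it can
 * hold, so when both instruction and data translation are required a
 * combined I/D slot scores 2 and beats a plain IBTLB or DBTLB slot.
 * If no combined slot is free, the enclosing while loop simply runs
 * again and fills a second slot for the remaining type.)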
1255 */ 1256 if (btlb_slot->btlb_slot_flags & BTLB_SLOT_VARIABLE_RANGE) 1257 slot_mapped_frames = ((*sizep + PGOFSET) >> PGSHIFT); 1258 else 1259 slot_mapped_frames = frames; 1260 if (slot_mapped_frames > total_mapped_frames) 1261 total_mapped_frames = slot_mapped_frames; 1262 btlb_slot = btlb_slot_best; 1263 btlb_slot->btlb_slot_va_space = space; 1264 btlb_slot->btlb_slot_va_frame = va; 1265 btlb_slot->btlb_slot_pa_frame = pa; 1266 btlb_slot->btlb_slot_tlbprot = tlbprot; 1267 btlb_slot->btlb_slot_frames = slot_mapped_frames; 1268 error = _hppa_btlb_insert(btlb_slot); 1269 if (error) 1270 return -error; 1271 /* 1272 * Note what slots we no longer need. 1273 */ 1274 if (btlb_slot->btlb_slot_flags & BTLB_SLOT_DBTLB) 1275 need_dbtlb = false; 1276 if (btlb_slot->btlb_slot_flags & BTLB_SLOT_IBTLB) 1277 need_ibtlb = false; 1278 } 1279 1280 /* Success. */ 1281 *sizep = (total_mapped_frames << PGSHIFT); 1282 return 0; 1283 } 1284 1285 /* 1286 * This reloads the BTLB in the event that it becomes invalidated. 1287 */ 1288 int 1289 hppa_btlb_reload(void) 1290 { 1291 struct btlb_slot *btlb_slot, *btlb_slot_end; 1292 int error; 1293 1294 /* Insert all recorded BTLB entries. */ 1295 btlb_slot = btlb_slots; 1296 btlb_slot_end = btlb_slots + btlb_slots_count; 1297 error = 0; 1298 while (error == 0 && btlb_slot < btlb_slot_end) { 1299 if (btlb_slot->btlb_slot_frames != 0) 1300 error = _hppa_btlb_insert(btlb_slot); 1301 btlb_slot++; 1302 } 1303 DPRINTF(("\n")); 1304 return (error); 1305 } 1306 1307 /* 1308 * This purges a BTLB entry. 1309 */ 1310 int 1311 hppa_btlb_purge(pa_space_t space, vaddr_t va, vsize_t *sizep) 1312 { 1313 struct btlb_slot *btlb_slot, *btlb_slot_end; 1314 int error; 1315 1316 /* 1317 * Purge all slots that map this virtual address. 1318 */ 1319 error = ENOENT; 1320 va >>= PGSHIFT; 1321 btlb_slot_end = btlb_slots + btlb_slots_count; 1322 for (btlb_slot = btlb_slots; 1323 btlb_slot < btlb_slot_end; 1324 btlb_slot++) { 1325 if (btlb_slot->btlb_slot_frames != 0 && 1326 btlb_slot->btlb_slot_va_space == space && 1327 btlb_slot->btlb_slot_va_frame == va) { 1328 error = pdcproc_btlb_purge( 1329 btlb_slot->btlb_slot_va_space, 1330 btlb_slot->btlb_slot_va_frame, 1331 btlb_slot->btlb_slot_number, 1332 btlb_slot->btlb_slot_frames); 1333 if (error < 0) { 1334 DPRINTFN(10, ("WARNING: BTLB purge failed (%d)\n", 1335 error)); 1336 1337 return (error); 1338 } 1339 1340 /* 1341 * Tell our caller how many bytes were mapped 1342 * by this slot, then free the slot. 1343 */ 1344 *sizep = (btlb_slot->btlb_slot_frames << PGSHIFT); 1345 btlb_slot->btlb_slot_frames = 0; 1346 } 1347 } 1348 return (error); 1349 } 1350 1351 /* 1352 * This maps page zero if it isn't already mapped, and 1353 * returns a cookie for hppa_pagezero_unmap. 1354 */ 1355 int 1356 hppa_pagezero_map(void) 1357 { 1358 int was_mapped_before; 1359 int s; 1360 1361 was_mapped_before = pagezero_mapped; 1362 if (!was_mapped_before) { 1363 s = splhigh(); 1364 pmap_kenter_pa(0, 0, VM_PROT_ALL, 0); 1365 pagezero_mapped = 1; 1366 splx(s); 1367 } 1368 return (was_mapped_before); 1369 } 1370 1371 /* 1372 * This unmaps mape zero, given a cookie previously returned 1373 * by hppa_pagezero_map. 
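 * Typical use of the map/unmap pair is, as a minimal sketch:
 *
 *	int s = hppa_pagezero_map();
 *	... access low memory, e.g. PAGE0 fields ...
 *	hppa_pagezero_unmap(s);
 *
 * so a nested user leaves page zero mapped exactly when it already
 * was mapped on entry.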
1374 */ 1375 void 1376 hppa_pagezero_unmap(int was_mapped_before) 1377 { 1378 int s; 1379 1380 if (!was_mapped_before) { 1381 s = splhigh(); 1382 pmap_kremove(0, PAGE_SIZE); 1383 pagezero_mapped = 0; 1384 splx(s); 1385 } 1386 } 1387 1388 int waittime = -1; 1389 1390 __dead void 1391 cpu_reboot(int howto, char *user_boot_string) 1392 { 1393 boothowto = howto | (boothowto & RB_HALT); 1394 1395 if (!(howto & RB_NOSYNC) && waittime < 0) { 1396 waittime = 0; 1397 vfs_shutdown(); 1398 } 1399 1400 /* XXX probably save howto into stable storage */ 1401 1402 /* Disable interrupts. */ 1403 splhigh(); 1404 1405 /* Make a crash dump. */ 1406 if (howto & RB_DUMP) 1407 dumpsys(); 1408 1409 /* Run any shutdown hooks. */ 1410 doshutdownhooks(); 1411 1412 pmf_system_shutdown(boothowto); 1413 1414 /* in case we came on powerfail interrupt */ 1415 if (cold_hook) 1416 (*cold_hook)(HPPA_COLD_COLD); 1417 1418 hppa_led_ctl(0xf, 0, 0); 1419 1420 if (howto & RB_HALT) { 1421 if ((howto & RB_POWERDOWN) == RB_POWERDOWN && cold_hook) { 1422 printf("Powering off..."); 1423 DELAY(1000000); 1424 (*cold_hook)(HPPA_COLD_OFF); 1425 DELAY(1000000); 1426 } 1427 1428 printf("System halted!\n"); 1429 DELAY(1000000); 1430 __asm volatile("stwas %0, 0(%1)" 1431 :: "r" (CMD_STOP), "r" (LBCAST_ADDR + iomod_command)); 1432 } else { 1433 printf("rebooting..."); 1434 DELAY(1000000); 1435 __asm volatile("stwas %0, 0(%1)" 1436 :: "r" (CMD_RESET), "r" (LBCAST_ADDR + iomod_command)); 1437 1438 /* ask firmware to reset */ 1439 pdcproc_doreset(); 1440 /* forcably reset module if that fails */ 1441 __asm __volatile("stwas %0, 0(%1)" 1442 :: "r" (CMD_RESET), "r" (HPPA_LBCAST + iomod_command)); 1443 } 1444 1445 for (;;) { 1446 /* 1447 * loop while bus reset is coming up. This NOP instruction 1448 * is used by qemu to detect the 'death loop'. 1449 */ 1450 __asm volatile("or %%r31, %%r31, %%r31" ::: "memory"); 1451 } 1452 /* NOTREACHED */ 1453 } 1454 1455 uint32_t dumpmag = 0x8fca0101; /* magic number */ 1456 int dumpsize = 0; /* pages */ 1457 long dumplo = 0; /* blocks */ 1458 1459 /* 1460 * cpu_dumpsize: calculate size of machine-dependent kernel core dump headers. 1461 */ 1462 int 1463 cpu_dumpsize(void) 1464 { 1465 int size; 1466 1467 size = ALIGN(sizeof(kcore_seg_t)) + ALIGN(sizeof(cpu_kcore_hdr_t)); 1468 if (roundup(size, dbtob(1)) != dbtob(1)) 1469 return -1; 1470 1471 return 1; 1472 } 1473 1474 /* 1475 * This handles a machine check. This can be either an HPMC, 1476 * an LPMC, or a TOC. The check type is passed in as a trap 1477 * type, one of T_HPMC, T_LPMC, or T_INTERRUPT (for TOC). 
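 * (HPMC is a high-priority machine check, LPMC a low-priority machine
 * check, and TOC a transfer of control, typically raised from the TOC
 * button or the system console.)  The handler fetches the PIM
 * (Processor Internal Memory) data through PDC, dumps it, and panics
 * on the first check so that a crash dump can be taken.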
1478 */ 1479 static char in_check = 0; 1480 1481 #define PIM_WORD(name, word, bits) \ 1482 do { \ 1483 snprintb(bitmask_buffer, sizeof(bitmask_buffer),\ 1484 bits, word); \ 1485 printf("%s %s", name, bitmask_buffer); \ 1486 } while (/* CONSTCOND */ 0) 1487 1488 1489 static inline void 1490 hppa_pim_dump(int check_type, void *data, size_t size) 1491 { 1492 struct hppa_pim_hpmc *hpmc; 1493 struct hppa_pim_lpmc *lpmc; 1494 struct hppa_pim_toc *toc; 1495 struct hppa_pim_regs *regs; 1496 struct hppa_pim_checks *checks; 1497 u_int *regarray; 1498 int reg_i, reg_j, reg_k; 1499 char bitmask_buffer[64]; 1500 const char *name; 1501 1502 regs = NULL; 1503 checks = NULL; 1504 switch (check_type) { 1505 case T_HPMC: 1506 hpmc = (struct hppa_pim_hpmc *) data; 1507 regs = &hpmc->pim_hpmc_regs; 1508 checks = &hpmc->pim_hpmc_checks; 1509 break; 1510 case T_LPMC: 1511 lpmc = (struct hppa_pim_lpmc *) data; 1512 checks = &lpmc->pim_lpmc_checks; 1513 break; 1514 case T_INTERRUPT: 1515 toc = (struct hppa_pim_toc *) data; 1516 regs = &toc->pim_toc_regs; 1517 break; 1518 default: 1519 panic("unknown machine check type"); 1520 /* NOTREACHED */ 1521 } 1522 1523 /* If we have register arrays, display them. */ 1524 if (regs != NULL) { 1525 for (reg_i = 0; reg_i < 3; reg_i++) { 1526 if (reg_i == 0) { 1527 name = "General"; 1528 regarray = ®s->pim_regs_r0; 1529 reg_j = 32; 1530 } else if (reg_i == 1) { 1531 name = "Control"; 1532 regarray = ®s->pim_regs_cr0; 1533 reg_j = 32; 1534 } else { 1535 name = "Space"; 1536 regarray = ®s->pim_regs_sr0; 1537 reg_j = 8; 1538 } 1539 printf("\n\n\t%s Registers:", name); 1540 for (reg_k = 0; reg_k < reg_j; reg_k++) 1541 printf("%s0x%08x", 1542 (reg_k & 3) ? " " : "\n", 1543 regarray[reg_k]); 1544 } 1545 1546 /* Print out some interesting registers. */ 1547 printf("\n\n\tIIA head 0x%x:0x%08x\n" 1548 "\tIIA tail 0x%x:0x%08x", 1549 regs->pim_regs_cr17, regs->pim_regs_cr18, 1550 regs->pim_regs_iisq_tail, regs->pim_regs_iioq_tail); 1551 PIM_WORD("\n\tIPSW", regs->pim_regs_cr22, PSW_BITS); 1552 printf("\n\tSP 0x%x:0x%08x FP 0x%x:0x%08x", 1553 regs->pim_regs_sr0, regs->pim_regs_r30, 1554 regs->pim_regs_sr0, regs->pim_regs_r3); 1555 } 1556 1557 /* If we have check words, display them. 
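 * Each word is printed through the PIM_WORD() macro above, i.e.
 * snprintb(9) with a bit-description string such as PIM_CHECK_BITS,
 * so set bits appear by name.  As a generic illustration (not the
 * real PSW_BITS string):
 *
 *	snprintb(buf, sizeof(buf), "\20\2BITTWO\1BITONE", 3)
 *
 * is expected to fill buf with "0x3<BITTWO,BITONE>".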
*/ 1558 if (checks != NULL) { 1559 PIM_WORD("\n\n\tCheck Type", checks->pim_check_type, 1560 PIM_CHECK_BITS); 1561 PIM_WORD("\n\tCPU State", checks->pim_check_cpu_state, 1562 PIM_CPU_HPMC_BITS); 1563 PIM_WORD("\n\tCache Check", checks->pim_check_cache, 1564 PIM_CACHE_BITS); 1565 PIM_WORD("\n\tTLB Check", checks->pim_check_tlb, 1566 PIM_TLB_BITS); 1567 PIM_WORD("\n\tBus Check", checks->pim_check_bus, 1568 PIM_BUS_BITS); 1569 PIM_WORD("\n\tAssist Check", checks->pim_check_assist, 1570 PIM_ASSIST_BITS); 1571 printf("\tAssist State %u", checks->pim_check_assist_state); 1572 printf("\n\tSystem Responder 0x%08x", 1573 checks->pim_check_responder); 1574 printf("\n\tSystem Requestor 0x%08x", 1575 checks->pim_check_requestor); 1576 printf("\n\tPath Info 0x%08x", 1577 checks->pim_check_path_info); 1578 } 1579 } 1580 1581 static inline void 1582 hppa_pim64_dump(int check_type, void *data, size_t size) 1583 { 1584 struct hppa_pim64_hpmc *hpmc; 1585 struct hppa_pim64_lpmc *lpmc; 1586 struct hppa_pim64_toc *toc; 1587 struct hppa_pim64_regs *regs; 1588 struct hppa_pim64_checks *checks; 1589 int reg_i, reg_j, reg_k; 1590 uint64_t *regarray; 1591 char bitmask_buffer[64]; 1592 const char *name; 1593 1594 regs = NULL; 1595 checks = NULL; 1596 switch (check_type) { 1597 case T_HPMC: 1598 hpmc = (struct hppa_pim64_hpmc *) data; 1599 regs = &hpmc->pim_hpmc_regs; 1600 checks = &hpmc->pim_hpmc_checks; 1601 break; 1602 case T_LPMC: 1603 lpmc = (struct hppa_pim64_lpmc *) data; 1604 checks = &lpmc->pim_lpmc_checks; 1605 break; 1606 case T_INTERRUPT: 1607 toc = (struct hppa_pim64_toc *) data; 1608 regs = &toc->pim_toc_regs; 1609 break; 1610 default: 1611 panic("unknown machine check type"); 1612 /* NOTREACHED */ 1613 } 1614 1615 /* If we have register arrays, display them. */ 1616 if (regs != NULL) { 1617 for (reg_i = 0; reg_i < 3; reg_i++) { 1618 if (reg_i == 0) { 1619 name = "General"; 1620 regarray = ®s->pim_regs_r0; 1621 reg_j = 32; 1622 } else if (reg_i == 1) { 1623 name = "Control"; 1624 regarray = ®s->pim_regs_cr0; 1625 reg_j = 32; 1626 } else { 1627 name = "Space"; 1628 regarray = ®s->pim_regs_sr0; 1629 reg_j = 8; 1630 } 1631 printf("\n\n%s Registers:", name); 1632 for (reg_k = 0; reg_k < reg_j; reg_k++) 1633 printf("%s0x%016lx", 1634 (reg_k & 3) ? " " : "\n", 1635 (unsigned long)regarray[reg_k]); 1636 } 1637 1638 /* Print out some interesting registers. */ 1639 printf("\n\nIIA head 0x%lx:0x%016lx\n" 1640 "IIA tail 0x%lx:0x%016lx", 1641 (unsigned long)regs->pim_regs_cr17, 1642 (unsigned long)regs->pim_regs_cr18, 1643 (unsigned long)regs->pim_regs_iisq_tail, 1644 (unsigned long)regs->pim_regs_iioq_tail); 1645 PIM_WORD("\nIPSW", regs->pim_regs_cr22, PSW_BITS); 1646 printf("\nSP 0x%lx:0x%016lx\nFP 0x%lx:0x%016lx", 1647 (unsigned long)regs->pim_regs_sr0, 1648 (unsigned long)regs->pim_regs_r30, 1649 (unsigned long)regs->pim_regs_sr0, 1650 (unsigned long)regs->pim_regs_r3); 1651 } 1652 1653 /* If we have check words, display them. 
*/ 1654 if (checks != NULL) { 1655 PIM_WORD("\n\nCheck Type", checks->pim_check_type, 1656 PIM_CHECK_BITS); 1657 PIM_WORD("\nCPU State", checks->pim_check_cpu_state, 1658 PIM_CPU_BITS PIM_CPU_HPMC_BITS); 1659 PIM_WORD("\nCache Check", checks->pim_check_cache, 1660 PIM_CACHE_BITS); 1661 PIM_WORD("\nTLB Check", checks->pim_check_tlb, 1662 PIM_TLB_BITS); 1663 PIM_WORD("\nBus Check", checks->pim_check_bus, 1664 PIM_BUS_BITS); 1665 PIM_WORD("\nAssist Check", checks->pim_check_assist, 1666 PIM_ASSIST_BITS); 1667 printf("\nAssist State %u", checks->pim_check_assist_state); 1668 printf("\nSystem Responder 0x%016lx", 1669 (unsigned long)checks->pim_check_responder); 1670 printf("\nSystem Requestor 0x%016lx", 1671 (unsigned long)checks->pim_check_requestor); 1672 printf("\nPath Info 0x%08x", 1673 checks->pim_check_path_info); 1674 } 1675 } 1676 1677 void 1678 hppa_machine_check(int check_type) 1679 { 1680 int pdc_pim_type; 1681 const char *name; 1682 int pimerror, error; 1683 void *data; 1684 size_t size; 1685 1686 /* Do an fcacheall(). */ 1687 fcacheall(); 1688 1689 /* Dispatch on the check type. */ 1690 switch (check_type) { 1691 case T_HPMC: 1692 name = "HPMC"; 1693 pdc_pim_type = PDC_PIM_HPMC; 1694 break; 1695 case T_LPMC: 1696 name = "LPMC"; 1697 pdc_pim_type = PDC_PIM_LPMC; 1698 break; 1699 case T_INTERRUPT: 1700 name = "TOC"; 1701 pdc_pim_type = PDC_PIM_TOC; 1702 break; 1703 default: 1704 panic("unknown machine check type"); 1705 /* NOTREACHED */ 1706 } 1707 1708 pimerror = pdcproc_pim(pdc_pim_type, &pdc_pim, &data, &size); 1709 1710 KASSERT(pdc_pim.count <= size); 1711 1712 /* 1713 * Reset IO and log errors. 1714 * 1715 * This seems to be needed in order to output to the console 1716 * if we take a HPMC interrupt. This PDC procedure may not be 1717 * implemented by some machines. 1718 */ 1719 error = pdcproc_ioclrerrors(); 1720 if (error != PDC_ERR_OK && error != PDC_ERR_NOPROC) 1721 /* This seems futile if we can't print to the console. */ 1722 panic("PDC_IO failed"); 1723 1724 printf("\nmachine check: %s", name); 1725 1726 if (pimerror < 0) { 1727 printf(" - WARNING: could not transfer PIM info (%d)", pimerror); 1728 } else { 1729 if (hppa_cpu_ispa20_p()) 1730 hppa_pim64_dump(check_type, data, size); 1731 else 1732 hppa_pim_dump(check_type, data, size); 1733 } 1734 1735 printf("\n"); 1736 1737 /* If this is our first check, panic. */ 1738 if (in_check == 0) { 1739 in_check = 1; 1740 DELAY(250000); 1741 panic("machine check"); 1742 } 1743 1744 /* Reboot the machine. */ 1745 printf("Rebooting...\n"); 1746 cpu_die(); 1747 } 1748 1749 int 1750 cpu_dump(void) 1751 { 1752 long buf[dbtob(1) / sizeof (long)]; 1753 kcore_seg_t *segp; 1754 cpu_kcore_hdr_t *cpuhdrp __unused; 1755 const struct bdevsw *bdev; 1756 1757 segp = (kcore_seg_t *)buf; 1758 cpuhdrp = (cpu_kcore_hdr_t *)&buf[ALIGN(sizeof(*segp)) / sizeof (long)]; 1759 1760 /* 1761 * Generate a segment header. 1762 */ 1763 CORE_SETMAGIC(*segp, KCORE_MAGIC, MID_MACHINE, CORE_CPU); 1764 segp->c_size = dbtob(1) - ALIGN(sizeof(*segp)); 1765 1766 /* 1767 * Add the machine-dependent header info 1768 */ 1769 /* nothing for now */ 1770 1771 bdev = bdevsw_lookup(dumpdev); 1772 if (bdev == NULL) 1773 return (-1); 1774 1775 return (*bdev->d_dump)(dumpdev, dumplo, (void *)buf, dbtob(1)); 1776 } 1777 1778 /* 1779 * Dump the kernel's image to the swap partition. 
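 * The on-disk layout produced by cpu_dump() and dumpsys() below is:
 *
 *	dumplo                  : kcore_seg_t + cpu_kcore_hdr_t header block
 *	dumplo + cpu_dumpsize() : physical memory, written out
 *	                          BYTES_PER_DUMP bytes per transfer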
1780 */ 1781 #define BYTES_PER_DUMP PAGE_SIZE 1782 1783 void 1784 dumpsys(void) 1785 { 1786 const struct bdevsw *bdev; 1787 int psize, bytes, i, n; 1788 char *maddr; 1789 daddr_t blkno; 1790 int (*dump)(dev_t, daddr_t, void *, size_t); 1791 int error; 1792 1793 if (dumpdev == NODEV) 1794 return; 1795 bdev = bdevsw_lookup(dumpdev); 1796 if (bdev == NULL) 1797 return; 1798 1799 /* Save registers 1800 savectx(&dumppcb); */ 1801 1802 if (dumpsize == 0) 1803 cpu_dumpconf(); 1804 if (dumplo <= 0) { 1805 printf("\ndump to dev %u,%u not possible\n", 1806 major(dumpdev), minor(dumpdev)); 1807 return; 1808 } 1809 printf("\ndumping to dev %u,%u offset %ld\n", 1810 major(dumpdev), minor(dumpdev), dumplo); 1811 1812 psize = bdev_size(dumpdev); 1813 printf("dump "); 1814 if (psize == -1) { 1815 printf("area unavailable\n"); 1816 return; 1817 } 1818 1819 if (!(error = cpu_dump())) { 1820 1821 /* XXX fredette - this is way broken: */ 1822 bytes = ctob(physmem); 1823 maddr = NULL; 1824 blkno = dumplo + cpu_dumpsize(); 1825 dump = bdev->d_dump; 1826 /* TODO block map the whole physical memory */ 1827 for (i = 0; i < bytes; i += n) { 1828 1829 /* Print out how many MBs we are to go. */ 1830 n = bytes - i; 1831 if (n && (n % (1024*1024)) == 0) 1832 printf_nolog("%d ", n / (1024 * 1024)); 1833 1834 /* Limit size for next transfer. */ 1835 1836 if (n > BYTES_PER_DUMP) 1837 n = BYTES_PER_DUMP; 1838 1839 if ((error = (*dump)(dumpdev, blkno, maddr, n))) 1840 break; 1841 maddr += n; 1842 blkno += btodb(n); 1843 } 1844 } 1845 1846 switch (error) { 1847 case ENXIO: printf("device bad\n"); break; 1848 case EFAULT: printf("device not ready\n"); break; 1849 case EINVAL: printf("area improper\n"); break; 1850 case EIO: printf("i/o error\n"); break; 1851 case EINTR: printf("aborted from console\n"); break; 1852 case 0: printf("succeeded\n"); break; 1853 default: printf("error %d\n", error); break; 1854 } 1855 } 1856 1857 void 1858 hppa_setvmspace(struct lwp *l) 1859 { 1860 struct proc *p = l->l_proc; 1861 struct trapframe *tf = l->l_md.md_regs; 1862 pmap_t pmap = p->p_vmspace->vm_map.pmap; 1863 pa_space_t space = pmap->pm_space; 1864 1865 if (p->p_md.md_flags & MDP_OLDSPACE) { 1866 tf->tf_sr7 = HPPA_SID_KERNEL; 1867 } else { 1868 tf->tf_sr7 = space; 1869 } 1870 1871 tf->tf_sr2 = HPPA_SID_KERNEL; 1872 1873 /* Load all of the user's space registers. */ 1874 tf->tf_sr0 = tf->tf_sr1 = tf->tf_sr3 = 1875 tf->tf_sr4 = tf->tf_sr5 = tf->tf_sr6 = 1876 tf->tf_iisq_head = tf->tf_iisq_tail = space; 1877 1878 /* Load the protection registers. */ 1879 tf->tf_pidr1 = tf->tf_pidr2 = pmap->pm_pid; 1880 } 1881 1882 /* 1883 * Set registers on exec. 1884 */ 1885 void 1886 setregs(struct lwp *l, struct exec_package *pack, vaddr_t stack) 1887 { 1888 struct proc *p = l->l_proc; 1889 struct trapframe *tf = l->l_md.md_regs; 1890 struct pcb *pcb = lwp_getpcb(l); 1891 1892 memset(tf, 0, sizeof(*tf)); 1893 1894 /* 1895 * Initialize the External Interrupt Enable Mask, Processor 1896 * Status Word, and NetBSD's floating-point register area 1897 * pointer to the correct defaults for a user process. 1898 * 1899 * XXXMPSAFE If curcpu()->ci_eiem can vary from CPU to CPU, we 1900 * have bigger problems here -- if the lwp is migrated from one 1901 * CPU to another CPU between when the trapframe is saved and 1902 * when the trapframe is restored, it might be invalidated. 1903 */ 1904 tf->tf_eiem = curcpu()->ci_eiem; 1905 tf->tf_ipsw = PSW_MBS | (hppa_cpu_ispa20_p() ? 
PSW_O : 0); 1906 tf->tf_cr30 = (u_int)pcb->pcb_fpregs; 1907 1908 tf->tf_flags = TFF_SYS|TFF_LAST; 1909 tf->tf_iioq_tail = 4 + 1910 (tf->tf_iioq_head = pack->ep_entry | HPPA_PC_PRIV_USER); 1911 tf->tf_rp = 0; 1912 tf->tf_arg0 = p->p_psstrp; 1913 tf->tf_arg1 = tf->tf_arg2 = 0; /* XXX dynload stuff */ 1914 1915 if (pack->ep_osversion < 699003600) { 1916 p->p_md.md_flags |= MDP_OLDSPACE; 1917 } else { 1918 p->p_md.md_flags = 0; 1919 } 1920 1921 hppa_setvmspace(l); 1922 1923 /* reset any of the pending FPU exceptions */ 1924 hppa_fpu_flush(l); 1925 memset(pcb->pcb_fpregs, 0, sizeof(*pcb->pcb_fpregs)); 1926 pcb->pcb_fpregs->fpr_regs[0] = ((uint64_t)HPPA_FPU_INIT) << 32; 1927 pcb->pcb_fpregs->fpr_regs[1] = 0; 1928 pcb->pcb_fpregs->fpr_regs[2] = 0; 1929 pcb->pcb_fpregs->fpr_regs[3] = 0; 1930 1931 l->l_md.md_bpva = 0; 1932 1933 /* setup terminal stack frame */ 1934 stack = (u_long)STACK_ALIGN(stack, 63); 1935 tf->tf_r3 = stack; 1936 ustore_long((void *)(stack), 0); 1937 stack += HPPA_FRAME_SIZE; 1938 ustore_long((void *)(stack + HPPA_FRAME_PSP), 0); 1939 tf->tf_sp = stack; 1940 } 1941 1942 /* 1943 * machine dependent system variables. 1944 */ 1945 static int 1946 sysctl_machdep_boot(SYSCTLFN_ARGS) 1947 { 1948 struct sysctlnode node = *rnode; 1949 struct btinfo_kernelfile *bi_file; 1950 const char *cp = NULL; 1951 1952 switch (node.sysctl_num) { 1953 case CPU_BOOTED_KERNEL: 1954 if ((bi_file = lookup_bootinfo(BTINFO_KERNELFILE)) != NULL) 1955 cp = bi_file->name; 1956 if (cp != NULL && cp[0] == '\0') 1957 cp = "netbsd"; 1958 break; 1959 default: 1960 return (EINVAL); 1961 } 1962 1963 if (cp == NULL || cp[0] == '\0') 1964 return (ENOENT); 1965 1966 node.sysctl_data = __UNCONST(cp); 1967 node.sysctl_size = strlen(cp) + 1; 1968 return (sysctl_lookup(SYSCTLFN_CALL(&node))); 1969 } 1970 1971 #if NLCD > 0 1972 static int 1973 sysctl_machdep_heartbeat(SYSCTLFN_ARGS) 1974 { 1975 int error; 1976 bool oldval; 1977 struct sysctlnode node = *rnode; 1978 1979 oldval = lcd_blink_p; 1980 /* 1981 * If we were false and are now true, start the timer. 1982 */ 1983 error = sysctl_lookup(SYSCTLFN_CALL(&node)); 1984 1985 if (error || newp == NULL) 1986 return (error); 1987 1988 if (!oldval && lcd_blink_p) 1989 blink_lcd_timeout(NULL); 1990 1991 return 0; 1992 } 1993 #endif 1994 1995 /* 1996 * machine dependent system variables. 1997 */ 1998 SYSCTL_SETUP(sysctl_machdep_setup, "sysctl machdep subtree setup") 1999 { 2000 2001 sysctl_createv(clog, 0, NULL, NULL, 2002 CTLFLAG_PERMANENT, 2003 CTLTYPE_NODE, "machdep", NULL, 2004 NULL, 0, NULL, 0, 2005 CTL_MACHDEP, CTL_EOL); 2006 2007 sysctl_createv(clog, 0, NULL, NULL, 2008 CTLFLAG_PERMANENT, 2009 CTLTYPE_STRUCT, "console_device", NULL, 2010 sysctl_consdev, 0, NULL, sizeof(dev_t), 2011 CTL_MACHDEP, CPU_CONSDEV, CTL_EOL); 2012 2013 sysctl_createv(clog, 0, NULL, NULL, 2014 CTLFLAG_PERMANENT, 2015 CTLTYPE_STRING, "booted_kernel", NULL, 2016 sysctl_machdep_boot, 0, NULL, 0, 2017 CTL_MACHDEP, CPU_BOOTED_KERNEL, CTL_EOL); 2018 #if NLCD > 0 2019 sysctl_createv(clog, 0, NULL, NULL, 2020 CTLFLAG_PERMANENT|CTLFLAG_READWRITE, 2021 CTLTYPE_BOOL, "lcd_blink", "Display heartbeat on the LCD display", 2022 sysctl_machdep_heartbeat, 0, &lcd_blink_p, 0, 2023 CTL_MACHDEP, CPU_LCD_BLINK, CTL_EOL); 2024 #endif 2025 } 2026 2027 /* 2028 * Given the type of a bootinfo entry, looks for a matching item inside 2029 * the bootinfo structure. If found, returns a pointer to it (which must 2030 * then be casted to the appropriate bootinfo_* type); otherwise, returns 2031 * NULL. 
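 * For example, as already done for the symbol table in hppa_init():
 *
 *	struct btinfo_symtab *bi_sym;
 *
 *	if ((bi_sym = lookup_bootinfo(BTINFO_SYMTAB)) != NULL)
 *		... use bi_sym->nsym, bi_sym->ssym, bi_sym->esym ...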
2032 */ 2033 void * 2034 lookup_bootinfo(int type) 2035 { 2036 struct btinfo_common *bic; 2037 int i; 2038 2039 bic = (struct btinfo_common *)(&bootinfo.bi_data[0]); 2040 for (i = 0; i < bootinfo.bi_nentries; i++) 2041 if (bic->type == type) 2042 return bic; 2043 else 2044 bic = (struct btinfo_common *) 2045 ((uint8_t *)bic + bic->len); 2046 2047 return NULL; 2048 } 2049 2050 /* 2051 * consinit: 2052 * initialize the system console. 2053 */ 2054 void 2055 consinit(void) 2056 { 2057 static int initted = 0; 2058 2059 if (!initted) { 2060 initted++; 2061 cninit(); 2062 } 2063 } 2064 2065 #if NLCD > 0 2066 struct blink_lcd_softc { 2067 SLIST_HEAD(, blink_lcd) bls_head; 2068 int bls_on; 2069 struct callout bls_to; 2070 } blink_sc = { 2071 .bls_head = SLIST_HEAD_INITIALIZER(bls_head) 2072 }; 2073 2074 void 2075 blink_lcd_register(struct blink_lcd *l) 2076 { 2077 if (SLIST_EMPTY(&blink_sc.bls_head)) { 2078 callout_init(&blink_sc.bls_to, 0); 2079 callout_setfunc(&blink_sc.bls_to, blink_lcd_timeout, &blink_sc); 2080 blink_sc.bls_on = 0; 2081 if (lcd_blink_p) 2082 callout_schedule(&blink_sc.bls_to, 1); 2083 } 2084 SLIST_INSERT_HEAD(&blink_sc.bls_head, l, bl_next); 2085 } 2086 2087 void 2088 blink_lcd_timeout(void *vsc) 2089 { 2090 struct blink_lcd_softc *sc = &blink_sc; 2091 struct blink_lcd *l; 2092 int t; 2093 2094 if (SLIST_EMPTY(&sc->bls_head)) 2095 return; 2096 2097 SLIST_FOREACH(l, &sc->bls_head, bl_next) { 2098 (*l->bl_func)(l->bl_arg, sc->bls_on); 2099 } 2100 sc->bls_on = !sc->bls_on; 2101 2102 if (!lcd_blink_p) 2103 return; 2104 2105 /* 2106 * Blink rate is: 2107 * full cycle every second if completely idle (loadav = 0) 2108 * full cycle every 2 seconds if loadav = 1 2109 * full cycle every 3 seconds if loadav = 2 2110 * etc. 2111 */ 2112 t = (((averunnable.ldavg[0] + FSCALE) * hz) >> (FSHIFT + 1)); 2113 callout_schedule(&sc->bls_to, t); 2114 } 2115 #endif 2116 2117 #ifdef MODULAR 2118 /* 2119 * Push any modules loaded by the boot loader. 2120 */ 2121 void 2122 module_init_md(void) 2123 { 2124 } 2125 #endif /* MODULAR */ 2126 2127 bool 2128 mm_md_direct_mapped_phys(paddr_t paddr, vaddr_t *vaddr) 2129 { 2130 2131 if (atop(paddr) > physmem) { 2132 return false; 2133 } 2134 *vaddr = paddr; 2135 2136 return true; 2137 } 2138 2139 int 2140 mm_md_physacc(paddr_t pa, vm_prot_t prot) 2141 { 2142 2143 return (atop(pa) > physmem) ? EFAULT : 0; 2144 } 2145 2146 int 2147 mm_md_kernacc(void *ptr, vm_prot_t prot, bool *handled) 2148 { 2149 extern int kernel_text; 2150 extern int __data_start; 2151 extern int end; 2152 2153 const vaddr_t ksro = (vaddr_t) &kernel_text; 2154 const vaddr_t ksrw = (vaddr_t) &__data_start; 2155 const vaddr_t kend = (vaddr_t) end; 2156 const vaddr_t v = (vaddr_t)ptr; 2157 2158 *handled = false; 2159 if (v >= ksro && v < kend) { 2160 *handled = true; 2161 if (v < ksrw && (prot & VM_PROT_WRITE)) { 2162 return EFAULT; 2163 } 2164 } else if (v >= kend && atop((paddr_t)v) < physmem) { 2165 *handled = true; 2166 } 2167 2168 return 0; 2169 } 2170
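/*
 * A summary of the ranges mm_md_kernacc() above distinguishes (a
 * reading aid only, not a change of policy):
 *
 *	[kernel_text, __data_start)  text/rodata: handled, writes refused
 *	[__data_start, end)          data/bss:    handled, read/write ok
 *	[end, ptoa(physmem))         direct-mapped RAM: handled, allowed
 *	anything else                not handled here; left to the caller
 */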