1 /* $NetBSD: machdep.c,v 1.383 2025/04/25 00:59:26 riastradh Exp $ */ 2 3 /*- 4 * Copyright (c) 1998, 1999, 2000, 2019, 2020 The NetBSD Foundation, Inc. 5 * All rights reserved. 6 * 7 * This code is derived from software contributed to The NetBSD Foundation 8 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility, 9 * NASA Ames Research Center and by Chris G. Demetriou. 10 * 11 * Redistribution and use in source and binary forms, with or without 12 * modification, are permitted provided that the following conditions 13 * are met: 14 * 1. Redistributions of source code must retain the above copyright 15 * notice, this list of conditions and the following disclaimer. 16 * 2. Redistributions in binary form must reproduce the above copyright 17 * notice, this list of conditions and the following disclaimer in the 18 * documentation and/or other materials provided with the distribution. 19 * 20 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 21 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 22 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 23 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 30 * POSSIBILITY OF SUCH DAMAGE. 31 */ 32 33 /* 34 * Copyright (c) 1994, 1995, 1996 Carnegie-Mellon University. 35 * All rights reserved. 36 * 37 * Author: Chris G. Demetriou 38 * 39 * Permission to use, copy, modify and distribute this software and 40 * its documentation is hereby granted, provided that both the copyright 41 * notice and this permission notice appear in all copies of the 42 * software, derivative works or modified versions, and any portions 43 * thereof, and that both notices appear in supporting documentation. 44 * 45 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" 46 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND 47 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 48 * 49 * Carnegie Mellon requests users of this software to return to 50 * 51 * Software Distribution Coordinator or Software.Distribution (at) CS.CMU.EDU 52 * School of Computer Science 53 * Carnegie Mellon University 54 * Pittsburgh PA 15213-3890 55 * 56 * any improvements or extensions that they make and grant Carnegie the 57 * rights to redistribute these changes. 58 */ 59 60 #include "opt_ddb.h" 61 #include "opt_kgdb.h" 62 #include "opt_modular.h" 63 #include "opt_multiprocessor.h" 64 #include "opt_dec_3000_300.h" 65 #include "opt_dec_3000_500.h" 66 #include "opt_execfmt.h" 67 68 #define __RWLOCK_PRIVATE 69 70 #include <sys/cdefs.h> /* RCS ID & Copyright macro defns */ 71 72 __KERNEL_RCSID(0, "$NetBSD: machdep.c,v 1.383 2025/04/25 00:59:26 riastradh Exp $"); 73 74 #include <sys/param.h> 75 #include <sys/systm.h> 76 #include <sys/signalvar.h> 77 #include <sys/kernel.h> 78 #include <sys/cpu.h> 79 #include <sys/proc.h> 80 #include <sys/ras.h> 81 #include <sys/sched.h> 82 #include <sys/reboot.h> 83 #include <sys/device.h> 84 #include <sys/module.h> 85 #include <sys/mman.h> 86 #include <sys/msgbuf.h> 87 #include <sys/ioctl.h> 88 #include <sys/tty.h> 89 #include <sys/exec.h> 90 #include <sys/exec_aout.h> /* for MID_* */ 91 #include <sys/exec_ecoff.h> 92 #include <sys/core.h> 93 #include <sys/kcore.h> 94 #include <sys/ucontext.h> 95 #include <sys/conf.h> 96 #include <sys/ksyms.h> 97 #include <sys/kauth.h> 98 #include <sys/atomic.h> 99 #include <sys/cpu.h> 100 #include <sys/rwlock.h> 101 102 #include <machine/kcore.h> 103 #include <machine/fpu.h> 104 105 #include <sys/mount.h> 106 #include <sys/syscallargs.h> 107 108 #include <uvm/uvm.h> 109 #include <sys/sysctl.h> 110 111 #include <dev/cons.h> 112 #include <dev/mm.h> 113 114 #include <machine/autoconf.h> 115 #include <machine/reg.h> 116 #include <machine/rpb.h> 117 #include <machine/prom.h> 118 #include <machine/cpuconf.h> 119 #include <machine/ieeefp.h> 120 121 #ifdef DDB 122 #include <machine/db_machdep.h> 123 #include <ddb/db_access.h> 124 #include <ddb/db_sym.h> 125 #include <ddb/db_extern.h> 126 #include <ddb/db_interface.h> 127 #endif 128 129 #ifdef KGDB 130 #include <sys/kgdb.h> 131 #endif 132 133 #ifdef DEBUG 134 #include <machine/sigdebug.h> 135 int sigdebug = 0x0; 136 int sigpid = 0; 137 #endif 138 139 /* Assert some assumptions made in lock_stubs.s */ 140 __CTASSERT(RW_READER == 0); 141 __CTASSERT(RW_HAS_WAITERS == 1); 142 143 #include <machine/alpha.h> 144 145 #include "ksyms.h" 146 147 struct vm_map *phys_map = NULL; 148 149 void *msgbufaddr; 150 151 int maxmem; /* max memory per process */ 152 153 int totalphysmem; /* total amount of physical memory in system */ 154 int resvmem; /* amount of memory reserved for PROM */ 155 int unusedmem; /* amount of memory for OS that we don't use */ 156 int unknownmem; /* amount of memory with an unknown use */ 157 158 int cputype; /* system type, from the RPB */ 159 bool alpha_is_qemu; /* true if we've detected running in qemu */ 160 161 int bootdev_debug = 0; /* patchable, or from DDB */ 162 163 /* 164 * XXX We need an address to which we can assign things so that they 165 * won't be optimized away because we didn't use the value. 166 */ 167 uint32_t no_optimize; 168 169 /* the following is used externally (sysctl_hw) */ 170 char machine[] = MACHINE; /* from <machine/param.h> */ 171 char machine_arch[] = MACHINE_ARCH; /* from <machine/param.h> */ 172 173 /* Number of machine cycles per microsecond */ 174 uint64_t cycles_per_usec; 175 176 /* number of CPUs in the box. really! */ 177 int ncpus; 178 179 struct bootinfo_kernel bootinfo; 180 181 /* For built-in TCDS */ 182 #if defined(DEC_3000_300) || defined(DEC_3000_500) 183 uint8_t dec_3000_scsiid[3], dec_3000_scsifast[3]; 184 #endif 185 186 struct platform platform; 187 188 #if NKSYMS || defined(DDB) || defined(MODULAR) 189 /* start and end of kernel symbol table */ 190 void *ksym_start, *ksym_end; 191 #endif 192 193 /* for cpu_sysctl() */ 194 int alpha_unaligned_print = 1; /* warn about unaligned accesses */ 195 int alpha_unaligned_fix = 1; /* fix up unaligned accesses */ 196 int alpha_unaligned_sigbus = 0; /* don't SIGBUS on fixed-up accesses */ 197 int alpha_fp_sync_complete = 0; /* fp fixup if sync even without /s */ 198 int alpha_fp_complete_debug = 0; /* fp completion debug enabled */ 199 200 /* 201 * XXX This should be dynamically sized, but we have the chicken-egg problem! 202 * XXX it should also be larger than it is, because not all of the mddt 203 * XXX clusters end up being used for VM. 204 */ 205 phys_ram_seg_t mem_clusters[VM_PHYSSEG_MAX]; /* low size bits overloaded */ 206 int mem_cluster_cnt; 207 208 int cpu_dump(void); 209 int cpu_dumpsize(void); 210 u_long cpu_dump_mempagecnt(void); 211 void dumpsys(void); 212 void identifycpu(void); 213 void printregs(struct reg *); 214 215 const pcu_ops_t fpu_ops = { 216 .pcu_id = PCU_FPU, 217 .pcu_state_load = fpu_state_load, 218 .pcu_state_save = fpu_state_save, 219 .pcu_state_release = fpu_state_release, 220 }; 221 222 const pcu_ops_t * const pcu_ops_md_defs[PCU_UNIT_COUNT] = { 223 [PCU_FPU] = &fpu_ops, 224 }; 225 226 static void 227 alpha_page_physload(unsigned long const start_pfn, unsigned long const end_pfn) 228 { 229 230 /* 231 * Some Alpha platforms may have unique requirements about 232 * how physical memory is managed (e.g. reserving memory 233 * ranges due to lack of SGMAP DMA). 234 */ 235 if (platform.page_physload != NULL) { 236 (*platform.page_physload)(start_pfn, end_pfn); 237 return; 238 } 239 240 uvm_page_physload(start_pfn, end_pfn, start_pfn, end_pfn, 241 VM_FREELIST_DEFAULT); 242 } 243 244 void 245 alpha_page_physload_sheltered(unsigned long const start_pfn, 246 unsigned long const end_pfn, unsigned long const shelter_start_pfn, 247 unsigned long const shelter_end_pfn) 248 { 249 250 /* 251 * If the added region ends before or starts after the sheltered 252 * region, then it just goes on the default freelist. 253 */ 254 if (end_pfn <= shelter_start_pfn || start_pfn >= shelter_end_pfn) { 255 uvm_page_physload(start_pfn, end_pfn, 256 start_pfn, end_pfn, VM_FREELIST_DEFAULT); 257 return; 258 } 259 260 /* 261 * Load any portion that comes before the sheltered region. 262 */ 263 if (start_pfn < shelter_start_pfn) { 264 KASSERT(end_pfn > shelter_start_pfn); 265 uvm_page_physload(start_pfn, shelter_start_pfn, 266 start_pfn, shelter_start_pfn, VM_FREELIST_DEFAULT); 267 } 268 269 /* 270 * Load the portion that overlaps that sheltered region. 271 */ 272 const unsigned long ov_start = MAX(start_pfn, shelter_start_pfn); 273 const unsigned long ov_end = MIN(end_pfn, shelter_end_pfn); 274 KASSERT(ov_start >= shelter_start_pfn); 275 KASSERT(ov_end <= shelter_end_pfn); 276 uvm_page_physload(ov_start, ov_end, ov_start, ov_end, 277 VM_FREELIST_SHELTERED); 278 279 /* 280 * Load any portion that comes after the sheltered region. 281 */ 282 if (end_pfn > shelter_end_pfn) { 283 KASSERT(start_pfn < shelter_end_pfn); 284 uvm_page_physload(shelter_end_pfn, end_pfn, 285 shelter_end_pfn, end_pfn, VM_FREELIST_DEFAULT); 286 } 287 } 288 289 void 290 alpha_init(u_long xxx_pfn __unused, u_long ptb, u_long bim, u_long bip, 291 u_long biv) 292 /* pfn: first free PFN number (no longer used) */ 293 /* ptb: PFN of current level 1 page table */ 294 /* bim: bootinfo magic */ 295 /* bip: bootinfo pointer */ 296 /* biv: bootinfo version */ 297 { 298 extern char kernel_text[], _end[]; 299 struct mddt *mddtp; 300 struct mddt_cluster *memc; 301 int i, mddtweird; 302 struct pcb *pcb0; 303 vaddr_t kernstart, kernend, v; 304 paddr_t kernstartpfn, kernendpfn, pfn0, pfn1; 305 cpuid_t cpu_id; 306 struct cpu_info *ci; 307 char *p; 308 const char *bootinfo_msg; 309 const struct cpuinit *c; 310 311 /* NO OUTPUT ALLOWED UNTIL FURTHER NOTICE */ 312 313 /* 314 * Turn off interrupts (not mchecks) and floating point. 315 * Make sure the instruction and data streams are consistent. 316 */ 317 (void)alpha_pal_swpipl(ALPHA_PSL_IPL_HIGH); 318 alpha_pal_wrfen(0); 319 ALPHA_TBIA(); 320 alpha_pal_imb(); 321 322 /* Initialize the SCB. */ 323 scb_init(); 324 325 cpu_id = cpu_number(); 326 327 ci = &cpu_info_primary; 328 ci->ci_cpuid = cpu_id; 329 330 #if defined(MULTIPROCESSOR) 331 /* 332 * Set the SysValue to &lwp0, after making sure that lwp0 333 * is pointing at the primary CPU. Secondary processors do 334 * this in their spinup trampoline. 335 */ 336 lwp0.l_cpu = ci; 337 cpu_info[cpu_id] = ci; 338 alpha_pal_wrval((u_long)&lwp0); 339 #endif 340 341 /* 342 * Get critical system information (if possible, from the 343 * information provided by the boot program). 344 */ 345 bootinfo_msg = NULL; 346 if (bim == BOOTINFO_MAGIC) { 347 if (biv == 0) { /* backward compat */ 348 biv = *(u_long *)bip; 349 bip += 8; 350 } 351 switch (biv) { 352 case 1: { 353 struct bootinfo_v1 *v1p = (struct bootinfo_v1 *)bip; 354 355 bootinfo.ssym = v1p->ssym; 356 bootinfo.esym = v1p->esym; 357 /* hwrpb may not be provided by boot block in v1 */ 358 if (v1p->hwrpb != NULL) { 359 bootinfo.hwrpb_phys = 360 ((struct rpb *)v1p->hwrpb)->rpb_phys; 361 bootinfo.hwrpb_size = v1p->hwrpbsize; 362 } else { 363 bootinfo.hwrpb_phys = 364 ((struct rpb *)HWRPB_ADDR)->rpb_phys; 365 bootinfo.hwrpb_size = 366 ((struct rpb *)HWRPB_ADDR)->rpb_size; 367 } 368 memcpy(bootinfo.boot_flags, v1p->boot_flags, 369 uimin(sizeof v1p->boot_flags, 370 sizeof bootinfo.boot_flags)); 371 memcpy(bootinfo.booted_kernel, v1p->booted_kernel, 372 uimin(sizeof v1p->booted_kernel, 373 sizeof bootinfo.booted_kernel)); 374 /* booted dev not provided in bootinfo */ 375 init_prom_interface(ptb, (struct rpb *) 376 ALPHA_PHYS_TO_K0SEG(bootinfo.hwrpb_phys)); 377 prom_getenv(PROM_E_BOOTED_DEV, bootinfo.booted_dev, 378 sizeof bootinfo.booted_dev); 379 break; 380 } 381 default: 382 bootinfo_msg = "unknown bootinfo version"; 383 goto nobootinfo; 384 } 385 } else { 386 bootinfo_msg = "boot program did not pass bootinfo"; 387 nobootinfo: 388 bootinfo.ssym = (u_long)_end; 389 bootinfo.esym = (u_long)_end; 390 bootinfo.hwrpb_phys = ((struct rpb *)HWRPB_ADDR)->rpb_phys; 391 bootinfo.hwrpb_size = ((struct rpb *)HWRPB_ADDR)->rpb_size; 392 init_prom_interface(ptb, (struct rpb *)HWRPB_ADDR); 393 if (alpha_is_qemu) { 394 /* 395 * Grab boot flags from kernel command line. 396 * Assume autoboot if not supplied. 397 */ 398 if (! prom_qemu_getenv("flags", bootinfo.boot_flags, 399 sizeof(bootinfo.boot_flags))) { 400 strlcpy(bootinfo.boot_flags, "A", 401 sizeof(bootinfo.boot_flags)); 402 } 403 } else { 404 prom_getenv(PROM_E_BOOTED_OSFLAGS, bootinfo.boot_flags, 405 sizeof bootinfo.boot_flags); 406 prom_getenv(PROM_E_BOOTED_FILE, bootinfo.booted_kernel, 407 sizeof bootinfo.booted_kernel); 408 prom_getenv(PROM_E_BOOTED_DEV, bootinfo.booted_dev, 409 sizeof bootinfo.booted_dev); 410 } 411 } 412 413 /* 414 * Initialize the kernel's mapping of the RPB. It's needed for 415 * lots of things. 416 */ 417 hwrpb = (struct rpb *)ALPHA_PHYS_TO_K0SEG(bootinfo.hwrpb_phys); 418 419 #if defined(DEC_3000_300) || defined(DEC_3000_500) 420 if (hwrpb->rpb_type == ST_DEC_3000_300 || 421 hwrpb->rpb_type == ST_DEC_3000_500) { 422 prom_getenv(PROM_E_SCSIID, dec_3000_scsiid, 423 sizeof(dec_3000_scsiid)); 424 prom_getenv(PROM_E_SCSIFAST, dec_3000_scsifast, 425 sizeof(dec_3000_scsifast)); 426 } 427 #endif 428 429 /* 430 * Remember how many cycles there are per microsecond, 431 * so that we can use delay(). Round up, for safety. 432 */ 433 cycles_per_usec = (hwrpb->rpb_cc_freq + 999999) / 1000000; 434 435 /* 436 * Initialize the (temporary) bootstrap console interface, so 437 * we can use printf until the VM system starts being setup. 438 * The real console is initialized before then. 439 */ 440 init_bootstrap_console(); 441 442 /* OUTPUT NOW ALLOWED */ 443 444 /* delayed from above */ 445 if (bootinfo_msg) 446 printf("WARNING: %s (0x%lx, 0x%lx, 0x%lx)\n", 447 bootinfo_msg, bim, bip, biv); 448 449 /* Initialize the trap vectors on the primary processor. */ 450 trap_init(); 451 452 /* 453 * Find out this system's page size, and initialize 454 * PAGE_SIZE-dependent variables. 455 */ 456 if (hwrpb->rpb_page_size != ALPHA_PGBYTES) 457 panic("page size %lu != %d?!", hwrpb->rpb_page_size, 458 ALPHA_PGBYTES); 459 uvmexp.pagesize = hwrpb->rpb_page_size; 460 uvm_md_init(); 461 462 /* 463 * cputype has been initialized in init_prom_interface(). 464 * Perform basic platform initialization using this info. 465 */ 466 KASSERT(prom_interface_initialized); 467 c = platform_lookup(cputype); 468 if (c == NULL) { 469 platform_not_supported(); 470 /* NOTREACHED */ 471 } 472 (*c->init)(); 473 cpu_setmodel("%s", platform.model); 474 475 /* 476 * Initialize the real console, so that the bootstrap console is 477 * no longer necessary. 478 */ 479 (*platform.cons_init)(); 480 481 #ifdef DIAGNOSTIC 482 /* Paranoid sanity checking */ 483 484 /* We should always be running on the primary. */ 485 assert(hwrpb->rpb_primary_cpu_id == cpu_id); 486 487 /* 488 * On single-CPU systypes, the primary should always be CPU 0, 489 * except on Alpha 8200 systems where the CPU id is related 490 * to the VID, which is related to the Turbo Laser node id. 491 */ 492 if (cputype != ST_DEC_21000) 493 assert(hwrpb->rpb_primary_cpu_id == 0); 494 #endif 495 496 /* NO MORE FIRMWARE ACCESS ALLOWED */ 497 /* XXX Unless prom_uses_prom_console() evaluates to non-zero.) */ 498 499 /* 500 * Find the beginning and end of the kernel (and leave a 501 * bit of space before the beginning for the bootstrap 502 * stack). 503 */ 504 kernstart = trunc_page((vaddr_t)kernel_text) - 2 * PAGE_SIZE; 505 #if NKSYMS || defined(DDB) || defined(MODULAR) 506 ksym_start = (void *)bootinfo.ssym; 507 ksym_end = (void *)bootinfo.esym; 508 kernend = (vaddr_t)round_page((vaddr_t)ksym_end); 509 #else 510 kernend = (vaddr_t)round_page((vaddr_t)_end); 511 #endif 512 513 kernstartpfn = atop(ALPHA_K0SEG_TO_PHYS(kernstart)); 514 kernendpfn = atop(ALPHA_K0SEG_TO_PHYS(kernend)); 515 516 /* 517 * Find out how much memory is available, by looking at 518 * the memory cluster descriptors. This also tries to do 519 * its best to detect things things that have never been seen 520 * before... 521 */ 522 mddtp = (struct mddt *)(((char *)hwrpb) + hwrpb->rpb_memdat_off); 523 524 /* MDDT SANITY CHECKING */ 525 mddtweird = 0; 526 if (mddtp->mddt_cluster_cnt < 2) { 527 mddtweird = 1; 528 printf("WARNING: weird number of mem clusters: %lu\n", 529 mddtp->mddt_cluster_cnt); 530 } 531 532 #if 0 533 printf("Memory cluster count: %" PRIu64 "\n", mddtp->mddt_cluster_cnt); 534 #endif 535 536 for (i = 0; i < mddtp->mddt_cluster_cnt; i++) { 537 memc = &mddtp->mddt_clusters[i]; 538 #if 0 539 printf("MEMC %d: pfn 0x%lx cnt 0x%lx usage 0x%lx\n", i, 540 memc->mddt_pfn, memc->mddt_pg_cnt, memc->mddt_usage); 541 #endif 542 totalphysmem += memc->mddt_pg_cnt; 543 if (mem_cluster_cnt < VM_PHYSSEG_MAX) { /* XXX */ 544 mem_clusters[mem_cluster_cnt].start = 545 ptoa(memc->mddt_pfn); 546 mem_clusters[mem_cluster_cnt].size = 547 ptoa(memc->mddt_pg_cnt); 548 if (memc->mddt_usage & MDDT_mbz || 549 memc->mddt_usage & MDDT_NONVOLATILE || /* XXX */ 550 memc->mddt_usage & MDDT_PALCODE) 551 mem_clusters[mem_cluster_cnt].size |= 552 PROT_READ; 553 else 554 mem_clusters[mem_cluster_cnt].size |= 555 PROT_READ | PROT_WRITE | PROT_EXEC; 556 mem_cluster_cnt++; 557 } 558 559 if (memc->mddt_usage & MDDT_mbz) { 560 mddtweird = 1; 561 printf("WARNING: mem cluster %d has weird " 562 "usage 0x%lx\n", i, memc->mddt_usage); 563 unknownmem += memc->mddt_pg_cnt; 564 continue; 565 } 566 if (memc->mddt_usage & MDDT_NONVOLATILE) { 567 /* XXX should handle these... */ 568 printf("WARNING: skipping non-volatile mem " 569 "cluster %d\n", i); 570 unusedmem += memc->mddt_pg_cnt; 571 continue; 572 } 573 if (memc->mddt_usage & MDDT_PALCODE) { 574 resvmem += memc->mddt_pg_cnt; 575 continue; 576 } 577 578 /* 579 * We have a memory cluster available for system 580 * software use. We must determine if this cluster 581 * holds the kernel. 582 */ 583 584 /* 585 * XXX If the kernel uses the PROM console, we only use the 586 * XXX memory after the kernel in the first system segment, 587 * XXX to avoid clobbering prom mapping, data, etc. 588 */ 589 physmem += memc->mddt_pg_cnt; 590 pfn0 = memc->mddt_pfn; 591 pfn1 = memc->mddt_pfn + memc->mddt_pg_cnt; 592 if (pfn0 <= kernstartpfn && kernendpfn <= pfn1) { 593 /* 594 * Must compute the location of the kernel 595 * within the segment. 596 */ 597 #if 0 598 printf("Cluster %d contains kernel\n", i); 599 #endif 600 if (pfn0 < kernstartpfn && !prom_uses_prom_console()) { 601 /* 602 * There is a chunk before the kernel. 603 */ 604 #if 0 605 printf("Loading chunk before kernel: " 606 "0x%lx / 0x%lx\n", pfn0, kernstartpfn); 607 #endif 608 alpha_page_physload(pfn0, kernstartpfn); 609 } 610 if (kernendpfn < pfn1) { 611 /* 612 * There is a chunk after the kernel. 613 */ 614 #if 0 615 printf("Loading chunk after kernel: " 616 "0x%lx / 0x%lx\n", kernendpfn, pfn1); 617 #endif 618 alpha_page_physload(kernendpfn, pfn1); 619 } 620 } else { 621 /* 622 * Just load this cluster as one chunk. 623 */ 624 #if 0 625 printf("Loading cluster %d: 0x%lx / 0x%lx\n", i, 626 pfn0, pfn1); 627 #endif 628 alpha_page_physload(pfn0, pfn1); 629 } 630 } 631 632 /* 633 * Dump out the MDDT if it looks odd... 634 */ 635 if (mddtweird) { 636 printf("\n"); 637 printf("complete memory cluster information:\n"); 638 for (i = 0; i < mddtp->mddt_cluster_cnt; i++) { 639 printf("mddt %d:\n", i); 640 printf("\tpfn %lx\n", 641 mddtp->mddt_clusters[i].mddt_pfn); 642 printf("\tcnt %lx\n", 643 mddtp->mddt_clusters[i].mddt_pg_cnt); 644 printf("\ttest %lx\n", 645 mddtp->mddt_clusters[i].mddt_pg_test); 646 printf("\tbva %lx\n", 647 mddtp->mddt_clusters[i].mddt_v_bitaddr); 648 printf("\tbpa %lx\n", 649 mddtp->mddt_clusters[i].mddt_p_bitaddr); 650 printf("\tbcksum %lx\n", 651 mddtp->mddt_clusters[i].mddt_bit_cksum); 652 printf("\tusage %lx\n", 653 mddtp->mddt_clusters[i].mddt_usage); 654 } 655 printf("\n"); 656 } 657 658 if (totalphysmem == 0) 659 panic("can't happen: system seems to have no memory!"); 660 maxmem = physmem; 661 #if 0 662 printf("totalphysmem = %d\n", totalphysmem); 663 printf("physmem = %lu\n", physmem); 664 printf("resvmem = %d\n", resvmem); 665 printf("unusedmem = %d\n", unusedmem); 666 printf("unknownmem = %d\n", unknownmem); 667 #endif 668 669 /* 670 * Initialize error message buffer (at end of core). 671 */ 672 { 673 paddr_t end; 674 vsize_t sz = (vsize_t)round_page(MSGBUFSIZE); 675 vsize_t reqsz = sz; 676 uvm_physseg_t bank; 677 678 bank = uvm_physseg_get_last(); 679 680 /* shrink so that it'll fit in the last segment */ 681 if (uvm_physseg_get_avail_end(bank) - uvm_physseg_get_avail_start(bank) < atop(sz)) 682 sz = ptoa(uvm_physseg_get_avail_end(bank) - uvm_physseg_get_avail_start(bank)); 683 684 end = uvm_physseg_get_end(bank); 685 end -= atop(sz); 686 687 uvm_physseg_unplug(end, atop(sz)); 688 msgbufaddr = (void *) ALPHA_PHYS_TO_K0SEG(ptoa(end)); 689 690 initmsgbuf(msgbufaddr, sz); 691 692 /* warn if the message buffer had to be shrunk */ 693 if (sz != reqsz) 694 printf("WARNING: %ld bytes not available for msgbuf " 695 "in last cluster (%ld used)\n", reqsz, sz); 696 697 } 698 699 /* 700 * NOTE: It is safe to use uvm_pageboot_alloc() before 701 * pmap_bootstrap() because our pmap_virtual_space() 702 * returns compile-time constants. 703 */ 704 705 /* 706 * Allocate uarea page for lwp0 and set it. 707 */ 708 v = uvm_pageboot_alloc(UPAGES * PAGE_SIZE); 709 uvm_lwp_setuarea(&lwp0, v); 710 711 /* 712 * Initialize the virtual memory system, and set the 713 * page table base register in proc 0's PCB. 714 */ 715 pmap_bootstrap(ALPHA_PHYS_TO_K0SEG(ptb << PGSHIFT), 716 hwrpb->rpb_max_asn, hwrpb->rpb_pcs_cnt); 717 718 /* 719 * Initialize the rest of lwp0's PCB and cache its physical address. 720 */ 721 pcb0 = lwp_getpcb(&lwp0); 722 lwp0.l_md.md_pcbpaddr = (void *)ALPHA_K0SEG_TO_PHYS((vaddr_t)pcb0); 723 724 /* 725 * Set the kernel sp, reserving space for an (empty) trapframe, 726 * and make lwp0's trapframe pointer point to it for sanity. 727 */ 728 pcb0->pcb_hw.apcb_ksp = v + USPACE - sizeof(struct trapframe); 729 lwp0.l_md.md_tf = (struct trapframe *)pcb0->pcb_hw.apcb_ksp; 730 731 /* Indicate that lwp0 has a CPU. */ 732 lwp0.l_cpu = ci; 733 734 /* 735 * Look at arguments passed to us and compute boothowto. 736 */ 737 738 boothowto = RB_SINGLE; 739 #ifdef KADB 740 boothowto |= RB_KDB; 741 #endif 742 for (p = bootinfo.boot_flags; p && *p != '\0'; p++) { 743 /* 744 * Note that we'd really like to differentiate case here, 745 * but the Alpha AXP Architecture Reference Manual 746 * says that we shouldn't. 747 */ 748 switch (*p) { 749 case 'a': /* autoboot */ 750 case 'A': 751 boothowto &= ~RB_SINGLE; 752 break; 753 754 #ifdef DEBUG 755 case 'c': /* crash dump immediately after autoconfig */ 756 case 'C': 757 boothowto |= RB_DUMP; 758 break; 759 #endif 760 761 #if defined(KGDB) || defined(DDB) 762 case 'd': /* break into the kernel debugger ASAP */ 763 case 'D': 764 boothowto |= RB_KDB; 765 break; 766 #endif 767 768 case 'h': /* always halt, never reboot */ 769 case 'H': 770 boothowto |= RB_HALT; 771 break; 772 773 #if 0 774 case 'm': /* mini root present in memory */ 775 case 'M': 776 boothowto |= RB_MINIROOT; 777 break; 778 #endif 779 780 case 'n': /* askname */ 781 case 'N': 782 boothowto |= RB_ASKNAME; 783 break; 784 785 case 's': /* single-user (default, supported for sanity) */ 786 case 'S': 787 boothowto |= RB_SINGLE; 788 break; 789 790 case 'q': /* quiet boot */ 791 case 'Q': 792 boothowto |= AB_QUIET; 793 break; 794 795 case 'v': /* verbose boot */ 796 case 'V': 797 boothowto |= AB_VERBOSE; 798 break; 799 800 case 'x': /* debug messages */ 801 case 'X': 802 boothowto |= AB_DEBUG; 803 break; 804 805 case '-': 806 /* 807 * Just ignore this. It's not required, but it's 808 * common for it to be passed regardless. 809 */ 810 break; 811 812 default: 813 printf("Unrecognized boot flag '%c'.\n", *p); 814 break; 815 } 816 } 817 818 /* 819 * Perform any initial kernel patches based on the running system. 820 * We may perform more later if we attach additional CPUs. 821 */ 822 alpha_patch(false); 823 824 /* 825 * Figure out the number of CPUs in the box, from RPB fields. 826 * Really. We mean it. 827 */ 828 for (i = 0; i < hwrpb->rpb_pcs_cnt; i++) { 829 struct pcs *pcsp; 830 831 pcsp = LOCATE_PCS(hwrpb, i); 832 if ((pcsp->pcs_flags & PCS_PP) != 0) 833 ncpus++; 834 } 835 836 /* 837 * Initialize debuggers, and break into them if appropriate. 838 */ 839 #if NKSYMS || defined(DDB) || defined(MODULAR) 840 ksyms_addsyms_elf((int)((uint64_t)ksym_end - (uint64_t)ksym_start), 841 ksym_start, ksym_end); 842 #endif 843 844 if (boothowto & RB_KDB) { 845 #if defined(KGDB) 846 kgdb_debug_init = 1; 847 kgdb_connect(1); 848 #elif defined(DDB) 849 Debugger(); 850 #endif 851 } 852 853 #ifdef DIAGNOSTIC 854 /* 855 * Check our clock frequency, from RPB fields. 856 */ 857 if ((hwrpb->rpb_intr_freq >> 12) != 1024) 858 printf("WARNING: unbelievable rpb_intr_freq: %ld (%d hz)\n", 859 hwrpb->rpb_intr_freq, hz); 860 #endif 861 } 862 863 #ifdef MODULAR 864 /* Push any modules loaded by the boot loader */ 865 void 866 module_init_md(void) 867 { 868 /* nada. */ 869 } 870 #endif /* MODULAR */ 871 872 void 873 consinit(void) 874 { 875 876 /* 877 * Everything related to console initialization is done 878 * in alpha_init(). 879 */ 880 #if defined(DIAGNOSTIC) && defined(_PROM_MAY_USE_PROM_CONSOLE) 881 printf("consinit: %susing prom console\n", 882 prom_uses_prom_console() ? "" : "not "); 883 #endif 884 } 885 886 void 887 cpu_startup(void) 888 { 889 vaddr_t minaddr, maxaddr; 890 char pbuf[9]; 891 #if defined(DEBUG) 892 extern int pmapdebug; 893 int opmapdebug = pmapdebug; 894 895 pmapdebug = 0; 896 #endif 897 898 /* 899 * Good {morning,afternoon,evening,night}. 900 */ 901 printf("%s%s", copyright, version); 902 identifycpu(); 903 format_bytes(pbuf, sizeof(pbuf), ptoa(totalphysmem)); 904 printf("total memory = %s\n", pbuf); 905 format_bytes(pbuf, sizeof(pbuf), ptoa(resvmem)); 906 printf("(%s reserved for PROM, ", pbuf); 907 format_bytes(pbuf, sizeof(pbuf), ptoa(physmem)); 908 printf("%s used by NetBSD)\n", pbuf); 909 if (unusedmem) { 910 format_bytes(pbuf, sizeof(pbuf), ptoa(unusedmem)); 911 printf("WARNING: unused memory = %s\n", pbuf); 912 } 913 if (unknownmem) { 914 format_bytes(pbuf, sizeof(pbuf), ptoa(unknownmem)); 915 printf("WARNING: %s of memory with unknown purpose\n", pbuf); 916 } 917 918 minaddr = 0; 919 920 /* 921 * Allocate a submap for physio 922 */ 923 phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr, 924 VM_PHYS_SIZE, 0, false, NULL); 925 926 /* 927 * No need to allocate an mbuf cluster submap. Mbuf clusters 928 * are allocated via the pool allocator, and we use K0SEG to 929 * map those pages. 930 */ 931 932 #if defined(DEBUG) 933 pmapdebug = opmapdebug; 934 #endif 935 format_bytes(pbuf, sizeof(pbuf), ptoa(uvm_availmem(false))); 936 printf("avail memory = %s\n", pbuf); 937 #if 0 938 { 939 extern u_long pmap_pages_stolen; 940 941 format_bytes(pbuf, sizeof(pbuf), pmap_pages_stolen * PAGE_SIZE); 942 printf("stolen memory for VM structures = %s\n", pbuf); 943 } 944 #endif 945 946 /* 947 * Set up the HWPCB so that it's safe to configure secondary 948 * CPUs. 949 */ 950 hwrpb_primary_init(); 951 952 /* 953 * Initialize FP handling. 954 */ 955 alpha_fp_init(); 956 } 957 958 /* 959 * Retrieve the platform name from the DSR. 960 */ 961 const char * 962 alpha_dsr_sysname(void) 963 { 964 struct dsrdb *dsr; 965 const char *sysname; 966 967 /* 968 * DSR does not exist on early HWRPB versions. 969 */ 970 if (hwrpb->rpb_version < HWRPB_DSRDB_MINVERS) 971 return (NULL); 972 973 dsr = (struct dsrdb *)(((char *)hwrpb) + hwrpb->rpb_dsrdb_off); 974 sysname = (const char *)((char *)dsr + (dsr->dsr_sysname_off + 975 sizeof(uint64_t))); 976 return (sysname); 977 } 978 979 /* 980 * Lookup the system specified system variation in the provided table, 981 * returning the model string on match. 982 */ 983 const char * 984 alpha_variation_name(uint64_t variation, const struct alpha_variation_table *avtp) 985 { 986 int i; 987 988 for (i = 0; avtp[i].avt_model != NULL; i++) 989 if (avtp[i].avt_variation == variation) 990 return (avtp[i].avt_model); 991 return (NULL); 992 } 993 994 /* 995 * Generate a default platform name based for unknown system variations. 996 */ 997 const char * 998 alpha_unknown_sysname(void) 999 { 1000 static char s[128]; /* safe size */ 1001 1002 snprintf(s, sizeof(s), "%s family, unknown model variation 0x%lx", 1003 platform.family, hwrpb->rpb_variation & SV_ST_MASK); 1004 return ((const char *)s); 1005 } 1006 1007 void 1008 identifycpu(void) 1009 { 1010 const char *s; 1011 int i; 1012 1013 /* 1014 * print out CPU identification information. 1015 */ 1016 s = cpu_getmodel(); 1017 printf("%s", s); 1018 for (; *s != '\0'; s++) { 1019 if (strncasecmp(s, "MHz", 3) == 0) { 1020 goto skipMHz; 1021 } 1022 } 1023 printf(", %ldMHz", hwrpb->rpb_cc_freq / 1000000); 1024 skipMHz: 1025 for (i = 0; i < 10; i++) { 1026 /* Only so long as there are printable characters. */ 1027 if (! isprint((unsigned char)hwrpb->rpb_ssn[i])) { 1028 break; 1029 } 1030 if (i == 0) { 1031 printf(", s/n "); 1032 } 1033 printf("%c", hwrpb->rpb_ssn[i]); 1034 } 1035 printf("\n"); 1036 printf("%ld byte page size, %d processor%s.\n", 1037 hwrpb->rpb_page_size, ncpus, ncpus == 1 ? "" : "s"); 1038 } 1039 1040 int waittime = -1; 1041 struct pcb dumppcb; 1042 1043 void 1044 cpu_reboot(int howto, char *bootstr) 1045 { 1046 #if defined(MULTIPROCESSOR) 1047 u_long cpu_id = cpu_number(); 1048 u_long wait_mask; 1049 int i; 1050 #endif 1051 1052 /* If "always halt" was specified as a boot flag, obey. */ 1053 if ((boothowto & RB_HALT) != 0) 1054 howto |= RB_HALT; 1055 1056 boothowto = howto; 1057 1058 /* If system is cold, just halt. */ 1059 if (cold) { 1060 boothowto |= RB_HALT; 1061 goto haltsys; 1062 } 1063 1064 if ((boothowto & RB_NOSYNC) == 0 && waittime < 0) { 1065 waittime = 0; 1066 vfs_shutdown(); 1067 } 1068 1069 /* Disable interrupts. */ 1070 splhigh(); 1071 1072 #if defined(MULTIPROCESSOR) 1073 /* 1074 * Halt all other CPUs. If we're not the primary, the 1075 * primary will spin, waiting for us to halt. 1076 */ 1077 cpu_id = cpu_number(); /* may have changed cpu */ 1078 wait_mask = (1UL << cpu_id) | (1UL << hwrpb->rpb_primary_cpu_id); 1079 1080 alpha_broadcast_ipi(ALPHA_IPI_HALT); 1081 1082 /* Ensure any CPUs paused by DDB resume execution so they can halt */ 1083 cpus_paused = 0; 1084 1085 for (i = 0; i < 10000; i++) { 1086 alpha_mb(); 1087 if (cpus_running == wait_mask) 1088 break; 1089 delay(1000); 1090 } 1091 alpha_mb(); 1092 if (cpus_running != wait_mask) 1093 printf("WARNING: Unable to halt secondary CPUs (0x%lx)\n", 1094 cpus_running); 1095 #endif /* MULTIPROCESSOR */ 1096 1097 /* If rebooting and a dump is requested do it. */ 1098 #if 0 1099 if ((boothowto & (RB_DUMP | RB_HALT)) == RB_DUMP) 1100 #else 1101 if (boothowto & RB_DUMP) 1102 #endif 1103 dumpsys(); 1104 1105 haltsys: 1106 1107 /* run any shutdown hooks */ 1108 doshutdownhooks(); 1109 1110 pmf_system_shutdown(boothowto); 1111 1112 #ifdef BOOTKEY 1113 printf("hit any key to %s...\n", howto & RB_HALT ? "halt" : "reboot"); 1114 cnpollc(1); /* for proper keyboard command handling */ 1115 cngetc(); 1116 cnpollc(0); 1117 printf("\n"); 1118 #endif 1119 1120 /* Finally, powerdown/halt/reboot the system. */ 1121 if ((boothowto & RB_POWERDOWN) == RB_POWERDOWN && 1122 platform.powerdown != NULL) { 1123 (*platform.powerdown)(); 1124 printf("WARNING: powerdown failed!\n"); 1125 } 1126 printf("%s\n\n", (boothowto & RB_HALT) ? "halted." : "rebooting..."); 1127 #if defined(MULTIPROCESSOR) 1128 if (cpu_id != hwrpb->rpb_primary_cpu_id) 1129 cpu_halt(); 1130 else 1131 #endif 1132 prom_halt(boothowto & RB_HALT); 1133 /*NOTREACHED*/ 1134 } 1135 1136 /* 1137 * These variables are needed by /sbin/savecore 1138 */ 1139 uint32_t dumpmag = 0x8fca0101; /* magic number */ 1140 int dumpsize = 0; /* pages */ 1141 long dumplo = 0; /* blocks */ 1142 1143 /* 1144 * cpu_dumpsize: calculate size of machine-dependent kernel core dump headers. 1145 */ 1146 int 1147 cpu_dumpsize(void) 1148 { 1149 int size; 1150 1151 size = ALIGN(sizeof(kcore_seg_t)) + ALIGN(sizeof(cpu_kcore_hdr_t)) + 1152 ALIGN(mem_cluster_cnt * sizeof(phys_ram_seg_t)); 1153 if (roundup(size, dbtob(1)) != dbtob(1)) 1154 return -1; 1155 1156 return (1); 1157 } 1158 1159 /* 1160 * cpu_dump_mempagecnt: calculate size of RAM (in pages) to be dumped. 1161 */ 1162 u_long 1163 cpu_dump_mempagecnt(void) 1164 { 1165 u_long i, n; 1166 1167 n = 0; 1168 for (i = 0; i < mem_cluster_cnt; i++) 1169 n += atop(mem_clusters[i].size); 1170 return (n); 1171 } 1172 1173 /* 1174 * cpu_dump: dump machine-dependent kernel core dump headers. 1175 */ 1176 int 1177 cpu_dump(void) 1178 { 1179 int (*dump)(dev_t, daddr_t, void *, size_t); 1180 char buf[dbtob(1)]; 1181 kcore_seg_t *segp; 1182 cpu_kcore_hdr_t *cpuhdrp; 1183 phys_ram_seg_t *memsegp; 1184 const struct bdevsw *bdev; 1185 int i; 1186 1187 bdev = bdevsw_lookup(dumpdev); 1188 if (bdev == NULL) 1189 return (ENXIO); 1190 dump = bdev->d_dump; 1191 1192 memset(buf, 0, sizeof buf); 1193 segp = (kcore_seg_t *)buf; 1194 cpuhdrp = (cpu_kcore_hdr_t *)&buf[ALIGN(sizeof(*segp))]; 1195 memsegp = (phys_ram_seg_t *)&buf[ ALIGN(sizeof(*segp)) + 1196 ALIGN(sizeof(*cpuhdrp))]; 1197 1198 /* 1199 * Generate a segment header. 1200 */ 1201 CORE_SETMAGIC(*segp, KCORE_MAGIC, MID_MACHINE, CORE_CPU); 1202 segp->c_size = dbtob(1) - ALIGN(sizeof(*segp)); 1203 1204 /* 1205 * Add the machine-dependent header info. 1206 */ 1207 cpuhdrp->lev1map_pa = ALPHA_K0SEG_TO_PHYS((vaddr_t)kernel_lev1map); 1208 cpuhdrp->page_size = PAGE_SIZE; 1209 cpuhdrp->nmemsegs = mem_cluster_cnt; 1210 1211 /* 1212 * Fill in the memory segment descriptors. 1213 */ 1214 for (i = 0; i < mem_cluster_cnt; i++) { 1215 memsegp[i].start = mem_clusters[i].start; 1216 memsegp[i].size = mem_clusters[i].size & ~PAGE_MASK; 1217 } 1218 1219 return (dump(dumpdev, dumplo, (void *)buf, dbtob(1))); 1220 } 1221 1222 /* 1223 * This is called by main to set dumplo and dumpsize. 1224 * Dumps always skip the first PAGE_SIZE of disk space 1225 * in case there might be a disk label stored there. 1226 * If there is extra space, put dump at the end to 1227 * reduce the chance that swapping trashes it. 1228 */ 1229 void 1230 cpu_dumpconf(void) 1231 { 1232 int nblks, dumpblks; /* size of dump area */ 1233 1234 if (dumpdev == NODEV) 1235 goto bad; 1236 nblks = bdev_size(dumpdev); 1237 if (nblks <= ctod(1)) 1238 goto bad; 1239 1240 dumpblks = cpu_dumpsize(); 1241 if (dumpblks < 0) 1242 goto bad; 1243 dumpblks += ctod(cpu_dump_mempagecnt()); 1244 1245 /* If dump won't fit (incl. room for possible label), punt. */ 1246 if (dumpblks > (nblks - ctod(1))) 1247 goto bad; 1248 1249 /* Put dump at end of partition */ 1250 dumplo = nblks - dumpblks; 1251 1252 /* dumpsize is in page units, and doesn't include headers. */ 1253 dumpsize = cpu_dump_mempagecnt(); 1254 return; 1255 1256 bad: 1257 dumpsize = 0; 1258 return; 1259 } 1260 1261 /* 1262 * Dump the kernel's image to the swap partition. 1263 */ 1264 #define BYTES_PER_DUMP PAGE_SIZE 1265 1266 void 1267 dumpsys(void) 1268 { 1269 const struct bdevsw *bdev; 1270 u_long totalbytesleft, bytes, i, n, memcl; 1271 u_long maddr; 1272 int psize; 1273 daddr_t blkno; 1274 int (*dump)(dev_t, daddr_t, void *, size_t); 1275 int error; 1276 1277 /* Save registers. */ 1278 savectx(&dumppcb); 1279 1280 if (dumpdev == NODEV) 1281 return; 1282 bdev = bdevsw_lookup(dumpdev); 1283 if (bdev == NULL || bdev->d_psize == NULL) 1284 return; 1285 1286 /* 1287 * For dumps during autoconfiguration, 1288 * if dump device has already configured... 1289 */ 1290 if (dumpsize == 0) 1291 cpu_dumpconf(); 1292 if (dumplo <= 0) { 1293 printf("\ndump to dev %u,%u not possible\n", 1294 major(dumpdev), minor(dumpdev)); 1295 return; 1296 } 1297 printf("\ndumping to dev %u,%u offset %ld\n", 1298 major(dumpdev), minor(dumpdev), dumplo); 1299 1300 psize = bdev_size(dumpdev); 1301 printf("dump "); 1302 if (psize == -1) { 1303 printf("area unavailable\n"); 1304 return; 1305 } 1306 1307 /* XXX should purge all outstanding keystrokes. */ 1308 1309 if ((error = cpu_dump()) != 0) 1310 goto err; 1311 1312 totalbytesleft = ptoa(cpu_dump_mempagecnt()); 1313 blkno = dumplo + cpu_dumpsize(); 1314 dump = bdev->d_dump; 1315 error = 0; 1316 1317 for (memcl = 0; memcl < mem_cluster_cnt; memcl++) { 1318 maddr = mem_clusters[memcl].start; 1319 bytes = mem_clusters[memcl].size & ~PAGE_MASK; 1320 1321 for (i = 0; i < bytes; i += n, totalbytesleft -= n) { 1322 1323 /* Print out how many MBs we to go. */ 1324 if ((totalbytesleft % (1024*1024)) == 0) 1325 printf_nolog("%ld ", 1326 totalbytesleft / (1024 * 1024)); 1327 1328 /* Limit size for next transfer. */ 1329 n = bytes - i; 1330 if (n > BYTES_PER_DUMP) 1331 n = BYTES_PER_DUMP; 1332 1333 error = (*dump)(dumpdev, blkno, 1334 (void *)ALPHA_PHYS_TO_K0SEG(maddr), n); 1335 if (error) 1336 goto err; 1337 maddr += n; 1338 blkno += btodb(n); /* XXX? */ 1339 1340 /* XXX should look for keystrokes, to cancel. */ 1341 } 1342 } 1343 1344 err: 1345 switch (error) { 1346 1347 case ENXIO: 1348 printf("device bad\n"); 1349 break; 1350 1351 case EFAULT: 1352 printf("device not ready\n"); 1353 break; 1354 1355 case EINVAL: 1356 printf("area improper\n"); 1357 break; 1358 1359 case EIO: 1360 printf("i/o error\n"); 1361 break; 1362 1363 case EINTR: 1364 printf("aborted from console\n"); 1365 break; 1366 1367 case 0: 1368 printf("succeeded\n"); 1369 break; 1370 1371 default: 1372 printf("error %d\n", error); 1373 break; 1374 } 1375 printf("\n\n"); 1376 delay(1000); 1377 } 1378 1379 void 1380 frametoreg(const struct trapframe *framep, struct reg *regp) 1381 { 1382 1383 regp->r_regs[R_V0] = framep->tf_regs[FRAME_V0]; 1384 regp->r_regs[R_T0] = framep->tf_regs[FRAME_T0]; 1385 regp->r_regs[R_T1] = framep->tf_regs[FRAME_T1]; 1386 regp->r_regs[R_T2] = framep->tf_regs[FRAME_T2]; 1387 regp->r_regs[R_T3] = framep->tf_regs[FRAME_T3]; 1388 regp->r_regs[R_T4] = framep->tf_regs[FRAME_T4]; 1389 regp->r_regs[R_T5] = framep->tf_regs[FRAME_T5]; 1390 regp->r_regs[R_T6] = framep->tf_regs[FRAME_T6]; 1391 regp->r_regs[R_T7] = framep->tf_regs[FRAME_T7]; 1392 regp->r_regs[R_S0] = framep->tf_regs[FRAME_S0]; 1393 regp->r_regs[R_S1] = framep->tf_regs[FRAME_S1]; 1394 regp->r_regs[R_S2] = framep->tf_regs[FRAME_S2]; 1395 regp->r_regs[R_S3] = framep->tf_regs[FRAME_S3]; 1396 regp->r_regs[R_S4] = framep->tf_regs[FRAME_S4]; 1397 regp->r_regs[R_S5] = framep->tf_regs[FRAME_S5]; 1398 regp->r_regs[R_S6] = framep->tf_regs[FRAME_S6]; 1399 regp->r_regs[R_A0] = framep->tf_regs[FRAME_A0]; 1400 regp->r_regs[R_A1] = framep->tf_regs[FRAME_A1]; 1401 regp->r_regs[R_A2] = framep->tf_regs[FRAME_A2]; 1402 regp->r_regs[R_A3] = framep->tf_regs[FRAME_A3]; 1403 regp->r_regs[R_A4] = framep->tf_regs[FRAME_A4]; 1404 regp->r_regs[R_A5] = framep->tf_regs[FRAME_A5]; 1405 regp->r_regs[R_T8] = framep->tf_regs[FRAME_T8]; 1406 regp->r_regs[R_T9] = framep->tf_regs[FRAME_T9]; 1407 regp->r_regs[R_T10] = framep->tf_regs[FRAME_T10]; 1408 regp->r_regs[R_T11] = framep->tf_regs[FRAME_T11]; 1409 regp->r_regs[R_RA] = framep->tf_regs[FRAME_RA]; 1410 regp->r_regs[R_T12] = framep->tf_regs[FRAME_T12]; 1411 regp->r_regs[R_AT] = framep->tf_regs[FRAME_AT]; 1412 regp->r_regs[R_GP] = framep->tf_regs[FRAME_GP]; 1413 /* regp->r_regs[R_SP] = framep->tf_regs[FRAME_SP]; XXX */ 1414 regp->r_regs[R_ZERO] = 0; 1415 } 1416 1417 void 1418 regtoframe(const struct reg *regp, struct trapframe *framep) 1419 { 1420 1421 framep->tf_regs[FRAME_V0] = regp->r_regs[R_V0]; 1422 framep->tf_regs[FRAME_T0] = regp->r_regs[R_T0]; 1423 framep->tf_regs[FRAME_T1] = regp->r_regs[R_T1]; 1424 framep->tf_regs[FRAME_T2] = regp->r_regs[R_T2]; 1425 framep->tf_regs[FRAME_T3] = regp->r_regs[R_T3]; 1426 framep->tf_regs[FRAME_T4] = regp->r_regs[R_T4]; 1427 framep->tf_regs[FRAME_T5] = regp->r_regs[R_T5]; 1428 framep->tf_regs[FRAME_T6] = regp->r_regs[R_T6]; 1429 framep->tf_regs[FRAME_T7] = regp->r_regs[R_T7]; 1430 framep->tf_regs[FRAME_S0] = regp->r_regs[R_S0]; 1431 framep->tf_regs[FRAME_S1] = regp->r_regs[R_S1]; 1432 framep->tf_regs[FRAME_S2] = regp->r_regs[R_S2]; 1433 framep->tf_regs[FRAME_S3] = regp->r_regs[R_S3]; 1434 framep->tf_regs[FRAME_S4] = regp->r_regs[R_S4]; 1435 framep->tf_regs[FRAME_S5] = regp->r_regs[R_S5]; 1436 framep->tf_regs[FRAME_S6] = regp->r_regs[R_S6]; 1437 framep->tf_regs[FRAME_A0] = regp->r_regs[R_A0]; 1438 framep->tf_regs[FRAME_A1] = regp->r_regs[R_A1]; 1439 framep->tf_regs[FRAME_A2] = regp->r_regs[R_A2]; 1440 framep->tf_regs[FRAME_A3] = regp->r_regs[R_A3]; 1441 framep->tf_regs[FRAME_A4] = regp->r_regs[R_A4]; 1442 framep->tf_regs[FRAME_A5] = regp->r_regs[R_A5]; 1443 framep->tf_regs[FRAME_T8] = regp->r_regs[R_T8]; 1444 framep->tf_regs[FRAME_T9] = regp->r_regs[R_T9]; 1445 framep->tf_regs[FRAME_T10] = regp->r_regs[R_T10]; 1446 framep->tf_regs[FRAME_T11] = regp->r_regs[R_T11]; 1447 framep->tf_regs[FRAME_RA] = regp->r_regs[R_RA]; 1448 framep->tf_regs[FRAME_T12] = regp->r_regs[R_T12]; 1449 framep->tf_regs[FRAME_AT] = regp->r_regs[R_AT]; 1450 framep->tf_regs[FRAME_GP] = regp->r_regs[R_GP]; 1451 /* framep->tf_regs[FRAME_SP] = regp->r_regs[R_SP]; XXX */ 1452 /* ??? = regp->r_regs[R_ZERO]; */ 1453 } 1454 1455 void 1456 printregs(struct reg *regp) 1457 { 1458 int i; 1459 1460 for (i = 0; i < 32; i++) 1461 printf("R%d:\t0x%016lx%s", i, regp->r_regs[i], 1462 i & 1 ? "\n" : "\t"); 1463 } 1464 1465 void 1466 regdump(struct trapframe *framep) 1467 { 1468 struct reg reg; 1469 1470 frametoreg(framep, ®); 1471 reg.r_regs[R_SP] = alpha_pal_rdusp(); 1472 1473 printf("REGISTERS:\n"); 1474 printregs(®); 1475 } 1476 1477 1478 1479 void * 1480 getframe(const struct lwp *l, int sig, int *onstack, size_t size, size_t align) 1481 { 1482 uintptr_t frame; 1483 1484 KASSERT((align & (align - 1)) == 0); 1485 1486 /* Do we need to jump onto the signal stack? */ 1487 *onstack = 1488 (l->l_sigstk.ss_flags & (SS_DISABLE | SS_ONSTACK)) == 0 && 1489 (SIGACTION(l->l_proc, sig).sa_flags & SA_ONSTACK) != 0; 1490 1491 if (*onstack) 1492 frame = (uintptr_t)l->l_sigstk.ss_sp + l->l_sigstk.ss_size; 1493 else 1494 frame = (uintptr_t)alpha_pal_rdusp(); 1495 frame -= size; 1496 frame &= ~(STACK_ALIGNBYTES | (align - 1)); 1497 return (void *)frame; 1498 } 1499 1500 void 1501 buildcontext(struct lwp *l, const void *catcher, const void *tramp, const void *fp) 1502 { 1503 struct trapframe *tf = l->l_md.md_tf; 1504 1505 tf->tf_regs[FRAME_RA] = (uint64_t)tramp; 1506 tf->tf_regs[FRAME_PC] = (uint64_t)catcher; 1507 tf->tf_regs[FRAME_T12] = (uint64_t)catcher; 1508 alpha_pal_wrusp((unsigned long)fp); 1509 } 1510 1511 1512 /* 1513 * Send an interrupt to process, new style 1514 */ 1515 void 1516 sendsig_siginfo(const ksiginfo_t *ksi, const sigset_t *mask) 1517 { 1518 struct lwp *l = curlwp; 1519 struct proc *p = l->l_proc; 1520 struct sigacts *ps = p->p_sigacts; 1521 int onstack, sig = ksi->ksi_signo, error; 1522 struct sigframe_siginfo *fp, frame; 1523 struct trapframe *tf; 1524 sig_t catcher = SIGACTION(p, ksi->ksi_signo).sa_handler; 1525 1526 tf = l->l_md.md_tf; 1527 1528 /* Allocate space for the signal handler context. */ 1529 fp = getframe(l, ksi->ksi_signo, &onstack, sizeof(*fp), _Alignof(*fp)); 1530 1531 #ifdef DEBUG 1532 if ((sigdebug & SDB_KSTACK) && p->p_pid == sigpid) 1533 printf("sendsig_siginfo(%d): sig %d ssp %p usp %p\n", p->p_pid, 1534 sig, &onstack, fp); 1535 #endif 1536 1537 /* Build stack frame for signal trampoline. */ 1538 memset(&frame, 0, sizeof(frame)); 1539 frame.sf_si._info = ksi->ksi_info; 1540 frame.sf_uc.uc_flags = _UC_SIGMASK; 1541 frame.sf_uc.uc_sigmask = *mask; 1542 frame.sf_uc.uc_link = l->l_ctxlink; 1543 frame.sf_uc.uc_flags |= (l->l_sigstk.ss_flags & SS_ONSTACK) 1544 ? _UC_SETSTACK : _UC_CLRSTACK; 1545 sendsig_reset(l, sig); 1546 mutex_exit(p->p_lock); 1547 cpu_getmcontext(l, &frame.sf_uc.uc_mcontext, &frame.sf_uc.uc_flags); 1548 error = copyout(&frame, fp, sizeof(frame)); 1549 mutex_enter(p->p_lock); 1550 1551 if (error != 0) { 1552 /* 1553 * Process has trashed its stack; give it an illegal 1554 * instruction to halt it in its tracks. 1555 */ 1556 #ifdef DEBUG 1557 if ((sigdebug & SDB_KSTACK) && p->p_pid == sigpid) 1558 printf("sendsig_siginfo(%d): copyout failed on sig %d\n", 1559 p->p_pid, sig); 1560 #endif 1561 sigexit(l, SIGILL); 1562 /* NOTREACHED */ 1563 } 1564 1565 #ifdef DEBUG 1566 if (sigdebug & SDB_FOLLOW) 1567 printf("sendsig_siginfo(%d): sig %d usp %p code %x\n", 1568 p->p_pid, sig, fp, ksi->ksi_code); 1569 #endif 1570 1571 /* 1572 * Set up the registers to directly invoke the signal handler. The 1573 * signal trampoline is then used to return from the signal. Note 1574 * the trampoline version numbers are coordinated with machine- 1575 * dependent code in libc. 1576 */ 1577 1578 tf->tf_regs[FRAME_A0] = sig; 1579 tf->tf_regs[FRAME_A1] = (uint64_t)&fp->sf_si; 1580 tf->tf_regs[FRAME_A2] = (uint64_t)&fp->sf_uc; 1581 1582 buildcontext(l,catcher,ps->sa_sigdesc[sig].sd_tramp,fp); 1583 1584 /* Remember that we're now on the signal stack. */ 1585 if (onstack) 1586 l->l_sigstk.ss_flags |= SS_ONSTACK; 1587 1588 #ifdef DEBUG 1589 if (sigdebug & SDB_FOLLOW) 1590 printf("sendsig_siginfo(%d): pc %lx, catcher %lx\n", p->p_pid, 1591 tf->tf_regs[FRAME_PC], tf->tf_regs[FRAME_A3]); 1592 if ((sigdebug & SDB_KSTACK) && p->p_pid == sigpid) 1593 printf("sendsig_siginfo(%d): sig %d returns\n", 1594 p->p_pid, sig); 1595 #endif 1596 } 1597 1598 /* 1599 * machine dependent system variables. 1600 */ 1601 SYSCTL_SETUP(sysctl_machdep_setup, "sysctl machdep subtree setup") 1602 { 1603 1604 sysctl_createv(clog, 0, NULL, NULL, 1605 CTLFLAG_PERMANENT, 1606 CTLTYPE_NODE, "machdep", NULL, 1607 NULL, 0, NULL, 0, 1608 CTL_MACHDEP, CTL_EOL); 1609 1610 sysctl_createv(clog, 0, NULL, NULL, 1611 CTLFLAG_PERMANENT, 1612 CTLTYPE_STRUCT, "console_device", NULL, 1613 sysctl_consdev, 0, NULL, sizeof(dev_t), 1614 CTL_MACHDEP, CPU_CONSDEV, CTL_EOL); 1615 sysctl_createv(clog, 0, NULL, NULL, 1616 CTLFLAG_PERMANENT, 1617 CTLTYPE_STRING, "root_device", NULL, 1618 sysctl_root_device, 0, NULL, 0, 1619 CTL_MACHDEP, CPU_ROOT_DEVICE, CTL_EOL); 1620 sysctl_createv(clog, 0, NULL, NULL, 1621 CTLFLAG_PERMANENT|CTLFLAG_READWRITE, 1622 CTLTYPE_INT, "unaligned_print", 1623 SYSCTL_DESCR("Warn about unaligned accesses"), 1624 NULL, 0, &alpha_unaligned_print, 0, 1625 CTL_MACHDEP, CPU_UNALIGNED_PRINT, CTL_EOL); 1626 sysctl_createv(clog, 0, NULL, NULL, 1627 CTLFLAG_PERMANENT|CTLFLAG_READWRITE, 1628 CTLTYPE_INT, "unaligned_fix", 1629 SYSCTL_DESCR("Fix up unaligned accesses"), 1630 NULL, 0, &alpha_unaligned_fix, 0, 1631 CTL_MACHDEP, CPU_UNALIGNED_FIX, CTL_EOL); 1632 sysctl_createv(clog, 0, NULL, NULL, 1633 CTLFLAG_PERMANENT|CTLFLAG_READWRITE, 1634 CTLTYPE_INT, "unaligned_sigbus", 1635 SYSCTL_DESCR("Do SIGBUS for fixed unaligned accesses"), 1636 NULL, 0, &alpha_unaligned_sigbus, 0, 1637 CTL_MACHDEP, CPU_UNALIGNED_SIGBUS, CTL_EOL); 1638 sysctl_createv(clog, 0, NULL, NULL, 1639 CTLFLAG_PERMANENT, 1640 CTLTYPE_STRING, "booted_kernel", NULL, 1641 NULL, 0, bootinfo.booted_kernel, 0, 1642 CTL_MACHDEP, CPU_BOOTED_KERNEL, CTL_EOL); 1643 sysctl_createv(clog, 0, NULL, NULL, 1644 CTLFLAG_PERMANENT|CTLFLAG_READWRITE, 1645 CTLTYPE_INT, "fp_sync_complete", NULL, 1646 NULL, 0, &alpha_fp_sync_complete, 0, 1647 CTL_MACHDEP, CPU_FP_SYNC_COMPLETE, CTL_EOL); 1648 sysctl_createv(clog, 0, NULL, NULL, 1649 CTLFLAG_PERMANENT, 1650 CTLTYPE_INT, "cctr", NULL, 1651 NULL, 0, &alpha_use_cctr, 0, 1652 CTL_MACHDEP, CPU_CCTR, CTL_EOL); 1653 sysctl_createv(clog, 0, NULL, NULL, 1654 CTLFLAG_PERMANENT, 1655 CTLTYPE_BOOL, "is_qemu", NULL, 1656 NULL, 0, &alpha_is_qemu, 0, 1657 CTL_MACHDEP, CPU_IS_QEMU, CTL_EOL); 1658 sysctl_createv(clog, 0, NULL, NULL, 1659 CTLFLAG_PERMANENT|CTLFLAG_READWRITE, 1660 CTLTYPE_INT, "fp_complete_debug", NULL, 1661 NULL, 0, &alpha_fp_complete_debug, 0, 1662 CTL_MACHDEP, CPU_FP_COMPLETE_DEBUG, CTL_EOL); 1663 sysctl_createv(clog, 0, NULL, NULL, 1664 CTLFLAG_PERMANENT, 1665 CTLTYPE_QUAD, "rpb_type", NULL, 1666 NULL, 0, &hwrpb->rpb_type, 0, 1667 CTL_MACHDEP, CPU_RPB_TYPE, CTL_EOL); 1668 sysctl_createv(clog, 0, NULL, NULL, 1669 CTLFLAG_PERMANENT, 1670 CTLTYPE_QUAD, "rpb_variation", NULL, 1671 NULL, 0, &hwrpb->rpb_variation, 0, 1672 CTL_MACHDEP, CPU_RPB_VARIATION, CTL_EOL); 1673 } 1674 1675 /* 1676 * Set registers on exec. 1677 */ 1678 void 1679 setregs(register struct lwp *l, struct exec_package *pack, vaddr_t stack) 1680 { 1681 struct trapframe *tfp = l->l_md.md_tf; 1682 struct pcb *pcb; 1683 #ifdef DEBUG 1684 int i; 1685 #endif 1686 1687 #ifdef DEBUG 1688 /* 1689 * Crash and dump, if the user requested it. 1690 */ 1691 if (boothowto & RB_DUMP) 1692 panic("crash requested by boot flags"); 1693 #endif 1694 1695 memset(tfp, 0, sizeof(*tfp)); 1696 1697 #ifdef DEBUG 1698 for (i = 0; i < FRAME_SIZE; i++) 1699 tfp->tf_regs[i] = 0xbabefacedeadbeef; 1700 #endif 1701 pcb = lwp_getpcb(l); 1702 memset(&pcb->pcb_fp, 0, sizeof(pcb->pcb_fp)); 1703 alpha_pal_wrusp(stack); 1704 tfp->tf_regs[FRAME_PS] = ALPHA_PSL_USERSET; 1705 tfp->tf_regs[FRAME_PC] = pack->ep_entry & ~3; 1706 1707 tfp->tf_regs[FRAME_A0] = stack; /* a0 = sp */ 1708 tfp->tf_regs[FRAME_A1] = 0; /* a1 = rtld cleanup */ 1709 tfp->tf_regs[FRAME_A2] = 0; /* a2 = rtld object */ 1710 tfp->tf_regs[FRAME_A3] = l->l_proc->p_psstrp; /* a3 = ps_strings */ 1711 tfp->tf_regs[FRAME_T12] = tfp->tf_regs[FRAME_PC]; /* a.k.a. PV */ 1712 1713 if (__predict_true((l->l_md.md_flags & IEEE_INHERIT) == 0)) { 1714 l->l_md.md_flags = 1715 (l->l_md.md_flags & ~(MDLWP_FP_C | MDLWP_FPACTIVE)) | 1716 FP_C_DEFAULT; 1717 pcb->pcb_fp.fpr_cr = FPCR_DEFAULT; 1718 } 1719 } 1720 1721 void (*alpha_delay_fn)(unsigned long); 1722 1723 /* 1724 * Wait "n" microseconds. 1725 */ 1726 void 1727 delay(unsigned long n) 1728 { 1729 unsigned long pcc0, pcc1, curcycle, cycles, usec; 1730 1731 if (n == 0) 1732 return; 1733 1734 /* 1735 * If we have an alternative delay function, go ahead and 1736 * use it. 1737 */ 1738 if (alpha_delay_fn != NULL) { 1739 (*alpha_delay_fn)(n); 1740 return; 1741 } 1742 1743 lwp_t * const l = curlwp; 1744 KPREEMPT_DISABLE(l); 1745 1746 pcc0 = alpha_rpcc() & 0xffffffffUL; 1747 cycles = 0; 1748 usec = 0; 1749 1750 while (usec <= n) { 1751 /* 1752 * Get the next CPU cycle count- assumes that we cannot 1753 * have had more than one 32 bit overflow. 1754 */ 1755 pcc1 = alpha_rpcc() & 0xffffffffUL; 1756 if (pcc1 < pcc0) 1757 curcycle = (pcc1 + 0x100000000UL) - pcc0; 1758 else 1759 curcycle = pcc1 - pcc0; 1760 1761 /* 1762 * We now have the number of processor cycles since we 1763 * last checked. Add the current cycle count to the 1764 * running total. If it's over cycles_per_usec, increment 1765 * the usec counter. 1766 */ 1767 cycles += curcycle; 1768 while (cycles > cycles_per_usec) { 1769 usec++; 1770 cycles -= cycles_per_usec; 1771 } 1772 pcc0 = pcc1; 1773 } 1774 1775 KPREEMPT_ENABLE(l); 1776 } 1777 1778 #ifdef EXEC_ECOFF 1779 void 1780 cpu_exec_ecoff_setregs(struct lwp *l, struct exec_package *epp, vaddr_t stack) 1781 { 1782 struct ecoff_exechdr *execp = (struct ecoff_exechdr *)epp->ep_hdr; 1783 1784 l->l_md.md_tf->tf_regs[FRAME_GP] = execp->a.gp_value; 1785 } 1786 1787 /* 1788 * cpu_exec_ecoff_hook(): 1789 * cpu-dependent ECOFF format hook for execve(). 1790 * 1791 * Do any machine-dependent diddling of the exec package when doing ECOFF. 1792 * 1793 */ 1794 int 1795 cpu_exec_ecoff_probe(struct lwp *l, struct exec_package *epp) 1796 { 1797 struct ecoff_exechdr *execp = (struct ecoff_exechdr *)epp->ep_hdr; 1798 int error; 1799 1800 if (execp->f.f_magic == ECOFF_MAGIC_NETBSD_ALPHA) 1801 error = 0; 1802 else 1803 error = ENOEXEC; 1804 1805 return (error); 1806 } 1807 #endif /* EXEC_ECOFF */ 1808 1809 int 1810 mm_md_physacc(paddr_t pa, vm_prot_t prot) 1811 { 1812 u_quad_t size; 1813 int i; 1814 1815 for (i = 0; i < mem_cluster_cnt; i++) { 1816 if (pa < mem_clusters[i].start) 1817 continue; 1818 size = mem_clusters[i].size & ~PAGE_MASK; 1819 if (pa >= (mem_clusters[i].start + size)) 1820 continue; 1821 if ((prot & mem_clusters[i].size & PAGE_MASK) == prot) 1822 return 0; 1823 } 1824 return EFAULT; 1825 } 1826 1827 bool 1828 mm_md_direct_mapped_io(void *addr, paddr_t *paddr) 1829 { 1830 vaddr_t va = (vaddr_t)addr; 1831 1832 if (va >= ALPHA_K0SEG_BASE && va <= ALPHA_K0SEG_END) { 1833 *paddr = ALPHA_K0SEG_TO_PHYS(va); 1834 return true; 1835 } 1836 return false; 1837 } 1838 1839 bool 1840 mm_md_direct_mapped_phys(paddr_t paddr, vaddr_t *vaddr) 1841 { 1842 1843 *vaddr = ALPHA_PHYS_TO_K0SEG(paddr); 1844 return true; 1845 } 1846 1847 void 1848 cpu_getmcontext(struct lwp *l, mcontext_t *mcp, unsigned int *flags) 1849 { 1850 struct trapframe *frame = l->l_md.md_tf; 1851 struct pcb *pcb = lwp_getpcb(l); 1852 __greg_t *gr = mcp->__gregs; 1853 __greg_t ras_pc; 1854 1855 /* Save register context. */ 1856 frametoreg(frame, (struct reg *)gr); 1857 /* XXX if there's a better, general way to get the USP of 1858 * an LWP that might or might not be curlwp, I'd like to know 1859 * about it. 1860 */ 1861 if (l == curlwp) { 1862 gr[_REG_SP] = alpha_pal_rdusp(); 1863 gr[_REG_UNIQUE] = alpha_pal_rdunique(); 1864 } else { 1865 gr[_REG_SP] = pcb->pcb_hw.apcb_usp; 1866 gr[_REG_UNIQUE] = pcb->pcb_hw.apcb_unique; 1867 } 1868 gr[_REG_PC] = frame->tf_regs[FRAME_PC]; 1869 gr[_REG_PS] = frame->tf_regs[FRAME_PS]; 1870 1871 if ((ras_pc = (__greg_t)ras_lookup(l->l_proc, 1872 (void *) gr[_REG_PC])) != -1) 1873 gr[_REG_PC] = ras_pc; 1874 1875 *flags |= _UC_CPU | _UC_TLSBASE; 1876 1877 /* Save floating point register context, if any, and copy it. */ 1878 if (fpu_valid_p(l)) { 1879 fpu_save(l); 1880 (void)memcpy(&mcp->__fpregs, &pcb->pcb_fp, 1881 sizeof (mcp->__fpregs)); 1882 mcp->__fpregs.__fp_fpcr = alpha_read_fp_c(l); 1883 *flags |= _UC_FPU; 1884 } 1885 } 1886 1887 int 1888 cpu_mcontext_validate(struct lwp *l, const mcontext_t *mcp) 1889 { 1890 const __greg_t *gr = mcp->__gregs; 1891 1892 if ((gr[_REG_PS] & ALPHA_PSL_USERSET) != ALPHA_PSL_USERSET || 1893 (gr[_REG_PS] & ALPHA_PSL_USERCLR) != 0) 1894 return EINVAL; 1895 1896 return 0; 1897 } 1898 1899 int 1900 cpu_setmcontext(struct lwp *l, const mcontext_t *mcp, unsigned int flags) 1901 { 1902 struct trapframe *frame = l->l_md.md_tf; 1903 struct pcb *pcb = lwp_getpcb(l); 1904 const __greg_t *gr = mcp->__gregs; 1905 int error; 1906 1907 /* Restore register context, if any. */ 1908 if (flags & _UC_CPU) { 1909 /* Check for security violations first. */ 1910 error = cpu_mcontext_validate(l, mcp); 1911 if (error) 1912 return error; 1913 1914 regtoframe((const struct reg *)gr, l->l_md.md_tf); 1915 if (l == curlwp) 1916 alpha_pal_wrusp(gr[_REG_SP]); 1917 else 1918 pcb->pcb_hw.apcb_usp = gr[_REG_SP]; 1919 frame->tf_regs[FRAME_PC] = gr[_REG_PC]; 1920 frame->tf_regs[FRAME_PS] = gr[_REG_PS]; 1921 } 1922 1923 if (flags & _UC_TLSBASE) 1924 lwp_setprivate(l, (void *)(uintptr_t)gr[_REG_UNIQUE]); 1925 1926 /* Restore floating point register context, if any. */ 1927 if (flags & _UC_FPU) { 1928 /* If we have an FP register context, get rid of it. */ 1929 fpu_discard(l, true); 1930 (void)memcpy(&pcb->pcb_fp, &mcp->__fpregs, 1931 sizeof (pcb->pcb_fp)); 1932 l->l_md.md_flags = mcp->__fpregs.__fp_fpcr & MDLWP_FP_C; 1933 } 1934 1935 mutex_enter(l->l_proc->p_lock); 1936 if (flags & _UC_SETSTACK) 1937 l->l_sigstk.ss_flags |= SS_ONSTACK; 1938 if (flags & _UC_CLRSTACK) 1939 l->l_sigstk.ss_flags &= ~SS_ONSTACK; 1940 mutex_exit(l->l_proc->p_lock); 1941 1942 return (0); 1943 } 1944 1945 static void 1946 cpu_kick(struct cpu_info * const ci) 1947 { 1948 #if defined(MULTIPROCESSOR) 1949 alpha_send_ipi(ci->ci_cpuid, ALPHA_IPI_AST); 1950 #endif /* MULTIPROCESSOR */ 1951 } 1952 1953 /* 1954 * Preempt the current process if in interrupt from user mode, 1955 * or after the current trap/syscall if in system mode. 1956 */ 1957 void 1958 cpu_need_resched(struct cpu_info *ci, struct lwp *l, int flags) 1959 { 1960 1961 KASSERT(kpreempt_disabled()); 1962 1963 if ((flags & RESCHED_IDLE) != 0) { 1964 /* 1965 * Nothing to do here; we are not currently using WTINT 1966 * in cpu_idle(). 1967 */ 1968 return; 1969 } 1970 1971 /* XXX RESCHED_KPREEMPT XXX */ 1972 1973 KASSERT((flags & RESCHED_UPREEMPT) != 0); 1974 if ((flags & RESCHED_REMOTE) != 0) { 1975 cpu_kick(ci); 1976 } else { 1977 aston(l); 1978 } 1979 } 1980 1981 /* 1982 * Notify the current lwp (l) that it has a signal pending, 1983 * process as soon as possible. 1984 */ 1985 void 1986 cpu_signotify(struct lwp *l) 1987 { 1988 1989 KASSERT(kpreempt_disabled()); 1990 1991 if (l->l_cpu != curcpu()) { 1992 cpu_kick(l->l_cpu); 1993 } else { 1994 aston(l); 1995 } 1996 } 1997 1998 /* 1999 * Give a profiling tick to the current process when the user profiling 2000 * buffer pages are invalid. On the alpha, request an AST to send us 2001 * through trap, marking the proc as needing a profiling tick. 2002 */ 2003 void 2004 cpu_need_proftick(struct lwp *l) 2005 { 2006 2007 KASSERT(kpreempt_disabled()); 2008 KASSERT(l->l_cpu == curcpu()); 2009 2010 l->l_pflag |= LP_OWEUPC; 2011 aston(l); 2012 } 2013