/* $NetBSD: machdep.c,v 1.368 2020/10/14 00:59:50 thorpej Exp $ */

/*-
 * Copyright (c) 1998, 1999, 2000, 2019, 2020 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center and by Chris G. Demetriou.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1994, 1995, 1996 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Author: Chris G. Demetriou
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *	Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *	School of Computer Science
 *	Carnegie Mellon University
 *	Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

#include "opt_ddb.h"
#include "opt_kgdb.h"
#include "opt_modular.h"
#include "opt_multiprocessor.h"
#include "opt_dec_3000_300.h"
#include "opt_dec_3000_500.h"
#include "opt_execfmt.h"

#include <sys/cdefs.h>			/* RCS ID & Copyright macro defns */

__KERNEL_RCSID(0, "$NetBSD: machdep.c,v 1.368 2020/10/14 00:59:50 thorpej Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/signalvar.h>
#include <sys/kernel.h>
#include <sys/cpu.h>
#include <sys/proc.h>
#include <sys/ras.h>
#include <sys/sched.h>
#include <sys/reboot.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mman.h>
#include <sys/msgbuf.h>
#include <sys/ioctl.h>
#include <sys/tty.h>
#include <sys/exec.h>
#include <sys/exec_aout.h>		/* for MID_* */
#include <sys/exec_ecoff.h>
#include <sys/core.h>
#include <sys/kcore.h>
#include <sys/ucontext.h>
#include <sys/conf.h>
#include <sys/ksyms.h>
#include <sys/kauth.h>
#include <sys/atomic.h>
#include <sys/cpu.h>

#include <machine/kcore.h>
#include <machine/fpu.h>

#include <sys/mount.h>
#include <sys/syscallargs.h>

#include <uvm/uvm.h>
#include <sys/sysctl.h>

#include <dev/cons.h>
#include <dev/mm.h>

#include <machine/autoconf.h>
#include <machine/reg.h>
#include <machine/rpb.h>
#include <machine/prom.h>
#include <machine/cpuconf.h>
#include <machine/ieeefp.h>

#ifdef DDB
#include <machine/db_machdep.h>
#include <ddb/db_access.h>
#include <ddb/db_sym.h>
#include <ddb/db_extern.h>
#include <ddb/db_interface.h>
#endif

#ifdef KGDB
#include <sys/kgdb.h>
#endif

#ifdef DEBUG
#include <machine/sigdebug.h>
int sigdebug = 0x0;
int sigpid = 0;
#endif

#include <machine/alpha.h>

#include "ksyms.h"

struct vm_map *phys_map = NULL;

void *msgbufaddr;

int	maxmem;			/* max memory per process */

int	totalphysmem;		/* total amount of physical memory in system */
int	resvmem;		/* amount of memory reserved for PROM */
int	unusedmem;		/* amount of memory for OS that we don't use */
int	unknownmem;		/* amount of memory with an unknown use */

int	cputype;		/* system type, from the RPB */
bool	alpha_is_qemu;		/* true if we've detected running in qemu */

int	bootdev_debug = 0;	/* patchable, or from DDB */

/*
 * XXX We need an address to which we can assign things so that they
 * won't be optimized away because we didn't use the value.
 */
uint32_t no_optimize;

/* the following is used externally (sysctl_hw) */
char	machine[] = MACHINE;		/* from <machine/param.h> */
char	machine_arch[] = MACHINE_ARCH;	/* from <machine/param.h> */

/* Number of machine cycles per microsecond */
uint64_t	cycles_per_usec;

/* number of CPUs in the box.  really! */
int		ncpus;

struct bootinfo_kernel bootinfo;

/* For built-in TCDS */
#if defined(DEC_3000_300) || defined(DEC_3000_500)
uint8_t	dec_3000_scsiid[3], dec_3000_scsifast[3];
#endif

struct platform platform;

#if NKSYMS || defined(DDB) || defined(MODULAR)
/* start and end of kernel symbol table */
void	*ksym_start, *ksym_end;
#endif

/* for cpu_sysctl() */
int	alpha_unaligned_print = 1;	/* warn about unaligned accesses */
int	alpha_unaligned_fix = 1;	/* fix up unaligned accesses */
int	alpha_unaligned_sigbus = 0;	/* don't SIGBUS on fixed-up accesses */
int	alpha_fp_sync_complete = 0;	/* fp fixup if sync even without /s */

/*
 * XXX This should be dynamically sized, but we have the chicken-egg problem!
 * XXX it should also be larger than it is, because not all of the mddt
 * XXX clusters end up being used for VM.
 */
phys_ram_seg_t mem_clusters[VM_PHYSSEG_MAX];	/* low size bits overloaded */
int	mem_cluster_cnt;

int	cpu_dump(void);
int	cpu_dumpsize(void);
u_long	cpu_dump_mempagecnt(void);
void	dumpsys(void);
void	identifycpu(void);
void	printregs(struct reg *);

const pcu_ops_t fpu_ops = {
	.pcu_id = PCU_FPU,
	.pcu_state_load = fpu_state_load,
	.pcu_state_save = fpu_state_save,
	.pcu_state_release = fpu_state_release,
};

const pcu_ops_t * const pcu_ops_md_defs[PCU_UNIT_COUNT] = {
	[PCU_FPU] = &fpu_ops,
};
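
/*
 * Illustrative note (not from the original source): the pcu(9)
 * framework dispatches through the ops above to manage per-LWP FPU
 * state lazily.  For example, the first FP instruction a process
 * issues takes a FEN fault, and the handler typically ends up doing
 * something like
 *
 *	pcu_load(&fpu_ops);
 *
 * which invokes fpu_state_load() above, so FP state is only ever
 * saved and restored for LWPs that actually use the FPU.
 */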

static void
alpha_page_physload(unsigned long const start_pfn, unsigned long const end_pfn)
{

	/*
	 * Some Alpha platforms may have unique requirements about
	 * how physical memory is managed (e.g. reserving memory
	 * ranges due to lack of SGMAP DMA).
	 */
	if (platform.page_physload != NULL) {
		(*platform.page_physload)(start_pfn, end_pfn);
		return;
	}

	uvm_page_physload(start_pfn, end_pfn, start_pfn, end_pfn,
	    VM_FREELIST_DEFAULT);
}

void
alpha_page_physload_sheltered(unsigned long const start_pfn,
    unsigned long const end_pfn, unsigned long const shelter_start_pfn,
    unsigned long const shelter_end_pfn)
{

	/*
	 * If the added region ends before or starts after the sheltered
	 * region, then it just goes on the default freelist.
	 */
	if (end_pfn <= shelter_start_pfn || start_pfn >= shelter_end_pfn) {
		uvm_page_physload(start_pfn, end_pfn,
		    start_pfn, end_pfn, VM_FREELIST_DEFAULT);
		return;
	}

	/*
	 * Load any portion that comes before the sheltered region.
	 */
	if (start_pfn < shelter_start_pfn) {
		KASSERT(end_pfn > shelter_start_pfn);
		uvm_page_physload(start_pfn, shelter_start_pfn,
		    start_pfn, shelter_start_pfn, VM_FREELIST_DEFAULT);
	}

	/*
	 * Load the portion that overlaps the sheltered region.
	 */
	const unsigned long ov_start = MAX(start_pfn, shelter_start_pfn);
	const unsigned long ov_end = MIN(end_pfn, shelter_end_pfn);
	KASSERT(ov_start >= shelter_start_pfn);
	KASSERT(ov_end <= shelter_end_pfn);
	uvm_page_physload(ov_start, ov_end, ov_start, ov_end,
	    VM_FREELIST_SHELTERED);

	/*
	 * Load any portion that comes after the sheltered region.
	 */
	if (end_pfn > shelter_end_pfn) {
		KASSERT(start_pfn < shelter_end_pfn);
		uvm_page_physload(shelter_end_pfn, end_pfn,
		    shelter_end_pfn, end_pfn, VM_FREELIST_DEFAULT);
	}
}
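
/*
 * Worked example (hypothetical PFNs, not from the original source):
 * loading [100, 500) with a sheltered range of [200, 300) results in
 * three uvm_page_physload() calls:
 *
 *	[100, 200) -> VM_FREELIST_DEFAULT
 *	[200, 300) -> VM_FREELIST_SHELTERED
 *	[300, 500) -> VM_FREELIST_DEFAULT
 */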

void
alpha_init(u_long xxx_pfn __unused, u_long ptb, u_long bim, u_long bip,
    u_long biv)
	/* pfn:  first free PFN number (no longer used) */
	/* ptb:  PFN of current level 1 page table */
	/* bim:  bootinfo magic */
	/* bip:  bootinfo pointer */
	/* biv:  bootinfo version */
{
	extern char kernel_text[], _end[];
	struct mddt *mddtp;
	struct mddt_cluster *memc;
	int i, mddtweird;
	struct pcb *pcb0;
	vaddr_t kernstart, kernend, v;
	paddr_t kernstartpfn, kernendpfn, pfn0, pfn1;
	cpuid_t cpu_id;
	struct cpu_info *ci;
	char *p;
	const char *bootinfo_msg;
	const struct cpuinit *c;

	/* NO OUTPUT ALLOWED UNTIL FURTHER NOTICE */

	/*
	 * Turn off interrupts (not mchecks) and floating point.
	 * Make sure the instruction and data streams are consistent.
	 */
	(void)alpha_pal_swpipl(ALPHA_PSL_IPL_HIGH);
	alpha_pal_wrfen(0);
	ALPHA_TBIA();
	alpha_pal_imb();

	/* Initialize the SCB. */
	scb_init();

	cpu_id = cpu_number();

	ci = &cpu_info_primary;
	ci->ci_cpuid = cpu_id;

#if defined(MULTIPROCESSOR)
	/*
	 * Set the SysValue to &lwp0, after making sure that lwp0
	 * is pointing at the primary CPU.  Secondary processors do
	 * this in their spinup trampoline.
	 */
	lwp0.l_cpu = ci;
	cpu_info[cpu_id] = ci;
	alpha_pal_wrval((u_long)&lwp0);
#endif

	/*
	 * Get critical system information (if possible, from the
	 * information provided by the boot program).
	 */
	bootinfo_msg = NULL;
	if (bim == BOOTINFO_MAGIC) {
		if (biv == 0) {		/* backward compat */
			biv = *(u_long *)bip;
			bip += 8;
		}
		switch (biv) {
		case 1: {
			struct bootinfo_v1 *v1p = (struct bootinfo_v1 *)bip;

			bootinfo.ssym = v1p->ssym;
			bootinfo.esym = v1p->esym;
			/* hwrpb may not be provided by boot block in v1 */
			if (v1p->hwrpb != NULL) {
				bootinfo.hwrpb_phys =
				    ((struct rpb *)v1p->hwrpb)->rpb_phys;
				bootinfo.hwrpb_size = v1p->hwrpbsize;
			} else {
				bootinfo.hwrpb_phys =
				    ((struct rpb *)HWRPB_ADDR)->rpb_phys;
				bootinfo.hwrpb_size =
				    ((struct rpb *)HWRPB_ADDR)->rpb_size;
			}
			memcpy(bootinfo.boot_flags, v1p->boot_flags,
			    uimin(sizeof v1p->boot_flags,
			    sizeof bootinfo.boot_flags));
			memcpy(bootinfo.booted_kernel, v1p->booted_kernel,
			    uimin(sizeof v1p->booted_kernel,
			    sizeof bootinfo.booted_kernel));
			/* booted dev not provided in bootinfo */
			init_prom_interface(ptb, (struct rpb *)
			    ALPHA_PHYS_TO_K0SEG(bootinfo.hwrpb_phys));
			prom_getenv(PROM_E_BOOTED_DEV, bootinfo.booted_dev,
			    sizeof bootinfo.booted_dev);
			break;
		}
		default:
			bootinfo_msg = "unknown bootinfo version";
			goto nobootinfo;
		}
	} else {
		bootinfo_msg = "boot program did not pass bootinfo";
nobootinfo:
		bootinfo.ssym = (u_long)_end;
		bootinfo.esym = (u_long)_end;
		bootinfo.hwrpb_phys = ((struct rpb *)HWRPB_ADDR)->rpb_phys;
		bootinfo.hwrpb_size = ((struct rpb *)HWRPB_ADDR)->rpb_size;
		init_prom_interface(ptb, (struct rpb *)HWRPB_ADDR);
		if (alpha_is_qemu) {
			/*
			 * Grab boot flags from kernel command line.
			 * Assume autoboot if not supplied.
			 */
			if (! prom_qemu_getenv("flags", bootinfo.boot_flags,
					       sizeof(bootinfo.boot_flags))) {
				strlcpy(bootinfo.boot_flags, "A",
					sizeof(bootinfo.boot_flags));
			}
		} else {
			prom_getenv(PROM_E_BOOTED_OSFLAGS, bootinfo.boot_flags,
			    sizeof bootinfo.boot_flags);
			prom_getenv(PROM_E_BOOTED_FILE, bootinfo.booted_kernel,
			    sizeof bootinfo.booted_kernel);
			prom_getenv(PROM_E_BOOTED_DEV, bootinfo.booted_dev,
			    sizeof bootinfo.booted_dev);
		}
	}

	/*
	 * Initialize the kernel's mapping of the RPB.  It's needed for
	 * lots of things.
	 */
	hwrpb = (struct rpb *)ALPHA_PHYS_TO_K0SEG(bootinfo.hwrpb_phys);

#if defined(DEC_3000_300) || defined(DEC_3000_500)
	if (hwrpb->rpb_type == ST_DEC_3000_300 ||
	    hwrpb->rpb_type == ST_DEC_3000_500) {
		prom_getenv(PROM_E_SCSIID, dec_3000_scsiid,
		    sizeof(dec_3000_scsiid));
		prom_getenv(PROM_E_SCSIFAST, dec_3000_scsifast,
		    sizeof(dec_3000_scsifast));
	}
#endif

	/*
	 * Remember how many cycles there are per microsecond,
	 * so that we can use delay().  Round up, for safety.
	 */
	cycles_per_usec = (hwrpb->rpb_cc_freq + 999999) / 1000000;

	/*
	 * Initialize the (temporary) bootstrap console interface, so
	 * we can use printf until the VM system starts being set up.
	 * The real console is initialized before then.
	 */
	init_bootstrap_console();

	/* OUTPUT NOW ALLOWED */

	/* delayed from above */
	if (bootinfo_msg)
		printf("WARNING: %s (0x%lx, 0x%lx, 0x%lx)\n",
		    bootinfo_msg, bim, bip, biv);

	/* Initialize the trap vectors on the primary processor. */
	trap_init();

	/*
	 * Find out this system's page size, and initialize
	 * PAGE_SIZE-dependent variables.
	 */
	if (hwrpb->rpb_page_size != ALPHA_PGBYTES)
		panic("page size %lu != %d?!", hwrpb->rpb_page_size,
		    ALPHA_PGBYTES);
	uvmexp.pagesize = hwrpb->rpb_page_size;
	uvm_md_init();

	/*
	 * cputype has been initialized in init_prom_interface().
	 * Perform basic platform initialization using this info.
	 */
	KASSERT(prom_interface_initialized);
	c = platform_lookup(cputype);
	if (c == NULL) {
		platform_not_supported();
		/* NOTREACHED */
	}
	(*c->init)();
	cpu_setmodel("%s", platform.model);

	/*
	 * Initialize the real console, so that the bootstrap console is
	 * no longer necessary.
	 */
	(*platform.cons_init)();

#ifdef DIAGNOSTIC
	/* Paranoid sanity checking */

	/* We should always be running on the primary. */
	assert(hwrpb->rpb_primary_cpu_id == cpu_id);

	/*
	 * On single-CPU systypes, the primary should always be CPU 0,
	 * except on Alpha 8200 systems where the CPU id is related
	 * to the VID, which is related to the Turbo Laser node id.
	 */
	if (cputype != ST_DEC_21000)
		assert(hwrpb->rpb_primary_cpu_id == 0);
#endif

	/* NO MORE FIRMWARE ACCESS ALLOWED */
	/* XXX Unless prom_uses_prom_console() evaluates to non-zero. */

	/*
	 * Find the beginning and end of the kernel (and leave a
	 * bit of space before the beginning for the bootstrap
	 * stack).
	 */
	kernstart = trunc_page((vaddr_t)kernel_text) - 2 * PAGE_SIZE;
#if NKSYMS || defined(DDB) || defined(MODULAR)
	ksym_start = (void *)bootinfo.ssym;
	ksym_end = (void *)bootinfo.esym;
	kernend = (vaddr_t)round_page((vaddr_t)ksym_end);
#else
	kernend = (vaddr_t)round_page((vaddr_t)_end);
#endif

	kernstartpfn = atop(ALPHA_K0SEG_TO_PHYS(kernstart));
	kernendpfn = atop(ALPHA_K0SEG_TO_PHYS(kernend));

	/*
	 * Find out how much memory is available, by looking at
	 * the memory cluster descriptors.  This also tries to do
	 * its best to detect things that have never been seen
	 * before...
	 */
	mddtp = (struct mddt *)(((char *)hwrpb) + hwrpb->rpb_memdat_off);

	/* MDDT SANITY CHECKING */
	mddtweird = 0;
	if (mddtp->mddt_cluster_cnt < 2) {
		mddtweird = 1;
		printf("WARNING: weird number of mem clusters: %lu\n",
		    mddtp->mddt_cluster_cnt);
	}

#if 0
	printf("Memory cluster count: %" PRIu64 "\n", mddtp->mddt_cluster_cnt);
#endif

	for (i = 0; i < mddtp->mddt_cluster_cnt; i++) {
		memc = &mddtp->mddt_clusters[i];
#if 0
		printf("MEMC %d: pfn 0x%lx cnt 0x%lx usage 0x%lx\n", i,
		    memc->mddt_pfn, memc->mddt_pg_cnt, memc->mddt_usage);
#endif
		totalphysmem += memc->mddt_pg_cnt;
		if (mem_cluster_cnt < VM_PHYSSEG_MAX) {	/* XXX */
			mem_clusters[mem_cluster_cnt].start =
			    ptoa(memc->mddt_pfn);
			mem_clusters[mem_cluster_cnt].size =
			    ptoa(memc->mddt_pg_cnt);
			if (memc->mddt_usage & MDDT_mbz ||
			    memc->mddt_usage & MDDT_NONVOLATILE || /* XXX */
			    memc->mddt_usage & MDDT_PALCODE)
				mem_clusters[mem_cluster_cnt].size |=
				    PROT_READ;
			else
				mem_clusters[mem_cluster_cnt].size |=
				    PROT_READ | PROT_WRITE | PROT_EXEC;
			mem_cluster_cnt++;
		}

		if (memc->mddt_usage & MDDT_mbz) {
			mddtweird = 1;
			printf("WARNING: mem cluster %d has weird "
			    "usage 0x%lx\n", i, memc->mddt_usage);
			unknownmem += memc->mddt_pg_cnt;
			continue;
		}
		if (memc->mddt_usage & MDDT_NONVOLATILE) {
			/* XXX should handle these... */
			printf("WARNING: skipping non-volatile mem "
			    "cluster %d\n", i);
			unusedmem += memc->mddt_pg_cnt;
			continue;
		}
		if (memc->mddt_usage & MDDT_PALCODE) {
			resvmem += memc->mddt_pg_cnt;
			continue;
		}

		/*
		 * We have a memory cluster available for system
		 * software use.  We must determine if this cluster
		 * holds the kernel.
		 */

		/*
		 * XXX If the kernel uses the PROM console, we only use the
		 * XXX memory after the kernel in the first system segment,
		 * XXX to avoid clobbering prom mapping, data, etc.
		 */
		physmem += memc->mddt_pg_cnt;
		pfn0 = memc->mddt_pfn;
		pfn1 = memc->mddt_pfn + memc->mddt_pg_cnt;
		if (pfn0 <= kernstartpfn && kernendpfn <= pfn1) {
			/*
			 * Must compute the location of the kernel
			 * within the segment.
			 */
#if 0
			printf("Cluster %d contains kernel\n", i);
#endif
			if (pfn0 < kernstartpfn && !prom_uses_prom_console()) {
				/*
				 * There is a chunk before the kernel.
				 */
#if 0
				printf("Loading chunk before kernel: "
				    "0x%lx / 0x%lx\n", pfn0, kernstartpfn);
#endif
				alpha_page_physload(pfn0, kernstartpfn);
			}
			if (kernendpfn < pfn1) {
				/*
				 * There is a chunk after the kernel.
				 */
#if 0
				printf("Loading chunk after kernel: "
				    "0x%lx / 0x%lx\n", kernendpfn, pfn1);
#endif
				alpha_page_physload(kernendpfn, pfn1);
			}
		} else {
			/*
			 * Just load this cluster as one chunk.
			 */
#if 0
			printf("Loading cluster %d: 0x%lx / 0x%lx\n", i,
			    pfn0, pfn1);
#endif
			alpha_page_physload(pfn0, pfn1);
		}
	}

	/*
	 * Dump out the MDDT if it looks odd...
	 */
	if (mddtweird) {
		printf("\n");
		printf("complete memory cluster information:\n");
		for (i = 0; i < mddtp->mddt_cluster_cnt; i++) {
			printf("mddt %d:\n", i);
			printf("\tpfn %lx\n",
			    mddtp->mddt_clusters[i].mddt_pfn);
			printf("\tcnt %lx\n",
			    mddtp->mddt_clusters[i].mddt_pg_cnt);
			printf("\ttest %lx\n",
			    mddtp->mddt_clusters[i].mddt_pg_test);
			printf("\tbva %lx\n",
			    mddtp->mddt_clusters[i].mddt_v_bitaddr);
			printf("\tbpa %lx\n",
			    mddtp->mddt_clusters[i].mddt_p_bitaddr);
			printf("\tbcksum %lx\n",
			    mddtp->mddt_clusters[i].mddt_bit_cksum);
			printf("\tusage %lx\n",
			    mddtp->mddt_clusters[i].mddt_usage);
		}
		printf("\n");
	}

	if (totalphysmem == 0)
		panic("can't happen: system seems to have no memory!");
	maxmem = physmem;
#if 0
	printf("totalphysmem = %d\n", totalphysmem);
	printf("physmem = %lu\n", physmem);
	printf("resvmem = %d\n", resvmem);
	printf("unusedmem = %d\n", unusedmem);
	printf("unknownmem = %d\n", unknownmem);
#endif

	/*
	 * Initialize error message buffer (at end of core).
	 */
	{
		paddr_t end;
		vsize_t sz = (vsize_t)round_page(MSGBUFSIZE);
		vsize_t reqsz = sz;
		uvm_physseg_t bank;

		bank = uvm_physseg_get_last();

		/* shrink so that it'll fit in the last segment */
		if (uvm_physseg_get_avail_end(bank) - uvm_physseg_get_avail_start(bank) < atop(sz))
			sz = ptoa(uvm_physseg_get_avail_end(bank) - uvm_physseg_get_avail_start(bank));

		end = uvm_physseg_get_end(bank);
		end -= atop(sz);

		uvm_physseg_unplug(end, atop(sz));
		msgbufaddr = (void *) ALPHA_PHYS_TO_K0SEG(ptoa(end));

		initmsgbuf(msgbufaddr, sz);

		/* warn if the message buffer had to be shrunk */
		if (sz != reqsz)
			printf("WARNING: %ld bytes not available for msgbuf "
			    "in last cluster (%ld used)\n", reqsz, sz);

	}

	/*
	 * NOTE: It is safe to use uvm_pageboot_alloc() before
	 * pmap_bootstrap() because our pmap_virtual_space()
	 * returns compile-time constants.
	 */

	/*
	 * Allocate uarea page for lwp0 and set it.
	 */
	v = uvm_pageboot_alloc(UPAGES * PAGE_SIZE);
	uvm_lwp_setuarea(&lwp0, v);

	/*
	 * Initialize the virtual memory system, and set the
	 * page table base register in proc 0's PCB.
	 */
	pmap_bootstrap(ALPHA_PHYS_TO_K0SEG(ptb << PGSHIFT),
	    hwrpb->rpb_max_asn, hwrpb->rpb_pcs_cnt);

	/*
	 * Initialize the rest of lwp0's PCB and cache its physical address.
	 */
	pcb0 = lwp_getpcb(&lwp0);
	lwp0.l_md.md_pcbpaddr = (void *)ALPHA_K0SEG_TO_PHYS((vaddr_t)pcb0);

	/*
	 * Set the kernel sp, reserving space for an (empty) trapframe,
	 * and make lwp0's trapframe pointer point to it for sanity.
	 */
	pcb0->pcb_hw.apcb_ksp = v + USPACE - sizeof(struct trapframe);
	lwp0.l_md.md_tf = (struct trapframe *)pcb0->pcb_hw.apcb_ksp;

	/* Indicate that lwp0 has a CPU. */
	lwp0.l_cpu = ci;

	/*
	 * Look at arguments passed to us and compute boothowto.
	 */

	boothowto = RB_SINGLE;
#ifdef KADB
	boothowto |= RB_KDB;
#endif
	for (p = bootinfo.boot_flags; p && *p != '\0'; p++) {
		/*
		 * Note that we'd really like to differentiate case here,
		 * but the Alpha AXP Architecture Reference Manual
		 * says that we shouldn't.
		 */
		switch (*p) {
		case 'a': /* autoboot */
		case 'A':
			boothowto &= ~RB_SINGLE;
			break;

#ifdef DEBUG
		case 'c': /* crash dump immediately after autoconfig */
		case 'C':
			boothowto |= RB_DUMP;
			break;
#endif

#if defined(KGDB) || defined(DDB)
		case 'd': /* break into the kernel debugger ASAP */
		case 'D':
			boothowto |= RB_KDB;
			break;
#endif

		case 'h': /* always halt, never reboot */
		case 'H':
			boothowto |= RB_HALT;
			break;

#if 0
		case 'm': /* mini root present in memory */
		case 'M':
			boothowto |= RB_MINIROOT;
			break;
#endif

		case 'n': /* askname */
		case 'N':
			boothowto |= RB_ASKNAME;
			break;

		case 's': /* single-user (default, supported for sanity) */
		case 'S':
			boothowto |= RB_SINGLE;
			break;

		case 'q': /* quiet boot */
		case 'Q':
			boothowto |= AB_QUIET;
			break;

		case 'v': /* verbose boot */
		case 'V':
			boothowto |= AB_VERBOSE;
			break;

		case '-':
			/*
			 * Just ignore this.  It's not required, but it's
			 * common for it to be passed regardless.
			 */
			break;

		default:
			printf("Unrecognized boot flag '%c'.\n", *p);
			break;
		}
	}

	/*
	 * Perform any initial kernel patches based on the running system.
	 * We may perform more later if we attach additional CPUs.
	 */
	alpha_patch(false);

	/*
	 * Figure out the number of CPUs in the box, from RPB fields.
	 * Really.  We mean it.
	 */
	for (i = 0; i < hwrpb->rpb_pcs_cnt; i++) {
		struct pcs *pcsp;

		pcsp = LOCATE_PCS(hwrpb, i);
		if ((pcsp->pcs_flags & PCS_PP) != 0)
			ncpus++;
	}

	/*
	 * Initialize debuggers, and break into them if appropriate.
	 */
#if NKSYMS || defined(DDB) || defined(MODULAR)
	ksyms_addsyms_elf((int)((uint64_t)ksym_end - (uint64_t)ksym_start),
	    ksym_start, ksym_end);
#endif

	if (boothowto & RB_KDB) {
#if defined(KGDB)
		kgdb_debug_init = 1;
		kgdb_connect(1);
#elif defined(DDB)
		Debugger();
#endif
	}

#ifdef DIAGNOSTIC
	/*
	 * Check our clock frequency, from RPB fields.
	 */
	if ((hwrpb->rpb_intr_freq >> 12) != 1024)
		printf("WARNING: unbelievable rpb_intr_freq: %ld (%d hz)\n",
		    hwrpb->rpb_intr_freq, hz);
#endif
}

#ifdef MODULAR
/* Push any modules loaded by the boot loader */
void
module_init_md(void)
{
	/* nada. */
}
#endif /* MODULAR */

void
consinit(void)
{

	/*
	 * Everything related to console initialization is done
	 * in alpha_init().
	 */
#if defined(DIAGNOSTIC) && defined(_PROM_MAY_USE_PROM_CONSOLE)
	printf("consinit: %susing prom console\n",
	    prom_uses_prom_console() ? "" : "not ");
#endif
}

void
cpu_startup(void)
{
	extern struct evcnt fpevent_use, fpevent_reuse;
	vaddr_t minaddr, maxaddr;
	char pbuf[9];
#if defined(DEBUG)
	extern int pmapdebug;
	int opmapdebug = pmapdebug;

	pmapdebug = 0;
#endif

	/*
	 * Good {morning,afternoon,evening,night}.
	 */
	printf("%s%s", copyright, version);
	identifycpu();
	format_bytes(pbuf, sizeof(pbuf), ptoa(totalphysmem));
	printf("total memory = %s\n", pbuf);
	format_bytes(pbuf, sizeof(pbuf), ptoa(resvmem));
	printf("(%s reserved for PROM, ", pbuf);
	format_bytes(pbuf, sizeof(pbuf), ptoa(physmem));
	printf("%s used by NetBSD)\n", pbuf);
	if (unusedmem) {
		format_bytes(pbuf, sizeof(pbuf), ptoa(unusedmem));
		printf("WARNING: unused memory = %s\n", pbuf);
	}
	if (unknownmem) {
		format_bytes(pbuf, sizeof(pbuf), ptoa(unknownmem));
		printf("WARNING: %s of memory with unknown purpose\n", pbuf);
	}

	minaddr = 0;

	/*
	 * Allocate a submap for physio
	 */
	phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
	    VM_PHYS_SIZE, 0, false, NULL);

	/*
	 * No need to allocate an mbuf cluster submap.  Mbuf clusters
	 * are allocated via the pool allocator, and we use K0SEG to
	 * map those pages.
	 */

#if defined(DEBUG)
	pmapdebug = opmapdebug;
#endif
	format_bytes(pbuf, sizeof(pbuf), ptoa(uvm_availmem(false)));
	printf("avail memory = %s\n", pbuf);
#if 0
	{
		extern u_long pmap_pages_stolen;

		format_bytes(pbuf, sizeof(pbuf), pmap_pages_stolen * PAGE_SIZE);
		printf("stolen memory for VM structures = %s\n", pbuf);
	}
#endif

	/*
	 * Set up the HWPCB so that it's safe to configure secondary
	 * CPUs.
	 */
	hwrpb_primary_init();

	/*
	 * Initialize some trap event counters.
	 */
	evcnt_attach_dynamic_nozero(&fpevent_use, EVCNT_TYPE_MISC, NULL,
	    "FP", "proc use");
	evcnt_attach_dynamic_nozero(&fpevent_reuse, EVCNT_TYPE_MISC, NULL,
	    "FP", "proc re-use");
}

/*
 * Retrieve the platform name from the DSR.
 */
const char *
alpha_dsr_sysname(void)
{
	struct dsrdb *dsr;
	const char *sysname;

	/*
	 * DSR does not exist on early HWRPB versions.
	 */
	if (hwrpb->rpb_version < HWRPB_DSRDB_MINVERS)
		return (NULL);

	dsr = (struct dsrdb *)(((char *)hwrpb) + hwrpb->rpb_dsrdb_off);
	sysname = (const char *)((char *)dsr + (dsr->dsr_sysname_off +
	    sizeof(uint64_t)));
	return (sysname);
}

/*
 * Look up the specified system variation in the provided table,
 * returning the model string on match.
 */
const char *
alpha_variation_name(uint64_t variation, const struct alpha_variation_table *avtp)
{
	int i;

	for (i = 0; avtp[i].avt_model != NULL; i++)
		if (avtp[i].avt_variation == variation)
			return (avtp[i].avt_model);
	return (NULL);
}

/*
 * Generate a default platform name for unknown system variations.
 */
const char *
alpha_unknown_sysname(void)
{
	static char s[128];		/* safe size */

	snprintf(s, sizeof(s), "%s family, unknown model variation 0x%lx",
	    platform.family, hwrpb->rpb_variation & SV_ST_MASK);
	return ((const char *)s);
}

void
identifycpu(void)
{
	const char *s;
	int i;

	/*
	 * print out CPU identification information.
	 */
	printf("%s", cpu_getmodel());
	for (s = cpu_getmodel(); *s; ++s)
		if (strncasecmp(s, "MHz", 3) == 0)
			goto skipMHz;
	printf(", %ldMHz", hwrpb->rpb_cc_freq / 1000000);
skipMHz:
	printf(", s/n ");
	for (i = 0; i < 10; i++)
		printf("%c", hwrpb->rpb_ssn[i]);
	printf("\n");
	printf("%ld byte page size, %d processor%s.\n",
	    hwrpb->rpb_page_size, ncpus, ncpus == 1 ? "" : "s");
}

int	waittime = -1;
struct pcb dumppcb;

void
cpu_reboot(int howto, char *bootstr)
{
#if defined(MULTIPROCESSOR)
	u_long cpu_id = cpu_number();
	u_long wait_mask;
	int i;
#endif

	/* If "always halt" was specified as a boot flag, obey. */
	if ((boothowto & RB_HALT) != 0)
		howto |= RB_HALT;

	boothowto = howto;

	/* If system is cold, just halt. */
	if (cold) {
		boothowto |= RB_HALT;
		goto haltsys;
	}

	if ((boothowto & RB_NOSYNC) == 0 && waittime < 0) {
		waittime = 0;
		vfs_shutdown();
		/*
		 * If we've been adjusting the clock, the todr
		 * will be out of synch; adjust it now.
		 */
		resettodr();
	}

	/* Disable interrupts. */
	splhigh();

#if defined(MULTIPROCESSOR)
	/*
	 * Halt all other CPUs.  If we're not the primary, the
	 * primary will spin, waiting for us to halt.
	 */
	cpu_id = cpu_number();		/* may have changed cpu */
	wait_mask = (1UL << cpu_id) | (1UL << hwrpb->rpb_primary_cpu_id);

	alpha_broadcast_ipi(ALPHA_IPI_HALT);

	/* Ensure any CPUs paused by DDB resume execution so they can halt */
	cpus_paused = 0;

	for (i = 0; i < 10000; i++) {
		alpha_mb();
		if (cpus_running == wait_mask)
			break;
		delay(1000);
	}
	alpha_mb();
	if (cpus_running != wait_mask)
		printf("WARNING: Unable to halt secondary CPUs (0x%lx)\n",
		    cpus_running);
#endif /* MULTIPROCESSOR */

	/* If rebooting and a dump is requested do it. */
#if 0
	if ((boothowto & (RB_DUMP | RB_HALT)) == RB_DUMP)
#else
	if (boothowto & RB_DUMP)
#endif
		dumpsys();

haltsys:

	/* run any shutdown hooks */
	doshutdownhooks();

	pmf_system_shutdown(boothowto);

#ifdef BOOTKEY
	printf("hit any key to %s...\n", howto & RB_HALT ? "halt" : "reboot");
	cnpollc(1);	/* for proper keyboard command handling */
	cngetc();
	cnpollc(0);
	printf("\n");
#endif

	/* Finally, powerdown/halt/reboot the system. */
	if ((boothowto & RB_POWERDOWN) == RB_POWERDOWN &&
	    platform.powerdown != NULL) {
		(*platform.powerdown)();
		printf("WARNING: powerdown failed!\n");
	}
	printf("%s\n\n", (boothowto & RB_HALT) ? "halted." : "rebooting...");
#if defined(MULTIPROCESSOR)
	if (cpu_id != hwrpb->rpb_primary_cpu_id)
		cpu_halt();
	else
#endif
		prom_halt(boothowto & RB_HALT);
	/*NOTREACHED*/
}

/*
 * These variables are needed by /sbin/savecore
 */
uint32_t dumpmag = 0x8fca0101;	/* magic number */
int	dumpsize = 0;		/* pages */
long	dumplo = 0;		/* blocks */

/*
 * cpu_dumpsize: calculate size of machine-dependent kernel core dump headers.
 */
int
cpu_dumpsize(void)
{
	int size;

	size = ALIGN(sizeof(kcore_seg_t)) + ALIGN(sizeof(cpu_kcore_hdr_t)) +
	    ALIGN(mem_cluster_cnt * sizeof(phys_ram_seg_t));
	if (roundup(size, dbtob(1)) != dbtob(1))
		return -1;

	return (1);
}
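
/*
 * Sizing sketch (illustrative, assuming a 512-byte disk block): the
 * header is one ALIGN()ed kcore_seg_t, one cpu_kcore_hdr_t, and one
 * phys_ram_seg_t per memory cluster.  A handful of clusters fits
 * easily within the single dbtob(1) block returned above; if the
 * headers would ever need more than one block, -1 is returned and
 * dumping is disabled rather than writing a layout that savecore
 * could not parse.
 */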

/*
 * cpu_dump_mempagecnt: calculate size of RAM (in pages) to be dumped.
 */
u_long
cpu_dump_mempagecnt(void)
{
	u_long i, n;

	n = 0;
	for (i = 0; i < mem_cluster_cnt; i++)
		n += atop(mem_clusters[i].size);
	return (n);
}

/*
 * cpu_dump: dump machine-dependent kernel core dump headers.
 */
int
cpu_dump(void)
{
	int (*dump)(dev_t, daddr_t, void *, size_t);
	char buf[dbtob(1)];
	kcore_seg_t *segp;
	cpu_kcore_hdr_t *cpuhdrp;
	phys_ram_seg_t *memsegp;
	const struct bdevsw *bdev;
	int i;

	bdev = bdevsw_lookup(dumpdev);
	if (bdev == NULL)
		return (ENXIO);
	dump = bdev->d_dump;

	memset(buf, 0, sizeof buf);
	segp = (kcore_seg_t *)buf;
	cpuhdrp = (cpu_kcore_hdr_t *)&buf[ALIGN(sizeof(*segp))];
	memsegp = (phys_ram_seg_t *)&buf[ALIGN(sizeof(*segp)) +
	    ALIGN(sizeof(*cpuhdrp))];

	/*
	 * Generate a segment header.
	 */
	CORE_SETMAGIC(*segp, KCORE_MAGIC, MID_MACHINE, CORE_CPU);
	segp->c_size = dbtob(1) - ALIGN(sizeof(*segp));

	/*
	 * Add the machine-dependent header info.
	 */
	cpuhdrp->lev1map_pa = ALPHA_K0SEG_TO_PHYS((vaddr_t)kernel_lev1map);
	cpuhdrp->page_size = PAGE_SIZE;
	cpuhdrp->nmemsegs = mem_cluster_cnt;

	/*
	 * Fill in the memory segment descriptors.
	 */
	for (i = 0; i < mem_cluster_cnt; i++) {
		memsegp[i].start = mem_clusters[i].start;
		memsegp[i].size = mem_clusters[i].size & ~PAGE_MASK;
	}

	return (dump(dumpdev, dumplo, (void *)buf, dbtob(1)));
}

/*
 * This is called by main to set dumplo and dumpsize.
 * Dumps always skip the first PAGE_SIZE of disk space
 * in case there might be a disk label stored there.
 * If there is extra space, put dump at the end to
 * reduce the chance that swapping trashes it.
 */
void
cpu_dumpconf(void)
{
	int nblks, dumpblks;	/* size of dump area */

	if (dumpdev == NODEV)
		goto bad;
	nblks = bdev_size(dumpdev);
	if (nblks <= ctod(1))
		goto bad;

	dumpblks = cpu_dumpsize();
	if (dumpblks < 0)
		goto bad;
	dumpblks += ctod(cpu_dump_mempagecnt());

	/* If dump won't fit (incl. room for possible label), punt. */
	if (dumpblks > (nblks - ctod(1)))
		goto bad;

	/* Put dump at end of partition */
	dumplo = nblks - dumpblks;

	/* dumpsize is in page units, and doesn't include headers. */
	dumpsize = cpu_dump_mempagecnt();
	return;

bad:
	dumpsize = 0;
	return;
}
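
/*
 * Layout sketch (illustrative): for a dump partition of nblks disk
 * blocks, the dump lands at the very end:
 *
 *	dumplo = nblks - (cpu_dumpsize() + ctod(cpu_dump_mempagecnt()))
 *
 * which leaves at least ctod(1) blocks free at the front for a
 * possible disklabel and keeps early swap activity away from the dump.
 */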

/*
 * Dump the kernel's image to the swap partition.
 */
#define	BYTES_PER_DUMP	PAGE_SIZE

void
dumpsys(void)
{
	const struct bdevsw *bdev;
	u_long totalbytesleft, bytes, i, n, memcl;
	u_long maddr;
	int psize;
	daddr_t blkno;
	int (*dump)(dev_t, daddr_t, void *, size_t);
	int error;

	/* Save registers. */
	savectx(&dumppcb);

	if (dumpdev == NODEV)
		return;
	bdev = bdevsw_lookup(dumpdev);
	if (bdev == NULL || bdev->d_psize == NULL)
		return;

	/*
	 * For dumps during autoconfiguration,
	 * if dump device has already been configured...
	 */
	if (dumpsize == 0)
		cpu_dumpconf();
	if (dumplo <= 0) {
		printf("\ndump to dev %u,%u not possible\n",
		    major(dumpdev), minor(dumpdev));
		return;
	}
	printf("\ndumping to dev %u,%u offset %ld\n",
	    major(dumpdev), minor(dumpdev), dumplo);

	psize = bdev_size(dumpdev);
	printf("dump ");
	if (psize == -1) {
		printf("area unavailable\n");
		return;
	}

	/* XXX should purge all outstanding keystrokes. */

	if ((error = cpu_dump()) != 0)
		goto err;

	totalbytesleft = ptoa(cpu_dump_mempagecnt());
	blkno = dumplo + cpu_dumpsize();
	dump = bdev->d_dump;
	error = 0;

	for (memcl = 0; memcl < mem_cluster_cnt; memcl++) {
		maddr = mem_clusters[memcl].start;
		bytes = mem_clusters[memcl].size & ~PAGE_MASK;

		for (i = 0; i < bytes; i += n, totalbytesleft -= n) {

			/* Print out how many MBs we have to go. */
			if ((totalbytesleft % (1024*1024)) == 0)
				printf_nolog("%ld ",
				    totalbytesleft / (1024 * 1024));

			/* Limit size for next transfer. */
			n = bytes - i;
			if (n > BYTES_PER_DUMP)
				n = BYTES_PER_DUMP;

			error = (*dump)(dumpdev, blkno,
			    (void *)ALPHA_PHYS_TO_K0SEG(maddr), n);
			if (error)
				goto err;
			maddr += n;
			blkno += btodb(n);	/* XXX? */

			/* XXX should look for keystrokes, to cancel. */
		}
	}

err:
	switch (error) {

	case ENXIO:
		printf("device bad\n");
		break;

	case EFAULT:
		printf("device not ready\n");
		break;

	case EINVAL:
		printf("area improper\n");
		break;

	case EIO:
		printf("i/o error\n");
		break;

	case EINTR:
		printf("aborted from console\n");
		break;

	case 0:
		printf("succeeded\n");
		break;

	default:
		printf("error %d\n", error);
		break;
	}
	printf("\n\n");
	delay(1000);
}

void
frametoreg(const struct trapframe *framep, struct reg *regp)
{

	regp->r_regs[R_V0] = framep->tf_regs[FRAME_V0];
	regp->r_regs[R_T0] = framep->tf_regs[FRAME_T0];
	regp->r_regs[R_T1] = framep->tf_regs[FRAME_T1];
	regp->r_regs[R_T2] = framep->tf_regs[FRAME_T2];
	regp->r_regs[R_T3] = framep->tf_regs[FRAME_T3];
	regp->r_regs[R_T4] = framep->tf_regs[FRAME_T4];
	regp->r_regs[R_T5] = framep->tf_regs[FRAME_T5];
	regp->r_regs[R_T6] = framep->tf_regs[FRAME_T6];
	regp->r_regs[R_T7] = framep->tf_regs[FRAME_T7];
	regp->r_regs[R_S0] = framep->tf_regs[FRAME_S0];
	regp->r_regs[R_S1] = framep->tf_regs[FRAME_S1];
	regp->r_regs[R_S2] = framep->tf_regs[FRAME_S2];
	regp->r_regs[R_S3] = framep->tf_regs[FRAME_S3];
	regp->r_regs[R_S4] = framep->tf_regs[FRAME_S4];
	regp->r_regs[R_S5] = framep->tf_regs[FRAME_S5];
	regp->r_regs[R_S6] = framep->tf_regs[FRAME_S6];
	regp->r_regs[R_A0] = framep->tf_regs[FRAME_A0];
	regp->r_regs[R_A1] = framep->tf_regs[FRAME_A1];
	regp->r_regs[R_A2] = framep->tf_regs[FRAME_A2];
	regp->r_regs[R_A3] = framep->tf_regs[FRAME_A3];
	regp->r_regs[R_A4] = framep->tf_regs[FRAME_A4];
	regp->r_regs[R_A5] = framep->tf_regs[FRAME_A5];
	regp->r_regs[R_T8] = framep->tf_regs[FRAME_T8];
	regp->r_regs[R_T9] = framep->tf_regs[FRAME_T9];
	regp->r_regs[R_T10] = framep->tf_regs[FRAME_T10];
	regp->r_regs[R_T11] = framep->tf_regs[FRAME_T11];
	regp->r_regs[R_RA] = framep->tf_regs[FRAME_RA];
	regp->r_regs[R_T12] = framep->tf_regs[FRAME_T12];
	regp->r_regs[R_AT] = framep->tf_regs[FRAME_AT];
	regp->r_regs[R_GP] = framep->tf_regs[FRAME_GP];
	/* regp->r_regs[R_SP] = framep->tf_regs[FRAME_SP]; XXX */
	regp->r_regs[R_ZERO] = 0;
}

void
regtoframe(const struct reg *regp, struct trapframe *framep)
{

	framep->tf_regs[FRAME_V0] = regp->r_regs[R_V0];
	framep->tf_regs[FRAME_T0] = regp->r_regs[R_T0];
	framep->tf_regs[FRAME_T1] = regp->r_regs[R_T1];
	framep->tf_regs[FRAME_T2] = regp->r_regs[R_T2];
	framep->tf_regs[FRAME_T3] = regp->r_regs[R_T3];
	framep->tf_regs[FRAME_T4] = regp->r_regs[R_T4];
	framep->tf_regs[FRAME_T5] = regp->r_regs[R_T5];
	framep->tf_regs[FRAME_T6] = regp->r_regs[R_T6];
	framep->tf_regs[FRAME_T7] = regp->r_regs[R_T7];
	framep->tf_regs[FRAME_S0] = regp->r_regs[R_S0];
	framep->tf_regs[FRAME_S1] = regp->r_regs[R_S1];
	framep->tf_regs[FRAME_S2] = regp->r_regs[R_S2];
	framep->tf_regs[FRAME_S3] = regp->r_regs[R_S3];
	framep->tf_regs[FRAME_S4] = regp->r_regs[R_S4];
	framep->tf_regs[FRAME_S5] = regp->r_regs[R_S5];
	framep->tf_regs[FRAME_S6] = regp->r_regs[R_S6];
	framep->tf_regs[FRAME_A0] = regp->r_regs[R_A0];
	framep->tf_regs[FRAME_A1] = regp->r_regs[R_A1];
	framep->tf_regs[FRAME_A2] = regp->r_regs[R_A2];
	framep->tf_regs[FRAME_A3] = regp->r_regs[R_A3];
	framep->tf_regs[FRAME_A4] = regp->r_regs[R_A4];
	framep->tf_regs[FRAME_A5] = regp->r_regs[R_A5];
	framep->tf_regs[FRAME_T8] = regp->r_regs[R_T8];
	framep->tf_regs[FRAME_T9] = regp->r_regs[R_T9];
	framep->tf_regs[FRAME_T10] = regp->r_regs[R_T10];
	framep->tf_regs[FRAME_T11] = regp->r_regs[R_T11];
	framep->tf_regs[FRAME_RA] = regp->r_regs[R_RA];
	framep->tf_regs[FRAME_T12] = regp->r_regs[R_T12];
	framep->tf_regs[FRAME_AT] = regp->r_regs[R_AT];
	framep->tf_regs[FRAME_GP] = regp->r_regs[R_GP];
	/* framep->tf_regs[FRAME_SP] = regp->r_regs[R_SP]; XXX */
	/* ??? = regp->r_regs[R_ZERO]; */
}

void
printregs(struct reg *regp)
{
	int i;

	for (i = 0; i < 32; i++)
		printf("R%d:\t0x%016lx%s", i, regp->r_regs[i],
		    i & 1 ? "\n" : "\t");
}

void
regdump(struct trapframe *framep)
{
	struct reg reg;

	frametoreg(framep, &reg);
	reg.r_regs[R_SP] = alpha_pal_rdusp();

	printf("REGISTERS:\n");
	printregs(&reg);
}



void *
getframe(const struct lwp *l, int sig, int *onstack)
{
	void *frame;

	/* Do we need to jump onto the signal stack? */
	*onstack =
	    (l->l_sigstk.ss_flags & (SS_DISABLE | SS_ONSTACK)) == 0 &&
	    (SIGACTION(l->l_proc, sig).sa_flags & SA_ONSTACK) != 0;

	if (*onstack)
		frame = (void *)((char *)l->l_sigstk.ss_sp +
		    l->l_sigstk.ss_size);
	else
		frame = (void *)(alpha_pal_rdusp());
	return (frame);
}

void
buildcontext(struct lwp *l, const void *catcher, const void *tramp, const void *fp)
{
	struct trapframe *tf = l->l_md.md_tf;

	tf->tf_regs[FRAME_RA] = (uint64_t)tramp;
	tf->tf_regs[FRAME_PC] = (uint64_t)catcher;
	tf->tf_regs[FRAME_T12] = (uint64_t)catcher;
	alpha_pal_wrusp((unsigned long)fp);
}


/*
 * Send an interrupt to process, new style
 */
void
sendsig_siginfo(const ksiginfo_t *ksi, const sigset_t *mask)
{
	struct lwp *l = curlwp;
	struct proc *p = l->l_proc;
	struct sigacts *ps = p->p_sigacts;
	int onstack, sig = ksi->ksi_signo, error;
	struct sigframe_siginfo *fp, frame;
	struct trapframe *tf;
	sig_t catcher = SIGACTION(p, ksi->ksi_signo).sa_handler;

	fp = (struct sigframe_siginfo *)getframe(l, ksi->ksi_signo, &onstack);
	tf = l->l_md.md_tf;

	/* Allocate space for the signal handler context. */
	fp--;

#ifdef DEBUG
	if ((sigdebug & SDB_KSTACK) && p->p_pid == sigpid)
		printf("sendsig_siginfo(%d): sig %d ssp %p usp %p\n", p->p_pid,
		    sig, &onstack, fp);
#endif

	/* Build stack frame for signal trampoline. */
	memset(&frame, 0, sizeof(frame));
	frame.sf_si._info = ksi->ksi_info;
	frame.sf_uc.uc_flags = _UC_SIGMASK;
	frame.sf_uc.uc_sigmask = *mask;
	frame.sf_uc.uc_link = l->l_ctxlink;
	sendsig_reset(l, sig);
	mutex_exit(p->p_lock);
	cpu_getmcontext(l, &frame.sf_uc.uc_mcontext, &frame.sf_uc.uc_flags);
	error = copyout(&frame, fp, sizeof(frame));
	mutex_enter(p->p_lock);

	if (error != 0) {
		/*
		 * Process has trashed its stack; give it an illegal
		 * instruction to halt it in its tracks.
		 */
#ifdef DEBUG
		if ((sigdebug & SDB_KSTACK) && p->p_pid == sigpid)
			printf("sendsig_siginfo(%d): copyout failed on sig %d\n",
			    p->p_pid, sig);
#endif
		sigexit(l, SIGILL);
		/* NOTREACHED */
	}

#ifdef DEBUG
	if (sigdebug & SDB_FOLLOW)
		printf("sendsig_siginfo(%d): sig %d usp %p code %x\n",
		    p->p_pid, sig, fp, ksi->ksi_code);
#endif

	/*
	 * Set up the registers to directly invoke the signal handler.  The
	 * signal trampoline is then used to return from the signal.  Note
	 * the trampoline version numbers are coordinated with machine-
	 * dependent code in libc.
	 */

	tf->tf_regs[FRAME_A0] = sig;
	tf->tf_regs[FRAME_A1] = (uint64_t)&fp->sf_si;
	tf->tf_regs[FRAME_A2] = (uint64_t)&fp->sf_uc;

	buildcontext(l, catcher, ps->sa_sigdesc[sig].sd_tramp, fp);

	/* Remember that we're now on the signal stack. */
	if (onstack)
		l->l_sigstk.ss_flags |= SS_ONSTACK;

#ifdef DEBUG
	if (sigdebug & SDB_FOLLOW)
		printf("sendsig_siginfo(%d): pc %lx, catcher %lx\n", p->p_pid,
		    tf->tf_regs[FRAME_PC], tf->tf_regs[FRAME_A3]);
	if ((sigdebug & SDB_KSTACK) && p->p_pid == sigpid)
		printf("sendsig_siginfo(%d): sig %d returns\n",
		    p->p_pid, sig);
#endif
}

/*
 * machine dependent system variables.
 */
SYSCTL_SETUP(sysctl_machdep_setup, "sysctl machdep subtree setup")
{

	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT,
	    CTLTYPE_NODE, "machdep", NULL,
	    NULL, 0, NULL, 0,
	    CTL_MACHDEP, CTL_EOL);

	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT,
	    CTLTYPE_STRUCT, "console_device", NULL,
	    sysctl_consdev, 0, NULL, sizeof(dev_t),
	    CTL_MACHDEP, CPU_CONSDEV, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT,
	    CTLTYPE_STRING, "root_device", NULL,
	    sysctl_root_device, 0, NULL, 0,
	    CTL_MACHDEP, CPU_ROOT_DEVICE, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
	    CTLTYPE_INT, "unaligned_print",
	    SYSCTL_DESCR("Warn about unaligned accesses"),
	    NULL, 0, &alpha_unaligned_print, 0,
	    CTL_MACHDEP, CPU_UNALIGNED_PRINT, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
	    CTLTYPE_INT, "unaligned_fix",
	    SYSCTL_DESCR("Fix up unaligned accesses"),
	    NULL, 0, &alpha_unaligned_fix, 0,
	    CTL_MACHDEP, CPU_UNALIGNED_FIX, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
	    CTLTYPE_INT, "unaligned_sigbus",
	    SYSCTL_DESCR("Do SIGBUS for fixed unaligned accesses"),
	    NULL, 0, &alpha_unaligned_sigbus, 0,
	    CTL_MACHDEP, CPU_UNALIGNED_SIGBUS, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT,
	    CTLTYPE_STRING, "booted_kernel", NULL,
	    NULL, 0, bootinfo.booted_kernel, 0,
	    CTL_MACHDEP, CPU_BOOTED_KERNEL, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
	    CTLTYPE_INT, "fp_sync_complete", NULL,
	    NULL, 0, &alpha_fp_sync_complete, 0,
	    CTL_MACHDEP, CPU_FP_SYNC_COMPLETE, CTL_EOL);
}

/*
 * Set registers on exec.
 */
void
setregs(register struct lwp *l, struct exec_package *pack, vaddr_t stack)
{
	struct trapframe *tfp = l->l_md.md_tf;
	struct pcb *pcb;
#ifdef DEBUG
	int i;
#endif

#ifdef DEBUG
	/*
	 * Crash and dump, if the user requested it.
	 */
	if (boothowto & RB_DUMP)
		panic("crash requested by boot flags");
#endif

#ifdef DEBUG
	for (i = 0; i < FRAME_SIZE; i++)
		tfp->tf_regs[i] = 0xbabefacedeadbeef;
#else
	memset(tfp->tf_regs, 0, FRAME_SIZE * sizeof tfp->tf_regs[0]);
#endif
	pcb = lwp_getpcb(l);
	memset(&pcb->pcb_fp, 0, sizeof(pcb->pcb_fp));
	alpha_pal_wrusp(stack);
	tfp->tf_regs[FRAME_PS] = ALPHA_PSL_USERSET;
	tfp->tf_regs[FRAME_PC] = pack->ep_entry & ~3;

	tfp->tf_regs[FRAME_A0] = stack;			/* a0 = sp */
	tfp->tf_regs[FRAME_A1] = 0;			/* a1 = rtld cleanup */
	tfp->tf_regs[FRAME_A2] = 0;			/* a2 = rtld object */
	tfp->tf_regs[FRAME_A3] = l->l_proc->p_psstrp;	/* a3 = ps_strings */
	tfp->tf_regs[FRAME_T12] = tfp->tf_regs[FRAME_PC];	/* a.k.a. PV */

	if (__predict_true((l->l_md.md_flags & IEEE_INHERIT) == 0)) {
		l->l_md.md_flags &= ~MDLWP_FP_C;
		pcb->pcb_fp.fpr_cr = FPCR_DYN(FP_RN);
	}
}

void	(*alpha_delay_fn)(unsigned long);

/*
 * Wait "n" microseconds.
 */
void
delay(unsigned long n)
{
	unsigned long pcc0, pcc1, curcycle, cycles, usec;

	if (n == 0)
		return;

	/*
	 * If we have an alternative delay function, go ahead and
	 * use it.
	 */
	if (alpha_delay_fn != NULL) {
		(*alpha_delay_fn)(n);
		return;
	}

	pcc0 = alpha_rpcc() & 0xffffffffUL;
	cycles = 0;
	usec = 0;

	while (usec <= n) {
		/*
		 * Get the next CPU cycle count - assumes that we cannot
		 * have had more than one 32 bit overflow.
		 */
		pcc1 = alpha_rpcc() & 0xffffffffUL;
		if (pcc1 < pcc0)
			curcycle = (pcc1 + 0x100000000UL) - pcc0;
		else
			curcycle = pcc1 - pcc0;

		/*
		 * We now have the number of processor cycles since we
		 * last checked.  Add the current cycle count to the
		 * running total.  If it's over cycles_per_usec, increment
		 * the usec counter.
		 */
		cycles += curcycle;
		while (cycles > cycles_per_usec) {
			usec++;
			cycles -= cycles_per_usec;
		}
		pcc0 = pcc1;
	}
}
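
/*
 * Usage sketch: delay(10) busy-waits for roughly ten microseconds by
 * polling the low 32 bits of the process cycle counter (RPCC) and
 * crediting whole microseconds from cycles_per_usec, which was derived
 * from hwrpb->rpb_cc_freq in alpha_init().  Since only the low 32 bits
 * of the counter are architected, the loop assumes at most one 32-bit
 * wrap between successive samples.
 */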

#ifdef EXEC_ECOFF
void
cpu_exec_ecoff_setregs(struct lwp *l, struct exec_package *epp, vaddr_t stack)
{
	struct ecoff_exechdr *execp = (struct ecoff_exechdr *)epp->ep_hdr;

	l->l_md.md_tf->tf_regs[FRAME_GP] = execp->a.gp_value;
}

/*
 * cpu_exec_ecoff_hook():
 *	cpu-dependent ECOFF format hook for execve().
 *
 * Do any machine-dependent diddling of the exec package when doing ECOFF.
 *
 */
int
cpu_exec_ecoff_probe(struct lwp *l, struct exec_package *epp)
{
	struct ecoff_exechdr *execp = (struct ecoff_exechdr *)epp->ep_hdr;
	int error;

	if (execp->f.f_magic == ECOFF_MAGIC_NETBSD_ALPHA)
		error = 0;
	else
		error = ENOEXEC;

	return (error);
}
#endif /* EXEC_ECOFF */

int
mm_md_physacc(paddr_t pa, vm_prot_t prot)
{
	u_quad_t size;
	int i;

	for (i = 0; i < mem_cluster_cnt; i++) {
		if (pa < mem_clusters[i].start)
			continue;
		size = mem_clusters[i].size & ~PAGE_MASK;
		if (pa >= (mem_clusters[i].start + size))
			continue;
		if ((prot & mem_clusters[i].size & PAGE_MASK) == prot)
			return 0;
	}
	return EFAULT;
}
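
/*
 * Note: mem_clusters[].size overloads its low bits with PROT_* flags
 * (set up when the MDDT is scanned in alpha_init()), which is why the
 * size is masked with ~PAGE_MASK above while the permission check
 * masks with PAGE_MASK.  A PALcode cluster, for example, carries only
 * PROT_READ and therefore fails a PROT_WRITE access check here.
 */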

bool
mm_md_direct_mapped_io(void *addr, paddr_t *paddr)
{
	vaddr_t va = (vaddr_t)addr;

	if (va >= ALPHA_K0SEG_BASE && va <= ALPHA_K0SEG_END) {
		*paddr = ALPHA_K0SEG_TO_PHYS(va);
		return true;
	}
	return false;
}

bool
mm_md_direct_mapped_phys(paddr_t paddr, vaddr_t *vaddr)
{

	*vaddr = ALPHA_PHYS_TO_K0SEG(paddr);
	return true;
}

char *
dot_conv(unsigned long x)
{
	int i;
	char *xc;
	static int next;
	static char space[2][20];

	xc = space[next ^= 1] + sizeof space[0];
	*--xc = '\0';
	for (i = 0;; ++i) {
		if (i && (i & 3) == 0)
			*--xc = '.';
		*--xc = hexdigits[x & 0xf];
		x >>= 4;
		if (x == 0)
			break;
	}
	return xc;
}
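
/*
 * Example (illustrative): dot_conv(0xdeadbeef) returns "dead.beef",
 * i.e. hex digits in dot-separated groups of four.  Two static buffers
 * are alternated (next ^= 1) so that two calls may safely appear in
 * the same printf() argument list.
 */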

void
cpu_getmcontext(struct lwp *l, mcontext_t *mcp, unsigned int *flags)
{
	struct trapframe *frame = l->l_md.md_tf;
	struct pcb *pcb = lwp_getpcb(l);
	__greg_t *gr = mcp->__gregs;
	__greg_t ras_pc;

	/* Save register context. */
	frametoreg(frame, (struct reg *)gr);
	/* XXX if there's a better, general way to get the USP of
	 * an LWP that might or might not be curlwp, I'd like to know
	 * about it.
	 */
	if (l == curlwp) {
		gr[_REG_SP] = alpha_pal_rdusp();
		gr[_REG_UNIQUE] = alpha_pal_rdunique();
	} else {
		gr[_REG_SP] = pcb->pcb_hw.apcb_usp;
		gr[_REG_UNIQUE] = pcb->pcb_hw.apcb_unique;
	}
	gr[_REG_PC] = frame->tf_regs[FRAME_PC];
	gr[_REG_PS] = frame->tf_regs[FRAME_PS];

	if ((ras_pc = (__greg_t)ras_lookup(l->l_proc,
	    (void *) gr[_REG_PC])) != -1)
		gr[_REG_PC] = ras_pc;

	*flags |= _UC_CPU | _UC_TLSBASE;

	/* Save floating point register context, if any, and copy it. */
	if (fpu_valid_p(l)) {
		fpu_save(l);
		(void)memcpy(&mcp->__fpregs, &pcb->pcb_fp,
		    sizeof (mcp->__fpregs));
		mcp->__fpregs.__fp_fpcr = alpha_read_fp_c(l);
		*flags |= _UC_FPU;
	}
}

int
cpu_mcontext_validate(struct lwp *l, const mcontext_t *mcp)
{
	const __greg_t *gr = mcp->__gregs;

	if ((gr[_REG_PS] & ALPHA_PSL_USERSET) != ALPHA_PSL_USERSET ||
	    (gr[_REG_PS] & ALPHA_PSL_USERCLR) != 0)
		return EINVAL;

	return 0;
}

int
cpu_setmcontext(struct lwp *l, const mcontext_t *mcp, unsigned int flags)
{
	struct trapframe *frame = l->l_md.md_tf;
	struct pcb *pcb = lwp_getpcb(l);
	const __greg_t *gr = mcp->__gregs;
	int error;

	/* Restore register context, if any. */
	if (flags & _UC_CPU) {
		/* Check for security violations first. */
		error = cpu_mcontext_validate(l, mcp);
		if (error)
			return error;

		regtoframe((const struct reg *)gr, l->l_md.md_tf);
		if (l == curlwp)
			alpha_pal_wrusp(gr[_REG_SP]);
		else
			pcb->pcb_hw.apcb_usp = gr[_REG_SP];
		frame->tf_regs[FRAME_PC] = gr[_REG_PC];
		frame->tf_regs[FRAME_PS] = gr[_REG_PS];
	}
	if (flags & _UC_TLSBASE)
		lwp_setprivate(l, (void *)(uintptr_t)gr[_REG_UNIQUE]);
	/* Restore floating point register context, if any. */
	if (flags & _UC_FPU) {
		/* If we have an FP register context, get rid of it. */
		fpu_discard(l, true);
		(void)memcpy(&pcb->pcb_fp, &mcp->__fpregs,
		    sizeof (pcb->pcb_fp));
		l->l_md.md_flags = mcp->__fpregs.__fp_fpcr & MDLWP_FP_C;
	}

	return (0);
}

static void
cpu_kick(struct cpu_info * const ci)
{
#if defined(MULTIPROCESSOR)
	alpha_send_ipi(ci->ci_cpuid, ALPHA_IPI_AST);
#endif /* MULTIPROCESSOR */
}

/*
 * Preempt the current process if in interrupt from user mode,
 * or after the current trap/syscall if in system mode.
 */
void
cpu_need_resched(struct cpu_info *ci, struct lwp *l, int flags)
{

	KASSERT(kpreempt_disabled());

	if ((flags & RESCHED_IDLE) != 0) {
		/*
		 * Nothing to do here; we are not currently using WTINT
		 * in cpu_idle().
		 */
		return;
	}

	/* XXX RESCHED_KPREEMPT XXX */

	KASSERT((flags & RESCHED_UPREEMPT) != 0);
	if ((flags & RESCHED_REMOTE) != 0) {
		cpu_kick(ci);
	} else {
		aston(l);
	}
}

/*
 * Notify the current lwp (l) that it has a signal pending,
 * process as soon as possible.
 */
void
cpu_signotify(struct lwp *l)
{

	KASSERT(kpreempt_disabled());

	if (l->l_cpu != curcpu()) {
		cpu_kick(l->l_cpu);
	} else {
		aston(l);
	}
}

/*
 * Give a profiling tick to the current process when the user profiling
 * buffer pages are invalid.  On the alpha, request an AST to send us
 * through trap, marking the proc as needing a profiling tick.
 */
void
cpu_need_proftick(struct lwp *l)
{

	KASSERT(kpreempt_disabled());
	KASSERT(l->l_cpu == curcpu());

	l->l_pflag |= LP_OWEUPC;
	aston(l);
}