1 /* $NetBSD: machdep.c,v 1.82 1997/08/11 22:46:37 cgd Exp $ */
2
3 /*
4 * Copyright (c) 1994, 1995, 1996 Carnegie-Mellon University.
5 * All rights reserved.
6 *
7 * Author: Chris G. Demetriou
8 *
9 * Permission to use, copy, modify and distribute this software and
10 * its documentation is hereby granted, provided that both the copyright
11 * notice and this permission notice appear in all copies of the
12 * software, derivative works or modified versions, and any portions
13 * thereof, and that both notices appear in supporting documentation.
14 *
15 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
16 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
17 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
18 *
19 * Carnegie Mellon requests users of this software to return to
20 *
21 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
22 * School of Computer Science
23 * Carnegie Mellon University
24 * Pittsburgh PA 15213-3890
25 *
26 * any improvements or extensions that they make and grant Carnegie the
27 * rights to redistribute these changes.
28 */
29
30 #include <machine/options.h> /* Config options headers */
31 #include <sys/cdefs.h> /* RCS ID & Copyright macro defns */
32
33 __KERNEL_RCSID(0, "$NetBSD: machdep.c,v 1.82 1997/08/11 22:46:37 cgd Exp $");
34
35 #include <sys/param.h>
36 #include <sys/systm.h>
37 #include <sys/signalvar.h>
38 #include <sys/kernel.h>
39 #include <sys/map.h>
40 #include <sys/proc.h>
41 #include <sys/buf.h>
42 #include <sys/reboot.h>
43 #include <sys/device.h>
44 #include <sys/file.h>
45 #ifdef REAL_CLISTS
46 #include <sys/clist.h>
47 #endif
48 #include <sys/callout.h>
49 #include <sys/malloc.h>
50 #include <sys/mbuf.h>
51 #include <sys/msgbuf.h>
52 #include <sys/ioctl.h>
53 #include <sys/tty.h>
54 #include <sys/user.h>
55 #include <sys/exec.h>
56 #include <sys/exec_ecoff.h>
57 #include <sys/sysctl.h>
58 #include <sys/core.h>
59 #include <sys/kcore.h>
60 #include <machine/kcore.h>
61 #ifdef SYSVMSG
62 #include <sys/msg.h>
63 #endif
64 #ifdef SYSVSEM
65 #include <sys/sem.h>
66 #endif
67 #ifdef SYSVSHM
68 #include <sys/shm.h>
69 #endif
70
71 #include <sys/mount.h>
72 #include <sys/syscallargs.h>
73
74 #include <vm/vm_kern.h>
75
76 #include <dev/cons.h>
77
78 #include <machine/autoconf.h>
79 #include <machine/cpu.h>
80 #include <machine/reg.h>
81 #include <machine/rpb.h>
82 #include <machine/prom.h>
83 #include <machine/conf.h>
84
85 #include <net/netisr.h>
86 #include <net/if.h>
87
88 #ifdef INET
89 #include <netinet/in.h>
90 #include <netinet/ip_var.h>
91 #include "arp.h"
92 #if NARP > 0
93 #include <netinet/if_inarp.h>
94 #endif
95 #endif
96 #ifdef NS
97 #include <netns/ns_var.h>
98 #endif
99 #ifdef ISO
100 #include <netiso/iso.h>
101 #include <netiso/clnp.h>
102 #endif
103 #ifdef CCITT
104 #include <netccitt/x25.h>
105 #include <netccitt/pk.h>
106 #include <netccitt/pk_extern.h>
107 #endif
108 #ifdef NATM
109 #include <netnatm/natm.h>
110 #endif
111 #ifdef NETATALK
112 #include <netatalk/at_extern.h>
113 #endif
114 #include "ppp.h"
115 #if NPPP > 0
116 #include <net/ppp_defs.h>
117 #include <net/if_ppp.h>
118 #endif
119
120 #ifdef DDB
121 #include <machine/db_machdep.h>
122 #include <ddb/db_access.h>
123 #include <ddb/db_sym.h>
124 #include <ddb/db_extern.h>
125 #include <ddb/db_interface.h>
126 #endif
127
128 #include "le_ioasic.h" /* for le_iomem creation */
129
130 vm_map_t buffer_map;
131
132 /*
133 * Declare these as initialized data so we can patch them.
134 */
135 int nswbuf = 0;
136 #ifdef NBUF
137 int nbuf = NBUF;
138 #else
139 int nbuf = 0;
140 #endif
141 #ifdef BUFPAGES
142 int bufpages = BUFPAGES;
143 #else
144 int bufpages = 0;
145 #endif
146 int msgbufmapped = 0; /* set when safe to use msgbuf */
147 int maxmem; /* max memory per process */
148
149 int totalphysmem; /* total amount of physical memory in system */
150 int physmem; /* physical memory used by NetBSD + some rsvd */
151 int firstusablepage; /* first usable memory page */
152 int lastusablepage; /* last usable memory page */
153 int resvmem; /* amount of memory reserved for PROM */
154 int unusedmem; /* amount of memory for OS that we don't use */
155 int unknownmem; /* amount of memory with an unknown use */
156
157 int cputype; /* system type, from the RPB */
158
159 /*
160 * XXX We need an address to which we can assign things so that they
161 * won't be optimized away because we didn't use the value.
162 */
163 u_int32_t no_optimize;
164
165 /* the following is used externally (sysctl_hw) */
166 char machine[] = MACHINE; /* from <machine/param.h> */
167 char machine_arch[] = MACHINE_ARCH; /* from <machine/param.h> */
168 char cpu_model[128];
169 const struct cpusw *cpu_fn_switch; /* function switch */
170
171 struct user *proc0paddr;
172
173 /* Number of machine cycles per microsecond */
174 u_int64_t cycles_per_usec;
175
176 /* some memory areas for device DMA. "ick." */
177 caddr_t le_iomem; /* XXX iomem for LANCE DMA */
178
179 /* number of cpus in the box. really! */
180 int ncpus;
181
182 char boot_flags[64];
183 char booted_kernel[64];
184
185 int bootinfo_valid;
186 struct bootinfo bootinfo;
187
188 #ifdef DDB
189 /* start and end of kernel symbol table */
190 void *ksym_start, *ksym_end;
191 #endif
192
193 /* for cpu_sysctl() */
194 int alpha_unaligned_print = 1; /* warn about unaligned accesses */
195 int alpha_unaligned_fix = 1; /* fix up unaligned accesses */
196 int alpha_unaligned_sigbus = 0; /* don't SIGBUS on fixed-up accesses */
197
198 int cpu_dump __P((void));
199 int cpu_dumpsize __P((void));
200 void dumpsys __P((void));
201 void identifycpu __P((void));
202 void netintr __P((void));
203 void printregs __P((struct reg *));
204
205 void
206 alpha_init(pfn, ptb, bim, bip)
207 u_long pfn; /* first free PFN number */
208 u_long ptb; /* PFN of current level 1 page table */
209 u_long bim; /* bootinfo magic */
210 u_long bip; /* bootinfo pointer */
211 {
212 extern char _end[];
213 caddr_t start, v;
214 struct mddt *mddtp;
215 int i, mddtweird;
216 char *p;
217
218 /*
219 * Turn off interrupts (not mchecks) and floating point.
220 * Make sure the instruction and data streams are consistent.
221 */
222 (void)alpha_pal_swpipl(ALPHA_PSL_IPL_HIGH);
223 alpha_pal_wrfen(0);
224 ALPHA_TBIA();
225 alpha_pal_imb();
226
227 /*
228 * Get the address of the restart block while the bootstrap
229 * mapping is still around.
230 */
231 hwrpb = (struct rpb *)ALPHA_PHYS_TO_K0SEG(
232 (vm_offset_t)(*(struct rpb **)HWRPB_ADDR));
233
234 /*
235 * Remember how many cycles there are per microsecond,
236 * so that we can use delay(). Round up, for safety.
237 */
238 cycles_per_usec = (hwrpb->rpb_cc_freq + 999999) / 1000000;
239
240 /*
241 * Init the PROM interface, so we can use printf
242 * until PROM mappings go away in consinit.
243 */
244 init_prom_interface();
245
246 /*
247 * Check for a bootinfo from the boot program.
248 */
249 if (bim == BOOTINFO_MAGIC) {
250 /*
251 * Have boot info. Copy it to our own storage.
252 * We'll sanity-check it later.
253 */
254 bcopy((void *)bip, &bootinfo, sizeof(bootinfo));
255 switch (bootinfo.version) {
256 case 1:
257 bootinfo_valid = 1;
258 break;
259
260 default:
261 printf("warning: unknown bootinfo version %d\n",
262 bootinfo.version);
263 }
264 } else
265 printf("warning: boot program did not pass bootinfo\n");
266
267 /*
268 * Point interrupt/exception vectors to our own.
269 */
270 alpha_pal_wrent(XentInt, ALPHA_KENTRY_INT);
271 alpha_pal_wrent(XentArith, ALPHA_KENTRY_ARITH);
272 alpha_pal_wrent(XentMM, ALPHA_KENTRY_MM);
273 alpha_pal_wrent(XentIF, ALPHA_KENTRY_IF);
274 alpha_pal_wrent(XentUna, ALPHA_KENTRY_UNA);
275 alpha_pal_wrent(XentSys, ALPHA_KENTRY_SYS);
276
277 /*
278 * Clear pending machine checks and error reports, and enable
279 * system- and processor-correctable error reporting.
280 */
281 alpha_pal_wrmces(alpha_pal_rdmces() &
282 ~(ALPHA_MCES_DSC|ALPHA_MCES_DPC));
283
284 /*
285 * Find out how much memory is available, by looking at
286 * the memory cluster descriptors. This also tries to do
287 * its best to detect things that have never been seen
288 * before...
289 *
290 * XXX Assumes that the first "system" cluster is the
291 * only one we can use. Is the second (etc.) system cluster
292 * (if one happens to exist) guaranteed to be contiguous? or...?
293 */
294 mddtp = (struct mddt *)(((caddr_t)hwrpb) + hwrpb->rpb_memdat_off);
295
296 /*
297 * BEGIN MDDT WEIRDNESS CHECKING
298 */
299 mddtweird = 0;
300
301 #define cnt mddtp->mddt_cluster_cnt
302 #define usage(n) mddtp->mddt_clusters[(n)].mddt_usage
303 if (cnt != 2 && cnt != 3) {
304 printf("WARNING: weird number (%ld) of mem clusters\n", cnt);
305 mddtweird = 1;
306 } else if (usage(0) != MDDT_PALCODE ||
307 usage(1) != MDDT_SYSTEM ||
308 (cnt == 3 && usage(2) != MDDT_PALCODE)) {
309 mddtweird = 1;
310 printf("WARNING: %ld mem clusters, but weird config\n", cnt);
311 }
312
313 for (i = 0; i < cnt; i++) {
314 if ((usage(i) & MDDT_mbz) != 0) {
315 printf("WARNING: mem cluster %d has weird usage %lx\n",
316 i, usage(i));
317 mddtweird = 1;
318 }
319 if (mddtp->mddt_clusters[i].mddt_pg_cnt == 0) {
320 printf("WARNING: mem cluster %d has pg cnt == 0\n", i);
321 mddtweird = 1;
322 }
323 /* XXX other things to check? */
324 }
325 #undef cnt
326 #undef usage
327
328 if (mddtweird) {
329 printf("\n");
330 printf("complete memory cluster information:\n");
331 for (i = 0; i < mddtp->mddt_cluster_cnt; i++) {
332 printf("mddt %d:\n", i);
333 printf("\tpfn %lx\n",
334 mddtp->mddt_clusters[i].mddt_pfn);
335 printf("\tcnt %lx\n",
336 mddtp->mddt_clusters[i].mddt_pg_cnt);
337 printf("\ttest %lx\n",
338 mddtp->mddt_clusters[i].mddt_pg_test);
339 printf("\tbva %lx\n",
340 mddtp->mddt_clusters[i].mddt_v_bitaddr);
341 printf("\tbpa %lx\n",
342 mddtp->mddt_clusters[i].mddt_p_bitaddr);
343 printf("\tbcksum %lx\n",
344 mddtp->mddt_clusters[i].mddt_bit_cksum);
345 printf("\tusage %lx\n",
346 mddtp->mddt_clusters[i].mddt_usage);
347 }
348 printf("\n");
349 }
350 /*
351 * END MDDT WEIRDNESS CHECKING
352 */
353
354 for (i = 0; i < mddtp->mddt_cluster_cnt; i++) {
355 totalphysmem += mddtp->mddt_clusters[i].mddt_pg_cnt;
356 #define usage(n) mddtp->mddt_clusters[(n)].mddt_usage
357 #define pgcnt(n) mddtp->mddt_clusters[(n)].mddt_pg_cnt
358 if ((usage(i) & MDDT_mbz) != 0)
359 unknownmem += pgcnt(i);
360 else if ((usage(i) & ~MDDT_mbz) == MDDT_PALCODE)
361 resvmem += pgcnt(i);
362 else if ((usage(i) & ~MDDT_mbz) == MDDT_SYSTEM) {
363 /*
364 * assumes that the system cluster listed is
365 * one we're in...
366 */
367 if (physmem != resvmem) {
368 physmem += pgcnt(i);
369 firstusablepage =
370 mddtp->mddt_clusters[i].mddt_pfn;
371 lastusablepage = firstusablepage + pgcnt(i) - 1;
372 } else
373 unusedmem += pgcnt(i);
374 }
375 #undef usage
376 #undef pgcnt
377 }
378 if (totalphysmem == 0)
379 panic("can't happen: system seems to have no memory!");
380 maxmem = physmem;
381
382 #if 0
383 printf("totalphysmem = %d\n", totalphysmem);
384 printf("physmem = %d\n", physmem);
385 printf("firstusablepage = %d\n", firstusablepage);
386 printf("lastusablepage = %d\n", lastusablepage);
387 printf("resvmem = %d\n", resvmem);
388 printf("unusedmem = %d\n", unusedmem);
389 printf("unknownmem = %d\n", unknownmem);
390 #endif
391
392 /*
393 * find out this CPU's page size
394 */
395 PAGE_SIZE = hwrpb->rpb_page_size;
396 if (PAGE_SIZE != 8192)
397 panic("page size %d != 8192?!", PAGE_SIZE);
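	/*
	 * Much of the port (NBPG, btoc()/ctob(), and the K0SEG address
	 * arithmetic below) is written around an 8K page, so refuse to
	 * run on a CPU reporting any other page size.
	 */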
398
399 /*
400 * Find the first free page.
401 */
402 #ifdef DDB
403 if (bootinfo_valid) {
404 /*
405 * Save the kernel symbol table.
406 */
407 switch (bootinfo.version) {
408 case 1:
409 ksym_start = (void *)bootinfo.un.v1.ssym;
410 ksym_end = (void *)bootinfo.un.v1.esym;
411 break;
412 }
413 v = (caddr_t)alpha_round_page(ksym_end);
414 } else
415 #endif
416 v = (caddr_t)alpha_round_page(_end);
417
418 /*
419 * Init mapping for u page(s) for proc 0
420 */
421 start = v;
422 curproc->p_addr = proc0paddr = (struct user *)v;
423 v += UPAGES * NBPG;
424
425 /*
426 * Find out what hardware we're on, and remember its type name.
427 */
428 cputype = hwrpb->rpb_type;
429 if (cputype < 0 || cputype > ncpusw) {
430 unknown_cputype:
431 printf("\n");
432 printf("Unknown system type %d.\n", cputype);
433 printf("\n");
434 panic("unknown system type");
435 }
436 cpu_fn_switch = &cpusw[cputype];
437 if (cpu_fn_switch->family == NULL)
438 goto unknown_cputype;
439 if (cpu_fn_switch->option == NULL) {
440 printf("\n");
441 printf("NetBSD does not currently support system type %d\n",
442 cputype);
443 printf("(%s family).\n", cpu_fn_switch->family);
444 printf("\n");
445 panic("unsupported system type");
446 }
447 if (!cpu_fn_switch->present) {
448 printf("\n");
449 printf("Support for system type %d (%s family) is\n", cputype,
450 cpu_fn_switch->family);
451 printf("not present in this kernel. Build a kernel with \"options %s\"\n",
452 cpu_fn_switch->option);
453 printf("to include support for this system type.\n");
454 printf("\n");
455 panic("support for system not present");
456 }
457
458 if ((*cpu_fn_switch->model_name)() != NULL)
459 strncpy(cpu_model, (*cpu_fn_switch->model_name)(),
460 sizeof cpu_model - 1);
461 else {
462 strncpy(cpu_model, cpu_fn_switch->family, sizeof cpu_model - 1);
463 strcat(cpu_model, " family"); /* XXX */
464 }
465 cpu_model[sizeof cpu_model - 1] = '\0';
466
467 /* XXX SANITY CHECKING. SHOULD GO AWAY */
468 /* XXX We should always be running on the primary. */
469 assert(hwrpb->rpb_primary_cpu_id == alpha_pal_whami()); /*XXX*/
470 /* XXX On single-CPU boxes, the primary should always be CPU 0. */
471 if (cputype != ST_DEC_21000) /*XXX*/
472 assert(hwrpb->rpb_primary_cpu_id == 0); /*XXX*/
473
474 #if NLE_IOASIC > 0
475 /*
476 * Grab 128K at the top of physical memory for the lance chip
477 * on machines where it does dma through the I/O ASIC.
478 * It must be physically contiguous and aligned on a 128K boundary.
479 *
480 * Note that since this is conditional on the presence of
481 * IOASIC-attached 'le' units in the kernel config, the
482 * message buffer may move on these systems. This shouldn't
483 * be a problem, because once people have a kernel config that
484 * they use, they're going to stick with it.
485 */
486 if (cputype == ST_DEC_3000_500 ||
487 cputype == ST_DEC_3000_300) { /* XXX possibly others? */
488 lastusablepage -= btoc(128 * 1024);
489 le_iomem =
490 (caddr_t)ALPHA_PHYS_TO_K0SEG(ctob(lastusablepage + 1));
491 }
492 #endif /* NLE_IOASIC */
493
494 /*
495 * Initialize error message buffer (at end of core).
496 */
497 lastusablepage -= btoc(sizeof (struct msgbuf));
498 msgbufp =
499 (struct msgbuf *)ALPHA_PHYS_TO_K0SEG(ctob(lastusablepage + 1));
500 msgbufmapped = 1;
501
502 /*
503 * Allocate space for system data structures.
504 * The first available kernel virtual address is in "v".
505 * As pages of kernel virtual memory are allocated, "v" is incremented.
506 *
507 * These data structures are allocated here instead of cpu_startup()
508 * because physical memory is directly addressable. We don't have
509 * to map these into virtual address space.
510 */
511 #define valloc(name, type, num) \
512 (name) = (type *)v; v = (caddr_t)ALIGN((name)+(num))
513 #define valloclim(name, type, num, lim) \
514 (name) = (type *)v; v = (caddr_t)ALIGN((lim) = ((name)+(num)))
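	/*
	 * Each valloc() carves an array of "num" items of "type" out of
	 * the region starting at v and bumps v to the next ALIGN()ed
	 * address.  For example,
	 *
	 *	valloc(callout, struct callout, ncallout);
	 *
	 * expands to
	 *
	 *	callout = (struct callout *)v;
	 *	v = (caddr_t)ALIGN(callout + ncallout);
	 *
	 * valloclim() additionally records the end of the array in "lim".
	 */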
515 #ifdef REAL_CLISTS
516 valloc(cfree, struct cblock, nclist);
517 #endif
518 valloc(callout, struct callout, ncallout);
519 #ifdef SYSVSHM
520 valloc(shmsegs, struct shmid_ds, shminfo.shmmni);
521 #endif
522 #ifdef SYSVSEM
523 valloc(sema, struct semid_ds, seminfo.semmni);
524 valloc(sem, struct sem, seminfo.semmns);
525 /* This is pretty disgusting! */
526 valloc(semu, int, (seminfo.semmnu * seminfo.semusz) / sizeof(int));
527 #endif
528 #ifdef SYSVMSG
529 valloc(msgpool, char, msginfo.msgmax);
530 valloc(msgmaps, struct msgmap, msginfo.msgseg);
531 valloc(msghdrs, struct msg, msginfo.msgtql);
532 valloc(msqids, struct msqid_ds, msginfo.msgmni);
533 #endif
534
535 /*
536 * Determine how many buffers to allocate.
537 * We allocate 10% of memory for buffer space. Ensure a
538 * minimum of 16 buffers. We allocate 1/2 as many swap buffer
539 * headers as file i/o buffers.
540 */
541 if (bufpages == 0)
542 bufpages = (physmem * 10) / (CLSIZE * 100);
543 if (nbuf == 0) {
544 nbuf = bufpages;
545 if (nbuf < 16)
546 nbuf = 16;
547 }
548 if (nswbuf == 0) {
549 nswbuf = (nbuf / 2) &~ 1; /* force even */
550 if (nswbuf > 256)
551 nswbuf = 256; /* sanity */
552 }
553 valloc(swbuf, struct buf, nswbuf);
554 valloc(buf, struct buf, nbuf);
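	/*
	 * Worked example (assuming 8K pages and CLSIZE == 1): with 32MB
	 * usable, physmem is 4096 pages, so bufpages becomes 409, nbuf
	 * 409, and nswbuf (409 / 2) & ~1 == 204.
	 */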
555
556 /*
557 * Clear allocated memory.
558 */
559 bzero(start, v - start);
560
561 /*
562 * Initialize the virtual memory system, and set the
563 * page table base register in proc 0's PCB.
564 */
565 #ifndef NEW_PMAP
566 pmap_bootstrap((vm_offset_t)v, ALPHA_PHYS_TO_K0SEG(ptb << PGSHIFT));
567 #else
568 pmap_bootstrap((vm_offset_t)v, ALPHA_PHYS_TO_K0SEG(ptb << PGSHIFT),
569 hwrpb->rpb_max_asn);
570 #endif
571
572 /*
573 * Initialize the rest of proc 0's PCB, and cache its physical
574 * address.
575 */
576 proc0.p_md.md_pcbpaddr =
577 (struct pcb *)ALPHA_K0SEG_TO_PHYS((vm_offset_t)&proc0paddr->u_pcb);
578
579 /*
580 * Set the kernel sp, reserving space for an (empty) trapframe,
581 * and make proc0's trapframe pointer point to it for sanity.
582 */
583 proc0paddr->u_pcb.pcb_hw.apcb_ksp =
584 (u_int64_t)proc0paddr + USPACE - sizeof(struct trapframe);
585 proc0.p_md.md_tf =
586 (struct trapframe *)proc0paddr->u_pcb.pcb_hw.apcb_ksp;
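	/*
	 * (The u-area is USPACE bytes: the pcb at its base, the kernel
	 * stack growing down from the top, and this empty trapframe at
	 * the very top of that stack.)
	 */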
587
588 #ifdef NEW_PMAP
589 pmap_activate(kernel_pmap, &proc0paddr->u_pcb.pcb_hw, 0);
590 #endif
591
592 /*
593 * Look at arguments passed to us and compute boothowto.
594 * Also, get kernel name so it can be used in user-land.
595 */
596 if (bootinfo_valid) {
597 switch (bootinfo.version) {
598 case 1:
599 bcopy(bootinfo.un.v1.boot_flags, boot_flags,
600 sizeof(boot_flags));
601 bcopy(bootinfo.un.v1.booted_kernel, booted_kernel,
602 sizeof(booted_kernel));
603 }
604 } else {
605 prom_getenv(PROM_E_BOOTED_OSFLAGS, boot_flags,
606 sizeof(boot_flags));
607 prom_getenv(PROM_E_BOOTED_FILE, booted_kernel,
608 sizeof(booted_kernel));
609 }
610
611 #if 0
612 printf("boot flags = \"%s\"\n", boot_flags);
613 printf("booted kernel = \"%s\"\n", booted_kernel);
614 #endif
615
616 boothowto = RB_SINGLE;
617 #ifdef KADB
618 boothowto |= RB_KDB;
619 #endif
620 for (p = boot_flags; p && *p != '\0'; p++) {
621 /*
622 * Note that we'd really like to differentiate case here,
623 * but the Alpha AXP Architecture Reference Manual
624 * says that we shouldn't.
625 */
626 switch (*p) {
627 case 'a': /* autoboot */
628 case 'A':
629 boothowto &= ~RB_SINGLE;
630 break;
631
632 #ifdef DEBUG
633 case 'c': /* crash dump immediately after autoconfig */
634 case 'C':
635 boothowto |= RB_DUMP;
636 break;
637 #endif
638
639 #if defined(KGDB) || defined(DDB)
640 case 'd': /* break into the kernel debugger ASAP */
641 case 'D':
642 boothowto |= RB_KDB;
643 break;
644 #endif
645
646 case 'h': /* always halt, never reboot */
647 case 'H':
648 boothowto |= RB_HALT;
649 break;
650
651 #if 0
652 case 'm': /* mini root present in memory */
653 case 'M':
654 boothowto |= RB_MINIROOT;
655 break;
656 #endif
657
658 case 'n': /* askname */
659 case 'N':
660 boothowto |= RB_ASKNAME;
661 break;
662
663 case 's': /* single-user (default, supported for sanity) */
664 case 'S':
665 boothowto |= RB_SINGLE;
666 break;
667
668 default:
669 printf("Unrecognized boot flag '%c'.\n", *p);
670 break;
671 }
672 }
673
674 /*
675 * Figure out the number of cpus in the box, from RPB fields.
676 * Really. We mean it.
677 */
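	/*
	 * A CPU is counted only if its per-CPU slot has the
	 * "processor present" (PCS_PP) flag set.
	 */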
678 for (i = 0; i < hwrpb->rpb_pcs_cnt; i++) {
679 struct pcs *pcsp;
680
681 pcsp = (struct pcs *)((char *)hwrpb + hwrpb->rpb_pcs_off +
682 (i * hwrpb->rpb_pcs_size));
683 if ((pcsp->pcs_flags & PCS_PP) != 0)
684 ncpus++;
685 }
686 }
687
688 void
689 consinit()
690 {
691
692 (*cpu_fn_switch->cons_init)();
693 pmap_unmap_prom();
694
695 #ifdef DDB
696 db_machine_init();
697 ddb_init(ksym_start, ksym_end);
698 if (boothowto & RB_KDB)
699 Debugger();
700 #endif
701 #ifdef KGDB
702 if (boothowto & RB_KDB)
703 kgdb_connect(0);
704 #endif
705 }
706
707 void
708 cpu_startup()
709 {
710 register unsigned i;
711 int base, residual;
712 vm_offset_t minaddr, maxaddr;
713 vm_size_t size;
714 #if defined(DEBUG)
715 extern int pmapdebug;
716 int opmapdebug = pmapdebug;
717
718 pmapdebug = 0;
719 #endif
720
721 /*
722 * Good {morning,afternoon,evening,night}.
723 */
724 printf(version);
725 identifycpu();
726 printf("real mem = %d (%d reserved for PROM, %d used by NetBSD)\n",
727 ctob(totalphysmem), ctob(resvmem), ctob(physmem));
728 if (unusedmem)
729 printf("WARNING: unused memory = %d bytes\n", ctob(unusedmem));
730 if (unknownmem)
731 printf("WARNING: %d bytes of memory with unknown purpose\n",
732 ctob(unknownmem));
733
734 /*
735 * Allocate virtual address space for file I/O buffers.
736 * Note they are different than the array of headers, 'buf',
737 * and usually occupy more virtual memory than physical.
738 */
739 size = MAXBSIZE * nbuf;
740 buffer_map = kmem_suballoc(kernel_map, (vm_offset_t *)&buffers,
741 &maxaddr, size, TRUE);
742 minaddr = (vm_offset_t)buffers;
743 if (vm_map_find(buffer_map, vm_object_allocate(size), (vm_offset_t)0,
744 &minaddr, size, FALSE) != KERN_SUCCESS)
745 panic("startup: cannot allocate buffers");
746 base = bufpages / nbuf;
747 residual = bufpages % nbuf;
748 for (i = 0; i < nbuf; i++) {
749 vm_size_t curbufsize;
750 vm_offset_t curbuf;
751
752 /*
753 * First <residual> buffers get (base+1) physical pages
754 * allocated for them. The rest get (base) physical pages.
755 *
756 * The rest of each buffer occupies virtual space,
757 * but has no physical memory allocated for it.
758 */
759 curbuf = (vm_offset_t)buffers + i * MAXBSIZE;
760 curbufsize = CLBYTES * (i < residual ? base+1 : base);
761 vm_map_pageable(buffer_map, curbuf, curbuf+curbufsize, FALSE);
762 vm_map_simplify(buffer_map, curbuf);
763 }
764 /*
765 * Allocate a submap for exec arguments. This map effectively
766 * limits the number of processes exec'ing at any time.
767 */
768 exec_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
769 16 * NCARGS, TRUE);
770
771 /*
772 * Allocate a submap for physio
773 */
774 phys_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
775 VM_PHYS_SIZE, TRUE);
776
777 /*
778 * Finally, allocate mbuf cluster submap.
779 */
780 mb_map = kmem_suballoc(kernel_map, (vm_offset_t *)&mbutl, &maxaddr,
781 VM_MBUF_SIZE, FALSE);
782 /*
783 * Initialize callouts
784 */
785 callfree = callout;
786 for (i = 1; i < ncallout; i++)
787 callout[i-1].c_next = &callout[i];
788 callout[i-1].c_next = NULL;
789
790 #if defined(DEBUG)
791 pmapdebug = opmapdebug;
792 #endif
793 printf("avail mem = %ld\n", (long)ptoa(cnt.v_free_count));
794 printf("using %ld buffers containing %ld bytes of memory\n",
795 (long)nbuf, (long)(bufpages * CLBYTES));
796
797 /*
798 * Set up buffers, so they can be used to read disk labels.
799 */
800 bufinit();
801
802 /*
803 * Configure the system.
804 */
805 configure();
806
807 /*
808 * Note that bootstrapping is finished, and set the HWRPB up
809 * to do restarts.
810 */
811 hwrpb_restart_setup();
812 }
813
814 void
815 identifycpu()
816 {
817
818 /*
819 * print out CPU identification information.
820 */
821 printf("%s, %ldMHz\n", cpu_model,
822 hwrpb->rpb_cc_freq / 1000000); /* XXX true for 21164? */
823 printf("%ld byte page size, %d processor%s.\n",
824 hwrpb->rpb_page_size, ncpus, ncpus == 1 ? "" : "s");
825 #if 0
826 /* this isn't defined for any systems that we run on? */
827 printf("serial number 0x%lx 0x%lx\n",
828 ((long *)hwrpb->rpb_ssn)[0], ((long *)hwrpb->rpb_ssn)[1]);
829
830 /* and these aren't particularly useful! */
831 printf("variation: 0x%lx, revision 0x%lx\n",
832 hwrpb->rpb_variation, *(long *)hwrpb->rpb_revision);
833 #endif
834 }
835
836 int waittime = -1;
837 struct pcb dumppcb;
838
839 void
840 cpu_reboot(howto, bootstr)
841 int howto;
842 char *bootstr;
843 {
844 extern int cold;
845
846 /* If system is cold, just halt. */
847 if (cold) {
848 howto |= RB_HALT;
849 goto haltsys;
850 }
851
852 /* If "always halt" was specified as a boot flag, obey. */
853 if ((boothowto & RB_HALT) != 0)
854 howto |= RB_HALT;
855
856 boothowto = howto;
857 if ((howto & RB_NOSYNC) == 0 && waittime < 0) {
858 waittime = 0;
859 vfs_shutdown();
860 /*
861 * If we've been adjusting the clock, the todr
862 * will be out of synch; adjust it now.
863 */
864 resettodr();
865 }
866
867 /* Disable interrupts. */
868 splhigh();
869
870 /* If rebooting and a dump is requested do it. */
871 #if 0
872 if ((howto & (RB_DUMP | RB_HALT)) == RB_DUMP)
873 #else
874 if (howto & RB_DUMP)
875 #endif
876 dumpsys();
877
878 haltsys:
879
880 /* run any shutdown hooks */
881 doshutdownhooks();
882
883 #ifdef BOOTKEY
884 printf("hit any key to %s...\n", howto & RB_HALT ? "halt" : "reboot");
885 cngetc();
886 printf("\n");
887 #endif
888
889 /* Finally, halt/reboot the system. */
890 printf("%s\n\n", howto & RB_HALT ? "halted." : "rebooting...");
891 prom_halt(howto & RB_HALT);
892 /*NOTREACHED*/
893 }
894
895 /*
896 * These variables are needed by /sbin/savecore
897 */
898 u_long dumpmag = 0x8fca0101; /* magic number */
899 int dumpsize = 0; /* pages */
900 long dumplo = 0; /* blocks */
901
902 /*
903 * cpu_dumpsize: calculate size of machine-dependent kernel core dump headers.
904 */
905 int
906 cpu_dumpsize()
907 {
908 int size;
909
910 size = ALIGN(sizeof(kcore_seg_t)) + ALIGN(sizeof(cpu_kcore_hdr_t));
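	/*
	 * The headers are written with a single dbtob(1)-sized transfer
	 * in cpu_dump(), so they must fit in one disk block; otherwise
	 * return -1 and let cpu_dumpconf() disable dumping.
	 */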
911 if (roundup(size, dbtob(1)) != dbtob(1))
912 return -1;
913
914 return (1);
915 }
916
917 /*
918 * cpu_dump: dump machine-dependent kernel core dump headers.
919 */
920 int
921 cpu_dump()
922 {
923 int (*dump) __P((dev_t, daddr_t, caddr_t, size_t));
924 long buf[dbtob(1) / sizeof (long)];
925 kcore_seg_t *segp;
926 cpu_kcore_hdr_t *cpuhdrp;
927
928 dump = bdevsw[major(dumpdev)].d_dump;
929
930 segp = (kcore_seg_t *)buf;
931 cpuhdrp =
932 (cpu_kcore_hdr_t *)&buf[ALIGN(sizeof(*segp)) / sizeof (long)];
933
934 /*
935 * Generate a segment header.
936 */
937 CORE_SETMAGIC(*segp, KCORE_MAGIC, MID_MACHINE, CORE_CPU);
938 segp->c_size = dbtob(1) - ALIGN(sizeof(*segp));
939
940 /*
941 * Add the machine-dependent header info
942 */
943 cpuhdrp->lev1map_pa = ALPHA_K0SEG_TO_PHYS((vm_offset_t)Lev1map);
944 cpuhdrp->page_size = PAGE_SIZE;
945 cpuhdrp->core_seg.start = ctob(firstusablepage);
946 cpuhdrp->core_seg.size = ctob(physmem);
947
948 return (dump(dumpdev, dumplo, (caddr_t)buf, dbtob(1)));
949 }
950
951 /*
952 * This is called by main to set dumplo and dumpsize.
953 * Dumps always skip the first CLBYTES of disk space
954 * in case there might be a disk label stored there.
955 * If there is extra space, put dump at the end to
956 * reduce the chance that swapping trashes it.
957 */
958 void
959 cpu_dumpconf()
960 {
961 int nblks, dumpblks; /* size of dump area */
962 int maj;
963
964 if (dumpdev == NODEV)
965 goto bad;
966 maj = major(dumpdev);
967 if (maj < 0 || maj >= nblkdev)
968 panic("dumpconf: bad dumpdev=0x%x", dumpdev);
969 if (bdevsw[maj].d_psize == NULL)
970 goto bad;
971 nblks = (*bdevsw[maj].d_psize)(dumpdev);
972 if (nblks <= ctod(1))
973 goto bad;
974
975 dumpblks = cpu_dumpsize();
976 if (dumpblks < 0)
977 goto bad;
978 dumpblks += ctod(physmem);
979
980 /* If dump won't fit (incl. room for possible label), punt. */
981 if (dumpblks > (nblks - ctod(1)))
982 goto bad;
983
984 /* Put dump at end of partition */
985 dumplo = nblks - dumpblks;
986
987 /* dumpsize is in page units, and doesn't include headers. */
988 dumpsize = physmem;
989 return;
990
991 bad:
992 dumpsize = 0;
993 return;
994 }
995
996 /*
997 * Dump the kernel's image to the swap partition.
998 */
999 #define BYTES_PER_DUMP NBPG
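/* i.e., one 8K page per transfer; dumpsys() maps each chunk through K0SEG */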
1000
1001 void
1002 dumpsys()
1003 {
1004 unsigned bytes, i, n;
1005 int maddr, psize;
1006 daddr_t blkno;
1007 int (*dump) __P((dev_t, daddr_t, caddr_t, size_t));
1008 int error;
1009
1010 /* Save registers. */
1011 savectx(&dumppcb);
1012
1013 msgbufmapped = 0; /* don't record dump msgs in msgbuf */
1014 if (dumpdev == NODEV)
1015 return;
1016
1017 /*
1018 * For dumps during autoconfiguration, the dump device may not
1019 * have been configured yet; if so, do it now.
1020 */
1021 if (dumpsize == 0)
1022 cpu_dumpconf();
1023 if (dumplo <= 0) {
1024 printf("\ndump to dev %x not possible\n", dumpdev);
1025 return;
1026 }
1027 printf("\ndumping to dev %x, offset %ld\n", dumpdev, dumplo);
1028
1029 psize = (*bdevsw[major(dumpdev)].d_psize)(dumpdev);
1030 printf("dump ");
1031 if (psize == -1) {
1032 printf("area unavailable\n");
1033 return;
1034 }
1035
1036 /* XXX should purge all outstanding keystrokes. */
1037
1038 if ((error = cpu_dump()) != 0)
1039 goto err;
1040
1041 bytes = ctob(physmem);
1042 maddr = ctob(firstusablepage);
1043 blkno = dumplo + cpu_dumpsize();
1044 dump = bdevsw[major(dumpdev)].d_dump;
1045 error = 0;
1046 for (i = 0; i < bytes; i += n) {
1047
1048 /* Print out how many MBs we have to go. */
1049 n = bytes - i;
1050 if (n && (n % (1024*1024)) == 0)
1051 printf("%d ", n / (1024 * 1024));
1052
1053 /* Limit size for next transfer. */
1054 if (n > BYTES_PER_DUMP)
1055 n = BYTES_PER_DUMP;
1056
1057 error = (*dump)(dumpdev, blkno,
1058 (caddr_t)ALPHA_PHYS_TO_K0SEG(maddr), n);
1059 if (error)
1060 break;
1061 maddr += n;
1062 blkno += btodb(n); /* XXX? */
1063
1064 /* XXX should look for keystrokes, to cancel. */
1065 }
1066
1067 err:
1068 switch (error) {
1069
1070 case ENXIO:
1071 printf("device bad\n");
1072 break;
1073
1074 case EFAULT:
1075 printf("device not ready\n");
1076 break;
1077
1078 case EINVAL:
1079 printf("area improper\n");
1080 break;
1081
1082 case EIO:
1083 printf("i/o error\n");
1084 break;
1085
1086 case EINTR:
1087 printf("aborted from console\n");
1088 break;
1089
1090 case 0:
1091 printf("succeeded\n");
1092 break;
1093
1094 default:
1095 printf("error %d\n", error);
1096 break;
1097 }
1098 printf("\n\n");
1099 delay(1000);
1100 }
1101
1102 void
1103 frametoreg(framep, regp)
1104 struct trapframe *framep;
1105 struct reg *regp;
1106 {
1107
1108 regp->r_regs[R_V0] = framep->tf_regs[FRAME_V0];
1109 regp->r_regs[R_T0] = framep->tf_regs[FRAME_T0];
1110 regp->r_regs[R_T1] = framep->tf_regs[FRAME_T1];
1111 regp->r_regs[R_T2] = framep->tf_regs[FRAME_T2];
1112 regp->r_regs[R_T3] = framep->tf_regs[FRAME_T3];
1113 regp->r_regs[R_T4] = framep->tf_regs[FRAME_T4];
1114 regp->r_regs[R_T5] = framep->tf_regs[FRAME_T5];
1115 regp->r_regs[R_T6] = framep->tf_regs[FRAME_T6];
1116 regp->r_regs[R_T7] = framep->tf_regs[FRAME_T7];
1117 regp->r_regs[R_S0] = framep->tf_regs[FRAME_S0];
1118 regp->r_regs[R_S1] = framep->tf_regs[FRAME_S1];
1119 regp->r_regs[R_S2] = framep->tf_regs[FRAME_S2];
1120 regp->r_regs[R_S3] = framep->tf_regs[FRAME_S3];
1121 regp->r_regs[R_S4] = framep->tf_regs[FRAME_S4];
1122 regp->r_regs[R_S5] = framep->tf_regs[FRAME_S5];
1123 regp->r_regs[R_S6] = framep->tf_regs[FRAME_S6];
1124 regp->r_regs[R_A0] = framep->tf_regs[FRAME_A0];
1125 regp->r_regs[R_A1] = framep->tf_regs[FRAME_A1];
1126 regp->r_regs[R_A2] = framep->tf_regs[FRAME_A2];
1127 regp->r_regs[R_A3] = framep->tf_regs[FRAME_A3];
1128 regp->r_regs[R_A4] = framep->tf_regs[FRAME_A4];
1129 regp->r_regs[R_A5] = framep->tf_regs[FRAME_A5];
1130 regp->r_regs[R_T8] = framep->tf_regs[FRAME_T8];
1131 regp->r_regs[R_T9] = framep->tf_regs[FRAME_T9];
1132 regp->r_regs[R_T10] = framep->tf_regs[FRAME_T10];
1133 regp->r_regs[R_T11] = framep->tf_regs[FRAME_T11];
1134 regp->r_regs[R_RA] = framep->tf_regs[FRAME_RA];
1135 regp->r_regs[R_T12] = framep->tf_regs[FRAME_T12];
1136 regp->r_regs[R_AT] = framep->tf_regs[FRAME_AT];
1137 regp->r_regs[R_GP] = framep->tf_regs[FRAME_GP];
1138 /* regp->r_regs[R_SP] = framep->tf_regs[FRAME_SP]; XXX */
1139 regp->r_regs[R_ZERO] = 0;
1140 }
1141
1142 void
1143 regtoframe(regp, framep)
1144 struct reg *regp;
1145 struct trapframe *framep;
1146 {
1147
1148 framep->tf_regs[FRAME_V0] = regp->r_regs[R_V0];
1149 framep->tf_regs[FRAME_T0] = regp->r_regs[R_T0];
1150 framep->tf_regs[FRAME_T1] = regp->r_regs[R_T1];
1151 framep->tf_regs[FRAME_T2] = regp->r_regs[R_T2];
1152 framep->tf_regs[FRAME_T3] = regp->r_regs[R_T3];
1153 framep->tf_regs[FRAME_T4] = regp->r_regs[R_T4];
1154 framep->tf_regs[FRAME_T5] = regp->r_regs[R_T5];
1155 framep->tf_regs[FRAME_T6] = regp->r_regs[R_T6];
1156 framep->tf_regs[FRAME_T7] = regp->r_regs[R_T7];
1157 framep->tf_regs[FRAME_S0] = regp->r_regs[R_S0];
1158 framep->tf_regs[FRAME_S1] = regp->r_regs[R_S1];
1159 framep->tf_regs[FRAME_S2] = regp->r_regs[R_S2];
1160 framep->tf_regs[FRAME_S3] = regp->r_regs[R_S3];
1161 framep->tf_regs[FRAME_S4] = regp->r_regs[R_S4];
1162 framep->tf_regs[FRAME_S5] = regp->r_regs[R_S5];
1163 framep->tf_regs[FRAME_S6] = regp->r_regs[R_S6];
1164 framep->tf_regs[FRAME_A0] = regp->r_regs[R_A0];
1165 framep->tf_regs[FRAME_A1] = regp->r_regs[R_A1];
1166 framep->tf_regs[FRAME_A2] = regp->r_regs[R_A2];
1167 framep->tf_regs[FRAME_A3] = regp->r_regs[R_A3];
1168 framep->tf_regs[FRAME_A4] = regp->r_regs[R_A4];
1169 framep->tf_regs[FRAME_A5] = regp->r_regs[R_A5];
1170 framep->tf_regs[FRAME_T8] = regp->r_regs[R_T8];
1171 framep->tf_regs[FRAME_T9] = regp->r_regs[R_T9];
1172 framep->tf_regs[FRAME_T10] = regp->r_regs[R_T10];
1173 framep->tf_regs[FRAME_T11] = regp->r_regs[R_T11];
1174 framep->tf_regs[FRAME_RA] = regp->r_regs[R_RA];
1175 framep->tf_regs[FRAME_T12] = regp->r_regs[R_T12];
1176 framep->tf_regs[FRAME_AT] = regp->r_regs[R_AT];
1177 framep->tf_regs[FRAME_GP] = regp->r_regs[R_GP];
1178 /* framep->tf_regs[FRAME_SP] = regp->r_regs[R_SP]; XXX */
1179 /* ??? = regp->r_regs[R_ZERO]; */
1180 }
1181
1182 void
1183 printregs(regp)
1184 struct reg *regp;
1185 {
1186 int i;
1187
1188 for (i = 0; i < 32; i++)
1189 printf("R%d:\t0x%016lx%s", i, regp->r_regs[i],
1190 i & 1 ? "\n" : "\t");
1191 }
1192
1193 void
1194 regdump(framep)
1195 struct trapframe *framep;
1196 {
1197 struct reg reg;
1198
1199 frametoreg(framep, &reg);
1200 reg.r_regs[R_SP] = alpha_pal_rdusp();
1201
1202 printf("REGISTERS:\n");
1203 printregs(&reg);
1204 }
1205
1206 #ifdef DEBUG
1207 int sigdebug = 0;
1208 int sigpid = 0;
1209 #define SDB_FOLLOW 0x01
1210 #define SDB_KSTACK 0x02
1211 #endif
1212
1213 /*
1214 * Send an interrupt to process.
1215 */
1216 void
1217 sendsig(catcher, sig, mask, code)
1218 sig_t catcher;
1219 int sig, mask;
1220 u_long code;
1221 {
1222 struct proc *p = curproc;
1223 struct sigcontext *scp, ksc;
1224 struct trapframe *frame;
1225 struct sigacts *psp = p->p_sigacts;
1226 int oonstack, fsize, rndfsize;
1227 extern char sigcode[], esigcode[];
1228 extern struct proc *fpcurproc;
1229
1230 frame = p->p_md.md_tf;
1231 oonstack = psp->ps_sigstk.ss_flags & SS_ONSTACK;
1232 fsize = sizeof ksc;
1233 rndfsize = ((fsize + 15) / 16) * 16;
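	/*
	 * Round the sigcontext size up to a 16-byte multiple so that the
	 * user stack pointer stays octaword-aligned, as the Alpha calling
	 * conventions expect.
	 */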
1234 /*
1235 * Allocate and validate space for the signal handler
1236 * context. Note that if the stack is in P0 space, the
1237 * call to grow() is a nop, and the useracc() check
1238 * will fail if the process has not already allocated
1239 * the space with a `brk'.
1240 */
1241 if ((psp->ps_flags & SAS_ALTSTACK) && !oonstack &&
1242 (psp->ps_sigonstack & sigmask(sig))) {
1243 scp = (struct sigcontext *)(psp->ps_sigstk.ss_sp +
1244 psp->ps_sigstk.ss_size - rndfsize);
1245 psp->ps_sigstk.ss_flags |= SS_ONSTACK;
1246 } else
1247 scp = (struct sigcontext *)(alpha_pal_rdusp() - rndfsize);
1248 if ((u_long)scp <= USRSTACK - ctob(p->p_vmspace->vm_ssize))
1249 (void)grow(p, (u_long)scp);
1250 #ifdef DEBUG
1251 if ((sigdebug & SDB_KSTACK) && p->p_pid == sigpid)
1252 printf("sendsig(%d): sig %d ssp %p usp %p\n", p->p_pid,
1253 sig, &oonstack, scp);
1254 #endif
1255 if (useracc((caddr_t)scp, fsize, B_WRITE) == 0) {
1256 #ifdef DEBUG
1257 if ((sigdebug & SDB_KSTACK) && p->p_pid == sigpid)
1258 printf("sendsig(%d): useracc failed on sig %d\n",
1259 p->p_pid, sig);
1260 #endif
1261 /*
1262 * Process has trashed its stack; give it an illegal
1263 * instruction to halt it in its tracks.
1264 */
1265 SIGACTION(p, SIGILL) = SIG_DFL;
1266 sig = sigmask(SIGILL);
1267 p->p_sigignore &= ~sig;
1268 p->p_sigcatch &= ~sig;
1269 p->p_sigmask &= ~sig;
1270 psignal(p, SIGILL);
1271 return;
1272 }
1273
1274 /*
1275 * Build the signal context to be used by sigreturn.
1276 */
1277 ksc.sc_onstack = oonstack;
1278 ksc.sc_mask = mask;
1279 ksc.sc_pc = frame->tf_regs[FRAME_PC];
1280 ksc.sc_ps = frame->tf_regs[FRAME_PS];
1281
1282 /* copy the registers. */
1283 frametoreg(frame, (struct reg *)ksc.sc_regs);
1284 ksc.sc_regs[R_ZERO] = 0xACEDBADE; /* magic number */
1285 ksc.sc_regs[R_SP] = alpha_pal_rdusp();
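	/*
	 * (sys_sigreturn() later checks the R_ZERO slot for the magic
	 * value above and rejects contexts that have been trashed.)
	 */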
1286
1287 /* save the floating-point state, if necessary, then copy it. */
1288 if (p == fpcurproc) {
1289 alpha_pal_wrfen(1);
1290 savefpstate(&p->p_addr->u_pcb.pcb_fp);
1291 alpha_pal_wrfen(0);
1292 fpcurproc = NULL;
1293 }
1294 ksc.sc_ownedfp = p->p_md.md_flags & MDP_FPUSED;
1295 bcopy(&p->p_addr->u_pcb.pcb_fp, (struct fpreg *)ksc.sc_fpregs,
1296 sizeof(struct fpreg));
1297 ksc.sc_fp_control = 0; /* XXX ? */
1298 bzero(ksc.sc_reserved, sizeof ksc.sc_reserved); /* XXX */
1299 bzero(ksc.sc_xxx, sizeof ksc.sc_xxx); /* XXX */
1300
1301
1302 #ifdef COMPAT_OSF1
1303 /*
1304 * XXX Create an OSF/1-style sigcontext and associated goo.
1305 */
1306 #endif
1307
1308 /*
1309 * copy the frame out to userland.
1310 */
1311 (void) copyout((caddr_t)&ksc, (caddr_t)scp, fsize);
1312 #ifdef DEBUG
1313 if (sigdebug & SDB_FOLLOW)
1314 printf("sendsig(%d): sig %d scp %p code %lx\n", p->p_pid, sig,
1315 scp, code);
1316 #endif
1317
1318 /*
1319 * Set up the registers to return to sigcode.
1320 */
1321 frame->tf_regs[FRAME_PC] =
1322 (u_int64_t)PS_STRINGS - (esigcode - sigcode);
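	/*
	 * The PC is aimed at the signal trampoline (sigcode), which exec
	 * places just below the ps_strings structure at the top of the
	 * user stack; hence PS_STRINGS minus the trampoline's size.
	 */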
1323 frame->tf_regs[FRAME_A0] = sig;
1324 frame->tf_regs[FRAME_A1] = code;
1325 frame->tf_regs[FRAME_A2] = (u_int64_t)scp;
1326 frame->tf_regs[FRAME_T12] = (u_int64_t)catcher; /* t12 is pv */
1327 alpha_pal_wrusp((unsigned long)scp);
1328
1329 #ifdef DEBUG
1330 if (sigdebug & SDB_FOLLOW)
1331 printf("sendsig(%d): pc %lx, catcher %lx\n", p->p_pid,
1332 frame->tf_regs[FRAME_PC], frame->tf_regs[FRAME_A3]);
1333 if ((sigdebug & SDB_KSTACK) && p->p_pid == sigpid)
1334 printf("sendsig(%d): sig %d returns\n",
1335 p->p_pid, sig);
1336 #endif
1337 }
1338
1339 /*
1340 * System call to cleanup state after a signal
1341 * has been taken. Reset signal mask and
1342 * stack state from context left by sendsig (above).
1343 * Return to previous pc and psl as specified by
1344 * context left by sendsig. Check carefully to
1345 * make sure that the user has not modified the
1346 * psl to gain improper privileges or to cause
1347 * a machine fault.
1348 */
1349 /* ARGSUSED */
1350 int
1351 sys_sigreturn(p, v, retval)
1352 struct proc *p;
1353 void *v;
1354 register_t *retval;
1355 {
1356 struct sys_sigreturn_args /* {
1357 syscallarg(struct sigcontext *) sigcntxp;
1358 } */ *uap = v;
1359 struct sigcontext *scp, ksc;
1360 extern struct proc *fpcurproc;
1361
1362 scp = SCARG(uap, sigcntxp);
1363 #ifdef DEBUG
1364 if (sigdebug & SDB_FOLLOW)
1365 printf("sigreturn: pid %d, scp %p\n", p->p_pid, scp);
1366 #endif
1367
1368 if (ALIGN(scp) != (u_int64_t)scp)
1369 return (EINVAL);
1370
1371 /*
1372 * Test and fetch the context structure.
1373 * We grab it all at once for speed.
1374 */
1375 if (useracc((caddr_t)scp, sizeof (*scp), B_WRITE) == 0 ||
1376 copyin((caddr_t)scp, (caddr_t)&ksc, sizeof ksc))
1377 return (EINVAL);
1378
1379 if (ksc.sc_regs[R_ZERO] != 0xACEDBADE) /* magic number */
1380 return (EINVAL);
1381 /*
1382 * Restore the user-supplied information
1383 */
1384 if (ksc.sc_onstack)
1385 p->p_sigacts->ps_sigstk.ss_flags |= SS_ONSTACK;
1386 else
1387 p->p_sigacts->ps_sigstk.ss_flags &= ~SS_ONSTACK;
1388 p->p_sigmask = ksc.sc_mask &~ sigcantmask;
1389
1390 p->p_md.md_tf->tf_regs[FRAME_PC] = ksc.sc_pc;
1391 p->p_md.md_tf->tf_regs[FRAME_PS] =
1392 (ksc.sc_ps | ALPHA_PSL_USERSET) & ~ALPHA_PSL_USERCLR;
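	/*
	 * Force the PS bits that must be set/clear in user mode, so a
	 * tampered sc_ps cannot hand the process kernel mode or an
	 * elevated IPL.
	 */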
1393
1394 regtoframe((struct reg *)ksc.sc_regs, p->p_md.md_tf);
1395 alpha_pal_wrusp(ksc.sc_regs[R_SP]);
1396
1397 /* XXX ksc.sc_ownedfp ? */
1398 if (p == fpcurproc)
1399 fpcurproc = NULL;
1400 bcopy((struct fpreg *)ksc.sc_fpregs, &p->p_addr->u_pcb.pcb_fp,
1401 sizeof(struct fpreg));
1402 /* XXX ksc.sc_fp_control ? */
1403
1404 #ifdef DEBUG
1405 if (sigdebug & SDB_FOLLOW)
1406 printf("sigreturn(%d): returns\n", p->p_pid);
1407 #endif
1408 return (EJUSTRETURN);
1409 }
1410
1411 /*
1412 * machine dependent system variables.
1413 */
1414 int
1415 cpu_sysctl(name, namelen, oldp, oldlenp, newp, newlen, p)
1416 int *name;
1417 u_int namelen;
1418 void *oldp;
1419 size_t *oldlenp;
1420 void *newp;
1421 size_t newlen;
1422 struct proc *p;
1423 {
1424 dev_t consdev;
1425
1426 /* all sysctl names at this level are terminal */
1427 if (namelen != 1)
1428 return (ENOTDIR); /* overloaded */
1429
1430 switch (name[0]) {
1431 case CPU_CONSDEV:
1432 if (cn_tab != NULL)
1433 consdev = cn_tab->cn_dev;
1434 else
1435 consdev = NODEV;
1436 return (sysctl_rdstruct(oldp, oldlenp, newp, &consdev,
1437 sizeof consdev));
1438
1439 case CPU_ROOT_DEVICE:
1440 return (sysctl_rdstring(oldp, oldlenp, newp,
1441 root_device->dv_xname));
1442
1443 case CPU_UNALIGNED_PRINT:
1444 return (sysctl_int(oldp, oldlenp, newp, newlen,
1445 &alpha_unaligned_print));
1446
1447 case CPU_UNALIGNED_FIX:
1448 return (sysctl_int(oldp, oldlenp, newp, newlen,
1449 &alpha_unaligned_fix));
1450
1451 case CPU_UNALIGNED_SIGBUS:
1452 return (sysctl_int(oldp, oldlenp, newp, newlen,
1453 &alpha_unaligned_sigbus));
1454
1455 case CPU_BOOTED_KERNEL:
1456 return (sysctl_rdstring(oldp, oldlenp, newp, booted_kernel));
1457
1458 default:
1459 return (EOPNOTSUPP);
1460 }
1461 /* NOTREACHED */
1462 }
1463
1464 /*
1465 * Set registers on exec.
1466 */
1467 void
1468 setregs(p, pack, stack, retval)
1469 register struct proc *p;
1470 struct exec_package *pack;
1471 u_long stack;
1472 register_t *retval;
1473 {
1474 struct trapframe *tfp = p->p_md.md_tf;
1475 extern struct proc *fpcurproc;
1476 #ifdef DEBUG
1477 int i;
1478 #endif
1479
1480 #ifdef DEBUG
1481 /*
1482 * Crash and dump, if the user requested it.
1483 */
1484 if (boothowto & RB_DUMP)
1485 panic("crash requested by boot flags");
1486 #endif
1487
1488 #ifdef DEBUG
1489 for (i = 0; i < FRAME_SIZE; i++)
1490 tfp->tf_regs[i] = 0xbabefacedeadbeef;
1491 #else
1492 bzero(tfp->tf_regs, FRAME_SIZE * sizeof tfp->tf_regs[0]);
1493 #endif
1494 bzero(&p->p_addr->u_pcb.pcb_fp, sizeof p->p_addr->u_pcb.pcb_fp);
1495 #define FP_RN 2 /* XXX */
1496 p->p_addr->u_pcb.pcb_fp.fpr_cr = (long)FP_RN << 58;
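	/*
	 * Bits <59:58> of the FPCR hold the dynamic rounding mode;
	 * FP_RN (2) selects round-to-nearest for the fresh FP state.
	 */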
1497 alpha_pal_wrusp(stack);
1498 tfp->tf_regs[FRAME_PS] = ALPHA_PSL_USERSET;
1499 tfp->tf_regs[FRAME_PC] = pack->ep_entry & ~3;
1500
1501 tfp->tf_regs[FRAME_A0] = stack; /* a0 = sp */
1502 tfp->tf_regs[FRAME_A1] = 0; /* a1 = rtld cleanup */
1503 tfp->tf_regs[FRAME_A2] = 0; /* a2 = rtld object */
1504 tfp->tf_regs[FRAME_A3] = (u_int64_t)PS_STRINGS; /* a3 = ps_strings */
1505 tfp->tf_regs[FRAME_T12] = tfp->tf_regs[FRAME_PC]; /* a.k.a. PV */
1506
1507 p->p_md.md_flags &= ~MDP_FPUSED;
1508 if (fpcurproc == p)
1509 fpcurproc = NULL;
1510
1511 retval[0] = retval[1] = 0;
1512 }
1513
1514 void
1515 netintr()
1516 {
1517 int n, s;
1518
1519 s = splhigh();
1520 n = netisr;
1521 netisr = 0;
1522 splx(s);
1523
1524 #define DONETISR(bit, fn) \
1525 do { \
1526 if (n & (1 << (bit))) \
1527 fn; \
1528 } while (0)
1529
1530 #ifdef INET
1531 #if NARP > 0
1532 DONETISR(NETISR_ARP, arpintr());
1533 #endif
1534 DONETISR(NETISR_IP, ipintr());
1535 #endif
1536 #ifdef NETATALK
1537 DONETISR(NETISR_ATALK, atintr());
1538 #endif
1539 #ifdef NS
1540 DONETISR(NETISR_NS, nsintr());
1541 #endif
1542 #ifdef ISO
1543 DONETISR(NETISR_ISO, clnlintr());
1544 #endif
1545 #ifdef CCITT
1546 DONETISR(NETISR_CCITT, ccittintr());
1547 #endif
1548 #ifdef NATM
1549 DONETISR(NETISR_NATM, natmintr());
1550 #endif
1551 #if NPPP > 0
1552 DONETISR(NETISR_PPP, pppintr());
1553 #endif
1554
1555 #undef DONETISR
1556 }
1557
1558 void
1559 do_sir()
1560 {
1561 u_int64_t n;
1562
1563 do {
1564 (void)splhigh();
1565 n = ssir;
1566 ssir = 0;
1567 splsoft(); /* don't recurse through spl0() */
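		/*
		 * New soft interrupts may be posted in ssir while the
		 * handlers below run at splsoft; the enclosing do/while
		 * re-checks ssir until it stays clear.
		 */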
1568
1569 #define DO_SIR(bit, fn) \
1570 do { \
1571 if (n & (bit)) { \
1572 cnt.v_soft++; \
1573 fn; \
1574 } \
1575 } while (0)
1576
1577 DO_SIR(SIR_NET, netintr());
1578 DO_SIR(SIR_CLOCK, softclock());
1579
1580 #undef DO_SIR
1581 } while (ssir != 0);
1582 }
1583
1584 int
1585 spl0()
1586 {
1587
1588 if (ssir)
1589 do_sir(); /* it lowers the IPL itself */
1590
1591 return (alpha_pal_swpipl(ALPHA_PSL_IPL_0));
1592 }
1593
1594 /*
1595 * The following primitives manipulate the run queues. _whichqs tells which
1596 * of the 32 queues _qs have processes in them. Setrunqueue puts processes
1597 * into queues, Remrunqueue removes them from queues. The running process is
1598 * on no queue, other processes are on a queue related to p->p_priority,
1599 * divided by 4 actually to shrink the 0-127 range of priorities into the 32
1600 * available queues.
1601 */
1602 /*
1603 * setrunqueue(p)
1604 * proc *p;
1605 *
1606 * Call should be made at splclock(), and p->p_stat should be SRUN.
1607 */
1608
1609 void
1610 setrunqueue(p)
1611 struct proc *p;
1612 {
1613 int bit;
1614
1615 /* firewall: p->p_back must be NULL */
1616 if (p->p_back != NULL)
1617 panic("setrunqueue");
1618
1619 bit = p->p_priority >> 2;
1620 whichqs |= (1 << bit);
1621 p->p_forw = (struct proc *)&qs[bit];
1622 p->p_back = qs[bit].ph_rlink;
1623 p->p_back->p_forw = p;
1624 qs[bit].ph_rlink = p;
1625 }
1626
1627 /*
1628 * remrunqueue(p)
1629 *
1630 * Call should be made at splclock().
1631 */
1632 void
1633 remrunqueue(p)
1634 struct proc *p;
1635 {
1636 int bit;
1637
1638 bit = p->p_priority >> 2;
1639 if ((whichqs & (1 << bit)) == 0)
1640 panic("remrunqueue");
1641
1642 p->p_back->p_forw = p->p_forw;
1643 p->p_forw->p_back = p->p_back;
1644 p->p_back = NULL; /* for firewall checking. */
1645
1646 if ((struct proc *)&qs[bit] == qs[bit].ph_link)
1647 whichqs &= ~(1 << bit);
1648 }
1649
1650 /*
1651 * Return the best possible estimate of the time in the timeval
1652 * to which tvp points. Unfortunately, we can't read the hardware registers.
1653 * We guarantee that the time will be greater than the value obtained by a
1654 * previous call.
1655 */
1656 void
1657 microtime(tvp)
1658 register struct timeval *tvp;
1659 {
1660 int s = splclock();
1661 static struct timeval lasttime;
1662
1663 *tvp = time;
1664 #ifdef notdef
1665 tvp->tv_usec += clkread();
1666 while (tvp->tv_usec > 1000000) {
1667 tvp->tv_sec++;
1668 tvp->tv_usec -= 1000000;
1669 }
1670 #endif
1671 if (tvp->tv_sec == lasttime.tv_sec &&
1672 tvp->tv_usec <= lasttime.tv_usec &&
1673 (tvp->tv_usec = lasttime.tv_usec + 1) > 1000000) {
1674 tvp->tv_sec++;
1675 tvp->tv_usec -= 1000000;
1676 }
1677 lasttime = *tvp;
1678 splx(s);
1679 }
1680
1681 /*
1682 * Wait "n" microseconds.
1683 */
1684 void
1685 delay(n)
1686 unsigned long n;
1687 {
1688 long N = cycles_per_usec * (n);
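	/*
	 * Crude calibrated spin: the loop below apparently assumes
	 * roughly three machine cycles per iteration, hence the XXX.
	 */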
1689
1690 while (N > 0) /* XXX */
1691 N -= 3; /* XXX */
1692 }
1693
1694 #if defined(COMPAT_OSF1) || 1 /* XXX */
1695 void cpu_exec_ecoff_setregs __P((struct proc *, struct exec_package *,
1696 u_long, register_t *));
1697
1698 void
1699 cpu_exec_ecoff_setregs(p, epp, stack, retval)
1700 struct proc *p;
1701 struct exec_package *epp;
1702 u_long stack;
1703 register_t *retval;
1704 {
1705 struct ecoff_exechdr *execp = (struct ecoff_exechdr *)epp->ep_hdr;
1706
1707 setregs(p, epp, stack, retval);
1708 p->p_md.md_tf->tf_regs[FRAME_GP] = execp->a.gp_value;
1709 }
1710
1711 /*
1712 * cpu_exec_ecoff_hook():
1713 * cpu-dependent ECOFF format hook for execve().
1714 *
1715 * Do any machine-dependent diddling of the exec package when doing ECOFF.
1716 *
1717 */
1718 int
1719 cpu_exec_ecoff_hook(p, epp)
1720 struct proc *p;
1721 struct exec_package *epp;
1722 {
1723 struct ecoff_exechdr *execp = (struct ecoff_exechdr *)epp->ep_hdr;
1724 extern struct emul emul_netbsd;
1725 #ifdef COMPAT_OSF1
1726 extern struct emul emul_osf1;
1727 #endif
1728
1729 switch (execp->f.f_magic) {
1730 #ifdef COMPAT_OSF1
1731 case ECOFF_MAGIC_ALPHA:
1732 epp->ep_emul = &emul_osf1;
1733 break;
1734 #endif
1735
1736 case ECOFF_MAGIC_NETBSD_ALPHA:
1737 epp->ep_emul = &emul_netbsd;
1738 break;
1739
1740 default:
1741 return ENOEXEC;
1742 }
1743 return 0;
1744 }
1745 #endif
1746
1747 /* XXX XXX BEGIN XXX XXX */
1748 vm_offset_t alpha_XXX_dmamap_or; /* XXX */
1749 /* XXX */
1750 vm_offset_t /* XXX */
1751 alpha_XXX_dmamap(v) /* XXX */
1752 vm_offset_t v; /* XXX */
1753 { /* XXX */
1754 /* XXX */
1755 return (vtophys(v) | alpha_XXX_dmamap_or); /* XXX */
1756 } /* XXX */
1757 /* XXX XXX END XXX XXX */
1758