/* $NetBSD: machdep.c,v 1.49 1996/10/18 20:35:23 cgd Exp $ */
2
3 /*
4 * Copyright (c) 1994, 1995, 1996 Carnegie-Mellon University.
5 * All rights reserved.
6 *
7 * Author: Chris G. Demetriou
8 *
9 * Permission to use, copy, modify and distribute this software and
10 * its documentation is hereby granted, provided that both the copyright
11 * notice and this permission notice appear in all copies of the
12 * software, derivative works or modified versions, and any portions
13 * thereof, and that both notices appear in supporting documentation.
14 *
15 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
16 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
17 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
18 *
19 * Carnegie Mellon requests users of this software to return to
20 *
21 * Software Distribution Coordinator or Software.Distribution (at) CS.CMU.EDU
22 * School of Computer Science
23 * Carnegie Mellon University
24 * Pittsburgh PA 15213-3890
25 *
26 * any improvements or extensions that they make and grant Carnegie the
27 * rights to redistribute these changes.
28 */
29
30 #include <sys/param.h>
31 #include <sys/systm.h>
32 #include <sys/signalvar.h>
33 #include <sys/kernel.h>
34 #include <sys/map.h>
35 #include <sys/proc.h>
36 #include <sys/buf.h>
37 #include <sys/reboot.h>
38 #include <sys/device.h>
39 #include <sys/conf.h>
40 #include <sys/file.h>
41 #ifdef REAL_CLISTS
42 #include <sys/clist.h>
43 #endif
44 #include <sys/callout.h>
45 #include <sys/malloc.h>
46 #include <sys/mbuf.h>
47 #include <sys/msgbuf.h>
48 #include <sys/ioctl.h>
49 #include <sys/tty.h>
50 #include <sys/user.h>
51 #include <sys/exec.h>
52 #include <sys/exec_ecoff.h>
53 #include <sys/sysctl.h>
54 #include <sys/core.h>
55 #include <sys/kcore.h>
56 #include <machine/kcore.h>
57 #ifdef SYSVMSG
58 #include <sys/msg.h>
59 #endif
60 #ifdef SYSVSEM
61 #include <sys/sem.h>
62 #endif
63 #ifdef SYSVSHM
64 #include <sys/shm.h>
65 #endif
66
67 #include <sys/mount.h>
68 #include <sys/syscallargs.h>
69
70 #include <vm/vm_kern.h>
71
72 #include <dev/cons.h>
73
74 #include <machine/cpu.h>
75 #include <machine/reg.h>
76 #include <machine/rpb.h>
77 #include <machine/prom.h>
78
79 #ifdef DEC_3000_500
80 #include <alpha/alpha/dec_3000_500.h>
81 #endif
82 #ifdef DEC_3000_300
83 #include <alpha/alpha/dec_3000_300.h>
84 #endif
85 #ifdef DEC_2100_A50
86 #include <alpha/alpha/dec_2100_a50.h>
87 #endif
88 #ifdef DEC_KN20AA
89 #include <alpha/alpha/dec_kn20aa.h>
90 #endif
91 #ifdef DEC_AXPPCI_33
92 #include <alpha/alpha/dec_axppci_33.h>
93 #endif
94 #ifdef DEC_21000
95 #include <alpha/alpha/dec_21000.h>
96 #endif
97
98 #include <net/netisr.h>
99 #include <net/if.h>
100
101 #ifdef INET
102 #include <netinet/in.h>
103 #include <netinet/if_ether.h>
104 #include <netinet/ip_var.h>
105 #endif
106 #ifdef NS
107 #include <netns/ns_var.h>
108 #endif
109 #ifdef ISO
110 #include <netiso/iso.h>
111 #include <netiso/clnp.h>
112 #endif
113 #include "ppp.h"
114 #if NPPP > 0
115 #include <net/ppp_defs.h>
116 #include <net/if_ppp.h>
117 #endif
118
119 #include "le_ioasic.h" /* for le_iomem creation */
120
vm_map_t buffer_map;		/* submap holding the file I/O buffers */

void dumpsys __P((void));

/*
 * Declare these as initialized data so we can patch them.
 */
int nswbuf = 0;			/* number of swap I/O buffer headers */
#ifdef NBUF
int nbuf = NBUF;
#else
int nbuf = 0;			/* number of file I/O buffer headers */
#endif
#ifdef BUFPAGES
int bufpages = BUFPAGES;
#else
int bufpages = 0;		/* pages of memory for the buffer cache */
#endif
int msgbufmapped = 0;		/* set when safe to use msgbuf */
int maxmem;			/* max memory per process */

int totalphysmem;		/* total amount of physical memory in system */
int physmem;			/* physical memory used by NetBSD + some rsvd */
int firstusablepage;		/* first usable memory page */
int lastusablepage;		/* last usable memory page */
int resvmem;			/* amount of memory reserved for PROM */
int unusedmem;			/* amount of memory for OS that we don't use */
int unknownmem;			/* amount of memory with an unknown use */

int cputype;			/* system type, from the RPB */

/*
 * XXX We need an address to which we can assign things so that they
 * won't be optimized away because we didn't use the value.
 */
u_int32_t no_optimize;

/* the following is used externally (sysctl_hw) */
char machine[] = "alpha";
char cpu_model[128];
/* Model name strings, indexed by the RPB system type (see alpha_init()). */
char *model_names[] = {
	"UNKNOWN (0)",
	"Alpha Demonstration Unit",
	"DEC 4000 (\"Cobra\")",
	"DEC 7000 (\"Ruby\")",
	"DEC 3000/500 (\"Flamingo\") family",
	"UNKNOWN (5)",
	"DEC 2000/300 (\"Jensen\")",
	"DEC 3000/300 (\"Pelican\")",
	"UNKNOWN (8)",
	"DEC 2100/A500 (\"Sable\")",
	"AXPvme 64",
	"AXPpci 33 (\"NoName\")",
	"DEC 21000 (\"TurboLaser\")",
	"DEC 2100/A50 (\"Avanti\") family",
	"Mustang",
	"DEC KN20AA",
	"UNKNOWN (16)",
	"DEC 1000 (\"Mikasa\")",
};
int nmodel_names = sizeof model_names/sizeof model_names[0];

struct user *proc0paddr;	/* proc0's U-area; set up in alpha_init() */

/* Number of machine cycles per microsecond */
u_int64_t cycles_per_usec;

/* some memory areas for device DMA. "ick." */
caddr_t le_iomem;		/* XXX iomem for LANCE DMA */

/* Interrupt vectors (in locore) */
extern int XentInt(), XentArith(), XentMM(), XentIF(), XentUna(), XentSys();

/* number of cpus in the box. really! */
int ncpus;

/* various CPU-specific functions, filled in by alpha_init(). */
char *(*cpu_modelname) __P((void));
void (*cpu_consinit) __P((void));
void (*cpu_device_register) __P((struct device *dev, void *aux));
char *cpu_iobus;		/* autoconf name of the CPU's primary I/O bus */

char boot_flags[64];		/* OS flag string fetched from the PROM */

/* for cpu_sysctl() */
char root_device[17];
int alpha_unaligned_print = 1;	/* warn about unaligned accesses */
int alpha_unaligned_fix = 1;	/* fix up unaligned accesses */
int alpha_unaligned_sigbus = 0;	/* don't SIGBUS on fixed-up accesses */

void identifycpu();
212
213 int
214 alpha_init(pfn, ptb)
215 u_long pfn; /* first free PFN number */
216 u_long ptb; /* PFN of current level 1 page table */
217 {
218 extern char _end[];
219 caddr_t start, v;
220 struct mddt *mddtp;
221 int i, mddtweird;
222 char *p;
223
224 /*
225 * Turn off interrupts and floating point.
226 * Make sure the instruction and data streams are consistent.
227 */
228 (void)splhigh();
229 alpha_pal_wrfen(0);
230 ALPHA_TBIA();
231 alpha_pal_imb();
232
233 /*
234 * get address of the restart block, while we the bootstrap
235 * mapping is still around.
236 */
237 hwrpb = (struct rpb *)ALPHA_PHYS_TO_K0SEG(
238 (vm_offset_t)(*(struct rpb **)HWRPB_ADDR));
239
240 /*
241 * Remember how many cycles there are per microsecond,
242 * so that we can use delay(). Round up, for safety.
243 */
244 cycles_per_usec = (hwrpb->rpb_cc_freq + 999999) / 1000000;
245
246 /*
247 * Init the PROM interface, so we can use printf
248 * until PROM mappings go away in consinit.
249 */
250 init_prom_interface();
251
252 /*
253 * Point interrupt/exception vectors to our own.
254 */
255 alpha_pal_wrent(XentInt, ALPHA_KENTRY_INT);
256 alpha_pal_wrent(XentArith, ALPHA_KENTRY_ARITH);
257 alpha_pal_wrent(XentMM, ALPHA_KENTRY_MM);
258 alpha_pal_wrent(XentIF, ALPHA_KENTRY_IF);
259 alpha_pal_wrent(XentUna, ALPHA_KENTRY_UNA);
260 alpha_pal_wrent(XentSys, ALPHA_KENTRY_SYS);
261
262 /*
263 * Disable System and Processor Correctable Error reporting.
264 * Clear pending machine checks and error reports, etc.
265 */
266 alpha_pal_wrmces(alpha_pal_rdmces() | ALPHA_MCES_DSC | ALPHA_MCES_DPC);
267
268 /*
269 * Find out how much memory is available, by looking at
270 * the memory cluster descriptors. This also tries to do
271 * its best to detect things things that have never been seen
272 * before...
273 *
274 * XXX Assumes that the first "system" cluster is the
275 * only one we can use. Is the second (etc.) system cluster
276 * (if one happens to exist) guaranteed to be contiguous? or...?
277 */
278 mddtp = (struct mddt *)(((caddr_t)hwrpb) + hwrpb->rpb_memdat_off);
279
280 /*
281 * BEGIN MDDT WEIRDNESS CHECKING
282 */
283 mddtweird = 0;
284
285 #define cnt mddtp->mddt_cluster_cnt
286 #define usage(n) mddtp->mddt_clusters[(n)].mddt_usage
287 if (cnt != 2 && cnt != 3) {
288 printf("WARNING: weird number (%ld) of mem clusters\n", cnt);
289 mddtweird = 1;
290 } else if (usage(0) != MDDT_PALCODE ||
291 usage(1) != MDDT_SYSTEM ||
292 (cnt == 3 && usage(2) != MDDT_PALCODE)) {
293 mddtweird = 1;
294 printf("WARNING: %ld mem clusters, but weird config\n", cnt);
295 }
296
297 for (i = 0; i < cnt; i++) {
298 if ((usage(i) & MDDT_mbz) != 0) {
299 printf("WARNING: mem cluster %d has weird usage %lx\n",
300 i, usage(i));
301 mddtweird = 1;
302 }
303 if (mddtp->mddt_clusters[i].mddt_pg_cnt == 0) {
304 printf("WARNING: mem cluster %d has pg cnt == 0\n", i);
305 mddtweird = 1;
306 }
307 /* XXX other things to check? */
308 }
309 #undef cnt
310 #undef usage
311
312 if (mddtweird) {
313 printf("\n");
314 printf("complete memory cluster information:\n");
315 for (i = 0; i < mddtp->mddt_cluster_cnt; i++) {
316 printf("mddt %d:\n", i);
317 printf("\tpfn %lx\n",
318 mddtp->mddt_clusters[i].mddt_pfn);
319 printf("\tcnt %lx\n",
320 mddtp->mddt_clusters[i].mddt_pg_cnt);
321 printf("\ttest %lx\n",
322 mddtp->mddt_clusters[i].mddt_pg_test);
323 printf("\tbva %lx\n",
324 mddtp->mddt_clusters[i].mddt_v_bitaddr);
325 printf("\tbpa %lx\n",
326 mddtp->mddt_clusters[i].mddt_p_bitaddr);
327 printf("\tbcksum %lx\n",
328 mddtp->mddt_clusters[i].mddt_bit_cksum);
329 printf("\tusage %lx\n",
330 mddtp->mddt_clusters[i].mddt_usage);
331 }
332 printf("\n");
333 }
334 /*
335 * END MDDT WEIRDNESS CHECKING
336 */
337
338 for (i = 0; i < mddtp->mddt_cluster_cnt; i++) {
339 totalphysmem += mddtp->mddt_clusters[i].mddt_pg_cnt;
340 #define usage(n) mddtp->mddt_clusters[(n)].mddt_usage
341 #define pgcnt(n) mddtp->mddt_clusters[(n)].mddt_pg_cnt
342 if ((usage(i) & MDDT_mbz) != 0)
343 unknownmem += pgcnt(i);
344 else if ((usage(i) & ~MDDT_mbz) == MDDT_PALCODE)
345 resvmem += pgcnt(i);
346 else if ((usage(i) & ~MDDT_mbz) == MDDT_SYSTEM) {
347 /*
348 * assumes that the system cluster listed is
349 * one we're in...
350 */
351 if (physmem != resvmem) {
352 physmem += pgcnt(i);
353 firstusablepage =
354 mddtp->mddt_clusters[i].mddt_pfn;
355 lastusablepage = firstusablepage + pgcnt(i) - 1;
356 } else
357 unusedmem += pgcnt(i);
358 }
359 #undef usage
360 #undef pgcnt
361 }
362 if (totalphysmem == 0)
363 panic("can't happen: system seems to have no memory!");
364 maxmem = physmem;
365
366 #if 0
367 printf("totalphysmem = %d\n", totalphysmem);
368 printf("physmem = %d\n", physmem);
369 printf("firstusablepage = %d\n", firstusablepage);
370 printf("lastusablepage = %d\n", lastusablepage);
371 printf("resvmem = %d\n", resvmem);
372 printf("unusedmem = %d\n", unusedmem);
373 printf("unknownmem = %d\n", unknownmem);
374 #endif
375
376 /*
377 * find out this CPU's page size
378 */
379 PAGE_SIZE = hwrpb->rpb_page_size;
380 if (PAGE_SIZE != 8192)
381 panic("page size %d != 8192?!", PAGE_SIZE);
382
383 v = (caddr_t)alpha_round_page(_end);
384 /*
385 * Init mapping for u page(s) for proc 0
386 */
387 start = v;
388 curproc->p_addr = proc0paddr = (struct user *)v;
389 v += UPAGES * NBPG;
390
391 /*
392 * Find out what hardware we're on, and remember its type name.
393 */
394 cputype = hwrpb->rpb_type;
395 switch (cputype) {
396 #ifdef DEC_3000_500 /* and 400, [6-9]00 */
397 case ST_DEC_3000_500:
398 cpu_modelname = dec_3000_500_modelname;
399 cpu_consinit = dec_3000_500_consinit;
400 cpu_device_register = dec_3000_500_device_register;
401 cpu_iobus = "tcasic";
402 break;
403 #endif
404
405 #ifdef DEC_3000_300
406 case ST_DEC_3000_300:
407 cpu_modelname = dec_3000_300_modelname;
408 cpu_consinit = dec_3000_300_consinit;
409 cpu_device_register = dec_3000_300_device_register;
410 cpu_iobus = "tcasic";
411 break;
412 #endif
413
414 #ifdef DEC_2100_A50
415 case ST_DEC_2100_A50:
416 cpu_modelname = dec_2100_a50_modelname;
417 cpu_consinit = dec_2100_a50_consinit;
418 cpu_device_register = dec_2100_a50_device_register;
419 cpu_iobus = "apecs";
420 break;
421 #endif
422
423 #ifdef DEC_KN20AA
424 case ST_DEC_KN20AA:
425 cpu_modelname = dec_kn20aa_modelname;
426 cpu_consinit = dec_kn20aa_consinit;
427 cpu_device_register = dec_kn20aa_device_register;
428 cpu_iobus = "cia";
429 break;
430 #endif
431
432 #ifdef DEC_AXPPCI_33
433 case ST_DEC_AXPPCI_33:
434 cpu_modelname = dec_axppci_33_modelname;
435 cpu_consinit = dec_axppci_33_consinit;
436 cpu_device_register = dec_axppci_33_device_register;
437 cpu_iobus = "lca";
438 break;
439 #endif
440
441 #ifdef DEC_2000_300
442 case ST_DEC_2000_300:
443 cpu_modelname = dec_2000_300_modelname;
444 cpu_consinit = dec_2000_300_consinit;
445 cpu_device_register = dec_2000_300_device_register;
446 cpu_iobus = "ibus";
447 XXX DEC 2000/300 NOT SUPPORTED
448 break;
449 #endif
450
451 #ifdef DEC_21000
452 case ST_DEC_21000:
453 cpu_modelname = dec_21000_modelname;
454 cpu_consinit = dec_21000_consinit;
455 cpu_device_register = dec_21000_device_register;
456 cpu_iobus = "tlsb";
457 XXX DEC 21000 NOT SUPPORTED
458 break;
459 #endif
460
461 default:
462 if (cputype > nmodel_names)
463 panic("Unknown system type %d", cputype);
464 else
465 panic("Support for %s system type not in kernel.",
466 model_names[cputype]);
467 }
468
469 if ((*cpu_modelname)() != NULL)
470 strncpy(cpu_model, (*cpu_modelname)(), sizeof cpu_model - 1);
471 else
472 strncpy(cpu_model, model_names[cputype], sizeof cpu_model - 1);
473 cpu_model[sizeof cpu_model - 1] = '\0';
474
475 #if NLE_IOASIC > 0
476 /*
477 * Grab 128K at the top of physical memory for the lance chip
478 * on machines where it does dma through the I/O ASIC.
479 * It must be physically contiguous and aligned on a 128K boundary.
480 *
481 * Note that since this is conditional on the presence of
482 * IOASIC-attached 'le' units in the kernel config, the
483 * message buffer may move on these systems. This shouldn't
484 * be a problem, because once people have a kernel config that
485 * they use, they're going to stick with it.
486 */
487 if (cputype == ST_DEC_3000_500 ||
488 cputype == ST_DEC_3000_300) { /* XXX possibly others? */
489 lastusablepage -= btoc(128 * 1024);
490 le_iomem =
491 (caddr_t)ALPHA_PHYS_TO_K0SEG(ctob(lastusablepage + 1));
492 }
493 #endif /* NLE_IOASIC */
494
495 /*
496 * Initialize error message buffer (at end of core).
497 */
498 lastusablepage -= btoc(sizeof (struct msgbuf));
499 msgbufp =
500 (struct msgbuf *)ALPHA_PHYS_TO_K0SEG(ctob(lastusablepage + 1));
501 msgbufmapped = 1;
502
503 /*
504 * Allocate space for system data structures.
505 * The first available kernel virtual address is in "v".
506 * As pages of kernel virtual memory are allocated, "v" is incremented.
507 *
508 * These data structures are allocated here instead of cpu_startup()
509 * because physical memory is directly addressable. We don't have
510 * to map these into virtual address space.
511 */
512 #define valloc(name, type, num) \
513 (name) = (type *)v; v = (caddr_t)ALIGN((name)+(num))
514 #define valloclim(name, type, num, lim) \
515 (name) = (type *)v; v = (caddr_t)ALIGN((lim) = ((name)+(num)))
516 #ifdef REAL_CLISTS
517 valloc(cfree, struct cblock, nclist);
518 #endif
519 valloc(callout, struct callout, ncallout);
520 valloc(swapmap, struct map, nswapmap = maxproc * 2);
521 #ifdef SYSVSHM
522 valloc(shmsegs, struct shmid_ds, shminfo.shmmni);
523 #endif
524 #ifdef SYSVSEM
525 valloc(sema, struct semid_ds, seminfo.semmni);
526 valloc(sem, struct sem, seminfo.semmns);
527 /* This is pretty disgusting! */
528 valloc(semu, int, (seminfo.semmnu * seminfo.semusz) / sizeof(int));
529 #endif
530 #ifdef SYSVMSG
531 valloc(msgpool, char, msginfo.msgmax);
532 valloc(msgmaps, struct msgmap, msginfo.msgseg);
533 valloc(msghdrs, struct msg, msginfo.msgtql);
534 valloc(msqids, struct msqid_ds, msginfo.msgmni);
535 #endif
536
537 /*
538 * Determine how many buffers to allocate.
539 * We allocate 10% of memory for buffer space. Insure a
540 * minimum of 16 buffers. We allocate 1/2 as many swap buffer
541 * headers as file i/o buffers.
542 */
543 if (bufpages == 0)
544 bufpages = (physmem * 10) / (CLSIZE * 100);
545 if (nbuf == 0) {
546 nbuf = bufpages;
547 if (nbuf < 16)
548 nbuf = 16;
549 }
550 if (nswbuf == 0) {
551 nswbuf = (nbuf / 2) &~ 1; /* force even */
552 if (nswbuf > 256)
553 nswbuf = 256; /* sanity */
554 }
555 valloc(swbuf, struct buf, nswbuf);
556 valloc(buf, struct buf, nbuf);
557
558 /*
559 * Clear allocated memory.
560 */
561 bzero(start, v - start);
562
563 /*
564 * Initialize the virtual memory system, and set the
565 * page table base register in proc 0's PCB.
566 */
567 #ifndef NEW_PMAP
568 pmap_bootstrap((vm_offset_t)v, ALPHA_PHYS_TO_K0SEG(ptb << PGSHIFT));
569 #else
570 pmap_bootstrap((vm_offset_t)v, ALPHA_PHYS_TO_K0SEG(ptb << PGSHIFT),
571 hwrpb->rpb_max_asn);
572 #endif
573
574 /*
575 * Initialize the rest of proc 0's PCB, and cache its physical
576 * address.
577 */
578 proc0.p_md.md_pcbpaddr =
579 (struct pcb *)ALPHA_K0SEG_TO_PHYS((vm_offset_t)&proc0paddr->u_pcb);
580
581 /*
582 * Set the kernel sp, reserving space for an (empty) trapframe,
583 * and make proc0's trapframe pointer point to it for sanity.
584 */
585 proc0paddr->u_pcb.pcb_hw.apcb_ksp =
586 (u_int64_t)proc0paddr + USPACE - sizeof(struct trapframe);
587 proc0.p_md.md_tf = (struct trapframe *)proc0paddr->u_pcb.pcb_hw.apcb_ksp;
588
589 #ifdef NEW_PMAP
590 pmap_activate(kernel_pmap, &proc0paddr->u_pcb.pcb_hw, 0);
591 #endif
592
593 /*
594 * Look at arguments passed to us and compute boothowto.
595 */
596 prom_getenv(PROM_E_BOOTED_OSFLAGS, boot_flags, sizeof(boot_flags));
597 #if 0
598 printf("boot flags = \"%s\"\n", boot_flags);
599 #endif
600
601 boothowto = RB_SINGLE;
602 #ifdef KADB
603 boothowto |= RB_KDB;
604 #endif
605 for (p = boot_flags; p && *p != '\0'; p++) {
606 /*
607 * Note that we'd really like to differentiate case here,
608 * but the Alpha AXP Architecture Reference Manual
609 * says that we shouldn't.
610 */
611 switch (*p) {
612 case 'a': /* autoboot */
613 case 'A':
614 boothowto &= ~RB_SINGLE;
615 break;
616
617 #ifdef DEBUG
618 case 'c': /* crash dump immediately after autoconfig */
619 case 'C':
620 boothowto |= RB_DUMP;
621 break;
622 #endif
623
624 case 'h': /* always halt, never reboot */
625 case 'H':
626 boothowto |= RB_HALT;
627 break;
628
629 #if 0
630 case 'm': /* mini root present in memory */
631 case 'M':
632 boothowto |= RB_MINIROOT;
633 break;
634 #endif
635
636 case 'n': /* askname */
637 case 'N':
638 boothowto |= RB_ASKNAME;
639 break;
640 }
641 }
642
643 /*
644 * Figure out the number of cpus in the box, from RPB fields.
645 * Really. We mean it.
646 */
647 for (i = 0; i < hwrpb->rpb_pcs_cnt; i++) {
648 struct pcs *pcsp;
649
650 pcsp = (struct pcs *)((char *)hwrpb + hwrpb->rpb_pcs_off +
651 (i * hwrpb->rpb_pcs_size));
652 if ((pcsp->pcs_flags & PCS_PP) != 0)
653 ncpus++;
654 }
655
656 return (0);
657 }
658
/*
 * consinit:
 *
 *	Machine-dependent console initialization.  Dispatches to the
 *	CPU-model-specific console init hook that alpha_init() chose,
 *	then tears down the bootstrap PROM mappings (after which the
 *	PROM console can no longer be used).
 */
void
consinit()
{

	(*cpu_consinit)();
	pmap_unmap_prom();
}
666
667 void
668 cpu_startup()
669 {
670 register unsigned i;
671 int base, residual;
672 vm_offset_t minaddr, maxaddr;
673 vm_size_t size;
674 #if defined(DEBUG)
675 extern int pmapdebug;
676 int opmapdebug = pmapdebug;
677
678 pmapdebug = 0;
679 #endif
680
681 /*
682 * Good {morning,afternoon,evening,night}.
683 */
684 printf(version);
685 identifycpu();
686 printf("real mem = %d (%d reserved for PROM, %d used by NetBSD)\n",
687 ctob(totalphysmem), ctob(resvmem), ctob(physmem));
688 if (unusedmem)
689 printf("WARNING: unused memory = %d bytes\n", ctob(unusedmem));
690 if (unknownmem)
691 printf("WARNING: %d bytes of memory with unknown purpose\n",
692 ctob(unknownmem));
693
694 /*
695 * Allocate virtual address space for file I/O buffers.
696 * Note they are different than the array of headers, 'buf',
697 * and usually occupy more virtual memory than physical.
698 */
699 size = MAXBSIZE * nbuf;
700 buffer_map = kmem_suballoc(kernel_map, (vm_offset_t *)&buffers,
701 &maxaddr, size, TRUE);
702 minaddr = (vm_offset_t)buffers;
703 if (vm_map_find(buffer_map, vm_object_allocate(size), (vm_offset_t)0,
704 &minaddr, size, FALSE) != KERN_SUCCESS)
705 panic("startup: cannot allocate buffers");
706 base = bufpages / nbuf;
707 residual = bufpages % nbuf;
708 for (i = 0; i < nbuf; i++) {
709 vm_size_t curbufsize;
710 vm_offset_t curbuf;
711
712 /*
713 * First <residual> buffers get (base+1) physical pages
714 * allocated for them. The rest get (base) physical pages.
715 *
716 * The rest of each buffer occupies virtual space,
717 * but has no physical memory allocated for it.
718 */
719 curbuf = (vm_offset_t)buffers + i * MAXBSIZE;
720 curbufsize = CLBYTES * (i < residual ? base+1 : base);
721 vm_map_pageable(buffer_map, curbuf, curbuf+curbufsize, FALSE);
722 vm_map_simplify(buffer_map, curbuf);
723 }
724 /*
725 * Allocate a submap for exec arguments. This map effectively
726 * limits the number of processes exec'ing at any time.
727 */
728 exec_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
729 16 * NCARGS, TRUE);
730
731 /*
732 * Allocate a submap for physio
733 */
734 phys_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
735 VM_PHYS_SIZE, TRUE);
736
737 /*
738 * Finally, allocate mbuf pool. Since mclrefcnt is an off-size
739 * we use the more space efficient malloc in place of kmem_alloc.
740 */
741 mclrefcnt = (char *)malloc(NMBCLUSTERS+CLBYTES/MCLBYTES,
742 M_MBUF, M_NOWAIT);
743 bzero(mclrefcnt, NMBCLUSTERS+CLBYTES/MCLBYTES);
744 mb_map = kmem_suballoc(kernel_map, (vm_offset_t *)&mbutl, &maxaddr,
745 VM_MBUF_SIZE, FALSE);
746 /*
747 * Initialize callouts
748 */
749 callfree = callout;
750 for (i = 1; i < ncallout; i++)
751 callout[i-1].c_next = &callout[i];
752 callout[i-1].c_next = NULL;
753
754 #if defined(DEBUG)
755 pmapdebug = opmapdebug;
756 #endif
757 printf("avail mem = %ld\n", (long)ptoa(cnt.v_free_count));
758 printf("using %ld buffers containing %ld bytes of memory\n",
759 (long)nbuf, (long)(bufpages * CLBYTES));
760
761 /*
762 * Set up buffers, so they can be used to read disk labels.
763 */
764 bufinit();
765
766 /*
767 * Configure the system.
768 */
769 configure();
770
771 /*
772 * Note that bootstrapping is finished, and set the HWRPB up
773 * to do restarts.
774 */
775 hwrbp_restart_setup();
776 }
777
/*
 * identifycpu:
 *
 *	Print CPU identification: the model string chosen by
 *	alpha_init(), the clock rate derived from the HWRPB cycle
 *	counter frequency, the page size, and the processor count.
 */
void
identifycpu()
{

	/*
	 * print out CPU identification information.
	 */
	printf("%s, %ldMHz\n", cpu_model,
	    hwrpb->rpb_cc_freq / 1000000);	/* XXX true for 21164? */
	printf("%ld byte page size, %d processor%s.\n",
	    hwrpb->rpb_page_size, ncpus, ncpus == 1 ? "" : "s");
#if 0
	/* this isn't defined for any systems that we run on? */
	printf("serial number 0x%lx 0x%lx\n",
	    ((long *)hwrpb->rpb_ssn)[0], ((long *)hwrpb->rpb_ssn)[1]);

	/* and these aren't particularly useful! */
	printf("variation: 0x%lx, revision 0x%lx\n",
	    hwrpb->rpb_variation, *(long *)hwrpb->rpb_revision);
#endif
}
799
int waittime = -1;	/* -1 until boot() has started the filesystem sync */
struct pcb dumppcb;	/* register state saved here by dumpsys() */
802
/*
 * boot:
 *
 *	Machine-dependent reboot/halt.  Syncs the disks and resets the
 *	time-of-day register (unless RB_NOSYNC, already in progress, or
 *	the system is still cold), optionally dumps core, runs the
 *	shutdown hooks, and finally asks the PROM to halt or reboot
 *	depending on the RB_HALT bit.  Does not return.
 */
void
boot(howto, bootstr)
	int howto;
	char *bootstr;		/* currently unused */
{
	extern int cold;

	/* If system is cold, just halt. */
	if (cold) {
		howto |= RB_HALT;
		goto haltsys;
	}

	/* If "always halt" was specified as a boot flag, obey. */
	if ((boothowto & RB_HALT) != 0)
		howto |= RB_HALT;

	boothowto = howto;
	/* waittime guards against recursive syncs if we re-enter boot(). */
	if ((howto & RB_NOSYNC) == 0 && waittime < 0) {
		waittime = 0;
		vfs_shutdown();
		/*
		 * If we've been adjusting the clock, the todr
		 * will be out of synch; adjust it now.
		 */
		resettodr();
	}

	/* Disable interrupts. */
	splhigh();

	/* If rebooting and a dump is requested do it. */
#if 0
	if ((howto & (RB_DUMP | RB_HALT)) == RB_DUMP)
#else
	if (howto & RB_DUMP)
#endif
		dumpsys();

haltsys:

	/* run any shutdown hooks */
	doshutdownhooks();

#ifdef BOOTKEY
	printf("hit any key to %s...\n", howto & RB_HALT ? "halt" : "reboot");
	cngetc();
	printf("\n");
#endif

	/* Finally, halt/reboot the system. */
	printf("%s\n\n", howto & RB_HALT ? "halted." : "rebooting...");
	prom_halt(howto & RB_HALT);
	/*NOTREACHED*/
}
858
/*
 * These variables are needed by /sbin/savecore.
 * dumpsize/dumplo are computed by dumpconf(); dumpsize == 0 means
 * "no dump configured".
 */
u_long dumpmag = 0x8fca0101;	/* magic number */
int dumpsize = 0;		/* pages */
long dumplo = 0;		/* blocks */
865
866 /*
867 * cpu_dumpsize: calculate size of machine-dependent kernel core dump headers.
868 */
869 int
870 cpu_dumpsize()
871 {
872 int size;
873
874 size = ALIGN(sizeof(kcore_seg_t)) + ALIGN(sizeof(cpu_kcore_hdr_t));
875 if (roundup(size, dbtob(1)) != dbtob(1))
876 return -1;
877
878 return (1);
879 }
880
/*
 * cpu_dump: dump machine-dependent kernel core dump headers.
 *
 *	Builds a kcore segment header followed by the cpu_kcore_hdr_t
 *	(level-1 page table PA, page size, and the physical extent of
 *	the memory being dumped) in a single disk-block-sized buffer,
 *	and writes it at the start of the dump area.
 *
 *	Returns 0 on success or an errno from the device dump routine.
 */
int
cpu_dump()
{
	int (*dump) __P((dev_t, daddr_t, caddr_t, size_t));
	long buf[dbtob(1) / sizeof (long)];	/* one block, long-aligned */
	kcore_seg_t *segp;
	cpu_kcore_hdr_t *cpuhdrp;

	dump = bdevsw[major(dumpdev)].d_dump;

	/* Lay the two headers out back-to-back in the block buffer. */
	segp = (kcore_seg_t *)buf;
	cpuhdrp =
	    (cpu_kcore_hdr_t *)&buf[ALIGN(sizeof(*segp)) / sizeof (long)];

	/*
	 * Generate a segment header.
	 */
	CORE_SETMAGIC(*segp, KCORE_MAGIC, MID_MACHINE, CORE_CPU);
	segp->c_size = dbtob(1) - ALIGN(sizeof(*segp));

	/*
	 * Add the machine-dependent header info
	 */
	cpuhdrp->lev1map_pa = ALPHA_K0SEG_TO_PHYS((vm_offset_t)Lev1map);
	cpuhdrp->page_size = PAGE_SIZE;
	cpuhdrp->core_seg.start = ctob(firstusablepage);
	cpuhdrp->core_seg.size = ctob(physmem);

	return (dump(dumpdev, dumplo, (caddr_t)buf, dbtob(1)));
}
914
/*
 * This is called by configure to set dumplo and dumpsize.
 * Dumps always skip the first CLBYTES of disk space
 * in case there might be a disk label stored there.
 * If there is extra space, put dump at the end to
 * reduce the chance that swapping trashes it.
 *
 * On any failure, dumpsize is left at 0, which dumpsys() treats
 * as "no dump configured".
 */
void
dumpconf()
{
	int nblks, dumpblks;	/* size of dump area */
	int maj;

	if (dumpdev == NODEV)
		goto bad;
	maj = major(dumpdev);
	if (maj < 0 || maj >= nblkdev)
		panic("dumpconf: bad dumpdev=0x%x", dumpdev);
	if (bdevsw[maj].d_psize == NULL)
		goto bad;
	/* Partition must be bigger than the label area we always skip. */
	nblks = (*bdevsw[maj].d_psize)(dumpdev);
	if (nblks <= ctod(1))
		goto bad;

	/* Total dump = MD headers + all of physical memory, in blocks. */
	dumpblks = cpu_dumpsize();
	if (dumpblks < 0)
		goto bad;
	dumpblks += ctod(physmem);

	/* If dump won't fit (incl. room for possible label), punt. */
	if (dumpblks > (nblks - ctod(1)))
		goto bad;

	/* Put dump at end of partition */
	dumplo = nblks - dumpblks;

	/* dumpsize is in page units, and doesn't include headers. */
	dumpsize = physmem;
	return;

bad:
	dumpsize = 0;
	return;
}
959
/*
 * Dump the kernel's image to the swap partition.
 */
#define BYTES_PER_DUMP NBPG	/* transfer unit: one page per write */

/*
 * dumpsys:
 *
 *	Write the machine-dependent headers (cpu_dump()) followed by
 *	all of physical memory to the configured dump device, printing
 *	a countdown in megabytes and a final status message.  Called
 *	from boot() with interrupts already blocked.
 */
void
dumpsys()
{
	unsigned bytes, i, n;
	int maddr, psize;
	daddr_t blkno;
	int (*dump) __P((dev_t, daddr_t, caddr_t, size_t));
	int error;

	/* Save registers. */
	savectx(&dumppcb);

	msgbufmapped = 0;	/* don't record dump msgs in msgbuf */
	if (dumpdev == NODEV)
		return;

	/*
	 * For dumps during autoconfiguration,
	 * if dump device has already configured...
	 */
	if (dumpsize == 0)
		dumpconf();
	if (dumplo <= 0) {
		printf("\ndump to dev %x not possible\n", dumpdev);
		return;
	}
	printf("\ndumping to dev %x, offset %ld\n", dumpdev, dumplo);

	psize = (*bdevsw[major(dumpdev)].d_psize)(dumpdev);
	printf("dump ");
	if (psize == -1) {
		printf("area unavailable\n");
		return;
	}

	/* XXX should purge all outstanding keystrokes. */

	if ((error = cpu_dump()) != 0)
		goto err;

	/* Dump all of the usable physical memory, one page per write. */
	bytes = ctob(physmem);
	maddr = ctob(firstusablepage);
	blkno = dumplo + cpu_dumpsize();
	dump = bdevsw[major(dumpdev)].d_dump;
	error = 0;
	for (i = 0; i < bytes; i += n) {

		/* Print out how many MBs we have to go. */
		n = bytes - i;
		if (n && (n % (1024*1024)) == 0)
			printf("%d ", n / (1024 * 1024));

		/* Limit size for next transfer. */
		if (n > BYTES_PER_DUMP)
			n = BYTES_PER_DUMP;

		error = (*dump)(dumpdev, blkno,
		    (caddr_t)ALPHA_PHYS_TO_K0SEG(maddr), n);
		if (error)
			break;
		maddr += n;
		blkno += btodb(n);	/* XXX? */

		/* XXX should look for keystrokes, to cancel. */
	}

err:
	switch (error) {

	case ENXIO:
		printf("device bad\n");
		break;

	case EFAULT:
		printf("device not ready\n");
		break;

	case EINVAL:
		printf("area improper\n");
		break;

	case EIO:
		printf("i/o error\n");
		break;

	case EINTR:
		printf("aborted from console\n");
		break;

	case 0:
		printf("succeeded\n");
		break;

	default:
		printf("error %d\n", error);
		break;
	}
	printf("\n\n");
	delay(1000);	/* let the message drain to the console */
}
1065
1066 void
1067 frametoreg(framep, regp)
1068 struct trapframe *framep;
1069 struct reg *regp;
1070 {
1071
1072 regp->r_regs[R_V0] = framep->tf_regs[FRAME_V0];
1073 regp->r_regs[R_T0] = framep->tf_regs[FRAME_T0];
1074 regp->r_regs[R_T1] = framep->tf_regs[FRAME_T1];
1075 regp->r_regs[R_T2] = framep->tf_regs[FRAME_T2];
1076 regp->r_regs[R_T3] = framep->tf_regs[FRAME_T3];
1077 regp->r_regs[R_T4] = framep->tf_regs[FRAME_T4];
1078 regp->r_regs[R_T5] = framep->tf_regs[FRAME_T5];
1079 regp->r_regs[R_T6] = framep->tf_regs[FRAME_T6];
1080 regp->r_regs[R_T7] = framep->tf_regs[FRAME_T7];
1081 regp->r_regs[R_S0] = framep->tf_regs[FRAME_S0];
1082 regp->r_regs[R_S1] = framep->tf_regs[FRAME_S1];
1083 regp->r_regs[R_S2] = framep->tf_regs[FRAME_S2];
1084 regp->r_regs[R_S3] = framep->tf_regs[FRAME_S3];
1085 regp->r_regs[R_S4] = framep->tf_regs[FRAME_S4];
1086 regp->r_regs[R_S5] = framep->tf_regs[FRAME_S5];
1087 regp->r_regs[R_S6] = framep->tf_regs[FRAME_S6];
1088 regp->r_regs[R_A0] = framep->tf_regs[FRAME_A0];
1089 regp->r_regs[R_A1] = framep->tf_regs[FRAME_A1];
1090 regp->r_regs[R_A2] = framep->tf_regs[FRAME_A2];
1091 regp->r_regs[R_A3] = framep->tf_regs[FRAME_A3];
1092 regp->r_regs[R_A4] = framep->tf_regs[FRAME_A4];
1093 regp->r_regs[R_A5] = framep->tf_regs[FRAME_A5];
1094 regp->r_regs[R_T8] = framep->tf_regs[FRAME_T8];
1095 regp->r_regs[R_T9] = framep->tf_regs[FRAME_T9];
1096 regp->r_regs[R_T10] = framep->tf_regs[FRAME_T10];
1097 regp->r_regs[R_T11] = framep->tf_regs[FRAME_T11];
1098 regp->r_regs[R_RA] = framep->tf_regs[FRAME_RA];
1099 regp->r_regs[R_T12] = framep->tf_regs[FRAME_T12];
1100 regp->r_regs[R_AT] = framep->tf_regs[FRAME_AT];
1101 regp->r_regs[R_GP] = framep->tf_regs[FRAME_GP];
1102 /* regp->r_regs[R_SP] = framep->tf_regs[FRAME_SP]; XXX */
1103 regp->r_regs[R_ZERO] = 0;
1104 }
1105
1106 void
1107 regtoframe(regp, framep)
1108 struct reg *regp;
1109 struct trapframe *framep;
1110 {
1111
1112 framep->tf_regs[FRAME_V0] = regp->r_regs[R_V0];
1113 framep->tf_regs[FRAME_T0] = regp->r_regs[R_T0];
1114 framep->tf_regs[FRAME_T1] = regp->r_regs[R_T1];
1115 framep->tf_regs[FRAME_T2] = regp->r_regs[R_T2];
1116 framep->tf_regs[FRAME_T3] = regp->r_regs[R_T3];
1117 framep->tf_regs[FRAME_T4] = regp->r_regs[R_T4];
1118 framep->tf_regs[FRAME_T5] = regp->r_regs[R_T5];
1119 framep->tf_regs[FRAME_T6] = regp->r_regs[R_T6];
1120 framep->tf_regs[FRAME_T7] = regp->r_regs[R_T7];
1121 framep->tf_regs[FRAME_S0] = regp->r_regs[R_S0];
1122 framep->tf_regs[FRAME_S1] = regp->r_regs[R_S1];
1123 framep->tf_regs[FRAME_S2] = regp->r_regs[R_S2];
1124 framep->tf_regs[FRAME_S3] = regp->r_regs[R_S3];
1125 framep->tf_regs[FRAME_S4] = regp->r_regs[R_S4];
1126 framep->tf_regs[FRAME_S5] = regp->r_regs[R_S5];
1127 framep->tf_regs[FRAME_S6] = regp->r_regs[R_S6];
1128 framep->tf_regs[FRAME_A0] = regp->r_regs[R_A0];
1129 framep->tf_regs[FRAME_A1] = regp->r_regs[R_A1];
1130 framep->tf_regs[FRAME_A2] = regp->r_regs[R_A2];
1131 framep->tf_regs[FRAME_A3] = regp->r_regs[R_A3];
1132 framep->tf_regs[FRAME_A4] = regp->r_regs[R_A4];
1133 framep->tf_regs[FRAME_A5] = regp->r_regs[R_A5];
1134 framep->tf_regs[FRAME_T8] = regp->r_regs[R_T8];
1135 framep->tf_regs[FRAME_T9] = regp->r_regs[R_T9];
1136 framep->tf_regs[FRAME_T10] = regp->r_regs[R_T10];
1137 framep->tf_regs[FRAME_T11] = regp->r_regs[R_T11];
1138 framep->tf_regs[FRAME_RA] = regp->r_regs[R_RA];
1139 framep->tf_regs[FRAME_T12] = regp->r_regs[R_T12];
1140 framep->tf_regs[FRAME_AT] = regp->r_regs[R_AT];
1141 framep->tf_regs[FRAME_GP] = regp->r_regs[R_GP];
1142 /* framep->tf_regs[FRAME_SP] = regp->r_regs[R_SP]; XXX */
1143 /* ??? = regp->r_regs[R_ZERO]; */
1144 }
1145
1146 void
1147 printregs(regp)
1148 struct reg *regp;
1149 {
1150 int i;
1151
1152 for (i = 0; i < 32; i++)
1153 printf("R%d:\t0x%016lx%s", i, regp->r_regs[i],
1154 i & 1 ? "\n" : "\t");
1155 }
1156
1157 void
1158 regdump(framep)
1159 struct trapframe *framep;
1160 {
1161 struct reg reg;
1162
1163 frametoreg(framep, ®);
1164 reg.r_regs[R_SP] = alpha_pal_rdusp();
1165
1166 printf("REGISTERS:\n");
1167 printregs(®);
1168 }
1169
#ifdef DEBUG
int sigdebug = 0;	/* bitmask of SDB_* flags enabling signal tracing */
int sigpid = 0;		/* restrict SDB_KSTACK output to this process id */
#define SDB_FOLLOW 0x01	/* trace sendsig()/sigreturn() entry and exit */
#define SDB_KSTACK 0x02	/* trace signal stack setup and validation */
#endif
1176
1177 /*
1178 * Send an interrupt to process.
1179 */
1180 void
1181 sendsig(catcher, sig, mask, code)
1182 sig_t catcher;
1183 int sig, mask;
1184 u_long code;
1185 {
1186 struct proc *p = curproc;
1187 struct sigcontext *scp, ksc;
1188 struct trapframe *frame;
1189 struct sigacts *psp = p->p_sigacts;
1190 int oonstack, fsize, rndfsize;
1191 extern char sigcode[], esigcode[];
1192 extern struct proc *fpcurproc;
1193
1194 frame = p->p_md.md_tf;
1195 oonstack = psp->ps_sigstk.ss_flags & SS_ONSTACK;
1196 fsize = sizeof ksc;
1197 rndfsize = ((fsize + 15) / 16) * 16;
1198 /*
1199 * Allocate and validate space for the signal handler
1200 * context. Note that if the stack is in P0 space, the
1201 * call to grow() is a nop, and the useracc() check
1202 * will fail if the process has not already allocated
1203 * the space with a `brk'.
1204 */
1205 if ((psp->ps_flags & SAS_ALTSTACK) && !oonstack &&
1206 (psp->ps_sigonstack & sigmask(sig))) {
1207 scp = (struct sigcontext *)(psp->ps_sigstk.ss_sp +
1208 psp->ps_sigstk.ss_size - rndfsize);
1209 psp->ps_sigstk.ss_flags |= SS_ONSTACK;
1210 } else
1211 scp = (struct sigcontext *)(alpha_pal_rdusp() - rndfsize);
1212 if ((u_long)scp <= USRSTACK - ctob(p->p_vmspace->vm_ssize))
1213 (void)grow(p, (u_long)scp);
1214 #ifdef DEBUG
1215 if ((sigdebug & SDB_KSTACK) && p->p_pid == sigpid)
1216 printf("sendsig(%d): sig %d ssp %p usp %p\n", p->p_pid,
1217 sig, &oonstack, scp);
1218 #endif
1219 if (useracc((caddr_t)scp, fsize, B_WRITE) == 0) {
1220 #ifdef DEBUG
1221 if ((sigdebug & SDB_KSTACK) && p->p_pid == sigpid)
1222 printf("sendsig(%d): useracc failed on sig %d\n",
1223 p->p_pid, sig);
1224 #endif
1225 /*
1226 * Process has trashed its stack; give it an illegal
1227 * instruction to halt it in its tracks.
1228 */
1229 SIGACTION(p, SIGILL) = SIG_DFL;
1230 sig = sigmask(SIGILL);
1231 p->p_sigignore &= ~sig;
1232 p->p_sigcatch &= ~sig;
1233 p->p_sigmask &= ~sig;
1234 psignal(p, SIGILL);
1235 return;
1236 }
1237
1238 /*
1239 * Build the signal context to be used by sigreturn.
1240 */
1241 ksc.sc_onstack = oonstack;
1242 ksc.sc_mask = mask;
1243 ksc.sc_pc = frame->tf_regs[FRAME_PC];
1244 ksc.sc_ps = frame->tf_regs[FRAME_PS];
1245
1246 /* copy the registers. */
1247 frametoreg(frame, (struct reg *)ksc.sc_regs);
1248 ksc.sc_regs[R_ZERO] = 0xACEDBADE; /* magic number */
1249 ksc.sc_regs[R_SP] = alpha_pal_rdusp();
1250
1251 /* save the floating-point state, if necessary, then copy it. */
1252 if (p == fpcurproc) {
1253 alpha_pal_wrfen(1);
1254 savefpstate(&p->p_addr->u_pcb.pcb_fp);
1255 alpha_pal_wrfen(0);
1256 fpcurproc = NULL;
1257 }
1258 ksc.sc_ownedfp = p->p_md.md_flags & MDP_FPUSED;
1259 bcopy(&p->p_addr->u_pcb.pcb_fp, (struct fpreg *)ksc.sc_fpregs,
1260 sizeof(struct fpreg));
1261 ksc.sc_fp_control = 0; /* XXX ? */
1262 bzero(ksc.sc_reserved, sizeof ksc.sc_reserved); /* XXX */
1263 bzero(ksc.sc_xxx, sizeof ksc.sc_xxx); /* XXX */
1264
1265
1266 #ifdef COMPAT_OSF1
1267 /*
1268 * XXX Create an OSF/1-style sigcontext and associated goo.
1269 */
1270 #endif
1271
1272 /*
1273 * copy the frame out to userland.
1274 */
1275 (void) copyout((caddr_t)&ksc, (caddr_t)scp, fsize);
1276 #ifdef DEBUG
1277 if (sigdebug & SDB_FOLLOW)
1278 printf("sendsig(%d): sig %d scp %p code %lx\n", p->p_pid, sig,
1279 scp, code);
1280 #endif
1281
1282 /*
1283 * Set up the registers to return to sigcode.
1284 */
1285 frame->tf_regs[FRAME_PC] =
1286 (u_int64_t)PS_STRINGS - (esigcode - sigcode);
1287 frame->tf_regs[FRAME_A0] = sig;
1288 frame->tf_regs[FRAME_A1] = code;
1289 frame->tf_regs[FRAME_A2] = (u_int64_t)scp;
1290 frame->tf_regs[FRAME_T12] = (u_int64_t)catcher; /* t12 is pv */
1291 alpha_pal_wrusp((unsigned long)scp);
1292
1293 #ifdef DEBUG
1294 if (sigdebug & SDB_FOLLOW)
1295 printf("sendsig(%d): pc %lx, catcher %lx\n", p->p_pid,
1296 frame->tf_regs[FRAME_PC], frame->tf_regs[FRAME_A3]);
1297 if ((sigdebug & SDB_KSTACK) && p->p_pid == sigpid)
1298 printf("sendsig(%d): sig %d returns\n",
1299 p->p_pid, sig);
1300 #endif
1301 }
1302
1303 /*
1304 * System call to cleanup state after a signal
1305 * has been taken. Reset signal mask and
1306 * stack state from context left by sendsig (above).
1307 * Return to previous pc and psl as specified by
1308 * context left by sendsig. Check carefully to
1309 * make sure that the user has not modified the
 * psl to gain improper privileges or to cause
1311 * a machine fault.
1312 */
/* ARGSUSED */
int
sys_sigreturn(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	struct sys_sigreturn_args /* {
		syscallarg(struct sigcontext *) sigcntxp;
	} */ *uap = v;
	struct sigcontext *scp, ksc;
	extern struct proc *fpcurproc;

	scp = SCARG(uap, sigcntxp);
#ifdef DEBUG
	if (sigdebug & SDB_FOLLOW)
		printf("sigreturn: pid %d, scp %p\n", p->p_pid, scp);
#endif

	/* The user's context pointer must be naturally aligned. */
	if (ALIGN(scp) != (u_int64_t)scp)
		return (EINVAL);

	/*
	 * Test and fetch the context structure.
	 * We grab it all at once for speed.
	 */
	if (useracc((caddr_t)scp, sizeof (*scp), B_WRITE) == 0 ||
	    copyin((caddr_t)scp, (caddr_t)&ksc, sizeof ksc))
		return (EINVAL);

	/* Reject contexts not built by sendsig() (see 0xACEDBADE there). */
	if (ksc.sc_regs[R_ZERO] != 0xACEDBADE)		/* magic number */
		return (EINVAL);
	/*
	 * Restore the user-supplied information
	 */
	if (ksc.sc_onstack)
		p->p_sigacts->ps_sigstk.ss_flags |= SS_ONSTACK;
	else
		p->p_sigacts->ps_sigstk.ss_flags &= ~SS_ONSTACK;
	/* Never allow the unblockable signals to be masked. */
	p->p_sigmask = ksc.sc_mask &~ sigcantmask;

	p->p_md.md_tf->tf_regs[FRAME_PC] = ksc.sc_pc;
	/* Sanitize the PS: force the user-mode bits, strip privileged ones. */
	p->p_md.md_tf->tf_regs[FRAME_PS] =
	    (ksc.sc_ps | ALPHA_PSL_USERSET) & ~ALPHA_PSL_USERCLR;

	regtoframe((struct reg *)ksc.sc_regs, p->p_md.md_tf);
	alpha_pal_wrusp(ksc.sc_regs[R_SP]);

	/* XXX ksc.sc_ownedfp ? */
	/* Discard any live FP state; it is reloaded from the pcb copy. */
	if (p == fpcurproc)
		fpcurproc = NULL;
	bcopy((struct fpreg *)ksc.sc_fpregs, &p->p_addr->u_pcb.pcb_fp,
	    sizeof(struct fpreg));
	/* XXX ksc.sc_fp_control ? */

#ifdef DEBUG
	if (sigdebug & SDB_FOLLOW)
		printf("sigreturn(%d): returns\n", p->p_pid);
#endif
	return (EJUSTRETURN);
}
1374
1375 /*
1376 * machine dependent system variables.
1377 */
1378 int
1379 cpu_sysctl(name, namelen, oldp, oldlenp, newp, newlen, p)
1380 int *name;
1381 u_int namelen;
1382 void *oldp;
1383 size_t *oldlenp;
1384 void *newp;
1385 size_t newlen;
1386 struct proc *p;
1387 {
1388 dev_t consdev;
1389
1390 /* all sysctl names at this level are terminal */
1391 if (namelen != 1)
1392 return (ENOTDIR); /* overloaded */
1393
1394 switch (name[0]) {
1395 case CPU_CONSDEV:
1396 if (cn_tab != NULL)
1397 consdev = cn_tab->cn_dev;
1398 else
1399 consdev = NODEV;
1400 return (sysctl_rdstruct(oldp, oldlenp, newp, &consdev,
1401 sizeof consdev));
1402
1403 case CPU_ROOT_DEVICE:
1404 return (sysctl_rdstring(oldp, oldlenp, newp, root_device));
1405
1406 case CPU_UNALIGNED_PRINT:
1407 return (sysctl_int(oldp, oldlenp, newp, newlen,
1408 &alpha_unaligned_print));
1409
1410 case CPU_UNALIGNED_FIX:
1411 return (sysctl_int(oldp, oldlenp, newp, newlen,
1412 &alpha_unaligned_fix));
1413
1414 case CPU_UNALIGNED_SIGBUS:
1415 return (sysctl_int(oldp, oldlenp, newp, newlen,
1416 &alpha_unaligned_sigbus));
1417
1418 default:
1419 return (EOPNOTSUPP);
1420 }
1421 /* NOTREACHED */
1422 }
1423
1424 /*
1425 * Set registers on exec.
1426 */
1427 void
1428 setregs(p, pack, stack, retval)
1429 register struct proc *p;
1430 struct exec_package *pack;
1431 u_long stack;
1432 register_t *retval;
1433 {
1434 struct trapframe *tfp = p->p_md.md_tf;
1435 int i;
1436 extern struct proc *fpcurproc;
1437
1438 #ifdef DEBUG
1439 /*
1440 * Crash and dump, if the user requested it.
1441 */
1442 if (boothowto & RB_DUMP)
1443 panic("crash requested by boot flags");
1444 #endif
1445
1446 #ifdef DEBUG
1447 for (i = 0; i < FRAME_SIZE; i++)
1448 tfp->tf_regs[i] = 0xbabefacedeadbeef;
1449 #else
1450 bzero(tfp->tf_regs, FRAME_SIZE * sizeof tfp->tf_regs[0]);
1451 #endif
1452 bzero(&p->p_addr->u_pcb.pcb_fp, sizeof p->p_addr->u_pcb.pcb_fp);
1453 #define FP_RN 2 /* XXX */
1454 p->p_addr->u_pcb.pcb_fp.fpr_cr = (long)FP_RN << 58;
1455 alpha_pal_wrusp(stack);
1456 tfp->tf_regs[FRAME_PS] = ALPHA_PSL_USERSET;
1457 tfp->tf_regs[FRAME_PC] = pack->ep_entry & ~3;
1458
1459 tfp->tf_regs[FRAME_A0] = stack;
1460 /* a1 and a2 already zeroed */
1461 tfp->tf_regs[FRAME_T12] = tfp->tf_regs[FRAME_PC]; /* a.k.a. PV */
1462
1463 p->p_md.md_flags &= ~MDP_FPUSED;
1464 if (fpcurproc == p)
1465 fpcurproc = NULL;
1466
1467 retval[0] = retval[1] = 0;
1468 }
1469
1470 void
1471 netintr()
1472 {
1473 int n, s;
1474
1475 s = splhigh();
1476 n = netisr;
1477 netisr = 0;
1478 splx(s);
1479
1480 #define DONETISR(bit, fn) \
1481 do { \
1482 if (n & (1 << (bit))) \
1483 fn; \
1484 } while (0)
1485
1486 #ifdef INET
1487 DONETISR(NETISR_ARP, arpintr());
1488 DONETISR(NETISR_IP, ipintr());
1489 #endif
1490 #ifdef NS
1491 DONETISR(NETISR_NS, nsintr());
1492 #endif
1493 #ifdef ISO
1494 DONETISR(NETISR_ISO, clnlintr());
1495 #endif
1496 #ifdef CCITT
1497 DONETISR(NETISR_CCITT, ccittintr());
1498 #endif
1499 #ifdef NATM
1500 DONETISR(NETISR_NATM, natmintr());
1501 #endif
1502 #if NPPP > 1
1503 DONETISR(NETISR_PPP, pppintr());
1504 #endif
1505
1506 #undef DONETISR
1507 }
1508
/*
 * Run pending software interrupts (ssir bits).  Each bit is cleared
 * (siroff) before its handler runs, so a request posted while the
 * handler executes is not lost; ssir is deliberately re-read for
 * each check.  Called from spl0().
 */
void
do_sir()
{

	if (ssir & SIR_NET) {
		siroff(SIR_NET);
		cnt.v_soft++;	/* account a software interrupt */
		netintr();
	}
	if (ssir & SIR_CLOCK) {
		siroff(SIR_CLOCK);
		cnt.v_soft++;
		softclock();
	}
}
1524
/*
 * Lower the interrupt priority level to 0.  Pending software
 * interrupts are dispatched first (at software IPL, via splsoft)
 * so they are not left stranded.  Returns the previous IPL, as
 * reported by alpha_pal_swpipl().
 */
int
spl0()
{

	if (ssir) {
		splsoft();
		do_sir();
	}

	return (alpha_pal_swpipl(ALPHA_PSL_IPL_0));
}
1536
1537 /*
1538 * The following primitives manipulate the run queues. _whichqs tells which
1539 * of the 32 queues _qs have processes in them. Setrunqueue puts processes
1540 * into queues, Remrq removes them from queues. The running process is on
1541 * no queue, other processes are on a queue related to p->p_priority, divided
1542 * by 4 actually to shrink the 0-127 range of priorities into the 32 available
1543 * queues.
1544 */
1545 /*
1546 * setrunqueue(p)
1547 * proc *p;
1548 *
1549 * Call should be made at splclock(), and p->p_stat should be SRUN.
1550 */
1551
void
setrunqueue(p)
	struct proc *p;
{
	int bit;

	/* firewall: p->p_back must be NULL */
	if (p->p_back != NULL)
		panic("setrunqueue");

	/* Map priority 0-127 onto the 32 run queues and mark it occupied. */
	bit = p->p_priority >> 2;
	whichqs |= (1 << bit);
	/*
	 * Insert p at the tail of queue `bit'.  The queue header is
	 * treated as a proc so the links stay uniform; order of the
	 * four pointer updates matters.
	 */
	p->p_forw = (struct proc *)&qs[bit];
	p->p_back = qs[bit].ph_rlink;
	p->p_back->p_forw = p;
	qs[bit].ph_rlink = p;
}
1569
1570 /*
1571 * Remrq(p)
1572 *
1573 * Call should be made at splclock().
1574 */
void
remrq(p)
	struct proc *p;
{
	int bit;

	/* The queue for p's priority must be marked non-empty. */
	bit = p->p_priority >> 2;
	if ((whichqs & (1 << bit)) == 0)
		panic("remrq");

	/* Unlink p from the doubly-linked queue. */
	p->p_back->p_forw = p->p_forw;
	p->p_forw->p_back = p->p_back;
	p->p_back = NULL;	/* for firewall checking. */

	/* Clear the whichqs bit if the queue is now empty (header only). */
	if ((struct proc *)&qs[bit] == qs[bit].ph_link)
		whichqs &= ~(1 << bit);
}
1592
1593 /*
1594 * Return the best possible estimate of the time in the timeval
1595 * to which tvp points. Unfortunately, we can't read the hardware registers.
1596 * We guarantee that the time will be greater than the value obtained by a
1597 * previous call.
1598 */
1599 void
1600 microtime(tvp)
1601 register struct timeval *tvp;
1602 {
1603 int s = splclock();
1604 static struct timeval lasttime;
1605
1606 *tvp = time;
1607 #ifdef notdef
1608 tvp->tv_usec += clkread();
1609 while (tvp->tv_usec > 1000000) {
1610 tvp->tv_sec++;
1611 tvp->tv_usec -= 1000000;
1612 }
1613 #endif
1614 if (tvp->tv_sec == lasttime.tv_sec &&
1615 tvp->tv_usec <= lasttime.tv_usec &&
1616 (tvp->tv_usec = lasttime.tv_usec + 1) > 1000000) {
1617 tvp->tv_sec++;
1618 tvp->tv_usec -= 1000000;
1619 }
1620 lasttime = *tvp;
1621 splx(s);
1622 }
1623
1624 /*
1625 * Wait "n" microseconds.
1626 */
1627 void
1628 delay(n)
1629 unsigned long n;
1630 {
1631 long N = cycles_per_usec * (n);
1632
1633 while (N > 0) /* XXX */
1634 N -= 3; /* XXX */
1635 }
1636
1637 #if defined(COMPAT_OSF1) || 1 /* XXX */
1638 void
1639 cpu_exec_ecoff_setregs(p, epp, stack, retval)
1640 struct proc *p;
1641 struct exec_package *epp;
1642 u_long stack;
1643 register_t *retval;
1644 {
1645 struct ecoff_exechdr *execp = (struct ecoff_exechdr *)epp->ep_hdr;
1646
1647 setregs(p, epp, stack, retval);
1648 p->p_md.md_tf->tf_regs[FRAME_GP] = execp->a.gp_value;
1649 }
1650
1651 /*
1652 * cpu_exec_ecoff_hook():
1653 * cpu-dependent ECOFF format hook for execve().
1654 *
1655 * Do any machine-dependent diddling of the exec package when doing ECOFF.
1656 *
1657 */
1658 int
1659 cpu_exec_ecoff_hook(p, epp)
1660 struct proc *p;
1661 struct exec_package *epp;
1662 {
1663 struct ecoff_exechdr *execp = (struct ecoff_exechdr *)epp->ep_hdr;
1664 extern struct emul emul_netbsd;
1665 #ifdef COMPAT_OSF1
1666 extern struct emul emul_osf1;
1667 #endif
1668
1669 switch (execp->f.f_magic) {
1670 #ifdef COMPAT_OSF1
1671 case ECOFF_MAGIC_ALPHA:
1672 epp->ep_emul = &emul_osf1;
1673 break;
1674 #endif
1675
1676 case ECOFF_MAGIC_NETBSD_ALPHA:
1677 epp->ep_emul = &emul_netbsd;
1678 break;
1679
1680 default:
1681 return ENOEXEC;
1682 }
1683 return 0;
1684 }
1685 #endif
1686