machdep.c revision 1.2

1 /* $NetBSD: machdep.c,v 1.2 1995/03/08 00:38:50 cgd Exp $ */
2
3 /*
4 * Copyright (c) 1994, 1995 Carnegie-Mellon University.
5 * All rights reserved.
6 *
7 * Author: Chris G. Demetriou
8 *
9 * Permission to use, copy, modify and distribute this software and
10 * its documentation is hereby granted, provided that both the copyright
11 * notice and this permission notice appear in all copies of the
12 * software, derivative works or modified versions, and any portions
13 * thereof, and that both notices appear in supporting documentation.
14 *
15 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
16 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
17 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
18 *
19 * Carnegie Mellon requests users of this software to return to
20 *
21 * Software Distribution Coordinator or Software.Distribution (at) CS.CMU.EDU
22 * School of Computer Science
23 * Carnegie Mellon University
24 * Pittsburgh PA 15213-3890
25 *
26 * any improvements or extensions that they make and grant Carnegie the
27 * rights to redistribute these changes.
28 */
29
30 #include <sys/param.h>
31 #include <sys/systm.h>
32 #include <sys/signalvar.h>
33 #include <sys/kernel.h>
34 #include <sys/map.h>
35 #include <sys/proc.h>
36 #include <sys/buf.h>
37 #include <sys/reboot.h>
38 #include <sys/conf.h>
39 #include <sys/file.h>
40 #ifdef REAL_CLISTS
41 #include <sys/clist.h>
42 #endif
43 #include <sys/callout.h>
44 #include <sys/malloc.h>
45 #include <sys/mbuf.h>
46 #include <sys/msgbuf.h>
47 #include <sys/ioctl.h>
48 #include <sys/tty.h>
49 #include <sys/user.h>
50 #include <sys/exec.h>
51 #include <sys/exec_ecoff.h>
52 #include <sys/sysctl.h>
53 #ifdef SYSVMSG
54 #include <sys/msg.h>
55 #endif
56 #ifdef SYSVSEM
57 #include <sys/sem.h>
58 #endif
59 #ifdef SYSVSHM
60 #include <sys/shm.h>
61 #endif
62
63 #include <sys/mount.h>
64 #include <sys/syscallargs.h>
65
66 #include <vm/vm_kern.h>
67
68 #include <dev/cons.h>
69
70 #include <machine/cpu.h>
71 #include <machine/reg.h>
72 #include <machine/rpb.h>
73 #include <machine/prom.h>
74
75 #include <net/netisr.h>
76 #include "ether.h"
77
78 #include "le.h" /* XXX for le_iomem creation */
79 #include "esp.h" /* XXX for esp_iomem creation */
80
81 vm_map_t buffer_map;
82
83 /*
84 * Declare these as initialized data so we can patch them.
85 */
86 int nswbuf = 0;
87 #ifdef NBUF
88 int nbuf = NBUF;
89 #else
90 int nbuf = 0;
91 #endif
92 #ifdef BUFPAGES
93 int bufpages = BUFPAGES;
94 #else
95 int bufpages = 0;
96 #endif
97 int msgbufmapped = 0; /* set when safe to use msgbuf */
98 int maxmem; /* max memory per process */
99 int physmem; /* amount of physical memory in system */
100 int resvmem; /* amount of memory reserved for PROM */
101
102 int cputype; /* system type, from the RPB */
103
104 /*
105 * XXX We need an address to which we can assign things so that they
106 * won't be optimized away because we didn't use the value.
107 */
108 u_int32_t no_optimize;
109
110 /* the following is used externally (sysctl_hw) */
111 char machine[] = "alpha";
112 char *cpu_model;
113 char *model_names[] = {
114 "UNKNOWN (0)",
115 "Alpha Demonstration Unit",
116 "DEC 4000 (\"Cobra\")",
117 "DEC 7000 (\"Ruby\")",
118 "DEC 3000/500 (\"Flamingo\") family",
119 "UNKNOWN (5)",
120 "DEC 2000/300 (\"Jensen\")",
121 "DEC 3000/300 (\"Pelican\")",
122 "UNKNOWN (8)",
123 "DEC 2100/A500 (\"Sable\")",
124 "AXPvme 64",
125 "AXPpci 33 (\"NoName\")",
126 "UNKNOWN (12)",
127 "DEC 2100/A50 (\"Avanti\")",
128 "Mustang",
129 "DEC 1000 (\"Mikasa\")",
130 };
131 int nmodel_names = sizeof model_names/sizeof model_names[0];
132
133 struct user *proc0paddr;
134
135 /* Number of machine cycles per microsecond */
136 u_int64_t cycles_per_usec;
137
138 /* some memory areas for device DMA. "ick." */
139 caddr_t le_iomem; /* XXX iomem for LANCE DMA */
140 caddr_t esp_iomem; /* XXX iomem for SCSI DMA */
141
142 /* Interrupt vectors (in locore) */
143 extern int XentInt(), XentArith(), XentMM(), XentIF(), XentUna(), XentSys();
144
145 int
146 alpha_init(pfn, ptb, argc, argv, envp)
147 u_long pfn; /* first free PFN number */
148 u_long ptb; /* PFN of current level 1 page table */
149 u_long argc;
150 char *argv[], *envp[];
151 {
152 extern char _end[];
153 caddr_t start, v;
154 struct mddt *mddtp;
155 int i;
156 char *p;
157
158 /*
159 * Turn off interrupts and floating point.
160 * Make sure the instruction and data streams are consistent.
161 */
162 (void)splhigh();
163 pal_wrfen(0);
164 TBIA();
165 IMB();
166
167 /*
168	 * get address of the restart block, while the bootstrap
169 * mapping is still around.
170 */
171 hwrpb = (struct rpb *) phystok0seg(*(struct rpb **)HWRPB_ADDR);
172
173 /*
174 * Remember how many cycles there are per microsecond,
175 * so that we can use delay()
176 */
177 cycles_per_usec = hwrpb->rpb_cc_freq / 1000000;
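	/*
	 * Presumably rpb_cc_freq is the cycle counter frequency in Hz,
	 * so e.g. a 150 MHz CPU gives cycles_per_usec == 150 and delay(n)
	 * can simply spin for about n * cycles_per_usec cycles.
	 */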
178
179 /*
180 * Init the PROM interface, so we can use printf
181 * until PROM mappings go away in consinit.
182 */
183 init_prom_interface();
184
185 /*
186 * Point interrupt/exception vectors to our own.
187 */
188 pal_wrent(XentInt, 0);
189 pal_wrent(XentArith, 1);
190 pal_wrent(XentMM, 2);
191 pal_wrent(XentIF, 3);
192 pal_wrent(XentUna, 4);
193 pal_wrent(XentSys, 5);
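	/*
	 * As the handler names suggest, the wrent indices are:
	 * 0 interrupt, 1 arithmetic trap, 2 memory-management fault,
	 * 3 instruction fault, 4 unaligned access, 5 system call.
	 */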
194
195 /*
196 * Find out how much memory is available, by looking at
197 * the memory cluster descriptors.
198 * XXX Assumes that the first "system" cluster is the
199 * only one we can use. Can there be more than two clusters?
200 * Is the second (etc.) system cluster guaranteed to be
201 * discontiguous?
202 */
203 mddtp = (struct mddt *)(((caddr_t)hwrpb) + hwrpb->rpb_memdat_off);
204 physmem = 0;
205 if (mddtp->mddt_cluster_cnt != 2) {
206 printf("warning: strange number of memory clusters (%d).\n",
207 mddtp->mddt_cluster_cnt);
208 printf("memory cluster information:\n");
209 for (i = 0; i < mddtp->mddt_cluster_cnt; i++) {
210 printf("mddt %d:\n", i);
211 printf("\tpfn %lx\n",
212 mddtp->mddt_clusters[i].mddt_pfn);
213 printf("\tcnt %lx\n",
214 mddtp->mddt_clusters[i].mddt_pg_cnt);
215 printf("\ttest %lx\n",
216 mddtp->mddt_clusters[i].mddt_pg_test);
217 printf("\tbva %lx\n",
218 mddtp->mddt_clusters[i].mddt_v_bitaddr);
219 printf("\tbpa %lx\n",
220 mddtp->mddt_clusters[i].mddt_p_bitaddr);
221 printf("\tbcksum %lx\n",
222 mddtp->mddt_clusters[i].mddt_bit_cksum);
223 printf("\tusage %lx\n",
224 mddtp->mddt_clusters[i].mddt_usage);
225 }
226 }
227
228 physmem = 0;
229 for (i = 0; i < mddtp->mddt_cluster_cnt; i++) {
230 /* add up physmem, stopping on first OS-available space. */
231 physmem += mddtp->mddt_clusters[i].mddt_pg_cnt;
232 if ((mddtp->mddt_clusters[i].mddt_usage & 0x01) == 0)
233 break;
234 else
235 resvmem += mddtp->mddt_clusters[i].mddt_pg_cnt;
236 }
237 if (physmem == 0)
238 panic("can't happen: system seems to have no memory!");
239 maxmem = physmem;
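	/*
	 * In other words: clusters whose low usage bit is set are
	 * presumably reserved for the console/PALcode; their pages are
	 * counted in both physmem and resvmem, and the scan stops at
	 * (and includes) the first cluster the OS may use, so
	 * physmem/maxmem cover everything seen up to that point.
	 */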
240
241 /*
242 * find out this CPU's page size
243 */
244 PAGE_SIZE = hwrpb->rpb_page_size;
245
246 v = (caddr_t)alpha_round_page(_end);
247 /*
248 * Init mapping for u page(s) for proc 0
249 */
250 start = v;
251 curproc->p_addr = proc0paddr = (struct user *)v;
252 v += UPAGES * NBPG;
253
254 /*
255 * Find out what hardware we're on, and remember its type name.
256 * XXX and start dealing with config?
257 */
258 cputype = hwrpb->rpb_type;
259 switch (cputype) {
260 #ifdef ADU
261 case ST_ADU:
262 THIS SYSTEM NOT SUPPORTED
263 #endif
264
265 #ifdef DEC_4000
266 case ST_DEC_4000:
267 THIS SYSTEM NOT SUPPORTED
268 #endif
269
270 #ifdef DEC_7000
271 case ST_DEC_7000:
272 THIS SYSTEM NOT SUPPORTED
273 #endif
274
275 #ifdef DEC_3000_500 /* and 400, [6-9]00 */
276 case ST_DEC_3000_500:
277 switch (hwrpb->rpb_variation & SV_ST_MASK) {
278 case SV_ST_SANDPIPER:
279 systype_sandpiper:
280 cpu_model = "DEC 3000/400 (\"Sandpiper\")";
281 break;
282
283 case SV_ST_FLAMINGO:
284 systype_flamingo:
285 cpu_model = "DEC 3000/500 (\"Flamingo\")";
286 break;
287
288 case SV_ST_HOTPINK:
289 cpu_model = "DEC 3000/500X (\"Hot Pink\")";
290 break;
291
292 case SV_ST_FLAMINGOPLUS:
293 case SV_ST_ULTRA:
294 cpu_model = "DEC 3000/800 (\"Flamingo+\")";
295 break;
296
297 case SV_ST_SANDPLUS:
298 cpu_model = "DEC 3000/600 (\"Sandpiper+\")";
299 break;
300
301 case SV_ST_SANDPIPER45:
302 cpu_model = "DEC 3000/700 (\"Sandpiper45\")";
303 break;
304
305 case SV_ST_FLAMINGO45:
306 cpu_model = "DEC 3000/900 (\"Flamingo45\")";
307 break;
308
309 case SV_ST_RESERVED: /* this is how things used to be done */
310 if (hwrpb->rpb_variation & SV_GRAPHICS)
311 goto systype_flamingo;
312 else
313 goto systype_sandpiper;
314 /* NOTREACHED */
315
316 default:
317 printf("unknown system variation %lx\n",
318 hwrpb->rpb_variation & SV_ST_MASK);
319 }
320 break;
321 #endif
322
323 #ifdef DEC_2000_300
324 case ST_DEC_2000_300:
325 /* XXX XXX XXX */
326 break;
327 #endif
328
329 #ifdef DEC_3000_300
330 case ST_DEC_3000_300:
331 switch (hwrpb->rpb_variation & SV_ST_MASK) {
332 case SV_ST_PELICAN:
333 cpu_model = "DEC 3000/300 (\"Pelican\")";
334 break;
335
336 case SV_ST_PELICANL:
337 cpu_model = "DEC 3000/300L (\"???\")";
338 break;
339
340 case SV_ST_PELICANX:
341 cpu_model = "DEC 3000/300X (\"???\")";
342 break;
343
344 case SV_ST_PELICANLX:
345 cpu_model = "DEC 3000/300LX (\"???\")";
346 break;
347
348 default:
349 printf("unknown system variation %lx\n",
350 hwrpb->rpb_variation & SV_ST_MASK);
351 }
352 break;
353 #endif
354
355 #ifdef DEC_2100_A500
356 case ST_DEC_2100_A500:
357 THIS SYSTEM NOT SUPPORTED
358 #endif
359
360 #ifdef DEC_AXPVME_64
361 case ST_DEC_AXPVME_64:
362 THIS SYSTEM NOT SUPPORTED
363 #endif
364
365 #ifdef DEC_AXPPCI_33
366 case ST_DEC_AXPPCI_33:
367 THIS SYSTEM NOT SUPPORTED
368 #endif
369
370 #ifdef DEC_2100_A50
371 case ST_DEC_2100_A50:
372 /* XXX */
373 printf("unknown system variation %lx\n",
374 hwrpb->rpb_variation & SV_ST_MASK);
375 break;
376 #endif
377
378 #ifdef DEC_MUSTANG
379 case ST_DEC_MUSTANG:
380 THIS SYSTEM NOT SUPPORTED
381 #endif
382
383 #ifdef DEC_1000
384 case ST_DEC_1000:
385 THIS SYSTEM NOT SUPPORTED
386 #endif
387
388 default:
389		if (cputype >= nmodel_names)
390 panic("Unknown system type %d", cputype);
391 else
392 panic("Support for %s system type not in kernel.",
393 model_names[cputype]);
394 }
395 if (cpu_model == NULL)
396 cpu_model = model_names[cputype];
397
398 #if NLE > 0
399 /*
400 * Grab 128K at the top of physical memory for the lance chip
401 * on machines where it does dma through the I/O ASIC.
402 * It must be physically contiguous and aligned on a 128K boundary.
403 */
404 if (cputype == ST_DEC_3000_500 ||
405 cputype == ST_DEC_3000_300) { /* XXX possibly others? */
406 maxmem -= btoc(128 * 1024);
407 le_iomem = (caddr_t)phystok0seg(maxmem << PGSHIFT);
408 }
409 #endif /* NLE */
410 #if NESP > 0
411 /*
412 * Ditto for the scsi chip. There is probably a way to make esp.c
413 * do dma without these buffers, but it would require major
414 * re-engineering of the esp driver.
415 * They must be 8K in size and page aligned.
416 */
417 if (cputype == ST_DEC_3000_500 ||
418 cputype == ST_DEC_3000_300) { /* XXX possibly others? */
419 maxmem -= btoc(NESP * 8192);
420 esp_iomem = (caddr_t)phystok0seg(maxmem << PGSHIFT);
421 }
422 #endif /* NESP */
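	/*
	 * Both carve-outs work the same way: pages are lopped off the top
	 * of maxmem and the new top-of-memory physical address is turned
	 * into a K0SEG (direct-mapped) virtual address.  With the usual 8K
	 * Alpha pages that is 16 pages for the 128K LANCE buffer and one
	 * page per esp.
	 */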
423
424 /*
425 * Initialize error message buffer (at end of core).
426 */
427 maxmem -= btoc(sizeof (struct msgbuf));
428 msgbufp = (struct msgbuf *)phystok0seg(maxmem << PGSHIFT);
429 msgbufmapped = 1;
430
431 /*
432 * Allocate space for system data structures.
433 * The first available kernel virtual address is in "v".
434 * As pages of kernel virtual memory are allocated, "v" is incremented.
435 *
436 * These data structures are allocated here instead of cpu_startup()
437 * because physical memory is directly addressable. We don't have
438 * to map these into virtual address space.
439 */
440 #define valloc(name, type, num) \
441 (name) = (type *)v; v = (caddr_t)((name)+(num))
442 #define valloclim(name, type, num, lim) \
443 (name) = (type *)v; v = (caddr_t)((lim) = ((name)+(num)))
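	/*
	 * For example, valloc(callout, struct callout, ncallout) expands to
	 *
	 *	callout = (struct callout *)v;
	 *	v = (caddr_t)(callout + ncallout);
	 *
	 * i.e. each valloc() carves "num" objects out of the directly-mapped
	 * space at "v" and advances v past them; valloclim() also records
	 * the end address in "lim".
	 */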
444 #ifdef REAL_CLISTS
445 valloc(cfree, struct cblock, nclist);
446 #endif
447 valloc(callout, struct callout, ncallout);
448 valloc(swapmap, struct map, nswapmap = maxproc * 2);
449 #ifdef SYSVSHM
450 valloc(shmsegs, struct shmid_ds, shminfo.shmmni);
451 #endif
452 #ifdef SYSVSEM
453 valloc(sema, struct semid_ds, seminfo.semmni);
454 valloc(sem, struct sem, seminfo.semmns);
455 /* This is pretty disgusting! */
456 valloc(semu, int, (seminfo.semmnu * seminfo.semusz) / sizeof(int));
457 #endif
458 #ifdef SYSVMSG
459 valloc(msgpool, char, msginfo.msgmax);
460 valloc(msgmaps, struct msgmap, msginfo.msgseg);
461 valloc(msghdrs, struct msg, msginfo.msgtql);
462 valloc(msqids, struct msqid_ds, msginfo.msgmni);
463 #endif
464
465 /*
466 * Determine how many buffers to allocate.
467 * We allocate the BSD standard of 10% of memory for the first
468	 * 2 Meg, and 5% of remaining memory for buffer space.  Ensure a
469 * minimum of 16 buffers. We allocate 1/2 as many swap buffer
470 * headers as file i/o buffers.
471 */
472 if (bufpages == 0)
473 bufpages = (btoc(2 * 1024 * 1024) + (physmem - resvmem)) /
474 (20 * CLSIZE);
475 if (nbuf == 0) {
476 nbuf = bufpages;
477 if (nbuf < 16)
478 nbuf = 16;
479 }
480 if (nswbuf == 0) {
481 nswbuf = (nbuf / 2) &~ 1; /* force even */
482 if (nswbuf > 256)
483 nswbuf = 256; /* sanity */
484 }
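	/*
	 * Worked example (assuming the usual 8K Alpha page and CLSIZE == 1):
	 * with 32 MB usable, physmem - resvmem == 4096 pages and
	 * btoc(2*1024*1024) == 256, so bufpages = (256 + 4096) / 20 = 217
	 * (about 1.7 MB -- 10% of the first 2 MB plus 5% of the rest),
	 * giving nbuf == 217 and nswbuf == 108.
	 */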
485 valloc(swbuf, struct buf, nswbuf);
486 valloc(buf, struct buf, nbuf);
487
488 /*
489 * Clear allocated memory.
490 */
491 bzero(start, v - start);
492
493 /*
494 * Initialize the virtual memory system, and set the
495 * page table base register in proc 0's PCB.
496 */
497 pmap_bootstrap((vm_offset_t)v, phystok0seg(ptb << PGSHIFT));
498
499 /*
500 * Initialize the rest of proc 0's PCB, and init the ptes
501 * which are cached in its md_proc structure, so we can switch
502 * to it in locore. Also cache the physical address of the pcb.
503 */
504 for (i = 0; i < UPAGES; i++)
505 proc0.p_md.md_upte[i] = PG_V | PG_KRE | PG_KWE |
506 (((k0segtophys(proc0paddr) >> PGSHIFT) + i) << PG_SHIFT);
507 proc0.p_md.md_pcbpaddr = (struct pcb *)k0segtophys(&proc0paddr->u_pcb);
508 proc0paddr->u_pcb.pcb_ksp = KSTACKTOP; /* set the kernel sp */
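	/*
	 * Each md_upte entry is a PTE (valid, kernel read/write) whose PFN
	 * field points at the corresponding u-area page, and md_pcbpaddr is
	 * the PCB's physical address -- presumably exactly what the locore
	 * context-switch code needs to wire the u-area and hand the PCB to
	 * PALcode without consulting the page tables.
	 */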
509
510 /*
511 * Look at arguments and compute bootdev.
512 *
513 * XXX
514 * Boot currently doesn't pass any arguments concerning booting
515 * or the root device.
516 */
517 { extern dev_t bootdev;
518 bootdev = MAKEBOOTDEV(8, 0, 0, 0, 0); /* sd0a. XXX */
519 }
520
521 /*
522 * Look at arguments passed to us and compute boothowto.
523 */
524 #ifdef GENERIC
525 boothowto = RB_SINGLE | RB_ASKNAME;
526 #else
527 boothowto = RB_SINGLE;
528 #endif
529 #ifdef KADB
530 boothowto |= RB_KDB;
531 #endif
532
533 printf("argc = %d\n", argc);
534 printf("argv = %lx\n", argv);
535 for (i = 0; i < argc; i++)
536 printf("argv[%d] = (%lx) \"%s\"\n", i, argv[i], argv[i]);
537
538 if (argc > 1) {
539 /* we have arguments. argv[1] is the flags. */
540 for (p = argv[1]; *p != '\0'; p++) {
541 switch (*p) {
542 case 'a': /* autoboot */
543 case 'A': /* DEC's notion of autoboot */
544 boothowto &= ~RB_SINGLE;
545 break;
546
547 case 'd': /* use compiled in default root */
548 boothowto |= RB_DFLTROOT;
549 break;
550
551 case 'm': /* mini root present in memory */
552 boothowto |= RB_MINIROOT;
553 break;
554
555 case 'n': /* ask for names */
556 boothowto |= RB_ASKNAME;
557 break;
558
559 case 'N': /* don't ask for names */
560 boothowto &= ~RB_ASKNAME;
561 }
562 }
563 }
564
565 return (0);
566 }
567
568 /* for cons.c */
569 struct consdev constab[] = {
570 { 0 },
571 };
572
573 consinit()
574 {
575 /* XXX SET UP THE CONSOLE TAB TO HAVE REASONABLE ENTRIES */
576 /* XXX */
577
578 /* XXX PICK A NEW CONSOLE DEVICE */
579 /* cninit(); */
580
581 pmap_unmap_prom();
582 }
583
584 cpu_startup()
585 {
586 register unsigned i;
587 register caddr_t v;
588 int base, residual;
589 vm_offset_t minaddr, maxaddr;
590 vm_size_t size;
591 #ifdef DEBUG
592 extern int pmapdebug;
593 int opmapdebug = pmapdebug;
594
595 pmapdebug = 0;
596 #endif
597
598 /*
599 * Good {morning,afternoon,evening,night}.
600 */
601 printf(version);
602 identifycpu();
603 printf("real mem = %d (%d reserved for PROM)\n", ctob(physmem),
604 ctob(resvmem));
605
606 /*
607 * Allocate virtual address space for file I/O buffers.
608 * Note they are different than the array of headers, 'buf',
609 * and usually occupy more virtual memory than physical.
610 */
611 size = MAXBSIZE * nbuf;
612 buffer_map = kmem_suballoc(kernel_map, (vm_offset_t *)&buffers,
613 &maxaddr, size, TRUE);
614 minaddr = (vm_offset_t)buffers;
615 if (vm_map_find(buffer_map, vm_object_allocate(size), (vm_offset_t)0,
616 &minaddr, size, FALSE) != KERN_SUCCESS)
617 panic("startup: cannot allocate buffers");
618 base = bufpages / nbuf;
619 residual = bufpages % nbuf;
620 for (i = 0; i < nbuf; i++) {
621 vm_size_t curbufsize;
622 vm_offset_t curbuf;
623
624 /*
625 * First <residual> buffers get (base+1) physical pages
626 * allocated for them. The rest get (base) physical pages.
627 *
628 * The rest of each buffer occupies virtual space,
629 * but has no physical memory allocated for it.
630 */
631 curbuf = (vm_offset_t)buffers + i * MAXBSIZE;
632 curbufsize = CLBYTES * (i < residual ? base+1 : base);
633 vm_map_pageable(buffer_map, curbuf, curbuf+curbufsize, FALSE);
634 vm_map_simplify(buffer_map, curbuf);
635 }
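	/*
	 * E.g. with bufpages == 100 and nbuf == 30, base == 3 and
	 * residual == 10: the first 10 buffers get 4 clusters of real
	 * memory behind their MAXBSIZE window, the other 20 get 3
	 * (10*4 + 20*3 == 100).
	 */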
636 /*
637 * Allocate a submap for exec arguments. This map effectively
638 * limits the number of processes exec'ing at any time.
639 */
640 exec_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
641 16 * NCARGS, TRUE);
642
643 /*
644 * Allocate a submap for physio
645 */
646 phys_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
647 VM_PHYS_SIZE, TRUE);
648
649 /*
650 * Finally, allocate mbuf pool. Since mclrefcnt is an off-size
651 * we use the more space efficient malloc in place of kmem_alloc.
652 */
653 mclrefcnt = (char *)malloc(NMBCLUSTERS+CLBYTES/MCLBYTES,
654 M_MBUF, M_NOWAIT);
655 bzero(mclrefcnt, NMBCLUSTERS+CLBYTES/MCLBYTES);
656 mb_map = kmem_suballoc(kernel_map, (vm_offset_t *)&mbutl, &maxaddr,
657 VM_MBUF_SIZE, FALSE);
658 /*
659 * Initialize callouts
660 */
661 callfree = callout;
662 for (i = 1; i < ncallout; i++)
663 callout[i-1].c_next = &callout[i];
664 callout[i-1].c_next = NULL;
665
666 #ifdef DEBUG
667 pmapdebug = opmapdebug;
668 #endif
669 printf("avail mem = %ld\n", (long)ptoa(cnt.v_free_count));
670 printf("using %ld buffers containing %ld bytes of memory\n",
671 (long)nbuf, (long)(bufpages * CLBYTES));
672
673 /*
674 * Set up buffers, so they can be used to read disk labels.
675 */
676 bufinit();
677
678 /*
679 * Configure the system.
680 */
681 configure();
682 }
683
684 identifycpu()
685 {
686
687 /* most of the work here is taken care of in alpha_init(). */
688 printf("%s, serial number 0x%lx 0x%lx\n", cpu_model,
689 ((long *)hwrpb->rpb_ssn)[0], ((long *)hwrpb->rpb_ssn)[1]);
690 printf("variation: 0x%lx, revision 0x%lx\n",
691 hwrpb->rpb_variation, *(long *)hwrpb->rpb_revision);
692 printf("%d byte page size, %d processor%s.\n", hwrpb->rpb_page_size,
693 hwrpb->rpb_pcs_cnt, hwrpb->rpb_pcs_cnt == 1 ? "" : "s");
694 }
695
696 int waittime = -1;
697
698 boot(howto)
699 int howto;
700 {
701 extern int cold;
702
703 /* Take a snapshot before clobbering any registers. */
704 if (curproc)
705 savectx(curproc->p_addr, 0);
706
707 /* If system is cold, just halt. */
708 if (cold) {
709 while (1);
710
711 howto |= RB_HALT;
712 goto haltsys;
713 }
714
715 /* Sync the disks, if appropriate */
716 if ((howto & RB_NOSYNC) == 0 && waittime < 0 && 0 /* XXX */) {
717 register struct buf *bp;
718 int iter, nbusy;
719
720 waittime = 0;
721 (void) spl0();
722 printf("syncing disks... ");
723 #ifdef notdef /* XXX */
724 /*
725 * Release vnodes held by texts before sync.
726 */
727 if (panicstr == 0)
728 vnode_pager_umount(NULL);
729
730 sync(&proc0, (void *)NULL, (int *)NULL);
731
732 for (iter = 0; iter < 20; iter++) {
733 nbusy = 0;
734 for (bp = &buf[nbuf]; --bp >= buf; )
735 if ((bp->b_flags & (B_BUSY|B_INVAL)) == B_BUSY)
736 nbusy++;
737 if (nbusy == 0)
738 break;
739 printf("%d ", nbusy);
740 DELAY(40000 * iter);
741 }
742 if (nbusy)
743 printf("giving up\n");
744 else
745 #endif
746 printf("done\n");
747 #ifdef notdef /* XXX */
748 /*
749 * If we've been adjusting the clock, the todr
750 * will be out of synch; adjust it now.
751 */
752 resettodr();
753 #endif
754 }
755
756 /* Disable interrupts. */
757 splhigh();
758
759 #ifdef notdef /* XXX */
760 /* If rebooting and a dump is requested do the dump. */
761 if ((howto & (RB_DUMP|RB_HALT)) == RB_DUMP)
762 dumpsys();
763 #endif
764
765 haltsys:
766 /* Finally, halt/reboot the system. */
767 printf("%s\n\n", howto & RB_HALT ? "halted." : "rebooting...");
768 prom_halt(howto & RB_HALT);
769 /*NOTREACHED*/
770 }
771
772 void
773 frametoreg(framep, regp)
774 struct trapframe *framep;
775 struct reg *regp;
776 {
777
778 regp->r_regs[R_V0] = framep->tf_regs[FRAME_V0];
779 regp->r_regs[R_T0] = framep->tf_regs[FRAME_T0];
780 regp->r_regs[R_T1] = framep->tf_regs[FRAME_T1];
781 regp->r_regs[R_T2] = framep->tf_regs[FRAME_T2];
782 regp->r_regs[R_T3] = framep->tf_regs[FRAME_T3];
783 regp->r_regs[R_T4] = framep->tf_regs[FRAME_T4];
784 regp->r_regs[R_T5] = framep->tf_regs[FRAME_T5];
785 regp->r_regs[R_T6] = framep->tf_regs[FRAME_T6];
786 regp->r_regs[R_T7] = framep->tf_regs[FRAME_T7];
787 regp->r_regs[R_S0] = framep->tf_regs[FRAME_S0];
788 regp->r_regs[R_S1] = framep->tf_regs[FRAME_S1];
789 regp->r_regs[R_S2] = framep->tf_regs[FRAME_S2];
790 regp->r_regs[R_S3] = framep->tf_regs[FRAME_S3];
791 regp->r_regs[R_S4] = framep->tf_regs[FRAME_S4];
792 regp->r_regs[R_S5] = framep->tf_regs[FRAME_S5];
793 regp->r_regs[R_S6] = framep->tf_regs[FRAME_S6];
794 regp->r_regs[R_A0] = framep->tf_a0;
795 regp->r_regs[R_A1] = framep->tf_a1;
796 regp->r_regs[R_A2] = framep->tf_a2;
797 regp->r_regs[R_A3] = framep->tf_regs[FRAME_A3];
798 regp->r_regs[R_A4] = framep->tf_regs[FRAME_A4];
799 regp->r_regs[R_A5] = framep->tf_regs[FRAME_A5];
800 regp->r_regs[R_T8] = framep->tf_regs[FRAME_T8];
801 regp->r_regs[R_T9] = framep->tf_regs[FRAME_T9];
802 regp->r_regs[R_T10] = framep->tf_regs[FRAME_T10];
803 regp->r_regs[R_T11] = framep->tf_regs[FRAME_T11];
804 regp->r_regs[R_RA] = framep->tf_regs[FRAME_RA];
805 regp->r_regs[R_T12] = framep->tf_regs[FRAME_T12];
806 regp->r_regs[R_AT] = framep->tf_regs[FRAME_AT];
807 regp->r_regs[R_GP] = framep->tf_gp;
808 regp->r_regs[R_SP] = framep->tf_regs[FRAME_SP];
809 regp->r_regs[R_ZERO] = 0;
810 }
811
812 void
813 regtoframe(regp, framep)
814 struct reg *regp;
815 struct trapframe *framep;
816 {
817
818 framep->tf_regs[FRAME_V0] = regp->r_regs[R_V0];
819 framep->tf_regs[FRAME_T0] = regp->r_regs[R_T0];
820 framep->tf_regs[FRAME_T1] = regp->r_regs[R_T1];
821 framep->tf_regs[FRAME_T2] = regp->r_regs[R_T2];
822 framep->tf_regs[FRAME_T3] = regp->r_regs[R_T3];
823 framep->tf_regs[FRAME_T4] = regp->r_regs[R_T4];
824 framep->tf_regs[FRAME_T5] = regp->r_regs[R_T5];
825 framep->tf_regs[FRAME_T6] = regp->r_regs[R_T6];
826 framep->tf_regs[FRAME_T7] = regp->r_regs[R_T7];
827 framep->tf_regs[FRAME_S0] = regp->r_regs[R_S0];
828 framep->tf_regs[FRAME_S1] = regp->r_regs[R_S1];
829 framep->tf_regs[FRAME_S2] = regp->r_regs[R_S2];
830 framep->tf_regs[FRAME_S3] = regp->r_regs[R_S3];
831 framep->tf_regs[FRAME_S4] = regp->r_regs[R_S4];
832 framep->tf_regs[FRAME_S5] = regp->r_regs[R_S5];
833 framep->tf_regs[FRAME_S6] = regp->r_regs[R_S6];
834 framep->tf_a0 = regp->r_regs[R_A0];
835 framep->tf_a1 = regp->r_regs[R_A1];
836 framep->tf_a2 = regp->r_regs[R_A2];
837 framep->tf_regs[FRAME_A3] = regp->r_regs[R_A3];
838 framep->tf_regs[FRAME_A4] = regp->r_regs[R_A4];
839 framep->tf_regs[FRAME_A5] = regp->r_regs[R_A5];
840 framep->tf_regs[FRAME_T8] = regp->r_regs[R_T8];
841 framep->tf_regs[FRAME_T9] = regp->r_regs[R_T9];
842 framep->tf_regs[FRAME_T10] = regp->r_regs[R_T10];
843 framep->tf_regs[FRAME_T11] = regp->r_regs[R_T11];
844 framep->tf_regs[FRAME_RA] = regp->r_regs[R_RA];
845 framep->tf_regs[FRAME_T12] = regp->r_regs[R_T12];
846 framep->tf_regs[FRAME_AT] = regp->r_regs[R_AT];
847 framep->tf_gp = regp->r_regs[R_GP];
848 framep->tf_regs[FRAME_SP] = regp->r_regs[R_SP];
849 /* ??? = regp->r_regs[R_ZERO]; */
850 }
851
852 void
853 printregs(regp)
854 struct reg *regp;
855 {
856 int i;
857
858 for (i = 0; i < 32; i++)
859 printf("R%d:\t0x%016lx%s", i, regp->r_regs[i],
860 i & 1 ? "\n" : "\t");
861 }
862
863 void
864 regdump(framep)
865 struct trapframe *framep;
866 {
867 struct reg reg;
868
869 frametoreg(framep, ®);
870 printf("REGISTERS:\n");
871 printregs(®);
872 }
873
874 #ifdef DEBUG
875 int sigdebug = 0;
876 int sigpid = 0;
877 #define SDB_FOLLOW 0x01
878 #define SDB_KSTACK 0x02
879 #endif
880
881 /*
882 * Send an interrupt to process.
883 */
884 void
885 sendsig(catcher, sig, mask, code)
886 sig_t catcher;
887 int sig, mask;
888 u_long code;
889 {
890 struct proc *p = curproc;
891 struct sigcontext *scp, ksc;
892 struct trapframe *frame;
893 struct sigacts *psp = p->p_sigacts;
894 int oonstack, fsize, rndfsize;
895 extern char sigcode[], esigcode[];
896 extern struct proc *fpcurproc;
897
898 frame = p->p_md.md_tf;
899 oonstack = psp->ps_sigstk.ss_flags & SA_ONSTACK;
900 fsize = sizeof ksc;
901 rndfsize = ((fsize + 15) / 16) * 16;
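	/*
	 * Rounding the sigcontext size up to a multiple of 16 keeps the
	 * user SP 16-byte (octaword) aligned, as the Alpha calling
	 * standard expects.
	 */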
902 /*
903 * Allocate and validate space for the signal handler
904 * context. Note that if the stack is in P0 space, the
905 * call to grow() is a nop, and the useracc() check
906 * will fail if the process has not already allocated
907 * the space with a `brk'.
908 */
909 if ((psp->ps_flags & SAS_ALTSTACK) && !oonstack &&
910 (psp->ps_sigonstack & sigmask(sig))) {
911 scp = (struct sigcontext *)(psp->ps_sigstk.ss_base +
912 psp->ps_sigstk.ss_size - rndfsize);
913 psp->ps_sigstk.ss_flags |= SA_ONSTACK;
914 } else
915 scp = (struct sigcontext *)(frame->tf_regs[FRAME_SP] -
916 rndfsize);
917 if ((u_long)scp <= USRSTACK - ctob(p->p_vmspace->vm_ssize))
918 (void)grow(p, (u_long)scp);
919 #ifdef DEBUG
920 if ((sigdebug & SDB_KSTACK) && p->p_pid == sigpid)
921 printf("sendsig(%d): sig %d ssp %lx usp %lx\n", p->p_pid,
922 sig, &oonstack, scp);
923 #endif
924 if (useracc((caddr_t)scp, fsize, B_WRITE) == 0) {
925 #ifdef DEBUG
926 if ((sigdebug & SDB_KSTACK) && p->p_pid == sigpid)
927 printf("sendsig(%d): useracc failed on sig %d\n",
928 p->p_pid, sig);
929 #endif
930 /*
931 * Process has trashed its stack; give it an illegal
932 * instruction to halt it in its tracks.
933 */
934 SIGACTION(p, SIGILL) = SIG_DFL;
935 sig = sigmask(SIGILL);
936 p->p_sigignore &= ~sig;
937 p->p_sigcatch &= ~sig;
938 p->p_sigmask &= ~sig;
939 psignal(p, SIGILL);
940 return;
941 }
942
943 /*
944 * Build the signal context to be used by sigreturn.
945 */
946 ksc.sc_onstack = oonstack;
947 ksc.sc_mask = mask;
948 ksc.sc_pc = frame->tf_pc;
949 ksc.sc_ps = frame->tf_ps;
950
951 /* copy the registers. */
952 frametoreg(frame, (struct reg *)ksc.sc_regs);
953 ksc.sc_regs[R_ZERO] = 0xACEDBADE; /* magic number */
954
955 /* save the floating-point state, if necessary, then copy it. */
956 if (p == fpcurproc) {
957 pal_wrfen(1);
958 savefpstate(&p->p_addr->u_pcb.pcb_fp);
959 pal_wrfen(0);
960 fpcurproc = NULL;
961 }
962 ksc.sc_ownedfp = p->p_md.md_flags & MDP_FPUSED;
963 bcopy(&p->p_addr->u_pcb.pcb_fp, (struct fpreg *)ksc.sc_fpregs,
964 sizeof(struct fpreg));
965 ksc.sc_fp_control = 0; /* XXX ? */
966 bzero(ksc.sc_reserved, sizeof ksc.sc_reserved); /* XXX */
967 bzero(ksc.sc_xxx, sizeof ksc.sc_xxx); /* XXX */
968
969
970 #ifdef COMPAT_OSF1
971 /*
972 * XXX Create an OSF/1-style sigcontext and associated goo.
973 */
974 #endif
975
976 /*
977 * copy the frame out to userland.
978 */
979 (void) copyout((caddr_t)&ksc, (caddr_t)scp, fsize);
980 #ifdef DEBUG
981 if (sigdebug & SDB_FOLLOW)
982 printf("sendsig(%d): sig %d scp %lx code %lx\n", p->p_pid, sig,
983 scp, code);
984 #endif
985
986 /*
987 * Set up the registers to return to sigcode.
988 */
989 frame->tf_pc = (u_int64_t)PS_STRINGS - (esigcode - sigcode);
990 frame->tf_regs[FRAME_SP] = (u_int64_t)scp;
991 frame->tf_a0 = sig;
992 frame->tf_a1 = code;
993 frame->tf_a2 = (u_int64_t)scp;
994 frame->tf_regs[FRAME_T12] = (u_int64_t)catcher; /* t12 is pv */
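	/*
	 * So the trampoline (sigcode, copied out near PS_STRINGS at exec
	 * time) is entered with a0 = signal, a1 = code, a2 = scp, pv/t12 =
	 * the handler and sp pointing at the saved context; it presumably
	 * calls the handler through pv and then does a sigreturn(scp).
	 */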
995
996 #ifdef DEBUG
997 if (sigdebug & SDB_FOLLOW)
998 printf("sendsig(%d): pc %lx, catcher %lx\n", p->p_pid,
999		    frame->tf_pc, frame->tf_regs[FRAME_T12]);
1000 if ((sigdebug & SDB_KSTACK) && p->p_pid == sigpid)
1001 printf("sendsig(%d): sig %d returns\n",
1002 p->p_pid, sig);
1003 #endif
1004 }
1005
1006 /*
1007 * System call to cleanup state after a signal
1008 * has been taken. Reset signal mask and
1009 * stack state from context left by sendsig (above).
1010 * Return to previous pc and psl as specified by
1011 * context left by sendsig. Check carefully to
1012 * make sure that the user has not modified the
1013	 * psl to gain improper privileges or to cause
1014 * a machine fault.
1015 */
1016 /* ARGSUSED */
1017 sigreturn(p, uap, retval)
1018 struct proc *p;
1019 struct sigreturn_args /* {
1020 syscallarg(struct sigcontext *) sigcntxp;
1021 } */ *uap;
1022 register_t *retval;
1023 {
1024 struct sigcontext *scp, ksc;
1025 extern struct proc *fpcurproc;
1026
1027 scp = SCARG(uap, sigcntxp);
1028 #ifdef DEBUG
1029 if (sigdebug & SDB_FOLLOW)
1030 printf("sigreturn: pid %d, scp %lx\n", p->p_pid, scp);
1031 #endif
1032
1033 if (ALIGN(scp) != (u_int64_t)scp)
1034 return (EINVAL);
1035
1036 /*
1037 * Test and fetch the context structure.
1038 * We grab it all at once for speed.
1039 */
1040 if (useracc((caddr_t)scp, sizeof (*scp), B_WRITE) == 0 ||
1041 copyin((caddr_t)scp, (caddr_t)&ksc, sizeof ksc))
1042 return (EINVAL);
1043
1044 if (ksc.sc_regs[R_ZERO] != 0xACEDBADE) /* magic number */
1045 return (EINVAL);
1046 /*
1047 * Restore the user-supplied information
1048 */
1049 if (ksc.sc_onstack)
1050 p->p_sigacts->ps_sigstk.ss_flags |= SA_ONSTACK;
1051 else
1052 p->p_sigacts->ps_sigstk.ss_flags &= ~SA_ONSTACK;
1053 p->p_sigmask = ksc.sc_mask &~ sigcantmask;
1054
1055 p->p_md.md_tf->tf_pc = ksc.sc_pc;
1056 p->p_md.md_tf->tf_ps = (ksc.sc_ps | PSL_USERSET) & ~PSL_USERCLR;
1057
1058 regtoframe((struct reg *)ksc.sc_regs, p->p_md.md_tf);
1059
1060 /* XXX ksc.sc_ownedfp ? */
1061 if (p == fpcurproc)
1062 fpcurproc = NULL;
1063 bcopy((struct fpreg *)ksc.sc_fpregs, &p->p_addr->u_pcb.pcb_fp,
1064 sizeof(struct fpreg));
1065 /* XXX ksc.sc_fp_control ? */
1066
1067 #ifdef DEBUG
1068 if (sigdebug & SDB_FOLLOW)
1069 printf("sigreturn(%d): returns\n", p->p_pid);
1070 #endif
1071 return (EJUSTRETURN);
1072 }
1073
1074 /*
1075 * machine dependent system variables.
1076 */
1077 cpu_sysctl(name, namelen, oldp, oldlenp, newp, newlen, p)
1078 int *name;
1079 u_int namelen;
1080 void *oldp;
1081 size_t *oldlenp;
1082 void *newp;
1083 size_t newlen;
1084 struct proc *p;
1085 {
1086 dev_t consdev;
1087
1088 /* all sysctl names at this level are terminal */
1089 if (namelen != 1)
1090 return (ENOTDIR); /* overloaded */
1091
1092 switch (name[0]) {
1093 case CPU_CONSDEV:
1094 if (cn_tab != NULL)
1095 consdev = cn_tab->cn_dev;
1096 else
1097 consdev = NODEV;
1098 return (sysctl_rdstruct(oldp, oldlenp, newp, &consdev,
1099 sizeof consdev));
1100 default:
1101 return (EOPNOTSUPP);
1102 }
1103 /* NOTREACHED */
1104 }
1105
1106 /*
1107 * Set registers on exec.
1108 */
1109 void
1110 setregs(p, entry, stack, retval)
1111 register struct proc *p;
1112 u_long entry;
1113 u_long stack;
1114 register_t *retval;
1115 {
1116 struct trapframe *tfp = p->p_md.md_tf;
1117 int i;
1118 extern struct proc *fpcurproc;
1119
1120 #ifdef DEBUG
1121 for (i = 0; i < FRAME_NSAVEREGS; i++)
1122 tfp->tf_regs[i] = 0xbabefacedeadbeef;
1123 tfp->tf_gp = 0xbabefacedeadbeef;
1124 tfp->tf_a0 = 0xbabefacedeadbeef;
1125 tfp->tf_a1 = 0xbabefacedeadbeef;
1126 tfp->tf_a2 = 0xbabefacedeadbeef;
1127 #else
1128 bzero(tfp->tf_regs, FRAME_NSAVEREGS * sizeof tfp->tf_regs[0]);
1129 tfp->tf_gp = 0;
1130 tfp->tf_a0 = 0;
1131 tfp->tf_a1 = 0;
1132 tfp->tf_a2 = 0;
1133 #endif
1134 bzero(&p->p_addr->u_pcb.pcb_fp, sizeof p->p_addr->u_pcb.pcb_fp);
1135
1136 tfp->tf_regs[FRAME_SP] = stack; /* restored to usp in trap return */
1137 tfp->tf_ps = PSL_USERSET;
1138 tfp->tf_pc = entry & ~3;
1139
1140	p->p_md.md_flags &= ~MDP_FPUSED;
1141 if (fpcurproc == p)
1142 fpcurproc = NULL;
1143
1144 retval[0] = retval[1] = 0;
1145 }
1146
1147 void
1148 netintr()
1149 {
1150 #ifdef INET
1151 #if NETHER > 0
1152 if (netisr & (1 << NETISR_ARP)) {
1153 netisr &= ~(1 << NETISR_ARP);
1154 arpintr();
1155 }
1156 #endif
1157 if (netisr & (1 << NETISR_IP)) {
1158 netisr &= ~(1 << NETISR_IP);
1159 ipintr();
1160 }
1161 #endif
1162 #ifdef NS
1163 if (netisr & (1 << NETISR_NS)) {
1164 netisr &= ~(1 << NETISR_NS);
1165 nsintr();
1166 }
1167 #endif
1168 #ifdef ISO
1169 if (netisr & (1 << NETISR_ISO)) {
1170 netisr &= ~(1 << NETISR_ISO);
1171 clnlintr();
1172 }
1173 #endif
1174 #ifdef CCITT
1175 if (netisr & (1 << NETISR_CCITT)) {
1176 netisr &= ~(1 << NETISR_CCITT);
1177 ccittintr();
1178 }
1179 #endif
1180 }
1181
1182 void
1183 do_sir()
1184 {
1185
1186 if (ssir & SIR_NET) {
1187 siroff(SIR_NET);
1188 cnt.v_soft++;
1189 netintr();
1190 }
1191 if (ssir & SIR_CLOCK) {
1192 siroff(SIR_CLOCK);
1193 cnt.v_soft++;
1194 softclock();
1195 }
1196 }
1197
1198 int
1199 spl0()
1200 {
1201
1202 if (ssir) {
1203 splsoft();
1204 do_sir();
1205 }
1206
1207 return (pal_swpipl(PSL_IPL_0));
1208 }
1209
1210 /*
1211 * The following primitives manipulate the run queues. _whichqs tells which
1212 * of the 32 queues _qs have processes in them. Setrunqueue puts processes
1213 * into queues, Remrq removes them from queues. The running process is on
1214 * no queue, other processes are on a queue related to p->p_priority, divided
1215 * by 4 actually to shrink the 0-127 range of priorities into the 32 available
1216 * queues.
1217 */
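/*
 * For example, a process with p_priority 50 lands on queue 50 >> 2 == 12:
 * setrunqueue() sets bit 12 in whichqs and links the proc in at the tail
 * of qs[12] through ph_rlink.
 */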
1218 /*
1219 * setrunqueue(p)
1220 * proc *p;
1221 *
1222 * Call should be made at splclock(), and p->p_stat should be SRUN.
1223 */
1224
1225 void
1226 setrunqueue(p)
1227 struct proc *p;
1228 {
1229 int bit;
1230
1231 /* firewall: p->p_back must be NULL */
1232 if (p->p_back != NULL)
1233 panic("setrunqueue");
1234
1235 bit = p->p_priority >> 2;
1236 whichqs |= (1 << bit);
1237 p->p_forw = (struct proc *)&qs[bit];
1238 p->p_back = qs[bit].ph_rlink;
1239 p->p_back->p_forw = p;
1240 qs[bit].ph_rlink = p;
1241 }
1242
1243 /*
1244 * Remrq(p)
1245 *
1246 * Call should be made at splclock().
1247 */
1248 void
1249 remrq(p)
1250 struct proc *p;
1251 {
1252 int bit;
1253
1254 bit = p->p_priority >> 2;
1255 if ((whichqs & (1 << bit)) == 0)
1256 panic("remrq");
1257
1258 p->p_back->p_forw = p->p_forw;
1259 p->p_forw->p_back = p->p_back;
1260 p->p_back = NULL; /* for firewall checking. */
1261
1262 if ((struct proc *)&qs[bit] == qs[bit].ph_link)
1263 whichqs &= ~(1 << bit);
1264 }
1265
1266 /*
1267 * Return the best possible estimate of the time in the timeval
1268 * to which tvp points. Unfortunately, we can't read the hardware registers.
1269 * We guarantee that the time will be greater than the value obtained by a
1270 * previous call.
1271 */
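/*
 * The trick below: if the value read from `time' would not be strictly
 * greater than the last value handed out, bump it by one microsecond
 * (carrying into tv_sec past 1000000) before remembering and returning it.
 */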
1272 void
1273 microtime(tvp)
1274 register struct timeval *tvp;
1275 {
1276 int s = splclock();
1277 static struct timeval lasttime;
1278
1279 *tvp = time;
1280 #ifdef notdef
1281 tvp->tv_usec += clkread();
1282 while (tvp->tv_usec > 1000000) {
1283 tvp->tv_sec++;
1284 tvp->tv_usec -= 1000000;
1285 }
1286 #endif
1287 if (tvp->tv_sec == lasttime.tv_sec &&
1288 tvp->tv_usec <= lasttime.tv_usec &&
1289 (tvp->tv_usec = lasttime.tv_usec + 1) > 1000000) {
1290 tvp->tv_sec++;
1291 tvp->tv_usec -= 1000000;
1292 }
1293 lasttime = *tvp;
1294 splx(s);
1295 }
1296
1297 #ifdef COMPAT_OSF1
1298 void
1299 cpu_exec_ecoff_setup(cmd, p, epp, sp)
1300 int cmd;
1301 struct proc *p;
1302 struct exec_package *epp;
1303 void *sp;
1304 {
1305 struct ecoff_aouthdr *eap;
1306
1307 if (cmd != EXEC_SETUP_FINISH)
1308 return;
1309
1310 eap = (struct ecoff_aouthdr *)
1311 ((caddr_t)epp->ep_hdr + sizeof(struct ecoff_filehdr));
1312 p->p_md.md_tf->tf_gp = eap->ea_gp_value;
1313 }
1314
1315 /*
1316 * cpu_exec_ecoff_hook():
1317 * cpu-dependent ECOFF format hook for execve().
1318 *
1319 * Do any machine-dependent diddling of the exec package when doing ECOFF.
1320 *
1321 */
1322 int
1323 cpu_exec_ecoff_hook(p, epp, eap)
1324 struct proc *p;
1325 struct exec_package *epp;
1326 struct ecoff_aouthdr *eap;
1327 {
1328 struct ecoff_filehdr *efp = epp->ep_hdr;
1329
1330 switch (efp->ef_magic) {
1331 case ECOFF_MAGIC_ALPHA:
1332 epp->ep_emul = EMUL_OSF1;
1333 break;
1334
1335 case ECOFF_MAGIC_NETBSD_ALPHA:
1336 epp->ep_emul = EMUL_NETBSD;
1337 break;
1338
1339 #ifdef DIAGNOSTIC
1340 default:
1341 panic("cpu_exec_ecoff_hook: can't get here from there.");
1342 #endif
1343 }
1344 epp->ep_setup = cpu_exec_ecoff_setup;
1345 return 0;
1346 }
1347 #endif
1348