machdep.c revision 1.1 1 /* $NetBSD: machdep.c,v 1.1 1995/02/13 23:07:02 cgd Exp $ */
2
3 /*
4 * Copyright (c) 1994, 1995 Carnegie-Mellon University.
5 * All rights reserved.
6 *
7 * Author: Chris G. Demetriou
8 *
9 * Permission to use, copy, modify and distribute this software and
10 * its documentation is hereby granted, provided that both the copyright
11 * notice and this permission notice appear in all copies of the
12 * software, derivative works or modified versions, and any portions
13 * thereof, and that both notices appear in supporting documentation.
14 *
15 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
16 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
17 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
18 *
19 * Carnegie Mellon requests users of this software to return to
20 *
 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
22 * School of Computer Science
23 * Carnegie Mellon University
24 * Pittsburgh PA 15213-3890
25 *
26 * any improvements or extensions that they make and grant Carnegie the
27 * rights to redistribute these changes.
28 */
29
30 #include <sys/param.h>
31 #include <sys/systm.h>
32 #include <sys/signalvar.h>
33 #include <sys/kernel.h>
34 #include <sys/map.h>
35 #include <sys/proc.h>
36 #include <sys/buf.h>
37 #include <sys/reboot.h>
38 #include <sys/conf.h>
39 #include <sys/file.h>
40 #ifdef REAL_CLISTS
41 #include <sys/clist.h>
42 #endif
43 #include <sys/callout.h>
44 #include <sys/malloc.h>
45 #include <sys/mbuf.h>
46 #include <sys/msgbuf.h>
47 #include <sys/ioctl.h>
48 #include <sys/tty.h>
49 #include <sys/user.h>
50 #include <sys/exec.h>
51 #include <sys/exec_ecoff.h>
52 #include <sys/sysctl.h>
53 #ifdef SYSVMSG
54 #include <sys/msg.h>
55 #endif
56 #ifdef SYSVSEM
57 #include <sys/sem.h>
58 #endif
59 #ifdef SYSVSHM
60 #include <sys/shm.h>
61 #endif
62
63 #include <sys/mount.h>
64 #include <sys/syscallargs.h>
65
66 #include <vm/vm_kern.h>
67
68 #include <dev/cons.h>
69
70 #include <machine/cpu.h>
71 #include <machine/reg.h>
72 #include <machine/rpb.h>
73 #include <machine/prom.h>
74
75 #include <net/netisr.h>
76 #include "ether.h"
77
78 #include "le.h" /* XXX for le_iomem creation */
79 #include "esp.h" /* XXX for esp_iomem creation */
80
vm_map_t buffer_map;		/* submap for file-system I/O buffers */

/*
 * Declare these as initialized data so we can patch them.
 */
int	nswbuf = 0;		/* number of swap buffer headers (0 = auto-size) */
#ifdef NBUF
int	nbuf = NBUF;
#else
int	nbuf = 0;		/* number of buffer headers (0 = auto-size) */
#endif
#ifdef BUFPAGES
int	bufpages = BUFPAGES;
#else
int	bufpages = 0;		/* pages of buffer-cache memory (0 = auto-size) */
#endif
int	msgbufmapped = 0;	/* set when safe to use msgbuf */
int	maxmem;			/* max memory per process */
int	physmem;		/* amount of physical memory in system */
int	resvmem;		/* amount of memory reserved for PROM */

int	cputype;		/* system type, from the RPB */

/*
 * XXX We need an address to which we can assign things so that they
 * won't be optimized away because we didn't use the value.
 */
u_int32_t no_optimize;

/* the following is used externally (sysctl_hw) */
char	machine[] = "alpha";
char	cpu_model[64];		/* filled in by alpha_init() from model_names */
char	*model_names[] = {	/* indexed by RPB system type code */
	"UNKNOWN (0)", "Alpha ADU", "DEC 4000", "DEC 7000", "DEC 3000/[4568]00",
	"UNKNOWN (5)", "DEC 2000/300", "DEC 3000/300",
};
int	nmodel_names = sizeof model_names/sizeof model_names[0];

struct	user *proc0paddr;	/* proc 0's u-area; set up in alpha_init() */

/* Number of machine cycles per microsecond */
u_int64_t	cycles_per_usec;

/* some memory areas for device DMA. "ick." */
caddr_t	le_iomem;		/* XXX iomem for LANCE DMA */
caddr_t	esp_iomem;		/* XXX iomem for SCSI DMA */

/* Interrupt vectors (in locore) */
extern int XentInt(), XentArith(), XentMM(), XentIF(), XentUna(), XentSys();
130
131 int
132 alpha_init(pfn, ptb, argc, argv, envp)
133 u_long pfn; /* first free PFN number */
134 u_long ptb; /* PFN of current level 1 page table */
135 u_long argc;
136 char *argv[], *envp[];
137 {
138 #ifdef __GNUC__ /* XXX */
139 extern char _end[]; /* XXX */
140 #else /* __GNUC__ */ /* XXX */
141 extern char end[]; /* XXX */
142 #endif /* __GNUC__ */ /* XXX */
143 caddr_t start, v;
144 struct mddt *mddtp;
145 int i;
146 char *p;
147
148 /*
149 * Turn off interrupts and floating point.
150 * Make sure the instruction and data streams are consistent.
151 */
152 (void)splhigh();
153 pal_wrfen(0);
154 TBIA();
155 IMB();
156
157 /*
158 * get address of the restart block, while we the bootstrap
159 * mapping is still around.
160 */
161 hwrpb = (struct rpb *) phystok0seg(*(struct rpb **)HWRPB_ADDR);
162
163 /*
164 * Remember how many cycles there are per microsecond,
165 * so that we can use delay()
166 */
167 cycles_per_usec = hwrpb->rpb_cc_freq / 1000000;
168
169 /*
170 * Init the PROM interface, so we can use printf
171 * until PROM mappings go away in consinit.
172 */
173 init_prom_interface();
174
175 /*
176 * Point interrupt/exception vectors to our own.
177 */
178 pal_wrent(XentInt, 0);
179 pal_wrent(XentArith, 1);
180 pal_wrent(XentMM, 2);
181 pal_wrent(XentIF, 3);
182 pal_wrent(XentUna, 4);
183 pal_wrent(XentSys, 5);
184
185 /*
186 * Find out how much memory is available, by looking at
187 * the memory cluster descriptors.
188 * XXX Assumes that the first "system" cluster is the
189 * only one we can use. Can there be more than two clusters?
190 * Is the second (etc.) system cluster guaranteed to be
191 * discontiguous?
192 */
193 mddtp = (struct mddt *)(((caddr_t)hwrpb) + hwrpb->rpb_memdat_off);
194 physmem = 0;
195 if (mddtp->mddt_cluster_cnt != 2)
196 printf("warning: strange number of memory clusters (%d).\n",
197 mddtp->mddt_cluster_cnt);
198 physmem = 0;
199 for (i = 0; i < mddtp->mddt_cluster_cnt; i++) {
200 /* add up physmem, stopping on first OS-available space. */
201 physmem += mddtp->mddt_clusters[i].mddt_pg_cnt;
202 if ((mddtp->mddt_clusters[i].mddt_usage & 0x01) == 0)
203 break;
204 else
205 resvmem += mddtp->mddt_clusters[i].mddt_pg_cnt;
206 }
207 if (physmem == 0)
208 panic("can't happen: system seems to have no memory!");
209 maxmem = physmem;
210
211 /*
212 * find out this CPU's page size
213 */
214 PAGE_SIZE = hwrpb->rpb_page_size;
215
216 #ifdef __GNUC__ /* XXX */
217 v = (caddr_t)alpha_round_page(_end); /* XXX */
218 #else /* __GNUC__ */ /* XXX */
219 v = (caddr_t)alpha_round_page(end); /* XXX */
220 #endif /* __GNUC__ */ /* XXX */
221 /*
222 * Init mapping for u page(s) for proc 0
223 */
224 start = v;
225 curproc->p_addr = proc0paddr = (struct user *)v;
226 v += UPAGES * NBPG;
227
228 /*
229 * Find out what hardware we're on, and remember its type name.
230 * XXX and start dealing with config?
231 */
232 cputype = hwrpb->rpb_type;
233 switch (cputype) {
234 #ifdef ADU
235 case ST_ADU:
236 THIS SYSTEM NOT SUPPORTED
237 #endif /* ADU */
238 #ifdef DEC_4000
239 case ST_DEC_4000:
240 THIS SYSTEM NOT SUPPORTED
241 #endif /* DEC_4000 */
242 #ifdef DEC_7000
243 case ST_DEC_7000:
244 THIS SYSTEM NOT SUPPORTED
245 #endif /* DEC_7000 */
246 #ifdef DEC_3000_500 /* and 400, and 600 and 800 */
247 case ST_DEC_3000_500:
248 /* XXX XXX XXX */
249 break;
250 #endif /* DEC_3000_500 */
251 #ifdef DEC_2000_300
252 case ST_DEC_2000_300:
253 THIS SYSTEM NOT SUPPORTED
254 #endif /* DEC_2000_300 */
255 #ifdef DEC_3000_300
256 case DEC_3000_300:
257 THIS SYSTEM NOT SUPPORTED
258 #endif /* DEC_3000_300*/
259 default:
260 if (cputype > nmodel_names)
261 panic("Unknown system type %d", cputype);
262 else
263 panic("Support for %s system type not in kernel.",
264 model_names[cputype]);
265 }
266 strcpy(cpu_model, model_names[cputype]);
267
268 #if NLE > 0
269 /*
270 * Grab 128K at the top of physical memory for the lance chip
271 * on machines where it does dma through the I/O ASIC.
272 * It must be physically contiguous and aligned on a 128K boundary.
273 */
274 if (cputype == ST_DEC_3000_500 ||
275 cputype == ST_DEC_3000_300) { /* XXX possibly others? */
276 maxmem -= btoc(128 * 1024);
277 le_iomem = (caddr_t)phystok0seg(maxmem << PGSHIFT);
278 }
279 #endif /* NLE */
280 #if NESP > 0
281 /*
282 * Ditto for the scsi chip. There is probably a way to make esp.c
283 * do dma without these buffers, but it would require major
284 * re-engineering of the esp driver.
285 * They must be 8K in size and page aligned.
286 */
287 if (cputype == ST_DEC_3000_500 ||
288 cputype == ST_DEC_3000_300) { /* XXX possibly others? */
289 maxmem -= btoc(NESP * 8192);
290 esp_iomem = (caddr_t)phystok0seg(maxmem << PGSHIFT);
291 }
292 #endif /* NESP */
293
294 /*
295 * Initialize error message buffer (at end of core).
296 */
297 maxmem -= btoc(sizeof (struct msgbuf));
298 msgbufp = (struct msgbuf *)phystok0seg(maxmem << PGSHIFT);
299 msgbufmapped = 1;
300
301 /*
302 * Allocate space for system data structures.
303 * The first available kernel virtual address is in "v".
304 * As pages of kernel virtual memory are allocated, "v" is incremented.
305 *
306 * These data structures are allocated here instead of cpu_startup()
307 * because physical memory is directly addressable. We don't have
308 * to map these into virtual address space.
309 */
310 #define valloc(name, type, num) \
311 (name) = (type *)v; v = (caddr_t)((name)+(num))
312 #define valloclim(name, type, num, lim) \
313 (name) = (type *)v; v = (caddr_t)((lim) = ((name)+(num)))
314 #ifdef REAL_CLISTS
315 valloc(cfree, struct cblock, nclist);
316 #endif
317 valloc(callout, struct callout, ncallout);
318 valloc(swapmap, struct map, nswapmap = maxproc * 2);
319 #ifdef SYSVSHM
320 valloc(shmsegs, struct shmid_ds, shminfo.shmmni);
321 #endif
322 #ifdef SYSVSEM
323 valloc(sema, struct semid_ds, seminfo.semmni);
324 valloc(sem, struct sem, seminfo.semmns);
325 /* This is pretty disgusting! */
326 valloc(semu, int, (seminfo.semmnu * seminfo.semusz) / sizeof(int));
327 #endif
328 #ifdef SYSVMSG
329 valloc(msgpool, char, msginfo.msgmax);
330 valloc(msgmaps, struct msgmap, msginfo.msgseg);
331 valloc(msghdrs, struct msg, msginfo.msgtql);
332 valloc(msqids, struct msqid_ds, msginfo.msgmni);
333 #endif
334
335 /*
336 * Determine how many buffers to allocate.
337 * We allocate the BSD standard of 10% of memory for the first
338 * 2 Meg, and 5% of remaining memory for buffer space. Insure a
339 * minimum of 16 buffers. We allocate 1/2 as many swap buffer
340 * headers as file i/o buffers.
341 */
342 if (bufpages == 0)
343 bufpages = (btoc(2 * 1024 * 1024) + (physmem - resvmem)) /
344 (20 * CLSIZE);
345 if (nbuf == 0) {
346 nbuf = bufpages;
347 if (nbuf < 16)
348 nbuf = 16;
349 }
350 if (nswbuf == 0) {
351 nswbuf = (nbuf / 2) &~ 1; /* force even */
352 if (nswbuf > 256)
353 nswbuf = 256; /* sanity */
354 }
355 valloc(swbuf, struct buf, nswbuf);
356 valloc(buf, struct buf, nbuf);
357
358 /*
359 * Clear allocated memory.
360 */
361 bzero(start, v - start);
362
363 /*
364 * Initialize the virtual memory system, and set the
365 * page table base register in proc 0's PCB.
366 */
367 pmap_bootstrap((vm_offset_t)v, phystok0seg(ptb << PGSHIFT));
368
369 /*
370 * Initialize the rest of proc 0's PCB, and init the ptes
371 * which are cached in its md_proc structure, so we can switch
372 * to it in locore. Also cache the physical address of the pcb.
373 */
374 for (i = 0; i < UPAGES; i++)
375 proc0.p_md.md_upte[i] = PG_V | PG_KRE | PG_KWE |
376 (((k0segtophys(proc0paddr) >> PGSHIFT) + i) << PG_SHIFT);
377 proc0.p_md.md_pcbpaddr = (struct pcb *)k0segtophys(&proc0paddr->u_pcb);
378 proc0paddr->u_pcb.pcb_ksp = KSTACKTOP; /* set the kernel sp */
379
380 /*
381 * Look at arguments and compute bootdev.
382 *
383 * XXX
384 * Boot currently doesn't pass any arguments concerning booting
385 * or the root device.
386 */
387 { extern dev_t bootdev;
388 bootdev = MAKEBOOTDEV(8, 0, 0, 0, 0); /* sd0a. XXX */
389 }
390
391 /*
392 * Look at arguments passed to us and compute boothowto.
393 */
394 #ifdef GENERIC
395 boothowto = RB_SINGLE | RB_ASKNAME;
396 #else
397 boothowto = RB_SINGLE;
398 #endif
399 #ifdef KADB
400 boothowto |= RB_KDB;
401 #endif
402
403 printf("argc = %d\n", argc);
404 printf("argv = %lx\n", argv);
405 for (i = 0; i < argc; i++)
406 printf("argv[%d] = (%lx) \"%s\"\n", i, argv[i], argv[i]);
407
408 if (argc > 1) {
409 /* we have arguments. argv[1] is the flags. */
410 for (p = argv[1]; *p != '\0'; p++) {
411 switch (*p) {
412 case 'a': /* autoboot */
413 case 'A': /* DEC's notion of autoboot */
414 boothowto &= ~RB_SINGLE;
415 break;
416
417 case 'd': /* use compiled in default root */
418 boothowto |= RB_DFLTROOT;
419 break;
420
421 case 'm': /* mini root present in memory */
422 boothowto |= RB_MINIROOT;
423 break;
424
425 case 'n': /* ask for names */
426 boothowto |= RB_ASKNAME;
427 break;
428
429 case 'N': /* don't ask for names */
430 boothowto &= ~RB_ASKNAME;
431 }
432 }
433 }
434
435 return (0);
436 }
437
/* for cons.c */
/*
 * Console device table; currently only the NULL terminator entry.
 * XXX consinit() is expected to fill in real entries eventually.
 */
struct consdev constab[] = {
	{ 0 },
};
442
/*
 * consinit:
 *	Console initialization.  Should switch from the PROM console
 *	to a kernel console driver; at this revision it only tears
 *	down the PROM mappings, so the PROM-based printf used since
 *	init_prom_interface() is unusable after this returns.
 */
consinit()
{
	/* XXX SET UP THE CONSOLE TAB TO HAVE REASONABLE ENTRIES */
	/* XXX */

	/* XXX PICK A NEW CONSOLE DEVICE */
	/* cninit(); */

	pmap_unmap_prom();
}
453
454 cpu_startup()
455 {
456 register unsigned i;
457 register caddr_t v;
458 int base, residual;
459 vm_offset_t minaddr, maxaddr;
460 vm_size_t size;
461 #ifdef DEBUG
462 extern int pmapdebug;
463 int opmapdebug = pmapdebug;
464
465 pmapdebug = 0;
466 #endif
467
468 /*
469 * Good {morning,afternoon,evening,night}.
470 */
471 printf(version);
472 identifycpu();
473 printf("real mem = %d (%d reserved for PROM)\n", ctob(physmem),
474 ctob(resvmem));
475
476 /*
477 * Allocate virtual address space for file I/O buffers.
478 * Note they are different than the array of headers, 'buf',
479 * and usually occupy more virtual memory than physical.
480 */
481 size = MAXBSIZE * nbuf;
482 buffer_map = kmem_suballoc(kernel_map, (vm_offset_t *)&buffers,
483 &maxaddr, size, TRUE);
484 minaddr = (vm_offset_t)buffers;
485 if (vm_map_find(buffer_map, vm_object_allocate(size), (vm_offset_t)0,
486 &minaddr, size, FALSE) != KERN_SUCCESS)
487 panic("startup: cannot allocate buffers");
488 base = bufpages / nbuf;
489 residual = bufpages % nbuf;
490 for (i = 0; i < nbuf; i++) {
491 vm_size_t curbufsize;
492 vm_offset_t curbuf;
493
494 /*
495 * First <residual> buffers get (base+1) physical pages
496 * allocated for them. The rest get (base) physical pages.
497 *
498 * The rest of each buffer occupies virtual space,
499 * but has no physical memory allocated for it.
500 */
501 curbuf = (vm_offset_t)buffers + i * MAXBSIZE;
502 curbufsize = CLBYTES * (i < residual ? base+1 : base);
503 vm_map_pageable(buffer_map, curbuf, curbuf+curbufsize, FALSE);
504 vm_map_simplify(buffer_map, curbuf);
505 }
506 /*
507 * Allocate a submap for exec arguments. This map effectively
508 * limits the number of processes exec'ing at any time.
509 */
510 exec_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
511 16 * NCARGS, TRUE);
512
513 /*
514 * Allocate a submap for physio
515 */
516 phys_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
517 VM_PHYS_SIZE, TRUE);
518
519 /*
520 * Finally, allocate mbuf pool. Since mclrefcnt is an off-size
521 * we use the more space efficient malloc in place of kmem_alloc.
522 */
523 mclrefcnt = (char *)malloc(NMBCLUSTERS+CLBYTES/MCLBYTES,
524 M_MBUF, M_NOWAIT);
525 bzero(mclrefcnt, NMBCLUSTERS+CLBYTES/MCLBYTES);
526 mb_map = kmem_suballoc(kernel_map, (vm_offset_t *)&mbutl, &maxaddr,
527 VM_MBUF_SIZE, FALSE);
528 /*
529 * Initialize callouts
530 */
531 callfree = callout;
532 for (i = 1; i < ncallout; i++)
533 callout[i-1].c_next = &callout[i];
534 callout[i-1].c_next = NULL;
535
536 #ifdef DEBUG
537 pmapdebug = opmapdebug;
538 #endif
539 printf("avail mem = %ld\n", (long)ptoa(cnt.v_free_count));
540 printf("using %ld buffers containing %ld bytes of memory\n",
541 (long)nbuf, (long)(bufpages * CLBYTES));
542
543 /*
544 * Set up buffers, so they can be used to read disk labels.
545 */
546 bufinit();
547
548 /*
549 * Configure the system.
550 */
551 configure();
552 }
553
/*
 * identifycpu:
 *	Print the identification gathered in alpha_init(): model name
 *	(cpu_model), serial number, variation/revision, page size and
 *	processor count, all taken from the HWRPB.
 */
identifycpu()
{

	/* most of the work here is taken care of in alpha_init(). */
	printf("%s, serial number 0x%lx 0x%lx\n", cpu_model,
	    ((long *)hwrpb->rpb_ssn)[0], ((long *)hwrpb->rpb_ssn)[1]);
	printf("variation: 0x%lx, revision 0x%lx\n",
	    hwrpb->rpb_variation, *(long *)hwrpb->rpb_revision);
	printf("%d byte page size, %d processor%s.\n", hwrpb->rpb_page_size,
	    hwrpb->rpb_pcs_cnt, hwrpb->rpb_pcs_cnt == 1 ? "" : "s");
}
565
int	waittime = -1;		/* >= 0 once a disk sync has been attempted */

/*
 * boot:
 *	Halt or reboot the machine.  Saves the current context, syncs
 *	disks when appropriate (the sync body is currently disabled by
 *	the `&& 0' below and the notdef blocks), disables interrupts,
 *	and transfers control back to the PROM via prom_halt().
 *	Does not return.
 */
boot(howto)
	int howto;
{
	extern int cold;

	/* Take a snapshot before clobbering any registers. */
	if (curproc)
		savectx(curproc->p_addr, 0);

	/* If system is cold, just halt. */
	if (cold) {
		howto |= RB_HALT;
		goto haltsys;
	}

	/*
	 * Sync the disks, if appropriate.
	 * NOTE: the trailing `&& 0' disables the whole body for now (XXX).
	 */
	if ((howto & RB_NOSYNC) == 0 && waittime < 0 && 0 /* XXX */) {
		register struct buf *bp;
		int iter, nbusy;

		waittime = 0;
		(void) spl0();
		printf("syncing disks... ");
#ifdef notdef /* XXX */
		/*
		 * Release vnodes held by texts before sync.
		 */
		if (panicstr == 0)
			vnode_pager_umount(NULL);

		sync(&proc0, (void *)NULL, (int *)NULL);

		/* Wait (bounded) for in-flight buffers to drain. */
		for (iter = 0; iter < 20; iter++) {
			nbusy = 0;
			for (bp = &buf[nbuf]; --bp >= buf; )
				if ((bp->b_flags & (B_BUSY|B_INVAL)) == B_BUSY)
					nbusy++;
			if (nbusy == 0)
				break;
			printf("%d ", nbusy);
			DELAY(40000 * iter);
		}
		if (nbusy)
			printf("giving up\n");
		else
#endif
			printf("done\n");
#ifdef notdef /* XXX */
		/*
		 * If we've been adjusting the clock, the todr
		 * will be out of synch; adjust it now.
		 */
		resettodr();
#endif
	}

	/* Disable interrupts. */
	splhigh();

#ifdef notdef /* XXX */
	/* If rebooting and a dump is requested do the dump. */
	if ((howto & (RB_DUMP|RB_HALT)) == RB_DUMP)
		dumpsys();
#endif

haltsys:
	/* Finally, halt/reboot the system. */
	printf("%s\n\n", howto & RB_HALT ? "halted." : "rebooting...");
	prom_halt(howto & RB_HALT);
	/*NOTREACHED*/
}
639
/*
 * frametoreg:
 *	Copy machine register state from a trapframe into a struct reg
 *	(e.g. for ptrace/core or signal context).  Most registers live
 *	in tf_regs[FRAME_*]; a0-a2 and gp have dedicated trapframe
 *	fields.  R_ZERO is hardwired to 0 on Alpha, so it is simply
 *	stored as 0 here.
 */
void
frametoreg(framep, regp)
	struct trapframe *framep;
	struct reg *regp;
{

	regp->r_regs[R_V0] = framep->tf_regs[FRAME_V0];
	regp->r_regs[R_T0] = framep->tf_regs[FRAME_T0];
	regp->r_regs[R_T1] = framep->tf_regs[FRAME_T1];
	regp->r_regs[R_T2] = framep->tf_regs[FRAME_T2];
	regp->r_regs[R_T3] = framep->tf_regs[FRAME_T3];
	regp->r_regs[R_T4] = framep->tf_regs[FRAME_T4];
	regp->r_regs[R_T5] = framep->tf_regs[FRAME_T5];
	regp->r_regs[R_T6] = framep->tf_regs[FRAME_T6];
	regp->r_regs[R_T7] = framep->tf_regs[FRAME_T7];
	regp->r_regs[R_S0] = framep->tf_regs[FRAME_S0];
	regp->r_regs[R_S1] = framep->tf_regs[FRAME_S1];
	regp->r_regs[R_S2] = framep->tf_regs[FRAME_S2];
	regp->r_regs[R_S3] = framep->tf_regs[FRAME_S3];
	regp->r_regs[R_S4] = framep->tf_regs[FRAME_S4];
	regp->r_regs[R_S5] = framep->tf_regs[FRAME_S5];
	regp->r_regs[R_S6] = framep->tf_regs[FRAME_S6];
	/* a0-a2 are kept in dedicated trapframe fields. */
	regp->r_regs[R_A0] = framep->tf_a0;
	regp->r_regs[R_A1] = framep->tf_a1;
	regp->r_regs[R_A2] = framep->tf_a2;
	regp->r_regs[R_A3] = framep->tf_regs[FRAME_A3];
	regp->r_regs[R_A4] = framep->tf_regs[FRAME_A4];
	regp->r_regs[R_A5] = framep->tf_regs[FRAME_A5];
	regp->r_regs[R_T8] = framep->tf_regs[FRAME_T8];
	regp->r_regs[R_T9] = framep->tf_regs[FRAME_T9];
	regp->r_regs[R_T10] = framep->tf_regs[FRAME_T10];
	regp->r_regs[R_T11] = framep->tf_regs[FRAME_T11];
	regp->r_regs[R_RA] = framep->tf_regs[FRAME_RA];
	regp->r_regs[R_T12] = framep->tf_regs[FRAME_T12];
	regp->r_regs[R_AT] = framep->tf_regs[FRAME_AT];
	regp->r_regs[R_GP] = framep->tf_gp;
	regp->r_regs[R_SP] = framep->tf_regs[FRAME_SP];
	regp->r_regs[R_ZERO] = 0;
}
679
/*
 * regtoframe:
 *	Inverse of frametoreg(): copy a struct reg back into a
 *	trapframe.  R_ZERO is not restored -- the zero register is
 *	hardwired and has no trapframe slot.
 */
void
regtoframe(regp, framep)
	struct reg *regp;
	struct trapframe *framep;
{

	framep->tf_regs[FRAME_V0] = regp->r_regs[R_V0];
	framep->tf_regs[FRAME_T0] = regp->r_regs[R_T0];
	framep->tf_regs[FRAME_T1] = regp->r_regs[R_T1];
	framep->tf_regs[FRAME_T2] = regp->r_regs[R_T2];
	framep->tf_regs[FRAME_T3] = regp->r_regs[R_T3];
	framep->tf_regs[FRAME_T4] = regp->r_regs[R_T4];
	framep->tf_regs[FRAME_T5] = regp->r_regs[R_T5];
	framep->tf_regs[FRAME_T6] = regp->r_regs[R_T6];
	framep->tf_regs[FRAME_T7] = regp->r_regs[R_T7];
	framep->tf_regs[FRAME_S0] = regp->r_regs[R_S0];
	framep->tf_regs[FRAME_S1] = regp->r_regs[R_S1];
	framep->tf_regs[FRAME_S2] = regp->r_regs[R_S2];
	framep->tf_regs[FRAME_S3] = regp->r_regs[R_S3];
	framep->tf_regs[FRAME_S4] = regp->r_regs[R_S4];
	framep->tf_regs[FRAME_S5] = regp->r_regs[R_S5];
	framep->tf_regs[FRAME_S6] = regp->r_regs[R_S6];
	/* a0-a2 are kept in dedicated trapframe fields. */
	framep->tf_a0 = regp->r_regs[R_A0];
	framep->tf_a1 = regp->r_regs[R_A1];
	framep->tf_a2 = regp->r_regs[R_A2];
	framep->tf_regs[FRAME_A3] = regp->r_regs[R_A3];
	framep->tf_regs[FRAME_A4] = regp->r_regs[R_A4];
	framep->tf_regs[FRAME_A5] = regp->r_regs[R_A5];
	framep->tf_regs[FRAME_T8] = regp->r_regs[R_T8];
	framep->tf_regs[FRAME_T9] = regp->r_regs[R_T9];
	framep->tf_regs[FRAME_T10] = regp->r_regs[R_T10];
	framep->tf_regs[FRAME_T11] = regp->r_regs[R_T11];
	framep->tf_regs[FRAME_RA] = regp->r_regs[R_RA];
	framep->tf_regs[FRAME_T12] = regp->r_regs[R_T12];
	framep->tf_regs[FRAME_AT] = regp->r_regs[R_AT];
	framep->tf_gp = regp->r_regs[R_GP];
	framep->tf_regs[FRAME_SP] = regp->r_regs[R_SP];
	/* ??? = regp->r_regs[R_ZERO]; */
}
719
720 void
721 printregs(regp)
722 struct reg *regp;
723 {
724 int i;
725
726 for (i = 0; i < 32; i++)
727 printf("R%d:\t0x%016lx%s", i, regp->r_regs[i],
728 i & 1 ? "\n" : "\t");
729 }
730
731 void
732 regdump(framep)
733 struct trapframe *framep;
734 {
735 struct reg reg;
736
737 frametoreg(framep, ®);
738 printf("REGISTERS:\n");
739 printregs(®);
740 }
741
742 #ifdef DEBUG
743 int sigdebug = 0;
744 int sigpid = 0;
745 #define SDB_FOLLOW 0x01
746 #define SDB_KSTACK 0x02
747 #endif
748
749 /*
750 * Send an interrupt to process.
751 */
/*
 * sendsig:
 *	Send an interrupt (signal) to a process: build a struct
 *	sigcontext on the user's stack (signal stack if SAS_ALTSTACK
 *	requests it), save the integer and FP register state into it,
 *	and redirect the trapframe so the process resumes in the signal
 *	trampoline (sigcode) with a0=sig, a1=code, a2=scp and the
 *	handler address in t12 (pv).
 *
 *	catcher: user handler address
 *	sig/mask: signal number and previous signal mask
 *	code: machine-dependent signal code
 */
void
sendsig(catcher, sig, mask, code)
	sig_t catcher;
	int sig, mask;
	u_long code;
{
	struct proc *p = curproc;
	struct sigcontext *scp, ksc;
	struct trapframe *frame;
	struct sigacts *psp = p->p_sigacts;
	int oonstack, fsize, rndfsize;
	extern char sigcode[], esigcode[];
	extern struct proc *fpcurproc;

	frame = p->p_md.md_tf;
	oonstack = psp->ps_sigstk.ss_flags & SA_ONSTACK;
	fsize = sizeof ksc;
	/* round the context size up to a 16-byte stack boundary */
	rndfsize = ((fsize + 15) / 16) * 16;
	/*
	 * Allocate and validate space for the signal handler
	 * context. Note that if the stack is in P0 space, the
	 * call to grow() is a nop, and the useracc() check
	 * will fail if the process has not already allocated
	 * the space with a `brk'.
	 */
	if ((psp->ps_flags & SAS_ALTSTACK) && !oonstack &&
	    (psp->ps_sigonstack & sigmask(sig))) {
		scp = (struct sigcontext *)(psp->ps_sigstk.ss_base +
		    psp->ps_sigstk.ss_size - rndfsize);
		psp->ps_sigstk.ss_flags |= SA_ONSTACK;
	} else
		scp = (struct sigcontext *)(frame->tf_regs[FRAME_SP] -
		    rndfsize);
	if ((u_long)scp <= USRSTACK - ctob(p->p_vmspace->vm_ssize))
		(void)grow(p, (u_long)scp);
#ifdef DEBUG
	if ((sigdebug & SDB_KSTACK) && p->p_pid == sigpid)
		printf("sendsig(%d): sig %d ssp %lx usp %lx\n", p->p_pid,
		    sig, &oonstack, scp);
#endif
	if (useracc((caddr_t)scp, fsize, B_WRITE) == 0) {
#ifdef DEBUG
		if ((sigdebug & SDB_KSTACK) && p->p_pid == sigpid)
			printf("sendsig(%d): useracc failed on sig %d\n",
			    p->p_pid, sig);
#endif
		/*
		 * Process has trashed its stack; give it an illegal
		 * instruction to halt it in its tracks.
		 */
		SIGACTION(p, SIGILL) = SIG_DFL;
		sig = sigmask(SIGILL);
		p->p_sigignore &= ~sig;
		p->p_sigcatch &= ~sig;
		p->p_sigmask &= ~sig;
		psignal(p, SIGILL);
		return;
	}

	/*
	 * Build the signal context to be used by sigreturn.
	 */
	ksc.sc_onstack = oonstack;
	ksc.sc_mask = mask;
	ksc.sc_pc = frame->tf_pc;
	ksc.sc_ps = frame->tf_ps;

	/* copy the registers. */
	frametoreg(frame, (struct reg *)ksc.sc_regs);
	ksc.sc_regs[R_ZERO] = 0xACEDBADE;	/* magic number */

	/* save the floating-point state, if necessary, then copy it. */
	if (p == fpcurproc) {
		pal_wrfen(1);
		savefpstate(&p->p_addr->u_pcb.pcb_fp);
		pal_wrfen(0);
		fpcurproc = NULL;
	}
	ksc.sc_ownedfp = p->p_md.md_flags & MDP_FPUSED;
	bcopy(&p->p_addr->u_pcb.pcb_fp, (struct fpreg *)ksc.sc_fpregs,
	    sizeof(struct fpreg));
	ksc.sc_fp_control = 0;				/* XXX ? */
	bzero(ksc.sc_reserved, sizeof ksc.sc_reserved);	/* XXX */
	bzero(ksc.sc_xxx, sizeof ksc.sc_xxx);		/* XXX */


#ifdef COMPAT_OSF1
	/*
	 * XXX Create an OSF/1-style sigcontext and associated goo.
	 */
#endif

	/*
	 * copy the frame out to userland.
	 */
	(void) copyout((caddr_t)&ksc, (caddr_t)scp, fsize);
#ifdef DEBUG
	if (sigdebug & SDB_FOLLOW)
		printf("sendsig(%d): sig %d scp %lx code %lx\n", p->p_pid, sig,
		    scp, code);
#endif

	/*
	 * Set up the registers to return to sigcode.
	 */
	frame->tf_pc = (u_int64_t)PS_STRINGS - (esigcode - sigcode);
	frame->tf_regs[FRAME_SP] = (u_int64_t)scp;
	frame->tf_a0 = sig;
	frame->tf_a1 = code;
	frame->tf_a2 = (u_int64_t)scp;
	frame->tf_regs[FRAME_T12] = (u_int64_t)catcher;	/* t12 is pv */

#ifdef DEBUG
	if (sigdebug & SDB_FOLLOW)
		/*
		 * NOTE(review): this prints FRAME_A3 as "catcher", but
		 * the catcher was stored in FRAME_T12 above -- debug
		 * output only; confirm the intended register.
		 */
		printf("sendsig(%d): pc %lx, catcher %lx\n", p->p_pid,
		    frame->tf_pc, frame->tf_regs[FRAME_A3]);
	if ((sigdebug & SDB_KSTACK) && p->p_pid == sigpid)
		printf("sendsig(%d): sig %d returns\n",
		    p->p_pid, sig);
#endif
}
873
874 /*
875 * System call to cleanup state after a signal
876 * has been taken. Reset signal mask and
877 * stack state from context left by sendsig (above).
878 * Return to previous pc and psl as specified by
879 * context left by sendsig. Check carefully to
880 * make sure that the user has not modified the
881 * psl to gain improper priviledges or to cause
882 * a machine fault.
883 */
/* ARGSUSED */
/*
 * sigreturn:
 *	System call to clean up state after a signal handler returns.
 *	Validates and copies in the sigcontext written by sendsig(),
 *	then restores the signal mask, stack state, PC/PS and register
 *	state.  Rejects a context that is misaligned, unreadable, or
 *	missing the magic value sendsig() planted in sc_regs[R_ZERO].
 *	Returns EJUSTRETURN on success so the restored registers are
 *	not clobbered by normal syscall return.
 */
sigreturn(p, uap, retval)
	struct proc *p;
	struct sigreturn_args /* {
		syscallarg(struct sigcontext *) sigcntxp;
	} */ *uap;
	register_t *retval;
{
	struct sigcontext *scp, ksc;
	extern struct proc *fpcurproc;

	scp = SCARG(uap, sigcntxp);
#ifdef DEBUG
	if (sigdebug & SDB_FOLLOW)
		printf("sigreturn: pid %d, scp %lx\n", p->p_pid, scp);
#endif

	if (ALIGN(scp) != (u_int64_t)scp)
		return (EINVAL);

	/*
	 * Test and fetch the context structure.
	 * We grab it all at once for speed.
	 */
	if (useracc((caddr_t)scp, sizeof (*scp), B_WRITE) == 0 ||
	    copyin((caddr_t)scp, (caddr_t)&ksc, sizeof ksc))
		return (EINVAL);

	if (ksc.sc_regs[R_ZERO] != 0xACEDBADE)		/* magic number */
		return (EINVAL);
	/*
	 * Restore the user-supplied information
	 */
	if (ksc.sc_onstack)
		p->p_sigacts->ps_sigstk.ss_flags |= SA_ONSTACK;
	else
		p->p_sigacts->ps_sigstk.ss_flags &= ~SA_ONSTACK;
	p->p_sigmask = ksc.sc_mask &~ sigcantmask;

	/* PS is sanitized so the user cannot raise privilege. */
	p->p_md.md_tf->tf_pc = ksc.sc_pc;
	p->p_md.md_tf->tf_ps = (ksc.sc_ps | PSL_USERSET) & ~PSL_USERCLR;

	regtoframe((struct reg *)ksc.sc_regs, p->p_md.md_tf);

	/* XXX ksc.sc_ownedfp ? */
	if (p == fpcurproc)
		fpcurproc = NULL;
	bcopy((struct fpreg *)ksc.sc_fpregs, &p->p_addr->u_pcb.pcb_fp,
	    sizeof(struct fpreg));
	/* XXX ksc.sc_fp_control ? */

#ifdef DEBUG
	if (sigdebug & SDB_FOLLOW)
		printf("sigreturn(%d): returns\n", p->p_pid);
#endif
	return (EJUSTRETURN);
}
941
942 /*
943 * machine dependent system variables.
944 */
945 cpu_sysctl(name, namelen, oldp, oldlenp, newp, newlen, p)
946 int *name;
947 u_int namelen;
948 void *oldp;
949 size_t *oldlenp;
950 void *newp;
951 size_t newlen;
952 struct proc *p;
953 {
954 dev_t consdev;
955
956 /* all sysctl names at this level are terminal */
957 if (namelen != 1)
958 return (ENOTDIR); /* overloaded */
959
960 switch (name[0]) {
961 case CPU_CONSDEV:
962 if (cn_tab != NULL)
963 consdev = cn_tab->cn_dev;
964 else
965 consdev = NODEV;
966 return (sysctl_rdstruct(oldp, oldlenp, newp, &consdev,
967 sizeof consdev));
968 default:
969 return (EOPNOTSUPP);
970 }
971 /* NOTREACHED */
972 }
973
974 /*
975 * Set registers on exec.
976 */
977 void
978 setregs(p, entry, stack, retval)
979 register struct proc *p;
980 u_long entry;
981 u_long stack;
982 register_t *retval;
983 {
984 struct trapframe *tfp = p->p_md.md_tf;
985 int i;
986 extern struct proc *fpcurproc;
987
988 #ifdef DEBUG
989 for (i = 0; i < FRAME_NSAVEREGS; i++)
990 tfp->tf_regs[i] = 0xbabefacedeadbeef;
991 tfp->tf_gp = 0xbabefacedeadbeef;
992 tfp->tf_a0 = 0xbabefacedeadbeef;
993 tfp->tf_a1 = 0xbabefacedeadbeef;
994 tfp->tf_a2 = 0xbabefacedeadbeef;
995 #else
996 bzero(tfp->tf_regs, FRAME_NSAVEREGS * sizeof tfp->tf_regs[0]);
997 tfp->tf_gp = 0;
998 tfp->tf_a0 = 0;
999 tfp->tf_a1 = 0;
1000 tfp->tf_a2 = 0;
1001 #endif
1002 bzero(&p->p_addr->u_pcb.pcb_fp, sizeof p->p_addr->u_pcb.pcb_fp);
1003
1004 tfp->tf_regs[FRAME_SP] = stack; /* restored to usp in trap return */
1005 tfp->tf_ps = PSL_USERSET;
1006 tfp->tf_pc = entry & ~3;
1007
1008 p->p_md.md_flags & ~MDP_FPUSED;
1009 if (fpcurproc == p)
1010 fpcurproc = NULL;
1011
1012 retval[0] = retval[1] = 0;
1013 }
1014
/*
 * netintr:
 *	Network software-interrupt dispatcher.  For each protocol bit
 *	set in the global netisr mask, clear the bit and call that
 *	protocol's interrupt routine.  Which protocols exist is decided
 *	at compile time by the option ifdefs below.
 */
void
netintr()
{
#ifdef INET
#if NETHER > 0
	if (netisr & (1 << NETISR_ARP)) {
		netisr &= ~(1 << NETISR_ARP);
		arpintr();
	}
#endif
	if (netisr & (1 << NETISR_IP)) {
		netisr &= ~(1 << NETISR_IP);
		ipintr();
	}
#endif
#ifdef NS
	if (netisr & (1 << NETISR_NS)) {
		netisr &= ~(1 << NETISR_NS);
		nsintr();
	}
#endif
#ifdef ISO
	if (netisr & (1 << NETISR_ISO)) {
		netisr &= ~(1 << NETISR_ISO);
		clnlintr();
	}
#endif
#ifdef CCITT
	if (netisr & (1 << NETISR_CCITT)) {
		netisr &= ~(1 << NETISR_CCITT);
		ccittintr();
	}
#endif
}
1049
/*
 * do_sir:
 *	Process pending software interrupt requests recorded in ssir:
 *	network soft interrupts first, then softclock.  Each serviced
 *	request bumps the soft-interrupt counter cnt.v_soft.
 */
void
do_sir()
{

	if (ssir & SIR_NET) {
		siroff(SIR_NET);
		cnt.v_soft++;
		netintr();
	}
	if (ssir & SIR_CLOCK) {
		siroff(SIR_CLOCK);
		cnt.v_soft++;
		softclock();
	}
}
1065
/*
 * spl0:
 *	Lower the interrupt priority level to 0.  Any pending software
 *	interrupts are serviced (at soft IPL) before the level is
 *	dropped.  Returns the previous IPL, as reported by the
 *	swap-IPL PAL call.
 */
int
spl0()
{

	if (ssir) {
		splsoft();
		do_sir();
	}

	return (pal_swpipl(PSL_IPL_0));
}
1077
1078 /*
1079 * The following primitives manipulate the run queues. _whichqs tells which
1080 * of the 32 queues _qs have processes in them. Setrunqueue puts processes
1081 * into queues, Remrq removes them from queues. The running process is on
1082 * no queue, other processes are on a queue related to p->p_priority, divided
1083 * by 4 actually to shrink the 0-127 range of priorities into the 32 available
1084 * queues.
1085 */
1086 /*
1087 * setrunqueue(p)
1088 * proc *p;
1089 *
1090 * Call should be made at splclock(), and p->p_stat should be SRUN.
1091 */
1092
/*
 * setrunqueue:
 *	Insert process p at the tail of its run queue (selected by
 *	p_priority / 4) and mark the queue non-empty in whichqs.
 *	Caller must be at splclock() and p->p_stat must be SRUN.
 */
void
setrunqueue(p)
	struct proc *p;
{
	int bit;

	/* firewall: p->p_back must be NULL */
	if (p->p_back != NULL)
		panic("setrunqueue");

	bit = p->p_priority >> 2;
	whichqs |= (1 << bit);
	/* link p in as the new tail of the doubly-linked queue header */
	p->p_forw = (struct proc *)&qs[bit];
	p->p_back = qs[bit].ph_rlink;
	p->p_back->p_forw = p;
	qs[bit].ph_rlink = p;
}
1110
1111 /*
1112 * Remrq(p)
1113 *
1114 * Call should be made at splclock().
1115 */
/*
 * remrq:
 *	Remove process p from its run queue, clearing the queue's bit
 *	in whichqs if the queue becomes empty.  Caller must be at
 *	splclock().  Panics if the queue was already marked empty.
 */
void
remrq(p)
	struct proc *p;
{
	int bit;

	bit = p->p_priority >> 2;
	if ((whichqs & (1 << bit)) == 0)
		panic("remrq");

	/* unlink p from the doubly-linked queue */
	p->p_back->p_forw = p->p_forw;
	p->p_forw->p_back = p->p_back;
	p->p_back = NULL;	/* for firewall checking. */

	/* queue empty when the header points back to itself */
	if ((struct proc *)&qs[bit] == qs[bit].ph_link)
		whichqs &= ~(1 << bit);
}
1133
1134 /*
1135 * Return the best possible estimate of the time in the timeval
1136 * to which tvp points. Unfortunately, we can't read the hardware registers.
1137 * We guarantee that the time will be greater than the value obtained by a
1138 * previous call.
1139 */
/*
 * Return the best possible estimate of the time in the timeval
 * to which tvp points. Unfortunately, we can't read the hardware registers.
 * We guarantee that the time will be greater than the value obtained by a
 * previous call.
 */
void
microtime(tvp)
	register struct timeval *tvp;
{
	int s = splclock();
	static struct timeval lasttime;	/* last value handed out */

	*tvp = time;
#ifdef notdef
	tvp->tv_usec += clkread();
	while (tvp->tv_usec > 1000000) {
		tvp->tv_sec++;
		tvp->tv_usec -= 1000000;
	}
#endif
	/*
	 * Enforce monotonicity: if this reading would not be strictly
	 * later than the last one, bump tv_usec by one past lasttime
	 * (the assignment happens inside the condition) and carry into
	 * tv_sec on overflow.
	 */
	if (tvp->tv_sec == lasttime.tv_sec &&
	    tvp->tv_usec <= lasttime.tv_usec &&
	    (tvp->tv_usec = lasttime.tv_usec + 1) > 1000000) {
		tvp->tv_sec++;
		tvp->tv_usec -= 1000000;
	}
	lasttime = *tvp;
	splx(s);
}
1164
1165 #ifdef COMPAT_OSF1
/*
 * cpu_exec_ecoff_setup:
 *	Machine-dependent ECOFF exec setup.  At EXEC_SETUP_FINISH time,
 *	load the new image's global-pointer value from the ECOFF a.out
 *	header into the process's trapframe gp; all other phases are
 *	ignored.
 */
void
cpu_exec_ecoff_setup(cmd, p, epp, sp)
	int cmd;
	struct proc *p;
	struct exec_package *epp;
	void *sp;
{
	struct ecoff_aouthdr *eap;

	if (cmd != EXEC_SETUP_FINISH)
		return;

	/* a.out header immediately follows the file header */
	eap = (struct ecoff_aouthdr *)
	    ((caddr_t)epp->ep_hdr + sizeof(struct ecoff_filehdr));
	p->p_md.md_tf->tf_gp = eap->ea_gp_value;
}
1182
1183 /*
1184 * cpu_exec_ecoff_hook():
1185 * cpu-dependent ECOFF format hook for execve().
1186 *
1187 * Do any machine-dependent diddling of the exec package when doing ECOFF.
1188 *
1189 */
/*
 * cpu_exec_ecoff_hook:
 *	Select the emulation for an ECOFF image by its magic number
 *	(OSF/1 vs. native NetBSD) and install the machine-dependent
 *	setup callback.  Returns 0.
 *
 *	NOTE(review): with DIAGNOSTIC undefined, an unrecognized magic
 *	falls through without setting ep_emul -- presumably callers
 *	have already validated the magic; confirm.
 */
int
cpu_exec_ecoff_hook(p, epp, eap)
	struct proc *p;
	struct exec_package *epp;
	struct ecoff_aouthdr *eap;
{
	struct ecoff_filehdr *efp = epp->ep_hdr;

	switch (efp->ef_magic) {
	case ECOFF_MAGIC_ALPHA:
		epp->ep_emul = EMUL_OSF1;
		break;

	case ECOFF_MAGIC_NETBSD_ALPHA:
		epp->ep_emul = EMUL_NETBSD;
		break;

#ifdef DIAGNOSTIC
	default:
		panic("cpu_exec_ecoff_hook: can't get here from there.");
#endif
	}
	epp->ep_setup = cpu_exec_ecoff_setup;
	return 0;
}
1215 #endif
1216