1 /* $NetBSD: machdep.c,v 1.30 1996/06/14 20:40:47 cgd Exp $ */
2
3 /*
4 * Copyright (c) 1994, 1995, 1996 Carnegie-Mellon University.
5 * All rights reserved.
6 *
7 * Author: Chris G. Demetriou
8 *
9 * Permission to use, copy, modify and distribute this software and
10 * its documentation is hereby granted, provided that both the copyright
11 * notice and this permission notice appear in all copies of the
12 * software, derivative works or modified versions, and any portions
13 * thereof, and that both notices appear in supporting documentation.
14 *
15 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
16 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
17 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
18 *
19 * Carnegie Mellon requests users of this software to return to
20 *
21 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
22 * School of Computer Science
23 * Carnegie Mellon University
24 * Pittsburgh PA 15213-3890
25 *
26 * any improvements or extensions that they make and grant Carnegie the
27 * rights to redistribute these changes.
28 */
29
30 #include <sys/param.h>
31 #include <sys/systm.h>
32 #include <sys/signalvar.h>
33 #include <sys/kernel.h>
34 #include <sys/map.h>
35 #include <sys/proc.h>
36 #include <sys/buf.h>
37 #include <sys/reboot.h>
38 #include <sys/device.h>
39 #include <sys/conf.h>
40 #include <sys/file.h>
41 #ifdef REAL_CLISTS
42 #include <sys/clist.h>
43 #endif
44 #include <sys/callout.h>
45 #include <sys/malloc.h>
46 #include <sys/mbuf.h>
47 #include <sys/msgbuf.h>
48 #include <sys/ioctl.h>
49 #include <sys/tty.h>
50 #include <sys/user.h>
51 #include <sys/exec.h>
52 #include <sys/exec_ecoff.h>
53 #include <sys/sysctl.h>
54 #ifdef SYSVMSG
55 #include <sys/msg.h>
56 #endif
57 #ifdef SYSVSEM
58 #include <sys/sem.h>
59 #endif
60 #ifdef SYSVSHM
61 #include <sys/shm.h>
62 #endif
63
64 #include <sys/mount.h>
65 #include <sys/syscallargs.h>
66
67 #include <vm/vm_kern.h>
68
69 #include <dev/cons.h>
70
71 #include <machine/cpu.h>
72 #include <machine/reg.h>
73 #include <machine/rpb.h>
74 #include <machine/prom.h>
75
76 #ifdef DEC_3000_500
77 #include <alpha/alpha/dec_3000_500.h>
78 #endif
79 #ifdef DEC_3000_300
80 #include <alpha/alpha/dec_3000_300.h>
81 #endif
82 #ifdef DEC_2100_A50
83 #include <alpha/alpha/dec_2100_a50.h>
84 #endif
85 #ifdef DEC_KN20AA
86 #include <alpha/alpha/dec_kn20aa.h>
87 #endif
88 #ifdef DEC_AXPPCI_33
89 #include <alpha/alpha/dec_axppci_33.h>
90 #endif
91 #ifdef DEC_21000
92 #include <alpha/alpha/dec_21000.h>
93 #endif
94
95 #include <net/netisr.h>
96 #include "ether.h"
97
98 #include "le_ioasic.h" /* for le_iomem creation */
99
100 vm_map_t buffer_map;
101
102 void dumpsys __P((void));
103
104 /*
105 * Declare these as initialized data so we can patch them.
106 */
107 int nswbuf = 0;
108 #ifdef NBUF
109 int nbuf = NBUF;
110 #else
111 int nbuf = 0;
112 #endif
113 #ifdef BUFPAGES
114 int bufpages = BUFPAGES;
115 #else
116 int bufpages = 0;
117 #endif
118 int msgbufmapped = 0; /* set when safe to use msgbuf */
119 int maxmem; /* max memory per process */
120
121 int totalphysmem; /* total amount of physical memory in system */
122 int physmem; /* physical memory used by NetBSD + some rsvd */
123 int firstusablepage; /* first usable memory page */
124 int lastusablepage; /* last usable memory page */
125 int resvmem; /* amount of memory reserved for PROM */
126 int unusedmem; /* amount of memory for OS that we don't use */
127 int unknownmem; /* amount of memory with an unknown use */
128
129 int cputype; /* system type, from the RPB */
130
131 /*
132 * XXX We need an address to which we can assign things so that they
133 * won't be optimized away because we didn't use the value.
134 */
135 u_int32_t no_optimize;
136
137 /* the following is used externally (sysctl_hw) */
138 char machine[] = "alpha";
139 char cpu_model[128];
140 char *model_names[] = {
141 "UNKNOWN (0)",
142 "Alpha Demonstration Unit",
143 "DEC 4000 (\"Cobra\")",
144 "DEC 7000 (\"Ruby\")",
145 "DEC 3000/500 (\"Flamingo\") family",
146 "UNKNOWN (5)",
147 "DEC 2000/300 (\"Jensen\")",
148 "DEC 3000/300 (\"Pelican\")",
149 "UNKNOWN (8)",
150 "DEC 2100/A500 (\"Sable\")",
151 "AXPvme 64",
152 "AXPpci 33 (\"NoName\")",
153 "DEC 21000 (\"TurboLaser\")",
154 "DEC 2100/A50 (\"Avanti\") family",
155 "Mustang",
156 "DEC KN20AA",
157 "UNKNOWN (16)",
158 "DEC 1000 (\"Mikasa\")",
159 };
160 int nmodel_names = sizeof model_names/sizeof model_names[0];
161
162 struct user *proc0paddr;
163
164 /* Number of machine cycles per microsecond */
165 u_int64_t cycles_per_usec;
166
167 /* some memory areas for device DMA. "ick." */
168 caddr_t le_iomem; /* XXX iomem for LANCE DMA */
169
170 /* Interrupt vectors (in locore) */
171 extern int XentInt(), XentArith(), XentMM(), XentIF(), XentUna(), XentSys();
172
173 /* number of cpus in the box. really! */
174 int ncpus;
175
176 /* various CPU-specific functions. */
177 char *(*cpu_modelname) __P((void));
178 void (*cpu_consinit) __P((void));
179 void (*cpu_device_register) __P((struct device *dev, void *aux));
180 char *cpu_iobus;
181
182 char boot_flags[64];
183
184 /* for cpu_sysctl() */
185 char root_device[17];
186
187 int
188 alpha_init(pfn, ptb)
189 u_long pfn; /* first free PFN number */
190 u_long ptb; /* PFN of current level 1 page table */
191 {
192 extern char _end[];
193 caddr_t start, v;
194 struct mddt *mddtp;
195 int i, mddtweird;
196 char *p;
197
198 /*
199 * Turn off interrupts and floating point.
200 * Make sure the instruction and data streams are consistent.
201 */
202 (void)splhigh();
203 pal_wrfen(0);
204 TBIA();
205 IMB();
206
207 /*
208 * Get the address of the restart block, while the bootstrap
209 * mapping is still around.
210 */
211 hwrpb = (struct rpb *) phystok0seg(*(struct rpb **)HWRPB_ADDR);
212
213 /*
214 * Remember how many cycles there are per microsecond,
215 * so that we can use delay(). Round up, for safety.
216 */
217 cycles_per_usec = (hwrpb->rpb_cc_freq + 999999) / 1000000;
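/*
 * For example, a 150 MHz cycle counter gives cycles_per_usec == 150,
 * so delay(10) below spins through roughly 1500 cycles.
 */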
218
219 /*
220 * Init the PROM interface, so we can use printf
221 * until PROM mappings go away in consinit.
222 */
223 init_prom_interface();
224
225 /*
226 * Point interrupt/exception vectors to our own.
227 */
228 pal_wrent(XentInt, 0);
229 pal_wrent(XentArith, 1);
230 pal_wrent(XentMM, 2);
231 pal_wrent(XentIF, 3);
232 pal_wrent(XentUna, 4);
233 pal_wrent(XentSys, 5);
234
235 /*
236 * Find out how much memory is available, by looking at
237 * the memory cluster descriptors. This also tries to do
238 * its best to detect things that have never been seen
239 * before...
240 *
241 * XXX Assumes that the first "system" cluster is the
242 * only one we can use. Is the second (etc.) system cluster
243 * (if one happens to exist) guaranteed to be contiguous? or...?
244 */
245 mddtp = (struct mddt *)(((caddr_t)hwrpb) + hwrpb->rpb_memdat_off);
246
247 /*
248 * BEGIN MDDT WEIRDNESS CHECKING
249 */
250 mddtweird = 0;
251
252 #define cnt mddtp->mddt_cluster_cnt
253 #define usage(n) mddtp->mddt_clusters[(n)].mddt_usage
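/*
 * The expected layout is a PALCODE cluster followed by the system
 * cluster, optionally followed by a second PALCODE cluster; anything
 * else is flagged as weird and dumped in full below.
 */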
254 if (cnt != 2 && cnt != 3) {
255 printf("WARNING: weird number (%d) of mem clusters\n", cnt);
256 mddtweird = 1;
257 } else if (usage(0) != MDDT_PALCODE ||
258 usage(1) != MDDT_SYSTEM ||
259 (cnt == 3 && usage(2) != MDDT_PALCODE)) {
260 mddtweird = 1;
261 printf("WARNING: %d mem clusters, but weird config\n", cnt);
262 }
263
264 for (i = 0; i < cnt; i++) {
265 if ((usage(i) & MDDT_mbz) != 0) {
266 printf("WARNING: mem cluster %d has weird usage %lx\n",
267 i, usage(i));
268 mddtweird = 1;
269 }
270 if (mddtp->mddt_clusters[i].mddt_pg_cnt == 0) {
271 printf("WARNING: mem cluster %d has pg cnt == 0\n", i);
272 mddtweird = 1;
273 }
274 /* XXX other things to check? */
275 }
276 #undef cnt
277 #undef usage
278
279 if (mddtweird) {
280 printf("\n");
281 printf("complete memory cluster information:\n");
282 for (i = 0; i < mddtp->mddt_cluster_cnt; i++) {
283 printf("mddt %d:\n", i);
284 printf("\tpfn %lx\n",
285 mddtp->mddt_clusters[i].mddt_pfn);
286 printf("\tcnt %lx\n",
287 mddtp->mddt_clusters[i].mddt_pg_cnt);
288 printf("\ttest %lx\n",
289 mddtp->mddt_clusters[i].mddt_pg_test);
290 printf("\tbva %lx\n",
291 mddtp->mddt_clusters[i].mddt_v_bitaddr);
292 printf("\tbpa %lx\n",
293 mddtp->mddt_clusters[i].mddt_p_bitaddr);
294 printf("\tbcksum %lx\n",
295 mddtp->mddt_clusters[i].mddt_bit_cksum);
296 printf("\tusage %lx\n",
297 mddtp->mddt_clusters[i].mddt_usage);
298 }
299 printf("\n");
300 }
301 /*
302 * END MDDT WEIRDNESS CHECKING
303 */
304
305 for (i = 0; i < mddtp->mddt_cluster_cnt; i++) {
306 totalphysmem += mddtp->mddt_clusters[i].mddt_pg_cnt;
307 #define usage(n) mddtp->mddt_clusters[(n)].mddt_usage
308 #define pgcnt(n) mddtp->mddt_clusters[(n)].mddt_pg_cnt
309 if ((usage(i) & MDDT_mbz) != 0)
310 unknownmem += pgcnt(i);
311 else if ((usage(i) & ~MDDT_mbz) == MDDT_PALCODE)
312 resvmem += pgcnt(i);
313 else if ((usage(i) & ~MDDT_mbz) == MDDT_SYSTEM) {
314 /*
315 * assumes that the system cluster listed is
316 * one we're in...
317 */
318 if (physmem != resvmem) {
319 physmem += pgcnt(i);
320 firstusablepage =
321 mddtp->mddt_clusters[i].mddt_pfn;
322 lastusablepage = firstusablepage + pgcnt(i) - 1;
323 } else
324 unusedmem += pgcnt(i);
325 }
326 #undef usage
327 #undef pgcnt
328 }
329 if (totalphysmem == 0)
330 panic("can't happen: system seems to have no memory!");
331 maxmem = physmem;
332
333 #if 0
334 printf("totalphysmem = %d\n", totalphysmem);
335 printf("physmem = %d\n", physmem);
336 printf("firstusablepage = %d\n", firstusablepage);
337 printf("lastusablepage = %d\n", lastusablepage);
338 printf("resvmem = %d\n", resvmem);
339 printf("unusedmem = %d\n", unusedmem);
340 printf("unknownmem = %d\n", unknownmem);
341 #endif
342
343 /*
344 * find out this CPU's page size
345 */
346 PAGE_SIZE = hwrpb->rpb_page_size;
347 if (PAGE_SIZE != 8192)
348 panic("page size %d != 8192?!", PAGE_SIZE);
349
350 v = (caddr_t)alpha_round_page(_end);
351 /*
352 * Init mapping for u page(s) for proc 0
353 */
354 start = v;
355 curproc->p_addr = proc0paddr = (struct user *)v;
356 v += UPAGES * NBPG;
357
358 /*
359 * Find out what hardware we're on, and remember its type name.
360 */
361 cputype = hwrpb->rpb_type;
362 switch (cputype) {
363 #ifdef DEC_3000_500 /* and 400, [6-9]00 */
364 case ST_DEC_3000_500:
365 cpu_modelname = dec_3000_500_modelname;
366 cpu_consinit = dec_3000_500_consinit;
367 cpu_device_register = dec_3000_500_device_register;
368 cpu_iobus = "tcasic";
369 break;
370 #endif
371
372 #ifdef DEC_3000_300
373 case ST_DEC_3000_300:
374 cpu_modelname = dec_3000_300_modelname;
375 cpu_consinit = dec_3000_300_consinit;
376 cpu_device_register = dec_3000_300_device_register;
377 cpu_iobus = "tcasic";
378 break;
379 #endif
380
381 #ifdef DEC_2100_A50
382 case ST_DEC_2100_A50:
383 cpu_modelname = dec_2100_a50_modelname;
384 cpu_consinit = dec_2100_a50_consinit;
385 cpu_device_register = dec_2100_a50_device_register;
386 cpu_iobus = "apecs";
387 break;
388 #endif
389
390 #ifdef DEC_KN20AA
391 case ST_DEC_KN20AA:
392 cpu_modelname = dec_kn20aa_modelname;
393 cpu_consinit = dec_kn20aa_consinit;
394 cpu_device_register = dec_kn20aa_device_register;
395 cpu_iobus = "cia";
396 break;
397 #endif
398
399 #ifdef DEC_AXPPCI_33
400 case ST_DEC_AXPPCI_33:
401 cpu_modelname = dec_axppci_33_modelname;
402 cpu_consinit = dec_axppci_33_consinit;
403 cpu_device_register = dec_axppci_33_device_register;
404 cpu_iobus = "lca";
405 break;
406 #endif
407
408 #ifdef DEC_2000_300
409 case ST_DEC_2000_300:
410 cpu_modelname = dec_2000_300_modelname;
411 cpu_consinit = dec_2000_300_consinit;
412 cpu_device_register = dec_2000_300_device_register;
413 cpu_iobus = "ibus";
414 XXX DEC 2000/300 NOT SUPPORTED
415 break;
416 #endif
417
418 #ifdef DEC_21000
419 case ST_DEC_21000:
420 cpu_modelname = dec_21000_modelname;
421 cpu_consinit = dec_21000_consinit;
422 cpu_device_register = dec_21000_device_register;
423 cpu_iobus = "tlsb";
424 XXX DEC 21000 NOT SUPPORTED
425 break;
426 #endif
427
428 default:
429 if (cputype >= nmodel_names)
430 panic("Unknown system type %d", cputype);
431 else
432 panic("Support for %s system type not in kernel.",
433 model_names[cputype]);
434 }
435
436 if ((*cpu_modelname)() != NULL)
437 strncpy(cpu_model, (*cpu_modelname)(), sizeof cpu_model - 1);
438 else
439 strncpy(cpu_model, model_names[cputype], sizeof cpu_model - 1);
440 cpu_model[sizeof cpu_model - 1] = '\0';
441
442 #if NLE_IOASIC > 0
443 /*
444 * Grab 128K at the top of physical memory for the lance chip
445 * on machines where it does dma through the I/O ASIC.
446 * It must be physically contiguous and aligned on a 128K boundary.
447 *
448 * Note that since this is conditional on the presence of
449 * IOASIC-attached 'le' units in the kernel config, the
450 * message buffer may move on these systems. This shouldn't
451 * be a problem, because once people have a kernel config that
452 * they use, they're going to stick with it.
453 */
454 if (cputype == ST_DEC_3000_500 ||
455 cputype == ST_DEC_3000_300) { /* XXX possibly others? */
456 lastusablepage -= btoc(128 * 1024);
457 le_iomem = (caddr_t)phystok0seg(ctob(lastusablepage + 1));
458 }
459 #endif /* NLE_IOASIC */
460
461 /*
462 * Initialize error message buffer (at end of core).
463 */
464 lastusablepage -= btoc(sizeof (struct msgbuf));
465 msgbufp = (struct msgbuf *)phystok0seg(ctob(lastusablepage + 1));
466 msgbufmapped = 1;
467
468 /*
469 * Allocate space for system data structures.
470 * The first available kernel virtual address is in "v".
471 * As pages of kernel virtual memory are allocated, "v" is incremented.
472 *
473 * These data structures are allocated here instead of cpu_startup()
474 * because physical memory is directly addressable. We don't have
475 * to map these into virtual address space.
476 */
477 #define valloc(name, type, num) \
478 (name) = (type *)v; v = (caddr_t)ALIGN((name)+(num))
479 #define valloclim(name, type, num, lim) \
480 (name) = (type *)v; v = (caddr_t)ALIGN((lim) = ((name)+(num)))
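/*
 * Each valloc() carves space out of the directly-mapped region by
 * bumping v; valloc(callout, struct callout, ncallout), for instance,
 * expands to roughly
 *
 *	callout = (struct callout *)v;
 *	v = (caddr_t)ALIGN(callout + ncallout);
 */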
481 #ifdef REAL_CLISTS
482 valloc(cfree, struct cblock, nclist);
483 #endif
484 valloc(callout, struct callout, ncallout);
485 valloc(swapmap, struct map, nswapmap = maxproc * 2);
486 #ifdef SYSVSHM
487 valloc(shmsegs, struct shmid_ds, shminfo.shmmni);
488 #endif
489 #ifdef SYSVSEM
490 valloc(sema, struct semid_ds, seminfo.semmni);
491 valloc(sem, struct sem, seminfo.semmns);
492 /* This is pretty disgusting! */
493 valloc(semu, int, (seminfo.semmnu * seminfo.semusz) / sizeof(int));
494 #endif
495 #ifdef SYSVMSG
496 valloc(msgpool, char, msginfo.msgmax);
497 valloc(msgmaps, struct msgmap, msginfo.msgseg);
498 valloc(msghdrs, struct msg, msginfo.msgtql);
499 valloc(msqids, struct msqid_ds, msginfo.msgmni);
500 #endif
501
502 /*
503 * Determine how many buffers to allocate.
504 * We allocate the BSD standard of 10% of memory for the first
505 * 2 Meg, and 5% of remaining memory for buffer space. Ensure a
506 * minimum of 16 buffers. We allocate 1/2 as many swap buffer
507 * headers as file i/o buffers.
508 */
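/*
 * A rough worked example (assuming 8 KB pages and CLSIZE == 1, as on
 * the alpha): with 64 MB of memory, bufpages is (256 + 8192) / 20, or
 * about 422 pages -- roughly 3.3 MB of buffer cache.
 */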
509 if (bufpages == 0)
510 bufpages = (btoc(2 * 1024 * 1024) + physmem) /
511 (20 * CLSIZE);
512 if (nbuf == 0) {
513 nbuf = bufpages;
514 if (nbuf < 16)
515 nbuf = 16;
516 }
517 if (nswbuf == 0) {
518 nswbuf = (nbuf / 2) &~ 1; /* force even */
519 if (nswbuf > 256)
520 nswbuf = 256; /* sanity */
521 }
522 valloc(swbuf, struct buf, nswbuf);
523 valloc(buf, struct buf, nbuf);
524
525 /*
526 * Clear allocated memory.
527 */
528 bzero(start, v - start);
529
530 /*
531 * Initialize the virtual memory system, and set the
532 * page table base register in proc 0's PCB.
533 */
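/*
 * ptb is the PFN of the current level 1 page table, so shifting it by
 * PGSHIFT yields its physical address; phystok0seg() then gives the
 * directly-mapped K0SEG address that is handed to pmap_bootstrap().
 */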
534 pmap_bootstrap((vm_offset_t)v, phystok0seg(ptb << PGSHIFT));
535
536 /*
537 * Initialize the rest of proc 0's PCB, and cache its physical
538 * address.
539 */
540 proc0.p_md.md_pcbpaddr =
541 (struct pcb *)k0segtophys(&proc0paddr->u_pcb);
542
543 /*
544 * Set the kernel sp, reserving space for an (empty) trapframe,
545 * and make proc0's trapframe pointer point to it for sanity.
546 */
547 proc0paddr->u_pcb.pcb_ksp =
548 (u_int64_t)proc0paddr + USPACE - sizeof(struct trapframe);
549 proc0.p_md.md_tf = (struct trapframe *)proc0paddr->u_pcb.pcb_ksp;
550
551 /*
552 * Look at arguments passed to us and compute boothowto.
553 */
554 prom_getenv(PROM_E_BOOTED_OSFLAGS, boot_flags, sizeof(boot_flags));
555 #if 0
556 printf("boot flags = \"%s\"\n", boot_flags);
557 #endif
558
559 boothowto = RB_SINGLE;
560 #ifdef KADB
561 boothowto |= RB_KDB;
562 #endif
563 for (p = boot_flags; p && *p != '\0'; p++) {
564 /*
565 * Note that we'd really like to differentiate case here,
566 * but the Alpha AXP Architecture Reference Manual
567 * says that we shouldn't.
568 */
569 switch (*p) {
570 case 'a': /* autoboot */
571 case 'A':
572 boothowto &= ~RB_SINGLE;
573 break;
574
575 case 'n': /* askname */
576 case 'N':
577 boothowto |= RB_ASKNAME;
578 break;
579
580 #if 0
581 case 'm': /* mini root present in memory */
582 case 'M':
583 boothowto |= RB_MINIROOT;
584 break;
585 #endif
586 }
587 }
588
589 /*
590 * Figure out the number of cpus in the box, from RPB fields.
591 * Really. We mean it.
592 */
593 for (i = 0; i < hwrpb->rpb_pcs_cnt; i++) {
594 struct pcs *pcsp;
595
596 pcsp = (struct pcs *)((char *)hwrpb + hwrpb->rpb_pcs_off +
597 (i * hwrpb->rpb_pcs_size));
598 if ((pcsp->pcs_flags & PCS_PP) != 0)
599 ncpus++;
600 }
601
602 return (0);
603 }
604
605 void
606 consinit()
607 {
608
609 (*cpu_consinit)();
610 pmap_unmap_prom();
611 }
612
613 void
614 cpu_startup()
615 {
616 register unsigned i;
617 register caddr_t v;
618 int base, residual;
619 vm_offset_t minaddr, maxaddr;
620 vm_size_t size;
621 #ifdef DEBUG
622 extern int pmapdebug;
623 int opmapdebug = pmapdebug;
624
625 pmapdebug = 0;
626 #endif
627
628 /*
629 * Good {morning,afternoon,evening,night}.
630 */
631 printf(version);
632 identifycpu();
633 printf("real mem = %d (%d reserved for PROM, %d used by NetBSD)\n",
634 ctob(totalphysmem), ctob(resvmem), ctob(physmem));
635 if (unusedmem)
636 printf("WARNING: unused memory = %d bytes\n", ctob(unusedmem));
637 if (unknownmem)
638 printf("WARNING: %d bytes of memory with unknown purpose\n",
639 ctob(unknownmem));
640
641 /*
642 * Allocate virtual address space for file I/O buffers.
643 * Note they are different than the array of headers, 'buf',
644 * and usually occupy more virtual memory than physical.
645 */
646 size = MAXBSIZE * nbuf;
647 buffer_map = kmem_suballoc(kernel_map, (vm_offset_t *)&buffers,
648 &maxaddr, size, TRUE);
649 minaddr = (vm_offset_t)buffers;
650 if (vm_map_find(buffer_map, vm_object_allocate(size), (vm_offset_t)0,
651 &minaddr, size, FALSE) != KERN_SUCCESS)
652 panic("startup: cannot allocate buffers");
653 base = bufpages / nbuf;
654 residual = bufpages % nbuf;
655 for (i = 0; i < nbuf; i++) {
656 vm_size_t curbufsize;
657 vm_offset_t curbuf;
658
659 /*
660 * First <residual> buffers get (base+1) physical pages
661 * allocated for them. The rest get (base) physical pages.
662 *
663 * The rest of each buffer occupies virtual space,
664 * but has no physical memory allocated for it.
665 */
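/*
 * e.g. with bufpages == 500 and nbuf == 400, base == 1 and
 * residual == 100, so the first 100 buffers get two pages each.
 */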
666 curbuf = (vm_offset_t)buffers + i * MAXBSIZE;
667 curbufsize = CLBYTES * (i < residual ? base+1 : base);
668 vm_map_pageable(buffer_map, curbuf, curbuf+curbufsize, FALSE);
669 vm_map_simplify(buffer_map, curbuf);
670 }
671 /*
672 * Allocate a submap for exec arguments. This map effectively
673 * limits the number of processes exec'ing at any time.
674 */
675 exec_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
676 16 * NCARGS, TRUE);
677
678 /*
679 * Allocate a submap for physio
680 */
681 phys_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
682 VM_PHYS_SIZE, TRUE);
683
684 /*
685 * Finally, allocate mbuf pool. Since mclrefcnt is an off-size
686 * we use the more space efficient malloc in place of kmem_alloc.
687 */
688 mclrefcnt = (char *)malloc(NMBCLUSTERS+CLBYTES/MCLBYTES,
689 M_MBUF, M_NOWAIT);
690 bzero(mclrefcnt, NMBCLUSTERS+CLBYTES/MCLBYTES);
691 mb_map = kmem_suballoc(kernel_map, (vm_offset_t *)&mbutl, &maxaddr,
692 VM_MBUF_SIZE, FALSE);
693 /*
694 * Initialize callouts
695 */
696 callfree = callout;
697 for (i = 1; i < ncallout; i++)
698 callout[i-1].c_next = &callout[i];
699 callout[i-1].c_next = NULL;
700
701 #ifdef DEBUG
702 pmapdebug = opmapdebug;
703 #endif
704 printf("avail mem = %ld\n", (long)ptoa(cnt.v_free_count));
705 printf("using %ld buffers containing %ld bytes of memory\n",
706 (long)nbuf, (long)(bufpages * CLBYTES));
707
708 /*
709 * Set up buffers, so they can be used to read disk labels.
710 */
711 bufinit();
712
713 /*
714 * Configure the system.
715 */
716 configure();
717 }
718
719 identifycpu()
720 {
721
722 /*
723 * print out CPU identification information.
724 */
725 printf("%s, %ldMHz\n", cpu_model,
726 hwrpb->rpb_cc_freq / 1000000); /* XXX true for 21164? */
727 printf("%ld byte page size, %d processor%s.\n",
728 hwrpb->rpb_page_size, ncpus, ncpus == 1 ? "" : "s");
729 #if 0
730 /* this isn't defined for any systems that we run on? */
731 printf("serial number 0x%lx 0x%lx\n",
732 ((long *)hwrpb->rpb_ssn)[0], ((long *)hwrpb->rpb_ssn)[1]);
733
734 /* and these aren't particularly useful! */
735 printf("variation: 0x%lx, revision 0x%lx\n",
736 hwrpb->rpb_variation, *(long *)hwrpb->rpb_revision);
737 #endif
738 }
739
740 int waittime = -1;
741 struct pcb dumppcb;
742
743 void
744 boot(howto)
745 int howto;
746 {
747 extern int cold;
748
749 /* If system is cold, just halt. */
750 if (cold) {
751 howto |= RB_HALT;
752 goto haltsys;
753 }
754
755 boothowto = howto;
756 if ((howto & RB_NOSYNC) == 0 && waittime < 0) {
757 waittime = 0;
758 vfs_shutdown();
759 /*
760 * If we've been adjusting the clock, the todr
761 * will be out of synch; adjust it now.
762 */
763 resettodr();
764 }
765
766 /* Disable interrupts. */
767 splhigh();
768
769 /* If rebooting and a dump is requested do it. */
770 if ((howto & (RB_DUMP | RB_HALT)) == RB_DUMP) {
771 savectx(&dumppcb, 0);
772 dumpsys();
773 }
774
775 haltsys:
776
777 /* run any shutdown hooks */
778 doshutdownhooks();
779
780 #ifdef BOOTKEY
781 printf("hit any key to %s...\n", howto & RB_HALT ? "halt" : "reboot");
782 cngetc();
783 printf("\n");
784 #endif
785
786 /* Finally, halt/reboot the system. */
787 printf("%s\n\n", howto & RB_HALT ? "halted." : "rebooting...");
788 prom_halt(howto & RB_HALT);
789 /*NOTREACHED*/
790 }
791
792 /*
793 * These variables are needed by /sbin/savecore
794 */
795 u_long dumpmag = 0x8fca0101; /* magic number */
796 int dumpsize = 0; /* pages */
797 long dumplo = 0; /* blocks */
798
799 /*
800 * This is called by configure to set dumplo and dumpsize.
801 * Dumps always skip the first CLBYTES of disk space
802 * in case there might be a disk label stored there.
803 * If there is extra space, put dump at the end to
804 * reduce the chance that swapping trashes it.
805 */
806 void
807 dumpconf()
808 {
809 int nblks; /* size of dump area */
810 int maj;
811
812 if (dumpdev == NODEV)
813 return;
814 maj = major(dumpdev);
815 if (maj < 0 || maj >= nblkdev)
816 panic("dumpconf: bad dumpdev=0x%x", dumpdev);
817 if (bdevsw[maj].d_psize == NULL)
818 return;
819 nblks = (*bdevsw[maj].d_psize)(dumpdev);
820 if (nblks <= ctod(1))
821 return;
822
823 /* XXX XXX XXX STARTING MEMORY LOCATION */
824 dumpsize = physmem;
825
826 /* Always skip the first CLBYTES, in case there is a label there. */
827 if (dumplo < ctod(1))
828 dumplo = ctod(1);
829
830 /* Put dump at end of partition, and make it fit. */
831 if (dumpsize > dtoc(nblks - dumplo))
832 dumpsize = dtoc(nblks - dumplo);
833 if (dumplo < nblks - ctod(dumpsize))
834 dumplo = nblks - ctod(dumpsize);
835 }
836
837 /*
838 * Doadump comes here after turning off memory management and
839 * getting on the dump stack, either when called above, or by
840 * the auto-restart code.
841 */
842 void
843 dumpsys()
844 {
845
846 msgbufmapped = 0;
847 if (dumpdev == NODEV)
848 return;
849 if (dumpsize == 0) {
850 dumpconf();
851 if (dumpsize == 0)
852 return;
853 }
854 printf("\ndumping to dev %x, offset %ld\n", dumpdev, dumplo);
855
856 printf("dump ");
857 switch ((*bdevsw[major(dumpdev)].d_dump)(dumpdev)) {
858
859 case ENXIO:
860 printf("device bad\n");
861 break;
862
863 case EFAULT:
864 printf("device not ready\n");
865 break;
866
867 case EINVAL:
868 printf("area improper\n");
869 break;
870
871 case EIO:
872 printf("i/o error\n");
873 break;
874
875 case EINTR:
876 printf("aborted from console\n");
877 break;
878
879 default:
880 printf("succeeded\n");
881 break;
882 }
883 printf("\n\n");
884 delay(1000);
885 }
886
887 void
888 frametoreg(framep, regp)
889 struct trapframe *framep;
890 struct reg *regp;
891 {
892
893 regp->r_regs[R_V0] = framep->tf_regs[FRAME_V0];
894 regp->r_regs[R_T0] = framep->tf_regs[FRAME_T0];
895 regp->r_regs[R_T1] = framep->tf_regs[FRAME_T1];
896 regp->r_regs[R_T2] = framep->tf_regs[FRAME_T2];
897 regp->r_regs[R_T3] = framep->tf_regs[FRAME_T3];
898 regp->r_regs[R_T4] = framep->tf_regs[FRAME_T4];
899 regp->r_regs[R_T5] = framep->tf_regs[FRAME_T5];
900 regp->r_regs[R_T6] = framep->tf_regs[FRAME_T6];
901 regp->r_regs[R_T7] = framep->tf_regs[FRAME_T7];
902 regp->r_regs[R_S0] = framep->tf_regs[FRAME_S0];
903 regp->r_regs[R_S1] = framep->tf_regs[FRAME_S1];
904 regp->r_regs[R_S2] = framep->tf_regs[FRAME_S2];
905 regp->r_regs[R_S3] = framep->tf_regs[FRAME_S3];
906 regp->r_regs[R_S4] = framep->tf_regs[FRAME_S4];
907 regp->r_regs[R_S5] = framep->tf_regs[FRAME_S5];
908 regp->r_regs[R_S6] = framep->tf_regs[FRAME_S6];
909 regp->r_regs[R_A0] = framep->tf_a0;
910 regp->r_regs[R_A1] = framep->tf_a1;
911 regp->r_regs[R_A2] = framep->tf_a2;
912 regp->r_regs[R_A3] = framep->tf_regs[FRAME_A3];
913 regp->r_regs[R_A4] = framep->tf_regs[FRAME_A4];
914 regp->r_regs[R_A5] = framep->tf_regs[FRAME_A5];
915 regp->r_regs[R_T8] = framep->tf_regs[FRAME_T8];
916 regp->r_regs[R_T9] = framep->tf_regs[FRAME_T9];
917 regp->r_regs[R_T10] = framep->tf_regs[FRAME_T10];
918 regp->r_regs[R_T11] = framep->tf_regs[FRAME_T11];
919 regp->r_regs[R_RA] = framep->tf_regs[FRAME_RA];
920 regp->r_regs[R_T12] = framep->tf_regs[FRAME_T12];
921 regp->r_regs[R_AT] = framep->tf_regs[FRAME_AT];
922 regp->r_regs[R_GP] = framep->tf_gp;
923 regp->r_regs[R_SP] = framep->tf_regs[FRAME_SP];
924 regp->r_regs[R_ZERO] = 0;
925 }
926
927 void
928 regtoframe(regp, framep)
929 struct reg *regp;
930 struct trapframe *framep;
931 {
932
933 framep->tf_regs[FRAME_V0] = regp->r_regs[R_V0];
934 framep->tf_regs[FRAME_T0] = regp->r_regs[R_T0];
935 framep->tf_regs[FRAME_T1] = regp->r_regs[R_T1];
936 framep->tf_regs[FRAME_T2] = regp->r_regs[R_T2];
937 framep->tf_regs[FRAME_T3] = regp->r_regs[R_T3];
938 framep->tf_regs[FRAME_T4] = regp->r_regs[R_T4];
939 framep->tf_regs[FRAME_T5] = regp->r_regs[R_T5];
940 framep->tf_regs[FRAME_T6] = regp->r_regs[R_T6];
941 framep->tf_regs[FRAME_T7] = regp->r_regs[R_T7];
942 framep->tf_regs[FRAME_S0] = regp->r_regs[R_S0];
943 framep->tf_regs[FRAME_S1] = regp->r_regs[R_S1];
944 framep->tf_regs[FRAME_S2] = regp->r_regs[R_S2];
945 framep->tf_regs[FRAME_S3] = regp->r_regs[R_S3];
946 framep->tf_regs[FRAME_S4] = regp->r_regs[R_S4];
947 framep->tf_regs[FRAME_S5] = regp->r_regs[R_S5];
948 framep->tf_regs[FRAME_S6] = regp->r_regs[R_S6];
949 framep->tf_a0 = regp->r_regs[R_A0];
950 framep->tf_a1 = regp->r_regs[R_A1];
951 framep->tf_a2 = regp->r_regs[R_A2];
952 framep->tf_regs[FRAME_A3] = regp->r_regs[R_A3];
953 framep->tf_regs[FRAME_A4] = regp->r_regs[R_A4];
954 framep->tf_regs[FRAME_A5] = regp->r_regs[R_A5];
955 framep->tf_regs[FRAME_T8] = regp->r_regs[R_T8];
956 framep->tf_regs[FRAME_T9] = regp->r_regs[R_T9];
957 framep->tf_regs[FRAME_T10] = regp->r_regs[R_T10];
958 framep->tf_regs[FRAME_T11] = regp->r_regs[R_T11];
959 framep->tf_regs[FRAME_RA] = regp->r_regs[R_RA];
960 framep->tf_regs[FRAME_T12] = regp->r_regs[R_T12];
961 framep->tf_regs[FRAME_AT] = regp->r_regs[R_AT];
962 framep->tf_gp = regp->r_regs[R_GP];
963 framep->tf_regs[FRAME_SP] = regp->r_regs[R_SP];
964 /* ??? = regp->r_regs[R_ZERO]; */
965 }
966
967 void
968 printregs(regp)
969 struct reg *regp;
970 {
971 int i;
972
973 for (i = 0; i < 32; i++)
974 printf("R%d:\t0x%016lx%s", i, regp->r_regs[i],
975 i & 1 ? "\n" : "\t");
976 }
977
978 void
979 regdump(framep)
980 struct trapframe *framep;
981 {
982 struct reg reg;
983
984 frametoreg(framep, &reg);
985 printf("REGISTERS:\n");
986 printregs(&reg);
987 }
988
989 #ifdef DEBUG
990 int sigdebug = 0;
991 int sigpid = 0;
992 #define SDB_FOLLOW 0x01
993 #define SDB_KSTACK 0x02
994 #endif
995
996 /*
997 * Send an interrupt to process.
998 */
999 void
1000 sendsig(catcher, sig, mask, code)
1001 sig_t catcher;
1002 int sig, mask;
1003 u_long code;
1004 {
1005 struct proc *p = curproc;
1006 struct sigcontext *scp, ksc;
1007 struct trapframe *frame;
1008 struct sigacts *psp = p->p_sigacts;
1009 int oonstack, fsize, rndfsize;
1010 extern char sigcode[], esigcode[];
1011 extern struct proc *fpcurproc;
1012
1013 frame = p->p_md.md_tf;
1014 oonstack = psp->ps_sigstk.ss_flags & SS_ONSTACK;
1015 fsize = sizeof ksc;
1016 rndfsize = ((fsize + 15) / 16) * 16;
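/*
 * The sigcontext size is rounded up to a multiple of 16 so that the
 * stack pointer handed to the handler stays octaword-aligned, as the
 * Alpha calling conventions expect.
 */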
1017 /*
1018 * Allocate and validate space for the signal handler
1019 * context. Note that if the stack is in P0 space, the
1020 * call to grow() is a nop, and the useracc() check
1021 * will fail if the process has not already allocated
1022 * the space with a `brk'.
1023 */
1024 if ((psp->ps_flags & SAS_ALTSTACK) && !oonstack &&
1025 (psp->ps_sigonstack & sigmask(sig))) {
1026 scp = (struct sigcontext *)(psp->ps_sigstk.ss_sp +
1027 psp->ps_sigstk.ss_size - rndfsize);
1028 psp->ps_sigstk.ss_flags |= SS_ONSTACK;
1029 } else
1030 scp = (struct sigcontext *)(frame->tf_regs[FRAME_SP] -
1031 rndfsize);
1032 if ((u_long)scp <= USRSTACK - ctob(p->p_vmspace->vm_ssize))
1033 (void)grow(p, (u_long)scp);
1034 #ifdef DEBUG
1035 if ((sigdebug & SDB_KSTACK) && p->p_pid == sigpid)
1036 printf("sendsig(%d): sig %d ssp %lx usp %lx\n", p->p_pid,
1037 sig, &oonstack, scp);
1038 #endif
1039 if (useracc((caddr_t)scp, fsize, B_WRITE) == 0) {
1040 #ifdef DEBUG
1041 if ((sigdebug & SDB_KSTACK) && p->p_pid == sigpid)
1042 printf("sendsig(%d): useracc failed on sig %d\n",
1043 p->p_pid, sig);
1044 #endif
1045 /*
1046 * Process has trashed its stack; give it an illegal
1047 * instruction to halt it in its tracks.
1048 */
1049 SIGACTION(p, SIGILL) = SIG_DFL;
1050 sig = sigmask(SIGILL);
1051 p->p_sigignore &= ~sig;
1052 p->p_sigcatch &= ~sig;
1053 p->p_sigmask &= ~sig;
1054 psignal(p, SIGILL);
1055 return;
1056 }
1057
1058 /*
1059 * Build the signal context to be used by sigreturn.
1060 */
1061 ksc.sc_onstack = oonstack;
1062 ksc.sc_mask = mask;
1063 ksc.sc_pc = frame->tf_pc;
1064 ksc.sc_ps = frame->tf_ps;
1065
1066 /* copy the registers. */
1067 frametoreg(frame, (struct reg *)ksc.sc_regs);
1068 ksc.sc_regs[R_ZERO] = 0xACEDBADE; /* magic number */
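/* sigreturn() below refuses any context that lacks this magic value. */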
1069
1070 /* save the floating-point state, if necessary, then copy it. */
1071 if (p == fpcurproc) {
1072 pal_wrfen(1);
1073 savefpstate(&p->p_addr->u_pcb.pcb_fp);
1074 pal_wrfen(0);
1075 fpcurproc = NULL;
1076 }
1077 ksc.sc_ownedfp = p->p_md.md_flags & MDP_FPUSED;
1078 bcopy(&p->p_addr->u_pcb.pcb_fp, (struct fpreg *)ksc.sc_fpregs,
1079 sizeof(struct fpreg));
1080 ksc.sc_fp_control = 0; /* XXX ? */
1081 bzero(ksc.sc_reserved, sizeof ksc.sc_reserved); /* XXX */
1082 bzero(ksc.sc_xxx, sizeof ksc.sc_xxx); /* XXX */
1083
1084
1085 #ifdef COMPAT_OSF1
1086 /*
1087 * XXX Create an OSF/1-style sigcontext and associated goo.
1088 */
1089 #endif
1090
1091 /*
1092 * copy the frame out to userland.
1093 */
1094 (void) copyout((caddr_t)&ksc, (caddr_t)scp, fsize);
1095 #ifdef DEBUG
1096 if (sigdebug & SDB_FOLLOW)
1097 printf("sendsig(%d): sig %d scp %lx code %lx\n", p->p_pid, sig,
1098 scp, code);
1099 #endif
1100
1101 /*
1102 * Set up the registers to return to sigcode.
1103 */
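/*
 * The PC is aimed at the signal trampoline (sigcode), which the kernel
 * copies to just below PS_STRINGS at exec time; the trampoline calls
 * the handler through t12 (the procedure value) and then performs the
 * sigreturn(2) system call to restore the saved context.
 */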
1104 frame->tf_pc = (u_int64_t)PS_STRINGS - (esigcode - sigcode);
1105 frame->tf_regs[FRAME_SP] = (u_int64_t)scp;
1106 frame->tf_a0 = sig;
1107 frame->tf_a1 = code;
1108 frame->tf_a2 = (u_int64_t)scp;
1109 frame->tf_regs[FRAME_T12] = (u_int64_t)catcher; /* t12 is pv */
1110
1111 #ifdef DEBUG
1112 if (sigdebug & SDB_FOLLOW)
1113 printf("sendsig(%d): pc %lx, catcher %lx\n", p->p_pid,
1114 frame->tf_pc, frame->tf_regs[FRAME_T12]);
1115 if ((sigdebug & SDB_KSTACK) && p->p_pid == sigpid)
1116 printf("sendsig(%d): sig %d returns\n",
1117 p->p_pid, sig);
1118 #endif
1119 }
1120
1121 /*
1122 * System call to cleanup state after a signal
1123 * has been taken. Reset signal mask and
1124 * stack state from context left by sendsig (above).
1125 * Return to previous pc and psl as specified by
1126 * context left by sendsig. Check carefully to
1127 * make sure that the user has not modified the
1128 * psl to gain improper privileges or to cause
1129 * a machine fault.
1130 */
1131 /* ARGSUSED */
1132 int
1133 sys_sigreturn(p, v, retval)
1134 struct proc *p;
1135 void *v;
1136 register_t *retval;
1137 {
1138 struct sys_sigreturn_args /* {
1139 syscallarg(struct sigcontext *) sigcntxp;
1140 } */ *uap = v;
1141 struct sigcontext *scp, ksc;
1142 extern struct proc *fpcurproc;
1143
1144 scp = SCARG(uap, sigcntxp);
1145 #ifdef DEBUG
1146 if (sigdebug & SDB_FOLLOW)
1147 printf("sigreturn: pid %d, scp %lx\n", p->p_pid, scp);
1148 #endif
1149
1150 if (ALIGN(scp) != (u_int64_t)scp)
1151 return (EINVAL);
1152
1153 /*
1154 * Test and fetch the context structure.
1155 * We grab it all at once for speed.
1156 */
1157 if (useracc((caddr_t)scp, sizeof (*scp), B_WRITE) == 0 ||
1158 copyin((caddr_t)scp, (caddr_t)&ksc, sizeof ksc))
1159 return (EINVAL);
1160
1161 if (ksc.sc_regs[R_ZERO] != 0xACEDBADE) /* magic number */
1162 return (EINVAL);
1163 /*
1164 * Restore the user-supplied information
1165 */
1166 if (ksc.sc_onstack)
1167 p->p_sigacts->ps_sigstk.ss_flags |= SS_ONSTACK;
1168 else
1169 p->p_sigacts->ps_sigstk.ss_flags &= ~SS_ONSTACK;
1170 p->p_sigmask = ksc.sc_mask &~ sigcantmask;
1171
1172 p->p_md.md_tf->tf_pc = ksc.sc_pc;
1173 p->p_md.md_tf->tf_ps = (ksc.sc_ps | PSL_USERSET) & ~PSL_USERCLR;
1174
1175 regtoframe((struct reg *)ksc.sc_regs, p->p_md.md_tf);
1176
1177 /* XXX ksc.sc_ownedfp ? */
1178 if (p == fpcurproc)
1179 fpcurproc = NULL;
1180 bcopy((struct fpreg *)ksc.sc_fpregs, &p->p_addr->u_pcb.pcb_fp,
1181 sizeof(struct fpreg));
1182 /* XXX ksc.sc_fp_control ? */
1183
1184 #ifdef DEBUG
1185 if (sigdebug & SDB_FOLLOW)
1186 printf("sigreturn(%d): returns\n", p->p_pid);
1187 #endif
1188 return (EJUSTRETURN);
1189 }
1190
1191 /*
1192 * machine dependent system variables.
1193 */
1194 cpu_sysctl(name, namelen, oldp, oldlenp, newp, newlen, p)
1195 int *name;
1196 u_int namelen;
1197 void *oldp;
1198 size_t *oldlenp;
1199 void *newp;
1200 size_t newlen;
1201 struct proc *p;
1202 {
1203 dev_t consdev;
1204
1205 /* all sysctl names at this level are terminal */
1206 if (namelen != 1)
1207 return (ENOTDIR); /* overloaded */
1208
1209 switch (name[0]) {
1210 case CPU_CONSDEV:
1211 if (cn_tab != NULL)
1212 consdev = cn_tab->cn_dev;
1213 else
1214 consdev = NODEV;
1215 return (sysctl_rdstruct(oldp, oldlenp, newp, &consdev,
1216 sizeof consdev));
1217
1218 case CPU_ROOT_DEVICE:
1219 return (sysctl_rdstring(oldp, oldlenp, newp, root_device));
1220
1221 default:
1222 return (EOPNOTSUPP);
1223 }
1224 /* NOTREACHED */
1225 }
1226
1227 /*
1228 * Set registers on exec.
1229 */
1230 void
1231 setregs(p, pack, stack, retval)
1232 register struct proc *p;
1233 struct exec_package *pack;
1234 u_long stack;
1235 register_t *retval;
1236 {
1237 struct trapframe *tfp = p->p_md.md_tf;
1238 int i;
1239 extern struct proc *fpcurproc;
1240
1241 #ifdef DEBUG
1242 for (i = 0; i < FRAME_NSAVEREGS; i++)
1243 tfp->tf_regs[i] = 0xbabefacedeadbeef;
1244 tfp->tf_gp = 0xbabefacedeadbeef;
1245 tfp->tf_a0 = 0xbabefacedeadbeef;
1246 tfp->tf_a1 = 0xbabefacedeadbeef;
1247 tfp->tf_a2 = 0xbabefacedeadbeef;
1248 #else
1249 bzero(tfp->tf_regs, FRAME_NSAVEREGS * sizeof tfp->tf_regs[0]);
1250 tfp->tf_gp = 0;
1251 tfp->tf_a0 = 0;
1252 tfp->tf_a1 = 0;
1253 tfp->tf_a2 = 0;
1254 #endif
1255 bzero(&p->p_addr->u_pcb.pcb_fp, sizeof p->p_addr->u_pcb.pcb_fp);
1256 #define FP_RN 2 /* XXX */
1257 p->p_addr->u_pcb.pcb_fp.fpr_cr = (long)FP_RN << 58;
1258 tfp->tf_regs[FRAME_SP] = stack; /* restored to usp in trap return */
1259 tfp->tf_ps = PSL_USERSET;
1260 tfp->tf_pc = pack->ep_entry & ~3;
1261
1262 p->p_md.md_flags &= ~MDP_FPUSED;
1263 if (fpcurproc == p)
1264 fpcurproc = NULL;
1265
1266 retval[0] = retval[1] = 0;
1267 }
1268
1269 void
1270 netintr()
1271 {
1272 #ifdef INET
1273 #if NETHER > 0
1274 if (netisr & (1 << NETISR_ARP)) {
1275 netisr &= ~(1 << NETISR_ARP);
1276 arpintr();
1277 }
1278 #endif
1279 if (netisr & (1 << NETISR_IP)) {
1280 netisr &= ~(1 << NETISR_IP);
1281 ipintr();
1282 }
1283 #endif
1284 #ifdef NS
1285 if (netisr & (1 << NETISR_NS)) {
1286 netisr &= ~(1 << NETISR_NS);
1287 nsintr();
1288 }
1289 #endif
1290 #ifdef ISO
1291 if (netisr & (1 << NETISR_ISO)) {
1292 netisr &= ~(1 << NETISR_ISO);
1293 clnlintr();
1294 }
1295 #endif
1296 #ifdef CCITT
1297 if (netisr & (1 << NETISR_CCITT)) {
1298 netisr &= ~(1 << NETISR_CCITT);
1299 ccittintr();
1300 }
1301 #endif
1302 #ifdef PPP
1303 if (netisr & (1 << NETISR_PPP)) {
1304 netisr &= ~(1 << NETISR_PPP);
1305 pppintr();
1306 }
1307 #endif
1308 }
1309
1310 void
1311 do_sir()
1312 {
1313
1314 if (ssir & SIR_NET) {
1315 siroff(SIR_NET);
1316 cnt.v_soft++;
1317 netintr();
1318 }
1319 if (ssir & SIR_CLOCK) {
1320 siroff(SIR_CLOCK);
1321 cnt.v_soft++;
1322 softclock();
1323 }
1324 }
1325
1326 int
1327 spl0()
1328 {
1329
1330 if (ssir) {
1331 splsoft();
1332 do_sir();
1333 }
1334
1335 return (pal_swpipl(PSL_IPL_0));
1336 }
1337
1338 /*
1339 * The following primitives manipulate the run queues. _whichqs tells which
1340 * of the 32 queues _qs have processes in them. Setrunqueue puts processes
1341 * into queues, Remrq removes them from queues. The running process is on
1342 * no queue, other processes are on a queue related to p->p_priority, divided
1343 * by 4 actually to shrink the 0-127 range of priorities into the 32 available
1344 * queues.
1345 */
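/*
 * For example, a process with p_priority 50 hashes to run queue
 * 50 >> 2 == 12, and bit 12 of whichqs marks that queue as non-empty.
 */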
1346 /*
1347 * setrunqueue(p)
1348 * proc *p;
1349 *
1350 * Call should be made at splclock(), and p->p_stat should be SRUN.
1351 */
1352
1353 void
1354 setrunqueue(p)
1355 struct proc *p;
1356 {
1357 int bit;
1358
1359 /* firewall: p->p_back must be NULL */
1360 if (p->p_back != NULL)
1361 panic("setrunqueue");
1362
1363 bit = p->p_priority >> 2;
1364 whichqs |= (1 << bit);
1365 p->p_forw = (struct proc *)&qs[bit];
1366 p->p_back = qs[bit].ph_rlink;
1367 p->p_back->p_forw = p;
1368 qs[bit].ph_rlink = p;
1369 }
1370
1371 /*
1372 * Remrq(p)
1373 *
1374 * Call should be made at splclock().
1375 */
1376 void
1377 remrq(p)
1378 struct proc *p;
1379 {
1380 int bit;
1381
1382 bit = p->p_priority >> 2;
1383 if ((whichqs & (1 << bit)) == 0)
1384 panic("remrq");
1385
1386 p->p_back->p_forw = p->p_forw;
1387 p->p_forw->p_back = p->p_back;
1388 p->p_back = NULL; /* for firewall checking. */
1389
1390 if ((struct proc *)&qs[bit] == qs[bit].ph_link)
1391 whichqs &= ~(1 << bit);
1392 }
1393
1394 /*
1395 * Return the best possible estimate of the time in the timeval
1396 * to which tvp points. Unfortunately, we can't read the hardware registers.
1397 * We guarantee that the time will be greater than the value obtained by a
1398 * previous call.
1399 */
1400 void
1401 microtime(tvp)
1402 register struct timeval *tvp;
1403 {
1404 int s = splclock();
1405 static struct timeval lasttime;
1406
1407 *tvp = time;
1408 #ifdef notdef
1409 tvp->tv_usec += clkread();
1410 while (tvp->tv_usec > 1000000) {
1411 tvp->tv_sec++;
1412 tvp->tv_usec -= 1000000;
1413 }
1414 #endif
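/*
 * If the clock did not visibly advance since the last call, bump
 * tv_usec by one (carrying into tv_sec if needed) so that successive
 * calls never return the same or an earlier time.
 */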
1415 if (tvp->tv_sec == lasttime.tv_sec &&
1416 tvp->tv_usec <= lasttime.tv_usec &&
1417 (tvp->tv_usec = lasttime.tv_usec + 1) >= 1000000) {
1418 tvp->tv_sec++;
1419 tvp->tv_usec -= 1000000;
1420 }
1421 lasttime = *tvp;
1422 splx(s);
1423 }
1424
1425 /*
1426 * Wait "n" microseconds.
1427 */
1428 int
1429 delay(n)
1430 int n;
1431 {
1432 long N = cycles_per_usec * (n);
1433
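/*
 * Crude busy-wait: N is the cycle budget for n microseconds, and the
 * countdown assumes the loop body burns roughly three cycles per
 * iteration (hence the XXX).
 */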
1434 while (N > 0) /* XXX */
1435 N -= 3; /* XXX */
1436 }
1437
1438 #if defined(COMPAT_OSF1) || 1 /* XXX */
1439 void
1440 cpu_exec_ecoff_setregs(p, epp, stack, retval)
1441 struct proc *p;
1442 struct exec_package *epp;
1443 u_long stack;
1444 register_t *retval;
1445 {
1446 struct ecoff_exechdr *execp = (struct ecoff_exechdr *)epp->ep_hdr;
1447
1448 setregs(p, epp, stack, retval);
1449 p->p_md.md_tf->tf_gp = execp->a.gp_value;
1450 }
1451
1452 /*
1453 * cpu_exec_ecoff_hook():
1454 * cpu-dependent ECOFF format hook for execve().
1455 *
1456 * Do any machine-dependent diddling of the exec package when doing ECOFF.
1457 *
1458 */
1459 int
1460 cpu_exec_ecoff_hook(p, epp)
1461 struct proc *p;
1462 struct exec_package *epp;
1463 {
1464 struct ecoff_exechdr *execp = (struct ecoff_exechdr *)epp->ep_hdr;
1465 extern struct emul emul_netbsd;
1466 #ifdef COMPAT_OSF1
1467 extern struct emul emul_osf1;
1468 #endif
1469
1470 switch (execp->f.f_magic) {
1471 #ifdef COMPAT_OSF1
1472 case ECOFF_MAGIC_ALPHA:
1473 epp->ep_emul = &emul_osf1;
1474 break;
1475 #endif
1476
1477 case ECOFF_MAGIC_NETBSD_ALPHA:
1478 epp->ep_emul = &emul_netbsd;
1479 break;
1480
1481 default:
1482 return ENOEXEC;
1483 }
1484 return 0;
1485 }
1486 #endif
1487
1488 vm_offset_t
1489 vtophys(vaddr)
1490 vm_offset_t vaddr;
1491 {
1492 vm_offset_t paddr;
1493
1494 if (vaddr < K0SEG_BEGIN) {
1495 printf("vtophys: invalid vaddr 0x%lx\n", vaddr);
1496 paddr = vaddr;
1497 } else if (vaddr < K0SEG_END)
1498 paddr = k0segtophys(vaddr);
1499 else
1500 paddr = vatopa(vaddr);
1501
1502 #if 0
1503 printf("vtophys(0x%lx) -> %lx\n", vaddr, paddr);
1504 #endif
1505
1506 return (paddr);
1507 }
1508