/* $NetBSD: machdep.c,v 1.40 1999/02/27 06:39:37 scottr Exp $ */

/*
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1982, 1986, 1990, 1993
 * The Regents of the University of California. All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *    This product includes software developed by the University of
 *    California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: Utah Hdr: machdep.c 1.74 92/12/20
 * from: @(#)machdep.c 8.10 (Berkeley) 4/20/94
 */

#include "opt_bufcache.h"
#include "opt_ddb.h"
#include "opt_uvm.h"
#include "opt_sysv.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/map.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/reboot.h>
#include <sys/conf.h>
#include <sys/file.h>
#include <sys/clist.h>
#include <sys/callout.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/msgbuf.h>
#include <sys/ioctl.h>
#include <sys/tty.h>
#include <sys/mount.h>
#include <sys/user.h>
#include <sys/exec.h>
#include <sys/core.h>
#include <sys/kcore.h>
#include <sys/vnode.h>
#include <sys/syscallargs.h>
#ifdef SYSVMSG
#include <sys/msg.h>
#endif
#ifdef SYSVSEM
#include <sys/sem.h>
#endif
#ifdef SYSVSHM
#include <sys/shm.h>
#endif
#ifdef KGDB
#include <sys/kgdb.h>
#endif

#include <vm/vm.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>

#if defined(UVM)
#include <uvm/uvm.h> /* XXX: not _extern ... need vm_map_create */
#endif

#include <sys/sysctl.h>

#include <dev/cons.h>

#include <machine/cpu.h>
#include <machine/dvma.h>
#include <machine/idprom.h>
#include <machine/kcore.h>
#include <machine/reg.h>
#include <machine/psl.h>
#include <machine/pte.h>

#include <machine/db_machdep.h>
#include <ddb/db_sym.h>
#include <ddb/db_extern.h>

#include <sun3/sun3/machdep.h>

/* Defined in locore.s */
extern char kernel_text[];
/* Defined by the linker */
extern char etext[];

#if defined(UVM)
/* XXX - Gratuitous name changes... */
#define kmem_alloc uvm_km_alloc
vm_map_t exec_map = NULL;
vm_map_t mb_map = NULL;
vm_map_t phys_map = NULL;
#else
vm_map_t buffer_map;
#endif

int physmem;
int fputype;
caddr_t msgbufaddr;

/* Virtual page frame for /dev/mem (see mem.c) */
vm_offset_t vmmap;

/*
 * safepri is a safe priority for sleep to set for a spin-wait
 * during autoconfiguration or after a panic.
 */
int safepri = PSL_LOWIPL;

/*
 * Declare these as initialized data so we can patch them.
 */
int nswbuf = 0;
#ifdef NBUF
int nbuf = NBUF;
#else
int nbuf = 0;
#endif
#ifdef BUFPAGES
int bufpages = BUFPAGES;
#else
int bufpages = 0;
#endif

u_char cpu_machine_id = 0;
char *cpu_string = NULL;
int cpu_has_vme = 0;
int has_iocache = 0;

static void identifycpu __P((void));
static void initcpu __P((void));

/*
 * Console initialization: called early on from main,
 * before vm init or cpu_startup. This system is able
 * to use the console for output immediately (via PROM)
 * but can not use it for input until after this point.
 */
void
consinit()
{

	/*
	 * Switch from the PROM console (output only)
	 * to our own console driver.
	 */
	cninit();

#ifdef DDB
	db_machine_init();
	{
		extern int end[];
		extern char *esym;

		/* symsize, symstart, symend */
		ddb_init(end[0], end + 1, (int*)esym);
	}
#endif /* DDB */

	/*
	 * Now that the console can do input as well as
	 * output, consider stopping for a debugger.
	 */
	if (boothowto & RB_KDB) {
#ifdef KGDB
		/* XXX - Ask on console for kgdb_dev? */
		/* Note: this will just return if kgdb_dev==NODEV */
		kgdb_connect(1);
#else /* KGDB */
		/* Either DDB or no debugger (just PROM). */
		Debugger();
#endif /* KGDB */
	}
}

/*
 * allocsys() - Private routine used by cpu_startup() below.
 *
 * Allocate space for system data structures. We are given
 * a starting virtual address and we return a final virtual
 * address; along the way we set each data structure pointer.
 *
 * We call allocsys() with 0 to find out how much space we want,
 * allocate that much and fill it with zeroes, and then call
 * allocsys() again with the correct base virtual address.
 */
#define valloc(name, type, num) \
	v = (caddr_t)(((name) = (type *)v) + (num))
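/*
 * For example, valloc(buf, struct buf, nbuf) sets "buf" to the current
 * value of v and then advances v by nbuf * sizeof(struct buf) bytes,
 * so one invocation both assigns a table pointer and reserves its space.
 */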
static caddr_t allocsys __P((caddr_t));
static caddr_t
allocsys(v)
	register caddr_t v;
{

#ifdef REAL_CLISTS
	valloc(cfree, struct cblock, nclist);
#endif
	valloc(callout, struct callout, ncallout);
#ifdef SYSVSHM
	valloc(shmsegs, struct shmid_ds, shminfo.shmmni);
#endif
#ifdef SYSVSEM
	valloc(sema, struct semid_ds, seminfo.semmni);
	valloc(sem, struct sem, seminfo.semmns);
	/* This is pretty disgusting! */
	valloc(semu, int, (seminfo.semmnu * seminfo.semusz) / sizeof(int));
#endif
#ifdef SYSVMSG
	valloc(msgpool, char, msginfo.msgmax);
	valloc(msgmaps, struct msgmap, msginfo.msgseg);
	valloc(msghdrs, struct msg, msginfo.msgtql);
	valloc(msqids, struct msqid_ds, msginfo.msgmni);
#endif

	/*
	 * Determine how many buffers to allocate. We follow the BSD
	 * standard: 10% of memory for the first 2 MB, 5% of the
	 * remainder. Ensure a minimum of 16 buffers.
	 * Allocate 1/2 as many swap buffer headers as file i/o buffers.
	 */
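	/*
	 * The expression below is that policy folded into one step:
	 * a/10 + (m - a)/20 == (a + m)/20, where a = btoc(2 MB) and
	 * m = physmem (both in pages); the extra CLSIZE factor converts
	 * the result from pages to buffer-cache clusters.
	 */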
	if (bufpages == 0) {
		/* We always have more than 2MB of memory. */
		bufpages = ((btoc(2 * 1024 * 1024) + physmem) /
		    (20 * CLSIZE));
	}
	if (nbuf == 0) {
		nbuf = bufpages;
		if (nbuf < 16)
			nbuf = 16;
	}
	if (nswbuf == 0) {
		nswbuf = (nbuf / 2) &~ 1; /* force even */
		if (nswbuf > 256)
			nswbuf = 256; /* sanity */
	}
#if !defined(UVM)
	valloc(swbuf, struct buf, nswbuf);
#endif
	valloc(buf, struct buf, nbuf);
	return v;
}
#undef valloc

/*
 * cpu_startup: allocate memory for variable-sized tables,
 * initialize cpu, and do autoconfiguration.
 *
 * This is called early in init_main.c:main(), after the
 * kernel memory allocator is ready for use, but before
 * the creation of processes 1, 2, and mountroot, etc.
 */
void
cpu_startup()
{
	caddr_t v;
	int sz, i;
	vm_size_t size;
	int base, residual;
	vm_offset_t minaddr, maxaddr;

	/*
	 * Initialize message buffer (for kernel printf).
	 * This is put in physical page zero so it will
	 * always be in the same place after a reboot.
	 * Its mapping was prepared in pmap_bootstrap().
	 * Also, offset some to avoid PROM scribbles.
	 */
	v = (caddr_t) KERNBASE;
	msgbufaddr = (caddr_t)(v + MSGBUFOFF);
	initmsgbuf(msgbufaddr, MSGBUFSIZE);

	/*
	 * Good {morning,afternoon,evening,night}.
	 */
	printf(version);
	identifycpu();
	initfpu(); /* also prints FPU type */

	size = ptoa(physmem);
	printf("real mem = %ldK (0x%lx)\n", (size >> 10), size);

	/*
	 * Find out how much space we need, allocate it,
	 * and then give everything true virtual addresses.
	 */
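	/*
	 * Note: allocsys((caddr_t)0) only performs the valloc() pointer
	 * arithmetic, so its return value, taken as an integer, is the
	 * total number of bytes the tables will need.
	 */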
	sz = (int)allocsys((caddr_t)0);
	if ((v = (caddr_t)kmem_alloc(kernel_map, round_page(sz))) == 0)
		panic("startup: no room for tables");
	if (allocsys(v) - v != sz)
		panic("startup: table size inconsistency");

	/*
	 * Now allocate buffers proper. They are different than the above
	 * in that they usually occupy more virtual memory than physical.
	 */
	size = MAXBSIZE * nbuf;
#if defined(UVM)
	if (uvm_map(kernel_map, (vm_offset_t *) &buffers, round_page(size),
		    NULL, UVM_UNKNOWN_OFFSET,
		    UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE,
				UVM_ADV_NORMAL, 0)) != KERN_SUCCESS)
		panic("startup: cannot allocate VM for buffers");
	minaddr = (vm_offset_t)buffers;
#else
	buffer_map = kmem_suballoc(kernel_map, (vm_offset_t *)&buffers,
	    &maxaddr, size, TRUE);
	minaddr = (vm_offset_t)buffers;
	if (vm_map_find(buffer_map, vm_object_allocate(size), (vm_offset_t)0,
	    &minaddr, size, FALSE) != KERN_SUCCESS)
		panic("startup: cannot allocate buffers");
#endif /* UVM */
	if ((bufpages / nbuf) >= btoc(MAXBSIZE)) {
		/* don't want to alloc more physical mem than needed */
		bufpages = btoc(MAXBSIZE) * nbuf;
	}
	base = bufpages / nbuf;
	residual = bufpages % nbuf;
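	/*
	 * Worked example (illustrative): with bufpages = 35 and nbuf = 16,
	 * base = 2 and residual = 3, so the first 3 buffers get 3 clusters
	 * of physical memory each and the remaining 13 get 2.
	 */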
	for (i = 0; i < nbuf; i++) {
#if defined(UVM)
		vm_size_t curbufsize;
		vm_offset_t curbuf;
		struct vm_page *pg;

		/*
		 * Each buffer has MAXBSIZE bytes of VM space allocated. Of
		 * that MAXBSIZE space, we allocate and map (base+1) pages
		 * for the first "residual" buffers, and then we allocate
		 * "base" pages for the rest.
		 */
		curbuf = (vm_offset_t) buffers + (i * MAXBSIZE);
		curbufsize = CLBYTES * ((i < residual) ? (base+1) : base);

		while (curbufsize) {
			pg = uvm_pagealloc(NULL, 0, NULL);
			if (pg == NULL)
				panic("cpu_startup: not enough memory for "
				    "buffer cache");
#if defined(PMAP_NEW)
			pmap_kenter_pgs(curbuf, &pg, 1);
#else
			pmap_enter(kernel_map->pmap, curbuf,
			    VM_PAGE_TO_PHYS(pg), VM_PROT_ALL, TRUE);
#endif
			curbuf += PAGE_SIZE;
			curbufsize -= PAGE_SIZE;
		}
#else /* ! UVM */
		vm_size_t curbufsize;
		vm_offset_t curbuf;

		/*
		 * First <residual> buffers get (base+1) physical pages
		 * allocated for them. The rest get (base) physical pages.
		 *
		 * The rest of each buffer occupies virtual space,
		 * but has no physical memory allocated for it.
		 */
		curbuf = (vm_offset_t)buffers + i * MAXBSIZE;
		curbufsize = CLBYTES * (i < residual ? base+1 : base);
		vm_map_pageable(buffer_map, curbuf, curbuf+curbufsize, FALSE);
		vm_map_simplify(buffer_map, curbuf);
#endif /* UVM */
	}

	/*
	 * Allocate a submap for exec arguments. This map effectively
	 * limits the number of processes exec'ing at any time.
	 */
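	/*
	 * Each exec may use up to NCARGS bytes of argument space, so a
	 * submap of 16*NCARGS allows roughly 16 execs in progress at once.
	 */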
#if defined(UVM)
	exec_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
	    16*NCARGS, TRUE, FALSE, NULL);
#else
	exec_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
	    16*NCARGS, TRUE);
#endif

	/*
	 * We don't use a submap for physio, and use a separate map
	 * for DVMA allocations. Our vmapbuf just maps pages into
	 * the kernel map (any kernel mapping is OK) and then the
	 * device drivers clone the kernel mappings into DVMA space.
	 */

	/*
	 * Finally, allocate mbuf cluster submap.
	 */
#if defined(UVM)
	mb_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
	    VM_MBUF_SIZE, FALSE, FALSE, NULL);
#else
	mb_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
	    VM_MBUF_SIZE, FALSE);
#endif

	/*
	 * Initialize callouts
	 */
	callfree = callout;
	for (i = 1; i < ncallout; i++)
		callout[i-1].c_next = &callout[i];
	callout[i-1].c_next = NULL;

#if defined(UVM)
	size = ptoa(uvmexp.free);
#else
	size = ptoa(cnt.v_free_count);
#endif
	printf("avail mem = %ldK (0x%lx)\n", (size >> 10), size);
	printf("using %d buffers containing %d bytes of memory\n",
	    nbuf, bufpages * CLBYTES);

	/*
	 * Tell the VM system that writing to kernel text isn't allowed.
	 * If we don't, we might end up COW'ing the text segment!
	 */
#if defined(UVM)
	if (uvm_map_protect(kernel_map, (vm_offset_t) kernel_text,
	    m68k_trunc_page((vm_offset_t) etext),
	    UVM_PROT_READ|UVM_PROT_EXEC, TRUE) != KERN_SUCCESS)
		panic("can't protect kernel text");
#else
	if (vm_map_protect(kernel_map, (vm_offset_t) kernel_text,
	    m68k_trunc_page((vm_offset_t) etext),
	    VM_PROT_READ|VM_PROT_EXECUTE, TRUE) != KERN_SUCCESS)
		panic("can't protect kernel text");
#endif

	/*
	 * Allocate a virtual page (for use by /dev/mem)
	 * This page is handed to pmap_enter() therefore
	 * it has to be in the normal kernel VA range.
	 */
#if defined(UVM)
	vmmap = uvm_km_valloc_wait(kernel_map, NBPG);
#else
	vmmap = kmem_alloc_wait(kernel_map, NBPG);
#endif

	/*
	 * Create the DVMA maps.
	 */
	dvma_init();

	/*
	 * Set up CPU-specific registers, cache, etc.
	 */
	initcpu();

	/*
	 * Set up buffers, so they can be used to read disk labels.
	 */
	bufinit();

	/*
	 * Configure the system.
	 */
	configure();
}

/*
 * Set registers on exec.
 */
void
setregs(p, pack, stack)
	struct proc *p;
	struct exec_package *pack;
	u_long stack;
{
	struct trapframe *tf = (struct trapframe *)p->p_md.md_regs;

	tf->tf_sr = PSL_USERSET;
	tf->tf_pc = pack->ep_entry & ~1;
	tf->tf_regs[D0] = 0;
	tf->tf_regs[D1] = 0;
	tf->tf_regs[D2] = 0;
	tf->tf_regs[D3] = 0;
	tf->tf_regs[D4] = 0;
	tf->tf_regs[D5] = 0;
	tf->tf_regs[D6] = 0;
	tf->tf_regs[D7] = 0;
	tf->tf_regs[A0] = 0;
	tf->tf_regs[A1] = 0;
	tf->tf_regs[A2] = (int)PS_STRINGS;
	tf->tf_regs[A3] = 0;
	tf->tf_regs[A4] = 0;
	tf->tf_regs[A5] = 0;
	tf->tf_regs[A6] = 0;
	tf->tf_regs[SP] = stack;

	/* restore a null state frame */
	p->p_addr->u_pcb.pcb_fpregs.fpf_null = 0;
	if (fputype)
		m68881_restore(&p->p_addr->u_pcb.pcb_fpregs);

	p->p_md.md_flags = 0;
}

/*
 * Info for CTL_HW
 */
char machine[16] = MACHINE; /* from <machine/param.h> */
char cpu_model[120];

/*
 * XXX - Should empirically estimate the divisor...
 * Note that the value of delay_divisor is roughly
 * 2048 / cpuclock (where cpuclock is in MHz).
 */
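/*
 * Worked example: 2048 / 33 is about 62 and 2048 / 20 is about 102,
 * matching the values assigned for the 3/470 and 3/80 in identifycpu().
 */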
int delay_divisor = 62; /* assume the fastest (33 MHz) */

void
identifycpu()
{
	u_char machtype;

	machtype = identity_prom.idp_machtype;
	if ((machtype & IDM_ARCH_MASK) != IDM_ARCH_SUN3X) {
		printf("Bad IDPROM arch!\n");
		sunmon_abort();
	}

	cpu_machine_id = machtype;
	switch (cpu_machine_id) {

	case SUN3X_MACH_80:
		cpu_string = "80"; /* Hydra */
		delay_divisor = 102; /* 20 MHz */
		cpu_has_vme = FALSE;
		break;

	case SUN3X_MACH_470:
		cpu_string = "470"; /* Pegasus */
		delay_divisor = 62; /* 33 MHz */
		cpu_has_vme = TRUE;
		break;

	default:
		printf("unknown sun3x model\n");
		sunmon_abort();
	}

	/* Other stuff? (VAC, mc6888x version, etc.) */
	sprintf(cpu_model, "Sun-3X (3/%s)", cpu_string);

	printf("Model: %s\n", cpu_model);
}

/*
 * machine dependent system variables.
 */
int
cpu_sysctl(name, namelen, oldp, oldlenp, newp, newlen, p)
	int *name;
	u_int namelen;
	void *oldp;
	size_t *oldlenp;
	void *newp;
	size_t newlen;
	struct proc *p;
{
	int error;
	dev_t consdev;

	/* all sysctl names at this level are terminal */
	if (namelen != 1)
		return (ENOTDIR); /* overloaded */

	switch (name[0]) {
	case CPU_CONSDEV:
		if (cn_tab != NULL)
			consdev = cn_tab->cn_dev;
		else
			consdev = NODEV;
		error = sysctl_rdstruct(oldp, oldlenp, newp,
		    &consdev, sizeof consdev);
		break;

#if 0 /* XXX - Not yet... */
	case CPU_ROOT_DEVICE:
		error = sysctl_rdstring(oldp, oldlenp, newp, root_device);
		break;

	case CPU_BOOTED_KERNEL:
		error = sysctl_rdstring(oldp, oldlenp, newp, booted_kernel);
		break;
#endif

	default:
		error = EOPNOTSUPP;
	}
	return (error);
}

/* See: sig_machdep.c */

/*
 * Do a sync in preparation for a reboot.
 * XXX - This could probably be common code.
 * XXX - And now, most of it is in vfs_shutdown()
 * XXX - Put waittime checks in there too?
 */
int waittime = -1; /* XXX - Who else looks at this? -gwr */
static void
reboot_sync __P((void))
{

	/* Check waittime here to localize its use to this function. */
	if (waittime >= 0)
		return;
	waittime = 0;
	vfs_shutdown();
}

/*
 * Common part of the BSD and SunOS reboot system calls.
 */
__dead void
cpu_reboot(howto, user_boot_string)
	int howto;
	char *user_boot_string;
{
	/* Note: this string MUST be static! */
	static char bootstr[128];
	char *p;

	/* If system is cold, just halt. (early panic?) */
	if (cold)
		goto haltsys;

	/* Un-blank the screen if appropriate. */
	cnpollc(1);

	if ((howto & RB_NOSYNC) == 0) {
		reboot_sync();
		/*
		 * If we've been adjusting the clock, the todr
		 * will be out of synch; adjust it now.
		 *
		 * XXX - However, if the kernel has been sitting in ddb,
		 * the time will be way off, so don't set the HW clock!
		 * XXX - Should do sanity check against HW clock. -gwr
		 */
		/* resettodr(); */
	}

	/* Disable interrupts. */
	splhigh();

	/* Write out a crash dump if asked. */
	if (howto & RB_DUMP)
		dumpsys();

	/* run any shutdown hooks */
	doshutdownhooks();

	if (howto & RB_HALT) {
haltsys:
		printf("Kernel halted.\n");
#if 0
		/*
		 * This calls the PROM monitor "exit_to_mon" function
		 * which appears to have problems... SunOS uses the
		 * "abort" function when you halt (bug work-around?)
		 * so we might as well do the same.
		 */
		sunmon_halt(); /* provokes PROM monitor bug */
#else
		sunmon_abort();
#endif
	}

	/*
	 * Automatic reboot.
	 */
	if (user_boot_string)
		strncpy(bootstr, user_boot_string, sizeof(bootstr));
	else {
		/*
		 * Build our own boot string with an empty
		 * boot device/file and (maybe) some flags.
		 * The PROM will supply the device/file name.
		 */
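		/*
		 * For example, a reboot with RB_SINGLE set and no user
		 * string produces bootstr = " -s"; the PROM supplies the
		 * boot device/file name itself.
		 */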
		p = bootstr;
		*p = '\0';
		if (howto & (RB_KDB|RB_ASKNAME|RB_SINGLE)) {
			/* Append the boot flags. */
			*p++ = ' ';
			*p++ = '-';
			if (howto & RB_KDB)
				*p++ = 'd';
			if (howto & RB_ASKNAME)
				*p++ = 'a';
			if (howto & RB_SINGLE)
				*p++ = 's';
			*p = '\0';
		}
	}
	printf("Kernel rebooting...\n");
	sunmon_reboot(bootstr);
	for (;;) ;
	/*NOTREACHED*/
}

/*
 * These variables are needed by /sbin/savecore
 */
u_long dumpmag = 0x8fca0101; /* magic number */
int dumpsize = 0; /* pages */
long dumplo = 0; /* blocks */

/*
 * This is called by main to set dumplo, dumpsize.
 * Dumps always skip the first CLBYTES of disk space
 * in case there might be a disk label stored there.
 * If there is extra space, put dump at the end to
 * reduce the chance that swapping trashes it.
 */
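/*
 * Unit note: dumpsize is counted in pages and dumplo in disk blocks;
 * ctod() and dtoc() below convert between the two.
 */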
void
cpu_dumpconf()
{
	int nblks; /* size of dump area */
	int maj;
	int (*getsize)__P((dev_t));

	/* Validate space in page zero for the kcore header. */
	if (MSGBUFOFF < (sizeof(kcore_seg_t) + sizeof(cpu_kcore_hdr_t)))
		panic("cpu_dumpconf: MSGBUFOFF too small");

	if (dumpdev == NODEV)
		return;

	maj = major(dumpdev);
	if (maj < 0 || maj >= nblkdev)
		panic("dumpconf: bad dumpdev=0x%x", dumpdev);
	getsize = bdevsw[maj].d_psize;
	if (getsize == NULL)
		return;
	nblks = (*getsize)(dumpdev);
	if (nblks <= ctod(1))
		return;

	/* Position dump image near end of space, page aligned. */
	dumpsize = physmem; /* pages */
	dumplo = nblks - ctod(dumpsize);
	dumplo &= ~(ctod(1)-1);

	/* If it does not fit, truncate it by moving dumplo. */
	/* Note: Must force signed comparison. */
	if (dumplo < ((long)ctod(1))) {
		dumplo = ctod(1);
		dumpsize = dtoc(nblks - dumplo);
	}
}

/* Note: gdb looks for "dumppcb" in a kernel crash dump. */
struct pcb dumppcb;

/*
 * Write a crash dump. The format while in swap is:
 * kcore_seg_t cpu_hdr;
 * cpu_kcore_hdr_t cpu_data;
 * padding (NBPG-sizeof(kcore_seg_t))
 * pagemap (2*NBPG)
 * physical memory...
 */
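/*
 * Note: cpu_dumpconf() above checks that MSGBUFOFF is at least
 * sizeof(kcore_seg_t) + sizeof(cpu_kcore_hdr_t), so the headers built
 * below fit in page zero ahead of the message buffer.
 */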
void
dumpsys()
{
	struct bdevsw *dsw;
	kcore_seg_t *kseg_p;
	cpu_kcore_hdr_t *chdr_p;
	struct sun3x_kcore_hdr *sh;
	phys_ram_seg_t *crs_p;
	char *vaddr;
	vm_offset_t paddr;
	int psize, todo, seg, segsz;
	daddr_t blkno;
	int error = 0;

	msgbufenabled = 0;
	if (dumpdev == NODEV)
		return;

	/*
	 * For dumps during autoconfiguration,
	 * if the dump device has already been configured...
	 */
	if (dumpsize == 0)
		cpu_dumpconf();
	if (dumplo <= 0) {
		printf("\ndump to dev %u,%u not possible\n", major(dumpdev),
		    minor(dumpdev));
		return;
	}
	savectx(&dumppcb);

	dsw = &bdevsw[major(dumpdev)];
	psize = (*(dsw->d_psize))(dumpdev);
	if (psize == -1) {
		printf("dump area unavailable\n");
		return;
	}

	printf("\ndumping to dev %u,%u offset %ld\n", major(dumpdev),
	    minor(dumpdev), dumplo);

	/*
	 * We put the dump header in physical page zero,
	 * so there is no extra work here to write it out.
	 * All we do is initialize the header.
	 */

	/* Set pointers to all three parts. */
	kseg_p = (kcore_seg_t *)KERNBASE;
	chdr_p = (cpu_kcore_hdr_t *) (kseg_p + 1);
	sh = &chdr_p->un._sun3x;

	/* Fill in kcore_seg_t part. */
	CORE_SETMAGIC(*kseg_p, KCORE_MAGIC, MID_MACHINE, CORE_CPU);
	kseg_p->c_size = sizeof(*chdr_p);

	/* Fill in cpu_kcore_hdr_t part. */
	/* Can NOT use machine[] as the name! */
	strncpy(chdr_p->name, "sun3x", sizeof(chdr_p->name));
	chdr_p->page_size = NBPG;
	chdr_p->kernbase = KERNBASE;

	/* Fill in the sun3x_kcore_hdr part. */
	pmap_kcore_hdr(sh);

	/*
	 * Now dump physical memory. Note that physical memory
	 * might NOT be contiguous, so do it by segments.
	 */

	blkno = dumplo;
	todo = dumpsize; /* pages */
	vaddr = (char*)vmmap; /* Borrow /dev/mem VA */

	for (seg = 0; seg < SUN3X_NPHYS_RAM_SEGS; seg++) {
		crs_p = &sh->ram_segs[seg];
		paddr = crs_p->start;
		segsz = crs_p->size;
		/*
		 * Our header lives in the first little bit of
		 * physical memory (not written separately), so
		 * we have to adjust the first ram segment size
		 * and start address to reflect the stolen RAM.
		 * (Nothing interesting in that RAM anyway 8^).
		 */
		if (seg == 0) {
			int adj = sizeof(*kseg_p) + sizeof(*chdr_p);
			crs_p->start += adj;
			crs_p->size -= adj;
		}

		while (todo && (segsz > 0)) {

			/* Print pages left after every 16. */
			if ((todo & 0xf) == 0)
				printf("\r%4d", todo);

			/* Make a temporary mapping for the page. */
			pmap_enter(pmap_kernel(), vmmap, paddr | PMAP_NC,
			    VM_PROT_READ, FALSE);
			error = (*dsw->d_dump)(dumpdev, blkno, vaddr, NBPG);
			pmap_remove(pmap_kernel(), vmmap, vmmap + NBPG);
			if (error)
				goto fail;
			paddr += NBPG;
			segsz -= NBPG;
			blkno += btodb(NBPG);
			todo--;
		}
	}
	printf("\rdump succeeded\n");
	return;
fail:
	printf(" dump error=%d\n", error);
}

static void
initcpu()
{
	/* XXX: Enable RAM parity/ECC checking? */
	/* XXX: parityenable(); */

#ifdef HAVECACHE
	cache_enable();
#endif
}

/* straptrap() in trap.c */

/* from hp300: badaddr() */
/* peek_byte(), peek_word() moved to bus_subr.c */

/* XXX: parityenable() ? */
/* regdump() moved to regdump.c */

/*
 * cpu_exec_aout_makecmds():
 * cpu-dependent a.out format hook for execve().
 *
 * Determine if the given exec package refers to something which we
 * understand and, if so, set up the vmcmds for it.
 */
int
cpu_exec_aout_makecmds(p, epp)
	struct proc *p;
	struct exec_package *epp;
{
	return ENOEXEC;
}