/*	$NetBSD: kern_subr.c,v 1.123 2005/12/27 04:06:46 chs Exp $	*/

/*-
 * Copyright (c) 1997, 1998, 1999, 2002 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center, and by Luke Mewburn.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This software was developed by the Computer Systems Engineering group
 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
 * contributed to Berkeley.
 *
 * All advertising materials mentioning features or use of this software
 * must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Lawrence Berkeley Laboratory.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_subr.c	8.4 (Berkeley) 2/14/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_subr.c,v 1.123 2005/12/27 04:06:46 chs Exp $");

#include "opt_ddb.h"
#include "opt_md.h"
#include "opt_syscall_debug.h"
#include "opt_ktrace.h"
#include "opt_systrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/device.h>
#include <sys/reboot.h>
#include <sys/conf.h>
#include <sys/disklabel.h>
#include <sys/queue.h>
#include <sys/systrace.h>
#include <sys/ktrace.h>
#include <sys/fcntl.h>

#include <uvm/uvm_extern.h>

#include <dev/cons.h>

#include <net/if.h>

/* XXX these should eventually move to subr_autoconf.c */
static struct device *finddevice(const char *);
static struct device *getdisk(char *, int, int, dev_t *, int);
static struct device *parsedisk(char *, int, int, dev_t *);

/*
 * A generic linear hook.
 */
struct hook_desc {
	LIST_ENTRY(hook_desc) hk_list;
	void	(*hk_fn)(void *);
	void	*hk_arg;
};
typedef LIST_HEAD(, hook_desc) hook_list_t;

MALLOC_DEFINE(M_IOV, "iov", "large iov's");

int
uiomove(void *buf, size_t n, struct uio *uio)
{
	struct iovec *iov;
	u_int cnt;
	int error = 0;
	char *cp = buf;
	struct lwp *l;
	struct proc *p;
	int hold_count;

	hold_count = KERNEL_LOCK_RELEASE_ALL();

#ifdef LOCKDEBUG
	spinlock_switchcheck();
	simple_lock_only_held(NULL, "uiomove");
#endif

#ifdef DIAGNOSTIC
	if (uio->uio_rw != UIO_READ && uio->uio_rw != UIO_WRITE)
		panic("uiomove: mode");
#endif
	while (n > 0 && uio->uio_resid) {
		iov = uio->uio_iov;
		cnt = iov->iov_len;
		if (cnt == 0) {
			KASSERT(uio->uio_iovcnt > 0);
			uio->uio_iov++;
			uio->uio_iovcnt--;
			continue;
		}
		if (cnt > n)
			cnt = n;
		switch (uio->uio_segflg) {

		case UIO_USERSPACE:
			l = uio->uio_lwp;
			p = l ? l->l_proc : NULL;

			if (curcpu()->ci_schedstate.spc_flags &
			    SPCF_SHOULDYIELD)
				preempt(1);
			if (uio->uio_rw == UIO_READ)
				error = copyout_proc(p, cp, iov->iov_base, cnt);
			else
				error = copyin_proc(p, iov->iov_base, cp, cnt);
			if (error)
				goto out;
			break;

		case UIO_SYSSPACE:
			if (uio->uio_rw == UIO_READ)
				error = kcopy(cp, iov->iov_base, cnt);
			else
				error = kcopy(iov->iov_base, cp, cnt);
			if (error)
				goto out;
			break;
		}
		iov->iov_base = (caddr_t)iov->iov_base + cnt;
		iov->iov_len -= cnt;
		uio->uio_resid -= cnt;
		uio->uio_offset += cnt;
		cp += cnt;
		KDASSERT(cnt <= n);
		n -= cnt;
	}
 out:
	KERNEL_LOCK_ACQUIRE_COUNT(hold_count);
	return (error);
}

/*
 * Wrapper for uiomove() that validates the arguments against a known-good
 * kernel buffer.
 */
int
uiomove_frombuf(void *buf, size_t buflen, struct uio *uio)
{
	size_t offset;

	if (uio->uio_offset < 0 || uio->uio_resid < 0 ||
	    (offset = uio->uio_offset) != uio->uio_offset)
		return (EINVAL);
	if (offset >= buflen)
		return (0);
	return (uiomove((char *)buf + offset, buflen - offset, uio));
}
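
/*
 * Example (illustrative sketch only, not part of the original source):
 * a character-device read routine that exposes a fixed kernel buffer
 * can leave all offset/length checking to uiomove_frombuf().  The names
 * `foo_report' and `foo_report_len' are hypothetical.
 *
 *	int
 *	fooread(dev_t dev, struct uio *uio, int flag)
 *	{
 *
 *		return (uiomove_frombuf(foo_report, foo_report_len, uio));
 *	}
 */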

/*
 * Give next character to user as result of read.
 */
int
ureadc(int c, struct uio *uio)
{
	struct iovec *iov;

	if (uio->uio_resid <= 0)
		panic("ureadc: non-positive resid");
 again:
	if (uio->uio_iovcnt <= 0)
		panic("ureadc: non-positive iovcnt");
	iov = uio->uio_iov;
	if (iov->iov_len <= 0) {
		uio->uio_iovcnt--;
		uio->uio_iov++;
		goto again;
	}
	switch (uio->uio_segflg) {

	case UIO_USERSPACE:
		if (subyte(iov->iov_base, c) < 0)
			return (EFAULT);
		break;

	case UIO_SYSSPACE:
		*(char *)iov->iov_base = c;
		break;
	}
	iov->iov_base = (caddr_t)iov->iov_base + 1;
	iov->iov_len--;
	uio->uio_resid--;
	uio->uio_offset++;
	return (0);
}

/*
 * Like copyin(), but operates on an arbitrary process.
 */
int
copyin_proc(struct proc *p, const void *uaddr, void *kaddr, size_t len)
{
	struct iovec iov;
	struct uio uio;
	int error;

	if (len == 0)
		return (0);

	if (__predict_true(p == curproc))
		return copyin(uaddr, kaddr, len);

	iov.iov_base = kaddr;
	iov.iov_len = len;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = (off_t)(intptr_t)uaddr;
	uio.uio_resid = len;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_READ;
	uio.uio_lwp = NULL;

	/* XXXCDC: how should locking work here? */
	if ((p->p_flag & P_WEXIT) || (p->p_vmspace->vm_refcnt < 1))
		return (EFAULT);
	p->p_vmspace->vm_refcnt++;	/* XXX */
	error = uvm_io(&p->p_vmspace->vm_map, &uio);
	uvmspace_free(p->p_vmspace);

	return (error);
}

/*
 * Like copyout(), but operates on an arbitrary process.
 */
int
copyout_proc(struct proc *p, const void *kaddr, void *uaddr, size_t len)
{
	struct iovec iov;
	struct uio uio;
	int error;

	if (len == 0)
		return (0);

	if (__predict_true(p == curproc))
		return copyout(kaddr, uaddr, len);

	iov.iov_base = __UNCONST(kaddr); /* XXXUNCONST cast away const */
	iov.iov_len = len;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = (off_t)(intptr_t)uaddr;
	uio.uio_resid = len;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_WRITE;
	uio.uio_lwp = NULL;

	/* XXXCDC: how should locking work here? */
	if ((p->p_flag & P_WEXIT) || (p->p_vmspace->vm_refcnt < 1))
		return (EFAULT);
	p->p_vmspace->vm_refcnt++;	/* XXX */
	error = uvm_io(&p->p_vmspace->vm_map, &uio);
	uvmspace_free(p->p_vmspace);

	return (error);
}
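
/*
 * Example (illustrative sketch only, not part of the original source):
 * a debugger-style facility that needs to read one word from another
 * process's address space can call copyin_proc() directly.  `tracee'
 * and `addr' are hypothetical.
 *
 *	int word, error;
 *
 *	error = copyin_proc(tracee, (const void *)addr, &word, sizeof(word));
 *	if (error)
 *		return (error);
 */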

/*
 * Like copyin(), except it operates on kernel addresses when the FKIOCTL
 * flag is passed in `ioctlflags' from the ioctl call.
 */
int
ioctl_copyin(int ioctlflags, const void *src, void *dst, size_t len)
{
	if (ioctlflags & FKIOCTL)
		return kcopy(src, dst, len);
	return copyin(src, dst, len);
}

/*
 * Like copyout(), except it operates on kernel addresses when the FKIOCTL
 * flag is passed in `ioctlflags' from the ioctl call.
 */
int
ioctl_copyout(int ioctlflags, const void *src, void *dst, size_t len)
{
	if (ioctlflags & FKIOCTL)
		return kcopy(src, dst, len);
	return copyout(src, dst, len);
}
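
/*
 * Example (illustrative sketch only, not part of the original source):
 * an ioctl handler whose argument structure carries a pointer to a
 * further buffer can use these helpers, so in-kernel callers that pass
 * FKIOCTL in `flag' work transparently.  FOOIOCGBUF, struct foo_buf and
 * foo_results are hypothetical.
 *
 *	case FOOIOCGBUF: {
 *		struct foo_buf *fb = (struct foo_buf *)data;
 *
 *		return (ioctl_copyout(flag, foo_results, fb->fb_ptr,
 *		    MIN(fb->fb_len, sizeof(foo_results))));
 *	}
 */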

/*
 * General routine to allocate a hash table.
 * Allocate enough memory to hold at least `elements' list-head pointers.
 * Return a pointer to the allocated space and set *hashmask to a pattern
 * suitable for masking a value to use as an index into the returned array.
 */
void *
hashinit(u_int elements, enum hashtype htype, struct malloc_type *mtype,
    int mflags, u_long *hashmask)
{
	u_long hashsize, i;
	LIST_HEAD(, generic) *hashtbl_list;
	TAILQ_HEAD(, generic) *hashtbl_tailq;
	size_t esize;
	void *p;

	if (elements == 0)
		panic("hashinit: bad cnt");
	for (hashsize = 1; hashsize < elements; hashsize <<= 1)
		continue;

	switch (htype) {
	case HASH_LIST:
		esize = sizeof(*hashtbl_list);
		break;
	case HASH_TAILQ:
		esize = sizeof(*hashtbl_tailq);
		break;
	default:
#ifdef DIAGNOSTIC
		panic("hashinit: invalid table type");
#else
		return NULL;
#endif
	}

	if ((p = malloc(hashsize * esize, mtype, mflags)) == NULL)
		return (NULL);

	switch (htype) {
	case HASH_LIST:
		hashtbl_list = p;
		for (i = 0; i < hashsize; i++)
			LIST_INIT(&hashtbl_list[i]);
		break;
	case HASH_TAILQ:
		hashtbl_tailq = p;
		for (i = 0; i < hashsize; i++)
			TAILQ_INIT(&hashtbl_tailq[i]);
		break;
	}
	*hashmask = hashsize - 1;
	return (p);
}

/*
 * Free memory from a hash table previously allocated via hashinit().
 */
void
hashdone(void *hashtbl, struct malloc_type *mtype)
{

	free(hashtbl, mtype);
}
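
/*
 * Example (illustrative sketch only, not part of the original source):
 * hashinit() rounds `elements' up to a power of two, so indexing is a
 * simple mask with *hashmask.  The `foo' names are hypothetical.
 *
 *	static LIST_HEAD(, foo) *foo_hashtbl;
 *	static u_long foo_hashmask;
 *
 *	foo_hashtbl = hashinit(64, HASH_LIST, M_DEVBUF, M_WAITOK,
 *	    &foo_hashmask);
 *	...
 *	LIST_INSERT_HEAD(&foo_hashtbl[key & foo_hashmask], fp, f_hash);
 *	...
 *	hashdone(foo_hashtbl, M_DEVBUF);
 */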


static void *
hook_establish(hook_list_t *list, void (*fn)(void *), void *arg)
{
	struct hook_desc *hd;

	hd = malloc(sizeof(*hd), M_DEVBUF, M_NOWAIT);
	if (hd == NULL)
		return (NULL);

	hd->hk_fn = fn;
	hd->hk_arg = arg;
	LIST_INSERT_HEAD(list, hd, hk_list);

	return (hd);
}

static void
hook_disestablish(hook_list_t *list, void *vhook)
{
#ifdef DIAGNOSTIC
	struct hook_desc *hd;

	LIST_FOREACH(hd, list, hk_list) {
		if (hd == vhook)
			break;
	}

	if (hd == NULL)
		panic("hook_disestablish: hook %p not established", vhook);
#endif
	LIST_REMOVE((struct hook_desc *)vhook, hk_list);
	free(vhook, M_DEVBUF);
}

static void
hook_destroy(hook_list_t *list)
{
	struct hook_desc *hd;

	while ((hd = LIST_FIRST(list)) != NULL) {
		LIST_REMOVE(hd, hk_list);
		free(hd, M_DEVBUF);
	}
}

static void
hook_proc_run(hook_list_t *list, struct proc *p)
{
	struct hook_desc *hd;

	for (hd = LIST_FIRST(list); hd != NULL; hd = LIST_NEXT(hd, hk_list)) {
		((void (*)(struct proc *, void *))*hd->hk_fn)(p,
		    hd->hk_arg);
	}
}

/*
 * "Shutdown hook" types, functions, and variables.
 *
 * Should be invoked immediately before the system is halted or rebooted,
 * i.e. after file systems are unmounted, after the crash dump is done,
 * etc.
 *
 * Each shutdown hook is removed from the list before it's run, so that
 * it won't be run again.
 */

static hook_list_t shutdownhook_list;

void *
shutdownhook_establish(void (*fn)(void *), void *arg)
{
	return hook_establish(&shutdownhook_list, fn, arg);
}

void
shutdownhook_disestablish(void *vhook)
{
	hook_disestablish(&shutdownhook_list, vhook);
}
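
/*
 * Example (illustrative sketch only, not part of the original source):
 * a driver typically registers a shutdown hook at attach time so the
 * hardware can be quiesced before reboot.  The `foo' names are
 * hypothetical.
 *
 *	sc->sc_sdhook = shutdownhook_establish(foo_shutdown, sc);
 *	if (sc->sc_sdhook == NULL)
 *		aprint_error("%s: couldn't establish shutdown hook\n",
 *		    sc->sc_dev.dv_xname);
 *	...
 *	static void
 *	foo_shutdown(void *arg)
 *	{
 *		struct foo_softc *sc = arg;
 *
 *		foo_stop(sc);
 *	}
 */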

/*
 * Run shutdown hooks.  Should be invoked immediately before the
 * system is halted or rebooted, i.e. after file systems are unmounted,
 * after the crash dump is done, etc.
 *
 * Each shutdown hook is removed from the list before it's run, so that
 * it won't be run again.
 */
void
doshutdownhooks(void)
{
	struct hook_desc *dp;

	while ((dp = LIST_FIRST(&shutdownhook_list)) != NULL) {
		LIST_REMOVE(dp, hk_list);
		(*dp->hk_fn)(dp->hk_arg);
#if 0
		/*
		 * Don't bother freeing the hook structure, since we may
		 * be rebooting because of a memory corruption problem,
		 * and this might only make things worse.  It doesn't
		 * matter, anyway, since the system is just about to
		 * reboot.
		 */
		free(dp, M_DEVBUF);
#endif
	}
}

/*
 * "Mountroot hook" types, functions, and variables.
 */

static hook_list_t mountroothook_list;

void *
mountroothook_establish(void (*fn)(struct device *), struct device *dev)
{
	return hook_establish(&mountroothook_list, (void (*)(void *))fn, dev);
}

void
mountroothook_disestablish(void *vhook)
{
	hook_disestablish(&mountroothook_list, vhook);
}

void
mountroothook_destroy(void)
{
	hook_destroy(&mountroothook_list);
}

void
domountroothook(void)
{
	struct hook_desc *hd;

	LIST_FOREACH(hd, &mountroothook_list, hk_list) {
		if (hd->hk_arg == (void *)root_device) {
			(*hd->hk_fn)(hd->hk_arg);
			return;
		}
	}
}

static hook_list_t exechook_list;

void *
exechook_establish(void (*fn)(struct proc *, void *), void *arg)
{
	return hook_establish(&exechook_list, (void (*)(void *))fn, arg);
}

void
exechook_disestablish(void *vhook)
{
	hook_disestablish(&exechook_list, vhook);
}

/*
 * Run exec hooks.
 */
void
doexechooks(struct proc *p)
{
	hook_proc_run(&exechook_list, p);
}
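
/*
 * Example (illustrative sketch only, not part of the original source):
 * a subsystem that caches per-process state can register an exec hook
 * to discard that state when a process execs.  The `foo' names are
 * hypothetical.
 *
 *	foo_exechook = exechook_establish(foo_flush, NULL);
 *	...
 *	static void
 *	foo_flush(struct proc *p, void *arg)
 *	{
 *
 *		foo_forget(p);
 *	}
 */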

static hook_list_t exithook_list;

void *
exithook_establish(void (*fn)(struct proc *, void *), void *arg)
{
	return hook_establish(&exithook_list, (void (*)(void *))fn, arg);
}

void
exithook_disestablish(void *vhook)
{
	hook_disestablish(&exithook_list, vhook);
}

/*
 * Run exit hooks.
 */
void
doexithooks(struct proc *p)
{
	hook_proc_run(&exithook_list, p);
}

static hook_list_t forkhook_list;

void *
forkhook_establish(void (*fn)(struct proc *, struct proc *))
{
	return hook_establish(&forkhook_list, (void (*)(void *))fn, NULL);
}

void
forkhook_disestablish(void *vhook)
{
	hook_disestablish(&forkhook_list, vhook);
}

/*
 * Run fork hooks.
 */
void
doforkhooks(struct proc *p2, struct proc *p1)
{
	struct hook_desc *hd;

	LIST_FOREACH(hd, &forkhook_list, hk_list) {
		((void (*)(struct proc *, struct proc *))*hd->hk_fn)
		    (p2, p1);
	}
}

/*
 * "Power hook" types, functions, and variables.
 * The list of power hooks is kept ordered with the last registered hook
 * first.  When running the hooks on power down, they are called in
 * reverse registration order; when powering up, in registration order.
 */
struct powerhook_desc {
	CIRCLEQ_ENTRY(powerhook_desc) sfd_list;
	void	(*sfd_fn)(int, void *);
	void	*sfd_arg;
};

static CIRCLEQ_HEAD(, powerhook_desc) powerhook_list =
    CIRCLEQ_HEAD_INITIALIZER(powerhook_list);

void *
powerhook_establish(void (*fn)(int, void *), void *arg)
{
	struct powerhook_desc *ndp;

	ndp = (struct powerhook_desc *)
	    malloc(sizeof(*ndp), M_DEVBUF, M_NOWAIT);
	if (ndp == NULL)
		return (NULL);

	ndp->sfd_fn = fn;
	ndp->sfd_arg = arg;
	CIRCLEQ_INSERT_HEAD(&powerhook_list, ndp, sfd_list);

	return (ndp);
}

void
powerhook_disestablish(void *vhook)
{
#ifdef DIAGNOSTIC
	struct powerhook_desc *dp;

	CIRCLEQ_FOREACH(dp, &powerhook_list, sfd_list)
		if (dp == vhook)
			goto found;
	panic("powerhook_disestablish: hook %p not established", vhook);
 found:
#endif

	CIRCLEQ_REMOVE(&powerhook_list, (struct powerhook_desc *)vhook,
	    sfd_list);
	free(vhook, M_DEVBUF);
}

/*
 * Run power hooks.
 */
void
dopowerhooks(int why)
{
	struct powerhook_desc *dp;

	if (why == PWR_RESUME || why == PWR_SOFTRESUME) {
		CIRCLEQ_FOREACH_REVERSE(dp, &powerhook_list, sfd_list) {
			(*dp->sfd_fn)(why, dp->sfd_arg);
		}
	} else {
		CIRCLEQ_FOREACH(dp, &powerhook_list, sfd_list) {
			(*dp->sfd_fn)(why, dp->sfd_arg);
		}
	}
}
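
/*
 * Example (illustrative sketch only, not part of the original source):
 * a driver that must save and restore device state across suspend/resume
 * registers a power hook and switches on `why'.  The `foo' names are
 * hypothetical.
 *
 *	sc->sc_powerhook = powerhook_establish(foo_power, sc);
 *	...
 *	static void
 *	foo_power(int why, void *arg)
 *	{
 *		struct foo_softc *sc = arg;
 *
 *		switch (why) {
 *		case PWR_SUSPEND:
 *		case PWR_STANDBY:
 *			foo_save_state(sc);
 *			break;
 *		case PWR_RESUME:
 *			foo_restore_state(sc);
 *			break;
 *		}
 *	}
 */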

/*
 * Determine the root device and, if instructed to, the root file system.
 */

#include "md.h"
#if NMD == 0
#undef MEMORY_DISK_HOOKS
#endif

#ifdef MEMORY_DISK_HOOKS
static struct device fakemdrootdev[NMD];
#endif

#ifdef MEMORY_DISK_IS_ROOT
#define BOOT_FROM_MEMORY_HOOKS 1
#endif

#include "raid.h"
#if NRAID == 1
#define BOOT_FROM_RAID_HOOKS 1
#endif

#ifdef BOOT_FROM_RAID_HOOKS
extern int numraid;
extern struct device *raidrootdev;
#endif

/*
 * The device and wedge that we booted from.  If booted_wedge is NULL,
 * then we might consult booted_partition.
 */
struct device *booted_device;
struct device *booted_wedge;
int booted_partition;

/*
 * Use partition letters if it's a disk class but not a wedge.
 * XXX Checking for wedges this way is kinda gross.
 */
#define	DEV_USES_PARTITIONS(dv)						\
	((dv)->dv_class == DV_DISK &&					\
	 ((dv)->dv_cfdata == NULL ||					\
	  strcmp((dv)->dv_cfdata->cf_name, "dk") != 0))

void
setroot(struct device *bootdv, int bootpartition)
{
	struct device *dv;
	int len;
#ifdef MEMORY_DISK_HOOKS
	int i;
#endif
	dev_t nrootdev;
	dev_t ndumpdev = NODEV;
	char buf[128];
	const char *rootdevname;
	const char *dumpdevname;
	struct device *rootdv = NULL;		/* XXX gcc -Wuninitialized */
	struct device *dumpdv = NULL;
	struct ifnet *ifp;
	const char *deffsname;
	struct vfsops *vops;

#ifdef MEMORY_DISK_HOOKS
	for (i = 0; i < NMD; i++) {
		fakemdrootdev[i].dv_class = DV_DISK;
		fakemdrootdev[i].dv_cfdata = NULL;
		fakemdrootdev[i].dv_unit = i;
		fakemdrootdev[i].dv_parent = NULL;
		snprintf(fakemdrootdev[i].dv_xname,
		    sizeof(fakemdrootdev[i].dv_xname), "md%d", i);
	}
#endif /* MEMORY_DISK_HOOKS */

#ifdef MEMORY_DISK_IS_ROOT
	bootdv = &fakemdrootdev[0];
	bootpartition = 0;
#endif

	/*
	 * If NFS is specified as the file system, and we found
	 * a DV_DISK boot device (or no boot device at all), then
	 * find a reasonable network interface for "rootspec".
	 */
	vops = vfs_getopsbyname("nfs");
	if (vops != NULL && vops->vfs_mountroot == mountroot &&
	    rootspec == NULL &&
	    (bootdv == NULL || bootdv->dv_class != DV_IFNET)) {
		IFNET_FOREACH(ifp) {
			if ((ifp->if_flags &
			    (IFF_LOOPBACK|IFF_POINTOPOINT)) == 0)
				break;
		}
		if (ifp == NULL) {
			/*
			 * Can't find a suitable interface; ask the
			 * user.
			 */
			boothowto |= RB_ASKNAME;
		} else {
			/*
			 * Have a suitable interface; behave as if
			 * the user specified this interface.
			 */
			rootspec = (const char *)ifp->if_xname;
		}
	}

	/*
	 * If the root device was wildcarded and the boot device could not
	 * be determined, ask the user.
	 */
	if (rootspec == NULL && bootdv == NULL)
		boothowto |= RB_ASKNAME;

 top:
	if (boothowto & RB_ASKNAME) {
		struct device *defdumpdv;

		for (;;) {
			printf("root device");
			if (bootdv != NULL) {
				printf(" (default %s", bootdv->dv_xname);
				if (DEV_USES_PARTITIONS(bootdv))
					printf("%c", bootpartition + 'a');
				printf(")");
			}
			printf(": ");
			len = cngetsn(buf, sizeof(buf));
			if (len == 0 && bootdv != NULL) {
				strlcpy(buf, bootdv->dv_xname, sizeof(buf));
				len = strlen(buf);
			}
			if (len > 0 && buf[len - 1] == '*') {
				buf[--len] = '\0';
				dv = getdisk(buf, len, 1, &nrootdev, 0);
				if (dv != NULL) {
					rootdv = dv;
					break;
				}
			}
			dv = getdisk(buf, len, bootpartition, &nrootdev, 0);
			if (dv != NULL) {
				rootdv = dv;
				break;
			}
		}

		/*
		 * Set up the default dump device.  If root is on
		 * a network device, there is no default dump
		 * device, since we don't support dumps to the
		 * network.
		 */
		if (DEV_USES_PARTITIONS(rootdv) == 0)
			defdumpdv = NULL;
		else
			defdumpdv = rootdv;

		for (;;) {
			printf("dump device");
			if (defdumpdv != NULL) {
				/*
				 * Note, we know it's a disk if we get here.
				 */
				printf(" (default %sb)", defdumpdv->dv_xname);
			}
			printf(": ");
			len = cngetsn(buf, sizeof(buf));
			if (len == 0) {
				if (defdumpdv != NULL) {
					ndumpdev = MAKEDISKDEV(major(nrootdev),
					    DISKUNIT(nrootdev), 1);
				}
				dumpdv = defdumpdv;
				break;
			}
			if (len == 4 && strcmp(buf, "none") == 0) {
				dumpdv = NULL;
				break;
			}
			dv = getdisk(buf, len, 1, &ndumpdev, 1);
			if (dv != NULL) {
				dumpdv = dv;
				break;
			}
		}

		rootdev = nrootdev;
		dumpdev = ndumpdev;

		for (vops = LIST_FIRST(&vfs_list); vops != NULL;
		     vops = LIST_NEXT(vops, vfs_list)) {
			if (vops->vfs_mountroot != NULL &&
			    vops->vfs_mountroot == mountroot)
				break;
		}

		if (vops == NULL) {
			mountroot = NULL;
			deffsname = "generic";
		} else
			deffsname = vops->vfs_name;

		for (;;) {
			printf("file system (default %s): ", deffsname);
			len = cngetsn(buf, sizeof(buf));
			if (len == 0)
				break;
			if (len == 4 && strcmp(buf, "halt") == 0)
				cpu_reboot(RB_HALT, NULL);
			else if (len == 6 && strcmp(buf, "reboot") == 0)
				cpu_reboot(0, NULL);
#if defined(DDB)
			else if (len == 3 && strcmp(buf, "ddb") == 0) {
				console_debugger();
			}
#endif
			else if (len == 7 && strcmp(buf, "generic") == 0) {
				mountroot = NULL;
				break;
			}
			vops = vfs_getopsbyname(buf);
			if (vops == NULL || vops->vfs_mountroot == NULL) {
				printf("use one of: generic");
				for (vops = LIST_FIRST(&vfs_list);
				     vops != NULL;
				     vops = LIST_NEXT(vops, vfs_list)) {
					if (vops->vfs_mountroot != NULL)
						printf(" %s", vops->vfs_name);
				}
#if defined(DDB)
				printf(" ddb");
#endif
				printf(" halt reboot\n");
			} else {
				mountroot = vops->vfs_mountroot;
				break;
			}
		}

	} else if (rootspec == NULL) {
		int majdev;

		/*
		 * Wildcarded root; use the boot device.
		 */
		rootdv = bootdv;

		majdev = devsw_name2blk(bootdv->dv_xname, NULL, 0);
		if (majdev >= 0) {
			/*
			 * Root is on a disk.  `bootpartition' is root,
			 * unless the device does not use partitions.
			 */
			if (DEV_USES_PARTITIONS(bootdv))
				rootdev = MAKEDISKDEV(majdev, bootdv->dv_unit,
				    bootpartition);
			else
				rootdev = makedev(majdev, bootdv->dv_unit);
		}
	} else {

		/*
		 * `root on <dev> ...'
		 */

		/*
		 * If it's a network interface, we can bail out
		 * early.
		 */
		dv = finddevice(rootspec);
		if (dv != NULL && dv->dv_class == DV_IFNET) {
			rootdv = dv;
			goto haveroot;
		}

		rootdevname = devsw_blk2name(major(rootdev));
		if (rootdevname == NULL) {
			printf("unknown device major 0x%x\n", rootdev);
			boothowto |= RB_ASKNAME;
			goto top;
		}
		memset(buf, 0, sizeof(buf));
		snprintf(buf, sizeof(buf), "%s%d", rootdevname,
		    DISKUNIT(rootdev));

		rootdv = finddevice(buf);
		if (rootdv == NULL) {
			printf("device %s (0x%x) not configured\n",
			    buf, rootdev);
			boothowto |= RB_ASKNAME;
			goto top;
		}
	}

 haveroot:

	root_device = rootdv;

	switch (rootdv->dv_class) {
	case DV_IFNET:
		aprint_normal("root on %s", rootdv->dv_xname);
		break;

	case DV_DISK:
		aprint_normal("root on %s%c", rootdv->dv_xname,
		    DISKPART(rootdev) + 'a');
		break;

	default:
		printf("can't determine root device\n");
		boothowto |= RB_ASKNAME;
		goto top;
	}

	/*
	 * Now configure the dump device.
	 *
	 * If we haven't figured out the dump device, do so, with
	 * the following rules:
	 *
	 *	(a) We already know dumpdv in the RB_ASKNAME case.
	 *
	 *	(b) If dumpspec is set, try to use it.  If the device
	 *	    is not available, punt.
	 *
	 *	(c) If dumpspec is not set, the dump device is
	 *	    wildcarded or unspecified.  If the root device
	 *	    is DV_IFNET, punt.  Otherwise, use partition b
	 *	    of the root device.
	 */

	if (boothowto & RB_ASKNAME) {		/* (a) */
		if (dumpdv == NULL)
			goto nodumpdev;
	} else if (dumpspec != NULL) {		/* (b) */
		if (strcmp(dumpspec, "none") == 0 || dumpdev == NODEV) {
			/*
			 * Operator doesn't want a dump device.
			 * Or looks like they tried to pick a network
			 * device.  Oops.
			 */
			goto nodumpdev;
		}

		dumpdevname = devsw_blk2name(major(dumpdev));
		if (dumpdevname == NULL)
			goto nodumpdev;
		memset(buf, 0, sizeof(buf));
		snprintf(buf, sizeof(buf), "%s%d", dumpdevname,
		    DISKUNIT(dumpdev));

		dumpdv = finddevice(buf);
		if (dumpdv == NULL) {
			/*
			 * Device not configured.
			 */
			goto nodumpdev;
		}
	} else {				/* (c) */
		if (DEV_USES_PARTITIONS(rootdv) == 0)
			goto nodumpdev;
		else {
			dumpdv = rootdv;
			dumpdev = MAKEDISKDEV(major(rootdev),
			    dumpdv->dv_unit, 1);
		}
	}

	aprint_normal(" dumps on %s%c\n", dumpdv->dv_xname,
	    DISKPART(dumpdev) + 'a');
	return;

 nodumpdev:
	dumpdev = NODEV;
	aprint_normal("\n");
}

static struct device *
finddevice(const char *name)
{
	struct device *dv;
#if defined(BOOT_FROM_RAID_HOOKS) || defined(BOOT_FROM_MEMORY_HOOKS)
	int j;
#endif /* BOOT_FROM_RAID_HOOKS || BOOT_FROM_MEMORY_HOOKS */

#ifdef BOOT_FROM_RAID_HOOKS
	for (j = 0; j < numraid; j++) {
		if (strcmp(name, raidrootdev[j].dv_xname) == 0) {
			dv = &raidrootdev[j];
			return (dv);
		}
	}
#endif /* BOOT_FROM_RAID_HOOKS */

#ifdef BOOT_FROM_MEMORY_HOOKS
	for (j = 0; j < NMD; j++) {
		if (strcmp(name, fakemdrootdev[j].dv_xname) == 0) {
			dv = &fakemdrootdev[j];
			return (dv);
		}
	}
#endif /* BOOT_FROM_MEMORY_HOOKS */

	for (dv = TAILQ_FIRST(&alldevs); dv != NULL;
	     dv = TAILQ_NEXT(dv, dv_list))
		if (strcmp(dv->dv_xname, name) == 0)
			break;
	return (dv);
}

static struct device *
getdisk(char *str, int len, int defpart, dev_t *devp, int isdump)
{
	struct device *dv;
#ifdef MEMORY_DISK_HOOKS
	int i;
#endif
#ifdef BOOT_FROM_RAID_HOOKS
	int j;
#endif

	if ((dv = parsedisk(str, len, defpart, devp)) == NULL) {
		printf("use one of:");
#ifdef MEMORY_DISK_HOOKS
		if (isdump == 0)
			for (i = 0; i < NMD; i++)
				printf(" %s[a-%c]", fakemdrootdev[i].dv_xname,
				    'a' + MAXPARTITIONS - 1);
#endif
#ifdef BOOT_FROM_RAID_HOOKS
		if (isdump == 0)
			for (j = 0; j < numraid; j++)
				printf(" %s[a-%c]", raidrootdev[j].dv_xname,
				    'a' + MAXPARTITIONS - 1);
#endif
		TAILQ_FOREACH(dv, &alldevs, dv_list) {
			if (DEV_USES_PARTITIONS(dv))
				printf(" %s[a-%c]", dv->dv_xname,
				    'a' + MAXPARTITIONS - 1);
			else if (dv->dv_class == DV_DISK)
				printf(" %s", dv->dv_xname);
			if (isdump == 0 && dv->dv_class == DV_IFNET)
				printf(" %s", dv->dv_xname);
		}
		if (isdump)
			printf(" none");
#if defined(DDB)
		printf(" ddb");
#endif
		printf(" halt reboot\n");
	}
	return (dv);
}

static struct device *
parsedisk(char *str, int len, int defpart, dev_t *devp)
{
	struct device *dv;
	char *cp, c;
	int majdev, part;
#ifdef MEMORY_DISK_HOOKS
	int i;
#endif
	if (len == 0)
		return (NULL);

	if (len == 4 && strcmp(str, "halt") == 0)
		cpu_reboot(RB_HALT, NULL);
	else if (len == 6 && strcmp(str, "reboot") == 0)
		cpu_reboot(0, NULL);
#if defined(DDB)
	else if (len == 3 && strcmp(str, "ddb") == 0)
		console_debugger();
#endif

	cp = str + len - 1;
	c = *cp;
	if (c >= 'a' && c <= ('a' + MAXPARTITIONS - 1)) {
		part = c - 'a';
		*cp = '\0';
	} else
		part = defpart;

#ifdef MEMORY_DISK_HOOKS
	for (i = 0; i < NMD; i++)
		if (strcmp(str, fakemdrootdev[i].dv_xname) == 0) {
			dv = &fakemdrootdev[i];
			goto gotdisk;
		}
#endif

	dv = finddevice(str);
	if (dv != NULL) {
		if (dv->dv_class == DV_DISK) {
#ifdef MEMORY_DISK_HOOKS
 gotdisk:
#endif
			majdev = devsw_name2blk(dv->dv_xname, NULL, 0);
			if (majdev < 0)
				panic("parsedisk");
			if (DEV_USES_PARTITIONS(dv))
				*devp = MAKEDISKDEV(majdev, dv->dv_unit, part);
			else
				*devp = makedev(majdev, dv->dv_unit);
		}

		if (dv->dv_class == DV_IFNET)
			*devp = NODEV;
	}

	*cp = c;
	return (dv);
}
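
/*
 * Worked example (illustrative only, not part of the original source):
 * given the string "sd0a" and defpart 0, parsedisk() strips the trailing
 * partition letter, looks up the "sd0" device, and for a partitioned disk
 * sets *devp = MAKEDISKDEV(major, 0, 0), i.e. sd0a.  A bare "sd0" would
 * fall back to defpart instead.  "sd0" here is just a hypothetical disk
 * name.
 */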

/*
 * snprintf() `bytes' into `buf', reformatting it so that the number,
 * plus a possible `x' + suffix extension, fits into len bytes (including
 * the terminating NUL).
 * Returns the number of bytes stored in buf, or -1 if there was a problem.
 * E.g., given a len of 9 and a suffix of `B':
 *	bytes		result
 *	-----		------
 *	99999		`99999 B'
 *	100000		`97 kB'
 *	66715648	`65152 kB'
 *	252215296	`240 MB'
 */
int
humanize_number(char *buf, size_t len, uint64_t bytes, const char *suffix,
    int divisor)
{
	/* prefixes are: (none), kilo, Mega, Giga, Tera, Peta, Exa */
	const char *prefixes;
	int r;
	uint64_t umax;
	size_t i, suffixlen;

	if (buf == NULL || suffix == NULL)
		return (-1);
	if (len > 0)
		buf[0] = '\0';
	suffixlen = strlen(suffix);
	/* check if enough room for `x y' + suffix + `\0' */
	if (len < 4 + suffixlen)
		return (-1);

	if (divisor == 1024) {
		/*
		 * binary multiplies
		 * XXX IEC 60027-2 recommends Ki, Mi, Gi...
		 */
		prefixes = " KMGTPE";
	} else
		prefixes = " kMGTPE"; /* SI for decimal multiplies */

	umax = 1;
	for (i = 0; i < len - suffixlen - 3; i++)
		umax *= 10;
	for (i = 0; bytes >= umax && prefixes[i + 1]; i++)
		bytes /= divisor;

	r = snprintf(buf, len, "%qu%s%c%s", (unsigned long long)bytes,
	    i == 0 ? "" : " ", prefixes[i], suffix);

	return (r);
}

int
format_bytes(char *buf, size_t len, uint64_t bytes)
{
	int rv;
	size_t nlen;

	rv = humanize_number(buf, len, bytes, "B", 1024);
	if (rv != -1) {
		/* nuke the trailing ` B' if it exists */
		nlen = strlen(buf) - 2;
		if (strcmp(&buf[nlen], " B") == 0)
			buf[nlen] = '\0';
	}
	return (rv);
}
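
/*
 * Example (illustrative sketch only, not part of the original source):
 * the usual consumer is boot-time reporting of memory or disk sizes.
 *
 *	char pbuf[9];
 *
 *	format_bytes(pbuf, sizeof(pbuf), (uint64_t)ptoa(physmem));
 *	printf("total memory = %s\n", pbuf);
 *
 * With 256 MB of RAM this prints something like "total memory = 256 MB";
 * calling humanize_number(pbuf, sizeof(pbuf), bytes, "B", 1024) directly
 * keeps the trailing "B" (e.g. "240 MB" as in the table above).
 */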

/*
 * Start trace of a particular system call.  If the process is being traced,
 * this routine is called by MD syscall dispatch code just before
 * a system call is actually executed.
 * The MD caller guarantees the passed 'code' is within the supported
 * system call number range for the emulation the process runs under.
 */
int
trace_enter(struct lwp *l, register_t code,
    register_t realcode, const struct sysent *callp, void *args)
{
#if defined(KTRACE) || defined(SYSTRACE)
	struct proc *p = l->l_proc;
#endif

#ifdef SYSCALL_DEBUG
	scdebug_call(l, code, args);
#endif /* SYSCALL_DEBUG */

#ifdef KTRACE
	if (KTRPOINT(p, KTR_SYSCALL))
		ktrsyscall(l, code, realcode, callp, args);
#endif /* KTRACE */

#ifdef SYSTRACE
	if (ISSET(p->p_flag, P_SYSTRACE))
		return systrace_enter(p, code, args);
#endif
	return 0;
}

/*
 * End trace of a particular system call.  If the process is being traced,
 * this routine is called by MD syscall dispatch code just after
 * a system call finishes.
 * The MD caller guarantees the passed 'code' is within the supported
 * system call number range for the emulation the process runs under.
 */
void
trace_exit(struct lwp *l, register_t code, void *args, register_t rval[],
    int error)
{
#if defined(KTRACE) || defined(SYSTRACE)
	struct proc *p = l->l_proc;
#endif

#ifdef SYSCALL_DEBUG
	scdebug_ret(l, code, error, rval);
#endif /* SYSCALL_DEBUG */

#ifdef KTRACE
	if (KTRPOINT(p, KTR_SYSRET)) {
		KERNEL_PROC_LOCK(l);
		ktrsysret(l, code, error, rval);
		KERNEL_PROC_UNLOCK(l);
	}
#endif /* KTRACE */

#ifdef SYSTRACE
	if (ISSET(p->p_flag, P_SYSTRACE)) {
		KERNEL_PROC_LOCK(l);
		systrace_exit(p, code, args, rval, error);
		KERNEL_PROC_UNLOCK(l);
	}
#endif
}

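/*
 * Example (illustrative, simplified sketch only, not part of the original
 * source): an MD syscall dispatcher brackets the actual call roughly like
 * this, with argument fetching and error handling omitted.
 *
 *	error = trace_enter(l, code, code, callp, args);
 *	if (error == 0)
 *		error = (*callp->sy_call)(l, args, rval);
 *	trace_exit(l, code, args, rval, error);
 */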