/*	$NetBSD: vfs_mount.c,v 1.34 2015/04/20 13:44:16 riastradh Exp $	*/

/*-
 * Copyright (c) 1997-2011 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center, by Charles M. Hannum, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_subr.c	8.13 (Berkeley) 4/18/94
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vfs_mount.c,v 1.34 2015/04/20 13:44:16 riastradh Exp $");

#define _VFS_VNODE_PRIVATE

#include <sys/param.h>
#include <sys/kernel.h>

#include <sys/atomic.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/filedesc.h>
#include <sys/device.h>
#include <sys/kauth.h>
#include <sys/kmem.h>
#include <sys/module.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/extattr.h>
#include <sys/syscallargs.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/vfs_syscalls.h>
#include <sys/vnode.h>

#include <miscfs/genfs/genfs.h>
#include <miscfs/syncfs/syncfs.h>
#include <miscfs/specfs/specdev.h>

/* Root filesystem. */
vnode_t *	rootvnode;

/* Mounted filesystem list. */
struct mntlist	mountlist;
kmutex_t	mountlist_lock;

kmutex_t	mntvnode_lock;
kmutex_t	vfs_list_lock;

static specificdata_domain_t mount_specificdata_domain;
static kmutex_t	mntid_lock;

static kmutex_t	mountgen_lock;
static uint64_t	mountgen;

void
vfs_mount_sysinit(void)
{

        TAILQ_INIT(&mountlist);
        mutex_init(&mountlist_lock, MUTEX_DEFAULT, IPL_NONE);
        mutex_init(&mntvnode_lock, MUTEX_DEFAULT, IPL_NONE);
        mutex_init(&vfs_list_lock, MUTEX_DEFAULT, IPL_NONE);

        mount_specificdata_domain = specificdata_domain_create();
        mutex_init(&mntid_lock, MUTEX_DEFAULT, IPL_NONE);
        mutex_init(&mountgen_lock, MUTEX_DEFAULT, IPL_NONE);
        mountgen = 0;
}

struct mount *
vfs_mountalloc(struct vfsops *vfsops, vnode_t *vp)
{
        struct mount *mp;
        int error __diagused;

        mp = kmem_zalloc(sizeof(*mp), KM_SLEEP);
        if (mp == NULL)
                return NULL;

        mp->mnt_op = vfsops;
        mp->mnt_refcnt = 1;
        TAILQ_INIT(&mp->mnt_vnodelist);
        mutex_init(&mp->mnt_unmounting, MUTEX_DEFAULT, IPL_NONE);
        mutex_init(&mp->mnt_renamelock, MUTEX_DEFAULT, IPL_NONE);
        mutex_init(&mp->mnt_updating, MUTEX_DEFAULT, IPL_NONE);
        error = vfs_busy(mp, NULL);
        KASSERT(error == 0);
        mp->mnt_vnodecovered = vp;
        mount_initspecific(mp);

        mutex_enter(&mountgen_lock);
        mp->mnt_gen = mountgen++;
        mutex_exit(&mountgen_lock);

        return mp;
}

/*
 * vfs_rootmountalloc: lookup a filesystem type, and if found allocate and
 * initialize a mount structure for it.
 *
 * Devname is usually updated by mount(8) after booting.
 */
int
vfs_rootmountalloc(const char *fstypename, const char *devname,
    struct mount **mpp)
{
        struct vfsops *vfsp = NULL;
        struct mount *mp;

        mutex_enter(&vfs_list_lock);
        LIST_FOREACH(vfsp, &vfs_list, vfs_list)
                if (!strncmp(vfsp->vfs_name, fstypename,
                    sizeof(mp->mnt_stat.f_fstypename)))
                        break;
        if (vfsp == NULL) {
                mutex_exit(&vfs_list_lock);
                return (ENODEV);
        }
        vfsp->vfs_refcount++;
        mutex_exit(&vfs_list_lock);

        if ((mp = vfs_mountalloc(vfsp, NULL)) == NULL)
                return ENOMEM;
        mp->mnt_flag = MNT_RDONLY;
        (void)strlcpy(mp->mnt_stat.f_fstypename, vfsp->vfs_name,
            sizeof(mp->mnt_stat.f_fstypename));
        mp->mnt_stat.f_mntonname[0] = '/';
        mp->mnt_stat.f_mntonname[1] = '\0';
        mp->mnt_stat.f_mntfromname[sizeof(mp->mnt_stat.f_mntfromname) - 1] =
            '\0';
        (void)copystr(devname, mp->mnt_stat.f_mntfromname,
            sizeof(mp->mnt_stat.f_mntfromname) - 1, 0);
        *mpp = mp;
        return 0;
}

/*
 * vfs_getnewfsid: get a new unique fsid.
 */
void
vfs_getnewfsid(struct mount *mp)
{
        static u_short xxxfs_mntid;
        fsid_t tfsid;
        int mtype;

        mutex_enter(&mntid_lock);
        mtype = makefstype(mp->mnt_op->vfs_name);
        mp->mnt_stat.f_fsidx.__fsid_val[0] = makedev(mtype, 0);
        mp->mnt_stat.f_fsidx.__fsid_val[1] = mtype;
        mp->mnt_stat.f_fsid = mp->mnt_stat.f_fsidx.__fsid_val[0];
        if (xxxfs_mntid == 0)
                ++xxxfs_mntid;
        tfsid.__fsid_val[0] = makedev(mtype & 0xff, xxxfs_mntid);
        tfsid.__fsid_val[1] = mtype;
        if (!TAILQ_EMPTY(&mountlist)) {
                while (vfs_getvfs(&tfsid)) {
                        tfsid.__fsid_val[0]++;
                        xxxfs_mntid++;
                }
        }
        mp->mnt_stat.f_fsidx.__fsid_val[0] = tfsid.__fsid_val[0];
        mp->mnt_stat.f_fsid = mp->mnt_stat.f_fsidx.__fsid_val[0];
        mutex_exit(&mntid_lock);
}

/*
 * Lookup a mount point by filesystem identifier.
 *
 * XXX Needs to add a reference to the mount point.
 */
struct mount *
vfs_getvfs(fsid_t *fsid)
{
        struct mount *mp;

        mutex_enter(&mountlist_lock);
        TAILQ_FOREACH(mp, &mountlist, mnt_list) {
                if (mp->mnt_stat.f_fsidx.__fsid_val[0] == fsid->__fsid_val[0] &&
                    mp->mnt_stat.f_fsidx.__fsid_val[1] == fsid->__fsid_val[1]) {
                        mutex_exit(&mountlist_lock);
                        return (mp);
                }
        }
        mutex_exit(&mountlist_lock);
        return NULL;
}

/*
 * Drop a reference to a mount structure, freeing if the last reference.
 */
void
vfs_destroy(struct mount *mp)
{

        if (__predict_true((int)atomic_dec_uint_nv(&mp->mnt_refcnt) > 0)) {
                return;
        }

        /*
         * Nothing else has visibility of the mount: we can now
         * free the data structures.
         */
        KASSERT(mp->mnt_refcnt == 0);
        specificdata_fini(mount_specificdata_domain, &mp->mnt_specdataref);
        mutex_destroy(&mp->mnt_unmounting);
        mutex_destroy(&mp->mnt_updating);
        mutex_destroy(&mp->mnt_renamelock);
        if (mp->mnt_op != NULL) {
                vfs_delref(mp->mnt_op);
        }
        kmem_free(mp, sizeof(*mp));
}

/*
 * Mark a mount point as busy, and gain a new reference to it.  Used to
 * prevent the file system from being unmounted during critical sections.
 *
 * vfs_busy can be called multiple times and by multiple threads
 * and must be accompanied by the same number of vfs_unbusy calls.
 *
 * => The caller must hold a pre-existing reference to the mount.
 * => Will fail if the file system is being unmounted, or is unmounted.
 */
int
vfs_busy(struct mount *mp, struct mount **nextp)
{

        KASSERT(mp->mnt_refcnt > 0);

        mutex_enter(&mp->mnt_unmounting);
        if (__predict_false((mp->mnt_iflag & IMNT_GONE) != 0)) {
                mutex_exit(&mp->mnt_unmounting);
                if (nextp != NULL) {
                        KASSERT(mutex_owned(&mountlist_lock));
                        *nextp = TAILQ_NEXT(mp, mnt_list);
                }
                return ENOENT;
        }
        ++mp->mnt_busynest;
        KASSERT(mp->mnt_busynest != 0);
        mutex_exit(&mp->mnt_unmounting);
        if (nextp != NULL) {
                mutex_exit(&mountlist_lock);
        }
        atomic_inc_uint(&mp->mnt_refcnt);
        return 0;
}

/*
 * Unbusy a busy filesystem.
 *
 * Every successful vfs_busy() call must be undone by a vfs_unbusy() call.
 *
 * => If keepref is true, preserve the reference added by vfs_busy().
 * => If nextp != NULL, acquire mountlist_lock.
 */
void
vfs_unbusy(struct mount *mp, bool keepref, struct mount **nextp)
{

        KASSERT(mp->mnt_refcnt > 0);

        if (nextp != NULL) {
                mutex_enter(&mountlist_lock);
        }
        mutex_enter(&mp->mnt_unmounting);
        KASSERT(mp->mnt_busynest != 0);
        mp->mnt_busynest--;
        mutex_exit(&mp->mnt_unmounting);
        if (!keepref) {
                vfs_destroy(mp);
        }
        if (nextp != NULL) {
                KASSERT(mutex_owned(&mountlist_lock));
                *nextp = TAILQ_NEXT(mp, mnt_list);
        }
}
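
/*
 * Usage sketch (illustrative, not a definitive recipe): a caller that
 * already holds a reference to "mp" brackets its critical section with
 * vfs_busy()/vfs_unbusy() so the file system cannot be unmounted from
 * under it:
 *
 *	if (vfs_busy(mp, NULL) == 0) {
 *		... work on the mounted file system ...
 *		vfs_unbusy(mp, false, NULL);
 *	}
 *
 * Passing keepref = false lets vfs_unbusy() drop the extra reference
 * that vfs_busy() gained.
 */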

struct vnode_iterator {
        struct vnode vi_vnode;
};

void
vfs_vnode_iterator_init(struct mount *mp, struct vnode_iterator **vip)
{
        struct vnode *vp;

        vp = vnalloc(mp);

        mutex_enter(&mntvnode_lock);
        TAILQ_INSERT_HEAD(&mp->mnt_vnodelist, vp, v_mntvnodes);
        vp->v_usecount = 1;
        mutex_exit(&mntvnode_lock);

        *vip = (struct vnode_iterator *)vp;
}

void
vfs_vnode_iterator_destroy(struct vnode_iterator *vi)
{
        struct vnode *mvp = &vi->vi_vnode;

        mutex_enter(&mntvnode_lock);
        KASSERT(ISSET(mvp->v_iflag, VI_MARKER));
        if (mvp->v_usecount != 0) {
                TAILQ_REMOVE(&mvp->v_mount->mnt_vnodelist, mvp, v_mntvnodes);
                mvp->v_usecount = 0;
        }
        mutex_exit(&mntvnode_lock);
        vnfree(mvp);
}

struct vnode *
vfs_vnode_iterator_next(struct vnode_iterator *vi,
    bool (*f)(void *, struct vnode *), void *cl)
{
        struct vnode *mvp = &vi->vi_vnode;
        struct mount *mp = mvp->v_mount;
        struct vnode *vp;
        int error;

        KASSERT(ISSET(mvp->v_iflag, VI_MARKER));

        do {
                mutex_enter(&mntvnode_lock);
                vp = TAILQ_NEXT(mvp, v_mntvnodes);
                TAILQ_REMOVE(&mp->mnt_vnodelist, mvp, v_mntvnodes);
                mvp->v_usecount = 0;
again:
                if (vp == NULL) {
                        mutex_exit(&mntvnode_lock);
                        return NULL;
                }
                mutex_enter(vp->v_interlock);
                if (ISSET(vp->v_iflag, VI_MARKER) ||
                    (f && !ISSET(vp->v_iflag, VI_XLOCK) && !(*f)(cl, vp))) {
                        mutex_exit(vp->v_interlock);
                        vp = TAILQ_NEXT(vp, v_mntvnodes);
                        goto again;
                }

                TAILQ_INSERT_AFTER(&mp->mnt_vnodelist, vp, mvp, v_mntvnodes);
                mvp->v_usecount = 1;
                mutex_exit(&mntvnode_lock);
                error = vget(vp, 0, true /* wait */);
                KASSERT(error == 0 || error == ENOENT);
        } while (error != 0);

        return vp;
}
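
/*
 * Usage sketch (illustrative): the expected iteration pattern over a
 * mount's vnode list.  A NULL selector visits every vnode; each vnode
 * returned carries a reference from vget() and must be released (or
 * recycled) by the caller:
 *
 *	struct vnode_iterator *marker;
 *	struct vnode *vp;
 *
 *	vfs_vnode_iterator_init(mp, &marker);
 *	while ((vp = vfs_vnode_iterator_next(marker, NULL, NULL)) != NULL) {
 *		... examine vp ...
 *		vrele(vp);
 *	}
 *	vfs_vnode_iterator_destroy(marker);
 *
 * vflush() below follows this pattern with a selector function.
 */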

/*
 * Move a vnode from one mount queue to another.
 */
void
vfs_insmntque(vnode_t *vp, struct mount *mp)
{
        struct mount *omp;

        KASSERT(mp == NULL || (mp->mnt_iflag & IMNT_UNMOUNT) == 0 ||
            vp->v_tag == VT_VFS);

        mutex_enter(&mntvnode_lock);
        /*
         * Delete from old mount point vnode list, if on one.
         */
        if ((omp = vp->v_mount) != NULL)
                TAILQ_REMOVE(&vp->v_mount->mnt_vnodelist, vp, v_mntvnodes);
        /*
         * Insert into list of vnodes for the new mount point, if
         * available.  The caller must take a reference on the mount
         * structure and donate to the vnode.
         */
        if ((vp->v_mount = mp) != NULL)
                TAILQ_INSERT_TAIL(&mp->mnt_vnodelist, vp, v_mntvnodes);
        mutex_exit(&mntvnode_lock);

        if (omp != NULL) {
                /* Release reference to old mount. */
                vfs_destroy(omp);
        }
}

/*
 * Remove any vnodes in the vnode table belonging to mount point mp.
 *
 * If FORCECLOSE is not specified, there should not be any active ones,
 * return error if any are found (nb: this is a user error, not a
 * system error). If FORCECLOSE is specified, detach any active vnodes
 * that are found.
 *
 * If WRITECLOSE is set, only flush out regular file vnodes open for
 * writing.
 *
 * SKIPSYSTEM causes any vnodes marked VV_SYSTEM to be skipped.
 */
#ifdef DEBUG
int busyprt = 0;	/* print out busy vnodes */
struct ctldebug debug1 = { "busyprt", &busyprt };
#endif

struct vflush_ctx {
        const struct vnode *skipvp;
        int flags;
};

static bool
vflush_selector(void *cl, struct vnode *vp)
{
        struct vflush_ctx *c = cl;
        /*
         * Skip over a selected vnode.
         */
        if (vp == c->skipvp)
                return false;
        /*
         * Skip over vnodes marked VV_SYSTEM.
         */
        if ((c->flags & SKIPSYSTEM) && (vp->v_vflag & VV_SYSTEM))
                return false;

        /*
         * If WRITECLOSE is set, only flush out regular file
         * vnodes open for writing.
         */
        if ((c->flags & WRITECLOSE) && vp->v_type == VREG) {
                if (vp->v_writecount == 0)
                        return false;
        }
        return true;
}

static vnode_t *
vflushnext(struct vnode_iterator *marker, void *ctx, int *when)
{
        if (hardclock_ticks > *when) {
                yield();
                *when = hardclock_ticks + hz / 10;
        }
        return vfs_vnode_iterator_next(marker, vflush_selector, ctx);
}


int
vflush(struct mount *mp, vnode_t *skipvp, int flags)
{
        vnode_t *vp;
        struct vnode_iterator *marker;
        int busy = 0, when = 0;
        struct vflush_ctx ctx;

        /* First, flush out any vnode references from vrele_list. */
        vrele_flush();

        vfs_vnode_iterator_init(mp, &marker);

        ctx.skipvp = skipvp;
        ctx.flags = flags;
        while ((vp = vflushnext(marker, &ctx, &when)) != NULL) {
                /*
                 * First try to recycle the vnode.
                 */
                if (vrecycle(vp))
                        continue;
                /*
                 * If FORCECLOSE is set, forcibly close the vnode.
                 */
                if (flags & FORCECLOSE) {
                        vgone(vp);
                        continue;
                }
#ifdef DEBUG
                if (busyprt)
                        vprint("vflush: busy vnode", vp);
#endif
                vrele(vp);
                busy++;
        }
        vfs_vnode_iterator_destroy(marker);
        if (busy)
                return (EBUSY);
        return (0);
}
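
/*
 * Usage sketch (illustrative): a file system's unmount code would
 * typically call vflush() roughly as follows, forcing the flush only
 * when a forced unmount was requested:
 *
 *	error = vflush(mp, NULLVP, (mntflags & MNT_FORCE) ? FORCECLOSE : 0);
 *
 * An EBUSY return then aborts the unmount.
 */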

/*
 * Mount a file system.
 */

/*
 * Scan all active processes to see if any of them have a current or root
 * directory onto which the new filesystem has just been mounted.  If so,
 * replace them with the new mount point.
 */
static void
mount_checkdirs(vnode_t *olddp)
{
        vnode_t *newdp, *rele1, *rele2;
        struct cwdinfo *cwdi;
        struct proc *p;
        bool retry;

        if (olddp->v_usecount == 1) {
                return;
        }
        if (VFS_ROOT(olddp->v_mountedhere, &newdp))
                panic("mount: lost mount");

        do {
                retry = false;
                mutex_enter(proc_lock);
                PROCLIST_FOREACH(p, &allproc) {
                        if ((cwdi = p->p_cwdi) == NULL)
                                continue;
                        /*
                         * Cannot change to the old directory any more,
                         * so even if we see a stale value it is not a
                         * problem.
                         */
                        if (cwdi->cwdi_cdir != olddp &&
                            cwdi->cwdi_rdir != olddp)
                                continue;
                        retry = true;
                        rele1 = NULL;
                        rele2 = NULL;
                        atomic_inc_uint(&cwdi->cwdi_refcnt);
                        mutex_exit(proc_lock);
                        rw_enter(&cwdi->cwdi_lock, RW_WRITER);
                        if (cwdi->cwdi_cdir == olddp) {
                                rele1 = cwdi->cwdi_cdir;
                                vref(newdp);
                                cwdi->cwdi_cdir = newdp;
                        }
                        if (cwdi->cwdi_rdir == olddp) {
                                rele2 = cwdi->cwdi_rdir;
                                vref(newdp);
                                cwdi->cwdi_rdir = newdp;
                        }
                        rw_exit(&cwdi->cwdi_lock);
                        cwdfree(cwdi);
                        if (rele1 != NULL)
                                vrele(rele1);
                        if (rele2 != NULL)
                                vrele(rele2);
                        mutex_enter(proc_lock);
                        break;
                }
                mutex_exit(proc_lock);
        } while (retry);

        if (rootvnode == olddp) {
                vrele(rootvnode);
                vref(newdp);
                rootvnode = newdp;
        }
        vput(newdp);
}

/*
 * Start extended attributes.
 */
static int
start_extattr(struct mount *mp)
{
        int error;

        error = VFS_EXTATTRCTL(mp, EXTATTR_CMD_START, NULL, 0, NULL);
        if (error)
                printf("%s: failed to start extattr: error = %d\n",
                    mp->mnt_stat.f_mntonname, error);

        return error;
}

int
mount_domount(struct lwp *l, vnode_t **vpp, struct vfsops *vfsops,
    const char *path, int flags, void *data, size_t *data_len)
{
        vnode_t *vp = *vpp;
        struct mount *mp;
        struct pathbuf *pb;
        struct nameidata nd;
        int error;

        error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_MOUNT,
            KAUTH_REQ_SYSTEM_MOUNT_NEW, vp, KAUTH_ARG(flags), data);
        if (error) {
                vfs_delref(vfsops);
                return error;
        }

        /* Cannot make a non-dir a mount-point (from here anyway). */
        if (vp->v_type != VDIR) {
                vfs_delref(vfsops);
                return ENOTDIR;
        }

        if (flags & MNT_EXPORTED) {
                vfs_delref(vfsops);
                return EINVAL;
        }

        if ((mp = vfs_mountalloc(vfsops, vp)) == NULL) {
                vfs_delref(vfsops);
                return ENOMEM;
        }

        mp->mnt_stat.f_owner = kauth_cred_geteuid(l->l_cred);

        /*
         * The underlying file system may refuse the mount for
         * various reasons.  Allow the user to force it to happen.
         *
         * Set the mount level flags.
         */
        mp->mnt_flag = flags & (MNT_BASIC_FLAGS | MNT_FORCE | MNT_IGNORE);

        mutex_enter(&mp->mnt_updating);
        error = VFS_MOUNT(mp, path, data, data_len);
        mp->mnt_flag &= ~MNT_OP_FLAGS;

        if (error != 0)
                goto err_unmounted;

        /*
         * Validate and prepare the mount point.
         */
        error = pathbuf_copyin(path, &pb);
        if (error != 0) {
                goto err_mounted;
        }
        NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF | TRYEMULROOT, pb);
        error = namei(&nd);
        pathbuf_destroy(pb);
        if (error != 0) {
                goto err_mounted;
        }
        if (nd.ni_vp != vp) {
                vput(nd.ni_vp);
                error = EINVAL;
                goto err_mounted;
        }
        if (vp->v_mountedhere != NULL) {
                vput(nd.ni_vp);
                error = EBUSY;
                goto err_mounted;
        }
        error = vinvalbuf(vp, V_SAVE, l->l_cred, l, 0, 0);
        if (error != 0) {
                vput(nd.ni_vp);
                goto err_mounted;
        }

        /*
         * Put the new filesystem on the mount list after root.
         */
        cache_purge(vp);
        mp->mnt_iflag &= ~IMNT_WANTRDWR;

        mutex_enter(&mountlist_lock);
        TAILQ_INSERT_TAIL(&mountlist, mp, mnt_list);
        mutex_exit(&mountlist_lock);
        if ((mp->mnt_flag & (MNT_RDONLY | MNT_ASYNC)) == 0)
                error = vfs_allocate_syncvnode(mp);
        if (error == 0)
                vp->v_mountedhere = mp;
        vput(nd.ni_vp);
        if (error != 0)
                goto err_onmountlist;

        mount_checkdirs(vp);
        mutex_exit(&mp->mnt_updating);

        /* Hold an additional reference to the mount across VFS_START(). */
        vfs_unbusy(mp, true, NULL);
        (void) VFS_STATVFS(mp, &mp->mnt_stat);
        error = VFS_START(mp, 0);
        if (error) {
                vrele(vp);
        } else if (flags & MNT_EXTATTR) {
                (void)start_extattr(mp);
        }
        /* Drop reference held for VFS_START(). */
        vfs_destroy(mp);
        *vpp = NULL;
        return error;

err_onmountlist:
        mutex_enter(&mountlist_lock);
        TAILQ_REMOVE(&mountlist, mp, mnt_list);
        mp->mnt_iflag |= IMNT_GONE;
        mutex_exit(&mountlist_lock);

err_mounted:
        if (VFS_UNMOUNT(mp, MNT_FORCE) != 0)
                panic("Unmounting fresh file system failed");

err_unmounted:
        vp->v_mountedhere = NULL;
        mutex_exit(&mp->mnt_updating);
        vfs_unbusy(mp, false, NULL);
        vfs_destroy(mp);

        return error;
}

/*
 * Do the actual file system unmount.  File system is assumed to have
 * been locked by the caller.
 *
 * => The caller holds a reference to the mount, taken explicitly for
 *    dounmount().
 */
int
dounmount(struct mount *mp, int flags, struct lwp *l)
{
        vnode_t *coveredvp;
        int error, async, used_syncer, used_extattr;

#if NVERIEXEC > 0
        error = veriexec_unmountchk(mp);
        if (error)
                return (error);
#endif /* NVERIEXEC > 0 */

        /*
         * XXX Freeze syncer.  Must do this before locking the
         * mount point.  See the syncer_mutex notes below for details.
         */
        mutex_enter(&syncer_mutex);

        /*
         * Abort the unmount attempt when the filesystem is in use.
         */
        mutex_enter(&mp->mnt_unmounting);
        if (mp->mnt_busynest != 0) {
                mutex_exit(&mp->mnt_unmounting);
                mutex_exit(&syncer_mutex);
                return EBUSY;
        }

        /*
         * Abort the unmount attempt when the filesystem is not mounted.
         */
        if ((mp->mnt_iflag & IMNT_GONE) != 0) {
                mutex_exit(&mp->mnt_unmounting);
                mutex_exit(&syncer_mutex);
                return ENOENT;
        }

        used_syncer = (mp->mnt_syncer != NULL);
        used_extattr = mp->mnt_flag & MNT_EXTATTR;

        /*
         * XXX Syncer must be frozen when we get here.  This should really
         * be done on a per-mountpoint basis, but the syncer doesn't work
         * like that.
         *
         * The caller of dounmount() must acquire syncer_mutex because
         * the syncer itself acquires locks in syncer_mutex -> vfs_busy
         * order, and we must preserve that order to avoid deadlock.
         *
         * So, if the file system did not use the syncer, now is
         * the time to release the syncer_mutex.
         */
        if (used_syncer == 0) {
                mutex_exit(&syncer_mutex);
        }
        mp->mnt_iflag |= IMNT_UNMOUNT;
        mutex_enter(&mp->mnt_updating);
        async = mp->mnt_flag & MNT_ASYNC;
        mp->mnt_flag &= ~MNT_ASYNC;
        cache_purgevfs(mp);	/* remove cache entries for this file sys */
        if (mp->mnt_syncer != NULL)
                vfs_deallocate_syncvnode(mp);
        error = 0;
        if ((mp->mnt_flag & MNT_RDONLY) == 0) {
                error = VFS_SYNC(mp, MNT_WAIT, l->l_cred);
        }
        if (error == 0 || (flags & MNT_FORCE)) {
                error = VFS_UNMOUNT(mp, flags);
        }
        if (error) {
                mp->mnt_iflag &= ~IMNT_UNMOUNT;
                mutex_exit(&mp->mnt_unmounting);
                if ((mp->mnt_flag & (MNT_RDONLY | MNT_ASYNC)) == 0)
                        (void) vfs_allocate_syncvnode(mp);
                mp->mnt_flag |= async;
                mutex_exit(&mp->mnt_updating);
                if (used_syncer)
                        mutex_exit(&syncer_mutex);
                if (used_extattr) {
                        if (start_extattr(mp) != 0)
                                mp->mnt_flag &= ~MNT_EXTATTR;
                        else
                                mp->mnt_flag |= MNT_EXTATTR;
                }
                return (error);
        }
        mutex_exit(&mp->mnt_updating);

        /*
         * Release the mnt_unmounting lock here, because other code calls
         * vfs_busy() while holding the mountlist_lock.
         *
         * Mark the filesystem as gone to prevent further unmount attempts
         * once the mnt_unmounting lock is dropped; this also prevents
         * vfs_busy() from succeeding.
         */
        mp->mnt_iflag |= IMNT_GONE;
        mutex_exit(&mp->mnt_unmounting);

        if ((coveredvp = mp->mnt_vnodecovered) != NULLVP) {
                vn_lock(coveredvp, LK_EXCLUSIVE | LK_RETRY);
                coveredvp->v_mountedhere = NULL;
                VOP_UNLOCK(coveredvp);
        }
        mutex_enter(&mountlist_lock);
        TAILQ_REMOVE(&mountlist, mp, mnt_list);
        mutex_exit(&mountlist_lock);
        if (TAILQ_FIRST(&mp->mnt_vnodelist) != NULL)
                panic("unmount: dangling vnode");
        if (used_syncer)
                mutex_exit(&syncer_mutex);
        vfs_hooks_unmount(mp);

        vfs_destroy(mp);	/* reference from mount() */
        if (coveredvp != NULLVP) {
                vrele(coveredvp);
        }
        return (0);
}

/*
 * Unmount all file systems.
 * We traverse the list in reverse order under the assumption that doing so
 * will avoid needing to worry about dependencies.
 */
bool
vfs_unmountall(struct lwp *l)
{

        printf("unmounting file systems...\n");
        return vfs_unmountall1(l, true, true);
}

static void
vfs_unmount_print(struct mount *mp, const char *pfx)
{

        aprint_verbose("%sunmounted %s on %s type %s\n", pfx,
            mp->mnt_stat.f_mntfromname, mp->mnt_stat.f_mntonname,
            mp->mnt_stat.f_fstypename);
}

bool
vfs_unmount_forceone(struct lwp *l)
{
        struct mount *mp, *nmp;
        int error;

        nmp = NULL;

        TAILQ_FOREACH_REVERSE(mp, &mountlist, mntlist, mnt_list) {
                if (nmp == NULL || mp->mnt_gen > nmp->mnt_gen) {
                        nmp = mp;
                }
        }
        if (nmp == NULL) {
                return false;
        }

#ifdef DEBUG
        printf("forcefully unmounting %s (%s)...\n",
            nmp->mnt_stat.f_mntonname, nmp->mnt_stat.f_mntfromname);
#endif
        atomic_inc_uint(&nmp->mnt_refcnt);
        if ((error = dounmount(nmp, MNT_FORCE, l)) == 0) {
                vfs_unmount_print(nmp, "forcefully ");
                return true;
        } else {
                vfs_destroy(nmp);
        }

#ifdef DEBUG
        printf("forceful unmount of %s failed with error %d\n",
            nmp->mnt_stat.f_mntonname, error);
#endif

        return false;
}

bool
vfs_unmountall1(struct lwp *l, bool force, bool verbose)
{
        struct mount *mp, *nmp;
        bool any_error = false, progress = false;
        int error;

        TAILQ_FOREACH_REVERSE_SAFE(mp, &mountlist, mntlist, mnt_list, nmp) {
#ifdef DEBUG
                printf("unmounting %p %s (%s)...\n",
                    (void *)mp, mp->mnt_stat.f_mntonname,
                    mp->mnt_stat.f_mntfromname);
#endif
                atomic_inc_uint(&mp->mnt_refcnt);
                if ((error = dounmount(mp, force ? MNT_FORCE : 0, l)) == 0) {
                        vfs_unmount_print(mp, "");
                        progress = true;
                } else {
                        vfs_destroy(mp);
                        if (verbose) {
                                printf("unmount of %s failed with error %d\n",
                                    mp->mnt_stat.f_mntonname, error);
                        }
                        any_error = true;
                }
        }
        if (verbose) {
                printf("unmounting done\n");
        }
        if (any_error && verbose) {
                printf("WARNING: some file systems would not unmount\n");
        }
        return progress;
}

void
vfs_sync_all(struct lwp *l)
{
        printf("syncing disks... ");

        /* remove user processes from run queue */
        suspendsched();
        (void)spl0();

        /* avoid coming back this way again if we panic. */
        doing_shutdown = 1;

        do_sys_sync(l);

        /* Wait for sync to finish. */
        if (buf_syncwait() != 0) {
#if defined(DDB) && defined(DEBUG_HALT_BUSY)
                Debugger();
#endif
                printf("giving up\n");
                return;
        } else
                printf("done\n");
}

/*
 * Sync and unmount file systems before shutting down.
 */
void
vfs_shutdown(void)
{
        lwp_t *l = curlwp;

        vfs_sync_all(l);

        /*
         * If we have panicked, do not make the situation potentially
         * worse by unmounting the file systems.
         */
        if (panicstr != NULL) {
                return;
        }

        /* Unmount file systems. */
        vfs_unmountall(l);
}

/*
 * Print a list of supported file system types (used by vfs_mountroot).
 */
static void
vfs_print_fstypes(void)
{
        struct vfsops *v;
        int cnt = 0;

        mutex_enter(&vfs_list_lock);
        LIST_FOREACH(v, &vfs_list, vfs_list)
                ++cnt;
        mutex_exit(&vfs_list_lock);

        if (cnt == 0) {
                printf("WARNING: No file system modules have been loaded.\n");
                return;
        }

        printf("Supported file systems:");
        mutex_enter(&vfs_list_lock);
        LIST_FOREACH(v, &vfs_list, vfs_list) {
                printf(" %s", v->vfs_name);
        }
        mutex_exit(&vfs_list_lock);
        printf("\n");
}

/*
 * Mount the root file system.  If the operator didn't specify a
 * file system to use, try all possible file systems until one
 * succeeds.
 */
int
vfs_mountroot(void)
{
        struct vfsops *v;
        int error = ENODEV;

        if (root_device == NULL)
                panic("vfs_mountroot: root device unknown");

        switch (device_class(root_device)) {
        case DV_IFNET:
                if (rootdev != NODEV)
                        panic("vfs_mountroot: rootdev set for DV_IFNET "
                            "(0x%llx -> %llu,%llu)",
                            (unsigned long long)rootdev,
                            (unsigned long long)major(rootdev),
                            (unsigned long long)minor(rootdev));
                break;

        case DV_DISK:
                if (rootdev == NODEV)
                        panic("vfs_mountroot: rootdev not set for DV_DISK");
                if (bdevvp(rootdev, &rootvp))
                        panic("vfs_mountroot: can't get vnode for rootdev");
                error = VOP_OPEN(rootvp, FREAD, FSCRED);
                if (error) {
                        printf("vfs_mountroot: can't open root device\n");
                        return (error);
                }
                break;

        case DV_VIRTUAL:
                break;

        default:
                printf("%s: inappropriate for root file system\n",
                    device_xname(root_device));
                return (ENODEV);
        }

        /*
         * If the user specified a root fs type, use it.  Make sure the
         * specified type exists and has a vfs_mountroot().
         */
        if (strcmp(rootfstype, ROOT_FSTYPE_ANY) != 0) {
                v = vfs_getopsbyname(rootfstype);
                error = EFTYPE;
                if (v != NULL) {
                        if (v->vfs_mountroot != NULL) {
                                error = (v->vfs_mountroot)();
                        }
                        v->vfs_refcount--;
                }
                goto done;
        }

        /*
         * Try each file system currently configured into the kernel.
         */
        mutex_enter(&vfs_list_lock);
        LIST_FOREACH(v, &vfs_list, vfs_list) {
                if (v->vfs_mountroot == NULL)
                        continue;
#ifdef DEBUG
                aprint_normal("mountroot: trying %s...\n", v->vfs_name);
#endif
                v->vfs_refcount++;
                mutex_exit(&vfs_list_lock);
                error = (*v->vfs_mountroot)();
                mutex_enter(&vfs_list_lock);
                v->vfs_refcount--;
                if (!error) {
                        aprint_normal("root file system type: %s\n",
                            v->vfs_name);
                        break;
                }
        }
        mutex_exit(&vfs_list_lock);

        if (v == NULL) {
                vfs_print_fstypes();
                printf("no file system for %s", device_xname(root_device));
                if (device_class(root_device) == DV_DISK)
                        printf(" (dev 0x%llx)", (unsigned long long)rootdev);
                printf("\n");
                error = EFTYPE;
        }

done:
        if (error && device_class(root_device) == DV_DISK) {
                VOP_CLOSE(rootvp, FREAD, FSCRED);
                vrele(rootvp);
        }
        if (error == 0) {
                struct mount *mp;
                extern struct cwdinfo cwdi0;

                mp = TAILQ_FIRST(&mountlist);
                mp->mnt_flag |= MNT_ROOTFS;
                mp->mnt_op->vfs_refcount++;

                /*
                 * Get the vnode for '/'.  Set cwdi0.cwdi_cdir to
                 * reference it.
                 */
                error = VFS_ROOT(mp, &rootvnode);
                if (error)
                        panic("cannot find root vnode, error=%d", error);
                cwdi0.cwdi_cdir = rootvnode;
                vref(cwdi0.cwdi_cdir);
                VOP_UNLOCK(rootvnode);
                cwdi0.cwdi_rdir = NULL;

                /*
                 * Now that root is mounted, we can fixup initproc's CWD
                 * info.  All other processes are kthreads, which merely
                 * share proc0's CWD info.
                 */
                initproc->p_cwdi->cwdi_cdir = rootvnode;
                vref(initproc->p_cwdi->cwdi_cdir);
                initproc->p_cwdi->cwdi_rdir = NULL;
                /*
                 * Enable loading of modules from the filesystem.
                 */
                module_load_vfs_init();

        }
        return (error);
}

/*
 * mount_specific_key_create --
 *	Create a key for subsystem mount-specific data.
 */
int
mount_specific_key_create(specificdata_key_t *keyp, specificdata_dtor_t dtor)
{

        return specificdata_key_create(mount_specificdata_domain, keyp, dtor);
}

/*
 * mount_specific_key_delete --
 *	Delete a key for subsystem mount-specific data.
 */
void
mount_specific_key_delete(specificdata_key_t key)
{

        specificdata_key_delete(mount_specificdata_domain, key);
}

/*
 * mount_initspecific --
 *	Initialize a mount's specificdata container.
 */
void
mount_initspecific(struct mount *mp)
{
        int error __diagused;

        error = specificdata_init(mount_specificdata_domain,
            &mp->mnt_specdataref);
        KASSERT(error == 0);
}

/*
 * mount_finispecific --
 *	Finalize a mount's specificdata container.
 */
void
mount_finispecific(struct mount *mp)
{

        specificdata_fini(mount_specificdata_domain, &mp->mnt_specdataref);
}

/*
 * mount_getspecific --
 *	Return mount-specific data corresponding to the specified key.
 */
void *
mount_getspecific(struct mount *mp, specificdata_key_t key)
{

        return specificdata_getspecific(mount_specificdata_domain,
            &mp->mnt_specdataref, key);
}

/*
 * mount_setspecific --
 *	Set mount-specific data corresponding to the specified key.
 */
void
mount_setspecific(struct mount *mp, specificdata_key_t key, void *data)
{

        specificdata_setspecific(mount_specificdata_domain,
            &mp->mnt_specdataref, key, data);
}
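
/*
 * Usage sketch (illustrative; "example_key" and "example_dtor" are
 * hypothetical names): a subsystem creates one key at initialization
 * time and then attaches per-mount data to it:
 *
 *	static specificdata_key_t example_key;
 *
 *	mount_specific_key_create(&example_key, example_dtor);
 *	mount_setspecific(mp, example_key, data);
 *	data = mount_getspecific(mp, example_key);
 *
 * vfs_destroy() finalizes the container via specificdata_fini() when
 * the last reference to the mount goes away.
 */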

/*
 * Check to see if a filesystem is mounted on a block device.
 */
int
vfs_mountedon(vnode_t *vp)
{
        vnode_t *vq;
        int error = 0;

        if (vp->v_type != VBLK)
                return ENOTBLK;
        if (spec_node_getmountedfs(vp) != NULL)
                return EBUSY;
        if (spec_node_lookup_by_dev(vp->v_type, vp->v_rdev, &vq) == 0) {
                if (spec_node_getmountedfs(vq) != NULL)
                        error = EBUSY;
                vrele(vq);
        }

        return error;
}

/*
 * Check if a device pointed to by vp is mounted.
 *
 * Returns:
 *   EINVAL	if it's not a disk
 *   EBUSY	if it's a disk and mounted
 *   0		if it's a disk and not mounted
 */
int
rawdev_mounted(vnode_t *vp, vnode_t **bvpp)
{
        vnode_t *bvp;
        dev_t dev;
        int d_type;

        bvp = NULL;
        d_type = D_OTHER;

        if (iskmemvp(vp))
                return EINVAL;

        switch (vp->v_type) {
        case VCHR: {
                const struct cdevsw *cdev;

                dev = vp->v_rdev;
                cdev = cdevsw_lookup(dev);
                if (cdev != NULL) {
                        dev_t blkdev;

                        blkdev = devsw_chr2blk(dev);
                        if (blkdev != NODEV) {
                                if (vfinddev(blkdev, VBLK, &bvp) != 0) {
                                        d_type = (cdev->d_flag & D_TYPEMASK);
                                        /* XXX: what if bvp disappears? */
                                        vrele(bvp);
                                }
                        }
                }

                break;
        }

        case VBLK: {
                const struct bdevsw *bdev;

                dev = vp->v_rdev;
                bdev = bdevsw_lookup(dev);
                if (bdev != NULL)
                        d_type = (bdev->d_flag & D_TYPEMASK);

                bvp = vp;

                break;
        }

        default:
                break;
        }

        if (d_type != D_DISK)
                return EINVAL;

        if (bvpp != NULL)
                *bvpp = bvp;

        /*
         * XXX: This is bogus.  We should be failing the request
         * XXX: not only if this specific slice is mounted, but
         * XXX: if it's on a disk with any other mounted slice.
         */
        if (vfs_mountedon(bvp))
                return EBUSY;

        return 0;
}

/*
 * Make a 'unique' number from a mount type name.
 */
long
makefstype(const char *type)
{
        long rv;

        for (rv = 0; *type; type++) {
                rv <<= 2;
                rv ^= *type;
        }
        return rv;
}
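
/*
 * For example, "ffs" hashes to ((('f' << 2) ^ 'f') << 2) ^ 's': each
 * character is folded into the running value.  Distinct names are only
 * likely, not guaranteed, to yield distinct values, hence the quotes
 * around 'unique' above.
 */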

void
mountlist_append(struct mount *mp)
{
        mutex_enter(&mountlist_lock);
        TAILQ_INSERT_TAIL(&mountlist, mp, mnt_list);
        mutex_exit(&mountlist_lock);
}
