/*	$NetBSD: layer_vnops.c,v 1.27 2006/05/14 21:31:52 elad Exp $	*/

/*
 * Copyright (c) 1999 National Aeronautics & Space Administration
 * All rights reserved.
 *
 * This software was written by William Studenmund of the
 * Numerical Aerospace Simulation Facility, NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the National Aeronautics & Space Administration
 *    nor the names of its contributors may be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NATIONAL AERONAUTICS & SPACE ADMINISTRATION
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE ADMINISTRATION OR CONTRIB-
 * UTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * John Heidemann of the UCLA Ficus project.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)null_vnops.c	8.6 (Berkeley) 5/27/95
 *
 * Ancestors:
 *	@(#)lofs_vnops.c	1.2 (Berkeley) 6/18/92
 *	$Id: layer_vnops.c,v 1.27 2006/05/14 21:31:52 elad Exp $
 *	...and...
 *	@(#)null_vnodeops.c 1.20 92/07/07 UCLA Ficus project
 */

/*
 * Null Layer vnode routines.
 *
 * (See mount_null(8) for more information.)
 *
 * The layer.h, layer_extern.h, layer_vfs.c, and layer_vnops.c files provide
 * the core implementation of the null file system and most other stacked
 * fs's. The description below refers to the null file system, but the
 * services provided by the layer* files are useful for all layered fs's.
 *
 * The null layer duplicates a portion of the file system
 * name space under a new name.  In this respect, it is
 * similar to the loopback file system.  It differs from
 * the loopback fs in two respects: it is implemented using
 * a stackable layer technique, and its "null-nodes" stack above
 * all lower-layer vnodes, not just over directory vnodes.
 *
 * The null layer has two purposes.  First, it serves as a demonstration
 * of layering by providing a layer which does nothing.  (It actually
 * does everything the loopback file system does, which is slightly
 * more than nothing.)  Second, the null layer can serve as a prototype
 * layer.  Since it provides all the necessary layer framework,
 * new file system layers can be created very easily by starting
 * with a null layer.
 *
 * The remainder of this comment examines the null layer as a basis
 * for constructing new layers.
 *
 *
 * INSTANTIATING NEW NULL LAYERS
 *
 * New null layers are created with mount_null(8).
 * Mount_null(8) takes two arguments, the pathname
 * of the lower vfs (target-pn) and the pathname where the null
 * layer will appear in the namespace (alias-pn).  After
 * the null layer is put into place, the contents
 * of the target-pn subtree will be aliased under alias-pn.
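 *
 * For example (using the same illustrative paths as the example
 * further below):
 *
 *	mount_null /usr/include /dev/layer/null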
 *
 * It is conceivable that other overlay filesystems will take different
 * parameters. For instance, data migration or access control layers might
 * only take one pathname, which would serve as both the target-pn and
 * alias-pn described above.
 *
 *
 * OPERATION OF A NULL LAYER
 *
 * The null layer is the minimum file system layer,
 * simply bypassing all possible operations to the lower layer
 * for processing there.  The majority of its activity centers
 * on the bypass routine, through which nearly all vnode operations
 * pass.
 *
 * The bypass routine accepts arbitrary vnode operations for
 * handling by the lower layer.  It begins by examining vnode
 * operation arguments and replacing any layered nodes by their
 * lower-layer equivalents.  It then invokes the operation
 * on the lower layer.  Finally, it replaces the layered nodes
 * in the arguments and, if a vnode is returned by the operation,
 * stacks a layered node on top of the returned vnode.
 *
 * The bypass routine in this file, layer_bypass(), is suitable for use
 * by many different layered filesystems. It can be used by multiple
 * filesystems simultaneously. Alternatively, a layered fs may provide
 * its own bypass routine, in which case layer_bypass() should be used as
 * a model. For instance, the main functionality provided by umapfs, the user
 * identity mapping file system, is handled by a custom bypass routine.
 *
 * Typically a layered fs registers its selected bypass routine as the
 * default vnode operation in its vnodeopv_entry_desc table. Additionally
 * the filesystem must store the bypass entry point in the layerm_bypass
 * field of struct layer_mount. All other layer routines in this file will
 * use the layerm_bypass routine.
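 *
 * A minimal sketch of such a registration (the names here are
 * illustrative; a real filesystem lists many more operations):
 *
 *	int (**examplefs_vnodeop_p)(void *);
 *	const struct vnodeopv_entry_desc examplefs_vnodeop_entries[] = {
 *		{ &vop_default_desc, layer_bypass },
 *		{ &vop_lookup_desc, layer_lookup },
 *		{ &vop_getattr_desc, layer_getattr },
 *		{ NULL, NULL }
 *	};
 *	const struct vnodeopv_desc examplefs_vnodeop_opv_desc =
 *		{ &examplefs_vnodeop_p, examplefs_vnodeop_entries };
 *
 * with the mount code then storing the same entry point in the mount
 * structure, e.g. lmp->layerm_bypass = layer_bypass;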
 *
 * Although the bypass routine handles most operations outright, a number
 * of operations are special cased and handled by the layered fs. One
 * group, layer_setattr, layer_getattr, layer_access, layer_open, and
 * layer_fsync, performs layer-specific manipulation in addition to calling
 * the bypass routine; vop_getattr, for example, must change the fsid being
 * returned. The other group, vop_lock, vop_unlock, vop_inactive,
 * vop_reclaim, and vop_print, is not bypassed at all.
 * Vop_lock and vop_unlock must handle any locking for the
 * current vnode as well as pass the lock request down.
 * Vop_inactive and vop_reclaim are not bypassed so that
 * they can handle freeing null-layer specific data. Vop_print
 * is not bypassed to avoid excessive debugging information.
 *
 * Also, certain vnode operations change the locking state within
 * the operation (create, mknod, remove, link, rename, mkdir, rmdir,
 * and symlink). Ideally these operations should not change the
 * lock state, but should be changed to let the caller of the
 * function unlock them. Otherwise all intermediate vnode layers
 * (such as union, umapfs, etc) must catch these functions to do
 * the necessary locking at their layer.
 *
 *
 * INSTANTIATING VNODE STACKS
 *
 * Mounting associates the null layer with a lower layer,
 * in effect stacking two VFSes.  Vnode stacks are instead
 * created on demand as files are accessed.
 *
 * The initial mount creates a single vnode stack for the
 * root of the new null layer.  All other vnode stacks
 * are created as a result of vnode operations on
 * this or other null vnode stacks.
 *
 * New vnode stacks come into existence as a result of
 * an operation which returns a vnode.
 * The bypass routine stacks a null-node above the new
 * vnode before returning it to the caller.
 *
 * For example, imagine mounting a null layer with
 * "mount_null /usr/include /dev/layer/null".
 * Changing directory to /dev/layer/null will assign
 * the root null-node (which was created when the null layer was mounted).
 * Now consider opening "sys".  A vop_lookup would be
 * done on the root null-node.  This operation would bypass through
 * to the lower layer which would return a vnode representing
 * the UFS "sys".  layer_bypass then builds a null-node
 * aliasing the UFS "sys" and returns this to the caller.
 * Later operations on the null-node "sys" will repeat this
 * process when constructing other vnode stacks.
 *
 *
 * CREATING OTHER FILE SYSTEM LAYERS
 *
 * One of the easiest ways to construct new file system layers is to make
 * a copy of the null layer, rename all files and variables, and
 * then begin modifying the copy. Sed can be used to easily rename
 * all variables.
 *
 * The umap layer is an example of a layer descended from the
 * null layer.
 *
 *
 * INVOKING OPERATIONS ON LOWER LAYERS
 *
 * There are two techniques to invoke operations on a lower layer
 * when the operation cannot be completely bypassed.  Each method
 * is appropriate in different situations.  In both cases,
 * it is the responsibility of the aliasing layer to make
 * the operation arguments "correct" for the lower layer
 * by mapping any vnode arguments to the lower layer.
 *
 * The first approach is to call the aliasing layer's bypass routine.
 * This method is most suitable when you wish to invoke the operation
 * currently being handled on the lower layer.  It has the advantage
 * that the bypass routine already must do argument mapping.
 * An example of this is layer_getattr below.
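 *
 * A sketch of that pattern (this is essentially what layer_getattr
 * below does):
 *
 *	error = LAYERFS_DO_BYPASS(vp, ap);
 *	if (error)
 *		return (error);
 *	... then adjust the bypassed result for this layer ...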
 *
 * A second approach is to directly invoke vnode operations on
 * the lower layer with the VOP_OPERATIONNAME interface.
 * The advantage of this method is that it is easy to invoke
 * arbitrary operations on the lower layer.  The disadvantage
 * is that vnode arguments must be manually mapped.
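 *
 * A sketch of the direct approach (a hypothetical fragment; the
 * argument list must match the operation in question, here vop_fsync):
 *
 *	struct vnode *lowervp = LAYERVPTOLOWERVP(vp);
 *	error = VOP_FSYNC(lowervp, cred, flags, offlo, offhi, l);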
 *
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: layer_vnops.c,v 1.27 2006/05/14 21:31:52 elad Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/time.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/malloc.h>
#include <sys/buf.h>
#include <sys/kauth.h>

#include <miscfs/genfs/layer.h>
#include <miscfs/genfs/layer_extern.h>
#include <miscfs/genfs/genfs.h>

/*
 * This is the 08-June-99 bypass routine, based on the 10-Apr-92 bypass
 *		routine by John Heidemann.
 *	The new element for this version is that the whole nullfs
 * system gained the concept of locks on the lower node, and locks on
 * our nodes. When returning from a call to the lower layer, we may
 * need to update lock state ONLY on our layer. The LAYERFS_UPPER*LOCK()
 * macros provide this functionality.
 *    The 10-Apr-92 version was optimized for speed, throwing away some
 * safety checks.  It should still always work, but it's not as
 * robust to programmer errors.
 *    Define SAFETY to include some error checking code.
 *
 * In general, we map all vnodes going down and unmap them on the way back.
 *
 * Also, some BSD vnode operations have the side effect of vrele'ing
 * their arguments.  With stacking, the reference counts are held
 * by the upper node, not the lower one, so we must handle these
 * side-effects here.  This is not of concern in Sun-derived systems
 * since there are no such side-effects.
 *
 * New for the 08-June-99 version: we also handle operations which unlock
 * the passed-in node (typically they vput the node).
 *
 * This makes the following assumptions:
 * - only one returned vpp
 * - no INOUT vpp's (Sun's vop_open has one of these)
 * - the vnode operation vector of the first vnode should be used
 *   to determine what implementation of the op should be invoked
 * - all mapped vnodes are of our vnode-type (NEEDSWORK:
 *   problems on rmdir'ing mount points and renaming?)
 */
int
layer_bypass(v)
	void *v;
{
	struct vop_generic_args /* {
		struct vnodeop_desc *a_desc;
		<other random data follows, presumably>
	} */ *ap = v;
	int (**our_vnodeop_p)(void *);
	struct vnode **this_vp_p;
	int error, error1;
	struct vnode *old_vps[VDESC_MAX_VPS], *vp0;
	struct vnode **vps_p[VDESC_MAX_VPS];
	struct vnode ***vppp;
	struct vnodeop_desc *descp = ap->a_desc;
	int reles, i, flags;

#ifdef SAFETY
	/*
	 * We require at least one vp.
	 */
	if (descp->vdesc_vp_offsets == NULL ||
	    descp->vdesc_vp_offsets[0] == VDESC_NO_OFFSET)
		panic("%s: no vp's in map.\n", __func__);
#endif

	vps_p[0] =
	    VOPARG_OFFSETTO(struct vnode**, descp->vdesc_vp_offsets[0], ap);
	vp0 = *vps_p[0];
	flags = MOUNTTOLAYERMOUNT(vp0->v_mount)->layerm_flags;
	our_vnodeop_p = vp0->v_op;

	if (flags & LAYERFS_MBYPASSDEBUG)
		printf("%s: %s\n", __func__, descp->vdesc_name);

	/*
	 * Map the vnodes going in.
	 * Later, we'll invoke the operation based on
	 * the first mapped vnode's operation vector.
	 */
	reles = descp->vdesc_flags;
	for (i = 0; i < VDESC_MAX_VPS; reles >>= 1, i++) {
		if (descp->vdesc_vp_offsets[i] == VDESC_NO_OFFSET)
			break;   /* bail out at end of list */
		vps_p[i] = this_vp_p =
		    VOPARG_OFFSETTO(struct vnode**, descp->vdesc_vp_offsets[i],
		    ap);
		/*
		 * We're not guaranteed that any but the first vnode
		 * are of our type.  Check for and don't map any
		 * that aren't.  (We must always map first vp or vclean fails.)
		 */
		if (i && (*this_vp_p == NULL ||
		    (*this_vp_p)->v_op != our_vnodeop_p)) {
			old_vps[i] = NULL;
		} else {
			old_vps[i] = *this_vp_p;
			*(vps_p[i]) = LAYERVPTOLOWERVP(*this_vp_p);
			/*
			 * XXX - Several operations have the side effect
			 * of vrele'ing their vp's.  We must account for
			 * that.  (This should go away in the future.)
			 */
			if (reles & VDESC_VP0_WILLRELE)
				VREF(*this_vp_p);
		}
	}

	/*
	 * Call the operation on the lower layer
	 * with the modified argument structure.
	 */
	error = VCALL(*vps_p[0], descp->vdesc_offset, ap);

	/*
	 * Maintain the illusion of call-by-value
	 * by restoring vnodes in the argument structure
	 * to their original value.
	 */
	reles = descp->vdesc_flags;
	for (i = 0; i < VDESC_MAX_VPS; reles >>= 1, i++) {
		if (descp->vdesc_vp_offsets[i] == VDESC_NO_OFFSET)
			break;   /* bail out at end of list */
		if (old_vps[i]) {
			*(vps_p[i]) = old_vps[i];
			if (reles & VDESC_VP0_WILLUNLOCK)
				LAYERFS_UPPERUNLOCK(*(vps_p[i]), 0, error1);
			if (reles & VDESC_VP0_WILLRELE)
				vrele(*(vps_p[i]));
		}
	}

	/*
	 * Map the possible out-going vpp
	 * (Assumes that the lower layer always returns
	 * a VREF'ed vpp unless it gets an error.)
	 */
	if (descp->vdesc_vpp_offset != VDESC_NO_OFFSET &&
	    !(descp->vdesc_flags & VDESC_NOMAP_VPP) &&
	    !error) {
		/*
		 * XXX - even though some ops have vpp returned vp's,
		 * several ops actually vrele this before returning.
		 * We must avoid these ops.
		 * (This should go away when these ops are regularized.)
		 */
		if (descp->vdesc_flags & VDESC_VPP_WILLRELE)
			goto out;
		vppp = VOPARG_OFFSETTO(struct vnode***,
				 descp->vdesc_vpp_offset, ap);
		/*
		 * Only vop_lookup, vop_create, vop_mkdir, vop_bmap,
		 * vop_mknod, and vop_symlink return vpp's. vop_bmap
		 * doesn't call bypass as the lower vpp is fine (we're just
		 * going to do i/o on it). vop_lookup doesn't call bypass
		 * as a lookup on "." would generate a locking error.
		 * So all the calls which get us here have a locked vpp. :-)
		 */
		error = layer_node_create(old_vps[0]->v_mount, **vppp, *vppp);
		if (error) {
			vput(**vppp);
			**vppp = NULL;
		}
	}

 out:
	return (error);
}

/*
 * We have to carry on the locking protocol on the layer vnodes
 * as we progress through the tree. We also have to enforce read-only
 * if this layer is mounted read-only.
 */
int
layer_lookup(v)
	void *v;
{
	struct vop_lookup_args /* {
		struct vnodeop_desc *a_desc;
		struct vnode * a_dvp;
		struct vnode ** a_vpp;
		struct componentname * a_cnp;
	} */ *ap = v;
	struct componentname *cnp = ap->a_cnp;
	int flags = cnp->cn_flags;
	struct vnode *dvp, *vp, *ldvp;
	int error, r;

	dvp = ap->a_dvp;

	if ((flags & ISLASTCN) && (dvp->v_mount->mnt_flag & MNT_RDONLY) &&
	    (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME))
		return (EROFS);

	ldvp = LAYERVPTOLOWERVP(dvp);
	ap->a_dvp = ldvp;
	error = VCALL(ldvp, ap->a_desc->vdesc_offset, ap);
	vp = *ap->a_vpp;
	*ap->a_vpp = NULL;

	if (error == EJUSTRETURN && (flags & ISLASTCN) &&
	    (dvp->v_mount->mnt_flag & MNT_RDONLY) &&
	    (cnp->cn_nameiop == CREATE || cnp->cn_nameiop == RENAME))
		error = EROFS;
	/*
	 * We must do the same locking and unlocking at this layer as
	 * is done in the layers below us. It used to be we would try
	 * to guess based on what was set with the flags and error codes.
	 *
	 * But that doesn't work. So now we have the underlying VOP_LOOKUP
	 * tell us if it released the parent vnode, and we adjust the
	 * upper node accordingly. We can't just look at the lock states
	 * of the lower nodes as someone else might have come along and
	 * locked the parent node after our call to VOP_LOOKUP locked it.
	 */
	if (cnp->cn_flags & PDIRUNLOCK) {
		LAYERFS_UPPERUNLOCK(dvp, 0, r);
	}
	if (ldvp == vp) {
		/*
		 * Did lookup on "." or ".." in the root node of a mount point.
		 * So we return dvp after a VREF.
		 */
		*ap->a_vpp = dvp;
		VREF(dvp);
		vrele(vp);
	} else if (vp != NULL) {
		error = layer_node_create(dvp->v_mount, vp, ap->a_vpp);
		if (error) {
			vput(vp);
			if (cnp->cn_flags & PDIRUNLOCK) {
				if (vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY) == 0)
					cnp->cn_flags &= ~PDIRUNLOCK;
			}
		}
	}
	return (error);
}

/*
 * Setattr call. Disallow write attempts if the layer is mounted read-only.
 */
int
layer_setattr(v)
	void *v;
{
	struct vop_setattr_args /* {
		struct vnodeop_desc *a_desc;
		struct vnode *a_vp;
		struct vattr *a_vap;
		kauth_cred_t a_cred;
		struct lwp *a_l;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct vattr *vap = ap->a_vap;

	if ((vap->va_flags != VNOVAL || vap->va_uid != (uid_t)VNOVAL ||
	    vap->va_gid != (gid_t)VNOVAL || vap->va_atime.tv_sec != VNOVAL ||
	    vap->va_mtime.tv_sec != VNOVAL || vap->va_mode != (mode_t)VNOVAL) &&
	    (vp->v_mount->mnt_flag & MNT_RDONLY))
		return (EROFS);
	if (vap->va_size != VNOVAL) {
		switch (vp->v_type) {
		case VDIR:
			return (EISDIR);
		case VCHR:
		case VBLK:
		case VSOCK:
		case VFIFO:
			return (0);
		case VREG:
		case VLNK:
		default:
			/*
			 * Disallow write attempts if the filesystem is
			 * mounted read-only.
			 */
			if (vp->v_mount->mnt_flag & MNT_RDONLY)
				return (EROFS);
		}
	}
	return (LAYERFS_DO_BYPASS(vp, ap));
}

/*
 * We handle getattr only to change the fsid.
 */
int
layer_getattr(v)
	void *v;
{
	struct vop_getattr_args /* {
		struct vnode *a_vp;
		struct vattr *a_vap;
		kauth_cred_t a_cred;
		struct lwp *a_l;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	int error;

	if ((error = LAYERFS_DO_BYPASS(vp, ap)) != 0)
		return (error);
	/* Requires that arguments be restored. */
	ap->a_vap->va_fsid = vp->v_mount->mnt_stat.f_fsidx.__fsid_val[0];
	return (0);
}

int
layer_access(v)
	void *v;
{
	struct vop_access_args /* {
		struct vnode *a_vp;
		int  a_mode;
		kauth_cred_t a_cred;
		struct lwp *a_l;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	mode_t mode = ap->a_mode;

	/*
	 * Disallow write attempts on read-only layers,
	 * unless the file is a socket, fifo, or a block or
	 * character device resident on the file system.
	 */
	if (mode & VWRITE) {
		switch (vp->v_type) {
		case VDIR:
		case VLNK:
		case VREG:
			if (vp->v_mount->mnt_flag & MNT_RDONLY)
				return (EROFS);
			break;
		default:
			break;
		}
	}
	return (LAYERFS_DO_BYPASS(vp, ap));
}

/*
 * We must handle open to be able to catch MNT_NODEV and friends.
 */
int
layer_open(v)
	void *v;
{
	struct vop_open_args *ap = v;
	struct vnode *vp = ap->a_vp;
	enum vtype lower_type = LAYERVPTOLOWERVP(vp)->v_type;

	if (((lower_type == VBLK) || (lower_type == VCHR)) &&
	    (vp->v_mount->mnt_flag & MNT_NODEV))
		return ENXIO;

	return LAYERFS_DO_BYPASS(vp, ap);
}

/*
 * We need to process our own vnode lock and then clear the
 * interlock flag as it applies only to our vnode, not the
 * vnodes below us on the stack.
 */
int
layer_lock(v)
	void *v;
{
	struct vop_lock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct proc *a_p;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp, *lowervp;
	int	flags = ap->a_flags, error;

	if (vp->v_vnlock != NULL) {
		/*
		 * The lower level has exported a struct lock to us. Use
		 * it so that all vnodes in the stack lock and unlock
		 * simultaneously. Note: we don't DRAIN the lock as DRAIN
		 * decommissions the lock - just because our vnode is
		 * going away doesn't mean the struct lock below us is.
		 * LK_EXCLUSIVE is fine.
		 */
		if ((flags & LK_TYPE_MASK) == LK_DRAIN) {
			return (lockmgr(vp->v_vnlock,
				(flags & ~LK_TYPE_MASK) | LK_EXCLUSIVE,
				&vp->v_interlock));
		} else
			return (lockmgr(vp->v_vnlock, flags, &vp->v_interlock));
	} else {
		/*
		 * Ahh well. It would be nice if the fs we're over would
		 * export a struct lock for us to use, but it doesn't.
		 *
		 * To prevent race conditions involving doing a lookup
		 * on "..", we have to lock the lower node, then lock our
		 * node. Most of the time it won't matter that we lock our
		 * node (as any locking would need the lower one locked
		 * first). But we can LK_DRAIN the upper lock as a step
		 * towards decommissioning it.
		 */
		lowervp = LAYERVPTOLOWERVP(vp);
		if (flags & LK_INTERLOCK) {
			simple_unlock(&vp->v_interlock);
			flags &= ~LK_INTERLOCK;
		}
		if ((flags & LK_TYPE_MASK) == LK_DRAIN) {
			error = VOP_LOCK(lowervp,
				(flags & ~LK_TYPE_MASK) | LK_EXCLUSIVE);
		} else
			error = VOP_LOCK(lowervp, flags);
		if (error)
			return (error);
		if ((error = lockmgr(&vp->v_lock, flags, &vp->v_interlock))) {
			VOP_UNLOCK(lowervp, 0);
		}
		return (error);
	}
}

/*
 * Release our lock. If the lower layer exported a struct lock, a
 * single release covers the whole stack; otherwise we release the
 * lower node's lock and then our own.
 */
int
layer_unlock(v)
	void *v;
{
	struct vop_unlock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct proc *a_p;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	int	flags = ap->a_flags;

	if (vp->v_vnlock != NULL) {
		return (lockmgr(vp->v_vnlock, ap->a_flags | LK_RELEASE,
			&vp->v_interlock));
	} else {
		if (flags & LK_INTERLOCK) {
			simple_unlock(&vp->v_interlock);
			flags &= ~LK_INTERLOCK;
		}
		VOP_UNLOCK(LAYERVPTOLOWERVP(vp), flags);
		return (lockmgr(&vp->v_lock, ap->a_flags | LK_RELEASE,
			&vp->v_interlock));
	}
}

int
layer_islocked(v)
	void *v;
{
	struct vop_islocked_args /* {
		struct vnode *a_vp;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	int lkstatus;

	if (vp->v_vnlock != NULL)
		return lockstatus(vp->v_vnlock);

	lkstatus = VOP_ISLOCKED(LAYERVPTOLOWERVP(vp));
	if (lkstatus)
		return lkstatus;

	return lockstatus(&vp->v_lock);
}

/*
 * If vinvalbuf is calling us, it's a "shallow fsync" -- don't bother
 * syncing the underlying vnodes, since they'll be fsync'ed when
 * reclaimed; otherwise, pass it through to the underlying layer.
 *
 * XXX Do we still need to worry about shallow fsync?
 */
int
layer_fsync(v)
	void *v;
{
	struct vop_fsync_args /* {
		struct vnode *a_vp;
		kauth_cred_t a_cred;
		int  a_flags;
		off_t a_offlo;
		off_t a_offhi;
		struct lwp *a_l;
	} */ *ap = v;

	if (ap->a_flags & FSYNC_RECLAIM) {
		return 0;
	}

	return (LAYERFS_DO_BYPASS(ap->a_vp, ap));
}

int
layer_inactive(v)
	void *v;
{
	struct vop_inactive_args /* {
		struct vnode *a_vp;
		struct lwp *a_l;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;

	/*
	 * Do nothing (and _don't_ bypass).
	 * Wait to vrele lowervp until reclaim,
	 * so that until then our layer_node is in the
	 * cache and reusable.
	 *
	 * NEEDSWORK: Someday, consider inactive'ing
	 * the lowervp and then trying to reactivate it
	 * with capabilities (v_id)
	 * like they do in the name lookup cache code.
	 * That's too much work for now.
	 */
	VOP_UNLOCK(vp, 0);

	/*
	 * ..., but don't cache the device node. Also, if we did a
	 * remove, don't cache the node.
	 */
	if (vp->v_type == VBLK || vp->v_type == VCHR
	    || (VTOLAYER(vp)->layer_flags & LAYERFS_REMOVED))
		vgone(vp);
	return (0);
}

int
layer_remove(v)
	void *v;
{
	struct vop_remove_args /* {
		struct vnode		*a_dvp;
		struct vnode		*a_vp;
		struct componentname	*a_cnp;
	} */ *ap = v;
	int		error;
	struct vnode	*vp = ap->a_vp;

	vref(vp);
	if ((error = LAYERFS_DO_BYPASS(vp, ap)) == 0)
		VTOLAYER(vp)->layer_flags |= LAYERFS_REMOVED;

	vrele(vp);

	return (error);
}

int
layer_rename(v)
	void *v;
{
	struct vop_rename_args  /* {
		struct vnode		*a_fdvp;
		struct vnode		*a_fvp;
		struct componentname	*a_fcnp;
		struct vnode		*a_tdvp;
		struct vnode		*a_tvp;
		struct componentname	*a_tcnp;
	} */ *ap = v;
	int error;
	struct vnode *fdvp = ap->a_fdvp;
	struct vnode *tvp;

	tvp = ap->a_tvp;
	if (tvp) {
		if (tvp->v_mount != fdvp->v_mount)
			tvp = NULL;
		else
			vref(tvp);
	}
	error = LAYERFS_DO_BYPASS(fdvp, ap);
	if (tvp) {
		if (error == 0)
			VTOLAYER(tvp)->layer_flags |= LAYERFS_REMOVED;
		vrele(tvp);
	}

	return (error);
}

int
layer_rmdir(v)
	void *v;
{
	struct vop_rmdir_args /* {
		struct vnode		*a_dvp;
		struct vnode		*a_vp;
		struct componentname	*a_cnp;
	} */ *ap = v;
	int		error;
	struct vnode	*vp = ap->a_vp;

	vref(vp);
	if ((error = LAYERFS_DO_BYPASS(vp, ap)) == 0)
		VTOLAYER(vp)->layer_flags |= LAYERFS_REMOVED;

	vrele(vp);

	return (error);
}

int
layer_reclaim(v)
	void *v;
{
	struct vop_reclaim_args /* {
		struct vnode *a_vp;
		struct lwp *a_l;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct layer_mount *lmp = MOUNTTOLAYERMOUNT(vp->v_mount);
	struct layer_node *xp = VTOLAYER(vp);
	struct vnode *lowervp = xp->layer_lowervp;

	/*
	 * Note: in vop_reclaim, the node's struct lock has been
	 * decommissioned, so we have to be careful about calling
	 * VOP's on ourself. Even if we turned a LK_DRAIN into an
	 * LK_EXCLUSIVE in layer_lock, we still must be careful as VXLOCK is
	 * set.
	 */
	if (vp == lmp->layerm_rootvp) {
		/*
		 * Oops! We no longer have a root node. The most likely
		 * reason is that someone forcibly unmounted the
		 * underlying fs.
		 *
		 * Now getting the root vnode will fail. We're dead. :-(
		 */
		lmp->layerm_rootvp = NULL;
	}
	/* After this assignment, this node will not be re-used. */
	xp->layer_lowervp = NULL;
	simple_lock(&lmp->layerm_hashlock);
	LIST_REMOVE(xp, layer_hash);
	simple_unlock(&lmp->layerm_hashlock);
	FREE(vp->v_data, M_TEMP);
	vp->v_data = NULL;
	vrele(lowervp);
	return (0);
}

/*
 * We just feed the returned vnode up to the caller - there's no need
 * to build a layer node on top of the node on which we're going to do
 * i/o. :-)
 */
int
layer_bmap(v)
	void *v;
{
	struct vop_bmap_args /* {
		struct vnode *a_vp;
		daddr_t  a_bn;
		struct vnode **a_vpp;
		daddr_t *a_bnp;
		int *a_runp;
	} */ *ap = v;
	struct vnode *vp;

	ap->a_vp = vp = LAYERVPTOLOWERVP(ap->a_vp);

	return (VCALL(vp, ap->a_desc->vdesc_offset, ap));
}

int
layer_print(v)
	void *v;
{
	struct vop_print_args /* {
		struct vnode *a_vp;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;

	printf("\ttag VT_LAYERFS, vp=%p, lowervp=%p\n", vp,
	    LAYERVPTOLOWERVP(vp));
	return (0);
}

/*
 * XXX - vop_bwrite must be hand coded because it has no
 * vnode in its arguments.
 * This goes away with a merged VM/buffer cache.
 */
int
layer_bwrite(v)
	void *v;
{
	struct vop_bwrite_args /* {
		struct buf *a_bp;
	} */ *ap = v;
	struct buf *bp = ap->a_bp;
	int error;
	struct vnode *savedvp;

	savedvp = bp->b_vp;
	bp->b_vp = LAYERVPTOLOWERVP(bp->b_vp);

	error = VOP_BWRITE(bp);

	bp->b_vp = savedvp;

	return (error);
}

int
layer_getpages(v)
	void *v;
{
	struct vop_getpages_args /* {
		struct vnode *a_vp;
		voff_t a_offset;
		struct vm_page **a_m;
		int *a_count;
		int a_centeridx;
		vm_prot_t a_access_type;
		int a_advice;
		int a_flags;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	int error;

	/*
	 * Just pass the request on to the underlying layer.
	 */
	if (ap->a_flags & PGO_LOCKED) {
		return EBUSY;
	}
	ap->a_vp = LAYERVPTOLOWERVP(vp);
	simple_unlock(&vp->v_interlock);
	simple_lock(&ap->a_vp->v_interlock);
	error = VCALL(ap->a_vp, VOFFSET(vop_getpages), ap);
	return error;
}

int
layer_putpages(v)
	void *v;
{
	struct vop_putpages_args /* {
		struct vnode *a_vp;
		voff_t a_offlo;
		voff_t a_offhi;
		int a_flags;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	int error;

	/*
	 * Just pass the request on to the underlying layer.
	 */
	ap->a_vp = LAYERVPTOLOWERVP(vp);
	simple_unlock(&vp->v_interlock);
	simple_lock(&ap->a_vp->v_interlock);
	error = VCALL(ap->a_vp, VOFFSET(vop_putpages), ap);
	return error;
}