/*	$NetBSD: layer_vnops.c,v 1.67.12.2 2020/02/29 20:21:04 ad Exp $	*/

/*
 * Copyright (c) 1999 National Aeronautics & Space Administration
 * All rights reserved.
 *
 * This software was written by William Studenmund of the
 * Numerical Aerospace Simulation Facility, NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the National Aeronautics & Space Administration
 *    nor the names of its contributors may be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NATIONAL AERONAUTICS & SPACE ADMINISTRATION
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE ADMINISTRATION OR CONTRIB-
 * UTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * John Heidemann of the UCLA Ficus project.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)null_vnops.c	8.6 (Berkeley) 5/27/95
 *
 * Ancestors:
 *	@(#)lofs_vnops.c	1.2 (Berkeley) 6/18/92
 *	Id: lofs_vnops.c,v 1.11 1992/05/30 10:05:43 jsp Exp jsp
 *	...and...
 *	@(#)null_vnodeops.c 1.20 92/07/07 UCLA Ficus project
 */

/*
 * Generic layer vnode operations.
 *
 * The layer.h, layer_extern.h, layer_vfs.c, and layer_vnops.c files provide
 * the core implementation of stacked file-systems.
 *
 * The layerfs duplicates a portion of the file system name space under
 * a new name.  In this respect, it is similar to the loopback file system.
 * It differs from the loopback fs in two respects: it is implemented using
 * a stackable layers technique, and its "layerfs-nodes" stack above all
 * lower-layer vnodes, not just over directory vnodes.
 *
 * OPERATION OF LAYERFS
 *
 * The layerfs is the minimum file system layer, bypassing all possible
 * operations to the lower layer for processing there.  The majority of its
 * activity centers on the bypass routine, through which nearly all vnode
 * operations pass.
 *
 * The bypass routine accepts arbitrary vnode operations for handling by
 * the lower layer.  It begins by examining vnode operation arguments and
 * replacing any layered nodes by their lower-layer equivalents.  It then
 * invokes an operation on the lower layer.  Finally, it replaces the
 * layered nodes in the arguments and, if a vnode is returned by the
 * operation, stacks a layered node on top of the returned vnode.
 *
 * The bypass routine in this file, layer_bypass(), is suitable for use
 * by many different layered filesystems. It can be used by multiple
 * filesystems simultaneously. Alternatively, a layered fs may provide
 * its own bypass routine, in which case layer_bypass() should be used as
 * a model. For instance, the main functionality provided by umapfs, the user
 * identity mapping file system, is handled by a custom bypass routine.
 *
 * Typically a layered fs registers its selected bypass routine as the
 * default vnode operation in its vnodeopv_entry_desc table. Additionally
 * the filesystem must store the bypass entry point in the layerm_bypass
 * field of struct layer_mount. All other layer routines in this file will
 * use the layerm_bypass() routine.  (An illustrative sketch of such a
 * registration follows this comment.)
 *
 * Although the bypass routine handles most operations outright, a number
 * of operations are special cased and handled by the layerfs.  For instance,
 * layer_getattr() must change the fsid being returned, while layer_lock()
 * and layer_unlock() must handle any locking for the current vnode as well
 * as pass the lock request down.  layer_inactive() and layer_reclaim() are
 * not bypassed so that they can handle freeing layerfs-specific data.  Also,
 * certain vnode operations (create, mknod, remove, link, rename, mkdir,
 * rmdir, and symlink) change the locking state within the operation.  Ideally
 * these operations should not change the lock state, but should be changed
 * to let the caller of the function unlock them.  Otherwise, all intermediate
 * vnode layers (such as union, umapfs, etc) must catch these functions to do
 * the necessary locking at their layer.
 *
 * INSTANTIATING VNODE STACKS
 *
 * Mounting associates a "layerfs-nodes" stack and lower layer, in effect
 * stacking two VFSes.  The initial mount creates a single vnode stack for
 * the root of the new layerfs.  All other vnode stacks are created as a
 * result of vnode operations on this or other layerfs vnode stacks.
 *
 * New vnode stacks come into existence as a result of an operation which
 * returns a vnode.  The bypass routine stacks a layerfs-node above the new
 * vnode before returning it to the caller.
 *
 * For example, imagine mounting a null layer with:
 *
 *	"mount_null /usr/include /dev/layer/null"
 *
 * Changing directory to /dev/layer/null will assign the root layerfs-node
 * (which was created when the null layer was mounted).  Now consider opening
 * "sys".  A layer_lookup() would be performed on the root layerfs-node.
 * This operation would bypass through to the lower layer which would return
 * a vnode representing the UFS "sys".  Then, layer_bypass() builds a
 * layerfs-node aliasing the UFS "sys" and returns this to the caller.
 * Later operations on the layerfs-node "sys" will repeat this process when
 * constructing other vnode stacks.
 *
 * INVOKING OPERATIONS ON LOWER LAYERS
 *
 * There are two techniques to invoke operations on a lower layer when the
 * operation cannot be completely bypassed.  Each method is appropriate in
 * different situations.  In both cases, it is the responsibility of the
 * aliasing layer to make the operation arguments "correct" for the lower
 * layer by mapping any vnode arguments to the lower layer.
 *
 * The first approach is to call the aliasing layer's bypass routine.  This
 * method is most suitable when you wish to invoke the operation currently
 * being handled on the lower layer.  It has the advantage that the bypass
 * routine already must do argument mapping.  An example of this is
 * layer_getattr().
 *
 * A second approach is to directly invoke vnode operations on the lower
 * layer with the VOP_OPERATIONNAME interface.  The advantage of this method
 * is that it is easy to invoke arbitrary operations on the lower layer.
 * The disadvantage is that vnode arguments must be manually mapped.
 * (A compact sketch of both techniques follows layer_bypass() below.)
 */
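
/*
 * Illustrative sketch only (kept under #if 0, not compiled): how a layered
 * file system built on these routines might register layer_bypass() as its
 * default vnode operation and record the same entry point in layerm_bypass
 * at mount time.  The "examplefs_" names are hypothetical; the real users
 * of this code (nullfs, umapfs, overlay) follow the same pattern.
 */
#if 0
int (**examplefs_vnodeop_p)(void *);

const struct vnodeopv_entry_desc examplefs_vnodeop_entries[] = {
	{ &vop_default_desc, layer_bypass },	/* everything else bypasses */
	{ &vop_lookup_desc, layer_lookup },	/* special-cased operations */
	{ &vop_setattr_desc, layer_setattr },
	{ &vop_getattr_desc, layer_getattr },
	{ &vop_inactive_desc, layer_inactive },
	{ &vop_reclaim_desc, layer_reclaim },
	{ NULL, NULL }
};

const struct vnodeopv_desc examplefs_vnodeop_opv_desc =
	{ &examplefs_vnodeop_p, examplefs_vnodeop_entries };

/*
 * In the mount routine, the chosen bypass entry point is stored so that
 * the LAYERFS_DO_BYPASS() calls below can find it, e.g.:
 *
 *	lmp->layerm_bypass = layer_bypass;
 */
#endif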

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: layer_vnops.c,v 1.67.12.2 2020/02/29 20:21:04 ad Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/time.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/kmem.h>
#include <sys/buf.h>
#include <sys/kauth.h>
#include <sys/fcntl.h>
#include <sys/fstrans.h>

#include <miscfs/genfs/layer.h>
#include <miscfs/genfs/layer_extern.h>
#include <miscfs/genfs/genfs.h>
#include <miscfs/specfs/specdev.h>

/*
 * This is the 08-June-99 bypass routine, based on the 10-Apr-92 bypass
 * routine by John Heidemann.  The new element for this version is that
 * the whole nullfs system gained the concept of locks on the lower node.
 * The 10-Apr-92 version was optimized for speed, throwing away some
 * safety checks.  It should still always work, but it's not as
 * robust to programmer errors.
 *
 * In general, we map all vnodes going down and unmap them on the way back.
 *
 * Also, some BSD vnode operations have the side effect of vrele'ing
 * their arguments.  With stacking, the reference counts are held
 * by the upper node, not the lower one, so we must handle these
 * side-effects here.  This is not of concern in Sun-derived systems
 * since there are no such side-effects.
 *
 * New for the 08-June-99 version: we also handle operations which unlock
 * the passed-in node (typically they vput the node).
 *
 * This makes the following assumptions:
 * - only one returned vpp
 * - no INOUT vpp's (Sun's vop_open has one of these)
 * - the vnode operation vector of the first vnode should be used
 *   to determine what implementation of the op should be invoked
 * - all mapped vnodes are of our vnode-type (NEEDSWORK:
 *   problems on rmdir'ing mount points and renaming?)
 */
int
layer_bypass(void *v)
{
	struct vop_generic_args /* {
		struct vnodeop_desc *a_desc;
		<other random data follows, presumably>
	} */ *ap = v;
	int (**our_vnodeop_p)(void *);
	struct vnode **this_vp_p;
	int error;
	struct vnode *old_vps[VDESC_MAX_VPS], *vp0;
	struct vnode **vps_p[VDESC_MAX_VPS];
	struct vnode ***vppp;
	struct mount *mp;
	struct vnodeop_desc *descp = ap->a_desc;
	int reles, i, flags;

#ifdef DIAGNOSTIC
	/*
	 * We require at least one vp.
	 */
	if (descp->vdesc_vp_offsets == NULL ||
	    descp->vdesc_vp_offsets[0] == VDESC_NO_OFFSET)
		panic("%s: no vp's in map.\n", __func__);
#endif

	vps_p[0] =
	    VOPARG_OFFSETTO(struct vnode**, descp->vdesc_vp_offsets[0], ap);
	vp0 = *vps_p[0];
	mp = vp0->v_mount;
	flags = MOUNTTOLAYERMOUNT(mp)->layerm_flags;
	our_vnodeop_p = vp0->v_op;

	if (flags & LAYERFS_MBYPASSDEBUG)
		printf("%s: %s\n", __func__, descp->vdesc_name);

	/*
	 * Map the vnodes going in.
	 * Later, we'll invoke the operation based on
	 * the first mapped vnode's operation vector.
	 */
	reles = descp->vdesc_flags;
	for (i = 0; i < VDESC_MAX_VPS; reles >>= 1, i++) {
		if (descp->vdesc_vp_offsets[i] == VDESC_NO_OFFSET)
			break;   /* bail out at end of list */
		vps_p[i] = this_vp_p =
		    VOPARG_OFFSETTO(struct vnode**, descp->vdesc_vp_offsets[i],
		    ap);
		/*
		 * We're not guaranteed that any but the first vnode
		 * are of our type.  Check for and don't map any
		 * that aren't.  (We must always map first vp or vclean fails.)
		 */
		if (i && (*this_vp_p == NULL ||
		    (*this_vp_p)->v_op != our_vnodeop_p)) {
			old_vps[i] = NULL;
		} else {
			old_vps[i] = *this_vp_p;
			*(vps_p[i]) = LAYERVPTOLOWERVP(*this_vp_p);
			/*
			 * XXX - Several operations have the side effect
			 * of vrele'ing their vp's.  We must account for
			 * that.  (This should go away in the future.)
			 */
			if (reles & VDESC_VP0_WILLRELE)
				vref(*this_vp_p);
		}
	}

	/*
	 * Call the operation on the lower layer
	 * with the modified argument structure.
	 */
	error = VCALL(*vps_p[0], descp->vdesc_offset, ap);

	/*
	 * Maintain the illusion of call-by-value
	 * by restoring vnodes in the argument structure
	 * to their original value.
	 */
	reles = descp->vdesc_flags;
	for (i = 0; i < VDESC_MAX_VPS; reles >>= 1, i++) {
		if (descp->vdesc_vp_offsets[i] == VDESC_NO_OFFSET)
			break;   /* bail out at end of list */
		if (old_vps[i]) {
			*(vps_p[i]) = old_vps[i];
			if (reles & VDESC_VP0_WILLRELE)
				vrele(*(vps_p[i]));
		}
	}

	/*
	 * Map the possible out-going vpp
	 * (Assumes that the lower layer always returns
	 * a VREF'ed vpp unless it gets an error.)
	 */
	if (descp->vdesc_vpp_offset != VDESC_NO_OFFSET && !error) {
		vppp = VOPARG_OFFSETTO(struct vnode***,
				 descp->vdesc_vpp_offset, ap);
		/*
		 * Only vop_lookup, vop_create, vop_mkdir, vop_mknod
		 * and vop_symlink return vpp's. vop_lookup doesn't call bypass
		 * as a lookup on "." would generate a locking error.
		 * So all the calls which get us here have an unlocked vpp. :-)
		 */
		error = layer_node_create(mp, **vppp, *vppp);
		if (error) {
			vrele(**vppp);
			**vppp = NULL;
		}
	}
	return error;
}
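
/*
 * Illustrative sketch only (kept under #if 0, not compiled; the
 * "examplefs_" names are hypothetical): the two techniques described
 * under INVOKING OPERATIONS ON LOWER LAYERS above.  The first defers to
 * the registered bypass routine, which does the argument mapping; the
 * second maps the vnode argument by hand and invokes the lower layer's
 * operation directly.
 */
#if 0
/* Technique 1: let the bypass routine map the arguments. */
int
examplefs_fsync(void *v)
{
	struct vop_fsync_args /* {
		struct vnode *a_vp;
		kauth_cred_t a_cred;
		int a_flags;
		off_t a_offlo;
		off_t a_offhi;
	} */ *ap = v;

	return LAYERFS_DO_BYPASS(ap->a_vp, ap);
}

/* Technique 2: map the vnode manually and call the lower layer's VOP. */
int
examplefs_fsync_direct(void *v)
{
	struct vop_fsync_args *ap = v;
	struct vnode *lvp = LAYERVPTOLOWERVP(ap->a_vp);

	return VOP_FSYNC(lvp, ap->a_cred, ap->a_flags,
	    ap->a_offlo, ap->a_offhi);
}
#endif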

/*
 * We have to carry on the locking protocol on the layer vnodes
 * as we progress through the tree. We also have to enforce read-only
 * if this layer is mounted read-only.
 */
int
layer_lookup(void *v)
{
	struct vop_lookup_v2_args /* {
		struct vnodeop_desc *a_desc;
		struct vnode * a_dvp;
		struct vnode ** a_vpp;
		struct componentname * a_cnp;
	} */ *ap = v;
	struct componentname *cnp = ap->a_cnp;
	struct vnode *dvp, *lvp, *ldvp;
	int error, flags = cnp->cn_flags;

	dvp = ap->a_dvp;

	if ((flags & ISLASTCN) && (dvp->v_mount->mnt_flag & MNT_RDONLY) &&
	    (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME)) {
		*ap->a_vpp = NULL;
		return EROFS;
	}

	ldvp = LAYERVPTOLOWERVP(dvp);
	ap->a_dvp = ldvp;
	error = VCALL(ldvp, ap->a_desc->vdesc_offset, ap);
	lvp = *ap->a_vpp;
	*ap->a_vpp = NULL;

	if (error == EJUSTRETURN && (flags & ISLASTCN) &&
	    (dvp->v_mount->mnt_flag & MNT_RDONLY) &&
	    (cnp->cn_nameiop == CREATE || cnp->cn_nameiop == RENAME))
		error = EROFS;

	/*
	 * We must do the same locking and unlocking at this layer as
	 * is done in the layers below us.
	 */
	if (ldvp == lvp) {
		/*
		 * Got the same object back, because we looked up ".",
		 * or ".." in the root node of a mount point.
		 * So we make another reference to dvp and return it.
		 */
		vref(dvp);
		*ap->a_vpp = dvp;
		vrele(lvp);
	} else if (lvp != NULL) {
		/* Note: dvp and ldvp are both locked. */
		KASSERT(error != ENOLCK);
		error = layer_node_create(dvp->v_mount, lvp, ap->a_vpp);
		if (error) {
			vrele(lvp);
		}
	}
	return error;
}

/*
 * Setattr call. Disallow write attempts if the layer is mounted read-only.
 */
int
layer_setattr(void *v)
{
	struct vop_setattr_args /* {
		struct vnodeop_desc *a_desc;
		struct vnode *a_vp;
		struct vattr *a_vap;
		kauth_cred_t a_cred;
		struct lwp *a_l;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct vattr *vap = ap->a_vap;

	if ((vap->va_flags != VNOVAL || vap->va_uid != (uid_t)VNOVAL ||
	    vap->va_gid != (gid_t)VNOVAL || vap->va_atime.tv_sec != VNOVAL ||
	    vap->va_mtime.tv_sec != VNOVAL || vap->va_mode != (mode_t)VNOVAL) &&
	    (vp->v_mount->mnt_flag & MNT_RDONLY))
		return EROFS;
	if (vap->va_size != VNOVAL) {
		switch (vp->v_type) {
		case VDIR:
			return EISDIR;
		case VCHR:
		case VBLK:
		case VSOCK:
		case VFIFO:
			return 0;
		case VREG:
		case VLNK:
		default:
			/*
			 * Disallow write attempts if the filesystem is
			 * mounted read-only.
			 */
			if (vp->v_mount->mnt_flag & MNT_RDONLY)
				return EROFS;
		}
	}
	return LAYERFS_DO_BYPASS(vp, ap);
}

/*
 * We handle getattr only to change the fsid.
 */
int
layer_getattr(void *v)
{
	struct vop_getattr_args /* {
		struct vnode *a_vp;
		struct vattr *a_vap;
		kauth_cred_t a_cred;
		struct lwp *a_l;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	int error;

	error = LAYERFS_DO_BYPASS(vp, ap);
	if (error) {
		return error;
	}
	/* Requires that arguments be restored. */
	ap->a_vap->va_fsid = vp->v_mount->mnt_stat.f_fsidx.__fsid_val[0];
	return 0;
}

int
layer_access(void *v)
{
	struct vop_access_args /* {
		struct vnode *a_vp;
		int  a_mode;
		kauth_cred_t a_cred;
		struct lwp *a_l;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	mode_t mode = ap->a_mode;

	/*
	 * Disallow write attempts on read-only layers;
	 * unless the file is a socket, fifo, or a block or
	 * character device resident on the file system.
	 */
	if (mode & VWRITE) {
		switch (vp->v_type) {
		case VDIR:
		case VLNK:
		case VREG:
			if (vp->v_mount->mnt_flag & MNT_RDONLY)
				return EROFS;
			break;
		default:
			break;
		}
	}
	return LAYERFS_DO_BYPASS(vp, ap);
}

/*
 * We must handle open to be able to catch MNT_NODEV and friends
 * and increment the lower v_writecount.
 */
int
layer_open(void *v)
{
	struct vop_open_args /* {
		const struct vnodeop_desc *a_desc;
		struct vnode *a_vp;
		int a_mode;
		kauth_cred_t a_cred;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct vnode *lvp = LAYERVPTOLOWERVP(vp);
	int error;

	if (((lvp->v_type == VBLK) || (lvp->v_type == VCHR)) &&
	    (vp->v_mount->mnt_flag & MNT_NODEV))
		return ENXIO;

	error = LAYERFS_DO_BYPASS(vp, ap);
	if (error == 0 && (ap->a_mode & FWRITE)) {
		mutex_enter(lvp->v_interlock);
		lvp->v_writecount++;
		mutex_exit(lvp->v_interlock);
	}
	return error;
}

/*
 * We must handle close to decrement the lower v_writecount.
 */
int
layer_close(void *v)
{
	struct vop_close_args /* {
		const struct vnodeop_desc *a_desc;
		struct vnode *a_vp;
		int a_fflag;
		kauth_cred_t a_cred;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct vnode *lvp = LAYERVPTOLOWERVP(vp);

	if ((ap->a_fflag & FWRITE)) {
		mutex_enter(lvp->v_interlock);
		KASSERT(lvp->v_writecount > 0);
		lvp->v_writecount--;
		mutex_exit(lvp->v_interlock);
	}
	return LAYERFS_DO_BYPASS(vp, ap);
}

/*
 * If vinvalbuf is calling us, it's a "shallow fsync" -- don't bother
 * syncing the underlying vnodes, since they'll be fsync'ed when
 * reclaimed; otherwise, pass it through to the underlying layer.
 *
 * XXX Do we still need to worry about shallow fsync?
 */
int
layer_fsync(void *v)
{
	struct vop_fsync_args /* {
		struct vnode *a_vp;
		kauth_cred_t a_cred;
		int  a_flags;
		off_t offlo;
		off_t offhi;
		struct lwp *a_l;
	} */ *ap = v;
	int error;

	if (ap->a_flags & FSYNC_RECLAIM) {
		return 0;
	}
	if (ap->a_vp->v_type == VBLK || ap->a_vp->v_type == VCHR) {
		error = spec_fsync(v);
		if (error)
			return error;
	}
	return LAYERFS_DO_BYPASS(ap->a_vp, ap);
}

int
layer_inactive(void *v)
{
	struct vop_inactive_v2_args /* {
		struct vnode *a_vp;
		bool *a_recycle;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;

	/*
	 * If we did a remove, don't cache the node.
	 */
	*ap->a_recycle = ((VTOLAYER(vp)->layer_flags & LAYERFS_REMOVED) != 0);

	/*
	 * Do nothing (and _don't_ bypass).
	 * Wait to vrele lowervp until reclaim,
	 * so that until then our layer_node is in the
	 * cache and reusable.
	 *
	 * NEEDSWORK: Someday, consider inactive'ing
	 * the lowervp and then trying to reactivate it
	 * with capabilities (v_id)
	 * like they do in the name lookup cache code.
	 * That's too much work for now.
	 */

	return 0;
}

int
layer_remove(void *v)
{
	struct vop_remove_v2_args /* {
		struct vnode		*a_dvp;
		struct vnode		*a_vp;
		struct componentname	*a_cnp;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	int error;

	vref(vp);
	error = LAYERFS_DO_BYPASS(vp, ap);
	if (error == 0) {
		VTOLAYER(vp)->layer_flags |= LAYERFS_REMOVED;
	}
	vrele(vp);

	return error;
}

int
layer_rename(void *v)
{
	struct vop_rename_args  /* {
		struct vnode		*a_fdvp;
		struct vnode		*a_fvp;
		struct componentname	*a_fcnp;
		struct vnode		*a_tdvp;
		struct vnode		*a_tvp;
		struct componentname	*a_tcnp;
	} */ *ap = v;
	struct vnode *fdvp = ap->a_fdvp, *tvp;
	int error;

	tvp = ap->a_tvp;
	if (tvp) {
		if (tvp->v_mount != fdvp->v_mount)
			tvp = NULL;
		else
			vref(tvp);
	}
	error = LAYERFS_DO_BYPASS(fdvp, ap);
	if (tvp) {
		if (error == 0)
			VTOLAYER(tvp)->layer_flags |= LAYERFS_REMOVED;
		vrele(tvp);
	}
	return error;
}

int
layer_rmdir(void *v)
{
	struct vop_rmdir_v2_args /* {
		struct vnode		*a_dvp;
		struct vnode		*a_vp;
		struct componentname	*a_cnp;
	} */ *ap = v;
	int		error;
	struct vnode	*vp = ap->a_vp;

	vref(vp);
	error = LAYERFS_DO_BYPASS(vp, ap);
	if (error == 0) {
		VTOLAYER(vp)->layer_flags |= LAYERFS_REMOVED;
	}
	vrele(vp);

	return error;
}

int
layer_revoke(void *v)
{
	struct vop_revoke_args /* {
		struct vnode *a_vp;
		int a_flags;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct vnode *lvp = LAYERVPTOLOWERVP(vp);
	int error;

	/*
	 * We will most likely end up in vclean which uses the v_usecount
	 * to determine if a vnode is active.  Take an extra reference on
	 * the lower vnode so it will always close and inactivate.
	 */
	vref(lvp);
	error = LAYERFS_DO_BYPASS(vp, ap);
	vrele(lvp);

	return error;
}

int
layer_reclaim(void *v)
{
	struct vop_reclaim_v2_args /* {
		struct vnode *a_vp;
		struct lwp *a_l;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct layer_mount *lmp = MOUNTTOLAYERMOUNT(vp->v_mount);
	struct layer_node *xp = VTOLAYER(vp);
	struct vnode *lowervp = xp->layer_lowervp;

	VOP_UNLOCK(vp);

	/*
	 * Note: in vop_reclaim, the node's struct lock has been
	 * decommissioned, so we have to be careful about calling
	 * VOP's on ourself.  We must be careful as VXLOCK is set.
	 */
	if (vp == lmp->layerm_rootvp) {
		/*
		 * Oops! We no longer have a root node.  The most likely
		 * reason is that someone forcibly unmounted the underlying
		 * fs.
		 *
		 * Now getting the root vnode will fail.  We're dead. :-(
		 */
		lmp->layerm_rootvp = NULL;
	}

	mutex_enter(vp->v_interlock);
	KASSERT(vp->v_interlock == lowervp->v_interlock);
	lowervp->v_writecount -= vp->v_writecount;
	mutex_exit(vp->v_interlock);

	/* After this assignment, this node will not be re-used. */
	xp->layer_lowervp = NULL;
	kmem_free(vp->v_data, lmp->layerm_size);
	vp->v_data = NULL;
	vrele(lowervp);

	return 0;
}

/*
 * We just feed the returned vnode up to the caller - there's no need
 * to build a layer node on top of the node on which we're going to do
 * i/o. :-)
 */
int
layer_bmap(void *v)
{
	struct vop_bmap_args /* {
		struct vnode *a_vp;
		daddr_t  a_bn;
		struct vnode **a_vpp;
		daddr_t *a_bnp;
		int *a_runp;
	} */ *ap = v;
	struct vnode *vp;

	vp = LAYERVPTOLOWERVP(ap->a_vp);
	ap->a_vp = vp;

	return VCALL(vp, ap->a_desc->vdesc_offset, ap);
}

int
layer_print(void *v)
{
	struct vop_print_args /* {
		struct vnode *a_vp;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	printf ("\ttag VT_LAYERFS, vp=%p, lowervp=%p\n", vp, LAYERVPTOLOWERVP(vp));
	return 0;
}

int
layer_getpages(void *v)
{
	struct vop_getpages_args /* {
		struct vnode *a_vp;
		voff_t a_offset;
		struct vm_page **a_m;
		int *a_count;
		int a_centeridx;
		vm_prot_t a_access_type;
		int a_advice;
		int a_flags;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct mount *mp = vp->v_mount;
	int error;
	krw_t op;

	KASSERT(rw_lock_held(vp->v_uobj.vmobjlock));

	if (ap->a_flags & PGO_LOCKED) {
		return EBUSY;
	}
	ap->a_vp = LAYERVPTOLOWERVP(vp);
	KASSERT(vp->v_uobj.vmobjlock == ap->a_vp->v_uobj.vmobjlock);

	/* Just pass the request on to the underlying layer. */
	op = rw_lock_op(vp->v_uobj.vmobjlock);
	rw_exit(vp->v_uobj.vmobjlock);
	fstrans_start(mp);
	rw_enter(vp->v_uobj.vmobjlock, op);
	if (mp == vp->v_mount) {
		/* Will release the lock. */
		error = VCALL(ap->a_vp, VOFFSET(vop_getpages), ap);
	} else {
		rw_exit(vp->v_uobj.vmobjlock);
		error = ENOENT;
	}
	fstrans_done(mp);

	return error;
}

int
layer_putpages(void *v)
{
	struct vop_putpages_args /* {
		struct vnode *a_vp;
		voff_t a_offlo;
		voff_t a_offhi;
		int a_flags;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;

	KASSERT(rw_write_held(vp->v_uobj.vmobjlock));

	ap->a_vp = LAYERVPTOLOWERVP(vp);
	KASSERT(vp->v_uobj.vmobjlock == ap->a_vp->v_uobj.vmobjlock);

	if (ap->a_flags & PGO_RECLAIM) {
		rw_exit(vp->v_uobj.vmobjlock);
		return 0;
	}

	/* Just pass the request on to the underlying layer. */
	return VCALL(ap->a_vp, VOFFSET(vop_putpages), ap);
}