/*	$NetBSD: layer_subr.c,v 1.26 2009/03/14 15:36:22 dsl Exp $	*/

/*
 * Copyright (c) 1999 National Aeronautics & Space Administration
 * All rights reserved.
 *
 * This software was written by William Studenmund of the
 * Numerical Aerospace Simulation Facility, NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the National Aeronautics & Space Administration
 *    nor the names of its contributors may be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NATIONAL AERONAUTICS & SPACE ADMINISTRATION
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE ADMINISTRATION OR CONTRIB-
 * UTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software donated to Berkeley by
 * Jan-Simon Pendry.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: Id: lofs_subr.c,v 1.11 1992/05/30 10:05:43 jsp Exp
 *	@(#)null_subr.c	8.7 (Berkeley) 5/14/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: layer_subr.c,v 1.26 2009/03/14 15:36:22 dsl Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/time.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/kmem.h>
#include <sys/malloc.h>

#include <miscfs/specfs/specdev.h>
#include <miscfs/genfs/layer.h>
#include <miscfs/genfs/layer_extern.h>

#define	NLAYERNODECACHE 16

#ifdef LAYERFS_DIAGNOSTIC
int layerfs_debug = 1;
#endif

/*
 * layer cache:
 * Each cache entry holds a reference to the lower vnode
 * along with a pointer to the alias vnode.  When an
 * entry is added the lower vnode is VREF'd.  When the
 * alias is removed the lower vnode is vrele'd.
 */
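
/*
 * For orientation, a sketch of the layer_node fields this file relies
 * on; this is not the authoritative declaration, which lives in
 * miscfs/genfs/layer.h:
 *
 *	struct layer_node {
 *		LIST_ENTRY(layer_node)	layer_hash;	// hash-chain linkage
 *		struct vnode		*layer_vnode;	// back ptr to alias vnode
 *		struct vnode		*layer_lowervp;	// VREF'd lower vnode
 *		unsigned		layer_flags;	// locking/state flags
 *	};
 */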

/*
 * Initialise layerfs.  There is currently no global state to set up;
 * we just announce ourselves when debugging.
 */
void
layerfs_init(void)
{
#ifdef LAYERFS_DIAGNOSTIC
	if (layerfs_debug)
		printf("layerfs_init\n");		/* printed during system boot */
#endif
}

/*
 * Free global resources of layerfs.
 */
void
layerfs_done(void)
{
#ifdef LAYERFS_DIAGNOSTIC
	if (layerfs_debug)
		printf("layerfs_done\n");		/* printed on layerfs detach */
#endif
}

/*
 * Return a locked, VREF'ed alias for the lower vnode if one already
 * exists, else NULL.
 * The layer mount's hashlock must be held on entry.
 * It will still be held upon return iff we return NULL.
 */
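/*
 * A sketch of the expected calling pattern (cf. layer_node_create()
 * and layer_node_alloc() below); note the lock-on-NULL convention:
 *
 *	mutex_enter(&lmp->layerm_hashlock);
 *	vp = layer_node_find(mp, lowervp);
 *	if (vp == NULL)
 *		mutex_exit(&lmp->layerm_hashlock);
 *	else
 *		... vp is locked and referenced, hashlock already dropped ...
 */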
struct vnode *
layer_node_find(struct mount *mp, struct vnode *lowervp)
{
	struct layer_mount *lmp = MOUNTTOLAYERMOUNT(mp);
	struct layer_node_hashhead *hd;
	struct layer_node *a;
	struct vnode *vp;
	int error;

	/*
	 * Find the hash bucket, then search its (doubly) linked list
	 * for a layer_node that references the lower vnode.  If one is
	 * found, increment the layer vnode's reference count (but NOT
	 * the lower vnode's VREF counter) and return the vnode locked.
	 */
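	/*
	 * LAYER_NHASH() (defined in layer.h) picks the bucket by
	 * hashing the lower vnode's pointer into the per-mount hash
	 * table; the exact hash is a detail of layer.h and is not
	 * relied on here.
	 */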
	hd = LAYER_NHASH(lmp, lowervp);
loop:
	LIST_FOREACH(a, hd, layer_hash) {
		if (a->layer_lowervp == lowervp && LAYERTOV(a)->v_mount == mp) {
			vp = LAYERTOV(a);
			mutex_enter(&vp->v_interlock);
			/*
			 * If we find a node being cleaned out, then
			 * ignore it and continue.  A thread trying to
			 * clean out the extant layer vnode needs to
			 * acquire the shared lock (i.e. the lower
			 * vnode's lock), which our caller already holds.
			 * To allow the cleaning to succeed the current
			 * thread must make progress.  So, for a brief
			 * time more than one vnode in a layered file
			 * system may refer to a single vnode in the
			 * lower file system.
			 */
			if ((vp->v_iflag & VI_XLOCK) != 0) {
				mutex_exit(&vp->v_interlock);
				continue;
			}
			mutex_exit(&lmp->layerm_hashlock);
			/*
			 * We must not let vget() try to lock the layer
			 * vp, since the lower vp is already locked and
			 * locking the layer vp will involve locking
			 * the lower vp (whether or not they actually
			 * share a lock).  Instead, take the layer vp's
			 * lock separately afterward, but only if it
			 * does not share the lower vp's lock.
			 */
			error = vget(vp, LK_INTERLOCK | LK_NOWAIT);
			if (error) {
				kpause("layerfs", false, 1, NULL);
				mutex_enter(&lmp->layerm_hashlock);
				goto loop;
			}
			LAYERFS_UPPERLOCK(vp, LK_EXCLUSIVE, error);
			return (vp);
		}
	}
	return NULL;
}


/*
 * Make a new layer_node.
 * Vp is the alias vnode; lowervp is the lower vnode.
 * Maintain a reference to lowervp.
 */
int
layer_node_alloc(struct mount *mp, struct vnode *lowervp, struct vnode **vpp)
{
	struct layer_mount *lmp = MOUNTTOLAYERMOUNT(mp);
	struct layer_node_hashhead *hd;
	struct layer_node *xp;
	struct vnode *vp, *nvp;
	int error;
	extern int (**dead_vnodeop_p)(void *);

	error = getnewvnode(lmp->layerm_tag, mp, lmp->layerm_vnodeop_p, &vp);
	if (error != 0)
		return (error);
	vp->v_type = lowervp->v_type;
	mutex_enter(&vp->v_interlock);
	vp->v_iflag |= VI_LAYER;
	mutex_exit(&vp->v_interlock);

	xp = kmem_alloc(lmp->layerm_size, KM_SLEEP);
	if (xp == NULL) {
		ungetnewvnode(vp);
		return ENOMEM;
	}
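	/*
	 * Device aliases need their own specfs state so that specdev
	 * operations on the upper vnode resolve to the same dev_t as
	 * the lower vnode.
	 */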
	if (vp->v_type == VBLK || vp->v_type == VCHR) {
		spec_node_init(vp, lowervp->v_rdev);
	}

	vp->v_data = xp;
	vp->v_vflag = (vp->v_vflag & ~VV_MPSAFE) |
	    (lowervp->v_vflag & VV_MPSAFE);
	xp->layer_vnode = vp;
	xp->layer_lowervp = lowervp;
	xp->layer_flags = 0;

	/*
	 * Before we insert our new node onto the hash chains,
	 * check to see if someone else has beaten us to it.
	 * (We could have slept in kmem_alloc().)
	 */
	mutex_enter(&lmp->layerm_hashlock);
	if ((nvp = layer_node_find(mp, lowervp)) != NULL) {
		*vpp = nvp;

		/* free the substructures we've allocated. */
		kmem_free(xp, lmp->layerm_size);
		if (vp->v_type == VBLK || vp->v_type == VCHR)
			spec_node_destroy(vp);

		vp->v_type = VBAD;		/* node is discarded */
		vp->v_op = dead_vnodeop_p;	/* so ops will still work */
		vrele(vp);			/* get rid of it. */
		return (0);
	}

	/*
	 * Now lock the new node.  We rely on the fact that we were passed
	 * a locked lower vnode.  If the lower node is exporting a struct
	 * lock (v_vnlock != NULL) then we just set the upper v_vnlock to
	 * the lower one, and both are now locked.  If the lower node is
	 * exporting NULL, then we copy that up and manually lock the
	 * upper node.
	 *
	 * LAYERFS_UPPERLOCK already has the test, so we use it after
	 * copying up the v_vnlock from below.
	 */
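	/*
	 * For reference, a sketch of LAYERFS_UPPERLOCK() (the
	 * authoritative definition is in layer_extern.h); it behaves
	 * roughly as:
	 *
	 *	if ((vp)->v_vnlock == NULL)
	 *		error = VOP_LOCK((vp), (flags));
	 *	else
	 *		error = 0;
	 *
	 * i.e. the upper vnode is only locked explicitly when it does
	 * not share the lower vnode's lock.
	 */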

	vp->v_vnlock = lowervp->v_vnlock;
	LAYERFS_UPPERLOCK(vp, LK_EXCLUSIVE, error);
	KASSERT(error == 0);

	/*
	 * Insert the new node into the hash.
	 * Add a reference to the lower node.
	 */

	*vpp = vp;
	VREF(lowervp);
	hd = LAYER_NHASH(lmp, lowervp);
	LIST_INSERT_HEAD(hd, xp, layer_hash);
	uvm_vnp_setsize(vp, 0);
	mutex_exit(&lmp->layerm_hashlock);
	return (0);
}


/*
 * Try to find an existing layer_node vnode referring to the given
 * lower vnode; otherwise make a new layer_node vnode which holds
 * a reference to the lower vnode.
 *
 * >>> we assume that the lower node is already locked upon entry, so we
 * propagate the lock state to the upper node <<<
 */
int
layer_node_create(struct mount *mp, struct vnode *lowervp, struct vnode **newvpp)
{
	struct vnode *aliasvp;
	struct layer_mount *lmp = MOUNTTOLAYERMOUNT(mp);

	mutex_enter(&lmp->layerm_hashlock);
	aliasvp = layer_node_find(mp, lowervp);
	if (aliasvp != NULL) {
		/*
		 * layer_node_find() has taken another reference to the
		 * alias vnode, and the lock is now held via aliasvp.
		 */
#ifdef LAYERFS_DIAGNOSTIC
		if (layerfs_debug)
			vprint("layer_node_create: exists", aliasvp);
#endif
	} else {
		int error;

		mutex_exit(&lmp->layerm_hashlock);

		/*
		 * Get new vnode.
		 */
#ifdef LAYERFS_DIAGNOSTIC
		if (layerfs_debug)
			printf("layer_node_create: create new alias vnode\n");
#endif

		/*
		 * Make new vnode reference the layer_node.
		 */
		if ((error = (lmp->layerm_alloc)(mp, lowervp, &aliasvp)) != 0)
			return error;

		/*
		 * aliasvp is already VREF'd by getnewvnode()
		 */
	}

	/*
	 * Now that we have VREF'd the upper vnode, release the reference
	 * to the lower node.  The existence of the layer_node retains one
	 * reference to the lower node.
	 */
	vrele(lowervp);

#ifdef DIAGNOSTIC
	if (lowervp->v_usecount < 1) {
		/* Should never happen... */
		vprint("layer_node_create: alias", aliasvp);
		vprint("layer_node_create: lower", lowervp);
		panic("layer_node_create: lower has 0 usecount.");
	}
#endif

#ifdef LAYERFS_DIAGNOSTIC
	if (layerfs_debug)
		vprint("layer_node_create: alias", aliasvp);
#endif
	*newvpp = aliasvp;
	return (0);
}
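
/*
 * A sketch of the intended use of layer_node_create() under the rules
 * above; the names here are illustrative, not from any particular
 * caller (cf. the layer_bypass()/lookup paths in layer_vnops.c):
 *
 *	// lowervp: locked and referenced, obtained from the lower fs
 *	error = layer_node_create(mp, lowervp, &uppervp);
 *	// on success, uppervp is locked and referenced, and the
 *	// caller's reference to lowervp has been consumed (it now
 *	// belongs to the layer_node); on failure the caller still
 *	// owns its lowervp reference.
 */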

#ifdef LAYERFS_DIAGNOSTIC
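/*
 * layer_checkvp() is normally reached via the LAYERVPTOLOWERVP()
 * macro; a guess at its shape (the authoritative definition is in
 * layer.h) is layer_checkvp((vp), __FILE__, __LINE__), which is how
 * the fil/lno arguments get filled in.
 */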
struct vnode *
layer_checkvp(struct vnode *vp, const char *fil, int lno)
{
	struct layer_node *a = VTOLAYER(vp);
#ifdef notyet
	/*
	 * Can't do this check because vop_reclaim runs
	 * with a funny vop vector.
	 *
	 * WRS - no it doesn't...
	 */
	if (vp->v_op != layer_vnodeop_p) {
		printf("layer_checkvp: on non-layer-node\n");
#ifdef notyet
		while (layer_checkvp_barrier) /*WAIT*/ ;
#endif
		panic("layer_checkvp");
	}
#endif
	if (a->layer_lowervp == NULL) {
		/* Should never happen */
		int i; u_long *p;
		printf("vp = %p, ZERO ptr\n", vp);
		for (p = (u_long *) a, i = 0; i < 8; i++)
			printf(" %lx", p[i]);
		printf("\n");
		/* wait for debugger */
		panic("layer_checkvp");
	}
	if (a->layer_lowervp->v_usecount < 1) {
		int i; u_long *p;
		printf("vp = %p, unref'ed lowervp\n", vp);
		for (p = (u_long *) a, i = 0; i < 8; i++)
			printf(" %lx", p[i]);
		printf("\n");
		/* wait for debugger */
		panic("layer with unref'ed lowervp");
	}
#ifdef notnow
	printf("layer %p/%d -> %p/%d [%s, %d]\n",
	    LAYERTOV(a), LAYERTOV(a)->v_usecount,
	    a->layer_lowervp, a->layer_lowervp->v_usecount,
	    fil, lno);
#endif
	return a->layer_lowervp;
}
#endif