/*	$NetBSD: null_vnops.c,v 1.10 1997/05/17 20:32:53 pk Exp $	*/

/*
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * John Heidemann of the UCLA Ficus project.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)null_vnops.c	8.1 (Berkeley) 6/10/93
 *
 * Ancestors:
 *	@(#)lofs_vnops.c	1.2 (Berkeley) 6/18/92
 *	Id: lofs_vnops.c,v 1.11 1992/05/30 10:05:43 jsp Exp
 *	...and...
 *	@(#)null_vnodeops.c 1.20 92/07/07 UCLA Ficus project
 */

/*
 * Null Layer
 *
 * (See mount_null(8) for more information.)
 *
 * The null layer duplicates a portion of the file system
 * name space under a new name.  In this respect, it is
 * similar to the loopback file system.  It differs from
 * the loopback fs in two respects:  it is implemented using
 * stackable-layer techniques, and its null-nodes stack above
 * all lower-layer vnodes, not just over directory vnodes.
 *
 * The null layer has two purposes.  First, it serves as a demonstration
 * of layering by providing a layer which does nothing.  (It actually
 * does everything the loopback file system does, which is slightly
 * more than nothing.)  Second, the null layer can serve as a prototype
 * layer.  Since it provides all necessary layer framework,
 * new file system layers can be created very easily by starting
 * with a null layer.
 *
 * The remainder of this comment examines the null layer as a basis
 * for constructing new layers.
 *
 *
 * INSTANTIATING NEW NULL LAYERS
 *
 * New null layers are created with mount_null(8).
 * Mount_null(8) takes two arguments, the pathname
 * of the lower vfs (target-pn) and the pathname where the null
 * layer will appear in the namespace (alias-pn).  After
 * the null layer is put into place, the contents
 * of the target-pn subtree will be aliased under alias-pn.
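 *
 * For example (an illustrative invocation; the same arbitrary paths
 * are used in the walkthrough further below):
 *
 *	mount_null /usr/include /dev/layer/null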
 *
 *
 * OPERATION OF A NULL LAYER
 *
 * The null layer is the minimum file system layer,
 * simply bypassing all possible operations to the lower layer
 * for processing there.  The majority of its activity centers
 * on the bypass routine, through which nearly all vnode operations
 * pass.
 *
 * The bypass routine accepts arbitrary vnode operations for
 * handling by the lower layer.  It begins by examining vnode
 * operation arguments and replacing any null-nodes by their
 * lower-layer equivalents.  It then invokes the operation
 * on the lower layer.  Finally, it restores the null-nodes
 * in the arguments and, if a vnode is returned by the operation,
 * stacks a null-node on top of the returned vnode.
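 *
 * In outline (a sketch of the steps just described; the real code is
 * in null_bypass() below):
 *
 *	map null-node arguments to their lower-layer vnodes
 *	error = VCALL(first lower vnode, this operation, arguments)
 *	restore the original null-node arguments
 *	if the operation returned a vnode, stack a null-node above it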
 *
 * Although bypass handles most operations,
 * vop_getattr, _inactive, _reclaim, and _print are not bypassed.
 * Vop_getattr must change the fsid being returned.
 * Vop_inactive and vop_reclaim are not bypassed so that
 * they can handle freeing null-layer specific data.
 * Vop_print is not bypassed to avoid excessive debugging
 * information.
 *
 *
 * INSTANTIATING VNODE STACKS
 *
 * Mounting associates the null layer with a lower layer,
 * in effect stacking two VFSes.  Vnode stacks are instead
 * created on demand as files are accessed.
 *
 * The initial mount creates a single vnode stack for the
 * root of the new null layer.  All other vnode stacks
 * are created as a result of vnode operations on
 * this or other null vnode stacks.
 *
 * New vnode stacks come into existence as a result of
 * an operation which returns a vnode.
 * The bypass routine stacks a null-node above the new
 * vnode before returning it to the caller.
 *
 * For example, imagine mounting a null layer with
 * "mount_null /usr/include /dev/layer/null".
 * Changing directory to /dev/layer/null will assign
 * the root null-node (which was created when the null layer was mounted).
 * Now consider opening "sys".  A vop_lookup would be
 * done on the root null-node.  This operation would bypass through
 * to the lower layer which would return a vnode representing
 * the UFS "sys".  Null_bypass then builds a null-node
 * aliasing the UFS "sys" and returns this to the caller.
 * Later operations on the null-node "sys" will repeat this
 * process when constructing other vnode stacks.
 *
 *
 * CREATING OTHER FILE SYSTEM LAYERS
 *
 * One of the easiest ways to construct new file system layers is to make
 * a copy of the null layer, rename all files and variables, and
 * then begin modifying the copy.  Sed can be used to easily rename
 * all variables.
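 *
 * For example (an illustrative rename only; a real conversion needs a
 * more careful pattern than this blanket substitution):
 *
 *	sed -e 's/null/newlayer/g' null_vnops.c > newlayer_vnops.c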
 *
 * The umap layer is an example of a layer descended from the
 * null layer.
 *
 *
 * INVOKING OPERATIONS ON LOWER LAYERS
 *
 * There are two techniques to invoke operations on a lower layer
 * when the operation cannot be completely bypassed.  Each method
 * is appropriate in different situations.  In both cases,
 * it is the responsibility of the aliasing layer to make
 * the operation arguments "correct" for the lower layer
 * by mapping any vnode arguments to the lower layer.
 *
 * The first approach is to call the aliasing layer's bypass routine.
 * This method is most suitable when you wish to invoke the operation
 * currently being handled on the lower layer.  It has the advantage
 * that the bypass routine already must do argument mapping.
 * An example of this is null_getattr in the null layer.
 *
 * A second approach is to invoke vnode operations directly on
 * the lower layer with the VOP_OPERATIONNAME interface.
 * The advantage of this method is that it is easy to invoke
 * arbitrary operations on the lower layer.  The disadvantage
 * is that vnode arguments must be manually mapped.
 *
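 * A sketch of the second approach, reusing the getattr arguments
 * shown in null_getattr() below (illustrative only; the real
 * null_getattr() uses the first approach and calls null_bypass()):
 *
 *	error = VOP_GETATTR(NULLVPTOLOWERVP(ap->a_vp), ap->a_vap,
 *	    ap->a_cred, ap->a_p);
 *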
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/malloc.h>
#include <sys/buf.h>
#include <miscfs/nullfs/null.h>


int null_bug_bypass = 0;   /* for debugging: enables bypass printf'ing */

int	null_bypass __P((void *));
int	null_getattr __P((void *));
int	null_inactive __P((void *));
int	null_reclaim __P((void *));
int	null_print __P((void *));
int	null_strategy __P((void *));
int	null_bwrite __P((void *));
int	null_lock __P((void *));
int	null_unlock __P((void *));
int	null_islocked __P((void *));
int	null_lookup __P((void *));

/*
 * This is the 10-Apr-92 bypass routine.
 *    This version has been optimized for speed, throwing away some
 * safety checks.  It should still always work, but it's not as
 * robust to programmer errors.
 *    Define SAFETY to include some error checking code.
 *
 * In general, we map all vnodes going down and unmap them on the way back.
 * As an exception to this, vnodes can be marked "unmapped" by setting
 * the Nth bit in the operation's vdesc_flags.
 *
 * Also, some BSD vnode operations have the side effect of vrele'ing
 * their arguments.  With stacking, the reference counts are held
 * by the upper node, not the lower one, so we must handle these
 * side-effects here.  This is not of concern in Sun-derived systems
 * since there are no such side-effects.
 *
 * This makes the following assumptions:
 * - only one returned vpp
 * - no INOUT vpp's (Sun's vop_open has one of these)
 * - the vnode operation vector of the first vnode should be used
 *   to determine what implementation of the op should be invoked
 * - all mapped vnodes are of our vnode-type (NEEDSWORK:
 *   problems on rmdir'ing mount points and renaming?)
 */
int
null_bypass(v)
	void *v;
{
	struct vop_generic_args /* {
		struct vnodeop_desc *a_desc;
		<other random data follows, presumably>
	} */ *ap = v;
	register struct vnode **this_vp_p;
	int error;
	struct vnode *old_vps[VDESC_MAX_VPS];
	struct vnode **vps_p[VDESC_MAX_VPS];
	struct vnode ***vppp;
	struct vnodeop_desc *descp = ap->a_desc;
	int reles, i;

	if (null_bug_bypass)
		printf ("null_bypass: %s\n", descp->vdesc_name);

#ifdef SAFETY
	/*
	 * We require at least one vp.
	 */
	if (descp->vdesc_vp_offsets == NULL ||
	    descp->vdesc_vp_offsets[0] == VDESC_NO_OFFSET)
		panic ("null_bypass: no vp's in map.\n");
#endif

	/*
	 * Map the vnodes going in.
	 * Later, we'll invoke the operation based on
	 * the first mapped vnode's operation vector.
	 */
	reles = descp->vdesc_flags;
	for (i = 0; i < VDESC_MAX_VPS; reles >>= 1, i++) {
		if (descp->vdesc_vp_offsets[i] == VDESC_NO_OFFSET)
			break;   /* bail out at end of list */
		vps_p[i] = this_vp_p =
			VOPARG_OFFSETTO(struct vnode**,descp->vdesc_vp_offsets[i],ap);
		/*
		 * We're not guaranteed that any but the first vnode
		 * are of our type.  Check for and don't map any
		 * that aren't.  (We must always map first vp or vclean fails.)
		 */
		if (i && (*this_vp_p == NULLVP ||
		    (*this_vp_p)->v_op != null_vnodeop_p)) {
			old_vps[i] = NULLVP;
		} else {
			old_vps[i] = *this_vp_p;
			*(vps_p[i]) = NULLVPTOLOWERVP(*this_vp_p);
			/*
			 * XXX - Several operations have the side effect
			 * of vrele'ing their vp's.  We must account for
			 * that.  (This should go away in the future.)
			 */
			if (reles & 1)
				VREF(*this_vp_p);
		}

	}

	/*
	 * Call the operation on the lower layer
	 * with the modified argument structure.
	 */
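	/*
	 * (VCALL dispatches through the first mapped vnode's own
	 * operation vector, so it is the lower file system's
	 * implementation of this operation that runs.)
	 */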
	error = VCALL(*(vps_p[0]), descp->vdesc_offset, ap);

	/*
	 * Maintain the illusion of call-by-value
	 * by restoring vnodes in the argument structure
	 * to their original value.
	 */
	reles = descp->vdesc_flags;
	for (i = 0; i < VDESC_MAX_VPS; reles >>= 1, i++) {
		if (descp->vdesc_vp_offsets[i] == VDESC_NO_OFFSET)
			break;   /* bail out at end of list */
		if (old_vps[i] != NULLVP) {
			*(vps_p[i]) = old_vps[i];
			if (reles & 1) {
				/*
				 * They really vput them, so we must drop
				 * our locks (but mark underneath as
				 * unlocked first).
				 * Beware of vnode duplication--put it once,
				 * and rele the rest.  Check this
				 * by looking at our upper flag.
				 */
				if (VTONULL(*(vps_p[i]))->null_flags & NULL_LOCKED) {
					VTONULL(*(vps_p[i]))->null_flags &= ~NULL_LLOCK;
					vput(*(vps_p[i]));
				} else
					vrele(*(vps_p[i]));
			}
		}
	}

	/*
	 * Map the possible out-going vpp
	 * (Assumes that the lower layer always returns
	 * a VREF'ed vpp unless it gets an error.)
	 */
	if (descp->vdesc_vpp_offset != VDESC_NO_OFFSET &&
	    !(descp->vdesc_flags & VDESC_NOMAP_VPP) &&
	    !error) {
		/*
		 * XXX - even though some ops have vpp returned vp's,
		 * several ops actually vrele this before returning.
		 * We must avoid these ops.
		 * (This should go away when these ops are regularized.)
		 */
		if (descp->vdesc_flags & VDESC_VPP_WILLRELE)
			goto out;
		vppp = VOPARG_OFFSETTO(struct vnode***,
				 descp->vdesc_vpp_offset,ap);
		/*
		 * This assumes that **vppp is a locked vnode (it is always
		 * so as of this writing, NetBSD-current 1995/02/16).
		 *
		 * (We don't want to lock it if being called on behalf
		 * of lookup--it plays weird locking games depending
		 * on whether or not it's looking up ".", "..", etc.)
		 */
		error = null_node_create(old_vps[0]->v_mount, **vppp, *vppp,
					 descp == &vop_lookup_desc ? 0 : 1);
	}

 out:
	return (error);
}


/*
 *  We handle getattr only to change the fsid.
 */
int
null_getattr(v)
	void *v;
{
	struct vop_getattr_args /* {
		struct vnode *a_vp;
		struct vattr *a_vap;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap = v;
	int error;

	if ((error = null_bypass(ap)) != 0)
		return (error);
	/* Requires that arguments be restored. */
	ap->a_vap->va_fsid = ap->a_vp->v_mount->mnt_stat.f_fsid.val[0];
	return (0);
}


int
null_inactive(v)
	void *v;
{
	/*
	 * Do nothing (and _don't_ bypass).
	 * Wait to vrele lowervp until reclaim,
	 * so that until then our null_node is in the
	 * cache and reusable.
	 *
	 * NEEDSWORK: Someday, consider inactive'ing
	 * the lowervp and then trying to reactivate it
	 * with capabilities (v_id)
	 * like they do in the name lookup cache code.
	 * That's too much work for now.
	 */
	return (0);
}

int
null_reclaim(v)
	void *v;
{
	struct vop_reclaim_args /* {
		struct vnode *a_vp;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct null_node *xp = VTONULL(vp);
	struct vnode *lowervp = xp->null_lowervp;

	/*
	 * Note: in vop_reclaim, vp->v_op == dead_vnodeop_p,
	 * so we can't call VOPs on ourself.
	 */
	/* After this assignment, this node will not be re-used. */
	xp->null_lowervp = NULL;
	LIST_REMOVE(xp, null_hash);
	FREE(vp->v_data, M_TEMP);
	vp->v_data = NULL;
	vrele (lowervp);
	return (0);
}


int
null_print(v)
	void *v;
{
	struct vop_print_args /* {
		struct vnode *a_vp;
	} */ *ap = v;
	register struct vnode *vp = ap->a_vp;
	register struct null_node *nn = VTONULL(vp);

	printf ("\ttag VT_NULLFS, vp=%p, lowervp=%p\n", vp, NULLVPTOLOWERVP(vp));
#ifdef DIAGNOSTIC
	printf("%s%s owner pid %d retpc %p retret %p\n",
	    (nn->null_flags & NULL_LOCKED) ? "(LOCKED) " : "",
	    (nn->null_flags & NULL_LLOCK) ? "(LLOCK) " : "",
	    nn->null_pid, nn->null_lockpc, nn->null_lockpc2);
#else
	printf("%s%s\n",
	    (nn->null_flags & NULL_LOCKED) ? "(LOCKED) " : "",
	    (nn->null_flags & NULL_LLOCK) ? "(LLOCK) " : "");
#endif
	vprint("nullfs lowervp", NULLVPTOLOWERVP(vp));
	return (0);
}


/*
 * XXX - vop_strategy must be hand coded because it has no
 * vnode in its arguments.
 * This goes away with a merged VM/buffer cache.
 */
int
null_strategy(v)
	void *v;
{
	struct vop_strategy_args /* {
		struct buf *a_bp;
	} */ *ap = v;
	struct buf *bp = ap->a_bp;
	int error;
	struct vnode *savedvp;

	savedvp = bp->b_vp;
	bp->b_vp = NULLVPTOLOWERVP(bp->b_vp);

	error = VOP_STRATEGY(bp);

	bp->b_vp = savedvp;

	return (error);
}


/*
 * XXX - like vop_strategy, vop_bwrite must be hand coded because it has no
 * vnode in its arguments.
 * This goes away with a merged VM/buffer cache.
 */
int
null_bwrite(v)
	void *v;
{
	struct vop_bwrite_args /* {
		struct buf *a_bp;
	} */ *ap = v;
	struct buf *bp = ap->a_bp;
	int error;
	struct vnode *savedvp;

	savedvp = bp->b_vp;
	bp->b_vp = NULLVPTOLOWERVP(bp->b_vp);

	error = VOP_BWRITE(bp);

	bp->b_vp = savedvp;

	return (error);
}

/*
 * We need a separate null lock routine, to avoid deadlocks at reclaim time.
 * If a process holds the lower vnode locked when it tries to reclaim
 * the null upper vnode, _and_ null_bypass is used as the locking operation,
 * then the process can end up locking against itself.
 * This has been observed when a null mount is set up to "tunnel" beneath a
 * union mount (that setup is useful if you still wish to be able to access
 * the non-union version of either the upper or lower union layer).
 */
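
/*
 * Schematically, the self-deadlock described above (a sketch of the
 * scenario, not code from this file):
 *
 *	process locks lowervp
 *	  -> reclaim of the null upper vnode begins
 *	    -> a bypassed VOP_LOCK maps the upper vnode to lowervp
 *	      -> VOP_LOCK(lowervp) waits on a lock the process already holds
 */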
int
null_lock(v)
	void *v;
{
	struct vop_lock_args *ap = v;
	struct vnode *vp = ap->a_vp;
	struct null_node *nn;

#ifdef NULLFS_DIAGNOSTIC
	vprint("null_lock_e", ap->a_vp);
	printf("retpc=%lx, retretpc=%lx\n", RETURN_PC(0), RETURN_PC(1));
#endif
start:
	while (vp->v_flag & VXLOCK) {
		vp->v_flag |= VXWANT;
		tsleep((caddr_t)vp, PINOD, "nulllock1", 0);
	}

	nn = VTONULL(vp);

	if ((nn->null_flags & NULL_LLOCK) == 0 &&
	    (vp->v_usecount != 0)) {
		/*
		 * Only lock the underlying node if we haven't locked it yet
		 * for null ops, and our refcount is nonzero.  If usecount
		 * is zero, we are probably being reclaimed so we need to
		 * keep our hands off the lower node.
		 */
		VOP_LOCK(nn->null_lowervp);
		nn->null_flags |= NULL_LLOCK;
	}

	if (nn->null_flags & NULL_LOCKED) {
#ifdef DIAGNOSTIC
		if (curproc && nn->null_pid == curproc->p_pid &&
		    nn->null_pid > -1 && curproc->p_pid > -1) {
			vprint("self-lock", vp);
			panic("null: locking against myself");
		}
#endif
		nn->null_flags |= NULL_WANTED;
		tsleep((caddr_t)nn, PINOD, "nulllock2", 0);
		goto start;
	}

#ifdef DIAGNOSTIC
	if (curproc)
		nn->null_pid = curproc->p_pid;
	else
		nn->null_pid = -1;
	nn->null_lockpc = RETURN_PC(0);
	nn->null_lockpc2 = RETURN_PC(1);
#endif

	nn->null_flags |= NULL_LOCKED;
	return (0);
}

int
null_unlock(v)
	void *v;
{
	struct vop_unlock_args *ap = v;
	struct null_node *nn = VTONULL(ap->a_vp);

#ifdef NULLFS_DIAGNOSTIC
	vprint("null_unlock_e", ap->a_vp);
#endif
#ifdef DIAGNOSTIC
	if ((nn->null_flags & NULL_LOCKED) == 0) {
		vprint("null_unlock", ap->a_vp);
		panic("null: unlocking unlocked node");
	}
	if (curproc && nn->null_pid != curproc->p_pid &&
	    curproc->p_pid > -1 && nn->null_pid > -1) {
		vprint("null_unlock", ap->a_vp);
		panic("null: unlocking other process's null node");
	}
#endif
	nn->null_flags &= ~NULL_LOCKED;

	if ((nn->null_flags & NULL_LLOCK) != 0)
		VOP_UNLOCK(nn->null_lowervp);

	nn->null_flags &= ~NULL_LLOCK;

	if (nn->null_flags & NULL_WANTED) {
		nn->null_flags &= ~NULL_WANTED;
		wakeup((caddr_t)nn);
	}
#ifdef DIAGNOSTIC
	nn->null_pid = 0;
	nn->null_lockpc = nn->null_lockpc2 = 0;
#endif
	return (0);
}

int
null_islocked(v)
	void *v;
{
	struct vop_islocked_args *ap = v;

	return ((VTONULL(ap->a_vp)->null_flags & NULL_LOCKED) ? 1 : 0);
}

int
null_lookup(v)
	void *v;
{
	register struct vop_lookup_args /* {
		struct vnodeop_desc *a_desc;
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
	} */ *ap = v;
	register int error;
	register struct vnode *dvp;
	int flags = ap->a_cnp->cn_flags;

#ifdef NULLFS_DIAGNOSTIC
	printf("null_lookup: dvp=%p, name='%s'\n",
	    ap->a_dvp, ap->a_cnp->cn_nameptr);
#endif
	/*
	 * The starting dir (ap->a_dvp) comes in locked.
	 */

	/* Set LOCKPARENT to hold on to it until done below. */
	ap->a_cnp->cn_flags |= LOCKPARENT;
	error = null_bypass(ap);
	if (!(flags & LOCKPARENT))
		ap->a_cnp->cn_flags &= ~LOCKPARENT;

	if (error)
		/*
		 * The starting dir is still locked/has been relocked
		 * on error return.
		 */
		return (error);

	if (ap->a_dvp != *ap->a_vpp) {
		/*
		 * Lookup returns the node locked; we mark both lower and
		 * upper nodes as locked by setting the lower lock
		 * flag (it came back locked), and then call lock to
		 * set the upper lock flag & record pid, etc.  See
		 * null_node_create().
		 */
		VTONULL(*ap->a_vpp)->null_flags |= NULL_LLOCK;

		dvp = ap->a_dvp;
		if (flags & ISDOTDOT) {
			/*
			 * If we're looking up `..' and this isn't the
			 * last component, then the starting directory
			 * ("parent") is _unlocked_ as a side-effect
			 * of lookups.  This is to avoid deadlocks:
			 * lock order is always parent, child, so
			 * looking up `..' requires dropping the lock
			 * on the starting directory.
			 */
			/* See ufs_lookup() for hairy ugly locking protocol
			   examples. */
			/*
			 * The underlying starting dir comes back locked if
			 * flags & LOCKPARENT (which we artificially set
			 * above) and ISLASTCN.
			 */
			if (flags & ISLASTCN) {
				VTONULL(dvp)->null_flags |= NULL_LLOCK;	/* no-op, right? */
#ifdef NULLFS_DIAGNOSTIC
				if (!VOP_ISLOCKED(VTONULL(dvp)->null_lowervp)) {
					vprint("lowerdvp not locked after lookup", dvp);
					panic("null_lookup not locked");
				}
#endif
			} else {
				VTONULL(dvp)->null_flags &= ~NULL_LLOCK;
#ifdef NULLFS_DIAGNOSTIC
				if (VOP_ISLOCKED(VTONULL(dvp)->null_lowervp)) {
					vprint("lowerdvp locked after lookup?", dvp);
					panic("null_lookup locked");
				}
#endif
			}
			/*
			 * Locking order: drop the lock on the lower-in-tree
			 * element, then get the lock on the higher-in-tree
			 * element, then (if needed) re-fetch the lower
			 * lock.  No need for vget() since we hold a
			 * refcount to the starting directory.
			 */
			VOP_UNLOCK(dvp);
			VOP_LOCK(*ap->a_vpp);
			/*
			 * We should return our directory locked if
			 * (flags & LOCKPARENT) and (flags & ISLASTCN).
			 */
			if ((flags & LOCKPARENT) && (flags & ISLASTCN))
				VOP_LOCK(dvp);
		} else {
			/*
			 * Normal directory locking order: we hold the starting
			 * directory locked; now lock our layer of the target.
			 */
			VOP_LOCK(*ap->a_vpp);
			/*
			 * The underlying starting dir comes back locked
			 * if lockparent (we set it) and no error
			 * (this leg) and ISLASTCN.
			 */
			if (flags & ISLASTCN) {
				VTONULL(dvp)->null_flags |= NULL_LLOCK;	/* no-op, right? */
#ifdef NULLFS_DIAGNOSTIC
				if (!VOP_ISLOCKED(VTONULL(dvp)->null_lowervp)) {
					vprint("lowerdvp not locked after lookup", dvp);
					panic("null_lookup not locked");
				}
#endif
			} else {
				VTONULL(dvp)->null_flags &= ~NULL_LLOCK;
#ifdef NULLFS_DIAGNOSTIC
				if (VOP_ISLOCKED(VTONULL(dvp)->null_lowervp)) {
					vprint("lowerdvp locked after lookup?", dvp);
					panic("null_lookup locked");
				}
#endif
			}
			/*
			 * We should return our directory unlocked if
			 * our caller didn't want the parent locked,
			 * !(flags & LOCKPARENT), or we're not at the
			 * end yet, !(flags & ISLASTCN).
			 */
			if (!(flags & LOCKPARENT) || !(flags & ISLASTCN))
				VOP_UNLOCK(dvp);
		}
	}
	return (error);
}

/*
 * Global vfs data structures
 */
int (**null_vnodeop_p) __P((void *));
struct vnodeopv_entry_desc null_vnodeop_entries[] = {
	{ &vop_default_desc,	null_bypass },

	{ &vop_getattr_desc,	null_getattr },
	{ &vop_inactive_desc,	null_inactive },
	{ &vop_reclaim_desc,	null_reclaim },
	{ &vop_print_desc,	null_print },

	{ &vop_lock_desc,	null_lock },
	{ &vop_unlock_desc,	null_unlock },
	{ &vop_islocked_desc,	null_islocked },
	{ &vop_lookup_desc,	null_lookup }, /* special locking frob */

	{ &vop_strategy_desc,	null_strategy },
	{ &vop_bwrite_desc,	null_bwrite },

	{ (struct vnodeop_desc*)NULL,	(int(*) __P((void *)))NULL }
};
struct vnodeopv_desc null_vnodeop_opv_desc =
	{ &null_vnodeop_p, null_vnodeop_entries };