/*
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * John Heidemann of the UCLA Ficus project.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)null_vnops.c	8.6 (Berkeley) 5/27/95
 *
 * Ancestors:
 *	@(#)lofs_vnops.c	1.2 (Berkeley) 6/18/92
 *	$Id: null_vnops.c,v 1.1.1.2 1998/03/01 02:13:14 fvdl Exp $
 *	...and...
 *	@(#)null_vnodeops.c 1.20 92/07/07 UCLA Ficus project
 */

/*
 * Null Layer
 *
 * (See mount_null(8) for more information.)
 *
 * The null layer duplicates a portion of the file system
 * name space under a new name.  In this respect, it is
 * similar to the loopback file system.  It differs from
 * the loopback fs in two respects:  it is implemented using
 * stackable layer techniques, and its null-nodes stack above
 * all lower-layer vnodes, not just over directory vnodes.
 *
 * The null layer has two purposes.  First, it serves as a demonstration
 * of layering by providing a layer which does nothing.  (It actually
 * does everything the loopback file system does, which is slightly
 * more than nothing.)  Second, the null layer can serve as a prototype
 * layer.  Since it provides all the necessary layer framework,
 * new file system layers can be created very easily by starting
 * with a null layer.
 *
 * The remainder of this comment examines the null layer as a basis
 * for constructing new layers.
 *
 *
 * INSTANTIATING NEW NULL LAYERS
 *
 * New null layers are created with mount_null(8).
 * Mount_null(8) takes two arguments, the pathname
 * of the lower vfs (target-pn) and the pathname where the null
 * layer will appear in the namespace (alias-pn).  After
 * the null layer is put into place, the contents
 * of the target-pn subtree will be aliased under alias-pn.
 *
 *
 * OPERATION OF A NULL LAYER
 *
 * The null layer is the minimum file system layer,
 * simply bypassing all possible operations to the lower layer
 * for processing there.  The majority of its activity centers
 * on the bypass routine, through which nearly all vnode operations
 * pass.
 *
 * The bypass routine accepts arbitrary vnode operations for
 * handling by the lower layer.  It begins by examining vnode
 * operation arguments and replacing any null-nodes by their
 * lower-layer equivalents.  It then invokes the operation
 * on the lower layer.  Finally, it restores the null-nodes
 * in the arguments and, if a vnode is returned by the operation,
 * stacks a null-node on top of the returned vnode.
 *
 * Although bypass handles most operations, vop_getattr, vop_lock,
 * vop_unlock, vop_inactive, vop_reclaim, and vop_print are not
 * bypassed. Vop_getattr must change the fsid being returned.
 * Vop_lock and vop_unlock must handle any locking for the
 * current vnode as well as pass the lock request down.
 * Vop_inactive and vop_reclaim are not bypassed so that
 * they can handle freeing null-layer specific data. Vop_print
 * is not bypassed to avoid excessive debugging information.
 * Also, certain vnode operations change the locking state within
 * the operation (create, mknod, remove, link, rename, mkdir, rmdir,
 * and symlink). Ideally these operations should not change the
 * lock state; instead, they should be changed so that the caller
 * of the function unlocks them. Otherwise all intermediate vnode
 * layers (such as union, umapfs, etc.) must catch these functions
 * to do the necessary locking at their layer.
 *
 *
 * INSTANTIATING VNODE STACKS
 *
 * Mounting associates the null layer with a lower layer,
 * in effect stacking two VFSes.  Vnode stacks are instead
 * created on demand as files are accessed.
 *
 * The initial mount creates a single vnode stack for the
 * root of the new null layer.  All other vnode stacks
 * are created as a result of vnode operations on
 * this or other null vnode stacks.
 *
 * New vnode stacks come into existence as a result of
 * an operation which returns a vnode.
 * The bypass routine stacks a null-node above the new
 * vnode before returning it to the caller.
 *
 * For example, imagine mounting a null layer with
 * "mount_null /usr/include /dev/layer/null".
 * Changing directory to /dev/layer/null will assign
 * the root null-node (which was created when the null layer was mounted).
 * Now consider opening "sys".  A vop_lookup would be
 * done on the root null-node.  This operation would bypass through
 * to the lower layer which would return a vnode representing
 * the UFS "sys".  Null_bypass then builds a null-node
 * aliasing the UFS "sys" and returns this to the caller.
 * Later operations on the null-node "sys" will repeat this
 * process when constructing other vnode stacks.
 *
 *
 * CREATING OTHER FILE SYSTEM LAYERS
 *
 * One of the easiest ways to construct new file system layers is to make
 * a copy of the null layer, rename all files and variables, and
 * then begin modifying the copy.  Sed can be used to easily rename
 * all variables.
 *
 * The umap layer is an example of a layer descended from the
 * null layer.
 *
 *
 * INVOKING OPERATIONS ON LOWER LAYERS
 *
 * There are two techniques to invoke operations on a lower layer
 * when the operation cannot be completely bypassed.  Each method
 * is appropriate in different situations.  In both cases,
 * it is the responsibility of the aliasing layer to make
 * the operation arguments "correct" for the lower layer
 * by mapping any vnode arguments to the lower layer.
 *
 * The first approach is to call the aliasing layer's bypass routine.
 * This method is most suitable when you wish to invoke the operation
 * currently being handled on the lower layer.  It has the advantage
 * that the bypass routine already must do argument mapping.
 * An example of this is null_getattr in the null layer.
 *
 * A second approach is to directly invoke vnode operations on
 * the lower layer with the VOP_OPERATIONNAME interface.
 * The advantage of this method is that it is easy to invoke
 * arbitrary operations on the lower layer.  The disadvantage
 * is that vnode arguments must be manually mapped.
 *
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/malloc.h>
#include <sys/buf.h>
#include <miscfs/nullfs/null.h>

int null_bug_bypass = 0;   /* for debugging: enables bypass printf'ing */

/*
 * This is the 10-Apr-92 bypass routine.
 *    This version has been optimized for speed, throwing away some
 * safety checks.  It should still always work, but it's not as
 * robust to programmer errors.
 *    Define SAFETY to include some error checking code.
 *
 * In general, we map all vnodes going down and unmap them on the way back.
 * As an exception to this, vnodes can be marked "unmapped" by setting
 * the Nth bit in the operation's vdesc_flags.
 *
 * Also, some BSD vnode operations have the side effect of vrele'ing
 * their arguments.  With stacking, the reference counts are held
 * by the upper node, not the lower one, so we must handle these
 * side effects here.  This is not of concern in Sun-derived systems
 * since there are no such side effects.
 *
 * This makes the following assumptions:
 * - only one returned vpp
 * - no INOUT vpp's (Sun's vop_open has one of these)
 * - the vnode operation vector of the first vnode should be used
 *   to determine what implementation of the op should be invoked
 * - all mapped vnodes are of our vnode-type (NEEDSWORK:
 *   problems on rmdir'ing mount points and renaming?)
 */
int
null_bypass(ap)
	struct vop_generic_args /* {
		struct vnodeop_desc *a_desc;
		<other random data follows, presumably>
	} */ *ap;
{
	extern int (**null_vnodeop_p)();  /* not extern, really "forward" */
	register struct vnode **this_vp_p;
	int error;
	struct vnode *old_vps[VDESC_MAX_VPS];
	struct vnode **vps_p[VDESC_MAX_VPS];
	struct vnode ***vppp;
	struct vnodeop_desc *descp = ap->a_desc;
	int reles, i;

	if (null_bug_bypass)
		printf("null_bypass: %s\n", descp->vdesc_name);

#ifdef SAFETY
	/*
	 * We require at least one vp.
	 */
	if (descp->vdesc_vp_offsets == NULL ||
	    descp->vdesc_vp_offsets[0] == VDESC_NO_OFFSET)
		panic("null_bypass: no vp's in map.\n");
#endif

	/*
	 * Map the vnodes going in.
	 * Later, we'll invoke the operation based on
	 * the first mapped vnode's operation vector.
	 */
	reles = descp->vdesc_flags;
	for (i = 0; i < VDESC_MAX_VPS; reles >>= 1, i++) {
		if (descp->vdesc_vp_offsets[i] == VDESC_NO_OFFSET)
			break;   /* bail out at end of list */
		vps_p[i] = this_vp_p =
			VOPARG_OFFSETTO(struct vnode**,descp->vdesc_vp_offsets[i],ap);
		/*
		 * We're not guaranteed that any but the first vnode
		 * are of our type.  Check for and don't map any
		 * that aren't.  (We must always map the first vp
		 * or vclean fails.)
		 */
		if (i && (*this_vp_p == NULL ||
		    (*this_vp_p)->v_op != null_vnodeop_p)) {
			old_vps[i] = NULL;
		} else {
			old_vps[i] = *this_vp_p;
			*(vps_p[i]) = NULLVPTOLOWERVP(*this_vp_p);
			/*
			 * XXX - Several operations have the side effect
			 * of vrele'ing their vp's.  We must account for
			 * that.  (This should go away in the future.)
			 */
			if (reles & 1)
				VREF(*this_vp_p);
		}
	}

	/*
	 * Call the operation on the lower layer
	 * with the modified argument structure.
	 */
	error = VCALL(*(vps_p[0]), descp->vdesc_offset, ap);

	/*
	 * Maintain the illusion of call-by-value
	 * by restoring vnodes in the argument structure
	 * to their original value.
	 */
	reles = descp->vdesc_flags;
	for (i = 0; i < VDESC_MAX_VPS; reles >>= 1, i++) {
		if (descp->vdesc_vp_offsets[i] == VDESC_NO_OFFSET)
			break;   /* bail out at end of list */
		if (old_vps[i]) {
			*(vps_p[i]) = old_vps[i];
			if (reles & 1)
				vrele(*(vps_p[i]));
		}
	}

	/*
	 * Map the possible out-going vpp.
	 * (Assumes that the lower layer always returns
	 * a VREF'ed vpp unless it gets an error.)
	 */
	if (descp->vdesc_vpp_offset != VDESC_NO_OFFSET &&
	    !(descp->vdesc_flags & VDESC_NOMAP_VPP) &&
	    !error) {
		/*
		 * XXX - even though some ops have vpp returned vp's,
		 * several ops actually vrele this before returning.
		 * We must avoid these ops.
		 * (This should go away when these ops are regularized.)
		 */
		if (descp->vdesc_flags & VDESC_VPP_WILLRELE)
			goto out;
		vppp = VOPARG_OFFSETTO(struct vnode***,
				 descp->vdesc_vpp_offset,ap);
		error = null_node_create(old_vps[0]->v_mount, **vppp, *vppp);
	}

 out:
	return (error);
}

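/*
 * A hypothetical sketch (not part of the original code) of the second
 * technique described in the opening comment: invoking an operation
 * directly on the lower layer with the VOP_OPERATIONNAME interface.
 * Unlike going through null_bypass() above, the vnode argument must be
 * mapped to the lower layer by hand.  The function name is illustrative
 * only; it is not registered in the vnodeop table below.
 */
#ifdef notdef
static int
null_islocked_sketch(vp)
	struct vnode *vp;
{

	/* Map our null-node to the lower vnode it shadows, then call down. */
	return (VOP_ISLOCKED(NULLVPTOLOWERVP(vp)));
}
#endif /* notdef */
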
/*
 * We have to carry on the locking protocol on the null layer vnodes
 * as we progress through the tree. We also have to enforce read-only
 * if this layer is mounted read-only.
 */
int
null_lookup(ap)
	struct vop_lookup_args /* {
		struct vnode * a_dvp;
		struct vnode ** a_vpp;
		struct componentname * a_cnp;
	} */ *ap;
{
	struct componentname *cnp = ap->a_cnp;
	struct proc *p = cnp->cn_proc;
	int flags = cnp->cn_flags;
	struct vop_lock_args lockargs;
	struct vop_unlock_args unlockargs;
	struct vnode *dvp, *vp;
	int error;

	if ((flags & ISLASTCN) && (ap->a_dvp->v_mount->mnt_flag & MNT_RDONLY) &&
	    (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME))
		return (EROFS);
	error = null_bypass(ap);
	if (error == EJUSTRETURN && (flags & ISLASTCN) &&
	    (ap->a_dvp->v_mount->mnt_flag & MNT_RDONLY) &&
	    (cnp->cn_nameiop == CREATE || cnp->cn_nameiop == RENAME))
		error = EROFS;
	/*
	 * We must do the same locking and unlocking at this layer as
	 * is done in the layers below us. We could figure this out
	 * based on the error return and the LASTCN, LOCKPARENT, and
	 * LOCKLEAF flags. However, it is more expedient to just find
	 * out the state of the lower level vnodes and set ours to the
	 * same state.
	 */
	dvp = ap->a_dvp;
	vp = *ap->a_vpp;
	if (dvp == vp)
		return (error);
	if (!VOP_ISLOCKED(dvp)) {
		unlockargs.a_vp = dvp;
		unlockargs.a_flags = 0;
		unlockargs.a_p = p;
		vop_nounlock(&unlockargs);
	}
	if (vp != NULL && VOP_ISLOCKED(vp)) {
		lockargs.a_vp = vp;
		lockargs.a_flags = LK_SHARED;
		lockargs.a_p = p;
		vop_nolock(&lockargs);
	}
	return (error);
}


/*
 * Setattr call. Disallow write attempts if the layer is mounted read-only.
 */
int
null_setattr(ap)
	struct vop_setattr_args /* {
		struct vnodeop_desc *a_desc;
		struct vnode *a_vp;
		struct vattr *a_vap;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct vattr *vap = ap->a_vap;

	if ((vap->va_flags != VNOVAL || vap->va_uid != (uid_t)VNOVAL ||
	    vap->va_gid != (gid_t)VNOVAL || vap->va_atime.ts_sec != VNOVAL ||
	    vap->va_mtime.ts_sec != VNOVAL || vap->va_mode != (mode_t)VNOVAL) &&
	    (vp->v_mount->mnt_flag & MNT_RDONLY))
		return (EROFS);
	if (vap->va_size != VNOVAL) {
		switch (vp->v_type) {
		case VDIR:
			return (EISDIR);
		case VCHR:
		case VBLK:
		case VSOCK:
		case VFIFO:
			return (0);
		case VREG:
		case VLNK:
		default:
			/*
			 * Disallow write attempts if the filesystem is
			 * mounted read-only.
			 */
			if (vp->v_mount->mnt_flag & MNT_RDONLY)
				return (EROFS);
		}
	}
	return (null_bypass(ap));
}

/*
 * We handle getattr only to change the fsid.
 */
int
null_getattr(ap)
	struct vop_getattr_args /* {
		struct vnode *a_vp;
		struct vattr *a_vap;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap;
{
	int error;

	if ((error = null_bypass(ap)) != 0)
		return (error);
	/* Requires that arguments be restored. */
	ap->a_vap->va_fsid = ap->a_vp->v_mount->mnt_stat.f_fsid.val[0];
	return (0);
}

int
null_access(ap)
	struct vop_access_args /* {
		struct vnode *a_vp;
		int a_mode;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	mode_t mode = ap->a_mode;

	/*
	 * Disallow write attempts on read-only layers,
	 * unless the file is a socket, fifo, or a block or
	 * character device resident on the file system.
	 */
	if (mode & VWRITE) {
		switch (vp->v_type) {
		case VDIR:
		case VLNK:
		case VREG:
			if (vp->v_mount->mnt_flag & MNT_RDONLY)
				return (EROFS);
			break;
		}
	}
	return (null_bypass(ap));
}

/*
 * We need to process our own vnode lock and then clear the
 * interlock flag as it applies only to our vnode, not the
 * vnodes below us on the stack.
 */
int
null_lock(ap)
	struct vop_lock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct proc *a_p;
	} */ *ap;
{

	vop_nolock(ap);
	if ((ap->a_flags & LK_TYPE_MASK) == LK_DRAIN)
		return (0);
	ap->a_flags &= ~LK_INTERLOCK;
	return (null_bypass(ap));
}

/*
 * We need to process our own vnode unlock and then clear the
 * interlock flag as it applies only to our vnode, not the
 * vnodes below us on the stack.
 */
int
null_unlock(ap)
	struct vop_unlock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct proc *a_p;
	} */ *ap;
{

	vop_nounlock(ap);
	ap->a_flags &= ~LK_INTERLOCK;
	return (null_bypass(ap));
}

int
null_inactive(ap)
	struct vop_inactive_args /* {
		struct vnode *a_vp;
		struct proc *a_p;
	} */ *ap;
{
	/*
	 * Do nothing (and _don't_ bypass).
	 * Wait to vrele lowervp until reclaim,
	 * so that until then our null_node is in the
	 * cache and reusable.
	 *
	 * NEEDSWORK: Someday, consider inactive'ing
	 * the lowervp and then trying to reactivate it
	 * with capabilities (v_id)
	 * like they do in the name lookup cache code.
	 * That's too much work for now.
	 */
	VOP_UNLOCK(ap->a_vp, 0, ap->a_p);
	return (0);
}

int
null_reclaim(ap)
	struct vop_reclaim_args /* {
		struct vnode *a_vp;
		struct proc *a_p;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct null_node *xp = VTONULL(vp);
	struct vnode *lowervp = xp->null_lowervp;

	/*
	 * Note: in vop_reclaim, vp->v_op == dead_vnodeop_p,
	 * so we can't call VOPs on ourself.
	 */
	/* After this assignment, this node will not be re-used. */
	xp->null_lowervp = NULL;
	LIST_REMOVE(xp, null_hash);
	FREE(vp->v_data, M_TEMP);
	vp->v_data = NULL;
	vrele(lowervp);
	return (0);
}

int
null_print(ap)
	struct vop_print_args /* {
		struct vnode *a_vp;
	} */ *ap;
{
	register struct vnode *vp = ap->a_vp;

	printf("\ttag VT_NULLFS, vp=%p, lowervp=%p\n", vp, NULLVPTOLOWERVP(vp));
	return (0);
}

/*
 * XXX - vop_strategy must be hand coded because it has no
 * vnode in its arguments.
 * This goes away with a merged VM/buffer cache.
 */
int
null_strategy(ap)
	struct vop_strategy_args /* {
		struct buf *a_bp;
	} */ *ap;
{
	struct buf *bp = ap->a_bp;
	int error;
	struct vnode *savedvp;

	savedvp = bp->b_vp;
	bp->b_vp = NULLVPTOLOWERVP(bp->b_vp);

	error = VOP_STRATEGY(bp);

	bp->b_vp = savedvp;

	return (error);
}

/*
 * XXX - like vop_strategy, vop_bwrite must be hand coded because it has no
 * vnode in its arguments.
 * This goes away with a merged VM/buffer cache.
 */
int
null_bwrite(ap)
	struct vop_bwrite_args /* {
		struct buf *a_bp;
	} */ *ap;
{
	struct buf *bp = ap->a_bp;
	int error;
	struct vnode *savedvp;

	savedvp = bp->b_vp;
	bp->b_vp = NULLVPTOLOWERVP(bp->b_vp);

	error = VOP_BWRITE(bp);

	bp->b_vp = savedvp;

	return (error);
}

/*
 * Global vfs data structures
 */
int (**null_vnodeop_p)();
struct vnodeopv_entry_desc null_vnodeop_entries[] = {
	{ &vop_default_desc, null_bypass },

	{ &vop_lookup_desc, null_lookup },
	{ &vop_setattr_desc, null_setattr },
	{ &vop_getattr_desc, null_getattr },
	{ &vop_access_desc, null_access },
	{ &vop_lock_desc, null_lock },
	{ &vop_unlock_desc, null_unlock },
	{ &vop_inactive_desc, null_inactive },
	{ &vop_reclaim_desc, null_reclaim },
	{ &vop_print_desc, null_print },

	{ &vop_strategy_desc, null_strategy },
	{ &vop_bwrite_desc, null_bwrite },

	{ (struct vnodeop_desc*)NULL, (int(*)())NULL }
};
struct vnodeopv_desc null_vnodeop_opv_desc =
	{ &null_vnodeop_p, null_vnodeop_entries };

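/*
 * A hypothetical sketch (not part of the original code) of how a layer
 * derived from this one, as described under "CREATING OTHER FILE SYSTEM
 * LAYERS" above, would plug into the vfs framework: default every
 * operation to the bypass routine and override only the operations the
 * new layer cares about.  All "example" names are illustrative only.
 */
#ifdef notdef
extern int example_getattr();	/* the one op this layer overrides */

int (**example_vnodeop_p)();
struct vnodeopv_entry_desc example_vnodeop_entries[] = {
	{ &vop_default_desc, null_bypass },	/* bypass everything else */
	{ &vop_getattr_desc, example_getattr },	/* layer-specific getattr */
	{ (struct vnodeop_desc*)NULL, (int(*)())NULL }
};
struct vnodeopv_desc example_vnodeop_opv_desc =
	{ &example_vnodeop_p, example_vnodeop_entries };
#endif /* notdef */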