/*	$NetBSD: null_vnops.c,v 1.13 1998/03/01 02:21:43 fvdl Exp $	*/

/*
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * John Heidemann of the UCLA Ficus project.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)null_vnops.c	8.6 (Berkeley) 5/27/95
 *
 * Ancestors:
 *	@(#)lofs_vnops.c	1.2 (Berkeley) 6/18/92
 *	$Id: null_vnops.c,v 1.13 1998/03/01 02:21:43 fvdl Exp $
 *	...and...
 *	@(#)null_vnodeops.c 1.20 92/07/07 UCLA Ficus project
 */

/*
 * Null Layer
 *
 * (See mount_null(8) for more information.)
 *
 * The null layer duplicates a portion of the file system
 * name space under a new name.  In this respect, it is
 * similar to the loopback file system.  It differs from
 * the loopback fs in two respects: it is implemented using
 * stackable-layer techniques, and its null-nodes stack above
 * all lower-layer vnodes, not just over directory vnodes.
 *
 * The null layer has two purposes.  First, it serves as a demonstration
 * of layering by providing a layer which does nothing.  (It actually
 * does everything the loopback file system does, which is slightly
 * more than nothing.)  Second, the null layer can serve as a prototype
 * layer.  Since it provides all necessary layer framework,
 * new file system layers can be created very easily by starting
 * with a null layer.
 *
 * The remainder of this comment examines the null layer as a basis
 * for constructing new layers.
 *
 *
 * INSTANTIATING NEW NULL LAYERS
 *
 * New null layers are created with mount_null(8).
 * Mount_null(8) takes two arguments, the pathname
 * of the lower vfs (target-pn) and the pathname where the null
 * layer will appear in the namespace (alias-pn).  After
 * the null layer is put into place, the contents
 * of the target-pn subtree will be aliased under alias-pn.
 * A sketch of the equivalent mount(2) call appears below.
 *
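 * As a hedged illustration only (this assumes the struct null_args
 * layout from <miscfs/nullfs/null.h> and the MOUNT_NULL type string
 * from <sys/mount.h>; error handling is left to err(3)):
 *
 *	struct null_args args;
 *
 *	args.target = "/usr/include";
 *	if (mount(MOUNT_NULL, "/dev/layer/null", 0, &args) == -1)
 *		err(1, "mount");
 *
 * Here "/usr/include" plays the role of target-pn and
 * "/dev/layer/null" that of alias-pn.
 *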
 *
 * OPERATION OF A NULL LAYER
 *
 * The null layer is the minimum file system layer,
 * simply bypassing all possible operations to the lower layer
 * for processing there.  The majority of its activity centers
 * on the bypass routine, through which nearly all vnode operations
 * pass.
 *
 * The bypass routine accepts arbitrary vnode operations for
 * handling by the lower layer.  It begins by examining vnode
 * operation arguments and replacing any null-nodes by their
 * lower-layer equivalents.  It then invokes the operation
 * on the lower layer.  Finally, it replaces the null-nodes
 * in the arguments and, if a vnode is returned by the operation,
 * stacks a null-node on top of the returned vnode.
 *
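 * As a purely illustrative sketch of that mapping (the real code is
 * null_bypass() below; argument names follow the 4.4BSD VOP_ACCESS
 * interface), handling an access check on a null-node vp amounts to:
 *
 *	lowervp = NULLVPTOLOWERVP(vp);
 *	error = VOP_ACCESS(lowervp, mode, cred, p);
 *
 * with the mapping then undone, and any vnode returned by an
 * operation wrapped in a fresh null-node, before the result is
 * handed back to the caller.
 *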
 * Although bypass handles most operations, vop_getattr, vop_lock,
 * vop_unlock, vop_inactive, vop_reclaim, and vop_print are not
 * bypassed.  Vop_getattr must change the fsid being returned.
 * Vop_lock and vop_unlock must handle any locking for the
 * current vnode as well as pass the lock request down.
 * Vop_inactive and vop_reclaim are not bypassed so that
 * they can handle freeing null-layer-specific data.  Vop_print
 * is not bypassed to avoid excessive debugging information.
 * Also, certain vnode operations change the locking state within
 * the operation (create, mknod, remove, link, rename, mkdir, rmdir,
 * and symlink).  Ideally these operations should not change the
 * lock state, but should be changed to let the caller of the
 * function unlock them.  Otherwise all intermediate vnode layers
 * (such as union, umapfs, etc.) must catch these functions to do
 * the necessary locking at their layer.
 *
 *
 * INSTANTIATING VNODE STACKS
 *
 * Mounting associates the null layer with a lower layer,
 * in effect stacking two VFSes.  Vnode stacks are instead
 * created on demand as files are accessed.
 *
 * The initial mount creates a single vnode stack for the
 * root of the new null layer.  All other vnode stacks
 * are created as a result of vnode operations on
 * this or other null vnode stacks.
 *
 * New vnode stacks come into existence as a result of
 * an operation which returns a vnode.
 * The bypass routine stacks a null-node above the new
 * vnode before returning it to the caller.
 *
 * For example, imagine mounting a null layer with
 * "mount_null /usr/include /dev/layer/null".
 * Changing directory to /dev/layer/null will assign
 * the root null-node (which was created when the null layer was mounted).
 * Now consider opening "sys".  A vop_lookup would be
 * done on the root null-node.  This operation would bypass through
 * to the lower layer which would return a vnode representing
 * the UFS "sys".  Null_bypass then builds a null-node
 * aliasing the UFS "sys" and returns this to the caller.
 * Later operations on the null-node "sys" will repeat this
 * process when constructing other vnode stacks.
 *
 *
 * CREATING OTHER FILE SYSTEM LAYERS
 *
 * One of the easiest ways to construct new file system layers is to make
 * a copy of the null layer, rename all files and variables, and
 * then begin modifying the copy.  Sed can be used to easily rename
 * all variables.
 *
 * The umap layer is an example of a layer descended from the
 * null layer.
 *
 *
 * INVOKING OPERATIONS ON LOWER LAYERS
 *
 * There are two techniques to invoke operations on a lower layer
 * when the operation cannot be completely bypassed.  Each method
 * is appropriate in different situations.  In both cases,
 * it is the responsibility of the aliasing layer to make
 * the operation arguments "correct" for the lower layer
 * by mapping any vnode arguments to the lower layer.
 *
 * The first approach is to call the aliasing layer's bypass routine.
 * This method is most suitable when you wish to invoke the operation
 * currently being handled on the lower layer.  It has the advantage
 * that the bypass routine already must do argument mapping.
 * An example of this is null_getattr in the null layer.
 *
 * The second approach is to directly invoke vnode operations on
 * the lower layer with the VOP_OPERATIONNAME interface.
 * The advantage of this method is that it is easy to invoke
 * arbitrary operations on the lower layer.  The disadvantage
 * is that vnode arguments must be manually mapped.
 *
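 * As a hedged example of the two approaches, from inside a null-layer
 * routine (ap, vp, and NULLVPTOLOWERVP as used elsewhere in this file):
 *
 *	error = null_bypass(ap);			(first approach)
 *	error = VOP_PRINT(NULLVPTOLOWERVP(vp));		(second approach)
 *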
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/malloc.h>
#include <sys/buf.h>
#include <miscfs/nullfs/null.h>
#include <miscfs/genfs/genfs.h>


int null_bug_bypass = 0;   /* for debugging: enables bypass printf'ing */

int	null_bypass __P((void *));
int	null_getattr __P((void *));
int	null_inactive __P((void *));
int	null_reclaim __P((void *));
int	null_print __P((void *));
int	null_strategy __P((void *));
int	null_bwrite __P((void *));
int	null_lock __P((void *));
int	null_unlock __P((void *));
int	null_islocked __P((void *));
int	null_lookup __P((void *));
int	null_setattr __P((void *));
int	null_access __P((void *));


/*
 * This is the 10-Apr-92 bypass routine.
 *    This version has been optimized for speed, throwing away some
 * safety checks.  It should still always work, but it's not as
 * robust against programmer errors.
 *    Define SAFETY to include some error checking code.
 *
 * In general, we map all vnodes going down and unmap them on the way back.
 * As an exception to this, vnodes can be marked "unmapped" by setting
 * the Nth bit in the operation's vdesc_flags.
 *
 * Also, some BSD vnode operations have the side effect of vrele'ing
 * their arguments.  With stacking, the reference counts are held
 * by the upper node, not the lower one, so we must handle these
 * side effects here.  This is not of concern in Sun-derived systems
 * since there are no such side effects.
 *
 * This routine makes the following assumptions:
 * - only one vpp is returned
 * - there are no INOUT vpp's (Sun's vop_open has one of these)
 * - the vnode operation vector of the first vnode should be used
 *   to determine what implementation of the op should be invoked
 * - all mapped vnodes are of our vnode-type (NEEDSWORK:
 *   problems on rmdir'ing mount points and renaming?)
 */
int
null_bypass(v)
	void *v;
{
	struct vop_generic_args /* {
		struct vnodeop_desc *a_desc;
		<other random data follows, presumably>
	} */ *ap = v;
	extern int (**null_vnodeop_p) __P((void *));
	register struct vnode **this_vp_p;
	int error;
	struct vnode *old_vps[VDESC_MAX_VPS];
	struct vnode **vps_p[VDESC_MAX_VPS];
	struct vnode ***vppp;
	struct vnodeop_desc *descp = ap->a_desc;
	int reles, i;

	if (null_bug_bypass)
		printf ("null_bypass: %s\n", descp->vdesc_name);

#ifdef SAFETY
	/*
	 * We require at least one vp.
	 */
	if (descp->vdesc_vp_offsets == NULL ||
	    descp->vdesc_vp_offsets[0] == VDESC_NO_OFFSET)
		panic ("null_bypass: no vp's in map.\n");
#endif

	/*
	 * Map the vnodes going in.
	 * Later, we'll invoke the operation based on
	 * the first mapped vnode's operation vector.
	 */
	reles = descp->vdesc_flags;
	for (i = 0; i < VDESC_MAX_VPS; reles >>= 1, i++) {
		if (descp->vdesc_vp_offsets[i] == VDESC_NO_OFFSET)
			break;   /* bail out at end of list */
		vps_p[i] = this_vp_p =
			VOPARG_OFFSETTO(struct vnode**,descp->vdesc_vp_offsets[i],ap);
		/*
		 * We're not guaranteed that any but the first vnode
		 * are of our type.  Check for and don't map any
		 * that aren't.  (We must always map first vp or vclean fails.)
		 */
		if (i && (*this_vp_p == NULL ||
		    (*this_vp_p)->v_op != null_vnodeop_p)) {
			old_vps[i] = NULL;
		} else {
			old_vps[i] = *this_vp_p;
			*(vps_p[i]) = NULLVPTOLOWERVP(*this_vp_p);
			/*
			 * XXX - Several operations have the side effect
			 * of vrele'ing their vp's.  We must account for
			 * that.  (This should go away in the future.)
			 */
			if (reles & 1)
				VREF(*this_vp_p);
		}
	}

	/*
	 * Call the operation on the lower layer
	 * with the modified argument structure.
	 */
	error = VCALL(*(vps_p[0]), descp->vdesc_offset, ap);

	/*
	 * Maintain the illusion of call-by-value
	 * by restoring vnodes in the argument structure
	 * to their original value.
	 */
	reles = descp->vdesc_flags;
	for (i = 0; i < VDESC_MAX_VPS; reles >>= 1, i++) {
		if (descp->vdesc_vp_offsets[i] == VDESC_NO_OFFSET)
			break;   /* bail out at end of list */
		if (old_vps[i]) {
			*(vps_p[i]) = old_vps[i];
			if (reles & 1)
				vrele(*(vps_p[i]));
		}
	}

	/*
	 * Map the possible out-going vpp
	 * (Assumes that the lower layer always returns
	 * a VREF'ed vpp unless it gets an error.)
	 */
	if (descp->vdesc_vpp_offset != VDESC_NO_OFFSET &&
	    !(descp->vdesc_flags & VDESC_NOMAP_VPP) &&
	    !error) {
		/*
		 * XXX - even though some ops have vpp returned vp's,
		 * several ops actually vrele this before returning.
		 * We must avoid these ops.
		 * (This should go away when these ops are regularized.)
		 */
		if (descp->vdesc_flags & VDESC_VPP_WILLRELE)
			goto out;
		vppp = VOPARG_OFFSETTO(struct vnode***,
				 descp->vdesc_vpp_offset,ap);
		error = null_node_create(old_vps[0]->v_mount, **vppp, *vppp,
					 descp == &vop_lookup_desc ? 0 : 1);
	}

 out:
	return (error);
}

/*
 * We have to carry on the locking protocol on the null layer vnodes
 * as we progress through the tree. We also have to enforce read-only
 * if this layer is mounted read-only.
 */
int
null_lookup(v)
	void *v;
{
	struct vop_lookup_args /* {
		struct vnode * a_dvp;
		struct vnode ** a_vpp;
		struct componentname * a_cnp;
	} */ *ap = v;
	struct componentname *cnp = ap->a_cnp;
	int flags = cnp->cn_flags;
	struct vop_lock_args lockargs;
	struct vop_unlock_args unlockargs;
	struct vnode *dvp, *vp;
	int error;

	if ((flags & ISLASTCN) && (ap->a_dvp->v_mount->mnt_flag & MNT_RDONLY) &&
	    (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME))
		return (EROFS);
	error = null_bypass(ap);
	if (error == EJUSTRETURN && (flags & ISLASTCN) &&
	    (ap->a_dvp->v_mount->mnt_flag & MNT_RDONLY) &&
	    (cnp->cn_nameiop == CREATE || cnp->cn_nameiop == RENAME))
		error = EROFS;
	/*
	 * We must do the same locking and unlocking at this layer as
	 * is done in the layers below us. We could figure this out
	 * based on the error return and the ISLASTCN, LOCKPARENT, and
	 * LOCKLEAF flags. However, it is more expedient to just find
	 * out the state of the lower level vnodes and set ours to the
	 * same state.
	 */
	dvp = ap->a_dvp;
	vp = *ap->a_vpp;
	if (dvp == vp)
		return (error);
	if (!VOP_ISLOCKED(dvp)) {
		unlockargs.a_vp = dvp;
		unlockargs.a_flags = 0;
		genfs_nounlock(&unlockargs);
	}
	if (vp != NULL && VOP_ISLOCKED(vp)) {
		lockargs.a_vp = vp;
		lockargs.a_flags = LK_SHARED;
		genfs_nolock(&lockargs);
	}
	return (error);
}

/*
 * Setattr call. Disallow write attempts if the layer is mounted read-only.
 */
int
null_setattr(v)
	void *v;
{
	struct vop_setattr_args /* {
		struct vnodeop_desc *a_desc;
		struct vnode *a_vp;
		struct vattr *a_vap;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct vattr *vap = ap->a_vap;

	if ((vap->va_flags != VNOVAL || vap->va_uid != (uid_t)VNOVAL ||
	    vap->va_gid != (gid_t)VNOVAL || vap->va_atime.tv_sec != VNOVAL ||
	    vap->va_mtime.tv_sec != VNOVAL || vap->va_mode != (mode_t)VNOVAL) &&
	    (vp->v_mount->mnt_flag & MNT_RDONLY))
		return (EROFS);
	if (vap->va_size != VNOVAL) {
		switch (vp->v_type) {
		case VDIR:
			return (EISDIR);
		case VCHR:
		case VBLK:
		case VSOCK:
		case VFIFO:
			return (0);
		case VREG:
		case VLNK:
		default:
			/*
			 * Disallow write attempts if the filesystem is
			 * mounted read-only.
			 */
			if (vp->v_mount->mnt_flag & MNT_RDONLY)
				return (EROFS);
		}
	}
	return (null_bypass(ap));
}

/*
 * We handle getattr only to change the fsid.
 */
int
null_getattr(v)
	void *v;
{
	struct vop_getattr_args /* {
		struct vnode *a_vp;
		struct vattr *a_vap;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap = v;
	int error;

	if ((error = null_bypass(ap)) != 0)
		return (error);
	/* Requires that arguments be restored. */
	ap->a_vap->va_fsid = ap->a_vp->v_mount->mnt_stat.f_fsid.val[0];
	return (0);
}

int
null_access(v)
	void *v;
{
	struct vop_access_args /* {
		struct vnode *a_vp;
		int  a_mode;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	mode_t mode = ap->a_mode;

	/*
	 * Disallow write attempts on read-only layers,
	 * unless the file is a socket, fifo, or a block or
	 * character device resident on the file system.
	 */
	if (mode & VWRITE) {
		switch (vp->v_type) {
		case VDIR:
		case VLNK:
		case VREG:
			if (vp->v_mount->mnt_flag & MNT_RDONLY)
				return (EROFS);
			break;
		default:
			break;
		}
	}
	return (null_bypass(ap));
}

/*
 * We need to process our own vnode lock and then clear the
 * interlock flag as it applies only to our vnode, not the
 * vnodes below us on the stack.
 */
int
null_lock(v)
	void *v;
{
	struct vop_lock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct proc *a_p;
	} */ *ap = v;

	genfs_nolock(ap);
	if ((ap->a_flags & LK_TYPE_MASK) == LK_DRAIN)
		return (0);
	ap->a_flags &= ~LK_INTERLOCK;
	return (null_bypass(ap));
}

/*
 * We need to process our own vnode unlock and then clear the
 * interlock flag as it applies only to our vnode, not the
 * vnodes below us on the stack.
 */
int
null_unlock(v)
	void *v;
{
	struct vop_unlock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct proc *a_p;
	} */ *ap = v;

	genfs_nounlock(ap);
	ap->a_flags &= ~LK_INTERLOCK;
	return (null_bypass(ap));
}

int
null_inactive(v)
	void *v;
{
	struct vop_inactive_args /* {
		struct vnode *a_vp;
		struct proc *a_p;
	} */ *ap = v;

	/*
	 * Do nothing (and _don't_ bypass).
	 * Wait to vrele lowervp until reclaim,
	 * so that until then our null_node is in the
	 * cache and reusable.
	 *
	 * NEEDSWORK: Someday, consider inactive'ing
	 * the lowervp and then trying to reactivate it
	 * with capabilities (v_id)
	 * like they do in the name lookup cache code.
	 * That's too much work for now.
	 */
	VOP_UNLOCK(ap->a_vp, 0);
	return (0);
}

int
null_reclaim(v)
	void *v;
{
	struct vop_reclaim_args /* {
		struct vnode *a_vp;
		struct proc *a_p;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct null_node *xp = VTONULL(vp);
	struct vnode *lowervp = xp->null_lowervp;

	/*
	 * Note: in vop_reclaim, vp->v_op == dead_vnodeop_p,
	 * so we can't call VOPs on ourself.
	 */
	/* After this assignment, this node will not be re-used. */
	xp->null_lowervp = NULL;
	LIST_REMOVE(xp, null_hash);
	FREE(vp->v_data, M_TEMP);
	vp->v_data = NULL;
	vrele(lowervp);
	return (0);
}

int
null_print(v)
	void *v;
{
	struct vop_print_args /* {
		struct vnode *a_vp;
	} */ *ap = v;
	register struct vnode *vp = ap->a_vp;

	printf ("\ttag VT_NULLFS, vp=%p, lowervp=%p\n", vp, NULLVPTOLOWERVP(vp));
	return (0);
}

/*
 * XXX - vop_strategy must be hand coded because it has no
 * vnode in its arguments.
 * This goes away with a merged VM/buffer cache.
 */
int
null_strategy(v)
	void *v;
{
	struct vop_strategy_args /* {
		struct buf *a_bp;
	} */ *ap = v;
	struct buf *bp = ap->a_bp;
	int error;
	struct vnode *savedvp;
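
	/*
	 * Swap the buffer's vnode for the one below us so the lower
	 * layer's strategy routine operates on it, then restore the
	 * original (upper) vnode once the call returns.
	 */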
	savedvp = bp->b_vp;
	bp->b_vp = NULLVPTOLOWERVP(bp->b_vp);

	error = VOP_STRATEGY(bp);

	bp->b_vp = savedvp;

	return (error);
}

/*
 * XXX - like vop_strategy, vop_bwrite must be hand coded because it has no
 * vnode in its arguments.
 * This goes away with a merged VM/buffer cache.
 */
int
null_bwrite(v)
	void *v;
{
	struct vop_bwrite_args /* {
		struct buf *a_bp;
	} */ *ap = v;
	struct buf *bp = ap->a_bp;
	int error;
	struct vnode *savedvp;

	savedvp = bp->b_vp;
	bp->b_vp = NULLVPTOLOWERVP(bp->b_vp);

	error = VOP_BWRITE(bp);

	bp->b_vp = savedvp;

	return (error);
}

/*
 * Global vfs data structures
 */
int (**null_vnodeop_p) __P((void *));
struct vnodeopv_entry_desc null_vnodeop_entries[] = {
	{ &vop_default_desc, null_bypass },

	{ &vop_lookup_desc,   null_lookup },
	{ &vop_setattr_desc,  null_setattr },
	{ &vop_getattr_desc,  null_getattr },
	{ &vop_access_desc,   null_access },
	{ &vop_lock_desc,     null_lock },
	{ &vop_unlock_desc,   null_unlock },
	{ &vop_inactive_desc, null_inactive },
	{ &vop_reclaim_desc,  null_reclaim },
	{ &vop_print_desc,    null_print },

	{ &vop_strategy_desc, null_strategy },
	{ &vop_bwrite_desc,   null_bwrite },

	{ (struct vnodeop_desc*)NULL, (int(*)__P((void *)))NULL }
};
struct vnodeopv_desc null_vnodeop_opv_desc =
	{ &null_vnodeop_p, null_vnodeop_entries };