/*	$NetBSD: umap_vnops.c,v 1.28 2004/06/11 12:34:13 yamt Exp $	*/

/*
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software donated to Berkeley by
 * the UCLA Ficus project.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)umap_vnops.c	8.6 (Berkeley) 5/22/95
 */

/*
 * Umap Layer
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: umap_vnops.c,v 1.28 2004/06/11 12:34:13 yamt Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/time.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/malloc.h>
#include <sys/buf.h>
#include <miscfs/umapfs/umap.h>
#include <miscfs/genfs/genfs.h>
#include <miscfs/genfs/layer_extern.h>

int	umap_lookup	__P((void *));
int	umap_getattr	__P((void *));
int	umap_print	__P((void *));
int	umap_rename	__P((void *));

/*
 * Global vfs data structures
 */
/*
 * XXX - strategy, bwrite are hand coded currently.  They should
 * go away with a merged buffer/block cache.
 *
 */
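/*
 * Any operation not listed below is handled by umap_bypass() via
 * vop_default_desc.  Only lookup, getattr, print and rename need
 * umap-specific treatment; the remaining entries reuse the generic
 * layerfs implementations from miscfs/genfs.
 */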
int (**umap_vnodeop_p) __P((void *));
const struct vnodeopv_entry_desc umap_vnodeop_entries[] = {
	{ &vop_default_desc,	umap_bypass },

	{ &vop_lookup_desc,	umap_lookup },
	{ &vop_getattr_desc,	umap_getattr },
	{ &vop_print_desc,	umap_print },
	{ &vop_rename_desc,	umap_rename },

	{ &vop_lock_desc,	layer_lock },
	{ &vop_unlock_desc,	layer_unlock },
	{ &vop_islocked_desc,	layer_islocked },
	{ &vop_fsync_desc,	layer_fsync },
	{ &vop_inactive_desc,	layer_inactive },
	{ &vop_reclaim_desc,	layer_reclaim },
	{ &vop_open_desc,	layer_open },
	{ &vop_setattr_desc,	layer_setattr },
	{ &vop_access_desc,	layer_access },
	{ &vop_remove_desc,	layer_remove },

	{ &vop_bwrite_desc,	layer_bwrite },
	{ &vop_bmap_desc,	layer_bmap },
	{ &vop_getpages_desc,	layer_getpages },
	{ &vop_putpages_desc,	layer_putpages },

	{ NULL, NULL }
};
const struct vnodeopv_desc umapfs_vnodeop_opv_desc =
	{ &umap_vnodeop_p, umap_vnodeop_entries };

/*
 * This is the 08-June-1999 bypass routine.
 * See layer_vnops.c:layer_bypass for more details.
 */
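/*
 * In outline, umap_bypass():
 *	1. replaces each umapfs vnode argument with its lower vnode,
 *	2. duplicates the credential (and any componentname credential)
 *	   and maps the ids in the copy with umap_mapids(),
 *	3. invokes the operation on the lower layer via VCALL,
 *	4. restores the original vnodes and credentials, and
 *	5. wraps a returned *vpp in a umapfs node with layer_node_create().
 */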
int
umap_bypass(v)
	void *v;
{
	struct vop_generic_args /* {
		struct vnodeop_desc *a_desc;
		<other random data follows, presumably>
	} */ *ap = v;
	struct ucred **credpp = 0, *credp = 0;
	struct ucred *savecredp = 0, *savecompcredp = 0;
	struct ucred *compcredp = 0;
	struct vnode **this_vp_p;
	int error, error1;
	int (**our_vnodeop_p) __P((void *));
	struct vnode *old_vps[VDESC_MAX_VPS], *vp0;
	struct vnode **vps_p[VDESC_MAX_VPS];
	struct vnode ***vppp;
	struct vnodeop_desc *descp = ap->a_desc;
	int reles, i, flags;
	struct componentname **compnamepp = 0;

#ifdef SAFETY
	/*
	 * We require at least one vp.
	 */
	if (descp->vdesc_vp_offsets == NULL ||
	    descp->vdesc_vp_offsets[0] == VDESC_NO_OFFSET)
		panic ("umap_bypass: no vp's in map.\n");
#endif
	vps_p[0] = VOPARG_OFFSETTO(struct vnode**,descp->vdesc_vp_offsets[0],
				ap);
	vp0 = *vps_p[0];
	flags = MOUNTTOUMAPMOUNT(vp0->v_mount)->umapm_flags;
	our_vnodeop_p = vp0->v_op;

	if (flags & LAYERFS_MBYPASSDEBUG)
		printf("umap_bypass: %s\n", descp->vdesc_name);

	/*
	 * Map the vnodes going in.
	 * Later, we'll invoke the operation based on
	 * the first mapped vnode's operation vector.
	 */
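	/*
	 * descp->vdesc_flags has a WILLRELE (and WILLUNLOCK) bit for each
	 * vnode slot; `reles' is shifted right once per slot so the tests
	 * against the VDESC_VP0_* values always see the bits for the slot
	 * at hand.  The VREF() taken on a mapped lower vnode balances the
	 * vrele() the called operation will do on it.
	 */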
	reles = descp->vdesc_flags;
	for (i = 0; i < VDESC_MAX_VPS; reles >>= 1, i++) {
		if (descp->vdesc_vp_offsets[i] == VDESC_NO_OFFSET)
			break;   /* bail out at end of list */
		vps_p[i] = this_vp_p =
			VOPARG_OFFSETTO(struct vnode**, descp->vdesc_vp_offsets[i], ap);

		/*
		 * We're not guaranteed that any but the first vnode
		 * are of our type.  Check for and don't map any
		 * that aren't.  (Must map first vp or vclean fails.)
		 */

		if (i && ((*this_vp_p)==NULL ||
		    (*this_vp_p)->v_op != our_vnodeop_p)) {
			old_vps[i] = NULL;
		} else {
			old_vps[i] = *this_vp_p;
			*(vps_p[i]) = UMAPVPTOLOWERVP(*this_vp_p);
			if (reles & 1)
				VREF(*this_vp_p);
		}

	}

	/*
	 * Fix the credentials.  (That's the purpose of this layer.)
	 */
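	/*
	 * The credential is duplicated with crdup() before it is mapped so
	 * the caller's original ucred is never modified; umap_mapids()
	 * rewrites the ids in the copy, and the original is put back (and
	 * the copy freed) at `out:' below.
	 */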

	if (descp->vdesc_cred_offset != VDESC_NO_OFFSET) {

		credpp = VOPARG_OFFSETTO(struct ucred**,
		    descp->vdesc_cred_offset, ap);

		/* Save old values */

		savecredp = *credpp;
		if (savecredp != NOCRED)
			*credpp = crdup(savecredp);
		credp = *credpp;

		if ((flags & LAYERFS_MBYPASSDEBUG) && credp->cr_uid != 0)
			printf("umap_bypass: user was %d, group %d\n",
			    credp->cr_uid, credp->cr_gid);

		/* Map all ids in the credential structure. */

		umap_mapids(vp0->v_mount, credp);

		if ((flags & LAYERFS_MBYPASSDEBUG) && credp->cr_uid != 0)
			printf("umap_bypass: user now %d, group %d\n",
			    credp->cr_uid, credp->cr_gid);
	}

	/* BSD often keeps a credential in the componentname structure
	 * for speed.  If there is one, it better get mapped, too.
	 */

	if (descp->vdesc_componentname_offset != VDESC_NO_OFFSET) {

		compnamepp = VOPARG_OFFSETTO(struct componentname**,
		    descp->vdesc_componentname_offset, ap);

		savecompcredp = (*compnamepp)->cn_cred;
		if (savecompcredp != NOCRED)
			(*compnamepp)->cn_cred = crdup(savecompcredp);
		compcredp = (*compnamepp)->cn_cred;

		if ((flags & LAYERFS_MBYPASSDEBUG) && compcredp->cr_uid != 0)
			printf("umap_bypass: component credit user was %d, group %d\n",
			    compcredp->cr_uid, compcredp->cr_gid);

		/* Map all ids in the credential structure. */

		umap_mapids(vp0->v_mount, compcredp);

		if ((flags & LAYERFS_MBYPASSDEBUG) && compcredp->cr_uid != 0)
			printf("umap_bypass: component credit user now %d, group %d\n",
			    compcredp->cr_uid, compcredp->cr_gid);
	}

	/*
	 * Call the operation on the lower layer
	 * with the modified argument structure.
	 */
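	/*
	 * VCALL() dispatches through the first mapped (lower) vnode's
	 * operation vector at the descriptor's offset, so the lower file
	 * system's implementation runs with the mapped arguments.
	 */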
	error = VCALL(*(vps_p[0]), descp->vdesc_offset, ap);

	/*
	 * Maintain the illusion of call-by-value
	 * by restoring vnodes in the argument structure
	 * to their original value.
	 */
	reles = descp->vdesc_flags;
	for (i = 0; i < VDESC_MAX_VPS; reles >>= 1, i++) {
		if (descp->vdesc_vp_offsets[i] == VDESC_NO_OFFSET)
			break;   /* bail out at end of list */
		if (old_vps[i]) {
			*(vps_p[i]) = old_vps[i];
			if (reles & VDESC_VP0_WILLUNLOCK)
				LAYERFS_UPPERUNLOCK(*(vps_p[i]), 0, error1);
			if (reles & VDESC_VP0_WILLRELE)
				vrele(*(vps_p[i]));
		};
	};

	/*
	 * Map the possible out-going vpp
	 * (Assumes that the lower layer always returns
	 * a VREF'ed vpp unless it gets an error.)
	 */
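	/*
	 * layer_node_create() either finds an existing umapfs alias for the
	 * returned lower vnode or allocates a new one, and hands that upper
	 * vnode back through *vppp in place of the lower one.
	 */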
	if (descp->vdesc_vpp_offset != VDESC_NO_OFFSET &&
	    !(descp->vdesc_flags & VDESC_NOMAP_VPP) &&
	    !error) {
		if (descp->vdesc_flags & VDESC_VPP_WILLRELE)
			goto out;
		vppp = VOPARG_OFFSETTO(struct vnode***,
				 descp->vdesc_vpp_offset, ap);
		error = layer_node_create(old_vps[0]->v_mount, **vppp, *vppp);
	};

 out:
	/*
	 * Free duplicate cred structure and restore old one.
	 */
	if (descp->vdesc_cred_offset != VDESC_NO_OFFSET) {
		if ((flags & LAYERFS_MBYPASSDEBUG) && credp &&
					credp->cr_uid != 0)
			printf("umap_bypass: returning-user was %d\n",
			    credp->cr_uid);

		if (savecredp != NOCRED) {
			crfree(credp);
			*credpp = savecredp;
			if ((flags & LAYERFS_MBYPASSDEBUG) && credpp &&
					(*credpp)->cr_uid != 0)
				printf("umap_bypass: returning-user now %d\n\n",
				    savecredp->cr_uid);
		}
	}

	if (descp->vdesc_componentname_offset != VDESC_NO_OFFSET) {
		if ((flags & LAYERFS_MBYPASSDEBUG) && compcredp &&
					compcredp->cr_uid != 0)
			printf("umap_bypass: returning-component-user was %d\n",
			    compcredp->cr_uid);

		if (savecompcredp != NOCRED) {
			crfree(compcredp);
			(*compnamepp)->cn_cred = savecompcredp;
			if ((flags & LAYERFS_MBYPASSDEBUG) && savecompcredp &&
					savecompcredp->cr_uid != 0)
				printf("umap_bypass: returning-component-user now %d\n",
				    savecompcredp->cr_uid);
		}
	}

	return (error);
}

/*
 * This is based on the 08-June-1999 bypass routine.
 * See layer_vnops.c:layer_bypass for more details.
 */
int
umap_lookup(v)
	void *v;
{
	struct vop_lookup_args /* {
		struct vnodeop_desc *a_desc;
		struct vnode * a_dvp;
		struct vnode ** a_vpp;
		struct componentname * a_cnp;
	} */ *ap = v;
	struct componentname *cnp = ap->a_cnp;
	struct ucred *savecompcredp = NULL;
	struct ucred *compcredp = NULL;
	struct vnode *dvp, *vp, *ldvp;
	struct mount *mp;
	int error;
	int i, flags, cnf = cnp->cn_flags;

	dvp = ap->a_dvp;
	mp = dvp->v_mount;

	if ((cnf & ISLASTCN) && (dvp->v_mount->mnt_flag & MNT_RDONLY) &&
		(cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME))
		return (EROFS);

	flags = MOUNTTOUMAPMOUNT(mp)->umapm_flags;
	ldvp = UMAPVPTOLOWERVP(dvp);

	if (flags & LAYERFS_MBYPASSDEBUG)
		printf("umap_lookup\n");

	/*
	 * Fix the credentials.  (That's the purpose of this layer.)
	 *
	 * BSD often keeps a credential in the componentname structure
	 * for speed.  If there is one, it better get mapped, too.
	 */

	if ((savecompcredp = cnp->cn_cred)) {
		compcredp = crdup(savecompcredp);
		cnp->cn_cred = compcredp;

		if ((flags & LAYERFS_MBYPASSDEBUG) && compcredp->cr_uid != 0)
			printf("umap_lookup: component credit user was %d, group %d\n",
			    compcredp->cr_uid, compcredp->cr_gid);

		/* Map all ids in the credential structure. */
		umap_mapids(mp, compcredp);
	}

	if ((flags & LAYERFS_MBYPASSDEBUG) && compcredp &&
	    compcredp->cr_uid != 0)
		printf("umap_lookup: component credit user now %d, group %d\n",
		    compcredp->cr_uid, compcredp->cr_gid);

	ap->a_dvp = ldvp;
	error = VCALL(ldvp, ap->a_desc->vdesc_offset, ap);
	vp = *ap->a_vpp;
	*ap->a_vpp = NULL;

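	/*
	 * If the lower lookup returned the directory itself (a "." lookup),
	 * hand back the upper directory vnode rather than creating a second
	 * alias; otherwise wrap the returned lower vnode in a umapfs node.
	 */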
	if (error == EJUSTRETURN && (cnf & ISLASTCN) &&
	    (dvp->v_mount->mnt_flag & MNT_RDONLY) &&
	    (cnp->cn_nameiop == CREATE || cnp->cn_nameiop == RENAME))
		error = EROFS;

	/* Do locking fixup as appropriate. See layer_lookup() for info */
	if ((cnp->cn_flags & PDIRUNLOCK)) {
		LAYERFS_UPPERUNLOCK(dvp, 0, i);
	}
	if (ldvp == vp) {
		*ap->a_vpp = dvp;
		VREF(dvp);
		vrele(vp);
	} else if (vp != NULL) {
		error = layer_node_create(mp, vp, ap->a_vpp);
	}

	/*
	 * Free duplicate cred structure and restore old one.
	 */
	if ((flags & LAYERFS_MBYPASSDEBUG) && compcredp &&
					compcredp->cr_uid != 0)
		printf("umap_lookup: returning-component-user was %d\n",
			    compcredp->cr_uid);

	if (savecompcredp != NOCRED) {
		crfree(compcredp);
		cnp->cn_cred = savecompcredp;
		if ((flags & LAYERFS_MBYPASSDEBUG) && savecompcredp &&
				savecompcredp->cr_uid != 0)
			printf("umap_lookup: returning-component-user now %d\n",
			    savecompcredp->cr_uid);
	}

	return (error);
}

/*
 *  We handle getattr to change the fsid.
 */
int
umap_getattr(v)
	void *v;
{
	struct vop_getattr_args /* {
		struct vnode *a_vp;
		struct vattr *a_vap;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap = v;
	uid_t uid;
	gid_t gid;
	int error, tmpid, nentries, gnentries, flags;
	u_long (*mapdata)[2];
	u_long (*gmapdata)[2];
	struct vnode **vp1p;
	const struct vnodeop_desc *descp = ap->a_desc;

	if ((error = umap_bypass(ap)) != 0)
		return (error);
	/* Requires that arguments be restored. */
	ap->a_vap->va_fsid = ap->a_vp->v_mount->mnt_stat.f_fsidx.__fsid_val[0];

	flags = MOUNTTOUMAPMOUNT(ap->a_vp->v_mount)->umapm_flags;
	/*
	 * Umap needs to map the uid and gid returned by a stat
	 * into the proper values for this site.  This involves
	 * finding the returned uid in the mapping information,
	 * translating it into the uid on the other end,
	 * and filling in the proper field in the vattr
	 * structure pointed to by ap->a_vap.  The group
	 * is easier, since currently all groups will be
	 * translated to the NULLGROUP.
	 */
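	/*
	 * info_mapdata/info_gmapdata are arrays of two-element u_long pairs
	 * supplied at mount time; umap_reverse_findid() searches them in the
	 * opposite direction from umap_mapids() and returns -1 when the id
	 * has no entry, in which case NOBODY/NULLGROUP is substituted below.
	 */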

	/* Find entry in map */

	uid = ap->a_vap->va_uid;
	gid = ap->a_vap->va_gid;
	if ((flags & LAYERFS_MBYPASSDEBUG))
		printf("umap_getattr: mapped uid = %d, mapped gid = %d\n", uid,
		    gid);

	vp1p = VOPARG_OFFSETTO(struct vnode**, descp->vdesc_vp_offsets[0], ap);
	nentries =  MOUNTTOUMAPMOUNT((*vp1p)->v_mount)->info_nentries;
	mapdata =  (MOUNTTOUMAPMOUNT((*vp1p)->v_mount)->info_mapdata);
	gnentries =  MOUNTTOUMAPMOUNT((*vp1p)->v_mount)->info_gnentries;
	gmapdata =  (MOUNTTOUMAPMOUNT((*vp1p)->v_mount)->info_gmapdata);

	/* Reverse map the uid for the vnode.  Since it's a reverse
		map, we can't use umap_mapids() to do it. */

	tmpid = umap_reverse_findid(uid, mapdata, nentries);

	if (tmpid != -1) {
		ap->a_vap->va_uid = (uid_t) tmpid;
		if ((flags & LAYERFS_MBYPASSDEBUG))
			printf("umap_getattr: original uid = %d\n", uid);
	} else
		ap->a_vap->va_uid = (uid_t) NOBODY;

	/* Reverse map the gid for the vnode. */

	tmpid = umap_reverse_findid(gid, gmapdata, gnentries);

	if (tmpid != -1) {
		ap->a_vap->va_gid = (gid_t) tmpid;
		if ((flags & LAYERFS_MBYPASSDEBUG))
			printf("umap_getattr: original gid = %d\n", gid);
	} else
		ap->a_vap->va_gid = (gid_t) NULLGROUP;

	return (0);
}

int
umap_print(v)
	void *v;
{
	struct vop_print_args /* {
		struct vnode *a_vp;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	printf("\ttag VT_UMAPFS, vp=%p, lowervp=%p\n", vp,
	    UMAPVPTOLOWERVP(vp));
	return (0);
}

int
umap_rename(v)
	void *v;
{
	struct vop_rename_args  /* {
		struct vnode *a_fdvp;
		struct vnode *a_fvp;
		struct componentname *a_fcnp;
		struct vnode *a_tdvp;
		struct vnode *a_tvp;
		struct componentname *a_tcnp;
	} */ *ap = v;
	int error, flags;
	struct componentname *compnamep;
	struct ucred *compcredp, *savecompcredp;
	struct vnode *vp;
	struct vnode *tvp;

	/*
	 * Rename is irregular, having two componentname structures.
	 * We need to map the cred in the second structure,
	 * and then bypass takes care of the rest.
	 */
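	/*
	 * The credential mapped by hand here is the one umap_bypass() will
	 * not see through the descriptor's single componentname offset; the
	 * other componentname's credential is mapped in the bypass itself.
	 */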

	vp = ap->a_fdvp;
	flags = MOUNTTOUMAPMOUNT(vp->v_mount)->umapm_flags;
	compnamep = ap->a_tcnp;
	compcredp = compnamep->cn_cred;

	savecompcredp = compcredp;
	compcredp = compnamep->cn_cred = crdup(savecompcredp);

	if ((flags & LAYERFS_MBYPASSDEBUG) && compcredp->cr_uid != 0)
		printf("umap_rename: rename component credit user was %d, group %d\n",
		    compcredp->cr_uid, compcredp->cr_gid);

	/* Map all ids in the credential structure. */

	umap_mapids(vp->v_mount, compcredp);

	if ((flags & LAYERFS_MBYPASSDEBUG) && compcredp->cr_uid != 0)
		printf("umap_rename: rename component credit user now %d, group %d\n",
		    compcredp->cr_uid, compcredp->cr_gid);

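	/*
	 * Take an extra reference on the target vnode across the bypass:
	 * a successful rename removes the target, and the upper node must
	 * still be around afterwards so it can be flagged LAYERFS_REMOVED.
	 */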
	tvp = ap->a_tvp;
	if (tvp) {
		if (tvp->v_mount != vp->v_mount)
			tvp = NULL;
		else
			vref(tvp);
	}
	error = umap_bypass(ap);
	if (tvp) {
		if (error == 0)
			VTOLAYER(tvp)->layer_flags |= LAYERFS_REMOVED;
		vrele(tvp);
	}

	/* Restore the additional mapped componentname cred structure. */

	crfree(compcredp);
	compnamep->cn_cred = savecompcredp;

	return error;
}