/*	$NetBSD: umap_vnops.c,v 1.54 2014/02/07 15:29:22 hannken Exp $	*/

/*
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software donated to Berkeley by
 * the UCLA Ficus project.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)umap_vnops.c	8.6 (Berkeley) 5/22/95
 */

/*
 * Umap Layer
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: umap_vnops.c,v 1.54 2014/02/07 15:29:22 hannken Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/time.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/malloc.h>
#include <sys/buf.h>
#include <sys/kauth.h>

#include <miscfs/umapfs/umap.h>
#include <miscfs/genfs/genfs.h>
#include <miscfs/genfs/layer_extern.h>

/*
 * Note: If the LAYERFS_MBYPASSDEBUG flag is set, it is possible
 * that the debug printing will bomb out, because kauth routines
 * do not handle NOCRED or FSCRED like other credentials and end
 * up dereferencing an inappropriate pointer.
 *
 * That should be fixed in kauth rather than here.
 */

int	umap_lookup(void *);
int	umap_getattr(void *);
int	umap_print(void *);
int	umap_rename(void *);

/*
 * Global vfs data structures
 */
/*
 * XXX - strategy, bwrite are hand coded currently.  They should
 * go away with a merged buffer/block cache.
 *
 */
int (**umap_vnodeop_p)(void *);
const struct vnodeopv_entry_desc umap_vnodeop_entries[] = {
	{ &vop_default_desc,	umap_bypass },

	{ &vop_lookup_desc,	umap_lookup },
	{ &vop_getattr_desc,	umap_getattr },
	{ &vop_print_desc,	umap_print },
	{ &vop_rename_desc,	umap_rename },

	{ &vop_fsync_desc,	layer_fsync },
	{ &vop_inactive_desc,	layer_inactive },
	{ &vop_reclaim_desc,	layer_reclaim },
	{ &vop_open_desc,	layer_open },
	{ &vop_setattr_desc,	layer_setattr },
	{ &vop_access_desc,	layer_access },
	{ &vop_remove_desc,	layer_remove },
	{ &vop_revoke_desc,	layer_revoke },
	{ &vop_rmdir_desc,	layer_rmdir },

	{ &vop_bmap_desc,	layer_bmap },
	{ &vop_getpages_desc,	layer_getpages },
	{ &vop_putpages_desc,	layer_putpages },

	{ NULL, NULL }
};
const struct vnodeopv_desc umapfs_vnodeop_opv_desc =
	{ &umap_vnodeop_p, umap_vnodeop_entries };

/*
 * This is the 08-June-1999 bypass routine.
 * See layer_vnops.c:layer_bypass for more details.
 */
int
umap_bypass(void *v)
{
	struct vop_generic_args /* {
		struct vnodeop_desc *a_desc;
		<other random data follows, presumably>
	} */ *ap = v;
	int (**our_vnodeop_p)(void *);
	kauth_cred_t *credpp = NULL, credp = 0;
	kauth_cred_t savecredp = 0, savecompcredp = 0;
	kauth_cred_t compcredp = 0;
	struct vnode **this_vp_p;
	int error;
	struct vnode *old_vps[VDESC_MAX_VPS], *vp0;
	struct vnode **vps_p[VDESC_MAX_VPS];
	struct vnode ***vppp;
	struct vnodeop_desc *descp = ap->a_desc;
	int reles, i, flags;
	struct componentname **compnamepp = 0;

#ifdef DIAGNOSTIC
	/*
	 * We require at least one vp.
	 */
	if (descp->vdesc_vp_offsets == NULL ||
	    descp->vdesc_vp_offsets[0] == VDESC_NO_OFFSET)
		panic("%s: no vp's in map.\n", __func__);
#endif

	vps_p[0] =
	    VOPARG_OFFSETTO(struct vnode**, descp->vdesc_vp_offsets[0], ap);
	vp0 = *vps_p[0];
	flags = MOUNTTOUMAPMOUNT(vp0->v_mount)->umapm_flags;
	our_vnodeop_p = vp0->v_op;

	if (flags & LAYERFS_MBYPASSDEBUG)
		printf("%s: %s\n", __func__, descp->vdesc_name);

	/*
	 * Map the vnodes going in.
	 * Later, we'll invoke the operation based on
	 * the first mapped vnode's operation vector.
	 */
	reles = descp->vdesc_flags;
	for (i = 0; i < VDESC_MAX_VPS; reles >>= 1, i++) {
		if (descp->vdesc_vp_offsets[i] == VDESC_NO_OFFSET)
			break;   /* bail out at end of list */
		vps_p[i] = this_vp_p =
		    VOPARG_OFFSETTO(struct vnode**, descp->vdesc_vp_offsets[i],
		    ap);
		/*
		 * We're not guaranteed that any but the first vnode
		 * are of our type.  Check for and don't map any
		 * that aren't.  (We must always map first vp or vclean fails.)
		 */
		if (i && (*this_vp_p == NULL ||
		    (*this_vp_p)->v_op != our_vnodeop_p)) {
			old_vps[i] = NULL;
		} else {
			old_vps[i] = *this_vp_p;
			*(vps_p[i]) = UMAPVPTOLOWERVP(*this_vp_p);
			/*
			 * XXX - Several operations have the side effect
			 * of vrele'ing their vp's.  We must account for
			 * that.  (This should go away in the future.)
			 */
			if (reles & VDESC_VP0_WILLRELE)
				vref(*this_vp_p);
		}

	}

	/*
	 * Fix the credentials.  (That's the purpose of this layer.)
	 */

	if (descp->vdesc_cred_offset != VDESC_NO_OFFSET) {

		credpp = VOPARG_OFFSETTO(kauth_cred_t*,
		    descp->vdesc_cred_offset, ap);

		/* Save old values */

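		/*
		 * NOCRED and FSCRED are magic tokens rather than
		 * allocated credentials, so they are neither
		 * duplicated nor freed below.
		 */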
		savecredp = *credpp;
		if (savecredp != NOCRED && savecredp != FSCRED)
			*credpp = kauth_cred_dup(savecredp);
		credp = *credpp;

		if ((flags & LAYERFS_MBYPASSDEBUG) &&
		    kauth_cred_geteuid(credp) != 0)
			printf("umap_bypass: user was %d, group %d\n",
			    kauth_cred_geteuid(credp), kauth_cred_getegid(credp));

		/* Map all ids in the credential structure. */

		umap_mapids(vp0->v_mount, credp);

		if ((flags & LAYERFS_MBYPASSDEBUG) &&
		    kauth_cred_geteuid(credp) != 0)
			printf("umap_bypass: user now %d, group %d\n",
			    kauth_cred_geteuid(credp), kauth_cred_getegid(credp));
	}

	/* BSD often keeps a credential in the componentname structure
	 * for speed.  If there is one, it better get mapped, too.
	 */

	if (descp->vdesc_componentname_offset != VDESC_NO_OFFSET) {

		compnamepp = VOPARG_OFFSETTO(struct componentname**,
		    descp->vdesc_componentname_offset, ap);

		savecompcredp = (*compnamepp)->cn_cred;
		if (savecompcredp != NOCRED && savecompcredp != FSCRED)
			(*compnamepp)->cn_cred = kauth_cred_dup(savecompcredp);
		compcredp = (*compnamepp)->cn_cred;

		if ((flags & LAYERFS_MBYPASSDEBUG) &&
		    kauth_cred_geteuid(compcredp) != 0)
			printf("umap_bypass: component credit user was %d, group %d\n",
			    kauth_cred_geteuid(compcredp), kauth_cred_getegid(compcredp));

		/* Map all ids in the credential structure. */

		umap_mapids(vp0->v_mount, compcredp);

		if ((flags & LAYERFS_MBYPASSDEBUG) &&
		    kauth_cred_geteuid(compcredp) != 0)
			printf("umap_bypass: component credit user now %d, group %d\n",
			    kauth_cred_geteuid(compcredp), kauth_cred_getegid(compcredp));
	}

	/*
	 * Call the operation on the lower layer
	 * with the modified argument structure.
	 */
	error = VCALL(*vps_p[0], descp->vdesc_offset, ap);

	/*
	 * Maintain the illusion of call-by-value
	 * by restoring vnodes in the argument structure
	 * to their original value.
	 */
	reles = descp->vdesc_flags;
	for (i = 0; i < VDESC_MAX_VPS; reles >>= 1, i++) {
		if (descp->vdesc_vp_offsets[i] == VDESC_NO_OFFSET)
			break;   /* bail out at end of list */
		if (old_vps[i]) {
			*(vps_p[i]) = old_vps[i];
			if (reles & VDESC_VP0_WILLRELE)
				vrele(*(vps_p[i]));
		}
	}

	/*
	 * Map the possible out-going vpp
	 * (Assumes that the lower layer always returns
	 * a VREF'ed vpp unless it gets an error.)
	 */
	if (descp->vdesc_vpp_offset != VDESC_NO_OFFSET && !error) {
		vppp = VOPARG_OFFSETTO(struct vnode***,
				 descp->vdesc_vpp_offset, ap);
		/*
		 * Only vop_lookup, vop_create, vop_mkdir, vop_bmap,
		 * vop_mknod, and vop_symlink return vpp's. vop_bmap
		 * doesn't call bypass as the lower vpp is fine (we're just
		 * going to do i/o on it). vop_lookup doesn't call bypass
		 * as a lookup on "." would generate a locking error.
		 * So all the calls which get us here have a locked vpp. :-)
		 */
		error = layer_node_create(old_vps[0]->v_mount, **vppp, *vppp);
		if (error) {
			vput(**vppp);
			**vppp = NULL;
		}
	}

	/*
	 * Free duplicate cred structure and restore old one.
	 */
	if (descp->vdesc_cred_offset != VDESC_NO_OFFSET) {
		if ((flags & LAYERFS_MBYPASSDEBUG) && credp &&
		    kauth_cred_geteuid(credp) != 0)
			printf("umap_bypass: returning-user was %d\n",
			    kauth_cred_geteuid(credp));

		if (savecredp != NOCRED && savecredp != FSCRED && credpp) {
			kauth_cred_free(credp);
			*credpp = savecredp;
			if ((flags & LAYERFS_MBYPASSDEBUG) && credpp &&
			    kauth_cred_geteuid(*credpp) != 0)
			 	printf("umap_bypass: returning-user now %d\n\n",
				    kauth_cred_geteuid(savecredp));
		}
	}

	if (descp->vdesc_componentname_offset != VDESC_NO_OFFSET) {
		if ((flags & LAYERFS_MBYPASSDEBUG) && compcredp &&
		    kauth_cred_geteuid(compcredp) != 0)
			printf("umap_bypass: returning-component-user was %d\n",
			    kauth_cred_geteuid(compcredp));

		if (savecompcredp != NOCRED && savecompcredp != FSCRED) {
			kauth_cred_free(compcredp);
			(*compnamepp)->cn_cred = savecompcredp;
			if ((flags & LAYERFS_MBYPASSDEBUG) && savecompcredp &&
			    kauth_cred_geteuid(savecompcredp) != 0)
			 	printf("umap_bypass: returning-component-user now %d\n",
				    kauth_cred_geteuid(savecompcredp));
		}
	}

	return (error);
}

/*
 * This is based on the 08-June-1999 bypass routine.
 * See layer_vnops.c:layer_bypass for more details.
 */
int
umap_lookup(void *v)
{
	struct vop_lookup_v2_args /* {
		struct vnodeop_desc *a_desc;
		struct vnode * a_dvp;
		struct vnode ** a_vpp;
		struct componentname * a_cnp;
	} */ *ap = v;
	struct componentname *cnp = ap->a_cnp;
	kauth_cred_t savecompcredp = NULL;
	kauth_cred_t compcredp = NULL;
	struct vnode *dvp, *vp, *ldvp;
	struct mount *mp;
	int error;
	int flags, cnf = cnp->cn_flags;

	dvp = ap->a_dvp;
	mp = dvp->v_mount;

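	/*
	 * Refuse to delete or rename the last component on a
	 * read-only mount.
	 */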
	if ((cnf & ISLASTCN) && (dvp->v_mount->mnt_flag & MNT_RDONLY) &&
		(cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME))
		return (EROFS);

	flags = MOUNTTOUMAPMOUNT(mp)->umapm_flags;
	ldvp = UMAPVPTOLOWERVP(dvp);

	if (flags & LAYERFS_MBYPASSDEBUG)
		printf("umap_lookup\n");

	/*
	 * Fix the credentials.  (That's the purpose of this layer.)
	 *
	 * BSD often keeps a credential in the componentname structure
	 * for speed.  If there is one, it better get mapped, too.
	 */

	if ((savecompcredp = cnp->cn_cred)) {
		compcredp = kauth_cred_dup(savecompcredp);
		cnp->cn_cred = compcredp;

		if ((flags & LAYERFS_MBYPASSDEBUG) &&
		    kauth_cred_geteuid(compcredp) != 0)
			printf("umap_lookup: component credit user was %d, group %d\n",
			    kauth_cred_geteuid(compcredp), kauth_cred_getegid(compcredp));

		/* Map all ids in the credential structure. */
		umap_mapids(mp, compcredp);
	}

	if ((flags & LAYERFS_MBYPASSDEBUG) && compcredp &&
	    kauth_cred_geteuid(compcredp) != 0)
		printf("umap_lookup: component credit user now %d, group %d\n",
		    kauth_cred_geteuid(compcredp), kauth_cred_getegid(compcredp));

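	/* Do the lookup on the lower layer's directory vnode. */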
	ap->a_dvp = ldvp;
	error = VCALL(ldvp, ap->a_desc->vdesc_offset, ap);
	vp = *ap->a_vpp;
	*ap->a_vpp = NULL;

	if (error == EJUSTRETURN && (cnf & ISLASTCN) &&
	    (dvp->v_mount->mnt_flag & MNT_RDONLY) &&
	    (cnp->cn_nameiop == CREATE || cnp->cn_nameiop == RENAME))
		error = EROFS;

	/* Do locking fixup as appropriate. See layer_lookup() for info */
	if (ldvp == vp) {
		*ap->a_vpp = dvp;
		vref(dvp);
		vrele(vp);
	} else if (vp != NULL) {
		error = layer_node_create(mp, vp, ap->a_vpp);
		if (error) {
			vrele(vp);
		}
	}

	/*
	 * Free duplicate cred structure and restore old one.
	 */
	if ((flags & LAYERFS_MBYPASSDEBUG) && compcredp &&
	    kauth_cred_geteuid(compcredp) != 0)
		printf("umap_lookup: returning-component-user was %d\n",
			    kauth_cred_geteuid(compcredp));

	if (savecompcredp != NOCRED && savecompcredp != FSCRED) {
		if (compcredp)
			kauth_cred_free(compcredp);
		cnp->cn_cred = savecompcredp;
		if ((flags & LAYERFS_MBYPASSDEBUG) && savecompcredp &&
		    kauth_cred_geteuid(savecompcredp) != 0)
		 	printf("umap_lookup: returning-component-user now %d\n",
			    kauth_cred_geteuid(savecompcredp));
	}

	return (error);
}

/*
 *  We handle getattr to change the fsid.
 */
int
umap_getattr(void *v)
{
	struct vop_getattr_args /* {
		struct vnode *a_vp;
		struct vattr *a_vap;
		kauth_cred_t a_cred;
		struct lwp *a_l;
	} */ *ap = v;
	uid_t uid;
	gid_t gid;
	int error, tmpid, nentries, gnentries, flags;
	u_long (*mapdata)[2];
	u_long (*gmapdata)[2];
	struct vnode **vp1p;
	const struct vnodeop_desc *descp = ap->a_desc;

	if ((error = umap_bypass(ap)) != 0)
		return (error);
	/* Requires that arguments be restored. */
	ap->a_vap->va_fsid = ap->a_vp->v_mount->mnt_stat.f_fsidx.__fsid_val[0];

	flags = MOUNTTOUMAPMOUNT(ap->a_vp->v_mount)->umapm_flags;
	/*
	 * Umap needs to map the uid and gid returned by a stat
	 * into the proper values for this site.  This involves
	 * finding the returned uid in the mapping information,
	 * translating it into the uid on the other end,
	 * and filling in the proper field in the vattr
	 * structure pointed to by ap->a_vap.  The group
	 * is easier, since currently all groups will be
	 * translated to the NULLGROUP.
	 */

	/* Find entry in map */

	uid = ap->a_vap->va_uid;
	gid = ap->a_vap->va_gid;
	if ((flags & LAYERFS_MBYPASSDEBUG))
		printf("umap_getattr: mapped uid = %d, mapped gid = %d\n", uid,
		    gid);

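	/* Fetch the uid and gid mapping tables from the umap mount data. */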
	vp1p = VOPARG_OFFSETTO(struct vnode**, descp->vdesc_vp_offsets[0], ap);
	nentries =  MOUNTTOUMAPMOUNT((*vp1p)->v_mount)->info_nentries;
	mapdata =  (MOUNTTOUMAPMOUNT((*vp1p)->v_mount)->info_mapdata);
	gnentries =  MOUNTTOUMAPMOUNT((*vp1p)->v_mount)->info_gnentries;
	gmapdata =  (MOUNTTOUMAPMOUNT((*vp1p)->v_mount)->info_gmapdata);

	/*
	 * Reverse map the uid for the vnode.  Since it's a reverse
	 * map, we can't use umap_mapids() to do it.
	 */

	tmpid = umap_reverse_findid(uid, mapdata, nentries);

	if (tmpid != -1) {
		ap->a_vap->va_uid = (uid_t) tmpid;
		if ((flags & LAYERFS_MBYPASSDEBUG))
			printf("umap_getattr: original uid = %d\n", uid);
	} else
		ap->a_vap->va_uid = (uid_t) NOBODY;

	/* Reverse map the gid for the vnode. */

	tmpid = umap_reverse_findid(gid, gmapdata, gnentries);

	if (tmpid != -1) {
		ap->a_vap->va_gid = (gid_t) tmpid;
		if ((flags & LAYERFS_MBYPASSDEBUG))
			printf("umap_getattr: original gid = %d\n", gid);
	} else
		ap->a_vap->va_gid = (gid_t) NULLGROUP;

	return (0);
}

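/*
 * Print out umapfs vnode state for debugging.
 */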
int
umap_print(void *v)
{
	struct vop_print_args /* {
		struct vnode *a_vp;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	printf("\ttag VT_UMAPFS, vp=%p, lowervp=%p\n", vp,
	    UMAPVPTOLOWERVP(vp));
	return (0);
}

int
umap_rename(void *v)
{
	struct vop_rename_args  /* {
		struct vnode *a_fdvp;
		struct vnode *a_fvp;
		struct componentname *a_fcnp;
		struct vnode *a_tdvp;
		struct vnode *a_tvp;
		struct componentname *a_tcnp;
	} */ *ap = v;
	int error, flags;
	struct componentname *compnamep;
	kauth_cred_t compcredp, savecompcredp;
	struct vnode *vp;
	struct vnode *tvp;

	/*
	 * Rename is irregular, having two componentname structures.
	 * We need to map the cred in the second structure,
	 * and then bypass takes care of the rest.
	 */

	vp = ap->a_fdvp;
	flags = MOUNTTOUMAPMOUNT(vp->v_mount)->umapm_flags;
	compnamep = ap->a_tcnp;
	compcredp = compnamep->cn_cred;

	savecompcredp = compcredp;
	compcredp = compnamep->cn_cred = kauth_cred_dup(savecompcredp);

	if ((flags & LAYERFS_MBYPASSDEBUG) &&
	    kauth_cred_geteuid(compcredp) != 0)
		printf("umap_rename: rename component credit user was %d, group %d\n",
		    kauth_cred_geteuid(compcredp), kauth_cred_getegid(compcredp));

	/* Map all ids in the credential structure. */

	umap_mapids(vp->v_mount, compcredp);

	if ((flags & LAYERFS_MBYPASSDEBUG) &&
	    kauth_cred_geteuid(compcredp) != 0)
		printf("umap_rename: rename component credit user now %d, group %d\n",
		    kauth_cred_geteuid(compcredp), kauth_cred_getegid(compcredp));

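	/*
	 * If the rename target exists on this mount, take an extra
	 * reference so it can be marked LAYERFS_REMOVED once the
	 * lower-layer rename succeeds.
	 */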
	tvp = ap->a_tvp;
	if (tvp) {
		if (tvp->v_mount != vp->v_mount)
			tvp = NULL;
		else
			vref(tvp);
	}
	error = umap_bypass(ap);
	if (tvp) {
		if (error == 0)
			VTOLAYER(tvp)->layer_flags |= LAYERFS_REMOVED;
		vrele(tvp);
	}

	/* Restore the additional mapped componentname cred structure. */

	kauth_cred_free(compcredp);
	compnamep->cn_cred = savecompcredp;

	return error;
}