/*	$NetBSD: umap_vnops.c,v 1.40 2006/09/08 20:58:57 elad Exp $	*/

/*
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software donated to Berkeley by
 * the UCLA Ficus project.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)umap_vnops.c	8.6 (Berkeley) 5/22/95
 */

/*
 * Umap Layer
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: umap_vnops.c,v 1.40 2006/09/08 20:58:57 elad Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/time.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/malloc.h>
#include <sys/buf.h>
#include <sys/kauth.h>

#include <miscfs/umapfs/umap.h>
#include <miscfs/genfs/genfs.h>
#include <miscfs/genfs/layer_extern.h>

int	umap_lookup(void *);
int	umap_getattr(void *);
int	umap_print(void *);
int	umap_rename(void *);

/*
 * Global vfs data structures
 */
/*
 * XXX - strategy, bwrite are hand coded currently.  They should
 * go away with a merged buffer/block cache.
 */
int (**umap_vnodeop_p)(void *);
const struct vnodeopv_entry_desc umap_vnodeop_entries[] = {
	{ &vop_default_desc,	umap_bypass },

	{ &vop_lookup_desc,	umap_lookup },
	{ &vop_getattr_desc,	umap_getattr },
	{ &vop_print_desc,	umap_print },
	{ &vop_rename_desc,	umap_rename },

	{ &vop_lock_desc,	layer_lock },
	{ &vop_unlock_desc,	layer_unlock },
	{ &vop_islocked_desc,	layer_islocked },
	{ &vop_fsync_desc,	layer_fsync },
	{ &vop_inactive_desc,	layer_inactive },
	{ &vop_reclaim_desc,	layer_reclaim },
	{ &vop_open_desc,	layer_open },
	{ &vop_setattr_desc,	layer_setattr },
	{ &vop_access_desc,	layer_access },
	{ &vop_remove_desc,	layer_remove },
	{ &vop_rmdir_desc,	layer_rmdir },

	{ &vop_bwrite_desc,	layer_bwrite },
	{ &vop_bmap_desc,	layer_bmap },
	{ &vop_getpages_desc,	layer_getpages },
	{ &vop_putpages_desc,	layer_putpages },

	{ NULL, NULL }
};
const struct vnodeopv_desc umapfs_vnodeop_opv_desc =
	{ &umap_vnodeop_p, umap_vnodeop_entries };
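
/*
 * Dispatch note: any operation without an explicit entry above falls
 * through vop_default_desc to umap_bypass(), which maps credentials and
 * vnodes and re-issues the call on the lower layer.  Locking, fsync,
 * paging and similar bookkeeping ops are delegated to the shared layerfs
 * helpers (layer_*) rather than duplicated here.  Roughly, a caller's
 *
 *	VOP_CLOSE(umapvp, fflag, cred, l)
 *
 * ends up in umap_bypass(), which (sketch, not verbatim) turns it into
 * VOP_CLOSE(UMAPVPTOLOWERVP(umapvp), fflag, mapped_cred, l).
 */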

/*
 * This is the 08-June-1999 bypass routine.
 * See layer_vnops.c:layer_bypass for more details.
 */
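/*
 * Outline of the bypass (the inline comments below carry the details):
 *   1. locate the vnode argument(s) via the vnodeop_desc offsets and
 *      replace each umapfs vnode with its lower vnode;
 *   2. duplicate any credential argument (and any componentname cred)
 *      and run the copies through umap_mapids();
 *   3. VCALL the same operation on the lower vnode;
 *   4. restore the original vnodes and credentials so the caller sees
 *      call-by-value semantics;
 *   5. wrap a returned vnode (if any) in a new umapfs node.
 */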
int
umap_bypass(v)
	void *v;
{
	struct vop_generic_args /* {
		struct vnodeop_desc *a_desc;
		<other random data follows, presumably>
	} */ *ap = v;
	int (**our_vnodeop_p)(void *);
	kauth_cred_t *credpp = NULL, credp = 0;
	kauth_cred_t savecredp = 0, savecompcredp = 0;
	kauth_cred_t compcredp = 0;
	struct vnode **this_vp_p;
	int error, error1;
	struct vnode *old_vps[VDESC_MAX_VPS], *vp0;
	struct vnode **vps_p[VDESC_MAX_VPS];
	struct vnode ***vppp;
	struct vnodeop_desc *descp = ap->a_desc;
	int reles, i, flags;
	struct componentname **compnamepp = 0;

#ifdef SAFETY
	/*
	 * We require at least one vp.
	 */
	if (descp->vdesc_vp_offsets == NULL ||
	    descp->vdesc_vp_offsets[0] == VDESC_NO_OFFSET)
		panic("%s: no vp's in map.\n", __func__);
#endif
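
	/*
	 * Each vnodeop_desc records the byte offsets of the vnode, cred and
	 * componentname arguments inside its argument structure, so generic
	 * code like this can find them without knowing the concrete type.
	 * VOPARG_OFFSETTO(type, offset, ap) is just pointer arithmetic,
	 * roughly (type)((char *)ap + offset).
	 */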

	vps_p[0] =
	    VOPARG_OFFSETTO(struct vnode**, descp->vdesc_vp_offsets[0], ap);
	vp0 = *vps_p[0];
	flags = MOUNTTOUMAPMOUNT(vp0->v_mount)->umapm_flags;
	our_vnodeop_p = vp0->v_op;

	if (flags & LAYERFS_MBYPASSDEBUG)
		printf("%s: %s\n", __func__, descp->vdesc_name);

	/*
	 * Map the vnodes going in.
	 * Later, we'll invoke the operation based on
	 * the first mapped vnode's operation vector.
	 */
	reles = descp->vdesc_flags;
	for (i = 0; i < VDESC_MAX_VPS; reles >>= 1, i++) {
		if (descp->vdesc_vp_offsets[i] == VDESC_NO_OFFSET)
			break;   /* bail out at end of list */
		vps_p[i] = this_vp_p =
		    VOPARG_OFFSETTO(struct vnode**, descp->vdesc_vp_offsets[i],
		    ap);
		/*
		 * Only the first vnode is guaranteed to be of our type;
		 * check the others and don't map any that aren't.
		 * (We must always map the first vp or vclean fails.)
		 */
		if (i && (*this_vp_p == NULL ||
		    (*this_vp_p)->v_op != our_vnodeop_p)) {
			old_vps[i] = NULL;
		} else {
			old_vps[i] = *this_vp_p;
			*(vps_p[i]) = UMAPVPTOLOWERVP(*this_vp_p);
			/*
			 * XXX - Several operations have the side effect
			 * of vrele'ing their vp's.  We must account for
			 * that.  (This should go away in the future.)
			 */
			if (reles & VDESC_VP0_WILLRELE)
				VREF(*this_vp_p);
		}
	}

	/*
	 * Fix the credentials.  (That's the purpose of this layer.)
	 */

	if (descp->vdesc_cred_offset != VDESC_NO_OFFSET) {

		credpp = VOPARG_OFFSETTO(kauth_cred_t*,
		    descp->vdesc_cred_offset, ap);

		/* Save old values */

		savecredp = *credpp;
		if (savecredp != NOCRED)
			*credpp = kauth_cred_dup(savecredp);
		credp = *credpp;

		if ((flags & LAYERFS_MBYPASSDEBUG) &&
		    kauth_authorize_generic(credp, KAUTH_GENERIC_ISSUSER,
		     NULL) != KAUTH_RESULT_ALLOW)
			printf("umap_bypass: user was %d, group %d\n",
			    kauth_cred_geteuid(credp), kauth_cred_getegid(credp));

		/* Map all ids in the credential structure. */

		umap_mapids(vp0->v_mount, credp);

		if ((flags & LAYERFS_MBYPASSDEBUG) &&
		    kauth_authorize_generic(credp, KAUTH_GENERIC_ISSUSER,
		     NULL) != KAUTH_RESULT_ALLOW)
			printf("umap_bypass: user now %d, group %d\n",
			    kauth_cred_geteuid(credp), kauth_cred_getegid(credp));
	}
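
	/*
	 * umap_mapids() (in umap_subr.c) is assumed here to rewrite the ids
	 * in the duplicated credential using the per-mount translation
	 * tables (the umapm_*/info_* members of struct umap_mount);
	 * unmapped ids presumably fall back to the NOBODY/NULLGROUP
	 * defaults, mirroring what umap_getattr() does on the way back up.
	 */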

	/*
	 * BSD often keeps a credential in the componentname structure
	 * for speed.  If there is one, it had better get mapped, too.
	 */

	if (descp->vdesc_componentname_offset != VDESC_NO_OFFSET) {

		compnamepp = VOPARG_OFFSETTO(struct componentname**,
		    descp->vdesc_componentname_offset, ap);

		savecompcredp = (*compnamepp)->cn_cred;
		if (savecompcredp != NOCRED)
			(*compnamepp)->cn_cred = kauth_cred_dup(savecompcredp);
		compcredp = (*compnamepp)->cn_cred;

		if ((flags & LAYERFS_MBYPASSDEBUG) &&
		    kauth_authorize_generic(compcredp, KAUTH_GENERIC_ISSUSER,
		     NULL) != KAUTH_RESULT_ALLOW)
			printf("umap_bypass: componentname cred user was %d, group %d\n",
			    kauth_cred_geteuid(compcredp), kauth_cred_getegid(compcredp));

		/* Map all ids in the credential structure. */

		umap_mapids(vp0->v_mount, compcredp);

		if ((flags & LAYERFS_MBYPASSDEBUG) &&
		    kauth_authorize_generic(compcredp, KAUTH_GENERIC_ISSUSER,
		     NULL) != KAUTH_RESULT_ALLOW)
			printf("umap_bypass: componentname cred user now %d, group %d\n",
			    kauth_cred_geteuid(compcredp), kauth_cred_getegid(compcredp));
	}

	/*
	 * Call the operation on the lower layer
	 * with the modified argument structure.
	 */
	error = VCALL(*vps_p[0], descp->vdesc_offset, ap);

	/*
	 * Maintain the illusion of call-by-value
	 * by restoring vnodes in the argument structure
	 * to their original value.
	 */
	reles = descp->vdesc_flags;
	for (i = 0; i < VDESC_MAX_VPS; reles >>= 1, i++) {
		if (descp->vdesc_vp_offsets[i] == VDESC_NO_OFFSET)
			break;   /* bail out at end of list */
		if (old_vps[i]) {
			*(vps_p[i]) = old_vps[i];
			if (reles & VDESC_VP0_WILLUNLOCK)
				LAYERFS_UPPERUNLOCK(*(vps_p[i]), 0, error1);
			if (reles & VDESC_VP0_WILLRELE)
				vrele(*(vps_p[i]));
		}
	}

	/*
	 * Map the possible out-going vpp
	 * (Assumes that the lower layer always returns
	 * a VREF'ed vpp unless it gets an error.)
	 */
	if (descp->vdesc_vpp_offset != VDESC_NO_OFFSET &&
	    !(descp->vdesc_flags & VDESC_NOMAP_VPP) &&
	    !error) {
		/*
		 * XXX - even though some ops return vp's via vpp,
		 * several ops actually vrele them before returning.
		 * We must avoid those ops.
		 * (This should go away when these ops are regularized.)
		 */
		if (descp->vdesc_flags & VDESC_VPP_WILLRELE)
			goto out;
		vppp = VOPARG_OFFSETTO(struct vnode***,
				 descp->vdesc_vpp_offset, ap);
		/*
		 * Only vop_lookup, vop_create, vop_mkdir, vop_bmap,
		 * vop_mknod, and vop_symlink return vpp's. vop_bmap
		 * doesn't call bypass as the lower vpp is fine (we're just
		 * going to do i/o on it). vop_lookup doesn't call bypass
		 * as a lookup on "." would generate a locking error.
		 * So all the calls which get us here have a locked vpp. :-)
		 */
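		/*
		 * layer_node_create() (from the generic layerfs code) is
		 * expected to allocate, or find in the node cache, an upper
		 * umapfs vnode stacked on the vnode the lower layer just
		 * returned, and to hand that back through *vppp.
		 */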
		error = layer_node_create(old_vps[0]->v_mount, **vppp, *vppp);
		if (error) {
			vput(**vppp);
			**vppp = NULL;
		}
	}

 out:
	/*
	 * Free duplicate cred structure and restore old one.
	 */
	if (descp->vdesc_cred_offset != VDESC_NO_OFFSET) {
		if ((flags & LAYERFS_MBYPASSDEBUG) && credp &&
		    kauth_cred_geteuid(credp) != 0)
			printf("umap_bypass: returning-user was %d\n",
			    kauth_cred_geteuid(credp));

		if (savecredp != NOCRED && credpp) {
			kauth_cred_free(credp);
			*credpp = savecredp;
			if ((flags & LAYERFS_MBYPASSDEBUG) && credpp &&
			    kauth_authorize_generic(*credpp,
			     KAUTH_GENERIC_ISSUSER, NULL) != KAUTH_RESULT_ALLOW)
				printf("umap_bypass: returning-user now %d\n",
				    kauth_cred_geteuid(savecredp));
		}
	}

	if (descp->vdesc_componentname_offset != VDESC_NO_OFFSET) {
		if ((flags & LAYERFS_MBYPASSDEBUG) && compcredp &&
		    kauth_authorize_generic(compcredp, KAUTH_GENERIC_ISSUSER,
		     NULL) != KAUTH_RESULT_ALLOW)
			printf("umap_bypass: returning-component-user was %d\n",
			    kauth_cred_geteuid(compcredp));

		if (savecompcredp != NOCRED) {
			kauth_cred_free(compcredp);
			(*compnamepp)->cn_cred = savecompcredp;
			if ((flags & LAYERFS_MBYPASSDEBUG) && savecompcredp &&
			    kauth_authorize_generic(savecompcredp,
			     KAUTH_GENERIC_ISSUSER, NULL) != KAUTH_RESULT_ALLOW)
				printf("umap_bypass: returning-component-user now %d\n",
				    kauth_cred_geteuid(savecompcredp));
		}
	}

	return (error);
}
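
/*
 * Example (illustrative only): a VOP_READ() on a umapfs vnode has no
 * entry in umap_vnodeop_entries, so it arrives in umap_bypass() via
 * vop_default_desc.  The bypass swaps the umapfs vnode for
 * UMAPVPTOLOWERVP(vp), duplicates the a_cred argument, maps its ids with
 * umap_mapids(), and VCALLs VOP_READ() on the lower file system, so any
 * checks the underlying file system makes use the mapped credential.
 * Afterwards the original vnode and credential are put back for the
 * caller.
 */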

/*
 * This is based on the 08-June-1999 bypass routine.
 * See layer_vnops.c:layer_bypass for more details.
 */
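/*
 * Lookup cannot simply use umap_bypass(): the returned vnode may be the
 * parent itself (a lookup of "."), the PDIRUNLOCK/EROFS cases need the
 * same fixups layer_lookup() does, and the credential to map lives in the
 * componentname rather than in a separate cred argument.  So the mapping
 * is done by hand here around a direct VCALL on the lower directory.
 */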
int
umap_lookup(v)
	void *v;
{
	struct vop_lookup_args /* {
		struct vnodeop_desc *a_desc;
		struct vnode * a_dvp;
		struct vnode ** a_vpp;
		struct componentname * a_cnp;
	} */ *ap = v;
	struct componentname *cnp = ap->a_cnp;
	kauth_cred_t savecompcredp = NULL;
	kauth_cred_t compcredp = NULL;
	struct vnode *dvp, *vp, *ldvp;
	struct mount *mp;
	int error;
	int i, flags, cnf = cnp->cn_flags;

	dvp = ap->a_dvp;
	mp = dvp->v_mount;

	if ((cnf & ISLASTCN) && (dvp->v_mount->mnt_flag & MNT_RDONLY) &&
	    (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME))
		return (EROFS);

	flags = MOUNTTOUMAPMOUNT(mp)->umapm_flags;
	ldvp = UMAPVPTOLOWERVP(dvp);

	if (flags & LAYERFS_MBYPASSDEBUG)
		printf("umap_lookup\n");

	/*
	 * Fix the credentials.  (That's the purpose of this layer.)
	 *
	 * BSD often keeps a credential in the componentname structure
	 * for speed.  If there is one, it had better get mapped, too.
	 */

	if ((savecompcredp = cnp->cn_cred)) {
		compcredp = kauth_cred_dup(savecompcredp);
		cnp->cn_cred = compcredp;

		if ((flags & LAYERFS_MBYPASSDEBUG) &&
		    kauth_authorize_generic(compcredp, KAUTH_GENERIC_ISSUSER,
		     NULL) != KAUTH_RESULT_ALLOW)
			printf("umap_lookup: componentname cred user was %d, group %d\n",
			    kauth_cred_geteuid(compcredp), kauth_cred_getegid(compcredp));

		/* Map all ids in the credential structure. */
		umap_mapids(mp, compcredp);
	}

	if ((flags & LAYERFS_MBYPASSDEBUG) && compcredp &&
	    kauth_authorize_generic(compcredp, KAUTH_GENERIC_ISSUSER,
	     NULL) != KAUTH_RESULT_ALLOW)
		printf("umap_lookup: componentname cred user now %d, group %d\n",
		    kauth_cred_geteuid(compcredp), kauth_cred_getegid(compcredp));

	ap->a_dvp = ldvp;
	error = VCALL(ldvp, ap->a_desc->vdesc_offset, ap);
	vp = *ap->a_vpp;
	*ap->a_vpp = NULL;

	if (error == EJUSTRETURN && (cnf & ISLASTCN) &&
	    (dvp->v_mount->mnt_flag & MNT_RDONLY) &&
	    (cnp->cn_nameiop == CREATE || cnp->cn_nameiop == RENAME))
		error = EROFS;

	/* Do locking fixup as appropriate.  See layer_lookup() for info. */
	if ((cnp->cn_flags & PDIRUNLOCK)) {
		LAYERFS_UPPERUNLOCK(dvp, 0, i);
	}
	if (ldvp == vp) {
		*ap->a_vpp = dvp;
		VREF(dvp);
		vrele(vp);
	} else if (vp != NULL) {
		error = layer_node_create(mp, vp, ap->a_vpp);
		if (error) {
			vput(vp);
			if (cnp->cn_flags & PDIRUNLOCK) {
				if (vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY) == 0)
					cnp->cn_flags &= ~PDIRUNLOCK;
			}
		}
	}

	/*
	 * Free duplicate cred structure and restore old one.
	 */
	if ((flags & LAYERFS_MBYPASSDEBUG) && compcredp &&
	    kauth_authorize_generic(compcredp, KAUTH_GENERIC_ISSUSER,
	     NULL) != KAUTH_RESULT_ALLOW)
		printf("umap_lookup: returning-component-user was %d\n",
		    kauth_cred_geteuid(compcredp));

	if (savecompcredp != NOCRED) {
		if (compcredp)
			kauth_cred_free(compcredp);
		cnp->cn_cred = savecompcredp;
		if ((flags & LAYERFS_MBYPASSDEBUG) && savecompcredp &&
		    kauth_authorize_generic(savecompcredp,
		     KAUTH_GENERIC_ISSUSER, NULL) != KAUTH_RESULT_ALLOW)
			printf("umap_lookup: returning-component-user now %d\n",
			    kauth_cred_geteuid(savecompcredp));
	}

	return (error);
}

/*
 * We handle getattr to change the fsid and to reverse-map the uid and
 * gid returned by the lower layer.
 */
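/*
 * Note the ordering below: umap_bypass() runs the real VOP_GETATTR() on
 * the lower vnode first (filling in *ap->a_vap) and restores the upper
 * vnode in the argument structure before returning, so ap->a_vp->v_mount
 * afterwards refers to the umapfs mount whose fsid we want to report.
 */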
int
umap_getattr(v)
	void *v;
{
	struct vop_getattr_args /* {
		struct vnode *a_vp;
		struct vattr *a_vap;
		kauth_cred_t a_cred;
		struct lwp *a_l;
	} */ *ap = v;
	uid_t uid;
	gid_t gid;
	int error, tmpid, nentries, gnentries, flags;
	u_long (*mapdata)[2];
	u_long (*gmapdata)[2];
	struct vnode **vp1p;
	const struct vnodeop_desc *descp = ap->a_desc;

	if ((error = umap_bypass(ap)) != 0)
		return (error);
	/* Requires that arguments be restored. */
	ap->a_vap->va_fsid = ap->a_vp->v_mount->mnt_stat.f_fsidx.__fsid_val[0];

	flags = MOUNTTOUMAPMOUNT(ap->a_vp->v_mount)->umapm_flags;
	/*
	 * Umap needs to map the uid and gid returned by a stat
	 * into the proper values for this site.  This involves
	 * finding the returned uid in the mapping information,
	 * translating it into the uid on the other end,
	 * and filling in the proper field in the vattr
	 * structure pointed to by ap->a_vap.  The gid is
	 * reverse-mapped the same way; ids with no map entry
	 * fall back to NOBODY and NULLGROUP respectively.
	 */
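	/*
	 * The mapping tables come from the mapfiles given to mount_umap(8):
	 * info_mapdata/info_gmapdata are arrays of u_long[2] pairs and
	 * info_nentries/info_gnentries their lengths.  As an (assumed)
	 * illustration, a uid entry pairing 1001 with 5001 would let
	 * umap_mapids() send 1001 down as 5001, while the reverse lookup
	 * here turns a lower-layer 5001 back into 1001; which column is
	 * "upper" and which is "lower" is defined by umap_findid() and
	 * umap_reverse_findid() in umap_subr.c, not here.
	 */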

	/* Find entry in map */

	uid = ap->a_vap->va_uid;
	gid = ap->a_vap->va_gid;
	if ((flags & LAYERFS_MBYPASSDEBUG))
		printf("umap_getattr: mapped uid = %d, mapped gid = %d\n", uid,
		    gid);

	vp1p = VOPARG_OFFSETTO(struct vnode**, descp->vdesc_vp_offsets[0], ap);
	nentries = MOUNTTOUMAPMOUNT((*vp1p)->v_mount)->info_nentries;
	mapdata = (MOUNTTOUMAPMOUNT((*vp1p)->v_mount)->info_mapdata);
	gnentries = MOUNTTOUMAPMOUNT((*vp1p)->v_mount)->info_gnentries;
	gmapdata = (MOUNTTOUMAPMOUNT((*vp1p)->v_mount)->info_gmapdata);

	/*
	 * Reverse map the uid for the vnode.  Since it's a reverse map,
	 * we can't use umap_mapids() to do it.
	 */

	tmpid = umap_reverse_findid(uid, mapdata, nentries);

	if (tmpid != -1) {
		ap->a_vap->va_uid = (uid_t) tmpid;
		if ((flags & LAYERFS_MBYPASSDEBUG))
			printf("umap_getattr: original uid = %d\n", uid);
	} else
		ap->a_vap->va_uid = (uid_t) NOBODY;

	/* Reverse map the gid for the vnode. */

	tmpid = umap_reverse_findid(gid, gmapdata, gnentries);

	if (tmpid != -1) {
		ap->a_vap->va_gid = (gid_t) tmpid;
		if ((flags & LAYERFS_MBYPASSDEBUG))
			printf("umap_getattr: original gid = %d\n", gid);
	} else
		ap->a_vap->va_gid = (gid_t) NULLGROUP;

	return (0);
}

int
umap_print(v)
	void *v;
{
	struct vop_print_args /* {
		struct vnode *a_vp;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	printf("\ttag VT_UMAPFS, vp=%p, lowervp=%p\n", vp,
	    UMAPVPTOLOWERVP(vp));
	return (0);
}

int
umap_rename(v)
	void *v;
{
	struct vop_rename_args /* {
		struct vnode *a_fdvp;
		struct vnode *a_fvp;
		struct componentname *a_fcnp;
		struct vnode *a_tdvp;
		struct vnode *a_tvp;
		struct componentname *a_tcnp;
	} */ *ap = v;
	int error, flags;
	struct componentname *compnamep;
	kauth_cred_t compcredp, savecompcredp;
	struct vnode *vp;
	struct vnode *tvp;

	/*
	 * Rename is irregular, having two componentname structures.
	 * We need to map the cred in the second structure,
	 * and then bypass takes care of the rest.
	 */
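	/*
	 * umap_bypass() maps only the componentname recorded in the rename
	 * descriptor (presumably the source, a_fcnp), so the target's
	 * credential in a_tcnp is duplicated and mapped by hand here and
	 * restored after the bypass returns.
	 */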

	vp = ap->a_fdvp;
	flags = MOUNTTOUMAPMOUNT(vp->v_mount)->umapm_flags;
	compnamep = ap->a_tcnp;
	compcredp = compnamep->cn_cred;

	savecompcredp = compcredp;
	compcredp = compnamep->cn_cred = kauth_cred_dup(savecompcredp);

	if ((flags & LAYERFS_MBYPASSDEBUG) &&
	    kauth_authorize_generic(compcredp, KAUTH_GENERIC_ISSUSER,
	     NULL) != KAUTH_RESULT_ALLOW)
		printf("umap_rename: rename component cred user was %d, group %d\n",
		    kauth_cred_geteuid(compcredp), kauth_cred_getegid(compcredp));

	/* Map all ids in the credential structure. */

	umap_mapids(vp->v_mount, compcredp);

	if ((flags & LAYERFS_MBYPASSDEBUG) &&
	    kauth_authorize_generic(compcredp, KAUTH_GENERIC_ISSUSER,
	     NULL) != KAUTH_RESULT_ALLOW)
		printf("umap_rename: rename component cred user now %d, group %d\n",
		    kauth_cred_geteuid(compcredp), kauth_cred_getegid(compcredp));

	tvp = ap->a_tvp;
	if (tvp) {
		if (tvp->v_mount != vp->v_mount)
			tvp = NULL;
		else
			vref(tvp);
	}
	error = umap_bypass(ap);
	if (tvp) {
		if (error == 0)
			VTOLAYER(tvp)->layer_flags |= LAYERFS_REMOVED;
		vrele(tvp);
	}
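
	/*
	 * The extra reference on tvp taken above keeps the target vnode
	 * valid across the bypass; on success it is flagged LAYERFS_REMOVED,
	 * which is assumed to tell the generic layer code that the node it
	 * shadows has been replaced and should not be reused from the cache.
	 */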

	/* Restore the additional mapped componentname cred structure. */

	kauth_cred_free(compcredp);
	compnamep->cn_cred = savecompcredp;

	return error;
}