/*	$NetBSD: puffs_node.c,v 1.11 2008/01/28 21:06:36 pooka Exp $	*/

/*
 * Copyright (c) 2005, 2006, 2007  Antti Kantee.  All Rights Reserved.
 *
 * Development of this software was supported by the
 * Google Summer of Code program, the Ulla Tuominen Foundation
 * and the Finnish Cultural Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: puffs_node.c,v 1.11 2008/01/28 21:06:36 pooka Exp $");

#include <sys/param.h>
#include <sys/hash.h>
#include <sys/kmem.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/vnode.h>

#include <fs/puffs/puffs_msgif.h>
#include <fs/puffs/puffs_sys.h>

#include <miscfs/genfs/genfs_node.h>
#include <miscfs/specfs/specdev.h>

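/*
 * genfs hooks: size and update marking are handled by puffs itself,
 * page writes go through the common genfs_gop_write() path.
 */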
static const struct genfs_ops puffs_genfsops = {
	.gop_size = puffs_gop_size,
	.gop_write = genfs_gop_write,
	.gop_markupdate = puffs_gop_markupdate,
#if 0
	.gop_alloc, should ask userspace
#endif
};

static __inline struct puffs_node_hashlist
	*puffs_cookie2hashlist(struct puffs_mount *, puffs_cookie_t);
static struct puffs_node *puffs_cookie2pnode(struct puffs_mount *,
					     puffs_cookie_t);

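/* pool from which struct puffs_node entries are allocated */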
struct pool puffs_pnpool;

/*
 * Grab a vnode and initialize all the puffs-dependent stuff.
 */
int
puffs_getvnode(struct mount *mp, puffs_cookie_t ck, enum vtype type,
	voff_t vsize, dev_t rdev, struct vnode **vpp)
{
	struct puffs_mount *pmp;
	struct puffs_newcookie *pnc;
	struct vnode *vp;
	struct puffs_node *pnode;
	struct puffs_node_hashlist *plist;
	int error;

	pmp = MPTOPUFFSMP(mp);

	error = EPROTO;
	if (type <= VNON || type >= VBAD) {
		puffs_senderr(pmp, PUFFS_ERR_MAKENODE, EINVAL,
		    "bad node type", ck);
		goto bad;
	}
	if (vsize == VSIZENOTSET) {
		puffs_senderr(pmp, PUFFS_ERR_MAKENODE, EINVAL,
		    "VSIZENOTSET is not a valid size", ck);
		goto bad;
	}

	/*
	 * XXX: there is a deadlock condition between vfs_busy() and
	 * vnode locks.  For an unmounting file system the mountpoint
	 * is frozen, but in unmount(FORCE) vflush() wants to access all
	 * of the vnodes.  If we are here waiting for the mountpoint
	 * lock while holding on to a vnode lock, well, we ain't
	 * just pining for the fjords anymore.  If we release the
	 * vnode lock, we will be in the situation "mount point
	 * is dying" and panic() will ensue in insmntque.  So as a
	 * temporary workaround, get a vnode without putting it on
	 * the mount point list, check if mount point is still alive
	 * and kicking and only then add the vnode to the list.
	 */
	error = getnewvnode(VT_PUFFS, NULL, puffs_vnodeop_p, &vp);
	if (error)
		goto bad;
	vp->v_vnlock = NULL;
	vp->v_type = type;

	/*
	 * Check that the mount point isn't going away.  This will work
	 * until we decide to remove biglock or make the kernel
	 * preemptive.  But hopefully the real problem will be fixed
	 * by then.
	 *
	 * XXX: yes, should call vfs_busy(), but thar be rabbits with
	 * vicious streaks a mile wide ...
	 *
	 * XXX: there is a transient failure here: if someone is unmounting
	 * the file system but doesn't succeed (due to it being busy),
	 * we incorrectly fail new vnode allocation.  This is *very*
	 * hard to fix with the current structure of file system unmounting.
	 */
	if (mp->mnt_iflag & IMNT_UNMOUNT) {
		DPRINTF(("puffs_getvnode: mp %p unmount, unable to create "
		    "vnode for cookie %p\n", mp, ck));
		ungetnewvnode(vp);
		error = ENXIO;
		goto bad;
	}

	/*
	 * Creation should not fail after this point.  Or if it does,
	 * care must be taken so that VOP_INACTIVE() isn't called.
	 */

	/* So mp is not dead yet.. good.. inform new vnode of its master */
	mutex_enter(&mntvnode_lock);
	TAILQ_INSERT_TAIL(&mp->mnt_vnodelist, vp, v_mntvnodes);
	vp->v_mount = mp;
	mutex_exit(&mntvnode_lock);

	/*
	 * clerical tasks & footwork
	 */

	/* default size */
	uvm_vnp_setsize(vp, 0);

	/* dances based on vnode type. almost ufs_vinit(), but not quite */
	switch (type) {
	case VCHR:
	case VBLK:
		/*
		 * replace vnode operation vector with the specops vector.
		 * our user server has very little control over the node
		 * if it decides it's a character or block special file
		 */
		vp->v_op = puffs_specop_p;
		spec_node_init(vp, rdev);
		break;

	case VFIFO:
		vp->v_op = puffs_fifoop_p;
		break;

	case VREG:
		uvm_vnp_setsize(vp, vsize);
		break;

	case VDIR:
	case VLNK:
	case VSOCK:
		break;
	default:
		panic("puffs_getvnode: invalid vtype %d", type);
	}

	pnode = pool_get(&puffs_pnpool, PR_WAITOK);
	memset(pnode, 0, sizeof(struct puffs_node));

	pnode->pn_cookie = ck;
	pnode->pn_refcount = 1;

	/* insert the cookie on the hash list, take it off the newcookie list */
	mutex_init(&pnode->pn_mtx, MUTEX_DEFAULT, IPL_NONE);
	SLIST_INIT(&pnode->pn_sel.sel_klist);
	plist = puffs_cookie2hashlist(pmp, ck);
	mutex_enter(&pmp->pmp_lock);
	LIST_INSERT_HEAD(plist, pnode, pn_hashent);
	if (ck != pmp->pmp_root_cookie) {
		LIST_FOREACH(pnc, &pmp->pmp_newcookie, pnc_entries) {
			if (pnc->pnc_cookie == ck) {
				LIST_REMOVE(pnc, pnc_entries);
				kmem_free(pnc, sizeof(struct puffs_newcookie));
				break;
			}
		}
		KASSERT(pnc != NULL);
	}
	mutex_exit(&pmp->pmp_lock);

	vp->v_data = pnode;
	vp->v_type = type;
	pnode->pn_vp = vp;
	pnode->pn_serversize = vsize;

	genfs_node_init(vp, &puffs_genfsops);
	*vpp = vp;

	DPRINTF(("new vnode at %p, pnode %p, cookie %p\n", vp,
	    pnode, pnode->pn_cookie));

	return 0;

 bad:
	/* remove staging cookie from list */
	if (ck != pmp->pmp_root_cookie) {
		mutex_enter(&pmp->pmp_lock);
		LIST_FOREACH(pnc, &pmp->pmp_newcookie, pnc_entries) {
			if (pnc->pnc_cookie == ck) {
				LIST_REMOVE(pnc, pnc_entries);
				kmem_free(pnc, sizeof(struct puffs_newcookie));
				break;
			}
		}
		KASSERT(pnc != NULL);
		mutex_exit(&pmp->pmp_lock);
	}

	return error;
}

/* new node creation for the "creative" vop ops (create, symlink, mkdir, mknod) */
int
puffs_newnode(struct mount *mp, struct vnode *dvp, struct vnode **vpp,
	puffs_cookie_t ck, struct componentname *cnp,
	enum vtype type, dev_t rdev)
{
	struct puffs_mount *pmp = MPTOPUFFSMP(mp);
	struct puffs_newcookie *pnc;
	struct vnode *vp;
	int error;

	/* userspace probably has this as a NULL op */
	if (ck == NULL) {
		error = EOPNOTSUPP;
		return error;
	}

	/*
	 * Check for previous node with the same designation.
	 * Explicitly check the root node cookie, since it might be
	 * reclaimed from the kernel when this check is made.
	 */
	mutex_enter(&pmp->pmp_lock);
	if (ck == pmp->pmp_root_cookie
	    || puffs_cookie2pnode(pmp, ck) != NULL) {
		mutex_exit(&pmp->pmp_lock);
		puffs_senderr(pmp, PUFFS_ERR_MAKENODE, EEXIST,
		    "cookie exists", ck);
		return EPROTO;
	}

	LIST_FOREACH(pnc, &pmp->pmp_newcookie, pnc_entries) {
		if (pnc->pnc_cookie == ck) {
			mutex_exit(&pmp->pmp_lock);
			puffs_senderr(pmp, PUFFS_ERR_MAKENODE, EEXIST,
			    "cookie exists", ck);
			return EPROTO;
		}
	}
	pnc = kmem_alloc(sizeof(struct puffs_newcookie), KM_SLEEP);
	pnc->pnc_cookie = ck;
	LIST_INSERT_HEAD(&pmp->pmp_newcookie, pnc, pnc_entries);
	mutex_exit(&pmp->pmp_lock);

	error = puffs_getvnode(dvp->v_mount, ck, type, 0, rdev, &vp);
	if (error)
		return error;

	vp->v_type = type;
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	*vpp = vp;

	if ((cnp->cn_flags & MAKEENTRY) && PUFFS_USE_NAMECACHE(pmp))
		cache_enter(dvp, vp, cnp);

	return 0;
}

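/*
 * Detach a puffs_node from its vnode: remove the node from the
 * cookie hash, destroy the genfs node, drop the reference taken in
 * puffs_getvnode() and clear v_data.  Getting rid of the vnode
 * itself is left to the caller.
 */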
void
puffs_putvnode(struct vnode *vp)
{
	struct puffs_mount *pmp;
	struct puffs_node *pnode;

	pmp = VPTOPUFFSMP(vp);
	pnode = VPTOPP(vp);

#ifdef DIAGNOSTIC
	if (vp->v_tag != VT_PUFFS)
		panic("puffs_putvnode: %p not a puffs vnode", vp);
#endif

	LIST_REMOVE(pnode, pn_hashent);
	genfs_node_destroy(vp);
	puffs_releasenode(pnode);
	vp->v_data = NULL;

	return;
}

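/*
 * Map a cookie to its hash bucket: the cookie pointer value itself
 * is run through hash32_buf() and taken modulo the bucket count.
 */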
static __inline struct puffs_node_hashlist *
puffs_cookie2hashlist(struct puffs_mount *pmp, puffs_cookie_t ck)
{
	uint32_t hash;

	hash = hash32_buf(&ck, sizeof(void *), HASH32_BUF_INIT);
	return &pmp->pmp_pnodehash[hash % pmp->pmp_npnodehash];
}

/*
 * Translate cookie to puffs_node.  Caller must hold pmp_lock
 * and it will be held upon return.
 */
static struct puffs_node *
puffs_cookie2pnode(struct puffs_mount *pmp, puffs_cookie_t ck)
{
	struct puffs_node_hashlist *plist;
	struct puffs_node *pnode;

	plist = puffs_cookie2hashlist(pmp, ck);
	LIST_FOREACH(pnode, plist, pn_hashent) {
		if (pnode->pn_cookie == ck)
			break;
	}

	return pnode;
}

/*
 * Make sure root vnode exists and reference it.  Does NOT lock.
 */
static int
puffs_makeroot(struct puffs_mount *pmp)
{
	struct vnode *vp;
	int rv;

	/*
	 * pmp_lock must be held if vref()'ing or vrele()'ing the
	 * root vnode.  the latter is controlled by puffs_inactive().
	 *
	 * pmp_root is set here and cleared in puffs_reclaim().
	 */
 retry:
	mutex_enter(&pmp->pmp_lock);
	vp = pmp->pmp_root;
	if (vp) {
		mutex_enter(&vp->v_interlock);
		mutex_exit(&pmp->pmp_lock);
		if (vget(vp, LK_INTERLOCK) == 0)
			return 0;
	} else
		mutex_exit(&pmp->pmp_lock);

	/*
	 * So, didn't have the magic root vnode available.
	 * No matter, grab another and stuff it with the cookie.
	 */
	if ((rv = puffs_getvnode(pmp->pmp_mp, pmp->pmp_root_cookie,
	    pmp->pmp_root_vtype, pmp->pmp_root_vsize, pmp->pmp_root_rdev, &vp)))
		return rv;

	/*
	 * Someone magically managed to race us into puffs_getvnode?
	 * Put our previous new vnode back and retry.
	 */
	mutex_enter(&pmp->pmp_lock);
	if (pmp->pmp_root) {
		mutex_exit(&pmp->pmp_lock);
		puffs_putvnode(vp);
		goto retry;
	}

	/* cache the root vnode */
	vp->v_vflag |= VV_ROOT;
	pmp->pmp_root = vp;
	mutex_exit(&pmp->pmp_lock);

	return 0;
}

/*
 * Locate the in-kernel vnode based on the cookie received from
 * userspace.  On success the vnode is returned in *vpp; if no such
 * cookie is known, PUFFS_NOSUCHCOOKIE is returned.  The parameter
 * "lock" controls whether to lock the vnode if it is found.  Always
 * locking might cause us to lock against ourselves in situations
 * where we want the vnode but don't care for the vnode lock,
 * e.g. file server issued putpages.
 */
int
puffs_cookie2vnode(struct puffs_mount *pmp, puffs_cookie_t ck, int lock,
	int willcreate, struct vnode **vpp)
{
	struct puffs_node *pnode;
	struct puffs_newcookie *pnc;
	struct vnode *vp;
	int vgetflags, rv;

	/*
	 * Handle root in a special manner, since we want to make sure
	 * pmp_root is properly set.
	 */
	if (ck == pmp->pmp_root_cookie) {
		if ((rv = puffs_makeroot(pmp)))
			return rv;
		if (lock)
			vn_lock(pmp->pmp_root, LK_EXCLUSIVE | LK_RETRY);

		*vpp = pmp->pmp_root;
		return 0;
	}

	mutex_enter(&pmp->pmp_lock);
	pnode = puffs_cookie2pnode(pmp, ck);
	if (pnode == NULL) {
		if (willcreate) {
			pnc = kmem_alloc(sizeof(struct puffs_newcookie),
			    KM_SLEEP);
			pnc->pnc_cookie = ck;
			LIST_INSERT_HEAD(&pmp->pmp_newcookie, pnc, pnc_entries);
		}
		mutex_exit(&pmp->pmp_lock);
		return PUFFS_NOSUCHCOOKIE;
	}
	vp = pnode->pn_vp;
	mutex_enter(&vp->v_interlock);
	mutex_exit(&pmp->pmp_lock);

	vgetflags = LK_INTERLOCK;
	if (lock)
		vgetflags |= LK_EXCLUSIVE | LK_RETRY;
	if ((rv = vget(vp, vgetflags)))
		return rv;

	*vpp = vp;
	return 0;
}

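/*
 * Record metadata cache updates for a node: stamp the fields selected
 * by "flags" with the current time (or the given size) and mark them
 * in pn_stat.
 */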
void
puffs_updatenode(struct puffs_node *pn, int flags, voff_t size)
{
	struct timespec ts;

	if (flags == 0)
		return;

	nanotime(&ts);

	if (flags & PUFFS_UPDATEATIME) {
		pn->pn_mc_atime = ts;
		pn->pn_stat |= PNODE_METACACHE_ATIME;
	}
	if (flags & PUFFS_UPDATECTIME) {
		pn->pn_mc_ctime = ts;
		pn->pn_stat |= PNODE_METACACHE_CTIME;
	}
	if (flags & PUFFS_UPDATEMTIME) {
		pn->pn_mc_mtime = ts;
		pn->pn_stat |= PNODE_METACACHE_MTIME;
	}
	if (flags & PUFFS_UPDATESIZE) {
		pn->pn_mc_size = size;
		pn->pn_stat |= PNODE_METACACHE_SIZE;
	}
}
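
/*
 * Illustrative example (not part of the original source): a caller
 * that has just extended a file might record the new size together
 * with the timestamps, e.g.
 *
 *	puffs_updatenode(pn, PUFFS_UPDATECTIME | PUFFS_UPDATEMTIME
 *	    | PUFFS_UPDATESIZE, newsize);
 */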

/*
 * Add a reference to a node.
 * pn_mtx must be held on entry and is still held on return.
 */
void
puffs_referencenode(struct puffs_node *pn)
{

	KASSERT(mutex_owned(&pn->pn_mtx));
	pn->pn_refcount++;
}

/*
 * Release a reference to the pnode structure.  This deals with
 * references to the puffs_node instead of the vnode.  We can't use
 * vref()/vrele() on the vnode here, since that causes the lovely
 * VOP_INACTIVE(), which in turn causes the lovely deadlock when
 * called by the one who is supposed to handle it.
 */
void
puffs_releasenode(struct puffs_node *pn)
{

	mutex_enter(&pn->pn_mtx);
	if (--pn->pn_refcount == 0) {
		mutex_exit(&pn->pn_mtx);
		mutex_destroy(&pn->pn_mtx);
		pool_put(&puffs_pnpool, pn);
	} else {
		mutex_exit(&pn->pn_mtx);
	}
}