/*	$NetBSD: puffs_node.c,v 1.10 2008/01/24 17:32:54 ad Exp $	*/

/*
 * Copyright (c) 2005, 2006, 2007  Antti Kantee.  All Rights Reserved.
 *
 * Development of this software was supported by the
 * Google Summer of Code program, the Ulla Tuominen Foundation
 * and the Finnish Cultural Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: puffs_node.c,v 1.10 2008/01/24 17:32:54 ad Exp $");

#include <sys/param.h>
#include <sys/hash.h>
#include <sys/kmem.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/vnode.h>

#include <fs/puffs/puffs_msgif.h>
#include <fs/puffs/puffs_sys.h>

#include <miscfs/genfs/genfs_node.h>
#include <miscfs/specfs/specdev.h>

static const struct genfs_ops puffs_genfsops = {
	.gop_size = puffs_gop_size,
	.gop_write = genfs_gop_write,
	.gop_markupdate = puffs_gop_markupdate,
#if 0
	.gop_alloc, should ask userspace
#endif
};

static __inline struct puffs_node_hashlist
	*puffs_cookie2hashlist(struct puffs_mount *, void *);
static struct puffs_node *puffs_cookie2pnode(struct puffs_mount *, void *);

struct pool puffs_pnpool;

/*
 * Grab a vnode, initialize all the puffs-dependent stuff.
 */
int
puffs_getvnode(struct mount *mp, void *cookie, enum vtype type,
	voff_t vsize, dev_t rdev, struct vnode **vpp)
{
	struct puffs_mount *pmp;
	struct puffs_newcookie *pnc;
	struct vnode *vp;
	struct puffs_node *pnode;
	struct puffs_node_hashlist *plist;
	int error;

	pmp = MPTOPUFFSMP(mp);

	error = EPROTO;
	if (type <= VNON || type >= VBAD) {
		puffs_senderr(pmp, PUFFS_ERR_MAKENODE, EINVAL,
		    "bad node type", cookie);
		goto bad;
	}
	if (vsize == VSIZENOTSET) {
		puffs_senderr(pmp, PUFFS_ERR_MAKENODE, EINVAL,
		    "VSIZENOTSET is not a valid size", cookie);
		goto bad;
	}

	/*
	 * XXX: there is a deadlock condition between vfs_busy() and
	 * vnode locks.  For an unmounting file system the mountpoint
	 * is frozen, but in unmount(FORCE) vflush() wants to access all
	 * of the vnodes.  If we are here waiting for the mountpoint
	 * lock while holding on to a vnode lock, well, we ain't
	 * just pining for the fjords anymore.  If we release the
	 * vnode lock, we will be in the situation "mount point
	 * is dying" and panic() will ensue in insmntque.  So as a
	 * temporary workaround, get a vnode without putting it on
	 * the mount point list, check if mount point is still alive
	 * and kicking and only then add the vnode to the list.
	 */
	error = getnewvnode(VT_PUFFS, NULL, puffs_vnodeop_p, &vp);
	if (error)
		goto bad;
	vp->v_vnlock = NULL;
	vp->v_type = type;

	/*
	 * Check that the mount point isn't going away.  This will work
	 * until we decide to remove biglock or make the kernel
	 * preemptive.  But hopefully the real problem will be fixed
	 * by then.
	 *
	 * XXX: yes, should call vfs_busy(), but thar be rabbits with
	 * vicious streaks a mile wide ...
	 *
	 * XXX: there is a transient failure here: if someone is unmounting
	 * the file system but doesn't succeed (due to it being busy),
	 * we incorrectly fail new vnode allocation.  This is *very*
	 * hard to fix with the current structure of file system unmounting.
	 */
	if (mp->mnt_iflag & IMNT_UNMOUNT) {
		DPRINTF(("puffs_getvnode: mp %p unmount, unable to create "
		    "vnode for cookie %p\n", mp, cookie));
		ungetnewvnode(vp);
		error = ENXIO;
		goto bad;
	}

	/*
	 * Creation should not fail after this point.  Or if it does,
	 * care must be taken so that VOP_INACTIVE() isn't called.
	 */

	/* So mp is not dead yet.. good.. inform new vnode of its master */
	mutex_enter(&mntvnode_lock);
	TAILQ_INSERT_TAIL(&mp->mnt_vnodelist, vp, v_mntvnodes);
	vp->v_mount = mp;
	mutex_exit(&mntvnode_lock);

	/*
	 * clerical tasks & footwork
	 */

	/* default size */
	uvm_vnp_setsize(vp, 0);

	/* dances based on vnode type. almost ufs_vinit(), but not quite */
	switch (type) {
	case VCHR:
	case VBLK:
		/*
		 * replace vnode operation vector with the specops vector.
		 * our user server has very little control over the node
		 * if it decides it's a character or block special file
		 */
		vp->v_op = puffs_specop_p;
		spec_node_init(vp, rdev);
		break;

	case VFIFO:
		vp->v_op = puffs_fifoop_p;
		break;

	case VREG:
		uvm_vnp_setsize(vp, vsize);
		break;

	case VDIR:
	case VLNK:
	case VSOCK:
		break;
	default:
		panic("puffs_getvnode: invalid vtype %d", type);
	}

	pnode = pool_get(&puffs_pnpool, PR_WAITOK);
	memset(pnode, 0, sizeof(struct puffs_node));

	pnode->pn_cookie = cookie;
	pnode->pn_refcount = 1;

	/* insert node on hash list, remove cookie from the new cookie list */
	mutex_init(&pnode->pn_mtx, MUTEX_DEFAULT, IPL_NONE);
	SLIST_INIT(&pnode->pn_sel.sel_klist);
	plist = puffs_cookie2hashlist(pmp, cookie);
	mutex_enter(&pmp->pmp_lock);
	LIST_INSERT_HEAD(plist, pnode, pn_hashent);
	if (cookie != pmp->pmp_root_cookie) {
		LIST_FOREACH(pnc, &pmp->pmp_newcookie, pnc_entries) {
			if (pnc->pnc_cookie == cookie) {
				LIST_REMOVE(pnc, pnc_entries);
				kmem_free(pnc, sizeof(struct puffs_newcookie));
				break;
			}
		}
		KASSERT(pnc != NULL);
	}
	mutex_exit(&pmp->pmp_lock);

	vp->v_data = pnode;
	vp->v_type = type;
	pnode->pn_vp = vp;
	pnode->pn_serversize = vsize;

	genfs_node_init(vp, &puffs_genfsops);
	*vpp = vp;

	DPRINTF(("new vnode at %p, pnode %p, cookie %p\n", vp,
	    pnode, pnode->pn_cookie));

	return 0;

 bad:
	/* remove staging cookie from list */
	if (cookie != pmp->pmp_root_cookie) {
		mutex_enter(&pmp->pmp_lock);
		LIST_FOREACH(pnc, &pmp->pmp_newcookie, pnc_entries) {
			if (pnc->pnc_cookie == cookie) {
				LIST_REMOVE(pnc, pnc_entries);
				kmem_free(pnc, sizeof(struct puffs_newcookie));
				break;
			}
		}
		KASSERT(pnc != NULL);
		mutex_exit(&pmp->pmp_lock);
	}

	return error;
}
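
/*
 * Illustrative sketch, kept out of the build: roughly how a caller
 * obtains a vnode for a fresh cookie with puffs_getvnode().  The
 * example_getvnode() wrapper and its "cookie" argument are
 * hypothetical; the real in-tree callers are puffs_newnode() and
 * puffs_makeroot() below.
 */
#if 0
static int
example_getvnode(struct mount *mp, void *cookie, struct vnode **vpp)
{
	int error;

	/* zero-length regular file, no device number */
	error = puffs_getvnode(mp, cookie, VREG, 0, NODEV, vpp);
	if (error)
		return error;

	/* *vpp is referenced but unlocked; lock it only if needed */
	vn_lock(*vpp, LK_EXCLUSIVE | LK_RETRY);
	return 0;
}
#endif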

/* new node creation for creative vop ops (create, symlink, mkdir, mknod) */
int
puffs_newnode(struct mount *mp, struct vnode *dvp, struct vnode **vpp,
	void *cookie, struct componentname *cnp, enum vtype type, dev_t rdev)
{
	struct puffs_mount *pmp = MPTOPUFFSMP(mp);
	struct puffs_newcookie *pnc;
	struct vnode *vp;
	int error;

	/* userspace probably has this as a NULL op */
	if (cookie == NULL) {
		error = EOPNOTSUPP;
		return error;
	}

	/*
	 * Check for previous node with the same designation.
	 * Explicitly check the root node cookie, since it might be
	 * reclaimed from the kernel when this check is made.
	 */
	mutex_enter(&pmp->pmp_lock);
	if (cookie == pmp->pmp_root_cookie
	    || puffs_cookie2pnode(pmp, cookie) != NULL) {
		mutex_exit(&pmp->pmp_lock);
		puffs_senderr(pmp, PUFFS_ERR_MAKENODE, EEXIST,
		    "cookie exists", cookie);
		return EPROTO;
	}

	LIST_FOREACH(pnc, &pmp->pmp_newcookie, pnc_entries) {
		if (pnc->pnc_cookie == cookie) {
			mutex_exit(&pmp->pmp_lock);
			puffs_senderr(pmp, PUFFS_ERR_MAKENODE, EEXIST,
			    "cookie exists", cookie);
			return EPROTO;
		}
	}
	pnc = kmem_alloc(sizeof(struct puffs_newcookie), KM_SLEEP);
	pnc->pnc_cookie = cookie;
	LIST_INSERT_HEAD(&pmp->pmp_newcookie, pnc, pnc_entries);
	mutex_exit(&pmp->pmp_lock);

	error = puffs_getvnode(dvp->v_mount, cookie, type, 0, rdev, &vp);
	if (error)
		return error;

	vp->v_type = type;
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	*vpp = vp;

	if ((cnp->cn_flags & MAKEENTRY) && PUFFS_USE_NAMECACHE(pmp))
		cache_enter(dvp, vp, cnp);

	return 0;
}
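
/*
 * Illustrative sketch, kept out of the build: a node-creating vnode
 * operation typically hands the cookie it received from the file
 * server to puffs_newnode() along these lines.  example_create() and
 * "newcookie" are hypothetical; the real callers live in
 * puffs_vnops.c.
 */
#if 0
static int
example_create(struct vnode *dvp, struct vnode **vpp,
	struct componentname *cnp, void *newcookie)
{

	/* regular file, so no rdev; the new vnode is returned locked */
	return puffs_newnode(dvp->v_mount, dvp, vpp, newcookie, cnp,
	    VREG, NODEV);
}
#endif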

void
puffs_putvnode(struct vnode *vp)
{
	struct puffs_mount *pmp;
	struct puffs_node *pnode;

	pmp = VPTOPUFFSMP(vp);
	pnode = VPTOPP(vp);

#ifdef DIAGNOSTIC
	if (vp->v_tag != VT_PUFFS)
		panic("puffs_putvnode: %p not a puffs vnode", vp);
#endif

	LIST_REMOVE(pnode, pn_hashent);
	genfs_node_destroy(vp);
	puffs_releasenode(pnode);
	vp->v_data = NULL;

	return;
}

static __inline struct puffs_node_hashlist *
puffs_cookie2hashlist(struct puffs_mount *pmp, void *cookie)
{
	uint32_t hash;

	hash = hash32_buf(&cookie, sizeof(void *), HASH32_BUF_INIT);
	return &pmp->pmp_pnodehash[hash % pmp->pmp_npnodehash];
}

/*
 * Translate cookie to puffs_node.  Caller must hold pmp_lock
 * and it will be held upon return.
 */
static struct puffs_node *
puffs_cookie2pnode(struct puffs_mount *pmp, void *cookie)
{
	struct puffs_node_hashlist *plist;
	struct puffs_node *pnode;

	plist = puffs_cookie2hashlist(pmp, cookie);
	LIST_FOREACH(pnode, plist, pn_hashent) {
		if (pnode->pn_cookie == cookie)
			break;
	}

	return pnode;
}
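
/*
 * Illustrative sketch, kept out of the build: puffs_cookie2pnode()
 * must be called with pmp_lock held.  example_cookie_is_known() is
 * hypothetical; puffs_cookie2vnode() below shows the real pattern,
 * which additionally grabs a reference on the vnode before dropping
 * pmp_lock.
 */
#if 0
static int
example_cookie_is_known(struct puffs_mount *pmp, void *cookie)
{
	int known;

	mutex_enter(&pmp->pmp_lock);
	known = (puffs_cookie2pnode(pmp, cookie) != NULL);
	mutex_exit(&pmp->pmp_lock);

	return known;
}
#endif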

/*
 * Make sure root vnode exists and reference it.  Does NOT lock.
 */
static int
puffs_makeroot(struct puffs_mount *pmp)
{
	struct vnode *vp;
	int rv;

	/*
	 * pmp_lock must be held if vref()'ing or vrele()'ing the
	 * root vnode.  the latter is controlled by puffs_inactive().
	 *
	 * pmp_root is set here and cleared in puffs_reclaim().
	 */
 retry:
	mutex_enter(&pmp->pmp_lock);
	vp = pmp->pmp_root;
	if (vp) {
		mutex_enter(&vp->v_interlock);
		mutex_exit(&pmp->pmp_lock);
		if (vget(vp, LK_INTERLOCK) == 0)
			return 0;
	} else
		mutex_exit(&pmp->pmp_lock);

	/*
	 * So, didn't have the magic root vnode available.
	 * No matter, grab another and stuff it with the cookie.
	 */
	if ((rv = puffs_getvnode(pmp->pmp_mp, pmp->pmp_root_cookie,
	    pmp->pmp_root_vtype, pmp->pmp_root_vsize, pmp->pmp_root_rdev, &vp)))
		return rv;

	/*
	 * Someone magically managed to race us into puffs_getvnode?
	 * Put our previous new vnode back and retry.
	 */
	mutex_enter(&pmp->pmp_lock);
	if (pmp->pmp_root) {
		mutex_exit(&pmp->pmp_lock);
		puffs_putvnode(vp);
		goto retry;
	}

	/* store cache */
	vp->v_vflag |= VV_ROOT;
	pmp->pmp_root = vp;
	mutex_exit(&pmp->pmp_lock);

	return 0;
}

/*
 * Locate the in-kernel vnode based on the cookie received from
 * userspace.  The vnode, if found, is returned in *vpp; otherwise
 * PUFFS_NOSUCHCOOKIE is returned.  The parameter "lock" controls
 * whether to lock the vnode or not.  Always locking might cause us
 * to lock against ourselves in situations where we want the vnode
 * but don't care for the vnode lock, e.g. file server issued
 * putpages.
 */
int
puffs_cookie2vnode(struct puffs_mount *pmp, void *cookie, int lock,
	int willcreate, struct vnode **vpp)
{
	struct puffs_node *pnode;
	struct puffs_newcookie *pnc;
	struct vnode *vp;
	int vgetflags, rv;

	/*
	 * Handle root in a special manner, since we want to make sure
	 * pmp_root is properly set.
	 */
	if (cookie == pmp->pmp_root_cookie) {
		if ((rv = puffs_makeroot(pmp)))
			return rv;
		if (lock)
			vn_lock(pmp->pmp_root, LK_EXCLUSIVE | LK_RETRY);

		*vpp = pmp->pmp_root;
		return 0;
	}

	mutex_enter(&pmp->pmp_lock);
	pnode = puffs_cookie2pnode(pmp, cookie);
	if (pnode == NULL) {
		if (willcreate) {
			pnc = kmem_alloc(sizeof(struct puffs_newcookie),
			    KM_SLEEP);
			pnc->pnc_cookie = cookie;
			LIST_INSERT_HEAD(&pmp->pmp_newcookie, pnc, pnc_entries);
		}
		mutex_exit(&pmp->pmp_lock);
		return PUFFS_NOSUCHCOOKIE;
	}
	vp = pnode->pn_vp;
	mutex_enter(&vp->v_interlock);
	mutex_exit(&pmp->pmp_lock);

	vgetflags = LK_INTERLOCK;
	if (lock)
		vgetflags |= LK_EXCLUSIVE | LK_RETRY;
	if ((rv = vget(vp, vgetflags)))
		return rv;

	*vpp = vp;
	return 0;
}
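
/*
 * Illustrative sketch, kept out of the build: typical use of
 * puffs_cookie2vnode() when servicing a request that names a node by
 * cookie.  example_fetch_vnode() and its choice of mapping
 * PUFFS_NOSUCHCOOKIE to ENOENT are hypothetical; real callers decide
 * case by case what an unknown cookie means.
 */
#if 0
static int
example_fetch_vnode(struct puffs_mount *pmp, void *cookie,
	struct vnode **vpp)
{
	int rv;

	/* lock the vnode, don't register the cookie as "will create" */
	rv = puffs_cookie2vnode(pmp, cookie, 1, 0, vpp);
	if (rv == PUFFS_NOSUCHCOOKIE)
		return ENOENT;
	if (rv)
		return rv;

	/* *vpp is locked and referenced; release with vput() when done */
	return 0;
}
#endif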

void
puffs_updatenode(struct puffs_node *pn, int flags, voff_t size)
{
	struct timespec ts;

	if (flags == 0)
		return;

	nanotime(&ts);

	if (flags & PUFFS_UPDATEATIME) {
		pn->pn_mc_atime = ts;
		pn->pn_stat |= PNODE_METACACHE_ATIME;
	}
	if (flags & PUFFS_UPDATECTIME) {
		pn->pn_mc_ctime = ts;
		pn->pn_stat |= PNODE_METACACHE_CTIME;
	}
	if (flags & PUFFS_UPDATEMTIME) {
		pn->pn_mc_mtime = ts;
		pn->pn_stat |= PNODE_METACACHE_MTIME;
	}
	if (flags & PUFFS_UPDATESIZE) {
		pn->pn_mc_size = size;
		pn->pn_stat |= PNODE_METACACHE_SIZE;
	}
}
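
/*
 * Illustrative sketch, kept out of the build: how a write path might
 * record timestamp and size updates in the metacache with
 * puffs_updatenode().  example_note_write() and "newsize" are
 * hypothetical; the flag names are the ones tested above.
 */
#if 0
static void
example_note_write(struct vnode *vp, voff_t newsize)
{

	puffs_updatenode(VPTOPP(vp),
	    PUFFS_UPDATECTIME | PUFFS_UPDATEMTIME | PUFFS_UPDATESIZE,
	    newsize);
}
#endif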

/*
 * Add a reference to the node.
 * pn_mtx must be held on entry and is still held on return.
 */
void
puffs_referencenode(struct puffs_node *pn)
{

	KASSERT(mutex_owned(&pn->pn_mtx));
	pn->pn_refcount++;
}

/*
 * Release the pnode structure.  This deals with references to the
 * puffs_node instead of the vnode.  We can't use vref()/vrele() on
 * the vnode here, since that causes the lovely VOP_INACTIVE(),
 * which in turn causes the lovely deadlock when called by the one
 * who is supposed to handle it.
 */
void
puffs_releasenode(struct puffs_node *pn)
{

	mutex_enter(&pn->pn_mtx);
	if (--pn->pn_refcount == 0) {
		mutex_exit(&pn->pn_mtx);
		mutex_destroy(&pn->pn_mtx);
		pool_put(&puffs_pnpool, pn);
	} else {
		mutex_exit(&pn->pn_mtx);
	}
}
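
/*
 * Illustrative sketch, kept out of the build: the reference/release
 * pairing used to hold on to a puffs_node without holding the vnode.
 * example_grab() and example_drop() are hypothetical.  Note that
 * puffs_referencenode() wants pn_mtx held by the caller, while
 * puffs_releasenode() takes and drops pn_mtx by itself.
 */
#if 0
static void
example_grab(struct puffs_node *pn)
{

	mutex_enter(&pn->pn_mtx);
	puffs_referencenode(pn);
	mutex_exit(&pn->pn_mtx);
}

static void
example_drop(struct puffs_node *pn)
{

	/* frees pn when the last reference goes away */
	puffs_releasenode(pn);
}
#endif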