/* nfs_node.c -- NetBSD NFS client node management, revision 1.101.10.4 */
      1 /*	$NetBSD: nfs_node.c,v 1.101.10.4 2010/08/11 22:54:59 yamt Exp $	*/
      2 
      3 /*
      4  * Copyright (c) 1989, 1993
      5  *	The Regents of the University of California.  All rights reserved.
      6  *
      7  * This code is derived from software contributed to Berkeley by
      8  * Rick Macklem at The University of Guelph.
      9  *
     10  * Redistribution and use in source and binary forms, with or without
     11  * modification, are permitted provided that the following conditions
     12  * are met:
     13  * 1. Redistributions of source code must retain the above copyright
     14  *    notice, this list of conditions and the following disclaimer.
     15  * 2. Redistributions in binary form must reproduce the above copyright
     16  *    notice, this list of conditions and the following disclaimer in the
     17  *    documentation and/or other materials provided with the distribution.
     18  * 3. Neither the name of the University nor the names of its contributors
     19  *    may be used to endorse or promote products derived from this software
     20  *    without specific prior written permission.
     21  *
     22  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
     23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     25  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
     26  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     27  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     28  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     29  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     30  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     31  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     32  * SUCH DAMAGE.
     33  *
     34  *	@(#)nfs_node.c	8.6 (Berkeley) 5/22/95
     35  */
     36 
     37 #include <sys/cdefs.h>
     38 __KERNEL_RCSID(0, "$NetBSD: nfs_node.c,v 1.101.10.4 2010/08/11 22:54:59 yamt Exp $");
     39 
     40 #ifdef _KERNEL_OPT
     41 #include "opt_nfs.h"
     42 #endif
     43 
     44 #include <sys/param.h>
     45 #include <sys/systm.h>
     46 #include <sys/proc.h>
     47 #include <sys/mount.h>
     48 #include <sys/namei.h>
     49 #include <sys/vnode.h>
     50 #include <sys/kernel.h>
     51 #include <sys/pool.h>
     52 #include <sys/lock.h>
     53 #include <sys/hash.h>
     54 #include <sys/kauth.h>
     55 
     56 #include <nfs/rpcv2.h>
     57 #include <nfs/nfsproto.h>
     58 #include <nfs/nfs.h>
     59 #include <nfs/nfsnode.h>
     60 #include <nfs/nfsmount.h>
     61 #include <nfs/nfs_var.h>
     62 
/* Backing pools for nfsnode structures and cached attributes. */
struct pool nfs_node_pool;
struct pool nfs_vattr_pool;
/* Workqueue on which deferred silly-rename removals run (see nfs_sillyworker). */
static struct workqueue *nfs_sillyworkq;

/* When set, report vnodes reclaimed while still actively referenced. */
extern int prtactive;

static void nfs_gop_size(struct vnode *, off_t, off_t *, int);
static int nfs_gop_alloc(struct vnode *, off_t, off_t, int, kauth_cred_t);
static int nfs_gop_write(struct vnode *, struct vm_page **, int, int);
static void nfs_sillyworker(struct work *, void *);

/* genfs hooks installed on every NFS vnode in nfs_nget1(). */
static const struct genfs_ops nfs_genfsops = {
	.gop_size = nfs_gop_size,
	.gop_alloc = nfs_gop_alloc,
	.gop_write = nfs_gop_write,
};
     79 
     80 /*
     81  * Reinitialize inode hash table.
     82  */
     83 void
     84 nfs_node_init(void)
     85 {
     86 
     87 	pool_init(&nfs_node_pool, sizeof(struct nfsnode), 0, 0, 0, "nfsnodepl",
     88 	    &pool_allocator_nointr, IPL_NONE);
     89 	pool_init(&nfs_vattr_pool, sizeof(struct vattr), 0, 0, 0, "nfsvapl",
     90 	    &pool_allocator_nointr, IPL_NONE);
     91 	if (workqueue_create(&nfs_sillyworkq, "nfssilly", nfs_sillyworker,
     92 	    NULL, PRI_NONE, IPL_NONE, 0) != 0) {
     93 	    	panic("nfs_node_init");
     94 	}
     95 }
     96 
/*
 * Free resources previously allocated in nfs_node_init().
 * (The old comment said nfs_node_reinit(), which does not exist.)
 */
void
nfs_node_done(void)
{

	pool_destroy(&nfs_node_pool);
	pool_destroy(&nfs_vattr_pool);
	workqueue_destroy(nfs_sillyworkq);
}
    108 
/*
 * Convert a struct rb_node embedded in an nfsnode (the n_rbnode
 * member) back into a pointer to the containing nfsnode --
 * container_of-style pointer arithmetic.
 */
#define	RBTONFSNODE(node) \
	(void *)((uintptr_t)(node) - offsetof(struct nfsnode, n_rbnode))

/* Lookup key for the per-mount file-handle rb tree. */
struct fh_match {
	nfsfh_t *fhm_fhp;	/* file handle bytes */
	size_t fhm_fhsize;	/* number of valid bytes at fhm_fhp */
	size_t fhm_fhoffset;	/* unused in this file */
};
    117 
/*
 * rb_tree node-vs-node comparator: order nfsnodes first by file
 * handle length, then by memcmp() of the handle bytes.  The sign
 * semantics must stay consistent with nfs_compare_node_fh() below,
 * or tree insert and lookup would disagree.
 */
static int
nfs_compare_nodes(const struct rb_node *parent, const struct rb_node *node)
{
	const struct nfsnode * const pnp = RBTONFSNODE(parent);
	const struct nfsnode * const np = RBTONFSNODE(node);

	if (pnp->n_fhsize != np->n_fhsize)
		return np->n_fhsize - pnp->n_fhsize;

	return memcmp(np->n_fhp, pnp->n_fhp, np->n_fhsize);
}
    129 
/*
 * rb_tree key comparator: compare a tree node against a struct
 * fh_match lookup key, with the same ordering as nfs_compare_nodes().
 * NOTE(review): the size_t subtraction narrows to int on return;
 * presumably safe because file handles are small (bounded by the
 * protocol) -- confirm against NFSX_V3FHMAX.
 */
static int
nfs_compare_node_fh(const struct rb_node *b, const void *key)
{
	const struct nfsnode * const pnp = RBTONFSNODE(b);
	const struct fh_match * const fhm = key;

	if (pnp->n_fhsize != fhm->fhm_fhsize)
		return fhm->fhm_fhsize - pnp->n_fhsize;

	return memcmp(fhm->fhm_fhp, pnp->n_fhp, pnp->n_fhsize);
}
    141 
/* Comparator hookup for the per-mount file-handle rb tree. */
static const struct rb_tree_ops nfs_node_rbtree_ops = {
	.rbto_compare_nodes = nfs_compare_nodes,
	.rbto_compare_key = nfs_compare_node_fh,
};
    146 
/*
 * Initialize the per-mount rb tree that indexes this mount's
 * nfsnodes by file handle (searched and updated by nfs_nget1()).
 */
void
nfs_rbtinit(struct nfsmount *nmp)
{
	rb_tree_init(&nmp->nm_rbtree, &nfs_node_rbtree_ops);
}
    152 
    153 
/*
 * Look up a vnode/nfsnode by file handle.
 * Callers must check for mount points!!
 * In all cases, a pointer to a
 * nfsnode structure is returned.
 *
 * lkflags is OR'ed into the vget() flags; a vget() failure of EBUSY
 * is passed straight back to the caller, any other failure restarts
 * the lookup.  On a cache miss a fresh vnode/nfsnode pair is built
 * outside the tree lock and inserted under the writer lock; if a
 * concurrent creator won the race, the fresh pair is torn down and
 * the lookup retried.
 */
int
nfs_nget1(struct mount *mntp, nfsfh_t *fhp, int fhsize, struct nfsnode **npp, int lkflags)
{
	struct nfsnode *np;
	struct vnode *vp;
	struct nfsmount *nmp = VFSTONFS(mntp);
	int error;
	struct fh_match fhm;
	struct rb_node *node;

	fhm.fhm_fhp = fhp;
	fhm.fhm_fhsize = fhsize;

loop:
	rw_enter(&nmp->nm_rbtlock, RW_READER);
	node = rb_tree_find_node(&nmp->nm_rbtree, &fhm);
	if (node != NULL) {
		np = RBTONFSNODE(node);
		vp = NFSTOV(np);
		/*
		 * Take v_interlock before dropping the tree lock so the
		 * vnode cannot be reclaimed out from under the vget().
		 */
		mutex_enter(&vp->v_interlock);
		rw_exit(&nmp->nm_rbtlock);
		error = vget(vp, LK_EXCLUSIVE | lkflags);
		if (error == EBUSY)
			return error;
		if (error)
			goto loop;
		*npp = np;
		return(0);
	}
	rw_exit(&nmp->nm_rbtlock);

	/* Cache miss: build a new vnode/nfsnode pair, unlocked. */
	error = getnewvnode(VT_NFS, mntp, nfsv2_vnodeop_p, &vp);
	if (error) {
		*npp = 0;
		return (error);
	}
	np = pool_get(&nfs_node_pool, PR_WAITOK);
	memset(np, 0, sizeof *np);
	np->n_vnode = vp;

	/*
	 * Prepare the nfsnode for insertion into the per-mount rb tree
	 * under its new file handle.
	 */

	/* Large handles get heap storage; small ones use the inline n_fh. */
	if (fhsize > NFS_SMALLFH) {
		np->n_fhp = kmem_alloc(fhsize, KM_SLEEP);
	} else
		np->n_fhp = &np->n_fh;
	memcpy(np->n_fhp, fhp, fhsize);
	np->n_fhsize = fhsize;
	np->n_accstamp = -1;	/* access cache starts out invalid */
	np->n_vattr = pool_get(&nfs_vattr_pool, PR_WAITOK);

	rw_enter(&nmp->nm_rbtlock, RW_WRITER);
	if (NULL != rb_tree_find_node(&nmp->nm_rbtree, &fhm)) {
		/* Lost the race: undo everything and retry the lookup. */
		rw_exit(&nmp->nm_rbtlock);
		if (fhsize > NFS_SMALLFH) {
			kmem_free(np->n_fhp, fhsize);
		}
		pool_put(&nfs_vattr_pool, np->n_vattr);
		pool_put(&nfs_node_pool, np);
		ungetnewvnode(vp);
		goto loop;
	}
	mutex_init(&np->n_attrlock, MUTEX_DEFAULT, IPL_NONE);
	vp->v_data = np;
	genfs_node_init(vp, &nfs_genfsops);
	/*
	 * Initialize read/write creds to useful values. VOP_OPEN will
	 * overwrite these.
	 */
	np->n_rcred = curlwp->l_cred;
	kauth_cred_hold(np->n_rcred);
	np->n_wcred = curlwp->l_cred;
	kauth_cred_hold(np->n_wcred);
	/* Return the vnode locked, attr cache invalid and size zero. */
	VOP_LOCK(vp, LK_EXCLUSIVE);
	NFS_INVALIDATE_ATTRCACHE(np);
	uvm_vnp_setsize(vp, 0);
	rb_tree_insert_node(&nmp->nm_rbtree, &np->n_rbnode);
	rw_exit(&nmp->nm_rbtlock);

	*npp = np;
	return (0);
}
    244 
/*
 * VOP_INACTIVE for NFS vnodes: called with the vnode locked when its
 * last use goes away.  Detaches any pending silly rename, trims the
 * node's flags, and schedules the deferred unlink on the workqueue.
 */
int
nfs_inactive(void *v)
{
	struct vop_inactive_args /* {
		struct vnode *a_vp;
		bool *a_recycle;
	} */ *ap = v;
	struct nfsnode *np;
	struct sillyrename *sp;
	struct vnode *vp = ap->a_vp;

	np = VTONFS(vp);
	/* Detach any pending silly rename; directories never have one. */
	if (vp->v_type != VDIR) {
		sp = np->n_sillyrename;
		np->n_sillyrename = (struct sillyrename *)0;
	} else
		sp = NULL;
	if (sp != NULL)
		nfs_vinvalbuf(vp, 0, sp->s_cred, curlwp, 1);
	/* Ask for immediate recycling if the file was removed. */
	*ap->a_recycle = (np->n_flag & NREMOVED) != 0;
	/* Keep only the flags that remain meaningful across reactivation. */
	np->n_flag &=
	    (NMODIFIED | NFLUSHINPROG | NFLUSHWANT | NEOFVALID | NTRUNCDELAYED);

	if (vp->v_type == VDIR && np->n_dircache)
		nfs_invaldircache(vp,
		    NFS_INVALDIRCACHE_FORCE | NFS_INVALDIRCACHE_KEEPEOF);

	VOP_UNLOCK(vp);

	/*
	 * Defer removal of the silly-renamed file to nfs_sillyworker();
	 * it needs to lock the parent directory, which we must not do
	 * while inactivating this vnode.
	 */
	if (sp != NULL) {
		workqueue_enqueue(nfs_sillyworkq, &sp->s_work, NULL);
	}

	return (0);
}
    280 
/*
 * Reclaim an nfsnode so that it can be used for other purposes.
 * Unhooks the node from the per-mount rb tree, then releases every
 * resource it owns: directory cache, large file handle, cached
 * attributes, credentials, name cache entries and locks.
 */
int
nfs_reclaim(void *v)
{
	struct vop_reclaim_args /* {
		struct vnode *a_vp;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct nfsnode *np = VTONFS(vp);
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);

	if (prtactive && vp->v_usecount > 1)
		vprint("nfs_reclaim: pushing active", vp);

	/* Remove from the file-handle tree first so nfs_nget1() can't find us. */
	rw_enter(&nmp->nm_rbtlock, RW_WRITER);
	rb_tree_remove_node(&nmp->nm_rbtree, &np->n_rbnode);
	rw_exit(&nmp->nm_rbtlock);

	/*
	 * Free up any directory cookie structures and
	 * large file handle structures that might be associated with
	 * this nfs node.
	 */
	if (vp->v_type == VDIR && np->n_dircache != NULL) {
		nfs_invaldircache(vp, NFS_INVALDIRCACHE_FORCE);
		hashdone(np->n_dircache, HASH_LIST, nfsdirhashmask);
	}
	KASSERT(np->n_dirgens == NULL);

	/* Heap-allocated handle only when it didn't fit inline (nfs_nget1). */
	if (np->n_fhsize > NFS_SMALLFH)
		kmem_free(np->n_fhp, np->n_fhsize);

	pool_put(&nfs_vattr_pool, np->n_vattr);
	if (np->n_rcred)
		kauth_cred_free(np->n_rcred);

	if (np->n_wcred)
		kauth_cred_free(np->n_wcred);

	cache_purge(vp);
	/*
	 * NOTE(review): n_commitlock is only torn down for regular files;
	 * presumably it is only initialized for VREG vnodes elsewhere --
	 * confirm against the code that creates it.
	 */
	if (vp->v_type == VREG) {
		mutex_destroy(&np->n_commitlock);
	}
	mutex_destroy(&np->n_attrlock);
	genfs_node_destroy(vp);
	pool_put(&nfs_node_pool, np);
	vp->v_data = NULL;
	return (0);
}
    332 
    333 void
    334 nfs_gop_size(struct vnode *vp, off_t size, off_t *eobp, int flags)
    335 {
    336 
    337 	*eobp = MAX(size, vp->v_size);
    338 }
    339 
    340 int
    341 nfs_gop_alloc(struct vnode *vp, off_t off, off_t len, int flags,
    342     kauth_cred_t cred)
    343 {
    344 
    345 	return 0;
    346 }
    347 
    348 int
    349 nfs_gop_write(struct vnode *vp, struct vm_page **pgs, int npages, int flags)
    350 {
    351 	int i;
    352 
    353 	for (i = 0; i < npages; i++) {
    354 		pmap_page_protect(pgs[i], VM_PROT_READ);
    355 	}
    356 	return genfs_gop_write(vp, pgs, npages, flags);
    357 }
    358 
/*
 * Remove a silly file that was rename'd earlier.
 * Runs on nfs_sillyworkq with a reference (but no lock) held on the
 * parent directory vnode; frees the sillyrename record when done.
 */
static void
nfs_sillyworker(struct work *work, void *arg)
{
	struct sillyrename *sp;
	int error;

	/*
	 * NOTE(review): this cast assumes s_work is positioned so the
	 * work pointer maps back to the sillyrename -- confirm against
	 * the struct declaration.
	 */
	sp = (struct sillyrename *)work;
	error = vn_lock(sp->s_dvp, LK_EXCLUSIVE);
	if (error || sp->s_dvp->v_data == NULL) {
		/* Directory gone (reclaimed) or lock failed. */
		/* XXX should recover */
		printf("%s: vp=%p error=%d\n", __func__, sp->s_dvp, error);
		if (error == 0) {
			/* Locked but reclaimed: unlock and release. */
			vput(sp->s_dvp);
		} else {
			/* Never acquired the lock: just drop the reference. */
			vrele(sp->s_dvp);
		}
	} else {
		/* Remove the silly-renamed file via nfs_removeit(). */
		nfs_removeit(sp);
		vput(sp->s_dvp);
	}
	kauth_cred_free(sp->s_cred);
	kmem_free(sp, sizeof(*sp));
}
    385