/*	$NetBSD: nfs_srvcache.c,v 1.45 2009/03/15 17:20:10 cegger Exp $	*/

/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Rick Macklem at The University of Guelph.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)nfs_srvcache.c	8.3 (Berkeley) 3/30/95
 */

/*
 * Reference: Chet Juszczak, "Improving the Performance and Correctness
 *		of an NFS Server", in Proc. Winter 1989 USENIX Conference,
 *		pages 53-63. San Diego, February 1989.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: nfs_srvcache.c,v 1.45 2009/03/15 17:20:10 cegger Exp $");

#include <sys/param.h>
#include <sys/vnode.h>
#include <sys/condvar.h>
#include <sys/mount.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/pool.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/socket.h>
#include <sys/socketvar.h>

#include <netinet/in.h>
#include <nfs/nfsm_subs.h>
#include <nfs/rpcv2.h>
#include <nfs/nfsproto.h>
#include <nfs/nfs.h>
#include <nfs/nfsrvcache.h>
#include <nfs/nfs_var.h>

extern struct nfsstats nfsstats;
extern const int nfsv2_procid[NFS_NPROCS];
long numnfsrvcache, desirednfsrvcache = NFSRVCACHESIZ;
struct pool nfs_reqcache_pool;

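/*
 * Cache entries are kept on a hash table keyed by xid and on a global
 * LRU list used for reclamation; both are protected by
 * nfsrv_reqcache_lock.
 */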
#define	NFSRCHASH(xid) \
	(&nfsrvhashtbl[((xid) + ((xid) >> 24)) & nfsrvhash])
LIST_HEAD(nfsrvhash, nfsrvcache) *nfsrvhashtbl;
TAILQ_HEAD(nfsrvlru, nfsrvcache) nfsrvlruhead;
kmutex_t nfsrv_reqcache_lock;
u_long nfsrvhash;

#if defined(MBUFTRACE)
static struct mowner nfsd_cache_mowner = MOWNER_INIT("nfsd", "cache");
#endif /* defined(MBUFTRACE) */

#define	NETFAMILY(rp) \
		(((rp)->rc_flags & RC_INETADDR) ? AF_INET : -1)

static struct nfsrvcache *nfsrv_lookupcache(struct nfsrv_descript *nd);
static void nfsrv_unlockcache(struct nfsrvcache *rp);

/*
 * Static array that defines which NFS RPCs are nonidempotent,
 * indexed by NFSv3 procedure number.
 */
const int nonidempotent[NFS_NPROCS] = {
	false,	/* NULL */
	false,	/* GETATTR */
	true,	/* SETATTR */
	false,	/* LOOKUP */
	false,	/* ACCESS */
	false,	/* READLINK */
	false,	/* READ */
	true,	/* WRITE */
	true,	/* CREATE */
	true,	/* MKDIR */
	true,	/* SYMLINK */
	true,	/* MKNOD */
	true,	/* REMOVE */
	true,	/* RMDIR */
	true,	/* RENAME */
	true,	/* LINK */
	false,	/* READDIR */
	false,	/* READDIRPLUS */
	false,	/* FSSTAT */
	false,	/* FSINFO */
	false,	/* PATHCONF */
	false,	/* COMMIT */
	false,	/* NOOP */
};

/*
 * True iff the RPC reply is an NFS status only.
 * Indexed by NFSv2 procedure number.
 */
static const int nfsv2_repstat[NFS_NPROCS] = {
	false,	/* NULL */
	false,	/* GETATTR */
	false,	/* SETATTR */
	false,	/* NOOP */
	false,	/* LOOKUP */
	false,	/* READLINK */
	false,	/* READ */
	false,	/* Obsolete WRITECACHE */
	false,	/* WRITE */
	false,	/* CREATE */
	true,	/* REMOVE */
	true,	/* RENAME */
	true,	/* LINK */
	true,	/* SYMLINK */
	false,	/* MKDIR */
	true,	/* RMDIR */
	false,	/* READDIR */
	false,	/* STATFS */
};

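/*
 * Release any cached reply (mbuf chain) and copied address attached to
 * a cache entry, and clear the reply flags.
 */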
static void
cleanentry(struct nfsrvcache *rp)
{

	if ((rp->rc_flags & RC_REPMBUF) != 0) {
		m_freem(rp->rc_reply);
	}
	if ((rp->rc_flags & RC_NAM) != 0) {
		m_free(rp->rc_nam);
	}
	rp->rc_flags &= ~(RC_REPSTATUS|RC_REPMBUF);
}

/*
 * Initialize the server request cache
 */
void
nfsrv_initcache(void)
{

	mutex_init(&nfsrv_reqcache_lock, MUTEX_DEFAULT, IPL_NONE);
	nfsrvhashtbl = hashinit(desirednfsrvcache, HASH_LIST, true,
	    &nfsrvhash);
	TAILQ_INIT(&nfsrvlruhead);
	pool_init(&nfs_reqcache_pool, sizeof(struct nfsrvcache), 0, 0, 0,
	    "nfsreqcachepl", &pool_allocator_nointr, IPL_NONE);
	MOWNER_ATTACH(&nfsd_cache_mowner);
}

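/*
 * Tear down the server request cache and release its resources.
 */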
void
nfsrv_finicache(void)
{

	nfsrv_cleancache();
	KASSERT(TAILQ_EMPTY(&nfsrvlruhead));
	pool_destroy(&nfs_reqcache_pool);
	hashdone(nfsrvhashtbl, HASH_LIST, nfsrvhash);
	MOWNER_DETACH(&nfsd_cache_mowner);
	mutex_destroy(&nfsrv_reqcache_lock);
}

/*
 * Look up the cache entry for a request and lock it
 */
static struct nfsrvcache *
nfsrv_lookupcache(struct nfsrv_descript *nd)
{
	struct nfsrvcache *rp;

	KASSERT(mutex_owned(&nfsrv_reqcache_lock));

loop:
	LIST_FOREACH(rp, NFSRCHASH(nd->nd_retxid), rc_hash) {
		if (nd->nd_retxid == rp->rc_xid &&
		    nd->nd_procnum == rp->rc_proc &&
		    netaddr_match(NETFAMILY(rp), &rp->rc_haddr, nd->nd_nam)) {
			if ((rp->rc_gflags & RC_G_LOCKED) != 0) {
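				/*
				 * The entry is locked by another thread;
				 * wait for it to be unlocked.  The hash
				 * chain may change while we sleep, so
				 * restart the scan.
				 */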
				cv_wait(&rp->rc_cv, &nfsrv_reqcache_lock);
				goto loop;
			}
			rp->rc_gflags |= RC_G_LOCKED;
			break;
		}
	}

	return rp;
}

/*
 * Unlock a cache entry and wake up any waiters
 */
static void
nfsrv_unlockcache(struct nfsrvcache *rp)
{

	KASSERT(mutex_owned(&nfsrv_reqcache_lock));

	KASSERT((rp->rc_gflags & RC_G_LOCKED) != 0);
	rp->rc_gflags &= ~RC_G_LOCKED;
	cv_broadcast(&rp->rc_cv);
}

/*
 * Look for the request in the cache.
 * If found, return the action to take and, if appropriate, the cached reply;
 * otherwise insert the request into the cache.
 *
 * The rules are as follows:
 * - if the original request is still in progress, DROP the duplicate
 * - if it completed and a reply (status or mbuf) was cached, send that
 *   REPLY again
 * - otherwise the operation is idempotent, so tell the caller to DOIT again
 * The entry is updated/added at the end of the LRU list.
 */
int
nfsrv_getcache(struct nfsrv_descript *nd, struct nfssvc_sock *slp, struct mbuf **repp)
{
	struct nfsrvcache *rp, *rpdup;
	struct mbuf *mb;
	struct sockaddr_in *saddr;
	char *bpos;
	int ret;

	mutex_enter(&nfsrv_reqcache_lock);
	rp = nfsrv_lookupcache(nd);
	if (rp) {
		mutex_exit(&nfsrv_reqcache_lock);
found:
		/* If not at end of LRU chain, move it there */
		if (TAILQ_NEXT(rp, rc_lru)) { /* racy but ok */
			mutex_enter(&nfsrv_reqcache_lock);
			TAILQ_REMOVE(&nfsrvlruhead, rp, rc_lru);
			TAILQ_INSERT_TAIL(&nfsrvlruhead, rp, rc_lru);
			mutex_exit(&nfsrv_reqcache_lock);
		}
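		/*
		 * Decide how to handle the retransmission: drop it if the
		 * original request is still being processed, resend a
		 * cached reply if we have one, otherwise let the caller
		 * redo the (idempotent) operation.
		 */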
		if (rp->rc_state == RC_UNUSED)
			panic("nfsrv cache");
		if (rp->rc_state == RC_INPROG) {
			nfsstats.srvcache_inproghits++;
			ret = RC_DROPIT;
		} else if (rp->rc_flags & RC_REPSTATUS) {
			nfsstats.srvcache_nonidemdonehits++;
			nfs_rephead(0, nd, slp, rp->rc_status,
			   0, (u_quad_t *)0, repp, &mb, &bpos);
			ret = RC_REPLY;
		} else if (rp->rc_flags & RC_REPMBUF) {
			nfsstats.srvcache_nonidemdonehits++;
			*repp = m_copym(rp->rc_reply, 0, M_COPYALL,
					M_WAIT);
			ret = RC_REPLY;
		} else {
			nfsstats.srvcache_idemdonehits++;
			rp->rc_state = RC_INPROG;
			ret = RC_DOIT;
		}
		mutex_enter(&nfsrv_reqcache_lock);
		nfsrv_unlockcache(rp);
		mutex_exit(&nfsrv_reqcache_lock);
		return ret;
	}
	nfsstats.srvcache_misses++;
	if (numnfsrvcache < desirednfsrvcache) {
		numnfsrvcache++;
		mutex_exit(&nfsrv_reqcache_lock);
		rp = pool_get(&nfs_reqcache_pool, PR_WAITOK);
		memset(rp, 0, sizeof *rp);
		cv_init(&rp->rc_cv, "nfsdrc");
		rp->rc_gflags = RC_G_LOCKED;
	} else {
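		/*
		 * The cache is full: reuse the least recently used entry.
		 * Wait until it is unlocked before unhooking it from the
		 * hash chain and LRU list.
		 */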
		rp = TAILQ_FIRST(&nfsrvlruhead);
		while ((rp->rc_gflags & RC_G_LOCKED) != 0) {
			cv_wait(&rp->rc_cv, &nfsrv_reqcache_lock);
			rp = TAILQ_FIRST(&nfsrvlruhead);
		}
		rp->rc_gflags |= RC_G_LOCKED;
		LIST_REMOVE(rp, rc_hash);
		TAILQ_REMOVE(&nfsrvlruhead, rp, rc_lru);
		mutex_exit(&nfsrv_reqcache_lock);
		cleanentry(rp);
		rp->rc_flags = 0;
	}
	rp->rc_state = RC_INPROG;
	rp->rc_xid = nd->nd_retxid;
	saddr = mtod(nd->nd_nam, struct sockaddr_in *);
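	/*
	 * Record the client's address so that retransmissions can be
	 * matched: IPv4 addresses are stored inline, anything else as a
	 * copy of the sockaddr mbuf.
	 */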
	switch (saddr->sin_family) {
	case AF_INET:
		rp->rc_flags |= RC_INETADDR;
		rp->rc_inetaddr = saddr->sin_addr.s_addr;
		break;
	default:
		rp->rc_flags |= RC_NAM;
		rp->rc_nam = m_copym(nd->nd_nam, 0, M_COPYALL, M_WAIT);
		m_claimm(rp->rc_nam, &nfsd_cache_mowner);
		break;
	}
	rp->rc_proc = nd->nd_procnum;
	mutex_enter(&nfsrv_reqcache_lock);
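	/*
	 * The lock was dropped while the new entry was being set up, so
	 * another thread may have inserted an entry for the same request
	 * in the meantime; look again before inserting ours.
	 */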
	rpdup = nfsrv_lookupcache(nd);
	if (rpdup != NULL) {
		/*
		 * Another thread made a duplicate cache entry;
		 * discard ours and use the existing one.
		 */
		KASSERT(numnfsrvcache > 0);
		numnfsrvcache--;
		mutex_exit(&nfsrv_reqcache_lock);
		cleanentry(rp);
		cv_destroy(&rp->rc_cv);
		pool_put(&nfs_reqcache_pool, rp);
		rp = rpdup;
		goto found;
	}
	TAILQ_INSERT_TAIL(&nfsrvlruhead, rp, rc_lru);
	LIST_INSERT_HEAD(NFSRCHASH(nd->nd_retxid), rp, rc_hash);
	nfsrv_unlockcache(rp);
	mutex_exit(&nfsrv_reqcache_lock);
	return RC_DOIT;
}

/*
 * Update a request cache entry after the RPC has been completed
 */
void
nfsrv_updatecache(struct nfsrv_descript *nd, int repvalid, struct mbuf *repmbuf)
{
	struct nfsrvcache *rp;

	mutex_enter(&nfsrv_reqcache_lock);
	rp = nfsrv_lookupcache(nd);
	mutex_exit(&nfsrv_reqcache_lock);
	if (rp) {
		cleanentry(rp);
		rp->rc_state = RC_DONE;
		/*
		 * If we have a valid reply, update the status and save
		 * the reply for nonidempotent RPCs.  For NFSv2 procedures
		 * whose reply is a status code only, just the status is
		 * kept; otherwise the whole reply mbuf chain is copied.
		 */
		if (repvalid && nonidempotent[nd->nd_procnum]) {
			if ((nd->nd_flag & ND_NFSV3) == 0 &&
			  nfsv2_repstat[nfsv2_procid[nd->nd_procnum]]) {
				rp->rc_status = nd->nd_repstat;
				rp->rc_flags |= RC_REPSTATUS;
			} else {
				rp->rc_reply = m_copym(repmbuf,
					0, M_COPYALL, M_WAIT);
				m_claimm(rp->rc_reply, &nfsd_cache_mowner);
				rp->rc_flags |= RC_REPMBUF;
			}
		}
		mutex_enter(&nfsrv_reqcache_lock);
		nfsrv_unlockcache(rp);
		mutex_exit(&nfsrv_reqcache_lock);
	}
}

/*
 * Clean out the cache. Called when the last nfsd terminates.
 */
void
nfsrv_cleancache(void)
{
	struct nfsrvcache *rp;

	mutex_enter(&nfsrv_reqcache_lock);
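	/* Walk the LRU list, unhooking and freeing every entry. */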
	while ((rp = TAILQ_FIRST(&nfsrvlruhead)) != NULL) {
		KASSERT((rp->rc_gflags & RC_G_LOCKED) == 0);
		LIST_REMOVE(rp, rc_hash);
		TAILQ_REMOVE(&nfsrvlruhead, rp, rc_lru);
		KASSERT(numnfsrvcache > 0);
		numnfsrvcache--;
		mutex_exit(&nfsrv_reqcache_lock);
		cleanentry(rp);
		cv_destroy(&rp->rc_cv);
		pool_put(&nfs_reqcache_pool, rp);
		mutex_enter(&nfsrv_reqcache_lock);
	}
	KASSERT(numnfsrvcache == 0);
	mutex_exit(&nfsrv_reqcache_lock);
}