/*	$NetBSD: nfs_srvcache.c,v 1.29 2003/11/20 16:17:25 yamt Exp $	*/

/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Rick Macklem at The University of Guelph.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)nfs_srvcache.c	8.3 (Berkeley) 3/30/95
 */

/*
 * Reference: Chet Juszczak, "Improving the Performance and Correctness
 *		of an NFS Server", in Proc. Winter 1989 USENIX Conference,
 *		pages 53-63. San Diego, February 1989.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: nfs_srvcache.c,v 1.29 2003/11/20 16:17:25 yamt Exp $");

#include "opt_iso.h"

#include <sys/param.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/pool.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/socket.h>
#include <sys/socketvar.h>

#include <netinet/in.h>
#ifdef ISO
#include <netiso/iso.h>
#endif
#include <nfs/nfsm_subs.h>
#include <nfs/rpcv2.h>
#include <nfs/nfsproto.h>
#include <nfs/nfs.h>
#include <nfs/nfsrvcache.h>
#include <nfs/nqnfs.h>
#include <nfs/nfs_var.h>

extern struct nfsstats nfsstats;
extern const int nfsv2_procid[NFS_NPROCS];
long numnfsrvcache, desirednfsrvcache = NFSRVCACHESIZ;
struct pool nfs_reqcache_pool;

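/*
 * The reply cache is organized as a hash table keyed on the request's
 * xid (folded with its top byte by NFSRCHASH below) plus an LRU list
 * used to recycle entries once desirednfsrvcache of them exist.  Both
 * lists, and the flags of the entries on them, are protected by
 * nfsrv_reqcache_lock; individual entries are busy-locked with
 * RC_LOCKED/RC_WANTED while they are examined or updated.
 */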
#define	NFSRCHASH(xid) \
	(&nfsrvhashtbl[((xid) + ((xid) >> 24)) & nfsrvhash])
LIST_HEAD(nfsrvhash, nfsrvcache) *nfsrvhashtbl;
TAILQ_HEAD(nfsrvlru, nfsrvcache) nfsrvlruhead;
struct simplelock nfsrv_reqcache_lock = SIMPLELOCK_INITIALIZER;
u_long nfsrvhash;

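/*
 * Each entry records the address the request came from: an IPv4 address
 * is stored directly (RC_INETADDR), any other family keeps a copy of the
 * sockaddr mbuf (RC_NAM).  NETFAMILY() recovers the address family from
 * those flags for netaddr_match().
 */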
#define	NETFAMILY(rp) \
		(((rp)->rc_flag & RC_INETADDR) ? AF_INET : AF_ISO)

static struct nfsrvcache *nfsrv_lookupcache(struct nfsrv_descript *nd);
static void nfsrv_unlockcache(struct nfsrvcache *rp);

/*
 * Static array, indexed by NFS procedure number, that marks which RPCs
 * are nonidempotent
 */
const int nonidempotent[NFS_NPROCS] = {
	FALSE,
	FALSE,
	TRUE,
	FALSE,
	FALSE,
	FALSE,
	FALSE,
	TRUE,
	TRUE,
	TRUE,
	TRUE,
	TRUE,
	TRUE,
	TRUE,
	TRUE,
	TRUE,
	FALSE,
	FALSE,
	FALSE,
	FALSE,
	FALSE,
	FALSE,
	FALSE,
	FALSE,
	FALSE,
	FALSE,
};

/* True iff the RPC reply is an NFS status ONLY! */
static const int nfsv2_repstat[NFS_NPROCS] = {
	FALSE,
	FALSE,
	FALSE,
	FALSE,
	FALSE,
	FALSE,
	FALSE,
	FALSE,
	FALSE,
	FALSE,
	TRUE,
	TRUE,
	TRUE,
	TRUE,
	FALSE,
	TRUE,
	FALSE,
	FALSE,
};

/*
 * Initialize the server request cache data structures
 */
void
nfsrv_initcache()
{

	nfsrvhashtbl = hashinit(desirednfsrvcache, HASH_LIST, M_NFSD,
	    M_WAITOK, &nfsrvhash);
	TAILQ_INIT(&nfsrvlruhead);
	pool_init(&nfs_reqcache_pool, sizeof(struct nfsrvcache), 0, 0, 0,
	    "nfsreqcachepl", &pool_allocator_nointr);
}

/*
 * Look up the cache entry for a request and lock it.  Must be called
 * with nfsrv_reqcache_lock held.  Returns NULL if there is no matching
 * entry; otherwise the entry is returned with RC_LOCKED set.
 */
static struct nfsrvcache *
nfsrv_lookupcache(nd)
	struct nfsrv_descript *nd;
{
	struct nfsrvcache *rp;

	LOCK_ASSERT(simple_lock_held(&nfsrv_reqcache_lock));

loop:
	LIST_FOREACH(rp, NFSRCHASH(nd->nd_retxid), rc_hash) {
		if (nd->nd_retxid == rp->rc_xid &&
		    nd->nd_procnum == rp->rc_proc &&
		    netaddr_match(NETFAMILY(rp), &rp->rc_haddr, nd->nd_nam)) {
			if ((rp->rc_flag & RC_LOCKED) != 0) {
				rp->rc_flag |= RC_WANTED;
				(void) ltsleep(rp, PZERO - 1, "nfsrc", 0,
				    &nfsrv_reqcache_lock);
				goto loop;
			}
			rp->rc_flag |= RC_LOCKED;
			break;
		}
	}

	return rp;
}

/*
 * Unlock a cache entry and wake up anyone waiting for it.  Must be
 * called with nfsrv_reqcache_lock held.
 */
static void
nfsrv_unlockcache(rp)
	struct nfsrvcache *rp;
{

	LOCK_ASSERT(simple_lock_held(&nfsrv_reqcache_lock));

	rp->rc_flag &= ~RC_LOCKED;
	if (rp->rc_flag & RC_WANTED) {
		rp->rc_flag &= ~RC_WANTED;
		wakeup(rp);
	}
}

/*
 * Look for the request in the cache.
 * If found then
 *    return the action to take and, when one was saved, the cached reply
 * else
 *    insert it in the cache
 *
 * The rules are as follows:
 * - if the request is still in progress, return RC_DROPIT
 * - if it has completed and a reply was cached, return RC_REPLY with a
 *   copy of that reply
 * - otherwise return RC_DOIT so the request is executed (again)
 * A hit also moves the entry to the end of the LRU list.
 */
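/*
 * Rough sketch of how the nfsd service loop (nfssvc_nfsd() in
 * nfs_syscalls.c) is expected to drive this cache; simplified, not the
 * actual caller:
 *
 *	switch (nfsrv_getcache(nd, slp, &mreq)) {
 *	case RC_DOIT:
 *		error = (*(nfsrv3_procs[nd->nd_procnum]))(nd, slp, p, &mreq);
 *		nfsrv_updatecache(nd, TRUE, mreq);
 *		break;			(then send mreq)
 *	case RC_REPLY:
 *		break;			(send the cached mreq as-is)
 *	case RC_DROPIT:
 *		break;			(discard the duplicate request)
 *	}
 */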
int
nfsrv_getcache(nd, slp, repp)
	struct nfsrv_descript *nd;
	struct nfssvc_sock *slp;
	struct mbuf **repp;
{
	struct nfsrvcache *rp, *rpdup;
	struct mbuf *mb;
	struct sockaddr_in *saddr;
	caddr_t bpos;
	int ret;

	/*
	 * Don't cache requests that arrived over a reliable transport;
	 * nd_nam2 is only set for datagram sockets.
	 * (Maybe we should for the case of a reconnect, but ...)
	 */
	if (!nd->nd_nam2)
		return RC_DOIT;
	simple_lock(&nfsrv_reqcache_lock);
	rp = nfsrv_lookupcache(nd);
	if (rp) {
		simple_unlock(&nfsrv_reqcache_lock);
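		/*
		 * Cache hit (or a duplicate entry found below): the entry
		 * is busy-locked and nfsrv_reqcache_lock has been dropped.
		 * Decide what to do from the entry's state and cached reply.
		 */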
found:
		/* If not at end of LRU chain, move it there */
		if (TAILQ_NEXT(rp, rc_lru)) { /* racy but ok */
			simple_lock(&nfsrv_reqcache_lock);
			TAILQ_REMOVE(&nfsrvlruhead, rp, rc_lru);
			TAILQ_INSERT_TAIL(&nfsrvlruhead, rp, rc_lru);
			simple_unlock(&nfsrv_reqcache_lock);
		}
		if (rp->rc_state == RC_UNUSED)
			panic("nfsrv cache");
		if (rp->rc_state == RC_INPROG) {
			nfsstats.srvcache_inproghits++;
			ret = RC_DROPIT;
		} else if (rp->rc_flag & RC_REPSTATUS) {
			nfsstats.srvcache_nonidemdonehits++;
			nfs_rephead(0, nd, slp, rp->rc_status,
			   0, (u_quad_t *)0, repp, &mb, &bpos);
			ret = RC_REPLY;
		} else if (rp->rc_flag & RC_REPMBUF) {
			nfsstats.srvcache_nonidemdonehits++;
			*repp = m_copym(rp->rc_reply, 0, M_COPYALL,
					M_WAIT);
			ret = RC_REPLY;
		} else {
			nfsstats.srvcache_idemdonehits++;
			rp->rc_state = RC_INPROG;
			ret = RC_DOIT;
		}
		simple_lock(&nfsrv_reqcache_lock);
		nfsrv_unlockcache(rp);
		simple_unlock(&nfsrv_reqcache_lock);
		return ret;
	}
	nfsstats.srvcache_misses++;
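	/*
	 * Cache miss: allocate a fresh entry while we are below
	 * desirednfsrvcache, otherwise recycle the least recently used
	 * entry.  Either way nfsrv_reqcache_lock is dropped for a while,
	 * so a duplicate entry may appear; that is re-checked below
	 * before the new entry is inserted.
	 */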
	if (numnfsrvcache < desirednfsrvcache) {
		numnfsrvcache++;
		simple_unlock(&nfsrv_reqcache_lock);
		rp = pool_get(&nfs_reqcache_pool, PR_WAITOK);
		memset(rp, 0, sizeof *rp);
		rp->rc_flag = RC_LOCKED;
	} else {
		rp = TAILQ_FIRST(&nfsrvlruhead);
		while ((rp->rc_flag & RC_LOCKED) != 0) {
			rp->rc_flag |= RC_WANTED;
			(void) ltsleep(rp, PZERO-1, "nfsrc", 0,
			    &nfsrv_reqcache_lock);
			rp = TAILQ_FIRST(&nfsrvlruhead);
		}
		rp->rc_flag |= RC_LOCKED;
		LIST_REMOVE(rp, rc_hash);
		TAILQ_REMOVE(&nfsrvlruhead, rp, rc_lru);
		simple_unlock(&nfsrv_reqcache_lock);
		if (rp->rc_flag & RC_REPMBUF)
			m_freem(rp->rc_reply);
		if (rp->rc_flag & RC_NAM)
			(void) m_free(rp->rc_nam);
		rp->rc_flag &= (RC_LOCKED | RC_WANTED);
	}
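	/* Fill in the new (or recycled) entry for the request in progress. */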
	rp->rc_state = RC_INPROG;
	rp->rc_xid = nd->nd_retxid;
	saddr = mtod(nd->nd_nam, struct sockaddr_in *);
	switch (saddr->sin_family) {
	case AF_INET:
		rp->rc_flag |= RC_INETADDR;
		rp->rc_inetaddr = saddr->sin_addr.s_addr;
		break;
	case AF_ISO:
	default:
		rp->rc_flag |= RC_NAM;
		rp->rc_nam = m_copym(nd->nd_nam, 0, M_COPYALL, M_WAIT);
		break;
	}
	rp->rc_proc = nd->nd_procnum;
	simple_lock(&nfsrv_reqcache_lock);
	rpdup = nfsrv_lookupcache(nd);
	if (rpdup != NULL) {
		/*
		 * Another thread created an entry for this request while
		 * the lock was dropped; toss ours and use the existing one.
		 */
		simple_unlock(&nfsrv_reqcache_lock);
		pool_put(&nfs_reqcache_pool, rp);
		rp = rpdup;
		goto found;
	}
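	/* No duplicate; hook the new entry onto the hash chain and LRU tail. */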
	TAILQ_INSERT_TAIL(&nfsrvlruhead, rp, rc_lru);
	LIST_INSERT_HEAD(NFSRCHASH(nd->nd_retxid), rp, rc_hash);
	nfsrv_unlockcache(rp);
	simple_unlock(&nfsrv_reqcache_lock);
	return RC_DOIT;
}

/*
 * Update a request cache entry after the RPC has completed
 */
void
nfsrv_updatecache(nd, repvalid, repmbuf)
	struct nfsrv_descript *nd;
	int repvalid;
	struct mbuf *repmbuf;
{
	struct nfsrvcache *rp;

	if (!nd->nd_nam2)
		return;
	simple_lock(&nfsrv_reqcache_lock);
	rp = nfsrv_lookupcache(nd);
	simple_unlock(&nfsrv_reqcache_lock);
	if (rp) {
		rp->rc_state = RC_DONE;
		/*
		 * If we have a valid reply, save it for nonidempotent RPCs:
		 * just the status code when that is the whole (NFSv2) reply,
		 * otherwise a copy of the reply mbuf chain.
		 */
		if (repvalid && nonidempotent[nd->nd_procnum]) {
			if ((nd->nd_flag & ND_NFSV3) == 0 &&
			  nfsv2_repstat[nfsv2_procid[nd->nd_procnum]]) {
				rp->rc_status = nd->nd_repstat;
				rp->rc_flag |= RC_REPSTATUS;
			} else {
				rp->rc_reply = m_copym(repmbuf,
					0, M_COPYALL, M_WAIT);
				rp->rc_flag |= RC_REPMBUF;
			}
		}
		simple_lock(&nfsrv_reqcache_lock);
		nfsrv_unlockcache(rp);
		simple_unlock(&nfsrv_reqcache_lock);
	}
}

/*
 * Clean out the cache. Called when the last nfsd terminates.
 */
void
nfsrv_cleancache()
{
	struct nfsrvcache *rp, *nextrp;

	simple_lock(&nfsrv_reqcache_lock);
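	/* Every entry is on the LRU list; walk it and free them all. */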
	for (rp = TAILQ_FIRST(&nfsrvlruhead); rp != 0; rp = nextrp) {
		nextrp = TAILQ_NEXT(rp, rc_lru);
		LIST_REMOVE(rp, rc_hash);
		TAILQ_REMOVE(&nfsrvlruhead, rp, rc_lru);
		pool_put(&nfs_reqcache_pool, rp);
	}
	numnfsrvcache = 0;
	simple_unlock(&nfsrv_reqcache_lock);
}