/*	$NetBSD: nfs_srvcache.c,v 1.31.12.3 2007/09/03 14:44:19 yamt Exp $	*/

/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Rick Macklem at The University of Guelph.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)nfs_srvcache.c	8.3 (Berkeley) 3/30/95
 */

/*
 * Reference: Chet Juszczak, "Improving the Performance and Correctness
 *	of an NFS Server", in Proc. Winter 1989 USENIX Conference,
 *	pages 53-63. San Diego, February 1989.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: nfs_srvcache.c,v 1.31.12.3 2007/09/03 14:44:19 yamt Exp $");

#include "opt_iso.h"

#include <sys/param.h>
#include <sys/vnode.h>
#include <sys/condvar.h>
#include <sys/mount.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/pool.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/socket.h>
#include <sys/socketvar.h>

#include <netinet/in.h>
#ifdef ISO
#include <netiso/iso.h>
#endif
#include <nfs/nfsm_subs.h>
#include <nfs/rpcv2.h>
#include <nfs/nfsproto.h>
#include <nfs/nfs.h>
#include <nfs/nfsrvcache.h>
#include <nfs/nfs_var.h>

extern struct nfsstats nfsstats;
extern const int nfsv2_procid[NFS_NPROCS];
long numnfsrvcache, desirednfsrvcache = NFSRVCACHESIZ;
struct pool nfs_reqcache_pool;

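/*
 * Hash a request's xid into the reply cache table: the high byte of the
 * xid is folded into the low bits before masking with nfsrvhash.
 */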
#define	NFSRCHASH(xid) \
	(&nfsrvhashtbl[((xid) + ((xid) >> 24)) & nfsrvhash])
LIST_HEAD(nfsrvhash, nfsrvcache) *nfsrvhashtbl;
TAILQ_HEAD(nfsrvlru, nfsrvcache) nfsrvlruhead;
kmutex_t nfsrv_reqcache_lock;
u_long nfsrvhash;

#if defined(MBUFTRACE)
static struct mowner nfsd_cache_mowner = MOWNER_INIT("nfsd", "cache");
#endif /* defined(MBUFTRACE) */

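/*
 * A cache entry records its client either as a raw IPv4 address
 * (RC_INETADDR) or as a copy of the sockaddr mbuf (RC_NAM); NETFAMILY
 * recovers the address family for netaddr_match().
 */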
#define	NETFAMILY(rp) \
	(((rp)->rc_flag & RC_INETADDR) ? AF_INET : AF_ISO)

static struct nfsrvcache *nfsrv_lookupcache(struct nfsrv_descript *nd);
static void nfsrv_unlockcache(struct nfsrvcache *rp);

/*
 * Static array that defines which nfs rpc's are nonidempotent
 */
const int nonidempotent[NFS_NPROCS] = {
	false,	/* NULL */
	false,	/* GETATTR */
	true,	/* SETATTR */
	false,	/* LOOKUP */
	false,	/* ACCESS */
	false,	/* READLINK */
	false,	/* READ */
	true,	/* WRITE */
	true,	/* CREATE */
	true,	/* MKDIR */
	true,	/* SYMLINK */
	true,	/* MKNOD */
	true,	/* REMOVE */
	true,	/* RMDIR */
	true,	/* RENAME */
	true,	/* LINK */
	false,	/* READDIR */
	false,	/* READDIRPLUS */
	false,	/* FSSTAT */
	false,	/* FSINFO */
	false,	/* PATHCONF */
	false,	/* COMMIT */
	false,	/* NOOP */
};

/* True iff the rpc reply is an nfs status ONLY! */
static const int nfsv2_repstat[NFS_NPROCS] = {
	false,	/* NULL */
	false,	/* GETATTR */
	false,	/* SETATTR */
	false,	/* NOOP */
	false,	/* LOOKUP */
	false,	/* READLINK */
	false,	/* READ */
	false,	/* Obsolete WRITECACHE */
	false,	/* WRITE */
	false,	/* CREATE */
	true,	/* REMOVE */
	true,	/* RENAME */
	true,	/* LINK */
	true,	/* SYMLINK */
	false,	/* MKDIR */
	true,	/* RMDIR */
	false,	/* READDIR */
	false,	/* STATFS */
};

static void
cleanentry(struct nfsrvcache *rp)
{

	if ((rp->rc_flag & RC_REPMBUF) != 0) {
		m_freem(rp->rc_reply);
	}
	if ((rp->rc_flag & RC_NAM) != 0) {
		m_free(rp->rc_nam);
	}
	rp->rc_flag &= ~(RC_REPSTATUS|RC_REPMBUF);
}

/*
 * Initialize the server request cache list
 */
void
nfsrv_initcache()
{

	mutex_init(&nfsrv_reqcache_lock, MUTEX_DRIVER, IPL_NONE);
	nfsrvhashtbl = hashinit(desirednfsrvcache, HASH_LIST, M_NFSD,
	    M_WAITOK, &nfsrvhash);
	TAILQ_INIT(&nfsrvlruhead);
	pool_init(&nfs_reqcache_pool, sizeof(struct nfsrvcache), 0, 0, 0,
	    "nfsreqcachepl", &pool_allocator_nointr, IPL_NONE);
	MOWNER_ATTACH(&nfsd_cache_mowner);
}

/*
 * Lookup a cache and lock it
 */
static struct nfsrvcache *
nfsrv_lookupcache(nd)
	struct nfsrv_descript *nd;
{
	struct nfsrvcache *rp;

	KASSERT(mutex_owned(&nfsrv_reqcache_lock));

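	/*
	 * An entry found RC_LOCKED is in use by another nfsd; wait on its
	 * condvar and rescan from the head of the chain, since the list
	 * may change while we sleep on nfsrv_reqcache_lock.
	 */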
loop:
	LIST_FOREACH(rp, NFSRCHASH(nd->nd_retxid), rc_hash) {
		if (nd->nd_retxid == rp->rc_xid &&
		    nd->nd_procnum == rp->rc_proc &&
		    netaddr_match(NETFAMILY(rp), &rp->rc_haddr, nd->nd_nam)) {
			if ((rp->rc_flag & RC_LOCKED) != 0) {
				cv_wait(&rp->rc_cv, &nfsrv_reqcache_lock);
				goto loop;
			}
			rp->rc_flag |= RC_LOCKED;
			break;
		}
	}

	return rp;
}

/*
 * Unlock a cache
 */
static void
nfsrv_unlockcache(rp)
	struct nfsrvcache *rp;
{

	KASSERT(mutex_owned(&nfsrv_reqcache_lock));

	rp->rc_flag &= ~RC_LOCKED;
	cv_broadcast(&rp->rc_cv);
}
/*
 * Look for the request in the cache.
 * If found, then
 *    return the action and, optionally, the cached reply;
 * else
 *    insert the request in the cache.
 *
 * The rules are as follows:
 * - if in progress, DROP the request
 * - if completed within DELAY of the current time, DROP it
 * - if completed longer ago, return REPLY if the reply was cached,
 *   otherwise return DOIT
 * The entry is also moved to (or added at) the end of the lru list.
 */
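/*
 * Illustrative caller sketch (not the actual nfsd request loop; the
 * service and send steps below are placeholders):
 *
 *	switch (nfsrv_getcache(nd, slp, &mreq)) {
 *	case RC_DOIT:
 *		... run the RPC, building the reply in mreq ...
 *		nfsrv_updatecache(nd, replyvalid, mreq);
 *		... send mreq ...
 *		break;
 *	case RC_REPLY:
 *		... send the cached reply returned in mreq ...
 *		break;
 *	case RC_DROPIT:
 *		... drop the request without replying ...
 *		break;
 *	}
 */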
int
nfsrv_getcache(nd, slp, repp)
	struct nfsrv_descript *nd;
	struct nfssvc_sock *slp;
	struct mbuf **repp;
{
	struct nfsrvcache *rp, *rpdup;
	struct mbuf *mb;
	struct sockaddr_in *saddr;
	char *bpos;
	int ret;

	mutex_enter(&nfsrv_reqcache_lock);
	rp = nfsrv_lookupcache(nd);
	if (rp) {
		mutex_exit(&nfsrv_reqcache_lock);
found:
		/* If not at end of LRU chain, move it there */
		if (TAILQ_NEXT(rp, rc_lru)) { /* racy but ok */
			mutex_enter(&nfsrv_reqcache_lock);
			TAILQ_REMOVE(&nfsrvlruhead, rp, rc_lru);
			TAILQ_INSERT_TAIL(&nfsrvlruhead, rp, rc_lru);
			mutex_exit(&nfsrv_reqcache_lock);
		}
		if (rp->rc_state == RC_UNUSED)
			panic("nfsrv cache");
		if (rp->rc_state == RC_INPROG) {
			nfsstats.srvcache_inproghits++;
			ret = RC_DROPIT;
		} else if (rp->rc_flag & RC_REPSTATUS) {
			nfsstats.srvcache_nonidemdonehits++;
			nfs_rephead(0, nd, slp, rp->rc_status,
			    0, (u_quad_t *)0, repp, &mb, &bpos);
			ret = RC_REPLY;
		} else if (rp->rc_flag & RC_REPMBUF) {
			nfsstats.srvcache_nonidemdonehits++;
			*repp = m_copym(rp->rc_reply, 0, M_COPYALL,
			    M_WAIT);
			ret = RC_REPLY;
		} else {
			nfsstats.srvcache_idemdonehits++;
			rp->rc_state = RC_INPROG;
			ret = RC_DOIT;
		}
		mutex_enter(&nfsrv_reqcache_lock);
		nfsrv_unlockcache(rp);
		mutex_exit(&nfsrv_reqcache_lock);
		return ret;
	}
	nfsstats.srvcache_misses++;
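	/*
	 * Cache miss: allocate a fresh entry while under the limit,
	 * otherwise recycle the least recently used entry, waiting for
	 * it to be unlocked first if necessary.
	 */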
	if (numnfsrvcache < desirednfsrvcache) {
		numnfsrvcache++;
		mutex_exit(&nfsrv_reqcache_lock);
		rp = pool_get(&nfs_reqcache_pool, PR_WAITOK);
		memset(rp, 0, sizeof *rp);
		cv_init(&rp->rc_cv, "nfsdrc");
		rp->rc_flag = RC_LOCKED;
	} else {
		rp = TAILQ_FIRST(&nfsrvlruhead);
		while ((rp->rc_flag & RC_LOCKED) != 0) {
			rp->rc_flag |= RC_WANTED;
			cv_wait(&rp->rc_cv, &nfsrv_reqcache_lock);
			rp = TAILQ_FIRST(&nfsrvlruhead);
		}
		rp->rc_flag |= RC_LOCKED;
		LIST_REMOVE(rp, rc_hash);
		TAILQ_REMOVE(&nfsrvlruhead, rp, rc_lru);
		mutex_exit(&nfsrv_reqcache_lock);
		cleanentry(rp);
		rp->rc_flag &= (RC_LOCKED | RC_WANTED);
	}
	rp->rc_state = RC_INPROG;
	rp->rc_xid = nd->nd_retxid;
	saddr = mtod(nd->nd_nam, struct sockaddr_in *);
	switch (saddr->sin_family) {
	case AF_INET:
		rp->rc_flag |= RC_INETADDR;
		rp->rc_inetaddr = saddr->sin_addr.s_addr;
		break;
	case AF_ISO:
	default:
		rp->rc_flag |= RC_NAM;
		rp->rc_nam = m_copym(nd->nd_nam, 0, M_COPYALL, M_WAIT);
		m_claimm(rp->rc_nam, &nfsd_cache_mowner);
		break;
	}
	rp->rc_proc = nd->nd_procnum;
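	/*
	 * nfsrv_reqcache_lock was dropped while this entry was being
	 * set up, so look the request up again; another nfsd may have
	 * inserted an entry for it in the meantime.
	 */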
	mutex_enter(&nfsrv_reqcache_lock);
	rpdup = nfsrv_lookupcache(nd);
	if (rpdup != NULL) {
		/*
		 * other thread made duplicate cache entry.
		 */
		mutex_exit(&nfsrv_reqcache_lock);
		cv_destroy(&rp->rc_cv);
		pool_put(&nfs_reqcache_pool, rp);
		rp = rpdup;
		goto found;
	}
	TAILQ_INSERT_TAIL(&nfsrvlruhead, rp, rc_lru);
	LIST_INSERT_HEAD(NFSRCHASH(nd->nd_retxid), rp, rc_hash);
	nfsrv_unlockcache(rp);
	mutex_exit(&nfsrv_reqcache_lock);
	return RC_DOIT;
}

/*
 * Update a request cache entry after the rpc has been done
 */
void
nfsrv_updatecache(nd, repvalid, repmbuf)
	struct nfsrv_descript *nd;
	int repvalid;
	struct mbuf *repmbuf;
{
	struct nfsrvcache *rp;

	mutex_enter(&nfsrv_reqcache_lock);
	rp = nfsrv_lookupcache(nd);
	mutex_exit(&nfsrv_reqcache_lock);
	if (rp) {
		cleanentry(rp);
		rp->rc_state = RC_DONE;
		/*
		 * If we have a valid reply, update the status and save
		 * the reply for non-idempotent rpc's.
		 */
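		/*
		 * NFSv2 procedures whose reply carries only a status word
		 * need just the status cached; everything else keeps a
		 * copy of the complete reply mbuf chain.
		 */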
		if (repvalid && nonidempotent[nd->nd_procnum]) {
			if ((nd->nd_flag & ND_NFSV3) == 0 &&
			    nfsv2_repstat[nfsv2_procid[nd->nd_procnum]]) {
				rp->rc_status = nd->nd_repstat;
				rp->rc_flag |= RC_REPSTATUS;
			} else {
				rp->rc_reply = m_copym(repmbuf,
				    0, M_COPYALL, M_WAIT);
				m_claimm(rp->rc_reply, &nfsd_cache_mowner);
				rp->rc_flag |= RC_REPMBUF;
			}
		}
		mutex_enter(&nfsrv_reqcache_lock);
		nfsrv_unlockcache(rp);
		mutex_exit(&nfsrv_reqcache_lock);
	}
}

/*
 * Clean out the cache. Called when the last nfsd terminates.
 */
void
nfsrv_cleancache()
{
	struct nfsrvcache *rp, *nextrp;

	mutex_enter(&nfsrv_reqcache_lock);
	for (rp = TAILQ_FIRST(&nfsrvlruhead); rp != 0; rp = nextrp) {
		nextrp = TAILQ_NEXT(rp, rc_lru);
		LIST_REMOVE(rp, rc_hash);
		TAILQ_REMOVE(&nfsrvlruhead, rp, rc_lru);
		KASSERT((rp->rc_flag & (RC_LOCKED|RC_WANTED)) == 0);
		cleanentry(rp);
		cv_destroy(&rp->rc_cv);
		pool_put(&nfs_reqcache_pool, rp);
	}
	numnfsrvcache = 0;
	mutex_exit(&nfsrv_reqcache_lock);
}