/*	$NetBSD: nfs_srvcache.c,v 1.26 2003/05/21 13:56:21 yamt Exp $	*/

/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Rick Macklem at The University of Guelph.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)nfs_srvcache.c	8.3 (Berkeley) 3/30/95
 */

/*
 * Reference: Chet Juszczak, "Improving the Performance and Correctness
 *	of an NFS Server", in Proc. Winter 1989 USENIX Conference,
 *	pages 53-63. San Diego, February 1989.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: nfs_srvcache.c,v 1.26 2003/05/21 13:56:21 yamt Exp $");

#include "opt_iso.h"

#include <sys/param.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/pool.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/socket.h>
#include <sys/socketvar.h>

#include <netinet/in.h>
#ifdef ISO
#include <netiso/iso.h>
#endif
#include <nfs/nfsm_subs.h>
#include <nfs/rpcv2.h>
#include <nfs/nfsproto.h>
#include <nfs/nfs.h>
#include <nfs/nfsrvcache.h>
#include <nfs/nqnfs.h>
#include <nfs/nfs_var.h>

extern struct nfsstats nfsstats;
extern const int nfsv2_procid[NFS_NPROCS];
long numnfsrvcache, desirednfsrvcache = NFSRVCACHESIZ;
struct pool nfs_reqcache_pool;

#define	NFSRCHASH(xid) \
	(&nfsrvhashtbl[((xid) + ((xid) >> 24)) & nfsrvhash])
LIST_HEAD(nfsrvhash, nfsrvcache) *nfsrvhashtbl;
TAILQ_HEAD(nfsrvlru, nfsrvcache) nfsrvlruhead;
u_long nfsrvhash;

#define	NETFAMILY(rp) \
	(((rp)->rc_flag & RC_INETADDR) ? AF_INET : AF_ISO)
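
/*
 * Illustrative sketch only (local names below are for illustration, not
 * used by the code in this file): for a given transaction id,
 * NFSRCHASH() selects the bucket as
 *
 *	hash = (xid + (xid >> 24)) & nfsrvhash;
 *	bucket = &nfsrvhashtbl[hash];
 *
 * i.e. the high byte of the xid is folded into the low-order bits
 * before masking, so xids that differ only in their upper byte tend
 * to land on different chains.
 */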

/*
 * Static array, indexed by NFS procedure number, that marks which
 * RPCs are nonidempotent.
 */
const int nonidempotent[NFS_NPROCS] = {
	FALSE,
	FALSE,
	TRUE,
	FALSE,
	FALSE,
	FALSE,
	FALSE,
	TRUE,
	TRUE,
	TRUE,
	TRUE,
	TRUE,
	TRUE,
	TRUE,
	TRUE,
	TRUE,
	FALSE,
	FALSE,
	FALSE,
	FALSE,
	FALSE,
	FALSE,
	FALSE,
	FALSE,
	FALSE,
	FALSE,
};

/* True iff the rpc reply is an nfs status ONLY! */
static const int nfsv2_repstat[NFS_NPROCS] = {
	FALSE,
	FALSE,
	FALSE,
	FALSE,
	FALSE,
	FALSE,
	FALSE,
	FALSE,
	FALSE,
	FALSE,
	TRUE,
	TRUE,
	TRUE,
	TRUE,
	FALSE,
	TRUE,
	FALSE,
	FALSE,
};

/*
 * Initialize the server request cache list
 */
void
nfsrv_initcache()
{

	nfsrvhashtbl = hashinit(desirednfsrvcache, HASH_LIST, M_NFSD,
	    M_WAITOK, &nfsrvhash);
	TAILQ_INIT(&nfsrvlruhead);
	pool_init(&nfs_reqcache_pool, sizeof(struct nfsrvcache), 0, 0, 0,
	    "nfsreqcachepl", &pool_allocator_nointr);
}
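
/*
 * Illustrative sketch only: every cache entry is a fixed-size
 * struct nfsrvcache taken from nfs_reqcache_pool, so allocation and
 * release elsewhere in this file reduce to
 *
 *	rp = pool_get(&nfs_reqcache_pool, PR_WAITOK);
 *	...
 *	pool_put(&nfs_reqcache_pool, rp);
 *
 * and the total is bounded by desirednfsrvcache; once that many
 * entries exist, the least recently used entry is recycled instead
 * of allocating a new one.
 */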

/*
 * Look for the request in the cache
 * If found then
 *    return action and optionally reply
 * else
 *    insert it in the cache
 *
 * The rules are as follows:
 * - if in progress, return RC_DROPIT
 * - if completed within DELAY of the current time, return RC_DROPIT
 * - if completed longer ago, return RC_REPLY if the reply was cached,
 *   otherwise return RC_DOIT
 * Update/add new request at end of lru list
 */
int
nfsrv_getcache(nd, slp, repp)
	struct nfsrv_descript *nd;
	struct nfssvc_sock *slp;
	struct mbuf **repp;
{
	struct nfsrvcache *rp;
	struct mbuf *mb;
	struct sockaddr_in *saddr;
	caddr_t bpos;
	int ret;

	/*
	 * Don't cache recent requests for reliable transport protocols.
	 * (Maybe we should for the case of a reconnect, but..)
	 */
	if (!nd->nd_nam2)
		return RC_DOIT;
loop:
	LIST_FOREACH(rp, NFSRCHASH(nd->nd_retxid), rc_hash) {
		if (nd->nd_retxid == rp->rc_xid &&
		    nd->nd_procnum == rp->rc_proc &&
		    netaddr_match(NETFAMILY(rp), &rp->rc_haddr, nd->nd_nam)) {
			if ((rp->rc_flag & RC_LOCKED) != 0) {
				rp->rc_flag |= RC_WANTED;
				(void) tsleep(rp, PZERO-1, "nfsrc", 0);
				goto loop;
			}
			rp->rc_flag |= RC_LOCKED;
			/* If not at end of LRU chain, move it there */
			if (TAILQ_NEXT(rp, rc_lru)) {
				TAILQ_REMOVE(&nfsrvlruhead, rp, rc_lru);
				TAILQ_INSERT_TAIL(&nfsrvlruhead, rp, rc_lru);
			}
			if (rp->rc_state == RC_UNUSED)
				panic("nfsrv cache");
			if (rp->rc_state == RC_INPROG) {
				nfsstats.srvcache_inproghits++;
				ret = RC_DROPIT;
			} else if (rp->rc_flag & RC_REPSTATUS) {
				nfsstats.srvcache_nonidemdonehits++;
				nfs_rephead(0, nd, slp, rp->rc_status,
				    0, (u_quad_t *)0, repp, &mb, &bpos);
				ret = RC_REPLY;
			} else if (rp->rc_flag & RC_REPMBUF) {
				nfsstats.srvcache_nonidemdonehits++;
				*repp = m_copym(rp->rc_reply, 0, M_COPYALL,
				    M_WAIT);
				ret = RC_REPLY;
			} else {
				nfsstats.srvcache_idemdonehits++;
				rp->rc_state = RC_INPROG;
				ret = RC_DOIT;
			}
			rp->rc_flag &= ~RC_LOCKED;
			if (rp->rc_flag & RC_WANTED) {
				rp->rc_flag &= ~RC_WANTED;
				wakeup(rp);
			}
			return ret;
		}
	}
	nfsstats.srvcache_misses++;
	if (numnfsrvcache < desirednfsrvcache) {
		rp = pool_get(&nfs_reqcache_pool, PR_WAITOK);
		memset((char *)rp, 0, sizeof *rp);
		numnfsrvcache++;
		rp->rc_flag = RC_LOCKED;
	} else {
		rp = TAILQ_FIRST(&nfsrvlruhead);
		while ((rp->rc_flag & RC_LOCKED) != 0) {
			rp->rc_flag |= RC_WANTED;
			(void) tsleep(rp, PZERO-1, "nfsrc", 0);
			rp = TAILQ_FIRST(&nfsrvlruhead);
		}
		rp->rc_flag |= RC_LOCKED;
		LIST_REMOVE(rp, rc_hash);
		TAILQ_REMOVE(&nfsrvlruhead, rp, rc_lru);
		if (rp->rc_flag & RC_REPMBUF)
			m_freem(rp->rc_reply);
		if (rp->rc_flag & RC_NAM)
			(void) m_free(rp->rc_nam);
		rp->rc_flag &= (RC_LOCKED | RC_WANTED);
	}
	TAILQ_INSERT_TAIL(&nfsrvlruhead, rp, rc_lru);
	rp->rc_state = RC_INPROG;
	rp->rc_xid = nd->nd_retxid;
	saddr = mtod(nd->nd_nam, struct sockaddr_in *);
	switch (saddr->sin_family) {
	case AF_INET:
		rp->rc_flag |= RC_INETADDR;
		rp->rc_inetaddr = saddr->sin_addr.s_addr;
		break;
	case AF_ISO:
	default:
		rp->rc_flag |= RC_NAM;
		rp->rc_nam = m_copym(nd->nd_nam, 0, M_COPYALL, M_WAIT);
		break;
	}
	rp->rc_proc = nd->nd_procnum;
	LIST_INSERT_HEAD(NFSRCHASH(nd->nd_retxid), rp, rc_hash);
	rp->rc_flag &= ~RC_LOCKED;
	if (rp->rc_flag & RC_WANTED) {
		rp->rc_flag &= ~RC_WANTED;
		wakeup(rp);
	}
	return RC_DOIT;
}
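
/*
 * Hypothetical usage sketch, compiled out: how a datagram service loop
 * would be expected to combine nfsrv_getcache() with nfsrv_updatecache()
 * below.  nfsd_do_rpc() and nfsd_send_reply() are illustrative names
 * only and do not exist in this file.
 */
#ifdef notdef
static void
nfsrv_cache_usage_sketch(struct nfsrv_descript *nd, struct nfssvc_sock *slp)
{
	struct mbuf *mreq;

	switch (nfsrv_getcache(nd, slp, &mreq)) {
	case RC_DOIT:
		/* New request (or a replayed idempotent one): execute it. */
		mreq = nfsd_do_rpc(nd, slp);
		nfsrv_updatecache(nd, TRUE, mreq);
		nfsd_send_reply(slp, nd, mreq);
		break;
	case RC_REPLY:
		/*
		 * Retransmission of a completed non-idempotent request:
		 * resend the cached reply instead of re-executing it.
		 */
		nfsd_send_reply(slp, nd, mreq);
		break;
	case RC_DROPIT:
		/* Duplicate of a request still in progress: drop it. */
		break;
	}
}
#endif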

/*
 * Update a request cache entry after the rpc has been done
 */
void
nfsrv_updatecache(nd, repvalid, repmbuf)
	struct nfsrv_descript *nd;
	int repvalid;
	struct mbuf *repmbuf;
{
	struct nfsrvcache *rp;

	if (!nd->nd_nam2)
		return;
loop:
	LIST_FOREACH(rp, NFSRCHASH(nd->nd_retxid), rc_hash) {
		if (nd->nd_retxid == rp->rc_xid &&
		    nd->nd_procnum == rp->rc_proc &&
		    netaddr_match(NETFAMILY(rp), &rp->rc_haddr, nd->nd_nam)) {
			if ((rp->rc_flag & RC_LOCKED) != 0) {
				rp->rc_flag |= RC_WANTED;
				(void) tsleep(rp, PZERO-1, "nfsrc", 0);
				goto loop;
			}
			rp->rc_flag |= RC_LOCKED;
			rp->rc_state = RC_DONE;
			/*
			 * If we have a valid reply, update the status and
			 * save the reply for non-idempotent rpc's.
			 */
			if (repvalid && nonidempotent[nd->nd_procnum]) {
				if ((nd->nd_flag & ND_NFSV3) == 0 &&
				    nfsv2_repstat[nfsv2_procid[nd->nd_procnum]]) {
					rp->rc_status = nd->nd_repstat;
					rp->rc_flag |= RC_REPSTATUS;
				} else {
					rp->rc_reply = m_copym(repmbuf,
					    0, M_COPYALL, M_WAIT);
					rp->rc_flag |= RC_REPMBUF;
				}
			}
			rp->rc_flag &= ~RC_LOCKED;
			if (rp->rc_flag & RC_WANTED) {
				rp->rc_flag &= ~RC_WANTED;
				wakeup(rp);
			}
			return;
		}
	}
}
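
/*
 * Illustrative note only: for an NFSv2 procedure whose reply is a bare
 * status (nfsv2_repstat[]), just nd_repstat is remembered and RC_REPSTATUS
 * is set, so a later hit rebuilds the reply with nfs_rephead(); for any
 * other non-idempotent procedure the whole reply mbuf chain is copied and
 * RC_REPMBUF is set, so a later hit answers with m_copym() of that chain.
 */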

/*
 * Clean out the cache. Called when the last nfsd terminates.
 */
void
nfsrv_cleancache()
{
	struct nfsrvcache *rp, *nextrp;

	for (rp = TAILQ_FIRST(&nfsrvlruhead); rp != 0; rp = nextrp) {
		nextrp = TAILQ_NEXT(rp, rc_lru);
		LIST_REMOVE(rp, rc_hash);
		TAILQ_REMOVE(&nfsrvlruhead, rp, rc_lru);
		pool_put(&nfs_reqcache_pool, rp);
	}
	numnfsrvcache = 0;
}