/* $NetBSD: nfs_kq.c,v 1.21.4.4 2010/03/21 13:06:36 yamt Exp $ */

/*-
 * Copyright (c) 2002, 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jaromir Dolecek.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: nfs_kq.c,v 1.21.4.4 2010/03/21 13:06:36 yamt Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/condvar.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/kmem.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/vnode.h>
#include <sys/unistd.h>
#include <sys/file.h>
#include <sys/kthread.h>

#include <uvm/uvm_extern.h>
#include <uvm/uvm.h>

#include <nfs/rpcv2.h>
#include <nfs/nfsproto.h>
#include <nfs/nfs.h>
#include <nfs/nfsnode.h>
#include <nfs/nfs_var.h>

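/*
 * One entry per watched vnode on the poller's list.  The saved
 * modification time, change time and link count are compared against
 * freshly fetched attributes to decide which notes to post.
 */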
struct kevq {
        SLIST_ENTRY(kevq)       kev_link;
        struct vnode            *vp;
        u_int                   usecount;
        u_int                   flags;
#define KEVQ_BUSY               0x01    /* currently being processed */
        struct timespec         omtime; /* old modification time */
        struct timespec         octime; /* old change time */
        nlink_t                 onlink; /* old number of references to file */
        kcondvar_t              cv;
};
SLIST_HEAD(kevqlist, kevq);

static kmutex_t nfskq_lock;
static struct lwp *nfskq_thread;
static kcondvar_t nfskq_cv;
static struct kevqlist kevlist = SLIST_HEAD_INITIALIZER(kevlist);
static bool nfskq_thread_exit;

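/*
 * Initialize the global lock and condition variable shared by the
 * poller thread and the knote attach/detach paths.
 */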
void
nfs_kqinit(void)
{

        mutex_init(&nfskq_lock, MUTEX_DEFAULT, IPL_NONE);
        cv_init(&nfskq_cv, "nfskqpw");
}

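/*
 * Tear down: if the poller thread is running, ask it to exit and wait
 * for it to go away, then destroy the global lock and condition
 * variable.
 */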
void
nfs_kqfini(void)
{

        if (nfskq_thread != NULL) {
                mutex_enter(&nfskq_lock);
                nfskq_thread_exit = true;
                cv_broadcast(&nfskq_cv);
                do {
                        cv_wait(&nfskq_cv, &nfskq_lock);
                } while (nfskq_thread != NULL);
                mutex_exit(&nfskq_lock);
        }
        mutex_destroy(&nfskq_lock);
        cv_destroy(&nfskq_cv);
}

/*
 * This deliberately simple routine checks every watched file for
 * server-side changes once every NFS_MINATTRTIMO/2 seconds.
 * Only changes in size, modification time, change time and link count
 * are checked; everything else is ignored.
 * VOP_GETATTR() is called only when it is likely to return new data,
 * i.e. once the vnode's attributes have expired from the attrcache.
 * This should give the same result as periodically running stat(2)
 * from userland, while keeping CPU/network usage low and still
 * providing proper kevent semantics.
 * The poller thread is created when the first vnode is added to the
 * watch list and exits when the watch list becomes empty.  Neither the
 * overhead of thread creation nor the speed of knote attach/detach
 * matters much here.
 */
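/*
 * Illustrative only (not part of this file; the mount path below is
 * hypothetical): a userland consumer obtains these semantics by
 * registering an EVFILT_VNODE knote on an NFS-backed file, roughly:
 *
 *      #include <sys/event.h>
 *      #include <fcntl.h>
 *
 *      int kq = kqueue();
 *      int fd = open("/mnt/nfs/somefile", O_RDONLY);
 *      struct kevent ev;
 *
 *      EV_SET(&ev, fd, EVFILT_VNODE, EV_ADD | EV_CLEAR,
 *          NOTE_WRITE | NOTE_EXTEND | NOTE_ATTRIB | NOTE_LINK, 0, 0);
 *      (void)kevent(kq, &ev, 1, NULL, 0, NULL);
 *      (void)kevent(kq, NULL, 0, &ev, 1, NULL);
 *
 * The first kevent() call registers the knote; the second blocks until
 * one of the requested notes fires.  The poller below is what posts
 * those notes for NFS vnodes.
 */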
/* ARGSUSED */
static void
nfs_kqpoll(void *arg)
{
        struct kevq *ke;
        struct vattr attr;
        struct lwp *l = curlwp;
        u_quad_t osize;

        mutex_enter(&nfskq_lock);
        while (!nfskq_thread_exit) {
                SLIST_FOREACH(ke, &kevlist, kev_link) {
                        /* skip if still in attrcache */
                        if (nfs_getattrcache(ke->vp, &attr) != ENOENT)
                                continue;

                        /*
                         * Mark entry busy, release lock and check
                         * for changes.
                         */
                        ke->flags |= KEVQ_BUSY;
                        mutex_exit(&nfskq_lock);

                        /* save v_size, nfs_getattr() updates it */
                        osize = ke->vp->v_size;

                        vn_lock(ke->vp, LK_SHARED | LK_RETRY);
                        (void) VOP_GETATTR(ke->vp, &attr, l->l_cred);
                        VOP_UNLOCK(ke->vp, 0);

                        /*
                         * The following is somewhat fragile, but it is
                         * about the best we can do here.
                         */
                        if (attr.va_size != osize) {
                                int extended = (attr.va_size > osize);
                                VN_KNOTE(ke->vp, NOTE_WRITE
                                    | (extended ? NOTE_EXTEND : 0));
                                ke->omtime = attr.va_mtime;
                        } else if (attr.va_mtime.tv_sec != ke->omtime.tv_sec
                            || attr.va_mtime.tv_nsec != ke->omtime.tv_nsec) {
                                VN_KNOTE(ke->vp, NOTE_WRITE);
                                ke->omtime = attr.va_mtime;
                        }

                        if (attr.va_ctime.tv_sec != ke->octime.tv_sec
                            || attr.va_ctime.tv_nsec != ke->octime.tv_nsec) {
                                VN_KNOTE(ke->vp, NOTE_ATTRIB);
                                ke->octime = attr.va_ctime;
                        }

                        if (attr.va_nlink != ke->onlink) {
                                VN_KNOTE(ke->vp, NOTE_LINK);
                                ke->onlink = attr.va_nlink;
                        }

                        mutex_enter(&nfskq_lock);
                        ke->flags &= ~KEVQ_BUSY;
                        cv_signal(&ke->cv);
                }

                if (SLIST_EMPTY(&kevlist)) {
                        /* Nothing more to watch, exit */
                        nfskq_thread = NULL;
                        mutex_exit(&nfskq_lock);
                        kthread_exit(0);
                }

                /* wait a while before checking for changes again */
                cv_timedwait(&nfskq_cv, &nfskq_lock,
                    NFS_MINATTRTIMO * hz / 2);
        }
        nfskq_thread = NULL;
        cv_broadcast(&nfskq_cv);
        mutex_exit(&nfskq_lock);
}

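/*
 * Detach a knote: unhook it from the vnode's klist and drop the
 * vnode's reference on its watch-list entry, freeing the entry when
 * the last knote watching that vnode goes away.
 */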
static void
filt_nfsdetach(struct knote *kn)
{
        struct vnode *vp = (struct vnode *)kn->kn_hook;
        struct kevq *ke;

        mutex_enter(&vp->v_interlock);
        SLIST_REMOVE(&vp->v_klist, kn, knote, kn_selnext);
        mutex_exit(&vp->v_interlock);

        /* Remove the vnode from watch list */
        mutex_enter(&nfskq_lock);
        SLIST_FOREACH(ke, &kevlist, kev_link) {
                if (ke->vp == vp) {
                        while (ke->flags & KEVQ_BUSY) {
                                cv_wait(&ke->cv, &nfskq_lock);
                        }

                        if (ke->usecount > 1) {
                                /* keep, other kevents need this */
                                ke->usecount--;
                        } else {
                                /* last user, g/c */
                                cv_destroy(&ke->cv);
                                SLIST_REMOVE(&kevlist, ke, kevq, kev_link);
                                kmem_free(ke, sizeof(*ke));
                        }
                        break;
                }
        }
        mutex_exit(&nfskq_lock);
}

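/*
 * EVFILT_READ filter: report how much data beyond the file offset is
 * available for reading, and flag EOF once the filesystem has been
 * revoked.
 */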
static int
filt_nfsread(struct knote *kn, long hint)
{
        struct vnode *vp = (struct vnode *)kn->kn_hook;
        int rv;

        /*
         * If the filesystem is gone (NOTE_REVOKE), set the EOF flag
         * and schedule the knote for deletion.
         */
        switch (hint) {
        case NOTE_REVOKE:
                KASSERT(mutex_owned(&vp->v_interlock));
                kn->kn_flags |= (EV_EOF | EV_ONESHOT);
                return (1);
        case 0:
                mutex_enter(&vp->v_interlock);
                kn->kn_data = vp->v_size - ((file_t *)kn->kn_obj)->f_offset;
                rv = (kn->kn_data != 0);
                mutex_exit(&vp->v_interlock);
                return rv;
        default:
                KASSERT(mutex_owned(&vp->v_interlock));
                kn->kn_data = vp->v_size - ((file_t *)kn->kn_obj)->f_offset;
                return (kn->kn_data != 0);
        }
}

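/*
 * EVFILT_VNODE filter: record the hinted notes the watcher subscribed
 * to and report the knote as active once any of them have been seen.
 */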
static int
filt_nfsvnode(struct knote *kn, long hint)
{
        struct vnode *vp = (struct vnode *)kn->kn_hook;
        int fflags;

        switch (hint) {
        case NOTE_REVOKE:
                KASSERT(mutex_owned(&vp->v_interlock));
                kn->kn_flags |= EV_EOF;
                if ((kn->kn_sfflags & hint) != 0)
                        kn->kn_fflags |= hint;
                return (1);
        case 0:
                mutex_enter(&vp->v_interlock);
                fflags = kn->kn_fflags;
                mutex_exit(&vp->v_interlock);
                break;
        default:
                KASSERT(mutex_owned(&vp->v_interlock));
                if ((kn->kn_sfflags & hint) != 0)
                        kn->kn_fflags |= hint;
                fflags = kn->kn_fflags;
                break;
        }

        return (fflags != 0);
}


static const struct filterops nfsread_filtops =
        { 1, NULL, filt_nfsdetach, filt_nfsread };
static const struct filterops nfsvnode_filtops =
        { 1, NULL, filt_nfsdetach, filt_nfsvnode };

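/*
 * VOP_KQFILTER entry point for NFS: hook the knote onto the vnode's
 * klist, make sure the vnode is on the poller's watch list (creating
 * the poller thread if necessary), and seed the saved attributes used
 * for change detection.
 */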
int
nfs_kqfilter(void *v)
{
        struct vop_kqfilter_args /* {
                struct vnode    *a_vp;
                struct knote    *a_kn;
        } */ *ap = v;
        struct vnode *vp;
        struct knote *kn;
        struct kevq *ke;
        int error = 0;
        struct vattr attr;
        struct lwp *l = curlwp;

        vp = ap->a_vp;
        kn = ap->a_kn;
        switch (kn->kn_filter) {
        case EVFILT_READ:
                kn->kn_fop = &nfsread_filtops;
                break;
        case EVFILT_VNODE:
                kn->kn_fop = &nfsvnode_filtops;
                break;
        default:
                return (EINVAL);
        }

        /*
         * Put the vnode on the watched list.
         */

        /*
         * Fetch the current attributes.  They are only needed when the
         * vnode is not yet watched, but we must do this without the
         * lock held.  It is likely cheap thanks to the attrcache, so
         * do it now.
         */
        memset(&attr, 0, sizeof(attr));
        vn_lock(vp, LK_SHARED | LK_RETRY);
        (void) VOP_GETATTR(vp, &attr, l->l_cred);
        VOP_UNLOCK(vp, 0);

        mutex_enter(&nfskq_lock);

        /* ensure the poller is running */
        if (!nfskq_thread) {
                error = kthread_create(PRI_NONE, KTHREAD_MPSAFE, NULL,
                    nfs_kqpoll, NULL, &nfskq_thread, "nfskqpoll");
                if (error) {
                        mutex_exit(&nfskq_lock);
                        return error;
                }
        }

        SLIST_FOREACH(ke, &kevlist, kev_link) {
                if (ke->vp == vp)
                        break;
        }

        if (ke) {
                /* already watched, so just bump usecount */
                ke->usecount++;
        } else {
                /* need a new one */
                ke = kmem_alloc(sizeof(*ke), KM_SLEEP);
                ke->vp = vp;
                ke->usecount = 1;
                ke->flags = 0;
                ke->omtime = attr.va_mtime;
                ke->octime = attr.va_ctime;
                ke->onlink = attr.va_nlink;
                cv_init(&ke->cv, "nfskqdet");
                SLIST_INSERT_HEAD(&kevlist, ke, kev_link);
        }

        mutex_enter(&vp->v_interlock);
        SLIST_INSERT_HEAD(&vp->v_klist, kn, kn_selnext);
        kn->kn_hook = vp;
        mutex_exit(&vp->v_interlock);

        /* kick the poller */
        cv_signal(&nfskq_cv);
        mutex_exit(&nfskq_lock);

        return (error);
}