/*	$NetBSD: nfs_kq.c,v 1.14 2007/04/29 15:31:53 yamt Exp $	*/

/*-
 * Copyright (c) 2002 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jaromir Dolecek.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: nfs_kq.c,v 1.14 2007/04/29 15:31:53 yamt Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/condvar.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/mount.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/vnode.h>
#include <sys/unistd.h>
#include <sys/file.h>
#include <sys/kthread.h>

#include <uvm/uvm_extern.h>
#include <uvm/uvm.h>

#include <nfs/rpcv2.h>
#include <nfs/nfsproto.h>
#include <nfs/nfs.h>
#include <nfs/nfsnode.h>
#include <nfs/nfs_var.h>

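/*
 * One kevq entry is kept per watched vnode; knotes attached to the same
 * vnode share the entry, which is reference-counted via usecount (see
 * nfs_kqfilter() and filt_nfsdetach() below).
 */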
struct kevq {
        SLIST_ENTRY(kevq)       kev_link;
        struct vnode            *vp;
        u_int                   usecount;
        u_int                   flags;
#define KEVQ_BUSY               0x01    /* currently being processed */
        struct timespec         omtime; /* old modification time */
        struct timespec         octime; /* old change time */
        nlink_t                 onlink; /* old number of references to file */
        kcondvar_t              cv;
};
SLIST_HEAD(kevqlist, kevq);

static kmutex_t nfskevq_lock;
static struct proc *pnfskq;
static kcondvar_t nfskq_cv;
static struct kevqlist kevlist = SLIST_HEAD_INITIALIZER(kevlist);

void
nfs_kqinit(void)
{

        mutex_init(&nfskevq_lock, MUTEX_DEFAULT, IPL_NONE);
        cv_init(&nfskq_cv, "nfskqpw");
}

/*
 * This fairly simplistic routine polls the server for changes to any of
 * the watched files every NFS_MINATTRTIMO/2 seconds.  Only changes in
 * size, modification time, change time and nlink are checked; everything
 * else is ignored.
 * VOP_GETATTR() is only called when it is likely to return new data,
 * i.e. once the vnode's attrcache entry has expired.  This should give
 * the same result as periodically running stat(2) from userland, while
 * keeping CPU/network usage low and still providing proper kevent
 * semantics.
 * The poller thread is created when the first vnode is added to the
 * watch list, and exits when the watch list becomes empty.  Neither the
 * overhead of thread creation nor the speed of knote attach/detach
 * really matters here.
 */
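/*
 * Illustrative sketch (not part of the original code): a userland
 * consumer of the filters below would watch an NFS-backed file roughly
 * like this, where "fd" is assumed to be an open descriptor for a file
 * on an NFS mount and error handling is omitted:
 *
 *      struct kevent ev;
 *      int kq = kqueue();
 *
 *      EV_SET(&ev, fd, EVFILT_VNODE, EV_ADD | EV_CLEAR,
 *          NOTE_WRITE | NOTE_ATTRIB | NOTE_LINK, 0, 0);
 *      (void)kevent(kq, &ev, 1, NULL, 0, NULL);   <- register the knote
 *      (void)kevent(kq, NULL, 0, &ev, 1, NULL);   <- blocks until the
 *                                                    poller reports a change
 */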
/* ARGSUSED */
static void
nfs_kqpoll(void *arg)
{
        struct kevq *ke;
        struct vattr attr;
        struct lwp *l = curlwp;
        u_quad_t osize;

        mutex_enter(&nfskevq_lock);
        for (;;) {
                SLIST_FOREACH(ke, &kevlist, kev_link) {
                        /* skip if still in attrcache */
                        if (nfs_getattrcache(ke->vp, &attr) != ENOENT)
                                continue;

                        /*
                         * Mark entry busy, release lock and check
                         * for changes.
                         */
                        ke->flags |= KEVQ_BUSY;
                        mutex_exit(&nfskevq_lock);

                        /* save v_size, nfs_getattr() updates it */
                        osize = ke->vp->v_size;

                        (void) VOP_GETATTR(ke->vp, &attr, l->l_cred, l);

                        /*
                         * The following is a bit fragile, but it is
                         * about the best we can do.
                         */
                        if (attr.va_size != osize) {
                                int extended = (attr.va_size > osize);
                                VN_KNOTE(ke->vp, NOTE_WRITE
                                        | (extended ? NOTE_EXTEND : 0));
                                ke->omtime = attr.va_mtime;
                        } else if (attr.va_mtime.tv_sec != ke->omtime.tv_sec
                            || attr.va_mtime.tv_nsec != ke->omtime.tv_nsec) {
                                VN_KNOTE(ke->vp, NOTE_WRITE);
                                ke->omtime = attr.va_mtime;
                        }

                        if (attr.va_ctime.tv_sec != ke->octime.tv_sec
                            || attr.va_ctime.tv_nsec != ke->octime.tv_nsec) {
                                VN_KNOTE(ke->vp, NOTE_ATTRIB);
                                ke->octime = attr.va_ctime;
                        }

                        if (attr.va_nlink != ke->onlink) {
                                VN_KNOTE(ke->vp, NOTE_LINK);
                                ke->onlink = attr.va_nlink;
                        }

                        mutex_enter(&nfskevq_lock);
                        ke->flags &= ~KEVQ_BUSY;
                        cv_signal(&ke->cv);
                }

                if (SLIST_EMPTY(&kevlist)) {
                        /* Nothing more to watch, exit */
                        pnfskq = NULL;
                        mutex_exit(&nfskevq_lock);
                        kthread_exit(0);
                }

                /* wait a while before checking for changes again */
                cv_timedwait(&nfskq_cv, &nfskevq_lock,
                    NFS_MINATTRTIMO * hz / 2);
        }
}

static void
filt_nfsdetach(struct knote *kn)
{
        struct vnode *vp = (struct vnode *)kn->kn_hook;
        struct kevq *ke;

        /* XXXLUKEM lock the struct? */
        SLIST_REMOVE(&vp->v_klist, kn, knote, kn_selnext);

        /* Remove the vnode from watch list */
        mutex_enter(&nfskevq_lock);
        SLIST_FOREACH(ke, &kevlist, kev_link) {
                if (ke->vp == vp) {
                        while (ke->flags & KEVQ_BUSY) {
                                cv_wait(&ke->cv, &nfskevq_lock);
                        }

                        if (ke->usecount > 1) {
                                /* keep, other kevents need this */
                                ke->usecount--;
                        } else {
                                /* last user, g/c */
                                cv_destroy(&ke->cv);
                                SLIST_REMOVE(&kevlist, ke, kevq, kev_link);
                                FREE(ke, M_KEVENT);
                        }
                        break;
                }
        }
        mutex_exit(&nfskevq_lock);
}

static int
filt_nfsread(struct knote *kn, long hint)
{
        struct vnode *vp = (struct vnode *)kn->kn_hook;

        /*
         * filesystem is gone, so set the EOF flag and schedule
         * the knote for deletion.
         */
        if (hint == NOTE_REVOKE) {
                kn->kn_flags |= (EV_EOF | EV_ONESHOT);
                return (1);
        }

        /* XXXLUKEM lock the struct? */
        kn->kn_data = vp->v_size - kn->kn_fp->f_offset;
        return (kn->kn_data != 0);
}

static int
filt_nfsvnode(struct knote *kn, long hint)
{

        if (kn->kn_sfflags & hint)
                kn->kn_fflags |= hint;
        if (hint == NOTE_REVOKE) {
                kn->kn_flags |= EV_EOF;
                return (1);
        }
        return (kn->kn_fflags != 0);
}

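/*
 * The positional initializers below correspond to the filterops members
 * f_isfd, f_attach, f_detach and f_event: both filters are fd-backed,
 * need no attach hook, and share filt_nfsdetach() for detach.
 */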
static const struct filterops nfsread_filtops =
        { 1, NULL, filt_nfsdetach, filt_nfsread };
static const struct filterops nfsvnode_filtops =
        { 1, NULL, filt_nfsdetach, filt_nfsvnode };

int
nfs_kqfilter(void *v)
{
        struct vop_kqfilter_args /* {
                struct vnode    *a_vp;
                struct knote    *a_kn;
        } */ *ap = v;
        struct vnode *vp;
        struct knote *kn;
        struct kevq *ke;
        int error = 0;
        struct vattr attr;
        struct lwp *l = curlwp;         /* XXX */

        vp = ap->a_vp;
        kn = ap->a_kn;
        switch (kn->kn_filter) {
        case EVFILT_READ:
                kn->kn_fop = &nfsread_filtops;
                break;
        case EVFILT_VNODE:
                kn->kn_fop = &nfsvnode_filtops;
                break;
        default:
                return (1);
        }

        kn->kn_hook = vp;

        /*
         * Put the vnode on the watched list.
         */

        /*
         * Fetch the current attributes.  They are only needed when the
         * vnode is not yet watched, but we have to do this without the
         * lock held.  This is likely cheap thanks to the attrcache, so
         * do it now.
         */
        memset(&attr, 0, sizeof(attr));
        (void) VOP_GETATTR(vp, &attr, l->l_cred, l);

        mutex_enter(&nfskevq_lock);

        /* ensure the poller is running */
        if (!pnfskq) {
                error = kthread_create1(nfs_kqpoll, NULL, &pnfskq,
                    "nfskqpoll");
                if (error)
                        goto out;
        }

        SLIST_FOREACH(ke, &kevlist, kev_link) {
                if (ke->vp == vp)
                        break;
        }

        if (ke) {
                /* already watched, so just bump usecount */
                ke->usecount++;
        } else {
                /* need a new one */
                MALLOC(ke, struct kevq *, sizeof(struct kevq), M_KEVENT,
                    M_WAITOK);
                ke->vp = vp;
                ke->usecount = 1;
                ke->flags = 0;
                ke->omtime = attr.va_mtime;
                ke->octime = attr.va_ctime;
                ke->onlink = attr.va_nlink;
                cv_init(&ke->cv, "nfskqdet");
                SLIST_INSERT_HEAD(&kevlist, ke, kev_link);
        }

        /* kick the poller */
        cv_signal(&nfskq_cv);

        /* XXXLUKEM lock the struct? */
        SLIST_INSERT_HEAD(&vp->v_klist, kn, kn_selnext);

out:
        mutex_exit(&nfskevq_lock);

        return (error);
}