/*	$NetBSD: nfs_kq.c,v 1.31 2021/10/11 01:49:08 thorpej Exp $	*/

/*-
 * Copyright (c) 2002, 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jaromir Dolecek.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: nfs_kq.c,v 1.31 2021/10/11 01:49:08 thorpej Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/condvar.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/kmem.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/vnode.h>
#include <sys/unistd.h>
#include <sys/file.h>
#include <sys/kthread.h>

#include <nfs/rpcv2.h>
#include <nfs/nfsproto.h>
#include <nfs/nfs.h>
#include <nfs/nfsnode.h>
#include <nfs/nfs_var.h>

struct kevq {
	SLIST_ENTRY(kevq) kev_link;
	struct vnode *vp;
	u_int usecount;
	u_int flags;
#define KEVQ_BUSY	0x01	/* currently being processed */
	struct timespec omtime;	/* old modification time */
	struct timespec octime;	/* old change time */
	nlink_t onlink;		/* old number of references to file */
	kcondvar_t cv;
};
SLIST_HEAD(kevqlist, kevq);

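/*
 * Global poller state.  nfskq_lock protects the watch list, the per-entry
 * flags/usecount and the thread/exit variables below; nfskq_cv is used both
 * to wake or delay the poller and to wait for it to exit.
 */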
static kmutex_t nfskq_lock;
static struct lwp *nfskq_thread;
static kcondvar_t nfskq_cv;
static struct kevqlist kevlist = SLIST_HEAD_INITIALIZER(kevlist);
static bool nfskq_thread_exit;

void
nfs_kqinit(void)
{

	mutex_init(&nfskq_lock, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&nfskq_cv, "nfskqpw");
}

void
nfs_kqfini(void)
{

	if (nfskq_thread != NULL) {
		mutex_enter(&nfskq_lock);
		nfskq_thread_exit = true;
		cv_broadcast(&nfskq_cv);
		do {
			cv_wait(&nfskq_cv, &nfskq_lock);
		} while (nfskq_thread != NULL);
		mutex_exit(&nfskq_lock);
	}
	mutex_destroy(&nfskq_lock);
	cv_destroy(&nfskq_cv);
}

/*
 * This fairly simplistic routine checks all watched files for server-side
 * changes every NFS_MINATTRTIMO/2 seconds.  Only changes in size,
 * modification time, change time and link count are checked; everything
 * else is ignored.
 * VOP_GETATTR() is called only when it is likely to return new data,
 * i.e. once the vnode's attrcache entry has expired.  This should give
 * the same result as periodically running stat(2) from userland, while
 * keeping CPU and network usage low and still providing proper kevent
 * semantics.
 * The poller thread is created when the first vnode is added to the watch
 * list and exits when the list becomes empty.  Neither the cost of thread
 * creation nor the speed of knote attach/detach really matters here.
 */
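/*
 * For reference, a minimal userland sketch (hypothetical example, not part
 * of this file) of the kevent(2) usage whose behaviour the poller below
 * emulates for files living on NFS:
 *
 *	struct kevent kev, ev;
 *	int kq = kqueue();
 *	int fd = open(path, O_RDONLY);
 *
 *	EV_SET(&kev, fd, EVFILT_VNODE, EV_ADD | EV_CLEAR,
 *	    NOTE_WRITE | NOTE_EXTEND | NOTE_ATTRIB | NOTE_LINK, 0, 0);
 *	(void)kevent(kq, &kev, 1, NULL, 0, NULL);
 *	for (;;) {
 *		if (kevent(kq, NULL, 0, &ev, 1, NULL) > 0)
 *			printf("vnode event, fflags %#x\n", (unsigned)ev.fflags);
 *	}
 *
 * On local filesystems these events fire as soon as the file changes; for
 * NFS they are generated by the polling thread below.
 */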
/* ARGSUSED */
static void
nfs_kqpoll(void *arg)
{
	struct kevq *ke;
	struct vattr attr;
	struct lwp *l = curlwp;
	u_quad_t osize;

	mutex_enter(&nfskq_lock);
	while (!nfskq_thread_exit) {
		SLIST_FOREACH(ke, &kevlist, kev_link) {
			/* skip if still in attrcache */
			if (nfs_getattrcache(ke->vp, &attr) != ENOENT)
				continue;

			/*
			 * Mark entry busy, release lock and check
			 * for changes.
			 */
			ke->flags |= KEVQ_BUSY;
			mutex_exit(&nfskq_lock);

			/* save v_size, nfs_getattr() updates it */
			osize = ke->vp->v_size;

			memset(&attr, 0, sizeof(attr));
			vn_lock(ke->vp, LK_SHARED | LK_RETRY);
			(void) VOP_GETATTR(ke->vp, &attr, l->l_cred);
			VOP_UNLOCK(ke->vp);
			/*
			 * The following is somewhat fragile, but it is
			 * about the best we can do.
			 */
			if (attr.va_size != osize) {
				int extended = (attr.va_size > osize);
				VN_KNOTE(ke->vp, NOTE_WRITE
					| (extended ? NOTE_EXTEND : 0));
				ke->omtime = attr.va_mtime;
			} else if (attr.va_mtime.tv_sec != ke->omtime.tv_sec
			    || attr.va_mtime.tv_nsec != ke->omtime.tv_nsec) {
				VN_KNOTE(ke->vp, NOTE_WRITE);
				ke->omtime = attr.va_mtime;
			}

			if (attr.va_ctime.tv_sec != ke->octime.tv_sec
			    || attr.va_ctime.tv_nsec != ke->octime.tv_nsec) {
				VN_KNOTE(ke->vp, NOTE_ATTRIB);
				ke->octime = attr.va_ctime;
			}

			if (attr.va_nlink != ke->onlink) {
				VN_KNOTE(ke->vp, NOTE_LINK);
				ke->onlink = attr.va_nlink;
			}

			mutex_enter(&nfskq_lock);
			ke->flags &= ~KEVQ_BUSY;
			cv_signal(&ke->cv);
		}

		if (SLIST_EMPTY(&kevlist)) {
			/* Nothing more to watch, exit */
			nfskq_thread = NULL;
			mutex_exit(&nfskq_lock);
			kthread_exit(0);
		}

		/* wait a while before checking for changes again */
		cv_timedwait(&nfskq_cv, &nfskq_lock,
		    NFS_MINATTRTIMO * hz / 2);
	}
	nfskq_thread = NULL;
	cv_broadcast(&nfskq_cv);
	mutex_exit(&nfskq_lock);
}

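/*
 * Detach a knote: unhook it from the vnode's klist and drop our reference
 * on the vnode's watch-list entry, freeing the entry when the last knote
 * watching that vnode goes away.
 */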
static void
filt_nfsdetach(struct knote *kn)
{
	struct vnode *vp = (struct vnode *)kn->kn_hook;
	struct kevq *ke;

	mutex_enter(vp->v_interlock);
	SLIST_REMOVE(&vp->v_klist, kn, knote, kn_selnext);
	mutex_exit(vp->v_interlock);

	/* Remove the vnode from the watch list */
	mutex_enter(&nfskq_lock);
	SLIST_FOREACH(ke, &kevlist, kev_link) {
		if (ke->vp == vp) {
			while (ke->flags & KEVQ_BUSY) {
				cv_wait(&ke->cv, &nfskq_lock);
			}

			if (ke->usecount > 1) {
				/* keep, other kevents need this */
				ke->usecount--;
			} else {
				/* last user, g/c */
				cv_destroy(&ke->cv);
				SLIST_REMOVE(&kevlist, ke, kevq, kev_link);
				kmem_free(ke, sizeof(*ke));
			}
			break;
		}
	}
	mutex_exit(&nfskq_lock);
}

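/*
 * EVFILT_READ: report how much data beyond the descriptor's current
 * file offset is available for reading.
 */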
static int
filt_nfsread(struct knote *kn, long hint)
{
	struct vnode *vp = (struct vnode *)kn->kn_hook;
	int rv;

	/*
	 * On NOTE_REVOKE the filesystem is gone, so set the EOF flag
	 * and schedule the knote for deletion.
	 */
	switch (hint) {
	case NOTE_REVOKE:
		KASSERT(mutex_owned(vp->v_interlock));
		knote_set_eof(kn, EV_ONESHOT);
		return (1);
	case 0:
		mutex_enter(vp->v_interlock);
		kn->kn_data = vp->v_size - ((file_t *)kn->kn_obj)->f_offset;
		rv = (kn->kn_data != 0);
		mutex_exit(vp->v_interlock);
		return rv;
	default:
		KASSERT(mutex_owned(vp->v_interlock));
		kn->kn_data = vp->v_size - ((file_t *)kn->kn_obj)->f_offset;
		return (kn->kn_data != 0);
	}
}

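/*
 * EVFILT_VNODE: accumulate the vnode events the caller subscribed to
 * and report when any of them have fired.
 */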
static int
filt_nfsvnode(struct knote *kn, long hint)
{
	struct vnode *vp = (struct vnode *)kn->kn_hook;
	int fflags;

	switch (hint) {
	case NOTE_REVOKE:
		KASSERT(mutex_owned(vp->v_interlock));
		knote_set_eof(kn, 0);
		if ((kn->kn_sfflags & hint) != 0)
			kn->kn_fflags |= hint;
		return (1);
	case 0:
		mutex_enter(vp->v_interlock);
		fflags = kn->kn_fflags;
		mutex_exit(vp->v_interlock);
		break;
	default:
		KASSERT(mutex_owned(vp->v_interlock));
		if ((kn->kn_sfflags & hint) != 0)
			kn->kn_fflags |= hint;
		fflags = kn->kn_fflags;
		break;
	}

	return (fflags != 0);
}

static const struct filterops nfsread_filtops = {
	.f_flags = FILTEROP_ISFD | FILTEROP_MPSAFE,
	.f_attach = NULL,
	.f_detach = filt_nfsdetach,
	.f_event = filt_nfsread,
};

static const struct filterops nfsvnode_filtops = {
	.f_flags = FILTEROP_ISFD | FILTEROP_MPSAFE,
	.f_attach = NULL,
	.f_detach = filt_nfsdetach,
	.f_event = filt_nfsvnode,
};

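/*
 * nfs_kqfilter: VOP_KQFILTER for NFS.  Hook the knote onto the vnode's
 * klist and make sure the vnode is on the poller's watch list, creating
 * the poller thread if it is not already running.
 */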
int
nfs_kqfilter(void *v)
{
	struct vop_kqfilter_args /* {
		struct vnode *a_vp;
		struct knote *a_kn;
	} */ *ap = v;
	struct vnode *vp;
	struct knote *kn;
	struct kevq *ke;
	int error = 0;
	struct vattr attr;
	struct lwp *l = curlwp;

	vp = ap->a_vp;
	kn = ap->a_kn;
	switch (kn->kn_filter) {
	case EVFILT_READ:
		kn->kn_fop = &nfsread_filtops;
		break;
	case EVFILT_VNODE:
		kn->kn_fop = &nfsvnode_filtops;
		break;
	default:
		return (EINVAL);
	}

	/*
	 * Put the vnode on the watch list.
	 */

	/*
	 * Fetch the current attributes.  They are only needed when the
	 * vnode is not being watched yet, but we have to do this without
	 * the lock held.  It is likely cheap thanks to the attrcache, so
	 * do it now.
	 */
	memset(&attr, 0, sizeof(attr));
	vn_lock(vp, LK_SHARED | LK_RETRY);
	(void) VOP_GETATTR(vp, &attr, l->l_cred);
	VOP_UNLOCK(vp);

	mutex_enter(&nfskq_lock);

	/* ensure the poller is running */
	if (!nfskq_thread) {
		error = kthread_create(PRI_NONE, 0, NULL, nfs_kqpoll,
		    NULL, &nfskq_thread, "nfskqpoll");
		if (error) {
			mutex_exit(&nfskq_lock);
			return error;
		}
	}

	SLIST_FOREACH(ke, &kevlist, kev_link) {
		if (ke->vp == vp)
			break;
	}

	if (ke) {
		/* already watched, so just bump usecount */
		ke->usecount++;
	} else {
		/* need a new one */
		ke = kmem_alloc(sizeof(*ke), KM_SLEEP);
		ke->vp = vp;
		ke->usecount = 1;
		ke->flags = 0;
		ke->omtime = attr.va_mtime;
		ke->octime = attr.va_ctime;
		ke->onlink = attr.va_nlink;
		cv_init(&ke->cv, "nfskqdet");
		SLIST_INSERT_HEAD(&kevlist, ke, kev_link);
	}

	mutex_enter(vp->v_interlock);
	SLIST_INSERT_HEAD(&vp->v_klist, kn, kn_selnext);
	kn->kn_hook = vp;
	mutex_exit(vp->v_interlock);

	/* kick the poller */
	cv_signal(&nfskq_cv);
	mutex_exit(&nfskq_lock);

	return (error);
}