/*	$NetBSD: sys_eventfd.c,v 1.3 2021/09/20 11:12:35 skrll Exp $	*/

/*-
 * Copyright (c) 2020 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sys_eventfd.c,v 1.3 2021/09/20 11:12:35 skrll Exp $");

/*
 * eventfd
 *
 * Eventfd objects present a simple counting object associated with a
 * file descriptor.  Writes add to the count; reads consume it.  When
 * the count is non-zero, the descriptor is considered "readable", and
 * when the count is less than the maximum value (EVENTFD_MAXVAL), it
 * is considered "writable".
 *
 * This implementation is API compatible with the Linux eventfd(2)
 * interface.
 */
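
/*
 * Illustrative userland usage (a sketch only, not part of this file;
 * error handling omitted).  The counter is always transferred as a
 * host-order uint64_t: each write(2) adds its value to the count, and
 * each read(2) returns the current count and resets it to zero (or
 * returns 1 and decrements the count by one in EFD_SEMAPHORE mode):
 *
 *	#include <sys/eventfd.h>
 *	#include <stdint.h>
 *	#include <unistd.h>
 *
 *	int fd = eventfd(0, EFD_CLOEXEC);
 *	uint64_t one = 1, val;
 *
 *	(void)write(fd, &one, sizeof(one));
 *	(void)read(fd, &val, sizeof(val));
 */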

#include <sys/param.h>
#include <sys/types.h>
#include <sys/condvar.h>
#include <sys/eventfd.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/kauth.h>
#include <sys/kmem.h>
#include <sys/mutex.h>
#include <sys/poll.h>
#include <sys/proc.h>
#include <sys/select.h>
#include <sys/stat.h>
#include <sys/syscallargs.h>
#include <sys/uio.h>

struct eventfd {
	kmutex_t	efd_lock;
	kcondvar_t	efd_read_wait;
	kcondvar_t	efd_write_wait;
	kcondvar_t	efd_restart_wait;
	struct selinfo	efd_read_sel;
	struct selinfo	efd_write_sel;
	eventfd_t	efd_val;
	int64_t		efd_nwaiters;
	bool		efd_restarting;
	bool		efd_has_read_waiters;
	bool		efd_has_write_waiters;
	bool		efd_is_semaphore;

	/*
	 * Information kept for stat(2).
	 */
	struct timespec	efd_btime;	/* time created */
	struct timespec	efd_mtime;	/* last write */
	struct timespec	efd_atime;	/* last read */
};

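/*
 * Maximum value the counter may hold.  This matches Linux, which
 * reserves the all-ones value: a write(2) of UINT64_MAX fails with
 * EINVAL, and the counter itself never exceeds UINT64_MAX - 1.
 */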
#define	EVENTFD_MAXVAL	(UINT64_MAX - 1)

/*
 * eventfd_create:
 *
 *	Create an eventfd object.
 */
static struct eventfd *
eventfd_create(unsigned int const val, int const flags)
{
	struct eventfd * const efd = kmem_zalloc(sizeof(*efd), KM_SLEEP);

	mutex_init(&efd->efd_lock, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&efd->efd_read_wait, "efdread");
	cv_init(&efd->efd_write_wait, "efdwrite");
	cv_init(&efd->efd_restart_wait, "efdrstrt");
	selinit(&efd->efd_read_sel);
	selinit(&efd->efd_write_sel);
	efd->efd_val = val;
	efd->efd_is_semaphore = !!(flags & EFD_SEMAPHORE);
	getnanotime(&efd->efd_btime);

	/* Caller deals with EFD_CLOEXEC and EFD_NONBLOCK. */

	return efd;
}

/*
 * eventfd_destroy:
 *
 *	Destroy an eventfd object.
 */
static void
eventfd_destroy(struct eventfd * const efd)
{

	KASSERT(efd->efd_nwaiters == 0);
	KASSERT(efd->efd_restarting == false);
	KASSERT(efd->efd_has_read_waiters == false);
	KASSERT(efd->efd_has_write_waiters == false);

	cv_destroy(&efd->efd_read_wait);
	cv_destroy(&efd->efd_write_wait);
	cv_destroy(&efd->efd_restart_wait);

	seldestroy(&efd->efd_read_sel);
	seldestroy(&efd->efd_write_sel);

	mutex_destroy(&efd->efd_lock);

	/* Free the object itself; eventfd_create() allocated it with
	   kmem_zalloc(), so it must be returned with kmem_free(). */
	kmem_free(efd, sizeof(*efd));
}

/*
 * eventfd_wait:
 *
 *	Block on an eventfd.  Handles the non-blocking case, as well
 *	as the restart cases.
 */
static int
eventfd_wait(struct eventfd * const efd, int const fflag, bool const is_write)
{
	kcondvar_t *waitcv;
	int error;

	if (fflag & FNONBLOCK) {
		return EAGAIN;
	}

	/*
	 * We're going to block.  If there is a restart in-progress,
	 * wait for that to complete first.
	 */
	while (efd->efd_restarting) {
		cv_wait(&efd->efd_restart_wait, &efd->efd_lock);
	}

	if (is_write) {
		efd->efd_has_write_waiters = true;
		waitcv = &efd->efd_write_wait;
	} else {
		efd->efd_has_read_waiters = true;
		waitcv = &efd->efd_read_wait;
	}

	efd->efd_nwaiters++;
	KASSERT(efd->efd_nwaiters > 0);
	error = cv_wait_sig(waitcv, &efd->efd_lock);
	efd->efd_nwaiters--;
	KASSERT(efd->efd_nwaiters >= 0);

	/*
	 * If a restart was triggered while we were asleep, we need
	 * to return ERESTART if no other error was returned.  If we
	 * are the last waiter coming out of the restart drain, clear
	 * the condition.
	 */
	if (efd->efd_restarting) {
		if (error == 0) {
			error = ERESTART;
		}
		if (efd->efd_nwaiters == 0) {
			efd->efd_restarting = false;
			cv_broadcast(&efd->efd_restart_wait);
		}
	}

	return error;
}
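
/*
 * A sketch of the restart protocol implemented above (illustrative
 * summary only): efd_nwaiters counts the LWPs sleeping in
 * cv_wait_sig() in eventfd_wait().  Once eventfd_fop_restart() sets
 * efd_restarting and broadcasts the wait channels, each such waiter
 * returns ERESTART; the last one out clears the flag and broadcasts
 * efd_restart_wait so that LWPs parked in the drain loop at the top
 * of eventfd_wait() may proceed.
 */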

/*
 * eventfd_wake:
 *
 *	Wake LWPs blocked on an eventfd.  Note that a write makes the
 *	object readable, so is_write == true wakes the readers, and
 *	vice versa.
 */
static void
eventfd_wake(struct eventfd * const efd, bool const is_write)
{
	kcondvar_t *waitcv = NULL;
	struct selinfo *sel;
	int pollev;

	if (is_write) {
		if (efd->efd_has_read_waiters) {
			waitcv = &efd->efd_read_wait;
			efd->efd_has_read_waiters = false;
		}
		sel = &efd->efd_read_sel;
		pollev = POLLIN | POLLRDNORM;
	} else {
		if (efd->efd_has_write_waiters) {
			waitcv = &efd->efd_write_wait;
			efd->efd_has_write_waiters = false;
		}
		sel = &efd->efd_write_sel;
		pollev = POLLOUT | POLLWRNORM;
	}
	if (waitcv != NULL) {
		cv_broadcast(waitcv);
	}
	selnotify(sel, pollev, NOTE_SUBMIT);
}

/*
 * eventfd file operations
 */

static int
eventfd_fop_read(file_t * const fp, off_t * const offset,
    struct uio * const uio, kauth_cred_t const cred, int const flags)
{
	struct eventfd * const efd = fp->f_eventfd;
	int const fflag = fp->f_flag;
	eventfd_t return_value;
	int error;

	if (uio->uio_resid < sizeof(eventfd_t)) {
		return EINVAL;
	}

	mutex_enter(&efd->efd_lock);

	while (efd->efd_val == 0) {
		if ((error = eventfd_wait(efd, fflag, false)) != 0) {
			mutex_exit(&efd->efd_lock);
			return error;
		}
	}

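	/*
	 * In semaphore mode (EFD_SEMAPHORE), each read consumes exactly
	 * one count; otherwise a read returns the entire count and
	 * resets it to zero.
	 */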
	if (efd->efd_is_semaphore) {
		return_value = 1;
		efd->efd_val--;
	} else {
		return_value = efd->efd_val;
		efd->efd_val = 0;
	}

	getnanotime(&efd->efd_atime);
	eventfd_wake(efd, false);

	mutex_exit(&efd->efd_lock);

	error = uiomove(&return_value, sizeof(return_value), uio);

	return error;
}

static int
eventfd_fop_write(file_t * const fp, off_t * const offset,
    struct uio * const uio, kauth_cred_t const cred, int const flags)
{
	struct eventfd * const efd = fp->f_eventfd;
	int const fflag = fp->f_flag;
	eventfd_t write_value;
	int error;

	if (uio->uio_resid < sizeof(eventfd_t)) {
		return EINVAL;
	}

	if ((error = uiomove(&write_value, sizeof(write_value), uio)) != 0) {
		return error;
	}

	if (write_value > EVENTFD_MAXVAL) {
		error = EINVAL;
		goto out;
	}

	mutex_enter(&efd->efd_lock);

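	/*
	 * Block until the counter can absorb write_value without
	 * exceeding EVENTFD_MAXVAL.
	 */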
	KASSERT(efd->efd_val <= EVENTFD_MAXVAL);
	while ((EVENTFD_MAXVAL - efd->efd_val) < write_value) {
		if ((error = eventfd_wait(efd, fflag, true)) != 0) {
			mutex_exit(&efd->efd_lock);
			goto out;
		}
	}

	efd->efd_val += write_value;
	KASSERT(efd->efd_val <= EVENTFD_MAXVAL);

	getnanotime(&efd->efd_mtime);
	eventfd_wake(efd, true);

	mutex_exit(&efd->efd_lock);

 out:
	if (error) {
		/*
		 * Undo the effect of uiomove() so that the error
		 * gets reported correctly; see dofilewrite().
		 */
		uio->uio_resid += sizeof(write_value);
	}
	return error;
}

static int
eventfd_fop_poll(file_t * const fp, int const events)
{
	struct eventfd * const efd = fp->f_eventfd;
	int revents = 0;

	/*
	 * Note that Linux will return POLLERR if the eventfd count
	 * overflows, but that is not possible in the normal read/write
	 * API, only with Linux kernel-internal interfaces.  So, this
	 * implementation never returns POLLERR.
	 *
	 * Also note that the Linux eventfd(2) man page does not
	 * specifically discuss returning POLLRDNORM, but we check
	 * for that event in addition to POLLIN.
	 */

	mutex_enter(&efd->efd_lock);

	if (events & (POLLIN | POLLRDNORM)) {
		if (efd->efd_val != 0) {
			revents |= events & (POLLIN | POLLRDNORM);
		} else {
			selrecord(curlwp, &efd->efd_read_sel);
		}
	}

	if (events & (POLLOUT | POLLWRNORM)) {
		if (efd->efd_val < EVENTFD_MAXVAL) {
			revents |= events & (POLLOUT | POLLWRNORM);
		} else {
			selrecord(curlwp, &efd->efd_write_sel);
		}
	}

	mutex_exit(&efd->efd_lock);

	return revents;
}

static int
eventfd_fop_stat(file_t * const fp, struct stat * const st)
{
	struct eventfd * const efd = fp->f_eventfd;

	memset(st, 0, sizeof(*st));

	mutex_enter(&efd->efd_lock);
	st->st_size = (off_t)efd->efd_val;
	st->st_blksize = sizeof(eventfd_t);
	st->st_mode = S_IFIFO | S_IRUSR | S_IWUSR;
	st->st_blocks = 1;
	st->st_birthtimespec = st->st_ctimespec = efd->efd_btime;
	st->st_atimespec = efd->efd_atime;
	st->st_mtimespec = efd->efd_mtime;
	st->st_uid = kauth_cred_geteuid(fp->f_cred);
	st->st_gid = kauth_cred_getegid(fp->f_cred);
	mutex_exit(&efd->efd_lock);

	return 0;
}

static int
eventfd_fop_close(file_t * const fp)
{
	struct eventfd * const efd = fp->f_eventfd;

	fp->f_eventfd = NULL;
	eventfd_destroy(efd);

	return 0;
}

static void
eventfd_filt_read_detach(struct knote * const kn)
{
	struct eventfd * const efd = ((file_t *)kn->kn_obj)->f_eventfd;

	mutex_enter(&efd->efd_lock);
	KASSERT(kn->kn_hook == efd);
	selremove_knote(&efd->efd_read_sel, kn);
	mutex_exit(&efd->efd_lock);
}

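/*
 * When the filter is invoked from eventfd_wake() via selnotify(),
 * NOTE_SUBMIT is set in the hint and efd_lock is already held; a
 * kqueue-initiated scan must take the lock itself.  The same protocol
 * applies to eventfd_filt_write() below.
 */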
static int
eventfd_filt_read(struct knote * const kn, long const hint)
{
	struct eventfd * const efd = ((file_t *)kn->kn_obj)->f_eventfd;

	if (hint & NOTE_SUBMIT) {
		KASSERT(mutex_owned(&efd->efd_lock));
	} else {
		mutex_enter(&efd->efd_lock);
	}

	kn->kn_data = (int64_t)efd->efd_val;

	if ((hint & NOTE_SUBMIT) == 0) {
		mutex_exit(&efd->efd_lock);
	}

	return (eventfd_t)kn->kn_data > 0;
}

static const struct filterops eventfd_read_filterops = {
	.f_isfd = 1,
	.f_detach = eventfd_filt_read_detach,
	.f_event = eventfd_filt_read,
};

static void
eventfd_filt_write_detach(struct knote * const kn)
{
	struct eventfd * const efd = ((file_t *)kn->kn_obj)->f_eventfd;

	mutex_enter(&efd->efd_lock);
	KASSERT(kn->kn_hook == efd);
	selremove_knote(&efd->efd_write_sel, kn);
	mutex_exit(&efd->efd_lock);
}

static int
eventfd_filt_write(struct knote * const kn, long const hint)
{
	struct eventfd * const efd = ((file_t *)kn->kn_obj)->f_eventfd;

	if (hint & NOTE_SUBMIT) {
		KASSERT(mutex_owned(&efd->efd_lock));
	} else {
		mutex_enter(&efd->efd_lock);
	}

	kn->kn_data = (int64_t)efd->efd_val;

	if ((hint & NOTE_SUBMIT) == 0) {
		mutex_exit(&efd->efd_lock);
	}

	return (eventfd_t)kn->kn_data < EVENTFD_MAXVAL;
}

static const struct filterops eventfd_write_filterops = {
	.f_isfd = 1,
	.f_detach = eventfd_filt_write_detach,
	.f_event = eventfd_filt_write,
};

static int
eventfd_fop_kqfilter(file_t * const fp, struct knote * const kn)
{
	struct eventfd * const efd = ((file_t *)kn->kn_obj)->f_eventfd;
	struct selinfo *sel;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		sel = &efd->efd_read_sel;
		kn->kn_fop = &eventfd_read_filterops;
		break;

	case EVFILT_WRITE:
		sel = &efd->efd_write_sel;
		kn->kn_fop = &eventfd_write_filterops;
		break;

	default:
		return EINVAL;
	}

	kn->kn_hook = efd;

	mutex_enter(&efd->efd_lock);
	selrecord_knote(sel, kn);
	mutex_exit(&efd->efd_lock);

	return 0;
}

static void
eventfd_fop_restart(file_t * const fp)
{
	struct eventfd * const efd = fp->f_eventfd;

	/*
	 * Unblock blocked reads/writes in order to allow close() to complete.
	 * System calls return ERESTART so that the fd is revalidated.
	 */

	mutex_enter(&efd->efd_lock);

	if (efd->efd_nwaiters != 0) {
		efd->efd_restarting = true;
		if (efd->efd_has_read_waiters) {
			cv_broadcast(&efd->efd_read_wait);
			efd->efd_has_read_waiters = false;
		}
		if (efd->efd_has_write_waiters) {
			cv_broadcast(&efd->efd_write_wait);
			efd->efd_has_write_waiters = false;
		}
	}

	mutex_exit(&efd->efd_lock);
}
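
/*
 * Example sequence (illustrative): one thread blocks in read(2) on an
 * empty eventfd while another thread close(2)s the same descriptor.
 * fd_close() invokes fo_restart, the waiter wakes up and returns
 * ERESTART from eventfd_wait(), the system call is restarted, and the
 * restarted read fails with EBADF once the descriptor is gone.
 */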

static const struct fileops eventfd_fileops = {
	.fo_name = "eventfd",
	.fo_read = eventfd_fop_read,
	.fo_write = eventfd_fop_write,
	.fo_ioctl = fbadop_ioctl,
	.fo_fcntl = fnullop_fcntl,
	.fo_poll = eventfd_fop_poll,
	.fo_stat = eventfd_fop_stat,
	.fo_close = eventfd_fop_close,
	.fo_kqfilter = eventfd_fop_kqfilter,
	.fo_restart = eventfd_fop_restart,
};

/*
 * eventfd(2) system call
 */
int
do_eventfd(struct lwp * const l, unsigned int const val, int const flags,
    register_t *retval)
{
	file_t *fp;
	int fd, error;

	if (flags & ~(EFD_CLOEXEC | EFD_NONBLOCK | EFD_SEMAPHORE)) {
		return EINVAL;
	}

	if ((error = fd_allocfile(&fp, &fd)) != 0) {
		return error;
	}

	fp->f_flag = FREAD | FWRITE;
	if (flags & EFD_NONBLOCK) {
		fp->f_flag |= FNONBLOCK;
	}
	fp->f_type = DTYPE_EVENTFD;
	fp->f_ops = &eventfd_fileops;
	fp->f_eventfd = eventfd_create(val, flags);
	fd_set_exclose(l, fd, !!(flags & EFD_CLOEXEC));
	fd_affix(curproc, fp, fd);

	*retval = fd;
	return 0;
}

int
sys_eventfd(struct lwp *l, const struct sys_eventfd_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(unsigned int) val;
		syscallarg(int) flags;
	} */

	return do_eventfd(l, SCARG(uap, val), SCARG(uap, flags), retval);
}