Home | History | Annotate | Line # | Download | only in kern
sys_eventfd.c revision 1.6
      1 /*	$NetBSD: sys_eventfd.c,v 1.6 2021/09/26 03:42:54 thorpej Exp $	*/
      2 
      3 /*-
      4  * Copyright (c) 2020 The NetBSD Foundation, Inc.
      5  * All rights reserved.
      6  *
      7  * This code is derived from software contributed to The NetBSD Foundation
      8  * by Jason R. Thorpe.
      9  *
     10  * Redistribution and use in source and binary forms, with or without
     11  * modification, are permitted provided that the following conditions
     12  * are met:
     13  * 1. Redistributions of source code must retain the above copyright
     14  *    notice, this list of conditions and the following disclaimer.
     15  * 2. Redistributions in binary form must reproduce the above copyright
     16  *    notice, this list of conditions and the following disclaimer in the
     17  *    documentation and/or other materials provided with the distribution.
     18  *
     19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     29  * POSSIBILITY OF SUCH DAMAGE.
     30  */
     31 
     32 #include <sys/cdefs.h>
     33 __KERNEL_RCSID(0, "$NetBSD: sys_eventfd.c,v 1.6 2021/09/26 03:42:54 thorpej Exp $");
     34 
     35 /*
     36  * eventfd
     37  *
     38  * Eventfd objects present a simple counting object associated with a
     39  * file descriptor.  Writes and reads to this file descriptor increment
     40  * and decrement the count, respectively.  When the count is non-zero,
     41  * the descriptor is considered "readable", and when less than the max
     42  * value (EVENTFD_MAXVAL), is considered "writable".
     43  *
     44  * This implementation is API compatible with the Linux eventfd(2)
     45  * interface.
     46  */
     47 
     48 #include <sys/param.h>
     49 #include <sys/types.h>
     50 #include <sys/condvar.h>
     51 #include <sys/eventfd.h>
     52 #include <sys/file.h>
     53 #include <sys/filedesc.h>
     54 #include <sys/kauth.h>
     55 #include <sys/mutex.h>
     56 #include <sys/poll.h>
     57 #include <sys/proc.h>
     58 #include <sys/select.h>
     59 #include <sys/stat.h>
     60 #include <sys/syscallargs.h>
     61 #include <sys/uio.h>
     62 
struct eventfd {
	kmutex_t	efd_lock;	/* serializes access to all fields below */
	kcondvar_t	efd_read_wait;	/* readers sleep here while efd_val == 0 */
	kcondvar_t	efd_write_wait;	/* writers sleep here while counter is full */
	kcondvar_t	efd_restart_wait; /* waiters draining after fo_restart */
	struct selinfo	efd_read_sel;	/* poll/select/kqueue readable notification */
	struct selinfo	efd_write_sel;	/* poll/select/kqueue writable notification */
	eventfd_t	efd_val;	/* the 64-bit counter itself */
	int64_t		efd_nwaiters;	/* LWPs currently blocked in eventfd_wait() */
	bool		efd_restarting;	/* fo_restart drain in progress */
	bool		efd_has_read_waiters;	/* broadcast efd_read_wait on wake */
	bool		efd_has_write_waiters;	/* broadcast efd_write_wait on wake */
	bool		efd_is_semaphore;	/* EFD_SEMAPHORE read semantics */

	/*
	 * Information kept for stat(2).
	 */
	struct timespec efd_btime;	/* time created */
	struct timespec	efd_mtime;	/* last write */
	struct timespec	efd_atime;	/* last read */
};
     84 
     85 #define	EVENTFD_MAXVAL	(UINT64_MAX - 1)
     86 
     87 /*
     88  * eventfd_create:
     89  *
     90  *	Create an eventfd object.
     91  */
     92 static struct eventfd *
     93 eventfd_create(unsigned int const val, int const flags)
     94 {
     95 	struct eventfd * const efd = kmem_zalloc(sizeof(*efd), KM_SLEEP);
     96 
     97 	mutex_init(&efd->efd_lock, MUTEX_DEFAULT, IPL_NONE);
     98 	cv_init(&efd->efd_read_wait, "efdread");
     99 	cv_init(&efd->efd_write_wait, "efdwrite");
    100 	cv_init(&efd->efd_restart_wait, "efdrstrt");
    101 	selinit(&efd->efd_read_sel);
    102 	selinit(&efd->efd_write_sel);
    103 	efd->efd_val = val;
    104 	efd->efd_is_semaphore = !!(flags & EFD_SEMAPHORE);
    105 	getnanotime(&efd->efd_btime);
    106 
    107 	/* Caller deals with EFD_CLOEXEC and EFD_NONBLOCK. */
    108 
    109 	return efd;
    110 }
    111 
    112 /*
    113  * eventfd_destroy:
    114  *
    115  *	Destroy an eventfd object.
    116  */
    117 static void
    118 eventfd_destroy(struct eventfd * const efd)
    119 {
    120 
    121 	KASSERT(efd->efd_nwaiters == 0);
    122 	KASSERT(efd->efd_restarting == false);
    123 	KASSERT(efd->efd_has_read_waiters == false);
    124 	KASSERT(efd->efd_has_write_waiters == false);
    125 
    126 	cv_destroy(&efd->efd_read_wait);
    127 	cv_destroy(&efd->efd_write_wait);
    128 	cv_destroy(&efd->efd_restart_wait);
    129 
    130 	seldestroy(&efd->efd_read_sel);
    131 	seldestroy(&efd->efd_write_sel);
    132 
    133 	mutex_destroy(&efd->efd_lock);
    134 
    135 	kmem_free(efd, sizeof(*efd));
    136 }
    137 
    138 /*
    139  * eventfd_wait:
    140  *
    141  *	Block on an eventfd.  Handles non-blocking, as well as
    142  *	the restart cases.
    143  */
static int
eventfd_wait(struct eventfd * const efd, int const fflag, bool const is_write)
{
	kcondvar_t *waitcv;
	int error;

	/* Called (and returns) with efd->efd_lock held. */

	/* Non-blocking descriptors never sleep. */
	if (fflag & FNONBLOCK) {
		return EAGAIN;
	}

	/*
	 * We're going to block.  If there is a restart in-progress,
	 * wait for that to complete first.
	 */
	while (efd->efd_restarting) {
		cv_wait(&efd->efd_restart_wait, &efd->efd_lock);
	}

	/*
	 * Record that a waiter of this direction exists, so that
	 * eventfd_wake() knows a broadcast is required, and pick the
	 * condvar to sleep on.
	 */
	if (is_write) {
		efd->efd_has_write_waiters = true;
		waitcv = &efd->efd_write_wait;
	} else {
		efd->efd_has_read_waiters = true;
		waitcv = &efd->efd_read_wait;
	}

	/* Interruptible sleep; a signal yields EINTR/ERESTART here. */
	efd->efd_nwaiters++;
	KASSERT(efd->efd_nwaiters > 0);
	error = cv_wait_sig(waitcv, &efd->efd_lock);
	efd->efd_nwaiters--;
	KASSERT(efd->efd_nwaiters >= 0);

	/*
	 * If a restart was triggered while we were asleep, we need
	 * to return ERESTART if no other error was returned.  If we
	 * are the last waiter coming out of the restart drain, clear
	 * the condition.
	 */
	if (efd->efd_restarting) {
		if (error == 0) {
			error = ERESTART;
		}
		if (efd->efd_nwaiters == 0) {
			efd->efd_restarting = false;
			cv_broadcast(&efd->efd_restart_wait);
		}
	}

	return error;
}
    194 
    195 /*
    196  * eventfd_wake:
    197  *
    198  *	Wake LWPs block on an eventfd.
    199  */
    200 static void
    201 eventfd_wake(struct eventfd * const efd, bool const is_write)
    202 {
    203 	kcondvar_t *waitcv = NULL;
    204 	struct selinfo *sel;
    205 	int pollev;
    206 
    207 	if (is_write) {
    208 		if (efd->efd_has_read_waiters) {
    209 			waitcv = &efd->efd_read_wait;
    210 			efd->efd_has_read_waiters = false;
    211 		}
    212 		sel = &efd->efd_read_sel;
    213 		pollev = POLLIN | POLLRDNORM;
    214 	} else {
    215 		if (efd->efd_has_write_waiters) {
    216 			waitcv = &efd->efd_write_wait;
    217 			efd->efd_has_write_waiters = false;
    218 		}
    219 		sel = &efd->efd_write_sel;
    220 		pollev = POLLOUT | POLLWRNORM;
    221 	}
    222 	if (waitcv != NULL) {
    223 		cv_broadcast(waitcv);
    224 	}
    225 	selnotify(sel, pollev, NOTE_SUBMIT);
    226 }
    227 
    228 /*
    229  * eventfd file operations
    230  */
    231 
static int
eventfd_fop_read(file_t * const fp, off_t * const offset,
    struct uio * const uio, kauth_cred_t const cred, int const flags)
{
	struct eventfd * const efd = fp->f_eventfd;
	int const fflag = fp->f_flag;
	eventfd_t return_value;
	int error;

	/* A read must have room for a full 64-bit counter value. */
	if (uio->uio_resid < sizeof(eventfd_t)) {
		return EINVAL;
	}

	mutex_enter(&efd->efd_lock);

	/* Sleep until the counter is non-zero (EAGAIN if non-blocking). */
	while (efd->efd_val == 0) {
		if ((error = eventfd_wait(efd, fflag, false)) != 0) {
			mutex_exit(&efd->efd_lock);
			return error;
		}
	}

	/*
	 * Semaphore mode hands out a single unit per read; otherwise
	 * the entire counter is returned and reset to zero.
	 */
	if (efd->efd_is_semaphore) {
		return_value = 1;
		efd->efd_val--;
	} else {
		return_value = efd->efd_val;
		efd->efd_val = 0;
	}

	getnanotime(&efd->efd_atime);
	/* The counter shrank, so blocked writers can now make progress. */
	eventfd_wake(efd, false);

	mutex_exit(&efd->efd_lock);

	/*
	 * NOTE(review): the copy-out happens after the counter was
	 * consumed and the lock dropped; if uiomove() faults, the
	 * consumed value is lost.  Confirm this is the intended
	 * trade-off before changing it.
	 */
	error = uiomove(&return_value, sizeof(return_value), uio);

	return error;
}
    271 
static int
eventfd_fop_write(file_t * const fp, off_t * const offset,
    struct uio * const uio, kauth_cred_t const cred, int const flags)
{
	struct eventfd * const efd = fp->f_eventfd;
	int const fflag = fp->f_flag;
	eventfd_t write_value;
	int error;

	/* A write must supply a full 64-bit value. */
	if (uio->uio_resid < sizeof(eventfd_t)) {
		return EINVAL;
	}

	/* Copy the addend in before taking the lock. */
	if ((error = uiomove(&write_value, sizeof(write_value), uio)) != 0) {
		return error;
	}

	/* Values above EVENTFD_MAXVAL (i.e. UINT64_MAX) are rejected. */
	if (write_value > EVENTFD_MAXVAL) {
		error = EINVAL;
		goto out;
	}

	mutex_enter(&efd->efd_lock);

	/* Sleep until the addition cannot push the counter past the max. */
	KASSERT(efd->efd_val <= EVENTFD_MAXVAL);
	while ((EVENTFD_MAXVAL - efd->efd_val) < write_value) {
		if ((error = eventfd_wait(efd, fflag, true)) != 0) {
			mutex_exit(&efd->efd_lock);
			goto out;
		}
	}

	efd->efd_val += write_value;
	KASSERT(efd->efd_val <= EVENTFD_MAXVAL);

	getnanotime(&efd->efd_mtime);
	/* The counter grew, so blocked readers can now make progress. */
	eventfd_wake(efd, true);

	mutex_exit(&efd->efd_lock);

 out:
	if (error) {
		/*
		 * Undo the effect of uiomove() so that the error
		 * gets reported correctly; see dofilewrite().
		 */
		uio->uio_resid += sizeof(write_value);
	}
	return error;
}
    322 
    323 static int
    324 eventfd_fop_poll(file_t * const fp, int const events)
    325 {
    326 	struct eventfd * const efd = fp->f_eventfd;
    327 	int revents = 0;
    328 
    329 	/*
    330 	 * Note that Linux will return POLLERR if the eventfd count
    331 	 * overflows, but that is not possible in the normal read/write
    332 	 * API, only with Linux kernel-internal interfaces.  So, this
    333 	 * implementation never returns POLLERR.
    334 	 *
    335 	 * Also note that the Linux eventfd(2) man page does not
    336 	 * specifically discuss returning POLLRDNORM, but we check
    337 	 * for that event in addition to POLLIN.
    338 	 */
    339 
    340 	mutex_enter(&efd->efd_lock);
    341 
    342 	if (events & (POLLIN | POLLRDNORM)) {
    343 		if (efd->efd_val != 0) {
    344 			revents |= events & (POLLIN | POLLRDNORM);
    345 		} else {
    346 			selrecord(curlwp, &efd->efd_read_sel);
    347 		}
    348 	}
    349 
    350 	if (events & (POLLOUT | POLLWRNORM)) {
    351 		if (efd->efd_val < EVENTFD_MAXVAL) {
    352 			revents |= events & (POLLOUT | POLLWRNORM);
    353 		} else {
    354 			selrecord(curlwp, &efd->efd_write_sel);
    355 		}
    356 	}
    357 
    358 	mutex_exit(&efd->efd_lock);
    359 
    360 	return revents;
    361 }
    362 
    363 static int
    364 eventfd_fop_stat(file_t * const fp, struct stat * const st)
    365 {
    366 	struct eventfd * const efd = fp->f_eventfd;
    367 
    368 	memset(st, 0, sizeof(*st));
    369 
    370 	mutex_enter(&efd->efd_lock);
    371 	st->st_size = (off_t)efd->efd_val;
    372 	st->st_blksize = sizeof(eventfd_t);
    373 	st->st_mode = S_IFIFO | S_IRUSR | S_IWUSR;
    374 	st->st_blocks = 1;
    375 	st->st_birthtimespec = st->st_ctimespec = efd->efd_btime;
    376 	st->st_atimespec = efd->efd_atime;
    377 	st->st_mtimespec = efd->efd_mtime;
    378 	st->st_uid = kauth_cred_geteuid(fp->f_cred);
    379 	st->st_gid = kauth_cred_getegid(fp->f_cred);
    380 	mutex_exit(&efd->efd_lock);
    381 
    382 	return 0;
    383 }
    384 
    385 static int
    386 eventfd_fop_close(file_t * const fp)
    387 {
    388 	struct eventfd * const efd = fp->f_eventfd;
    389 
    390 	fp->f_eventfd = NULL;
    391 	eventfd_destroy(efd);
    392 
    393 	return 0;
    394 }
    395 
    396 static void
    397 eventfd_filt_read_detach(struct knote * const kn)
    398 {
    399 	struct eventfd * const efd = ((file_t *)kn->kn_obj)->f_eventfd;
    400 
    401 	mutex_enter(&efd->efd_lock);
    402 	KASSERT(kn->kn_hook == efd);
    403 	selremove_knote(&efd->efd_read_sel, kn);
    404 	mutex_exit(&efd->efd_lock);
    405 }
    406 
    407 static int
    408 eventfd_filt_read(struct knote * const kn, long const hint)
    409 {
    410 	struct eventfd * const efd = ((file_t *)kn->kn_obj)->f_eventfd;
    411 
    412 	if (hint & NOTE_SUBMIT) {
    413 		KASSERT(mutex_owned(&efd->efd_lock));
    414 	} else {
    415 		mutex_enter(&efd->efd_lock);
    416 	}
    417 
    418 	kn->kn_data = (int64_t)efd->efd_val;
    419 
    420 	if ((hint & NOTE_SUBMIT) == 0) {
    421 		mutex_exit(&efd->efd_lock);
    422 	}
    423 
    424 	return (eventfd_t)kn->kn_data > 0;
    425 }
    426 
/* kqueue EVFILT_READ filter; MPSAFE, attached via eventfd_fop_kqfilter(). */
static const struct filterops eventfd_read_filterops = {
	.f_flags = FILTEROP_ISFD | FILTEROP_MPSAFE,
	.f_detach = eventfd_filt_read_detach,
	.f_event = eventfd_filt_read,
};
    432 
    433 static void
    434 eventfd_filt_write_detach(struct knote * const kn)
    435 {
    436 	struct eventfd * const efd = ((file_t *)kn->kn_obj)->f_eventfd;
    437 
    438 	mutex_enter(&efd->efd_lock);
    439 	KASSERT(kn->kn_hook == efd);
    440 	selremove_knote(&efd->efd_write_sel, kn);
    441 	mutex_exit(&efd->efd_lock);
    442 }
    443 
    444 static int
    445 eventfd_filt_write(struct knote * const kn, long const hint)
    446 {
    447 	struct eventfd * const efd = ((file_t *)kn->kn_obj)->f_eventfd;
    448 
    449 	if (hint & NOTE_SUBMIT) {
    450 		KASSERT(mutex_owned(&efd->efd_lock));
    451 	} else {
    452 		mutex_enter(&efd->efd_lock);
    453 	}
    454 
    455 	kn->kn_data = (int64_t)efd->efd_val;
    456 
    457 	if ((hint & NOTE_SUBMIT) == 0) {
    458 		mutex_exit(&efd->efd_lock);
    459 	}
    460 
    461 	return (eventfd_t)kn->kn_data < EVENTFD_MAXVAL;
    462 }
    463 
/* kqueue EVFILT_WRITE filter; MPSAFE, attached via eventfd_fop_kqfilter(). */
static const struct filterops eventfd_write_filterops = {
	.f_flags = FILTEROP_ISFD | FILTEROP_MPSAFE,
	.f_detach = eventfd_filt_write_detach,
	.f_event = eventfd_filt_write,
};
    469 
    470 static int
    471 eventfd_fop_kqfilter(file_t * const fp, struct knote * const kn)
    472 {
    473 	struct eventfd * const efd = ((file_t *)kn->kn_obj)->f_eventfd;
    474 	struct selinfo *sel;
    475 
    476 	switch (kn->kn_filter) {
    477 	case EVFILT_READ:
    478 		sel = &efd->efd_read_sel;
    479 		kn->kn_fop = &eventfd_read_filterops;
    480 		break;
    481 
    482 	case EVFILT_WRITE:
    483 		sel = &efd->efd_write_sel;
    484 		kn->kn_fop = &eventfd_write_filterops;
    485 		break;
    486 
    487 	default:
    488 		return EINVAL;
    489 	}
    490 
    491 	kn->kn_hook = efd;
    492 
    493 	mutex_enter(&efd->efd_lock);
    494 	selrecord_knote(sel, kn);
    495 	mutex_exit(&efd->efd_lock);
    496 
    497 	return 0;
    498 }
    499 
static void
eventfd_fop_restart(file_t * const fp)
{
	struct eventfd * const efd = fp->f_eventfd;

	/*
	 * Unblock blocked reads/writes in order to allow close() to complete.
	 * System calls return ERESTART so that the fd is revalidated.
	 */

	mutex_enter(&efd->efd_lock);

	if (efd->efd_nwaiters != 0) {
		/*
		 * Flag the restart: eventfd_wait() converts a normal
		 * wakeup into ERESTART while this is set, and the last
		 * waiter to drain out clears the flag again.
		 */
		efd->efd_restarting = true;
		if (efd->efd_has_read_waiters) {
			cv_broadcast(&efd->efd_read_wait);
			efd->efd_has_read_waiters = false;
		}
		if (efd->efd_has_write_waiters) {
			cv_broadcast(&efd->efd_write_wait);
			efd->efd_has_write_waiters = false;
		}
	}

	mutex_exit(&efd->efd_lock);
}
    526 
/*
 * File operations vector for eventfd descriptors.  ioctl is rejected
 * (fbadop_ioctl) and fcntl is a no-op (fnullop_fcntl); all other
 * operations map to the handlers above.
 */
static const struct fileops eventfd_fileops = {
	.fo_name = "eventfd",
	.fo_read = eventfd_fop_read,
	.fo_write = eventfd_fop_write,
	.fo_ioctl = fbadop_ioctl,
	.fo_fcntl = fnullop_fcntl,
	.fo_poll = eventfd_fop_poll,
	.fo_stat = eventfd_fop_stat,
	.fo_close = eventfd_fop_close,
	.fo_kqfilter = eventfd_fop_kqfilter,
	.fo_restart = eventfd_fop_restart,
};
    539 
    540 /*
    541  * eventfd(2) system call
    542  */
    543 int
    544 do_eventfd(struct lwp * const l, unsigned int const val, int const flags,
    545     register_t *retval)
    546 {
    547 	file_t *fp;
    548 	int fd, error;
    549 
    550 	if (flags & ~(EFD_CLOEXEC | EFD_NONBLOCK | EFD_SEMAPHORE)) {
    551 		return EINVAL;
    552 	}
    553 
    554 	if ((error = fd_allocfile(&fp, &fd)) != 0) {
    555 		return error;
    556 	}
    557 
    558 	fp->f_flag = FREAD | FWRITE;
    559 	if (flags & EFD_NONBLOCK) {
    560 		fp->f_flag |= FNONBLOCK;
    561 	}
    562 	fp->f_type = DTYPE_EVENTFD;
    563 	fp->f_ops = &eventfd_fileops;
    564 	fp->f_eventfd = eventfd_create(val, flags);
    565 	fd_set_exclose(l, fd, !!(flags & EFD_CLOEXEC));
    566 	fd_affix(curproc, fp, fd);
    567 
    568 	*retval = fd;
    569 	return 0;
    570 }
    571 
    572 int
    573 sys_eventfd(struct lwp *l, const struct sys_eventfd_args *uap,
    574     register_t *retval)
    575 {
    576 	/* {
    577 		syscallarg(unsigned int) val;
    578 		syscallarg(int) flags;
    579 	} */
    580 
    581 	return do_eventfd(l, SCARG(uap, val), SCARG(uap, flags), retval);
    582 }
    583