/*	$NetBSD: sys_select.c,v 1.50.2.1 2020/02/29 20:21:03 ad Exp $	*/

/*-
 * Copyright (c) 2007, 2008, 2009, 2010, 2019 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran and Mindaugas Rasiukevicius.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)sys_generic.c	8.9 (Berkeley) 2/14/95
 */

/*
 * System calls of the synchronous I/O multiplexing subsystem.
 *
 * Locking
 *
 *	Two locks are used: <object-lock> and selcluster_t::sc_lock.
 *
 *	The <object-lock> may belong to a device driver or to another
 *	subsystem, e.g. a socket or a pipe.  This lock is not exported,
 *	and is thus invisible to this subsystem.  Mainly, synchronisation
 *	between the selrecord() and selnotify() routines depends on this
 *	lock, as described in the comments below.
 *
 * Lock order
 *
 *	<object-lock> ->
 *		selcluster_t::sc_lock
 */
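
/*
 * A minimal sketch of the pattern a driver follows under its
 * <object-lock> (editorial example; "mydev" and its fields are
 * hypothetical, not part of this file):
 *
 *	static int
 *	mydev_poll(struct file *fp, int events)
 *	{
 *		struct mydev_softc *sc = fp->f_data;
 *		int revents = 0;
 *
 *		mutex_enter(&sc->sc_lock);	// the <object-lock>
 *		if (sc->sc_havedata)
 *			revents = events & (POLLIN | POLLRDNORM);
 *		else
 *			selrecord(curlwp, &sc->sc_rsel);
 *		mutex_exit(&sc->sc_lock);
 *		return revents;
 *	}
 *
 * When data later arrives, the driver takes the same lock and calls
 * selnotify(&sc->sc_rsel, POLLIN | POLLRDNORM, 0), which may in turn
 * take selcluster_t::sc_lock - hence the lock order above.
 */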

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sys_select.c,v 1.50.2.1 2020/02/29 20:21:03 ad Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/filedesc.h>
#include <sys/file.h>
#include <sys/proc.h>
#include <sys/socketvar.h>
#include <sys/signalvar.h>
#include <sys/uio.h>
#include <sys/kernel.h>
#include <sys/lwp.h>
#include <sys/poll.h>
#include <sys/mount.h>
#include <sys/syscallargs.h>
#include <sys/cpu.h>
#include <sys/atomic.h>
#include <sys/sleepq.h>
#include <sys/sysctl.h>
#include <sys/bitops.h>

/* Flags for lwp::l_selflag. */
#define	SEL_RESET	0	/* awoken, interrupted, or not yet polling */
#define	SEL_SCANNING	1	/* polling descriptors */
#define	SEL_BLOCKING	2	/* blocking and waiting for event */
#define	SEL_EVENT	3	/* interrupted, events set directly */
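
/*
 * Summary of the l_selflag transitions (editorial sketch; the
 * authoritative logic lives in sel_do_scan(), selnotify() and
 * sel_setevents() below):
 *
 *	SEL_RESET    -> SEL_SCANNING	sel_do_scan() begins a scan pass
 *	SEL_SCANNING -> SEL_BLOCKING	no events found; LWP about to sleep
 *	any state    -> SEL_RESET	selnotify() requests a full re-scan
 *	any state    -> SEL_EVENT	sel_setevents() stored events directly
 */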

/*
 * Per-cluster state for select()/poll().  For a system with 64 or
 * fewer CPUs, this gives us per-CPU clusters.
 */
#define	SELCLUSTERS	64
#define	SELCLUSTERMASK	(SELCLUSTERS - 1)

typedef struct selcluster {
	kmutex_t	*sc_lock;
	sleepq_t	sc_sleepq;
	uint64_t	sc_mask;
	int		sc_ncoll;
} selcluster_t;

static inline int	selscan(char *, const int, const size_t, register_t *);
static inline int	pollscan(struct pollfd *, const int, register_t *);
static void		selclear(void);

/* Events checked for each of select()'s read, write and except sets. */
static const int sel_flag[] = {
	POLLRDNORM | POLLHUP | POLLERR,
	POLLWRNORM | POLLHUP | POLLERR,
	POLLRDBAND
};

syncobj_t select_sobj = {
	.sobj_flag	= SOBJ_SLEEPQ_FIFO,
	.sobj_unsleep	= sleepq_unsleep,
	.sobj_changepri	= sleepq_changepri,
	.sobj_lendpri	= sleepq_lendpri,
	.sobj_owner	= syncobj_noowner,
};

static selcluster_t	*selcluster[SELCLUSTERS] __read_mostly;
static int		direct_select __read_mostly = 0;

/* Operations: either select() or poll(). */
const char	selop_select[] = "select";
const char	selop_poll[] = "poll";

/*
 * Select system call.
 */
int
sys___pselect50(struct lwp *l, const struct sys___pselect50_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(int)				nd;
		syscallarg(fd_set *)			in;
		syscallarg(fd_set *)			ou;
		syscallarg(fd_set *)			ex;
		syscallarg(const struct timespec *)	ts;
		syscallarg(sigset_t *)			mask;
	} */
	struct timespec	ats, *ts = NULL;
	sigset_t	amask, *mask = NULL;
	int		error;

	if (SCARG(uap, ts)) {
		error = copyin(SCARG(uap, ts), &ats, sizeof(ats));
		if (error)
			return error;
		ts = &ats;
	}
	if (SCARG(uap, mask) != NULL) {
		error = copyin(SCARG(uap, mask), &amask, sizeof(amask));
		if (error)
			return error;
		mask = &amask;
	}

	return selcommon(retval, SCARG(uap, nd), SCARG(uap, in),
	    SCARG(uap, ou), SCARG(uap, ex), ts, mask);
}

int
sys___select50(struct lwp *l, const struct sys___select50_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(int)			nd;
		syscallarg(fd_set *)		in;
		syscallarg(fd_set *)		ou;
		syscallarg(fd_set *)		ex;
		syscallarg(struct timeval *)	tv;
	} */
	struct timeval atv;
	struct timespec ats, *ts = NULL;
	int error;

	if (SCARG(uap, tv)) {
		error = copyin(SCARG(uap, tv), (void *)&atv, sizeof(atv));
		if (error)
			return error;

		if (atv.tv_usec < 0 || atv.tv_usec >= 1000000)
			return EINVAL;

		TIMEVAL_TO_TIMESPEC(&atv, &ats);
		ts = &ats;
	}

	return selcommon(retval, SCARG(uap, nd), SCARG(uap, in),
	    SCARG(uap, ou), SCARG(uap, ex), ts, NULL);
}

/*
 * sel_do_scan: common code to perform the scan on descriptors.
 */
static int
sel_do_scan(const char *opname, void *fds, const int nf, const size_t ni,
    struct timespec *ts, sigset_t *mask, register_t *retval)
{
	lwp_t		* const l = curlwp;
	selcluster_t	*sc;
	kmutex_t	*lock;
	struct timespec	sleepts;
	int		error, timo;

	timo = 0;
	if (ts && inittimeleft(ts, &sleepts) == -1) {
		return EINVAL;
	}

	if (__predict_false(mask))
		sigsuspendsetup(l, mask);

	/*
	 * We may context switch during or at any time after picking a CPU
	 * and cluster to associate with, but it doesn't matter.  In the
	 * unlikely event we migrate elsewhere all we risk is a little lock
	 * contention; correctness is not sacrificed.
	 */
	sc = curcpu()->ci_data.cpu_selcluster;
	lock = sc->sc_lock;
	l->l_selcluster = sc;

	if (opname == selop_select) {
		l->l_selbits = fds;
		l->l_selni = ni;
	} else {
		l->l_selbits = NULL;
	}

	for (;;) {
		int ncoll;

		SLIST_INIT(&l->l_selwait);
		l->l_selret = 0;

		/*
		 * No need to lock.  If this is overwritten by another value
		 * while scanning, we will retry below.  We only need to see
		 * exact state from the descriptors that we are about to poll,
		 * and lock activity resulting from fo_poll is enough to
		 * provide an up-to-date value for new polling activity.
		 */
		if (ts && (ts->tv_sec | ts->tv_nsec | direct_select) == 0) {
			/* Non-blocking: no need for selrecord()/selclear() */
			l->l_selflag = SEL_RESET;
		} else {
			l->l_selflag = SEL_SCANNING;
		}
		ncoll = sc->sc_ncoll;
		membar_exit();

		if (opname == selop_select) {
			error = selscan((char *)fds, nf, ni, retval);
		} else {
			error = pollscan((struct pollfd *)fds, nf, retval);
		}
		if (error || *retval)
			break;
		if (ts && (timo = gettimeleft(ts, &sleepts)) <= 0)
			break;
		/*
		 * Acquire the lock and perform the (re)checks.  Note that
		 * if a collision has occurred, our state does not matter,
		 * as we must perform a re-scan.  Therefore, check that
		 * first.
		 */
 state_check:
		mutex_spin_enter(lock);
		if (__predict_false(sc->sc_ncoll != ncoll)) {
			/* Collision: perform a re-scan. */
			mutex_spin_exit(lock);
			selclear();
			continue;
		}
		if (__predict_true(l->l_selflag == SEL_EVENT)) {
			/* Events occurred; they were set directly. */
			mutex_spin_exit(lock);
			break;
		}
		if (__predict_true(l->l_selflag == SEL_RESET)) {
			/* Events occurred, but a re-scan was requested. */
			mutex_spin_exit(lock);
			selclear();
			continue;
		}
		/* Nothing happened, therefore sleep. */
		l->l_selflag = SEL_BLOCKING;
		l->l_kpriority = true;
		sleepq_enter(&sc->sc_sleepq, l, lock);
		sleepq_enqueue(&sc->sc_sleepq, sc, opname, &select_sobj);
		error = sleepq_block(timo, true);
		if (error != 0) {
			break;
		}
		/* Awoken: need to check the state. */
		goto state_check;
	}
	selclear();

	/* Add direct events if any. */
	if (l->l_selflag == SEL_EVENT) {
		KASSERT(l->l_selret != 0);
		*retval += l->l_selret;
	}

	if (__predict_false(mask))
		sigsuspendteardown(l);

	/* select and poll are not restarted after signals... */
	if (error == ERESTART)
		return EINTR;
	if (error == EWOULDBLOCK)
		return 0;
	return error;
}

int
selcommon(register_t *retval, int nd, fd_set *u_in, fd_set *u_ou,
    fd_set *u_ex, struct timespec *ts, sigset_t *mask)
{
	char		smallbits[howmany(FD_SETSIZE, NFDBITS) *
			    sizeof(fd_mask) * 6];
	char		*bits;
	int		error, nf;
	size_t		ni;

	if (nd < 0)
		return (EINVAL);
	nf = atomic_load_consume(&curlwp->l_fd->fd_dt)->dt_nfiles;
	if (nd > nf) {
		/* forgiving; slightly wrong */
		nd = nf;
	}
	ni = howmany(nd, NFDBITS) * sizeof(fd_mask);
	if (ni * 6 > sizeof(smallbits))
		bits = kmem_alloc(ni * 6, KM_SLEEP);
	else
		bits = smallbits;

#define	getbits(name, x)						\
	if (u_ ## name) {						\
		error = copyin(u_ ## name, bits + ni * x, ni);		\
		if (error)						\
			goto fail;					\
	} else								\
		memset(bits + ni * x, 0, ni);
	getbits(in, 0);
	getbits(ou, 1);
	getbits(ex, 2);
#undef	getbits

	error = sel_do_scan(selop_select, bits, nd, ni, ts, mask, retval);
	if (error == 0 && u_in != NULL)
		error = copyout(bits + ni * 3, u_in, ni);
	if (error == 0 && u_ou != NULL)
		error = copyout(bits + ni * 4, u_ou, ni);
	if (error == 0 && u_ex != NULL)
		error = copyout(bits + ni * 5, u_ex, ni);
 fail:
	if (bits != smallbits)
		kmem_free(bits, ni * 6);
	return (error);
}

static inline int
selscan(char *bits, const int nfd, const size_t ni, register_t *retval)
{
	fd_mask *ibitp, *obitp;
	int msk, i, j, fd, n;
	file_t *fp;
	lwp_t *l;

	ibitp = (fd_mask *)(bits + ni * 0);
	obitp = (fd_mask *)(bits + ni * 3);
	n = 0;
	l = curlwp;

	memset(obitp, 0, ni * 3);
	for (msk = 0; msk < 3; msk++) {
		for (i = 0; i < nfd; i += NFDBITS) {
			fd_mask ibits, obits;

			ibits = *ibitp;
			obits = 0;
			while ((j = ffs(ibits)) && (fd = i + --j) < nfd) {
				ibits &= ~(1U << j);
				if ((fp = fd_getfile(fd)) == NULL)
					return (EBADF);
				/*
				 * Set up the argument for selrecord(),
				 * which is the file descriptor number.
				 */
				l->l_selrec = fd;
				if ((*fp->f_ops->fo_poll)(fp, sel_flag[msk])) {
					if (!direct_select) {
						/*
						 * Have events: do nothing in
						 * selrecord().
						 */
						l->l_selflag = SEL_RESET;
					}
					obits |= (1U << j);
					n++;
				}
				fd_putfile(fd);
			}
			if (obits != 0) {
				if (direct_select) {
					kmutex_t *lock;
					lock = l->l_selcluster->sc_lock;
					mutex_spin_enter(lock);
					*obitp |= obits;
					mutex_spin_exit(lock);
				} else {
					*obitp |= obits;
				}
			}
			ibitp++;
			obitp++;
		}
	}
	*retval = n;
	return (0);
}
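
/*
 * Worked example of the bitmap walk in selscan() (editorial note,
 * assuming a 32-bit fd_mask, i.e. NFDBITS == 32): a request for fd 35
 * in the read set lands in the second fd_mask word (i == 32) at bit
 * j == 3, since 35 == 32 + 3.  The matching result bit is set in the
 * word at the same offset within the output area, which starts
 * ni * 3 bytes past the input bits.
 */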

/*
 * Poll system call.
 */
int
sys_poll(struct lwp *l, const struct sys_poll_args *uap, register_t *retval)
{
	/* {
		syscallarg(struct pollfd *)	fds;
		syscallarg(u_int)		nfds;
		syscallarg(int)			timeout;
	} */
	struct timespec	ats, *ts = NULL;

	if (SCARG(uap, timeout) != INFTIM) {
		ats.tv_sec = SCARG(uap, timeout) / 1000;
		ats.tv_nsec = (SCARG(uap, timeout) % 1000) * 1000000;
		ts = &ats;
	}

	return pollcommon(retval, SCARG(uap, fds), SCARG(uap, nfds), ts, NULL);
}

/*
 * Poll system call with a timeout specified as a timespec, and an
 * optional signal mask.
 */
int
sys___pollts50(struct lwp *l, const struct sys___pollts50_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(struct pollfd *)		fds;
		syscallarg(u_int)			nfds;
		syscallarg(const struct timespec *)	ts;
		syscallarg(const sigset_t *)		mask;
	} */
	struct timespec	ats, *ts = NULL;
	sigset_t	amask, *mask = NULL;
	int		error;

	if (SCARG(uap, ts)) {
		error = copyin(SCARG(uap, ts), &ats, sizeof(ats));
		if (error)
			return error;
		ts = &ats;
	}
	if (SCARG(uap, mask)) {
		error = copyin(SCARG(uap, mask), &amask, sizeof(amask));
		if (error)
			return error;
		mask = &amask;
	}

	return pollcommon(retval, SCARG(uap, fds), SCARG(uap, nfds), ts, mask);
}

int
pollcommon(register_t *retval, struct pollfd *u_fds, u_int nfds,
    struct timespec *ts, sigset_t *mask)
{
	struct pollfd	smallfds[32];
	struct pollfd	*fds;
	int		error;
	size_t		ni;

	if (nfds > curlwp->l_proc->p_rlimit[RLIMIT_NOFILE].rlim_max + 1000) {
		/*
		 * Prevent userland from causing over-allocation.
		 * Raising the default limit too high can still cause
		 * a lot of memory to be allocated, but then the file
		 * descriptor array will be large as well.
		 *
		 * To reduce the memory requirements here, we could
		 * process the 'fds' array in chunks, but that
		 * is a lot of code that isn't normally useful.
		 * (Or just move the copyin/out into pollscan().)
		 *
		 * Historically the code silently truncated 'fds' to
		 * dt_nfiles entries - but that does cause issues.
		 *
		 * Using the max limit equivalent to sysctl
		 * kern.maxfiles is the moral equivalent of OPEN_MAX
		 * as specified by POSIX.
		 *
		 * We add a slop of 1000 in case the resource limit was
		 * changed after opening descriptors or the same descriptor
		 * was specified more than once.
		 */
		return EINVAL;
	}
	ni = nfds * sizeof(struct pollfd);
	if (ni > sizeof(smallfds))
		fds = kmem_alloc(ni, KM_SLEEP);
	else
		fds = smallfds;

	error = copyin(u_fds, fds, ni);
	if (error)
		goto fail;

	error = sel_do_scan(selop_poll, fds, nfds, ni, ts, mask, retval);
	if (error == 0)
		error = copyout(fds, u_fds, ni);
 fail:
	if (fds != smallfds)
		kmem_free(fds, ni);
	return (error);
}

static inline int
pollscan(struct pollfd *fds, const int nfd, register_t *retval)
{
	file_t *fp;
	int i, n = 0, revents;

	for (i = 0; i < nfd; i++, fds++) {
		fds->revents = 0;
		if (fds->fd < 0) {
			revents = 0;
		} else if ((fp = fd_getfile(fds->fd)) == NULL) {
			revents = POLLNVAL;
		} else {
			/*
			 * Perform the poll: this registers a select request
			 * or returns the events which are set.  Set up the
			 * argument for selrecord(), which is a pointer to
			 * the struct pollfd.
			 */
			curlwp->l_selrec = (uintptr_t)fds;
			revents = (*fp->f_ops->fo_poll)(fp,
			    fds->events | POLLERR | POLLHUP);
			fd_putfile(fds->fd);
		}
		if (revents) {
			if (!direct_select) {
				/* Have events: do nothing in selrecord(). */
				curlwp->l_selflag = SEL_RESET;
			}
			fds->revents = revents;
			n++;
		}
	}
	*retval = n;
	return (0);
}

int
seltrue(dev_t dev, int events, lwp_t *l)
{

	return (events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
}

/*
 * Record a select request.  Concurrency issues:
 *
 *	The caller holds the same lock across calls to selrecord() and
 *	selnotify(), so we don't need to consider a concurrent wakeup
 *	while in this routine.
 *
 *	The only activity we need to guard against is selclear(), called by
 *	another thread that is exiting sel_do_scan().
 *	`sel_lwp' can only become non-NULL while the caller's lock is held,
 *	so it cannot become non-NULL due to a change made by another thread
 *	while we are in this routine.  It can only become _NULL_ due to a
 *	call to selclear().
 *
 *	If it is non-NULL and != selector there is the potential for
 *	selclear() to be called by another thread.  If either of those
 *	conditions is true, we're not interested in touching the `named
 *	waiter' part of the selinfo record because we need to record a
 *	collision.  Hence there is no need for additional locking in this
 *	routine.
 */
void
selrecord(lwp_t *selector, struct selinfo *sip)
{
	selcluster_t *sc;
	lwp_t *other;

	KASSERT(selector == curlwp);

	sc = selector->l_selcluster;
	other = sip->sel_lwp;

	if (selector->l_selflag == SEL_RESET) {
		/* 0. We're not going to block - will poll again if needed. */
	} else if (other == selector) {
		/* 1. We (selector) already claimed to be the first LWP. */
		KASSERT(sip->sel_cluster == sc);
	} else if (other == NULL) {
		/*
		 * 2. No first LWP, therefore we (selector) are the first.
		 *
		 * There may be unnamed waiters (collisions).  Issue a memory
		 * barrier to ensure that we access sel_lwp (above) before
		 * other fields - this guards against a call to selclear().
		 */
		membar_enter();
		sip->sel_lwp = selector;
		SLIST_INSERT_HEAD(&selector->l_selwait, sip, sel_chain);
		/* Copy the argument, which is for selnotify(). */
		sip->sel_fdinfo = selector->l_selrec;
		/* Replace selinfo's lock with the chosen cluster's lock. */
		sip->sel_cluster = sc;
	} else {
		/* 3. Multiple waiters: record a collision. */
		sip->sel_collision |= sc->sc_mask;
		KASSERT(sip->sel_cluster != NULL);
	}
}

/*
 * sel_setevents: a helper function for selnotify(), to set the events
 * for an LWP sleeping in selcommon() or pollcommon().
 */
static inline bool
sel_setevents(lwp_t *l, struct selinfo *sip, const int events)
{
	const int oflag = l->l_selflag;
	int ret = 0;

	/*
	 * If a re-scan is required, or one was requested by somebody else,
	 * then just (re)set SEL_RESET and return.
	 */
	if (__predict_false(events == 0 || oflag == SEL_RESET)) {
		l->l_selflag = SEL_RESET;
		return true;
	}
	/*
	 * Direct set.  Note: the select state of the LWP is locked.  First,
	 * determine whether it is selcommon() or pollcommon().
	 */
	if (l->l_selbits != NULL) {
		const size_t ni = l->l_selni;
		fd_mask *fds = (fd_mask *)l->l_selbits;
		fd_mask *ofds = (fd_mask *)((char *)fds + ni * 3);
		const int fd = sip->sel_fdinfo, fbit = 1 << (fd & __NFDMASK);
		const int idx = fd >> __NFDSHIFT;
		int n;

		for (n = 0; n < 3; n++) {
			if ((fds[idx] & fbit) != 0 &&
			    (ofds[idx] & fbit) == 0 &&
			    (sel_flag[n] & events)) {
				ofds[idx] |= fbit;
				ret++;
			}
			fds = (fd_mask *)((char *)fds + ni);
			ofds = (fd_mask *)((char *)ofds + ni);
		}
	} else {
		struct pollfd *pfd = (void *)sip->sel_fdinfo;
		int revents = events & (pfd->events | POLLERR | POLLHUP);

		if (revents) {
			if (pfd->revents == 0)
				ret = 1;
			pfd->revents |= revents;
		}
	}
	/* Check whether there are any events to return. */
	if (!ret) {
		return false;
	}
	/* Indicate direct set and note the event (cluster lock is held). */
	l->l_selflag = SEL_EVENT;
	l->l_selret += ret;
	return true;
}

/*
 * Do a wakeup when a selectable event occurs.  Concurrency issues:
 *
 *	As per selrecord(), the caller's object lock is held.  If there
 *	is a named waiter, we must acquire the associated selcluster's lock
 *	in order to synchronize with selclear() and pollers going to sleep
 *	in sel_do_scan().
 *
 *	sip->sel_cluster cannot change at this point, as it is only changed
 *	in selrecord(), and concurrent calls to selrecord() are locked
 *	out by the caller.
 */
void
selnotify(struct selinfo *sip, int events, long knhint)
{
	selcluster_t *sc;
	uint64_t mask;
	int index, oflag;
	lwp_t *l;
	kmutex_t *lock;

	KNOTE(&sip->sel_klist, knhint);

	if (sip->sel_lwp != NULL) {
		/* One named LWP is waiting. */
		sc = sip->sel_cluster;
		lock = sc->sc_lock;
		mutex_spin_enter(lock);
		/* Still there? */
		if (sip->sel_lwp != NULL) {
			/*
			 * Set the events for our LWP and indicate that;
			 * otherwise, request a full re-scan.
			 */
			l = sip->sel_lwp;
			oflag = l->l_selflag;

			if (!direct_select) {
				l->l_selflag = SEL_RESET;
			} else if (!sel_setevents(l, sip, events)) {
				/* No events to return. */
				mutex_spin_exit(lock);
				return;
			}

			/*
			 * If the thread is sleeping, wake it up.  If it's
			 * not yet asleep, it will notice the change in
			 * state and will re-poll the descriptors.
			 */
			if (oflag == SEL_BLOCKING && l->l_mutex == lock) {
				KASSERT(l->l_wchan == sc);
				sleepq_unsleep(l, false);
			}
		}
		mutex_spin_exit(lock);
	}

	if ((mask = sip->sel_collision) != 0) {
		/*
		 * There was a collision (multiple waiters): we must
		 * inform all potentially interested waiters.
		 */
		sip->sel_collision = 0;
		do {
			index = ffs64(mask) - 1;
			mask ^= __BIT(index);
			sc = selcluster[index];
			lock = sc->sc_lock;
			mutex_spin_enter(lock);
			sc->sc_ncoll++;
			sleepq_wake(&sc->sc_sleepq, sc, (u_int)-1, lock);
		} while (__predict_false(mask != 0));
	}
}
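
/*
 * Example of the collision path above (editorial sketch): if LWPs on
 * clusters 2 and 5 slept on a selinfo already claimed by an LWP on
 * cluster 0, selrecord() would have left sel_collision holding
 * __BIT(2) | __BIT(5).  The loop above then wakes each cluster's
 * sleep queue in turn, clearing one bit per iteration via ffs64().
 */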

/*
 * Remove an LWP from all objects that it is waiting for.  Concurrency
 * issues:
 *
 *	The object owner's (e.g. device driver) lock is not held here.  Calls
 *	can be made to selrecord() and we do not synchronize against those
 *	directly using locks.  However, we use `sel_lwp' to lock out changes.
 *	Before clearing it we must use memory barriers to ensure that we can
 *	safely traverse the list of selinfo records.
 */
static void
selclear(void)
{
	struct selinfo *sip, *next;
	selcluster_t *sc;
	lwp_t *l;
	kmutex_t *lock;

	l = curlwp;
	sc = l->l_selcluster;
	lock = sc->sc_lock;

	/*
	 * If the request was non-blocking, or we found events on the first
	 * descriptor, there will be no need to clear anything - avoid
	 * taking the lock.
	 */
	if (SLIST_EMPTY(&l->l_selwait)) {
		return;
	}

	mutex_spin_enter(lock);
	for (sip = SLIST_FIRST(&l->l_selwait); sip != NULL; sip = next) {
		KASSERT(sip->sel_lwp == l);
		KASSERT(sip->sel_cluster == l->l_selcluster);

		/*
		 * Read link to next selinfo record, if any.
		 * It's no longer safe to touch `sip' after clearing
		 * `sel_lwp', so ensure that the read of `sel_chain'
		 * completes before the clearing of sel_lwp becomes
		 * globally visible.
		 */
		next = SLIST_NEXT(sip, sel_chain);
		membar_exit();
		/* Release the record for another named waiter to use. */
		sip->sel_lwp = NULL;
	}
	mutex_spin_exit(lock);
}

/*
 * Initialize the select/poll system calls.  Called once for each
 * CPU in the system, as the CPUs are attached.
 */
void
selsysinit(struct cpu_info *ci)
{
	selcluster_t *sc;
	u_int index;

	/* If a cluster is already in place for this bit, re-use it. */
	index = cpu_index(ci) & SELCLUSTERMASK;
	sc = selcluster[index];
	if (sc == NULL) {
		sc = kmem_alloc(roundup2(sizeof(selcluster_t),
		    coherency_unit) + coherency_unit, KM_SLEEP);
		sc = (void *)roundup2((uintptr_t)sc, coherency_unit);
		sc->sc_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_SCHED);
		sleepq_init(&sc->sc_sleepq);
		sc->sc_ncoll = 0;
		sc->sc_mask = __BIT(index);
		selcluster[index] = sc;
	}
	ci->ci_data.cpu_selcluster = sc;
}

/*
 * Initialize a selinfo record.
 */
void
selinit(struct selinfo *sip)
{

	memset(sip, 0, sizeof(*sip));
}

/*
 * Destroy a selinfo record.  The owning object must not gain new
 * references while this is in progress: all activity on the record
 * must be stopped.
 *
 * Concurrency issues: we only need to guard against a call to selclear()
 * by a thread exiting sel_do_scan().  The caller has prevented further
 * references being made to the selinfo record via selrecord(), and it
 * will not call selnotify() again.
 */
void
seldestroy(struct selinfo *sip)
{
	selcluster_t *sc;
	kmutex_t *lock;
	lwp_t *l;

	if (sip->sel_lwp == NULL)
		return;

	/*
	 * Lock out selclear().  The selcluster pointer can't change while
	 * we are here since it is only ever changed in selrecord(),
	 * and that will not be entered again for this record because
	 * it is dying.
	 */
	KASSERT(sip->sel_cluster != NULL);
	sc = sip->sel_cluster;
	lock = sc->sc_lock;
	mutex_spin_enter(lock);
	if ((l = sip->sel_lwp) != NULL) {
		/*
		 * This should rarely happen, so although SLIST_REMOVE()
		 * is slow, using it here is not a problem.
		 */
		KASSERT(l->l_selcluster == sc);
		SLIST_REMOVE(&l->l_selwait, sip, selinfo, sel_chain);
		sip->sel_lwp = NULL;
	}
	mutex_spin_exit(lock);
}

/*
 * System control nodes.
 */
SYSCTL_SETUP(sysctl_select_setup, "sysctl select setup")
{

	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
	    CTLTYPE_INT, "direct_select",
	    SYSCTL_DESCR("Enable/disable direct select (for testing)"),
	    NULL, 0, &direct_select, 0,
	    CTL_KERN, CTL_CREATE, CTL_EOL);
}
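
/*
 * The node created above is attached under kern (dynamically numbered
 * via CTL_CREATE), so it appears as kern.direct_select.  A sketch of
 * toggling it for testing:
 *
 *	# sysctl -w kern.direct_select=1
 *	kern.direct_select: 0 -> 1
 */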