/*	$NetBSD: sys_select.c,v 1.29 2010/12/18 01:36:19 rmind Exp $	*/

/*-
 * Copyright (c) 2007, 2008, 2009, 2010 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran and Mindaugas Rasiukevicius.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)sys_generic.c	8.9 (Berkeley) 2/14/95
 */

/*
 * System calls of the synchronous I/O multiplexing subsystem.
 *
 * Locking
 *
 *	Two locks are used: <object-lock> and selcluster_t::sc_lock.
 *
 *	The <object-lock> is owned by a device driver or another subsystem,
 *	e.g. the socket or pipe code.  It is not exported, and thus invisible
 *	to this subsystem.  Mainly, synchronisation between the selrecord()
 *	and selnotify() routines depends on this lock, as described in the
 *	comments below.
 *
 * Lock order
 *
 *	<object-lock> ->
 *		selcluster_t::sc_lock
 */
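
/*
 * Illustrative sketch of the lock order (hypothetical driver, not part
 * of this file): the driver's own mutex stands in for the <object-lock>,
 * and selnotify() acquires selcluster_t::sc_lock internally, so the
 * order above is respected:
 *
 *	mutex_enter(&sc->sc_lock);			   (<object-lock>)
 *	sc->sc_state |= FOO_READABLE;
 *	selnotify(&sc->sc_rsel, POLLIN | POLLRDNORM, 0);   (takes sc_lock)
 *	mutex_exit(&sc->sc_lock);
 */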

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sys_select.c,v 1.29 2010/12/18 01:36:19 rmind Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/filedesc.h>
#include <sys/file.h>
#include <sys/proc.h>
#include <sys/socketvar.h>
#include <sys/signalvar.h>
#include <sys/uio.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/lwp.h>
#include <sys/poll.h>
#include <sys/mount.h>
#include <sys/syscallargs.h>
#include <sys/cpu.h>
#include <sys/atomic.h>
#include <sys/sleepq.h>

/* Flags for lwp::l_selflag. */
#define	SEL_RESET	0	/* awoken, interrupted, or not yet polling */
#define	SEL_SCANNING	1	/* polling descriptors */
#define	SEL_BLOCKING	2	/* blocking and waiting for event */
#define	SEL_EVENT	3	/* interrupted, events set directly */
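
/*
 * Informal sketch of the l_selflag state transitions (derived from the
 * code below, not an authoritative diagram):
 *
 *	SEL_RESET    -> SEL_SCANNING	sel_do_scan() begins a scan
 *	SEL_SCANNING -> SEL_BLOCKING	nothing ready; LWP about to sleep
 *	any state    -> SEL_EVENT	selnotify() set events directly
 *	any state    -> SEL_RESET	selnotify() requested a full re-scan
 */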

/* Operations: either select() or poll(). */
#define	SELOP_SELECT	1
#define	SELOP_POLL	2

/*
 * Per-cluster state for select()/poll().  For a system with fewer
 * than 32 CPUs, this gives us per-CPU clusters.
 */
#define	SELCLUSTERS	32
#define	SELCLUSTERMASK	(SELCLUSTERS - 1)

typedef struct selcluster {
	kmutex_t	*sc_lock;	/* lock on this cluster's state */
	sleepq_t	sc_sleepq;	/* queue of blocked selectors */
	int		sc_ncoll;	/* number of collisions */
	uint32_t	sc_mask;	/* bitmask identifying this cluster */
} selcluster_t;

static inline int selscan(char *, const int, const size_t, register_t *);
static inline int pollscan(struct pollfd *, const int, register_t *);
static void selclear(void);

/* Events checked for each of select()'s three sets: in, out, except. */
static const int sel_flag[] = {
	POLLRDNORM | POLLHUP | POLLERR,
	POLLWRNORM | POLLHUP | POLLERR,
	POLLRDBAND
};

static syncobj_t select_sobj = {
	SOBJ_SLEEPQ_FIFO,
	sleepq_unsleep,
	sleepq_changepri,
	sleepq_lendpri,
	syncobj_noowner,
};

static selcluster_t *selcluster[SELCLUSTERS] __read_mostly;

/*
 * Select system call.
 */
int
sys___pselect50(struct lwp *l, const struct sys___pselect50_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(int) nd;
		syscallarg(fd_set *) in;
		syscallarg(fd_set *) ou;
		syscallarg(fd_set *) ex;
		syscallarg(const struct timespec *) ts;
		syscallarg(sigset_t *) mask;
	} */
	struct timespec ats, *ts = NULL;
	sigset_t amask, *mask = NULL;
	int error;

	if (SCARG(uap, ts)) {
		error = copyin(SCARG(uap, ts), &ats, sizeof(ats));
		if (error)
			return error;
		ts = &ats;
	}
	if (SCARG(uap, mask) != NULL) {
		error = copyin(SCARG(uap, mask), &amask, sizeof(amask));
		if (error)
			return error;
		mask = &amask;
	}

	return selcommon(retval, SCARG(uap, nd), SCARG(uap, in),
	    SCARG(uap, ou), SCARG(uap, ex), ts, mask);
}
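
/*
 * Userland sketch (hypothetical, not part of this file): the point of
 * pselect(2) is that the temporary signal mask is installed atomically
 * with the wait, closing the race a separate sigprocmask(2) + select(2)
 * pair would leave open:
 *
 *	sigset_t blocked, waitmask;
 *
 *	sigemptyset(&blocked);
 *	sigaddset(&blocked, SIGINT);
 *	sigprocmask(SIG_BLOCK, &blocked, &waitmask);
 *	... test a flag set by the SIGINT handler ...
 *	pselect(nfds, &rfds, NULL, NULL, NULL, &waitmask);
 */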

int
sys___select50(struct lwp *l, const struct sys___select50_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(int) nd;
		syscallarg(fd_set *) in;
		syscallarg(fd_set *) ou;
		syscallarg(fd_set *) ex;
		syscallarg(struct timeval *) tv;
	} */
	struct timeval atv;
	struct timespec ats, *ts = NULL;
	int error;

	if (SCARG(uap, tv)) {
		error = copyin(SCARG(uap, tv), (void *)&atv, sizeof(atv));
		if (error)
			return error;
		TIMEVAL_TO_TIMESPEC(&atv, &ats);
		ts = &ats;
	}

	return selcommon(retval, SCARG(uap, nd), SCARG(uap, in),
	    SCARG(uap, ou), SCARG(uap, ex), ts, NULL);
}

/*
 * sel_do_scan: common code to perform the scan on descriptors.
 */
static int
sel_do_scan(const int op, void *fds, const int nf, const size_t ni,
    struct timespec *ts, sigset_t *mask, register_t *retval)
{
	lwp_t * const l = curlwp;
	proc_t * const p = l->l_proc;
	selcluster_t *sc;
	kmutex_t *lock;
	sigset_t oldmask;
	struct timespec sleepts;
	int error, timo;

	timo = 0;
	if (ts && inittimeleft(ts, &sleepts) == -1) {
		return EINVAL;
	}

	if (__predict_false(mask)) {
		sigminusset(&sigcantmask, mask);
		mutex_enter(p->p_lock);
		oldmask = l->l_sigmask;
		l->l_sigmask = *mask;
		mutex_exit(p->p_lock);
	} else {
		/* XXXgcc */
		oldmask = l->l_sigmask;
	}

	sc = curcpu()->ci_data.cpu_selcluster;
	lock = sc->sc_lock;
	l->l_selcluster = sc;
	SLIST_INIT(&l->l_selwait);

	l->l_selret = 0;
	if (op == SELOP_SELECT) {
		l->l_selbits = (char *)fds + ni * 3;
		l->l_selni = ni;
	} else {
		l->l_selbits = NULL;
	}
	for (;;) {
		int ncoll;

		/*
		 * No need to lock.  If this is overwritten by another value
		 * while scanning, we will retry below.  We only need to see
		 * exact state from the descriptors that we are about to poll,
		 * and lock activity resulting from fo_poll is enough to
		 * provide an up-to-date value for new polling activity.
		 */
		l->l_selflag = SEL_SCANNING;
		ncoll = sc->sc_ncoll;

		if (op == SELOP_SELECT) {
			error = selscan((char *)fds, nf, ni, retval);
		} else {
			error = pollscan((struct pollfd *)fds, nf, retval);
		}
		if (error || *retval)
			break;
		if (ts && (timo = gettimeleft(ts, &sleepts)) <= 0)
			break;
		/*
		 * Acquire the lock and perform the (re)checks.  Note, if
		 * a collision has occurred, then our state does not matter,
		 * as we must perform a re-scan.  Therefore, check it first.
		 */
state_check:
		mutex_spin_enter(lock);
		if (__predict_false(sc->sc_ncoll != ncoll)) {
			/* Collision: perform a re-scan. */
			mutex_spin_exit(lock);
			continue;
		}
		if (__predict_true(l->l_selflag == SEL_EVENT)) {
			/* Events occurred; they were set directly. */
			mutex_spin_exit(lock);
			KASSERT(l->l_selret != 0);
			*retval = l->l_selret;
			break;
		}
		if (__predict_true(l->l_selflag == SEL_RESET)) {
			/* Events occurred, but a re-scan is requested. */
			mutex_spin_exit(lock);
			continue;
		}
		/* Nothing happened, therefore sleep. */
		l->l_selflag = SEL_BLOCKING;
		l->l_kpriority = true;
		sleepq_enter(&sc->sc_sleepq, l, lock);
		sleepq_enqueue(&sc->sc_sleepq, sc, "select", &select_sobj);
		error = sleepq_block(timo, true);
		if (error != 0) {
			break;
		}
		/* Awoken: need to check the state. */
		goto state_check;
	}
	selclear();

	if (__predict_false(mask)) {
		mutex_enter(p->p_lock);
		l->l_sigmask = oldmask;
		mutex_exit(p->p_lock);
	}

	/* select and poll are not restarted after signals... */
	if (error == ERESTART)
		return EINTR;
	if (error == EWOULDBLOCK)
		return 0;
	return error;
}

int
selcommon(register_t *retval, int nd, fd_set *u_in, fd_set *u_ou,
    fd_set *u_ex, struct timespec *ts, sigset_t *mask)
{
	char smallbits[howmany(FD_SETSIZE, NFDBITS) *
	    sizeof(fd_mask) * 6];
	char *bits;
	int error, nf;
	size_t ni;

	if (nd < 0)
		return (EINVAL);
	nf = curlwp->l_fd->fd_dt->dt_nfiles;
	if (nd > nf) {
		/* forgiving; slightly wrong */
		nd = nf;
	}
	ni = howmany(nd, NFDBITS) * sizeof(fd_mask);
	if (ni * 6 > sizeof(smallbits)) {
		bits = kmem_alloc(ni * 6, KM_SLEEP);
		if (bits == NULL)
			return ENOMEM;
	} else
		bits = smallbits;

#define	getbits(name, x)						\
	if (u_ ## name) {						\
		error = copyin(u_ ## name, bits + ni * x, ni);		\
		if (error)						\
			goto fail;					\
	} else								\
		memset(bits + ni * x, 0, ni);
	getbits(in, 0);
	getbits(ou, 1);
	getbits(ex, 2);
#undef	getbits

	error = sel_do_scan(SELOP_SELECT, bits, nd, ni, ts, mask, retval);
	if (error == 0 && u_in != NULL)
		error = copyout(bits + ni * 3, u_in, ni);
	if (error == 0 && u_ou != NULL)
		error = copyout(bits + ni * 4, u_ou, ni);
	if (error == 0 && u_ex != NULL)
		error = copyout(bits + ni * 5, u_ex, ni);
 fail:
	if (bits != smallbits)
		kmem_free(bits, ni * 6);
	return (error);
}

static inline int
selscan(char *bits, const int nfd, const size_t ni, register_t *retval)
{
	fd_mask *ibitp, *obitp;
	int msk, i, j, fd, n;
	file_t *fp;

	ibitp = (fd_mask *)(bits + ni * 0);
	obitp = (fd_mask *)(bits + ni * 3);
	n = 0;

	for (msk = 0; msk < 3; msk++) {
		for (i = 0; i < nfd; i += NFDBITS) {
			fd_mask ibits, obits;

			ibits = *ibitp++;
			obits = 0;
			while ((j = ffs(ibits)) && (fd = i + --j) < nfd) {
				ibits &= ~(1 << j);
				if ((fp = fd_getfile(fd)) == NULL)
					return (EBADF);
				/*
				 * Set up the argument for selrecord(),
				 * which is the file descriptor number.
				 */
				curlwp->l_selrec = fd;
				if ((*fp->f_ops->fo_poll)(fp, sel_flag[msk])) {
					obits |= (1 << j);
					n++;
				}
				fd_putfile(fd);
			}
			*obitp++ = obits;
		}
	}
	*retval = n;
	return (0);
}
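
/*
 * Worked example of the ffs() loop above: for a 32-bit word with
 * ibits = 0x12 (fds i+1 and i+4 set), ffs() first returns 2, so j = 1
 * and fd = i + 1 is polled; clearing that bit leaves 0x10, ffs()
 * returns 5, so j = 4 and fd = i + 4 is polled; the word is then zero
 * and the inner loop ends.
 */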

/*
 * Poll system call.
 */
int
sys_poll(struct lwp *l, const struct sys_poll_args *uap, register_t *retval)
{
	/* {
		syscallarg(struct pollfd *) fds;
		syscallarg(u_int) nfds;
		syscallarg(int) timeout;
	} */
	struct timespec ats, *ts = NULL;

	if (SCARG(uap, timeout) != INFTIM) {
		ats.tv_sec = SCARG(uap, timeout) / 1000;
		ats.tv_nsec = (SCARG(uap, timeout) % 1000) * 1000000;
		ts = &ats;
	}

	return pollcommon(retval, SCARG(uap, fds), SCARG(uap, nfds), ts, NULL);
}
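
/*
 * Example of the millisecond conversion above: timeout = 1500 gives
 * ats.tv_sec = 1 and ats.tv_nsec = 500000000, while timeout = INFTIM
 * (-1) leaves ts == NULL, i.e. an infinite wait.
 */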

/*
 * Poll system call with a timeout specified as a timespec and an
 * optional signal mask (pollts).
 */
int
sys___pollts50(struct lwp *l, const struct sys___pollts50_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(struct pollfd *) fds;
		syscallarg(u_int) nfds;
		syscallarg(const struct timespec *) ts;
		syscallarg(const sigset_t *) mask;
	} */
	struct timespec ats, *ts = NULL;
	sigset_t amask, *mask = NULL;
	int error;

	if (SCARG(uap, ts)) {
		error = copyin(SCARG(uap, ts), &ats, sizeof(ats));
		if (error)
			return error;
		ts = &ats;
	}
	if (SCARG(uap, mask)) {
		error = copyin(SCARG(uap, mask), &amask, sizeof(amask));
		if (error)
			return error;
		mask = &amask;
	}

	return pollcommon(retval, SCARG(uap, fds), SCARG(uap, nfds), ts, mask);
}

int
pollcommon(register_t *retval, struct pollfd *u_fds, u_int nfds,
    struct timespec *ts, sigset_t *mask)
{
	struct pollfd smallfds[32];
	struct pollfd *fds;
	int error;
	size_t ni;

	if (nfds > 1000 + curlwp->l_fd->fd_dt->dt_nfiles) {
		/*
		 * Either the user passed in a very sparse 'fds' or junk!
		 * The kmem_alloc() call below would be bad news.
		 * We could process the 'fds' array in chunks, but that
		 * is a lot of code that isn't normally useful.
		 * (Or just move the copyin/out into pollscan().)
		 * Historically the code silently truncated 'fds' to
		 * dt_nfiles entries - but that does cause issues.
		 */
		return EINVAL;
	}
	ni = nfds * sizeof(struct pollfd);
	if (ni > sizeof(smallfds)) {
		fds = kmem_alloc(ni, KM_SLEEP);
		if (fds == NULL)
			return ENOMEM;
	} else
		fds = smallfds;

	error = copyin(u_fds, fds, ni);
	if (error)
		goto fail;

	error = sel_do_scan(SELOP_POLL, fds, nfds, ni, ts, mask, retval);
	if (error == 0)
		error = copyout(fds, u_fds, ni);
 fail:
	if (fds != smallfds)
		kmem_free(fds, ni);
	return (error);
}

static inline int
pollscan(struct pollfd *fds, const int nfd, register_t *retval)
{
	file_t *fp;
	int i, n = 0;

	for (i = 0; i < nfd; i++, fds++) {
		if (fds->fd < 0) {
			fds->revents = 0;
		} else if ((fp = fd_getfile(fds->fd)) == NULL) {
			fds->revents = POLLNVAL;
			n++;
		} else {
			/*
			 * Perform the poll: this registers a select request
			 * or returns the events that are set.  Set up the
			 * argument for selrecord(), which is a pointer to
			 * the struct pollfd.
			 */
			curlwp->l_selrec = (uintptr_t)fds;
			fds->revents = (*fp->f_ops->fo_poll)(fp,
			    fds->events | POLLERR | POLLHUP);
			if (fds->revents != 0)
				n++;
			fd_putfile(fds->fd);
		}
	}
	*retval = n;
	return (0);
}

int
seltrue(dev_t dev, int events, lwp_t *l)
{

	return (events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
}
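
/*
 * Sketch of typical seltrue() usage (hypothetical driver): a device
 * that is always ready for I/O can simply delegate its poll entry
 * point to seltrue():
 *
 *	int
 *	foopoll(dev_t dev, int events, lwp_t *l)
 *	{
 *		return seltrue(dev, events, l);
 *	}
 */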

/*
 * Record a select request.  Concurrency issues:
 *
 *	The caller holds the same lock across calls to selrecord() and
 *	selnotify(), so we don't need to consider a concurrent wakeup
 *	while in this routine.
 *
 *	The only activity we need to guard against is selclear(), called by
 *	another thread that is exiting sel_do_scan().
 *	`sel_lwp' can only become non-NULL while the caller's lock is held,
 *	so it cannot become non-NULL due to a change made by another thread
 *	while we are in this routine.  It can only become _NULL_ due to a
 *	call to selclear().
 *
 *	If it is non-NULL and != selector there is the potential for
 *	selclear() to be called by another thread.  If either of those
 *	conditions is true, we're not interested in touching the `named
 *	waiter' part of the selinfo record because we need to record a
 *	collision.  Hence there is no need for additional locking in this
 *	routine.
 */
void
selrecord(lwp_t *selector, struct selinfo *sip)
{
	selcluster_t *sc;
	lwp_t *other;

	KASSERT(selector == curlwp);

	sc = selector->l_selcluster;
	other = sip->sel_lwp;

	if (other == selector) {
		/* 1. We (selector) already claimed to be the first LWP. */
		KASSERT(sip->sel_cluster == sc);
	} else if (other == NULL) {
		/*
		 * 2. No first LWP, therefore we (selector) are the first.
		 *
		 * There may be unnamed waiters (collisions).  Issue a memory
		 * barrier to ensure that we access sel_lwp (above) before
		 * other fields - this guards against a call to selclear().
		 */
		membar_enter();
		sip->sel_lwp = selector;
		SLIST_INSERT_HEAD(&selector->l_selwait, sip, sel_chain);
		/* Copy the argument, which is for selnotify(). */
		sip->sel_fdinfo = selector->l_selrec;
		/* Replace selinfo's lock with the chosen cluster's lock. */
		sip->sel_cluster = sc;
	} else {
		/* 3. Multiple waiters: record a collision. */
		sip->sel_collision |= sc->sc_mask;
		KASSERT(sip->sel_cluster != NULL);
	}
}
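
/*
 * Usage sketch (hypothetical driver, not from this file): a poll
 * routine, called with the driver's <object-lock> held, checks for
 * pending events and records the caller when there are none:
 *
 *	int
 *	foo_poll(struct foo_softc *sc, int events)
 *	{
 *		int revents = 0;
 *
 *		mutex_enter(&sc->sc_lock);	(the <object-lock>)
 *		if (sc->sc_havedata)
 *			revents = events & (POLLIN | POLLRDNORM);
 *		else
 *			selrecord(curlwp, &sc->sc_rsel);
 *		mutex_exit(&sc->sc_lock);
 *		return revents;
 *	}
 */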

/*
 * sel_setevents: a helper function for selnotify(), to set the events
 * for an LWP sleeping in selcommon() or pollcommon().
 */
static inline void
sel_setevents(lwp_t *l, struct selinfo *sip, const int events)
{
	const int oflag = l->l_selflag;

	/*
	 * If a re-scan is required, or was already requested by somebody
	 * else, then just (re)set SEL_RESET and return.
	 */
	if (__predict_false(events == 0 || oflag == SEL_RESET)) {
		l->l_selflag = SEL_RESET;
		return;
	}
	/*
	 * Direct set.  Note: the select state of the LWP is locked.  First,
	 * determine whether it is selcommon() or pollcommon().
	 */
	if (l->l_selbits != NULL) {
		fd_mask *fds = (fd_mask *)l->l_selbits;
		const size_t ni = l->l_selni;
		const int fd = sip->sel_fdinfo;
		const int idx = fd >> __NFDSHIFT;
		int n;

		for (n = 0; n < 3; n++) {
			if (sel_flag[n] & events) {
				fds[idx] |= 1 << (fd & __NFDMASK);
			}
			fds = (fd_mask *)((char *)fds + ni);
		}
	} else {
		struct pollfd *pfd = (void *)sip->sel_fdinfo;
		pfd->revents |= events;
	}
	/* Indicate direct set and note the event (cluster lock is held). */
	l->l_selflag = SEL_EVENT;
	l->l_selret++;
}
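
/*
 * Example of the index arithmetic above, assuming a 32-bit fd_mask
 * (__NFDSHIFT = 5, __NFDMASK = 31): for fd = 35, idx = 35 >> 5 = 1 and
 * the bit is 1 << (35 & 31) = 1 << 3, i.e. bit 3 of the second mask
 * word of each output set whose sel_flag[] entry matches `events'.
 */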

/*
 * Do a wakeup when a selectable event occurs.  Concurrency issues:
 *
 *	As per selrecord(), the caller's object lock is held.  If there
 *	is a named waiter, we must acquire the associated selcluster's lock
 *	in order to synchronize with selclear() and pollers going to sleep
 *	in sel_do_scan().
 *
 *	sip->sel_cluster cannot change at this point, as it is only changed
 *	in selrecord(), and concurrent calls to selrecord() are locked
 *	out by the caller.
 */
void
selnotify(struct selinfo *sip, int events, long knhint)
{
	selcluster_t *sc;
	uint32_t mask;
	int index, oflag;
	lwp_t *l;
	kmutex_t *lock;

	KNOTE(&sip->sel_klist, knhint);

	if (sip->sel_lwp != NULL) {
		/* One named LWP is waiting. */
		sc = sip->sel_cluster;
		lock = sc->sc_lock;
		mutex_spin_enter(lock);
		/* Still there? */
		if (sip->sel_lwp != NULL) {
			/*
			 * Set the events for our LWP and indicate this
			 * directly; otherwise, request a full re-scan.
			 */
			l = sip->sel_lwp;
			oflag = l->l_selflag;
#ifndef NO_DIRECT_SELECT
			sel_setevents(l, sip, events);
#else
			l->l_selflag = SEL_RESET;
#endif
			/*
			 * If the thread is sleeping, wake it up.  If it's
			 * not yet asleep, it will notice the change in
			 * state and will re-poll the descriptors.
			 */
			if (oflag == SEL_BLOCKING && l->l_mutex == lock) {
				KASSERT(l->l_wchan == sc);
				sleepq_unsleep(l, false);
			}
		}
		mutex_spin_exit(lock);
	}

	if ((mask = sip->sel_collision) != 0) {
		/*
		 * There was a collision (multiple waiters): we must
		 * inform all potentially interested waiters.
		 */
		sip->sel_collision = 0;
		do {
			index = ffs(mask) - 1;
			mask &= ~(1 << index);
			sc = selcluster[index];
			lock = sc->sc_lock;
			mutex_spin_enter(lock);
			sc->sc_ncoll++;
			sleepq_wake(&sc->sc_sleepq, sc, (u_int)-1, lock);
		} while (__predict_false(mask != 0));
	}
}
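
/*
 * Usage sketch (hypothetical driver): the event path holds the same
 * <object-lock> that foo_poll() above holds around selrecord(), so the
 * record/notify pair cannot race:
 *
 *	mutex_enter(&sc->sc_lock);
 *	sc->sc_havedata = true;
 *	selnotify(&sc->sc_rsel, POLLIN | POLLRDNORM, 0);
 *	mutex_exit(&sc->sc_lock);
 */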

/*
 * Remove an LWP from all objects that it is waiting for.  Concurrency
 * issues:
 *
 *	The object owner's (e.g. device driver) lock is not held here.  Calls
 *	can be made to selrecord() and we do not synchronize against those
 *	directly using locks.  However, we use `sel_lwp' to lock out changes.
 *	Before clearing it we must use memory barriers to ensure that we can
 *	safely traverse the list of selinfo records.
 */
static void
selclear(void)
{
	struct selinfo *sip, *next;
	selcluster_t *sc;
	lwp_t *l;
	kmutex_t *lock;

	l = curlwp;
	sc = l->l_selcluster;
	lock = sc->sc_lock;

	mutex_spin_enter(lock);
	for (sip = SLIST_FIRST(&l->l_selwait); sip != NULL; sip = next) {
		KASSERT(sip->sel_lwp == l);
		KASSERT(sip->sel_cluster == l->l_selcluster);

		/*
		 * Read link to next selinfo record, if any.
		 * It's no longer safe to touch `sip' after clearing
		 * `sel_lwp', so ensure that the read of `sel_chain'
		 * completes before the clearing of sel_lwp becomes
		 * globally visible.
		 */
		next = SLIST_NEXT(sip, sel_chain);
		membar_exit();
		/* Release the record for another named waiter to use. */
		sip->sel_lwp = NULL;
	}
	mutex_spin_exit(lock);
}

/*
 * Initialize the select/poll system calls.  Called once for each
 * CPU in the system, as they are attached.
 */
void
selsysinit(struct cpu_info *ci)
{
	selcluster_t *sc;
	u_int index;

	/* If a cluster is already in place for this bit, re-use it. */
	index = cpu_index(ci) & SELCLUSTERMASK;
	sc = selcluster[index];
	if (sc == NULL) {
		sc = kmem_alloc(roundup2(sizeof(selcluster_t),
		    coherency_unit) + coherency_unit, KM_SLEEP);
		sc = (void *)roundup2((uintptr_t)sc, coherency_unit);
		sc->sc_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_SCHED);
		sleepq_init(&sc->sc_sleepq);
		sc->sc_ncoll = 0;
		sc->sc_mask = (1 << index);
		selcluster[index] = sc;
	}
	ci->ci_data.cpu_selcluster = sc;
}
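
/*
 * Example of the hashing above: with SELCLUSTERS = 32, CPUs 0 and 32
 * share cluster 0, CPUs 1 and 33 share cluster 1, and so on; a system
 * with 32 or fewer CPUs gets one cluster per CPU.
 */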

/*
 * Initialize a selinfo record.
 */
void
selinit(struct selinfo *sip)
{

	memset(sip, 0, sizeof(*sip));
}
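
/*
 * Sketch of the expected life cycle of a selinfo record, from the
 * perspective of a hypothetical driver:
 *
 *	attach:		selinit(&sc->sc_rsel);
 *	fo_poll:	selrecord(curlwp, &sc->sc_rsel);
 *	event path:	selnotify(&sc->sc_rsel, events, 0);
 *	detach:		seldestroy(&sc->sc_rsel);
 */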

/*
 * Destroy a selinfo record.  The owning object must not gain new
 * references while this is in progress: all activity on the record
 * must be stopped.
 *
 * Concurrency issues: we need only guard against a call to selclear()
 * by a thread exiting sel_do_scan().  The caller has prevented further
 * references being made to the selinfo record via selrecord(), and it
 * will not call selnotify() again.
 */
void
seldestroy(struct selinfo *sip)
{
	selcluster_t *sc;
	kmutex_t *lock;
	lwp_t *l;

	if (sip->sel_lwp == NULL)
		return;

	/*
	 * Lock out selclear().  The selcluster pointer can't change while
	 * we are here since it is only ever changed in selrecord(),
	 * and that will not be entered again for this record because
	 * it is dying.
	 */
	KASSERT(sip->sel_cluster != NULL);
	sc = sip->sel_cluster;
	lock = sc->sc_lock;
	mutex_spin_enter(lock);
	if ((l = sip->sel_lwp) != NULL) {
		/*
		 * This should rarely happen, so although SLIST_REMOVE()
		 * is slow, using it here is not a problem.
		 */
		KASSERT(l->l_selcluster == sc);
		SLIST_REMOVE(&l->l_selwait, sip, selinfo, sel_chain);
		sip->sel_lwp = NULL;
	}
	mutex_spin_exit(lock);
}

int
pollsock(struct socket *so, const struct timespec *tsp, int events)
{
	int ncoll, error, timo;
	struct timespec sleepts, ts;
	selcluster_t *sc;
	lwp_t *l;
	kmutex_t *lock;

	timo = 0;
	if (tsp != NULL) {
		ts = *tsp;
		if (inittimeleft(&ts, &sleepts) == -1)
			return EINVAL;
	}

	l = curlwp;
	sc = curcpu()->ci_data.cpu_selcluster;
	lock = sc->sc_lock;
	l->l_selcluster = sc;
	SLIST_INIT(&l->l_selwait);
	error = 0;
	for (;;) {
		/*
		 * No need to lock.  If this is overwritten by another
		 * value while scanning, we will retry below.  We only
		 * need to see exact state from the descriptors that
		 * we are about to poll, and lock activity resulting
		 * from fo_poll is enough to provide an up-to-date value
		 * for new polling activity.
		 */
		ncoll = sc->sc_ncoll;
		l->l_selflag = SEL_SCANNING;
		if (sopoll(so, events) != 0)
			break;
		if (tsp && (timo = gettimeleft(&ts, &sleepts)) <= 0)
			break;
		mutex_spin_enter(lock);
		if (l->l_selflag != SEL_SCANNING || sc->sc_ncoll != ncoll) {
			mutex_spin_exit(lock);
			continue;
		}
		l->l_selflag = SEL_BLOCKING;
		sleepq_enter(&sc->sc_sleepq, l, lock);
		sleepq_enqueue(&sc->sc_sleepq, sc, "pollsock", &select_sobj);
		error = sleepq_block(timo, true);
		if (error != 0)
			break;
	}
	selclear();
	/* poll is not restarted after signals... */
	if (error == ERESTART)
		error = EINTR;
	if (error == EWOULDBLOCK)
		error = 0;
	return (error);
}