/*	$NetBSD: sys_select.c,v 1.10.14.1 2015/04/24 05:46:09 msaitoh Exp $	*/

/*-
 * Copyright (c) 2007, 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)sys_generic.c	8.9 (Berkeley) 2/14/95
 */

/*
 * System calls relating to files.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sys_select.c,v 1.10.14.1 2015/04/24 05:46:09 msaitoh Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/filedesc.h>
#include <sys/ioctl.h>
#include <sys/file.h>
#include <sys/proc.h>
#include <sys/socketvar.h>
#include <sys/signalvar.h>
#include <sys/uio.h>
#include <sys/kernel.h>
#include <sys/stat.h>
#include <sys/poll.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/syscallargs.h>
#include <sys/cpu.h>
#include <sys/atomic.h>
#include <sys/kmem.h>
#include <sys/sleepq.h>

/* Flags for lwp::l_selflag. */
#define	SEL_RESET	0	/* awoken, interrupted, or not yet polling */
#define	SEL_SCANNING	1	/* polling descriptors */
#define	SEL_BLOCKING	2	/* about to block on select_cv */
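
/*
 * These values form a small state machine: a polling LWP sets
 * SEL_SCANNING before each scan and SEL_BLOCKING just before it
 * sleeps; selnotify() resets the flag to SEL_RESET, which tells the
 * LWP to re-scan instead of (or after) blocking.
 */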

/* Per-CPU state for select()/poll(). */
#if MAXCPUS > 32
#error adjust this code
#endif
typedef struct selcpu {
	kmutex_t	sc_lock;
	sleepq_t	sc_sleepq;
	int		sc_ncoll;
	uint32_t	sc_mask;
} selcpu_t;
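
/*
 * sc_mask holds this CPU's bit (1U << cpu_index) as recorded in
 * selinfo::sel_collision; the 32-bit mask is why MAXCPUS is capped
 * at 32 above.
 */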

static int	selscan(lwp_t *, fd_mask *, fd_mask *, int, register_t *);
static int	pollscan(lwp_t *, struct pollfd *, int, register_t *);
static void	selclear(void);

static syncobj_t select_sobj = {
	SOBJ_SLEEPQ_FIFO,
	sleepq_unsleep,
	sleepq_changepri,
	sleepq_lendpri,
	syncobj_noowner,
};

/*
 * Select system call.
 */
int
sys_pselect(struct lwp *l, const struct sys_pselect_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(int) nd;
		syscallarg(fd_set *) in;
		syscallarg(fd_set *) ou;
		syscallarg(fd_set *) ex;
		syscallarg(const struct timespec *) ts;
		syscallarg(sigset_t *) mask;
	} */
	struct timespec ats;
	struct timeval atv, *tv = NULL;
	sigset_t amask, *mask = NULL;
	int error;

	if (SCARG(uap, ts)) {
		error = copyin(SCARG(uap, ts), &ats, sizeof(ats));
		if (error)
			return error;
		atv.tv_sec = ats.tv_sec;
		atv.tv_usec = ats.tv_nsec / 1000;
		tv = &atv;
	}
	if (SCARG(uap, mask) != NULL) {
		error = copyin(SCARG(uap, mask), &amask, sizeof(amask));
		if (error)
			return error;
		mask = &amask;
	}

	return selcommon(l, retval, SCARG(uap, nd), SCARG(uap, in),
	    SCARG(uap, ou), SCARG(uap, ex), tv, mask);
}

/*
 * Set up the timeout computation: validate 'tv' and record the
 * current monotonic time in 'sleeptv'.
 */
int
inittimeleft(struct timeval *tv, struct timeval *sleeptv)
{

	if (itimerfix(tv))
		return -1;
	getmicrouptime(sleeptv);
	return 0;
}

/*
 * We have to recalculate the timeout on every retry: reduce 'tv' by
 * the time slept, measured on the monotonic time scale, and convert
 * what remains to a tick count.
 */
int
gettimeleft(struct timeval *tv, struct timeval *sleeptv)
{
	struct timeval slepttv;

	getmicrouptime(&slepttv);
	timeradd(tv, sleeptv, tv);
	timersub(tv, &slepttv, tv);
	*sleeptv = slepttv;
	return tvtohz(tv);
}
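
/*
 * Typical use of the pair above (a sketch, not lifted from a caller;
 * 'tv_was_supplied' stands for the caller's check that a timeout was
 * actually given): initialize once, then recompute the remaining
 * ticks before each sleep.
 *
 *	struct timeval tv, sleeptv;
 *	int timo = 0;
 *
 *	if (tv_was_supplied && inittimeleft(&tv, &sleeptv) == -1)
 *		return EINVAL;
 *	for (;;) {
 *		... scan descriptors, break if any are ready ...
 *		if (tv_was_supplied &&
 *		    (timo = gettimeleft(&tv, &sleeptv)) <= 0)
 *			break;
 *		... block for at most 'timo' ticks, then retry ...
 *	}
 */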

int
sys_select(struct lwp *l, const struct sys_select_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(int) nd;
		syscallarg(fd_set *) in;
		syscallarg(fd_set *) ou;
		syscallarg(fd_set *) ex;
		syscallarg(struct timeval *) tv;
	} */
	struct timeval atv, *tv = NULL;
	int error;

	if (SCARG(uap, tv)) {
		error = copyin(SCARG(uap, tv), (void *)&atv, sizeof(atv));
		if (error)
			return error;
		tv = &atv;
	}

	return selcommon(l, retval, SCARG(uap, nd), SCARG(uap, in),
	    SCARG(uap, ou), SCARG(uap, ex), tv, NULL);
}

int
selcommon(lwp_t *l, register_t *retval, int nd, fd_set *u_in,
    fd_set *u_ou, fd_set *u_ex, struct timeval *tv, sigset_t *mask)
{
	char smallbits[howmany(FD_SETSIZE, NFDBITS) *
	    sizeof(fd_mask) * 6];
	proc_t * const p = l->l_proc;
	char *bits;
	int ncoll, error, timo;
	size_t ni;
	sigset_t oldmask;
	struct timeval sleeptv;
	selcpu_t *sc;

	error = 0;
	if (nd < 0)
		return (EINVAL);
	if (nd > p->p_fd->fd_nfiles) {
		/* forgiving; slightly wrong */
		nd = p->p_fd->fd_nfiles;
	}
	ni = howmany(nd, NFDBITS) * sizeof(fd_mask);
	if (ni * 6 > sizeof(smallbits)) {
		bits = kmem_alloc(ni * 6, KM_SLEEP);
		if (bits == NULL)
			return ENOMEM;
	} else
		bits = smallbits;

#define	getbits(name, x)						\
	if (u_ ## name) {						\
		error = copyin(u_ ## name, bits + ni * x, ni);		\
		if (error)						\
			goto done;					\
	} else								\
		memset(bits + ni * x, 0, ni);
	getbits(in, 0);
	getbits(ou, 1);
	getbits(ex, 2);
#undef	getbits
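
	/*
	 * Layout of 'bits': six arrays of 'ni' bytes each.  Slots 0-2
	 * hold the input in/ou/ex sets copied in above; slots 3-5
	 * receive the matching output sets, filled in by selscan()
	 * and copied back to userspace at 'done' below.
	 */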

	timo = 0;
	if (tv && inittimeleft(tv, &sleeptv) == -1) {
		error = EINVAL;
		goto done;
	}

	if (mask) {
		sigminusset(&sigcantmask, mask);
		mutex_enter(p->p_lock);
		oldmask = l->l_sigmask;
		l->l_sigmask = *mask;
		mutex_exit(p->p_lock);
	} else
		oldmask = l->l_sigmask;	/* XXXgcc */

	sc = curcpu()->ci_data.cpu_selcpu;
	l->l_selcpu = sc;
	SLIST_INIT(&l->l_selwait);
	for (;;) {
		/*
		 * No need to lock.  If this is overwritten by another
		 * value while scanning, we will retry below.  We only
		 * need to see exact state from the descriptors that
		 * we are about to poll, and lock activity resulting
		 * from fo_poll is enough to provide an up to date value
		 * for new polling activity.
		 */
		l->l_selflag = SEL_SCANNING;
		ncoll = sc->sc_ncoll;

		error = selscan(l, (fd_mask *)(bits + ni * 0),
		    (fd_mask *)(bits + ni * 3), nd, retval);

		if (error || *retval)
			break;
		if (tv && (timo = gettimeleft(tv, &sleeptv)) <= 0)
			break;
		mutex_spin_enter(&sc->sc_lock);
		if (l->l_selflag != SEL_SCANNING || sc->sc_ncoll != ncoll) {
			mutex_spin_exit(&sc->sc_lock);
			continue;
		}
		l->l_selflag = SEL_BLOCKING;
		l->l_kpriority = true;
		sleepq_enter(&sc->sc_sleepq, l, &sc->sc_lock);
		sleepq_enqueue(&sc->sc_sleepq, sc, "select", &select_sobj);
		error = sleepq_block(timo, true);
		if (error != 0)
			break;
	}
	selclear();

	if (mask) {
		mutex_enter(p->p_lock);
		l->l_sigmask = oldmask;
		mutex_exit(p->p_lock);
	}

done:
	/* select is not restarted after signals... */
	if (error == ERESTART)
		error = EINTR;
	if (error == EWOULDBLOCK)
		error = 0;
	if (error == 0 && u_in != NULL)
		error = copyout(bits + ni * 3, u_in, ni);
	if (error == 0 && u_ou != NULL)
		error = copyout(bits + ni * 4, u_ou, ni);
	if (error == 0 && u_ex != NULL)
		error = copyout(bits + ni * 5, u_ex, ni);
	if (bits != smallbits)
		kmem_free(bits, ni * 6);
	return (error);
}

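/*
 * Scan the three input fd_sets at 'ibitp' against the poll flags for
 * read, write and exceptional conditions, set the corresponding bits
 * in the output sets at 'obitp', and count ready descriptors into
 * *retval.
 */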
int
selscan(lwp_t *l, fd_mask *ibitp, fd_mask *obitp, int nfd,
    register_t *retval)
{
	static const int flag[3] = { POLLRDNORM | POLLHUP | POLLERR,
				     POLLWRNORM | POLLHUP | POLLERR,
				     POLLRDBAND };
	int msk, i, j, fd, n;
	fd_mask ibits, obits;
	file_t *fp;

	n = 0;
	for (msk = 0; msk < 3; msk++) {
		for (i = 0; i < nfd; i += NFDBITS) {
			ibits = *ibitp++;
			obits = 0;
			while ((j = ffs(ibits)) && (fd = i + --j) < nfd) {
				ibits &= ~(1U << j);
				if ((fp = fd_getfile(fd)) == NULL)
					return (EBADF);
				if ((*fp->f_ops->fo_poll)(fp, flag[msk])) {
					obits |= (1U << j);
					n++;
				}
				fd_putfile(fd);
			}
			*obitp++ = obits;
		}
	}
	*retval = n;
	return (0);
}

/*
 * Poll system call.
 */
int
sys_poll(struct lwp *l, const struct sys_poll_args *uap, register_t *retval)
{
	/* {
		syscallarg(struct pollfd *) fds;
		syscallarg(u_int) nfds;
		syscallarg(int) timeout;
	} */
	struct timeval atv, *tv = NULL;

	if (SCARG(uap, timeout) != INFTIM) {
		atv.tv_sec = SCARG(uap, timeout) / 1000;
		atv.tv_usec = (SCARG(uap, timeout) % 1000) * 1000;
		tv = &atv;
	}

	return pollcommon(l, retval, SCARG(uap, fds), SCARG(uap, nfds),
	    tv, NULL);
}

/*
 * Poll system call with a signal mask and timespec timeout (pollts).
 */
int
sys_pollts(struct lwp *l, const struct sys_pollts_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(struct pollfd *) fds;
		syscallarg(u_int) nfds;
		syscallarg(const struct timespec *) ts;
		syscallarg(const sigset_t *) mask;
	} */
	struct timespec ats;
	struct timeval atv, *tv = NULL;
	sigset_t amask, *mask = NULL;
	int error;

	if (SCARG(uap, ts)) {
		error = copyin(SCARG(uap, ts), &ats, sizeof(ats));
		if (error)
			return error;
		atv.tv_sec = ats.tv_sec;
		atv.tv_usec = ats.tv_nsec / 1000;
		tv = &atv;
	}
	if (SCARG(uap, mask)) {
		error = copyin(SCARG(uap, mask), &amask, sizeof(amask));
		if (error)
			return error;
		mask = &amask;
	}

	return pollcommon(l, retval, SCARG(uap, fds), SCARG(uap, nfds),
	    tv, mask);
}

int
pollcommon(lwp_t *l, register_t *retval, struct pollfd *u_fds, u_int nfds,
    struct timeval *tv, sigset_t *mask)
{
	char smallbits[32 * sizeof(struct pollfd)];
	proc_t * const p = l->l_proc;
	void *bits;
	sigset_t oldmask;
	int ncoll, error, timo;
	size_t ni;
	struct timeval sleeptv;
	selcpu_t *sc;

	if (nfds > 1000 + p->p_fd->fd_nfiles) {
		/*
		 * Either the user passed in a very sparse 'fds' or junk!
		 * The kmem_alloc() call below would be bad news.
		 * We could process the 'fds' array in chunks, but that
		 * is a lot of code that isn't normally useful.
		 * (Or just move the copyin/out into pollscan().)
		 * Historically the code silently truncated 'fds' to
		 * dt_nfiles entries - but that does cause issues.
		 */
		return EINVAL;
	}
	ni = nfds * sizeof(struct pollfd);
	if (ni > sizeof(smallbits)) {
		bits = kmem_alloc(ni, KM_SLEEP);
		if (bits == NULL)
			return ENOMEM;
	} else
		bits = smallbits;

	error = copyin(u_fds, bits, ni);
	if (error)
		goto done;

	timo = 0;
	if (tv && inittimeleft(tv, &sleeptv) == -1) {
		error = EINVAL;
		goto done;
	}

	if (mask) {
		sigminusset(&sigcantmask, mask);
		mutex_enter(p->p_lock);
		oldmask = l->l_sigmask;
		l->l_sigmask = *mask;
		mutex_exit(p->p_lock);
	} else
		oldmask = l->l_sigmask;	/* XXXgcc */

	sc = curcpu()->ci_data.cpu_selcpu;
	l->l_selcpu = sc;
	SLIST_INIT(&l->l_selwait);
	for (;;) {
		/*
		 * No need to lock.  If this is overwritten by another
		 * value while scanning, we will retry below.  We only
		 * need to see exact state from the descriptors that
		 * we are about to poll, and lock activity resulting
		 * from fo_poll is enough to provide an up to date value
		 * for new polling activity.
		 */
		ncoll = sc->sc_ncoll;
		l->l_selflag = SEL_SCANNING;

		error = pollscan(l, (struct pollfd *)bits, nfds, retval);

		if (error || *retval)
			break;
		if (tv && (timo = gettimeleft(tv, &sleeptv)) <= 0)
			break;
		mutex_spin_enter(&sc->sc_lock);
		if (l->l_selflag != SEL_SCANNING || sc->sc_ncoll != ncoll) {
			mutex_spin_exit(&sc->sc_lock);
			continue;
		}
		l->l_selflag = SEL_BLOCKING;
		l->l_kpriority = true;
		sleepq_enter(&sc->sc_sleepq, l, &sc->sc_lock);
		sleepq_enqueue(&sc->sc_sleepq, sc, "select", &select_sobj);
		error = sleepq_block(timo, true);
		if (error != 0)
			break;
	}
	selclear();

	if (mask) {
		mutex_enter(p->p_lock);
		l->l_sigmask = oldmask;
		mutex_exit(p->p_lock);
	}
done:
	/* poll is not restarted after signals... */
	if (error == ERESTART)
		error = EINTR;
	if (error == EWOULDBLOCK)
		error = 0;
	if (error == 0)
		error = copyout(bits, u_fds, ni);
	if (bits != smallbits)
		kmem_free(bits, ni);
	return (error);
}

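/*
 * Check each pollfd in 'fds' via its file's fo_poll routine, storing
 * the returned events in 'revents' (POLLNVAL for a bad descriptor)
 * and counting the number of ready descriptors into *retval.
 */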
int
pollscan(lwp_t *l, struct pollfd *fds, int nfd, register_t *retval)
{
	int i, n;
	file_t *fp;

	n = 0;
	for (i = 0; i < nfd; i++, fds++) {
		if (fds->fd < 0) {
			fds->revents = 0;
		} else if ((fp = fd_getfile(fds->fd)) == NULL) {
			fds->revents = POLLNVAL;
			n++;
		} else {
			fds->revents = (*fp->f_ops->fo_poll)(fp,
			    fds->events | POLLERR | POLLHUP);
			if (fds->revents != 0)
				n++;
			fd_putfile(fds->fd);
		}
	}
	*retval = n;
	return (0);
}

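/*
 * Generic device poll routine for objects that are always ready for
 * normal reading and writing.
 */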
/*ARGSUSED*/
int
seltrue(dev_t dev, int events, lwp_t *l)
{

	return (events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
}

/*
 * Record a select request.  Concurrency issues:
 *
 * The caller holds the same lock across calls to selrecord() and
 * selnotify(), so we don't need to consider a concurrent wakeup
 * while in this routine.
 *
 * The only activity we need to guard against is selclear(), called by
 * another thread that is exiting selcommon() or pollcommon().
 * `sel_lwp' can only become non-NULL while the caller's lock is held,
 * so it cannot become non-NULL due to a change made by another thread
 * while we are in this routine.  It can only become _NULL_ due to a
 * call to selclear().
 *
 * If it is non-NULL and != selector, there is the potential for
 * selclear() to be called by another thread.  If either of those
 * conditions is true, we're not interested in touching the `named
 * waiter' part of the selinfo record because we need to record a
 * collision.  Hence there is no need for additional locking in this
 * routine.
 */
void
selrecord(lwp_t *selector, struct selinfo *sip)
{
	selcpu_t *sc;
	lwp_t *other;

	KASSERT(selector == curlwp);

	sc = selector->l_selcpu;
	other = sip->sel_lwp;

	if (other == selector) {
		/* `selector' has already claimed it. */
		KASSERT(sip->sel_cpu == sc);
	} else if (other == NULL) {
		/*
		 * First named waiter, although there may be unnamed
		 * waiters (collisions).  Issue a memory barrier to
		 * ensure that we access sel_lwp (above) before other
		 * fields - this guards against a call to selclear().
		 */
		membar_enter();
		sip->sel_lwp = selector;
		SLIST_INSERT_HEAD(&selector->l_selwait, sip, sel_chain);
		/* Replace selinfo's lock with our chosen CPU's lock. */
		sip->sel_cpu = sc;
	} else {
		/* Multiple waiters: record a collision. */
		sip->sel_collision |= sc->sc_mask;
		KASSERT(sip->sel_cpu != NULL);
	}
}
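
/*
 * Sketch of the usual driver-side pattern (the softc layout and the
 * foo_* names are hypothetical, not taken from a real driver).  The
 * driver's poll routine records the caller when no event is pending:
 *
 *	int
 *	foo_poll(struct foo_softc *sc, int events, lwp_t *l)
 *	{
 *		int revents = 0;
 *
 *		mutex_enter(&sc->sc_lock);
 *		if ((events & (POLLIN | POLLRDNORM)) != 0) {
 *			if (sc->sc_data_ready)
 *				revents |= events & (POLLIN | POLLRDNORM);
 *			else
 *				selrecord(l, &sc->sc_rsel);
 *		}
 *		mutex_exit(&sc->sc_lock);
 *		return revents;
 *	}
 *
 * and the interrupt/completion path, holding the same sc_lock, calls
 *
 *	selnotify(&sc->sc_rsel, POLLIN | POLLRDNORM, 0);
 */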

/*
 * Do a wakeup when a selectable event occurs.  Concurrency issues:
 *
 * As per selrecord(), the caller's object lock is held.  If there
 * is a named waiter, we must acquire the associated selcpu's lock
 * in order to synchronize with selclear() and pollers going to sleep
 * in selcommon() and/or pollcommon().
 *
 * sip->sel_cpu cannot change at this point, as it is only changed
 * in selrecord(), and concurrent calls to selrecord() are locked
 * out by the caller.
 */
void
selnotify(struct selinfo *sip, int events, long knhint)
{
	selcpu_t *sc;
	uint32_t mask;
	int index, oflag, swapin;
	lwp_t *l;

	KNOTE(&sip->sel_klist, knhint);

	if (sip->sel_lwp != NULL) {
		/* One named LWP is waiting. */
		swapin = 0;
		sc = sip->sel_cpu;
		mutex_spin_enter(&sc->sc_lock);
		/* Still there? */
		if (sip->sel_lwp != NULL) {
			l = sip->sel_lwp;
			/*
			 * If thread is sleeping, wake it up.  If it's not
			 * yet asleep, it will notice the change in state
			 * and will re-poll the descriptors.
			 */
			oflag = l->l_selflag;
			l->l_selflag = SEL_RESET;
			if (oflag == SEL_BLOCKING &&
			    l->l_mutex == &sc->sc_lock) {
				KASSERT(l->l_wchan == sc);
				swapin = sleepq_unsleep(l, false);
			}
		}
		mutex_spin_exit(&sc->sc_lock);
		if (swapin)
			uvm_kick_scheduler();
	}

	if ((mask = sip->sel_collision) != 0) {
		/*
		 * There was a collision (multiple waiters): we must
		 * inform all potentially interested waiters.
		 */
		sip->sel_collision = 0;
		do {
			index = ffs(mask) - 1;
			mask &= ~(1U << index);
			sc = cpu_lookup(index)->ci_data.cpu_selcpu;
			mutex_spin_enter(&sc->sc_lock);
			sc->sc_ncoll++;
			sleepq_wake(&sc->sc_sleepq, sc, (u_int)-1,
			    &sc->sc_lock);
		} while (__predict_false(mask != 0));
	}
}

/*
 * Remove an LWP from all objects that it is waiting for.  Concurrency
 * issues:
 *
 * The object owner's (e.g. device driver) lock is not held here.  Calls
 * can be made to selrecord() and we do not synchronize against those
 * directly using locks.  However, we use `sel_lwp' to lock out changes.
 * Before clearing it we must use memory barriers to ensure that we can
 * safely traverse the list of selinfo records.
 */
static void
selclear(void)
{
	struct selinfo *sip, *next;
	selcpu_t *sc;
	lwp_t *l;

	l = curlwp;
	sc = l->l_selcpu;

	mutex_spin_enter(&sc->sc_lock);
	for (sip = SLIST_FIRST(&l->l_selwait); sip != NULL; sip = next) {
		KASSERT(sip->sel_lwp == l);
		KASSERT(sip->sel_cpu == l->l_selcpu);
		/*
		 * Read link to next selinfo record, if any.
		 * It's no longer safe to touch `sip' after clearing
		 * `sel_lwp', so ensure that the read of `sel_chain'
		 * completes before the clearing of sel_lwp becomes
		 * globally visible.
		 */
		next = SLIST_NEXT(sip, sel_chain);
		membar_exit();
		/* Release the record for another named waiter to use. */
		sip->sel_lwp = NULL;
	}
	mutex_spin_exit(&sc->sc_lock);
}

/*
 * Initialize the select/poll system calls.  Called once for each
 * CPU in the system, as they are attached.
 */
void
selsysinit(struct cpu_info *ci)
{
	selcpu_t *sc;

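	/*
	 * Allocate with one cache line of slack so the pointer can be
	 * rounded up to a coherency_unit boundary, keeping each CPU's
	 * state on its own cache line(s) and avoiding false sharing.
	 */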
	sc = kmem_alloc(roundup2(sizeof(selcpu_t), coherency_unit) +
	    coherency_unit, KM_SLEEP);
	sc = (void *)roundup2((uintptr_t)sc, coherency_unit);
	mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_SCHED);
	sleepq_init(&sc->sc_sleepq);
	sc->sc_ncoll = 0;
	sc->sc_mask = (1U << cpu_index(ci));
	ci->ci_data.cpu_selcpu = sc;
}

/*
 * Initialize a selinfo record.
 */
void
selinit(struct selinfo *sip)
{

	memset(sip, 0, sizeof(*sip));
}

/*
 * Destroy a selinfo record.  The owning object must not gain new
 * references while this is in progress: all activity on the record
 * must be stopped.
 *
 * Concurrency issues: we only need to guard against a call to
 * selclear() by a thread exiting selcommon() and/or pollcommon().
 * The caller has prevented further references being made to the
 * selinfo record via selrecord(), and it won't call selwakeup()
 * again.
 */
void
seldestroy(struct selinfo *sip)
{
	selcpu_t *sc;
	lwp_t *l;

	if (sip->sel_lwp == NULL)
		return;

	/*
	 * Lock out selclear().  The selcpu pointer can't change while
	 * we are here since it is only ever changed in selrecord(),
	 * and that will not be entered again for this record because
	 * it is dying.
	 */
	KASSERT(sip->sel_cpu != NULL);
	sc = sip->sel_cpu;
	mutex_spin_enter(&sc->sc_lock);
	if ((l = sip->sel_lwp) != NULL) {
		/*
		 * This should rarely happen, so although SLIST_REMOVE()
		 * is slow, using it here is not a problem.
		 */
		KASSERT(l->l_selcpu == sc);
		SLIST_REMOVE(&l->l_selwait, sip, selinfo, sel_chain);
		sip->sel_lwp = NULL;
	}
	mutex_spin_exit(&sc->sc_lock);
}

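/*
 * Wait for events on a single socket: an in-kernel analogue of
 * poll(2) for callers that already hold a reference to the socket.
 */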
int
pollsock(struct socket *so, const struct timeval *tvp, int events)
{
	int ncoll, error, timo;
	struct timeval sleeptv, tv;
	selcpu_t *sc;
	lwp_t *l;

	timo = 0;
	if (tvp != NULL) {
		tv = *tvp;
		if (inittimeleft(&tv, &sleeptv) == -1)
			return EINVAL;
	}

	l = curlwp;
	sc = l->l_cpu->ci_data.cpu_selcpu;
	l->l_selcpu = sc;
	SLIST_INIT(&l->l_selwait);
	error = 0;
	for (;;) {
		/*
		 * No need to lock.  If this is overwritten by another
		 * value while scanning, we will retry below.  We only
		 * need to see exact state from the descriptors that
		 * we are about to poll, and lock activity resulting
		 * from fo_poll is enough to provide an up to date value
		 * for new polling activity.
		 */
		ncoll = sc->sc_ncoll;
		l->l_selflag = SEL_SCANNING;
		if (sopoll(so, events) != 0)
			break;
		if (tvp && (timo = gettimeleft(&tv, &sleeptv)) <= 0)
			break;
		mutex_spin_enter(&sc->sc_lock);
		if (l->l_selflag != SEL_SCANNING || sc->sc_ncoll != ncoll) {
			mutex_spin_exit(&sc->sc_lock);
			continue;
		}
		l->l_selflag = SEL_BLOCKING;
		sleepq_enter(&sc->sc_sleepq, l, &sc->sc_lock);
		sleepq_enqueue(&sc->sc_sleepq, sc, "pollsock", &select_sobj);
		error = sleepq_block(timo, true);
		if (error != 0)
			break;
	}
	selclear();
	/* poll is not restarted after signals... */
	if (error == ERESTART)
		error = EINTR;
	if (error == EWOULDBLOCK)
		error = 0;
	return (error);
}