/*	$NetBSD: sys_generic.c,v 1.100.2.5 2007/04/13 15:49:48 ad Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)sys_generic.c	8.9 (Berkeley) 2/14/95
 */

/*
 * System calls relating to files.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sys_generic.c,v 1.100.2.5 2007/04/13 15:49:48 ad Exp $");

#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/filedesc.h>
#include <sys/ioctl.h>
#include <sys/file.h>
#include <sys/proc.h>
#include <sys/socketvar.h>
#include <sys/signalvar.h>
#include <sys/uio.h>
#include <sys/kernel.h>
#include <sys/stat.h>
#include <sys/kmem.h>
#include <sys/poll.h>
#ifdef KTRACE
#include <sys/ktrace.h>
#endif

#include <sys/mount.h>
#include <sys/syscallargs.h>

#include <uvm/uvm_extern.h>

int	selscan(struct lwp *, fd_mask *, fd_mask *, int, register_t *);
int	pollscan(struct lwp *, struct pollfd *, int, register_t *);

static void	selclear(void);

kmutex_t	select_lock;
kcondvar_t	select_cv;

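/*
 * A note on synchronization (editorial summary of the code below):
 * select_lock guards each LWP's list of selinfo records, the
 * sel_lwp/sel_collision fields and the nselcoll generation counter;
 * select_cv is the single condition variable all select/poll waiters
 * sleep on.  l_selflag is set to 1 before a scan; selwakeup() clears
 * it to force another pass if an event fires mid-scan, and the waiter
 * clears it again just before sleeping so a later wakeup targets the
 * sleeping LWP directly.
 */
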
/*
 * Read system call.
 */
/* ARGSUSED */
int
sys_read(struct lwp *l, void *v, register_t *retval)
{
	struct sys_read_args /* {
		syscallarg(int)		fd;
		syscallarg(void *)	buf;
		syscallarg(size_t)	nbyte;
	} */ *uap = v;
	int		fd;
	struct file	*fp;
	struct proc	*p;
	struct filedesc	*fdp;

	fd = SCARG(uap, fd);
	p = l->l_proc;
	fdp = p->p_fd;

	if ((fp = fd_getfile(fdp, fd)) == NULL)
		return (EBADF);

	if ((fp->f_flag & FREAD) == 0) {
		mutex_exit(&fp->f_lock);
		return (EBADF);
	}

	FILE_USE(fp);

	/* dofileread() will unuse the descriptor for us */
	return (dofileread(l, fd, fp, SCARG(uap, buf), SCARG(uap, nbyte),
	    &fp->f_offset, FOF_UPDATE_OFFSET, retval));
}

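/*
 * Common code for read(2)-style transfers.  As an illustrative sketch
 * of the intended reuse (not a claim about where pread is implemented):
 * a pread(2)-style caller would pass its own off_t and omit
 * FOF_UPDATE_OFFSET so the descriptor's offset is left untouched, while
 * sys_read() above passes &fp->f_offset with FOF_UPDATE_OFFSET.
 */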
int
dofileread(struct lwp *l, int fd, struct file *fp, void *buf, size_t nbyte,
	off_t *offset, int flags, register_t *retval)
{
	struct iovec aiov;
	struct uio auio;
	struct proc *p;
	struct vmspace *vm;
	size_t cnt;
	int error;
#ifdef KTRACE
	struct iovec ktriov = { .iov_base = NULL, };
#endif

	p = l->l_proc;
	error = proc_vmspace_getref(p, &vm);
	if (error) {
		/*
		 * No vmspace reference was taken, so don't go through
		 * "out:", which would uvmspace_free() an uninitialized
		 * pointer.  Just release the file and return.
		 */
		FILE_UNUSE(fp, l);
		return error;
	}

	aiov.iov_base = (void *)buf;
	aiov.iov_len = nbyte;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_resid = nbyte;
	auio.uio_rw = UIO_READ;
	auio.uio_vmspace = vm;

	/*
	 * Reads return ssize_t because -1 is returned on error.  Therefore
	 * we must restrict the length to SSIZE_MAX to avoid garbage return
	 * values.
	 */
	if (auio.uio_resid > SSIZE_MAX) {
		error = EINVAL;
		goto out;
	}

#ifdef KTRACE
	/*
	 * If tracing, save a copy of the iovec.
	 */
	if (KTRPOINT(p, KTR_GENIO))
		ktriov = aiov;
#endif
	cnt = auio.uio_resid;
	error = (*fp->f_ops->fo_read)(fp, offset, &auio, fp->f_cred, flags);
	if (error) {
		/* A partial transfer before interruption still succeeds. */
		if (auio.uio_resid != cnt && (error == ERESTART ||
		    error == EINTR || error == EWOULDBLOCK))
			error = 0;
	}
	cnt -= auio.uio_resid;
#ifdef KTRACE
	if (KTRPOINT(p, KTR_GENIO) && error == 0)
		ktrgenio(l, fd, UIO_READ, &ktriov, cnt, error);
#endif
	*retval = cnt;
 out:
	FILE_UNUSE(fp, l);
	uvmspace_free(vm);
	return (error);
}

/*
 * Scatter read system call.
 */
int
sys_readv(struct lwp *l, void *v, register_t *retval)
{
	struct sys_readv_args /* {
		syscallarg(int)				fd;
		syscallarg(const struct iovec *)	iovp;
		syscallarg(int)				iovcnt;
	} */ *uap = v;
	struct filedesc	*fdp;
	struct file	*fp;
	struct proc	*p;
	int		fd;

	fd = SCARG(uap, fd);
	p = l->l_proc;
	fdp = p->p_fd;

	if ((fp = fd_getfile(fdp, fd)) == NULL)
		return (EBADF);

	if ((fp->f_flag & FREAD) == 0) {
		mutex_exit(&fp->f_lock);
		return (EBADF);
	}

	FILE_USE(fp);

	/* dofilereadv() will unuse the descriptor for us */
	return (dofilereadv(l, fd, fp, SCARG(uap, iovp), SCARG(uap, iovcnt),
	    &fp->f_offset, FOF_UPDATE_OFFSET, retval));
}

int
dofilereadv(struct lwp *l, int fd, struct file *fp, const struct iovec *iovp,
	int iovcnt, off_t *offset, int flags, register_t *retval)
{
	struct proc	*p;
	struct uio	auio;
	struct iovec	*iov, *needfree, aiov[UIO_SMALLIOV];
	struct vmspace	*vm;
	int		i, error;
	size_t		cnt;
	u_int		iovlen;
#ifdef KTRACE
	struct iovec	*ktriov;
#endif

	p = l->l_proc;
	error = proc_vmspace_getref(p, &vm);
	if (error) {
		/*
		 * No vmspace reference was taken; don't go through "out:",
		 * which would uvmspace_free() an uninitialized pointer.
		 */
		FILE_UNUSE(fp, l);
		return error;
	}

#ifdef KTRACE
	ktriov = NULL;
#endif
	/* note: can't use iovlen until iovcnt is validated */
	iovlen = iovcnt * sizeof(struct iovec);
	if ((u_int)iovcnt > UIO_SMALLIOV) {
		if ((u_int)iovcnt > IOV_MAX) {
			error = EINVAL;
			goto out;
		}
		iov = kmem_alloc(iovlen, KM_SLEEP);
		needfree = iov;
	} else if ((u_int)iovcnt > 0) {
		/* Small requests use the on-stack iovec array. */
		iov = aiov;
		needfree = NULL;
	} else {
		error = EINVAL;
		goto out;
	}

	auio.uio_iov = iov;
	auio.uio_iovcnt = iovcnt;
	auio.uio_rw = UIO_READ;
	auio.uio_vmspace = vm;
	error = copyin(iovp, iov, iovlen);
	if (error)
		goto done;
	auio.uio_resid = 0;
	for (i = 0; i < iovcnt; i++) {
		auio.uio_resid += iov->iov_len;
		/*
		 * Reads return ssize_t because -1 is returned on error.
		 * Therefore we must restrict the length to SSIZE_MAX to
		 * avoid garbage return values.
		 */
		if (iov->iov_len > SSIZE_MAX || auio.uio_resid > SSIZE_MAX) {
			error = EINVAL;
			goto done;
		}
		iov++;
	}
#ifdef KTRACE
	/*
	 * If tracing, save a copy of the iovec.
	 */
	if (KTRPOINT(p, KTR_GENIO)) {
		ktriov = kmem_alloc(iovlen, KM_SLEEP);
		memcpy((void *)ktriov, (void *)auio.uio_iov, iovlen);
	}
#endif
	cnt = auio.uio_resid;
	error = (*fp->f_ops->fo_read)(fp, offset, &auio, fp->f_cred, flags);
	if (error) {
		/* A partial transfer before interruption still succeeds. */
		if (auio.uio_resid != cnt && (error == ERESTART ||
		    error == EINTR || error == EWOULDBLOCK))
			error = 0;
	}
	cnt -= auio.uio_resid;
#ifdef KTRACE
	if (ktriov != NULL) {
		if (KTRPOINT(p, KTR_GENIO) && (error == 0))
			ktrgenio(l, fd, UIO_READ, ktriov, cnt, error);
		kmem_free(ktriov, iovlen);
	}
#endif
	*retval = cnt;
 done:
	if (needfree)
		kmem_free(needfree, iovlen);
 out:
	FILE_UNUSE(fp, l);
	uvmspace_free(vm);
	return (error);
}

/*
 * Write system call.
 */
int
sys_write(struct lwp *l, void *v, register_t *retval)
{
	struct sys_write_args /* {
		syscallarg(int)		fd;
		syscallarg(const void *) buf;
		syscallarg(size_t)	nbyte;
	} */ *uap = v;
	int		fd;
	struct file	*fp;
	struct proc	*p;
	struct filedesc	*fdp;

	fd = SCARG(uap, fd);
	p = l->l_proc;
	fdp = p->p_fd;

	if ((fp = fd_getfile(fdp, fd)) == NULL)
		return (EBADF);

	if ((fp->f_flag & FWRITE) == 0) {
		mutex_exit(&fp->f_lock);
		return (EBADF);
	}

	FILE_USE(fp);

	/* dofilewrite() will unuse the descriptor for us */
	return (dofilewrite(l, fd, fp, SCARG(uap, buf), SCARG(uap, nbyte),
	    &fp->f_offset, FOF_UPDATE_OFFSET, retval));
}

int
dofilewrite(struct lwp *l, int fd, struct file *fp, const void *buf,
	size_t nbyte, off_t *offset, int flags, register_t *retval)
{
	struct iovec aiov;
	struct uio auio;
	struct proc *p;
	struct vmspace *vm;
	size_t cnt;
	int error;
#ifdef KTRACE
	struct iovec ktriov = { .iov_base = NULL, };
#endif

	p = l->l_proc;
	error = proc_vmspace_getref(p, &vm);
	if (error) {
		/*
		 * No vmspace reference was taken; don't go through "out:",
		 * which would uvmspace_free() an uninitialized pointer.
		 */
		FILE_UNUSE(fp, l);
		return error;
	}
	aiov.iov_base = __UNCONST(buf);		/* XXXUNCONST kills const */
	aiov.iov_len = nbyte;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_resid = nbyte;
	auio.uio_rw = UIO_WRITE;
	auio.uio_vmspace = vm;

	/*
	 * Writes return ssize_t because -1 is returned on error.  Therefore
	 * we must restrict the length to SSIZE_MAX to avoid garbage return
	 * values.
	 */
	if (auio.uio_resid > SSIZE_MAX) {
		error = EINVAL;
		goto out;
	}

#ifdef KTRACE
	/*
	 * If tracing, save a copy of the iovec.
	 */
	if (KTRPOINT(p, KTR_GENIO))
		ktriov = aiov;
#endif
	cnt = auio.uio_resid;
	error = (*fp->f_ops->fo_write)(fp, offset, &auio, fp->f_cred, flags);
	if (error) {
		if (auio.uio_resid != cnt && (error == ERESTART ||
		    error == EINTR || error == EWOULDBLOCK))
			error = 0;
		if (error == EPIPE) {
			mutex_enter(&proclist_mutex);
			psignal(p, SIGPIPE);
			mutex_exit(&proclist_mutex);
		}
	}
	cnt -= auio.uio_resid;
#ifdef KTRACE
	if (KTRPOINT(p, KTR_GENIO) && error == 0)
		ktrgenio(l, fd, UIO_WRITE, &ktriov, cnt, error);
#endif
	*retval = cnt;
 out:
	FILE_UNUSE(fp, l);
	uvmspace_free(vm);
	return (error);
}

/*
 * Gather write system call.
 */
int
sys_writev(struct lwp *l, void *v, register_t *retval)
{
	struct sys_writev_args /* {
		syscallarg(int)				fd;
		syscallarg(const struct iovec *)	iovp;
		syscallarg(int)				iovcnt;
	} */ *uap = v;
	int		fd;
	struct file	*fp;
	struct proc	*p;
	struct filedesc	*fdp;

	fd = SCARG(uap, fd);
	p = l->l_proc;
	fdp = p->p_fd;

	if ((fp = fd_getfile(fdp, fd)) == NULL)
		return (EBADF);

	if ((fp->f_flag & FWRITE) == 0) {
		mutex_exit(&fp->f_lock);
		return (EBADF);
	}

	FILE_USE(fp);

	/* dofilewritev() will unuse the descriptor for us */
	return (dofilewritev(l, fd, fp, SCARG(uap, iovp), SCARG(uap, iovcnt),
	    &fp->f_offset, FOF_UPDATE_OFFSET, retval));
}

int
dofilewritev(struct lwp *l, int fd, struct file *fp, const struct iovec *iovp,
	int iovcnt, off_t *offset, int flags, register_t *retval)
{
	struct proc	*p;
	struct uio	auio;
	struct iovec	*iov, *needfree, aiov[UIO_SMALLIOV];
	struct vmspace	*vm;
	int		i, error;
	size_t		cnt;
	u_int		iovlen;
#ifdef KTRACE
	struct iovec	*ktriov;
#endif

	p = l->l_proc;
	error = proc_vmspace_getref(p, &vm);
	if (error) {
		/*
		 * No vmspace reference was taken; don't go through "out:",
		 * which would uvmspace_free() an uninitialized pointer.
		 */
		FILE_UNUSE(fp, l);
		return error;
	}
#ifdef KTRACE
	ktriov = NULL;
#endif
	/* note: can't use iovlen until iovcnt is validated */
	iovlen = iovcnt * sizeof(struct iovec);
	if ((u_int)iovcnt > UIO_SMALLIOV) {
		if ((u_int)iovcnt > IOV_MAX) {
			error = EINVAL;
			goto out;
		}
		iov = kmem_alloc(iovlen, KM_SLEEP);
		needfree = iov;
	} else if ((u_int)iovcnt > 0) {
		/* Small requests use the on-stack iovec array. */
		iov = aiov;
		needfree = NULL;
	} else {
		error = EINVAL;
		goto out;
	}

	auio.uio_iov = iov;
	auio.uio_iovcnt = iovcnt;
	auio.uio_rw = UIO_WRITE;
	auio.uio_vmspace = vm;
	error = copyin(iovp, iov, iovlen);
	if (error)
		goto done;
	auio.uio_resid = 0;
	for (i = 0; i < iovcnt; i++) {
		auio.uio_resid += iov->iov_len;
		/*
		 * Writes return ssize_t because -1 is returned on error.
		 * Therefore we must restrict the length to SSIZE_MAX to
		 * avoid garbage return values.
		 */
		if (iov->iov_len > SSIZE_MAX || auio.uio_resid > SSIZE_MAX) {
			error = EINVAL;
			goto done;
		}
		iov++;
	}
#ifdef KTRACE
	/*
	 * If tracing, save a copy of the iovec.
	 */
	if (KTRPOINT(p, KTR_GENIO)) {
		ktriov = kmem_alloc(iovlen, KM_SLEEP);
		memcpy((void *)ktriov, (void *)auio.uio_iov, iovlen);
	}
#endif
	cnt = auio.uio_resid;
	error = (*fp->f_ops->fo_write)(fp, offset, &auio, fp->f_cred, flags);
	if (error) {
		if (auio.uio_resid != cnt && (error == ERESTART ||
		    error == EINTR || error == EWOULDBLOCK))
			error = 0;
		if (error == EPIPE) {
			mutex_enter(&proclist_mutex);
			psignal(p, SIGPIPE);
			mutex_exit(&proclist_mutex);
		}
	}
	cnt -= auio.uio_resid;
#ifdef KTRACE
	if (ktriov != NULL) {
		if (KTRPOINT(p, KTR_GENIO) && (error == 0))
			ktrgenio(l, fd, UIO_WRITE, ktriov, cnt, error);
		kmem_free(ktriov, iovlen);
	}
#endif
	*retval = cnt;
 done:
	if (needfree)
		kmem_free(needfree, iovlen);
 out:
	FILE_UNUSE(fp, l);
	uvmspace_free(vm);
	return (error);
}

/*
 * Ioctl system call.
 */
/* ARGSUSED */
int
sys_ioctl(struct lwp *l, void *v, register_t *retval)
{
	struct sys_ioctl_args /* {
		syscallarg(int)		fd;
		syscallarg(u_long)	com;
		syscallarg(void *)	data;
	} */ *uap = v;
	struct file	*fp;
	struct proc	*p;
	struct filedesc	*fdp;
	u_long		com;
	int		error;
	u_int		size;
	void		*data, *memp;
#define	STK_PARAMS	128
	u_long		stkbuf[STK_PARAMS/sizeof(u_long)];

	error = 0;
	p = l->l_proc;
	fdp = p->p_fd;

	if ((fp = fd_getfile(fdp, SCARG(uap, fd))) == NULL)
		return (EBADF);

	FILE_USE(fp);

	if ((fp->f_flag & (FREAD | FWRITE)) == 0) {
		error = EBADF;
		com = 0;
		goto out;
	}

	switch (com = SCARG(uap, com)) {
	case FIONCLEX:
		rw_enter(&fdp->fd_lock, RW_WRITER);
		fdp->fd_ofileflags[SCARG(uap, fd)] &= ~UF_EXCLOSE;
		rw_exit(&fdp->fd_lock);
		goto out;

	case FIOCLEX:
		rw_enter(&fdp->fd_lock, RW_WRITER);
		fdp->fd_ofileflags[SCARG(uap, fd)] |= UF_EXCLOSE;
		rw_exit(&fdp->fd_lock);
		goto out;
	}

	/*
	 * Interpret the high order word to find the amount of data to be
	 * copied to/from the user's address space.
	 */
	size = IOCPARM_LEN(com);
	if (size > IOCPARM_MAX) {
		error = ENOTTY;
		goto out;
	}
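
	/*
	 * A worked example of the command-word encoding (editorial; see
	 * <sys/ioccom.h>): a command such as FIONREAD is defined roughly
	 * as _IOR('f', 127, int), packing the direction bits, parameter
	 * length and group/command bytes into one word, so that here:
	 *
	 *	IOCPARM_LEN(com)	== sizeof(int)
	 *	(com & IOC_OUT) != 0	-- data is copied out below
	 *	IOCGROUP(com) == 'f', (com & 0xff) == 127
	 *
	 * FIONREAD's exact definition lives in <sys/filio.h>; it is
	 * cited only to illustrate how the checks here decode com.
	 */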
	memp = NULL;
	if (size > sizeof(stkbuf)) {
		memp = kmem_alloc(size, KM_SLEEP);
		data = memp;
	} else
		data = (void *)stkbuf;
	if (com&IOC_IN) {
		if (size) {
			error = copyin(SCARG(uap, data), data, size);
			if (error) {
				if (memp)
					kmem_free(memp, size);
				goto out;
			}
#ifdef KTRACE
			if (KTRPOINT(p, KTR_GENIO)) {
				struct iovec iov;
				iov.iov_base = SCARG(uap, data);
				iov.iov_len = size;
				ktrgenio(l, SCARG(uap, fd), UIO_WRITE, &iov,
				    size, 0);
			}
#endif
		} else
			*(void **)data = SCARG(uap, data);
	} else if ((com&IOC_OUT) && size)
		/*
		 * Zero the buffer so the user always
		 * gets back something deterministic.
		 */
		memset(data, 0, size);
	else if (com&IOC_VOID)
		*(void **)data = SCARG(uap, data);

	switch (com) {

	case FIONBIO:
		mutex_enter(&fp->f_lock);
		if (*(int *)data != 0)
			fp->f_flag |= FNONBLOCK;
		else
			fp->f_flag &= ~FNONBLOCK;
		mutex_exit(&fp->f_lock);
		error = (*fp->f_ops->fo_ioctl)(fp, FIONBIO, data, l);
		break;

	case FIOASYNC:
		mutex_enter(&fp->f_lock);
		if (*(int *)data != 0)
			fp->f_flag |= FASYNC;
		else
			fp->f_flag &= ~FASYNC;
		mutex_exit(&fp->f_lock);
		error = (*fp->f_ops->fo_ioctl)(fp, FIOASYNC, data, l);
		break;

	default:
		error = (*fp->f_ops->fo_ioctl)(fp, com, data, l);
		/*
		 * Copy any data to user, size was
		 * already set and checked above.
		 */
		if (error == 0 && (com&IOC_OUT) && size) {
			error = copyout(data, SCARG(uap, data), size);
#ifdef KTRACE
			if (KTRPOINT(p, KTR_GENIO)) {
				struct iovec iov;
				iov.iov_base = SCARG(uap, data);
				iov.iov_len = size;
				ktrgenio(l, SCARG(uap, fd), UIO_READ, &iov,
				    size, error);
			}
#endif
		}
		break;
	}
	if (memp)
		kmem_free(memp, size);
 out:
	FILE_UNUSE(fp, l);
	switch (error) {
	case -1:
		printf("sys_ioctl: _IO%s%s('%c', %lu, %lu) returned -1: "
		    "pid=%d comm=%s\n",
		    (com & IOC_IN) ? "W" : "", (com & IOC_OUT) ? "R" : "",
		    (char)IOCGROUP(com), (com & 0xff), IOCPARM_LEN(com),
		    p->p_pid, p->p_comm);
		/* FALLTHROUGH */
	case EPASSTHROUGH:
		error = ENOTTY;
		/* FALLTHROUGH */
	default:
		return (error);
	}
}

int	selwait, nselcoll;

/*
 * Pselect system call.
 */
int
sys_pselect(struct lwp *l, void *v, register_t *retval)
{
	struct sys_pselect_args /* {
		syscallarg(int)				nd;
		syscallarg(fd_set *)			in;
		syscallarg(fd_set *)			ou;
		syscallarg(fd_set *)			ex;
		syscallarg(const struct timespec *)	ts;
		syscallarg(sigset_t *)			mask;
	} */ * const uap = v;
	struct timespec	ats;
	struct timeval	atv, *tv = NULL;
	sigset_t	amask, *mask = NULL;
	int		error;

	if (SCARG(uap, ts)) {
		error = copyin(SCARG(uap, ts), &ats, sizeof(ats));
		if (error)
			return error;
		atv.tv_sec = ats.tv_sec;
		atv.tv_usec = ats.tv_nsec / 1000;
		tv = &atv;
	}
	if (SCARG(uap, mask) != NULL) {
		error = copyin(SCARG(uap, mask), &amask, sizeof(amask));
		if (error)
			return error;
		mask = &amask;
	}

	return selcommon(l, retval, SCARG(uap, nd), SCARG(uap, in),
	    SCARG(uap, ou), SCARG(uap, ex), tv, mask);
}

int
inittimeleft(struct timeval *tv, struct timeval *sleeptv)
{

	if (itimerfix(tv))
		return -1;
	getmicrouptime(sleeptv);
	return 0;
}

int
gettimeleft(struct timeval *tv, struct timeval *sleeptv)
{
	/*
	 * We have to recalculate the timeout on every retry.
	 */
	struct timeval slepttv;

	/*
	 * Reduce tv by the time elapsed since the last call, measured
	 * on the monotonic time scale.
	 */
	getmicrouptime(&slepttv);
	timeradd(tv, sleeptv, tv);
	timersub(tv, &slepttv, tv);
	*sleeptv = slepttv;
	return tvtohz(tv);
}
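
/*
 * Worked example (editorial): suppose select() is given tv = 2.0s and
 * inittimeleft() records sleeptv = uptime 10.0s.  If the LWP wakes at
 * uptime 10.7s for a reason that forces a retry, gettimeleft() computes
 * tv = tv + 10.0 - 10.7 = 1.3s and stores 10.7 as the new baseline, so
 * repeated retries never extend the caller's total timeout.
 */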

int
sys_select(struct lwp *l, void *v, register_t *retval)
{
	struct sys_select_args /* {
		syscallarg(int)			nd;
		syscallarg(fd_set *)		in;
		syscallarg(fd_set *)		ou;
		syscallarg(fd_set *)		ex;
		syscallarg(struct timeval *)	tv;
	} */ * const uap = v;
	struct timeval	atv, *tv = NULL;
	int		error;

	if (SCARG(uap, tv)) {
		error = copyin(SCARG(uap, tv), (void *)&atv,
		    sizeof(atv));
		if (error)
			return error;
		tv = &atv;
	}

	return selcommon(l, retval, SCARG(uap, nd), SCARG(uap, in),
	    SCARG(uap, ou), SCARG(uap, ex), tv, NULL);
}

int
selcommon(struct lwp *l, register_t *retval, int nd, fd_set *u_in,
	fd_set *u_ou, fd_set *u_ex, struct timeval *tv, sigset_t *mask)
{
	char		smallbits[howmany(FD_SETSIZE, NFDBITS) *
			    sizeof(fd_mask) * 6];
	struct proc	* const p = l->l_proc;
	char		*bits;
	int		ncoll, error, timo;
	size_t		ni;
	sigset_t	oldmask;
	struct timeval	sleeptv;

	error = 0;
	if (nd < 0)
		return (EINVAL);
	if (nd > p->p_fd->fd_nfiles) {
		/* forgiving; slightly wrong */
		nd = p->p_fd->fd_nfiles;
	}
	ni = howmany(nd, NFDBITS) * sizeof(fd_mask);
	if (ni * 6 > sizeof(smallbits))
		bits = kmem_alloc(ni * 6, KM_SLEEP);
	else
		bits = smallbits;

#define	getbits(name, x)						\
	if (u_ ## name) {						\
		error = copyin(u_ ## name, bits + ni * x, ni);		\
		if (error)						\
			goto done;					\
	} else								\
		memset(bits + ni * x, 0, ni);
	getbits(in, 0);
	getbits(ou, 1);
	getbits(ex, 2);
#undef	getbits
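
	/*
	 * Layout of the "bits" buffer (editorial note): it holds six
	 * descriptor sets of ni bytes each.  Slots 0-2 are the input
	 * in/ou/ex sets copied in above; selscan() writes its results
	 * into slots 3-5, which are copied back out at "done:" below.
	 */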

	timo = 0;
	if (tv && inittimeleft(tv, &sleeptv) == -1) {
		error = EINVAL;
		goto done;
	}

	if (mask) {
		sigminusset(&sigcantmask, mask);
		mutex_enter(&p->p_smutex);
		oldmask = l->l_sigmask;
		l->l_sigmask = *mask;
		mutex_exit(&p->p_smutex);
	} else
		oldmask = l->l_sigmask;	/* XXXgcc */

	/*
	 * Scan the descriptors; if nothing is ready, sleep until a
	 * selwakeup() or the timeout.  Rescan if a collision or a
	 * wakeup raced with the scan.
	 */
	mutex_enter(&select_lock);
	for (;;) {
		l->l_selflag = 1;
		ncoll = nselcoll;
		mutex_exit(&select_lock);

		error = selscan(l, (fd_mask *)(bits + ni * 0),
		    (fd_mask *)(bits + ni * 3), nd, retval);

		mutex_enter(&select_lock);
		if (error || *retval)
			break;
		if (tv && (timo = gettimeleft(tv, &sleeptv)) <= 0)
			break;
		if (l->l_selflag == 0 || ncoll != nselcoll)
			continue;
		l->l_selflag = 0;
		error = cv_timedwait_sig(&select_cv, &select_lock, timo);
		if (error != 0)
			break;
	}
	selclear();
	mutex_exit(&select_lock);

	if (mask) {
		mutex_enter(&p->p_smutex);
		l->l_sigmask = oldmask;
		mutex_exit(&p->p_smutex);
	}

 done:
	/* select is not restarted after signals... */
	if (error == ERESTART)
		error = EINTR;
	if (error == EWOULDBLOCK)
		error = 0;
	if (error == 0 && u_in != NULL)
		error = copyout(bits + ni * 3, u_in, ni);
	if (error == 0 && u_ou != NULL)
		error = copyout(bits + ni * 4, u_ou, ni);
	if (error == 0 && u_ex != NULL)
		error = copyout(bits + ni * 5, u_ex, ni);
	if (bits != smallbits)
		kmem_free(bits, ni * 6);
	return (error);
}

int
selscan(struct lwp *l, fd_mask *ibitp, fd_mask *obitp, int nfd,
	register_t *retval)
{
	/*
	 * The three passes correspond to the in, ou and ex descriptor
	 * sets; flag[msk] gives the poll events that satisfy each one.
	 */
	static const int flag[3] = { POLLRDNORM | POLLHUP | POLLERR,
			       POLLWRNORM | POLLHUP | POLLERR,
			       POLLRDBAND };
	struct proc	*p = l->l_proc;
	struct filedesc	*fdp;
	int		msk, i, j, fd, n;
	fd_mask		ibits, obits;
	struct file	*fp;

	fdp = p->p_fd;
	n = 0;
	for (msk = 0; msk < 3; msk++) {
		for (i = 0; i < nfd; i += NFDBITS) {
			ibits = *ibitp++;
			obits = 0;
			while ((j = ffs(ibits)) && (fd = i + --j) < nfd) {
				ibits &= ~(1 << j);
				if ((fp = fd_getfile(fdp, fd)) == NULL)
					return (EBADF);
				FILE_USE(fp);
				if ((*fp->f_ops->fo_poll)(fp, flag[msk], l)) {
					obits |= (1 << j);
					n++;
				}
				FILE_UNUSE(fp, l);
			}
			*obitp++ = obits;
		}
	}
	*retval = n;
	return (0);
}

/*
 * Poll system call.
 */
int
sys_poll(struct lwp *l, void *v, register_t *retval)
{
	struct sys_poll_args /* {
		syscallarg(struct pollfd *)	fds;
		syscallarg(u_int)		nfds;
		syscallarg(int)			timeout;
	} */ * const uap = v;
	struct timeval	atv, *tv = NULL;

	if (SCARG(uap, timeout) != INFTIM) {
		atv.tv_sec = SCARG(uap, timeout) / 1000;
		atv.tv_usec = (SCARG(uap, timeout) % 1000) * 1000;
		tv = &atv;
	}

	return pollcommon(l, retval, SCARG(uap, fds), SCARG(uap, nfds),
	    tv, NULL);
}

/*
 * Poll system call with a signal mask and nanosecond timeout (pollts).
 */
int
sys_pollts(struct lwp *l, void *v, register_t *retval)
{
	struct sys_pollts_args /* {
		syscallarg(struct pollfd *)		fds;
		syscallarg(u_int)			nfds;
		syscallarg(const struct timespec *)	ts;
		syscallarg(const sigset_t *)		mask;
	} */ * const uap = v;
	struct timespec	ats;
	struct timeval	atv, *tv = NULL;
	sigset_t	amask, *mask = NULL;
	int		error;

	if (SCARG(uap, ts)) {
		error = copyin(SCARG(uap, ts), &ats, sizeof(ats));
		if (error)
			return error;
		atv.tv_sec = ats.tv_sec;
		atv.tv_usec = ats.tv_nsec / 1000;
		tv = &atv;
	}
	if (SCARG(uap, mask)) {
		error = copyin(SCARG(uap, mask), &amask, sizeof(amask));
		if (error)
			return error;
		mask = &amask;
	}

	return pollcommon(l, retval, SCARG(uap, fds), SCARG(uap, nfds),
	    tv, mask);
}

int
pollcommon(struct lwp *l, register_t *retval,
	struct pollfd *u_fds, u_int nfds,
	struct timeval *tv, sigset_t *mask)
{
	char		smallbits[32 * sizeof(struct pollfd)];
	struct proc	* const p = l->l_proc;
	void		*bits;
	sigset_t	oldmask;
	int		ncoll, error, timo;
	size_t		ni;
	struct timeval	sleeptv;

	if (nfds > p->p_fd->fd_nfiles) {
		/* forgiving; slightly wrong */
		nfds = p->p_fd->fd_nfiles;
	}
	ni = nfds * sizeof(struct pollfd);
	if (ni > sizeof(smallbits))
		bits = kmem_alloc(ni, KM_SLEEP);
	else
		bits = smallbits;

	error = copyin(u_fds, bits, ni);
	if (error)
		goto done;

	timo = 0;
	if (tv && inittimeleft(tv, &sleeptv) == -1) {
		error = EINVAL;
		goto done;
	}

	if (mask) {
		sigminusset(&sigcantmask, mask);
		mutex_enter(&p->p_smutex);
		oldmask = l->l_sigmask;
		l->l_sigmask = *mask;
		mutex_exit(&p->p_smutex);
	} else
		oldmask = l->l_sigmask;	/* XXXgcc */

	/* Same scan/sleep/rescan protocol as selcommon() above. */
	mutex_enter(&select_lock);
	for (;;) {
		ncoll = nselcoll;
		l->l_selflag = 1;
		mutex_exit(&select_lock);

		error = pollscan(l, (struct pollfd *)bits, nfds, retval);

		mutex_enter(&select_lock);
		if (error || *retval)
			break;
		if (tv && (timo = gettimeleft(tv, &sleeptv)) <= 0)
			break;
		if (l->l_selflag == 0 || nselcoll != ncoll)
			continue;
		l->l_selflag = 0;
		error = cv_timedwait_sig(&select_cv, &select_lock, timo);
		if (error != 0)
			break;
	}
	selclear();
	mutex_exit(&select_lock);

	if (mask) {
		mutex_enter(&p->p_smutex);
		l->l_sigmask = oldmask;
		mutex_exit(&p->p_smutex);
	}
 done:
	/* poll is not restarted after signals... */
	if (error == ERESTART)
		error = EINTR;
	if (error == EWOULDBLOCK)
		error = 0;
	if (error == 0)
		error = copyout(bits, u_fds, ni);
	if (bits != smallbits)
		kmem_free(bits, ni);
	return (error);
}

int
pollscan(struct lwp *l, struct pollfd *fds, int nfd, register_t *retval)
{
	struct proc	*p = l->l_proc;
	struct filedesc	*fdp;
	int		i, n;
	struct file	*fp;

	fdp = p->p_fd;
	n = 0;
	for (i = 0; i < nfd; i++, fds++) {
		if (fds->fd >= fdp->fd_nfiles) {
			fds->revents = POLLNVAL;
			n++;
		} else if (fds->fd < 0) {
			fds->revents = 0;
		} else {
			if ((fp = fd_getfile(fdp, fds->fd)) == NULL) {
				fds->revents = POLLNVAL;
				n++;
			} else {
				FILE_USE(fp);
				fds->revents = (*fp->f_ops->fo_poll)(fp,
				    fds->events | POLLERR | POLLHUP, l);
				if (fds->revents != 0)
					n++;
				FILE_UNUSE(fp, l);
			}
		}
	}
	*retval = n;
	return (0);
}

/*ARGSUSED*/
int
seltrue(dev_t dev, int events, struct lwp *l)
{

	return (events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
}

/*
 * Record a select request.
 */
void
selrecord(struct lwp *selector, struct selinfo *sip)
{

	mutex_enter(&select_lock);
	if (sip->sel_lwp == NULL) {
		/* First waiter. */
		sip->sel_lwp = selector;
		TAILQ_INSERT_TAIL(&selector->l_selwait, sip, sel_chain);
	} else if (sip->sel_lwp != selector) {
		/* A second LWP is waiting: record the collision. */
		sip->sel_collision = true;
	}
	mutex_exit(&select_lock);
}

/*
 * Do a wakeup when a selectable event occurs.
 */
void
selwakeup(struct selinfo *sip)
{
	struct lwp *l;

	mutex_enter(&select_lock);
	if (sip->sel_collision) {
		/* Multiple waiters - just notify everybody. */
		nselcoll++;
		sip->sel_collision = false;
		cv_broadcast(&select_cv);
	} else if (sip->sel_lwp != NULL) {
		/* Only one LWP waiting. */
		l = sip->sel_lwp;
		if (l->l_selflag != 0) {
			/* Not yet asleep - make it go around again. */
			l->l_selflag = 0;
		} else {
			/*
			 * If it's sleeping, wake it up.  If not, it's
			 * already awake but hasn't had a chance to remove
			 * itself from the selector yet.
			 */
			lwp_lock(l);
			if (l->l_wchan == &select_cv) {
				/* lwp_unsleep() releases the LWP lock. */
				lwp_unsleep(l);
			} else
				lwp_unlock(l);
		}
	}
	mutex_exit(&select_lock);
}

void
selnotify(struct selinfo *sip, long knhint)
{

	selwakeup(sip);
	KNOTE(&sip->sel_klist, knhint);
}
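
/*
 * Typical driver usage (a minimal, hypothetical sketch; the "mydev"
 * names are illustrative, not a real driver API).  A device's poll
 * routine records the caller when nothing is ready, and the
 * data-arrival path notifies any waiters:
 *
 *	int
 *	mydevpoll(dev_t dev, int events, struct lwp *l)
 *	{
 *		struct mydev_softc *sc = ...;
 *		int revents = 0;
 *
 *		if (events & (POLLIN | POLLRDNORM)) {
 *			if (sc->sc_havedata)
 *				revents |= events & (POLLIN | POLLRDNORM);
 *			else
 *				selrecord(l, &sc->sc_rsel);
 *		}
 *		return revents;
 *	}
 *
 * ...and in the interrupt handler, once data arrives:
 *
 *		sc->sc_havedata = 1;
 *		selnotify(&sc->sc_rsel, 0);
 */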

/*
 * Remove an LWP from all objects that it is waiting for.
 */
static void
selclear(void)
{
	struct selinfo *sip;
	struct lwp *l = curlwp;

	KASSERT(mutex_owned(&select_lock));

	TAILQ_FOREACH(sip, &l->l_selwait, sel_chain) {
		KASSERT(sip->sel_lwp == l);
		sip->sel_lwp = NULL;
	}
	TAILQ_INIT(&l->l_selwait);
}

/*
 * Initialize the select/poll system calls.
 */
void
selsysinit(void)
{

	mutex_init(&select_lock, MUTEX_DRIVER, IPL_VM);
	cv_init(&select_cv, "select");
}
