sys_generic.c revision 1.98 1 /* $NetBSD: sys_generic.c,v 1.98 2007/02/09 21:55:31 ad Exp $ */
2
3 /*
4 * Copyright (c) 1982, 1986, 1989, 1993
5 * The Regents of the University of California. All rights reserved.
6 * (c) UNIX System Laboratories, Inc.
7 * All or some portions of this file are derived from material licensed
8 * to the University of California by American Telephone and Telegraph
9 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
10 * the permission of UNIX System Laboratories, Inc.
11 *
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
14 * are met:
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in the
19 * documentation and/or other materials provided with the distribution.
20 * 3. Neither the name of the University nor the names of its contributors
21 * may be used to endorse or promote products derived from this software
22 * without specific prior written permission.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * SUCH DAMAGE.
35 *
36 * @(#)sys_generic.c 8.9 (Berkeley) 2/14/95
37 */
38
39 #include <sys/cdefs.h>
40 __KERNEL_RCSID(0, "$NetBSD: sys_generic.c,v 1.98 2007/02/09 21:55:31 ad Exp $");
41
42 #include "opt_ktrace.h"
43
44 #include <sys/param.h>
45 #include <sys/systm.h>
46 #include <sys/filedesc.h>
47 #include <sys/ioctl.h>
48 #include <sys/file.h>
49 #include <sys/proc.h>
50 #include <sys/socketvar.h>
51 #include <sys/signalvar.h>
52 #include <sys/uio.h>
53 #include <sys/kernel.h>
54 #include <sys/stat.h>
55 #include <sys/malloc.h>
56 #include <sys/poll.h>
57 #ifdef KTRACE
58 #include <sys/ktrace.h>
59 #endif
60
61 #include <sys/mount.h>
62 #include <sys/syscallargs.h>
63
64 #include <uvm/uvm_extern.h>
65
66 int selscan(struct lwp *, fd_mask *, fd_mask *, int, register_t *);
67 int pollscan(struct lwp *, struct pollfd *, int, register_t *);
68
69
70 /*
71 * Read system call.
72 */
73 /* ARGSUSED */
74 int
75 sys_read(struct lwp *l, void *v, register_t *retval)
76 {
77 struct sys_read_args /* {
78 syscallarg(int) fd;
79 syscallarg(void *) buf;
80 syscallarg(size_t) nbyte;
81 } */ *uap = v;
82 int fd;
83 struct file *fp;
84 struct proc *p;
85 struct filedesc *fdp;
86
87 fd = SCARG(uap, fd);
88 p = l->l_proc;
89 fdp = p->p_fd;
90
91 if ((fp = fd_getfile(fdp, fd)) == NULL)
92 return (EBADF);
93
94 if ((fp->f_flag & FREAD) == 0) {
95 simple_unlock(&fp->f_slock);
96 return (EBADF);
97 }
98
99 FILE_USE(fp);
100
101 /* dofileread() will unuse the descriptor for us */
102 return (dofileread(l, fd, fp, SCARG(uap, buf), SCARG(uap, nbyte),
103 &fp->f_offset, FOF_UPDATE_OFFSET, retval));
104 }
105
106 int
107 dofileread(struct lwp *l, int fd, struct file *fp, void *buf, size_t nbyte,
108 off_t *offset, int flags, register_t *retval)
109 {
110 struct iovec aiov;
111 struct uio auio;
112 struct proc *p;
113 struct vmspace *vm;
114 size_t cnt;
115 int error;
116 #ifdef KTRACE
117 struct iovec ktriov = { .iov_base = NULL, };
118 #endif
119 p = l->l_proc;
120
121 error = proc_vmspace_getref(p, &vm);
122 if (error) {
123 goto out;
124 }
125
126 aiov.iov_base = (caddr_t)buf;
127 aiov.iov_len = nbyte;
128 auio.uio_iov = &aiov;
129 auio.uio_iovcnt = 1;
130 auio.uio_resid = nbyte;
131 auio.uio_rw = UIO_READ;
132 auio.uio_vmspace = vm;
133
134 /*
135 * Reads return ssize_t because -1 is returned on error. Therefore
136 * we must restrict the length to SSIZE_MAX to avoid garbage return
137 * values.
138 */
139 if (auio.uio_resid > SSIZE_MAX) {
140 error = EINVAL;
141 goto out;
142 }
143
144 #ifdef KTRACE
145 /*
146 * if tracing, save a copy of iovec
147 */
148 if (KTRPOINT(p, KTR_GENIO))
149 ktriov = aiov;
150 #endif
151 cnt = auio.uio_resid;
152 error = (*fp->f_ops->fo_read)(fp, offset, &auio, fp->f_cred, flags);
153 if (error)
154 if (auio.uio_resid != cnt && (error == ERESTART ||
155 error == EINTR || error == EWOULDBLOCK))
156 error = 0;
157 cnt -= auio.uio_resid;
158 #ifdef KTRACE
159 if (KTRPOINT(p, KTR_GENIO) && error == 0)
160 ktrgenio(l, fd, UIO_READ, &ktriov, cnt, error);
161 #endif
162 *retval = cnt;
163 out:
164 FILE_UNUSE(fp, l);
165 uvmspace_free(vm);
166 return (error);
167 }
168
169 /*
170 * Scatter read system call.
171 */
172 int
173 sys_readv(struct lwp *l, void *v, register_t *retval)
174 {
175 struct sys_readv_args /* {
176 syscallarg(int) fd;
177 syscallarg(const struct iovec *) iovp;
178 syscallarg(int) iovcnt;
179 } */ *uap = v;
180 struct filedesc *fdp;
181 struct file *fp;
182 struct proc *p;
183 int fd;
184
185 fd = SCARG(uap, fd);
186 p = l->l_proc;
187 fdp = p->p_fd;
188
189 if ((fp = fd_getfile(fdp, fd)) == NULL)
190 return (EBADF);
191
192 if ((fp->f_flag & FREAD) == 0) {
193 simple_unlock(&fp->f_slock);
194 return (EBADF);
195 }
196
197 FILE_USE(fp);
198
199 /* dofilereadv() will unuse the descriptor for us */
200 return (dofilereadv(l, fd, fp, SCARG(uap, iovp), SCARG(uap, iovcnt),
201 &fp->f_offset, FOF_UPDATE_OFFSET, retval));
202 }
203
204 int
205 dofilereadv(struct lwp *l, int fd, struct file *fp, const struct iovec *iovp,
206 int iovcnt, off_t *offset, int flags, register_t *retval)
207 {
208 struct proc *p;
209 struct uio auio;
210 struct iovec *iov, *needfree, aiov[UIO_SMALLIOV];
211 struct vmspace *vm;
212 int i, error;
213 size_t cnt;
214 u_int iovlen;
215 #ifdef KTRACE
216 struct iovec *ktriov;
217 #endif
218
219 p = l->l_proc;
220 error = proc_vmspace_getref(p, &vm);
221 if (error) {
222 goto out;
223 }
224
225 #ifdef KTRACE
226 ktriov = NULL;
227 #endif
228 /* note: can't use iovlen until iovcnt is validated */
229 iovlen = iovcnt * sizeof(struct iovec);
230 if ((u_int)iovcnt > UIO_SMALLIOV) {
231 if ((u_int)iovcnt > IOV_MAX) {
232 error = EINVAL;
233 goto out;
234 }
235 iov = malloc(iovlen, M_IOV, M_WAITOK);
236 needfree = iov;
237 } else if ((u_int)iovcnt > 0) {
238 iov = aiov;
239 needfree = NULL;
240 } else {
241 error = EINVAL;
242 goto out;
243 }
244
245 auio.uio_iov = iov;
246 auio.uio_iovcnt = iovcnt;
247 auio.uio_rw = UIO_READ;
248 auio.uio_vmspace = vm;
249 error = copyin(iovp, iov, iovlen);
250 if (error)
251 goto done;
252 auio.uio_resid = 0;
253 for (i = 0; i < iovcnt; i++) {
254 auio.uio_resid += iov->iov_len;
255 /*
256 * Reads return ssize_t because -1 is returned on error.
257 * Therefore we must restrict the length to SSIZE_MAX to
258 * avoid garbage return values.
259 */
260 if (iov->iov_len > SSIZE_MAX || auio.uio_resid > SSIZE_MAX) {
261 error = EINVAL;
262 goto done;
263 }
264 iov++;
265 }
266 #ifdef KTRACE
267 /*
268 * if tracing, save a copy of iovec
269 */
270 if (KTRPOINT(p, KTR_GENIO)) {
271 ktriov = malloc(iovlen, M_TEMP, M_WAITOK);
272 memcpy((caddr_t)ktriov, (caddr_t)auio.uio_iov, iovlen);
273 }
274 #endif
275 cnt = auio.uio_resid;
276 error = (*fp->f_ops->fo_read)(fp, offset, &auio, fp->f_cred, flags);
277 if (error)
278 if (auio.uio_resid != cnt && (error == ERESTART ||
279 error == EINTR || error == EWOULDBLOCK))
280 error = 0;
281 cnt -= auio.uio_resid;
282 #ifdef KTRACE
283 if (ktriov != NULL) {
284 if (KTRPOINT(p, KTR_GENIO) && (error == 0))
285 ktrgenio(l, fd, UIO_READ, ktriov, cnt, error);
286 free(ktriov, M_TEMP);
287 }
288 #endif
289 *retval = cnt;
290 done:
291 if (needfree)
292 free(needfree, M_IOV);
293 out:
294 FILE_UNUSE(fp, l);
295 uvmspace_free(vm);
296 return (error);
297 }
298
299 /*
300 * Write system call
301 */
302 int
303 sys_write(struct lwp *l, void *v, register_t *retval)
304 {
305 struct sys_write_args /* {
306 syscallarg(int) fd;
307 syscallarg(const void *) buf;
308 syscallarg(size_t) nbyte;
309 } */ *uap = v;
310 int fd;
311 struct file *fp;
312 struct proc *p;
313 struct filedesc *fdp;
314
315 fd = SCARG(uap, fd);
316 p = l->l_proc;
317 fdp = p->p_fd;
318
319 if ((fp = fd_getfile(fdp, fd)) == NULL)
320 return (EBADF);
321
322 if ((fp->f_flag & FWRITE) == 0) {
323 simple_unlock(&fp->f_slock);
324 return (EBADF);
325 }
326
327 FILE_USE(fp);
328
329 /* dofilewrite() will unuse the descriptor for us */
330 return (dofilewrite(l, fd, fp, SCARG(uap, buf), SCARG(uap, nbyte),
331 &fp->f_offset, FOF_UPDATE_OFFSET, retval));
332 }
333
334 int
335 dofilewrite(struct lwp *l, int fd, struct file *fp, const void *buf,
336 size_t nbyte, off_t *offset, int flags, register_t *retval)
337 {
338 struct iovec aiov;
339 struct uio auio;
340 struct proc *p;
341 struct vmspace *vm;
342 size_t cnt;
343 int error;
344 #ifdef KTRACE
345 struct iovec ktriov = { .iov_base = NULL, };
346 #endif
347
348 p = l->l_proc;
349 error = proc_vmspace_getref(p, &vm);
350 if (error) {
351 goto out;
352 }
353 aiov.iov_base = __UNCONST(buf); /* XXXUNCONST kills const */
354 aiov.iov_len = nbyte;
355 auio.uio_iov = &aiov;
356 auio.uio_iovcnt = 1;
357 auio.uio_resid = nbyte;
358 auio.uio_rw = UIO_WRITE;
359 auio.uio_vmspace = vm;
360
361 /*
362 * Writes return ssize_t because -1 is returned on error. Therefore
363 * we must restrict the length to SSIZE_MAX to avoid garbage return
364 * values.
365 */
366 if (auio.uio_resid > SSIZE_MAX) {
367 error = EINVAL;
368 goto out;
369 }
370
371 #ifdef KTRACE
372 /*
373 * if tracing, save a copy of iovec
374 */
375 if (KTRPOINT(p, KTR_GENIO))
376 ktriov = aiov;
377 #endif
378 cnt = auio.uio_resid;
379 error = (*fp->f_ops->fo_write)(fp, offset, &auio, fp->f_cred, flags);
380 if (error) {
381 if (auio.uio_resid != cnt && (error == ERESTART ||
382 error == EINTR || error == EWOULDBLOCK))
383 error = 0;
384 if (error == EPIPE) {
385 mutex_enter(&proclist_mutex);
386 psignal(p, SIGPIPE);
387 mutex_exit(&proclist_mutex);
388 }
389 }
390 cnt -= auio.uio_resid;
391 #ifdef KTRACE
392 if (KTRPOINT(p, KTR_GENIO) && error == 0)
393 ktrgenio(l, fd, UIO_WRITE, &ktriov, cnt, error);
394 #endif
395 *retval = cnt;
396 out:
397 FILE_UNUSE(fp, l);
398 uvmspace_free(vm);
399 return (error);
400 }
401
402 /*
403 * Gather write system call
404 */
405 int
406 sys_writev(struct lwp *l, void *v, register_t *retval)
407 {
408 struct sys_writev_args /* {
409 syscallarg(int) fd;
410 syscallarg(const struct iovec *) iovp;
411 syscallarg(int) iovcnt;
412 } */ *uap = v;
413 int fd;
414 struct file *fp;
415 struct proc *p;
416 struct filedesc *fdp;
417
418 fd = SCARG(uap, fd);
419 p = l->l_proc;
420 fdp = p->p_fd;
421
422 if ((fp = fd_getfile(fdp, fd)) == NULL)
423 return (EBADF);
424
425 if ((fp->f_flag & FWRITE) == 0) {
426 simple_unlock(&fp->f_slock);
427 return (EBADF);
428 }
429
430 FILE_USE(fp);
431
432 /* dofilewritev() will unuse the descriptor for us */
433 return (dofilewritev(l, fd, fp, SCARG(uap, iovp), SCARG(uap, iovcnt),
434 &fp->f_offset, FOF_UPDATE_OFFSET, retval));
435 }
436
437 int
438 dofilewritev(struct lwp *l, int fd, struct file *fp, const struct iovec *iovp,
439 int iovcnt, off_t *offset, int flags, register_t *retval)
440 {
441 struct proc *p;
442 struct uio auio;
443 struct iovec *iov, *needfree, aiov[UIO_SMALLIOV];
444 struct vmspace *vm;
445 int i, error;
446 size_t cnt;
447 u_int iovlen;
448 #ifdef KTRACE
449 struct iovec *ktriov;
450 #endif
451
452 p = l->l_proc;
453 error = proc_vmspace_getref(p, &vm);
454 if (error) {
455 goto out;
456 }
457 #ifdef KTRACE
458 ktriov = NULL;
459 #endif
460 /* note: can't use iovlen until iovcnt is validated */
461 iovlen = iovcnt * sizeof(struct iovec);
462 if ((u_int)iovcnt > UIO_SMALLIOV) {
463 if ((u_int)iovcnt > IOV_MAX) {
464 error = EINVAL;
465 goto out;
466 }
467 iov = malloc(iovlen, M_IOV, M_WAITOK);
468 needfree = iov;
469 } else if ((u_int)iovcnt > 0) {
470 iov = aiov;
471 needfree = NULL;
472 } else {
473 error = EINVAL;
474 goto out;
475 }
476
477 auio.uio_iov = iov;
478 auio.uio_iovcnt = iovcnt;
479 auio.uio_rw = UIO_WRITE;
480 auio.uio_vmspace = vm;
481 error = copyin(iovp, iov, iovlen);
482 if (error)
483 goto done;
484 auio.uio_resid = 0;
485 for (i = 0; i < iovcnt; i++) {
486 auio.uio_resid += iov->iov_len;
487 /*
488 * Writes return ssize_t because -1 is returned on error.
489 * Therefore we must restrict the length to SSIZE_MAX to
490 * avoid garbage return values.
491 */
492 if (iov->iov_len > SSIZE_MAX || auio.uio_resid > SSIZE_MAX) {
493 error = EINVAL;
494 goto done;
495 }
496 iov++;
497 }
498 #ifdef KTRACE
499 /*
500 * if tracing, save a copy of iovec
501 */
502 if (KTRPOINT(p, KTR_GENIO)) {
503 ktriov = malloc(iovlen, M_TEMP, M_WAITOK);
504 memcpy((caddr_t)ktriov, (caddr_t)auio.uio_iov, iovlen);
505 }
506 #endif
507 cnt = auio.uio_resid;
508 error = (*fp->f_ops->fo_write)(fp, offset, &auio, fp->f_cred, flags);
509 if (error) {
510 if (auio.uio_resid != cnt && (error == ERESTART ||
511 error == EINTR || error == EWOULDBLOCK))
512 error = 0;
513 if (error == EPIPE) {
514 mutex_enter(&proclist_mutex);
515 psignal(p, SIGPIPE);
516 mutex_exit(&proclist_mutex);
517 }
518 }
519 cnt -= auio.uio_resid;
520 #ifdef KTRACE
521 if (ktriov != NULL) {
522 if (KTRPOINT(p, KTR_GENIO) && (error == 0))
523 ktrgenio(l, fd, UIO_WRITE, ktriov, cnt, error);
524 free(ktriov, M_TEMP);
525 }
526 #endif
527 *retval = cnt;
528 done:
529 if (needfree)
530 free(needfree, M_IOV);
531 out:
532 FILE_UNUSE(fp, l);
533 uvmspace_free(vm);
534 return (error);
535 }
536
/*
 * Ioctl system call.  Decodes the command word, stages the argument
 * in a kernel buffer (stack for small arguments, malloc otherwise),
 * dispatches to the file's fo_ioctl routine, and copies results back
 * out for IOC_OUT commands.
 */
/* ARGSUSED */
int
sys_ioctl(struct lwp *l, void *v, register_t *retval)
{
	struct sys_ioctl_args /* {
		syscallarg(int) fd;
		syscallarg(u_long) com;
		syscallarg(caddr_t) data;
	} */ *uap = v;
	struct file *fp;
	struct proc *p;
	struct filedesc *fdp;
	u_long com;
	int error;
	u_int size;
	caddr_t data, memp;
#define	STK_PARAMS	128
	u_long stkbuf[STK_PARAMS/sizeof(u_long)];

	error = 0;
	p = l->l_proc;
	fdp = p->p_fd;

	if ((fp = fd_getfile(fdp, SCARG(uap, fd))) == NULL)
		return (EBADF);

	FILE_USE(fp);

	/* The descriptor must be open for reading or writing. */
	if ((fp->f_flag & (FREAD | FWRITE)) == 0) {
		error = EBADF;
		com = 0;	/* "com" is referenced in the -1 case below */
		goto out;
	}

	/*
	 * FIONCLEX/FIOCLEX operate on the descriptor table itself,
	 * without involving the file's own ioctl routine.
	 */
	switch (com = SCARG(uap, com)) {
	case FIONCLEX:
		fdp->fd_ofileflags[SCARG(uap, fd)] &= ~UF_EXCLOSE;
		goto out;

	case FIOCLEX:
		fdp->fd_ofileflags[SCARG(uap, fd)] |= UF_EXCLOSE;
		goto out;
	}

	/*
	 * Interpret high order word to find amount of data to be
	 * copied to/from the user's address space.
	 */
	size = IOCPARM_LEN(com);
	if (size > IOCPARM_MAX) {
		error = ENOTTY;
		goto out;
	}
	memp = NULL;
	/* Stage the argument on the stack when it fits, else allocate. */
	if (size > sizeof(stkbuf)) {
		memp = (caddr_t)malloc((u_long)size, M_IOCTLOPS, M_WAITOK);
		data = memp;
	} else
		data = (caddr_t)stkbuf;
	if (com&IOC_IN) {
		if (size) {
			error = copyin(SCARG(uap, data), data, size);
			if (error) {
				if (memp)
					free(memp, M_IOCTLOPS);
				goto out;
			}
#ifdef KTRACE
			/* Trace the argument data copied in from the user. */
			if (KTRPOINT(p, KTR_GENIO)) {
				struct iovec iov;
				iov.iov_base = SCARG(uap, data);
				iov.iov_len = size;
				ktrgenio(l, SCARG(uap, fd), UIO_WRITE, &iov,
					size, 0);
			}
#endif
		} else
			/* Zero-size IOC_IN: the pointer itself is the datum. */
			*(caddr_t *)data = SCARG(uap, data);
	} else if ((com&IOC_OUT) && size)
		/*
		 * Zero the buffer so the user always
		 * gets back something deterministic.
		 */
		memset(data, 0, size);
	else if (com&IOC_VOID)
		*(caddr_t *)data = SCARG(uap, data);

	switch (com) {

	case FIONBIO:
		/* Update f_flag first, then let the file layer react. */
		if (*(int *)data != 0)
			fp->f_flag |= FNONBLOCK;
		else
			fp->f_flag &= ~FNONBLOCK;
		error = (*fp->f_ops->fo_ioctl)(fp, FIONBIO, data, l);
		break;

	case FIOASYNC:
		/* Update f_flag first, then let the file layer react. */
		if (*(int *)data != 0)
			fp->f_flag |= FASYNC;
		else
			fp->f_flag &= ~FASYNC;
		error = (*fp->f_ops->fo_ioctl)(fp, FIOASYNC, data, l);
		break;

	default:
		error = (*fp->f_ops->fo_ioctl)(fp, com, data, l);
		/*
		 * Copy any data to user, size was
		 * already set and checked above.
		 */
		if (error == 0 && (com&IOC_OUT) && size) {
			error = copyout(data, SCARG(uap, data), size);
#ifdef KTRACE
			if (KTRPOINT(p, KTR_GENIO)) {
				struct iovec iov;
				iov.iov_base = SCARG(uap, data);
				iov.iov_len = size;
				ktrgenio(l, SCARG(uap, fd), UIO_READ, &iov,
					size, error);
			}
#endif
		}
		break;
	}
	if (memp)
		free(memp, M_IOCTLOPS);
 out:
	FILE_UNUSE(fp, l);
	switch (error) {
	case -1:
		/*
		 * -1 from an ioctl routine is a bug in that routine;
		 * complain, then map it (via the fallthrough) to ENOTTY.
		 */
		printf("sys_ioctl: _IO%s%s('%c', %lu, %lu) returned -1: "
		    "pid=%d comm=%s\n",
		    (com & IOC_IN) ? "W" : "", (com & IOC_OUT) ? "R" : "",
		    (char)IOCGROUP(com), (com & 0xff), IOCPARM_LEN(com),
		    p->p_pid, p->p_comm);
		/* FALLTHROUGH */
	case EPASSTHROUGH:
		error = ENOTTY;
		/* FALLTHROUGH */
	default:
		return (error);
	}
}
684
685 int selwait, nselcoll;
686
687 /*
688 * Select system call.
689 */
690 int
691 sys_pselect(struct lwp *l, void *v, register_t *retval)
692 {
693 struct sys_pselect_args /* {
694 syscallarg(int) nd;
695 syscallarg(fd_set *) in;
696 syscallarg(fd_set *) ou;
697 syscallarg(fd_set *) ex;
698 syscallarg(const struct timespec *) ts;
699 syscallarg(sigset_t *) mask;
700 } */ * const uap = v;
701 struct timespec ats;
702 struct timeval atv, *tv = NULL;
703 sigset_t amask, *mask = NULL;
704 int error;
705
706 if (SCARG(uap, ts)) {
707 error = copyin(SCARG(uap, ts), &ats, sizeof(ats));
708 if (error)
709 return error;
710 atv.tv_sec = ats.tv_sec;
711 atv.tv_usec = ats.tv_nsec / 1000;
712 tv = &atv;
713 }
714 if (SCARG(uap, mask) != NULL) {
715 error = copyin(SCARG(uap, mask), &amask, sizeof(amask));
716 if (error)
717 return error;
718 mask = &amask;
719 }
720
721 return selcommon(l, retval, SCARG(uap, nd), SCARG(uap, in),
722 SCARG(uap, ou), SCARG(uap, ex), tv, mask);
723 }
724
/*
 * Validate a timeout and record the current monotonic time in
 * *sleeptv for later adjustment by gettimeleft().  Returns -1 if the
 * timeout is invalid, 0 otherwise.
 */
int
inittimeleft(struct timeval *tv, struct timeval *sleeptv)
{
	if (itimerfix(tv) != 0)
		return -1;
	getmicrouptime(sleeptv);
	return 0;
}
733
734 int
735 gettimeleft(struct timeval *tv, struct timeval *sleeptv)
736 {
737 /*
738 * We have to recalculate the timeout on every retry.
739 */
740 struct timeval slepttv;
741 /*
742 * reduce tv by elapsed time
743 * based on monotonic time scale
744 */
745 getmicrouptime(&slepttv);
746 timeradd(tv, sleeptv, tv);
747 timersub(tv, &slepttv, tv);
748 *sleeptv = slepttv;
749 return tvtohz(tv);
750 }
751
752 int
753 sys_select(struct lwp *l, void *v, register_t *retval)
754 {
755 struct sys_select_args /* {
756 syscallarg(int) nd;
757 syscallarg(fd_set *) in;
758 syscallarg(fd_set *) ou;
759 syscallarg(fd_set *) ex;
760 syscallarg(struct timeval *) tv;
761 } */ * const uap = v;
762 struct timeval atv, *tv = NULL;
763 int error;
764
765 if (SCARG(uap, tv)) {
766 error = copyin(SCARG(uap, tv), (caddr_t)&atv,
767 sizeof(atv));
768 if (error)
769 return error;
770 tv = &atv;
771 }
772
773 return selcommon(l, retval, SCARG(uap, nd), SCARG(uap, in),
774 SCARG(uap, ou), SCARG(uap, ex), tv, NULL);
775 }
776
/*
 * Common code for select(2) and pselect(2).  Copies in up to three
 * descriptor sets, repeatedly scans them (sleeping on selwait until
 * something is ready, the timeout expires, or a signal arrives), and
 * copies the result sets back out.  Both "tv" and "mask" may be NULL.
 */
int
selcommon(struct lwp *l, register_t *retval, int nd, fd_set *u_in,
	fd_set *u_ou, fd_set *u_ex, struct timeval *tv, sigset_t *mask)
{
	/* Stack room for three input sets plus three result sets. */
	char		smallbits[howmany(FD_SETSIZE, NFDBITS) *
			    sizeof(fd_mask) * 6];
	struct proc	* const p = l->l_proc;
	caddr_t		bits;
	int		s, ncoll, error, timo;
	size_t		ni;
	sigset_t	oldmask;
	struct timeval	sleeptv;

	error = 0;
	if (nd < 0)
		return (EINVAL);
	if (nd > p->p_fd->fd_nfiles) {
		/* forgiving; slightly wrong */
		nd = p->p_fd->fd_nfiles;
	}
	ni = howmany(nd, NFDBITS) * sizeof(fd_mask);
	/* Use the heap when six nd-bit sets do not fit on the stack. */
	if (ni * 6 > sizeof(smallbits))
		bits = malloc(ni * 6, M_TEMP, M_WAITOK);
	else
		bits = smallbits;

	/*
	 * Copy in each supplied set, or clear its slot if NULL.
	 * Slots 0-2 hold the inputs; slots 3-5 receive the results.
	 */
#define	getbits(name, x) \
	if (u_ ## name) { \
		error = copyin(u_ ## name, bits + ni * x, ni); \
		if (error) \
			goto done; \
	} else \
		memset(bits + ni * x, 0, ni);
	getbits(in, 0);
	getbits(ou, 1);
	getbits(ex, 2);
#undef	getbits

	timo = 0;
	if (tv && inittimeleft(tv, &sleeptv) == -1) {
		error = EINVAL;
		goto done;
	}

	/* Install the caller's temporary signal mask (pselect only). */
	if (mask) {
		sigminusset(&sigcantmask, mask);
		mutex_enter(&p->p_smutex);
		oldmask = l->l_sigmask;
		l->l_sigmask = *mask;
		mutex_exit(&p->p_smutex);
	} else
		oldmask = l->l_sigmask;	/* XXXgcc */

 retry:
	ncoll = nselcoll;
	l->l_flag |= L_SELECT;
	error = selscan(l, (fd_mask *)(bits + ni * 0),
	    (fd_mask *)(bits + ni * 3), nd, retval);
	if (error || *retval)
		goto donemask;
	if (tv && (timo = gettimeleft(tv, &sleeptv)) <= 0)
		goto donemask;
	s = splsched();
	/*
	 * If our L_SELECT flag was cleared or the global collision
	 * counter moved while we scanned, rescan instead of sleeping.
	 */
	if ((l->l_flag & L_SELECT) == 0 || nselcoll != ncoll) {
		splx(s);
		goto retry;
	}
	l->l_flag &= ~L_SELECT;
	error = tsleep((caddr_t)&selwait, PSOCK | PCATCH, "select", timo);
	splx(s);
	if (error == 0)
		goto retry;
 donemask:
	/* Restore the caller's original signal mask. */
	if (mask) {
		mutex_enter(&p->p_smutex);
		l->l_sigmask = oldmask;
		mutex_exit(&p->p_smutex);
	}
	l->l_flag &= ~L_SELECT;
 done:
	/* select is not restarted after signals... */
	if (error == ERESTART)
		error = EINTR;
	if (error == EWOULDBLOCK)
		error = 0;
	if (error == 0) {

		/* Copy the result sets (slots 3-5) back to the user. */
#define	putbits(name, x) \
	if (u_ ## name) { \
		error = copyout(bits + ni * x, u_ ## name, ni); \
		if (error) \
			goto out; \
	}
		putbits(in, 3);
		putbits(ou, 4);
		putbits(ex, 5);
#undef putbits
	}
 out:
	if (ni * 6 > sizeof(smallbits))
		free(bits, M_TEMP);
	return (error);
}
880
/*
 * Scan the three descriptor sets for ready descriptors.  ibitp points
 * at the read/write/except input sets laid end to end; obitp at the
 * corresponding result sets.  The number of ready descriptors found
 * is stored in *retval; EBADF is returned if a set names a closed
 * descriptor.
 */
int
selscan(struct lwp *l, fd_mask *ibitp, fd_mask *obitp, int nfd,
	register_t *retval)
{
	/* Poll events checked for the read/write/except sets, in order. */
	static const int flag[3] = { POLLRDNORM | POLLHUP | POLLERR,
			       POLLWRNORM | POLLHUP | POLLERR,
			       POLLRDBAND };
	struct proc *p = l->l_proc;
	struct filedesc	*fdp;
	int msk, i, j, fd, n;
	fd_mask ibits, obits;
	struct file *fp;

	fdp = p->p_fd;
	n = 0;
	for (msk = 0; msk < 3; msk++) {
		for (i = 0; i < nfd; i += NFDBITS) {
			ibits = *ibitp++;
			obits = 0;
			/*
			 * Visit each set bit in this word.
			 * NOTE(review): "ffs(ibits)" and "1 << j" assume
			 * fd_mask is no wider than int — verify on ports
			 * where that may differ.
			 */
			while ((j = ffs(ibits)) && (fd = i + --j) < nfd) {
				ibits &= ~(1 << j);
				if ((fp = fd_getfile(fdp, fd)) == NULL)
					return (EBADF);
				FILE_USE(fp);
				if ((*fp->f_ops->fo_poll)(fp, flag[msk], l)) {
					obits |= (1 << j);
					n++;
				}
				FILE_UNUSE(fp, l);
			}
			*obitp++ = obits;
		}
	}
	*retval = n;
	return (0);
}
917
918 /*
919 * Poll system call.
920 */
921 int
922 sys_poll(struct lwp *l, void *v, register_t *retval)
923 {
924 struct sys_poll_args /* {
925 syscallarg(struct pollfd *) fds;
926 syscallarg(u_int) nfds;
927 syscallarg(int) timeout;
928 } */ * const uap = v;
929 struct timeval atv, *tv = NULL;
930
931 if (SCARG(uap, timeout) != INFTIM) {
932 atv.tv_sec = SCARG(uap, timeout) / 1000;
933 atv.tv_usec = (SCARG(uap, timeout) % 1000) * 1000;
934 tv = &atv;
935 }
936
937 return pollcommon(l, retval, SCARG(uap, fds), SCARG(uap, nfds),
938 tv, NULL);
939 }
940
941 /*
942 * Poll system call.
943 */
944 int
945 sys_pollts(struct lwp *l, void *v, register_t *retval)
946 {
947 struct sys_pollts_args /* {
948 syscallarg(struct pollfd *) fds;
949 syscallarg(u_int) nfds;
950 syscallarg(const struct timespec *) ts;
951 syscallarg(const sigset_t *) mask;
952 } */ * const uap = v;
953 struct timespec ats;
954 struct timeval atv, *tv = NULL;
955 sigset_t amask, *mask = NULL;
956 int error;
957
958 if (SCARG(uap, ts)) {
959 error = copyin(SCARG(uap, ts), &ats, sizeof(ats));
960 if (error)
961 return error;
962 atv.tv_sec = ats.tv_sec;
963 atv.tv_usec = ats.tv_nsec / 1000;
964 tv = &atv;
965 }
966 if (SCARG(uap, mask)) {
967 error = copyin(SCARG(uap, mask), &amask, sizeof(amask));
968 if (error)
969 return error;
970 mask = &amask;
971 }
972
973 return pollcommon(l, retval, SCARG(uap, fds), SCARG(uap, nfds),
974 tv, mask);
975 }
976
/*
 * Common code for poll(2) and pollts(2).  Copies in the pollfd array,
 * repeatedly scans it (sleeping on selwait until an event fires, the
 * timeout expires, or a signal arrives), and copies the array — with
 * revents filled in — back out.  Both "tv" and "mask" may be NULL.
 */
int
pollcommon(struct lwp *l, register_t *retval,
	struct pollfd *u_fds, u_int nfds,
	struct timeval *tv, sigset_t *mask)
{
	/* Stack room for up to 32 pollfd records. */
	char		smallbits[32 * sizeof(struct pollfd)];
	struct proc	* const p = l->l_proc;
	caddr_t		bits;
	sigset_t	oldmask;
	int		s, ncoll, error, timo;
	size_t		ni;
	struct timeval	sleeptv;

	if (nfds > p->p_fd->fd_nfiles) {
		/* forgiving; slightly wrong */
		nfds = p->p_fd->fd_nfiles;
	}
	ni = nfds * sizeof(struct pollfd);
	/* Use the heap when the array does not fit on the stack. */
	if (ni > sizeof(smallbits))
		bits = malloc(ni, M_TEMP, M_WAITOK);
	else
		bits = smallbits;

	error = copyin(u_fds, bits, ni);
	if (error)
		goto done;

	timo = 0;
	if (tv && inittimeleft(tv, &sleeptv) == -1) {
		error = EINVAL;
		goto done;
	}

	/* Install the caller's temporary signal mask (pollts only). */
	if (mask) {
		sigminusset(&sigcantmask, mask);
		mutex_enter(&p->p_smutex);
		oldmask = l->l_sigmask;
		l->l_sigmask = *mask;
		mutex_exit(&p->p_smutex);
	} else
		oldmask = l->l_sigmask;	/* XXXgcc */

 retry:
	ncoll = nselcoll;
	l->l_flag |= L_SELECT;
	error = pollscan(l, (struct pollfd *)bits, nfds, retval);
	if (error || *retval)
		goto donemask;
	if (tv && (timo = gettimeleft(tv, &sleeptv)) <= 0)
		goto donemask;
	s = splsched();
	/*
	 * If our L_SELECT flag was cleared or the global collision
	 * counter moved while we scanned, rescan instead of sleeping.
	 */
	if ((l->l_flag & L_SELECT) == 0 || nselcoll != ncoll) {
		splx(s);
		goto retry;
	}
	l->l_flag &= ~L_SELECT;
	error = tsleep((caddr_t)&selwait, PSOCK | PCATCH, "poll", timo);
	splx(s);
	if (error == 0)
		goto retry;
 donemask:
	/* Restore the caller's original signal mask. */
	if (mask) {
		mutex_enter(&p->p_smutex);
		l->l_sigmask = oldmask;
		mutex_exit(&p->p_smutex);
	}

	l->l_flag &= ~L_SELECT;
 done:
	/* poll is not restarted after signals... */
	if (error == ERESTART)
		error = EINTR;
	if (error == EWOULDBLOCK)
		error = 0;
	if (error == 0) {
		error = copyout(bits, u_fds, ni);
		if (error)
			goto out;
	}
 out:
	if (ni > sizeof(smallbits))
		free(bits, M_TEMP);
	return (error);
}
1061
1062 int
1063 pollscan(struct lwp *l, struct pollfd *fds, int nfd, register_t *retval)
1064 {
1065 struct proc *p = l->l_proc;
1066 struct filedesc *fdp;
1067 int i, n;
1068 struct file *fp;
1069
1070 fdp = p->p_fd;
1071 n = 0;
1072 for (i = 0; i < nfd; i++, fds++) {
1073 if (fds->fd >= fdp->fd_nfiles) {
1074 fds->revents = POLLNVAL;
1075 n++;
1076 } else if (fds->fd < 0) {
1077 fds->revents = 0;
1078 } else {
1079 if ((fp = fd_getfile(fdp, fds->fd)) == NULL) {
1080 fds->revents = POLLNVAL;
1081 n++;
1082 } else {
1083 FILE_USE(fp);
1084 fds->revents = (*fp->f_ops->fo_poll)(fp,
1085 fds->events | POLLERR | POLLHUP, l);
1086 if (fds->revents != 0)
1087 n++;
1088 FILE_UNUSE(fp, l);
1089 }
1090 }
1091 }
1092 *retval = n;
1093 return (0);
1094 }
1095
1096 /*ARGSUSED*/
1097 int
1098 seltrue(dev_t dev, int events, struct lwp *l)
1099 {
1100
1101 return (events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
1102 }
1103
/*
 * Record a select request.  The first selecting process is remembered
 * in sip->sel_pid; if a different process later selects on the same
 * selinfo while the recorded one has an LWP asleep on selwait, the
 * collision flag is raised so selwakeup() will wake everyone instead.
 */
void
selrecord(struct lwp *selector, struct selinfo *sip)
{
	struct lwp	*l;
	struct proc	*p;
	pid_t		mypid;

	mypid = selector->l_proc->p_pid;
	/* Already recorded for us; nothing to do. */
	if (sip->sel_pid == mypid)
		return;

	mutex_enter(&proclist_mutex);
	if (sip->sel_pid && (p = p_find(sip->sel_pid, PFIND_LOCKED))) {
		/*
		 * Another live process is already recorded here.  If any
		 * of its LWPs is currently asleep in select/poll, this is
		 * a collision.
		 */
		mutex_enter(&p->p_smutex);
		LIST_FOREACH(l, &p->p_lwps, l_sibling) {
			lwp_lock(l);
			/*
			 * NOTE(review): the wchan is compared via a caddr_t
			 * cast here but via wchan_t in selwakeup() — confirm
			 * the two casts are interchangeable on all ports.
			 */
			if (l->l_wchan == (caddr_t)&selwait &&
			    l->l_stat == LSSLEEP) {
				sip->sel_collision = 1;
				lwp_unlock(l);
				break;
			}
			lwp_unlock(l);
		}
		mutex_exit(&p->p_smutex);
	}
	mutex_exit(&proclist_mutex);

	/* No collision: we become the recorded selector. */
	if (!sip->sel_collision)
		sip->sel_pid = mypid;
}
1138
1139 /*
1140 * Do a wakeup when a selectable event occurs.
1141 */
1142 void
1143 selwakeup(sip)
1144 struct selinfo *sip;
1145 {
1146 struct lwp *l;
1147 struct proc *p;
1148
1149 if (sip->sel_pid == 0)
1150 return;
1151 if (sip->sel_collision) {
1152 sip->sel_pid = 0;
1153 nselcoll++;
1154 sip->sel_collision = 0;
1155 wakeup((caddr_t)&selwait);
1156 return;
1157 }
1158
1159 /*
1160 * We must use the proclist_mutex as we can be called from an
1161 * interrupt context.
1162 */
1163 mutex_enter(&proclist_mutex);
1164 p = p_find(sip->sel_pid, PFIND_LOCKED);
1165 sip->sel_pid = 0;
1166 if (p == NULL) {
1167 mutex_exit(&proclist_mutex);
1168 return;
1169 }
1170
1171 mutex_enter(&p->p_smutex);
1172 LIST_FOREACH(l, &p->p_lwps, l_sibling) {
1173 lwp_lock(l);
1174 if (l->l_wchan == (wchan_t)&selwait && l->l_stat == LSSLEEP) {
1175 /* setrunnable() will release the lock. */
1176 setrunnable(l);
1177 } else {
1178 if (l->l_flag & L_SELECT)
1179 l->l_flag &= ~L_SELECT;
1180 lwp_unlock(l);
1181 }
1182 }
1183 mutex_exit(&p->p_smutex);
1184 mutex_exit(&proclist_mutex);
1185 }
1186