/* $NetBSD: sys_generic.c,v 1.54.2.6 2002/04/01 07:47:57 nathanw Exp $ */
2
3 /*
4 * Copyright (c) 1982, 1986, 1989, 1993
5 * The Regents of the University of California. All rights reserved.
6 * (c) UNIX System Laboratories, Inc.
7 * All or some portions of this file are derived from material licensed
8 * to the University of California by American Telephone and Telegraph
9 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
10 * the permission of UNIX System Laboratories, Inc.
11 *
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
14 * are met:
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in the
19 * documentation and/or other materials provided with the distribution.
20 * 3. All advertising materials mentioning features or use of this software
21 * must display the following acknowledgement:
22 * This product includes software developed by the University of
23 * California, Berkeley and its contributors.
24 * 4. Neither the name of the University nor the names of its contributors
25 * may be used to endorse or promote products derived from this software
26 * without specific prior written permission.
27 *
28 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
29 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
30 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
31 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
32 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
34 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
35 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
36 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
37 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
38 * SUCH DAMAGE.
39 *
40 * @(#)sys_generic.c 8.9 (Berkeley) 2/14/95
41 */
42
43 #include <sys/cdefs.h>
44 __KERNEL_RCSID(0, "$NetBSD: sys_generic.c,v 1.54.2.6 2002/04/01 07:47:57 nathanw Exp $");
45
46 #include "opt_ktrace.h"
47
48 #include <sys/param.h>
49 #include <sys/systm.h>
50 #include <sys/filedesc.h>
51 #include <sys/ioctl.h>
52 #include <sys/file.h>
53 #include <sys/lwp.h>
54 #include <sys/proc.h>
55 #include <sys/socketvar.h>
56 #include <sys/signalvar.h>
57 #include <sys/uio.h>
58 #include <sys/kernel.h>
59 #include <sys/stat.h>
60 #include <sys/malloc.h>
61 #include <sys/poll.h>
62 #ifdef KTRACE
63 #include <sys/ktrace.h>
64 #endif
65
66 #include <sys/mount.h>
67 #include <sys/syscallargs.h>
68
/* Internal helpers shared by select(2) and poll(2); defined below. */
int selscan __P((struct proc *, fd_mask *, fd_mask *, int, register_t *));
int pollscan __P((struct proc *, struct pollfd *, int, register_t *));
71
/*
 * Read system call: read(fd, buf, nbyte).
 */
/* ARGSUSED */
int
sys_read(struct lwp *l, void *v, register_t *retval)
{
	struct sys_read_args /* {
		syscallarg(int) fd;
		syscallarg(void *) buf;
		syscallarg(size_t) nbyte;
	} */ *uap = v;
	int fd;
	struct file *fp;
	struct proc *p;
	struct filedesc *fdp;

	fd = SCARG(uap, fd);
	p = l->l_proc;
	fdp = p->p_fd;

	/* Translate the descriptor number to a file structure. */
	if ((fp = fd_getfile(fdp, fd)) == NULL)
		return (EBADF);

	/* The descriptor must be open for reading. */
	if ((fp->f_flag & FREAD) == 0)
		return (EBADF);

	/* Mark the file in use; dofileread() drops this reference. */
	FILE_USE(fp);

	/* dofileread() will unuse the descriptor for us */
	return (dofileread(p, fd, fp, SCARG(uap, buf), SCARG(uap, nbyte),
	    &fp->f_offset, FOF_UPDATE_OFFSET, retval));
}
105
/*
 * Common code for read-style syscalls: transfer at most nbyte bytes
 * from fp into buf through the file's fo_read op, starting at *offset.
 * flags (e.g. FOF_UPDATE_OFFSET) are passed through to fo_read.
 * On success, *retval holds the number of bytes transferred.
 * Consumes the caller's FILE_USE() reference on fp in all cases.
 */
int
dofileread(struct proc *p, int fd, struct file *fp, void *buf, size_t nbyte,
	off_t *offset, int flags, register_t *retval)
{
	struct uio auio;
	struct iovec aiov;
	long cnt, error;
#ifdef KTRACE
	struct iovec ktriov;	/* snapshot of the iovec for ktrace */
#endif
	error = 0;

	aiov.iov_base = (caddr_t)buf;
	aiov.iov_len = nbyte;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_resid = nbyte;
	auio.uio_rw = UIO_READ;
	auio.uio_segflg = UIO_USERSPACE;
	auio.uio_procp = p;

	/*
	 * Reads return ssize_t because -1 is returned on error. Therefore
	 * we must restrict the length to SSIZE_MAX to avoid garbage return
	 * values.
	 */
	if (auio.uio_resid > SSIZE_MAX) {
		error = EINVAL;
		goto out;
	}

#ifdef KTRACE
	/*
	 * if tracing, save a copy of iovec (fo_read consumes auio/aiov)
	 */
	if (KTRPOINT(p, KTR_GENIO))
		ktriov = aiov;
#endif
	cnt = auio.uio_resid;
	error = (*fp->f_ops->fo_read)(fp, offset, &auio, fp->f_cred, flags);
	if (error)
		/*
		 * If the transfer was interrupted after some data moved,
		 * report the partial transfer rather than the error.
		 */
		if (auio.uio_resid != cnt && (error == ERESTART ||
		    error == EINTR || error == EWOULDBLOCK))
			error = 0;
	cnt -= auio.uio_resid;		/* bytes actually transferred */
#ifdef KTRACE
	if (KTRPOINT(p, KTR_GENIO) && error == 0)
		ktrgenio(p, fd, UIO_READ, &ktriov, cnt, error);
#endif
	*retval = cnt;
 out:
	FILE_UNUSE(fp, p);
	return (error);
}
160
/*
 * Scatter read system call: readv(fd, iovp, iovcnt).
 */
int
sys_readv(struct lwp *l, void *v, register_t *retval)
{
	struct sys_readv_args /* {
		syscallarg(int) fd;
		syscallarg(const struct iovec *) iovp;
		syscallarg(int) iovcnt;
	} */ *uap = v;
	int fd;
	struct file *fp;
	struct proc *p;
	struct filedesc *fdp;

	fd = SCARG(uap, fd);
	p = l->l_proc;
	fdp = p->p_fd;

	/* Translate the descriptor number to a file structure. */
	if ((fp = fd_getfile(fdp, fd)) == NULL)
		return (EBADF);

	/* The descriptor must be open for reading. */
	if ((fp->f_flag & FREAD) == 0)
		return (EBADF);

	/* Mark the file in use; dofilereadv() drops this reference. */
	FILE_USE(fp);

	/* dofilereadv() will unuse the descriptor for us */
	return (dofilereadv(p, fd, fp, SCARG(uap, iovp), SCARG(uap, iovcnt),
	    &fp->f_offset, FOF_UPDATE_OFFSET, retval));
}
193
/*
 * Common code for scatter reads: copy the user iovec array in, validate
 * it (count in (0, IOV_MAX], total length <= SSIZE_MAX), and hand it to
 * the file's fo_read op.  Small iovec arrays use the on-stack aiov[];
 * larger ones are malloc'd.  On success, *retval holds the number of
 * bytes transferred.  Consumes the caller's FILE_USE() reference on fp.
 */
int
dofilereadv(struct proc *p, int fd, struct file *fp, const struct iovec *iovp,
	int iovcnt, off_t *offset, int flags, register_t *retval)
{
	struct uio auio;
	struct iovec *iov, *needfree, aiov[UIO_SMALLIOV];
	long i, cnt, error;
	u_int iovlen;
#ifdef KTRACE
	struct iovec *ktriov;	/* malloc'd copy of iovec for ktrace */
#endif

	error = 0;
#ifdef KTRACE
	ktriov = NULL;
#endif
	/* note: can't use iovlen until iovcnt is validated */
	iovlen = iovcnt * sizeof(struct iovec);
	if ((u_int)iovcnt > UIO_SMALLIOV) {
		if ((u_int)iovcnt > IOV_MAX) {
			error = EINVAL;
			goto out;
		}
		iov = malloc(iovlen, M_IOV, M_WAITOK);
		needfree = iov;
	} else if ((u_int)iovcnt > 0) {
		/* fits in the on-stack array; nothing to free */
		iov = aiov;
		needfree = NULL;
	} else {
		/* iovcnt == 0 (or negative, caught by the unsigned casts) */
		error = EINVAL;
		goto out;
	}

	auio.uio_iov = iov;
	auio.uio_iovcnt = iovcnt;
	auio.uio_rw = UIO_READ;
	auio.uio_segflg = UIO_USERSPACE;
	auio.uio_procp = p;
	error = copyin(iovp, iov, iovlen);
	if (error)
		goto done;
	auio.uio_resid = 0;
	for (i = 0; i < iovcnt; i++) {
		auio.uio_resid += iov->iov_len;
		/*
		 * Reads return ssize_t because -1 is returned on error.
		 * Therefore we must restrict the length to SSIZE_MAX to
		 * avoid garbage return values.
		 */
		if (iov->iov_len > SSIZE_MAX || auio.uio_resid > SSIZE_MAX) {
			error = EINVAL;
			goto done;
		}
		iov++;
	}
#ifdef KTRACE
	/*
	 * if tracing, save a copy of iovec
	 */
	if (KTRPOINT(p, KTR_GENIO)) {
		ktriov = malloc(iovlen, M_TEMP, M_WAITOK);
		memcpy((caddr_t)ktriov, (caddr_t)auio.uio_iov, iovlen);
	}
#endif
	cnt = auio.uio_resid;
	error = (*fp->f_ops->fo_read)(fp, offset, &auio, fp->f_cred, flags);
	if (error)
		/* Soften interruption errors after a partial transfer. */
		if (auio.uio_resid != cnt && (error == ERESTART ||
		    error == EINTR || error == EWOULDBLOCK))
			error = 0;
	cnt -= auio.uio_resid;		/* bytes actually transferred */
#ifdef KTRACE
	if (ktriov != NULL) {
		if (error == 0)
			ktrgenio(p, fd, UIO_READ, ktriov, cnt, error);
		free(ktriov, M_TEMP);
	}
#endif
	*retval = cnt;
 done:
	if (needfree)
		free(needfree, M_IOV);
 out:
	FILE_UNUSE(fp, p);
	return (error);
}
280
/*
 * Write system call: write(fd, buf, nbyte).
 */
int
sys_write(struct lwp *l, void *v, register_t *retval)
{
	struct sys_write_args /* {
		syscallarg(int) fd;
		syscallarg(const void *) buf;
		syscallarg(size_t) nbyte;
	} */ *uap = v;
	int fd;
	struct file *fp;
	struct proc *p;
	struct filedesc *fdp;

	fd = SCARG(uap, fd);
	p = l->l_proc;
	fdp = p->p_fd;

	/* Translate the descriptor number to a file structure. */
	if ((fp = fd_getfile(fdp, fd)) == NULL)
		return (EBADF);

	/* The descriptor must be open for writing. */
	if ((fp->f_flag & FWRITE) == 0)
		return (EBADF);

	/* Mark the file in use; dofilewrite() drops this reference. */
	FILE_USE(fp);

	/* dofilewrite() will unuse the descriptor for us */
	return (dofilewrite(p, fd, fp, SCARG(uap, buf), SCARG(uap, nbyte),
	    &fp->f_offset, FOF_UPDATE_OFFSET, retval));
}
313
/*
 * Common code for write-style syscalls: transfer at most nbyte bytes
 * from buf to fp through the file's fo_write op, starting at *offset.
 * flags (e.g. FOF_UPDATE_OFFSET) are passed through to fo_write.
 * Delivers SIGPIPE on EPIPE.  On success, *retval holds the number of
 * bytes transferred.  Consumes the caller's FILE_USE() reference on fp.
 */
int
dofilewrite(struct proc *p, int fd, struct file *fp, const void *buf,
	size_t nbyte, off_t *offset, int flags, register_t *retval)
{
	struct uio auio;
	struct iovec aiov;
	long cnt, error;
#ifdef KTRACE
	struct iovec ktriov;	/* snapshot of the iovec for ktrace */
#endif

	error = 0;
	aiov.iov_base = (caddr_t)buf;		/* XXX kills const */
	aiov.iov_len = nbyte;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_resid = nbyte;
	auio.uio_rw = UIO_WRITE;
	auio.uio_segflg = UIO_USERSPACE;
	auio.uio_procp = p;

	/*
	 * Writes return ssize_t because -1 is returned on error. Therefore
	 * we must restrict the length to SSIZE_MAX to avoid garbage return
	 * values.
	 */
	if (auio.uio_resid > SSIZE_MAX) {
		error = EINVAL;
		goto out;
	}

#ifdef KTRACE
	/*
	 * if tracing, save a copy of iovec (fo_write consumes auio/aiov)
	 */
	if (KTRPOINT(p, KTR_GENIO))
		ktriov = aiov;
#endif
	cnt = auio.uio_resid;
	error = (*fp->f_ops->fo_write)(fp, offset, &auio, fp->f_cred, flags);
	if (error) {
		/* Soften interruption errors after a partial transfer. */
		if (auio.uio_resid != cnt && (error == ERESTART ||
		    error == EINTR || error == EWOULDBLOCK))
			error = 0;
		/* Broken pipe: POSIX requires SIGPIPE in addition to EPIPE. */
		if (error == EPIPE)
			psignal(p, SIGPIPE);
	}
	cnt -= auio.uio_resid;		/* bytes actually transferred */
#ifdef KTRACE
	if (KTRPOINT(p, KTR_GENIO) && error == 0)
		ktrgenio(p, fd, UIO_WRITE, &ktriov, cnt, error);
#endif
	*retval = cnt;
 out:
	FILE_UNUSE(fp, p);
	return (error);
}
371
/*
 * Gather write system call: writev(fd, iovp, iovcnt).
 */
int
sys_writev(struct lwp *l, void *v, register_t *retval)
{
	struct sys_writev_args /* {
		syscallarg(int) fd;
		syscallarg(const struct iovec *) iovp;
		syscallarg(int) iovcnt;
	} */ *uap = v;
	int fd;
	struct file *fp;
	struct proc *p;
	struct filedesc *fdp;

	fd = SCARG(uap, fd);
	p = l->l_proc;
	fdp = p->p_fd;

	/* Translate the descriptor number to a file structure. */
	if ((fp = fd_getfile(fdp, fd)) == NULL)
		return (EBADF);

	/* The descriptor must be open for writing. */
	if ((fp->f_flag & FWRITE) == 0)
		return (EBADF);

	/* Mark the file in use; dofilewritev() drops this reference. */
	FILE_USE(fp);

	/* dofilewritev() will unuse the descriptor for us */
	return (dofilewritev(p, fd, fp, SCARG(uap, iovp), SCARG(uap, iovcnt),
	    &fp->f_offset, FOF_UPDATE_OFFSET, retval));
}
404
405 int
406 dofilewritev(struct proc *p, int fd, struct file *fp, const struct iovec *iovp,
407 int iovcnt, off_t *offset, int flags, register_t *retval)
408 {
409 struct uio auio;
410 struct iovec *iov, *needfree, aiov[UIO_SMALLIOV];
411 long i, cnt, error;
412 u_int iovlen;
413 #ifdef KTRACE
414 struct iovec *ktriov;
415 #endif
416
417 error = 0;
418 #ifdef KTRACE
419 ktriov = NULL;
420 #endif
421 /* note: can't use iovlen until iovcnt is validated */
422 iovlen = iovcnt * sizeof(struct iovec);
423 if ((u_int)iovcnt > UIO_SMALLIOV) {
424 if ((u_int)iovcnt > IOV_MAX) {
425 error = EINVAL;
426 goto out;
427 }
428 iov = malloc(iovlen, M_IOV, M_WAITOK);
429 needfree = iov;
430 } else if ((u_int)iovcnt > 0) {
431 iov = aiov;
432 needfree = NULL;
433 } else {
434 error = EINVAL;
435 goto out;
436 }
437
438 auio.uio_iov = iov;
439 auio.uio_iovcnt = iovcnt;
440 auio.uio_rw = UIO_WRITE;
441 auio.uio_segflg = UIO_USERSPACE;
442 auio.uio_procp = p;
443 error = copyin(iovp, iov, iovlen);
444 if (error)
445 goto done;
446 auio.uio_resid = 0;
447 for (i = 0; i < iovcnt; i++) {
448 auio.uio_resid += iov->iov_len;
449 /*
450 * Writes return ssize_t because -1 is returned on error.
451 * Therefore we must restrict the length to SSIZE_MAX to
452 * avoid garbage return values.
453 */
454 if (iov->iov_len > SSIZE_MAX || auio.uio_resid > SSIZE_MAX) {
455 error = EINVAL;
456 goto done;
457 }
458 iov++;
459 }
460 #ifdef KTRACE
461 /*
462 * if tracing, save a copy of iovec
463 */
464 if (KTRPOINT(p, KTR_GENIO)) {
465 ktriov = malloc(iovlen, M_TEMP, M_WAITOK);
466 memcpy((caddr_t)ktriov, (caddr_t)auio.uio_iov, iovlen);
467 }
468 #endif
469 cnt = auio.uio_resid;
470 error = (*fp->f_ops->fo_write)(fp, offset, &auio, fp->f_cred, flags);
471 if (error) {
472 if (auio.uio_resid != cnt && (error == ERESTART ||
473 error == EINTR || error == EWOULDBLOCK))
474 error = 0;
475 if (error == EPIPE)
476 psignal(p, SIGPIPE);
477 }
478 cnt -= auio.uio_resid;
479 #ifdef KTRACE
480 if (KTRPOINT(p, KTR_GENIO))
481 if (error == 0) {
482 ktrgenio(p, fd, UIO_WRITE, ktriov, cnt, error);
483 free(ktriov, M_TEMP);
484 }
485 #endif
486 *retval = cnt;
487 done:
488 if (needfree)
489 free(needfree, M_IOV);
490 out:
491 FILE_UNUSE(fp, p);
492 return (error);
493 }
494
/*
 * Ioctl system call: decode the command word, marshal the argument
 * to/from user space as IOC_IN/IOC_OUT/IOC_VOID dictate, and dispatch
 * to the file's fo_ioctl op (with a few commands handled inline).
 */
/* ARGSUSED */
int
sys_ioctl(struct lwp *l, void *v, register_t *retval)
{
	struct sys_ioctl_args /* {
		syscallarg(int) fd;
		syscallarg(u_long) com;
		syscallarg(caddr_t) data;
	} */ *uap = v;
	struct file *fp;
	struct proc *p;
	struct filedesc *fdp;
	u_long com;
	int error;
	u_int size;
	caddr_t data, memp;
	int tmp;
/* Size of the on-stack argument buffer; larger arguments are malloc'd. */
#define STK_PARAMS 128
	u_long stkbuf[STK_PARAMS/sizeof(u_long)];

	error = 0;
	p = l->l_proc;
	fdp = p->p_fd;

	if ((fp = fd_getfile(fdp, SCARG(uap, fd))) == NULL)
		return (EBADF);

	FILE_USE(fp);

	/* The descriptor must be open for at least one of read/write. */
	if ((fp->f_flag & (FREAD | FWRITE)) == 0) {
		error = EBADF;
		goto out;
	}

	/*
	 * FIONCLEX/FIOCLEX only flip the close-on-exec flag in the
	 * descriptor table; they never reach fo_ioctl.
	 */
	switch (com = SCARG(uap, com)) {
	case FIONCLEX:
		fdp->fd_ofileflags[SCARG(uap, fd)] &= ~UF_EXCLOSE;
		goto out;

	case FIOCLEX:
		fdp->fd_ofileflags[SCARG(uap, fd)] |= UF_EXCLOSE;
		goto out;
	}

	/*
	 * Interpret high order word to find amount of data to be
	 * copied to/from the user's address space.
	 */
	size = IOCPARM_LEN(com);
	if (size > IOCPARM_MAX) {
		error = ENOTTY;
		goto out;
	}
	memp = NULL;
	if (size > sizeof(stkbuf)) {
		/* Argument too big for the stack buffer; allocate one. */
		memp = (caddr_t)malloc((u_long)size, M_IOCTLOPS, M_WAITOK);
		data = memp;
	} else
		data = (caddr_t)stkbuf;
	if (com&IOC_IN) {
		if (size) {
			/* Copy the input argument in from user space. */
			error = copyin(SCARG(uap, data), data, size);
			if (error) {
				if (memp)
					free(memp, M_IOCTLOPS);
				goto out;
			}
		} else
			/* Zero-size IOC_IN: pass the user pointer itself. */
			*(caddr_t *)data = SCARG(uap, data);
	} else if ((com&IOC_OUT) && size)
		/*
		 * Zero the buffer so the user always
		 * gets back something deterministic.
		 */
		memset(data, 0, size);
	else if (com&IOC_VOID)
		*(caddr_t *)data = SCARG(uap, data);

	switch (com) {

	case FIONBIO:
		/* Keep f_flag's FNONBLOCK in sync, then tell the object. */
		if ((tmp = *(int *)data) != 0)
			fp->f_flag |= FNONBLOCK;
		else
			fp->f_flag &= ~FNONBLOCK;
		error = (*fp->f_ops->fo_ioctl)(fp, FIONBIO, (caddr_t)&tmp, p);
		break;

	case FIOASYNC:
		/* Keep f_flag's FASYNC in sync, then tell the object. */
		if ((tmp = *(int *)data) != 0)
			fp->f_flag |= FASYNC;
		else
			fp->f_flag &= ~FASYNC;
		error = (*fp->f_ops->fo_ioctl)(fp, FIOASYNC, (caddr_t)&tmp, p);
		break;

	case FIOSETOWN:
		tmp = *(int *)data;
		if (fp->f_type == DTYPE_SOCKET) {
			/* Sockets store the pgid directly. */
			((struct socket *)fp->f_data)->so_pgid = tmp;
			error = 0;
			break;
		}
		if (tmp <= 0) {
			/* Non-positive: negated process-group id. */
			tmp = -tmp;
		} else {
			/* Positive: a pid; translate to its process group. */
			struct proc *p1 = pfind(tmp);
			if (p1 == 0) {
				error = ESRCH;
				break;
			}
			tmp = p1->p_pgrp->pg_id;
		}
		error = (*fp->f_ops->fo_ioctl)
			(fp, TIOCSPGRP, (caddr_t)&tmp, p);
		break;

	case FIOGETOWN:
		if (fp->f_type == DTYPE_SOCKET) {
			error = 0;
			*(int *)data = ((struct socket *)fp->f_data)->so_pgid;
			break;
		}
		error = (*fp->f_ops->fo_ioctl)(fp, TIOCGPGRP, data, p);
		if (error == 0)
			/* TIOCGPGRP returns a pgrp; FIOGETOWN negates it. */
			*(int *)data = -*(int *)data;
		break;

	default:
		error = (*fp->f_ops->fo_ioctl)(fp, com, data, p);
		/*
		 * Copy any data to user, size was
		 * already set and checked above.
		 */
		if (error == 0 && (com&IOC_OUT) && size)
			error = copyout(data, SCARG(uap, data), size);
		break;
	}
	if (memp)
		free(memp, M_IOCTLOPS);
 out:
	FILE_UNUSE(fp, p);
	switch (error) {
	case -1:
		/* -1 is the obsolete pass-through value; warn loudly. */
		printf("sys_ioctl: _IO%s%s('%c', %lu, %lu) returned -1: "
		    "pid=%d comm=%s\n",
		    (com & IOC_IN) ? "W" : "", (com & IOC_OUT) ? "R" : "",
		    (char)IOCGROUP(com), (com & 0xff), IOCPARM_LEN(com),
		    p->p_pid, p->p_comm);
		/* FALLTHROUGH */
	case EPASSTHROUGH:
		/* No layer handled the command. */
		error = ENOTTY;
		/* FALLTHROUGH */
	default:
		return (error);
	}
}
655
/*
 * Global select/poll state: selwait is the common sleep channel,
 * nselcoll counts selrecord() collisions (see selrecord/selwakeup).
 */
int selwait, nselcoll;
657
/*
 * Select system call.  The three input fd_sets and the three result
 * sets are packed consecutively into one buffer (hence "* 6" below);
 * slots 0-2 are in/ou/ex on input, 3-5 the corresponding results.
 */
int
sys_select(struct lwp *l, void *v, register_t *retval)
{
	struct sys_select_args /* {
		syscallarg(int) nd;
		syscallarg(fd_set *) in;
		syscallarg(fd_set *) ou;
		syscallarg(fd_set *) ex;
		syscallarg(struct timeval *) tv;
	} */ *uap = v;
	struct proc *p;
	caddr_t bits;
	/* On-stack buffer for the common case of small descriptor counts. */
	char smallbits[howmany(FD_SETSIZE, NFDBITS) *
	    sizeof(fd_mask) * 6];
	struct timeval atv;
	int s, ncoll, error, timo;
	size_t ni;		/* bytes per single fd_set */

	error = 0;
	p = l->l_proc;
	if (SCARG(uap, nd) < 0)
		return (EINVAL);
	if (SCARG(uap, nd) > p->p_fd->fd_nfiles) {
		/* forgiving; slightly wrong */
		SCARG(uap, nd) = p->p_fd->fd_nfiles;
	}
	ni = howmany(SCARG(uap, nd), NFDBITS) * sizeof(fd_mask);
	if (ni * 6 > sizeof(smallbits))
		bits = malloc(ni * 6, M_TEMP, M_WAITOK);
	else
		bits = smallbits;

	/* Copy in each supplied set; a NULL set reads as all-zero. */
#define	getbits(name, x) \
	if (SCARG(uap, name)) { \
		error = copyin(SCARG(uap, name), bits + ni * x, ni); \
		if (error) \
			goto done; \
	} else \
		memset(bits + ni * x, 0, ni);
	getbits(in, 0);
	getbits(ou, 1);
	getbits(ex, 2);
#undef	getbits

	if (SCARG(uap, tv)) {
		error = copyin(SCARG(uap, tv), (caddr_t)&atv,
			sizeof(atv));
		if (error)
			goto done;
		if (itimerfix(&atv)) {
			error = EINVAL;
			goto done;
		}
		/* Convert the relative timeout to an absolute deadline. */
		s = splclock();
		timeradd(&atv, &time, &atv);
		splx(s);
	} else
		timo = 0;	/* no timeout: sleep until woken */
 retry:
	/*
	 * Snapshot the collision counter and set L_SELECT before the
	 * scan; if either changes while we prepare to sleep, an event
	 * may have been missed and we must rescan.
	 */
	ncoll = nselcoll;
	l->l_flag |= L_SELECT;
	error = selscan(p, (fd_mask *)(bits + ni * 0),
	    (fd_mask *)(bits + ni * 3), SCARG(uap, nd), retval);
	if (error || *retval)
		goto done;
	if (SCARG(uap, tv)) {
		/*
		 * We have to recalculate the timeout on every retry.
		 */
		timo = hzto(&atv);
		if (timo <= 0)
			goto done;	/* deadline already passed */
	}
	s = splsched();
	if ((l->l_flag & L_SELECT) == 0 || nselcoll != ncoll) {
		/* A selwakeup() raced us; rescan instead of sleeping. */
		splx(s);
		goto retry;
	}
	l->l_flag &= ~L_SELECT;
	error = tsleep((caddr_t)&selwait, PSOCK | PCATCH, "select", timo);
	splx(s);
	if (error == 0)
		goto retry;
 done:
	l->l_flag &= ~L_SELECT;
	/* select is not restarted after signals... */
	if (error == ERESTART)
		error = EINTR;
	if (error == EWOULDBLOCK)
		error = 0;
	if (error == 0) {

		/* Copy each result set back out to its (non-NULL) pointer. */
#define	putbits(name, x) \
	if (SCARG(uap, name)) { \
		error = copyout(bits + ni * x, SCARG(uap, name), ni); \
		if (error) \
			goto out; \
	}
		putbits(in, 3);
		putbits(ou, 4);
		putbits(ex, 5);
#undef putbits
	}
 out:
	if (ni * 6 > sizeof(smallbits))
		free(bits, M_TEMP);
	return (error);
}
769
/*
 * Scan the three packed input bit arrays (read/write/except, each nfd
 * bits), poll every descriptor whose bit is set, and set the matching
 * bit in the packed output arrays when the descriptor is ready.
 * *retval receives the number of ready bits.
 */
int
selscan(struct proc *p, fd_mask *ibitp, fd_mask *obitp, int nfd,
	register_t *retval)
{
	struct filedesc *fdp;
	int msk, i, j, fd, n;
	fd_mask ibits, obits;
	struct file *fp;
	/* Poll-event mask for each of the three select() sets. */
	static int flag[3] = { POLLRDNORM | POLLHUP | POLLERR,
			       POLLWRNORM | POLLHUP | POLLERR,
			       POLLRDBAND };

	fdp = p->p_fd;
	n = 0;
	for (msk = 0; msk < 3; msk++) {
		for (i = 0; i < nfd; i += NFDBITS) {
			ibits = *ibitp++;
			obits = 0;
			/* Visit each set bit; ffs() is 1-based, hence --j. */
			while ((j = ffs(ibits)) && (fd = i + --j) < nfd) {
				/*
				 * NOTE(review): `1 << j` is an int shift;
				 * this assumes fd_mask is no wider than int
				 * (j < NFDBITS <= int bits) — verify.
				 */
				ibits &= ~(1 << j);
				if ((fp = fd_getfile(fdp, fd)) == NULL)
					return (EBADF);
				FILE_USE(fp);
				if ((*fp->f_ops->fo_poll)(fp, flag[msk], p)) {
					obits |= (1 << j);
					n++;
				}
				FILE_UNUSE(fp, p);
			}
			*obitp++ = obits;
		}
	}
	*retval = n;
	return (0);
}
805
/*
 * Poll system call: copy in the pollfd array, scan it (sleeping and
 * rescanning as for select), and copy the revents back out.
 */
int
sys_poll(struct lwp *l, void *v, register_t *retval)
{
	struct sys_poll_args /* {
		syscallarg(struct pollfd *) fds;
		syscallarg(u_int) nfds;
		syscallarg(int) timeout;
	} */ *uap = v;
	struct proc *p;
	caddr_t bits;
	/* On-stack buffer for the common case of few descriptors. */
	char smallbits[32 * sizeof(struct pollfd)];
	struct timeval atv;
	int s, ncoll, error, timo;
	size_t ni;		/* size of the pollfd array in bytes */

	error = 0;
	p = l->l_proc;
	if (SCARG(uap, nfds) > p->p_fd->fd_nfiles) {
		/* forgiving; slightly wrong */
		SCARG(uap, nfds) = p->p_fd->fd_nfiles;
	}
	ni = SCARG(uap, nfds) * sizeof(struct pollfd);
	if (ni > sizeof(smallbits))
		bits = malloc(ni, M_TEMP, M_WAITOK);
	else
		bits = smallbits;

	error = copyin(SCARG(uap, fds), bits, ni);
	if (error)
		goto done;

	if (SCARG(uap, timeout) != INFTIM) {
		/* Millisecond timeout -> absolute timeval deadline. */
		atv.tv_sec = SCARG(uap, timeout) / 1000;
		atv.tv_usec = (SCARG(uap, timeout) % 1000) * 1000;
		if (itimerfix(&atv)) {
			error = EINVAL;
			goto done;
		}
		s = splclock();
		timeradd(&atv, &time, &atv);
		splx(s);
	} else
		timo = 0;	/* INFTIM: sleep until woken */
 retry:
	/*
	 * Snapshot the collision counter and set L_SELECT before the
	 * scan; if either changes while we prepare to sleep, an event
	 * may have been missed and we must rescan.
	 */
	ncoll = nselcoll;
	l->l_flag |= L_SELECT;
	error = pollscan(p, (struct pollfd *)bits, SCARG(uap, nfds), retval);
	if (error || *retval)
		goto done;
	if (SCARG(uap, timeout) != INFTIM) {
		/*
		 * We have to recalculate the timeout on every retry.
		 */
		timo = hzto(&atv);
		if (timo <= 0)
			goto done;	/* deadline already passed */
	}
	s = splsched();
	if ((l->l_flag & L_SELECT) == 0 || nselcoll != ncoll) {
		/* A selwakeup() raced us; rescan instead of sleeping. */
		splx(s);
		goto retry;
	}
	l->l_flag &= ~L_SELECT;
	error = tsleep((caddr_t)&selwait, PSOCK | PCATCH, "select", timo);
	splx(s);
	if (error == 0)
		goto retry;
 done:
	l->l_flag &= ~L_SELECT;
	/* poll is not restarted after signals... */
	if (error == ERESTART)
		error = EINTR;
	if (error == EWOULDBLOCK)
		error = 0;
	if (error == 0) {
		/* Copy the whole array (with updated revents) back out. */
		error = copyout(bits, SCARG(uap, fds), ni);
		if (error)
			goto out;
	}
 out:
	if (ni > sizeof(smallbits))
		free(bits, M_TEMP);
	return (error);
}
893
894 int
895 pollscan(struct proc *p, struct pollfd *fds, int nfd, register_t *retval)
896 {
897 struct filedesc *fdp;
898 int i, n;
899 struct file *fp;
900
901 fdp = p->p_fd;
902 n = 0;
903 for (i = 0; i < nfd; i++, fds++) {
904 if (fds->fd >= fdp->fd_nfiles) {
905 fds->revents = POLLNVAL;
906 n++;
907 } else if (fds->fd < 0) {
908 fds->revents = 0;
909 } else {
910 if ((fp = fd_getfile(fdp, fds->fd)) == NULL) {
911 fds->revents = POLLNVAL;
912 n++;
913 } else {
914 FILE_USE(fp);
915 fds->revents = (*fp->f_ops->fo_poll)(fp,
916 fds->events | POLLERR | POLLHUP, p);
917 if (fds->revents != 0)
918 n++;
919 FILE_UNUSE(fp, p);
920 }
921 }
922 }
923 *retval = n;
924 return (0);
925 }
926
927 /*ARGSUSED*/
928 int
929 seltrue(dev_t dev, int events, struct proc *p)
930 {
931
932 return (events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
933 }
934
/*
 * Record a select request: remember the selecting process in sip so a
 * later selwakeup() can find it.  If another process is already
 * recorded, mark a collision instead, forcing selwakeup() to wake
 * everybody sleeping on selwait.
 */
void
selrecord(struct proc *selector, struct selinfo *sip)
{
	struct lwp *l;
	struct proc *p;
	pid_t mypid;
	int collision;

	mypid = selector->p_pid;
	/* Already recorded for us: nothing to do. */
	if (sip->si_pid == mypid)
		return;

	collision = 0;
	if (sip->si_pid && (p = pfind(sip->si_pid))) {
		/*
		 * Another live process is recorded; if any of its LWPs
		 * is actually sleeping on selwait, flag a collision.
		 */
		for (l = LIST_FIRST(&p->p_lwps); l != NULL;
		     l = LIST_NEXT(l, l_sibling)) {
			if (l->l_wchan == (caddr_t)&selwait) {
				collision = 1;
				sip->si_flags |= SI_COLL;
			}
		}
	}

	if (collision == 0) {
		/* Slot is free (or stale): claim it for ourselves. */
		sip->si_flags &= ~SI_COLL;
		sip->si_pid = mypid;
	}
}
966
967 /*
968 * Do a wakeup when a selectable event occurs.
969 */
970 void
971 selwakeup(sip)
972 struct selinfo *sip;
973 {
974 struct lwp *l;
975 struct proc *p;
976 int s;
977
978 if (sip->si_pid == 0)
979 return;
980 if (sip->si_flags & SI_COLL) {
981 nselcoll++;
982 sip->si_flags &= ~SI_COLL;
983 wakeup((caddr_t)&selwait);
984 }
985 p = pfind(sip->si_pid);
986 sip->si_pid = 0;
987 if (p != NULL) {
988 for (l = LIST_FIRST(&p->p_lwps); l != NULL;
989 l = LIST_NEXT(l, l_sibling)) {
990 SCHED_LOCK(s);
991 if (l->l_wchan == (caddr_t)&selwait) {
992 if (l->l_stat == LSSLEEP)
993 setrunnable(l);
994 else
995 unsleep(l);
996 } else if (l->l_flag & L_SELECT)
997 l->l_flag &= ~L_SELECT;
998 SCHED_UNLOCK(s);
999 }
1000 }
1001 }
1002