/*	$NetBSD: sys_generic.c,v 1.66 2002/11/26 18:44:34 christos Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)sys_generic.c	8.9 (Berkeley) 2/14/95
 */
42
43 #include <sys/cdefs.h>
44 __KERNEL_RCSID(0, "$NetBSD: sys_generic.c,v 1.66 2002/11/26 18:44:34 christos Exp $");
45
46 #include "opt_ktrace.h"
47
48 #include <sys/param.h>
49 #include <sys/systm.h>
50 #include <sys/filedesc.h>
51 #include <sys/ioctl.h>
52 #include <sys/file.h>
53 #include <sys/proc.h>
54 #include <sys/socketvar.h>
55 #include <sys/signalvar.h>
56 #include <sys/uio.h>
57 #include <sys/kernel.h>
58 #include <sys/stat.h>
59 #include <sys/malloc.h>
60 #include <sys/poll.h>
61 #ifdef KTRACE
62 #include <sys/ktrace.h>
63 #endif
64
65 #include <sys/mount.h>
66 #include <sys/syscallargs.h>
67
68 int selscan __P((struct proc *, fd_mask *, fd_mask *, int, register_t *));
69 int pollscan __P((struct proc *, struct pollfd *, int, register_t *));
70
71 /*
72 * Read system call.
73 */
74 /* ARGSUSED */
75 int
76 sys_read(struct proc *p, void *v, register_t *retval)
77 {
78 struct sys_read_args /* {
79 syscallarg(int) fd;
80 syscallarg(void *) buf;
81 syscallarg(size_t) nbyte;
82 } */ *uap = v;
83 int fd;
84 struct file *fp;
85 struct filedesc *fdp;
86
87 fd = SCARG(uap, fd);
88 fdp = p->p_fd;
89
90 if ((fp = fd_getfile(fdp, fd)) == NULL)
91 return (EBADF);
92
93 if ((fp->f_flag & FREAD) == 0)
94 return (EBADF);
95
96 FILE_USE(fp);
97
98 /* dofileread() will unuse the descriptor for us */
99 return (dofileread(p, fd, fp, SCARG(uap, buf), SCARG(uap, nbyte),
100 &fp->f_offset, FOF_UPDATE_OFFSET, retval));
101 }
102
/*
 * Common code for read(2): transfer up to nbyte bytes from fp into buf,
 * starting at *offset.  The byte count actually transferred is stored
 * in *retval.  Called with fp held in use; that reference is released
 * before returning (callers rely on this).
 */
int
dofileread(struct proc *p, int fd, struct file *fp, void *buf, size_t nbyte,
    off_t *offset, int flags, register_t *retval)
{
	struct uio auio;
	struct iovec aiov;
	size_t cnt;
	int error;
#ifdef KTRACE
	struct iovec ktriov;
#endif
	error = 0;

	/* Describe the user buffer as a single-element uio. */
	aiov.iov_base = (caddr_t)buf;
	aiov.iov_len = nbyte;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_resid = nbyte;
	auio.uio_rw = UIO_READ;
	auio.uio_segflg = UIO_USERSPACE;
	auio.uio_procp = p;

	/*
	 * Reads return ssize_t because -1 is returned on error.  Therefore
	 * we must restrict the length to SSIZE_MAX to avoid garbage return
	 * values.
	 */
	if (auio.uio_resid > SSIZE_MAX) {
		error = EINVAL;
		goto out;
	}

#ifdef KTRACE
	/*
	 * if tracing, save a copy of iovec
	 */
	if (KTRPOINT(p, KTR_GENIO))
		ktriov = aiov;
#endif
	cnt = auio.uio_resid;
	error = (*fp->f_ops->fo_read)(fp, offset, &auio, fp->f_cred, flags);
	if (error)
		/* A partial transfer masks these transient errors. */
		if (auio.uio_resid != cnt && (error == ERESTART ||
		    error == EINTR || error == EWOULDBLOCK))
			error = 0;
	cnt -= auio.uio_resid;		/* bytes actually transferred */
#ifdef KTRACE
	if (KTRPOINT(p, KTR_GENIO) && error == 0)
		ktrgenio(p, fd, UIO_READ, &ktriov, cnt, error);
#endif
	*retval = cnt;
 out:
	FILE_UNUSE(fp, p);
	return (error);
}
158
159 /*
160 * Scatter read system call.
161 */
162 int
163 sys_readv(struct proc *p, void *v, register_t *retval)
164 {
165 struct sys_readv_args /* {
166 syscallarg(int) fd;
167 syscallarg(const struct iovec *) iovp;
168 syscallarg(int) iovcnt;
169 } */ *uap = v;
170 int fd;
171 struct file *fp;
172 struct filedesc *fdp;
173
174 fd = SCARG(uap, fd);
175 fdp = p->p_fd;
176
177 if ((fp = fd_getfile(fdp, fd)) == NULL)
178 return (EBADF);
179
180 if ((fp->f_flag & FREAD) == 0)
181 return (EBADF);
182
183 FILE_USE(fp);
184
185 /* dofilereadv() will unuse the descriptor for us */
186 return (dofilereadv(p, fd, fp, SCARG(uap, iovp), SCARG(uap, iovcnt),
187 &fp->f_offset, FOF_UPDATE_OFFSET, retval));
188 }
189
/*
 * Common code for readv(2): gather-read into the iovcnt segments
 * described by iovp, starting at *offset.  The byte count actually
 * transferred is stored in *retval.  Called with fp held in use;
 * that reference is released before returning.
 */
int
dofilereadv(struct proc *p, int fd, struct file *fp, const struct iovec *iovp,
    int iovcnt, off_t *offset, int flags, register_t *retval)
{
	struct uio auio;
	struct iovec *iov, *needfree, aiov[UIO_SMALLIOV];
	int i, error;
	size_t cnt;
	u_int iovlen;
#ifdef KTRACE
	struct iovec *ktriov;
#endif

	error = 0;
#ifdef KTRACE
	ktriov = NULL;
#endif
	/* note: can't use iovlen until iovcnt is validated */
	iovlen = iovcnt * sizeof(struct iovec);
	if ((u_int)iovcnt > UIO_SMALLIOV) {
		if ((u_int)iovcnt > IOV_MAX) {
			error = EINVAL;
			goto out;
		}
		/* Too many segments for the stack array; allocate. */
		iov = malloc(iovlen, M_IOV, M_WAITOK);
		needfree = iov;
	} else if ((u_int)iovcnt > 0) {
		/* Common case: use the on-stack iovec array. */
		iov = aiov;
		needfree = NULL;
	} else {
		error = EINVAL;
		goto out;
	}

	auio.uio_iov = iov;
	auio.uio_iovcnt = iovcnt;
	auio.uio_rw = UIO_READ;
	auio.uio_segflg = UIO_USERSPACE;
	auio.uio_procp = p;
	error = copyin(iovp, iov, iovlen);
	if (error)
		goto done;
	/* Total the segment lengths, checking for ssize_t overflow. */
	auio.uio_resid = 0;
	for (i = 0; i < iovcnt; i++) {
		auio.uio_resid += iov->iov_len;
		/*
		 * Reads return ssize_t because -1 is returned on error.
		 * Therefore we must restrict the length to SSIZE_MAX to
		 * avoid garbage return values.
		 */
		if (iov->iov_len > SSIZE_MAX || auio.uio_resid > SSIZE_MAX) {
			error = EINVAL;
			goto done;
		}
		iov++;
	}
#ifdef KTRACE
	/*
	 * if tracing, save a copy of iovec
	 */
	if (KTRPOINT(p, KTR_GENIO)) {
		ktriov = malloc(iovlen, M_TEMP, M_WAITOK);
		memcpy((caddr_t)ktriov, (caddr_t)auio.uio_iov, iovlen);
	}
#endif
	cnt = auio.uio_resid;
	error = (*fp->f_ops->fo_read)(fp, offset, &auio, fp->f_cred, flags);
	if (error)
		/* A partial transfer masks these transient errors. */
		if (auio.uio_resid != cnt && (error == ERESTART ||
		    error == EINTR || error == EWOULDBLOCK))
			error = 0;
	cnt -= auio.uio_resid;		/* bytes actually transferred */
#ifdef KTRACE
	/* Test ktriov, not KTRPOINT(), so the copy is always freed. */
	if (ktriov != NULL) {
		if (error == 0)
			ktrgenio(p, fd, UIO_READ, ktriov, cnt, error);
		free(ktriov, M_TEMP);
	}
#endif
	*retval = cnt;
 done:
	if (needfree)
		free(needfree, M_IOV);
 out:
	FILE_UNUSE(fp, p);
	return (error);
}
277
278 /*
279 * Write system call
280 */
281 int
282 sys_write(struct proc *p, void *v, register_t *retval)
283 {
284 struct sys_write_args /* {
285 syscallarg(int) fd;
286 syscallarg(const void *) buf;
287 syscallarg(size_t) nbyte;
288 } */ *uap = v;
289 int fd;
290 struct file *fp;
291 struct filedesc *fdp;
292
293 fd = SCARG(uap, fd);
294 fdp = p->p_fd;
295
296 if ((fp = fd_getfile(fdp, fd)) == NULL)
297 return (EBADF);
298
299 if ((fp->f_flag & FWRITE) == 0)
300 return (EBADF);
301
302 FILE_USE(fp);
303
304 /* dofilewrite() will unuse the descriptor for us */
305 return (dofilewrite(p, fd, fp, SCARG(uap, buf), SCARG(uap, nbyte),
306 &fp->f_offset, FOF_UPDATE_OFFSET, retval));
307 }
308
/*
 * Common code for write(2): transfer nbyte bytes from buf through fp,
 * starting at *offset.  The byte count actually written is stored in
 * *retval.  Posts SIGPIPE on EPIPE.  Called with fp held in use; that
 * reference is released before returning.
 */
int
dofilewrite(struct proc *p, int fd, struct file *fp, const void *buf,
    size_t nbyte, off_t *offset, int flags, register_t *retval)
{
	struct uio auio;
	struct iovec aiov;
	size_t cnt;
	int error;
#ifdef KTRACE
	struct iovec ktriov;
#endif

	error = 0;
	/* Describe the user buffer as a single-element uio. */
	aiov.iov_base = (caddr_t)buf;		/* XXX kills const */
	aiov.iov_len = nbyte;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_resid = nbyte;
	auio.uio_rw = UIO_WRITE;
	auio.uio_segflg = UIO_USERSPACE;
	auio.uio_procp = p;

	/*
	 * Writes return ssize_t because -1 is returned on error.  Therefore
	 * we must restrict the length to SSIZE_MAX to avoid garbage return
	 * values.
	 */
	if (auio.uio_resid > SSIZE_MAX) {
		error = EINVAL;
		goto out;
	}

#ifdef KTRACE
	/*
	 * if tracing, save a copy of iovec
	 */
	if (KTRPOINT(p, KTR_GENIO))
		ktriov = aiov;
#endif
	cnt = auio.uio_resid;
	error = (*fp->f_ops->fo_write)(fp, offset, &auio, fp->f_cred, flags);
	if (error) {
		/* A partial transfer masks these transient errors. */
		if (auio.uio_resid != cnt && (error == ERESTART ||
		    error == EINTR || error == EWOULDBLOCK))
			error = 0;
		/* Writing to a broken pipe also raises SIGPIPE. */
		if (error == EPIPE)
			psignal(p, SIGPIPE);
	}
	cnt -= auio.uio_resid;		/* bytes actually written */
#ifdef KTRACE
	if (KTRPOINT(p, KTR_GENIO) && error == 0)
		ktrgenio(p, fd, UIO_WRITE, &ktriov, cnt, error);
#endif
	*retval = cnt;
 out:
	FILE_UNUSE(fp, p);
	return (error);
}
367
368 /*
369 * Gather write system call
370 */
371 int
372 sys_writev(struct proc *p, void *v, register_t *retval)
373 {
374 struct sys_writev_args /* {
375 syscallarg(int) fd;
376 syscallarg(const struct iovec *) iovp;
377 syscallarg(int) iovcnt;
378 } */ *uap = v;
379 int fd;
380 struct file *fp;
381 struct filedesc *fdp;
382
383 fd = SCARG(uap, fd);
384 fdp = p->p_fd;
385
386 if ((fp = fd_getfile(fdp, fd)) == NULL)
387 return (EBADF);
388
389 if ((fp->f_flag & FWRITE) == 0)
390 return (EBADF);
391
392 FILE_USE(fp);
393
394 /* dofilewritev() will unuse the descriptor for us */
395 return (dofilewritev(p, fd, fp, SCARG(uap, iovp), SCARG(uap, iovcnt),
396 &fp->f_offset, FOF_UPDATE_OFFSET, retval));
397 }
398
399 int
400 dofilewritev(struct proc *p, int fd, struct file *fp, const struct iovec *iovp,
401 int iovcnt, off_t *offset, int flags, register_t *retval)
402 {
403 struct uio auio;
404 struct iovec *iov, *needfree, aiov[UIO_SMALLIOV];
405 int i, error;
406 size_t cnt;
407 u_int iovlen;
408 #ifdef KTRACE
409 struct iovec *ktriov;
410 #endif
411
412 error = 0;
413 #ifdef KTRACE
414 ktriov = NULL;
415 #endif
416 /* note: can't use iovlen until iovcnt is validated */
417 iovlen = iovcnt * sizeof(struct iovec);
418 if ((u_int)iovcnt > UIO_SMALLIOV) {
419 if ((u_int)iovcnt > IOV_MAX) {
420 error = EINVAL;
421 goto out;
422 }
423 iov = malloc(iovlen, M_IOV, M_WAITOK);
424 needfree = iov;
425 } else if ((u_int)iovcnt > 0) {
426 iov = aiov;
427 needfree = NULL;
428 } else {
429 error = EINVAL;
430 goto out;
431 }
432
433 auio.uio_iov = iov;
434 auio.uio_iovcnt = iovcnt;
435 auio.uio_rw = UIO_WRITE;
436 auio.uio_segflg = UIO_USERSPACE;
437 auio.uio_procp = p;
438 error = copyin(iovp, iov, iovlen);
439 if (error)
440 goto done;
441 auio.uio_resid = 0;
442 for (i = 0; i < iovcnt; i++) {
443 auio.uio_resid += iov->iov_len;
444 /*
445 * Writes return ssize_t because -1 is returned on error.
446 * Therefore we must restrict the length to SSIZE_MAX to
447 * avoid garbage return values.
448 */
449 if (iov->iov_len > SSIZE_MAX || auio.uio_resid > SSIZE_MAX) {
450 error = EINVAL;
451 goto done;
452 }
453 iov++;
454 }
455 #ifdef KTRACE
456 /*
457 * if tracing, save a copy of iovec
458 */
459 if (KTRPOINT(p, KTR_GENIO)) {
460 ktriov = malloc(iovlen, M_TEMP, M_WAITOK);
461 memcpy((caddr_t)ktriov, (caddr_t)auio.uio_iov, iovlen);
462 }
463 #endif
464 cnt = auio.uio_resid;
465 error = (*fp->f_ops->fo_write)(fp, offset, &auio, fp->f_cred, flags);
466 if (error) {
467 if (auio.uio_resid != cnt && (error == ERESTART ||
468 error == EINTR || error == EWOULDBLOCK))
469 error = 0;
470 if (error == EPIPE)
471 psignal(p, SIGPIPE);
472 }
473 cnt -= auio.uio_resid;
474 #ifdef KTRACE
475 if (KTRPOINT(p, KTR_GENIO))
476 if (error == 0) {
477 ktrgenio(p, fd, UIO_WRITE, ktriov, cnt, error);
478 free(ktriov, M_TEMP);
479 }
480 #endif
481 *retval = cnt;
482 done:
483 if (needfree)
484 free(needfree, M_IOV);
485 out:
486 FILE_UNUSE(fp, p);
487 return (error);
488 }
489
/*
 * Ioctl system call: apply command `com' to the object referenced by
 * descriptor `fd', staging argument data to/from user space as encoded
 * in the command word (IOC_IN/IOC_OUT/IOC_VOID, IOCPARM_LEN).
 */
/* ARGSUSED */
int
sys_ioctl(struct proc *p, void *v, register_t *retval)
{
	struct sys_ioctl_args /* {
		syscallarg(int) fd;
		syscallarg(u_long) com;
		syscallarg(caddr_t) data;
	} */ *uap = v;
	struct file *fp;
	struct filedesc *fdp;
	u_long com;
	int error;
	u_int size;
	caddr_t data, memp;
	int tmp;
	/* Arguments up to this size are staged in the on-stack buffer. */
#define	STK_PARAMS	128
	u_long stkbuf[STK_PARAMS/sizeof(u_long)];

	error = 0;
	fdp = p->p_fd;

	if ((fp = fd_getfile(fdp, SCARG(uap, fd))) == NULL)
		return (EBADF);

	FILE_USE(fp);

	if ((fp->f_flag & (FREAD | FWRITE)) == 0) {
		error = EBADF;
		com = 0;	/* the error switch below may format com */
		goto out;
	}

	/* Close-on-exec is a descriptor-table property; handle it here. */
	switch (com = SCARG(uap, com)) {
	case FIONCLEX:
		fdp->fd_ofileflags[SCARG(uap, fd)] &= ~UF_EXCLOSE;
		goto out;

	case FIOCLEX:
		fdp->fd_ofileflags[SCARG(uap, fd)] |= UF_EXCLOSE;
		goto out;
	}

	/*
	 * Interpret high order word to find amount of data to be
	 * copied to/from the user's address space.
	 */
	size = IOCPARM_LEN(com);
	if (size > IOCPARM_MAX) {
		error = ENOTTY;
		goto out;
	}
	memp = NULL;
	if (size > sizeof(stkbuf)) {
		/* Argument too big for the stack buffer; allocate. */
		memp = (caddr_t)malloc((u_long)size, M_IOCTLOPS, M_WAITOK);
		data = memp;
	} else
		data = (caddr_t)stkbuf;
	if (com&IOC_IN) {
		if (size) {
			error = copyin(SCARG(uap, data), data, size);
			if (error) {
				if (memp)
					free(memp, M_IOCTLOPS);
				goto out;
			}
		} else
			/* Zero-size IOC_IN: pass the pointer itself. */
			*(caddr_t *)data = SCARG(uap, data);
	} else if ((com&IOC_OUT) && size)
		/*
		 * Zero the buffer so the user always
		 * gets back something deterministic.
		 */
		memset(data, 0, size);
	else if (com&IOC_VOID)
		*(caddr_t *)data = SCARG(uap, data);

	switch (com) {

	case FIONBIO:
		/* Set/clear non-blocking I/O on the file. */
		if ((tmp = *(int *)data) != 0)
			fp->f_flag |= FNONBLOCK;
		else
			fp->f_flag &= ~FNONBLOCK;
		error = (*fp->f_ops->fo_ioctl)(fp, FIONBIO, (caddr_t)&tmp, p);
		break;

	case FIOASYNC:
		/* Set/clear async mode on the file. */
		if ((tmp = *(int *)data) != 0)
			fp->f_flag |= FASYNC;
		else
			fp->f_flag &= ~FASYNC;
		error = (*fp->f_ops->fo_ioctl)(fp, FIOASYNC, (caddr_t)&tmp, p);
		break;

	case FIOSETOWN:
		tmp = *(int *)data;
		if (fp->f_type == DTYPE_SOCKET) {
			/* Sockets store the pgid directly. */
			((struct socket *)fp->f_data)->so_pgid = tmp;
			error = 0;
			break;
		}
		if (tmp <= 0) {
			/* Non-positive value names a process group. */
			tmp = -tmp;
		} else {
			/* Positive value names a pid; map to its pgrp. */
			struct proc *p1 = pfind(tmp);
			if (p1 == 0) {
				error = ESRCH;
				break;
			}
			tmp = p1->p_pgrp->pg_id;
		}
		error = (*fp->f_ops->fo_ioctl)
			(fp, TIOCSPGRP, (caddr_t)&tmp, p);
		break;

	case FIOGETOWN:
		if (fp->f_type == DTYPE_SOCKET) {
			error = 0;
			*(int *)data = ((struct socket *)fp->f_data)->so_pgid;
			break;
		}
		error = (*fp->f_ops->fo_ioctl)(fp, TIOCGPGRP, data, p);
		if (error == 0)
			*(int *)data = -*(int *)data;
		break;

	default:
		error = (*fp->f_ops->fo_ioctl)(fp, com, data, p);
		/*
		 * Copy any data to user, size was
		 * already set and checked above.
		 */
		if (error == 0 && (com&IOC_OUT) && size)
			error = copyout(data, SCARG(uap, data), size);
		break;
	}
	if (memp)
		free(memp, M_IOCTLOPS);
 out:
	FILE_UNUSE(fp, p);
	switch (error) {
	case -1:
		/* -1 is not a valid errno; log the offender... */
		printf("sys_ioctl: _IO%s%s('%c', %lu, %lu) returned -1: "
		    "pid=%d comm=%s\n",
		    (com & IOC_IN) ? "W" : "", (com & IOC_OUT) ? "R" : "",
		    (char)IOCGROUP(com), (com & 0xff), IOCPARM_LEN(com),
		    p->p_pid, p->p_comm);
		/* FALLTHROUGH */
	case EPASSTHROUGH:
		/* ...and report unsupported commands as ENOTTY. */
		error = ENOTTY;
		/* FALLTHROUGH */
	default:
		return (error);
	}
}
649
/*
 * Sleep channel for select()/poll() and the global collision counter
 * used by selrecord()/selwakeup().
 */
int selwait, nselcoll;
651
/*
 * Select system call.
 */
int
sys_select(struct proc *p, void *v, register_t *retval)
{
	struct sys_select_args /* {
		syscallarg(int) nd;
		syscallarg(fd_set *) in;
		syscallarg(fd_set *) ou;
		syscallarg(fd_set *) ex;
		syscallarg(struct timeval *) tv;
	} */ *uap = v;
	caddr_t bits;
	/*
	 * Space for six descriptor sets: the three input sets (in/ou/ex)
	 * followed by the three result sets.
	 */
	char smallbits[howmany(FD_SETSIZE, NFDBITS) *
	    sizeof(fd_mask) * 6];
	struct timeval atv;
	int s, ncoll, error, timo;
	size_t ni;

	error = 0;
	if (SCARG(uap, nd) < 0)
		return (EINVAL);
	if (SCARG(uap, nd) > p->p_fd->fd_nfiles) {
		/* forgiving; slightly wrong */
		SCARG(uap, nd) = p->p_fd->fd_nfiles;
	}
	/* Bytes per descriptor set, rounded up to whole fd_masks. */
	ni = howmany(SCARG(uap, nd), NFDBITS) * sizeof(fd_mask);
	if (ni * 6 > sizeof(smallbits))
		bits = malloc(ni * 6, M_TEMP, M_WAITOK);
	else
		bits = smallbits;

	/* Copy in each supplied set; a null pointer means an empty set. */
#define	getbits(name, x) \
	if (SCARG(uap, name)) { \
		error = copyin(SCARG(uap, name), bits + ni * x, ni); \
		if (error) \
			goto done; \
	} else \
		memset(bits + ni * x, 0, ni);
	getbits(in, 0);
	getbits(ou, 1);
	getbits(ex, 2);
#undef	getbits

	timo = 0;
	if (SCARG(uap, tv)) {
		error = copyin(SCARG(uap, tv), (caddr_t)&atv,
			sizeof(atv));
		if (error)
			goto done;
		if (itimerfix(&atv)) {
			error = EINVAL;
			goto done;
		}
		/* Convert the relative timeout to an absolute deadline. */
		s = splclock();
		timeradd(&atv, &time, &atv);
		splx(s);
	}

 retry:
	/* Snapshot the collision counter before scanning. */
	ncoll = nselcoll;
	p->p_flag |= P_SELECT;
	error = selscan(p, (fd_mask *)(bits + ni * 0),
	    (fd_mask *)(bits + ni * 3), SCARG(uap, nd), retval);
	if (error || *retval)
		goto done;
	if (SCARG(uap, tv)) {
		/*
		 * We have to recalculate the timeout on every retry.
		 */
		timo = hzto(&atv);
		if (timo <= 0)
			goto done;
	}
	s = splsched();
	/*
	 * If P_SELECT was cleared or a collision was recorded while we
	 * scanned, something may have become ready: rescan, don't sleep.
	 */
	if ((p->p_flag & P_SELECT) == 0 || nselcoll != ncoll) {
		splx(s);
		goto retry;
	}
	p->p_flag &= ~P_SELECT;
	error = tsleep((caddr_t)&selwait, PSOCK | PCATCH, "select", timo);
	splx(s);
	if (error == 0)
		goto retry;
 done:
	p->p_flag &= ~P_SELECT;
	/* select is not restarted after signals... */
	if (error == ERESTART)
		error = EINTR;
	if (error == EWOULDBLOCK)
		error = 0;
	if (error == 0) {

		/* Copy each result set back for each supplied pointer. */
#define	putbits(name, x) \
		if (SCARG(uap, name)) { \
			error = copyout(bits + ni * x, SCARG(uap, name), ni); \
			if (error) \
				goto out; \
		}
		putbits(in, 3);
		putbits(ou, 4);
		putbits(ex, 5);
#undef putbits
	}
 out:
	if (ni * 6 > sizeof(smallbits))
		free(bits, M_TEMP);
	return (error);
}
762
/*
 * Scan the three descriptor sets for ready descriptors.
 *
 * ibitp points at the three consecutive input sets (read/write/except)
 * and obitp at the three result sets; each set covers nfd descriptors.
 * The count of ready descriptors is stored in *retval.  Returns EBADF
 * if a set names a descriptor with no open file.
 */
int
selscan(struct proc *p, fd_mask *ibitp, fd_mask *obitp, int nfd,
	register_t *retval)
{
	struct filedesc *fdp;
	int msk, i, j, fd, n;
	fd_mask ibits, obits;
	struct file *fp;
	/* Poll events corresponding to the read/write/except sets. */
	static const int flag[3] = { POLLRDNORM | POLLHUP | POLLERR,
			       POLLWRNORM | POLLHUP | POLLERR,
			       POLLRDBAND };

	fdp = p->p_fd;
	n = 0;
	for (msk = 0; msk < 3; msk++) {
		for (i = 0; i < nfd; i += NFDBITS) {
			ibits = *ibitp++;
			obits = 0;
			/* Visit each set bit via ffs(), clearing as we go. */
			while ((j = ffs(ibits)) && (fd = i + --j) < nfd) {
				ibits &= ~(1 << j);
				if ((fp = fd_getfile(fdp, fd)) == NULL)
					return (EBADF);
				FILE_USE(fp);
				if ((*fp->f_ops->fo_poll)(fp, flag[msk], p)) {
					obits |= (1 << j);
					n++;
				}
				FILE_UNUSE(fp, p);
			}
			*obitp++ = obits;
		}
	}
	*retval = n;
	return (0);
}
798
/*
 * Poll system call.
 */
int
sys_poll(struct proc *p, void *v, register_t *retval)
{
	struct sys_poll_args /* {
		syscallarg(struct pollfd *) fds;
		syscallarg(u_int) nfds;
		syscallarg(int) timeout;
	} */ *uap = v;
	caddr_t bits;
	/* On-stack space for the common case of a small pollfd array. */
	char smallbits[32 * sizeof(struct pollfd)];
	struct timeval atv;
	int s, ncoll, error, timo;
	size_t ni;

	error = 0;
	if (SCARG(uap, nfds) > p->p_fd->fd_nfiles) {
		/* forgiving; slightly wrong */
		SCARG(uap, nfds) = p->p_fd->fd_nfiles;
	}
	ni = SCARG(uap, nfds) * sizeof(struct pollfd);
	if (ni > sizeof(smallbits))
		bits = malloc(ni, M_TEMP, M_WAITOK);
	else
		bits = smallbits;

	error = copyin(SCARG(uap, fds), bits, ni);
	if (error)
		goto done;

	timo = 0;
	if (SCARG(uap, timeout) != INFTIM) {
		/* Convert the millisecond timeout to an absolute deadline. */
		atv.tv_sec = SCARG(uap, timeout) / 1000;
		atv.tv_usec = (SCARG(uap, timeout) % 1000) * 1000;
		if (itimerfix(&atv)) {
			error = EINVAL;
			goto done;
		}
		s = splclock();
		timeradd(&atv, &time, &atv);
		splx(s);
	}

 retry:
	/* Snapshot the collision counter before scanning. */
	ncoll = nselcoll;
	p->p_flag |= P_SELECT;
	error = pollscan(p, (struct pollfd *)bits, SCARG(uap, nfds), retval);
	if (error || *retval)
		goto done;
	if (SCARG(uap, timeout) != INFTIM) {
		/*
		 * We have to recalculate the timeout on every retry.
		 */
		timo = hzto(&atv);
		if (timo <= 0)
			goto done;
	}
	s = splsched();
	/*
	 * If P_SELECT was cleared or a collision was recorded while we
	 * scanned, something may have become ready: rescan, don't sleep.
	 */
	if ((p->p_flag & P_SELECT) == 0 || nselcoll != ncoll) {
		splx(s);
		goto retry;
	}
	p->p_flag &= ~P_SELECT;
	error = tsleep((caddr_t)&selwait, PSOCK | PCATCH, "select", timo);
	splx(s);
	if (error == 0)
		goto retry;
 done:
	p->p_flag &= ~P_SELECT;
	/* poll is not restarted after signals... */
	if (error == ERESTART)
		error = EINTR;
	if (error == EWOULDBLOCK)
		error = 0;
	if (error == 0) {
		/* Copy the pollfd array (revents filled in) back out. */
		error = copyout(bits, SCARG(uap, fds), ni);
		if (error)
			goto out;
	}
 out:
	if (ni > sizeof(smallbits))
		free(bits, M_TEMP);
	return (error);
}
885
886 int
887 pollscan(struct proc *p, struct pollfd *fds, int nfd, register_t *retval)
888 {
889 struct filedesc *fdp;
890 int i, n;
891 struct file *fp;
892
893 fdp = p->p_fd;
894 n = 0;
895 for (i = 0; i < nfd; i++, fds++) {
896 if (fds->fd >= fdp->fd_nfiles) {
897 fds->revents = POLLNVAL;
898 n++;
899 } else if (fds->fd < 0) {
900 fds->revents = 0;
901 } else {
902 if ((fp = fd_getfile(fdp, fds->fd)) == NULL) {
903 fds->revents = POLLNVAL;
904 n++;
905 } else {
906 FILE_USE(fp);
907 fds->revents = (*fp->f_ops->fo_poll)(fp,
908 fds->events | POLLERR | POLLHUP, p);
909 if (fds->revents != 0)
910 n++;
911 FILE_UNUSE(fp, p);
912 }
913 }
914 }
915 *retval = n;
916 return (0);
917 }
918
919 /*ARGSUSED*/
920 int
921 seltrue(dev_t dev, int events, struct proc *p)
922 {
923
924 return (events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
925 }
926
/*
 * Record a select request.
 *
 * A selinfo remembers a single selecting process (sel_pid).  If that
 * slot is already taken by another process still sleeping on selwait,
 * SI_COLL is set instead, so selwakeup() can wake all sleepers.
 */
void
selrecord(struct proc *selector, struct selinfo *sip)
{
	struct proc *p;
	pid_t mypid;

	mypid = selector->p_pid;
	/* Already recorded for us: nothing to do. */
	if (sip->sel_pid == mypid)
		return;
	if (sip->sel_pid && (p = pfind(sip->sel_pid)) &&
	    p->p_wchan == (caddr_t)&selwait)
		sip->sel_flags |= SI_COLL;
	else {
		/* Slot free (or holder gone): claim it. */
		sip->sel_flags &= ~SI_COLL;
		sip->sel_pid = mypid;
	}
}
947
948 /*
949 * Do a wakeup when a selectable event occurs.
950 */
951 void
952 selwakeup(sip)
953 struct selinfo *sip;
954 {
955 struct proc *p;
956 int s;
957
958 if (sip->sel_pid == 0)
959 return;
960 if (sip->sel_flags & SI_COLL) {
961 nselcoll++;
962 sip->sel_flags &= ~SI_COLL;
963 wakeup((caddr_t)&selwait);
964 }
965 p = pfind(sip->sel_pid);
966 sip->sel_pid = 0;
967 if (p != NULL) {
968 SCHED_LOCK(s);
969 if (p->p_wchan == (caddr_t)&selwait) {
970 if (p->p_stat == SSLEEP)
971 setrunnable(p);
972 else
973 unsleep(p);
974 } else if (p->p_flag & P_SELECT)
975 p->p_flag &= ~P_SELECT;
976 SCHED_UNLOCK(s);
977 }
978 }
979