/*	$NetBSD: sys_generic.c,v 1.102 2007/06/16 20:48:03 dsl Exp $	*/
2
3 /*
4 * Copyright (c) 1982, 1986, 1989, 1993
5 * The Regents of the University of California. All rights reserved.
6 * (c) UNIX System Laboratories, Inc.
7 * All or some portions of this file are derived from material licensed
8 * to the University of California by American Telephone and Telegraph
9 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
10 * the permission of UNIX System Laboratories, Inc.
11 *
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
14 * are met:
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in the
19 * documentation and/or other materials provided with the distribution.
20 * 3. Neither the name of the University nor the names of its contributors
21 * may be used to endorse or promote products derived from this software
22 * without specific prior written permission.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * SUCH DAMAGE.
35 *
36 * @(#)sys_generic.c 8.9 (Berkeley) 2/14/95
37 */
38
39 #include <sys/cdefs.h>
40 __KERNEL_RCSID(0, "$NetBSD: sys_generic.c,v 1.102 2007/06/16 20:48:03 dsl Exp $");
41
42 #include "opt_ktrace.h"
43
44 #include <sys/param.h>
45 #include <sys/systm.h>
46 #include <sys/filedesc.h>
47 #include <sys/ioctl.h>
48 #include <sys/file.h>
49 #include <sys/proc.h>
50 #include <sys/socketvar.h>
51 #include <sys/signalvar.h>
52 #include <sys/uio.h>
53 #include <sys/kernel.h>
54 #include <sys/stat.h>
55 #include <sys/malloc.h>
56 #include <sys/vnode.h>
57 #include <sys/poll.h>
58 #ifdef KTRACE
59 #include <sys/ktrace.h>
60 #endif
61
62 #include <sys/mount.h>
63 #include <sys/syscallargs.h>
64
65 #include <uvm/uvm_extern.h>
66
67 int selscan(struct lwp *, fd_mask *, fd_mask *, int, register_t *);
68 int pollscan(struct lwp *, struct pollfd *, int, register_t *);
69
70
71 /*
72 * Read system call.
73 */
/* ARGSUSED */
int
sys_read(struct lwp *l, void *v, register_t *retval)
{
	struct sys_read_args /* {
		syscallarg(int) fd;
		syscallarg(void *) buf;
		syscallarg(size_t) nbyte;
	} */ *uap = v;
	int fd;
	struct file *fp;
	struct proc *p;
	struct filedesc *fdp;

	fd = SCARG(uap, fd);
	p = l->l_proc;
	fdp = p->p_fd;

	/* Look up the descriptor; fd_getfile() holds f_slock on success. */
	if ((fp = fd_getfile(fdp, fd)) == NULL)
		return (EBADF);

	/* The file must be open for reading. */
	if ((fp->f_flag & FREAD) == 0) {
		simple_unlock(&fp->f_slock);
		return (EBADF);
	}

	/* Take a use reference (also drops f_slock). */
	FILE_USE(fp);

	/* dofileread() will unuse the descriptor for us */
	return (dofileread(l, fd, fp, SCARG(uap, buf), SCARG(uap, nbyte),
	    &fp->f_offset, FOF_UPDATE_OFFSET, retval));
}
106
107 int
108 dofileread(struct lwp *l, int fd, struct file *fp, void *buf, size_t nbyte,
109 off_t *offset, int flags, register_t *retval)
110 {
111 struct iovec aiov;
112 struct uio auio;
113 struct proc *p;
114 struct vmspace *vm;
115 size_t cnt;
116 int error;
117 #ifdef KTRACE
118 struct iovec ktriov;
119 #endif
120 p = l->l_proc;
121
122 error = proc_vmspace_getref(p, &vm);
123 if (error) {
124 goto out;
125 }
126
127 aiov.iov_base = (void *)buf;
128 aiov.iov_len = nbyte;
129 auio.uio_iov = &aiov;
130 auio.uio_iovcnt = 1;
131 auio.uio_resid = nbyte;
132 auio.uio_rw = UIO_READ;
133 auio.uio_vmspace = vm;
134
135 /*
136 * Reads return ssize_t because -1 is returned on error. Therefore
137 * we must restrict the length to SSIZE_MAX to avoid garbage return
138 * values.
139 */
140 if (auio.uio_resid > SSIZE_MAX) {
141 error = EINVAL;
142 goto out;
143 }
144
145 #ifdef KTRACE
146 /* In case we are tracing, save a copy of iovec */
147 ktriov = aiov;
148 #endif
149 cnt = auio.uio_resid;
150 error = (*fp->f_ops->fo_read)(fp, offset, &auio, fp->f_cred, flags);
151 if (error)
152 if (auio.uio_resid != cnt && (error == ERESTART ||
153 error == EINTR || error == EWOULDBLOCK))
154 error = 0;
155 cnt -= auio.uio_resid;
156 #ifdef KTRACE
157 if (KTRPOINT(p, KTR_GENIO) && error == 0)
158 ktrgenio(l, fd, UIO_READ, &ktriov, cnt, error);
159 #endif
160 *retval = cnt;
161 out:
162 FILE_UNUSE(fp, l);
163 uvmspace_free(vm);
164 return (error);
165 }
166
167 /*
168 * Scatter read system call.
169 */
170 int
171 sys_readv(struct lwp *l, void *v, register_t *retval)
172 {
173 struct sys_readv_args /* {
174 syscallarg(int) fd;
175 syscallarg(const struct iovec *) iovp;
176 syscallarg(int) iovcnt;
177 } */ *uap = v;
178
179 return do_filereadv(l, SCARG(uap, fd), SCARG(uap, iovp),
180 SCARG(uap, iovcnt), NULL, FOF_UPDATE_OFFSET, retval);
181 }
182
183 int
184 do_filereadv(struct lwp *l, int fd, const struct iovec *iovp, int iovcnt,
185 off_t *offset, int flags, register_t *retval)
186 {
187 struct proc *p;
188 struct uio auio;
189 struct iovec *iov, *needfree = NULL, aiov[UIO_SMALLIOV];
190 struct vmspace *vm;
191 int i, error;
192 size_t cnt;
193 u_int iovlen;
194 struct file *fp;
195 struct filedesc *fdp;
196 #ifdef KTRACE
197 struct iovec *ktriov = NULL;
198 #endif
199
200 if (iovcnt == 0)
201 return EINVAL;
202
203 p = l->l_proc;
204 fdp = p->p_fd;
205
206 if ((fp = fd_getfile(fdp, fd)) == NULL)
207 return EBADF;
208
209 if ((fp->f_flag & FREAD) == 0) {
210 simple_unlock(&fp->f_slock);
211 return EBADF;
212 }
213
214 FILE_USE(fp);
215
216 if (offset == NULL)
217 offset = &fp->f_offset;
218 else {
219 struct vnode *vp = fp->f_data;
220 if (fp->f_type != DTYPE_VNODE || vp->v_type == VFIFO) {
221 error = ESPIPE;
222 goto out;
223 }
224 /*
225 * Test that the device is seekable ?
226 * XXX This works because no file systems actually
227 * XXX take any action on the seek operation.
228 */
229 error = VOP_SEEK(vp, fp->f_offset, *offset, fp->f_cred);
230 if (error != 0)
231 goto out;
232 }
233
234 error = proc_vmspace_getref(p, &vm);
235 if (error)
236 goto out;
237
238 iovlen = iovcnt * sizeof(struct iovec);
239 if (flags & FOF_IOV_SYSSPACE)
240 iov = __UNCONST(iovp);
241 else {
242 iov = aiov;
243 if ((u_int)iovcnt > UIO_SMALLIOV) {
244 if ((u_int)iovcnt > IOV_MAX) {
245 error = EINVAL;
246 goto out;
247 }
248 iov = malloc(iovlen, M_IOV, M_WAITOK);
249 needfree = iov;
250 }
251 error = copyin(iovp, iov, iovlen);
252 if (error)
253 goto done;
254 }
255
256 auio.uio_iov = iov;
257 auio.uio_iovcnt = iovcnt;
258 auio.uio_rw = UIO_READ;
259 auio.uio_vmspace = vm;
260
261 auio.uio_resid = 0;
262 for (i = 0; i < iovcnt; i++, iov++) {
263 auio.uio_resid += iov->iov_len;
264 /*
265 * Reads return ssize_t because -1 is returned on error.
266 * Therefore we must restrict the length to SSIZE_MAX to
267 * avoid garbage return values.
268 */
269 if (iov->iov_len > SSIZE_MAX || auio.uio_resid > SSIZE_MAX) {
270 error = EINVAL;
271 goto done;
272 }
273 }
274
275 #ifdef KTRACE
276 /*
277 * if tracing, save a copy of iovec
278 */
279 if (KTRPOINT(p, KTR_GENIO)) {
280 ktriov = malloc(iovlen, M_TEMP, M_WAITOK);
281 memcpy(ktriov, auio.uio_iov, iovlen);
282 }
283 #endif
284
285 cnt = auio.uio_resid;
286 error = (*fp->f_ops->fo_read)(fp, offset, &auio, fp->f_cred, flags);
287 if (error)
288 if (auio.uio_resid != cnt && (error == ERESTART ||
289 error == EINTR || error == EWOULDBLOCK))
290 error = 0;
291 cnt -= auio.uio_resid;
292 *retval = cnt;
293
294 #ifdef KTRACE
295 if (ktriov != NULL) {
296 if (KTRPOINT(p, KTR_GENIO) && (error == 0))
297 ktrgenio(l, fd, UIO_READ, ktriov, cnt, error);
298 free(ktriov, M_TEMP);
299 }
300 #endif
301
302 done:
303 if (needfree)
304 free(needfree, M_IOV);
305 out:
306 FILE_UNUSE(fp, l);
307 uvmspace_free(vm);
308 return (error);
309 }
310
311 /*
312 * Write system call
313 */
int
sys_write(struct lwp *l, void *v, register_t *retval)
{
	struct sys_write_args /* {
		syscallarg(int) fd;
		syscallarg(const void *) buf;
		syscallarg(size_t) nbyte;
	} */ *uap = v;
	int fd;
	struct file *fp;
	struct proc *p;
	struct filedesc *fdp;

	fd = SCARG(uap, fd);
	p = l->l_proc;
	fdp = p->p_fd;

	/* Look up the descriptor; fd_getfile() holds f_slock on success. */
	if ((fp = fd_getfile(fdp, fd)) == NULL)
		return (EBADF);

	/* The file must be open for writing. */
	if ((fp->f_flag & FWRITE) == 0) {
		simple_unlock(&fp->f_slock);
		return (EBADF);
	}

	/* Take a use reference (also drops f_slock). */
	FILE_USE(fp);

	/* dofilewrite() will unuse the descriptor for us */
	return (dofilewrite(l, fd, fp, SCARG(uap, buf), SCARG(uap, nbyte),
	    &fp->f_offset, FOF_UPDATE_OFFSET, retval));
}
345
346 int
347 dofilewrite(struct lwp *l, int fd, struct file *fp, const void *buf,
348 size_t nbyte, off_t *offset, int flags, register_t *retval)
349 {
350 struct iovec aiov;
351 struct uio auio;
352 struct proc *p;
353 struct vmspace *vm;
354 size_t cnt;
355 int error;
356 #ifdef KTRACE
357 struct iovec ktriov;
358 #endif
359
360 p = l->l_proc;
361 error = proc_vmspace_getref(p, &vm);
362 if (error) {
363 goto out;
364 }
365 aiov.iov_base = __UNCONST(buf); /* XXXUNCONST kills const */
366 aiov.iov_len = nbyte;
367 auio.uio_iov = &aiov;
368 auio.uio_iovcnt = 1;
369 auio.uio_resid = nbyte;
370 auio.uio_rw = UIO_WRITE;
371 auio.uio_vmspace = vm;
372
373 /*
374 * Writes return ssize_t because -1 is returned on error. Therefore
375 * we must restrict the length to SSIZE_MAX to avoid garbage return
376 * values.
377 */
378 if (auio.uio_resid > SSIZE_MAX) {
379 error = EINVAL;
380 goto out;
381 }
382
383 #ifdef KTRACE
384 /* In case we are tracing, save a copy of iovec */
385 ktriov = aiov;
386 #endif
387 cnt = auio.uio_resid;
388 error = (*fp->f_ops->fo_write)(fp, offset, &auio, fp->f_cred, flags);
389 if (error) {
390 if (auio.uio_resid != cnt && (error == ERESTART ||
391 error == EINTR || error == EWOULDBLOCK))
392 error = 0;
393 if (error == EPIPE) {
394 mutex_enter(&proclist_mutex);
395 psignal(p, SIGPIPE);
396 mutex_exit(&proclist_mutex);
397 }
398 }
399 cnt -= auio.uio_resid;
400 #ifdef KTRACE
401 if (KTRPOINT(p, KTR_GENIO) && error == 0)
402 ktrgenio(l, fd, UIO_WRITE, &ktriov, cnt, error);
403 #endif
404 *retval = cnt;
405 out:
406 FILE_UNUSE(fp, l);
407 uvmspace_free(vm);
408 return (error);
409 }
410
411 /*
412 * Gather write system call
413 */
414 int
415 sys_writev(struct lwp *l, void *v, register_t *retval)
416 {
417 struct sys_writev_args /* {
418 syscallarg(int) fd;
419 syscallarg(const struct iovec *) iovp;
420 syscallarg(int) iovcnt;
421 } */ *uap = v;
422
423 return do_filewritev(l, SCARG(uap, fd), SCARG(uap, iovp),
424 SCARG(uap, iovcnt), NULL, FOF_UPDATE_OFFSET, retval);
425 }
426
427 int
428 do_filewritev(struct lwp *l, int fd, const struct iovec *iovp, int iovcnt,
429 off_t *offset, int flags, register_t *retval)
430 {
431 struct proc *p;
432 struct uio auio;
433 struct iovec *iov, *needfree = NULL, aiov[UIO_SMALLIOV];
434 struct vmspace *vm;
435 int i, error;
436 size_t cnt;
437 u_int iovlen;
438 struct file *fp;
439 struct filedesc *fdp;
440 #ifdef KTRACE
441 struct iovec *ktriov = NULL;
442 #endif
443
444 if (iovcnt == 0)
445 return EINVAL;
446
447 p = l->l_proc;
448 fdp = p->p_fd;
449
450 if ((fp = fd_getfile(fdp, fd)) == NULL)
451 return EBADF;
452
453 if ((fp->f_flag & FWRITE) == 0) {
454 simple_unlock(&fp->f_slock);
455 return EBADF;
456 }
457
458 FILE_USE(fp);
459
460 if (offset == NULL)
461 offset = &fp->f_offset;
462 else {
463 struct vnode *vp = fp->f_data;
464 if (fp->f_type != DTYPE_VNODE || vp->v_type == VFIFO) {
465 error = ESPIPE;
466 goto out;
467 }
468 /*
469 * Test that the device is seekable ?
470 * XXX This works because no file systems actually
471 * XXX take any action on the seek operation.
472 */
473 error = VOP_SEEK(vp, fp->f_offset, *offset, fp->f_cred);
474 if (error != 0)
475 goto out;
476 }
477
478 error = proc_vmspace_getref(p, &vm);
479 if (error)
480 goto out;
481
482 iovlen = iovcnt * sizeof(struct iovec);
483 if (flags & FOF_IOV_SYSSPACE)
484 iov = __UNCONST(iovp);
485 else {
486 iov = aiov;
487 if ((u_int)iovcnt > UIO_SMALLIOV) {
488 if ((u_int)iovcnt > IOV_MAX) {
489 error = EINVAL;
490 goto out;
491 }
492 iov = malloc(iovlen, M_IOV, M_WAITOK);
493 needfree = iov;
494 }
495 error = copyin(iovp, iov, iovlen);
496 if (error)
497 goto done;
498 }
499
500 auio.uio_iov = iov;
501 auio.uio_iovcnt = iovcnt;
502 auio.uio_rw = UIO_WRITE;
503 auio.uio_vmspace = vm;
504
505 auio.uio_resid = 0;
506 for (i = 0; i < iovcnt; i++, iov++) {
507 auio.uio_resid += iov->iov_len;
508 /*
509 * Writes return ssize_t because -1 is returned on error.
510 * Therefore we must restrict the length to SSIZE_MAX to
511 * avoid garbage return values.
512 */
513 if (iov->iov_len > SSIZE_MAX || auio.uio_resid > SSIZE_MAX) {
514 error = EINVAL;
515 goto done;
516 }
517 }
518
519 #ifdef KTRACE
520 /*
521 * if tracing, save a copy of iovec
522 */
523 if (KTRPOINT(p, KTR_GENIO)) {
524 ktriov = malloc(iovlen, M_TEMP, M_WAITOK);
525 memcpy(ktriov, auio.uio_iov, iovlen);
526 }
527 #endif
528 cnt = auio.uio_resid;
529 error = (*fp->f_ops->fo_write)(fp, offset, &auio, fp->f_cred, flags);
530 if (error) {
531 if (auio.uio_resid != cnt && (error == ERESTART ||
532 error == EINTR || error == EWOULDBLOCK))
533 error = 0;
534 if (error == EPIPE) {
535 mutex_enter(&proclist_mutex);
536 psignal(p, SIGPIPE);
537 mutex_exit(&proclist_mutex);
538 }
539 }
540 cnt -= auio.uio_resid;
541 *retval = cnt;
542
543 #ifdef KTRACE
544 if (ktriov != NULL) {
545 if (KTRPOINT(p, KTR_GENIO) && (error == 0))
546 ktrgenio(l, fd, UIO_WRITE, ktriov, cnt, error);
547 free(ktriov, M_TEMP);
548 }
549 #endif
550
551 done:
552 if (needfree)
553 free(needfree, M_IOV);
554 out:
555 FILE_UNUSE(fp, l);
556 uvmspace_free(vm);
557 return (error);
558 }
559
560 /*
561 * Ioctl system call
562 */
/* ARGSUSED */
int
sys_ioctl(struct lwp *l, void *v, register_t *retval)
{
	struct sys_ioctl_args /* {
		syscallarg(int) fd;
		syscallarg(u_long) com;
		syscallarg(void *) data;
	} */ *uap = v;
	struct file *fp;
	struct proc *p;
	struct filedesc *fdp;
	u_long com;
	int error;
	u_int size;
	void *data, *memp;
#define STK_PARAMS 128
	/* On-stack argument buffer; avoids malloc for small requests. */
	u_long stkbuf[STK_PARAMS/sizeof(u_long)];

	error = 0;
	p = l->l_proc;
	fdp = p->p_fd;

	if ((fp = fd_getfile(fdp, SCARG(uap, fd))) == NULL)
		return (EBADF);

	FILE_USE(fp);

	/* The descriptor must be open for reading or writing. */
	if ((fp->f_flag & (FREAD | FWRITE)) == 0) {
		error = EBADF;
		com = 0;	/* keep the error-report switch below sane */
		goto out;
	}

	/* Close-on-exec requests only touch descriptor flags. */
	switch (com = SCARG(uap, com)) {
	case FIONCLEX:
		fdp->fd_ofileflags[SCARG(uap, fd)] &= ~UF_EXCLOSE;
		goto out;

	case FIOCLEX:
		fdp->fd_ofileflags[SCARG(uap, fd)] |= UF_EXCLOSE;
		goto out;
	}

	/*
	 * Interpret high order word to find amount of data to be
	 * copied to/from the user's address space.
	 */
	size = IOCPARM_LEN(com);
	if (size > IOCPARM_MAX) {
		error = ENOTTY;
		goto out;
	}
	memp = NULL;
	if (size > sizeof(stkbuf)) {
		memp = malloc((u_long)size, M_IOCTLOPS, M_WAITOK);
		data = memp;
	} else
		data = (void *)stkbuf;
	if (com&IOC_IN) {
		if (size) {
			/* Fetch the input argument from user space. */
			error = copyin(SCARG(uap, data), data, size);
			if (error) {
				if (memp)
					free(memp, M_IOCTLOPS);
				goto out;
			}
#ifdef KTRACE
			if (KTRPOINT(p, KTR_GENIO)) {
				struct iovec iov;
				iov.iov_base = SCARG(uap, data);
				iov.iov_len = size;
				ktrgenio(l, SCARG(uap, fd), UIO_WRITE, &iov,
				    size, 0);
			}
#endif
		} else
			/* Zero-size IOC_IN: pass the pointer itself. */
			*(void **)data = SCARG(uap, data);
	} else if ((com&IOC_OUT) && size)
		/*
		 * Zero the buffer so the user always
		 * gets back something deterministic.
		 */
		memset(data, 0, size);
	else if (com&IOC_VOID)
		*(void **)data = SCARG(uap, data);

	switch (com) {

	case FIONBIO:
		/* Keep f_flag's non-blocking bit in sync, then notify. */
		if (*(int *)data != 0)
			fp->f_flag |= FNONBLOCK;
		else
			fp->f_flag &= ~FNONBLOCK;
		error = (*fp->f_ops->fo_ioctl)(fp, FIONBIO, data, l);
		break;

	case FIOASYNC:
		/* Likewise for the async-I/O bit. */
		if (*(int *)data != 0)
			fp->f_flag |= FASYNC;
		else
			fp->f_flag &= ~FASYNC;
		error = (*fp->f_ops->fo_ioctl)(fp, FIOASYNC, data, l);
		break;

	default:
		error = (*fp->f_ops->fo_ioctl)(fp, com, data, l);
		/*
		 * Copy any data to user, size was
		 * already set and checked above.
		 */
		if (error == 0 && (com&IOC_OUT) && size) {
			error = copyout(data, SCARG(uap, data), size);
#ifdef KTRACE
			if (KTRPOINT(p, KTR_GENIO)) {
				struct iovec iov;
				iov.iov_base = SCARG(uap, data);
				iov.iov_len = size;
				ktrgenio(l, SCARG(uap, fd), UIO_READ, &iov,
				    size, error);
			}
#endif
		}
		break;
	}
	if (memp)
		free(memp, M_IOCTLOPS);
 out:
	FILE_UNUSE(fp, l);
	/* Map internal sentinel errors to what userland expects. */
	switch (error) {
	case -1:
		/* -1 from a driver is a bug; log it, then report ENOTTY. */
		printf("sys_ioctl: _IO%s%s('%c', %lu, %lu) returned -1: "
		    "pid=%d comm=%s\n",
		    (com & IOC_IN) ? "W" : "", (com & IOC_OUT) ? "R" : "",
		    (char)IOCGROUP(com), (com & 0xff), IOCPARM_LEN(com),
		    p->p_pid, p->p_comm);
		/* FALLTHROUGH */
	case EPASSTHROUGH:
		/* No layer handled the request. */
		error = ENOTTY;
		/* FALLTHROUGH */
	default:
		return (error);
	}
}
707
708 int selwait, nselcoll;
709
710 /*
711 * Select system call.
712 */
713 int
714 sys_pselect(struct lwp *l, void *v, register_t *retval)
715 {
716 struct sys_pselect_args /* {
717 syscallarg(int) nd;
718 syscallarg(fd_set *) in;
719 syscallarg(fd_set *) ou;
720 syscallarg(fd_set *) ex;
721 syscallarg(const struct timespec *) ts;
722 syscallarg(sigset_t *) mask;
723 } */ * const uap = v;
724 struct timespec ats;
725 struct timeval atv, *tv = NULL;
726 sigset_t amask, *mask = NULL;
727 int error;
728
729 if (SCARG(uap, ts)) {
730 error = copyin(SCARG(uap, ts), &ats, sizeof(ats));
731 if (error)
732 return error;
733 atv.tv_sec = ats.tv_sec;
734 atv.tv_usec = ats.tv_nsec / 1000;
735 tv = &atv;
736 }
737 if (SCARG(uap, mask) != NULL) {
738 error = copyin(SCARG(uap, mask), &amask, sizeof(amask));
739 if (error)
740 return error;
741 mask = &amask;
742 }
743
744 return selcommon(l, retval, SCARG(uap, nd), SCARG(uap, in),
745 SCARG(uap, ou), SCARG(uap, ex), tv, mask);
746 }
747
/*
 * Validate a timeout and note the monotonic start time in *sleeptv,
 * for later use by gettimeleft().  Returns -1 on a malformed timeout.
 */
int
inittimeleft(struct timeval *tv, struct timeval *sleeptv)
{

	/* Reject malformed timeouts up front. */
	if (itimerfix(tv) != 0)
		return -1;

	getmicrouptime(sleeptv);
	return 0;
}
756
757 int
758 gettimeleft(struct timeval *tv, struct timeval *sleeptv)
759 {
760 /*
761 * We have to recalculate the timeout on every retry.
762 */
763 struct timeval slepttv;
764 /*
765 * reduce tv by elapsed time
766 * based on monotonic time scale
767 */
768 getmicrouptime(&slepttv);
769 timeradd(tv, sleeptv, tv);
770 timersub(tv, &slepttv, tv);
771 *sleeptv = slepttv;
772 return tvtohz(tv);
773 }
774
775 int
776 sys_select(struct lwp *l, void *v, register_t *retval)
777 {
778 struct sys_select_args /* {
779 syscallarg(int) nd;
780 syscallarg(fd_set *) in;
781 syscallarg(fd_set *) ou;
782 syscallarg(fd_set *) ex;
783 syscallarg(struct timeval *) tv;
784 } */ * const uap = v;
785 struct timeval atv, *tv = NULL;
786 int error;
787
788 if (SCARG(uap, tv)) {
789 error = copyin(SCARG(uap, tv), (void *)&atv,
790 sizeof(atv));
791 if (error)
792 return error;
793 tv = &atv;
794 }
795
796 return selcommon(l, retval, SCARG(uap, nd), SCARG(uap, in),
797 SCARG(uap, ou), SCARG(uap, ex), tv, NULL);
798 }
799
/*
 * Common code for select(2) and pselect(2).  Copies in up to three
 * descriptor sets, scans them via selscan(), and sleeps on &selwait
 * until an event, timeout, or signal.  Result sets are copied back
 * out on success.  A non-NULL mask applies a temporary signal mask
 * for the duration of the wait (pselect semantics).
 */
int
selcommon(struct lwp *l, register_t *retval, int nd, fd_set *u_in,
	fd_set *u_ou, fd_set *u_ex, struct timeval *tv, sigset_t *mask)
{
	/* Room for six sets: three input (0-2) and three output (3-5). */
	char smallbits[howmany(FD_SETSIZE, NFDBITS) *
	    sizeof(fd_mask) * 6];
	struct proc * const p = l->l_proc;
	char *bits;
	int s, ncoll, error, timo;
	size_t ni;
	sigset_t oldmask;
	struct timeval sleeptv;

	error = 0;
	if (nd < 0)
		return (EINVAL);
	if (nd > p->p_fd->fd_nfiles) {
		/* forgiving; slightly wrong */
		nd = p->p_fd->fd_nfiles;
	}
	/* Bytes per descriptor set, rounded up to whole fd_mask words. */
	ni = howmany(nd, NFDBITS) * sizeof(fd_mask);
	if (ni * 6 > sizeof(smallbits))
		bits = malloc(ni * 6, M_TEMP, M_WAITOK);
	else
		bits = smallbits;

	/* Copy in each supplied set into slot x; clear absent sets. */
#define	getbits(name, x)						\
	if (u_ ## name) {						\
		error = copyin(u_ ## name, bits + ni * x, ni);		\
		if (error)						\
			goto done;					\
	} else								\
		memset(bits + ni * x, 0, ni);
	getbits(in, 0);
	getbits(ou, 1);
	getbits(ex, 2);
#undef	getbits

	timo = 0;
	if (tv && inittimeleft(tv, &sleeptv) == -1) {
		error = EINVAL;
		goto done;
	}

	/* pselect(): install the temporary signal mask for the wait. */
	if (mask) {
		sigminusset(&sigcantmask, mask);
		mutex_enter(&p->p_smutex);
		oldmask = l->l_sigmask;
		l->l_sigmask = *mask;
		mutex_exit(&p->p_smutex);
	} else
		oldmask = l->l_sigmask;	/* XXXgcc */

 retry:
	/* Snapshot the collision counter to detect selwakeup() races. */
	ncoll = nselcoll;
	l->l_flag |= LW_SELECT;
	error = selscan(l, (fd_mask *)(bits + ni * 0),
	    (fd_mask *)(bits + ni * 3), nd, retval);
	if (error || *retval)
		goto donemask;
	if (tv && (timo = gettimeleft(tv, &sleeptv)) <= 0)
		goto donemask;
	s = splsched();
	/* A wakeup or collision slipped in during the scan: rescan. */
	if ((l->l_flag & LW_SELECT) == 0 || nselcoll != ncoll) {
		splx(s);
		goto retry;
	}
	l->l_flag &= ~LW_SELECT;
	error = tsleep((void *)&selwait, PSOCK | PCATCH, "select", timo);
	splx(s);
	if (error == 0)
		goto retry;
 donemask:
	if (mask) {
		/* pselect(): restore the caller's signal mask. */
		mutex_enter(&p->p_smutex);
		l->l_sigmask = oldmask;
		mutex_exit(&p->p_smutex);
	}
	l->l_flag &= ~LW_SELECT;
 done:
	/* select is not restarted after signals... */
	if (error == ERESTART)
		error = EINTR;
	if (error == EWOULDBLOCK)
		error = 0;
	if (error == 0) {

		/* Copy each result set (slots 3-5) back to user space. */
#define	putbits(name, x)						\
	if (u_ ## name) {						\
		error = copyout(bits + ni * x, u_ ## name, ni);		\
		if (error)						\
			goto out;					\
	}
		putbits(in, 3);
		putbits(ou, 4);
		putbits(ex, 5);
#undef putbits
	}
 out:
	if (ni * 6 > sizeof(smallbits))
		free(bits, M_TEMP);
	return (error);
}
903
/*
 * Scan the three input descriptor bitmaps (read/write/except) in
 * ibitp, polling each set descriptor, and build the corresponding
 * output bitmaps in obitp.  The count of ready descriptors goes in
 * *retval; EBADF is returned if a set bit names a closed descriptor.
 */
int
selscan(struct lwp *l, fd_mask *ibitp, fd_mask *obitp, int nfd,
	register_t *retval)
{
	/* fo_poll() event sets corresponding to the in/ou/ex fd sets. */
	static const int flag[3] = { POLLRDNORM | POLLHUP | POLLERR,
			       POLLWRNORM | POLLHUP | POLLERR,
			       POLLRDBAND };
	struct proc *p = l->l_proc;
	struct filedesc *fdp;
	int msk, i, j, fd, n;
	fd_mask ibits, obits;
	struct file *fp;

	fdp = p->p_fd;
	n = 0;
	for (msk = 0; msk < 3; msk++) {
		/* Walk this set's bitmap one fd_mask word at a time. */
		for (i = 0; i < nfd; i += NFDBITS) {
			ibits = *ibitp++;
			obits = 0;
			/* Visit each set bit; ffs() is 1-based, hence --j. */
			while ((j = ffs(ibits)) && (fd = i + --j) < nfd) {
				ibits &= ~(1 << j);
				if ((fp = fd_getfile(fdp, fd)) == NULL)
					return (EBADF);
				FILE_USE(fp);
				if ((*fp->f_ops->fo_poll)(fp, flag[msk], l)) {
					obits |= (1 << j);
					n++;
				}
				FILE_UNUSE(fp, l);
			}
			/* Store the completed word of the result set. */
			*obitp++ = obits;
		}
	}
	*retval = n;
	return (0);
}
940
941 /*
942 * Poll system call.
943 */
944 int
945 sys_poll(struct lwp *l, void *v, register_t *retval)
946 {
947 struct sys_poll_args /* {
948 syscallarg(struct pollfd *) fds;
949 syscallarg(u_int) nfds;
950 syscallarg(int) timeout;
951 } */ * const uap = v;
952 struct timeval atv, *tv = NULL;
953
954 if (SCARG(uap, timeout) != INFTIM) {
955 atv.tv_sec = SCARG(uap, timeout) / 1000;
956 atv.tv_usec = (SCARG(uap, timeout) % 1000) * 1000;
957 tv = &atv;
958 }
959
960 return pollcommon(l, retval, SCARG(uap, fds), SCARG(uap, nfds),
961 tv, NULL);
962 }
963
964 /*
965 * Poll system call.
966 */
967 int
968 sys_pollts(struct lwp *l, void *v, register_t *retval)
969 {
970 struct sys_pollts_args /* {
971 syscallarg(struct pollfd *) fds;
972 syscallarg(u_int) nfds;
973 syscallarg(const struct timespec *) ts;
974 syscallarg(const sigset_t *) mask;
975 } */ * const uap = v;
976 struct timespec ats;
977 struct timeval atv, *tv = NULL;
978 sigset_t amask, *mask = NULL;
979 int error;
980
981 if (SCARG(uap, ts)) {
982 error = copyin(SCARG(uap, ts), &ats, sizeof(ats));
983 if (error)
984 return error;
985 atv.tv_sec = ats.tv_sec;
986 atv.tv_usec = ats.tv_nsec / 1000;
987 tv = &atv;
988 }
989 if (SCARG(uap, mask)) {
990 error = copyin(SCARG(uap, mask), &amask, sizeof(amask));
991 if (error)
992 return error;
993 mask = &amask;
994 }
995
996 return pollcommon(l, retval, SCARG(uap, fds), SCARG(uap, nfds),
997 tv, mask);
998 }
999
/*
 * Common code for poll(2) and pollts(2).  Copies in the pollfd array,
 * scans it via pollscan(), and sleeps on &selwait until an event,
 * timeout, or signal.  The array, with revents filled in, is copied
 * back out on success.  A non-NULL mask applies a temporary signal
 * mask for the duration of the wait (pollts semantics).
 */
int
pollcommon(struct lwp *l, register_t *retval,
	struct pollfd *u_fds, u_int nfds,
	struct timeval *tv, sigset_t *mask)
{
	/* On-stack array for the common case of few descriptors. */
	char smallbits[32 * sizeof(struct pollfd)];
	struct proc * const p = l->l_proc;
	void * bits;
	sigset_t oldmask;
	int s, ncoll, error, timo;
	size_t ni;
	struct timeval sleeptv;

	if (nfds > p->p_fd->fd_nfiles) {
		/* forgiving; slightly wrong */
		nfds = p->p_fd->fd_nfiles;
	}
	ni = nfds * sizeof(struct pollfd);
	if (ni > sizeof(smallbits))
		bits = malloc(ni, M_TEMP, M_WAITOK);
	else
		bits = smallbits;

	error = copyin(u_fds, bits, ni);
	if (error)
		goto done;

	timo = 0;
	if (tv && inittimeleft(tv, &sleeptv) == -1) {
		error = EINVAL;
		goto done;
	}

	/* pollts(): install the temporary signal mask for the wait. */
	if (mask) {
		sigminusset(&sigcantmask, mask);
		mutex_enter(&p->p_smutex);
		oldmask = l->l_sigmask;
		l->l_sigmask = *mask;
		mutex_exit(&p->p_smutex);
	} else
		oldmask = l->l_sigmask;	/* XXXgcc */

 retry:
	/* Snapshot the collision counter to detect selwakeup() races. */
	ncoll = nselcoll;
	l->l_flag |= LW_SELECT;
	error = pollscan(l, (struct pollfd *)bits, nfds, retval);
	if (error || *retval)
		goto donemask;
	if (tv && (timo = gettimeleft(tv, &sleeptv)) <= 0)
		goto donemask;
	s = splsched();
	/* A wakeup or collision slipped in during the scan: rescan. */
	if ((l->l_flag & LW_SELECT) == 0 || nselcoll != ncoll) {
		splx(s);
		goto retry;
	}
	l->l_flag &= ~LW_SELECT;
	error = tsleep((void *)&selwait, PSOCK | PCATCH, "poll", timo);
	splx(s);
	if (error == 0)
		goto retry;
 donemask:
	if (mask) {
		/* pollts(): restore the caller's signal mask. */
		mutex_enter(&p->p_smutex);
		l->l_sigmask = oldmask;
		mutex_exit(&p->p_smutex);
	}

	l->l_flag &= ~LW_SELECT;
 done:
	/* poll is not restarted after signals... */
	if (error == ERESTART)
		error = EINTR;
	if (error == EWOULDBLOCK)
		error = 0;
	if (error == 0) {
		/* Copy the array, with revents filled in, back out. */
		error = copyout(bits, u_fds, ni);
		if (error)
			goto out;
	}
 out:
	if (ni > sizeof(smallbits))
		free(bits, M_TEMP);
	return (error);
}
1084
1085 int
1086 pollscan(struct lwp *l, struct pollfd *fds, int nfd, register_t *retval)
1087 {
1088 struct proc *p = l->l_proc;
1089 struct filedesc *fdp;
1090 int i, n;
1091 struct file *fp;
1092
1093 fdp = p->p_fd;
1094 n = 0;
1095 for (i = 0; i < nfd; i++, fds++) {
1096 if (fds->fd >= fdp->fd_nfiles) {
1097 fds->revents = POLLNVAL;
1098 n++;
1099 } else if (fds->fd < 0) {
1100 fds->revents = 0;
1101 } else {
1102 if ((fp = fd_getfile(fdp, fds->fd)) == NULL) {
1103 fds->revents = POLLNVAL;
1104 n++;
1105 } else {
1106 FILE_USE(fp);
1107 fds->revents = (*fp->f_ops->fo_poll)(fp,
1108 fds->events | POLLERR | POLLHUP, l);
1109 if (fds->revents != 0)
1110 n++;
1111 FILE_UNUSE(fp, l);
1112 }
1113 }
1114 }
1115 *retval = n;
1116 return (0);
1117 }
1118
1119 /*ARGSUSED*/
1120 int
1121 seltrue(dev_t dev, int events, struct lwp *l)
1122 {
1123
1124 return (events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
1125 }
1126
1127 /*
1128 * Record a select request.
1129 */
void
selrecord(struct lwp *selector, struct selinfo *sip)
{
	struct lwp *l;
	struct proc *p;
	pid_t mypid;

	/* Nothing to do if we are already the recorded selector. */
	mypid = selector->l_proc->p_pid;
	if (sip->sel_pid == mypid)
		return;

	/*
	 * Another process is recorded.  If any of its LWPs is still
	 * asleep in select/poll, flag a collision so that selwakeup()
	 * broadcasts to all selectors instead of just one.
	 */
	mutex_enter(&proclist_mutex);
	if (sip->sel_pid && (p = p_find(sip->sel_pid, PFIND_LOCKED))) {
		mutex_enter(&p->p_smutex);
		LIST_FOREACH(l, &p->p_lwps, l_sibling) {
			lwp_lock(l);
			if (l->l_wchan == (void *)&selwait &&
			    l->l_stat == LSSLEEP) {
				sip->sel_collision = 1;
				lwp_unlock(l);
				break;
			}
			lwp_unlock(l);
		}
		mutex_exit(&p->p_smutex);
	}
	mutex_exit(&proclist_mutex);

	/* No collision: this process becomes the recorded selector. */
	if (!sip->sel_collision)
		sip->sel_pid = mypid;
}
1161
1162 /*
1163 * Do a wakeup when a selectable event occurs.
1164 */
1165 void
1166 selwakeup(sip)
1167 struct selinfo *sip;
1168 {
1169 struct lwp *l;
1170 struct proc *p;
1171
1172 if (sip->sel_pid == 0)
1173 return;
1174 if (sip->sel_collision) {
1175 sip->sel_pid = 0;
1176 nselcoll++;
1177 sip->sel_collision = 0;
1178 wakeup((void *)&selwait);
1179 return;
1180 }
1181
1182 /*
1183 * We must use the proclist_mutex as we can be called from an
1184 * interrupt context.
1185 */
1186 mutex_enter(&proclist_mutex);
1187 p = p_find(sip->sel_pid, PFIND_LOCKED);
1188 sip->sel_pid = 0;
1189 if (p == NULL) {
1190 mutex_exit(&proclist_mutex);
1191 return;
1192 }
1193
1194 mutex_enter(&p->p_smutex);
1195 LIST_FOREACH(l, &p->p_lwps, l_sibling) {
1196 lwp_lock(l);
1197 if (l->l_wchan == (wchan_t)&selwait && l->l_stat == LSSLEEP) {
1198 /* setrunnable() will release the lock. */
1199 setrunnable(l);
1200 } else {
1201 if (l->l_flag & LW_SELECT)
1202 l->l_flag &= ~LW_SELECT;
1203 lwp_unlock(l);
1204 }
1205 }
1206 mutex_exit(&p->p_smutex);
1207 mutex_exit(&proclist_mutex);
1208 }
1209