/*	$NetBSD: sys_generic.c,v 1.83.2.5 2007/10/27 11:35:35 yamt Exp $	*/

/*-
 * Copyright (c) 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)sys_generic.c	8.9 (Berkeley) 2/14/95
 */

/*
 * System calls relating to files.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sys_generic.c,v 1.83.2.5 2007/10/27 11:35:35 yamt Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/filedesc.h>
#include <sys/ioctl.h>
#include <sys/file.h>
#include <sys/proc.h>
#include <sys/socketvar.h>
#include <sys/signalvar.h>
#include <sys/uio.h>
#include <sys/kernel.h>
#include <sys/stat.h>
#include <sys/kmem.h>
#include <sys/poll.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/syscallargs.h>
#include <sys/ktrace.h>

#include <uvm/uvm_extern.h>

/* Flags for lwp::l_selflag. */
#define	SEL_RESET	0	/* awoken, interrupted, or not yet polling */
#define	SEL_SCANNING	1	/* polling descriptors */
#define	SEL_BLOCKING	2	/* about to block on select_cv */

static int	selscan(lwp_t *, fd_mask *, fd_mask *, int, register_t *);
static int	pollscan(lwp_t *, struct pollfd *, int, register_t *);
static void	selclear(void);

/* Global state for select()/poll(). */
kmutex_t	select_lock;
kcondvar_t	select_cv;
int		nselcoll;
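/*
 * Note (summary of the code below): select_lock guards every LWP's
 * l_selwait list and l_selflag, and each selinfo's sel_lwp/sel_collision
 * fields.  LWPs that find nothing ready sleep on select_cv.  selwakeup()
 * either unsleeps the single registered LWP or, on a collision (several
 * LWPs interested in the same selinfo), bumps nselcoll and broadcasts on
 * select_cv so that every poller rescans its descriptors.
 */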

/*
 * Read system call.
 */
/* ARGSUSED */
int
sys_read(lwp_t *l, void *v, register_t *retval)
{
	struct sys_read_args /* {
		syscallarg(int)		fd;
		syscallarg(void *)	buf;
		syscallarg(size_t)	nbyte;
	} */ *uap = v;
	int		fd;
	struct file	*fp;
	proc_t		*p;
	struct filedesc	*fdp;

	fd = SCARG(uap, fd);
	p = l->l_proc;
	fdp = p->p_fd;

	if ((fp = fd_getfile(fdp, fd)) == NULL)
		return (EBADF);

	if ((fp->f_flag & FREAD) == 0) {
		mutex_exit(&fp->f_lock);
		return (EBADF);
	}

	FILE_USE(fp);

	/* dofileread() will unuse the descriptor for us */
	return (dofileread(fd, fp, SCARG(uap, buf), SCARG(uap, nbyte),
	    &fp->f_offset, FOF_UPDATE_OFFSET, retval));
}

int
dofileread(int fd, struct file *fp, void *buf, size_t nbyte,
	off_t *offset, int flags, register_t *retval)
{
	struct iovec aiov;
	struct uio auio;
	size_t cnt;
	int error;
	lwp_t *l;

	l = curlwp;

	aiov.iov_base = (void *)buf;
	aiov.iov_len = nbyte;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_resid = nbyte;
	auio.uio_rw = UIO_READ;
	auio.uio_vmspace = l->l_proc->p_vmspace;

	/*
	 * Reads return ssize_t because -1 is returned on error.  Therefore
	 * we must restrict the length to SSIZE_MAX to avoid garbage return
	 * values.
	 */
	if (auio.uio_resid > SSIZE_MAX) {
		error = EINVAL;
		goto out;
	}

	cnt = auio.uio_resid;
	error = (*fp->f_ops->fo_read)(fp, offset, &auio, fp->f_cred, flags);
	if (error)
		if (auio.uio_resid != cnt && (error == ERESTART ||
		    error == EINTR || error == EWOULDBLOCK))
			error = 0;
	cnt -= auio.uio_resid;
	ktrgenio(fd, UIO_READ, buf, cnt, error);
	*retval = cnt;
 out:
	FILE_UNUSE(fp, l);
	return (error);
}

/*
 * Scatter read system call.
 */
int
sys_readv(lwp_t *l, void *v, register_t *retval)
{
	struct sys_readv_args /* {
		syscallarg(int)				fd;
		syscallarg(const struct iovec *)	iovp;
		syscallarg(int)				iovcnt;
	} */ *uap = v;

	return do_filereadv(SCARG(uap, fd), SCARG(uap, iovp),
	    SCARG(uap, iovcnt), NULL, FOF_UPDATE_OFFSET, retval);
}

int
do_filereadv(int fd, const struct iovec *iovp, int iovcnt,
	off_t *offset, int flags, register_t *retval)
{
	struct uio auio;
	struct iovec *iov, *needfree = NULL, aiov[UIO_SMALLIOV];
	int i, error;
	size_t cnt;
	u_int iovlen;
	struct file *fp;
	struct iovec *ktriov = NULL;
	lwp_t *l;

	if (iovcnt == 0)
		return EINVAL;

	l = curlwp;

	if ((fp = fd_getfile(l->l_proc->p_fd, fd)) == NULL)
		return EBADF;

	if ((fp->f_flag & FREAD) == 0) {
		mutex_exit(&fp->f_lock);
		return EBADF;
	}

	FILE_USE(fp);

	if (offset == NULL)
		offset = &fp->f_offset;
	else {
		struct vnode *vp = fp->f_data;
		if (fp->f_type != DTYPE_VNODE || vp->v_type == VFIFO) {
			error = ESPIPE;
			goto out;
		}
		/*
		 * Test that the device is seekable ?
		 * XXX This works because no file systems actually
		 * XXX take any action on the seek operation.
		 */
		error = VOP_SEEK(vp, fp->f_offset, *offset, fp->f_cred);
		if (error != 0)
			goto out;
	}

	iovlen = iovcnt * sizeof(struct iovec);
	if (flags & FOF_IOV_SYSSPACE)
		iov = __UNCONST(iovp);
	else {
		iov = aiov;
		if ((u_int)iovcnt > UIO_SMALLIOV) {
			if ((u_int)iovcnt > IOV_MAX) {
				error = EINVAL;
				goto out;
			}
			iov = kmem_alloc(iovlen, KM_SLEEP);
			if (iov == NULL) {
				error = ENOMEM;
				goto out;
			}
			needfree = iov;
		}
		error = copyin(iovp, iov, iovlen);
		if (error)
			goto done;
	}

	auio.uio_iov = iov;
	auio.uio_iovcnt = iovcnt;
	auio.uio_rw = UIO_READ;
	auio.uio_vmspace = l->l_proc->p_vmspace;

	auio.uio_resid = 0;
	for (i = 0; i < iovcnt; i++, iov++) {
		auio.uio_resid += iov->iov_len;
		/*
		 * Reads return ssize_t because -1 is returned on error.
		 * Therefore we must restrict the length to SSIZE_MAX to
		 * avoid garbage return values.
		 */
		if (iov->iov_len > SSIZE_MAX || auio.uio_resid > SSIZE_MAX) {
			error = EINVAL;
			goto done;
		}
	}

	/*
	 * if tracing, save a copy of iovec
	 */
	if (ktrpoint(KTR_GENIO)) {
		ktriov = kmem_alloc(iovlen, KM_SLEEP);
		if (ktriov != NULL)
			memcpy(ktriov, auio.uio_iov, iovlen);
	}

	cnt = auio.uio_resid;
	error = (*fp->f_ops->fo_read)(fp, offset, &auio, fp->f_cred, flags);
	if (error)
		if (auio.uio_resid != cnt && (error == ERESTART ||
		    error == EINTR || error == EWOULDBLOCK))
			error = 0;
	cnt -= auio.uio_resid;
	*retval = cnt;

	if (ktriov != NULL) {
		ktrgeniov(fd, UIO_READ, ktriov, cnt, error);
		kmem_free(ktriov, iovlen);
	}

 done:
	if (needfree)
		kmem_free(needfree, iovlen);
 out:
	FILE_UNUSE(fp, l);
	return (error);
}

/*
 * Write system call
 */
int
sys_write(lwp_t *l, void *v, register_t *retval)
{
	struct sys_write_args /* {
		syscallarg(int)		fd;
		syscallarg(const void *) buf;
		syscallarg(size_t)	nbyte;
	} */ *uap = v;
	int		fd;
	struct file	*fp;

	fd = SCARG(uap, fd);

	if ((fp = fd_getfile(curproc->p_fd, fd)) == NULL)
		return (EBADF);

	if ((fp->f_flag & FWRITE) == 0) {
		mutex_exit(&fp->f_lock);
		return (EBADF);
	}

	FILE_USE(fp);

	/* dofilewrite() will unuse the descriptor for us */
	return (dofilewrite(fd, fp, SCARG(uap, buf), SCARG(uap, nbyte),
	    &fp->f_offset, FOF_UPDATE_OFFSET, retval));
}

int
dofilewrite(int fd, struct file *fp, const void *buf,
	size_t nbyte, off_t *offset, int flags, register_t *retval)
{
	struct iovec aiov;
	struct uio auio;
	size_t cnt;
	int error;
	lwp_t *l;

	l = curlwp;

	aiov.iov_base = __UNCONST(buf);		/* XXXUNCONST kills const */
	aiov.iov_len = nbyte;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_resid = nbyte;
	auio.uio_rw = UIO_WRITE;
	auio.uio_vmspace = l->l_proc->p_vmspace;

	/*
	 * Writes return ssize_t because -1 is returned on error.  Therefore
	 * we must restrict the length to SSIZE_MAX to avoid garbage return
	 * values.
	 */
	if (auio.uio_resid > SSIZE_MAX) {
		error = EINVAL;
		goto out;
	}

	cnt = auio.uio_resid;
	error = (*fp->f_ops->fo_write)(fp, offset, &auio, fp->f_cred, flags);
	if (error) {
		if (auio.uio_resid != cnt && (error == ERESTART ||
		    error == EINTR || error == EWOULDBLOCK))
			error = 0;
		if (error == EPIPE) {
			mutex_enter(&proclist_mutex);
			psignal(l->l_proc, SIGPIPE);
			mutex_exit(&proclist_mutex);
		}
	}
	cnt -= auio.uio_resid;
	ktrgenio(fd, UIO_WRITE, buf, cnt, error);
	*retval = cnt;
 out:
	FILE_UNUSE(fp, l);
	return (error);
}

/*
 * Gather write system call
 */
int
sys_writev(lwp_t *l, void *v, register_t *retval)
{
	struct sys_writev_args /* {
		syscallarg(int)				fd;
		syscallarg(const struct iovec *)	iovp;
		syscallarg(int)				iovcnt;
	} */ *uap = v;

	return do_filewritev(SCARG(uap, fd), SCARG(uap, iovp),
	    SCARG(uap, iovcnt), NULL, FOF_UPDATE_OFFSET, retval);
}

int
do_filewritev(int fd, const struct iovec *iovp, int iovcnt,
	off_t *offset, int flags, register_t *retval)
{
	struct uio auio;
	struct iovec *iov, *needfree = NULL, aiov[UIO_SMALLIOV];
	int i, error;
	size_t cnt;
	u_int iovlen;
	struct file *fp;
	struct iovec *ktriov = NULL;
	lwp_t *l;

	l = curlwp;

	if (iovcnt == 0)
		return EINVAL;

	if ((fp = fd_getfile(l->l_proc->p_fd, fd)) == NULL)
		return EBADF;

	if ((fp->f_flag & FWRITE) == 0) {
		mutex_exit(&fp->f_lock);
		return EBADF;
	}

	FILE_USE(fp);

	if (offset == NULL)
		offset = &fp->f_offset;
	else {
		struct vnode *vp = fp->f_data;
		if (fp->f_type != DTYPE_VNODE || vp->v_type == VFIFO) {
			error = ESPIPE;
			goto out;
		}
		/*
		 * Test that the device is seekable ?
		 * XXX This works because no file systems actually
		 * XXX take any action on the seek operation.
		 */
		error = VOP_SEEK(vp, fp->f_offset, *offset, fp->f_cred);
		if (error != 0)
			goto out;
	}

	iovlen = iovcnt * sizeof(struct iovec);
	if (flags & FOF_IOV_SYSSPACE)
		iov = __UNCONST(iovp);
	else {
		iov = aiov;
		if ((u_int)iovcnt > UIO_SMALLIOV) {
			if ((u_int)iovcnt > IOV_MAX) {
				error = EINVAL;
				goto out;
			}
			iov = kmem_alloc(iovlen, KM_SLEEP);
			if (iov == NULL) {
				error = ENOMEM;
				goto out;
			}
			needfree = iov;
		}
		error = copyin(iovp, iov, iovlen);
		if (error)
			goto done;
	}

	auio.uio_iov = iov;
	auio.uio_iovcnt = iovcnt;
	auio.uio_rw = UIO_WRITE;
	auio.uio_vmspace = curproc->p_vmspace;

	auio.uio_resid = 0;
	for (i = 0; i < iovcnt; i++, iov++) {
		auio.uio_resid += iov->iov_len;
		/*
		 * Writes return ssize_t because -1 is returned on error.
		 * Therefore we must restrict the length to SSIZE_MAX to
		 * avoid garbage return values.
		 */
		if (iov->iov_len > SSIZE_MAX || auio.uio_resid > SSIZE_MAX) {
			error = EINVAL;
			goto done;
		}
	}

	/*
	 * if tracing, save a copy of iovec
	 */
	if (ktrpoint(KTR_GENIO)) {
		ktriov = kmem_alloc(iovlen, KM_SLEEP);
		if (ktriov != NULL)
			memcpy(ktriov, auio.uio_iov, iovlen);
	}

	cnt = auio.uio_resid;
	error = (*fp->f_ops->fo_write)(fp, offset, &auio, fp->f_cred, flags);
	if (error) {
		if (auio.uio_resid != cnt && (error == ERESTART ||
		    error == EINTR || error == EWOULDBLOCK))
			error = 0;
		if (error == EPIPE) {
			mutex_enter(&proclist_mutex);
			psignal(l->l_proc, SIGPIPE);
			mutex_exit(&proclist_mutex);
		}
	}
	cnt -= auio.uio_resid;
	*retval = cnt;

	if (ktriov != NULL) {
		ktrgeniov(fd, UIO_WRITE, ktriov, cnt, error);
		kmem_free(ktriov, iovlen);
	}

 done:
	if (needfree)
		kmem_free(needfree, iovlen);
 out:
	FILE_UNUSE(fp, l);
	return (error);
}

/*
 * Ioctl system call
 */
/* ARGSUSED */
int
sys_ioctl(lwp_t *l, void *v, register_t *retval)
{
	struct sys_ioctl_args /* {
		syscallarg(int)		fd;
		syscallarg(u_long)	com;
		syscallarg(void *)	data;
	} */ *uap = v;
	struct file	*fp;
	proc_t		*p;
	struct filedesc	*fdp;
	u_long		com;
	int		error;
	u_int		size;
	void		*data, *memp;
#define	STK_PARAMS	128
	u_long		stkbuf[STK_PARAMS/sizeof(u_long)];

	error = 0;
	p = l->l_proc;
	fdp = p->p_fd;

	if ((fp = fd_getfile(fdp, SCARG(uap, fd))) == NULL)
		return (EBADF);

	FILE_USE(fp);

	if ((fp->f_flag & (FREAD | FWRITE)) == 0) {
		error = EBADF;
		com = 0;
		goto out;
	}

	switch (com = SCARG(uap, com)) {
	case FIONCLEX:
		rw_enter(&fdp->fd_lock, RW_WRITER);
		fdp->fd_ofileflags[SCARG(uap, fd)] &= ~UF_EXCLOSE;
		rw_exit(&fdp->fd_lock);
		goto out;

	case FIOCLEX:
		rw_enter(&fdp->fd_lock, RW_WRITER);
		fdp->fd_ofileflags[SCARG(uap, fd)] |= UF_EXCLOSE;
		rw_exit(&fdp->fd_lock);
		goto out;
	}

	/*
	 * Interpret high order word to find amount of data to be
	 * copied to/from the user's address space.
	 */
	size = IOCPARM_LEN(com);
	if (size > IOCPARM_MAX) {
		error = ENOTTY;
		goto out;
	}
	memp = NULL;
	if (size > sizeof(stkbuf)) {
		memp = kmem_alloc(size, KM_SLEEP);
		data = memp;
	} else
		data = (void *)stkbuf;
	if (com&IOC_IN) {
		if (size) {
			error = copyin(SCARG(uap, data), data, size);
			if (error) {
				if (memp)
					kmem_free(memp, size);
				goto out;
			}
			ktrgenio(SCARG(uap, fd), UIO_WRITE, SCARG(uap, data),
			    size, 0);
		} else
			*(void **)data = SCARG(uap, data);
	} else if ((com&IOC_OUT) && size)
		/*
		 * Zero the buffer so the user always
		 * gets back something deterministic.
		 */
		memset(data, 0, size);
	else if (com&IOC_VOID)
		*(void **)data = SCARG(uap, data);

	switch (com) {

	case FIONBIO:
		mutex_enter(&fp->f_lock);
		if (*(int *)data != 0)
			fp->f_flag |= FNONBLOCK;
		else
			fp->f_flag &= ~FNONBLOCK;
		mutex_exit(&fp->f_lock);
		error = (*fp->f_ops->fo_ioctl)(fp, FIONBIO, data, l);
		break;

	case FIOASYNC:
		mutex_enter(&fp->f_lock);
		if (*(int *)data != 0)
			fp->f_flag |= FASYNC;
		else
			fp->f_flag &= ~FASYNC;
		mutex_exit(&fp->f_lock);
		error = (*fp->f_ops->fo_ioctl)(fp, FIOASYNC, data, l);
		break;

	default:
		error = (*fp->f_ops->fo_ioctl)(fp, com, data, l);
		/*
		 * Copy any data to user, size was
		 * already set and checked above.
		 */
		if (error == 0 && (com&IOC_OUT) && size) {
			error = copyout(data, SCARG(uap, data), size);
			ktrgenio(SCARG(uap, fd), UIO_READ, SCARG(uap, data),
			    size, error);
		}
		break;
	}
	if (memp)
		kmem_free(memp, size);
 out:
	FILE_UNUSE(fp, l);
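	/*
	 * Descriptive note: drivers indicate an unrecognized ioctl either
	 * with the legacy return value -1 (logged below) or with
	 * EPASSTHROUGH; both are mapped to ENOTTY before returning to
	 * the caller.
	 */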
	switch (error) {
	case -1:
		printf("sys_ioctl: _IO%s%s('%c', %lu, %lu) returned -1: "
		    "pid=%d comm=%s\n",
		    (com & IOC_IN) ? "W" : "", (com & IOC_OUT) ? "R" : "",
		    (char)IOCGROUP(com), (com & 0xff), IOCPARM_LEN(com),
		    p->p_pid, p->p_comm);
		/* FALLTHROUGH */
	case EPASSTHROUGH:
		error = ENOTTY;
		/* FALLTHROUGH */
	default:
		return (error);
	}
}

/*
 * Select system call.
 */
int
sys_pselect(lwp_t *l, void *v, register_t *retval)
{
	struct sys_pselect_args /* {
		syscallarg(int)				nd;
		syscallarg(fd_set *)			in;
		syscallarg(fd_set *)			ou;
		syscallarg(fd_set *)			ex;
		syscallarg(const struct timespec *)	ts;
		syscallarg(sigset_t *)			mask;
	} */ * const uap = v;
	struct timespec	ats;
	struct timeval	atv, *tv = NULL;
	sigset_t	amask, *mask = NULL;
	int		error;

	if (SCARG(uap, ts)) {
		error = copyin(SCARG(uap, ts), &ats, sizeof(ats));
		if (error)
			return error;
		atv.tv_sec = ats.tv_sec;
		atv.tv_usec = ats.tv_nsec / 1000;
		tv = &atv;
	}
	if (SCARG(uap, mask) != NULL) {
		error = copyin(SCARG(uap, mask), &amask, sizeof(amask));
		if (error)
			return error;
		mask = &amask;
	}

	return selcommon(l, retval, SCARG(uap, nd), SCARG(uap, in),
	    SCARG(uap, ou), SCARG(uap, ex), tv, mask);
}

int
inittimeleft(struct timeval *tv, struct timeval *sleeptv)
{
	if (itimerfix(tv))
		return -1;
	getmicrouptime(sleeptv);
	return 0;
}

int
gettimeleft(struct timeval *tv, struct timeval *sleeptv)
{
	/*
	 * We have to recalculate the timeout on every retry.
	 */
	struct timeval slepttv;
	/*
	 * reduce tv by elapsed time
	 * based on monotonic time scale
	 */
	getmicrouptime(&slepttv);
	timeradd(tv, sleeptv, tv);
	timersub(tv, &slepttv, tv);
	*sleeptv = slepttv;
	return tvtohz(tv);
}
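
/*
 * Typical caller pattern for the two helpers above, as used by
 * selcommon() and pollcommon() below:
 *
 *	timo = 0;
 *	if (tv && inittimeleft(tv, &sleeptv) == -1)
 *		return EINVAL;
 *	...
 *	if (tv && (timo = gettimeleft(tv, &sleeptv)) <= 0)
 *		break;			(timed out)
 *
 * gettimeleft() returns the remaining timeout converted to ticks.
 */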

int
sys_select(lwp_t *l, void *v, register_t *retval)
{
	struct sys_select_args /* {
		syscallarg(int)			nd;
		syscallarg(fd_set *)		in;
		syscallarg(fd_set *)		ou;
		syscallarg(fd_set *)		ex;
		syscallarg(struct timeval *)	tv;
	} */ * const uap = v;
	struct timeval atv, *tv = NULL;
	int error;

	if (SCARG(uap, tv)) {
		error = copyin(SCARG(uap, tv), (void *)&atv,
			sizeof(atv));
		if (error)
			return error;
		tv = &atv;
	}

	return selcommon(l, retval, SCARG(uap, nd), SCARG(uap, in),
	    SCARG(uap, ou), SCARG(uap, ex), tv, NULL);
}

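/*
 * Common code for select()/pselect().  The "bits" buffer below holds six
 * arrays of ni bytes each: slots 0-2 are the caller's in/ou/ex sets,
 * slots 3-5 receive the results written by selscan() and are copied back
 * out to the user on success.
 */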
int
selcommon(lwp_t *l, register_t *retval, int nd, fd_set *u_in,
	fd_set *u_ou, fd_set *u_ex, struct timeval *tv, sigset_t *mask)
{
	char		smallbits[howmany(FD_SETSIZE, NFDBITS) *
			    sizeof(fd_mask) * 6];
	proc_t		* const p = l->l_proc;
	char		*bits;
	int		ncoll, error, timo;
	size_t		ni;
	sigset_t	oldmask;
	struct timeval  sleeptv;

	error = 0;
	if (nd < 0)
		return (EINVAL);
	if (nd > p->p_fd->fd_nfiles) {
		/* forgiving; slightly wrong */
		nd = p->p_fd->fd_nfiles;
	}
	ni = howmany(nd, NFDBITS) * sizeof(fd_mask);
	if (ni * 6 > sizeof(smallbits))
		bits = kmem_alloc(ni * 6, KM_SLEEP);
	else
		bits = smallbits;

#define	getbits(name, x)						\
	if (u_ ## name) {						\
		error = copyin(u_ ## name, bits + ni * x, ni);		\
		if (error)						\
			goto done;					\
	} else								\
		memset(bits + ni * x, 0, ni);
	getbits(in, 0);
	getbits(ou, 1);
	getbits(ex, 2);
#undef	getbits

	timo = 0;
	if (tv && inittimeleft(tv, &sleeptv) == -1) {
		error = EINVAL;
		goto done;
	}

	if (mask) {
		sigminusset(&sigcantmask, mask);
		mutex_enter(&p->p_smutex);
		oldmask = l->l_sigmask;
		l->l_sigmask = *mask;
		mutex_exit(&p->p_smutex);
	} else
		oldmask = l->l_sigmask;	/* XXXgcc */

	mutex_enter(&select_lock);
	SLIST_INIT(&l->l_selwait);
	for (;;) {
		l->l_selflag = SEL_SCANNING;
		ncoll = nselcoll;
		mutex_exit(&select_lock);

		error = selscan(l, (fd_mask *)(bits + ni * 0),
		    (fd_mask *)(bits + ni * 3), nd, retval);

		mutex_enter(&select_lock);
		if (error || *retval)
			break;
		if (tv && (timo = gettimeleft(tv, &sleeptv)) <= 0)
			break;
		if (l->l_selflag != SEL_SCANNING || ncoll != nselcoll)
			continue;
		l->l_selflag = SEL_BLOCKING;
		error = cv_timedwait_sig(&select_cv, &select_lock, timo);
		if (error != 0)
			break;
	}
	selclear();
	mutex_exit(&select_lock);

	if (mask) {
		mutex_enter(&p->p_smutex);
		l->l_sigmask = oldmask;
		mutex_exit(&p->p_smutex);
	}

 done:
	/* select is not restarted after signals... */
	if (error == ERESTART)
		error = EINTR;
	if (error == EWOULDBLOCK)
		error = 0;
	if (error == 0 && u_in != NULL)
		error = copyout(bits + ni * 3, u_in, ni);
	if (error == 0 && u_ou != NULL)
		error = copyout(bits + ni * 4, u_ou, ni);
	if (error == 0 && u_ex != NULL)
		error = copyout(bits + ni * 5, u_ex, ni);
	if (bits != smallbits)
		kmem_free(bits, ni * 6);
	return (error);
}

int
selscan(lwp_t *l, fd_mask *ibitp, fd_mask *obitp, int nfd,
	register_t *retval)
{
	static const int flag[3] = { POLLRDNORM | POLLHUP | POLLERR,
			       POLLWRNORM | POLLHUP | POLLERR,
			       POLLRDBAND };
	proc_t *p = l->l_proc;
	struct filedesc	*fdp;
	int msk, i, j, fd, n;
	fd_mask ibits, obits;
	struct file *fp;

	fdp = p->p_fd;
	n = 0;
	for (msk = 0; msk < 3; msk++) {
		for (i = 0; i < nfd; i += NFDBITS) {
			ibits = *ibitp++;
			obits = 0;
			while ((j = ffs(ibits)) && (fd = i + --j) < nfd) {
				ibits &= ~(1 << j);
				if ((fp = fd_getfile(fdp, fd)) == NULL)
					return (EBADF);
				FILE_USE(fp);
				if ((*fp->f_ops->fo_poll)(fp, flag[msk], l)) {
					obits |= (1 << j);
					n++;
				}
				FILE_UNUSE(fp, l);
			}
			*obitp++ = obits;
		}
	}
	*retval = n;
	return (0);
}

/*
 * Poll system call.
 */
int
sys_poll(lwp_t *l, void *v, register_t *retval)
{
	struct sys_poll_args /* {
		syscallarg(struct pollfd *)	fds;
		syscallarg(u_int)		nfds;
		syscallarg(int)			timeout;
	} */ * const uap = v;
	struct timeval	atv, *tv = NULL;

	if (SCARG(uap, timeout) != INFTIM) {
		atv.tv_sec = SCARG(uap, timeout) / 1000;
		atv.tv_usec = (SCARG(uap, timeout) % 1000) * 1000;
		tv = &atv;
	}

	return pollcommon(l, retval, SCARG(uap, fds), SCARG(uap, nfds),
		tv, NULL);
}

/*
 * Poll system call with a timespec timeout and signal mask (pollts).
 */
int
sys_pollts(lwp_t *l, void *v, register_t *retval)
{
	struct sys_pollts_args /* {
		syscallarg(struct pollfd *)		fds;
		syscallarg(u_int)			nfds;
		syscallarg(const struct timespec *)	ts;
		syscallarg(const sigset_t *)		mask;
	} */ * const uap = v;
	struct timespec	ats;
	struct timeval	atv, *tv = NULL;
	sigset_t	amask, *mask = NULL;
	int		error;

	if (SCARG(uap, ts)) {
		error = copyin(SCARG(uap, ts), &ats, sizeof(ats));
		if (error)
			return error;
		atv.tv_sec = ats.tv_sec;
		atv.tv_usec = ats.tv_nsec / 1000;
		tv = &atv;
	}
	if (SCARG(uap, mask)) {
		error = copyin(SCARG(uap, mask), &amask, sizeof(amask));
		if (error)
			return error;
		mask = &amask;
	}

	return pollcommon(l, retval, SCARG(uap, fds), SCARG(uap, nfds),
		tv, mask);
}

int
pollcommon(lwp_t *l, register_t *retval,
	struct pollfd *u_fds, u_int nfds,
	struct timeval *tv, sigset_t *mask)
{
	char		smallbits[32 * sizeof(struct pollfd)];
	proc_t		* const p = l->l_proc;
	void		*bits;
	sigset_t	oldmask;
	int		ncoll, error, timo;
	size_t		ni;
	struct timeval	sleeptv;

	if (nfds > p->p_fd->fd_nfiles) {
		/* forgiving; slightly wrong */
		nfds = p->p_fd->fd_nfiles;
	}
	ni = nfds * sizeof(struct pollfd);
	if (ni > sizeof(smallbits))
		bits = kmem_alloc(ni, KM_SLEEP);
	else
		bits = smallbits;

	error = copyin(u_fds, bits, ni);
	if (error)
		goto done;

	timo = 0;
	if (tv && inittimeleft(tv, &sleeptv) == -1) {
		error = EINVAL;
		goto done;
	}

	if (mask) {
		sigminusset(&sigcantmask, mask);
		mutex_enter(&p->p_smutex);
		oldmask = l->l_sigmask;
		l->l_sigmask = *mask;
		mutex_exit(&p->p_smutex);
	} else
		oldmask = l->l_sigmask;	/* XXXgcc */

	mutex_enter(&select_lock);
	SLIST_INIT(&l->l_selwait);
	for (;;) {
		ncoll = nselcoll;
		l->l_selflag = SEL_SCANNING;
		mutex_exit(&select_lock);

		error = pollscan(l, (struct pollfd *)bits, nfds, retval);

		mutex_enter(&select_lock);
		if (error || *retval)
			break;
		if (tv && (timo = gettimeleft(tv, &sleeptv)) <= 0)
			break;
		if (l->l_selflag != SEL_SCANNING || nselcoll != ncoll)
			continue;
		l->l_selflag = SEL_BLOCKING;
		error = cv_timedwait_sig(&select_cv, &select_lock, timo);
		if (error != 0)
			break;
	}
	selclear();
	mutex_exit(&select_lock);

	if (mask) {
		mutex_enter(&p->p_smutex);
		l->l_sigmask = oldmask;
		mutex_exit(&p->p_smutex);
	}
 done:
	/* poll is not restarted after signals... */
	if (error == ERESTART)
		error = EINTR;
	if (error == EWOULDBLOCK)
		error = 0;
	if (error == 0)
		error = copyout(bits, u_fds, ni);
	if (bits != smallbits)
		kmem_free(bits, ni);
	return (error);
}

int
pollscan(lwp_t *l, struct pollfd *fds, int nfd, register_t *retval)
{
	proc_t		*p = l->l_proc;
	struct filedesc	*fdp;
	int		i, n;
	struct file	*fp;

	fdp = p->p_fd;
	n = 0;
	for (i = 0; i < nfd; i++, fds++) {
		if (fds->fd >= fdp->fd_nfiles) {
			fds->revents = POLLNVAL;
			n++;
		} else if (fds->fd < 0) {
			fds->revents = 0;
		} else {
			if ((fp = fd_getfile(fdp, fds->fd)) == NULL) {
				fds->revents = POLLNVAL;
				n++;
			} else {
				FILE_USE(fp);
				fds->revents = (*fp->f_ops->fo_poll)(fp,
				    fds->events | POLLERR | POLLHUP, l);
				if (fds->revents != 0)
					n++;
				FILE_UNUSE(fp, l);
			}
		}
	}
	*retval = n;
	return (0);
}

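/*
 * Convenience poll routine for devices that are always ready for I/O:
 * report whichever of the normal read/write events were requested.
 */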
/*ARGSUSED*/
int
seltrue(dev_t dev, int events, lwp_t *l)
{

	return (events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
}

/*
 * Record a select request.
 */
void
selrecord(lwp_t *selector, struct selinfo *sip)
{

	mutex_enter(&select_lock);
	if (sip->sel_lwp == NULL) {
		/* First named waiter, although there may be more. */
		sip->sel_lwp = selector;
		SLIST_INSERT_HEAD(&selector->l_selwait, sip, sel_chain);
	} else if (sip->sel_lwp != selector) {
		/* Multiple waiters. */
		sip->sel_collision = true;
	}
	mutex_exit(&select_lock);
}

/*
 * Do a wakeup when a selectable event occurs.
 */
void
selwakeup(struct selinfo *sip)
{
	lwp_t *l;

	mutex_enter(&select_lock);
	if (sip->sel_collision) {
		/* Multiple waiters - just notify everybody. */
		nselcoll++;
		sip->sel_collision = false;
		cv_broadcast(&select_cv);
	} else if (sip->sel_lwp != NULL) {
		/* Only one LWP waiting. */
		l = sip->sel_lwp;
		if (l->l_selflag == SEL_BLOCKING) {
			/*
			 * If it's sleeping, wake it up.  If not, it's
			 * already awake but hasn't yet removed itself
			 * from the selector.  We reset the state below
			 * so that we only attempt to do this once.
			 */
			lwp_lock(l);
			if (l->l_wchan == &select_cv) {
				/* lwp_unsleep() releases the LWP lock. */
				lwp_unsleep(l);
			} else
				lwp_unlock(l);
		} else {
			/*
			 * Not yet asleep.  Reset its state below so that
			 * it will go around again.
			 */
		}
		l->l_selflag = SEL_RESET;
	}
	mutex_exit(&select_lock);
}

void
selnotify(struct selinfo *sip, long knhint)
{

	selwakeup(sip);
	KNOTE(&sip->sel_klist, knhint);
}
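
/*
 * Illustrative sketch (not part of this file): a character driver's
 * poll routine typically calls selrecord() when no data is ready, and
 * its interrupt handler later calls selnotify() on the same selinfo.
 * Assuming a hypothetical softc containing "struct selinfo sc_rsel":
 *
 *	int
 *	foo_poll(dev_t dev, int events, lwp_t *l)
 *	{
 *		int revents = 0;
 *
 *		if (events & (POLLIN | POLLRDNORM)) {
 *			if (data is available)
 *				revents |= events & (POLLIN | POLLRDNORM);
 *			else
 *				selrecord(l, &sc->sc_rsel);
 *		}
 *		return revents;
 *	}
 *
 * and, when data arrives:
 *
 *	selnotify(&sc->sc_rsel, 0);
 */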

/*
 * Remove an LWP from all objects that it is waiting for.
 */
static void
selclear(void)
{
	struct selinfo *sip;
	lwp_t *l = curlwp;

	KASSERT(mutex_owned(&select_lock));

	SLIST_FOREACH(sip, &l->l_selwait, sel_chain) {
		KASSERT(sip->sel_lwp == l);
		sip->sel_lwp = NULL;
	}
}

/*
 * Initialize the select/poll system calls.
 */
void
selsysinit(void)
{

	mutex_init(&select_lock, MUTEX_DRIVER, IPL_VM);
	cv_init(&select_cv, "select");
}

/*
 * Initialize a selector.
 */
void
selinit(struct selinfo *sip)
{

	memset(sip, 0, sizeof(*sip));
}

/*
 * Destroy a selector.  The owning object must not gain new
 * references while this is in progress: all activity on the
 * selector must be stopped.
 */
void
seldestroy(struct selinfo *sip)
{
	lwp_t *l;

	if (sip->sel_lwp == NULL)
		return;

	mutex_enter(&select_lock);
	if ((l = sip->sel_lwp) != NULL) {
		/* This should rarely happen, so SLIST_REMOVE() is OK. */
		SLIST_REMOVE(&l->l_selwait, sip, selinfo, sel_chain);
		sip->sel_lwp = NULL;
	}
	mutex_exit(&select_lock);
}