sys_generic.c revision 1.83.2.4

1 1.83.2.4 yamt /* $NetBSD: sys_generic.c,v 1.83.2.4 2007/09/03 14:41:08 yamt Exp $ */
2 1.83.2.4 yamt
3 1.83.2.4 yamt /*-
4 1.83.2.4 yamt * Copyright (c) 2007 The NetBSD Foundation, Inc.
5 1.83.2.4 yamt * All rights reserved.
6 1.83.2.4 yamt *
7 1.83.2.4 yamt * This code is derived from software contributed to The NetBSD Foundation
8 1.83.2.4 yamt * by Andrew Doran.
9 1.83.2.4 yamt *
10 1.83.2.4 yamt * Redistribution and use in source and binary forms, with or without
11 1.83.2.4 yamt * modification, are permitted provided that the following conditions
12 1.83.2.4 yamt * are met:
13 1.83.2.4 yamt * 1. Redistributions of source code must retain the above copyright
14 1.83.2.4 yamt * notice, this list of conditions and the following disclaimer.
15 1.83.2.4 yamt * 2. Redistributions in binary form must reproduce the above copyright
16 1.83.2.4 yamt * notice, this list of conditions and the following disclaimer in the
17 1.83.2.4 yamt * documentation and/or other materials provided with the distribution.
18 1.83.2.4 yamt * 3. All advertising materials mentioning features or use of this software
19 1.83.2.4 yamt * must display the following acknowledgement:
20 1.83.2.4 yamt * This product includes software developed by the NetBSD
21 1.83.2.4 yamt * Foundation, Inc. and its contributors.
22 1.83.2.4 yamt * 4. Neither the name of The NetBSD Foundation nor the names of its
23 1.83.2.4 yamt * contributors may be used to endorse or promote products derived
24 1.83.2.4 yamt * from this software without specific prior written permission.
25 1.83.2.4 yamt *
26 1.83.2.4 yamt * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
27 1.83.2.4 yamt * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28 1.83.2.4 yamt * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29 1.83.2.4 yamt * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
30 1.83.2.4 yamt * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31 1.83.2.4 yamt * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32 1.83.2.4 yamt * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33 1.83.2.4 yamt * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34 1.83.2.4 yamt * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35 1.83.2.4 yamt * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36 1.83.2.4 yamt * POSSIBILITY OF SUCH DAMAGE.
37 1.83.2.4 yamt */
38 1.15 cgd
39 1.15 cgd /*
40 1.15 cgd * Copyright (c) 1982, 1986, 1989, 1993
41 1.15 cgd * The Regents of the University of California. All rights reserved.
42 1.15 cgd * (c) UNIX System Laboratories, Inc.
43 1.15 cgd * All or some portions of this file are derived from material licensed
44 1.15 cgd * to the University of California by American Telephone and Telegraph
45 1.15 cgd * Co. or Unix System Laboratories, Inc. and are reproduced herein with
46 1.15 cgd * the permission of UNIX System Laboratories, Inc.
47 1.15 cgd *
48 1.15 cgd * Redistribution and use in source and binary forms, with or without
49 1.15 cgd * modification, are permitted provided that the following conditions
50 1.15 cgd * are met:
51 1.15 cgd * 1. Redistributions of source code must retain the above copyright
52 1.15 cgd * notice, this list of conditions and the following disclaimer.
53 1.15 cgd * 2. Redistributions in binary form must reproduce the above copyright
54 1.15 cgd * notice, this list of conditions and the following disclaimer in the
55 1.15 cgd * documentation and/or other materials provided with the distribution.
56 1.77 agc * 3. Neither the name of the University nor the names of its contributors
57 1.15 cgd * may be used to endorse or promote products derived from this software
58 1.15 cgd * without specific prior written permission.
59 1.15 cgd *
60 1.15 cgd * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
61 1.15 cgd * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
62 1.15 cgd * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
63 1.15 cgd * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
64 1.15 cgd * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
65 1.15 cgd * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
66 1.15 cgd * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
67 1.15 cgd * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
68 1.15 cgd * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
69 1.15 cgd * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
70 1.15 cgd * SUCH DAMAGE.
71 1.15 cgd *
72 1.36 fvdl * @(#)sys_generic.c 8.9 (Berkeley) 2/14/95
73 1.15 cgd */
74 1.59 lukem
75 1.83.2.4 yamt /*
76 1.83.2.4 yamt * System calls relating to files.
77 1.83.2.4 yamt */
78 1.37 thorpej
79 1.83.2.4 yamt #include <sys/cdefs.h>
80 1.83.2.4 yamt __KERNEL_RCSID(0, "$NetBSD: sys_generic.c,v 1.83.2.4 2007/09/03 14:41:08 yamt Exp $");
81 1.15 cgd
82 1.15 cgd #include <sys/param.h>
83 1.15 cgd #include <sys/systm.h>
84 1.15 cgd #include <sys/filedesc.h>
85 1.15 cgd #include <sys/ioctl.h>
86 1.15 cgd #include <sys/file.h>
87 1.15 cgd #include <sys/proc.h>
88 1.15 cgd #include <sys/socketvar.h>
89 1.22 christos #include <sys/signalvar.h>
90 1.15 cgd #include <sys/uio.h>
91 1.15 cgd #include <sys/kernel.h>
92 1.15 cgd #include <sys/stat.h>
93 1.83.2.4 yamt #include <sys/kmem.h>
94 1.28 mycroft #include <sys/poll.h>
95 1.83.2.4 yamt #include <sys/vnode.h>
96 1.16 cgd #include <sys/mount.h>
97 1.16 cgd #include <sys/syscallargs.h>
98 1.83.2.4 yamt #include <sys/ktrace.h>
99 1.22 christos
100 1.83.2.1 yamt #include <uvm/uvm_extern.h>
101 1.83.2.1 yamt
102 1.83.2.4 yamt /* Flags for lwp::l_selflag. */
103 1.83.2.4 yamt #define SEL_RESET 0 /* awoken, interrupted, or not yet polling */
104 1.83.2.4 yamt #define SEL_SCANNING 1 /* polling descriptors */
105 1.83.2.4 yamt #define SEL_BLOCKING 2 /* about to block on select_cv */
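
/*
 * A short summary of the l_selflag states above, inferred from the code
 * below (descriptive only): selcommon() and pollcommon() set SEL_SCANNING
 * before each scan of the descriptors, switch to SEL_BLOCKING just before
 * sleeping on select_cv, and selwakeup() puts the state back to SEL_RESET
 * so that a scanner that raced with the wakeup retries instead of blocking.
 */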
106 1.83.2.4 yamt
107 1.83.2.4 yamt static int selscan(lwp_t *, fd_mask *, fd_mask *, int, register_t *);
108 1.83.2.4 yamt static int pollscan(lwp_t *, struct pollfd *, int, register_t *);
109 1.83.2.4 yamt static void selclear(void);
110 1.83.2.4 yamt
111 1.83.2.4 yamt /* Global state for select()/poll(). */
112 1.83.2.4 yamt kmutex_t select_lock;
113 1.83.2.4 yamt kcondvar_t select_cv;
114 1.83.2.4 yamt int nselcoll;
115 1.82 matt
116 1.15 cgd /*
117 1.15 cgd * Read system call.
118 1.15 cgd */
119 1.15 cgd /* ARGSUSED */
120 1.22 christos int
121 1.83.2.4 yamt sys_read(lwp_t *l, void *v, register_t *retval)
122 1.20 thorpej {
123 1.47 augustss struct sys_read_args /* {
124 1.53 lukem syscallarg(int) fd;
125 1.53 lukem syscallarg(void *) buf;
126 1.53 lukem syscallarg(size_t) nbyte;
127 1.20 thorpej } */ *uap = v;
128 1.53 lukem int fd;
129 1.53 lukem struct file *fp;
130 1.83.2.4 yamt proc_t *p;
131 1.53 lukem struct filedesc *fdp;
132 1.39 thorpej
133 1.53 lukem fd = SCARG(uap, fd);
134 1.69 thorpej p = l->l_proc;
135 1.53 lukem fdp = p->p_fd;
136 1.56 thorpej
137 1.56 thorpej if ((fp = fd_getfile(fdp, fd)) == NULL)
138 1.56 thorpej return (EBADF);
139 1.56 thorpej
140 1.70 pk if ((fp->f_flag & FREAD) == 0) {
141 1.70 pk simple_unlock(&fp->f_slock);
142 1.39 thorpej return (EBADF);
143 1.70 pk }
144 1.39 thorpej
145 1.45 thorpej FILE_USE(fp);
146 1.45 thorpej
147 1.45 thorpej /* dofileread() will unuse the descriptor for us */
148 1.83.2.1 yamt return (dofileread(l, fd, fp, SCARG(uap, buf), SCARG(uap, nbyte),
149 1.39 thorpej &fp->f_offset, FOF_UPDATE_OFFSET, retval));
150 1.39 thorpej }
151 1.39 thorpej
152 1.39 thorpej int
153 1.83.2.4 yamt dofileread(lwp_t *l, int fd, struct file *fp, void *buf, size_t nbyte,
154 1.53 lukem off_t *offset, int flags, register_t *retval)
155 1.53 lukem {
156 1.83.2.1 yamt struct iovec aiov;
157 1.83.2.1 yamt struct uio auio;
158 1.83.2.4 yamt proc_t *p;
159 1.83.2.1 yamt struct vmspace *vm;
160 1.83.2.1 yamt size_t cnt;
161 1.83.2.1 yamt int error;
162 1.83.2.1 yamt p = l->l_proc;
163 1.83.2.1 yamt
164 1.83.2.1 yamt error = proc_vmspace_getref(p, &vm);
165 1.83.2.1 yamt if (error) {
166 1.83.2.1 yamt goto out;
167 1.83.2.1 yamt }
168 1.15 cgd
169 1.83.2.4 yamt aiov.iov_base = (void *)buf;
170 1.39 thorpej aiov.iov_len = nbyte;
171 1.15 cgd auio.uio_iov = &aiov;
172 1.15 cgd auio.uio_iovcnt = 1;
173 1.39 thorpej auio.uio_resid = nbyte;
174 1.15 cgd auio.uio_rw = UIO_READ;
175 1.83.2.1 yamt auio.uio_vmspace = vm;
176 1.40 thorpej
177 1.40 thorpej /*
178 1.40 thorpej * Reads return ssize_t because -1 is returned on error. Therefore
179 1.40 thorpej * we must restrict the length to SSIZE_MAX to avoid garbage return
180 1.40 thorpej * values.
181 1.40 thorpej */
182 1.45 thorpej if (auio.uio_resid > SSIZE_MAX) {
183 1.45 thorpej error = EINVAL;
184 1.45 thorpej goto out;
185 1.45 thorpej }
186 1.40 thorpej
187 1.38 thorpej cnt = auio.uio_resid;
188 1.39 thorpej error = (*fp->f_ops->fo_read)(fp, offset, &auio, fp->f_cred, flags);
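	/*
	 * Descriptive note: if the transfer was interrupted or would have
	 * blocked after some data had already been moved, the error is
	 * suppressed below and the partial count is reported, so the caller
	 * sees a short read rather than a failure.  The readv, write and
	 * writev paths below follow the same convention.
	 */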
189 1.22 christos if (error)
190 1.15 cgd if (auio.uio_resid != cnt && (error == ERESTART ||
191 1.15 cgd error == EINTR || error == EWOULDBLOCK))
192 1.15 cgd error = 0;
193 1.15 cgd cnt -= auio.uio_resid;
194 1.83.2.4 yamt ktrgenio(fd, UIO_READ, buf, cnt, error);
195 1.15 cgd *retval = cnt;
196 1.45 thorpej out:
197 1.83.2.1 yamt FILE_UNUSE(fp, l);
198 1.83.2.1 yamt uvmspace_free(vm);
199 1.15 cgd return (error);
200 1.15 cgd }
201 1.15 cgd
202 1.15 cgd /*
203 1.15 cgd * Scatter read system call.
204 1.15 cgd */
205 1.22 christos int
206 1.83.2.4 yamt sys_readv(lwp_t *l, void *v, register_t *retval)
207 1.20 thorpej {
208 1.47 augustss struct sys_readv_args /* {
209 1.53 lukem syscallarg(int) fd;
210 1.53 lukem syscallarg(const struct iovec *) iovp;
211 1.53 lukem syscallarg(int) iovcnt;
212 1.20 thorpej } */ *uap = v;
213 1.83.2.4 yamt
214 1.83.2.4 yamt return do_filereadv(l, SCARG(uap, fd), SCARG(uap, iovp),
215 1.83.2.4 yamt SCARG(uap, iovcnt), NULL, FOF_UPDATE_OFFSET, retval);
216 1.83.2.4 yamt }
217 1.83.2.4 yamt
218 1.83.2.4 yamt int
219 1.83.2.4 yamt do_filereadv(struct lwp *l, int fd, const struct iovec *iovp, int iovcnt,
220 1.83.2.4 yamt off_t *offset, int flags, register_t *retval)
221 1.83.2.4 yamt {
222 1.83.2.4 yamt struct proc *p;
223 1.83.2.4 yamt struct uio auio;
224 1.83.2.4 yamt struct iovec *iov, *needfree = NULL, aiov[UIO_SMALLIOV];
225 1.83.2.4 yamt struct vmspace *vm;
226 1.83.2.4 yamt int i, error;
227 1.83.2.4 yamt size_t cnt;
228 1.83.2.4 yamt u_int iovlen;
229 1.83.2.4 yamt struct file *fp;
230 1.53 lukem struct filedesc *fdp;
231 1.83.2.4 yamt struct iovec *ktriov = NULL;
232 1.83.2.4 yamt
233 1.83.2.4 yamt if (iovcnt == 0)
234 1.83.2.4 yamt return EINVAL;
235 1.39 thorpej
236 1.69 thorpej p = l->l_proc;
237 1.53 lukem fdp = p->p_fd;
238 1.56 thorpej
239 1.56 thorpej if ((fp = fd_getfile(fdp, fd)) == NULL)
240 1.83.2.4 yamt return EBADF;
241 1.56 thorpej
242 1.70 pk if ((fp->f_flag & FREAD) == 0) {
243 1.70 pk simple_unlock(&fp->f_slock);
244 1.83.2.4 yamt return EBADF;
245 1.70 pk }
246 1.39 thorpej
247 1.45 thorpej FILE_USE(fp);
248 1.45 thorpej
249 1.83.2.4 yamt if (offset == NULL)
250 1.83.2.4 yamt offset = &fp->f_offset;
251 1.83.2.4 yamt else {
252 1.83.2.4 yamt struct vnode *vp = fp->f_data;
253 1.83.2.4 yamt if (fp->f_type != DTYPE_VNODE || vp->v_type == VFIFO) {
254 1.83.2.4 yamt error = ESPIPE;
255 1.83.2.4 yamt goto out;
256 1.83.2.4 yamt }
257 1.83.2.4 yamt /*
258 1.83.2.4 yamt * Test that the device is seekable ?
259 1.83.2.4 yamt * XXX This works because no file systems actually
260 1.83.2.4 yamt * XXX take any action on the seek operation.
261 1.83.2.4 yamt */
262 1.83.2.4 yamt error = VOP_SEEK(vp, fp->f_offset, *offset, fp->f_cred);
263 1.83.2.4 yamt if (error != 0)
264 1.83.2.4 yamt goto out;
265 1.83.2.4 yamt }
266 1.15 cgd
267 1.83.2.1 yamt error = proc_vmspace_getref(p, &vm);
268 1.83.2.4 yamt if (error)
269 1.83.2.1 yamt goto out;
270 1.83.2.1 yamt
271 1.42 perry iovlen = iovcnt * sizeof(struct iovec);
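	/*
	 * Descriptive note: when the caller supplies a kernel-space iovec
	 * (FOF_IOV_SYSSPACE) it is used directly; otherwise the array is
	 * copied in from userspace, using the on-stack aiov[] for up to
	 * UIO_SMALLIOV entries and a temporary kmem_alloc() buffer (freed
	 * at "done:") for larger counts, with counts above IOV_MAX rejected.
	 */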
272 1.83.2.4 yamt if (flags & FOF_IOV_SYSSPACE)
273 1.83.2.4 yamt iov = __UNCONST(iovp);
274 1.83.2.4 yamt else {
275 1.15 cgd iov = aiov;
276 1.83.2.4 yamt if ((u_int)iovcnt > UIO_SMALLIOV) {
277 1.83.2.4 yamt if ((u_int)iovcnt > IOV_MAX) {
278 1.83.2.4 yamt error = EINVAL;
279 1.83.2.4 yamt goto out;
280 1.83.2.4 yamt }
281 1.83.2.4 yamt iov = kmem_alloc(iovlen, KM_SLEEP);
282 1.83.2.4 yamt if (iov == NULL) {
283 1.83.2.4 yamt error = ENOMEM;
284 1.83.2.4 yamt goto out;
285 1.83.2.4 yamt }
286 1.83.2.4 yamt needfree = iov;
287 1.83.2.4 yamt }
288 1.83.2.4 yamt error = copyin(iovp, iov, iovlen);
289 1.83.2.4 yamt if (error)
290 1.83.2.4 yamt goto done;
291 1.45 thorpej }
292 1.41 kleink
293 1.15 cgd auio.uio_iov = iov;
294 1.34 mycroft auio.uio_iovcnt = iovcnt;
295 1.15 cgd auio.uio_rw = UIO_READ;
296 1.83.2.1 yamt auio.uio_vmspace = vm;
297 1.83.2.4 yamt
298 1.15 cgd auio.uio_resid = 0;
299 1.83.2.4 yamt for (i = 0; i < iovcnt; i++, iov++) {
300 1.15 cgd auio.uio_resid += iov->iov_len;
301 1.40 thorpej /*
302 1.40 thorpej * Reads return ssize_t because -1 is returned on error.
303 1.40 thorpej * Therefore we must restrict the length to SSIZE_MAX to
304 1.40 thorpej * avoid garbage return values.
305 1.40 thorpej */
306 1.40 thorpej if (iov->iov_len > SSIZE_MAX || auio.uio_resid > SSIZE_MAX) {
307 1.15 cgd error = EINVAL;
308 1.15 cgd goto done;
309 1.15 cgd }
310 1.15 cgd }
311 1.83.2.4 yamt
312 1.15 cgd /*
313 1.15 cgd * if tracing, save a copy of iovec
314 1.15 cgd */
315 1.83.2.4 yamt if (ktrpoint(KTR_GENIO)) {
316 1.83.2.4 yamt ktriov = kmem_alloc(iovlen, KM_SLEEP);
317 1.83.2.4 yamt if (ktriov != NULL)
318 1.83.2.4 yamt memcpy(ktriov, auio.uio_iov, iovlen);
319 1.15 cgd }
320 1.83.2.4 yamt
321 1.15 cgd cnt = auio.uio_resid;
322 1.39 thorpej error = (*fp->f_ops->fo_read)(fp, offset, &auio, fp->f_cred, flags);
323 1.22 christos if (error)
324 1.15 cgd if (auio.uio_resid != cnt && (error == ERESTART ||
325 1.15 cgd error == EINTR || error == EWOULDBLOCK))
326 1.15 cgd error = 0;
327 1.15 cgd cnt -= auio.uio_resid;
328 1.83.2.4 yamt *retval = cnt;
329 1.83.2.4 yamt
330 1.58 itohy if (ktriov != NULL) {
331 1.83.2.4 yamt ktrgeniov(fd, UIO_READ, ktriov, cnt, error);
332 1.83.2.4 yamt kmem_free(ktriov, iovlen);
333 1.15 cgd }
334 1.83.2.4 yamt
335 1.45 thorpej done:
336 1.15 cgd if (needfree)
337 1.83.2.4 yamt kmem_free(needfree, iovlen);
338 1.45 thorpej out:
339 1.83.2.1 yamt FILE_UNUSE(fp, l);
340 1.83.2.1 yamt uvmspace_free(vm);
341 1.15 cgd return (error);
342 1.15 cgd }
343 1.15 cgd
344 1.15 cgd /*
345 1.15 cgd * Write system call
346 1.15 cgd */
347 1.22 christos int
348 1.83.2.4 yamt sys_write(lwp_t *l, void *v, register_t *retval)
349 1.20 thorpej {
350 1.47 augustss struct sys_write_args /* {
351 1.53 lukem syscallarg(int) fd;
352 1.53 lukem syscallarg(const void *) buf;
353 1.53 lukem syscallarg(size_t) nbyte;
354 1.20 thorpej } */ *uap = v;
355 1.53 lukem int fd;
356 1.53 lukem struct file *fp;
357 1.83.2.4 yamt proc_t *p;
358 1.53 lukem struct filedesc *fdp;
359 1.39 thorpej
360 1.53 lukem fd = SCARG(uap, fd);
361 1.69 thorpej p = l->l_proc;
362 1.53 lukem fdp = p->p_fd;
363 1.56 thorpej
364 1.56 thorpej if ((fp = fd_getfile(fdp, fd)) == NULL)
365 1.56 thorpej return (EBADF);
366 1.56 thorpej
367 1.70 pk if ((fp->f_flag & FWRITE) == 0) {
368 1.70 pk simple_unlock(&fp->f_slock);
369 1.39 thorpej return (EBADF);
370 1.70 pk }
371 1.39 thorpej
372 1.45 thorpej FILE_USE(fp);
373 1.45 thorpej
374 1.45 thorpej /* dofilewrite() will unuse the descriptor for us */
375 1.83.2.1 yamt return (dofilewrite(l, fd, fp, SCARG(uap, buf), SCARG(uap, nbyte),
376 1.39 thorpej &fp->f_offset, FOF_UPDATE_OFFSET, retval));
377 1.39 thorpej }
378 1.39 thorpej
379 1.39 thorpej int
380 1.83.2.4 yamt dofilewrite(lwp_t *l, int fd, struct file *fp, const void *buf,
381 1.53 lukem size_t nbyte, off_t *offset, int flags, register_t *retval)
382 1.53 lukem {
383 1.83.2.1 yamt struct iovec aiov;
384 1.83.2.1 yamt struct uio auio;
385 1.83.2.4 yamt proc_t *p;
386 1.83.2.1 yamt struct vmspace *vm;
387 1.83.2.1 yamt size_t cnt;
388 1.83.2.1 yamt int error;
389 1.15 cgd
390 1.83.2.1 yamt p = l->l_proc;
391 1.83.2.1 yamt error = proc_vmspace_getref(p, &vm);
392 1.83.2.1 yamt if (error) {
393 1.83.2.1 yamt goto out;
394 1.83.2.1 yamt }
395 1.83 christos aiov.iov_base = __UNCONST(buf); /* XXXUNCONST kills const */
396 1.39 thorpej aiov.iov_len = nbyte;
397 1.15 cgd auio.uio_iov = &aiov;
398 1.15 cgd auio.uio_iovcnt = 1;
399 1.39 thorpej auio.uio_resid = nbyte;
400 1.15 cgd auio.uio_rw = UIO_WRITE;
401 1.83.2.1 yamt auio.uio_vmspace = vm;
402 1.40 thorpej
403 1.40 thorpej /*
404 1.40 thorpej * Writes return ssize_t because -1 is returned on error. Therefore
405 1.40 thorpej * we must restrict the length to SSIZE_MAX to avoid garbage return
406 1.40 thorpej * values.
407 1.40 thorpej */
408 1.45 thorpej if (auio.uio_resid > SSIZE_MAX) {
409 1.45 thorpej error = EINVAL;
410 1.45 thorpej goto out;
411 1.45 thorpej }
412 1.40 thorpej
413 1.38 thorpej cnt = auio.uio_resid;
414 1.39 thorpej error = (*fp->f_ops->fo_write)(fp, offset, &auio, fp->f_cred, flags);
415 1.22 christos if (error) {
416 1.15 cgd if (auio.uio_resid != cnt && (error == ERESTART ||
417 1.15 cgd error == EINTR || error == EWOULDBLOCK))
418 1.15 cgd error = 0;
419 1.83.2.3 yamt if (error == EPIPE) {
420 1.83.2.3 yamt mutex_enter(&proclist_mutex);
421 1.15 cgd psignal(p, SIGPIPE);
422 1.83.2.3 yamt mutex_exit(&proclist_mutex);
423 1.83.2.3 yamt }
424 1.15 cgd }
425 1.15 cgd cnt -= auio.uio_resid;
426 1.83.2.4 yamt ktrgenio(fd, UIO_WRITE, buf, cnt, error);
427 1.15 cgd *retval = cnt;
428 1.45 thorpej out:
429 1.83.2.1 yamt FILE_UNUSE(fp, l);
430 1.83.2.1 yamt uvmspace_free(vm);
431 1.15 cgd return (error);
432 1.15 cgd }
433 1.15 cgd
434 1.15 cgd /*
435 1.15 cgd * Gather write system call
436 1.15 cgd */
437 1.22 christos int
438 1.83.2.4 yamt sys_writev(lwp_t *l, void *v, register_t *retval)
439 1.20 thorpej {
440 1.47 augustss struct sys_writev_args /* {
441 1.53 lukem syscallarg(int) fd;
442 1.53 lukem syscallarg(const struct iovec *) iovp;
443 1.53 lukem syscallarg(int) iovcnt;
444 1.20 thorpej } */ *uap = v;
445 1.83.2.4 yamt
446 1.83.2.4 yamt return do_filewritev(l, SCARG(uap, fd), SCARG(uap, iovp),
447 1.83.2.4 yamt SCARG(uap, iovcnt), NULL, FOF_UPDATE_OFFSET, retval);
448 1.83.2.4 yamt }
449 1.83.2.4 yamt
450 1.83.2.4 yamt int
451 1.83.2.4 yamt do_filewritev(struct lwp *l, int fd, const struct iovec *iovp, int iovcnt,
452 1.83.2.4 yamt off_t *offset, int flags, register_t *retval)
453 1.83.2.4 yamt {
454 1.69 thorpej struct proc *p;
455 1.83.2.4 yamt struct uio auio;
456 1.83.2.4 yamt struct iovec *iov, *needfree = NULL, aiov[UIO_SMALLIOV];
457 1.83.2.4 yamt struct vmspace *vm;
458 1.83.2.4 yamt int i, error;
459 1.83.2.4 yamt size_t cnt;
460 1.83.2.4 yamt u_int iovlen;
461 1.83.2.4 yamt struct file *fp;
462 1.53 lukem struct filedesc *fdp;
463 1.83.2.4 yamt struct iovec *ktriov = NULL;
464 1.83.2.4 yamt
465 1.83.2.4 yamt if (iovcnt == 0)
466 1.83.2.4 yamt return EINVAL;
467 1.39 thorpej
468 1.69 thorpej p = l->l_proc;
469 1.53 lukem fdp = p->p_fd;
470 1.56 thorpej
471 1.56 thorpej if ((fp = fd_getfile(fdp, fd)) == NULL)
472 1.83.2.4 yamt return EBADF;
473 1.56 thorpej
474 1.70 pk if ((fp->f_flag & FWRITE) == 0) {
475 1.70 pk simple_unlock(&fp->f_slock);
476 1.83.2.4 yamt return EBADF;
477 1.70 pk }
478 1.39 thorpej
479 1.45 thorpej FILE_USE(fp);
480 1.45 thorpej
481 1.83.2.4 yamt if (offset == NULL)
482 1.83.2.4 yamt offset = &fp->f_offset;
483 1.83.2.4 yamt else {
484 1.83.2.4 yamt struct vnode *vp = fp->f_data;
485 1.83.2.4 yamt if (fp->f_type != DTYPE_VNODE || vp->v_type == VFIFO) {
486 1.83.2.4 yamt error = ESPIPE;
487 1.83.2.4 yamt goto out;
488 1.83.2.4 yamt }
489 1.83.2.4 yamt /*
490 1.83.2.4 yamt * Test that the device is seekable ?
491 1.83.2.4 yamt * XXX This works because no file systems actually
492 1.83.2.4 yamt * XXX take any action on the seek operation.
493 1.83.2.4 yamt */
494 1.83.2.4 yamt error = VOP_SEEK(vp, fp->f_offset, *offset, fp->f_cred);
495 1.83.2.4 yamt if (error != 0)
496 1.83.2.4 yamt goto out;
497 1.83.2.4 yamt }
498 1.15 cgd
499 1.83.2.1 yamt error = proc_vmspace_getref(p, &vm);
500 1.83.2.4 yamt if (error)
501 1.83.2.1 yamt goto out;
502 1.83.2.4 yamt
503 1.42 perry iovlen = iovcnt * sizeof(struct iovec);
504 1.83.2.4 yamt if (flags & FOF_IOV_SYSSPACE)
505 1.83.2.4 yamt iov = __UNCONST(iovp);
506 1.83.2.4 yamt else {
507 1.15 cgd iov = aiov;
508 1.83.2.4 yamt if ((u_int)iovcnt > UIO_SMALLIOV) {
509 1.83.2.4 yamt if ((u_int)iovcnt > IOV_MAX) {
510 1.83.2.4 yamt error = EINVAL;
511 1.83.2.4 yamt goto out;
512 1.83.2.4 yamt }
513 1.83.2.4 yamt iov = kmem_alloc(iovlen, KM_SLEEP);
514 1.83.2.4 yamt if (iov == NULL) {
515 1.83.2.4 yamt error = ENOMEM;
516 1.83.2.4 yamt goto out;
517 1.83.2.4 yamt }
518 1.83.2.4 yamt needfree = iov;
519 1.83.2.4 yamt }
520 1.83.2.4 yamt error = copyin(iovp, iov, iovlen);
521 1.83.2.4 yamt if (error)
522 1.83.2.4 yamt goto done;
523 1.45 thorpej }
524 1.41 kleink
525 1.15 cgd auio.uio_iov = iov;
526 1.34 mycroft auio.uio_iovcnt = iovcnt;
527 1.15 cgd auio.uio_rw = UIO_WRITE;
528 1.83.2.1 yamt auio.uio_vmspace = vm;
529 1.83.2.4 yamt
530 1.15 cgd auio.uio_resid = 0;
531 1.83.2.4 yamt for (i = 0; i < iovcnt; i++, iov++) {
532 1.15 cgd auio.uio_resid += iov->iov_len;
533 1.40 thorpej /*
534 1.40 thorpej * Writes return ssize_t because -1 is returned on error.
535 1.40 thorpej * Therefore we must restrict the length to SSIZE_MAX to
536 1.40 thorpej * avoid garbage return values.
537 1.40 thorpej */
538 1.40 thorpej if (iov->iov_len > SSIZE_MAX || auio.uio_resid > SSIZE_MAX) {
539 1.15 cgd error = EINVAL;
540 1.15 cgd goto done;
541 1.15 cgd }
542 1.15 cgd }
543 1.83.2.4 yamt
544 1.15 cgd /*
545 1.15 cgd * if tracing, save a copy of iovec
546 1.15 cgd */
547 1.83.2.4 yamt if (ktrpoint(KTR_GENIO)) {
548 1.83.2.4 yamt ktriov = kmem_alloc(iovlen, KM_SLEEP);
549 1.83.2.4 yamt if (ktriov != NULL)
550 1.83.2.4 yamt memcpy(ktriov, auio.uio_iov, iovlen);
551 1.15 cgd }
552 1.83.2.4 yamt
553 1.15 cgd cnt = auio.uio_resid;
554 1.39 thorpej error = (*fp->f_ops->fo_write)(fp, offset, &auio, fp->f_cred, flags);
555 1.22 christos if (error) {
556 1.15 cgd if (auio.uio_resid != cnt && (error == ERESTART ||
557 1.15 cgd error == EINTR || error == EWOULDBLOCK))
558 1.15 cgd error = 0;
559 1.83.2.3 yamt if (error == EPIPE) {
560 1.83.2.3 yamt mutex_enter(&proclist_mutex);
561 1.15 cgd psignal(p, SIGPIPE);
562 1.83.2.3 yamt mutex_exit(&proclist_mutex);
563 1.83.2.3 yamt }
564 1.15 cgd }
565 1.15 cgd cnt -= auio.uio_resid;
566 1.83.2.4 yamt *retval = cnt;
567 1.83.2.4 yamt
568 1.78 drochner if (ktriov != NULL) {
569 1.83.2.4 yamt ktrgeniov(fd, UIO_WRITE, ktriov, cnt, error);
570 1.83.2.4 yamt kmem_free(ktriov, iovlen);
571 1.15 cgd }
572 1.83.2.4 yamt
573 1.45 thorpej done:
574 1.15 cgd if (needfree)
575 1.83.2.4 yamt kmem_free(needfree, iovlen);
576 1.45 thorpej out:
577 1.83.2.1 yamt FILE_UNUSE(fp, l);
578 1.83.2.1 yamt uvmspace_free(vm);
579 1.15 cgd return (error);
580 1.15 cgd }
581 1.15 cgd
582 1.15 cgd /*
583 1.15 cgd * Ioctl system call
584 1.15 cgd */
585 1.15 cgd /* ARGSUSED */
586 1.22 christos int
587 1.69 thorpej sys_ioctl(struct lwp *l, void *v, register_t *retval)
588 1.20 thorpej {
589 1.47 augustss struct sys_ioctl_args /* {
590 1.53 lukem syscallarg(int) fd;
591 1.53 lukem syscallarg(u_long) com;
592 1.83.2.4 yamt syscallarg(void *) data;
593 1.20 thorpej } */ *uap = v;
594 1.53 lukem struct file *fp;
595 1.83.2.4 yamt proc_t *p;
596 1.53 lukem struct filedesc *fdp;
597 1.53 lukem u_long com;
598 1.53 lukem int error;
599 1.53 lukem u_int size;
600 1.83.2.4 yamt void *data, *memp;
601 1.53 lukem #define STK_PARAMS 128
602 1.53 lukem u_long stkbuf[STK_PARAMS/sizeof(u_long)];
603 1.15 cgd
604 1.53 lukem error = 0;
605 1.69 thorpej p = l->l_proc;
606 1.15 cgd fdp = p->p_fd;
607 1.56 thorpej
608 1.56 thorpej if ((fp = fd_getfile(fdp, SCARG(uap, fd))) == NULL)
609 1.15 cgd return (EBADF);
610 1.15 cgd
611 1.45 thorpej FILE_USE(fp);
612 1.45 thorpej
613 1.45 thorpej if ((fp->f_flag & (FREAD | FWRITE)) == 0) {
614 1.45 thorpej error = EBADF;
615 1.65 scw com = 0;
616 1.45 thorpej goto out;
617 1.45 thorpej }
618 1.15 cgd
619 1.16 cgd switch (com = SCARG(uap, com)) {
620 1.15 cgd case FIONCLEX:
621 1.16 cgd fdp->fd_ofileflags[SCARG(uap, fd)] &= ~UF_EXCLOSE;
622 1.45 thorpej goto out;
623 1.45 thorpej
624 1.15 cgd case FIOCLEX:
625 1.16 cgd fdp->fd_ofileflags[SCARG(uap, fd)] |= UF_EXCLOSE;
626 1.45 thorpej goto out;
627 1.15 cgd }
628 1.15 cgd
629 1.15 cgd /*
630 1.15 cgd * Interpret high order word to find amount of data to be
631 1.15 cgd * copied to/from the user's address space.
632 1.15 cgd */
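	/*
	 * Example (illustrative): FIONREAD is defined as _IOR('f', 127, int),
	 * so IOCPARM_LEN(FIONREAD) is sizeof(int) and the IOC_OUT bit is set,
	 * meaning an int is zeroed here and copied back out to the user once
	 * the handler has filled it in.
	 */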
633 1.15 cgd size = IOCPARM_LEN(com);
634 1.45 thorpej if (size > IOCPARM_MAX) {
635 1.45 thorpej error = ENOTTY;
636 1.45 thorpej goto out;
637 1.45 thorpej }
638 1.15 cgd memp = NULL;
639 1.42 perry if (size > sizeof(stkbuf)) {
640 1.83.2.4 yamt memp = kmem_alloc(size, KM_SLEEP);
641 1.15 cgd data = memp;
642 1.15 cgd } else
643 1.83.2.4 yamt data = (void *)stkbuf;
644 1.15 cgd if (com&IOC_IN) {
645 1.15 cgd if (size) {
646 1.31 cgd error = copyin(SCARG(uap, data), data, size);
647 1.15 cgd if (error) {
648 1.15 cgd if (memp)
649 1.83.2.4 yamt kmem_free(memp, size);
650 1.45 thorpej goto out;
651 1.15 cgd }
652 1.83.2.4 yamt ktrgenio(SCARG(uap, fd), UIO_WRITE, SCARG(uap, data),
653 1.83.2.4 yamt size, 0);
654 1.15 cgd } else
655 1.83.2.4 yamt *(void **)data = SCARG(uap, data);
656 1.15 cgd } else if ((com&IOC_OUT) && size)
657 1.15 cgd /*
658 1.15 cgd * Zero the buffer so the user always
659 1.15 cgd * gets back something deterministic.
660 1.15 cgd */
661 1.44 perry memset(data, 0, size);
662 1.15 cgd else if (com&IOC_VOID)
663 1.83.2.4 yamt *(void **)data = SCARG(uap, data);
664 1.15 cgd
665 1.15 cgd switch (com) {
666 1.15 cgd
667 1.15 cgd case FIONBIO:
668 1.79 jdolecek if (*(int *)data != 0)
669 1.15 cgd fp->f_flag |= FNONBLOCK;
670 1.15 cgd else
671 1.15 cgd fp->f_flag &= ~FNONBLOCK;
672 1.83.2.1 yamt error = (*fp->f_ops->fo_ioctl)(fp, FIONBIO, data, l);
673 1.15 cgd break;
674 1.15 cgd
675 1.15 cgd case FIOASYNC:
676 1.79 jdolecek if (*(int *)data != 0)
677 1.15 cgd fp->f_flag |= FASYNC;
678 1.15 cgd else
679 1.15 cgd fp->f_flag &= ~FASYNC;
680 1.83.2.1 yamt error = (*fp->f_ops->fo_ioctl)(fp, FIOASYNC, data, l);
681 1.15 cgd break;
682 1.15 cgd
683 1.15 cgd default:
684 1.83.2.1 yamt error = (*fp->f_ops->fo_ioctl)(fp, com, data, l);
685 1.15 cgd /*
686 1.15 cgd * Copy any data to user, size was
687 1.15 cgd * already set and checked above.
688 1.15 cgd */
689 1.73 dsl if (error == 0 && (com&IOC_OUT) && size) {
690 1.31 cgd error = copyout(data, SCARG(uap, data), size);
691 1.83.2.4 yamt ktrgenio(SCARG(uap, fd), UIO_READ, SCARG(uap, data),
692 1.83.2.4 yamt size, error);
693 1.73 dsl }
694 1.15 cgd break;
695 1.15 cgd }
696 1.15 cgd if (memp)
697 1.83.2.4 yamt kmem_free(memp, size);
698 1.45 thorpej out:
699 1.83.2.1 yamt FILE_UNUSE(fp, l);
700 1.61 atatat switch (error) {
701 1.61 atatat case -1:
702 1.61 atatat printf("sys_ioctl: _IO%s%s('%c', %lu, %lu) returned -1: "
703 1.61 atatat "pid=%d comm=%s\n",
704 1.61 atatat (com & IOC_IN) ? "W" : "", (com & IOC_OUT) ? "R" : "",
705 1.61 atatat (char)IOCGROUP(com), (com & 0xff), IOCPARM_LEN(com),
706 1.61 atatat p->p_pid, p->p_comm);
707 1.61 atatat /* FALLTHROUGH */
708 1.61 atatat case EPASSTHROUGH:
709 1.61 atatat error = ENOTTY;
710 1.61 atatat /* FALLTHROUGH */
711 1.61 atatat default:
712 1.61 atatat return (error);
713 1.61 atatat }
714 1.15 cgd }
715 1.15 cgd
716 1.15 cgd /*
717 1.15 cgd * Select system call.
718 1.15 cgd */
719 1.22 christos int
720 1.83.2.4 yamt sys_pselect(lwp_t *l, void *v, register_t *retval)
721 1.82 matt {
722 1.82 matt struct sys_pselect_args /* {
723 1.82 matt syscallarg(int) nd;
724 1.82 matt syscallarg(fd_set *) in;
725 1.82 matt syscallarg(fd_set *) ou;
726 1.82 matt syscallarg(fd_set *) ex;
727 1.82 matt syscallarg(const struct timespec *) ts;
728 1.82 matt syscallarg(sigset_t *) mask;
729 1.82 matt } */ * const uap = v;
730 1.82 matt struct timespec ats;
731 1.82 matt struct timeval atv, *tv = NULL;
732 1.82 matt sigset_t amask, *mask = NULL;
733 1.82 matt int error;
734 1.82 matt
735 1.82 matt if (SCARG(uap, ts)) {
736 1.82 matt error = copyin(SCARG(uap, ts), &ats, sizeof(ats));
737 1.82 matt if (error)
738 1.82 matt return error;
739 1.82 matt atv.tv_sec = ats.tv_sec;
740 1.82 matt atv.tv_usec = ats.tv_nsec / 1000;
741 1.82 matt tv = &atv;
742 1.82 matt }
743 1.82 matt if (SCARG(uap, mask) != NULL) {
744 1.82 matt error = copyin(SCARG(uap, mask), &amask, sizeof(amask));
745 1.82 matt if (error)
746 1.82 matt return error;
747 1.82 matt mask = &amask;
748 1.82 matt }
749 1.82 matt
750 1.82 matt return selcommon(l, retval, SCARG(uap, nd), SCARG(uap, in),
751 1.82 matt SCARG(uap, ou), SCARG(uap, ex), tv, mask);
752 1.82 matt }
753 1.82 matt
754 1.82 matt int
755 1.83.2.2 yamt inittimeleft(struct timeval *tv, struct timeval *sleeptv)
756 1.83.2.2 yamt {
757 1.83.2.2 yamt if (itimerfix(tv))
758 1.83.2.2 yamt return -1;
759 1.83.2.2 yamt getmicrouptime(sleeptv);
760 1.83.2.2 yamt return 0;
761 1.83.2.2 yamt }
762 1.83.2.2 yamt
763 1.83.2.2 yamt int
764 1.83.2.2 yamt gettimeleft(struct timeval *tv, struct timeval *sleeptv)
765 1.83.2.2 yamt {
766 1.83.2.2 yamt /*
767 1.83.2.2 yamt * We have to recalculate the timeout on every retry.
768 1.83.2.2 yamt */
769 1.83.2.2 yamt struct timeval slepttv;
770 1.83.2.2 yamt /*
771 1.83.2.2 yamt * reduce tv by elapsed time
772 1.83.2.2 yamt * based on monotonic time scale
773 1.83.2.2 yamt */
774 1.83.2.2 yamt getmicrouptime(&slepttv);
775 1.83.2.2 yamt timeradd(tv, sleeptv, tv);
776 1.83.2.2 yamt timersub(tv, &slepttv, tv);
777 1.83.2.2 yamt *sleeptv = slepttv;
778 1.83.2.2 yamt return tvtohz(tv);
779 1.83.2.2 yamt }
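
/*
 * Worked example (illustrative): if the caller passed a 5 second timeout
 * and 2 seconds have elapsed since the previous getmicrouptime() sample
 * stored in *sleeptv, gettimeleft() shrinks *tv to roughly 3 seconds,
 * refreshes *sleeptv, and returns that remainder converted to clock ticks
 * for use as the cv_timedwait_sig() timeout.
 */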
780 1.83.2.2 yamt
781 1.83.2.2 yamt int
782 1.83.2.4 yamt sys_select(lwp_t *l, void *v, register_t *retval)
783 1.20 thorpej {
784 1.47 augustss struct sys_select_args /* {
785 1.53 lukem syscallarg(int) nd;
786 1.53 lukem syscallarg(fd_set *) in;
787 1.53 lukem syscallarg(fd_set *) ou;
788 1.53 lukem syscallarg(fd_set *) ex;
789 1.53 lukem syscallarg(struct timeval *) tv;
790 1.82 matt } */ * const uap = v;
791 1.82 matt struct timeval atv, *tv = NULL;
792 1.82 matt int error;
793 1.82 matt
794 1.82 matt if (SCARG(uap, tv)) {
795 1.83.2.4 yamt error = copyin(SCARG(uap, tv), (void *)&atv,
796 1.82 matt sizeof(atv));
797 1.82 matt if (error)
798 1.82 matt return error;
799 1.82 matt tv = &atv;
800 1.82 matt }
801 1.82 matt
802 1.82 matt return selcommon(l, retval, SCARG(uap, nd), SCARG(uap, in),
803 1.82 matt SCARG(uap, ou), SCARG(uap, ex), tv, NULL);
804 1.82 matt }
805 1.82 matt
806 1.82 matt int
807 1.83.2.4 yamt selcommon(lwp_t *l, register_t *retval, int nd, fd_set *u_in,
808 1.83.2.4 yamt fd_set *u_ou, fd_set *u_ex, struct timeval *tv, sigset_t *mask)
809 1.82 matt {
810 1.53 lukem char smallbits[howmany(FD_SETSIZE, NFDBITS) *
811 1.53 lukem sizeof(fd_mask) * 6];
812 1.83.2.4 yamt proc_t * const p = l->l_proc;
813 1.83.2.4 yamt char *bits;
814 1.83.2.4 yamt int ncoll, error, timo;
815 1.53 lukem size_t ni;
816 1.82 matt sigset_t oldmask;
817 1.83.2.2 yamt struct timeval sleeptv;
818 1.15 cgd
819 1.53 lukem error = 0;
820 1.82 matt if (nd < 0)
821 1.35 thorpej return (EINVAL);
822 1.82 matt if (nd > p->p_fd->fd_nfiles) {
823 1.16 cgd /* forgiving; slightly wrong */
824 1.82 matt nd = p->p_fd->fd_nfiles;
825 1.16 cgd }
826 1.82 matt ni = howmany(nd, NFDBITS) * sizeof(fd_mask);
827 1.27 mycroft if (ni * 6 > sizeof(smallbits))
828 1.83.2.4 yamt bits = kmem_alloc(ni * 6, KM_SLEEP);
829 1.25 mycroft else
830 1.26 cgd bits = smallbits;
831 1.15 cgd
832 1.53 lukem #define getbits(name, x) \
833 1.82 matt if (u_ ## name) { \
834 1.82 matt error = copyin(u_ ## name, bits + ni * x, ni); \
835 1.53 lukem if (error) \
836 1.53 lukem goto done; \
837 1.53 lukem } else \
838 1.44 perry memset(bits + ni * x, 0, ni);
839 1.15 cgd getbits(in, 0);
840 1.15 cgd getbits(ou, 1);
841 1.15 cgd getbits(ex, 2);
842 1.15 cgd #undef getbits
843 1.15 cgd
844 1.65 scw timo = 0;
845 1.83.2.2 yamt if (tv && inittimeleft(tv, &sleeptv) == -1) {
846 1.83.2.1 yamt error = EINVAL;
847 1.83.2.1 yamt goto done;
848 1.65 scw }
849 1.83.2.2 yamt
850 1.83.2.3 yamt if (mask) {
851 1.83.2.3 yamt sigminusset(&sigcantmask, mask);
852 1.83.2.3 yamt mutex_enter(&p->p_smutex);
853 1.83.2.3 yamt oldmask = l->l_sigmask;
854 1.83.2.3 yamt l->l_sigmask = *mask;
855 1.83.2.3 yamt mutex_exit(&p->p_smutex);
856 1.83.2.3 yamt } else
857 1.83.2.3 yamt oldmask = l->l_sigmask; /* XXXgcc */
858 1.65 scw
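	/*
	 * Outline of the loop below (descriptive): each pass scans the
	 * descriptors with selscan() outside select_lock.  If nothing is
	 * ready and time remains, the LWP blocks on select_cv, unless a
	 * collision or a wakeup raced with the scan (l_selflag no longer
	 * SEL_SCANNING, or nselcoll changed), in which case it rescans
	 * immediately.  selclear() then detaches the LWP from any selinfo
	 * records registered through selrecord() before returning.
	 */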
859 1.83.2.4 yamt mutex_enter(&select_lock);
860 1.83.2.4 yamt SLIST_INIT(&l->l_selwait);
861 1.83.2.4 yamt for (;;) {
862 1.83.2.4 yamt l->l_selflag = SEL_SCANNING;
863 1.83.2.4 yamt ncoll = nselcoll;
864 1.83.2.4 yamt mutex_exit(&select_lock);
865 1.83.2.4 yamt
866 1.83.2.4 yamt error = selscan(l, (fd_mask *)(bits + ni * 0),
867 1.83.2.4 yamt (fd_mask *)(bits + ni * 3), nd, retval);
868 1.83.2.4 yamt
869 1.83.2.4 yamt mutex_enter(&select_lock);
870 1.83.2.4 yamt if (error || *retval)
871 1.83.2.4 yamt break;
872 1.83.2.4 yamt if (tv && (timo = gettimeleft(tv, &sleeptv)) <= 0)
873 1.83.2.4 yamt break;
874 1.83.2.4 yamt if (l->l_selflag != SEL_SCANNING || ncoll != nselcoll)
875 1.83.2.4 yamt continue;
876 1.83.2.4 yamt l->l_selflag = SEL_BLOCKING;
877 1.83.2.4 yamt error = cv_timedwait_sig(&select_cv, &select_lock, timo);
878 1.83.2.4 yamt if (error != 0)
879 1.83.2.4 yamt break;
880 1.83.2.4 yamt }
881 1.83.2.4 yamt selclear();
882 1.83.2.4 yamt mutex_exit(&select_lock);
883 1.83.2.4 yamt
884 1.83.2.3 yamt if (mask) {
885 1.83.2.3 yamt mutex_enter(&p->p_smutex);
886 1.83.2.3 yamt l->l_sigmask = oldmask;
887 1.83.2.3 yamt mutex_exit(&p->p_smutex);
888 1.83.2.3 yamt }
889 1.83.2.4 yamt
890 1.83.2.2 yamt done:
891 1.15 cgd /* select is not restarted after signals... */
892 1.15 cgd if (error == ERESTART)
893 1.15 cgd error = EINTR;
894 1.15 cgd if (error == EWOULDBLOCK)
895 1.15 cgd error = 0;
896 1.83.2.4 yamt if (error == 0 && u_in != NULL)
897 1.83.2.4 yamt error = copyout(bits + ni * 3, u_in, ni);
898 1.83.2.4 yamt if (error == 0 && u_ou != NULL)
899 1.83.2.4 yamt error = copyout(bits + ni * 4, u_ou, ni);
900 1.83.2.4 yamt if (error == 0 && u_ex != NULL)
901 1.83.2.4 yamt error = copyout(bits + ni * 5, u_ex, ni);
902 1.83.2.4 yamt if (bits != smallbits)
903 1.83.2.4 yamt kmem_free(bits, ni * 6);
904 1.15 cgd return (error);
905 1.15 cgd }
906 1.15 cgd
907 1.22 christos int
908 1.83.2.4 yamt selscan(lwp_t *l, fd_mask *ibitp, fd_mask *obitp, int nfd,
909 1.53 lukem register_t *retval)
910 1.53 lukem {
911 1.63 jdolecek static const int flag[3] = { POLLRDNORM | POLLHUP | POLLERR,
912 1.28 mycroft POLLWRNORM | POLLHUP | POLLERR,
913 1.28 mycroft POLLRDBAND };
914 1.83.2.4 yamt proc_t *p = l->l_proc;
915 1.83.2.1 yamt struct filedesc *fdp;
916 1.83.2.1 yamt int msk, i, j, fd, n;
917 1.83.2.1 yamt fd_mask ibits, obits;
918 1.83.2.1 yamt struct file *fp;
919 1.15 cgd
920 1.53 lukem fdp = p->p_fd;
921 1.53 lukem n = 0;
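	/*
	 * Example of the bit walk below (illustrative, assuming 32-bit
	 * fd_mask words): with nfd = 40 and only descriptor 33 set, the
	 * outer loop reaches the second mask word at i = 32, ffs() returns
	 * 2, j becomes 1 after the decrement, and fd = i + j = 33 is polled;
	 * the matching bit is then set in the output word if it is ready.
	 */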
922 1.15 cgd for (msk = 0; msk < 3; msk++) {
923 1.15 cgd for (i = 0; i < nfd; i += NFDBITS) {
924 1.25 mycroft ibits = *ibitp++;
925 1.25 mycroft obits = 0;
926 1.25 mycroft while ((j = ffs(ibits)) && (fd = i + --j) < nfd) {
927 1.25 mycroft ibits &= ~(1 << j);
928 1.56 thorpej if ((fp = fd_getfile(fdp, fd)) == NULL)
929 1.15 cgd return (EBADF);
930 1.45 thorpej FILE_USE(fp);
931 1.83.2.1 yamt if ((*fp->f_ops->fo_poll)(fp, flag[msk], l)) {
932 1.25 mycroft obits |= (1 << j);
933 1.15 cgd n++;
934 1.15 cgd }
935 1.83.2.1 yamt FILE_UNUSE(fp, l);
936 1.15 cgd }
937 1.25 mycroft *obitp++ = obits;
938 1.15 cgd }
939 1.15 cgd }
940 1.15 cgd *retval = n;
941 1.15 cgd return (0);
942 1.15 cgd }
943 1.15 cgd
944 1.28 mycroft /*
945 1.28 mycroft * Poll system call.
946 1.28 mycroft */
947 1.28 mycroft int
948 1.83.2.4 yamt sys_poll(lwp_t *l, void *v, register_t *retval)
949 1.28 mycroft {
950 1.47 augustss struct sys_poll_args /* {
951 1.53 lukem syscallarg(struct pollfd *) fds;
952 1.53 lukem syscallarg(u_int) nfds;
953 1.53 lukem syscallarg(int) timeout;
954 1.82 matt } */ * const uap = v;
955 1.82 matt struct timeval atv, *tv = NULL;
956 1.82 matt
957 1.82 matt if (SCARG(uap, timeout) != INFTIM) {
958 1.82 matt atv.tv_sec = SCARG(uap, timeout) / 1000;
959 1.82 matt atv.tv_usec = (SCARG(uap, timeout) % 1000) * 1000;
960 1.82 matt tv = &atv;
961 1.82 matt }
962 1.82 matt
963 1.82 matt return pollcommon(l, retval, SCARG(uap, fds), SCARG(uap, nfds),
964 1.82 matt tv, NULL);
965 1.82 matt }
966 1.82 matt
967 1.82 matt /*
968 1.82 matt * Poll system call.
969 1.82 matt */
970 1.82 matt int
971 1.83.2.4 yamt sys_pollts(lwp_t *l, void *v, register_t *retval)
972 1.82 matt {
973 1.82 matt struct sys_pollts_args /* {
974 1.82 matt syscallarg(struct pollfd *) fds;
975 1.82 matt syscallarg(u_int) nfds;
976 1.82 matt syscallarg(const struct timespec *) ts;
977 1.82 matt syscallarg(const sigset_t *) mask;
978 1.82 matt } */ * const uap = v;
979 1.82 matt struct timespec ats;
980 1.82 matt struct timeval atv, *tv = NULL;
981 1.82 matt sigset_t amask, *mask = NULL;
982 1.82 matt int error;
983 1.82 matt
984 1.82 matt if (SCARG(uap, ts)) {
985 1.82 matt error = copyin(SCARG(uap, ts), &ats, sizeof(ats));
986 1.82 matt if (error)
987 1.82 matt return error;
988 1.82 matt atv.tv_sec = ats.tv_sec;
989 1.82 matt atv.tv_usec = ats.tv_nsec / 1000;
990 1.82 matt tv = &atv;
991 1.82 matt }
992 1.82 matt if (SCARG(uap, mask)) {
993 1.82 matt error = copyin(SCARG(uap, mask), &amask, sizeof(amask));
994 1.82 matt if (error)
995 1.82 matt return error;
996 1.82 matt mask = &amask;
997 1.82 matt }
998 1.82 matt
999 1.82 matt return pollcommon(l, retval, SCARG(uap, fds), SCARG(uap, nfds),
1000 1.82 matt tv, mask);
1001 1.82 matt }
1002 1.82 matt
1003 1.82 matt int
1004 1.83.2.4 yamt pollcommon(lwp_t *l, register_t *retval,
1005 1.82 matt struct pollfd *u_fds, u_int nfds,
1006 1.82 matt struct timeval *tv, sigset_t *mask)
1007 1.82 matt {
1008 1.83.2.1 yamt char smallbits[32 * sizeof(struct pollfd)];
1009 1.83.2.4 yamt proc_t * const p = l->l_proc;
1010 1.83.2.4 yamt void * bits;
1011 1.82 matt sigset_t oldmask;
1012 1.83.2.4 yamt int ncoll, error, timo;
1013 1.53 lukem size_t ni;
1014 1.83.2.2 yamt struct timeval sleeptv;
1015 1.28 mycroft
1016 1.82 matt if (nfds > p->p_fd->fd_nfiles) {
1017 1.28 mycroft /* forgiving; slightly wrong */
1018 1.82 matt nfds = p->p_fd->fd_nfiles;
1019 1.28 mycroft }
1020 1.82 matt ni = nfds * sizeof(struct pollfd);
1021 1.28 mycroft if (ni > sizeof(smallbits))
1022 1.83.2.4 yamt bits = kmem_alloc(ni, KM_SLEEP);
1023 1.28 mycroft else
1024 1.28 mycroft bits = smallbits;
1025 1.28 mycroft
1026 1.82 matt error = copyin(u_fds, bits, ni);
1027 1.28 mycroft if (error)
1028 1.28 mycroft goto done;
1029 1.28 mycroft
1030 1.65 scw timo = 0;
1031 1.83.2.2 yamt if (tv && inittimeleft(tv, &sleeptv) == -1) {
1032 1.83.2.1 yamt error = EINVAL;
1033 1.83.2.1 yamt goto done;
1034 1.65 scw }
1035 1.83.2.2 yamt
1036 1.83.2.3 yamt if (mask) {
1037 1.83.2.3 yamt sigminusset(&sigcantmask, mask);
1038 1.83.2.3 yamt mutex_enter(&p->p_smutex);
1039 1.83.2.3 yamt oldmask = l->l_sigmask;
1040 1.83.2.3 yamt l->l_sigmask = *mask;
1041 1.83.2.3 yamt mutex_exit(&p->p_smutex);
1042 1.83.2.3 yamt } else
1043 1.83.2.3 yamt oldmask = l->l_sigmask; /* XXXgcc */
1044 1.65 scw
1045 1.83.2.4 yamt mutex_enter(&select_lock);
1046 1.83.2.4 yamt SLIST_INIT(&l->l_selwait);
1047 1.83.2.4 yamt for (;;) {
1048 1.83.2.4 yamt ncoll = nselcoll;
1049 1.83.2.4 yamt l->l_selflag = SEL_SCANNING;
1050 1.83.2.4 yamt mutex_exit(&select_lock);
1051 1.83.2.4 yamt
1052 1.83.2.4 yamt error = pollscan(l, (struct pollfd *)bits, nfds, retval);
1053 1.83.2.4 yamt
1054 1.83.2.4 yamt mutex_enter(&select_lock);
1055 1.83.2.4 yamt if (error || *retval)
1056 1.83.2.4 yamt break;
1057 1.83.2.4 yamt if (tv && (timo = gettimeleft(tv, &sleeptv)) <= 0)
1058 1.83.2.4 yamt break;
1059 1.83.2.4 yamt if (l->l_selflag != SEL_SCANNING || nselcoll != ncoll)
1060 1.83.2.4 yamt continue;
1061 1.83.2.4 yamt l->l_selflag = SEL_BLOCKING;
1062 1.83.2.4 yamt error = cv_timedwait_sig(&select_cv, &select_lock, timo);
1063 1.83.2.4 yamt if (error != 0)
1064 1.83.2.4 yamt break;
1065 1.83.2.4 yamt }
1066 1.83.2.4 yamt selclear();
1067 1.83.2.4 yamt mutex_exit(&select_lock);
1068 1.83.2.4 yamt
1069 1.83.2.3 yamt if (mask) {
1070 1.83.2.3 yamt mutex_enter(&p->p_smutex);
1071 1.83.2.3 yamt l->l_sigmask = oldmask;
1072 1.83.2.3 yamt mutex_exit(&p->p_smutex);
1073 1.83.2.3 yamt }
1074 1.83.2.2 yamt done:
1075 1.28 mycroft /* poll is not restarted after signals... */
1076 1.28 mycroft if (error == ERESTART)
1077 1.28 mycroft error = EINTR;
1078 1.28 mycroft if (error == EWOULDBLOCK)
1079 1.28 mycroft error = 0;
1080 1.83.2.4 yamt if (error == 0)
1081 1.82 matt error = copyout(bits, u_fds, ni);
1082 1.83.2.4 yamt if (bits != smallbits)
1083 1.83.2.4 yamt kmem_free(bits, ni);
1084 1.28 mycroft return (error);
1085 1.28 mycroft }
1086 1.28 mycroft
1087 1.28 mycroft int
1088 1.83.2.4 yamt pollscan(lwp_t *l, struct pollfd *fds, int nfd, register_t *retval)
1089 1.53 lukem {
1090 1.83.2.4 yamt proc_t *p = l->l_proc;
1091 1.53 lukem struct filedesc *fdp;
1092 1.53 lukem int i, n;
1093 1.53 lukem struct file *fp;
1094 1.28 mycroft
1095 1.53 lukem fdp = p->p_fd;
1096 1.54 lukem n = 0;
1097 1.28 mycroft for (i = 0; i < nfd; i++, fds++) {
1098 1.60 christos if (fds->fd >= fdp->fd_nfiles) {
1099 1.28 mycroft fds->revents = POLLNVAL;
1100 1.28 mycroft n++;
1101 1.60 christos } else if (fds->fd < 0) {
1102 1.60 christos fds->revents = 0;
1103 1.28 mycroft } else {
1104 1.56 thorpej if ((fp = fd_getfile(fdp, fds->fd)) == NULL) {
1105 1.32 mrg fds->revents = POLLNVAL;
1106 1.28 mycroft n++;
1107 1.32 mrg } else {
1108 1.45 thorpej FILE_USE(fp);
1109 1.32 mrg fds->revents = (*fp->f_ops->fo_poll)(fp,
1110 1.83.2.1 yamt fds->events | POLLERR | POLLHUP, l);
1111 1.32 mrg if (fds->revents != 0)
1112 1.32 mrg n++;
1113 1.83.2.1 yamt FILE_UNUSE(fp, l);
1114 1.32 mrg }
1115 1.28 mycroft }
1116 1.28 mycroft }
1117 1.28 mycroft *retval = n;
1118 1.28 mycroft return (0);
1119 1.28 mycroft }
1120 1.28 mycroft
1121 1.15 cgd /*ARGSUSED*/
1122 1.22 christos int
1123 1.83.2.4 yamt seltrue(dev_t dev, int events, lwp_t *l)
1124 1.15 cgd {
1125 1.15 cgd
1126 1.28 mycroft return (events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
1127 1.15 cgd }
1128 1.15 cgd
1129 1.15 cgd /*
1130 1.15 cgd * Record a select request.
1131 1.15 cgd */
1132 1.15 cgd void
1133 1.83.2.4 yamt selrecord(lwp_t *selector, struct selinfo *sip)
1134 1.15 cgd {
1135 1.15 cgd
1136 1.83.2.4 yamt mutex_enter(&select_lock);
1137 1.83.2.4 yamt if (sip->sel_lwp == NULL) {
1138 1.83.2.4 yamt /* First named waiter, although there may be more. */
1139 1.83.2.4 yamt sip->sel_lwp = selector;
1140 1.83.2.4 yamt SLIST_INSERT_HEAD(&selector->l_selwait, sip, sel_chain);
1141 1.83.2.4 yamt } else if (sip->sel_lwp != selector) {
1142 1.83.2.4 yamt /* Multiple waiters. */
1143 1.83.2.4 yamt sip->sel_collision = true;
1144 1.69 thorpej }
1145 1.83.2.4 yamt mutex_exit(&select_lock);
1146 1.15 cgd }
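
/*
 * Illustrative sketch of how a driver typically pairs with selrecord()
 * and selnotify(); foo_poll, foo_softc, sc_hasdata and sc_rsel are
 * hypothetical names, not part of this file:
 *
 *	int
 *	foo_poll(dev_t dev, int events, lwp_t *l)
 *	{
 *		struct foo_softc *sc = ...;	(driver state, hypothetical)
 *		int revents = 0;
 *
 *		if ((events & (POLLIN | POLLRDNORM)) != 0) {
 *			if (sc->sc_hasdata)
 *				revents |= events & (POLLIN | POLLRDNORM);
 *			else
 *				selrecord(l, &sc->sc_rsel);
 *		}
 *		return revents;
 *	}
 *
 * When data later arrives, the driver calls selnotify(&sc->sc_rsel, 0)
 * to wake any select()/poll() waiters and post kqueue events.
 */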
1147 1.15 cgd
1148 1.15 cgd /*
1149 1.15 cgd * Do a wakeup when a selectable event occurs.
1150 1.15 cgd */
1151 1.15 cgd void
1152 1.83.2.4 yamt selwakeup(struct selinfo *sip)
1153 1.15 cgd {
1154 1.83.2.4 yamt lwp_t *l;
1155 1.15 cgd
1156 1.83.2.4 yamt mutex_enter(&select_lock);
1157 1.73 dsl if (sip->sel_collision) {
1158 1.83.2.4 yamt /* Multiple waiters - just notify everybody. */
1159 1.15 cgd nselcoll++;
1160 1.83.2.4 yamt sip->sel_collision = false;
1161 1.83.2.4 yamt cv_broadcast(&select_cv);
1162 1.83.2.4 yamt } else if (sip->sel_lwp != NULL) {
1163 1.83.2.4 yamt /* Only one LWP waiting. */
1164 1.83.2.4 yamt l = sip->sel_lwp;
1165 1.83.2.4 yamt if (l->l_selflag == SEL_BLOCKING) {
1166 1.83.2.4 yamt /*
1167 1.83.2.4 yamt * If it's sleeping, wake it up. If not, it's
1168 1.83.2.4 yamt * already awake but hasn't yet removed itself
1169 1.83.2.4 yamt * from the selector. We reset the state below
1170 1.83.2.4 yamt * so that we only attempt to do this once.
1171 1.83.2.4 yamt */
1172 1.83.2.4 yamt lwp_lock(l);
1173 1.83.2.4 yamt if (l->l_wchan == &select_cv) {
1174 1.83.2.4 yamt /* lwp_unsleep() releases the LWP lock. */
1175 1.83.2.4 yamt lwp_unsleep(l);
1176 1.83.2.4 yamt } else
1177 1.83.2.4 yamt lwp_unlock(l);
1178 1.83.2.4 yamt } else {
1179 1.83.2.4 yamt /*
1180 1.83.2.4 yamt * Not yet asleep. Reset its state below so that
1181 1.83.2.4 yamt * it will go around again.
1182 1.83.2.4 yamt */
1183 1.83.2.4 yamt }
1184 1.83.2.4 yamt l->l_selflag = SEL_RESET;
1185 1.15 cgd }
1186 1.83.2.4 yamt mutex_exit(&select_lock);
1187 1.83.2.4 yamt }
1188 1.83.2.3 yamt
1189 1.83.2.4 yamt void
1190 1.83.2.4 yamt selnotify(struct selinfo *sip, long knhint)
1191 1.83.2.4 yamt {
1192 1.83.2.3 yamt
1193 1.83.2.4 yamt selwakeup(sip);
1194 1.83.2.4 yamt KNOTE(&sip->sel_klist, knhint);
1195 1.83.2.4 yamt }
1196 1.83.2.4 yamt
1197 1.83.2.4 yamt /*
1198 1.83.2.4 yamt * Remove an LWP from all objects that it is waiting for.
1199 1.83.2.4 yamt */
1200 1.83.2.4 yamt static void
1201 1.83.2.4 yamt selclear(void)
1202 1.83.2.4 yamt {
1203 1.83.2.4 yamt struct selinfo *sip;
1204 1.83.2.4 yamt lwp_t *l = curlwp;
1205 1.83.2.4 yamt
1206 1.83.2.4 yamt KASSERT(mutex_owned(&select_lock));
1207 1.83.2.4 yamt
1208 1.83.2.4 yamt SLIST_FOREACH(sip, &l->l_selwait, sel_chain) {
1209 1.83.2.4 yamt KASSERT(sip->sel_lwp == l);
1210 1.83.2.4 yamt sip->sel_lwp = NULL;
1211 1.15 cgd }
1212 1.83.2.4 yamt }
1213 1.83.2.4 yamt
1214 1.83.2.4 yamt /*
1215 1.83.2.4 yamt * Initialize the select/poll system calls.
1216 1.83.2.4 yamt */
1217 1.83.2.4 yamt void
1218 1.83.2.4 yamt selsysinit(void)
1219 1.83.2.4 yamt {
1220 1.83.2.4 yamt
1221 1.83.2.4 yamt mutex_init(&select_lock, MUTEX_DRIVER, IPL_VM);
1222 1.83.2.4 yamt cv_init(&select_cv, "select");
1223 1.15 cgd }