/*	$NetBSD: sys_aio.c,v 1.9 2007/11/26 19:02:04 pooka Exp $	*/

/*
 * Copyright (c) 2007, Mindaugas Rasiukevicius <rmind at NetBSD org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * TODO:
 *   1. Additional work for VCHR and maybe VBLK devices.
 *   2. Consider making job lookup O(n) in the number of jobs per
 *      file descriptor, instead of scanning the whole queue.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sys_aio.c,v 1.9 2007/11/26 19:02:04 pooka Exp $");

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/condvar.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/lwp.h>
#include <sys/mutex.h>
#include <sys/pool.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/signal.h>
#include <sys/signalvar.h>
#include <sys/syscallargs.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/vnode.h>

#include <uvm/uvm_extern.h>

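/*
 * Overview (an informal sketch of the user-visible flow, not an API
 * definition): a process submits an asynchronous request with
 * aio_read(2), aio_write(2), aio_fsync(2) or lio_listio(2).  The first
 * submission creates a per-process worker LWP (aio_init()), which
 * performs the queued operations synchronously on the process' behalf.
 * The application then polls aio_error(2) until it stops returning
 * EINPROGRESS, collects the result with aio_return(2), and may block
 * for completions with aio_suspend(2) or abort queued requests with
 * aio_cancel(2).
 */
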
/*
 * System-wide limits and counter of AIO operations.
 * XXXSMP: We should spin-lock it, or modify atomically.
 */
static u_int aio_listio_max = AIO_LISTIO_MAX;
static u_int aio_max = AIO_MAX;
static u_int aio_jobs_count;

static struct pool aio_job_pool;
static struct pool aio_lio_pool;

/* Prototypes */
void aio_worker(void *);
static void aio_process(struct aio_job *);
static void aio_sendsig(struct proc *, struct sigevent *);
static int aio_enqueue_job(int, void *, struct lio_req *);

/*
 * Initialize the AIO system.
 */
void
aio_sysinit(void)
{

	pool_init(&aio_job_pool, sizeof(struct aio_job), 0, 0, 0,
	    "aio_jobs_pool", &pool_allocator_nointr, IPL_NONE);
	pool_init(&aio_lio_pool, sizeof(struct lio_req), 0, 0, 0,
	    "aio_lio_pool", &pool_allocator_nointr, IPL_NONE);
}

/*
 * Initialize the Asynchronous I/O data structures for the process.
 */
int
aio_init(struct proc *p)
{
	struct aioproc *aio;
	struct lwp *l;
	int error;
	bool inmem;
	vaddr_t uaddr;

	/* Allocate and initialize the AIO structure */
	aio = kmem_zalloc(sizeof(struct aioproc), KM_NOSLEEP);
	if (aio == NULL)
		return EAGAIN;

	/* Initialize the queue and its synchronization structures */
	mutex_init(&aio->aio_mtx, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&aio->aio_worker_cv, "aiowork");
	cv_init(&aio->done_cv, "aiodone");
	TAILQ_INIT(&aio->jobs_queue);

	/*
	 * Create an AIO worker thread.
	 * XXX: Currently, the AIO thread is not protected against
	 * the user's actions.
	 */
	inmem = uvm_uarea_alloc(&uaddr);
	if (uaddr == 0) {
		aio_exit(p, aio);
		return EAGAIN;
	}
	error = lwp_create(curlwp, p, uaddr, inmem, 0, NULL, 0, aio_worker,
	    NULL, &l, curlwp->l_class);
	if (error != 0) {
		uvm_uarea_free(uaddr, curcpu());
		aio_exit(p, aio);
		return error;
	}

	/*
	 * Recheck that we really are the first - another thread may have
	 * initialized the AIO state while we were not holding the lock.
	 */
	mutex_enter(&p->p_mutex);
	if (p->p_aio) {
		mutex_exit(&p->p_mutex);
		aio_exit(p, aio);
		lwp_exit(l);
		return 0;
	}
	p->p_aio = aio;
	mutex_exit(&p->p_mutex);

	/* Complete the initialization of the thread, and run it */
	mutex_enter(&p->p_smutex);
	aio->aio_worker = l;
	p->p_nrlwps++;
	lwp_lock(l);
	l->l_stat = LSRUN;
	l->l_priority = PRI_KERNEL - 1;
	sched_enqueue(l, false);
	lwp_unlock(l);
	mutex_exit(&p->p_smutex);

	return 0;
}

/*
 * Tear down the Asynchronous I/O state of the process.
 */
void
aio_exit(struct proc *p, struct aioproc *aio)
{
	struct aio_job *a_job;

	if (aio == NULL)
		return;

	/* Free the AIO queue */
	while (!TAILQ_EMPTY(&aio->jobs_queue)) {
		a_job = TAILQ_FIRST(&aio->jobs_queue);
		TAILQ_REMOVE(&aio->jobs_queue, a_job, list);
		pool_put(&aio_job_pool, a_job);
		aio_jobs_count--;	/* XXXSMP */
	}

	/* Destroy and free the entire AIO data structure */
	cv_destroy(&aio->aio_worker_cv);
	cv_destroy(&aio->done_cv);
	mutex_destroy(&aio->aio_mtx);
	kmem_free(aio, sizeof(struct aioproc));
}

/*
 * AIO worker thread and processor.
 */
void
aio_worker(void *arg)
{
	struct proc *p = curlwp->l_proc;
	struct aioproc *aio = p->p_aio;
	struct aio_job *a_job;
	struct lio_req *lio;
	sigset_t oss, nss;
	int error, refcnt;

	/*
	 * Block all signals: with a completely filled mask only the
	 * unmaskable SIGKILL and SIGSTOP can still be delivered.
	 */
	sigfillset(&nss);
	mutex_enter(&p->p_smutex);
	error = sigprocmask1(curlwp, SIG_SETMASK, &nss, &oss);
	mutex_exit(&p->p_smutex);
	KASSERT(error == 0);

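	/*
	 * Main loop: take one job at a time from the queue, perform the
	 * I/O synchronously in this LWP, copy the result back to the
	 * user-space control block and post any requested signal.
	 */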
	for (;;) {
		/*
		 * Loop for each job in the queue.  If there
		 * are no jobs, then sleep.
		 */
		mutex_enter(&aio->aio_mtx);
		while ((a_job = TAILQ_FIRST(&aio->jobs_queue)) == NULL) {
			if (cv_wait_sig(&aio->aio_worker_cv, &aio->aio_mtx)) {
				/*
				 * Thread was interrupted - check for
				 * pending exit or suspend.
				 */
				mutex_exit(&aio->aio_mtx);
				lwp_userret(curlwp);
				mutex_enter(&aio->aio_mtx);
			}
		}

		/* Take the job from the queue */
		aio->curjob = a_job;
		TAILQ_REMOVE(&aio->jobs_queue, a_job, list);

		aio_jobs_count--;	/* XXXSMP */
		aio->jobs_count--;

		mutex_exit(&aio->aio_mtx);

		/* Process an AIO operation */
		aio_process(a_job);

		/* Copy the data structure back to the user-space */
		(void)copyout(&a_job->aiocbp, a_job->aiocb_uptr,
		    sizeof(struct aiocb));

		mutex_enter(&aio->aio_mtx);
		aio->curjob = NULL;

		/* Decrease the reference counter, if there is a LIO structure */
		lio = a_job->lio;
		refcnt = (lio != NULL ? --lio->refcnt : -1);

		/* Notify all suspenders */
		cv_broadcast(&aio->done_cv);
		mutex_exit(&aio->aio_mtx);

		/* Send a signal, if any */
		aio_sendsig(p, &a_job->aiocbp.aio_sigevent);

		/* Destroy the LIO structure, if we dropped the last reference */
		if (refcnt == 0) {
			aio_sendsig(p, &lio->sig);
			pool_put(&aio_lio_pool, lio);
		}

		/* Destroy the job */
		pool_put(&aio_job_pool, a_job);
	}

	/* NOTREACHED */
}

static void
aio_process(struct aio_job *a_job)
{
	struct proc *p = curlwp->l_proc;
	struct aiocb *aiocbp = &a_job->aiocbp;
	struct file *fp;
	struct filedesc *fdp = p->p_fd;
	int fd = aiocbp->aio_fildes;
	int error = 0;

	KASSERT(fdp != NULL);
	KASSERT(a_job->aio_op != 0);

	if ((a_job->aio_op & (AIO_READ | AIO_WRITE)) != 0) {
		struct iovec aiov;
		struct uio auio;

		if (aiocbp->aio_nbytes > SSIZE_MAX) {
			error = EINVAL;
			goto done;
		}

		fp = fd_getfile(fdp, fd);
		if (fp == NULL) {
			error = EBADF;
			goto done;
		}

		aiov.iov_base = (void *)(uintptr_t)aiocbp->aio_buf;
		aiov.iov_len = aiocbp->aio_nbytes;
		auio.uio_iov = &aiov;
		auio.uio_iovcnt = 1;
		auio.uio_resid = aiocbp->aio_nbytes;
		auio.uio_vmspace = p->p_vmspace;
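		/*
		 * The worker LWP shares the process' address space, so
		 * pointing the uio at p->p_vmspace lets the file's read
		 * and write routines access the user buffer directly.
		 */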

		FILE_USE(fp);
		if (a_job->aio_op & AIO_READ) {
			/*
			 * Perform a Read operation
			 */
			KASSERT((a_job->aio_op & AIO_WRITE) == 0);

			if ((fp->f_flag & FREAD) == 0) {
				FILE_UNUSE(fp, curlwp);
				error = EBADF;
				goto done;
			}
			auio.uio_rw = UIO_READ;
			error = (*fp->f_ops->fo_read)(fp, &aiocbp->aio_offset,
			    &auio, fp->f_cred, FOF_UPDATE_OFFSET);
		} else {
			/*
			 * Perform a Write operation
			 */
			KASSERT(a_job->aio_op & AIO_WRITE);

			if ((fp->f_flag & FWRITE) == 0) {
				FILE_UNUSE(fp, curlwp);
				error = EBADF;
				goto done;
			}
			auio.uio_rw = UIO_WRITE;
			error = (*fp->f_ops->fo_write)(fp, &aiocbp->aio_offset,
			    &auio, fp->f_cred, FOF_UPDATE_OFFSET);
		}
		FILE_UNUSE(fp, curlwp);

		/*
		 * Store the result value: aio_nbytes now holds the number
		 * of bytes actually transferred, which aio_return() will
		 * report on success.
		 */
		a_job->aiocbp.aio_nbytes -= auio.uio_resid;
		a_job->aiocbp._retval = (error == 0) ?
		    a_job->aiocbp.aio_nbytes : -1;

	} else if ((a_job->aio_op & (AIO_SYNC | AIO_DSYNC)) != 0) {
		/*
		 * Perform a file Sync operation
		 */
		struct vnode *vp;

		if ((error = getvnode(fdp, fd, &fp)) != 0)
			goto done;

		if ((fp->f_flag & FWRITE) == 0) {
			FILE_UNUSE(fp, curlwp);
			error = EBADF;
			goto done;
		}

		vp = (struct vnode *)fp->f_data;
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		if (a_job->aio_op & AIO_DSYNC) {
			error = VOP_FSYNC(vp, fp->f_cred,
			    FSYNC_WAIT | FSYNC_DATAONLY, 0, 0);
		} else if (a_job->aio_op & AIO_SYNC) {
			error = VOP_FSYNC(vp, fp->f_cred,
			    FSYNC_WAIT, 0, 0);
			if (error == 0 && bioopsp != NULL &&
			    vp->v_mount &&
			    (vp->v_mount->mnt_flag & MNT_SOFTDEP))
				bioopsp->io_fsync(vp, 0);
		}
		VOP_UNLOCK(vp, 0);
		FILE_UNUSE(fp, curlwp);

		/* Store the result value */
		a_job->aiocbp._retval = (error == 0) ? 0 : -1;

	} else
		panic("aio_process: invalid operation code");

done:
	/* Job is done, set the error, if any */
	a_job->aiocbp._errno = error;
	a_job->aiocbp._state = JOB_DONE;
}

/*
 * Send AIO signal.
 */
static void
aio_sendsig(struct proc *p, struct sigevent *sig)
{
	ksiginfo_t ksi;

	if (sig->sigev_signo == 0 || sig->sigev_notify == SIGEV_NONE)
		return;

	KSI_INIT(&ksi);
	ksi.ksi_signo = sig->sigev_signo;
	ksi.ksi_code = SI_ASYNCIO;
	ksi.ksi_value = sig->sigev_value;
	mutex_enter(&proclist_mutex);
	kpsignal(p, &ksi, NULL);
	mutex_exit(&proclist_mutex);
}

/*
 * Enqueue the job.
 */
static int
aio_enqueue_job(int op, void *aiocb_uptr, struct lio_req *lio)
{
	struct proc *p = curlwp->l_proc;
	struct aioproc *aio;
	struct aio_job *a_job;
	struct aiocb aiocbp;
	struct sigevent *sig;
	int error;

	/* Check for the limit */
	if (aio_jobs_count + 1 > aio_max)	/* XXXSMP */
		return EAGAIN;

	/* Get the data structure from user-space */
	error = copyin(aiocb_uptr, &aiocbp, sizeof(struct aiocb));
	if (error)
		return error;

	/* Check if signal is set, and validate it */
	sig = &aiocbp.aio_sigevent;
	if (sig->sigev_signo < 0 || sig->sigev_signo >= NSIG ||
	    sig->sigev_notify < SIGEV_NONE || sig->sigev_notify > SIGEV_SA)
		return EINVAL;

	/* Buffer and byte count */
	if (((AIO_SYNC | AIO_DSYNC) & op) == 0)
		if (aiocbp.aio_buf == NULL || aiocbp.aio_nbytes > SSIZE_MAX)
			return EINVAL;

	/* Check the opcode, if LIO_NOP - simply ignore */
	if (op == AIO_LIO) {
		KASSERT(lio != NULL);
		if (aiocbp.aio_lio_opcode == LIO_WRITE)
			op = AIO_WRITE;
		else if (aiocbp.aio_lio_opcode == LIO_READ)
			op = AIO_READ;
		else
			return (aiocbp.aio_lio_opcode == LIO_NOP) ? 0 : EINVAL;
	} else {
		KASSERT(lio == NULL);
	}

	/*
	 * Look for an already existing job.  If found, the job is still
	 * in progress.  According to POSIX this is invalid, so return
	 * the error.
	 */
	aio = p->p_aio;
	if (aio) {
		mutex_enter(&aio->aio_mtx);
		if (aio->curjob) {
			a_job = aio->curjob;
			if (a_job->aiocb_uptr == aiocb_uptr) {
				mutex_exit(&aio->aio_mtx);
				return EINVAL;
			}
		}
		TAILQ_FOREACH(a_job, &aio->jobs_queue, list) {
			if (a_job->aiocb_uptr != aiocb_uptr)
				continue;
			mutex_exit(&aio->aio_mtx);
			return EINVAL;
		}
		mutex_exit(&aio->aio_mtx);
	}

	/*
	 * Check if the AIO structure is initialized; if not, initialize it.
	 * In the LIO case, we did that already.  We will recheck this with
	 * the lock held in aio_init().
	 */
	if (lio == NULL && p->p_aio == NULL)
		if (aio_init(p))
			return EAGAIN;
	aio = p->p_aio;

	/*
	 * Set the state with errno, and copy the data
	 * structure back to the user-space.
	 */
	aiocbp._state = JOB_WIP;
	aiocbp._errno = EINPROGRESS;
	aiocbp._retval = -1;
	error = copyout(&aiocbp, aiocb_uptr, sizeof(struct aiocb));
	if (error)
		return error;
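
	/*
	 * From this point on, aio_error() on this control block reports
	 * EINPROGRESS until the worker thread completes the job.
	 */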

	/* Allocate and initialize a new AIO job */
	a_job = pool_get(&aio_job_pool, PR_WAITOK);
	memset(a_job, 0, sizeof(struct aio_job));

	/*
	 * Set the data.
	 * Store the user-space pointer for searching.  Since we are
	 * storing only per-process pointers, this is safe.
	 */
	memcpy(&a_job->aiocbp, &aiocbp, sizeof(struct aiocb));
	a_job->aiocb_uptr = aiocb_uptr;
	a_job->aio_op |= op;
	a_job->lio = lio;

	/*
	 * Add the job to the queue, update the counters, and
	 * notify the AIO worker thread to handle the job.
	 */
	mutex_enter(&aio->aio_mtx);

	/* Fail, if the limit was reached */
	if (aio->jobs_count >= aio_listio_max) {
		mutex_exit(&aio->aio_mtx);
		pool_put(&aio_job_pool, a_job);
		return EAGAIN;
	}

	TAILQ_INSERT_TAIL(&aio->jobs_queue, a_job, list);
	aio_jobs_count++;	/* XXXSMP */
	aio->jobs_count++;
	if (lio)
		lio->refcnt++;
	cv_signal(&aio->aio_worker_cv);

	mutex_exit(&aio->aio_mtx);

	/*
	 * Asynchronous I/O errors are reported only via aio_error();
	 * this is the behaviour POSIX specifies.
	 */
	return 0;
}

/*
 * Syscall functions.
 */

int
sys_aio_cancel(struct lwp *l, void *v, register_t *retval)
{
	struct sys_aio_cancel_args /* {
		syscallarg(int) fildes;
		syscallarg(struct aiocb *) aiocbp;
	} */ *uap = v;
	struct proc *p = l->l_proc;
	struct aioproc *aio;
	struct aio_job *a_job;
	struct aiocb *aiocbp_ptr;
	struct lio_req *lio;
	struct filedesc *fdp = p->p_fd;
	unsigned int cn, errcnt, fildes;

	TAILQ_HEAD(, aio_job) tmp_jobs_list;

	/* Check for invalid file descriptor */
	fildes = (unsigned int)SCARG(uap, fildes);
	if (fildes >= fdp->fd_nfiles || fdp->fd_ofiles[fildes] == NULL)
		return EBADF;

	/* Check if AIO structure is initialized */
	if (p->p_aio == NULL) {
		*retval = AIO_NOTCANCELED;
		return 0;
	}

	aio = p->p_aio;
	aiocbp_ptr = (struct aiocb *)SCARG(uap, aiocbp);

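	/*
	 * Cancellation is done in two phases: while holding aio_mtx the
	 * matching jobs are moved from the queue onto a temporary list,
	 * and only after the lock is dropped are they copied out,
	 * signalled and freed.
	 */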
	mutex_enter(&aio->aio_mtx);

	/* Cancel the jobs, and remove them from the queue */
	cn = 0;
	TAILQ_INIT(&tmp_jobs_list);
	TAILQ_FOREACH(a_job, &aio->jobs_queue, list) {
		if (aiocbp_ptr) {
			if (aiocbp_ptr != a_job->aiocb_uptr)
				continue;
			if (fildes != a_job->aiocbp.aio_fildes) {
				mutex_exit(&aio->aio_mtx);
				return EBADF;
			}
		} else if (a_job->aiocbp.aio_fildes != fildes)
			continue;

		TAILQ_REMOVE(&aio->jobs_queue, a_job, list);
		TAILQ_INSERT_TAIL(&tmp_jobs_list, a_job, list);

		/* Decrease the counters */
		aio_jobs_count--;	/* XXXSMP */
		aio->jobs_count--;
		lio = a_job->lio;
		if (lio != NULL && --lio->refcnt != 0)
			a_job->lio = NULL;

		cn++;
		if (aiocbp_ptr)
			break;
	}

	/* Report if any jobs were cancelled */
	if (cn)
		*retval = AIO_CANCELED;

	/* We cannot cancel the job that is currently being processed */
	a_job = aio->curjob;
	if (a_job && ((a_job->aiocbp.aio_fildes == fildes) ||
	    (a_job->aiocb_uptr == aiocbp_ptr)))
		*retval = AIO_NOTCANCELED;

	mutex_exit(&aio->aio_mtx);

	/* Free the jobs after dropping the lock */
	errcnt = 0;
	while (!TAILQ_EMPTY(&tmp_jobs_list)) {
		a_job = TAILQ_FIRST(&tmp_jobs_list);
		TAILQ_REMOVE(&tmp_jobs_list, a_job, list);
		/* Set the errno and copy structures back to the user-space */
		a_job->aiocbp._errno = ECANCELED;
		a_job->aiocbp._state = JOB_DONE;
		if (copyout(&a_job->aiocbp, a_job->aiocb_uptr,
		    sizeof(struct aiocb)))
			errcnt++;
		/* Send a signal, if any */
		aio_sendsig(p, &a_job->aiocbp.aio_sigevent);
		if (a_job->lio) {
			lio = a_job->lio;
			aio_sendsig(p, &lio->sig);
			pool_put(&aio_lio_pool, lio);
		}
		pool_put(&aio_job_pool, a_job);
	}

	if (errcnt)
		return EFAULT;

	/* Set a correct return value */
	if (*retval == 0)
		*retval = AIO_ALLDONE;

	return 0;
}

int
sys_aio_error(struct lwp *l, void *v, register_t *retval)
{
	struct sys_aio_error_args /* {
		syscallarg(const struct aiocb *) aiocbp;
	} */ *uap = v;
	struct proc *p = l->l_proc;
	struct aioproc *aio = p->p_aio;
	struct aiocb aiocbp;
	int error;

	if (aio == NULL)
		return EINVAL;

	error = copyin(SCARG(uap, aiocbp), &aiocbp, sizeof(struct aiocb));
	if (error)
		return error;

	if (aiocbp._state == JOB_NONE)
		return EINVAL;

	*retval = aiocbp._errno;

	return 0;
}

int
sys_aio_fsync(struct lwp *l, void *v, register_t *retval)
{
	struct sys_aio_fsync_args /* {
		syscallarg(int) op;
		syscallarg(struct aiocb *) aiocbp;
	} */ *uap = v;
	int op = SCARG(uap, op);

	if ((op != O_DSYNC) && (op != O_SYNC))
		return EINVAL;

	/*
	 * O_DSYNC requests a data-only sync (AIO_DSYNC); O_SYNC requests
	 * a full data and metadata sync (AIO_SYNC).
	 */
	op = (op == O_DSYNC) ? AIO_DSYNC : AIO_SYNC;

	return aio_enqueue_job(op, SCARG(uap, aiocbp), NULL);
}

int
sys_aio_read(struct lwp *l, void *v, register_t *retval)
{
	struct sys_aio_read_args /* {
		syscallarg(struct aiocb *) aiocbp;
	} */ *uap = v;

	return aio_enqueue_job(AIO_READ, SCARG(uap, aiocbp), NULL);
}

int
sys_aio_return(struct lwp *l, void *v, register_t *retval)
{
	struct sys_aio_return_args /* {
		syscallarg(struct aiocb *) aiocbp;
	} */ *uap = v;
	struct proc *p = l->l_proc;
	struct aioproc *aio = p->p_aio;
	struct aiocb aiocbp;
	int error;

	if (aio == NULL)
		return EINVAL;

	error = copyin(SCARG(uap, aiocbp), &aiocbp, sizeof(struct aiocb));
	if (error)
		return error;

	if (aiocbp._errno == EINPROGRESS || aiocbp._state != JOB_DONE)
		return EINVAL;

	*retval = aiocbp._retval;

	/* Reset the internal variables */
	aiocbp._errno = 0;
	aiocbp._retval = -1;
	aiocbp._state = JOB_NONE;
	error = copyout(&aiocbp, SCARG(uap, aiocbp), sizeof(struct aiocb));

	return error;
}

int
sys_aio_suspend(struct lwp *l, void *v, register_t *retval)
{
	struct sys_aio_suspend_args /* {
		syscallarg(const struct aiocb *const[]) list;
		syscallarg(int) nent;
		syscallarg(const struct timespec *) timeout;
	} */ *uap = v;
	struct proc *p = l->l_proc;
	struct aioproc *aio;
	struct aio_job *a_job;
	struct aiocb **aiocbp_list;
	struct timespec ts;
	int i, error, nent, timo;

	if (p->p_aio == NULL)
		return EAGAIN;
	aio = p->p_aio;

	nent = SCARG(uap, nent);
	if (nent <= 0 || nent > aio_listio_max)
		return EAGAIN;

	if (SCARG(uap, timeout)) {
		/* Convert timespec to ticks */
		error = copyin(SCARG(uap, timeout), &ts,
		    sizeof(struct timespec));
		if (error)
			return error;
		timo = mstohz((ts.tv_sec * 1000) + (ts.tv_nsec / 1000000));
		if (timo == 0 && ts.tv_sec == 0 && ts.tv_nsec > 0)
			timo = 1;
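		/*
		 * A non-zero timeout shorter than one clock tick is
		 * rounded up to one tick, e.g. a 5 ms request with
		 * HZ = 100 (10 ms ticks) still sleeps for one tick.
		 */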
		if (timo <= 0)
			return EAGAIN;
	} else
		timo = 0;

	/* Get the list of pointers from user-space */
	aiocbp_list = kmem_zalloc(nent * sizeof(*aiocbp_list), KM_SLEEP);
	error = copyin(SCARG(uap, list), aiocbp_list,
	    nent * sizeof(*aiocbp_list));
	if (error) {
		kmem_free(aiocbp_list, nent * sizeof(*aiocbp_list));
		return error;
	}

	mutex_enter(&aio->aio_mtx);
	for (;;) {

		for (i = 0; i < nent; i++) {

			/* Skip NULL entries */
			if (aiocbp_list[i] == NULL)
				continue;

			/* Skip the current job */
			if (aio->curjob) {
				a_job = aio->curjob;
				if (a_job->aiocb_uptr == aiocbp_list[i])
					continue;
			}

			/* Look for a job in the queue */
			TAILQ_FOREACH(a_job, &aio->jobs_queue, list)
				if (a_job->aiocb_uptr == aiocbp_list[i])
					break;

			if (a_job == NULL) {
				struct aiocb aiocbp;

				mutex_exit(&aio->aio_mtx);

				error = copyin(aiocbp_list[i], &aiocbp,
				    sizeof(struct aiocb));
				if (error == 0 && aiocbp._state != JOB_DONE) {
					mutex_enter(&aio->aio_mtx);
					continue;
				}

				kmem_free(aiocbp_list,
				    nent * sizeof(*aiocbp_list));
				return error;
			}
		}

		/* Wait for a signal or until the timeout occurs */
		error = cv_timedwait_sig(&aio->done_cv, &aio->aio_mtx, timo);
		if (error) {
			if (error == EWOULDBLOCK)
				error = EAGAIN;
			break;
		}
	}
	mutex_exit(&aio->aio_mtx);

	kmem_free(aiocbp_list, nent * sizeof(*aiocbp_list));
	return error;
}

int
sys_aio_write(struct lwp *l, void *v, register_t *retval)
{
	struct sys_aio_write_args /* {
		syscallarg(struct aiocb *) aiocbp;
	} */ *uap = v;

	return aio_enqueue_job(AIO_WRITE, SCARG(uap, aiocbp), NULL);
}

int
sys_lio_listio(struct lwp *l, void *v, register_t *retval)
{
	struct sys_lio_listio_args /* {
		syscallarg(int) mode;
		syscallarg(struct aiocb *const[]) list;
		syscallarg(int) nent;
		syscallarg(struct sigevent *) sig;
	} */ *uap = v;
	struct proc *p = l->l_proc;
	struct aioproc *aio;
	struct aiocb **aiocbp_list;
	struct lio_req *lio;
	int i, error, errcnt, mode, nent;

	mode = SCARG(uap, mode);
	nent = SCARG(uap, nent);

	/* Check for the limits, and invalid values */
	if (nent < 1 || nent > aio_listio_max)
		return EINVAL;
	if (aio_jobs_count + nent > aio_max)	/* XXXSMP */
		return EAGAIN;

	/* Check if AIO structure is initialized, if not - initialize it */
	if (p->p_aio == NULL)
		if (aio_init(p))
			return EAGAIN;
	aio = p->p_aio;

	/* Create a LIO structure */
	lio = pool_get(&aio_lio_pool, PR_WAITOK);
	lio->refcnt = 1;
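	/*
	 * The submitting thread holds the initial reference; each job
	 * enqueued below takes another one in aio_enqueue_job().  Whoever
	 * drops the last reference sends lio->sig and frees the structure.
	 */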
	error = 0;

	switch (mode) {
	case LIO_WAIT:
		memset(&lio->sig, 0, sizeof(struct sigevent));
		break;
	case LIO_NOWAIT:
		/* Check for a signal, and validate it */
		if (SCARG(uap, sig)) {
			struct sigevent *sig = &lio->sig;

			error = copyin(SCARG(uap, sig), &lio->sig,
			    sizeof(struct sigevent));
			if (error == 0 &&
			    (sig->sigev_signo < 0 ||
			    sig->sigev_signo >= NSIG ||
			    sig->sigev_notify < SIGEV_NONE ||
			    sig->sigev_notify > SIGEV_SA))
				error = EINVAL;
		} else
			memset(&lio->sig, 0, sizeof(struct sigevent));
		break;
	default:
		error = EINVAL;
		break;
	}

	if (error != 0) {
		pool_put(&aio_lio_pool, lio);
		return error;
	}

	/* Get the list of pointers from user-space */
	aiocbp_list = kmem_zalloc(nent * sizeof(*aiocbp_list), KM_SLEEP);
	error = copyin(SCARG(uap, list), aiocbp_list,
	    nent * sizeof(*aiocbp_list));
	if (error) {
		mutex_enter(&aio->aio_mtx);
		goto err;
	}

	/* Enqueue all jobs */
	errcnt = 0;
	for (i = 0; i < nent; i++) {
		error = aio_enqueue_job(AIO_LIO, aiocbp_list[i], lio);
		/*
		 * According to POSIX, lio_listio() may fail even though
		 * some of the I/O operations in the list were
		 * successfully initiated.
		 */
		if (error)
			errcnt++;
	}

	mutex_enter(&aio->aio_mtx);

	/* Return an error, if any */
	if (errcnt) {
		error = EIO;
		goto err;
	}

	if (mode == LIO_WAIT) {
		/*
		 * Wait for AIO completion.  In this case, the LIO
		 * structure will be freed here, once the jobs have
		 * dropped their references.
		 */
		while (lio->refcnt > 1 && error == 0)
			error = cv_wait_sig(&aio->done_cv, &aio->aio_mtx);
		if (error)
			error = EINTR;
	}

err:
	if (--lio->refcnt != 0)
		lio = NULL;
	mutex_exit(&aio->aio_mtx);
	if (lio != NULL) {
		aio_sendsig(p, &lio->sig);
		pool_put(&aio_lio_pool, lio);
	}
	kmem_free(aiocbp_list, nent * sizeof(*aiocbp_list));
	return error;
}

/*
 * SysCtl
 */

static int
sysctl_aio_listio_max(SYSCTLFN_ARGS)
{
	struct sysctlnode node;
	int error, newsize;

	node = *rnode;
	node.sysctl_data = &newsize;

	newsize = aio_listio_max;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return error;

	/* XXXSMP */
	if (newsize < 1 || newsize > aio_max)
		return EINVAL;
	aio_listio_max = newsize;

	return 0;
}

static int
sysctl_aio_max(SYSCTLFN_ARGS)
{
	struct sysctlnode node;
	int error, newsize;

	node = *rnode;
	node.sysctl_data = &newsize;

	newsize = aio_max;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return error;

	/* XXXSMP */
	if (newsize < 1 || newsize < aio_listio_max)
		return EINVAL;
	aio_max = newsize;

	return 0;
}

SYSCTL_SETUP(sysctl_aio_setup, "sysctl aio setup")
{

	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT,
	    CTLTYPE_NODE, "kern", NULL,
	    NULL, 0, NULL, 0,
	    CTL_KERN, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT | CTLFLAG_IMMEDIATE,
	    CTLTYPE_INT, "posix_aio",
	    SYSCTL_DESCR("Version of IEEE Std 1003.1 and its "
			 "Asynchronous I/O option to which the "
			 "system attempts to conform"),
	    NULL, _POSIX_ASYNCHRONOUS_IO, NULL, 0,
	    CTL_KERN, CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
	    CTLTYPE_INT, "aio_listio_max",
	    SYSCTL_DESCR("Maximum number of asynchronous I/O "
			 "operations in a single list I/O call"),
	    sysctl_aio_listio_max, 0, &aio_listio_max, 0,
	    CTL_KERN, CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
	    CTLTYPE_INT, "aio_max",
	    SYSCTL_DESCR("Maximum number of asynchronous I/O "
			 "operations"),
	    sysctl_aio_max, 0, &aio_max, 0,
	    CTL_KERN, CTL_CREATE, CTL_EOL);
}

/*
 * Debugging
 */
#if defined(DDB)
void
aio_print_jobs(void (*pr)(const char *, ...))
{
	struct proc *p = (curlwp == NULL ? NULL : curlwp->l_proc);
	struct aioproc *aio;
	struct aio_job *a_job;
	struct aiocb *aiocbp;

	if (p == NULL) {
		(*pr)("AIO: We are not in a process right now.\n");
		return;
	}

	aio = p->p_aio;
	if (aio == NULL) {
		(*pr)("AIO data is not initialized (PID = %d).\n", p->p_pid);
		return;
	}

	(*pr)("AIO: PID = %d\n", p->p_pid);
	(*pr)("AIO: Global count of the jobs = %u\n", aio_jobs_count);
	(*pr)("AIO: Count of the jobs = %u\n", aio->jobs_count);

	if (aio->curjob) {
		a_job = aio->curjob;
		(*pr)("\nAIO current job:\n");
		(*pr)(" opcode = %d, errno = %d, state = %d, aiocb_ptr = %p\n",
		    a_job->aio_op, a_job->aiocbp._errno,
		    a_job->aiocbp._state, a_job->aiocb_uptr);
		aiocbp = &a_job->aiocbp;
		(*pr)(" fd = %d, offset = %u, buf = %p, nbytes = %u\n",
		    aiocbp->aio_fildes, aiocbp->aio_offset,
		    aiocbp->aio_buf, aiocbp->aio_nbytes);
	}

	(*pr)("\nAIO queue:\n");
	TAILQ_FOREACH(a_job, &aio->jobs_queue, list) {
		(*pr)(" opcode = %d, errno = %d, state = %d, aiocb_ptr = %p\n",
		    a_job->aio_op, a_job->aiocbp._errno,
		    a_job->aiocbp._state, a_job->aiocb_uptr);
		aiocbp = &a_job->aiocbp;
		(*pr)(" fd = %d, offset = %u, buf = %p, nbytes = %u\n",
		    aiocbp->aio_fildes, aiocbp->aio_offset,
		    aiocbp->aio_buf, aiocbp->aio_nbytes);
	}
}
#endif /* defined(DDB) */