/*	$NetBSD: sys_aio.c,v 1.5.2.9 2007/10/16 11:08:51 ad Exp $	*/

/*
 * Copyright (c) 2007, Mindaugas Rasiukevicius <rmind at NetBSD org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * TODO:
 *   1. Additional work for VCHR and maybe VBLK devices.
 *   2. Consider making the job-finding O(n) per one file descriptor.
 */
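
/*
 * For reference, a minimal userland sketch (not part of this file) of how
 * the interface implemented below is typically driven; the file name and
 * buffer size are arbitrary, and one could block in aio_suspend() instead
 * of polling with aio_error():
 *
 *	#include <aio.h>
 *	#include <errno.h>
 *	#include <fcntl.h>
 *	#include <string.h>
 *
 *	char buf[512];
 *	struct aiocb acb;
 *	int fd = open("/some/file", O_RDONLY);
 *
 *	memset(&acb, 0, sizeof(acb));
 *	acb.aio_fildes = fd;
 *	acb.aio_buf = buf;
 *	acb.aio_nbytes = sizeof(buf);
 *	acb.aio_offset = 0;
 *
 *	if (aio_read(&acb) == 0) {
 *		while (aio_error(&acb) == EINPROGRESS)
 *			;
 *		ssize_t n = aio_return(&acb);
 *	}
 */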

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sys_aio.c,v 1.5.2.9 2007/10/16 11:08:51 ad Exp $");

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/condvar.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/lwp.h>
#include <sys/mutex.h>
#include <sys/pool.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/signal.h>
#include <sys/signalvar.h>
#include <sys/syscallargs.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/vnode.h>

#include <uvm/uvm_extern.h>

/*
 * System-wide limits and counter of AIO operations.
 * XXXSMP: These should be protected by a spin lock, or modified atomically.
 */
static u_int aio_listio_max = AIO_LISTIO_MAX;
static u_int aio_max = AIO_MAX;
static u_int aio_jobs_count;

static struct pool aio_job_pool;
static struct pool aio_lio_pool;

/* Prototypes */
void aio_worker(void *);
static void aio_process(struct aio_job *);
static void aio_sendsig(struct proc *, struct sigevent *);
static int aio_enqueue_job(int, void *, struct lio_req *);

     77  1.5.2.2  ad /*
     78  1.5.2.3  ad  * Initialize the AIO system.
     79  1.5.2.3  ad  */
     80  1.5.2.3  ad void
     81  1.5.2.3  ad aio_sysinit(void)
     82  1.5.2.3  ad {
     83  1.5.2.3  ad 
     84  1.5.2.3  ad 	pool_init(&aio_job_pool, sizeof(struct aio_job), 0, 0, 0,
     85  1.5.2.3  ad 	    "aio_jobs_pool", &pool_allocator_nointr, IPL_NONE);
     86  1.5.2.3  ad 	pool_init(&aio_lio_pool, sizeof(struct lio_req), 0, 0, 0,
     87  1.5.2.3  ad 	    "aio_lio_pool", &pool_allocator_nointr, IPL_NONE);
     88  1.5.2.3  ad }
     89  1.5.2.3  ad 
/*
 * Initialize the Asynchronous I/O data structures for the process.
 */
int
aio_init(struct proc *p)
{
	struct aioproc *aio;
	struct lwp *l;
	bool inmem;
	vaddr_t uaddr;

	/* Allocate and initialize the AIO structure */
	aio = kmem_zalloc(sizeof(struct aioproc), KM_NOSLEEP);
	if (aio == NULL)
		return EAGAIN;

	/* Initialize the queue and its synchronization structures */
	mutex_init(&aio->aio_mtx, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&aio->aio_worker_cv, "aiowork");
	cv_init(&aio->done_cv, "aiodone");
	TAILQ_INIT(&aio->jobs_queue);

	/*
	 * Create an AIO worker thread.
	 * XXX: Currently, the AIO thread is not protected against the
	 * user's actions.
	 */
	inmem = uvm_uarea_alloc(&uaddr);
	if (uaddr == 0) {
		aio_exit(p, aio);
		return EAGAIN;
	}
	if (newlwp(curlwp, p, uaddr, inmem, 0, NULL, 0,
	    aio_worker, NULL, &l)) {
		uvm_uarea_free(uaddr);
		aio_exit(p, aio);
		return EAGAIN;
	}

	/* Recheck if we are really the first */
	mutex_enter(&p->p_mutex);
	if (p->p_aio) {
		mutex_exit(&p->p_mutex);
		aio_exit(p, aio);
		lwp_exit(l);
		return 0;
	}
	p->p_aio = aio;
	mutex_exit(&p->p_mutex);

	/* Complete the initialization of the thread, and run it */
	mutex_enter(&p->p_smutex);
	aio->aio_worker = l;
	p->p_nrlwps++;
	lwp_lock(l);
	l->l_stat = LSRUN;
	l->l_usrpri = PRI_KERNEL - 1;
	l->l_priority = l->l_usrpri;
	sched_enqueue(l, false);
	lwp_unlock(l);
	mutex_exit(&p->p_smutex);

	return 0;
}

/*
 * Tear down the Asynchronous I/O state of the process.
 */
void
aio_exit(struct proc *p, struct aioproc *aio)
{
	struct aio_job *a_job;

	if (aio == NULL)
		return;

	/* Free the AIO queue */
	while (!TAILQ_EMPTY(&aio->jobs_queue)) {
		a_job = TAILQ_FIRST(&aio->jobs_queue);
		TAILQ_REMOVE(&aio->jobs_queue, a_job, list);
		pool_put(&aio_job_pool, a_job);
		aio_jobs_count--; /* XXXSMP */
	}

	/* Destroy and free the entire AIO data structure */
	cv_destroy(&aio->aio_worker_cv);
	cv_destroy(&aio->done_cv);
	mutex_destroy(&aio->aio_mtx);
	kmem_free(aio, sizeof(struct aioproc));
}

/*
 * AIO worker thread and processor.
 */
void
aio_worker(void *arg)
{
	struct proc *p = curlwp->l_proc;
	struct aioproc *aio = p->p_aio;
	struct aio_job *a_job;
	struct lio_req *lio;
	sigset_t oss, nss;
	int error, refcnt;

	/*
	 * Block all signals; only SIGKILL and SIGSTOP, which cannot be
	 * masked, can still affect this thread.
	 */
	sigfillset(&nss);
	mutex_enter(&p->p_smutex);
	error = sigprocmask1(curlwp, SIG_SETMASK, &nss, &oss);
	mutex_exit(&p->p_smutex);
	KASSERT(error == 0);

	for (;;) {
		/*
		 * Loop for each job in the queue.  If there
		 * are no jobs then sleep.
		 */
		mutex_enter(&aio->aio_mtx);
		while ((a_job = TAILQ_FIRST(&aio->jobs_queue)) == NULL) {
			if (cv_wait_sig(&aio->aio_worker_cv, &aio->aio_mtx)) {
				/*
				 * Thread was interrupted - check for
				 * pending exit or suspend.
				 */
				mutex_exit(&aio->aio_mtx);
				lwp_userret(curlwp);
				mutex_enter(&aio->aio_mtx);
			}
		}

		/* Take the job from the queue */
		aio->curjob = a_job;
		TAILQ_REMOVE(&aio->jobs_queue, a_job, list);

		aio_jobs_count--; /* XXXSMP */
		aio->jobs_count--;

		mutex_exit(&aio->aio_mtx);

		/* Process an AIO operation */
		aio_process(a_job);

		/* Copy the data structure back to user-space */
		(void)copyout(&a_job->aiocbp, a_job->aiocb_uptr,
		    sizeof(struct aiocb));

		mutex_enter(&aio->aio_mtx);
		aio->curjob = NULL;

		/* Decrease the reference counter, if there is a LIO structure */
		lio = a_job->lio;
		refcnt = (lio != NULL ? --lio->refcnt : -1);

		/* Notify all suspenders */
		cv_broadcast(&aio->done_cv);
		mutex_exit(&aio->aio_mtx);

		/* Send a signal, if any */
		aio_sendsig(p, &a_job->aiocbp.aio_sigevent);

		/* Destroy the LIO structure */
		if (refcnt == 0) {
			aio_sendsig(p, &lio->sig);
			pool_put(&aio_lio_pool, lio);
		}

		/* Destroy the job */
		pool_put(&aio_job_pool, a_job);
	}

	/* NOTREACHED */
}

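/*
 * Perform the actual I/O for one job: a read, write or fsync on the
 * file descriptor named in the aiocb, storing the error and return
 * values into the in-kernel aiocb copy.
 */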
static void
aio_process(struct aio_job *a_job)
{
	struct proc *p = curlwp->l_proc;
	struct aiocb *aiocbp = &a_job->aiocbp;
	struct file *fp;
	struct filedesc	*fdp = p->p_fd;
	int fd = aiocbp->aio_fildes;
	int error = 0;

	KASSERT(fdp != NULL);
	KASSERT(a_job->aio_op != 0);

	if ((a_job->aio_op & (AIO_READ | AIO_WRITE)) != 0) {
		struct iovec aiov;
		struct uio auio;

		if (aiocbp->aio_nbytes > SSIZE_MAX) {
			error = EINVAL;
			goto done;
		}

		fp = fd_getfile(fdp, fd);
		if (fp == NULL) {
			error = EBADF;
			goto done;
		}

		aiov.iov_base = (void *)(uintptr_t)aiocbp->aio_buf;
		aiov.iov_len = aiocbp->aio_nbytes;
		auio.uio_iov = &aiov;
		auio.uio_iovcnt = 1;
		auio.uio_resid = aiocbp->aio_nbytes;
		auio.uio_vmspace = p->p_vmspace;

		FILE_USE(fp);
		if (a_job->aio_op & AIO_READ) {
			/*
			 * Perform a Read operation
			 */
			KASSERT((a_job->aio_op & AIO_WRITE) == 0);

			if ((fp->f_flag & FREAD) == 0) {
				FILE_UNUSE(fp, curlwp);
				error = EBADF;
				goto done;
			}
			auio.uio_rw = UIO_READ;
			error = (*fp->f_ops->fo_read)(fp, &aiocbp->aio_offset,
			    &auio, fp->f_cred, FOF_UPDATE_OFFSET);
		} else {
			/*
			 * Perform a Write operation
			 */
			KASSERT(a_job->aio_op & AIO_WRITE);

			if ((fp->f_flag & FWRITE) == 0) {
				FILE_UNUSE(fp, curlwp);
				error = EBADF;
				goto done;
			}
			auio.uio_rw = UIO_WRITE;
			error = (*fp->f_ops->fo_write)(fp, &aiocbp->aio_offset,
			    &auio, fp->f_cred, FOF_UPDATE_OFFSET);
		}
		FILE_UNUSE(fp, curlwp);

		/* Store the result value */
		a_job->aiocbp.aio_nbytes -= auio.uio_resid;
		a_job->aiocbp._retval = (error == 0) ?
		    a_job->aiocbp.aio_nbytes : -1;

	} else if ((a_job->aio_op & (AIO_SYNC | AIO_DSYNC)) != 0) {
		/*
		 * Perform a file Sync operation
		 */
		struct vnode *vp;

		if ((error = getvnode(fdp, fd, &fp)) != 0)
			goto done;

		if ((fp->f_flag & FWRITE) == 0) {
			FILE_UNUSE(fp, curlwp);
			error = EBADF;
			goto done;
		}

		vp = (struct vnode *)fp->f_data;
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		if (a_job->aio_op & AIO_DSYNC) {
			error = VOP_FSYNC(vp, fp->f_cred,
			    FSYNC_WAIT | FSYNC_DATAONLY, 0, 0, curlwp);
		} else if (a_job->aio_op & AIO_SYNC) {
			error = VOP_FSYNC(vp, fp->f_cred,
			    FSYNC_WAIT, 0, 0, curlwp);
			if (error == 0 && bioopsp != NULL &&
			    vp->v_mount &&
			    (vp->v_mount->mnt_flag & MNT_SOFTDEP))
			    bioopsp->io_fsync(vp, 0);
		}
		VOP_UNLOCK(vp, 0);
		FILE_UNUSE(fp, curlwp);

		/* Store the result value */
		a_job->aiocbp._retval = (error == 0) ? 0 : -1;

	} else
		panic("aio_process: invalid operation code\n");

done:
	/* Job is done, set the error, if any */
	a_job->aiocbp._errno = error;
	a_job->aiocbp._state = JOB_DONE;
}

/*
 * Send AIO signal.
 */
static void
aio_sendsig(struct proc *p, struct sigevent *sig)
{
	ksiginfo_t ksi;

	if (sig->sigev_signo == 0 || sig->sigev_notify == SIGEV_NONE)
		return;

	KSI_INIT(&ksi);
	ksi.ksi_signo = sig->sigev_signo;
	ksi.ksi_code = SI_ASYNCIO;
	ksi.ksi_value = sig->sigev_value;
	mutex_enter(&proclist_mutex);
	kpsignal(p, &ksi, NULL);
	mutex_exit(&proclist_mutex);
}
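
/*
 * For reference, a userland sketch (not part of this file) of requesting
 * completion notification by signal; the signal number and value here are
 * arbitrary:
 *
 *	acb.aio_sigevent.sigev_notify = SIGEV_SIGNAL;
 *	acb.aio_sigevent.sigev_signo = SIGUSR1;
 *	acb.aio_sigevent.sigev_value.sival_ptr = &acb;
 *
 * With SIGEV_NONE, or a zero sigev_signo, aio_sendsig() above does nothing
 * and completion has to be polled with aio_error() or awaited with
 * aio_suspend().
 */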

/*
 * Enqueue the job.
 */
static int
aio_enqueue_job(int op, void *aiocb_uptr, struct lio_req *lio)
{
	struct proc *p = curlwp->l_proc;
	struct aioproc *aio;
	struct aio_job *a_job;
	struct aiocb aiocbp;
	struct sigevent *sig;
	int error;

	/* Check for the limit */
	if (aio_jobs_count + 1 > aio_max) /* XXXSMP */
		return EAGAIN;

	/* Get the data structure from user-space */
	error = copyin(aiocb_uptr, &aiocbp, sizeof(struct aiocb));
	if (error)
		return error;

	/* Check if a signal is set, and validate it */
	sig = &aiocbp.aio_sigevent;
	if (sig->sigev_signo < 0 || sig->sigev_signo >= NSIG ||
	    sig->sigev_notify < SIGEV_NONE || sig->sigev_notify > SIGEV_SA)
		return EINVAL;

	/* Buffer and byte count */
	if (((AIO_SYNC | AIO_DSYNC) & op) == 0)
		if (aiocbp.aio_buf == NULL || aiocbp.aio_nbytes > SSIZE_MAX)
			return EINVAL;

	/* Check the opcode; if LIO_NOP - simply ignore */
	if (op == AIO_LIO) {
		KASSERT(lio != NULL);
		if (aiocbp.aio_lio_opcode == LIO_WRITE)
			op = AIO_WRITE;
		else if (aiocbp.aio_lio_opcode == LIO_READ)
			op = AIO_READ;
		else
			return (aiocbp.aio_lio_opcode == LIO_NOP) ? 0 : EINVAL;
	} else {
		KASSERT(lio == NULL);
	}

	/*
	 * Look for an already existing job.  If found, the job is in
	 * progress.  According to POSIX this is invalid, so return the error.
	 */
	aio = p->p_aio;
	if (aio) {
		mutex_enter(&aio->aio_mtx);
		if (aio->curjob) {
			a_job = aio->curjob;
			if (a_job->aiocb_uptr == aiocb_uptr) {
				mutex_exit(&aio->aio_mtx);
				return EINVAL;
			}
		}
		TAILQ_FOREACH(a_job, &aio->jobs_queue, list) {
			if (a_job->aiocb_uptr != aiocb_uptr)
				continue;
			mutex_exit(&aio->aio_mtx);
			return EINVAL;
		}
		mutex_exit(&aio->aio_mtx);
	}

	/*
	 * Check if the AIO structure is initialized; if not, initialize it.
	 * In the LIO case, we did that already.  We will recheck this with
	 * the lock held in aio_init().
	 */
	if (lio == NULL && p->p_aio == NULL)
		if (aio_init(p))
			return EAGAIN;
	aio = p->p_aio;

	/*
	 * Set the state and errno, and copy the data
	 * structure back to user-space.
	 */
	aiocbp._state = JOB_WIP;
	aiocbp._errno = EINPROGRESS;
	aiocbp._retval = -1;
	error = copyout(&aiocbp, aiocb_uptr, sizeof(struct aiocb));
	if (error)
		return error;

	/* Allocate and initialize a new AIO job */
	a_job = pool_get(&aio_job_pool, PR_WAITOK);
	memset(a_job, 0, sizeof(struct aio_job));

	/*
	 * Set the data.
	 * Store the user-space pointer for searching.  Since we store
	 * only per-process pointers, this is safe.
	 */
	memcpy(&a_job->aiocbp, &aiocbp, sizeof(struct aiocb));
	a_job->aiocb_uptr = aiocb_uptr;
	a_job->aio_op |= op;
	a_job->lio = lio;

	/*
	 * Add the job to the queue, update the counters, and
	 * notify the AIO worker thread to handle the job.
	 */
	mutex_enter(&aio->aio_mtx);

	/* Fail, if the limit was reached */
	if (aio->jobs_count >= aio_listio_max) {
		mutex_exit(&aio->aio_mtx);
		pool_put(&aio_job_pool, a_job);
		return EAGAIN;
	}

	TAILQ_INSERT_TAIL(&aio->jobs_queue, a_job, list);
	aio_jobs_count++; /* XXXSMP */
	aio->jobs_count++;
	if (lio)
		lio->refcnt++;
	cv_signal(&aio->aio_worker_cv);

	mutex_exit(&aio->aio_mtx);

	/*
	 * Errors are reported only via the aio_error() function.
	 * This is the appropriate behaviour according to POSIX.
	 */
	return 0;
}

/*
 * Syscall functions.
 */

int
sys_aio_cancel(struct lwp *l, void *v, register_t *retval)
{
	struct sys_aio_cancel_args /* {
		syscallarg(int) fildes;
		syscallarg(struct aiocb *) aiocbp;
	} */ *uap = v;
	struct proc *p = l->l_proc;
	struct aioproc *aio;
	struct aio_job *a_job;
	struct aiocb *aiocbp_ptr;
	struct lio_req *lio;
	struct filedesc	*fdp = p->p_fd;
	unsigned int cn, errcnt, fildes;

	TAILQ_HEAD(, aio_job) tmp_jobs_list;

	/* Check for an invalid file descriptor */
	fildes = (unsigned int)SCARG(uap, fildes);
	if (fildes >= fdp->fd_nfiles || fdp->fd_ofiles[fildes] == NULL)
		return EBADF;

	/* Check if the AIO structure is initialized */
	if (p->p_aio == NULL) {
		*retval = AIO_NOTCANCELED;
		return 0;
	}

	aio = p->p_aio;
	aiocbp_ptr = (struct aiocb *)SCARG(uap, aiocbp);

	mutex_enter(&aio->aio_mtx);

	/* Cancel the jobs, and remove them from the queue */
	cn = 0;
	TAILQ_INIT(&tmp_jobs_list);
	TAILQ_FOREACH(a_job, &aio->jobs_queue, list) {
		if (aiocbp_ptr) {
			if (aiocbp_ptr != a_job->aiocb_uptr)
				continue;
			if (fildes != a_job->aiocbp.aio_fildes) {
				mutex_exit(&aio->aio_mtx);
				return EBADF;
			}
		} else if (a_job->aiocbp.aio_fildes != fildes)
			continue;

		TAILQ_REMOVE(&aio->jobs_queue, a_job, list);
		TAILQ_INSERT_TAIL(&tmp_jobs_list, a_job, list);

		/* Decrease the counters */
		aio_jobs_count--; /* XXXSMP */
		aio->jobs_count--;
		lio = a_job->lio;
		if (lio != NULL && --lio->refcnt != 0)
			a_job->lio = NULL;

		cn++;
		if (aiocbp_ptr)
			break;
	}

	/* There are canceled jobs */
	if (cn)
		*retval = AIO_CANCELED;

	/* We cannot cancel the current job */
	a_job = aio->curjob;
	if (a_job && ((a_job->aiocbp.aio_fildes == fildes) ||
	    (a_job->aiocb_uptr == aiocbp_ptr)))
		*retval = AIO_NOTCANCELED;

	mutex_exit(&aio->aio_mtx);

	/* Free the jobs after dropping the lock */
	errcnt = 0;
	while (!TAILQ_EMPTY(&tmp_jobs_list)) {
		a_job = TAILQ_FIRST(&tmp_jobs_list);
		TAILQ_REMOVE(&tmp_jobs_list, a_job, list);
		/* Set the errno and copy structures back to user-space */
		a_job->aiocbp._errno = ECANCELED;
		a_job->aiocbp._state = JOB_DONE;
		if (copyout(&a_job->aiocbp, a_job->aiocb_uptr,
		    sizeof(struct aiocb)))
			errcnt++;
		/* Send a signal, if any */
		aio_sendsig(p, &a_job->aiocbp.aio_sigevent);
		if (a_job->lio) {
			lio = a_job->lio;
			aio_sendsig(p, &lio->sig);
			pool_put(&aio_lio_pool, lio);
		}
		pool_put(&aio_job_pool, a_job);
	}

	if (errcnt)
		return EFAULT;

	/* Set a correct return value */
	if (*retval == 0)
		*retval = AIO_ALLDONE;

	return 0;
}

int
sys_aio_error(struct lwp *l, void *v, register_t *retval)
{
	struct sys_aio_error_args /* {
		syscallarg(const struct aiocb *) aiocbp;
	} */ *uap = v;
	struct proc *p = l->l_proc;
	struct aioproc *aio = p->p_aio;
	struct aiocb aiocbp;
	int error;

	if (aio == NULL)
		return EINVAL;

	error = copyin(SCARG(uap, aiocbp), &aiocbp, sizeof(struct aiocb));
	if (error)
		return error;

	if (aiocbp._state == JOB_NONE)
		return EINVAL;

	*retval = aiocbp._errno;

	return 0;
}

int
sys_aio_fsync(struct lwp *l, void *v, register_t *retval)
{
	struct sys_aio_fsync_args /* {
		syscallarg(int) op;
		syscallarg(struct aiocb *) aiocbp;
	} */ *uap = v;
	int op = SCARG(uap, op);

	if ((op != O_DSYNC) && (op != O_SYNC))
		return EINVAL;

	op = (op == O_DSYNC) ? AIO_DSYNC : AIO_SYNC;

	return aio_enqueue_job(op, SCARG(uap, aiocbp), NULL);
}
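
/*
 * For reference, a userland sketch (not part of this file); O_DSYNC asks
 * that only the file data be synchronized, which maps to the
 * FSYNC_DATAONLY case in aio_process() above:
 *
 *	acb.aio_fildes = fd;
 *	if (aio_fsync(O_DSYNC, &acb) != 0)
 *		... submission failed, check errno ...
 */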

int
sys_aio_read(struct lwp *l, void *v, register_t *retval)
{
	struct sys_aio_read_args /* {
		syscallarg(struct aiocb *) aiocbp;
	} */ *uap = v;

	return aio_enqueue_job(AIO_READ, SCARG(uap, aiocbp), NULL);
}

int
sys_aio_return(struct lwp *l, void *v, register_t *retval)
{
	struct sys_aio_return_args /* {
		syscallarg(struct aiocb *) aiocbp;
	} */ *uap = v;
	struct proc *p = l->l_proc;
	struct aioproc *aio = p->p_aio;
	struct aiocb aiocbp;
	int error;

	if (aio == NULL)
		return EINVAL;

	error = copyin(SCARG(uap, aiocbp), &aiocbp, sizeof(struct aiocb));
	if (error)
		return error;

	if (aiocbp._errno == EINPROGRESS || aiocbp._state != JOB_DONE)
		return EINVAL;

	*retval = aiocbp._retval;

	/* Reset the internal variables */
	aiocbp._errno = 0;
	aiocbp._retval = -1;
	aiocbp._state = JOB_NONE;
	error = copyout(&aiocbp, SCARG(uap, aiocbp), sizeof(struct aiocb));

	return error;
}

int
sys_aio_suspend(struct lwp *l, void *v, register_t *retval)
{
	struct sys_aio_suspend_args /* {
		syscallarg(const struct aiocb *const[]) list;
		syscallarg(int) nent;
		syscallarg(const struct timespec *) timeout;
	} */ *uap = v;
	struct proc *p = l->l_proc;
	struct aioproc *aio;
	struct aio_job *a_job;
	struct aiocb **aiocbp_list;
	struct timespec ts;
	int i, error, nent, timo;

	if (p->p_aio == NULL)
		return EAGAIN;
	aio = p->p_aio;

	nent = SCARG(uap, nent);
	if (nent <= 0 || nent > aio_listio_max)
		return EAGAIN;

	if (SCARG(uap, timeout)) {
		/* Convert timespec to ticks */
		error = copyin(SCARG(uap, timeout), &ts,
		    sizeof(struct timespec));
		if (error)
			return error;
		timo = mstohz((ts.tv_sec * 1000) + (ts.tv_nsec / 1000000));
		if (timo == 0 && ts.tv_sec == 0 && ts.tv_nsec > 0)
			timo = 1;
		if (timo <= 0)
			return EAGAIN;
	} else
		timo = 0;

	/* Get the list of pointers from user-space */
	aiocbp_list = kmem_zalloc(nent * sizeof(struct aiocb *), KM_SLEEP);
	error = copyin(SCARG(uap, list), aiocbp_list,
	    nent * sizeof(struct aiocb *));
	if (error) {
		kmem_free(aiocbp_list, nent * sizeof(struct aiocb *));
		return error;
	}

	mutex_enter(&aio->aio_mtx);
	for (;;) {

		for (i = 0; i < nent; i++) {

			/* Skip NULL entries */
			if (aiocbp_list[i] == NULL)
				continue;

			/* Skip the current job */
			if (aio->curjob) {
				a_job = aio->curjob;
				if (a_job->aiocb_uptr == aiocbp_list[i])
					continue;
			}

			/* Look for a job in the queue */
			TAILQ_FOREACH(a_job, &aio->jobs_queue, list)
				if (a_job->aiocb_uptr == aiocbp_list[i])
					break;

			if (a_job == NULL) {
				struct aiocb aiocbp;

				mutex_exit(&aio->aio_mtx);

				error = copyin(aiocbp_list[i], &aiocbp,
				    sizeof(struct aiocb));
				if (error == 0 && aiocbp._state != JOB_DONE) {
					mutex_enter(&aio->aio_mtx);
					continue;
				}

				kmem_free(aiocbp_list,
				    nent * sizeof(struct aiocb *));
				return error;
			}
		}

		/* Wait for a signal or until the timeout occurs */
		error = cv_timedwait_sig(&aio->done_cv, &aio->aio_mtx, timo);
		if (error) {
			if (error == EWOULDBLOCK)
				error = EAGAIN;
			break;
		}
	}
	mutex_exit(&aio->aio_mtx);

	kmem_free(aiocbp_list, nent * sizeof(struct aiocb *));
	return error;
}
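
/*
 * For reference, a userland sketch (not part of this file) of waiting up
 * to one second for a single outstanding request; the timeout is
 * arbitrary:
 *
 *	const struct aiocb *list[1] = { &acb };
 *	struct timespec ts = { 1, 0 };
 *
 *	if (aio_suspend(list, 1, &ts) == -1 && errno == EAGAIN)
 *		... timed out, the request is still in progress ...
 */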

int
sys_aio_write(struct lwp *l, void *v, register_t *retval)
{
	struct sys_aio_write_args /* {
		syscallarg(struct aiocb *) aiocbp;
	} */ *uap = v;

	return aio_enqueue_job(AIO_WRITE, SCARG(uap, aiocbp), NULL);
}

int
sys_lio_listio(struct lwp *l, void *v, register_t *retval)
{
	struct sys_lio_listio_args /* {
		syscallarg(int) mode;
		syscallarg(struct aiocb *const[]) list;
		syscallarg(int) nent;
		syscallarg(struct sigevent *) sig;
	} */ *uap = v;
	struct proc *p = l->l_proc;
	struct aioproc *aio;
	struct aiocb **aiocbp_list;
	struct lio_req *lio;
	int i, error, errcnt, mode, nent;

	mode = SCARG(uap, mode);
	nent = SCARG(uap, nent);

	/* Check for the limits, and invalid values */
	if (nent < 1 || nent > aio_listio_max)
		return EINVAL;
	if (aio_jobs_count + nent > aio_max) /* XXXSMP */
		return EAGAIN;

	/* Check if the AIO structure is initialized; if not, initialize it */
	if (p->p_aio == NULL)
		if (aio_init(p))
			return EAGAIN;
	aio = p->p_aio;

	/* Create a LIO structure */
	lio = pool_get(&aio_lio_pool, PR_WAITOK);
	lio->refcnt = 1;
	error = 0;

	switch (mode) {
	case LIO_WAIT:
		memset(&lio->sig, 0, sizeof(struct sigevent));
		break;
	case LIO_NOWAIT:
		/* Check for a signal, and validate it */
		if (SCARG(uap, sig)) {
			struct sigevent *sig = &lio->sig;

			error = copyin(SCARG(uap, sig), &lio->sig,
			    sizeof(struct sigevent));
			if (error == 0 &&
			    (sig->sigev_signo < 0 ||
			    sig->sigev_signo >= NSIG ||
			    sig->sigev_notify < SIGEV_NONE ||
			    sig->sigev_notify > SIGEV_SA))
				error = EINVAL;
		} else
			memset(&lio->sig, 0, sizeof(struct sigevent));
		break;
	default:
		error = EINVAL;
		break;
	}

	if (error != 0) {
		pool_put(&aio_lio_pool, lio);
		return error;
	}

	/* Get the list of pointers from user-space */
	aiocbp_list = kmem_zalloc(nent * sizeof(struct aiocb *), KM_SLEEP);
	error = copyin(SCARG(uap, list), aiocbp_list,
	    nent * sizeof(struct aiocb *));
	if (error) {
		mutex_enter(&aio->aio_mtx);
		goto err;
	}

	/* Enqueue all jobs */
	errcnt = 0;
	for (i = 0; i < nent; i++) {
		error = aio_enqueue_job(AIO_LIO, aiocbp_list[i], lio);
		/*
		 * According to POSIX, in such an error case the call
		 * may fail even though other I/O operations were initiated.
		 */
		if (error)
			errcnt++;
	}

	mutex_enter(&aio->aio_mtx);

	/* Return an error, if any */
	if (errcnt) {
		error = EIO;
		goto err;
	}

	if (mode == LIO_WAIT) {
		/*
		 * Wait for AIO completion.  In this case,
		 * the LIO structure will be freed here.
		 */
		while (lio->refcnt > 1 && error == 0)
			error = cv_wait_sig(&aio->done_cv, &aio->aio_mtx);
		if (error)
			error = EINTR;
	}

err:
	if (--lio->refcnt != 0)
		lio = NULL;
	mutex_exit(&aio->aio_mtx);
	if (lio != NULL) {
		aio_sendsig(p, &lio->sig);
		pool_put(&aio_lio_pool, lio);
	}
	kmem_free(aiocbp_list, nent * sizeof(struct aiocb *));
	return error;
}
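
/*
 * For reference, a userland sketch (not part of this file) submitting a
 * read and a write as one batch and waiting for both; the aiocbs are set
 * up as in the aio_read() example near the top of this file:
 *
 *	struct aiocb *list[2] = { &acb_r, &acb_w };
 *
 *	acb_r.aio_lio_opcode = LIO_READ;
 *	acb_w.aio_lio_opcode = LIO_WRITE;
 *	if (lio_listio(LIO_WAIT, list, 2, NULL) == -1)
 *		... at least one request failed; check each aiocb with
 *		    aio_error() and aio_return() ...
 */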

/*
 * SysCtl
 */

static int
sysctl_aio_listio_max(SYSCTLFN_ARGS)
{
	struct sysctlnode node;
	int error, newsize;

	node = *rnode;
	node.sysctl_data = &newsize;

	newsize = aio_listio_max;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return error;

	/* XXXSMP */
	if (newsize < 1 || newsize > aio_max)
		return EINVAL;
	aio_listio_max = newsize;

	return 0;
}

static int
sysctl_aio_max(SYSCTLFN_ARGS)
{
	struct sysctlnode node;
	int error, newsize;

	node = *rnode;
	node.sysctl_data = &newsize;

	newsize = aio_max;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return error;

	/* XXXSMP */
	if (newsize < 1 || newsize < aio_listio_max)
		return EINVAL;
	aio_max = newsize;

	return 0;
}

SYSCTL_SETUP(sysctl_aio_setup, "sysctl aio setup")
{

	sysctl_createv(clog, 0, NULL, NULL,
		CTLFLAG_PERMANENT,
		CTLTYPE_NODE, "kern", NULL,
		NULL, 0, NULL, 0,
		CTL_KERN, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		CTLFLAG_PERMANENT | CTLFLAG_IMMEDIATE,
		CTLTYPE_INT, "posix_aio",
		SYSCTL_DESCR("Version of IEEE Std 1003.1 and its "
			     "Asynchronous I/O option to which the "
			     "system attempts to conform"),
		NULL, _POSIX_ASYNCHRONOUS_IO, NULL, 0,
		CTL_KERN, CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
		CTLTYPE_INT, "aio_listio_max",
		SYSCTL_DESCR("Maximum number of asynchronous I/O "
			     "operations in a single list I/O call"),
		sysctl_aio_listio_max, 0, &aio_listio_max, 0,
		CTL_KERN, CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
		CTLTYPE_INT, "aio_max",
		SYSCTL_DESCR("Maximum number of asynchronous I/O "
			     "operations"),
		sysctl_aio_max, 0, &aio_max, 0,
		CTL_KERN, CTL_CREATE, CTL_EOL);
}
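
/*
 * For reference, the limits above can be inspected and tuned from userland
 * with sysctl(8), e.g.:
 *
 *	sysctl kern.aio_max
 *	sysctl -w kern.aio_listio_max=256
 *
 * (the value 256 is arbitrary; it has to stay between 1 and kern.aio_max,
 * as enforced by sysctl_aio_listio_max() above).
 */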

/*
 * Debugging
 */
#if defined(DDB)
void
aio_print_jobs(void (*pr)(const char *, ...))
{
	struct proc *p = (curlwp == NULL ? NULL : curlwp->l_proc);
	struct aioproc *aio;
	struct aio_job *a_job;
	struct aiocb *aiocbp;

	if (p == NULL) {
		(*pr)("AIO: We are not in a process right now.\n");
		return;
	}

	aio = p->p_aio;
	if (aio == NULL) {
		(*pr)("AIO data is not initialized (PID = %d).\n", p->p_pid);
		return;
	}

	(*pr)("AIO: PID = %d\n", p->p_pid);
	(*pr)("AIO: Global count of the jobs = %u\n", aio_jobs_count);
	(*pr)("AIO: Count of the jobs = %u\n", aio->jobs_count);

	if (aio->curjob) {
		a_job = aio->curjob;
		(*pr)("\nAIO current job:\n");
		(*pr)(" opcode = %d, errno = %d, state = %d, aiocb_ptr = %p\n",
		    a_job->aio_op, a_job->aiocbp._errno,
		    a_job->aiocbp._state, a_job->aiocb_uptr);
		aiocbp = &a_job->aiocbp;
		(*pr)("   fd = %d, offset = %u, buf = %p, nbytes = %u\n",
		    aiocbp->aio_fildes, aiocbp->aio_offset,
		    aiocbp->aio_buf, aiocbp->aio_nbytes);
	}

	(*pr)("\nAIO queue:\n");
	TAILQ_FOREACH(a_job, &aio->jobs_queue, list) {
		(*pr)(" opcode = %d, errno = %d, state = %d, aiocb_ptr = %p\n",
		    a_job->aio_op, a_job->aiocbp._errno,
		    a_job->aiocbp._state, a_job->aiocb_uptr);
		aiocbp = &a_job->aiocbp;
		(*pr)("   fd = %d, offset = %u, buf = %p, nbytes = %u\n",
		    aiocbp->aio_fildes, aiocbp->aio_offset,
		    aiocbp->aio_buf, aiocbp->aio_nbytes);
	}
}
#endif /* defined(DDB) */