Home | History | Annotate | Line # | Download | only in kern
sys_aio.c revision 1.1
      1  1.1  rmind /*	$NetBSD: sys_aio.c,v 1.1 2007/05/07 22:22:21 rmind Exp $	*/
      2  1.1  rmind 
      3  1.1  rmind /*
      4  1.1  rmind  * Copyright (c) 2007, Mindaugas Rasiukevicius <rmind at NetBSD org>
      5  1.1  rmind  *
      6  1.1  rmind  * Redistribution and use in source and binary forms, with or without
      7  1.1  rmind  * modification, are permitted provided that the following conditions
      8  1.1  rmind  * are met:
      9  1.1  rmind  * 1. Redistributions of source code must retain the above copyright
     10  1.1  rmind  *    notice, this list of conditions and the following disclaimer.
     11  1.1  rmind  * 2. Redistributions in binary form must reproduce the above copyright
     12  1.1  rmind  *    notice, this list of conditions and the following disclaimer in the
     13  1.1  rmind  *    documentation and/or other materials provided with the distribution.
     14  1.1  rmind  *
     15  1.1  rmind  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
     16  1.1  rmind  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     17  1.1  rmind  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     18  1.1  rmind  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     19  1.1  rmind  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     20  1.1  rmind  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     21  1.1  rmind  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     22  1.1  rmind  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     23  1.1  rmind  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     24  1.1  rmind  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     25  1.1  rmind  * POSSIBILITY OF SUCH DAMAGE.
     26  1.1  rmind  */
     27  1.1  rmind 
     28  1.1  rmind /*
     29  1.1  rmind  * TODO:
     30  1.1  rmind  *   1. Additional work for VCHR and maybe VBLK devices.
     31  1.1  rmind  *   2. Consider making the job-finding O(n) per one file descriptor.
     32  1.1  rmind  */
     33  1.1  rmind 
     34  1.1  rmind #include <sys/cdefs.h>
     35  1.1  rmind __KERNEL_RCSID(0, "$NetBSD: sys_aio.c,v 1.1 2007/05/07 22:22:21 rmind Exp $");
     36  1.1  rmind 
     37  1.1  rmind #include <sys/param.h>
     38  1.1  rmind 
     39  1.1  rmind #include <sys/condvar.h>
     40  1.1  rmind #include <sys/file.h>
     41  1.1  rmind #include <sys/filedesc.h>
     42  1.1  rmind #include <sys/kernel.h>
     43  1.1  rmind #include <sys/kmem.h>
     44  1.1  rmind #include <sys/lwp.h>
     45  1.1  rmind #include <sys/mutex.h>
     46  1.1  rmind #include <sys/pool.h>
     47  1.1  rmind #include <sys/proc.h>
     48  1.1  rmind #include <sys/queue.h>
     49  1.1  rmind #include <sys/signal.h>
     50  1.1  rmind #include <sys/signalvar.h>
     51  1.1  rmind #include <sys/syscallargs.h>
     52  1.1  rmind #include <sys/sysctl.h>
     53  1.1  rmind #include <sys/systm.h>
     54  1.1  rmind #include <sys/types.h>
     55  1.1  rmind #include <sys/vnode.h>
     56  1.1  rmind 
     57  1.1  rmind #include <uvm/uvm_extern.h>
     58  1.1  rmind 
     59  1.1  rmind /*
     60  1.1  rmind  * System-wide limits and counter of AIO operations.
     61  1.1  rmind  * XXXSMP: We should spin-lock it, or modify atomically.
     62  1.1  rmind  */
     63  1.1  rmind static unsigned long aio_listio_max = AIO_LISTIO_MAX;
     64  1.1  rmind static unsigned long aio_max = AIO_MAX;
     65  1.1  rmind 
     66  1.1  rmind static unsigned long aio_jobs_count = 0;
     67  1.1  rmind 
     68  1.1  rmind /* Prototypes */
     69  1.1  rmind void aio_worker(void *);
     70  1.1  rmind static void aio_process(struct aio_job *);
     71  1.1  rmind static void aio_sendsig(struct proc *, struct sigevent *);
     72  1.1  rmind static int aio_enqueue_job(int, void *, struct lio_req *);
     73  1.1  rmind 
/*
 * Initialize Asynchronous I/O data structures for the process.
 *
 * Allocates and installs p->p_aio, sets up the job/LIO pools, the job
 * queue and its synchronization primitives, and creates the per-process
 * AIO worker LWP that services the queue.  Returns 0 on success
 * (including the case where another thread won the initialization
 * race), or EAGAIN on allocation or LWP-creation failure.
 */
int
aio_init(struct proc *p)
{
	struct aioproc *aio;
	struct lwp *l;
	bool inmem;
	vaddr_t uaddr;

	/*
	 * Allocate and initialize AIO structure.  KM_NOSLEEP: fail with
	 * EAGAIN rather than sleep, since callers treat this as a
	 * resource-shortage condition.
	 */
	aio = kmem_zalloc(sizeof(struct aioproc), KM_NOSLEEP);
	if (aio == NULL)
		return EAGAIN;

	/*
	 * Recheck if we are really first: the allocation above was done
	 * without the lock, so another thread may have installed p_aio
	 * in the meantime.  Losing the race is not an error.
	 */
	mutex_enter(&p->p_mutex);
	if (p->p_aio) {
		mutex_exit(&p->p_mutex);
		kmem_free(aio, sizeof(struct aioproc));
		return 0;
	}
	p->p_aio = aio;

	/* Initialize pools, queue and their synchronization structures */
	pool_init(&aio->jobs_pool, sizeof(struct aio_job), 0, 0, 0,
	    "aio_jobs_pool", &pool_allocator_nointr, IPL_NONE);
	pool_init(&aio->lio_pool, sizeof(struct lio_req), 0, 0, 0,
	    "aio_lio_pool", &pool_allocator_nointr, IPL_NONE);
	mutex_init(&aio->aio_mtx, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&aio->aio_worker_cv, "aiowork");
	cv_init(&aio->done_cv, "aiodone");
	TAILQ_INIT(&aio->jobs_queue);

	/* It is safe to leave this window without AIO worker set */
	mutex_exit(&p->p_mutex);

	/*
	 * Create an AIO worker thread.
	 * XXX: Currently, AIO thread is not protected against user's actions.
	 */
	inmem = uvm_uarea_alloc(&uaddr);
	if (uaddr == 0) {
		/* No u-area available: undo the partial initialization */
		aio_exit(p);
		return EAGAIN;
	}
	if (newlwp(curlwp, p, uaddr, inmem, 0, NULL, 0,
	    aio_worker, NULL, &l))
	{
		/* LWP creation failed: release the u-area and tear down */
		uvm_uarea_free(uaddr);
		aio_exit(p);
		return EAGAIN;
	}

	/*
	 * Complete the initialization of thread, and run it.  The new
	 * LWP is made runnable under p_smutex and its own lwp lock.
	 */
	mutex_enter(&p->p_smutex);
	aio->aio_worker = l;
	p->p_nrlwps++;
	lwp_lock(l);
	l->l_stat = LSRUN;
	l->l_usrpri = PUSER - 1; /* XXX */
	setrunqueue(l);
	lwp_unlock(l);
	mutex_exit(&p->p_smutex);

	return 0;
}
    142  1.1  rmind 
/*
 * Exit of Asynchronous I/O subsystem of process.
 *
 * Drains any still-queued jobs and frees the entire p->p_aio state.
 * The worker LWP must already be gone (asserted below); the queue is
 * walked without aio_mtx, so this must only run when no other thread
 * can touch the AIO structures any more.
 */
void
aio_exit(struct proc *p)
{
	struct aioproc *aio;
	struct aio_job *a_job;

	/* Nothing to do if AIO was never initialized for this process */
	if (p->p_aio == NULL)
		return;
	aio = p->p_aio;

	KASSERT(p->p_aio->aio_worker == NULL);

	/* Free AIO queue: return each pending job to its pool */
	while (!TAILQ_EMPTY(&aio->jobs_queue)) {
		a_job = TAILQ_FIRST(&aio->jobs_queue);
		TAILQ_REMOVE(&aio->jobs_queue, a_job, list);
		pool_put(&aio->jobs_pool, a_job);
		aio_jobs_count--; /* XXXSMP */
	}

	/* Destroy and free the entire AIO data structure */
	cv_destroy(&aio->aio_worker_cv);
	cv_destroy(&aio->done_cv);
	mutex_destroy(&aio->aio_mtx);
	pool_destroy(&aio->jobs_pool);
	pool_destroy(&aio->lio_pool);
	kmem_free(aio, sizeof(struct aioproc));
	p->p_aio = NULL;
}
    175  1.1  rmind 
/*
 * AIO worker thread and processor.
 *
 * Per-process service loop: waits on aio_worker_cv for jobs, dequeues
 * one at a time, performs the I/O via aio_process(), copies the result
 * back to user-space, wakes any aio_suspend() waiters and delivers the
 * per-job (and, if last in a list, per-LIO) signal.  Exits when
 * interrupted by a signal while the process is exiting or core-dumping.
 */
void
aio_worker(void *arg)
{
	struct proc *p = curlwp->l_proc;
	struct aioproc *aio = p->p_aio;
	struct aio_job *a_job;
	struct lio_req *lio;
	sigset_t oss, nss;
	int error;

	/*
	 * Make an empty signal mask, so it
	 * handles only SIGKILL and SIGSTOP.
	 */
	sigfillset(&nss);
	mutex_enter(&p->p_smutex);
	error = sigprocmask1(curlwp, SIG_SETMASK, &nss, &oss);
	KASSERT(error == 0);
	mutex_exit(&p->p_smutex);

	for (;;) {
		/*
		 * Loop for each job in the queue.  If there
		 * are no jobs - sleep and wait for the signal.
		 */
		mutex_enter(&aio->aio_mtx);
		while ((a_job = TAILQ_FIRST(&aio->jobs_queue)) == NULL) {
			if (cv_wait_sig(&aio->aio_worker_cv, &aio->aio_mtx)) {
				/*
				 * Thread was interrupted by the
				 * signal - check for exit.
				 * (aio_mtx is still held on the goto.)
				 */
				if (curlwp->l_flag & (LW_WEXIT | LW_WCORE))
					goto exit;
			}
		}

		/* Take the job from the queue */
		aio->curjob = a_job;
		TAILQ_REMOVE(&aio->jobs_queue, a_job, list);

		aio_jobs_count--; /* XXXSMP */
		aio->jobs_count--;

		mutex_exit(&aio->aio_mtx);

		/* Process an AIO operation */
		aio_process(a_job);

		/* Copy data structure back to the user-space */
		(void)copyout(&a_job->aiocbp, a_job->aiocb_uptr,
		    sizeof(struct aiocb));

		mutex_enter(&aio->aio_mtx);
		aio->curjob = NULL;
		/*
		 * Decrease a reference counter, if there is a LIO structure.
		 * Keep `lio' non-NULL only when this was the last job of the
		 * list and the structure is marked for freeing - that is the
		 * point where the LIO signal/free is due.
		 */
		lio = a_job->lio;
		if (lio) {
			lio->refcnt--;
			if (lio->refcnt || lio->dofree == false)
				lio = NULL;
		}
		/* Notify all suspenders */
		cv_broadcast(&aio->done_cv);
		mutex_exit(&aio->aio_mtx);

		/* Send a signal, if any */
		aio_sendsig(p, &a_job->aiocbp.aio_sigevent);

		/* Destroy the LIO structure */
		if (lio) {
			aio_sendsig(p, &lio->sig);
			if (lio->dofree == true)
				pool_put(&aio->lio_pool, lio);
		}

		/* Destroy the job */
		pool_put(&aio->jobs_pool, a_job);
	}

exit:
	/*
	 * Destroy oneself, the rest will be cared.
	 */
	aio->aio_worker = NULL;
	mutex_exit(&aio->aio_mtx);

	/* Restore the old signal mask */
	mutex_enter(&p->p_smutex);
	error = sigprocmask1(curlwp, SIG_SETMASK, &oss, NULL);
	KASSERT(error == 0);
	mutex_exit(&p->p_smutex);

	lwp_exit(curlwp);
}
    274  1.1  rmind 
/*
 * Perform a single queued AIO job (read, write or fsync), storing the
 * result (_retval) and error (_errno) and marking the job JOB_DONE.
 * Runs in the context of the AIO worker LWP, so curlwp->l_proc is the
 * owning process.
 */
static void
aio_process(struct aio_job *a_job)
{
	struct proc *p = curlwp->l_proc;
	struct aiocb *aiocbp = &a_job->aiocbp;
	struct file *fp;
	struct filedesc	*fdp = p->p_fd;
	int fd = aiocbp->aio_fildes;
	int error = 0;

	KASSERT(fdp != NULL);
	KASSERT(a_job->aio_op != 0);

	if ((a_job->aio_op & AIO_READ) || (a_job->aio_op & AIO_WRITE)) {
		struct iovec aiov;
		struct uio auio;

		/* POSIX: a transfer larger than SSIZE_MAX is invalid */
		if (aiocbp->aio_nbytes > SSIZE_MAX) {
			error = EINVAL;
			goto done;
		}

		fp = fd_getfile(fdp, fd);
		if (fp == NULL) {
			error = EBADF;
			goto done;
		}

		/* Build a single-segment uio for the user buffer */
		aiov.iov_base = (void *)(uintptr_t)aiocbp->aio_buf;
		aiov.iov_len = aiocbp->aio_nbytes;
		auio.uio_iov = &aiov;
		auio.uio_iovcnt = 1;
		auio.uio_resid = aiocbp->aio_nbytes;
		auio.uio_vmspace = p->p_vmspace;

		FILE_USE(fp);
		if (a_job->aio_op & AIO_READ) {
			/*
			 * Perform a Read operation
			 */
			KASSERT((a_job->aio_op & AIO_WRITE) == 0);

			if ((fp->f_flag & FREAD) == 0) {
				FILE_UNUSE(fp, curlwp);
				error = EBADF;
				goto done;
			}
			auio.uio_rw = UIO_READ;
			error = (*fp->f_ops->fo_read)(fp, &aiocbp->aio_offset,
			    &auio, fp->f_cred, FOF_UPDATE_OFFSET);
		} else {
			/*
			 * Perform a Write operation
			 */
			KASSERT(a_job->aio_op & AIO_WRITE);

			if ((fp->f_flag & FWRITE) == 0) {
				FILE_UNUSE(fp, curlwp);
				error = EBADF;
				goto done;
			}
			auio.uio_rw = UIO_WRITE;
			error = (*fp->f_ops->fo_write)(fp, &aiocbp->aio_offset,
			    &auio, fp->f_cred, FOF_UPDATE_OFFSET);
		}
		FILE_UNUSE(fp, curlwp);

		/*
		 * Store the result value: bytes actually transferred on
		 * success, -1 on error.
		 */
		a_job->aiocbp.aio_nbytes -= auio.uio_resid;
		a_job->aiocbp._retval = (error == 0) ?
		    a_job->aiocbp.aio_nbytes : -1;

	} else if((a_job->aio_op & AIO_SYNC) || (a_job->aio_op & AIO_DSYNC)) {
		/*
		 * Perform a file Sync operation
		 */
		struct vnode *vp;

		if ((error = getvnode(fdp, fd, &fp)) != 0)
			goto done;

		if ((fp->f_flag & FWRITE) == 0) {
			FILE_UNUSE(fp, curlwp);
			error = EBADF;
			goto done;
		}

		vp = (struct vnode *)fp->f_data;
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		if (a_job->aio_op & AIO_DSYNC) {
			/* Data-only sync (no metadata) */
			error = VOP_FSYNC(vp, fp->f_cred,
			    FSYNC_WAIT | FSYNC_DATAONLY, 0, 0, curlwp);
		} else if (a_job->aio_op & AIO_SYNC) {
			error = VOP_FSYNC(vp, fp->f_cred,
			    FSYNC_WAIT, 0, 0, curlwp);
			/* Flush softdep-managed buffers as well, if any */
			if (error == 0 && bioops.io_fsync != NULL &&
			    vp->v_mount &&
			    (vp->v_mount->mnt_flag & MNT_SOFTDEP))
			    (*bioops.io_fsync)(vp, 0);
		}
		VOP_UNLOCK(vp, 0);
		FILE_UNUSE(fp, curlwp);

		/* Store the result value */
		a_job->aiocbp._retval = (error == 0) ? 0 : -1;

	} else
		panic("aio_process: invalid operation code\n");

done:
	/* Job is done, set the error, if any */
	a_job->aiocbp._errno = error;
	a_job->aiocbp._state = JOB_DONE;
}
    389  1.1  rmind 
    390  1.1  rmind /*
    391  1.1  rmind  * Send AIO signal.
    392  1.1  rmind  */
    393  1.1  rmind static void
    394  1.1  rmind aio_sendsig(struct proc *p, struct sigevent *sig)
    395  1.1  rmind {
    396  1.1  rmind 	ksiginfo_t ksi;
    397  1.1  rmind 
    398  1.1  rmind 	if (sig->sigev_signo == 0 || sig->sigev_notify == SIGEV_NONE)
    399  1.1  rmind 		return;
    400  1.1  rmind 
    401  1.1  rmind 	KSI_INIT(&ksi);
    402  1.1  rmind 	ksi.ksi_signo = sig->sigev_signo;
    403  1.1  rmind 	ksi.ksi_code = SI_ASYNCIO;
    404  1.1  rmind 	ksi.ksi_sigval = sig->sigev_value;
    405  1.1  rmind 	mutex_enter(&proclist_mutex);
    406  1.1  rmind 	kpsignal(p, &ksi, NULL);
    407  1.1  rmind 	mutex_exit(&proclist_mutex);
    408  1.1  rmind }
    409  1.1  rmind 
/*
 * Enqueue the job.
 *
 * Validate the user-supplied aiocb, reject duplicates of in-progress
 * jobs, initialize per-process AIO state on first use, mark the aiocb
 * in-progress in user-space, then queue the job and wake the worker.
 * `op' is one of AIO_READ/AIO_WRITE/AIO_SYNC/AIO_DSYNC, or AIO_LIO in
 * which case the per-entry opcode is taken from aio_lio_opcode and
 * `lio' carries the shared list-I/O request.
 * Returns 0 on success, EAGAIN on resource limits, EINVAL/EFAULT on
 * bad arguments.
 */
static int
aio_enqueue_job(int op, void *aiocb_uptr, struct lio_req *lio)
{
	struct proc *p = curlwp->l_proc;
	struct aioproc *aio;
	struct aio_job *a_job;
	struct aiocb aiocbp;
	struct sigevent *sig;
	int error;

	/* Check for the system-wide limit of queued jobs */
	if (aio_jobs_count + 1 > aio_max) /* XXXSMP */
		return EAGAIN;

	/* Get the data structure from user-space */
	error = copyin(aiocb_uptr, &aiocbp, sizeof(struct aiocb));
	if (error)
		return error;

	/* Check if signal is set, and validate it */
	sig = &aiocbp.aio_sigevent;
	if (sig->sigev_signo < 0 || sig->sigev_signo >= NSIG ||
	    sig->sigev_notify < SIGEV_NONE || sig->sigev_notify > SIGEV_SA)
		return EINVAL;

	/* Buffer and byte count (not meaningful for sync operations) */
	if (((AIO_SYNC | AIO_DSYNC) & op) == 0)
		if (aiocbp.aio_buf == NULL || aiocbp.aio_nbytes > SSIZE_MAX)
			return EINVAL;

	/* Check the opcode, if LIO_NOP - simply ignore */
	if (op == AIO_LIO) {
		KASSERT(lio != NULL);
		if (aiocbp.aio_lio_opcode == LIO_WRITE)
			op = AIO_WRITE;
		else if (aiocbp.aio_lio_opcode == LIO_READ)
			op = AIO_READ;
		else
			return (aiocbp.aio_lio_opcode == LIO_NOP) ? 0 : EINVAL;
	} else {
		KASSERT(lio == NULL);
	}

	/*
	 * Look for already existing job.  If found - the job is in-progress.
	 * According to POSIX this is invalid, so return the error.
	 */
	aio = p->p_aio;
	if (aio) {
		mutex_enter(&aio->aio_mtx);
		/* The job currently being processed by the worker counts too */
		if (aio->curjob) {
			a_job = aio->curjob;
			if (a_job->aiocb_uptr == aiocb_uptr) {
				mutex_exit(&aio->aio_mtx);
				return EINVAL;
			}
		}
		TAILQ_FOREACH(a_job, &aio->jobs_queue, list) {
			if (a_job->aiocb_uptr != aiocb_uptr)
				continue;
			mutex_exit(&aio->aio_mtx);
			return EINVAL;
		}
		mutex_exit(&aio->aio_mtx);
	}

	/*
	 * Check if AIO structure is initialized, if not - initialize it.
	 * In LIO case, we did that already.  We will recheck this with
	 * the lock in aio_init().
	 */
	if (lio == NULL && p->p_aio == NULL)
		if (aio_init(p))
			return EAGAIN;
	aio = p->p_aio;

	/*
	 * Set the state with errno, and copy data
	 * structure back to the user-space.
	 */
	aiocbp._state = JOB_WIP;
	aiocbp._errno = EINPROGRESS;
	aiocbp._retval = -1;
	error = copyout(&aiocbp, aiocb_uptr, sizeof(struct aiocb));
	if (error)
		return error;

	/* Allocate and initialize a new AIO job */
	a_job = pool_get(&aio->jobs_pool, PR_WAITOK);
	memset(a_job, 0, sizeof(struct aio_job));

	/*
	 * Set the data.
	 * Store the user-space pointer for searching.  Since we
	 * are storing only per proc pointers - it is safe.
	 */
	memcpy(&a_job->aiocbp, &aiocbp, sizeof(struct aiocb));
	a_job->aiocb_uptr = aiocb_uptr;
	a_job->aio_op |= op;
	a_job->lio = lio;

	/*
	 * Add the job to the queue, update the counters, and
	 * notify the AIO worker thread to handle the job.
	 */
	mutex_enter(&aio->aio_mtx);

	/* Fail, if the per-process limit was reached */
	if (aio->jobs_count >= aio_listio_max) {
		mutex_exit(&aio->aio_mtx);
		pool_put(&aio->jobs_pool, a_job);
		return EAGAIN;
	}

	TAILQ_INSERT_TAIL(&aio->jobs_queue, a_job, list);
	aio_jobs_count++; /* XXXSMP */
	aio->jobs_count++;
	if (lio)
		lio->refcnt++;
	cv_signal(&aio->aio_worker_cv);

	mutex_exit(&aio->aio_mtx);

	/*
	 * One would handle the errors only with aio_error() function.
	 * This way is appropriate according to POSIX.
	 */
	return 0;
}
    542  1.1  rmind 
    543  1.1  rmind /*
    544  1.1  rmind  * Syscall functions.
    545  1.1  rmind  */
    546  1.1  rmind 
/*
 * aio_cancel(2): cancel queued (not yet running) AIO jobs matching the
 * given file descriptor and optionally a specific aiocb.  Matching jobs
 * are moved to a private list under the lock, then canceled (errno set
 * to ECANCELED, copied back to user-space, signal sent, freed) after
 * the lock is dropped.  Returns AIO_CANCELED, AIO_NOTCANCELED (job is
 * currently running or AIO not initialized) or AIO_ALLDONE in *retval.
 */
int
sys_aio_cancel(struct lwp *l, void *v, register_t *retval)
{
	struct sys_aio_cancel_args /* {
		syscallarg(int) fildes;
		syscallarg(struct aiocb *) aiocbp;
	} */ *uap = v;
	struct proc *p = l->l_proc;
	struct aioproc *aio;
	struct aio_job *a_job;
	struct aiocb *aiocbp_ptr;
	struct lio_req *lio;
	struct filedesc	*fdp = p->p_fd;
	unsigned int cn, errcnt, fildes;

	TAILQ_HEAD(, aio_job) tmp_jobs_list;

	/* Check for invalid file descriptor */
	fildes = (unsigned int)SCARG(uap, fildes);
	if (fildes >= fdp->fd_nfiles || fdp->fd_ofiles[fildes] == NULL)
		return EBADF;

	/* Check if AIO structure is initialized */
	if (p->p_aio == NULL) {
		*retval = AIO_NOTCANCELED;
		return 0;
	}

	aio = p->p_aio;
	aiocbp_ptr = (struct aiocb *)SCARG(uap, aiocbp);

	mutex_enter(&aio->aio_mtx);

	/* Cancel the jobs, and remove them from the queue */
	cn = 0;
	TAILQ_INIT(&tmp_jobs_list);
	TAILQ_FOREACH(a_job, &aio->jobs_queue, list) {
		if (aiocbp_ptr) {
			/* Single-job cancel: match on the user pointer */
			if (aiocbp_ptr != a_job->aiocb_uptr)
				continue;
			/* The aiocb's descriptor must match fildes */
			if (fildes != a_job->aiocbp.aio_fildes) {
				mutex_exit(&aio->aio_mtx);
				return EBADF;
			}
		} else if (a_job->aiocbp.aio_fildes != fildes)
			continue;

		TAILQ_REMOVE(&aio->jobs_queue, a_job, list);
		TAILQ_INSERT_TAIL(&tmp_jobs_list, a_job, list);

		/* Decrease the counters */
		aio_jobs_count--; /* XXXSMP */
		aio->jobs_count--;
		lio = a_job->lio;
		if (lio) {
			lio->refcnt--;
			/* Detach LIO unless this was its last reference */
			if (lio->refcnt || lio->dofree == false)
				a_job->lio = NULL;
		}

		cn++;
		if (aiocbp_ptr)
			break;
	}

	/* There are canceled jobs */
	if (cn)
		*retval = AIO_CANCELED;

	/* We cannot cancel current job */
	a_job = aio->curjob;
	if (a_job && ((a_job->aiocbp.aio_fildes == fildes) ||
	    (a_job->aiocb_uptr == aiocbp_ptr)))
		*retval = AIO_NOTCANCELED;

	mutex_exit(&aio->aio_mtx);

	/* Free the jobs after the lock */
	errcnt = 0;
	while (!TAILQ_EMPTY(&tmp_jobs_list)) {
		a_job = TAILQ_FIRST(&tmp_jobs_list);
		TAILQ_REMOVE(&tmp_jobs_list, a_job, list);
		/* Set the errno and copy structures back to the user-space */
		a_job->aiocbp._errno = ECANCELED;
		a_job->aiocbp._state = JOB_DONE;
		if (copyout(&a_job->aiocbp, a_job->aiocb_uptr,
		    sizeof(struct aiocb)))
			errcnt++;
		/* Send a signal if any */
		aio_sendsig(p, &a_job->aiocbp.aio_sigevent);
		if (a_job->lio)
			pool_put(&aio->lio_pool, a_job->lio);
		pool_put(&aio->jobs_pool, a_job);
	}

	if (errcnt)
		return EFAULT;

	/* Set a correct return value */
	if (*retval == 0)
		*retval = AIO_ALLDONE;

	return 0;
}
    651  1.1  rmind 
    652  1.1  rmind int
    653  1.1  rmind sys_aio_error(struct lwp *l, void *v, register_t *retval)
    654  1.1  rmind {
    655  1.1  rmind 	struct sys_aio_error_args /* {
    656  1.1  rmind 		syscallarg(const struct aiocb *) aiocbp;
    657  1.1  rmind 	} */ *uap = v;
    658  1.1  rmind 	struct proc *p = l->l_proc;
    659  1.1  rmind 	struct aioproc *aio = p->p_aio;
    660  1.1  rmind 	struct aiocb aiocbp;
    661  1.1  rmind 	int error;
    662  1.1  rmind 
    663  1.1  rmind 	if (aio == NULL)
    664  1.1  rmind 		return EINVAL;
    665  1.1  rmind 
    666  1.1  rmind 	error = copyin(SCARG(uap, aiocbp), &aiocbp, sizeof(struct aiocb));
    667  1.1  rmind 	if (error)
    668  1.1  rmind 		return error;
    669  1.1  rmind 
    670  1.1  rmind 	if (aiocbp._state == JOB_NONE)
    671  1.1  rmind 		return EINVAL;
    672  1.1  rmind 
    673  1.1  rmind 	*retval = aiocbp._errno;
    674  1.1  rmind 
    675  1.1  rmind 	return 0;
    676  1.1  rmind }
    677  1.1  rmind 
    678  1.1  rmind int
    679  1.1  rmind sys_aio_fsync(struct lwp *l, void *v, register_t *retval)
    680  1.1  rmind {
    681  1.1  rmind 	struct sys_aio_fsync_args /* {
    682  1.1  rmind 		syscallarg(int) op;
    683  1.1  rmind 		syscallarg(struct aiocb *) aiocbp;
    684  1.1  rmind 	} */ *uap = v;
    685  1.1  rmind 	int op = SCARG(uap, op);
    686  1.1  rmind 
    687  1.1  rmind 	if ((op != O_DSYNC) && (op != O_SYNC))
    688  1.1  rmind 		return EINVAL;
    689  1.1  rmind 
    690  1.1  rmind 	op = O_DSYNC ? AIO_DSYNC : AIO_SYNC;
    691  1.1  rmind 
    692  1.1  rmind 	return aio_enqueue_job(op, SCARG(uap, aiocbp), NULL);
    693  1.1  rmind }
    694  1.1  rmind 
    695  1.1  rmind int
    696  1.1  rmind sys_aio_read(struct lwp *l, void *v, register_t *retval)
    697  1.1  rmind {
    698  1.1  rmind 	struct sys_aio_read_args /* {
    699  1.1  rmind 		syscallarg(struct aiocb *) aiocbp;
    700  1.1  rmind 	} */ *uap = v;
    701  1.1  rmind 
    702  1.1  rmind 	return aio_enqueue_job(AIO_READ, SCARG(uap, aiocbp), NULL);
    703  1.1  rmind }
    704  1.1  rmind 
    705  1.1  rmind int
    706  1.1  rmind sys_aio_return(struct lwp *l, void *v, register_t *retval)
    707  1.1  rmind {
    708  1.1  rmind 	struct sys_aio_return_args /* {
    709  1.1  rmind 		syscallarg(struct aiocb *) aiocbp;
    710  1.1  rmind 	} */ *uap = v;
    711  1.1  rmind 	struct proc *p = l->l_proc;
    712  1.1  rmind 	struct aioproc *aio = p->p_aio;
    713  1.1  rmind 	struct aiocb aiocbp;
    714  1.1  rmind 	int error;
    715  1.1  rmind 
    716  1.1  rmind 	if (aio == NULL)
    717  1.1  rmind 		return EINVAL;
    718  1.1  rmind 
    719  1.1  rmind 	error = copyin(SCARG(uap, aiocbp), &aiocbp, sizeof(struct aiocb));
    720  1.1  rmind 	if (error)
    721  1.1  rmind 		return error;
    722  1.1  rmind 
    723  1.1  rmind 	if (aiocbp._errno == EINPROGRESS || aiocbp._state != JOB_DONE)
    724  1.1  rmind 		return EINVAL;
    725  1.1  rmind 
    726  1.1  rmind 	*retval = aiocbp._retval;
    727  1.1  rmind 
    728  1.1  rmind 	/* Reset the internal variables */
    729  1.1  rmind 	aiocbp._errno = 0;
    730  1.1  rmind 	aiocbp._retval = -1;
    731  1.1  rmind 	aiocbp._state = JOB_NONE;
    732  1.1  rmind 	error = copyout(&aiocbp, SCARG(uap, aiocbp), sizeof(struct aiocb));
    733  1.1  rmind 
    734  1.1  rmind 	return error;
    735  1.1  rmind }
    736  1.1  rmind 
    737  1.1  rmind int
    738  1.1  rmind sys_aio_suspend(struct lwp *l, void *v, register_t *retval)
    739  1.1  rmind {
    740  1.1  rmind 	struct sys_aio_suspend_args /* {
    741  1.1  rmind 		syscallarg(const struct aiocb *const[]) list;
    742  1.1  rmind 		syscallarg(int) nent;
    743  1.1  rmind 		syscallarg(const struct timespec *) timeout;
    744  1.1  rmind 	} */ *uap = v;
    745  1.1  rmind 	struct proc *p = l->l_proc;
    746  1.1  rmind 	struct aioproc *aio;
    747  1.1  rmind 	struct aio_job *a_job;
    748  1.1  rmind 	struct aiocb **aiocbp_list;
    749  1.1  rmind 	struct timespec ts;
    750  1.1  rmind 	int i, error, nent, timo;
    751  1.1  rmind 
    752  1.1  rmind 	if (p->p_aio == NULL)
    753  1.1  rmind 		return EAGAIN;
    754  1.1  rmind 	aio = p->p_aio;
    755  1.1  rmind 
    756  1.1  rmind 	nent = SCARG(uap, nent);
    757  1.1  rmind 	if (nent <= 0 || nent > aio_listio_max)
    758  1.1  rmind 		return EAGAIN;
    759  1.1  rmind 
    760  1.1  rmind 	if (SCARG(uap, timeout)) {
    761  1.1  rmind 		/* Convert timespec to ticks */
    762  1.1  rmind 		error = copyin(SCARG(uap, timeout), &ts,
    763  1.1  rmind 		    sizeof(struct timespec));
    764  1.1  rmind 		if (error)
    765  1.1  rmind 			return error;
    766  1.1  rmind 		timo = mstohz((ts.tv_sec * 1000) + (ts.tv_nsec / 1000000));
    767  1.1  rmind 		if (timo == 0 && ts.tv_sec == 0 && ts.tv_nsec > 0)
    768  1.1  rmind 			timo = 1;
    769  1.1  rmind 		if (timo <= 0)
    770  1.1  rmind 			return EAGAIN;
    771  1.1  rmind 	} else
    772  1.1  rmind 		timo = 0;
    773  1.1  rmind 
    774  1.1  rmind 	/* Get the list from user-space */
    775  1.1  rmind 	aiocbp_list = kmem_zalloc(nent * sizeof(struct aio_job), KM_SLEEP);
    776  1.1  rmind 	error = copyin(SCARG(uap, list), aiocbp_list,
    777  1.1  rmind 	    nent * sizeof(struct aiocb));
    778  1.1  rmind 	if (error) {
    779  1.1  rmind 		kmem_free(aiocbp_list, nent * sizeof(struct aio_job));
    780  1.1  rmind 		return error;
    781  1.1  rmind 	}
    782  1.1  rmind 
    783  1.1  rmind 	mutex_enter(&aio->aio_mtx);
    784  1.1  rmind 	for (;;) {
    785  1.1  rmind 
    786  1.1  rmind 		for (i = 0; i < nent; i++) {
    787  1.1  rmind 
    788  1.1  rmind 			/* Skip NULL entries */
    789  1.1  rmind 			if (aiocbp_list[i] == NULL)
    790  1.1  rmind 				continue;
    791  1.1  rmind 
    792  1.1  rmind 			/* Skip current job */
    793  1.1  rmind 			if (aio->curjob) {
    794  1.1  rmind 				a_job = aio->curjob;
    795  1.1  rmind 				if (a_job->aiocb_uptr == aiocbp_list[i])
    796  1.1  rmind 					continue;
    797  1.1  rmind 			}
    798  1.1  rmind 
    799  1.1  rmind 			/* Look for a job in the queue */
    800  1.1  rmind 			TAILQ_FOREACH(a_job, &aio->jobs_queue, list)
    801  1.1  rmind 				if (a_job->aiocb_uptr == aiocbp_list[i])
    802  1.1  rmind 					break;
    803  1.1  rmind 
    804  1.1  rmind 			if (a_job == NULL) {
    805  1.1  rmind 				struct aiocb aiocbp;
    806  1.1  rmind 
    807  1.1  rmind 				mutex_exit(&aio->aio_mtx);
    808  1.1  rmind 
    809  1.1  rmind 				error = copyin(aiocbp_list[i], &aiocbp,
    810  1.1  rmind 				    sizeof(struct aiocb));
    811  1.1  rmind 				if (error == 0 && aiocbp._state != JOB_DONE) {
    812  1.1  rmind 					mutex_enter(&aio->aio_mtx);
    813  1.1  rmind 					continue;
    814  1.1  rmind 				}
    815  1.1  rmind 
    816  1.1  rmind 				kmem_free(aiocbp_list,
    817  1.1  rmind 				    nent * sizeof(struct aio_job));
    818  1.1  rmind 				return error;
    819  1.1  rmind 			}
    820  1.1  rmind 		}
    821  1.1  rmind 
    822  1.1  rmind 		/* Wait for a signal or when timeout occurs */
    823  1.1  rmind 		error = cv_timedwait_sig(&aio->done_cv, &aio->aio_mtx, timo);
    824  1.1  rmind 		if (error) {
    825  1.1  rmind 			if (error == EWOULDBLOCK)
    826  1.1  rmind 				error = EAGAIN;
    827  1.1  rmind 			break;
    828  1.1  rmind 		}
    829  1.1  rmind 	}
    830  1.1  rmind 	mutex_exit(&aio->aio_mtx);
    831  1.1  rmind 
    832  1.1  rmind 	kmem_free(aiocbp_list, nent * sizeof(struct aio_job));
    833  1.1  rmind 	return error;
    834  1.1  rmind }
    835  1.1  rmind 
    836  1.1  rmind int
    837  1.1  rmind sys_aio_write(struct lwp *l, void *v, register_t *retval)
    838  1.1  rmind {
    839  1.1  rmind 	struct sys_aio_write_args /* {
    840  1.1  rmind 		syscallarg(struct aiocb *) aiocbp;
    841  1.1  rmind 	} */ *uap = v;
    842  1.1  rmind 
    843  1.1  rmind 	return aio_enqueue_job(AIO_WRITE, SCARG(uap, aiocbp), NULL);
    844  1.1  rmind }
    845  1.1  rmind 
    846  1.1  rmind int
    847  1.1  rmind sys_lio_listio(struct lwp *l, void *v, register_t *retval)
    848  1.1  rmind {
    849  1.1  rmind 	struct sys_lio_listio_args /* {
    850  1.1  rmind 		syscallarg(int) mode;
    851  1.1  rmind 		syscallarg(struct aiocb *const[]) list;
    852  1.1  rmind 		syscallarg(int) nent;
    853  1.1  rmind 		syscallarg(struct sigevent *) sig;
    854  1.1  rmind 	} */ *uap = v;
    855  1.1  rmind 	struct proc *p = l->l_proc;
    856  1.1  rmind 	struct aioproc *aio;
    857  1.1  rmind 	struct aiocb **aiocbp_list;
    858  1.1  rmind 	struct lio_req *lio;
    859  1.1  rmind 	struct sigevent sig;
    860  1.1  rmind 	int i, error, errcnt, mode, nent;
    861  1.1  rmind 
    862  1.1  rmind 	mode = SCARG(uap, mode);
    863  1.1  rmind 	nent = SCARG(uap, nent);
    864  1.1  rmind 
    865  1.1  rmind 	/* Check for the limits, and invalid values */
    866  1.1  rmind 	if (nent < 1 || nent > aio_listio_max)
    867  1.1  rmind 		return EINVAL;
    868  1.1  rmind 	if (aio_jobs_count + nent > aio_max) /* XXXSMP */
    869  1.1  rmind 		return EAGAIN;
    870  1.1  rmind 	if (mode != LIO_NOWAIT && mode != LIO_WAIT)
    871  1.1  rmind 		return EINVAL;
    872  1.1  rmind 
    873  1.1  rmind 	/* Check for signal, validate it */
    874  1.1  rmind 	if (mode == LIO_NOWAIT && SCARG(uap, sig)) {
    875  1.1  rmind 		error = copyin(SCARG(uap, sig), &sig, sizeof(struct sigevent));
    876  1.1  rmind 		if (error)
    877  1.1  rmind 			return error;
    878  1.1  rmind 		if (sig.sigev_signo < 0 || sig.sigev_signo >= NSIG ||
    879  1.1  rmind 		    sig.sigev_notify < SIGEV_NONE ||
    880  1.1  rmind 		    sig.sigev_notify > SIGEV_SA)
    881  1.1  rmind 			return EINVAL;
    882  1.1  rmind 	}
    883  1.1  rmind 
    884  1.1  rmind 	/* Check if AIO structure is initialized, if not - initialize it */
    885  1.1  rmind 	if (p->p_aio == NULL)
    886  1.1  rmind 		if (aio_init(p))
    887  1.1  rmind 			return EAGAIN;
    888  1.1  rmind 	aio = p->p_aio;
    889  1.1  rmind 
    890  1.1  rmind 	/* Create a LIO structure */
    891  1.1  rmind 	lio = pool_get(&aio->lio_pool, PR_WAITOK);
    892  1.1  rmind 	if (SCARG(uap, sig))
    893  1.1  rmind 		memcpy(&lio->sig, &sig, sizeof(struct sigevent));
    894  1.1  rmind 	else
    895  1.1  rmind 		memset(&lio->sig, 0, sizeof(struct sigevent));
    896  1.1  rmind 	lio->dofree = (mode == LIO_WAIT) ? false : true;
    897  1.1  rmind 	lio->refcnt = 1; /* XXX: Hack */
    898  1.1  rmind 
    899  1.1  rmind 	/* Get the list from user-space */
    900  1.1  rmind 	aiocbp_list = kmem_zalloc(nent * sizeof(struct aio_job), KM_SLEEP);
    901  1.1  rmind 	error = copyin(SCARG(uap, list), aiocbp_list,
    902  1.1  rmind 	    nent * sizeof(struct aiocb));
    903  1.1  rmind 	if (error)
    904  1.1  rmind 		goto err;
    905  1.1  rmind 
    906  1.1  rmind 	/* Enqueue all jobs */
    907  1.1  rmind 	errcnt = 0;
    908  1.1  rmind 	for (i = 0; i < nent; i++) {
    909  1.1  rmind 		if (i == (nent - 1)) /* XXX: Hack */
    910  1.1  rmind 			lio->refcnt--;
    911  1.1  rmind 		error = aio_enqueue_job(AIO_LIO, aiocbp_list[i], lio);
    912  1.1  rmind 		/*
    913  1.1  rmind 		 * According to POSIX, in such error case it may
    914  1.1  rmind 		 * fail with other I/O operations initiated.
    915  1.1  rmind 		 */
    916  1.1  rmind 		if (error)
    917  1.1  rmind 			errcnt++;
    918  1.1  rmind 	}
    919  1.1  rmind 
    920  1.1  rmind 	/* Return an error, if any */
    921  1.1  rmind 	if (errcnt) {
    922  1.1  rmind 		error = EIO;
    923  1.1  rmind 		goto err;
    924  1.1  rmind 	}
    925  1.1  rmind 
    926  1.1  rmind 	if (mode == LIO_WAIT) {
    927  1.1  rmind 		/*
    928  1.1  rmind 		 * Wait for AIO completion.  In such case,
    929  1.1  rmind 		 * the LIO structure will be freed here.
    930  1.1  rmind 		 */
    931  1.1  rmind 		error = 0;
    932  1.1  rmind 		mutex_enter(&aio->aio_mtx);
    933  1.1  rmind 		while (lio->refcnt || error)
    934  1.1  rmind 			error = cv_wait_sig(&aio->done_cv, &aio->aio_mtx);
    935  1.1  rmind 		mutex_exit(&aio->aio_mtx);
    936  1.1  rmind 		if (error)
    937  1.1  rmind 			error = EINTR;
    938  1.1  rmind 	}
    939  1.1  rmind 
    940  1.1  rmind err:
    941  1.1  rmind 	kmem_free(aiocbp_list, nent * sizeof(struct aio_job));
    942  1.1  rmind 	if (mode == LIO_WAIT) {
    943  1.1  rmind 		KASSERT(lio != NULL);
    944  1.1  rmind 		pool_put(&aio->lio_pool, lio);
    945  1.1  rmind 	}
    946  1.1  rmind 
    947  1.1  rmind 	return error;
    948  1.1  rmind }
    949  1.1  rmind 
    950  1.1  rmind /*
    951  1.1  rmind  * SysCtl
    952  1.1  rmind  */
    953  1.1  rmind 
    954  1.1  rmind static int
    955  1.1  rmind sysctl_aio_listio_max(SYSCTLFN_ARGS)
    956  1.1  rmind {
    957  1.1  rmind 	struct sysctlnode node;
    958  1.1  rmind 	int error, newsize;
    959  1.1  rmind 
    960  1.1  rmind 	node = *rnode;
    961  1.1  rmind 	node.sysctl_data = &newsize;
    962  1.1  rmind 
    963  1.1  rmind 	newsize = aio_listio_max;
    964  1.1  rmind 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
    965  1.1  rmind 	if (error || newp == NULL)
    966  1.1  rmind 		return error;
    967  1.1  rmind 
    968  1.1  rmind 	/* XXXSMP */
    969  1.1  rmind 	if (newsize < 1 || newsize > aio_max)
    970  1.1  rmind 		return EINVAL;
    971  1.1  rmind 	aio_listio_max = newsize;
    972  1.1  rmind 
    973  1.1  rmind 	return 0;
    974  1.1  rmind }
    975  1.1  rmind 
    976  1.1  rmind static int
    977  1.1  rmind sysctl_aio_max(SYSCTLFN_ARGS)
    978  1.1  rmind {
    979  1.1  rmind 	struct sysctlnode node;
    980  1.1  rmind 	int error, newsize;
    981  1.1  rmind 
    982  1.1  rmind 	node = *rnode;
    983  1.1  rmind 	node.sysctl_data = &newsize;
    984  1.1  rmind 
    985  1.1  rmind 	newsize = aio_max;
    986  1.1  rmind 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
    987  1.1  rmind 	if (error || newp == NULL)
    988  1.1  rmind 		return error;
    989  1.1  rmind 
    990  1.1  rmind 	/* XXXSMP */
    991  1.1  rmind 	if (newsize < 1 || newsize < aio_listio_max)
    992  1.1  rmind 		return EINVAL;
    993  1.1  rmind 	aio_max = newsize;
    994  1.1  rmind 
    995  1.1  rmind 	return 0;
    996  1.1  rmind }
    997  1.1  rmind 
/*
 * Create the AIO sysctl nodes under kern: the read-only POSIX
 * conformance version, and the two tunable limits handled by the
 * functions above.  Node creation order matters: CTL_CREATE assigns
 * dynamic numbers in sequence, so do not reorder these calls.
 */
SYSCTL_SETUP(sysctl_aio_setup, "sysctl aio setup")
{

	sysctl_createv(clog, 0, NULL, NULL,
		CTLFLAG_PERMANENT,
		CTLTYPE_NODE, "kern", NULL,
		NULL, 0, NULL, 0,
		CTL_KERN, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		CTLFLAG_PERMANENT | CTLFLAG_IMMEDIATE,
		CTLTYPE_INT, "posix_aio",
		SYSCTL_DESCR("Version of IEEE Std 1003.1 and its "
			     "Asynchronous I/O option to which the "
			     "system attempts to conform"),
		NULL, _POSIX_ASYNCHRONOUS_IO, NULL, 0,
		CTL_KERN, CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
		CTLTYPE_INT, "aio_listio_max",
		SYSCTL_DESCR("Maximum number of asynchronous I/O "
			     "operations in a single list I/O call"),
		sysctl_aio_listio_max, 0, &aio_listio_max, 0,
		CTL_KERN, CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
		CTLTYPE_INT, "aio_max",
		SYSCTL_DESCR("Maximum number of asynchronous I/O "
			     "operations"),
		sysctl_aio_max, 0, &aio_max, 0,
		CTL_KERN, CTL_CREATE, CTL_EOL);
}
   1029  1.1  rmind 
   1030  1.1  rmind /*
   1031  1.1  rmind  * Debugging
   1032  1.1  rmind  */
#if defined(DDB)
/*
 * Print one asynchronous I/O job (DDB helper).
 */
static void
aio_print_job(void (*pr)(const char *, ...), struct aio_job *a_job)
{
	struct aiocb *aiocbp = &a_job->aiocbp;

	(*pr)(" opcode = %d, errno = %d, state = %d, aiocb_ptr = %p\n",
	    a_job->aio_op, aiocbp->_errno,
	    aiocbp->_state, a_job->aiocb_uptr);
	/*
	 * aio_offset is off_t and aio_nbytes is size_t; the original
	 * printed both with %u, which mismatches on 64-bit values.
	 */
	(*pr)("   fd = %d, offset = %lld, buf = %p, nbytes = %zu\n",
	    aiocbp->aio_fildes, (long long)aiocbp->aio_offset,
	    aiocbp->aio_buf, aiocbp->aio_nbytes);
}

/*
 * Dump the AIO state of the current process: global and per-process
 * job counts, the job currently in flight, and the pending queue.
 */
void
aio_print_jobs(void (*pr)(const char *, ...))
{
	struct proc *p = (curlwp == NULL ? NULL : curlwp->l_proc);
	struct aioproc *aio;
	struct aio_job *a_job;

	if (p == NULL) {
		(*pr)("AIO: We are not in the processes right now.\n");
		return;
	}

	aio = p->p_aio;
	if (aio == NULL) {
		(*pr)("AIO data is not initialized (PID = %d).\n", p->p_pid);
		return;
	}

	(*pr)("AIO: PID = %d\n", p->p_pid);
	(*pr)("AIO: Global count of the jobs = %u\n", aio_jobs_count);
	(*pr)("AIO: Count of the jobs = %u\n", aio->jobs_count);

	if (aio->curjob) {
		(*pr)("\nAIO current job:\n");
		aio_print_job(pr, aio->curjob);
	}

	(*pr)("\nAIO queue:\n");
	TAILQ_FOREACH(a_job, &aio->jobs_queue, list)
		aio_print_job(pr, a_job);
}
#endif /* defined(DDB) */
   1081