Home | History | Annotate | Line # | Download | only in kern
sys_aio.c revision 1.7
      1  1.7     pooka /*	$NetBSD: sys_aio.c,v 1.7 2007/09/01 23:40:23 pooka Exp $	*/
      2  1.1     rmind 
      3  1.1     rmind /*
      4  1.1     rmind  * Copyright (c) 2007, Mindaugas Rasiukevicius <rmind at NetBSD org>
      5  1.1     rmind  *
      6  1.1     rmind  * Redistribution and use in source and binary forms, with or without
      7  1.1     rmind  * modification, are permitted provided that the following conditions
      8  1.1     rmind  * are met:
      9  1.1     rmind  * 1. Redistributions of source code must retain the above copyright
     10  1.1     rmind  *    notice, this list of conditions and the following disclaimer.
     11  1.1     rmind  * 2. Redistributions in binary form must reproduce the above copyright
     12  1.1     rmind  *    notice, this list of conditions and the following disclaimer in the
     13  1.1     rmind  *    documentation and/or other materials provided with the distribution.
     14  1.1     rmind  *
     15  1.1     rmind  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
     16  1.1     rmind  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     17  1.1     rmind  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     18  1.1     rmind  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     19  1.1     rmind  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     20  1.1     rmind  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     21  1.1     rmind  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     22  1.1     rmind  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     23  1.1     rmind  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     24  1.1     rmind  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     25  1.1     rmind  * POSSIBILITY OF SUCH DAMAGE.
     26  1.1     rmind  */
     27  1.1     rmind 
     28  1.1     rmind /*
     29  1.1     rmind  * TODO:
     30  1.1     rmind  *   1. Additional work for VCHR and maybe VBLK devices.
     31  1.1     rmind  *   2. Consider making the job-finding O(n) per one file descriptor.
     32  1.1     rmind  */
     33  1.1     rmind 
     34  1.1     rmind #include <sys/cdefs.h>
     35  1.7     pooka __KERNEL_RCSID(0, "$NetBSD: sys_aio.c,v 1.7 2007/09/01 23:40:23 pooka Exp $");
     36  1.4     rmind 
     37  1.4     rmind #include "opt_ddb.h"
     38  1.1     rmind 
     39  1.1     rmind #include <sys/param.h>
     40  1.1     rmind #include <sys/condvar.h>
     41  1.1     rmind #include <sys/file.h>
     42  1.1     rmind #include <sys/filedesc.h>
     43  1.1     rmind #include <sys/kernel.h>
     44  1.1     rmind #include <sys/kmem.h>
     45  1.1     rmind #include <sys/lwp.h>
     46  1.1     rmind #include <sys/mutex.h>
     47  1.1     rmind #include <sys/pool.h>
     48  1.1     rmind #include <sys/proc.h>
     49  1.1     rmind #include <sys/queue.h>
     50  1.1     rmind #include <sys/signal.h>
     51  1.1     rmind #include <sys/signalvar.h>
     52  1.1     rmind #include <sys/syscallargs.h>
     53  1.1     rmind #include <sys/sysctl.h>
     54  1.1     rmind #include <sys/systm.h>
     55  1.1     rmind #include <sys/types.h>
     56  1.1     rmind #include <sys/vnode.h>
     57  1.1     rmind 
     58  1.1     rmind #include <uvm/uvm_extern.h>
     59  1.1     rmind 
     60  1.1     rmind /*
     61  1.1     rmind  * System-wide limits and counter of AIO operations.
     62  1.1     rmind  * XXXSMP: We should spin-lock it, or modify atomically.
     63  1.1     rmind  */
     64  1.4     rmind static u_int aio_listio_max = AIO_LISTIO_MAX;
     65  1.4     rmind static u_int aio_max = AIO_MAX;
     66  1.4     rmind static u_int aio_jobs_count;
     67  1.1     rmind 
     68  1.4     rmind static struct pool aio_job_pool;
     69  1.4     rmind static struct pool aio_lio_pool;
     70  1.1     rmind 
     71  1.1     rmind /* Prototypes */
     72  1.1     rmind void aio_worker(void *);
     73  1.1     rmind static void aio_process(struct aio_job *);
     74  1.1     rmind static void aio_sendsig(struct proc *, struct sigevent *);
     75  1.1     rmind static int aio_enqueue_job(int, void *, struct lio_req *);
     76  1.1     rmind 
     77  1.1     rmind /*
     78  1.4     rmind  * Initialize the AIO system.
     79  1.4     rmind  */
     80  1.4     rmind void
     81  1.4     rmind aio_sysinit(void)
     82  1.4     rmind {
     83  1.4     rmind 
     84  1.4     rmind 	pool_init(&aio_job_pool, sizeof(struct aio_job), 0, 0, 0,
     85  1.4     rmind 	    "aio_jobs_pool", &pool_allocator_nointr, IPL_NONE);
     86  1.4     rmind 	pool_init(&aio_lio_pool, sizeof(struct lio_req), 0, 0, 0,
     87  1.4     rmind 	    "aio_lio_pool", &pool_allocator_nointr, IPL_NONE);
     88  1.4     rmind }
     89  1.4     rmind 
/*
 * Initialize Asynchronous I/O data structures for the process.
 *
 * Allocates the per-process aioproc structure, its queue and
 * synchronization primitives, and spawns the worker LWP that will
 * service the job queue.  Returns 0 on success — including the case
 * where another thread won the initialization race — or EAGAIN on
 * resource shortage.
 */
int
aio_init(struct proc *p)
{
	struct aioproc *aio;
	struct lwp *l;
	bool inmem;
	vaddr_t uaddr;

	/* Allocate and initialize AIO structure */
	aio = kmem_zalloc(sizeof(struct aioproc), KM_NOSLEEP);
	if (aio == NULL)
		return EAGAIN;

	/* Initialize queue and their synchronization structures */
	mutex_init(&aio->aio_mtx, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&aio->aio_worker_cv, "aiowork");
	cv_init(&aio->done_cv, "aiodone");
	TAILQ_INIT(&aio->jobs_queue);

	/*
	 * Create an AIO worker thread.
	 * XXX: Currently, AIO thread is not protected against user's actions.
	 */
	inmem = uvm_uarea_alloc(&uaddr);
	if (uaddr == 0) {
		/* No u-area available: undo the local allocation */
		aio_exit(p, aio);
		return EAGAIN;
	}
	if (newlwp(curlwp, p, uaddr, inmem, 0, NULL, 0,
	    aio_worker, NULL, &l)) {
		/* LWP creation failed: release the u-area and our state */
		uvm_uarea_free(uaddr);
		aio_exit(p, aio);
		return EAGAIN;
	}

	/*
	 * Recheck if we are really first: another thread may have set
	 * p_aio while we slept in the allocations above.  If so, back
	 * out our copy and retire the freshly created LWP.
	 */
	mutex_enter(&p->p_mutex);
	if (p->p_aio) {
		mutex_exit(&p->p_mutex);
		aio_exit(p, aio);
		lwp_exit(l);
		return 0;
	}
	p->p_aio = aio;
	mutex_exit(&p->p_mutex);

	/* Complete the initialization of thread, and run it */
	mutex_enter(&p->p_smutex);
	aio->aio_worker = l;
	p->p_nrlwps++;
	lwp_lock(l);
	l->l_stat = LSRUN;
	l->l_usrpri = PUSER - 1; /* XXX */
	sched_enqueue(l, false);
	lwp_unlock(l);
	mutex_exit(&p->p_smutex);

	return 0;
}
    152  1.1     rmind 
    153  1.1     rmind /*
    154  1.1     rmind  * Exit of Asynchronous I/O subsystem of process.
    155  1.1     rmind  */
    156  1.1     rmind void
    157  1.5     rmind aio_exit(struct proc *p, struct aioproc *aio)
    158  1.1     rmind {
    159  1.1     rmind 	struct aio_job *a_job;
    160  1.1     rmind 
    161  1.5     rmind 	if (aio == NULL)
    162  1.1     rmind 		return;
    163  1.1     rmind 
    164  1.1     rmind 	/* Free AIO queue */
    165  1.1     rmind 	while (!TAILQ_EMPTY(&aio->jobs_queue)) {
    166  1.1     rmind 		a_job = TAILQ_FIRST(&aio->jobs_queue);
    167  1.1     rmind 		TAILQ_REMOVE(&aio->jobs_queue, a_job, list);
    168  1.4     rmind 		pool_put(&aio_job_pool, a_job);
    169  1.1     rmind 		aio_jobs_count--; /* XXXSMP */
    170  1.1     rmind 	}
    171  1.1     rmind 
    172  1.1     rmind 	/* Destroy and free the entire AIO data structure */
    173  1.1     rmind 	cv_destroy(&aio->aio_worker_cv);
    174  1.1     rmind 	cv_destroy(&aio->done_cv);
    175  1.1     rmind 	mutex_destroy(&aio->aio_mtx);
    176  1.1     rmind 	kmem_free(aio, sizeof(struct aioproc));
    177  1.1     rmind }
    178  1.1     rmind 
/*
 * AIO worker thread and processor.
 *
 * Runs for the lifetime of the process's AIO state: dequeues jobs
 * one at a time, performs the I/O via aio_process(), copies the
 * result back to user-space, wakes any aio_suspend() waiters and
 * posts completion signals.  Never returns.
 */
void
aio_worker(void *arg)
{
	struct proc *p = curlwp->l_proc;
	struct aioproc *aio = p->p_aio;
	struct aio_job *a_job;
	struct lio_req *lio;
	sigset_t oss, nss;
	int error, refcnt;

	/*
	 * Make an empty signal mask, so it
	 * handles only SIGKILL and SIGSTOP.
	 */
	sigfillset(&nss);
	mutex_enter(&p->p_smutex);
	error = sigprocmask1(curlwp, SIG_SETMASK, &nss, &oss);
	mutex_exit(&p->p_smutex);
	KASSERT(error == 0);

	for (;;) {
		/*
		 * Loop for each job in the queue.  If there
		 * are no jobs then sleep.
		 */
		mutex_enter(&aio->aio_mtx);
		while ((a_job = TAILQ_FIRST(&aio->jobs_queue)) == NULL) {
			if (cv_wait_sig(&aio->aio_worker_cv, &aio->aio_mtx)) {
				/*
				 * Thread was interrupted - check for
				 * pending exit or suspend.
				 */
				mutex_exit(&aio->aio_mtx);
				lwp_userret(curlwp);
				mutex_enter(&aio->aio_mtx);
			}
		}

		/*
		 * Take the job from the queue.  curjob marks it as
		 * in-progress so aio_cancel()/aio_enqueue_job() can
		 * see it even though it is off the queue.
		 */
		aio->curjob = a_job;
		TAILQ_REMOVE(&aio->jobs_queue, a_job, list);

		aio_jobs_count--; /* XXXSMP */
		aio->jobs_count--;

		mutex_exit(&aio->aio_mtx);

		/* Process an AIO operation */
		aio_process(a_job);

		/* Copy data structure back to the user-space */
		(void)copyout(&a_job->aiocbp, a_job->aiocb_uptr,
		    sizeof(struct aiocb));

		mutex_enter(&aio->aio_mtx);
		aio->curjob = NULL;

		/*
		 * Decrease a reference counter, if there is a LIO structure.
		 * Snapshot taken under the lock; -1 means "no LIO attached".
		 */
		lio = a_job->lio;
		refcnt = (lio != NULL ? --lio->refcnt : -1);

		/* Notify all suspenders */
		cv_broadcast(&aio->done_cv);
		mutex_exit(&aio->aio_mtx);

		/* Send a signal, if any */
		aio_sendsig(p, &a_job->aiocbp.aio_sigevent);

		/* Last job of the list finished: notify and destroy the LIO */
		if (refcnt == 0) {
			aio_sendsig(p, &lio->sig);
			pool_put(&aio_lio_pool, lio);
		}

		/* Destroy the job */
		pool_put(&aio_job_pool, a_job);
	}

	/* NOTREACHED */
}
    262  1.1     rmind 
/*
 * Execute one asynchronous I/O request in the context of the AIO
 * worker LWP.  Dispatches on a_job->aio_op: AIO_READ/AIO_WRITE go
 * through the file's fo_read/fo_write ops (updating the file offset),
 * AIO_SYNC/AIO_DSYNC fsync the backing vnode.  On return the job's
 * _errno, _retval and _state fields describe the outcome; the caller
 * is responsible for copying them back to user-space.
 */
static void
aio_process(struct aio_job *a_job)
{
	struct proc *p = curlwp->l_proc;
	struct aiocb *aiocbp = &a_job->aiocbp;
	struct file *fp;
	struct filedesc	*fdp = p->p_fd;
	int fd = aiocbp->aio_fildes;
	int error = 0;

	KASSERT(fdp != NULL);
	KASSERT(a_job->aio_op != 0);

	if ((a_job->aio_op & (AIO_READ | AIO_WRITE)) != 0) {
		struct iovec aiov;
		struct uio auio;

		/* POSIX: transfer sizes above SSIZE_MAX are invalid */
		if (aiocbp->aio_nbytes > SSIZE_MAX) {
			error = EINVAL;
			goto done;
		}

		fp = fd_getfile(fdp, fd);
		if (fp == NULL) {
			error = EBADF;
			goto done;
		}

		/* Build a single-segment uio over the user buffer */
		aiov.iov_base = (void *)(uintptr_t)aiocbp->aio_buf;
		aiov.iov_len = aiocbp->aio_nbytes;
		auio.uio_iov = &aiov;
		auio.uio_iovcnt = 1;
		auio.uio_resid = aiocbp->aio_nbytes;
		auio.uio_vmspace = p->p_vmspace;

		FILE_USE(fp);
		if (a_job->aio_op & AIO_READ) {
			/*
			 * Perform a Read operation
			 */
			KASSERT((a_job->aio_op & AIO_WRITE) == 0);

			if ((fp->f_flag & FREAD) == 0) {
				FILE_UNUSE(fp, curlwp);
				error = EBADF;
				goto done;
			}
			auio.uio_rw = UIO_READ;
			error = (*fp->f_ops->fo_read)(fp, &aiocbp->aio_offset,
			    &auio, fp->f_cred, FOF_UPDATE_OFFSET);
		} else {
			/*
			 * Perform a Write operation
			 */
			KASSERT(a_job->aio_op & AIO_WRITE);

			if ((fp->f_flag & FWRITE) == 0) {
				FILE_UNUSE(fp, curlwp);
				error = EBADF;
				goto done;
			}
			auio.uio_rw = UIO_WRITE;
			error = (*fp->f_ops->fo_write)(fp, &aiocbp->aio_offset,
			    &auio, fp->f_cred, FOF_UPDATE_OFFSET);
		}
		FILE_UNUSE(fp, curlwp);

		/*
		 * Store the result value: bytes actually transferred on
		 * success, -1 on error (errno is stored separately below).
		 */
		a_job->aiocbp.aio_nbytes -= auio.uio_resid;
		a_job->aiocbp._retval = (error == 0) ?
		    a_job->aiocbp.aio_nbytes : -1;

	} else if ((a_job->aio_op & (AIO_SYNC | AIO_DSYNC)) != 0) {
		/*
		 * Perform a file Sync operation
		 */
		struct vnode *vp;

		if ((error = getvnode(fdp, fd, &fp)) != 0)
			goto done;

		if ((fp->f_flag & FWRITE) == 0) {
			FILE_UNUSE(fp, curlwp);
			error = EBADF;
			goto done;
		}

		vp = (struct vnode *)fp->f_data;
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		if (a_job->aio_op & AIO_DSYNC) {
			/* Data-only sync: skip metadata (FSYNC_DATAONLY) */
			error = VOP_FSYNC(vp, fp->f_cred,
			    FSYNC_WAIT | FSYNC_DATAONLY, 0, 0, curlwp);
		} else if (a_job->aio_op & AIO_SYNC) {
			error = VOP_FSYNC(vp, fp->f_cred,
			    FSYNC_WAIT, 0, 0, curlwp);
			/* Also flush soft-dependencies on MNT_SOFTDEP mounts */
			if (error == 0 && bioopsp != NULL &&
			    vp->v_mount &&
			    (vp->v_mount->mnt_flag & MNT_SOFTDEP))
			    bioopsp->io_fsync(vp, 0);
		}
		VOP_UNLOCK(vp, 0);
		FILE_UNUSE(fp, curlwp);

		/* Store the result value */
		a_job->aiocbp._retval = (error == 0) ? 0 : -1;

	} else
		panic("aio_process: invalid operation code\n");

done:
	/* Job is done, set the error, if any */
	a_job->aiocbp._errno = error;
	a_job->aiocbp._state = JOB_DONE;
}
    377  1.1     rmind 
    378  1.1     rmind /*
    379  1.1     rmind  * Send AIO signal.
    380  1.1     rmind  */
    381  1.1     rmind static void
    382  1.1     rmind aio_sendsig(struct proc *p, struct sigevent *sig)
    383  1.1     rmind {
    384  1.1     rmind 	ksiginfo_t ksi;
    385  1.1     rmind 
    386  1.1     rmind 	if (sig->sigev_signo == 0 || sig->sigev_notify == SIGEV_NONE)
    387  1.1     rmind 		return;
    388  1.1     rmind 
    389  1.1     rmind 	KSI_INIT(&ksi);
    390  1.1     rmind 	ksi.ksi_signo = sig->sigev_signo;
    391  1.1     rmind 	ksi.ksi_code = SI_ASYNCIO;
    392  1.3  christos 	ksi.ksi_value = sig->sigev_value;
    393  1.1     rmind 	mutex_enter(&proclist_mutex);
    394  1.1     rmind 	kpsignal(p, &ksi, NULL);
    395  1.1     rmind 	mutex_exit(&proclist_mutex);
    396  1.1     rmind }
    397  1.1     rmind 
/*
 * Enqueue the job.
 *
 * Validates the user-supplied aiocb, rejects duplicates of jobs that
 * are already queued or in progress, lazily initializes the per-process
 * AIO state, and finally links a new aio_job onto the queue and wakes
 * the worker.  "op" is one of AIO_READ/AIO_WRITE/AIO_SYNC/AIO_DSYNC,
 * or AIO_LIO for lio_listio() entries (then "lio" carries the shared
 * list request).  Returns 0 or an errno.
 */
static int
aio_enqueue_job(int op, void *aiocb_uptr, struct lio_req *lio)
{
	struct proc *p = curlwp->l_proc;
	struct aioproc *aio;
	struct aio_job *a_job;
	struct aiocb aiocbp;
	struct sigevent *sig;
	int error;

	/* Check for the limit */
	if (aio_jobs_count + 1 > aio_max) /* XXXSMP */
		return EAGAIN;

	/* Get the data structure from user-space */
	error = copyin(aiocb_uptr, &aiocbp, sizeof(struct aiocb));
	if (error)
		return error;

	/* Check if signal is set, and validate it */
	sig = &aiocbp.aio_sigevent;
	if (sig->sigev_signo < 0 || sig->sigev_signo >= NSIG ||
	    sig->sigev_notify < SIGEV_NONE || sig->sigev_notify > SIGEV_SA)
		return EINVAL;

	/* Buffer and byte count (sync operations carry no buffer) */
	if (((AIO_SYNC | AIO_DSYNC) & op) == 0)
		if (aiocbp.aio_buf == NULL || aiocbp.aio_nbytes > SSIZE_MAX)
			return EINVAL;

	/* Check the opcode, if LIO_NOP - simply ignore */
	if (op == AIO_LIO) {
		KASSERT(lio != NULL);
		if (aiocbp.aio_lio_opcode == LIO_WRITE)
			op = AIO_WRITE;
		else if (aiocbp.aio_lio_opcode == LIO_READ)
			op = AIO_READ;
		else
			return (aiocbp.aio_lio_opcode == LIO_NOP) ? 0 : EINVAL;
	} else {
		KASSERT(lio == NULL);
	}

	/*
	 * Look for already existing job.  If found - the job is in-progress.
	 * According to POSIX this is invalid, so return the error.
	 */
	aio = p->p_aio;
	if (aio) {
		mutex_enter(&aio->aio_mtx);
		/* The worker's current job is off the queue - check it too */
		if (aio->curjob) {
			a_job = aio->curjob;
			if (a_job->aiocb_uptr == aiocb_uptr) {
				mutex_exit(&aio->aio_mtx);
				return EINVAL;
			}
		}
		TAILQ_FOREACH(a_job, &aio->jobs_queue, list) {
			if (a_job->aiocb_uptr != aiocb_uptr)
				continue;
			mutex_exit(&aio->aio_mtx);
			return EINVAL;
		}
		mutex_exit(&aio->aio_mtx);
	}

	/*
	 * Check if AIO structure is initialized, if not - initialize it.
	 * In LIO case, we did that already.  We will recheck this with
	 * the lock in aio_init().
	 */
	if (lio == NULL && p->p_aio == NULL)
		if (aio_init(p))
			return EAGAIN;
	aio = p->p_aio;

	/*
	 * Set the state with errno, and copy data
	 * structure back to the user-space.
	 */
	aiocbp._state = JOB_WIP;
	aiocbp._errno = EINPROGRESS;
	aiocbp._retval = -1;
	error = copyout(&aiocbp, aiocb_uptr, sizeof(struct aiocb));
	if (error)
		return error;

	/* Allocate and initialize a new AIO job */
	a_job = pool_get(&aio_job_pool, PR_WAITOK);
	memset(a_job, 0, sizeof(struct aio_job));

	/*
	 * Set the data.
	 * Store the user-space pointer for searching.  Since we
	 * are storing only per proc pointers - it is safe.
	 */
	memcpy(&a_job->aiocbp, &aiocbp, sizeof(struct aiocb));
	a_job->aiocb_uptr = aiocb_uptr;
	a_job->aio_op |= op;
	a_job->lio = lio;

	/*
	 * Add the job to the queue, update the counters, and
	 * notify the AIO worker thread to handle the job.
	 */
	mutex_enter(&aio->aio_mtx);

	/* Fail, if the limit was reached */
	if (aio->jobs_count >= aio_listio_max) {
		mutex_exit(&aio->aio_mtx);
		pool_put(&aio_job_pool, a_job);
		return EAGAIN;
	}

	TAILQ_INSERT_TAIL(&aio->jobs_queue, a_job, list);
	aio_jobs_count++; /* XXXSMP */
	aio->jobs_count++;
	if (lio)
		lio->refcnt++;
	cv_signal(&aio->aio_worker_cv);

	mutex_exit(&aio->aio_mtx);

	/*
	 * One would handle the errors only with aio_error() function.
	 * This way is appropriate according to POSIX.
	 */
	return 0;
}
    530  1.1     rmind 
    531  1.1     rmind /*
    532  1.1     rmind  * Syscall functions.
    533  1.1     rmind  */
    534  1.1     rmind 
/*
 * aio_cancel(2): cancel outstanding AIO requests on "fildes" (all of
 * them, or just the one matching "aiocbp" when it is non-NULL).
 *
 * Two-phase: under the AIO mutex, matching jobs are moved from the
 * queue to a private list and the counters adjusted; after the lock
 * is dropped, each collected job is marked ECANCELED, copied back to
 * user-space, signalled and freed.  The job currently being processed
 * by the worker cannot be cancelled (AIO_NOTCANCELED).
 */
int
sys_aio_cancel(struct lwp *l, void *v, register_t *retval)
{
	struct sys_aio_cancel_args /* {
		syscallarg(int) fildes;
		syscallarg(struct aiocb *) aiocbp;
	} */ *uap = v;
	struct proc *p = l->l_proc;
	struct aioproc *aio;
	struct aio_job *a_job;
	struct aiocb *aiocbp_ptr;
	struct lio_req *lio;
	struct filedesc	*fdp = p->p_fd;
	unsigned int cn, errcnt, fildes;

	TAILQ_HEAD(, aio_job) tmp_jobs_list;

	/* Check for invalid file descriptor */
	fildes = (unsigned int)SCARG(uap, fildes);
	if (fildes >= fdp->fd_nfiles || fdp->fd_ofiles[fildes] == NULL)
		return EBADF;

	/* Check if AIO structure is initialized */
	if (p->p_aio == NULL) {
		*retval = AIO_NOTCANCELED;
		return 0;
	}

	aio = p->p_aio;
	aiocbp_ptr = (struct aiocb *)SCARG(uap, aiocbp);

	mutex_enter(&aio->aio_mtx);

	/* Cancel the jobs, and remove them from the queue */
	cn = 0;
	TAILQ_INIT(&tmp_jobs_list);
	TAILQ_FOREACH(a_job, &aio->jobs_queue, list) {
		if (aiocbp_ptr) {
			if (aiocbp_ptr != a_job->aiocb_uptr)
				continue;
			/* Matching aiocb must belong to the given descriptor */
			if (fildes != a_job->aiocbp.aio_fildes) {
				mutex_exit(&aio->aio_mtx);
				return EBADF;
			}
		} else if (a_job->aiocbp.aio_fildes != fildes)
			continue;

		TAILQ_REMOVE(&aio->jobs_queue, a_job, list);
		TAILQ_INSERT_TAIL(&tmp_jobs_list, a_job, list);

		/* Decrease the counters */
		aio_jobs_count--; /* XXXSMP */
		aio->jobs_count--;
		/*
		 * Drop this job's LIO reference now; keep a_job->lio set
		 * only if this was the last reference, so that the free
		 * loop below knows to notify and destroy the LIO.
		 */
		lio = a_job->lio;
		if (lio != NULL && --lio->refcnt != 0)
			a_job->lio = NULL;

		cn++;
		if (aiocbp_ptr)
			break;
	}

	/* There are canceled jobs */
	if (cn)
		*retval = AIO_CANCELED;

	/* We cannot cancel current job */
	a_job = aio->curjob;
	if (a_job && ((a_job->aiocbp.aio_fildes == fildes) ||
	    (a_job->aiocb_uptr == aiocbp_ptr)))
		*retval = AIO_NOTCANCELED;

	mutex_exit(&aio->aio_mtx);

	/* Free the jobs after the lock */
	errcnt = 0;
	while (!TAILQ_EMPTY(&tmp_jobs_list)) {
		a_job = TAILQ_FIRST(&tmp_jobs_list);
		TAILQ_REMOVE(&tmp_jobs_list, a_job, list);
		/* Set the errno and copy structures back to the user-space */
		a_job->aiocbp._errno = ECANCELED;
		a_job->aiocbp._state = JOB_DONE;
		if (copyout(&a_job->aiocbp, a_job->aiocb_uptr,
		    sizeof(struct aiocb)))
			errcnt++;
		/* Send a signal if any */
		aio_sendsig(p, &a_job->aiocbp.aio_sigevent);
		/* Last LIO reference was ours: notify and destroy it */
		if (a_job->lio) {
			lio = a_job->lio;
			aio_sendsig(p, &lio->sig);
			pool_put(&aio_lio_pool, lio);
		}
		pool_put(&aio_job_pool, a_job);
	}

	if (errcnt)
		return EFAULT;

	/* Set a correct return value */
	if (*retval == 0)
		*retval = AIO_ALLDONE;

	return 0;
}
    639  1.1     rmind 
    640  1.1     rmind int
    641  1.1     rmind sys_aio_error(struct lwp *l, void *v, register_t *retval)
    642  1.1     rmind {
    643  1.1     rmind 	struct sys_aio_error_args /* {
    644  1.1     rmind 		syscallarg(const struct aiocb *) aiocbp;
    645  1.1     rmind 	} */ *uap = v;
    646  1.1     rmind 	struct proc *p = l->l_proc;
    647  1.1     rmind 	struct aioproc *aio = p->p_aio;
    648  1.1     rmind 	struct aiocb aiocbp;
    649  1.1     rmind 	int error;
    650  1.1     rmind 
    651  1.1     rmind 	if (aio == NULL)
    652  1.1     rmind 		return EINVAL;
    653  1.1     rmind 
    654  1.1     rmind 	error = copyin(SCARG(uap, aiocbp), &aiocbp, sizeof(struct aiocb));
    655  1.1     rmind 	if (error)
    656  1.1     rmind 		return error;
    657  1.1     rmind 
    658  1.1     rmind 	if (aiocbp._state == JOB_NONE)
    659  1.1     rmind 		return EINVAL;
    660  1.1     rmind 
    661  1.1     rmind 	*retval = aiocbp._errno;
    662  1.1     rmind 
    663  1.1     rmind 	return 0;
    664  1.1     rmind }
    665  1.1     rmind 
    666  1.1     rmind int
    667  1.1     rmind sys_aio_fsync(struct lwp *l, void *v, register_t *retval)
    668  1.1     rmind {
    669  1.1     rmind 	struct sys_aio_fsync_args /* {
    670  1.1     rmind 		syscallarg(int) op;
    671  1.1     rmind 		syscallarg(struct aiocb *) aiocbp;
    672  1.1     rmind 	} */ *uap = v;
    673  1.1     rmind 	int op = SCARG(uap, op);
    674  1.1     rmind 
    675  1.1     rmind 	if ((op != O_DSYNC) && (op != O_SYNC))
    676  1.1     rmind 		return EINVAL;
    677  1.1     rmind 
    678  1.1     rmind 	op = O_DSYNC ? AIO_DSYNC : AIO_SYNC;
    679  1.1     rmind 
    680  1.1     rmind 	return aio_enqueue_job(op, SCARG(uap, aiocbp), NULL);
    681  1.1     rmind }
    682  1.1     rmind 
    683  1.1     rmind int
    684  1.1     rmind sys_aio_read(struct lwp *l, void *v, register_t *retval)
    685  1.1     rmind {
    686  1.1     rmind 	struct sys_aio_read_args /* {
    687  1.1     rmind 		syscallarg(struct aiocb *) aiocbp;
    688  1.1     rmind 	} */ *uap = v;
    689  1.1     rmind 
    690  1.1     rmind 	return aio_enqueue_job(AIO_READ, SCARG(uap, aiocbp), NULL);
    691  1.1     rmind }
    692  1.1     rmind 
    693  1.1     rmind int
    694  1.1     rmind sys_aio_return(struct lwp *l, void *v, register_t *retval)
    695  1.1     rmind {
    696  1.1     rmind 	struct sys_aio_return_args /* {
    697  1.1     rmind 		syscallarg(struct aiocb *) aiocbp;
    698  1.1     rmind 	} */ *uap = v;
    699  1.1     rmind 	struct proc *p = l->l_proc;
    700  1.1     rmind 	struct aioproc *aio = p->p_aio;
    701  1.1     rmind 	struct aiocb aiocbp;
    702  1.1     rmind 	int error;
    703  1.1     rmind 
    704  1.1     rmind 	if (aio == NULL)
    705  1.1     rmind 		return EINVAL;
    706  1.1     rmind 
    707  1.1     rmind 	error = copyin(SCARG(uap, aiocbp), &aiocbp, sizeof(struct aiocb));
    708  1.1     rmind 	if (error)
    709  1.1     rmind 		return error;
    710  1.1     rmind 
    711  1.1     rmind 	if (aiocbp._errno == EINPROGRESS || aiocbp._state != JOB_DONE)
    712  1.1     rmind 		return EINVAL;
    713  1.1     rmind 
    714  1.1     rmind 	*retval = aiocbp._retval;
    715  1.1     rmind 
    716  1.1     rmind 	/* Reset the internal variables */
    717  1.1     rmind 	aiocbp._errno = 0;
    718  1.1     rmind 	aiocbp._retval = -1;
    719  1.1     rmind 	aiocbp._state = JOB_NONE;
    720  1.1     rmind 	error = copyout(&aiocbp, SCARG(uap, aiocbp), sizeof(struct aiocb));
    721  1.1     rmind 
    722  1.1     rmind 	return error;
    723  1.1     rmind }
    724  1.1     rmind 
    725  1.1     rmind int
    726  1.1     rmind sys_aio_suspend(struct lwp *l, void *v, register_t *retval)
    727  1.1     rmind {
    728  1.1     rmind 	struct sys_aio_suspend_args /* {
    729  1.1     rmind 		syscallarg(const struct aiocb *const[]) list;
    730  1.1     rmind 		syscallarg(int) nent;
    731  1.1     rmind 		syscallarg(const struct timespec *) timeout;
    732  1.1     rmind 	} */ *uap = v;
    733  1.1     rmind 	struct proc *p = l->l_proc;
    734  1.1     rmind 	struct aioproc *aio;
    735  1.1     rmind 	struct aio_job *a_job;
    736  1.1     rmind 	struct aiocb **aiocbp_list;
    737  1.1     rmind 	struct timespec ts;
    738  1.1     rmind 	int i, error, nent, timo;
    739  1.1     rmind 
    740  1.1     rmind 	if (p->p_aio == NULL)
    741  1.1     rmind 		return EAGAIN;
    742  1.1     rmind 	aio = p->p_aio;
    743  1.1     rmind 
    744  1.1     rmind 	nent = SCARG(uap, nent);
    745  1.1     rmind 	if (nent <= 0 || nent > aio_listio_max)
    746  1.1     rmind 		return EAGAIN;
    747  1.1     rmind 
    748  1.1     rmind 	if (SCARG(uap, timeout)) {
    749  1.1     rmind 		/* Convert timespec to ticks */
    750  1.1     rmind 		error = copyin(SCARG(uap, timeout), &ts,
    751  1.1     rmind 		    sizeof(struct timespec));
    752  1.1     rmind 		if (error)
    753  1.1     rmind 			return error;
    754  1.1     rmind 		timo = mstohz((ts.tv_sec * 1000) + (ts.tv_nsec / 1000000));
    755  1.1     rmind 		if (timo == 0 && ts.tv_sec == 0 && ts.tv_nsec > 0)
    756  1.1     rmind 			timo = 1;
    757  1.1     rmind 		if (timo <= 0)
    758  1.1     rmind 			return EAGAIN;
    759  1.1     rmind 	} else
    760  1.1     rmind 		timo = 0;
    761  1.1     rmind 
    762  1.1     rmind 	/* Get the list from user-space */
    763  1.1     rmind 	aiocbp_list = kmem_zalloc(nent * sizeof(struct aio_job), KM_SLEEP);
    764  1.1     rmind 	error = copyin(SCARG(uap, list), aiocbp_list,
    765  1.1     rmind 	    nent * sizeof(struct aiocb));
    766  1.1     rmind 	if (error) {
    767  1.1     rmind 		kmem_free(aiocbp_list, nent * sizeof(struct aio_job));
    768  1.1     rmind 		return error;
    769  1.1     rmind 	}
    770  1.1     rmind 
    771  1.1     rmind 	mutex_enter(&aio->aio_mtx);
    772  1.1     rmind 	for (;;) {
    773  1.1     rmind 
    774  1.1     rmind 		for (i = 0; i < nent; i++) {
    775  1.1     rmind 
    776  1.1     rmind 			/* Skip NULL entries */
    777  1.1     rmind 			if (aiocbp_list[i] == NULL)
    778  1.1     rmind 				continue;
    779  1.1     rmind 
    780  1.1     rmind 			/* Skip current job */
    781  1.1     rmind 			if (aio->curjob) {
    782  1.1     rmind 				a_job = aio->curjob;
    783  1.1     rmind 				if (a_job->aiocb_uptr == aiocbp_list[i])
    784  1.1     rmind 					continue;
    785  1.1     rmind 			}
    786  1.1     rmind 
    787  1.1     rmind 			/* Look for a job in the queue */
    788  1.1     rmind 			TAILQ_FOREACH(a_job, &aio->jobs_queue, list)
    789  1.1     rmind 				if (a_job->aiocb_uptr == aiocbp_list[i])
    790  1.1     rmind 					break;
    791  1.1     rmind 
    792  1.1     rmind 			if (a_job == NULL) {
    793  1.1     rmind 				struct aiocb aiocbp;
    794  1.1     rmind 
    795  1.1     rmind 				mutex_exit(&aio->aio_mtx);
    796  1.1     rmind 
    797  1.1     rmind 				error = copyin(aiocbp_list[i], &aiocbp,
    798  1.1     rmind 				    sizeof(struct aiocb));
    799  1.1     rmind 				if (error == 0 && aiocbp._state != JOB_DONE) {
    800  1.1     rmind 					mutex_enter(&aio->aio_mtx);
    801  1.1     rmind 					continue;
    802  1.1     rmind 				}
    803  1.1     rmind 
    804  1.1     rmind 				kmem_free(aiocbp_list,
    805  1.1     rmind 				    nent * sizeof(struct aio_job));
    806  1.1     rmind 				return error;
    807  1.1     rmind 			}
    808  1.1     rmind 		}
    809  1.1     rmind 
    810  1.1     rmind 		/* Wait for a signal or when timeout occurs */
    811  1.1     rmind 		error = cv_timedwait_sig(&aio->done_cv, &aio->aio_mtx, timo);
    812  1.1     rmind 		if (error) {
    813  1.1     rmind 			if (error == EWOULDBLOCK)
    814  1.1     rmind 				error = EAGAIN;
    815  1.1     rmind 			break;
    816  1.1     rmind 		}
    817  1.1     rmind 	}
    818  1.1     rmind 	mutex_exit(&aio->aio_mtx);
    819  1.1     rmind 
    820  1.1     rmind 	kmem_free(aiocbp_list, nent * sizeof(struct aio_job));
    821  1.1     rmind 	return error;
    822  1.1     rmind }
    823  1.1     rmind 
    824  1.1     rmind int
    825  1.1     rmind sys_aio_write(struct lwp *l, void *v, register_t *retval)
    826  1.1     rmind {
    827  1.1     rmind 	struct sys_aio_write_args /* {
    828  1.1     rmind 		syscallarg(struct aiocb *) aiocbp;
    829  1.1     rmind 	} */ *uap = v;
    830  1.1     rmind 
    831  1.1     rmind 	return aio_enqueue_job(AIO_WRITE, SCARG(uap, aiocbp), NULL);
    832  1.1     rmind }
    833  1.1     rmind 
    834  1.1     rmind int
    835  1.1     rmind sys_lio_listio(struct lwp *l, void *v, register_t *retval)
    836  1.1     rmind {
    837  1.1     rmind 	struct sys_lio_listio_args /* {
    838  1.1     rmind 		syscallarg(int) mode;
    839  1.1     rmind 		syscallarg(struct aiocb *const[]) list;
    840  1.1     rmind 		syscallarg(int) nent;
    841  1.1     rmind 		syscallarg(struct sigevent *) sig;
    842  1.1     rmind 	} */ *uap = v;
    843  1.1     rmind 	struct proc *p = l->l_proc;
    844  1.1     rmind 	struct aioproc *aio;
    845  1.1     rmind 	struct aiocb **aiocbp_list;
    846  1.1     rmind 	struct lio_req *lio;
    847  1.1     rmind 	int i, error, errcnt, mode, nent;
    848  1.1     rmind 
    849  1.1     rmind 	mode = SCARG(uap, mode);
    850  1.1     rmind 	nent = SCARG(uap, nent);
    851  1.1     rmind 
    852  1.1     rmind 	/* Check for the limits, and invalid values */
    853  1.1     rmind 	if (nent < 1 || nent > aio_listio_max)
    854  1.1     rmind 		return EINVAL;
    855  1.1     rmind 	if (aio_jobs_count + nent > aio_max) /* XXXSMP */
    856  1.1     rmind 		return EAGAIN;
    857  1.1     rmind 
    858  1.1     rmind 	/* Check if AIO structure is initialized, if not - initialize it */
    859  1.1     rmind 	if (p->p_aio == NULL)
    860  1.1     rmind 		if (aio_init(p))
    861  1.1     rmind 			return EAGAIN;
    862  1.1     rmind 	aio = p->p_aio;
    863  1.1     rmind 
    864  1.1     rmind 	/* Create a LIO structure */
    865  1.4     rmind 	lio = pool_get(&aio_lio_pool, PR_WAITOK);
    866  1.4     rmind 	lio->refcnt = 1;
    867  1.4     rmind 	error = 0;
    868  1.4     rmind 
    869  1.4     rmind 	switch (mode) {
    870  1.4     rmind 	case LIO_WAIT:
    871  1.1     rmind 		memset(&lio->sig, 0, sizeof(struct sigevent));
    872  1.4     rmind 		break;
    873  1.4     rmind 	case LIO_NOWAIT:
    874  1.4     rmind 		/* Check for signal, validate it */
    875  1.4     rmind 		if (SCARG(uap, sig)) {
    876  1.4     rmind 			struct sigevent *sig = &lio->sig;
    877  1.4     rmind 
    878  1.4     rmind 			error = copyin(SCARG(uap, sig), &lio->sig,
    879  1.4     rmind 			    sizeof(struct sigevent));
    880  1.4     rmind 			if (error == 0 &&
    881  1.4     rmind 			    (sig->sigev_signo < 0 ||
    882  1.4     rmind 			    sig->sigev_signo >= NSIG ||
    883  1.4     rmind 			    sig->sigev_notify < SIGEV_NONE ||
    884  1.4     rmind 			    sig->sigev_notify > SIGEV_SA))
    885  1.4     rmind 				error = EINVAL;
    886  1.4     rmind 		} else
    887  1.4     rmind 			memset(&lio->sig, 0, sizeof(struct sigevent));
    888  1.4     rmind 		break;
    889  1.4     rmind 	default:
    890  1.4     rmind 		error = EINVAL;
    891  1.4     rmind 		break;
    892  1.4     rmind 	}
    893  1.4     rmind 
    894  1.4     rmind 	if (error != 0) {
    895  1.4     rmind 		pool_put(&aio_lio_pool, lio);
    896  1.4     rmind 		return error;
    897  1.4     rmind 	}
    898  1.1     rmind 
    899  1.1     rmind 	/* Get the list from user-space */
    900  1.1     rmind 	aiocbp_list = kmem_zalloc(nent * sizeof(struct aio_job), KM_SLEEP);
    901  1.1     rmind 	error = copyin(SCARG(uap, list), aiocbp_list,
    902  1.1     rmind 	    nent * sizeof(struct aiocb));
    903  1.4     rmind 	if (error) {
    904  1.4     rmind 		mutex_enter(&aio->aio_mtx);
    905  1.1     rmind 		goto err;
    906  1.4     rmind 	}
    907  1.1     rmind 
    908  1.1     rmind 	/* Enqueue all jobs */
    909  1.1     rmind 	errcnt = 0;
    910  1.1     rmind 	for (i = 0; i < nent; i++) {
    911  1.1     rmind 		error = aio_enqueue_job(AIO_LIO, aiocbp_list[i], lio);
    912  1.1     rmind 		/*
    913  1.1     rmind 		 * According to POSIX, in such error case it may
    914  1.1     rmind 		 * fail with other I/O operations initiated.
    915  1.1     rmind 		 */
    916  1.1     rmind 		if (error)
    917  1.1     rmind 			errcnt++;
    918  1.1     rmind 	}
    919  1.1     rmind 
    920  1.4     rmind 	mutex_enter(&aio->aio_mtx);
    921  1.4     rmind 
    922  1.1     rmind 	/* Return an error, if any */
    923  1.1     rmind 	if (errcnt) {
    924  1.1     rmind 		error = EIO;
    925  1.1     rmind 		goto err;
    926  1.1     rmind 	}
    927  1.1     rmind 
    928  1.1     rmind 	if (mode == LIO_WAIT) {
    929  1.1     rmind 		/*
    930  1.1     rmind 		 * Wait for AIO completion.  In such case,
    931  1.1     rmind 		 * the LIO structure will be freed here.
    932  1.1     rmind 		 */
    933  1.4     rmind 		while (lio->refcnt > 1 && error == 0)
    934  1.1     rmind 			error = cv_wait_sig(&aio->done_cv, &aio->aio_mtx);
    935  1.1     rmind 		if (error)
    936  1.1     rmind 			error = EINTR;
    937  1.1     rmind 	}
    938  1.1     rmind 
    939  1.1     rmind err:
    940  1.4     rmind 	if (--lio->refcnt != 0)
    941  1.4     rmind 		lio = NULL;
    942  1.4     rmind 	mutex_exit(&aio->aio_mtx);
    943  1.4     rmind 	if (lio != NULL) {
    944  1.4     rmind 		aio_sendsig(p, &lio->sig);
    945  1.4     rmind 		pool_put(&aio_lio_pool, lio);
    946  1.4     rmind 	}
    947  1.1     rmind 	kmem_free(aiocbp_list, nent * sizeof(struct aio_job));
    948  1.1     rmind 	return error;
    949  1.1     rmind }
    950  1.1     rmind 
    951  1.1     rmind /*
    952  1.1     rmind  * SysCtl
    953  1.1     rmind  */
    954  1.1     rmind 
    955  1.1     rmind static int
    956  1.1     rmind sysctl_aio_listio_max(SYSCTLFN_ARGS)
    957  1.1     rmind {
    958  1.1     rmind 	struct sysctlnode node;
    959  1.1     rmind 	int error, newsize;
    960  1.1     rmind 
    961  1.1     rmind 	node = *rnode;
    962  1.1     rmind 	node.sysctl_data = &newsize;
    963  1.1     rmind 
    964  1.1     rmind 	newsize = aio_listio_max;
    965  1.1     rmind 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
    966  1.1     rmind 	if (error || newp == NULL)
    967  1.1     rmind 		return error;
    968  1.1     rmind 
    969  1.1     rmind 	/* XXXSMP */
    970  1.1     rmind 	if (newsize < 1 || newsize > aio_max)
    971  1.1     rmind 		return EINVAL;
    972  1.1     rmind 	aio_listio_max = newsize;
    973  1.1     rmind 
    974  1.1     rmind 	return 0;
    975  1.1     rmind }
    976  1.1     rmind 
    977  1.1     rmind static int
    978  1.1     rmind sysctl_aio_max(SYSCTLFN_ARGS)
    979  1.1     rmind {
    980  1.1     rmind 	struct sysctlnode node;
    981  1.1     rmind 	int error, newsize;
    982  1.1     rmind 
    983  1.1     rmind 	node = *rnode;
    984  1.1     rmind 	node.sysctl_data = &newsize;
    985  1.1     rmind 
    986  1.1     rmind 	newsize = aio_max;
    987  1.1     rmind 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
    988  1.1     rmind 	if (error || newp == NULL)
    989  1.1     rmind 		return error;
    990  1.1     rmind 
    991  1.1     rmind 	/* XXXSMP */
    992  1.1     rmind 	if (newsize < 1 || newsize < aio_listio_max)
    993  1.1     rmind 		return EINVAL;
    994  1.1     rmind 	aio_max = newsize;
    995  1.1     rmind 
    996  1.1     rmind 	return 0;
    997  1.1     rmind }
    998  1.1     rmind 
    999  1.1     rmind SYSCTL_SETUP(sysctl_aio_setup, "sysctl aio setup")
   1000  1.1     rmind {
   1001  1.1     rmind 
   1002  1.1     rmind 	sysctl_createv(clog, 0, NULL, NULL,
   1003  1.1     rmind 		CTLFLAG_PERMANENT,
   1004  1.1     rmind 		CTLTYPE_NODE, "kern", NULL,
   1005  1.1     rmind 		NULL, 0, NULL, 0,
   1006  1.1     rmind 		CTL_KERN, CTL_EOL);
   1007  1.1     rmind 	sysctl_createv(clog, 0, NULL, NULL,
   1008  1.1     rmind 		CTLFLAG_PERMANENT | CTLFLAG_IMMEDIATE,
   1009  1.1     rmind 		CTLTYPE_INT, "posix_aio",
   1010  1.1     rmind 		SYSCTL_DESCR("Version of IEEE Std 1003.1 and its "
   1011  1.1     rmind 			     "Asynchronous I/O option to which the "
   1012  1.1     rmind 			     "system attempts to conform"),
   1013  1.1     rmind 		NULL, _POSIX_ASYNCHRONOUS_IO, NULL, 0,
   1014  1.1     rmind 		CTL_KERN, CTL_CREATE, CTL_EOL);
   1015  1.1     rmind 	sysctl_createv(clog, 0, NULL, NULL,
   1016  1.1     rmind 		CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
   1017  1.1     rmind 		CTLTYPE_INT, "aio_listio_max",
   1018  1.1     rmind 		SYSCTL_DESCR("Maximum number of asynchronous I/O "
   1019  1.1     rmind 			     "operations in a single list I/O call"),
   1020  1.1     rmind 		sysctl_aio_listio_max, 0, &aio_listio_max, 0,
   1021  1.1     rmind 		CTL_KERN, CTL_CREATE, CTL_EOL);
   1022  1.1     rmind 	sysctl_createv(clog, 0, NULL, NULL,
   1023  1.1     rmind 		CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
   1024  1.1     rmind 		CTLTYPE_INT, "aio_max",
   1025  1.1     rmind 		SYSCTL_DESCR("Maximum number of asynchronous I/O "
   1026  1.1     rmind 			     "operations"),
   1027  1.1     rmind 		sysctl_aio_max, 0, &aio_max, 0,
   1028  1.1     rmind 		CTL_KERN, CTL_CREATE, CTL_EOL);
   1029  1.1     rmind }
   1030  1.1     rmind 
   1031  1.1     rmind /*
   1032  1.1     rmind  * Debugging
   1033  1.1     rmind  */
   1034  1.1     rmind #if defined(DDB)
   1035  1.1     rmind void
   1036  1.1     rmind aio_print_jobs(void (*pr)(const char *, ...))
   1037  1.1     rmind {
   1038  1.1     rmind 	struct proc *p = (curlwp == NULL ? NULL : curlwp->l_proc);
   1039  1.1     rmind 	struct aioproc *aio;
   1040  1.1     rmind 	struct aio_job *a_job;
   1041  1.1     rmind 	struct aiocb *aiocbp;
   1042  1.1     rmind 
   1043  1.1     rmind 	if (p == NULL) {
   1044  1.1     rmind 		(*pr)("AIO: We are not in the processes right now.\n");
   1045  1.1     rmind 		return;
   1046  1.1     rmind 	}
   1047  1.1     rmind 
   1048  1.1     rmind 	aio = p->p_aio;
   1049  1.1     rmind 	if (aio == NULL) {
   1050  1.1     rmind 		(*pr)("AIO data is not initialized (PID = %d).\n", p->p_pid);
   1051  1.1     rmind 		return;
   1052  1.1     rmind 	}
   1053  1.1     rmind 
   1054  1.1     rmind 	(*pr)("AIO: PID = %d\n", p->p_pid);
   1055  1.1     rmind 	(*pr)("AIO: Global count of the jobs = %u\n", aio_jobs_count);
   1056  1.1     rmind 	(*pr)("AIO: Count of the jobs = %u\n", aio->jobs_count);
   1057  1.1     rmind 
   1058  1.1     rmind 	if (aio->curjob) {
   1059  1.1     rmind 		a_job = aio->curjob;
   1060  1.1     rmind 		(*pr)("\nAIO current job:\n");
   1061  1.1     rmind 		(*pr)(" opcode = %d, errno = %d, state = %d, aiocb_ptr = %p\n",
   1062  1.1     rmind 		    a_job->aio_op, a_job->aiocbp._errno,
   1063  1.1     rmind 		    a_job->aiocbp._state, a_job->aiocb_uptr);
   1064  1.1     rmind 		aiocbp = &a_job->aiocbp;
   1065  1.1     rmind 		(*pr)("   fd = %d, offset = %u, buf = %p, nbytes = %u\n",
   1066  1.1     rmind 		    aiocbp->aio_fildes, aiocbp->aio_offset,
   1067  1.1     rmind 		    aiocbp->aio_buf, aiocbp->aio_nbytes);
   1068  1.1     rmind 	}
   1069  1.1     rmind 
   1070  1.1     rmind 	(*pr)("\nAIO queue:\n");
   1071  1.1     rmind 	TAILQ_FOREACH(a_job, &aio->jobs_queue, list) {
   1072  1.1     rmind 		(*pr)(" opcode = %d, errno = %d, state = %d, aiocb_ptr = %p\n",
   1073  1.1     rmind 		    a_job->aio_op, a_job->aiocbp._errno,
   1074  1.1     rmind 		    a_job->aiocbp._state, a_job->aiocb_uptr);
   1075  1.1     rmind 		aiocbp = &a_job->aiocbp;
   1076  1.1     rmind 		(*pr)("   fd = %d, offset = %u, buf = %p, nbytes = %u\n",
   1077  1.1     rmind 		    aiocbp->aio_fildes, aiocbp->aio_offset,
   1078  1.1     rmind 		    aiocbp->aio_buf, aiocbp->aio_nbytes);
   1079  1.1     rmind 	}
   1080  1.1     rmind }
   1081  1.1     rmind #endif /* defined(DDB) */
   1082