sys_aio.c revision 1.15.6.1
      1  1.15.6.1       mjf /*	$NetBSD: sys_aio.c,v 1.15.6.1 2008/04/03 12:43:04 mjf Exp $	*/
      2       1.1     rmind 
      3       1.1     rmind /*
      4       1.1     rmind  * Copyright (c) 2007, Mindaugas Rasiukevicius <rmind at NetBSD org>
      5      1.10     rmind  * All rights reserved.
      6       1.1     rmind  *
      7       1.1     rmind  * Redistribution and use in source and binary forms, with or without
      8       1.1     rmind  * modification, are permitted provided that the following conditions
      9       1.1     rmind  * are met:
     10       1.1     rmind  * 1. Redistributions of source code must retain the above copyright
     11       1.1     rmind  *    notice, this list of conditions and the following disclaimer.
     12       1.1     rmind  * 2. Redistributions in binary form must reproduce the above copyright
     13       1.1     rmind  *    notice, this list of conditions and the following disclaimer in the
     14       1.1     rmind  *    documentation and/or other materials provided with the distribution.
     15       1.1     rmind  *
     16       1.1     rmind  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
     17       1.1     rmind  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     18       1.1     rmind  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     19       1.1     rmind  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     20       1.1     rmind  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     21       1.1     rmind  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     22       1.1     rmind  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     23       1.1     rmind  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     24       1.1     rmind  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     25       1.1     rmind  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     26       1.1     rmind  * POSSIBILITY OF SUCH DAMAGE.
     27       1.1     rmind  */
     28       1.1     rmind 
     29       1.1     rmind /*
     30       1.1     rmind  * TODO:
     31       1.1     rmind  *   1. Additional work for VCHR and maybe VBLK devices.
     32       1.1     rmind  *   2. Consider making the job-finding O(n) per one file descriptor.
     33       1.1     rmind  */
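
                             /*
                              * Rough overview of the machinery below: each process that issues
                              * POSIX AIO requests gets a struct aioproc (p->p_aio), which holds
                              * a job queue (jobs_queue), a mutex (aio_mtx), condition variables
                              * for the worker (aio_worker_cv) and for waiters (done_cv), and a
                              * single dedicated worker LWP that dequeues and services jobs one
                              * at a time (curjob).
                              */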
     34       1.1     rmind 
     35       1.1     rmind #include <sys/cdefs.h>
     36  1.15.6.1       mjf __KERNEL_RCSID(0, "$NetBSD: sys_aio.c,v 1.15.6.1 2008/04/03 12:43:04 mjf Exp $");
     37       1.4     rmind 
     38       1.4     rmind #include "opt_ddb.h"
     39       1.1     rmind 
     40       1.1     rmind #include <sys/param.h>
     41       1.1     rmind #include <sys/condvar.h>
     42       1.1     rmind #include <sys/file.h>
     43       1.1     rmind #include <sys/filedesc.h>
     44       1.1     rmind #include <sys/kernel.h>
     45       1.1     rmind #include <sys/kmem.h>
     46       1.1     rmind #include <sys/lwp.h>
     47       1.1     rmind #include <sys/mutex.h>
     48       1.1     rmind #include <sys/pool.h>
     49       1.1     rmind #include <sys/proc.h>
     50       1.1     rmind #include <sys/queue.h>
     51       1.1     rmind #include <sys/signal.h>
     52       1.1     rmind #include <sys/signalvar.h>
     53       1.1     rmind #include <sys/syscallargs.h>
     54       1.1     rmind #include <sys/sysctl.h>
     55       1.1     rmind #include <sys/systm.h>
     56       1.1     rmind #include <sys/types.h>
     57       1.1     rmind #include <sys/vnode.h>
     58      1.11        ad #include <sys/atomic.h>
     59       1.1     rmind 
     60       1.1     rmind #include <uvm/uvm_extern.h>
     61       1.1     rmind 
     62       1.1     rmind /*
     63       1.1     rmind  * System-wide limits and counter of AIO operations.
     64       1.1     rmind  */
     65       1.4     rmind static u_int aio_listio_max = AIO_LISTIO_MAX;
     66       1.4     rmind static u_int aio_max = AIO_MAX;
     67       1.4     rmind static u_int aio_jobs_count;
     68       1.1     rmind 
     69       1.4     rmind static struct pool aio_job_pool;
     70       1.4     rmind static struct pool aio_lio_pool;
     71       1.1     rmind 
     72       1.1     rmind /* Prototypes */
     73       1.1     rmind void aio_worker(void *);
     74       1.1     rmind static void aio_process(struct aio_job *);
     75       1.1     rmind static void aio_sendsig(struct proc *, struct sigevent *);
     76       1.1     rmind static int aio_enqueue_job(int, void *, struct lio_req *);
     77       1.1     rmind 
     78       1.1     rmind /*
     79       1.4     rmind  * Initialize the AIO system.
     80       1.4     rmind  */
     81       1.4     rmind void
     82       1.4     rmind aio_sysinit(void)
     83       1.4     rmind {
     84       1.4     rmind 
     85       1.4     rmind 	pool_init(&aio_job_pool, sizeof(struct aio_job), 0, 0, 0,
     86       1.4     rmind 	    "aio_jobs_pool", &pool_allocator_nointr, IPL_NONE);
     87       1.4     rmind 	pool_init(&aio_lio_pool, sizeof(struct lio_req), 0, 0, 0,
     88       1.4     rmind 	    "aio_lio_pool", &pool_allocator_nointr, IPL_NONE);
     89       1.4     rmind }
     90       1.4     rmind 
     91       1.4     rmind /*
     92       1.1     rmind  * Initialize Asynchronous I/O data structures for the process.
     93       1.1     rmind  */
     94       1.1     rmind int
     95       1.1     rmind aio_init(struct proc *p)
     96       1.1     rmind {
     97       1.1     rmind 	struct aioproc *aio;
     98       1.1     rmind 	struct lwp *l;
     99       1.8        ad 	int error;
    100       1.1     rmind 	bool inmem;
    101       1.1     rmind 	vaddr_t uaddr;
    102       1.1     rmind 
    103       1.1     rmind 	/* Allocate and initialize AIO structure */
    104      1.15        ad 	aio = kmem_zalloc(sizeof(struct aioproc), KM_SLEEP);
    105       1.1     rmind 	if (aio == NULL)
    106       1.1     rmind 		return EAGAIN;
    107       1.1     rmind 
     108       1.4     rmind 	/* Initialize the queue and its synchronization structures */
    109       1.1     rmind 	mutex_init(&aio->aio_mtx, MUTEX_DEFAULT, IPL_NONE);
    110       1.1     rmind 	cv_init(&aio->aio_worker_cv, "aiowork");
    111       1.1     rmind 	cv_init(&aio->done_cv, "aiodone");
    112       1.1     rmind 	TAILQ_INIT(&aio->jobs_queue);
    113       1.1     rmind 
    114       1.1     rmind 	/*
    115       1.1     rmind 	 * Create an AIO worker thread.
    116       1.1     rmind 	 * XXX: Currently, AIO thread is not protected against user's actions.
    117       1.1     rmind 	 */
    118       1.1     rmind 	inmem = uvm_uarea_alloc(&uaddr);
    119       1.1     rmind 	if (uaddr == 0) {
    120       1.5     rmind 		aio_exit(p, aio);
    121       1.1     rmind 		return EAGAIN;
    122       1.1     rmind 	}
    123       1.8        ad 	error = lwp_create(curlwp, p, uaddr, inmem, 0, NULL, 0, aio_worker,
    124       1.8        ad 	    NULL, &l, curlwp->l_class);
    125       1.8        ad 	if (error != 0) {
    126       1.8        ad 		uvm_uarea_free(uaddr, curcpu());
    127       1.5     rmind 		aio_exit(p, aio);
    128       1.8        ad 		return error;
    129       1.1     rmind 	}
    130       1.1     rmind 
    131       1.5     rmind 	/* Recheck if we are really first */
    132       1.5     rmind 	mutex_enter(&p->p_mutex);
    133       1.5     rmind 	if (p->p_aio) {
    134       1.5     rmind 		mutex_exit(&p->p_mutex);
    135       1.5     rmind 		aio_exit(p, aio);
    136       1.5     rmind 		lwp_exit(l);
    137       1.5     rmind 		return 0;
    138       1.5     rmind 	}
    139       1.5     rmind 	p->p_aio = aio;
    140       1.5     rmind 	mutex_exit(&p->p_mutex);
    141       1.5     rmind 
     142       1.1     rmind 	/* Complete the initialization of the thread, and run it */
    143       1.1     rmind 	mutex_enter(&p->p_smutex);
    144       1.1     rmind 	aio->aio_worker = l;
    145       1.1     rmind 	p->p_nrlwps++;
    146       1.1     rmind 	lwp_lock(l);
    147       1.1     rmind 	l->l_stat = LSRUN;
    148      1.12     rmind 	l->l_priority = MAXPRI_USER;
    149       1.2      yamt 	sched_enqueue(l, false);
    150       1.1     rmind 	lwp_unlock(l);
    151       1.1     rmind 	mutex_exit(&p->p_smutex);
    152       1.1     rmind 
    153       1.1     rmind 	return 0;
    154       1.1     rmind }
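
                             /*
                              * Note: aio_init() is called lazily, from aio_enqueue_job() and
                              * sys_lio_listio(), once p->p_aio is found to be NULL.  Two threads
                              * may race to get here; the loser notices the existing p_aio under
                              * p_mutex above, releases its own copy with aio_exit() and terminates
                              * the worker LWP it has just created.
                              */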
    155       1.1     rmind 
    156       1.1     rmind /*
    157       1.1     rmind  * Exit of Asynchronous I/O subsystem of process.
     158       1.1     rmind  * Tear down the Asynchronous I/O subsystem of a process.
    159       1.1     rmind void
    160       1.5     rmind aio_exit(struct proc *p, struct aioproc *aio)
    161       1.1     rmind {
    162       1.1     rmind 	struct aio_job *a_job;
    163       1.1     rmind 
    164       1.5     rmind 	if (aio == NULL)
    165       1.1     rmind 		return;
    166       1.1     rmind 
    167       1.1     rmind 	/* Free AIO queue */
    168       1.1     rmind 	while (!TAILQ_EMPTY(&aio->jobs_queue)) {
    169       1.1     rmind 		a_job = TAILQ_FIRST(&aio->jobs_queue);
    170       1.1     rmind 		TAILQ_REMOVE(&aio->jobs_queue, a_job, list);
    171       1.4     rmind 		pool_put(&aio_job_pool, a_job);
    172      1.11        ad 		atomic_dec_uint(&aio_jobs_count);
    173       1.1     rmind 	}
    174       1.1     rmind 
    175       1.1     rmind 	/* Destroy and free the entire AIO data structure */
    176       1.1     rmind 	cv_destroy(&aio->aio_worker_cv);
    177       1.1     rmind 	cv_destroy(&aio->done_cv);
    178       1.1     rmind 	mutex_destroy(&aio->aio_mtx);
    179       1.1     rmind 	kmem_free(aio, sizeof(struct aioproc));
    180       1.1     rmind }
    181       1.1     rmind 
    182       1.1     rmind /*
    183       1.1     rmind  * AIO worker thread and processor.
    184       1.1     rmind  */
    185       1.1     rmind void
    186       1.1     rmind aio_worker(void *arg)
    187       1.1     rmind {
    188       1.1     rmind 	struct proc *p = curlwp->l_proc;
    189       1.1     rmind 	struct aioproc *aio = p->p_aio;
    190       1.1     rmind 	struct aio_job *a_job;
    191       1.1     rmind 	struct lio_req *lio;
    192       1.1     rmind 	sigset_t oss, nss;
    193       1.4     rmind 	int error, refcnt;
    194       1.1     rmind 
    195       1.1     rmind 	/*
     196       1.1     rmind 	 * Block all maskable signals, so that the thread
     197       1.1     rmind 	 * handles only SIGKILL and SIGSTOP.
    198       1.1     rmind 	 */
    199       1.1     rmind 	sigfillset(&nss);
    200       1.1     rmind 	mutex_enter(&p->p_smutex);
    201       1.1     rmind 	error = sigprocmask1(curlwp, SIG_SETMASK, &nss, &oss);
    202       1.4     rmind 	mutex_exit(&p->p_smutex);
    203       1.1     rmind 	KASSERT(error == 0);
    204       1.1     rmind 
    205       1.1     rmind 	for (;;) {
    206       1.1     rmind 		/*
    207       1.1     rmind 		 * Loop for each job in the queue.  If there
    208       1.4     rmind 		 * are no jobs then sleep.
    209       1.1     rmind 		 */
    210       1.1     rmind 		mutex_enter(&aio->aio_mtx);
    211       1.1     rmind 		while ((a_job = TAILQ_FIRST(&aio->jobs_queue)) == NULL) {
    212       1.1     rmind 			if (cv_wait_sig(&aio->aio_worker_cv, &aio->aio_mtx)) {
    213       1.1     rmind 				/*
    214       1.4     rmind 				 * Thread was interrupted - check for
    215       1.4     rmind 				 * pending exit or suspend.
    216       1.1     rmind 				 */
    217       1.4     rmind 				mutex_exit(&aio->aio_mtx);
    218       1.4     rmind 				lwp_userret(curlwp);
    219       1.4     rmind 				mutex_enter(&aio->aio_mtx);
    220       1.1     rmind 			}
    221       1.1     rmind 		}
    222       1.1     rmind 
    223       1.1     rmind 		/* Take the job from the queue */
    224       1.1     rmind 		aio->curjob = a_job;
    225       1.1     rmind 		TAILQ_REMOVE(&aio->jobs_queue, a_job, list);
    226       1.1     rmind 
    227      1.11        ad 		atomic_dec_uint(&aio_jobs_count);
    228       1.1     rmind 		aio->jobs_count--;
    229       1.1     rmind 
    230       1.1     rmind 		mutex_exit(&aio->aio_mtx);
    231       1.1     rmind 
    232       1.1     rmind 		/* Process an AIO operation */
    233       1.1     rmind 		aio_process(a_job);
    234       1.1     rmind 
    235       1.1     rmind 		/* Copy data structure back to the user-space */
    236       1.1     rmind 		(void)copyout(&a_job->aiocbp, a_job->aiocb_uptr,
    237       1.1     rmind 		    sizeof(struct aiocb));
    238       1.1     rmind 
    239       1.1     rmind 		mutex_enter(&aio->aio_mtx);
    240       1.1     rmind 		aio->curjob = NULL;
    241       1.4     rmind 
    242       1.1     rmind 		/* Decrease a reference counter, if there is a LIO structure */
    243       1.1     rmind 		lio = a_job->lio;
    244       1.4     rmind 		refcnt = (lio != NULL ? --lio->refcnt : -1);
    245       1.4     rmind 
    246       1.1     rmind 		/* Notify all suspenders */
    247       1.1     rmind 		cv_broadcast(&aio->done_cv);
    248       1.1     rmind 		mutex_exit(&aio->aio_mtx);
    249       1.1     rmind 
    250       1.1     rmind 		/* Send a signal, if any */
    251       1.1     rmind 		aio_sendsig(p, &a_job->aiocbp.aio_sigevent);
    252       1.1     rmind 
    253       1.1     rmind 		/* Destroy the LIO structure */
    254       1.4     rmind 		if (refcnt == 0) {
    255       1.1     rmind 			aio_sendsig(p, &lio->sig);
    256       1.4     rmind 			pool_put(&aio_lio_pool, lio);
    257       1.1     rmind 		}
    258       1.1     rmind 
     259       1.1     rmind 		/* Destroy the job */
    260       1.4     rmind 		pool_put(&aio_job_pool, a_job);
    261       1.1     rmind 	}
    262       1.1     rmind 
    263       1.4     rmind 	/* NOTREACHED */
    264       1.1     rmind }
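
                             /*
                              * In short, the per-job sequence performed by the worker above is:
                              *
                              *	1. dequeue the job under aio_mtx and mark it as curjob;
                              *	2. aio_process() carries out the actual read, write or fsync;
                              *	3. the updated aiocb (_errno, _retval, _state) is copied back
                              *	   to user-space;
                              *	4. done_cv is broadcast for aio_suspend()/lio_listio() waiters;
                              *	5. the completion signal, and the LIO signal on the last job
                              *	   of a list, are delivered via aio_sendsig().
                              */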
    265       1.1     rmind 
    266       1.1     rmind static void
    267       1.1     rmind aio_process(struct aio_job *a_job)
    268       1.1     rmind {
    269       1.1     rmind 	struct proc *p = curlwp->l_proc;
    270       1.1     rmind 	struct aiocb *aiocbp = &a_job->aiocbp;
    271       1.1     rmind 	struct file *fp;
    272       1.1     rmind 	int fd = aiocbp->aio_fildes;
    273       1.1     rmind 	int error = 0;
    274       1.1     rmind 
    275       1.1     rmind 	KASSERT(a_job->aio_op != 0);
    276       1.1     rmind 
    277       1.4     rmind 	if ((a_job->aio_op & (AIO_READ | AIO_WRITE)) != 0) {
    278       1.1     rmind 		struct iovec aiov;
    279       1.1     rmind 		struct uio auio;
    280       1.1     rmind 
    281       1.1     rmind 		if (aiocbp->aio_nbytes > SSIZE_MAX) {
    282       1.1     rmind 			error = EINVAL;
    283       1.1     rmind 			goto done;
    284       1.1     rmind 		}
    285       1.1     rmind 
    286  1.15.6.1       mjf 		fp = fd_getfile(fd);
    287       1.1     rmind 		if (fp == NULL) {
    288       1.1     rmind 			error = EBADF;
    289       1.1     rmind 			goto done;
    290       1.1     rmind 		}
    291       1.1     rmind 
    292       1.1     rmind 		aiov.iov_base = (void *)(uintptr_t)aiocbp->aio_buf;
    293       1.1     rmind 		aiov.iov_len = aiocbp->aio_nbytes;
    294       1.1     rmind 		auio.uio_iov = &aiov;
    295       1.1     rmind 		auio.uio_iovcnt = 1;
    296       1.1     rmind 		auio.uio_resid = aiocbp->aio_nbytes;
    297       1.1     rmind 		auio.uio_vmspace = p->p_vmspace;
    298       1.1     rmind 
    299       1.1     rmind 		if (a_job->aio_op & AIO_READ) {
    300       1.1     rmind 			/*
    301       1.1     rmind 			 * Perform a Read operation
    302       1.1     rmind 			 */
    303       1.1     rmind 			KASSERT((a_job->aio_op & AIO_WRITE) == 0);
    304       1.1     rmind 
    305       1.1     rmind 			if ((fp->f_flag & FREAD) == 0) {
    306  1.15.6.1       mjf 				fd_putfile(fd);
    307       1.1     rmind 				error = EBADF;
    308       1.1     rmind 				goto done;
    309       1.1     rmind 			}
    310       1.1     rmind 			auio.uio_rw = UIO_READ;
    311       1.1     rmind 			error = (*fp->f_ops->fo_read)(fp, &aiocbp->aio_offset,
    312       1.1     rmind 			    &auio, fp->f_cred, FOF_UPDATE_OFFSET);
    313       1.1     rmind 		} else {
    314       1.1     rmind 			/*
    315       1.1     rmind 			 * Perform a Write operation
    316       1.1     rmind 			 */
    317       1.1     rmind 			KASSERT(a_job->aio_op & AIO_WRITE);
    318       1.1     rmind 
    319       1.1     rmind 			if ((fp->f_flag & FWRITE) == 0) {
    320  1.15.6.1       mjf 				fd_putfile(fd);
    321       1.1     rmind 				error = EBADF;
    322       1.1     rmind 				goto done;
    323       1.1     rmind 			}
    324       1.1     rmind 			auio.uio_rw = UIO_WRITE;
    325       1.1     rmind 			error = (*fp->f_ops->fo_write)(fp, &aiocbp->aio_offset,
    326       1.1     rmind 			    &auio, fp->f_cred, FOF_UPDATE_OFFSET);
    327       1.1     rmind 		}
    328  1.15.6.1       mjf 		fd_putfile(fd);
    329       1.1     rmind 
    330       1.1     rmind 		/* Store the result value */
    331       1.1     rmind 		a_job->aiocbp.aio_nbytes -= auio.uio_resid;
    332       1.1     rmind 		a_job->aiocbp._retval = (error == 0) ?
    333       1.1     rmind 		    a_job->aiocbp.aio_nbytes : -1;
    334       1.1     rmind 
    335       1.4     rmind 	} else if ((a_job->aio_op & (AIO_SYNC | AIO_DSYNC)) != 0) {
    336       1.1     rmind 		/*
    337       1.1     rmind 		 * Perform a file Sync operation
    338       1.1     rmind 		 */
    339       1.1     rmind 		struct vnode *vp;
    340       1.1     rmind 
    341  1.15.6.1       mjf 		if ((error = fd_getvnode(fd, &fp)) != 0)
    342       1.1     rmind 			goto done;
    343       1.1     rmind 
    344       1.1     rmind 		if ((fp->f_flag & FWRITE) == 0) {
    345  1.15.6.1       mjf 			fd_putfile(fd);
    346       1.1     rmind 			error = EBADF;
    347       1.1     rmind 			goto done;
    348       1.1     rmind 		}
    349       1.1     rmind 
    350       1.1     rmind 		vp = (struct vnode *)fp->f_data;
    351       1.1     rmind 		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
    352       1.1     rmind 		if (a_job->aio_op & AIO_DSYNC) {
    353       1.1     rmind 			error = VOP_FSYNC(vp, fp->f_cred,
    354       1.9     pooka 			    FSYNC_WAIT | FSYNC_DATAONLY, 0, 0);
    355       1.1     rmind 		} else if (a_job->aio_op & AIO_SYNC) {
    356       1.1     rmind 			error = VOP_FSYNC(vp, fp->f_cred,
    357       1.9     pooka 			    FSYNC_WAIT, 0, 0);
    358       1.7     pooka 			if (error == 0 && bioopsp != NULL &&
    359       1.1     rmind 			    vp->v_mount &&
    360       1.1     rmind 			    (vp->v_mount->mnt_flag & MNT_SOFTDEP))
    361       1.7     pooka 			    bioopsp->io_fsync(vp, 0);
    362       1.1     rmind 		}
    363       1.1     rmind 		VOP_UNLOCK(vp, 0);
    364  1.15.6.1       mjf 		fd_putfile(fd);
    365       1.1     rmind 
    366       1.1     rmind 		/* Store the result value */
    367       1.1     rmind 		a_job->aiocbp._retval = (error == 0) ? 0 : -1;
    368       1.1     rmind 
    369       1.1     rmind 	} else
    370       1.1     rmind 		panic("aio_process: invalid operation code\n");
    371       1.1     rmind 
    372       1.1     rmind done:
    373       1.1     rmind 	/* Job is done, set the error, if any */
    374       1.1     rmind 	a_job->aiocbp._errno = error;
    375       1.1     rmind 	a_job->aiocbp._state = JOB_DONE;
    376       1.1     rmind }
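
                             /*
                              * The _errno, _retval and _state fields filled in above are what
                              * sys_aio_error() and sys_aio_return() later read back from the
                              * user-space aiocb once the worker has copied it out.
                              */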
    377       1.1     rmind 
    378       1.1     rmind /*
    379       1.1     rmind  * Send AIO signal.
    380       1.1     rmind  */
    381       1.1     rmind static void
    382       1.1     rmind aio_sendsig(struct proc *p, struct sigevent *sig)
    383       1.1     rmind {
    384       1.1     rmind 	ksiginfo_t ksi;
    385       1.1     rmind 
    386       1.1     rmind 	if (sig->sigev_signo == 0 || sig->sigev_notify == SIGEV_NONE)
    387       1.1     rmind 		return;
    388       1.1     rmind 
    389       1.1     rmind 	KSI_INIT(&ksi);
    390       1.1     rmind 	ksi.ksi_signo = sig->sigev_signo;
    391       1.1     rmind 	ksi.ksi_code = SI_ASYNCIO;
    392       1.3  christos 	ksi.ksi_value = sig->sigev_value;
    393       1.1     rmind 	mutex_enter(&proclist_mutex);
    394       1.1     rmind 	kpsignal(p, &ksi, NULL);
    395       1.1     rmind 	mutex_exit(&proclist_mutex);
    396       1.1     rmind }
    397       1.1     rmind 
    398       1.1     rmind /*
    399       1.1     rmind  * Enqueue the job.
    400       1.1     rmind  */
    401       1.1     rmind static int
    402       1.1     rmind aio_enqueue_job(int op, void *aiocb_uptr, struct lio_req *lio)
    403       1.1     rmind {
    404       1.1     rmind 	struct proc *p = curlwp->l_proc;
    405       1.1     rmind 	struct aioproc *aio;
    406       1.1     rmind 	struct aio_job *a_job;
    407       1.1     rmind 	struct aiocb aiocbp;
    408       1.1     rmind 	struct sigevent *sig;
    409       1.1     rmind 	int error;
    410       1.1     rmind 
     411      1.12     rmind 	/* Approximate (lockless) check against the limit */
    412      1.12     rmind 	if (aio_jobs_count + 1 > aio_max)
    413       1.1     rmind 		return EAGAIN;
    414       1.1     rmind 
    415       1.1     rmind 	/* Get the data structure from user-space */
    416       1.1     rmind 	error = copyin(aiocb_uptr, &aiocbp, sizeof(struct aiocb));
    417       1.1     rmind 	if (error)
    418       1.1     rmind 		return error;
    419       1.1     rmind 
    420       1.1     rmind 	/* Check if signal is set, and validate it */
    421       1.1     rmind 	sig = &aiocbp.aio_sigevent;
    422       1.1     rmind 	if (sig->sigev_signo < 0 || sig->sigev_signo >= NSIG ||
    423       1.1     rmind 	    sig->sigev_notify < SIGEV_NONE || sig->sigev_notify > SIGEV_SA)
    424       1.1     rmind 		return EINVAL;
    425       1.1     rmind 
    426       1.1     rmind 	/* Buffer and byte count */
    427       1.1     rmind 	if (((AIO_SYNC | AIO_DSYNC) & op) == 0)
    428       1.1     rmind 		if (aiocbp.aio_buf == NULL || aiocbp.aio_nbytes > SSIZE_MAX)
    429       1.1     rmind 			return EINVAL;
    430       1.1     rmind 
    431       1.1     rmind 	/* Check the opcode, if LIO_NOP - simply ignore */
    432       1.1     rmind 	if (op == AIO_LIO) {
    433       1.1     rmind 		KASSERT(lio != NULL);
    434       1.1     rmind 		if (aiocbp.aio_lio_opcode == LIO_WRITE)
    435       1.1     rmind 			op = AIO_WRITE;
    436       1.1     rmind 		else if (aiocbp.aio_lio_opcode == LIO_READ)
    437       1.1     rmind 			op = AIO_READ;
    438       1.1     rmind 		else
    439       1.1     rmind 			return (aiocbp.aio_lio_opcode == LIO_NOP) ? 0 : EINVAL;
    440       1.1     rmind 	} else {
    441       1.1     rmind 		KASSERT(lio == NULL);
    442       1.1     rmind 	}
    443       1.1     rmind 
    444       1.1     rmind 	/*
     445       1.1     rmind 	 * Look for an already existing job.  If found, the job is in progress;
     446       1.1     rmind 	 * according to POSIX this is invalid, so return an error.
    447       1.1     rmind 	 */
    448       1.1     rmind 	aio = p->p_aio;
    449       1.1     rmind 	if (aio) {
    450       1.1     rmind 		mutex_enter(&aio->aio_mtx);
    451       1.1     rmind 		if (aio->curjob) {
    452       1.1     rmind 			a_job = aio->curjob;
    453       1.1     rmind 			if (a_job->aiocb_uptr == aiocb_uptr) {
    454       1.1     rmind 				mutex_exit(&aio->aio_mtx);
    455       1.1     rmind 				return EINVAL;
    456       1.1     rmind 			}
    457       1.1     rmind 		}
    458       1.1     rmind 		TAILQ_FOREACH(a_job, &aio->jobs_queue, list) {
    459       1.1     rmind 			if (a_job->aiocb_uptr != aiocb_uptr)
    460       1.1     rmind 				continue;
    461       1.1     rmind 			mutex_exit(&aio->aio_mtx);
    462       1.1     rmind 			return EINVAL;
    463       1.1     rmind 		}
    464       1.1     rmind 		mutex_exit(&aio->aio_mtx);
    465       1.1     rmind 	}
    466       1.1     rmind 
    467       1.1     rmind 	/*
     468       1.1     rmind 	 * Check if the AIO structure is initialized; if not, initialize it.
     469       1.1     rmind 	 * In the LIO case we did that already.  This is rechecked under
     470       1.1     rmind 	 * the lock in aio_init().
    471       1.1     rmind 	 */
    472       1.1     rmind 	if (lio == NULL && p->p_aio == NULL)
    473       1.1     rmind 		if (aio_init(p))
    474       1.1     rmind 			return EAGAIN;
    475       1.1     rmind 	aio = p->p_aio;
    476       1.1     rmind 
    477       1.1     rmind 	/*
    478       1.1     rmind 	 * Set the state with errno, and copy data
    479       1.1     rmind 	 * structure back to the user-space.
    480       1.1     rmind 	 */
    481       1.1     rmind 	aiocbp._state = JOB_WIP;
    482       1.1     rmind 	aiocbp._errno = EINPROGRESS;
    483       1.1     rmind 	aiocbp._retval = -1;
    484       1.1     rmind 	error = copyout(&aiocbp, aiocb_uptr, sizeof(struct aiocb));
    485       1.1     rmind 	if (error)
    486       1.1     rmind 		return error;
    487       1.1     rmind 
    488       1.1     rmind 	/* Allocate and initialize a new AIO job */
    489       1.4     rmind 	a_job = pool_get(&aio_job_pool, PR_WAITOK);
    490       1.1     rmind 	memset(a_job, 0, sizeof(struct aio_job));
    491       1.1     rmind 
    492       1.1     rmind 	/*
    493       1.1     rmind 	 * Set the data.
     494       1.1     rmind 	 * Store the user-space pointer for later look-ups.  Since only
     495       1.1     rmind 	 * per-process pointers are stored, this is safe.
    496       1.1     rmind 	 */
    497       1.1     rmind 	memcpy(&a_job->aiocbp, &aiocbp, sizeof(struct aiocb));
    498       1.1     rmind 	a_job->aiocb_uptr = aiocb_uptr;
    499       1.1     rmind 	a_job->aio_op |= op;
    500       1.1     rmind 	a_job->lio = lio;
    501       1.1     rmind 
    502       1.1     rmind 	/*
    503       1.1     rmind 	 * Add the job to the queue, update the counters, and
    504       1.1     rmind 	 * notify the AIO worker thread to handle the job.
    505       1.1     rmind 	 */
    506       1.1     rmind 	mutex_enter(&aio->aio_mtx);
    507       1.1     rmind 
    508       1.1     rmind 	/* Fail, if the limit was reached */
    509      1.13     rmind 	if (atomic_inc_uint_nv(&aio_jobs_count) > aio_max ||
    510      1.13     rmind 	    aio->jobs_count >= aio_listio_max) {
    511      1.12     rmind 		atomic_dec_uint(&aio_jobs_count);
    512       1.1     rmind 		mutex_exit(&aio->aio_mtx);
    513       1.4     rmind 		pool_put(&aio_job_pool, a_job);
    514       1.1     rmind 		return EAGAIN;
    515       1.1     rmind 	}
    516       1.1     rmind 
    517       1.1     rmind 	TAILQ_INSERT_TAIL(&aio->jobs_queue, a_job, list);
    518       1.1     rmind 	aio->jobs_count++;
    519       1.1     rmind 	if (lio)
    520       1.1     rmind 		lio->refcnt++;
    521       1.1     rmind 	cv_signal(&aio->aio_worker_cv);
    522       1.1     rmind 
    523       1.1     rmind 	mutex_exit(&aio->aio_mtx);
    524       1.1     rmind 
    525       1.1     rmind 	/*
     526       1.1     rmind 	 * Errors are reported only via the aio_error() function;
     527       1.1     rmind 	 * this behaviour is what POSIX specifies.
    528       1.1     rmind 	 */
    529       1.1     rmind 	return 0;
    530       1.1     rmind }
    531       1.1     rmind 
    532       1.1     rmind /*
    533       1.1     rmind  * Syscall functions.
    534       1.1     rmind  */
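
                             /*
                              * A minimal sketch of typical user-space usage of the interface
                              * implemented below (illustrative only, not part of this file;
                              * fd and buf are assumed to be a valid descriptor and buffer):
                              *
                              *	struct aiocb cb;
                              *	const struct aiocb *list[1] = { &cb };
                              *	ssize_t nread;
                              *
                              *	memset(&cb, 0, sizeof(cb));
                              *	cb.aio_fildes = fd;
                              *	cb.aio_buf = buf;
                              *	cb.aio_nbytes = sizeof(buf);
                              *	cb.aio_offset = 0;
                              *	if (aio_read(&cb) == -1)
                              *		err(EXIT_FAILURE, "aio_read");
                              *	while (aio_error(&cb) == EINPROGRESS)
                              *		(void)aio_suspend(list, 1, NULL);
                              *	nread = aio_return(&cb);
                              */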
    535       1.1     rmind 
    536       1.1     rmind int
    537      1.14       dsl sys_aio_cancel(struct lwp *l, const struct sys_aio_cancel_args *uap, register_t *retval)
    538       1.1     rmind {
    539      1.14       dsl 	/* {
    540       1.1     rmind 		syscallarg(int) fildes;
    541       1.1     rmind 		syscallarg(struct aiocb *) aiocbp;
    542      1.14       dsl 	} */
    543       1.1     rmind 	struct proc *p = l->l_proc;
    544       1.1     rmind 	struct aioproc *aio;
    545       1.1     rmind 	struct aio_job *a_job;
    546       1.1     rmind 	struct aiocb *aiocbp_ptr;
    547       1.1     rmind 	struct lio_req *lio;
    548       1.1     rmind 	struct filedesc	*fdp = p->p_fd;
    549       1.1     rmind 	unsigned int cn, errcnt, fildes;
    550       1.1     rmind 
    551       1.1     rmind 	TAILQ_HEAD(, aio_job) tmp_jobs_list;
    552       1.1     rmind 
    553       1.1     rmind 	/* Check for invalid file descriptor */
    554       1.1     rmind 	fildes = (unsigned int)SCARG(uap, fildes);
    555  1.15.6.1       mjf 	if (fildes >= fdp->fd_nfiles)
    556  1.15.6.1       mjf 		return EBADF;
    557  1.15.6.1       mjf 	membar_consumer();
    558  1.15.6.1       mjf 	if (fdp->fd_ofiles[fildes] == NULL || fdp->fd_ofiles[fildes]->ff_file == NULL)
    559       1.1     rmind 		return EBADF;
    560       1.1     rmind 
    561       1.1     rmind 	/* Check if AIO structure is initialized */
    562       1.1     rmind 	if (p->p_aio == NULL) {
    563       1.1     rmind 		*retval = AIO_NOTCANCELED;
    564       1.1     rmind 		return 0;
    565       1.1     rmind 	}
    566       1.1     rmind 
    567       1.1     rmind 	aio = p->p_aio;
    568       1.1     rmind 	aiocbp_ptr = (struct aiocb *)SCARG(uap, aiocbp);
    569       1.1     rmind 
    570       1.1     rmind 	mutex_enter(&aio->aio_mtx);
    571       1.1     rmind 
    572       1.1     rmind 	/* Cancel the jobs, and remove them from the queue */
    573       1.1     rmind 	cn = 0;
    574       1.1     rmind 	TAILQ_INIT(&tmp_jobs_list);
    575       1.1     rmind 	TAILQ_FOREACH(a_job, &aio->jobs_queue, list) {
    576       1.1     rmind 		if (aiocbp_ptr) {
    577       1.1     rmind 			if (aiocbp_ptr != a_job->aiocb_uptr)
    578       1.1     rmind 				continue;
    579       1.1     rmind 			if (fildes != a_job->aiocbp.aio_fildes) {
    580       1.1     rmind 				mutex_exit(&aio->aio_mtx);
    581       1.1     rmind 				return EBADF;
    582       1.1     rmind 			}
    583       1.1     rmind 		} else if (a_job->aiocbp.aio_fildes != fildes)
    584       1.1     rmind 			continue;
    585       1.1     rmind 
    586       1.1     rmind 		TAILQ_REMOVE(&aio->jobs_queue, a_job, list);
    587       1.1     rmind 		TAILQ_INSERT_TAIL(&tmp_jobs_list, a_job, list);
    588       1.1     rmind 
    589       1.1     rmind 		/* Decrease the counters */
    590      1.11        ad 		atomic_dec_uint(&aio_jobs_count);
    591       1.1     rmind 		aio->jobs_count--;
    592       1.1     rmind 		lio = a_job->lio;
    593       1.4     rmind 		if (lio != NULL && --lio->refcnt != 0)
    594       1.4     rmind 			a_job->lio = NULL;
    595       1.1     rmind 
    596       1.1     rmind 		cn++;
    597       1.1     rmind 		if (aiocbp_ptr)
    598       1.1     rmind 			break;
    599       1.1     rmind 	}
    600       1.1     rmind 
     601       1.1     rmind 	/* Report if any jobs were canceled */
    602       1.1     rmind 	if (cn)
    603       1.1     rmind 		*retval = AIO_CANCELED;
    604       1.1     rmind 
    605       1.1     rmind 	/* We cannot cancel current job */
    606       1.1     rmind 	a_job = aio->curjob;
    607       1.1     rmind 	if (a_job && ((a_job->aiocbp.aio_fildes == fildes) ||
    608       1.1     rmind 	    (a_job->aiocb_uptr == aiocbp_ptr)))
    609       1.1     rmind 		*retval = AIO_NOTCANCELED;
    610       1.1     rmind 
    611       1.1     rmind 	mutex_exit(&aio->aio_mtx);
    612       1.1     rmind 
    613       1.1     rmind 	/* Free the jobs after the lock */
    614       1.1     rmind 	errcnt = 0;
    615       1.1     rmind 	while (!TAILQ_EMPTY(&tmp_jobs_list)) {
    616       1.1     rmind 		a_job = TAILQ_FIRST(&tmp_jobs_list);
    617       1.1     rmind 		TAILQ_REMOVE(&tmp_jobs_list, a_job, list);
    618       1.1     rmind 		/* Set the errno and copy structures back to the user-space */
    619       1.1     rmind 		a_job->aiocbp._errno = ECANCELED;
    620       1.1     rmind 		a_job->aiocbp._state = JOB_DONE;
    621       1.1     rmind 		if (copyout(&a_job->aiocbp, a_job->aiocb_uptr,
    622       1.1     rmind 		    sizeof(struct aiocb)))
    623       1.1     rmind 			errcnt++;
    624       1.1     rmind 		/* Send a signal if any */
    625       1.1     rmind 		aio_sendsig(p, &a_job->aiocbp.aio_sigevent);
    626       1.6     rmind 		if (a_job->lio) {
    627       1.6     rmind 			lio = a_job->lio;
    628       1.6     rmind 			aio_sendsig(p, &lio->sig);
    629       1.6     rmind 			pool_put(&aio_lio_pool, lio);
    630       1.6     rmind 		}
    631       1.4     rmind 		pool_put(&aio_job_pool, a_job);
    632       1.1     rmind 	}
    633       1.1     rmind 
    634       1.1     rmind 	if (errcnt)
    635       1.1     rmind 		return EFAULT;
    636       1.1     rmind 
    637       1.1     rmind 	/* Set a correct return value */
    638       1.1     rmind 	if (*retval == 0)
    639       1.1     rmind 		*retval = AIO_ALLDONE;
    640       1.1     rmind 
    641       1.1     rmind 	return 0;
    642       1.1     rmind }
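
                             /*
                              * Summary of the return value set above: AIO_NOTCANCELED if the job
                              * currently being processed matches the request (or no AIO state
                              * exists yet), otherwise AIO_CANCELED if at least one queued job was
                              * removed, otherwise AIO_ALLDONE.
                              */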
    643       1.1     rmind 
    644       1.1     rmind int
    645      1.14       dsl sys_aio_error(struct lwp *l, const struct sys_aio_error_args *uap, register_t *retval)
    646       1.1     rmind {
    647      1.14       dsl 	/* {
    648       1.1     rmind 		syscallarg(const struct aiocb *) aiocbp;
    649      1.14       dsl 	} */
    650       1.1     rmind 	struct proc *p = l->l_proc;
    651       1.1     rmind 	struct aioproc *aio = p->p_aio;
    652       1.1     rmind 	struct aiocb aiocbp;
    653       1.1     rmind 	int error;
    654       1.1     rmind 
    655       1.1     rmind 	if (aio == NULL)
    656       1.1     rmind 		return EINVAL;
    657       1.1     rmind 
    658       1.1     rmind 	error = copyin(SCARG(uap, aiocbp), &aiocbp, sizeof(struct aiocb));
    659       1.1     rmind 	if (error)
    660       1.1     rmind 		return error;
    661       1.1     rmind 
    662       1.1     rmind 	if (aiocbp._state == JOB_NONE)
    663       1.1     rmind 		return EINVAL;
    664       1.1     rmind 
    665       1.1     rmind 	*retval = aiocbp._errno;
    666       1.1     rmind 
    667       1.1     rmind 	return 0;
    668       1.1     rmind }
    669       1.1     rmind 
    670       1.1     rmind int
    671      1.14       dsl sys_aio_fsync(struct lwp *l, const struct sys_aio_fsync_args *uap, register_t *retval)
    672       1.1     rmind {
    673      1.14       dsl 	/* {
    674       1.1     rmind 		syscallarg(int) op;
    675       1.1     rmind 		syscallarg(struct aiocb *) aiocbp;
    676      1.14       dsl 	} */
    677       1.1     rmind 	int op = SCARG(uap, op);
    678       1.1     rmind 
    679       1.1     rmind 	if ((op != O_DSYNC) && (op != O_SYNC))
    680       1.1     rmind 		return EINVAL;
    681       1.1     rmind 
     682       1.1     rmind 	op = (op == O_DSYNC) ? AIO_DSYNC : AIO_SYNC;
    683       1.1     rmind 
    684       1.1     rmind 	return aio_enqueue_job(op, SCARG(uap, aiocbp), NULL);
    685       1.1     rmind }
    686       1.1     rmind 
    687       1.1     rmind int
    688      1.14       dsl sys_aio_read(struct lwp *l, const struct sys_aio_read_args *uap, register_t *retval)
    689       1.1     rmind {
    690      1.14       dsl 	/* {
    691       1.1     rmind 		syscallarg(struct aiocb *) aiocbp;
    692      1.14       dsl 	} */
    693       1.1     rmind 
    694       1.1     rmind 	return aio_enqueue_job(AIO_READ, SCARG(uap, aiocbp), NULL);
    695       1.1     rmind }
    696       1.1     rmind 
    697       1.1     rmind int
    698      1.14       dsl sys_aio_return(struct lwp *l, const struct sys_aio_return_args *uap, register_t *retval)
    699       1.1     rmind {
    700      1.14       dsl 	/* {
    701       1.1     rmind 		syscallarg(struct aiocb *) aiocbp;
    702      1.14       dsl 	} */
    703       1.1     rmind 	struct proc *p = l->l_proc;
    704       1.1     rmind 	struct aioproc *aio = p->p_aio;
    705       1.1     rmind 	struct aiocb aiocbp;
    706       1.1     rmind 	int error;
    707       1.1     rmind 
    708       1.1     rmind 	if (aio == NULL)
    709       1.1     rmind 		return EINVAL;
    710       1.1     rmind 
    711       1.1     rmind 	error = copyin(SCARG(uap, aiocbp), &aiocbp, sizeof(struct aiocb));
    712       1.1     rmind 	if (error)
    713       1.1     rmind 		return error;
    714       1.1     rmind 
    715       1.1     rmind 	if (aiocbp._errno == EINPROGRESS || aiocbp._state != JOB_DONE)
    716       1.1     rmind 		return EINVAL;
    717       1.1     rmind 
    718       1.1     rmind 	*retval = aiocbp._retval;
    719       1.1     rmind 
    720       1.1     rmind 	/* Reset the internal variables */
    721       1.1     rmind 	aiocbp._errno = 0;
    722       1.1     rmind 	aiocbp._retval = -1;
    723       1.1     rmind 	aiocbp._state = JOB_NONE;
    724       1.1     rmind 	error = copyout(&aiocbp, SCARG(uap, aiocbp), sizeof(struct aiocb));
    725       1.1     rmind 
    726       1.1     rmind 	return error;
    727       1.1     rmind }
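
                             /*
                              * Note that sys_aio_return() resets _state to JOB_NONE above, so a
                              * second aio_return() on the same control block fails with EINVAL.
                              */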
    728       1.1     rmind 
    729       1.1     rmind int
    730      1.14       dsl sys_aio_suspend(struct lwp *l, const struct sys_aio_suspend_args *uap, register_t *retval)
    731       1.1     rmind {
    732      1.14       dsl 	/* {
    733       1.1     rmind 		syscallarg(const struct aiocb *const[]) list;
    734       1.1     rmind 		syscallarg(int) nent;
    735       1.1     rmind 		syscallarg(const struct timespec *) timeout;
    736      1.14       dsl 	} */
    737       1.1     rmind 	struct proc *p = l->l_proc;
    738       1.1     rmind 	struct aioproc *aio;
    739       1.1     rmind 	struct aio_job *a_job;
    740       1.1     rmind 	struct aiocb **aiocbp_list;
    741       1.1     rmind 	struct timespec ts;
    742       1.1     rmind 	int i, error, nent, timo;
    743       1.1     rmind 
    744       1.1     rmind 	if (p->p_aio == NULL)
    745       1.1     rmind 		return EAGAIN;
    746       1.1     rmind 	aio = p->p_aio;
    747       1.1     rmind 
    748       1.1     rmind 	nent = SCARG(uap, nent);
    749       1.1     rmind 	if (nent <= 0 || nent > aio_listio_max)
    750       1.1     rmind 		return EAGAIN;
    751       1.1     rmind 
    752       1.1     rmind 	if (SCARG(uap, timeout)) {
    753       1.1     rmind 		/* Convert timespec to ticks */
    754       1.1     rmind 		error = copyin(SCARG(uap, timeout), &ts,
    755       1.1     rmind 		    sizeof(struct timespec));
    756       1.1     rmind 		if (error)
    757       1.1     rmind 			return error;
    758       1.1     rmind 		timo = mstohz((ts.tv_sec * 1000) + (ts.tv_nsec / 1000000));
    759       1.1     rmind 		if (timo == 0 && ts.tv_sec == 0 && ts.tv_nsec > 0)
    760       1.1     rmind 			timo = 1;
    761       1.1     rmind 		if (timo <= 0)
    762       1.1     rmind 			return EAGAIN;
    763       1.1     rmind 	} else
    764       1.1     rmind 		timo = 0;
    765       1.1     rmind 
    766       1.1     rmind 	/* Get the list from user-space */
     767       1.1     rmind 	aiocbp_list = kmem_zalloc(nent * sizeof(struct aiocb *), KM_SLEEP);
     768       1.1     rmind 	error = copyin(SCARG(uap, list), aiocbp_list,
     769       1.1     rmind 	    nent * sizeof(struct aiocb *));
    770       1.1     rmind 	if (error) {
     771       1.1     rmind 		kmem_free(aiocbp_list, nent * sizeof(struct aiocb *));
    772       1.1     rmind 		return error;
    773       1.1     rmind 	}
    774       1.1     rmind 
    775       1.1     rmind 	mutex_enter(&aio->aio_mtx);
    776       1.1     rmind 	for (;;) {
    777       1.1     rmind 
    778       1.1     rmind 		for (i = 0; i < nent; i++) {
    779       1.1     rmind 
    780       1.1     rmind 			/* Skip NULL entries */
    781       1.1     rmind 			if (aiocbp_list[i] == NULL)
    782       1.1     rmind 				continue;
    783       1.1     rmind 
    784       1.1     rmind 			/* Skip current job */
    785       1.1     rmind 			if (aio->curjob) {
    786       1.1     rmind 				a_job = aio->curjob;
    787       1.1     rmind 				if (a_job->aiocb_uptr == aiocbp_list[i])
    788       1.1     rmind 					continue;
    789       1.1     rmind 			}
    790       1.1     rmind 
    791       1.1     rmind 			/* Look for a job in the queue */
    792       1.1     rmind 			TAILQ_FOREACH(a_job, &aio->jobs_queue, list)
    793       1.1     rmind 				if (a_job->aiocb_uptr == aiocbp_list[i])
    794       1.1     rmind 					break;
    795       1.1     rmind 
    796       1.1     rmind 			if (a_job == NULL) {
    797       1.1     rmind 				struct aiocb aiocbp;
    798       1.1     rmind 
    799       1.1     rmind 				mutex_exit(&aio->aio_mtx);
    800       1.1     rmind 
    801       1.1     rmind 				error = copyin(aiocbp_list[i], &aiocbp,
    802       1.1     rmind 				    sizeof(struct aiocb));
    803       1.1     rmind 				if (error == 0 && aiocbp._state != JOB_DONE) {
    804       1.1     rmind 					mutex_enter(&aio->aio_mtx);
    805       1.1     rmind 					continue;
    806       1.1     rmind 				}
    807       1.1     rmind 
    808       1.1     rmind 				kmem_free(aiocbp_list,
     809       1.1     rmind 				    nent * sizeof(struct aiocb *));
    810       1.1     rmind 				return error;
    811       1.1     rmind 			}
    812       1.1     rmind 		}
    813       1.1     rmind 
     814       1.1     rmind 		/* Wait for a signal or until the timeout expires */
    815       1.1     rmind 		error = cv_timedwait_sig(&aio->done_cv, &aio->aio_mtx, timo);
    816       1.1     rmind 		if (error) {
    817       1.1     rmind 			if (error == EWOULDBLOCK)
    818       1.1     rmind 				error = EAGAIN;
    819       1.1     rmind 			break;
    820       1.1     rmind 		}
    821       1.1     rmind 	}
    822       1.1     rmind 	mutex_exit(&aio->aio_mtx);
    823       1.1     rmind 
     824       1.1     rmind 	kmem_free(aiocbp_list, nent * sizeof(struct aiocb *));
    825       1.1     rmind 	return error;
    826       1.1     rmind }
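
                             /*
                              * Note on the timeout handling above: the timespec is converted to
                              * ticks with mstohz(), a non-zero sub-tick timeout is rounded up to
                              * one tick, and an expired wait is reported as EAGAIN, which is what
                              * POSIX specifies for aio_suspend().
                              */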
    827       1.1     rmind 
    828       1.1     rmind int
    829      1.14       dsl sys_aio_write(struct lwp *l, const struct sys_aio_write_args *uap, register_t *retval)
    830       1.1     rmind {
    831      1.14       dsl 	/* {
    832       1.1     rmind 		syscallarg(struct aiocb *) aiocbp;
    833      1.14       dsl 	} */
    834       1.1     rmind 
    835       1.1     rmind 	return aio_enqueue_job(AIO_WRITE, SCARG(uap, aiocbp), NULL);
    836       1.1     rmind }
    837       1.1     rmind 
    838       1.1     rmind int
    839      1.14       dsl sys_lio_listio(struct lwp *l, const struct sys_lio_listio_args *uap, register_t *retval)
    840       1.1     rmind {
    841      1.14       dsl 	/* {
    842       1.1     rmind 		syscallarg(int) mode;
    843       1.1     rmind 		syscallarg(struct aiocb *const[]) list;
    844       1.1     rmind 		syscallarg(int) nent;
    845       1.1     rmind 		syscallarg(struct sigevent *) sig;
    846      1.14       dsl 	} */
    847       1.1     rmind 	struct proc *p = l->l_proc;
    848       1.1     rmind 	struct aioproc *aio;
    849       1.1     rmind 	struct aiocb **aiocbp_list;
    850       1.1     rmind 	struct lio_req *lio;
    851       1.1     rmind 	int i, error, errcnt, mode, nent;
    852       1.1     rmind 
    853       1.1     rmind 	mode = SCARG(uap, mode);
    854       1.1     rmind 	nent = SCARG(uap, nent);
    855       1.1     rmind 
     856      1.12     rmind 	/* Approximate (lockless) checks for the limit and invalid values */
    857       1.1     rmind 	if (nent < 1 || nent > aio_listio_max)
    858       1.1     rmind 		return EINVAL;
    859      1.12     rmind 	if (aio_jobs_count + nent > aio_max)
    860       1.1     rmind 		return EAGAIN;
    861       1.1     rmind 
     862       1.1     rmind 	/* Check if the AIO structure is initialized; if not, initialize it */
    863       1.1     rmind 	if (p->p_aio == NULL)
    864       1.1     rmind 		if (aio_init(p))
    865       1.1     rmind 			return EAGAIN;
    866       1.1     rmind 	aio = p->p_aio;
    867       1.1     rmind 
    868       1.1     rmind 	/* Create a LIO structure */
    869       1.4     rmind 	lio = pool_get(&aio_lio_pool, PR_WAITOK);
    870       1.4     rmind 	lio->refcnt = 1;
    871       1.4     rmind 	error = 0;
    872       1.4     rmind 
    873       1.4     rmind 	switch (mode) {
    874       1.4     rmind 	case LIO_WAIT:
    875       1.1     rmind 		memset(&lio->sig, 0, sizeof(struct sigevent));
    876       1.4     rmind 		break;
    877       1.4     rmind 	case LIO_NOWAIT:
    878       1.4     rmind 		/* Check for signal, validate it */
    879       1.4     rmind 		if (SCARG(uap, sig)) {
    880       1.4     rmind 			struct sigevent *sig = &lio->sig;
    881       1.4     rmind 
    882       1.4     rmind 			error = copyin(SCARG(uap, sig), &lio->sig,
    883       1.4     rmind 			    sizeof(struct sigevent));
    884       1.4     rmind 			if (error == 0 &&
    885       1.4     rmind 			    (sig->sigev_signo < 0 ||
    886       1.4     rmind 			    sig->sigev_signo >= NSIG ||
    887       1.4     rmind 			    sig->sigev_notify < SIGEV_NONE ||
    888       1.4     rmind 			    sig->sigev_notify > SIGEV_SA))
    889       1.4     rmind 				error = EINVAL;
    890       1.4     rmind 		} else
    891       1.4     rmind 			memset(&lio->sig, 0, sizeof(struct sigevent));
    892       1.4     rmind 		break;
    893       1.4     rmind 	default:
    894       1.4     rmind 		error = EINVAL;
    895       1.4     rmind 		break;
    896       1.4     rmind 	}
    897       1.4     rmind 
    898       1.4     rmind 	if (error != 0) {
    899       1.4     rmind 		pool_put(&aio_lio_pool, lio);
    900       1.4     rmind 		return error;
    901       1.4     rmind 	}
    902       1.1     rmind 
    903       1.1     rmind 	/* Get the list from user-space */
     904       1.1     rmind 	aiocbp_list = kmem_zalloc(nent * sizeof(struct aiocb *), KM_SLEEP);
     905       1.1     rmind 	error = copyin(SCARG(uap, list), aiocbp_list,
     906       1.1     rmind 	    nent * sizeof(struct aiocb *));
    907       1.4     rmind 	if (error) {
    908       1.4     rmind 		mutex_enter(&aio->aio_mtx);
    909       1.1     rmind 		goto err;
    910       1.4     rmind 	}
    911       1.1     rmind 
    912       1.1     rmind 	/* Enqueue all jobs */
    913       1.1     rmind 	errcnt = 0;
    914       1.1     rmind 	for (i = 0; i < nent; i++) {
    915       1.1     rmind 		error = aio_enqueue_job(AIO_LIO, aiocbp_list[i], lio);
    916       1.1     rmind 		/*
     917       1.1     rmind 		 * According to POSIX, such a failure may occur with
     918       1.1     rmind 		 * other I/O operations in the list already initiated.
    919       1.1     rmind 		 */
    920       1.1     rmind 		if (error)
    921       1.1     rmind 			errcnt++;
    922       1.1     rmind 	}
    923       1.1     rmind 
    924       1.4     rmind 	mutex_enter(&aio->aio_mtx);
    925       1.4     rmind 
    926       1.1     rmind 	/* Return an error, if any */
    927       1.1     rmind 	if (errcnt) {
    928       1.1     rmind 		error = EIO;
    929       1.1     rmind 		goto err;
    930       1.1     rmind 	}
    931       1.1     rmind 
    932       1.1     rmind 	if (mode == LIO_WAIT) {
    933       1.1     rmind 		/*
    934       1.1     rmind 		 * Wait for AIO completion.  In such case,
    935       1.1     rmind 		 * the LIO structure will be freed here.
    936       1.1     rmind 		 */
    937       1.4     rmind 		while (lio->refcnt > 1 && error == 0)
    938       1.1     rmind 			error = cv_wait_sig(&aio->done_cv, &aio->aio_mtx);
    939       1.1     rmind 		if (error)
    940       1.1     rmind 			error = EINTR;
    941       1.1     rmind 	}
    942       1.1     rmind 
    943       1.1     rmind err:
    944       1.4     rmind 	if (--lio->refcnt != 0)
    945       1.4     rmind 		lio = NULL;
    946       1.4     rmind 	mutex_exit(&aio->aio_mtx);
    947       1.4     rmind 	if (lio != NULL) {
    948       1.4     rmind 		aio_sendsig(p, &lio->sig);
    949       1.4     rmind 		pool_put(&aio_lio_pool, lio);
    950       1.4     rmind 	}
     951       1.1     rmind 	kmem_free(aiocbp_list, nent * sizeof(struct aiocb *));
    952       1.1     rmind 	return error;
    953       1.1     rmind }
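
                             /*
                              * A minimal user-space sketch of the list I/O interface handled
                              * above (illustrative only, not part of this file; fd, buf[] and
                              * BUFSIZE are assumed to exist):
                              *
                              *	struct aiocb cb[2];
                              *	struct aiocb *list[2] = { &cb[0], &cb[1] };
                              *	int i;
                              *
                              *	memset(cb, 0, sizeof(cb));
                              *	for (i = 0; i < 2; i++) {
                              *		cb[i].aio_fildes = fd;
                              *		cb[i].aio_buf = buf[i];
                              *		cb[i].aio_nbytes = BUFSIZE;
                              *		cb[i].aio_offset = i * BUFSIZE;
                              *		cb[i].aio_lio_opcode = LIO_READ;
                              *	}
                              *	if (lio_listio(LIO_WAIT, list, 2, NULL) == -1)
                              *		err(EXIT_FAILURE, "lio_listio");
                              */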
    954       1.1     rmind 
    955       1.1     rmind /*
    956       1.1     rmind  * SysCtl
    957       1.1     rmind  */
    958       1.1     rmind 
    959       1.1     rmind static int
    960       1.1     rmind sysctl_aio_listio_max(SYSCTLFN_ARGS)
    961       1.1     rmind {
    962       1.1     rmind 	struct sysctlnode node;
    963       1.1     rmind 	int error, newsize;
    964       1.1     rmind 
    965       1.1     rmind 	node = *rnode;
    966       1.1     rmind 	node.sysctl_data = &newsize;
    967       1.1     rmind 
    968       1.1     rmind 	newsize = aio_listio_max;
    969       1.1     rmind 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
    970       1.1     rmind 	if (error || newp == NULL)
    971       1.1     rmind 		return error;
    972       1.1     rmind 
    973       1.1     rmind 	if (newsize < 1 || newsize > aio_max)
    974       1.1     rmind 		return EINVAL;
    975       1.1     rmind 	aio_listio_max = newsize;
    976       1.1     rmind 
    977       1.1     rmind 	return 0;
    978       1.1     rmind }
    979       1.1     rmind 
    980       1.1     rmind static int
    981       1.1     rmind sysctl_aio_max(SYSCTLFN_ARGS)
    982       1.1     rmind {
    983       1.1     rmind 	struct sysctlnode node;
    984       1.1     rmind 	int error, newsize;
    985       1.1     rmind 
    986       1.1     rmind 	node = *rnode;
    987       1.1     rmind 	node.sysctl_data = &newsize;
    988       1.1     rmind 
    989       1.1     rmind 	newsize = aio_max;
    990       1.1     rmind 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
    991       1.1     rmind 	if (error || newp == NULL)
    992       1.1     rmind 		return error;
    993       1.1     rmind 
    994       1.1     rmind 	if (newsize < 1 || newsize < aio_listio_max)
    995       1.1     rmind 		return EINVAL;
    996       1.1     rmind 	aio_max = newsize;
    997       1.1     rmind 
    998       1.1     rmind 	return 0;
    999       1.1     rmind }
   1000       1.1     rmind 
   1001       1.1     rmind SYSCTL_SETUP(sysctl_aio_setup, "sysctl aio setup")
   1002       1.1     rmind {
   1003       1.1     rmind 
   1004       1.1     rmind 	sysctl_createv(clog, 0, NULL, NULL,
   1005       1.1     rmind 		CTLFLAG_PERMANENT,
   1006       1.1     rmind 		CTLTYPE_NODE, "kern", NULL,
   1007       1.1     rmind 		NULL, 0, NULL, 0,
   1008       1.1     rmind 		CTL_KERN, CTL_EOL);
   1009       1.1     rmind 	sysctl_createv(clog, 0, NULL, NULL,
   1010       1.1     rmind 		CTLFLAG_PERMANENT | CTLFLAG_IMMEDIATE,
   1011       1.1     rmind 		CTLTYPE_INT, "posix_aio",
   1012       1.1     rmind 		SYSCTL_DESCR("Version of IEEE Std 1003.1 and its "
   1013       1.1     rmind 			     "Asynchronous I/O option to which the "
   1014       1.1     rmind 			     "system attempts to conform"),
   1015       1.1     rmind 		NULL, _POSIX_ASYNCHRONOUS_IO, NULL, 0,
   1016       1.1     rmind 		CTL_KERN, CTL_CREATE, CTL_EOL);
   1017       1.1     rmind 	sysctl_createv(clog, 0, NULL, NULL,
   1018       1.1     rmind 		CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
   1019       1.1     rmind 		CTLTYPE_INT, "aio_listio_max",
   1020       1.1     rmind 		SYSCTL_DESCR("Maximum number of asynchronous I/O "
   1021       1.1     rmind 			     "operations in a single list I/O call"),
   1022       1.1     rmind 		sysctl_aio_listio_max, 0, &aio_listio_max, 0,
   1023       1.1     rmind 		CTL_KERN, CTL_CREATE, CTL_EOL);
   1024       1.1     rmind 	sysctl_createv(clog, 0, NULL, NULL,
   1025       1.1     rmind 		CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
   1026       1.1     rmind 		CTLTYPE_INT, "aio_max",
   1027       1.1     rmind 		SYSCTL_DESCR("Maximum number of asynchronous I/O "
   1028       1.1     rmind 			     "operations"),
   1029       1.1     rmind 		sysctl_aio_max, 0, &aio_max, 0,
   1030       1.1     rmind 		CTL_KERN, CTL_CREATE, CTL_EOL);
   1031       1.1     rmind }
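
                             /*
                              * The limits registered above can be inspected and tuned at run time
                              * with sysctl(8), for example (illustrative):
                              *
                              *	$ sysctl kern.aio_max kern.aio_listio_max
                              *	# sysctl -w kern.aio_max=2048
                              */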
   1032       1.1     rmind 
   1033       1.1     rmind /*
   1034       1.1     rmind  * Debugging
   1035       1.1     rmind  */
   1036       1.1     rmind #if defined(DDB)
   1037       1.1     rmind void
   1038       1.1     rmind aio_print_jobs(void (*pr)(const char *, ...))
   1039       1.1     rmind {
   1040       1.1     rmind 	struct proc *p = (curlwp == NULL ? NULL : curlwp->l_proc);
   1041       1.1     rmind 	struct aioproc *aio;
   1042       1.1     rmind 	struct aio_job *a_job;
   1043       1.1     rmind 	struct aiocb *aiocbp;
   1044       1.1     rmind 
   1045       1.1     rmind 	if (p == NULL) {
    1046       1.1     rmind 		(*pr)("AIO: no process context.\n");
   1047       1.1     rmind 		return;
   1048       1.1     rmind 	}
   1049       1.1     rmind 
   1050       1.1     rmind 	aio = p->p_aio;
   1051       1.1     rmind 	if (aio == NULL) {
   1052       1.1     rmind 		(*pr)("AIO data is not initialized (PID = %d).\n", p->p_pid);
   1053       1.1     rmind 		return;
   1054       1.1     rmind 	}
   1055       1.1     rmind 
   1056       1.1     rmind 	(*pr)("AIO: PID = %d\n", p->p_pid);
   1057       1.1     rmind 	(*pr)("AIO: Global count of the jobs = %u\n", aio_jobs_count);
   1058       1.1     rmind 	(*pr)("AIO: Count of the jobs = %u\n", aio->jobs_count);
   1059       1.1     rmind 
   1060       1.1     rmind 	if (aio->curjob) {
   1061       1.1     rmind 		a_job = aio->curjob;
   1062       1.1     rmind 		(*pr)("\nAIO current job:\n");
   1063       1.1     rmind 		(*pr)(" opcode = %d, errno = %d, state = %d, aiocb_ptr = %p\n",
   1064       1.1     rmind 		    a_job->aio_op, a_job->aiocbp._errno,
   1065       1.1     rmind 		    a_job->aiocbp._state, a_job->aiocb_uptr);
   1066       1.1     rmind 		aiocbp = &a_job->aiocbp;
    1067       1.1     rmind 		(*pr)("   fd = %d, offset = %lld, buf = %p, nbytes = %zu\n",
    1068       1.1     rmind 		    aiocbp->aio_fildes, (long long)aiocbp->aio_offset,
    1069       1.1     rmind 		    aiocbp->aio_buf, aiocbp->aio_nbytes);
   1070       1.1     rmind 	}
   1071       1.1     rmind 
   1072       1.1     rmind 	(*pr)("\nAIO queue:\n");
   1073       1.1     rmind 	TAILQ_FOREACH(a_job, &aio->jobs_queue, list) {
   1074       1.1     rmind 		(*pr)(" opcode = %d, errno = %d, state = %d, aiocb_ptr = %p\n",
   1075       1.1     rmind 		    a_job->aio_op, a_job->aiocbp._errno,
   1076       1.1     rmind 		    a_job->aiocbp._state, a_job->aiocb_uptr);
   1077       1.1     rmind 		aiocbp = &a_job->aiocbp;
    1078       1.1     rmind 		(*pr)("   fd = %d, offset = %lld, buf = %p, nbytes = %zu\n",
    1079       1.1     rmind 		    aiocbp->aio_fildes, (long long)aiocbp->aio_offset,
    1080       1.1     rmind 		    aiocbp->aio_buf, aiocbp->aio_nbytes);
   1081       1.1     rmind 	}
   1082       1.1     rmind }
   1083       1.1     rmind #endif /* defined(DDB) */
   1084