/*	$NetBSD: sys_aio.c,v 1.16.2.2 2008/11/01 21:22:27 christos Exp $	*/

/*
 * Copyright (c) 2007, Mindaugas Rasiukevicius <rmind at NetBSD org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Implementation of POSIX asynchronous I/O.
 * Defined in the Base Definitions volume of IEEE Std 1003.1-2001.
 */
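
/*
 * Overview:
 *
 * Each process that uses AIO gets a struct aioproc (p->p_aio), created
 * lazily by aio_init() together with a dedicated worker LWP.  The
 * aio_read(), aio_write(), aio_fsync() and lio_listio() system calls
 * validate the user's aiocb, mark it EINPROGRESS, and queue a struct
 * aio_job via aio_enqueue_job().  The worker (aio_worker()) dequeues
 * jobs one at a time, performs the transfer or sync in aio_process(),
 * copies the updated aiocb back to user-space and posts the requested
 * completion signal.  Results are collected by the process with
 * aio_error() and aio_return(), which read the control block directly
 * from user-space.  List I/O requests share a struct lio_req that is
 * reference-counted by the jobs belonging to it.
 */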

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sys_aio.c,v 1.16.2.2 2008/11/01 21:22:27 christos Exp $");

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/condvar.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/lwp.h>
#include <sys/mutex.h>
#include <sys/pool.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/signal.h>
#include <sys/signalvar.h>
#include <sys/syscallargs.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/vnode.h>
#include <sys/atomic.h>

#include <uvm/uvm_extern.h>

/*
 * System-wide limits and counter of AIO operations.
 */
u_int aio_listio_max = AIO_LISTIO_MAX;
static u_int aio_max = AIO_MAX;
static u_int aio_jobs_count;

static struct pool aio_job_pool;
static struct pool aio_lio_pool;

/* Prototypes */
void aio_worker(void *);
static void aio_process(struct aio_job *);
static void aio_sendsig(struct proc *, struct sigevent *);
static int aio_enqueue_job(int, void *, struct lio_req *);

/*
 * Initialize the AIO system.
 */
void
aio_sysinit(void)
{

	pool_init(&aio_job_pool, sizeof(struct aio_job), 0, 0, 0,
	    "aio_jobs_pool", &pool_allocator_nointr, IPL_NONE);
	pool_init(&aio_lio_pool, sizeof(struct lio_req), 0, 0, 0,
	    "aio_lio_pool", &pool_allocator_nointr, IPL_NONE);
}

/*
 * Initialize Asynchronous I/O data structures for the process.
 */
int
aio_init(struct proc *p)
{
	struct aioproc *aio;
	struct lwp *l;
	int error;
	bool inmem;
	vaddr_t uaddr;

	/* Allocate and initialize AIO structure */
	aio = kmem_zalloc(sizeof(struct aioproc), KM_SLEEP);
	if (aio == NULL)
		return EAGAIN;

	/* Initialize the queue and its synchronization structures */
	mutex_init(&aio->aio_mtx, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&aio->aio_worker_cv, "aiowork");
	cv_init(&aio->done_cv, "aiodone");
	TAILQ_INIT(&aio->jobs_queue);

	/*
	 * Create an AIO worker thread.
	 * XXX: Currently, AIO thread is not protected against user's actions.
	 */
	inmem = uvm_uarea_alloc(&uaddr);
	if (uaddr == 0) {
		aio_exit(p, aio);
		return EAGAIN;
	}
	error = lwp_create(curlwp, p, uaddr, inmem, 0, NULL, 0, aio_worker,
	    NULL, &l, curlwp->l_class);
	if (error != 0) {
		uvm_uarea_free(uaddr, curcpu());
		aio_exit(p, aio);
		return error;
	}

	/* Recheck if we are really first */
	mutex_enter(p->p_lock);
	if (p->p_aio) {
		mutex_exit(p->p_lock);
		aio_exit(p, aio);
		lwp_exit(l);
		return 0;
	}
	p->p_aio = aio;

	/* Complete the initialization of the thread, and run it */
	aio->aio_worker = l;
	p->p_nrlwps++;
	lwp_lock(l);
	l->l_stat = LSRUN;
	l->l_priority = MAXPRI_USER;
	sched_enqueue(l, false);
	lwp_unlock(l);
	mutex_exit(p->p_lock);

	return 0;
}

/*
 * Exit of the Asynchronous I/O subsystem of the process.
 */
void
aio_exit(struct proc *p, struct aioproc *aio)
{
	struct aio_job *a_job;

	if (aio == NULL)
		return;

	/* Free AIO queue */
	while (!TAILQ_EMPTY(&aio->jobs_queue)) {
		a_job = TAILQ_FIRST(&aio->jobs_queue);
		TAILQ_REMOVE(&aio->jobs_queue, a_job, list);
		pool_put(&aio_job_pool, a_job);
		atomic_dec_uint(&aio_jobs_count);
	}

	/* Destroy and free the entire AIO data structure */
	cv_destroy(&aio->aio_worker_cv);
	cv_destroy(&aio->done_cv);
	mutex_destroy(&aio->aio_mtx);
	kmem_free(aio, sizeof(struct aioproc));
}

/*
 * AIO worker thread and processor.
 */
void
aio_worker(void *arg)
{
	struct proc *p = curlwp->l_proc;
	struct aioproc *aio = p->p_aio;
	struct aio_job *a_job;
	struct lio_req *lio;
	sigset_t oss, nss;
	int error, refcnt;

	/*
	 * Block all signals: the worker then reacts only to
	 * SIGKILL and SIGSTOP, which cannot be masked.
	 */
	sigfillset(&nss);
	mutex_enter(p->p_lock);
	error = sigprocmask1(curlwp, SIG_SETMASK, &nss, &oss);
	mutex_exit(p->p_lock);
	KASSERT(error == 0);

	for (;;) {
		/*
		 * Loop for each job in the queue.  If there
		 * are no jobs then sleep.
		 */
		mutex_enter(&aio->aio_mtx);
		while ((a_job = TAILQ_FIRST(&aio->jobs_queue)) == NULL) {
			if (cv_wait_sig(&aio->aio_worker_cv, &aio->aio_mtx)) {
				/*
				 * Thread was interrupted - check for
				 * pending exit or suspend.
				 */
				mutex_exit(&aio->aio_mtx);
				lwp_userret(curlwp);
				mutex_enter(&aio->aio_mtx);
			}
		}

		/* Take the job from the queue */
		aio->curjob = a_job;
		TAILQ_REMOVE(&aio->jobs_queue, a_job, list);

		atomic_dec_uint(&aio_jobs_count);
		aio->jobs_count--;

		mutex_exit(&aio->aio_mtx);

		/* Process an AIO operation */
		aio_process(a_job);

		/* Copy data structure back to the user-space */
		(void)copyout(&a_job->aiocbp, a_job->aiocb_uptr,
		    sizeof(struct aiocb));

		mutex_enter(&aio->aio_mtx);
		aio->curjob = NULL;

		/* Decrease a reference counter, if there is a LIO structure */
		lio = a_job->lio;
		refcnt = (lio != NULL ? --lio->refcnt : -1);

		/* Notify all suspenders */
		cv_broadcast(&aio->done_cv);
		mutex_exit(&aio->aio_mtx);

		/* Send a signal, if any */
		aio_sendsig(p, &a_job->aiocbp.aio_sigevent);

		/* Destroy the LIO structure */
		if (refcnt == 0) {
			aio_sendsig(p, &lio->sig);
			pool_put(&aio_lio_pool, lio);
		}

		/* Destroy the job */
		pool_put(&aio_job_pool, a_job);
	}

	/* NOTREACHED */
}

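/*
 * Process one AIO job: perform the read, write or fsync described by
 * the aiocb and store the error code and return value in the job, to
 * be copied back to user-space by the caller.  Runs in the context of
 * the AIO worker LWP.
 */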
static void
aio_process(struct aio_job *a_job)
{
	struct proc *p = curlwp->l_proc;
	struct aiocb *aiocbp = &a_job->aiocbp;
	struct file *fp;
	int fd = aiocbp->aio_fildes;
	int error = 0;

	KASSERT(a_job->aio_op != 0);

	if ((a_job->aio_op & (AIO_READ | AIO_WRITE)) != 0) {
		struct iovec aiov;
		struct uio auio;

		if (aiocbp->aio_nbytes > SSIZE_MAX) {
			error = EINVAL;
			goto done;
		}

		fp = fd_getfile(fd);
		if (fp == NULL) {
			error = EBADF;
			goto done;
		}

		aiov.iov_base = (void *)(uintptr_t)aiocbp->aio_buf;
		aiov.iov_len = aiocbp->aio_nbytes;
		auio.uio_iov = &aiov;
		auio.uio_iovcnt = 1;
		auio.uio_resid = aiocbp->aio_nbytes;
		auio.uio_vmspace = p->p_vmspace;

		if (a_job->aio_op & AIO_READ) {
			/*
			 * Perform a Read operation
			 */
			KASSERT((a_job->aio_op & AIO_WRITE) == 0);

			if ((fp->f_flag & FREAD) == 0) {
				fd_putfile(fd);
				error = EBADF;
				goto done;
			}
			auio.uio_rw = UIO_READ;
			error = (*fp->f_ops->fo_read)(fp, &aiocbp->aio_offset,
			    &auio, fp->f_cred, FOF_UPDATE_OFFSET);
		} else {
			/*
			 * Perform a Write operation
			 */
			KASSERT(a_job->aio_op & AIO_WRITE);

			if ((fp->f_flag & FWRITE) == 0) {
				fd_putfile(fd);
				error = EBADF;
				goto done;
			}
			auio.uio_rw = UIO_WRITE;
			error = (*fp->f_ops->fo_write)(fp, &aiocbp->aio_offset,
			    &auio, fp->f_cred, FOF_UPDATE_OFFSET);
		}
		fd_putfile(fd);

		/* Store the result value */
		a_job->aiocbp.aio_nbytes -= auio.uio_resid;
		a_job->aiocbp._retval = (error == 0) ?
		    a_job->aiocbp.aio_nbytes : -1;

	} else if ((a_job->aio_op & (AIO_SYNC | AIO_DSYNC)) != 0) {
		/*
		 * Perform a file Sync operation
		 */
		struct vnode *vp;

		if ((error = fd_getvnode(fd, &fp)) != 0)
			goto done;

		if ((fp->f_flag & FWRITE) == 0) {
			fd_putfile(fd);
			error = EBADF;
			goto done;
		}

		vp = (struct vnode *)fp->f_data;
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		if (a_job->aio_op & AIO_DSYNC) {
			error = VOP_FSYNC(vp, fp->f_cred,
			    FSYNC_WAIT | FSYNC_DATAONLY, 0, 0);
		} else if (a_job->aio_op & AIO_SYNC) {
			error = VOP_FSYNC(vp, fp->f_cred,
			    FSYNC_WAIT, 0, 0);
			if (error == 0 && bioopsp != NULL &&
			    vp->v_mount &&
			    (vp->v_mount->mnt_flag & MNT_SOFTDEP))
			    bioopsp->io_fsync(vp, 0);
		}
		VOP_UNLOCK(vp, 0);
		fd_putfile(fd);

		/* Store the result value */
		a_job->aiocbp._retval = (error == 0) ? 0 : -1;

	} else
		panic("aio_process: invalid operation code\n");

done:
	/* Job is done, set the error, if any */
	a_job->aiocbp._errno = error;
	a_job->aiocbp._state = JOB_DONE;
}

/*
 * Send AIO signal.
 */
static void
aio_sendsig(struct proc *p, struct sigevent *sig)
{
	ksiginfo_t ksi;

	if (sig->sigev_signo == 0 || sig->sigev_notify == SIGEV_NONE)
		return;

	KSI_INIT(&ksi);
	ksi.ksi_signo = sig->sigev_signo;
	ksi.ksi_code = SI_ASYNCIO;
	ksi.ksi_value = sig->sigev_value;
	mutex_enter(proc_lock);
	kpsignal(p, &ksi, NULL);
	mutex_exit(proc_lock);
}

/*
 * Enqueue the job.
 */
static int
aio_enqueue_job(int op, void *aiocb_uptr, struct lio_req *lio)
{
	struct proc *p = curlwp->l_proc;
	struct aioproc *aio;
	struct aio_job *a_job;
	struct aiocb aiocbp;
	struct sigevent *sig;
	int error;

	/* Imprecise (unlocked) check against the limit */
	if (aio_jobs_count + 1 > aio_max)
		return EAGAIN;

	/* Get the data structure from user-space */
	error = copyin(aiocb_uptr, &aiocbp, sizeof(struct aiocb));
	if (error)
		return error;

	/* Check if signal is set, and validate it */
	sig = &aiocbp.aio_sigevent;
	if (sig->sigev_signo < 0 || sig->sigev_signo >= NSIG ||
	    sig->sigev_notify < SIGEV_NONE || sig->sigev_notify > SIGEV_SA)
		return EINVAL;

	/* Buffer and byte count */
	if (((AIO_SYNC | AIO_DSYNC) & op) == 0)
		if (aiocbp.aio_buf == NULL || aiocbp.aio_nbytes > SSIZE_MAX)
			return EINVAL;

	/* Check the opcode, if LIO_NOP - simply ignore */
	if (op == AIO_LIO) {
		KASSERT(lio != NULL);
		if (aiocbp.aio_lio_opcode == LIO_WRITE)
			op = AIO_WRITE;
		else if (aiocbp.aio_lio_opcode == LIO_READ)
			op = AIO_READ;
		else
			return (aiocbp.aio_lio_opcode == LIO_NOP) ? 0 : EINVAL;
	} else {
		KASSERT(lio == NULL);
	}

	/*
	 * Look for an already existing job.  If found, the job is still in
	 * progress; according to POSIX this is invalid, so return an error.
	 */
	aio = p->p_aio;
	if (aio) {
		mutex_enter(&aio->aio_mtx);
		if (aio->curjob) {
			a_job = aio->curjob;
			if (a_job->aiocb_uptr == aiocb_uptr) {
				mutex_exit(&aio->aio_mtx);
				return EINVAL;
			}
		}
		TAILQ_FOREACH(a_job, &aio->jobs_queue, list) {
			if (a_job->aiocb_uptr != aiocb_uptr)
				continue;
			mutex_exit(&aio->aio_mtx);
			return EINVAL;
		}
		mutex_exit(&aio->aio_mtx);
	}

	/*
	 * Check if AIO structure is initialized, if not - initialize it.
	 * In LIO case, we did that already.  We will recheck this with
	 * the lock in aio_init().
	 */
	if (lio == NULL && p->p_aio == NULL)
		if (aio_init(p))
			return EAGAIN;
	aio = p->p_aio;

	/*
	 * Set the state with errno, and copy data
	 * structure back to the user-space.
	 */
	aiocbp._state = JOB_WIP;
	aiocbp._errno = EINPROGRESS;
	aiocbp._retval = -1;
	error = copyout(&aiocbp, aiocb_uptr, sizeof(struct aiocb));
	if (error)
		return error;

	/* Allocate and initialize a new AIO job */
	a_job = pool_get(&aio_job_pool, PR_WAITOK);
	memset(a_job, 0, sizeof(struct aio_job));

	/*
	 * Set the data.
	 * Store the user-space pointer for later lookups.  Since only
	 * per-process pointers are stored and compared, this is safe.
	 */
	memcpy(&a_job->aiocbp, &aiocbp, sizeof(struct aiocb));
	a_job->aiocb_uptr = aiocb_uptr;
	a_job->aio_op |= op;
	a_job->lio = lio;

	/*
	 * Add the job to the queue, update the counters, and
	 * notify the AIO worker thread to handle the job.
	 */
	mutex_enter(&aio->aio_mtx);

	/* Fail, if the limit was reached */
	if (atomic_inc_uint_nv(&aio_jobs_count) > aio_max ||
	    aio->jobs_count >= aio_listio_max) {
		atomic_dec_uint(&aio_jobs_count);
		mutex_exit(&aio->aio_mtx);
		pool_put(&aio_job_pool, a_job);
		return EAGAIN;
	}

	TAILQ_INSERT_TAIL(&aio->jobs_queue, a_job, list);
	aio->jobs_count++;
	if (lio)
		lio->refcnt++;
	cv_signal(&aio->aio_worker_cv);

	mutex_exit(&aio->aio_mtx);

	/*
	 * Errors are reported only via the aio_error() function;
	 * this is the behavior POSIX specifies.
	 */
	return 0;
}

/*
 * Syscall functions.
 */

int
sys_aio_cancel(struct lwp *l, const struct sys_aio_cancel_args *uap, register_t *retval)
{
	/* {
		syscallarg(int) fildes;
		syscallarg(struct aiocb *) aiocbp;
	} */
	struct proc *p = l->l_proc;
	struct aioproc *aio;
	struct aio_job *a_job;
	struct aiocb *aiocbp_ptr;
	struct lio_req *lio;
	struct filedesc	*fdp = p->p_fd;
	unsigned int cn, errcnt, fildes;

	TAILQ_HEAD(, aio_job) tmp_jobs_list;

	/* Check for invalid file descriptor */
	fildes = (unsigned int)SCARG(uap, fildes);
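	/*
	 * Unlocked peek at the file descriptor table: no reference is
	 * taken, we only check that the descriptor is currently valid.
	 * membar_consumer() orders the read of the fd_ofiles[] slot
	 * after the bounds check against fd_nfiles.
	 */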
	if (fildes >= fdp->fd_nfiles)
		return EBADF;
	membar_consumer();
	if (fdp->fd_ofiles[fildes] == NULL || fdp->fd_ofiles[fildes]->ff_file == NULL)
		return EBADF;

	/* Check if AIO structure is initialized */
	if (p->p_aio == NULL) {
		*retval = AIO_NOTCANCELED;
		return 0;
	}

	aio = p->p_aio;
	aiocbp_ptr = (struct aiocb *)SCARG(uap, aiocbp);

	mutex_enter(&aio->aio_mtx);

	/* Cancel the jobs, and remove them from the queue */
	cn = 0;
	TAILQ_INIT(&tmp_jobs_list);
	TAILQ_FOREACH(a_job, &aio->jobs_queue, list) {
		if (aiocbp_ptr) {
			if (aiocbp_ptr != a_job->aiocb_uptr)
				continue;
			if (fildes != a_job->aiocbp.aio_fildes) {
				mutex_exit(&aio->aio_mtx);
				return EBADF;
			}
		} else if (a_job->aiocbp.aio_fildes != fildes)
			continue;

		TAILQ_REMOVE(&aio->jobs_queue, a_job, list);
		TAILQ_INSERT_TAIL(&tmp_jobs_list, a_job, list);

		/* Decrease the counters */
		atomic_dec_uint(&aio_jobs_count);
		aio->jobs_count--;
		lio = a_job->lio;
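		/*
		 * If other jobs of the same LIO are still pending, drop
		 * our reference and detach the job from the LIO, so that
		 * the free loop below will not touch it.  Otherwise keep
		 * the pointer: the last reference sends the LIO signal
		 * and frees the structure below.
		 */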
		if (lio != NULL && --lio->refcnt != 0)
			a_job->lio = NULL;

		cn++;
		if (aiocbp_ptr)
			break;
	}

	/* There are canceled jobs */
	if (cn)
		*retval = AIO_CANCELED;

	/* We cannot cancel the current job */
	a_job = aio->curjob;
	if (a_job && ((a_job->aiocbp.aio_fildes == fildes) ||
	    (a_job->aiocb_uptr == aiocbp_ptr)))
		*retval = AIO_NOTCANCELED;

	mutex_exit(&aio->aio_mtx);

	/* Free the jobs after the lock */
	errcnt = 0;
	while (!TAILQ_EMPTY(&tmp_jobs_list)) {
		a_job = TAILQ_FIRST(&tmp_jobs_list);
		TAILQ_REMOVE(&tmp_jobs_list, a_job, list);
		/* Set the errno and copy structures back to the user-space */
		a_job->aiocbp._errno = ECANCELED;
		a_job->aiocbp._state = JOB_DONE;
		if (copyout(&a_job->aiocbp, a_job->aiocb_uptr,
		    sizeof(struct aiocb)))
			errcnt++;
		/* Send a signal if any */
		aio_sendsig(p, &a_job->aiocbp.aio_sigevent);
		if (a_job->lio) {
			lio = a_job->lio;
			aio_sendsig(p, &lio->sig);
			pool_put(&aio_lio_pool, lio);
		}
		pool_put(&aio_job_pool, a_job);
	}

	if (errcnt)
		return EFAULT;

	/* Set a correct return value */
	if (*retval == 0)
		*retval = AIO_ALLDONE;

	return 0;
}

int
sys_aio_error(struct lwp *l, const struct sys_aio_error_args *uap, register_t *retval)
{
	/* {
		syscallarg(const struct aiocb *) aiocbp;
	} */
	struct proc *p = l->l_proc;
	struct aioproc *aio = p->p_aio;
	struct aiocb aiocbp;
	int error;

	if (aio == NULL)
		return EINVAL;

	error = copyin(SCARG(uap, aiocbp), &aiocbp, sizeof(struct aiocb));
	if (error)
		return error;

	if (aiocbp._state == JOB_NONE)
		return EINVAL;

	*retval = aiocbp._errno;

	return 0;
}

int
sys_aio_fsync(struct lwp *l, const struct sys_aio_fsync_args *uap, register_t *retval)
{
	/* {
		syscallarg(int) op;
		syscallarg(struct aiocb *) aiocbp;
	} */
	int op = SCARG(uap, op);

	if ((op != O_DSYNC) && (op != O_SYNC))
		return EINVAL;

	op = (op == O_DSYNC) ? AIO_DSYNC : AIO_SYNC;

	return aio_enqueue_job(op, SCARG(uap, aiocbp), NULL);
}

int
sys_aio_read(struct lwp *l, const struct sys_aio_read_args *uap, register_t *retval)
{
	/* {
		syscallarg(struct aiocb *) aiocbp;
	} */

	return aio_enqueue_job(AIO_READ, SCARG(uap, aiocbp), NULL);
}

int
sys_aio_return(struct lwp *l, const struct sys_aio_return_args *uap, register_t *retval)
{
	/* {
		syscallarg(struct aiocb *) aiocbp;
	} */
	struct proc *p = l->l_proc;
	struct aioproc *aio = p->p_aio;
	struct aiocb aiocbp;
	int error;

	if (aio == NULL)
		return EINVAL;

	error = copyin(SCARG(uap, aiocbp), &aiocbp, sizeof(struct aiocb));
	if (error)
		return error;

	if (aiocbp._errno == EINPROGRESS || aiocbp._state != JOB_DONE)
		return EINVAL;

	*retval = aiocbp._retval;

	/* Reset the internal variables */
	aiocbp._errno = 0;
	aiocbp._retval = -1;
	aiocbp._state = JOB_NONE;
	error = copyout(&aiocbp, SCARG(uap, aiocbp), sizeof(struct aiocb));

	return error;
}

int
sys___aio_suspend50(struct lwp *l, const struct sys___aio_suspend50_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(const struct aiocb *const[]) list;
		syscallarg(int) nent;
		syscallarg(const struct timespec *) timeout;
	} */
	struct aiocb **list;
	struct timespec ts;
	int error, nent;

	nent = SCARG(uap, nent);
	if (nent <= 0 || nent > aio_listio_max)
		return EAGAIN;

	if (SCARG(uap, timeout)) {
		/* Convert timespec to ticks */
		error = copyin(SCARG(uap, timeout), &ts,
		    sizeof(struct timespec));
		if (error)
			return error;
	}
	list = kmem_zalloc(nent * sizeof(struct aiocb *), KM_SLEEP);
	error = copyin(SCARG(uap, list), list, nent * sizeof(struct aiocb *));
	if (error)
		goto out;
	error = aio_suspend1(l, list, nent, SCARG(uap, timeout) ? &ts : NULL);
out:
	kmem_free(list, nent * sizeof(struct aiocb *));
	return error;
}

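/*
 * Wait until at least one of the jobs referenced by aiocbp_list has
 * completed, or until the (relative) timeout in ts expires.  The list
 * of nent user-space aiocb pointers has already been copied in by the
 * caller, which also owns and frees it.  NULL entries are ignored.
 */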
int
aio_suspend1(struct lwp *l, struct aiocb **aiocbp_list, int nent,
    struct timespec *ts)
{
	struct proc *p = l->l_proc;
	struct aioproc *aio;
	struct aio_job *a_job;
	int i, error, timo;

	if (p->p_aio == NULL)
		return EAGAIN;
	aio = p->p_aio;

	if (ts) {
		timo = mstohz((ts->tv_sec * 1000) + (ts->tv_nsec / 1000000));
		if (timo == 0 && ts->tv_sec == 0 && ts->tv_nsec > 0)
			timo = 1;
		if (timo <= 0)
			return EAGAIN;
	} else
		timo = 0;

	mutex_enter(&aio->aio_mtx);
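	/*
	 * Scan the list: an entry that is neither queued nor currently
	 * being processed is checked by copying its aiocb back in; if it
	 * is already done (or the copyin fails), return.  Otherwise wait
	 * on done_cv, which the worker broadcasts on every completion,
	 * and rescan, until the timeout expires.
	 */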
	for (;;) {

		for (i = 0; i < nent; i++) {

			/* Skip NULL entries */
			if (aiocbp_list[i] == NULL)
				continue;

			/* Skip current job */
			if (aio->curjob) {
				a_job = aio->curjob;
				if (a_job->aiocb_uptr == aiocbp_list[i])
					continue;
			}

			/* Look for a job in the queue */
			TAILQ_FOREACH(a_job, &aio->jobs_queue, list)
				if (a_job->aiocb_uptr == aiocbp_list[i])
					break;

			if (a_job == NULL) {
				struct aiocb aiocbp;

				mutex_exit(&aio->aio_mtx);

				error = copyin(aiocbp_list[i], &aiocbp,
				    sizeof(struct aiocb));
				if (error == 0 && aiocbp._state != JOB_DONE) {
					mutex_enter(&aio->aio_mtx);
					continue;
				}

				return error;
			}
		}

		/* Wait for a signal or when timeout occurs */
		error = cv_timedwait_sig(&aio->done_cv, &aio->aio_mtx, timo);
		if (error) {
			if (error == EWOULDBLOCK)
				error = EAGAIN;
			break;
		}
	}
	mutex_exit(&aio->aio_mtx);
	return error;
}

int
sys_aio_write(struct lwp *l, const struct sys_aio_write_args *uap, register_t *retval)
{
	/* {
		syscallarg(struct aiocb *) aiocbp;
	} */

	return aio_enqueue_job(AIO_WRITE, SCARG(uap, aiocbp), NULL);
}

int
sys_lio_listio(struct lwp *l, const struct sys_lio_listio_args *uap, register_t *retval)
{
	/* {
		syscallarg(int) mode;
		syscallarg(struct aiocb *const[]) list;
		syscallarg(int) nent;
		syscallarg(struct sigevent *) sig;
	} */
	struct proc *p = l->l_proc;
	struct aioproc *aio;
	struct aiocb **aiocbp_list;
	struct lio_req *lio;
	int i, error, errcnt, mode, nent;

	mode = SCARG(uap, mode);
	nent = SCARG(uap, nent);

	/* Imprecise (unlocked) checks for the limit and invalid values */
	if (nent < 1 || nent > aio_listio_max)
		return EINVAL;
	if (aio_jobs_count + nent > aio_max)
		return EAGAIN;

	/* Check if AIO structure is initialized, if not - initialize it */
	if (p->p_aio == NULL)
		if (aio_init(p))
			return EAGAIN;
	aio = p->p_aio;

	/* Create a LIO structure */
	lio = pool_get(&aio_lio_pool, PR_WAITOK);
	lio->refcnt = 1;
	error = 0;

	switch (mode) {
	case LIO_WAIT:
		memset(&lio->sig, 0, sizeof(struct sigevent));
		break;
	case LIO_NOWAIT:
		/* Check for signal, validate it */
		if (SCARG(uap, sig)) {
			struct sigevent *sig = &lio->sig;

			error = copyin(SCARG(uap, sig), &lio->sig,
			    sizeof(struct sigevent));
			if (error == 0 &&
			    (sig->sigev_signo < 0 ||
			    sig->sigev_signo >= NSIG ||
			    sig->sigev_notify < SIGEV_NONE ||
			    sig->sigev_notify > SIGEV_SA))
				error = EINVAL;
		} else
			memset(&lio->sig, 0, sizeof(struct sigevent));
		break;
	default:
		error = EINVAL;
		break;
	}

	if (error != 0) {
		pool_put(&aio_lio_pool, lio);
		return error;
	}

	/* Get the list from user-space */
	aiocbp_list = kmem_zalloc(nent * sizeof(struct aiocb *), KM_SLEEP);
	error = copyin(SCARG(uap, list), aiocbp_list,
	    nent * sizeof(struct aiocb *));
	if (error) {
		mutex_enter(&aio->aio_mtx);
		goto err;
	}

	/* Enqueue all jobs */
	errcnt = 0;
	for (i = 0; i < nent; i++) {
		error = aio_enqueue_job(AIO_LIO, aiocbp_list[i], lio);
		/*
		 * According to POSIX, the call may fail in this case even
		 * though some of the I/O operations have been initiated.
		 */
		if (error)
			errcnt++;
	}

	mutex_enter(&aio->aio_mtx);

	/* Return an error, if any */
	if (errcnt) {
		error = EIO;
		goto err;
	}

	if (mode == LIO_WAIT) {
		/*
		 * Wait for AIO completion.  In such case,
		 * the LIO structure will be freed here.
		 */
		while (lio->refcnt > 1 && error == 0)
			error = cv_wait_sig(&aio->done_cv, &aio->aio_mtx);
		if (error)
			error = EINTR;
	}

err:
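	/*
	 * Drop the reference this call holds on the LIO structure.  If it
	 * was the last one (all jobs were cancelled, failed or have been
	 * completed), send the LIO signal and free the structure here;
	 * otherwise the AIO worker will do it when the last job finishes.
	 */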
	if (--lio->refcnt != 0)
		lio = NULL;
	mutex_exit(&aio->aio_mtx);
	if (lio != NULL) {
		aio_sendsig(p, &lio->sig);
		pool_put(&aio_lio_pool, lio);
	}
	kmem_free(aiocbp_list, nent * sizeof(struct aiocb *));
	return error;
}

/*
 * SysCtl
 */

static int
sysctl_aio_listio_max(SYSCTLFN_ARGS)
{
	struct sysctlnode node;
	int error, newsize;

	node = *rnode;
	node.sysctl_data = &newsize;

	newsize = aio_listio_max;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return error;

	if (newsize < 1 || newsize > aio_max)
		return EINVAL;
	aio_listio_max = newsize;

	return 0;
}

static int
sysctl_aio_max(SYSCTLFN_ARGS)
{
	struct sysctlnode node;
	int error, newsize;

	node = *rnode;
	node.sysctl_data = &newsize;

	newsize = aio_max;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return error;

	if (newsize < 1 || newsize < aio_listio_max)
		return EINVAL;
	aio_max = newsize;

	return 0;
}

SYSCTL_SETUP(sysctl_aio_setup, "sysctl aio setup")
{

	sysctl_createv(clog, 0, NULL, NULL,
		CTLFLAG_PERMANENT,
		CTLTYPE_NODE, "kern", NULL,
		NULL, 0, NULL, 0,
		CTL_KERN, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		CTLFLAG_PERMANENT | CTLFLAG_IMMEDIATE,
		CTLTYPE_INT, "posix_aio",
		SYSCTL_DESCR("Version of IEEE Std 1003.1 and its "
			     "Asynchronous I/O option to which the "
			     "system attempts to conform"),
		NULL, _POSIX_ASYNCHRONOUS_IO, NULL, 0,
		CTL_KERN, CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
		CTLTYPE_INT, "aio_listio_max",
		SYSCTL_DESCR("Maximum number of asynchronous I/O "
			     "operations in a single list I/O call"),
		sysctl_aio_listio_max, 0, &aio_listio_max, 0,
		CTL_KERN, CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
		CTLTYPE_INT, "aio_max",
		SYSCTL_DESCR("Maximum number of asynchronous I/O "
			     "operations"),
		sysctl_aio_max, 0, &aio_max, 0,
		CTL_KERN, CTL_CREATE, CTL_EOL);
}

/*
 * Debugging
 */
#if defined(DDB)
void
aio_print_jobs(void (*pr)(const char *, ...))
{
	struct proc *p = (curlwp == NULL ? NULL : curlwp->l_proc);
	struct aioproc *aio;
	struct aio_job *a_job;
	struct aiocb *aiocbp;

	if (p == NULL) {
		(*pr)("AIO: We are not in a process context right now.\n");
		return;
	}

	aio = p->p_aio;
	if (aio == NULL) {
		(*pr)("AIO data is not initialized (PID = %d).\n", p->p_pid);
		return;
	}

	(*pr)("AIO: PID = %d\n", p->p_pid);
	(*pr)("AIO: Global count of the jobs = %u\n", aio_jobs_count);
	(*pr)("AIO: Count of the jobs = %u\n", aio->jobs_count);

	if (aio->curjob) {
		a_job = aio->curjob;
		(*pr)("\nAIO current job:\n");
		(*pr)(" opcode = %d, errno = %d, state = %d, aiocb_ptr = %p\n",
		    a_job->aio_op, a_job->aiocbp._errno,
		    a_job->aiocbp._state, a_job->aiocb_uptr);
		aiocbp = &a_job->aiocbp;
		(*pr)("   fd = %d, offset = %u, buf = %p, nbytes = %u\n",
		    aiocbp->aio_fildes, aiocbp->aio_offset,
		    aiocbp->aio_buf, aiocbp->aio_nbytes);
	}

	(*pr)("\nAIO queue:\n");
	TAILQ_FOREACH(a_job, &aio->jobs_queue, list) {
		(*pr)(" opcode = %d, errno = %d, state = %d, aiocb_ptr = %p\n",
		    a_job->aio_op, a_job->aiocbp._errno,
		    a_job->aiocbp._state, a_job->aiocb_uptr);
		aiocbp = &a_job->aiocbp;
		(*pr)("   fd = %d, offset = %u, buf = %p, nbytes = %u\n",
		    aiocbp->aio_fildes, aiocbp->aio_offset,
		    aiocbp->aio_buf, aiocbp->aio_nbytes);
	}
}
#endif /* defined(DDB) */