Home | History | Annotate | Line # | Download | only in kern
sys_aio.c revision 1.6.10.1
      1  1.6.10.1      matt /*	$NetBSD: sys_aio.c,v 1.6.10.1 2007/11/06 23:32:21 matt Exp $	*/
      2       1.1     rmind 
      3       1.1     rmind /*
      4       1.1     rmind  * Copyright (c) 2007, Mindaugas Rasiukevicius <rmind at NetBSD org>
      5       1.1     rmind  *
      6       1.1     rmind  * Redistribution and use in source and binary forms, with or without
      7       1.1     rmind  * modification, are permitted provided that the following conditions
      8       1.1     rmind  * are met:
      9       1.1     rmind  * 1. Redistributions of source code must retain the above copyright
     10       1.1     rmind  *    notice, this list of conditions and the following disclaimer.
     11       1.1     rmind  * 2. Redistributions in binary form must reproduce the above copyright
     12       1.1     rmind  *    notice, this list of conditions and the following disclaimer in the
     13       1.1     rmind  *    documentation and/or other materials provided with the distribution.
     14       1.1     rmind  *
     15       1.1     rmind  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
     16       1.1     rmind  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     17       1.1     rmind  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     18       1.1     rmind  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     19       1.1     rmind  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     20       1.1     rmind  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     21       1.1     rmind  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     22       1.1     rmind  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     23       1.1     rmind  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     24       1.1     rmind  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     25       1.1     rmind  * POSSIBILITY OF SUCH DAMAGE.
     26       1.1     rmind  */
     27       1.1     rmind 
     28       1.1     rmind /*
     29       1.1     rmind  * TODO:
     30       1.1     rmind  *   1. Additional work for VCHR and maybe VBLK devices.
     31       1.1     rmind  *   2. Consider making the job-finding O(n) per one file descriptor.
     32       1.1     rmind  */
     33       1.1     rmind 
     34       1.1     rmind #include <sys/cdefs.h>
     35  1.6.10.1      matt __KERNEL_RCSID(0, "$NetBSD: sys_aio.c,v 1.6.10.1 2007/11/06 23:32:21 matt Exp $");
     36       1.4     rmind 
     37       1.4     rmind #include "opt_ddb.h"
     38       1.1     rmind 
     39       1.1     rmind #include <sys/param.h>
     40       1.1     rmind #include <sys/condvar.h>
     41       1.1     rmind #include <sys/file.h>
     42       1.1     rmind #include <sys/filedesc.h>
     43       1.1     rmind #include <sys/kernel.h>
     44       1.1     rmind #include <sys/kmem.h>
     45       1.1     rmind #include <sys/lwp.h>
     46       1.1     rmind #include <sys/mutex.h>
     47       1.1     rmind #include <sys/pool.h>
     48       1.1     rmind #include <sys/proc.h>
     49       1.1     rmind #include <sys/queue.h>
     50       1.1     rmind #include <sys/signal.h>
     51       1.1     rmind #include <sys/signalvar.h>
     52       1.1     rmind #include <sys/syscallargs.h>
     53       1.1     rmind #include <sys/sysctl.h>
     54       1.1     rmind #include <sys/systm.h>
     55       1.1     rmind #include <sys/types.h>
     56       1.1     rmind #include <sys/vnode.h>
     57       1.1     rmind 
     58       1.1     rmind #include <uvm/uvm_extern.h>
     59       1.1     rmind 
     60       1.1     rmind /*
     61       1.1     rmind  * System-wide limits and counter of AIO operations.
     62       1.1     rmind  * XXXSMP: We should spin-lock it, or modify atomically.
     63       1.1     rmind  */
     64       1.4     rmind static u_int aio_listio_max = AIO_LISTIO_MAX;
     65       1.4     rmind static u_int aio_max = AIO_MAX;
     66       1.4     rmind static u_int aio_jobs_count;
     67       1.1     rmind 
     68       1.4     rmind static struct pool aio_job_pool;
     69       1.4     rmind static struct pool aio_lio_pool;
     70       1.1     rmind 
     71       1.1     rmind /* Prototypes */
     72       1.1     rmind void aio_worker(void *);
     73       1.1     rmind static void aio_process(struct aio_job *);
     74       1.1     rmind static void aio_sendsig(struct proc *, struct sigevent *);
     75       1.1     rmind static int aio_enqueue_job(int, void *, struct lio_req *);
     76       1.1     rmind 
     77       1.1     rmind /*
     78       1.4     rmind  * Initialize the AIO system.
     79       1.4     rmind  */
     80       1.4     rmind void
     81       1.4     rmind aio_sysinit(void)
     82       1.4     rmind {
     83       1.4     rmind 
     84       1.4     rmind 	pool_init(&aio_job_pool, sizeof(struct aio_job), 0, 0, 0,
     85       1.4     rmind 	    "aio_jobs_pool", &pool_allocator_nointr, IPL_NONE);
     86       1.4     rmind 	pool_init(&aio_lio_pool, sizeof(struct lio_req), 0, 0, 0,
     87       1.4     rmind 	    "aio_lio_pool", &pool_allocator_nointr, IPL_NONE);
     88       1.4     rmind }
     89       1.4     rmind 
/*
 * Initialize Asynchronous I/O data structures for the process.
 *
 * Allocates the per-process aioproc structure, creates the AIO worker
 * LWP and makes it runnable.  May be called concurrently from several
 * threads of the same process; the p_mutex-protected recheck below
 * ensures only one aioproc ever becomes p->p_aio.
 *
 * Returns 0 on success, EAGAIN if memory/uarea allocation fails, or
 * the error from lwp_create().
 */
int
aio_init(struct proc *p)
{
	struct aioproc *aio;
	struct lwp *l;
	int error;
	bool inmem;
	vaddr_t uaddr;

	/* Allocate and initialize AIO structure */
	aio = kmem_zalloc(sizeof(struct aioproc), KM_NOSLEEP);
	if (aio == NULL)
		return EAGAIN;

	/* Initialize queue and their synchronization structures */
	mutex_init(&aio->aio_mtx, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&aio->aio_worker_cv, "aiowork");
	cv_init(&aio->done_cv, "aiodone");
	TAILQ_INIT(&aio->jobs_queue);

	/*
	 * Create an AIO worker thread.
	 * XXX: Currently, AIO thread is not protected against user's actions.
	 */
	inmem = uvm_uarea_alloc(&uaddr);
	if (uaddr == 0) {
		/* No uarea - undo the allocations made above */
		aio_exit(p, aio);
		return EAGAIN;
	}
	error = lwp_create(curlwp, p, uaddr, inmem, 0, NULL, 0, aio_worker,
	    NULL, &l, curlwp->l_class);
	if (error != 0) {
		/* LWP creation failed - release the uarea and the aioproc */
		uvm_uarea_free(uaddr, curcpu());
		aio_exit(p, aio);
		return error;
	}

	/*
	 * Recheck if we are really first.  If another thread raced us here
	 * and already installed p->p_aio, discard our structures and the
	 * freshly created (never-run) LWP, and report success.
	 */
	mutex_enter(&p->p_mutex);
	if (p->p_aio) {
		mutex_exit(&p->p_mutex);
		aio_exit(p, aio);
		lwp_exit(l);
		return 0;
	}
	p->p_aio = aio;
	mutex_exit(&p->p_mutex);

	/* Complete the initialization of thread, and run it */
	mutex_enter(&p->p_smutex);
	aio->aio_worker = l;
	p->p_nrlwps++;
	lwp_lock(l);
	l->l_stat = LSRUN;
	l->l_priority = PRI_KERNEL - 1;
	sched_enqueue(l, false);
	lwp_unlock(l);
	mutex_exit(&p->p_smutex);

	return 0;
}
    154       1.1     rmind 
    155       1.1     rmind /*
    156       1.1     rmind  * Exit of Asynchronous I/O subsystem of process.
    157       1.1     rmind  */
    158       1.1     rmind void
    159       1.5     rmind aio_exit(struct proc *p, struct aioproc *aio)
    160       1.1     rmind {
    161       1.1     rmind 	struct aio_job *a_job;
    162       1.1     rmind 
    163       1.5     rmind 	if (aio == NULL)
    164       1.1     rmind 		return;
    165       1.1     rmind 
    166       1.1     rmind 	/* Free AIO queue */
    167       1.1     rmind 	while (!TAILQ_EMPTY(&aio->jobs_queue)) {
    168       1.1     rmind 		a_job = TAILQ_FIRST(&aio->jobs_queue);
    169       1.1     rmind 		TAILQ_REMOVE(&aio->jobs_queue, a_job, list);
    170       1.4     rmind 		pool_put(&aio_job_pool, a_job);
    171       1.1     rmind 		aio_jobs_count--; /* XXXSMP */
    172       1.1     rmind 	}
    173       1.1     rmind 
    174       1.1     rmind 	/* Destroy and free the entire AIO data structure */
    175       1.1     rmind 	cv_destroy(&aio->aio_worker_cv);
    176       1.1     rmind 	cv_destroy(&aio->done_cv);
    177       1.1     rmind 	mutex_destroy(&aio->aio_mtx);
    178       1.1     rmind 	kmem_free(aio, sizeof(struct aioproc));
    179       1.1     rmind }
    180       1.1     rmind 
/*
 * AIO worker thread and processor.
 *
 * Per-process kernel thread, created by aio_init().  It loops forever:
 * waits for jobs on aio->jobs_queue, runs each one via aio_process(),
 * copies the updated aiocb back to user-space, wakes aio_suspend()
 * waiters and delivers any requested completion signals.  It never
 * returns; the thread is torn down with the process.
 */
void
aio_worker(void *arg)
{
	struct proc *p = curlwp->l_proc;
	struct aioproc *aio = p->p_aio;
	struct aio_job *a_job;
	struct lio_req *lio;
	sigset_t oss, nss;
	int error, refcnt;

	/*
	 * Make an empty signal mask, so it
	 * handles only SIGKILL and SIGSTOP.
	 */
	sigfillset(&nss);
	mutex_enter(&p->p_smutex);
	error = sigprocmask1(curlwp, SIG_SETMASK, &nss, &oss);
	mutex_exit(&p->p_smutex);
	KASSERT(error == 0);

	for (;;) {
		/*
		 * Loop for each job in the queue.  If there
		 * are no jobs then sleep.
		 */
		mutex_enter(&aio->aio_mtx);
		while ((a_job = TAILQ_FIRST(&aio->jobs_queue)) == NULL) {
			if (cv_wait_sig(&aio->aio_worker_cv, &aio->aio_mtx)) {
				/*
				 * Thread was interrupted - check for
				 * pending exit or suspend.
				 */
				mutex_exit(&aio->aio_mtx);
				lwp_userret(curlwp);
				mutex_enter(&aio->aio_mtx);
			}
		}

		/*
		 * Take the job from the queue.  'curjob' marks the job as
		 * in-progress so aio_cancel()/aio_enqueue_job() can see it
		 * even though it is off the queue.
		 */
		aio->curjob = a_job;
		TAILQ_REMOVE(&aio->jobs_queue, a_job, list);

		aio_jobs_count--; /* XXXSMP */
		aio->jobs_count--;

		mutex_exit(&aio->aio_mtx);

		/* Process an AIO operation */
		aio_process(a_job);

		/*
		 * Copy data structure back to the user-space.  A copyout
		 * failure is deliberately ignored: the result also lives in
		 * the kernel copy and there is no caller to report it to.
		 */
		(void)copyout(&a_job->aiocbp, a_job->aiocb_uptr,
		    sizeof(struct aiocb));

		mutex_enter(&aio->aio_mtx);
		aio->curjob = NULL;

		/* Decrease a reference counter, if there is a LIO structure */
		lio = a_job->lio;
		refcnt = (lio != NULL ? --lio->refcnt : -1);

		/* Notify all suspenders */
		cv_broadcast(&aio->done_cv);
		mutex_exit(&aio->aio_mtx);

		/* Send a signal, if any */
		aio_sendsig(p, &a_job->aiocbp.aio_sigevent);

		/*
		 * Destroy the LIO structure - only the job that drops the
		 * reference count to zero sends the list-completion signal
		 * and frees it.
		 */
		if (refcnt == 0) {
			aio_sendsig(p, &lio->sig);
			pool_put(&aio_lio_pool, lio);
		}

		/* Destroy the job */
		pool_put(&aio_job_pool, a_job);
	}

	/* NOTREACHED */
}
    264       1.1     rmind 
/*
 * Perform a single AIO job on behalf of the owning process: a read,
 * a write, or a file sync, selected by a_job->aio_op.  Runs in the
 * context of the per-process AIO worker thread.
 *
 * On return the result is recorded in the kernel copy of the aiocb:
 * _retval (bytes transferred, 0 for sync, or -1 on error), _errno,
 * and _state set to JOB_DONE.  The caller copies it to user-space.
 */
static void
aio_process(struct aio_job *a_job)
{
	struct proc *p = curlwp->l_proc;
	struct aiocb *aiocbp = &a_job->aiocbp;
	struct file *fp;
	struct filedesc	*fdp = p->p_fd;
	int fd = aiocbp->aio_fildes;
	int error = 0;

	KASSERT(fdp != NULL);
	KASSERT(a_job->aio_op != 0);

	if ((a_job->aio_op & (AIO_READ | AIO_WRITE)) != 0) {
		struct iovec aiov;
		struct uio auio;

		/* POSIX: a transfer larger than SSIZE_MAX is invalid */
		if (aiocbp->aio_nbytes > SSIZE_MAX) {
			error = EINVAL;
			goto done;
		}

		fp = fd_getfile(fdp, fd);
		if (fp == NULL) {
			error = EBADF;
			goto done;
		}

		/* Single-segment uio targeting the user buffer */
		aiov.iov_base = (void *)(uintptr_t)aiocbp->aio_buf;
		aiov.iov_len = aiocbp->aio_nbytes;
		auio.uio_iov = &aiov;
		auio.uio_iovcnt = 1;
		auio.uio_resid = aiocbp->aio_nbytes;
		auio.uio_vmspace = p->p_vmspace;

		FILE_USE(fp);
		if (a_job->aio_op & AIO_READ) {
			/*
			 * Perform a Read operation
			 */
			KASSERT((a_job->aio_op & AIO_WRITE) == 0);

			if ((fp->f_flag & FREAD) == 0) {
				FILE_UNUSE(fp, curlwp);
				error = EBADF;
				goto done;
			}
			auio.uio_rw = UIO_READ;
			error = (*fp->f_ops->fo_read)(fp, &aiocbp->aio_offset,
			    &auio, fp->f_cred, FOF_UPDATE_OFFSET);
		} else {
			/*
			 * Perform a Write operation
			 */
			KASSERT(a_job->aio_op & AIO_WRITE);

			if ((fp->f_flag & FWRITE) == 0) {
				FILE_UNUSE(fp, curlwp);
				error = EBADF;
				goto done;
			}
			auio.uio_rw = UIO_WRITE;
			error = (*fp->f_ops->fo_write)(fp, &aiocbp->aio_offset,
			    &auio, fp->f_cred, FOF_UPDATE_OFFSET);
		}
		FILE_UNUSE(fp, curlwp);

		/*
		 * Store the result value: aio_nbytes becomes the number of
		 * bytes actually transferred (requested minus residual).
		 */
		a_job->aiocbp.aio_nbytes -= auio.uio_resid;
		a_job->aiocbp._retval = (error == 0) ?
		    a_job->aiocbp.aio_nbytes : -1;

	} else if ((a_job->aio_op & (AIO_SYNC | AIO_DSYNC)) != 0) {
		/*
		 * Perform a file Sync operation
		 */
		struct vnode *vp;

		if ((error = getvnode(fdp, fd, &fp)) != 0)
			goto done;

		if ((fp->f_flag & FWRITE) == 0) {
			FILE_UNUSE(fp, curlwp);
			error = EBADF;
			goto done;
		}

		vp = (struct vnode *)fp->f_data;
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		if (a_job->aio_op & AIO_DSYNC) {
			/* aio_fsync(O_DSYNC): flush data only */
			error = VOP_FSYNC(vp, fp->f_cred,
			    FSYNC_WAIT | FSYNC_DATAONLY, 0, 0, curlwp);
		} else if (a_job->aio_op & AIO_SYNC) {
			/* aio_fsync(O_SYNC): flush data and metadata */
			error = VOP_FSYNC(vp, fp->f_cred,
			    FSYNC_WAIT, 0, 0, curlwp);
			/* Soft-dependency hook for filesystems that use it */
			if (error == 0 && bioopsp != NULL &&
			    vp->v_mount &&
			    (vp->v_mount->mnt_flag & MNT_SOFTDEP))
			    bioopsp->io_fsync(vp, 0);
		}
		VOP_UNLOCK(vp, 0);
		FILE_UNUSE(fp, curlwp);

		/* Store the result value */
		a_job->aiocbp._retval = (error == 0) ? 0 : -1;

	} else
		panic("aio_process: invalid operation code\n");

done:
	/* Job is done, set the error, if any */
	a_job->aiocbp._errno = error;
	a_job->aiocbp._state = JOB_DONE;
}
    379       1.1     rmind 
    380       1.1     rmind /*
    381       1.1     rmind  * Send AIO signal.
    382       1.1     rmind  */
    383       1.1     rmind static void
    384       1.1     rmind aio_sendsig(struct proc *p, struct sigevent *sig)
    385       1.1     rmind {
    386       1.1     rmind 	ksiginfo_t ksi;
    387       1.1     rmind 
    388       1.1     rmind 	if (sig->sigev_signo == 0 || sig->sigev_notify == SIGEV_NONE)
    389       1.1     rmind 		return;
    390       1.1     rmind 
    391       1.1     rmind 	KSI_INIT(&ksi);
    392       1.1     rmind 	ksi.ksi_signo = sig->sigev_signo;
    393       1.1     rmind 	ksi.ksi_code = SI_ASYNCIO;
    394       1.3  christos 	ksi.ksi_value = sig->sigev_value;
    395       1.1     rmind 	mutex_enter(&proclist_mutex);
    396       1.1     rmind 	kpsignal(p, &ksi, NULL);
    397       1.1     rmind 	mutex_exit(&proclist_mutex);
    398       1.1     rmind }
    399       1.1     rmind 
/*
 * Enqueue the job.
 *
 * Validates the user-supplied aiocb at 'aiocb_uptr', allocates a
 * kernel-side aio_job for operation 'op' (AIO_READ/AIO_WRITE/
 * AIO_SYNC/AIO_DSYNC, or AIO_LIO which is resolved from the aiocb's
 * aio_lio_opcode), appends it to the per-process queue and wakes the
 * worker thread.  'lio' is non-NULL only on the lio_listio() path and
 * receives one reference per enqueued job.
 *
 * Returns 0 on success (including an ignored LIO_NOP), or EAGAIN /
 * EINVAL / a copyin/copyout errno.  Per POSIX, later I/O errors are
 * reported only via aio_error()/aio_return().
 */
static int
aio_enqueue_job(int op, void *aiocb_uptr, struct lio_req *lio)
{
	struct proc *p = curlwp->l_proc;
	struct aioproc *aio;
	struct aio_job *a_job;
	struct aiocb aiocbp;
	struct sigevent *sig;
	int error;

	/* Check for the limit */
	if (aio_jobs_count + 1 > aio_max) /* XXXSMP */
		return EAGAIN;

	/* Get the data structure from user-space */
	error = copyin(aiocb_uptr, &aiocbp, sizeof(struct aiocb));
	if (error)
		return error;

	/* Check if signal is set, and validate it */
	sig = &aiocbp.aio_sigevent;
	if (sig->sigev_signo < 0 || sig->sigev_signo >= NSIG ||
	    sig->sigev_notify < SIGEV_NONE || sig->sigev_notify > SIGEV_SA)
		return EINVAL;

	/* Buffer and byte count - only meaningful for read/write jobs */
	if (((AIO_SYNC | AIO_DSYNC) & op) == 0)
		if (aiocbp.aio_buf == NULL || aiocbp.aio_nbytes > SSIZE_MAX)
			return EINVAL;

	/* Check the opcode, if LIO_NOP - simply ignore */
	if (op == AIO_LIO) {
		KASSERT(lio != NULL);
		if (aiocbp.aio_lio_opcode == LIO_WRITE)
			op = AIO_WRITE;
		else if (aiocbp.aio_lio_opcode == LIO_READ)
			op = AIO_READ;
		else
			return (aiocbp.aio_lio_opcode == LIO_NOP) ? 0 : EINVAL;
	} else {
		KASSERT(lio == NULL);
	}

	/*
	 * Look for already existing job.  If found - the job is in-progress.
	 * According to POSIX this is invalid, so return the error.
	 * The user-space aiocb pointer identifies the job; both the
	 * currently running job and the queued jobs are checked.
	 */
	aio = p->p_aio;
	if (aio) {
		mutex_enter(&aio->aio_mtx);
		if (aio->curjob) {
			a_job = aio->curjob;
			if (a_job->aiocb_uptr == aiocb_uptr) {
				mutex_exit(&aio->aio_mtx);
				return EINVAL;
			}
		}
		TAILQ_FOREACH(a_job, &aio->jobs_queue, list) {
			if (a_job->aiocb_uptr != aiocb_uptr)
				continue;
			mutex_exit(&aio->aio_mtx);
			return EINVAL;
		}
		mutex_exit(&aio->aio_mtx);
	}

	/*
	 * Check if AIO structure is initialized, if not - initialize it.
	 * In LIO case, we did that already.  We will recheck this with
	 * the lock in aio_init().
	 */
	if (lio == NULL && p->p_aio == NULL)
		if (aio_init(p))
			return EAGAIN;
	aio = p->p_aio;

	/*
	 * Set the state with errno, and copy data
	 * structure back to the user-space.
	 */
	aiocbp._state = JOB_WIP;
	aiocbp._errno = EINPROGRESS;
	aiocbp._retval = -1;
	error = copyout(&aiocbp, aiocb_uptr, sizeof(struct aiocb));
	if (error)
		return error;

	/* Allocate and initialize a new AIO job */
	a_job = pool_get(&aio_job_pool, PR_WAITOK);
	memset(a_job, 0, sizeof(struct aio_job));

	/*
	 * Set the data.
	 * Store the user-space pointer for searching.  Since we
	 * are storing only per proc pointers - it is safe.
	 */
	memcpy(&a_job->aiocbp, &aiocbp, sizeof(struct aiocb));
	a_job->aiocb_uptr = aiocb_uptr;
	a_job->aio_op |= op;
	a_job->lio = lio;

	/*
	 * Add the job to the queue, update the counters, and
	 * notify the AIO worker thread to handle the job.
	 */
	mutex_enter(&aio->aio_mtx);

	/* Fail, if the limit was reached */
	if (aio->jobs_count >= aio_listio_max) {
		mutex_exit(&aio->aio_mtx);
		pool_put(&aio_job_pool, a_job);
		return EAGAIN;
	}

	TAILQ_INSERT_TAIL(&aio->jobs_queue, a_job, list);
	aio_jobs_count++; /* XXXSMP */
	aio->jobs_count++;
	if (lio)
		lio->refcnt++;
	cv_signal(&aio->aio_worker_cv);

	mutex_exit(&aio->aio_mtx);

	/*
	 * One would handle the errors only with aio_error() function.
	 * This way is appropriate according to POSIX.
	 */
	return 0;
}
    532       1.1     rmind 
    533       1.1     rmind /*
    534       1.1     rmind  * Syscall functions.
    535       1.1     rmind  */
    536       1.1     rmind 
/*
 * aio_cancel(2) system call.
 *
 * Cancels queued (not yet started) AIO jobs on file descriptor
 * 'fildes'; if 'aiocbp' is non-NULL only that one job is targeted.
 * Works in two phases: under the AIO lock, matching jobs are moved to
 * a private list; after dropping the lock, each is marked ECANCELED,
 * copied back to user-space, signalled and freed.
 *
 * *retval is AIO_CANCELED if anything was cancelled, AIO_NOTCANCELED
 * if the matching job is currently being processed (it cannot be
 * stopped), otherwise AIO_ALLDONE.
 */
int
sys_aio_cancel(struct lwp *l, void *v, register_t *retval)
{
	struct sys_aio_cancel_args /* {
		syscallarg(int) fildes;
		syscallarg(struct aiocb *) aiocbp;
	} */ *uap = v;
	struct proc *p = l->l_proc;
	struct aioproc *aio;
	struct aio_job *a_job;
	struct aiocb *aiocbp_ptr;
	struct lio_req *lio;
	struct filedesc	*fdp = p->p_fd;
	unsigned int cn, errcnt, fildes;

	TAILQ_HEAD(, aio_job) tmp_jobs_list;

	/* Check for invalid file descriptor */
	fildes = (unsigned int)SCARG(uap, fildes);
	if (fildes >= fdp->fd_nfiles || fdp->fd_ofiles[fildes] == NULL)
		return EBADF;

	/* Check if AIO structure is initialized */
	if (p->p_aio == NULL) {
		*retval = AIO_NOTCANCELED;
		return 0;
	}

	aio = p->p_aio;
	aiocbp_ptr = (struct aiocb *)SCARG(uap, aiocbp);

	mutex_enter(&aio->aio_mtx);

	/* Cancel the jobs, and remove them from the queue */
	cn = 0;
	TAILQ_INIT(&tmp_jobs_list);
	TAILQ_FOREACH(a_job, &aio->jobs_queue, list) {
		if (aiocbp_ptr) {
			/* Single-job cancel: match by user-space pointer */
			if (aiocbp_ptr != a_job->aiocb_uptr)
				continue;
			if (fildes != a_job->aiocbp.aio_fildes) {
				mutex_exit(&aio->aio_mtx);
				return EBADF;
			}
		} else if (a_job->aiocbp.aio_fildes != fildes)
			continue;

		TAILQ_REMOVE(&aio->jobs_queue, a_job, list);
		TAILQ_INSERT_TAIL(&tmp_jobs_list, a_job, list);

		/* Decrease the counters */
		aio_jobs_count--; /* XXXSMP */
		aio->jobs_count--;
		/*
		 * Drop one LIO reference.  If other jobs of the list
		 * remain, clear a_job->lio so the free loop below does
		 * not destroy the shared lio_req; if this was the last
		 * reference, keep the pointer so it is freed (and the
		 * list-completion signal sent) below.
		 */
		lio = a_job->lio;
		if (lio != NULL && --lio->refcnt != 0)
			a_job->lio = NULL;

		cn++;
		if (aiocbp_ptr)
			break;
	}

	/* There are canceled jobs */
	if (cn)
		*retval = AIO_CANCELED;

	/* We cannot cancel current job */
	a_job = aio->curjob;
	if (a_job && ((a_job->aiocbp.aio_fildes == fildes) ||
	    (a_job->aiocb_uptr == aiocbp_ptr)))
		*retval = AIO_NOTCANCELED;

	mutex_exit(&aio->aio_mtx);

	/* Free the jobs after the lock */
	errcnt = 0;
	while (!TAILQ_EMPTY(&tmp_jobs_list)) {
		a_job = TAILQ_FIRST(&tmp_jobs_list);
		TAILQ_REMOVE(&tmp_jobs_list, a_job, list);
		/* Set the errno and copy structures back to the user-space */
		a_job->aiocbp._errno = ECANCELED;
		a_job->aiocbp._state = JOB_DONE;
		if (copyout(&a_job->aiocbp, a_job->aiocb_uptr,
		    sizeof(struct aiocb)))
			errcnt++;
		/* Send a signal if any */
		aio_sendsig(p, &a_job->aiocbp.aio_sigevent);
		/* Last LIO reference: signal list completion and free it */
		if (a_job->lio) {
			lio = a_job->lio;
			aio_sendsig(p, &lio->sig);
			pool_put(&aio_lio_pool, lio);
		}
		pool_put(&aio_job_pool, a_job);
	}

	if (errcnt)
		return EFAULT;

	/* Set a correct return value */
	if (*retval == 0)
		*retval = AIO_ALLDONE;

	return 0;
}
    641       1.1     rmind 
    642       1.1     rmind int
    643       1.1     rmind sys_aio_error(struct lwp *l, void *v, register_t *retval)
    644       1.1     rmind {
    645       1.1     rmind 	struct sys_aio_error_args /* {
    646       1.1     rmind 		syscallarg(const struct aiocb *) aiocbp;
    647       1.1     rmind 	} */ *uap = v;
    648       1.1     rmind 	struct proc *p = l->l_proc;
    649       1.1     rmind 	struct aioproc *aio = p->p_aio;
    650       1.1     rmind 	struct aiocb aiocbp;
    651       1.1     rmind 	int error;
    652       1.1     rmind 
    653       1.1     rmind 	if (aio == NULL)
    654       1.1     rmind 		return EINVAL;
    655       1.1     rmind 
    656       1.1     rmind 	error = copyin(SCARG(uap, aiocbp), &aiocbp, sizeof(struct aiocb));
    657       1.1     rmind 	if (error)
    658       1.1     rmind 		return error;
    659       1.1     rmind 
    660       1.1     rmind 	if (aiocbp._state == JOB_NONE)
    661       1.1     rmind 		return EINVAL;
    662       1.1     rmind 
    663       1.1     rmind 	*retval = aiocbp._errno;
    664       1.1     rmind 
    665       1.1     rmind 	return 0;
    666       1.1     rmind }
    667       1.1     rmind 
    668       1.1     rmind int
    669       1.1     rmind sys_aio_fsync(struct lwp *l, void *v, register_t *retval)
    670       1.1     rmind {
    671       1.1     rmind 	struct sys_aio_fsync_args /* {
    672       1.1     rmind 		syscallarg(int) op;
    673       1.1     rmind 		syscallarg(struct aiocb *) aiocbp;
    674       1.1     rmind 	} */ *uap = v;
    675       1.1     rmind 	int op = SCARG(uap, op);
    676       1.1     rmind 
    677       1.1     rmind 	if ((op != O_DSYNC) && (op != O_SYNC))
    678       1.1     rmind 		return EINVAL;
    679       1.1     rmind 
    680       1.1     rmind 	op = O_DSYNC ? AIO_DSYNC : AIO_SYNC;
    681       1.1     rmind 
    682       1.1     rmind 	return aio_enqueue_job(op, SCARG(uap, aiocbp), NULL);
    683       1.1     rmind }
    684       1.1     rmind 
    685       1.1     rmind int
    686       1.1     rmind sys_aio_read(struct lwp *l, void *v, register_t *retval)
    687       1.1     rmind {
    688       1.1     rmind 	struct sys_aio_read_args /* {
    689       1.1     rmind 		syscallarg(struct aiocb *) aiocbp;
    690       1.1     rmind 	} */ *uap = v;
    691       1.1     rmind 
    692       1.1     rmind 	return aio_enqueue_job(AIO_READ, SCARG(uap, aiocbp), NULL);
    693       1.1     rmind }
    694       1.1     rmind 
    695       1.1     rmind int
    696       1.1     rmind sys_aio_return(struct lwp *l, void *v, register_t *retval)
    697       1.1     rmind {
    698       1.1     rmind 	struct sys_aio_return_args /* {
    699       1.1     rmind 		syscallarg(struct aiocb *) aiocbp;
    700       1.1     rmind 	} */ *uap = v;
    701       1.1     rmind 	struct proc *p = l->l_proc;
    702       1.1     rmind 	struct aioproc *aio = p->p_aio;
    703       1.1     rmind 	struct aiocb aiocbp;
    704       1.1     rmind 	int error;
    705       1.1     rmind 
    706       1.1     rmind 	if (aio == NULL)
    707       1.1     rmind 		return EINVAL;
    708       1.1     rmind 
    709       1.1     rmind 	error = copyin(SCARG(uap, aiocbp), &aiocbp, sizeof(struct aiocb));
    710       1.1     rmind 	if (error)
    711       1.1     rmind 		return error;
    712       1.1     rmind 
    713       1.1     rmind 	if (aiocbp._errno == EINPROGRESS || aiocbp._state != JOB_DONE)
    714       1.1     rmind 		return EINVAL;
    715       1.1     rmind 
    716       1.1     rmind 	*retval = aiocbp._retval;
    717       1.1     rmind 
    718       1.1     rmind 	/* Reset the internal variables */
    719       1.1     rmind 	aiocbp._errno = 0;
    720       1.1     rmind 	aiocbp._retval = -1;
    721       1.1     rmind 	aiocbp._state = JOB_NONE;
    722       1.1     rmind 	error = copyout(&aiocbp, SCARG(uap, aiocbp), sizeof(struct aiocb));
    723       1.1     rmind 
    724       1.1     rmind 	return error;
    725       1.1     rmind }
    726       1.1     rmind 
    727       1.1     rmind int
    728       1.1     rmind sys_aio_suspend(struct lwp *l, void *v, register_t *retval)
    729       1.1     rmind {
    730       1.1     rmind 	struct sys_aio_suspend_args /* {
    731       1.1     rmind 		syscallarg(const struct aiocb *const[]) list;
    732       1.1     rmind 		syscallarg(int) nent;
    733       1.1     rmind 		syscallarg(const struct timespec *) timeout;
    734       1.1     rmind 	} */ *uap = v;
    735       1.1     rmind 	struct proc *p = l->l_proc;
    736       1.1     rmind 	struct aioproc *aio;
    737       1.1     rmind 	struct aio_job *a_job;
    738       1.1     rmind 	struct aiocb **aiocbp_list;
    739       1.1     rmind 	struct timespec ts;
    740       1.1     rmind 	int i, error, nent, timo;
    741       1.1     rmind 
    742       1.1     rmind 	if (p->p_aio == NULL)
    743       1.1     rmind 		return EAGAIN;
    744       1.1     rmind 	aio = p->p_aio;
    745       1.1     rmind 
    746       1.1     rmind 	nent = SCARG(uap, nent);
    747       1.1     rmind 	if (nent <= 0 || nent > aio_listio_max)
    748       1.1     rmind 		return EAGAIN;
    749       1.1     rmind 
    750       1.1     rmind 	if (SCARG(uap, timeout)) {
    751       1.1     rmind 		/* Convert timespec to ticks */
    752       1.1     rmind 		error = copyin(SCARG(uap, timeout), &ts,
    753       1.1     rmind 		    sizeof(struct timespec));
    754       1.1     rmind 		if (error)
    755       1.1     rmind 			return error;
    756       1.1     rmind 		timo = mstohz((ts.tv_sec * 1000) + (ts.tv_nsec / 1000000));
    757       1.1     rmind 		if (timo == 0 && ts.tv_sec == 0 && ts.tv_nsec > 0)
    758       1.1     rmind 			timo = 1;
    759       1.1     rmind 		if (timo <= 0)
    760       1.1     rmind 			return EAGAIN;
    761       1.1     rmind 	} else
    762       1.1     rmind 		timo = 0;
    763       1.1     rmind 
    764       1.1     rmind 	/* Get the list from user-space */
    765       1.1     rmind 	aiocbp_list = kmem_zalloc(nent * sizeof(struct aio_job), KM_SLEEP);
    766       1.1     rmind 	error = copyin(SCARG(uap, list), aiocbp_list,
    767       1.1     rmind 	    nent * sizeof(struct aiocb));
    768       1.1     rmind 	if (error) {
    769       1.1     rmind 		kmem_free(aiocbp_list, nent * sizeof(struct aio_job));
    770       1.1     rmind 		return error;
    771       1.1     rmind 	}
    772       1.1     rmind 
    773       1.1     rmind 	mutex_enter(&aio->aio_mtx);
    774       1.1     rmind 	for (;;) {
    775       1.1     rmind 
    776       1.1     rmind 		for (i = 0; i < nent; i++) {
    777       1.1     rmind 
    778       1.1     rmind 			/* Skip NULL entries */
    779       1.1     rmind 			if (aiocbp_list[i] == NULL)
    780       1.1     rmind 				continue;
    781       1.1     rmind 
    782       1.1     rmind 			/* Skip current job */
    783       1.1     rmind 			if (aio->curjob) {
    784       1.1     rmind 				a_job = aio->curjob;
    785       1.1     rmind 				if (a_job->aiocb_uptr == aiocbp_list[i])
    786       1.1     rmind 					continue;
    787       1.1     rmind 			}
    788       1.1     rmind 
    789       1.1     rmind 			/* Look for a job in the queue */
    790       1.1     rmind 			TAILQ_FOREACH(a_job, &aio->jobs_queue, list)
    791       1.1     rmind 				if (a_job->aiocb_uptr == aiocbp_list[i])
    792       1.1     rmind 					break;
    793       1.1     rmind 
    794       1.1     rmind 			if (a_job == NULL) {
    795       1.1     rmind 				struct aiocb aiocbp;
    796       1.1     rmind 
    797       1.1     rmind 				mutex_exit(&aio->aio_mtx);
    798       1.1     rmind 
    799       1.1     rmind 				error = copyin(aiocbp_list[i], &aiocbp,
    800       1.1     rmind 				    sizeof(struct aiocb));
    801       1.1     rmind 				if (error == 0 && aiocbp._state != JOB_DONE) {
    802       1.1     rmind 					mutex_enter(&aio->aio_mtx);
    803       1.1     rmind 					continue;
    804       1.1     rmind 				}
    805       1.1     rmind 
    806       1.1     rmind 				kmem_free(aiocbp_list,
    807       1.1     rmind 				    nent * sizeof(struct aio_job));
    808       1.1     rmind 				return error;
    809       1.1     rmind 			}
    810       1.1     rmind 		}
    811       1.1     rmind 
    812       1.1     rmind 		/* Wait for a signal or when timeout occurs */
    813       1.1     rmind 		error = cv_timedwait_sig(&aio->done_cv, &aio->aio_mtx, timo);
    814       1.1     rmind 		if (error) {
    815       1.1     rmind 			if (error == EWOULDBLOCK)
    816       1.1     rmind 				error = EAGAIN;
    817       1.1     rmind 			break;
    818       1.1     rmind 		}
    819       1.1     rmind 	}
    820       1.1     rmind 	mutex_exit(&aio->aio_mtx);
    821       1.1     rmind 
    822       1.1     rmind 	kmem_free(aiocbp_list, nent * sizeof(struct aio_job));
    823       1.1     rmind 	return error;
    824       1.1     rmind }
    825       1.1     rmind 
    826       1.1     rmind int
    827       1.1     rmind sys_aio_write(struct lwp *l, void *v, register_t *retval)
    828       1.1     rmind {
    829       1.1     rmind 	struct sys_aio_write_args /* {
    830       1.1     rmind 		syscallarg(struct aiocb *) aiocbp;
    831       1.1     rmind 	} */ *uap = v;
    832       1.1     rmind 
    833       1.1     rmind 	return aio_enqueue_job(AIO_WRITE, SCARG(uap, aiocbp), NULL);
    834       1.1     rmind }
    835       1.1     rmind 
    836       1.1     rmind int
    837       1.1     rmind sys_lio_listio(struct lwp *l, void *v, register_t *retval)
    838       1.1     rmind {
    839       1.1     rmind 	struct sys_lio_listio_args /* {
    840       1.1     rmind 		syscallarg(int) mode;
    841       1.1     rmind 		syscallarg(struct aiocb *const[]) list;
    842       1.1     rmind 		syscallarg(int) nent;
    843       1.1     rmind 		syscallarg(struct sigevent *) sig;
    844       1.1     rmind 	} */ *uap = v;
    845       1.1     rmind 	struct proc *p = l->l_proc;
    846       1.1     rmind 	struct aioproc *aio;
    847       1.1     rmind 	struct aiocb **aiocbp_list;
    848       1.1     rmind 	struct lio_req *lio;
    849       1.1     rmind 	int i, error, errcnt, mode, nent;
    850       1.1     rmind 
    851       1.1     rmind 	mode = SCARG(uap, mode);
    852       1.1     rmind 	nent = SCARG(uap, nent);
    853       1.1     rmind 
    854       1.1     rmind 	/* Check for the limits, and invalid values */
    855       1.1     rmind 	if (nent < 1 || nent > aio_listio_max)
    856       1.1     rmind 		return EINVAL;
    857       1.1     rmind 	if (aio_jobs_count + nent > aio_max) /* XXXSMP */
    858       1.1     rmind 		return EAGAIN;
    859       1.1     rmind 
    860       1.1     rmind 	/* Check if AIO structure is initialized, if not - initialize it */
    861       1.1     rmind 	if (p->p_aio == NULL)
    862       1.1     rmind 		if (aio_init(p))
    863       1.1     rmind 			return EAGAIN;
    864       1.1     rmind 	aio = p->p_aio;
    865       1.1     rmind 
    866       1.1     rmind 	/* Create a LIO structure */
    867       1.4     rmind 	lio = pool_get(&aio_lio_pool, PR_WAITOK);
    868       1.4     rmind 	lio->refcnt = 1;
    869       1.4     rmind 	error = 0;
    870       1.4     rmind 
    871       1.4     rmind 	switch (mode) {
    872       1.4     rmind 	case LIO_WAIT:
    873       1.1     rmind 		memset(&lio->sig, 0, sizeof(struct sigevent));
    874       1.4     rmind 		break;
    875       1.4     rmind 	case LIO_NOWAIT:
    876       1.4     rmind 		/* Check for signal, validate it */
    877       1.4     rmind 		if (SCARG(uap, sig)) {
    878       1.4     rmind 			struct sigevent *sig = &lio->sig;
    879       1.4     rmind 
    880       1.4     rmind 			error = copyin(SCARG(uap, sig), &lio->sig,
    881       1.4     rmind 			    sizeof(struct sigevent));
    882       1.4     rmind 			if (error == 0 &&
    883       1.4     rmind 			    (sig->sigev_signo < 0 ||
    884       1.4     rmind 			    sig->sigev_signo >= NSIG ||
    885       1.4     rmind 			    sig->sigev_notify < SIGEV_NONE ||
    886       1.4     rmind 			    sig->sigev_notify > SIGEV_SA))
    887       1.4     rmind 				error = EINVAL;
    888       1.4     rmind 		} else
    889       1.4     rmind 			memset(&lio->sig, 0, sizeof(struct sigevent));
    890       1.4     rmind 		break;
    891       1.4     rmind 	default:
    892       1.4     rmind 		error = EINVAL;
    893       1.4     rmind 		break;
    894       1.4     rmind 	}
    895       1.4     rmind 
    896       1.4     rmind 	if (error != 0) {
    897       1.4     rmind 		pool_put(&aio_lio_pool, lio);
    898       1.4     rmind 		return error;
    899       1.4     rmind 	}
    900       1.1     rmind 
    901       1.1     rmind 	/* Get the list from user-space */
    902       1.1     rmind 	aiocbp_list = kmem_zalloc(nent * sizeof(struct aio_job), KM_SLEEP);
    903       1.1     rmind 	error = copyin(SCARG(uap, list), aiocbp_list,
    904       1.1     rmind 	    nent * sizeof(struct aiocb));
    905       1.4     rmind 	if (error) {
    906       1.4     rmind 		mutex_enter(&aio->aio_mtx);
    907       1.1     rmind 		goto err;
    908       1.4     rmind 	}
    909       1.1     rmind 
    910       1.1     rmind 	/* Enqueue all jobs */
    911       1.1     rmind 	errcnt = 0;
    912       1.1     rmind 	for (i = 0; i < nent; i++) {
    913       1.1     rmind 		error = aio_enqueue_job(AIO_LIO, aiocbp_list[i], lio);
    914       1.1     rmind 		/*
    915       1.1     rmind 		 * According to POSIX, in such error case it may
    916       1.1     rmind 		 * fail with other I/O operations initiated.
    917       1.1     rmind 		 */
    918       1.1     rmind 		if (error)
    919       1.1     rmind 			errcnt++;
    920       1.1     rmind 	}
    921       1.1     rmind 
    922       1.4     rmind 	mutex_enter(&aio->aio_mtx);
    923       1.4     rmind 
    924       1.1     rmind 	/* Return an error, if any */
    925       1.1     rmind 	if (errcnt) {
    926       1.1     rmind 		error = EIO;
    927       1.1     rmind 		goto err;
    928       1.1     rmind 	}
    929       1.1     rmind 
    930       1.1     rmind 	if (mode == LIO_WAIT) {
    931       1.1     rmind 		/*
    932       1.1     rmind 		 * Wait for AIO completion.  In such case,
    933       1.1     rmind 		 * the LIO structure will be freed here.
    934       1.1     rmind 		 */
    935       1.4     rmind 		while (lio->refcnt > 1 && error == 0)
    936       1.1     rmind 			error = cv_wait_sig(&aio->done_cv, &aio->aio_mtx);
    937       1.1     rmind 		if (error)
    938       1.1     rmind 			error = EINTR;
    939       1.1     rmind 	}
    940       1.1     rmind 
    941       1.1     rmind err:
    942       1.4     rmind 	if (--lio->refcnt != 0)
    943       1.4     rmind 		lio = NULL;
    944       1.4     rmind 	mutex_exit(&aio->aio_mtx);
    945       1.4     rmind 	if (lio != NULL) {
    946       1.4     rmind 		aio_sendsig(p, &lio->sig);
    947       1.4     rmind 		pool_put(&aio_lio_pool, lio);
    948       1.4     rmind 	}
    949       1.1     rmind 	kmem_free(aiocbp_list, nent * sizeof(struct aio_job));
    950       1.1     rmind 	return error;
    951       1.1     rmind }
    952       1.1     rmind 
    953       1.1     rmind /*
    954       1.1     rmind  * SysCtl
    955       1.1     rmind  */
    956       1.1     rmind 
    957       1.1     rmind static int
    958       1.1     rmind sysctl_aio_listio_max(SYSCTLFN_ARGS)
    959       1.1     rmind {
    960       1.1     rmind 	struct sysctlnode node;
    961       1.1     rmind 	int error, newsize;
    962       1.1     rmind 
    963       1.1     rmind 	node = *rnode;
    964       1.1     rmind 	node.sysctl_data = &newsize;
    965       1.1     rmind 
    966       1.1     rmind 	newsize = aio_listio_max;
    967       1.1     rmind 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
    968       1.1     rmind 	if (error || newp == NULL)
    969       1.1     rmind 		return error;
    970       1.1     rmind 
    971       1.1     rmind 	/* XXXSMP */
    972       1.1     rmind 	if (newsize < 1 || newsize > aio_max)
    973       1.1     rmind 		return EINVAL;
    974       1.1     rmind 	aio_listio_max = newsize;
    975       1.1     rmind 
    976       1.1     rmind 	return 0;
    977       1.1     rmind }
    978       1.1     rmind 
    979       1.1     rmind static int
    980       1.1     rmind sysctl_aio_max(SYSCTLFN_ARGS)
    981       1.1     rmind {
    982       1.1     rmind 	struct sysctlnode node;
    983       1.1     rmind 	int error, newsize;
    984       1.1     rmind 
    985       1.1     rmind 	node = *rnode;
    986       1.1     rmind 	node.sysctl_data = &newsize;
    987       1.1     rmind 
    988       1.1     rmind 	newsize = aio_max;
    989       1.1     rmind 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
    990       1.1     rmind 	if (error || newp == NULL)
    991       1.1     rmind 		return error;
    992       1.1     rmind 
    993       1.1     rmind 	/* XXXSMP */
    994       1.1     rmind 	if (newsize < 1 || newsize < aio_listio_max)
    995       1.1     rmind 		return EINVAL;
    996       1.1     rmind 	aio_max = newsize;
    997       1.1     rmind 
    998       1.1     rmind 	return 0;
    999       1.1     rmind }
   1000       1.1     rmind 
/*
 * Register the AIO sysctl nodes under kern: the read-only POSIX
 * conformance level and the two tunable limits handled above.
 */
SYSCTL_SETUP(sysctl_aio_setup, "sysctl aio setup")
{

	/* Ensure the parent "kern" node exists */
	sysctl_createv(clog, 0, NULL, NULL,
		CTLFLAG_PERMANENT,
		CTLTYPE_NODE, "kern", NULL,
		NULL, 0, NULL, 0,
		CTL_KERN, CTL_EOL);
	/* kern.posix_aio: immediate constant, no handler needed */
	sysctl_createv(clog, 0, NULL, NULL,
		CTLFLAG_PERMANENT | CTLFLAG_IMMEDIATE,
		CTLTYPE_INT, "posix_aio",
		SYSCTL_DESCR("Version of IEEE Std 1003.1 and its "
			     "Asynchronous I/O option to which the "
			     "system attempts to conform"),
		NULL, _POSIX_ASYNCHRONOUS_IO, NULL, 0,
		CTL_KERN, CTL_CREATE, CTL_EOL);
	/* kern.aio_listio_max: writable, validated by sysctl_aio_listio_max */
	sysctl_createv(clog, 0, NULL, NULL,
		CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
		CTLTYPE_INT, "aio_listio_max",
		SYSCTL_DESCR("Maximum number of asynchronous I/O "
			     "operations in a single list I/O call"),
		sysctl_aio_listio_max, 0, &aio_listio_max, 0,
		CTL_KERN, CTL_CREATE, CTL_EOL);
	/* kern.aio_max: writable, validated by sysctl_aio_max */
	sysctl_createv(clog, 0, NULL, NULL,
		CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
		CTLTYPE_INT, "aio_max",
		SYSCTL_DESCR("Maximum number of asynchronous I/O "
			     "operations"),
		sysctl_aio_max, 0, &aio_max, 0,
		CTL_KERN, CTL_CREATE, CTL_EOL);
}
   1032       1.1     rmind 
   1033       1.1     rmind /*
   1034       1.1     rmind  * Debugging
   1035       1.1     rmind  */
   1036       1.1     rmind #if defined(DDB)
   1037       1.1     rmind void
   1038       1.1     rmind aio_print_jobs(void (*pr)(const char *, ...))
   1039       1.1     rmind {
   1040       1.1     rmind 	struct proc *p = (curlwp == NULL ? NULL : curlwp->l_proc);
   1041       1.1     rmind 	struct aioproc *aio;
   1042       1.1     rmind 	struct aio_job *a_job;
   1043       1.1     rmind 	struct aiocb *aiocbp;
   1044       1.1     rmind 
   1045       1.1     rmind 	if (p == NULL) {
   1046       1.1     rmind 		(*pr)("AIO: We are not in the processes right now.\n");
   1047       1.1     rmind 		return;
   1048       1.1     rmind 	}
   1049       1.1     rmind 
   1050       1.1     rmind 	aio = p->p_aio;
   1051       1.1     rmind 	if (aio == NULL) {
   1052       1.1     rmind 		(*pr)("AIO data is not initialized (PID = %d).\n", p->p_pid);
   1053       1.1     rmind 		return;
   1054       1.1     rmind 	}
   1055       1.1     rmind 
   1056       1.1     rmind 	(*pr)("AIO: PID = %d\n", p->p_pid);
   1057       1.1     rmind 	(*pr)("AIO: Global count of the jobs = %u\n", aio_jobs_count);
   1058       1.1     rmind 	(*pr)("AIO: Count of the jobs = %u\n", aio->jobs_count);
   1059       1.1     rmind 
   1060       1.1     rmind 	if (aio->curjob) {
   1061       1.1     rmind 		a_job = aio->curjob;
   1062       1.1     rmind 		(*pr)("\nAIO current job:\n");
   1063       1.1     rmind 		(*pr)(" opcode = %d, errno = %d, state = %d, aiocb_ptr = %p\n",
   1064       1.1     rmind 		    a_job->aio_op, a_job->aiocbp._errno,
   1065       1.1     rmind 		    a_job->aiocbp._state, a_job->aiocb_uptr);
   1066       1.1     rmind 		aiocbp = &a_job->aiocbp;
   1067       1.1     rmind 		(*pr)("   fd = %d, offset = %u, buf = %p, nbytes = %u\n",
   1068       1.1     rmind 		    aiocbp->aio_fildes, aiocbp->aio_offset,
   1069       1.1     rmind 		    aiocbp->aio_buf, aiocbp->aio_nbytes);
   1070       1.1     rmind 	}
   1071       1.1     rmind 
   1072       1.1     rmind 	(*pr)("\nAIO queue:\n");
   1073       1.1     rmind 	TAILQ_FOREACH(a_job, &aio->jobs_queue, list) {
   1074       1.1     rmind 		(*pr)(" opcode = %d, errno = %d, state = %d, aiocb_ptr = %p\n",
   1075       1.1     rmind 		    a_job->aio_op, a_job->aiocbp._errno,
   1076       1.1     rmind 		    a_job->aiocbp._state, a_job->aiocb_uptr);
   1077       1.1     rmind 		aiocbp = &a_job->aiocbp;
   1078       1.1     rmind 		(*pr)("   fd = %d, offset = %u, buf = %p, nbytes = %u\n",
   1079       1.1     rmind 		    aiocbp->aio_fildes, aiocbp->aio_offset,
   1080       1.1     rmind 		    aiocbp->aio_buf, aiocbp->aio_nbytes);
   1081       1.1     rmind 	}
   1082       1.1     rmind }
   1083       1.1     rmind #endif /* defined(DDB) */
   1084