/*	$NetBSD: dmover_io.c,v 1.43 2014/07/25 08:10:36 dholland Exp $	*/

/*
 * Copyright (c) 2002, 2003 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * dmover_io.c: Support for user-space access to dmover-api
 *
 * This interface is quite simple:
 *
 *	1.  The user opens /dev/dmover, which is a cloning device.  This
 *	    allocates internal state for the session.
 *
 *	2.  The user does a DMIO_SETFUNC to select the data movement
 *	    function.  This actually creates the dmover session.
 *
 *	3.  The user writes request messages to its dmover handle.
 *
 *	4.  The user reads request responses from its dmover handle.
 *
 *	5.  The user closes the file descriptor and the session is
 *	    torn down.
 */
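
/*
 * Illustrative user-space sketch of the protocol above.  This is not
 * compiled as part of this file; the function name "zero" is an
 * assumption chosen for illustration (it takes no inputs), and all
 * error checking is omitted:
 *
 *	int fd = open("/dev/dmover", O_RDWR);
 *
 *	struct dmio_setfunc dsf;
 *	strlcpy(dsf.dsf_name, "zero", sizeof(dsf.dsf_name));
 *	ioctl(fd, DMIO_SETFUNC, &dsf);
 *
 *	char buf[4096];
 *	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
 *	struct dmio_usrreq req;
 *	memset(&req, 0, sizeof(req));
 *	req.req_id = 1;
 *	req.req_outbuf.dmbuf_iov = &iov;
 *	req.req_outbuf.dmbuf_iovcnt = 1;
 *	write(fd, &req, sizeof(req));
 *
 *	Then reap the response; resp.resp_id matches req.req_id and
 *	resp.resp_error is 0 on success:
 *
 *	struct dmio_usrresp resp;
 *	read(fd, &resp, sizeof(resp));
 *	close(fd);
 */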
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: dmover_io.c,v 1.43 2014/07/25 08:10:36 dholland Exp $");

#include <sys/param.h>
#include <sys/queue.h>
#include <sys/conf.h>
#include <sys/pool.h>
#include <sys/proc.h>
#include <sys/poll.h>
#include <sys/malloc.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/filio.h>
#include <sys/select.h>
#include <sys/systm.h>
#include <sys/workqueue.h>
#include <sys/once.h>
#include <sys/stat.h>
#include <sys/kauth.h>
#include <sys/mutex.h>
#include <sys/condvar.h>

#include <uvm/uvm_extern.h>

#include <dev/dmover/dmovervar.h>
#include <dev/dmover/dmover_io.h>

struct dmio_usrreq_state {
	union {
		struct work u_work;
		TAILQ_ENTRY(dmio_usrreq_state) u_q;
	} dus_u;
#define	dus_q		dus_u.u_q
#define	dus_work	dus_u.u_work
	struct uio dus_uio_out;
	struct uio *dus_uio_in;
	struct dmover_request *dus_req;
	uint32_t dus_id;
	struct vmspace *dus_vmspace;
};

struct dmio_state {
	struct dmover_session *ds_session;
	TAILQ_HEAD(, dmio_usrreq_state) ds_pending;
	TAILQ_HEAD(, dmio_usrreq_state) ds_complete;
	struct selinfo ds_selq;
	volatile int ds_flags;
	u_int ds_nreqs;
	kmutex_t ds_lock;
	kcondvar_t ds_complete_cv;
	kcondvar_t ds_nreqs_cv;
	struct timespec ds_atime;
	struct timespec ds_mtime;
	struct timespec ds_btime;
};

static ONCE_DECL(dmio_cleaner_control);
static struct workqueue *dmio_cleaner;
static int dmio_cleaner_init(void);
static struct dmio_state *dmio_state_get(void);
static void dmio_state_put(struct dmio_state *);
static void dmio_usrreq_fini1(struct work *wk, void *);

#define	DMIO_STATE_SEL		0x0001
#define	DMIO_STATE_DEAD		0x0002
#define	DMIO_STATE_LARVAL	0x0004
#define	DMIO_STATE_READ_WAIT	0x0008
#define	DMIO_STATE_WRITE_WAIT	0x0010

#define	DMIO_NREQS_MAX		64	/* XXX pulled out of a hat */

struct pool dmio_state_pool;
struct pool dmio_usrreq_state_pool;

void	dmoverioattach(int);

dev_type_open(dmoverioopen);

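/*
 * Only d_open is implemented here: a successful open clones a file
 * descriptor serviced by dmio_fileops (below), so the remaining cdevsw
 * entry points are unused stubs.
 */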
const struct cdevsw dmoverio_cdevsw = {
	.d_open = dmoverioopen,
	.d_close = noclose,
	.d_read = noread,
	.d_write = nowrite,
	.d_ioctl = noioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = nopoll,
	.d_mmap = nommap,
	.d_kqfilter = nokqfilter,
	.d_discard = nodiscard,
	.d_flag = D_OTHER
};

/*
 * dmoverioattach:
 *
 *	Pseudo-device attach routine.
 */
void
dmoverioattach(int count)
{

	pool_init(&dmio_state_pool, sizeof(struct dmio_state),
	    0, 0, 0, "dmiostate", NULL, IPL_SOFTCLOCK);
	pool_init(&dmio_usrreq_state_pool, sizeof(struct dmio_usrreq_state),
	    0, 0, 0, "dmiourstate", NULL, IPL_SOFTCLOCK);
}

/*
 * dmio_cleaner_init:
 *
 *	Create cleaner thread.
 */
static int
dmio_cleaner_init(void)
{

	return workqueue_create(&dmio_cleaner, "dmioclean", dmio_usrreq_fini1,
	    NULL, PWAIT, IPL_SOFTCLOCK, 0);
}

static struct dmio_state *
dmio_state_get(void)
{
	struct dmio_state *ds;

	ds = pool_get(&dmio_state_pool, PR_WAITOK);

	memset(ds, 0, sizeof(*ds));

	getnanotime(&ds->ds_btime);
	ds->ds_atime = ds->ds_mtime = ds->ds_btime;

	mutex_init(&ds->ds_lock, MUTEX_DEFAULT, IPL_SOFTCLOCK);
	cv_init(&ds->ds_complete_cv, "dmvrrd");
	cv_init(&ds->ds_nreqs_cv, "dmiowr");
	TAILQ_INIT(&ds->ds_pending);
	TAILQ_INIT(&ds->ds_complete);
	selinit(&ds->ds_selq);

	return ds;
}

static void
dmio_state_put(struct dmio_state *ds)
{

	seldestroy(&ds->ds_selq);
	cv_destroy(&ds->ds_nreqs_cv);
	cv_destroy(&ds->ds_complete_cv);
	mutex_destroy(&ds->ds_lock);

	pool_put(&dmio_state_pool, ds);
}

/*
 * dmio_usrreq_init:
 *
 *	Build a request structure.
 */
static int
dmio_usrreq_init(struct file *fp, struct dmio_usrreq_state *dus,
    struct dmio_usrreq *req, struct dmover_request *dreq)
{
	struct dmio_state *ds = (struct dmio_state *) fp->f_data;
	struct dmover_session *dses = ds->ds_session;
	struct uio *uio_out = &dus->dus_uio_out;
	struct uio *uio_in;
	dmio_buffer inbuf;
	size_t len;
	int i, error;
	u_int j;

	/* XXX How should malloc interact w/ FNONBLOCK? */

	error = RUN_ONCE(&dmio_cleaner_control, dmio_cleaner_init);
	if (error) {
		return error;
	}

	error = proc_vmspace_getref(curproc, &dus->dus_vmspace);
	if (error) {
		return error;
	}

	if (req->req_outbuf.dmbuf_iovcnt != 0) {
		if (req->req_outbuf.dmbuf_iovcnt > IOV_MAX) {
			uvmspace_free(dus->dus_vmspace);
			return (EINVAL);
		}
		len = sizeof(struct iovec) * req->req_outbuf.dmbuf_iovcnt;
		uio_out->uio_iov = malloc(len, M_TEMP, M_WAITOK);
		error = copyin(req->req_outbuf.dmbuf_iov, uio_out->uio_iov,
		    len);
		if (error) {
			free(uio_out->uio_iov, M_TEMP);
			uvmspace_free(dus->dus_vmspace);
			return (error);
		}

		for (j = 0, len = 0; j < req->req_outbuf.dmbuf_iovcnt; j++) {
			len += uio_out->uio_iov[j].iov_len;
			if (len > SSIZE_MAX) {
				free(uio_out->uio_iov, M_TEMP);
				uvmspace_free(dus->dus_vmspace);
				return (EINVAL);
			}
		}

		uio_out->uio_iovcnt = req->req_outbuf.dmbuf_iovcnt;
		uio_out->uio_resid = len;
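		/*
		 * UIO_READ: the operation's output will be copied out
		 * to the user's buffer when the request runs.
		 */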
		uio_out->uio_rw = UIO_READ;
		uio_out->uio_vmspace = dus->dus_vmspace;

		dreq->dreq_outbuf_type = DMOVER_BUF_UIO;
		dreq->dreq_outbuf.dmbuf_uio = uio_out;
	} else {
		uio_out->uio_iov = NULL;
		uio_out = NULL;
		dreq->dreq_outbuf_type = DMOVER_BUF_NONE;
	}

	memcpy(dreq->dreq_immediate, req->req_immediate,
	    sizeof(dreq->dreq_immediate));

	if (dses->dses_ninputs == 0) {
		/* No inputs; all done. */
		return (0);
	}

	dreq->dreq_inbuf_type = DMOVER_BUF_UIO;

	dus->dus_uio_in = malloc(sizeof(struct uio) * dses->dses_ninputs,
	    M_TEMP, M_WAITOK);
	memset(dus->dus_uio_in, 0, sizeof(struct uio) * dses->dses_ninputs);

	for (i = 0; i < dses->dses_ninputs; i++) {
		uio_in = &dus->dus_uio_in[i];

		error = copyin(&req->req_inbuf[i], &inbuf, sizeof(inbuf));
		if (error)
			goto bad;

		if (inbuf.dmbuf_iovcnt > IOV_MAX) {
			error = EINVAL;
			goto bad;
		}
		len = sizeof(struct iovec) * inbuf.dmbuf_iovcnt;
		if (len == 0) {
			error = EINVAL;
			goto bad;
		}
		uio_in->uio_iov = malloc(len, M_TEMP, M_WAITOK);

		error = copyin(inbuf.dmbuf_iov, uio_in->uio_iov, len);
		if (error) {
			free(uio_in->uio_iov, M_TEMP);
			goto bad;
		}

		for (j = 0, len = 0; j < inbuf.dmbuf_iovcnt; j++) {
			len += uio_in->uio_iov[j].iov_len;
			if (len > SSIZE_MAX) {
				free(uio_in->uio_iov, M_TEMP);
				error = EINVAL;
				goto bad;
			}
		}

		if (uio_out != NULL && len != uio_out->uio_resid) {
			free(uio_in->uio_iov, M_TEMP);
			error = EINVAL;
			goto bad;
		}

		uio_in->uio_iovcnt = inbuf.dmbuf_iovcnt;
		uio_in->uio_resid = len;
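		/*
		 * UIO_WRITE: the operation's input is copied in from
		 * the user's buffer when the request runs.
		 */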
		uio_in->uio_rw = UIO_WRITE;
		uio_in->uio_vmspace = dus->dus_vmspace;

		dreq->dreq_inbuf[i].dmbuf_uio = uio_in;
	}

	return (0);

 bad:
	if (i > 0) {
		for (--i; i >= 0; i--) {
			uio_in = &dus->dus_uio_in[i];
			free(uio_in->uio_iov, M_TEMP);
		}
	}
	free(dus->dus_uio_in, M_TEMP);
	if (uio_out != NULL)
		free(uio_out->uio_iov, M_TEMP);
	uvmspace_free(dus->dus_vmspace);
	return (error);
}

/*
 * dmio_usrreq_fini:
 *
 *	Tear down a request.  The final free of the request state is
 *	deferred to the dmio_cleaner workqueue.
 */
static void
dmio_usrreq_fini(struct dmio_state *ds, struct dmio_usrreq_state *dus)
{
	struct dmover_session *dses = ds->ds_session;
	struct uio *uio_out = &dus->dus_uio_out;
	struct uio *uio_in;
	int i;

	if (uio_out->uio_iov != NULL)
		free(uio_out->uio_iov, M_TEMP);

	if (dses->dses_ninputs) {
		for (i = 0; i < dses->dses_ninputs; i++) {
			uio_in = &dus->dus_uio_in[i];
			free(uio_in->uio_iov, M_TEMP);
		}
		free(dus->dus_uio_in, M_TEMP);
	}

	workqueue_enqueue(dmio_cleaner, &dus->dus_work, NULL);
}

static void
dmio_usrreq_fini1(struct work *wk, void *dummy)
{
	struct dmio_usrreq_state *dus = (void *)wk;

	KASSERT(wk == &dus->dus_work);

	uvmspace_free(dus->dus_vmspace);
	pool_put(&dmio_usrreq_state_pool, dus);
}

/*
 * dmio_read:
 *
 *	Read file op.
 */
static int
dmio_read(struct file *fp, off_t *offp, struct uio *uio,
    kauth_cred_t cred, int flags)
{
	struct dmio_state *ds = (struct dmio_state *) fp->f_data;
	struct dmio_usrreq_state *dus;
	struct dmover_request *dreq;
	struct dmio_usrresp resp;
	int error = 0, progress = 0;

	if ((uio->uio_resid % sizeof(resp)) != 0)
		return (EINVAL);

	if (ds->ds_session == NULL)
		return (ENXIO);

	getnanotime(&ds->ds_atime);
	mutex_enter(&ds->ds_lock);

	while (uio->uio_resid != 0) {

		for (;;) {
			dus = TAILQ_FIRST(&ds->ds_complete);
			if (dus == NULL) {
				if (fp->f_flag & FNONBLOCK) {
					error = progress ? 0 : EWOULDBLOCK;
					goto out;
				}
				ds->ds_flags |= DMIO_STATE_READ_WAIT;
				error = cv_wait_sig(&ds->ds_complete_cv, &ds->ds_lock);
				if (error)
					goto out;
				continue;
			}
			/* Have a completed request. */
			TAILQ_REMOVE(&ds->ds_complete, dus, dus_q);
			ds->ds_nreqs--;
			if (ds->ds_flags & DMIO_STATE_WRITE_WAIT) {
				ds->ds_flags &= ~DMIO_STATE_WRITE_WAIT;
				cv_broadcast(&ds->ds_nreqs_cv);
			}
			if (ds->ds_flags & DMIO_STATE_SEL) {
				ds->ds_flags &= ~DMIO_STATE_SEL;
				selnotify(&ds->ds_selq, POLLOUT | POLLWRNORM, 0);
			}
			break;
		}

		dreq = dus->dus_req;
		resp.resp_id = dus->dus_id;
		if (dreq->dreq_flags & DMOVER_REQ_ERROR)
			resp.resp_error = dreq->dreq_error;
		else {
			resp.resp_error = 0;
			memcpy(resp.resp_immediate, dreq->dreq_immediate,
			    sizeof(resp.resp_immediate));
		}

		dmio_usrreq_fini(ds, dus);

		mutex_exit(&ds->ds_lock);

		progress = 1;

		dmover_request_free(dreq);

		error = uiomove(&resp, sizeof(resp), uio);
		if (error)
			return (error);

		mutex_enter(&ds->ds_lock);
	}

 out:
	mutex_exit(&ds->ds_lock);

	return (error);
}

/*
 * dmio_usrreq_done:
 *
 *	Dmover completion callback.
 */
static void
dmio_usrreq_done(struct dmover_request *dreq)
{
	struct dmio_usrreq_state *dus = dreq->dreq_cookie;
	struct dmio_state *ds = dreq->dreq_session->dses_cookie;
	/* Called from dmover in softint context (IPL_SOFTCLOCK). */

	mutex_enter(&ds->ds_lock);
	TAILQ_REMOVE(&ds->ds_pending, dus, dus_q);
	if (ds->ds_flags & DMIO_STATE_DEAD) {
		int nreqs = --ds->ds_nreqs;
		mutex_exit(&ds->ds_lock);
		dmio_usrreq_fini(ds, dus);
		dmover_request_free(dreq);
		if (nreqs == 0) {
			dmio_state_put(ds);
		}
		return;
	}

	TAILQ_INSERT_TAIL(&ds->ds_complete, dus, dus_q);
	if (ds->ds_flags & DMIO_STATE_READ_WAIT) {
		ds->ds_flags &= ~DMIO_STATE_READ_WAIT;
		cv_broadcast(&ds->ds_complete_cv);
	}
	if (ds->ds_flags & DMIO_STATE_SEL) {
		ds->ds_flags &= ~DMIO_STATE_SEL;
		selnotify(&ds->ds_selq, POLLIN | POLLRDNORM, 0);
	}
	mutex_exit(&ds->ds_lock);
}

/*
 * dmio_write:
 *
 *	Write file op.
 */
static int
dmio_write(struct file *fp, off_t *offp, struct uio *uio,
    kauth_cred_t cred, int flags)
{
	struct dmio_state *ds = (struct dmio_state *) fp->f_data;
	struct dmio_usrreq_state *dus;
	struct dmover_request *dreq;
	struct dmio_usrreq req;
	int error = 0, progress = 0;

	if ((uio->uio_resid % sizeof(req)) != 0)
		return (EINVAL);

	if (ds->ds_session == NULL)
		return (ENXIO);

	getnanotime(&ds->ds_mtime);
	mutex_enter(&ds->ds_lock);

	while (uio->uio_resid != 0) {

		if (ds->ds_nreqs == DMIO_NREQS_MAX) {
			if (fp->f_flag & FNONBLOCK) {
				error = progress ? 0 : EWOULDBLOCK;
				break;
			}
			ds->ds_flags |= DMIO_STATE_WRITE_WAIT;
			error = cv_wait_sig(&ds->ds_nreqs_cv, &ds->ds_lock);
			if (error)
				break;
			continue;
		}

		ds->ds_nreqs++;

		mutex_exit(&ds->ds_lock);

		progress = 1;

		error = uiomove(&req, sizeof(req), uio);
		if (error) {
			mutex_enter(&ds->ds_lock);
			ds->ds_nreqs--;
			break;
		}

		/* XXX How should this interact with FNONBLOCK? */
		dreq = dmover_request_alloc(ds->ds_session, NULL);
		if (dreq == NULL) {
			/* XXX */
			mutex_enter(&ds->ds_lock);
			ds->ds_nreqs--;
			mutex_exit(&ds->ds_lock);
			error = ENOMEM;
			return error;
		}
		dus = pool_get(&dmio_usrreq_state_pool, PR_WAITOK);

		error = dmio_usrreq_init(fp, dus, &req, dreq);
		if (error) {
			dmover_request_free(dreq);
			pool_put(&dmio_usrreq_state_pool, dus);
			mutex_enter(&ds->ds_lock);
			ds->ds_nreqs--;
			mutex_exit(&ds->ds_lock);
			return error;
		}

		dreq->dreq_callback = dmio_usrreq_done;
		dreq->dreq_cookie = dus;

		dus->dus_req = dreq;
		dus->dus_id = req.req_id;

		mutex_enter(&ds->ds_lock);

		TAILQ_INSERT_TAIL(&ds->ds_pending, dus, dus_q);

		mutex_exit(&ds->ds_lock);

		dmover_process(dreq);

		mutex_enter(&ds->ds_lock);
	}

	mutex_exit(&ds->ds_lock);

	return (error);
}

static int
dmio_stat(struct file *fp, struct stat *st)
{
	struct dmio_state *ds = fp->f_data;

	(void)memset(st, 0, sizeof(*st));
	KERNEL_LOCK(1, NULL);
	st->st_dev = makedev(cdevsw_lookup_major(&dmoverio_cdevsw), 0);
	st->st_atimespec = ds->ds_atime;
	st->st_mtimespec = ds->ds_mtime;
	st->st_ctimespec = st->st_birthtimespec = ds->ds_btime;
	st->st_uid = kauth_cred_geteuid(fp->f_cred);
	st->st_gid = kauth_cred_getegid(fp->f_cred);
	KERNEL_UNLOCK_ONE(NULL);
	return 0;
}

/*
 * dmio_ioctl:
 *
 *	Ioctl file op.
 */
static int
dmio_ioctl(struct file *fp, u_long cmd, void *data)
{
	struct dmio_state *ds = (struct dmio_state *) fp->f_data;
	int error;

	switch (cmd) {
	case FIONBIO:
	case FIOASYNC:
		return (0);

	case DMIO_SETFUNC:
	    {
		struct dmio_setfunc *dsf = data;
		struct dmover_session *dses;

		mutex_enter(&ds->ds_lock);

		if (ds->ds_session != NULL ||
		    (ds->ds_flags & DMIO_STATE_LARVAL) != 0) {
			mutex_exit(&ds->ds_lock);
			return (EBUSY);
		}

		ds->ds_flags |= DMIO_STATE_LARVAL;

		mutex_exit(&ds->ds_lock);

		dsf->dsf_name[DMIO_MAX_FUNCNAME - 1] = '\0';
		error = dmover_session_create(dsf->dsf_name, &dses);

		mutex_enter(&ds->ds_lock);

		if (error == 0) {
			dses->dses_cookie = ds;
			ds->ds_session = dses;
		}
		ds->ds_flags &= ~DMIO_STATE_LARVAL;

		mutex_exit(&ds->ds_lock);
		break;
	    }

	default:
		error = ENOTTY;
	}

	return (error);
}

/*
 * dmio_poll:
 *
 *	Poll file op.
 */
static int
dmio_poll(struct file *fp, int events)
{
	struct dmio_state *ds = (struct dmio_state *) fp->f_data;
	int revents = 0;

	if ((events & (POLLIN | POLLRDNORM | POLLOUT | POLLWRNORM)) == 0)
		return (revents);

	mutex_enter(&ds->ds_lock);

	if (ds->ds_flags & DMIO_STATE_DEAD) {
		/* EOF */
		revents |= events & (POLLIN | POLLRDNORM |
		    POLLOUT | POLLWRNORM);
		goto out;
	}

	/* We can read if there are completed requests. */
	if (events & (POLLIN | POLLRDNORM))
		if (TAILQ_EMPTY(&ds->ds_complete) == 0)
			revents |= events & (POLLIN | POLLRDNORM);

	/*
	 * We can write if there are fewer than DMIO_NREQS_MAX requests
	 * already in the queue.
	 */
	if (events & (POLLOUT | POLLWRNORM))
		if (ds->ds_nreqs < DMIO_NREQS_MAX)
			revents |= events & (POLLOUT | POLLWRNORM);

	if (revents == 0) {
		selrecord(curlwp, &ds->ds_selq);
		ds->ds_flags |= DMIO_STATE_SEL;
	}

 out:
	mutex_exit(&ds->ds_lock);

	return (revents);
}

/*
 * dmio_close:
 *
 *	Close file op.
 */
static int
dmio_close(struct file *fp)
{
	struct dmio_state *ds = (struct dmio_state *) fp->f_data;
	struct dmio_usrreq_state *dus;
	struct dmover_session *dses;

	mutex_enter(&ds->ds_lock);

	ds->ds_flags |= DMIO_STATE_DEAD;

	/* Garbage-collect all the responses on the queue. */
	while ((dus = TAILQ_FIRST(&ds->ds_complete)) != NULL) {
		TAILQ_REMOVE(&ds->ds_complete, dus, dus_q);
		ds->ds_nreqs--;
		mutex_exit(&ds->ds_lock);
		dmover_request_free(dus->dus_req);
		dmio_usrreq_fini(ds, dus);
		mutex_enter(&ds->ds_lock);
	}

	/*
	 * If there are any requests pending, we have to wait for
	 * them.  Don't free the dmio_state in this case.
	 */
	if (ds->ds_nreqs == 0) {
		dses = ds->ds_session;
		mutex_exit(&ds->ds_lock);
		dmio_state_put(ds);
	} else {
		dses = NULL;
		mutex_exit(&ds->ds_lock);
	}

	fp->f_data = NULL;

	if (dses != NULL)
		dmover_session_destroy(dses);

	return (0);
}

static const struct fileops dmio_fileops = {
	.fo_read = dmio_read,
	.fo_write = dmio_write,
	.fo_ioctl = dmio_ioctl,
	.fo_fcntl = fnullop_fcntl,
	.fo_poll = dmio_poll,
	.fo_stat = dmio_stat,
	.fo_close = dmio_close,
	.fo_kqfilter = fnullop_kqfilter,
	.fo_restart = fnullop_restart,
};

/*
 * dmoverioopen:
 *
 *	Device switch open routine.
 */
int
dmoverioopen(dev_t dev, int flag, int mode, struct lwp *l)
{
	struct dmio_state *ds;
	struct file *fp;
	int error, fd;

	if ((error = fd_allocfile(&fp, &fd)) != 0)
		return (error);

	ds = dmio_state_get();

	return fd_clone(fp, fd, flag, &dmio_fileops, ds);
}