/*	$NetBSD: dmover_io.c,v 1.1 2002/08/02 00:30:38 thorpej Exp $	*/

/*
 * Copyright (c) 2002 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * dmover_io.c: Support for user-space access to the dmover API.
 *
 * This interface is quite simple:
 *
 *	1.  The user opens /dev/dmover, which is a cloning device.  This
 *	    allocates internal state for the session.
 *
 *	2.  The user does a DMIO_SETFUNC to select the data movement
 *	    function.  This actually creates the dmover session.
 *
 *	3.  The user writes request messages to its dmover handle.
 *
 *	4.  The user reads request responses from its dmover handle.
 *
 *	5.  The user closes the file descriptor and the session is
 *	    torn down.
 */

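/*
 * A minimal sketch of that sequence from userland, for illustration
 * only: all error handling is omitted, and the function name "zero"
 * is an assumption (any function name known to dmover may be passed
 * to DMIO_SETFUNC).  Only structure fields referenced by the code
 * below are used; see <dev/dmover/dmover_io.h> for the authoritative
 * layout.
 *
 *	int fd = open("/dev/dmover", O_RDWR);
 *
 *	struct dmio_setfunc dsf;
 *	memset(&dsf, 0, sizeof(dsf));
 *	strlcpy(dsf.dsf_name, "zero", sizeof(dsf.dsf_name));
 *	ioctl(fd, DMIO_SETFUNC, &dsf);
 *
 *	char buf[4096];
 *	struct iovec iov;
 *	struct dmio_usrreq req;
 *	struct dmio_usrresp resp;
 *
 *	iov.iov_base = buf;
 *	iov.iov_len = sizeof(buf);
 *	memset(&req, 0, sizeof(req));
 *	req.req_id = 1;
 *	req.req_outbuf.dmbuf_iov = &iov;
 *	req.req_outbuf.dmbuf_iovcnt = 1;
 *	write(fd, &req, sizeof(req));
 *
 *	read(fd, &resp, sizeof(resp));
 *	if (resp.resp_id == 1 && resp.resp_error == 0)
 *		printf("buf has been zero-filled\n");
 *
 *	close(fd);
 */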

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: dmover_io.c,v 1.1 2002/08/02 00:30:38 thorpej Exp $");

#include <sys/param.h>
#include <sys/queue.h>
#include <sys/conf.h>
#include <sys/pool.h>
#include <sys/proc.h>
#include <sys/poll.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/filio.h>
#include <sys/select.h>
#include <sys/systm.h>

#include <dev/dmover/dmovervar.h>
#include <dev/dmover/dmover_io.h>

struct dmio_usrreq_state {
	TAILQ_ENTRY(dmio_usrreq_state) dus_q;
	struct uio dus_uio_out;
	struct uio *dus_uio_in;
	struct dmover_request *dus_req;
	uint32_t dus_id;
};

struct dmio_state {
	struct dmover_session *ds_session;
	TAILQ_HEAD(, dmio_usrreq_state) ds_pending;
	TAILQ_HEAD(, dmio_usrreq_state) ds_complete;
	struct selinfo ds_selq;
	__volatile int ds_flags;
	u_int ds_nreqs;
	struct simplelock ds_slock;
};

#define	DMIO_STATE_SEL		0x0001
#define	DMIO_STATE_DEAD		0x0002
#define	DMIO_STATE_LARVAL	0x0004
#define	DMIO_STATE_READ_WAIT	0x0008
#define	DMIO_STATE_WRITE_WAIT	0x0010

#define	DMIO_NREQS_MAX		64	/* XXX pulled out of a hat */

struct pool dmio_state_pool;
struct pool dmio_usrreq_state_pool;

void	dmoverioattach(int);
cdev_decl(dmoverio);

/*
 * dmoverioattach:
 *
 *	Pseudo-device attach routine.
 */
void
dmoverioattach(int count)
{

	pool_init(&dmio_state_pool, sizeof(struct dmio_state),
	    0, 0, 0, "dmiostate", NULL);
	pool_init(&dmio_usrreq_state_pool, sizeof(struct dmio_usrreq_state),
	    0, 0, 0, "dmiourstate", NULL);
}

/*
 * dmio_usrreq_init:
 *
 *	Build a request structure.
 */
static int
dmio_usrreq_init(struct file *fp, struct dmio_usrreq_state *dus,
    struct dmio_usrreq *req, struct dmover_request *dreq)
{
	struct dmio_state *ds = (struct dmio_state *) fp->f_data;
	struct dmover_session *dses = ds->ds_session;
	struct uio *uio_out = &dus->dus_uio_out;
	struct uio *uio_in;
	dmio_buffer inbuf;
	size_t len;
	u_int i, j;
	int error;

	/* XXX How should malloc interact w/ FNONBLOCK? */

	if (req->req_outbuf.dmbuf_iovcnt > IOV_MAX)
		return (EINVAL);
	len = sizeof(struct iovec) * req->req_outbuf.dmbuf_iovcnt;
	if (len == 0)
		return (EINVAL);
	uio_out->uio_iov = malloc(len, M_TEMP, M_WAITOK);

	error = copyin(req->req_outbuf.dmbuf_iov, uio_out->uio_iov, len);
	if (error) {
		free(uio_out->uio_iov, M_TEMP);
		return (error);
	}

	for (j = 0, len = 0; j < req->req_outbuf.dmbuf_iovcnt; j++) {
		len += uio_out->uio_iov[j].iov_len;
		if (len > SSIZE_MAX) {
			free(uio_out->uio_iov, M_TEMP);
			return (EINVAL);	/* "error" is 0 here */
		}
	}

	uio_out->uio_iovcnt = req->req_outbuf.dmbuf_iovcnt;
	uio_out->uio_resid = len;
	uio_out->uio_rw = UIO_READ;
	uio_out->uio_segflg = UIO_USERSPACE;
	uio_out->uio_procp = curproc;

	dreq->dreq_outbuf_type = DMOVER_BUF_UIO;
	dreq->dreq_outbuf.dmbuf_uio = uio_out;

	if (dses->dses_ninputs == 0) {
		/* No inputs; copy the immediate. */
		memcpy(dreq->dreq_immediate, req->req_immediate,
		    sizeof(dreq->dreq_immediate));
		return (0);
	}

	dreq->dreq_inbuf_type = DMOVER_BUF_UIO;

	dus->dus_uio_in = malloc(sizeof(struct uio) * dses->dses_ninputs,
	    M_TEMP, M_WAITOK);
	memset(dus->dus_uio_in, 0, sizeof(struct uio) * dses->dses_ninputs);

	for (i = 0; i < dses->dses_ninputs; i++) {
		uio_in = &dus->dus_uio_in[i];

		error = copyin(&req->req_inbuf[i], &inbuf, sizeof(inbuf));
		if (error)
			goto bad;

		if (inbuf.dmbuf_iovcnt > IOV_MAX) {
			error = EINVAL;
			goto bad;
		}
		len = sizeof(struct iovec) * inbuf.dmbuf_iovcnt;
		if (len == 0) {
			error = EINVAL;
			goto bad;
		}
		uio_in->uio_iov = malloc(len, M_TEMP, M_WAITOK);

		error = copyin(inbuf.dmbuf_iov, uio_in->uio_iov, len);
		if (error) {
			free(uio_in->uio_iov, M_TEMP);
			goto bad;
		}

		/* Sum this input's iovecs, not the output's. */
		for (j = 0, len = 0; j < inbuf.dmbuf_iovcnt; j++) {
			len += uio_in->uio_iov[j].iov_len;
			if (len > SSIZE_MAX) {
				free(uio_in->uio_iov, M_TEMP);
				error = EINVAL;
				goto bad;
			}
		}

		if (len != uio_out->uio_resid) {
			free(uio_in->uio_iov, M_TEMP);
			error = EINVAL;
			goto bad;
		}

		uio_in->uio_iovcnt = inbuf.dmbuf_iovcnt;
		uio_in->uio_resid = len;
		uio_in->uio_rw = UIO_WRITE;
		uio_in->uio_segflg = UIO_USERSPACE;
		uio_in->uio_procp = curproc;

		dreq->dreq_inbuf[i].dmbuf_uio = uio_in;
	}

	return (0);

 bad:
	/*
	 * Free the iovec arrays of the inputs set up so far.  "i" is
	 * unsigned, so "for (--i; i >= 0; i--)" would never terminate;
	 * count down with a post-decrement test instead.
	 */
	while (i-- > 0) {
		uio_in = &dus->dus_uio_in[i];
		free(uio_in->uio_iov, M_TEMP);
	}
	free(dus->dus_uio_in, M_TEMP);
	free(uio_out->uio_iov, M_TEMP);
	return (error);
}

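/*
 * Note the invariant dmio_usrreq_init() enforces: every input buffer
 * must describe exactly as many bytes as the output buffer, although
 * the iovec shapes may differ.  For example (a sketch; "xor2" is an
 * assumed name for a two-input function), an 8 KB request could be
 * laid out as:
 *
 *	output:		1 iovec of 8192 bytes
 *	input 0:	2 iovecs of 4096 bytes each
 *	input 1:	4 iovecs of 2048 bytes each
 *
 * Any input that sums to a different length than the output is
 * rejected with EINVAL, as is any buffer with zero iovecs or more
 * than IOV_MAX of them.
 */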

/*
 * dmio_usrreq_fini:
 *
 *	Tear down a request.  Must be called at splsoftclock().
 */
static void
dmio_usrreq_fini(struct dmio_state *ds, struct dmio_usrreq_state *dus)
{
	struct dmover_session *dses = ds->ds_session;
	struct uio *uio_out = &dus->dus_uio_out;
	struct uio *uio_in;
	int i;

	free(uio_out->uio_iov, M_TEMP);

	if (dses->dses_ninputs == 0) {
		pool_put(&dmio_usrreq_state_pool, dus);
		return;
	}

	for (i = 0; i < dses->dses_ninputs; i++) {
		uio_in = &dus->dus_uio_in[i];
		free(uio_in->uio_iov, M_TEMP);
	}

	free(dus->dus_uio_in, M_TEMP);

	pool_put(&dmio_usrreq_state_pool, dus);
}

/*
 * dmio_read:
 *
 *	Read file op.
 */
static int
dmio_read(struct file *fp, off_t *offp, struct uio *uio,
    struct ucred *cred, int flags)
{
	struct dmio_state *ds = (struct dmio_state *) fp->f_data;
	struct dmio_usrreq_state *dus;
	struct dmover_request *dreq;
	struct dmio_usrresp resp;
	int s, error = 0, progress = 0;

	if ((uio->uio_resid % sizeof(resp)) != 0)
		return (EINVAL);

	if (ds->ds_session == NULL)
		return (ENXIO);

	s = splsoftclock();
	simple_lock(&ds->ds_slock);

	while (uio->uio_resid != 0) {

		for (;;) {
			dus = TAILQ_FIRST(&ds->ds_complete);
			if (dus == NULL) {
				if (fp->f_flag & FNONBLOCK) {
					error = progress ? 0 : EWOULDBLOCK;
					goto out;
				}
				/*
				 * Note that a reader is waiting, so the
				 * completion callback knows to wake us.
				 */
				ds->ds_flags |= DMIO_STATE_READ_WAIT;
				error = ltsleep(&ds->ds_complete,
				    PRIBIO | PCATCH, "dmvrrd", 0,
				    &ds->ds_slock);
				if (error)
					goto out;
				continue;
			}
			/* Have a completed request. */
			TAILQ_REMOVE(&ds->ds_complete, dus, dus_q);
			ds->ds_nreqs--;
			if (ds->ds_flags & DMIO_STATE_WRITE_WAIT) {
				ds->ds_flags &= ~DMIO_STATE_WRITE_WAIT;
				wakeup(&ds->ds_nreqs);
			}
			if (ds->ds_flags & DMIO_STATE_SEL) {
				ds->ds_flags &= ~DMIO_STATE_SEL;
				selwakeup(&ds->ds_selq);
			}
			break;
		}

		simple_unlock(&ds->ds_slock);

		dreq = dus->dus_req;
		resp.resp_id = dus->dus_id;
		resp.resp_error = (dreq->dreq_flags & DMOVER_REQ_ERROR) ?
		    dreq->dreq_error : 0;

		dmio_usrreq_fini(ds, dus);

		splx(s);

		progress = 1;

		dmover_request_free(dreq);

		error = uiomove(&resp, sizeof(resp), uio);
		if (error)
			return (error);

		s = splsoftclock();
		simple_lock(&ds->ds_slock);
	}

 out:
	simple_unlock(&ds->ds_slock);
	splx(s);

	return (error);
}

/*
 * dmio_usrreq_done:
 *
 *	Dmover completion callback.
 */
static void
dmio_usrreq_done(struct dmover_request *dreq)
{
	struct dmio_usrreq_state *dus = dreq->dreq_cookie;
	struct dmio_state *ds = dreq->dreq_session->dses_cookie;

	/* We're already at splsoftclock(). */

	simple_lock(&ds->ds_slock);
	TAILQ_REMOVE(&ds->ds_pending, dus, dus_q);
	if (ds->ds_flags & DMIO_STATE_DEAD) {
		ds->ds_nreqs--;
		dmio_usrreq_fini(ds, dus);
		dmover_request_free(dreq);
		if (ds->ds_nreqs == 0) {
			simple_unlock(&ds->ds_slock);
			pool_put(&dmio_state_pool, ds);
			return;
		}
	} else {
		TAILQ_INSERT_TAIL(&ds->ds_complete, dus, dus_q);
		if (ds->ds_flags & DMIO_STATE_READ_WAIT) {
			ds->ds_flags &= ~DMIO_STATE_READ_WAIT;
			wakeup(&ds->ds_complete);
		}
		if (ds->ds_flags & DMIO_STATE_SEL) {
			ds->ds_flags &= ~DMIO_STATE_SEL;
			selwakeup(&ds->ds_selq);
		}
	}
	simple_unlock(&ds->ds_slock);
}

/*
 * dmio_write:
 *
 *	Write file op.
 */
static int
dmio_write(struct file *fp, off_t *offp, struct uio *uio,
    struct ucred *cred, int flags)
{
	struct dmio_state *ds = (struct dmio_state *) fp->f_data;
	struct dmio_usrreq_state *dus;
	struct dmover_request *dreq;
	struct dmio_usrreq req;
	int error = 0, s, progress = 0;

	if ((uio->uio_resid % sizeof(req)) != 0)
		return (EINVAL);

	if (ds->ds_session == NULL)
		return (ENXIO);

	s = splsoftclock();
	simple_lock(&ds->ds_slock);

	while (uio->uio_resid != 0) {

		if (ds->ds_nreqs == DMIO_NREQS_MAX) {
			if (fp->f_flag & FNONBLOCK) {
				error = progress ? 0 : EWOULDBLOCK;
				break;
			}
			ds->ds_flags |= DMIO_STATE_WRITE_WAIT;
			error = ltsleep(&ds->ds_nreqs, PRIBIO | PCATCH,
			    "dmiowr", 0, &ds->ds_slock);
			if (error)
				break;
			continue;
		}

		ds->ds_nreqs++;

		simple_unlock(&ds->ds_slock);
		splx(s);

		progress = 1;

		error = uiomove(&req, sizeof(req), uio);
		if (error) {
			s = splsoftclock();
			simple_lock(&ds->ds_slock);
			ds->ds_nreqs--;
			break;
		}

		/* XXX How should this interact with FNONBLOCK? */
		dreq = dmover_request_alloc(ds->ds_session, NULL);
		if (dreq == NULL) {
			/* XXX */
			s = splsoftclock();
			simple_lock(&ds->ds_slock);
			ds->ds_nreqs--;
			error = ENOMEM;
			break;
		}
		s = splsoftclock();
		dus = pool_get(&dmio_usrreq_state_pool, PR_WAITOK);
		splx(s);

		error = dmio_usrreq_init(fp, dus, &req, dreq);
		if (error) {
			dmover_request_free(dreq);
			s = splsoftclock();
			pool_put(&dmio_usrreq_state_pool, dus);
			simple_lock(&ds->ds_slock);
			ds->ds_nreqs--;	/* release the slot reserved above */
			break;
		}

		dreq->dreq_callback = dmio_usrreq_done;
		dreq->dreq_cookie = dus;

		dus->dus_req = dreq;
		dus->dus_id = req.req_id;

		s = splsoftclock();
		simple_lock(&ds->ds_slock);

		TAILQ_INSERT_TAIL(&ds->ds_pending, dus, dus_q);

		simple_unlock(&ds->ds_slock);
		splx(s);

		dmover_process(dreq);

		s = splsoftclock();
		simple_lock(&ds->ds_slock);
	}

	simple_unlock(&ds->ds_slock);
	splx(s);

	return (error);
}

/*
 * dmio_ioctl:
 *
 *	Ioctl file op.
 */
static int
dmio_ioctl(struct file *fp, u_long cmd, caddr_t data, struct proc *p)
{
	struct dmio_state *ds = (struct dmio_state *) fp->f_data;
	int error, s;

	switch (cmd) {
	case FIONBIO:
	case FIOASYNC:
		return (0);

	case DMIO_SETFUNC:
	    {
		struct dmio_setfunc *dsf = (void *) data;
		struct dmover_session *dses;

		s = splsoftclock();
		simple_lock(&ds->ds_slock);

		if (ds->ds_session != NULL ||
		    (ds->ds_flags & DMIO_STATE_LARVAL) != 0) {
			simple_unlock(&ds->ds_slock);
			splx(s);
			return (EBUSY);
		}

		ds->ds_flags |= DMIO_STATE_LARVAL;

		simple_unlock(&ds->ds_slock);
		splx(s);

		dsf->dsf_name[DMIO_MAX_FUNCNAME - 1] = '\0';
		error = dmover_session_create(dsf->dsf_name, &dses);

		s = splsoftclock();
		simple_lock(&ds->ds_slock);

		if (error == 0) {
			dses->dses_cookie = ds;
			ds->ds_session = dses;
		}
		ds->ds_flags &= ~DMIO_STATE_LARVAL;

		simple_unlock(&ds->ds_slock);
		splx(s);
		break;
	    }

	default:
		error = ENOTTY;
	}

	return (error);
}

/*
 * dmio_fcntl:
 *
 *	Fcntl file op.
 */
static int
dmio_fcntl(struct file *fp, u_int cmd, caddr_t data, struct proc *p)
{

	if (cmd == FNONBLOCK || cmd == FASYNC)
		return (0);

	return (EOPNOTSUPP);
}

/*
 * dmio_poll:
 *
 *	Poll file op.
 */
static int
dmio_poll(struct file *fp, int events, struct proc *p)
{
	struct dmio_state *ds = (struct dmio_state *) fp->f_data;
	int s, revents = 0;

	if ((events & (POLLIN | POLLRDNORM | POLLOUT | POLLWRNORM)) == 0)
		return (revents);

	s = splsoftclock();
	simple_lock(&ds->ds_slock);

	if (ds->ds_flags & DMIO_STATE_DEAD) {
		/* EOF */
		revents |= events & (POLLIN | POLLRDNORM |
		    POLLOUT | POLLWRNORM);
		goto out;
	}

	/* We can read if there are completed requests. */
	if (events & (POLLIN | POLLRDNORM))
		if (TAILQ_EMPTY(&ds->ds_complete) == 0)
			revents |= events & (POLLIN | POLLRDNORM);

	/*
	 * We can write if fewer than DMIO_NREQS_MAX requests are
	 * already in the queue.
	 */
	if (events & (POLLOUT | POLLWRNORM))
		if (ds->ds_nreqs < DMIO_NREQS_MAX)
			revents |= events & (POLLOUT | POLLWRNORM);

	if (revents == 0) {
		selrecord(p, &ds->ds_selq);
		ds->ds_flags |= DMIO_STATE_SEL;
	}

 out:
	simple_unlock(&ds->ds_slock);
	splx(s);

	return (revents);
}

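/*
 * A sketch of how a userland consumer might block for completions
 * (hypothetical "fd" and "resp" as in the example at the top of this
 * file):
 *
 *	struct pollfd pfd;
 *
 *	pfd.fd = fd;
 *	pfd.events = POLLIN;
 *	if (poll(&pfd, 1, INFTIM) > 0 && (pfd.revents & POLLIN))
 *		read(fd, &resp, sizeof(resp));	(will not block)
 */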

/*
 * dmio_stat:
 *
 *	Stat file op.
 */
static int
dmio_stat(struct file *fp, struct stat *sb, struct proc *p)
{

	return (EOPNOTSUPP);
}

/*
 * dmio_close:
 *
 *	Close file op.
 */
static int
dmio_close(struct file *fp, struct proc *p)
{
	struct dmio_state *ds = (struct dmio_state *) fp->f_data;
	struct dmio_usrreq_state *dus;
	struct dmover_session *dses;
	int s;

	s = splsoftclock();
	simple_lock(&ds->ds_slock);

	ds->ds_flags |= DMIO_STATE_DEAD;

	/* Garbage-collect all the responses on the queue. */
	while ((dus = TAILQ_FIRST(&ds->ds_complete)) != NULL) {
		TAILQ_REMOVE(&ds->ds_complete, dus, dus_q);
		ds->ds_nreqs--;
		dmover_request_free(dus->dus_req);
		dmio_usrreq_fini(ds, dus);
	}

	/*
	 * If there are any requests pending, we have to wait for
	 * them.  Don't free the dmio_state in this case.
	 * XXX The session itself is never destroyed in this case;
	 * XXX the completion callback only frees the dmio_state.
	 */
	if (ds->ds_nreqs == 0) {
		dses = ds->ds_session;
		simple_unlock(&ds->ds_slock);
		pool_put(&dmio_state_pool, ds);
	} else {
		dses = NULL;
		simple_unlock(&ds->ds_slock);
	}

	splx(s);

	fp->f_data = NULL;

	if (dses != NULL)
		dmover_session_destroy(dses);

	return (0);
}

static struct fileops dmio_fileops = {
	dmio_read,
	dmio_write,
	dmio_ioctl,
	dmio_fcntl,
	dmio_poll,
	dmio_stat,
	dmio_close,
};

/*
 * dmoverioopen:
 *
 *	Device switch open routine.
 */
int
dmoverioopen(dev_t dev, int flag, int mode, struct proc *p)
{
	struct dmio_state *ds;
	struct file *fp;
	int error, fd, s;

	/* falloc() will use the descriptor for us. */
	if ((error = falloc(p, &fp, &fd)) != 0)
		return (error);

	s = splsoftclock();
	ds = pool_get(&dmio_state_pool, PR_WAITOK);
	splx(s);

	memset(ds, 0, sizeof(*ds));
	TAILQ_INIT(&ds->ds_pending);
	TAILQ_INIT(&ds->ds_complete);

	fp->f_flag = FREAD | FWRITE;
	fp->f_type = DTYPE_MISC;
	fp->f_ops = &dmio_fileops;
	fp->f_data = (caddr_t) ds;

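	/*
	 * Return ENXIO with p_dupfd set to the descriptor we just
	 * set up: the upper open path recognizes this combination
	 * and hands the new descriptor to the caller instead of
	 * failing the open.  This is the traditional "cloning
	 * device" trick.
	 */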
	p->p_dupfd = fd;
	FILE_SET_MATURE(fp);
	FILE_UNUSE(fp, p);

	return (ENXIO);
}