/*	$NetBSD: dmover_io.c,v 1.8 2003/03/21 21:13:55 dsl Exp $	*/

/*
 * Copyright (c) 2002 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * dmover_io.c: Support for user-space access to the dmover API.
 *
 * The interface is quite simple:
 *
 *	1.  The user opens /dev/dmover, which is a cloning device.  This
 *	    allocates internal state for the session.
 *
 *	2.  The user issues a DMIO_SETFUNC ioctl to select the data
 *	    movement function.  This actually creates the dmover session.
 *
 *	3.  The user writes request messages to the dmover handle.
 *
 *	4.  The user reads request responses from the dmover handle.
 *
 *	5.  The user closes the file descriptor and the session is
 *	    torn down.
 */
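
/*
 * For illustration, a minimal user-space sketch of the above.  It
 * assumes the back-end provides the standard "zero" function (which
 * takes no inputs); headers and error checking are omitted:
 *
 *	struct dmio_setfunc dsf;
 *	struct dmio_usrreq req;
 *	struct dmio_usrresp resp;
 *	struct iovec iov;
 *	static char buf[4096];
 *	int fd;
 *
 *	fd = open("/dev/dmover", O_RDWR);
 *
 *	memset(&dsf, 0, sizeof(dsf));
 *	strlcpy(dsf.dsf_name, "zero", sizeof(dsf.dsf_name));
 *	ioctl(fd, DMIO_SETFUNC, &dsf);
 *
 *	iov.iov_base = buf;
 *	iov.iov_len = sizeof(buf);
 *	memset(&req, 0, sizeof(req));
 *	req.req_id = 1;
 *	req.req_outbuf.dmbuf_iov = &iov;
 *	req.req_outbuf.dmbuf_iovcnt = 1;
 *	write(fd, &req, sizeof(req));
 *
 *	read(fd, &resp, sizeof(resp));
 *	assert(resp.resp_id == 1 && resp.resp_error == 0);
 *	close(fd);
 */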

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: dmover_io.c,v 1.8 2003/03/21 21:13:55 dsl Exp $");

#include <sys/param.h>
#include <sys/queue.h>
#include <sys/conf.h>
#include <sys/pool.h>
#include <sys/proc.h>
#include <sys/poll.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/filio.h>
#include <sys/select.h>
#include <sys/systm.h>

#include <dev/dmover/dmovervar.h>
#include <dev/dmover/dmover_io.h>
struct dmio_usrreq_state {
	TAILQ_ENTRY(dmio_usrreq_state) dus_q;	/* pending/complete queue glue */
	struct uio dus_uio_out;		/* output buffer description */
	struct uio *dus_uio_in;		/* array of input buffer descriptions */
	struct dmover_request *dus_req;	/* the dmover request itself */
	uint32_t dus_id;		/* user-supplied request ID */
};

struct dmio_state {
	struct dmover_session *ds_session;	/* backing dmover session */
	TAILQ_HEAD(, dmio_usrreq_state) ds_pending;	/* requests in flight */
	TAILQ_HEAD(, dmio_usrreq_state) ds_complete;	/* requests awaiting read */
	struct selinfo ds_selq;		/* select/poll state */
	__volatile int ds_flags;	/* DMIO_STATE_* flags, below */
	u_int ds_nreqs;			/* requests charged to this handle */
	struct simplelock ds_slock;	/* protects all of the above */
};

#define	DMIO_STATE_SEL		0x0001	/* poller recorded in ds_selq */
#define	DMIO_STATE_DEAD		0x0002	/* handle closed; reaping requests */
#define	DMIO_STATE_LARVAL	0x0004	/* DMIO_SETFUNC in progress */
#define	DMIO_STATE_READ_WAIT	0x0008	/* reader sleeping on ds_complete */
#define	DMIO_STATE_WRITE_WAIT	0x0010	/* writer sleeping on ds_nreqs */

#define	DMIO_NREQS_MAX		64	/* XXX pulled out of a hat */

struct pool dmio_state_pool;
struct pool dmio_usrreq_state_pool;

void	dmoverioattach(int);

dev_type_open(dmoverioopen);

const struct cdevsw dmoverio_cdevsw = {
	dmoverioopen, noclose, noread, nowrite, noioctl,
	nostop, notty, nopoll, nommap, nokqfilter,
};

/*
 * dmoverioattach:
 *
 *	Pseudo-device attach routine.
 */
void
dmoverioattach(int count)
{

	pool_init(&dmio_state_pool, sizeof(struct dmio_state),
	    0, 0, 0, "dmiostate", NULL);
	pool_init(&dmio_usrreq_state_pool, sizeof(struct dmio_usrreq_state),
	    0, 0, 0, "dmiourstate", NULL);
}

/*
 * dmio_usrreq_init:
 *
 *	Build a request structure.
 */
static int
dmio_usrreq_init(struct file *fp, struct dmio_usrreq_state *dus,
    struct dmio_usrreq *req, struct dmover_request *dreq)
{
	struct dmio_state *ds = (struct dmio_state *) fp->f_data;
	struct dmover_session *dses = ds->ds_session;
	struct uio *uio_out = &dus->dus_uio_out;
	struct uio *uio_in;
	dmio_buffer inbuf;
	size_t len;
	int i, error;
	u_int j;

	/* XXX How should malloc interact w/ FNONBLOCK? */

	if (req->req_outbuf.dmbuf_iovcnt > IOV_MAX)
		return (EINVAL);
	len = sizeof(struct iovec) * req->req_outbuf.dmbuf_iovcnt;
	if (len == 0)
		return (EINVAL);
	uio_out->uio_iov = malloc(len, M_TEMP, M_WAITOK);

	error = copyin(req->req_outbuf.dmbuf_iov, uio_out->uio_iov, len);
	if (error) {
		free(uio_out->uio_iov, M_TEMP);
		return (error);
	}

	for (j = 0, len = 0; j < req->req_outbuf.dmbuf_iovcnt; j++) {
		len += uio_out->uio_iov[j].iov_len;
		if (len > SSIZE_MAX) {
			free(uio_out->uio_iov, M_TEMP);
			return (EINVAL);
		}
	}

	uio_out->uio_iovcnt = req->req_outbuf.dmbuf_iovcnt;
	uio_out->uio_resid = len;
	uio_out->uio_rw = UIO_READ;
	uio_out->uio_segflg = UIO_USERSPACE;
	uio_out->uio_procp = curproc;

	dreq->dreq_outbuf_type = DMOVER_BUF_UIO;
	dreq->dreq_outbuf.dmbuf_uio = uio_out;

	if (dses->dses_ninputs == 0) {
		/* No inputs; copy the immediate. */
		memcpy(dreq->dreq_immediate, req->req_immediate,
		    sizeof(dreq->dreq_immediate));
		return (0);
	}

	dreq->dreq_inbuf_type = DMOVER_BUF_UIO;

	dus->dus_uio_in = malloc(sizeof(struct uio) * dses->dses_ninputs,
	    M_TEMP, M_WAITOK);
	memset(dus->dus_uio_in, 0, sizeof(struct uio) * dses->dses_ninputs);

	for (i = 0; i < dses->dses_ninputs; i++) {
		uio_in = &dus->dus_uio_in[i];

		error = copyin(&req->req_inbuf[i], &inbuf, sizeof(inbuf));
		if (error)
			goto bad;

		if (inbuf.dmbuf_iovcnt > IOV_MAX) {
			error = EINVAL;
			goto bad;
		}
		len = sizeof(struct iovec) * inbuf.dmbuf_iovcnt;
		if (len == 0) {
			error = EINVAL;
			goto bad;
		}
		uio_in->uio_iov = malloc(len, M_TEMP, M_WAITOK);

		error = copyin(inbuf.dmbuf_iov, uio_in->uio_iov, len);
		if (error) {
			free(uio_in->uio_iov, M_TEMP);
			goto bad;
		}

		/* Note: bound by the input's iovcnt, not the output's. */
		for (j = 0, len = 0; j < inbuf.dmbuf_iovcnt; j++) {
			len += uio_in->uio_iov[j].iov_len;
			if (len > SSIZE_MAX) {
				free(uio_in->uio_iov, M_TEMP);
				error = EINVAL;
				goto bad;
			}
		}

		if (len != uio_out->uio_resid) {
			free(uio_in->uio_iov, M_TEMP);
			error = EINVAL;
			goto bad;
		}

		uio_in->uio_iovcnt = inbuf.dmbuf_iovcnt;
		uio_in->uio_resid = len;
		uio_in->uio_rw = UIO_WRITE;
		uio_in->uio_segflg = UIO_USERSPACE;
		uio_in->uio_procp = curproc;

		dreq->dreq_inbuf[i].dmbuf_uio = uio_in;
	}

	return (0);

 bad:
	if (i > 0) {
		for (--i; i >= 0; i--) {
			uio_in = &dus->dus_uio_in[i];
			free(uio_in->uio_iov, M_TEMP);
		}
	}
	free(dus->dus_uio_in, M_TEMP);
	free(uio_out->uio_iov, M_TEMP);
	return (error);
}

/*
 * dmio_usrreq_fini:
 *
 *	Tear down a request.  Must be called at splsoftclock().
 */
static void
dmio_usrreq_fini(struct dmio_state *ds, struct dmio_usrreq_state *dus)
{
	struct dmover_session *dses = ds->ds_session;
	struct uio *uio_out = &dus->dus_uio_out;
	struct uio *uio_in;
	int i;

	free(uio_out->uio_iov, M_TEMP);

	if (dses->dses_ninputs == 0) {
		pool_put(&dmio_usrreq_state_pool, dus);
		return;
	}

	for (i = 0; i < dses->dses_ninputs; i++) {
		uio_in = &dus->dus_uio_in[i];
		free(uio_in->uio_iov, M_TEMP);
	}

	free(dus->dus_uio_in, M_TEMP);

	pool_put(&dmio_usrreq_state_pool, dus);
}

/*
 * dmio_read:
 *
 *	Read file op.
 */
static int
dmio_read(struct file *fp, off_t *offp, struct uio *uio,
    struct ucred *cred, int flags)
{
	struct dmio_state *ds = (struct dmio_state *) fp->f_data;
	struct dmio_usrreq_state *dus;
	struct dmover_request *dreq;
	struct dmio_usrresp resp;
	int s, error = 0, progress = 0;

	if ((uio->uio_resid % sizeof(resp)) != 0)
		return (EINVAL);

	if (ds->ds_session == NULL)
		return (ENXIO);

	s = splsoftclock();
	simple_lock(&ds->ds_slock);

	while (uio->uio_resid != 0) {

		for (;;) {
			dus = TAILQ_FIRST(&ds->ds_complete);
			if (dus == NULL) {
				if (fp->f_flag & FNONBLOCK) {
					error = progress ? 0 : EWOULDBLOCK;
					goto out;
				}
				/* Tell the callback a reader is waiting. */
				ds->ds_flags |= DMIO_STATE_READ_WAIT;
				error = ltsleep(&ds->ds_complete,
				    PRIBIO | PCATCH, "dmvrrd", 0,
				    &ds->ds_slock);
				if (error)
					goto out;
				continue;
			}
			/* Have a completed request. */
			TAILQ_REMOVE(&ds->ds_complete, dus, dus_q);
			ds->ds_nreqs--;
			if (ds->ds_flags & DMIO_STATE_WRITE_WAIT) {
				ds->ds_flags &= ~DMIO_STATE_WRITE_WAIT;
				wakeup(&ds->ds_nreqs);
			}
			if (ds->ds_flags & DMIO_STATE_SEL) {
				ds->ds_flags &= ~DMIO_STATE_SEL;
				selwakeup(&ds->ds_selq);
			}
			break;
		}

		simple_unlock(&ds->ds_slock);

		dreq = dus->dus_req;
		resp.resp_id = dus->dus_id;
		resp.resp_error = (dreq->dreq_flags & DMOVER_REQ_ERROR) ?
		    dreq->dreq_error : 0;

		dmio_usrreq_fini(ds, dus);

		splx(s);

		progress = 1;

		dmover_request_free(dreq);

		error = uiomove(&resp, sizeof(resp), uio);
		if (error)
			return (error);

		s = splsoftclock();
		simple_lock(&ds->ds_slock);
	}

 out:
	simple_unlock(&ds->ds_slock);
	splx(s);

	return (error);
}

/*
 * dmio_usrreq_done:
 *
 *	Dmover completion callback.
 */
static void
dmio_usrreq_done(struct dmover_request *dreq)
{
	struct dmio_usrreq_state *dus = dreq->dreq_cookie;
	struct dmio_state *ds = dreq->dreq_session->dses_cookie;

	/* We're already at splsoftclock(). */

	simple_lock(&ds->ds_slock);
	TAILQ_REMOVE(&ds->ds_pending, dus, dus_q);
	if (ds->ds_flags & DMIO_STATE_DEAD) {
		/*
		 * The handle was closed while this request was in
		 * flight; just reap it.  XXX ds_session is never
		 * destroyed on this path.
		 */
		ds->ds_nreqs--;
		dmio_usrreq_fini(ds, dus);
		dmover_request_free(dreq);
		if (ds->ds_nreqs == 0) {
			simple_unlock(&ds->ds_slock);
			pool_put(&dmio_state_pool, ds);
			return;
		}
	} else {
		TAILQ_INSERT_TAIL(&ds->ds_complete, dus, dus_q);
		if (ds->ds_flags & DMIO_STATE_READ_WAIT) {
			ds->ds_flags &= ~DMIO_STATE_READ_WAIT;
			wakeup(&ds->ds_complete);
		}
		if (ds->ds_flags & DMIO_STATE_SEL) {
			ds->ds_flags &= ~DMIO_STATE_SEL;
			selwakeup(&ds->ds_selq);
		}
	}
	simple_unlock(&ds->ds_slock);
}

/*
 * dmio_write:
 *
 *	Write file op.
 */
static int
dmio_write(struct file *fp, off_t *offp, struct uio *uio,
    struct ucred *cred, int flags)
{
	struct dmio_state *ds = (struct dmio_state *) fp->f_data;
	struct dmio_usrreq_state *dus;
	struct dmover_request *dreq;
	struct dmio_usrreq req;
	int error = 0, s, progress = 0;

	if ((uio->uio_resid % sizeof(req)) != 0)
		return (EINVAL);

	if (ds->ds_session == NULL)
		return (ENXIO);

	s = splsoftclock();
	simple_lock(&ds->ds_slock);

	while (uio->uio_resid != 0) {

		if (ds->ds_nreqs == DMIO_NREQS_MAX) {
			if (fp->f_flag & FNONBLOCK) {
				error = progress ? 0 : EWOULDBLOCK;
				break;
			}
			ds->ds_flags |= DMIO_STATE_WRITE_WAIT;
			error = ltsleep(&ds->ds_nreqs, PRIBIO | PCATCH,
			    "dmiowr", 0, &ds->ds_slock);
			if (error)
				break;
			continue;
		}

		ds->ds_nreqs++;

		simple_unlock(&ds->ds_slock);
		splx(s);

		progress = 1;

		error = uiomove(&req, sizeof(req), uio);
		if (error) {
			s = splsoftclock();
			simple_lock(&ds->ds_slock);
			ds->ds_nreqs--;
			break;
		}

		/* XXX How should this interact with FNONBLOCK? */
		dreq = dmover_request_alloc(ds->ds_session, NULL);
		if (dreq == NULL) {
			/* XXX */
			s = splsoftclock();
			simple_lock(&ds->ds_slock);
			ds->ds_nreqs--;
			error = ENOMEM;
			break;
		}
		s = splsoftclock();
		dus = pool_get(&dmio_usrreq_state_pool, PR_WAITOK);
		splx(s);

		error = dmio_usrreq_init(fp, dus, &req, dreq);
		if (error) {
			dmover_request_free(dreq);
			s = splsoftclock();
			pool_put(&dmio_usrreq_state_pool, dus);
			simple_lock(&ds->ds_slock);
			ds->ds_nreqs--;	/* release the slot we reserved */
			break;
		}

		dreq->dreq_callback = dmio_usrreq_done;
		dreq->dreq_cookie = dus;

		dus->dus_req = dreq;
		dus->dus_id = req.req_id;

		s = splsoftclock();
		simple_lock(&ds->ds_slock);

		TAILQ_INSERT_TAIL(&ds->ds_pending, dus, dus_q);

		simple_unlock(&ds->ds_slock);
		splx(s);

		dmover_process(dreq);

		s = splsoftclock();
		simple_lock(&ds->ds_slock);
	}

	simple_unlock(&ds->ds_slock);
	splx(s);

	return (error);
}

/*
 * dmio_ioctl:
 *
 *	Ioctl file op.
 */
static int
dmio_ioctl(struct file *fp, u_long cmd, void *data, struct proc *p)
{
	struct dmio_state *ds = (struct dmio_state *) fp->f_data;
	int error, s;

	switch (cmd) {
	case FIONBIO:
	case FIOASYNC:
		return (0);

	case DMIO_SETFUNC:
	    {
		struct dmio_setfunc *dsf = data;
		struct dmover_session *dses;

		s = splsoftclock();
		simple_lock(&ds->ds_slock);

		if (ds->ds_session != NULL ||
		    (ds->ds_flags & DMIO_STATE_LARVAL) != 0) {
			simple_unlock(&ds->ds_slock);
			splx(s);
			return (EBUSY);
		}

		ds->ds_flags |= DMIO_STATE_LARVAL;

		simple_unlock(&ds->ds_slock);
		splx(s);

		dsf->dsf_name[DMIO_MAX_FUNCNAME - 1] = '\0';
		error = dmover_session_create(dsf->dsf_name, &dses);

		s = splsoftclock();
		simple_lock(&ds->ds_slock);

		if (error == 0) {
			dses->dses_cookie = ds;
			ds->ds_session = dses;
		}
		ds->ds_flags &= ~DMIO_STATE_LARVAL;

		simple_unlock(&ds->ds_slock);
		splx(s);
		break;
	    }

	default:
		error = ENOTTY;
	}

	return (error);
}

/*
 * dmio_fcntl:
 *
 *	Fcntl file op.
 */
static int
dmio_fcntl(struct file *fp, u_int cmd, void *data, struct proc *p)
{

	if (cmd == FNONBLOCK || cmd == FASYNC)
		return (0);

	return (EOPNOTSUPP);
}

/*
 * dmio_poll:
 *
 *	Poll file op.
 */
static int
dmio_poll(struct file *fp, int events, struct proc *p)
{
	struct dmio_state *ds = (struct dmio_state *) fp->f_data;
	int s, revents = 0;

	if ((events & (POLLIN | POLLRDNORM | POLLOUT | POLLWRNORM)) == 0)
		return (revents);

	s = splsoftclock();
	simple_lock(&ds->ds_slock);

	if (ds->ds_flags & DMIO_STATE_DEAD) {
		/* EOF */
		revents |= events & (POLLIN | POLLRDNORM |
		    POLLOUT | POLLWRNORM);
		goto out;
	}

	/* We can read if there are completed requests. */
	if (events & (POLLIN | POLLRDNORM))
		if (TAILQ_EMPTY(&ds->ds_complete) == 0)
			revents |= events & (POLLIN | POLLRDNORM);

	/*
	 * We can write if there are fewer than DMIO_NREQS_MAX
	 * requests already in the queue.
	 */
	if (events & (POLLOUT | POLLWRNORM))
		if (ds->ds_nreqs < DMIO_NREQS_MAX)
			revents |= events & (POLLOUT | POLLWRNORM);

	if (revents == 0) {
		selrecord(p, &ds->ds_selq);
		ds->ds_flags |= DMIO_STATE_SEL;
	}

 out:
	simple_unlock(&ds->ds_slock);
	splx(s);

	return (revents);
}

/*
 * dmio_stat:
 *
 *	Stat file op.
 */
static int
dmio_stat(struct file *fp, struct stat *sb, struct proc *p)
{

	return (EOPNOTSUPP);
}

/*
 * dmio_close:
 *
 *	Close file op.
 */
static int
dmio_close(struct file *fp, struct proc *p)
{
	struct dmio_state *ds = (struct dmio_state *) fp->f_data;
	struct dmio_usrreq_state *dus;
	struct dmover_session *dses;
	int s;

	s = splsoftclock();
	simple_lock(&ds->ds_slock);

	ds->ds_flags |= DMIO_STATE_DEAD;

	/* Garbage-collect all the responses on the queue. */
	while ((dus = TAILQ_FIRST(&ds->ds_complete)) != NULL) {
		TAILQ_REMOVE(&ds->ds_complete, dus, dus_q);
		ds->ds_nreqs--;
		dmover_request_free(dus->dus_req);
		dmio_usrreq_fini(ds, dus);
	}

	/*
	 * If any requests are still pending, the completion callback
	 * will reap them and free the dmio_state; don't free it here.
	 */
	if (ds->ds_nreqs == 0) {
		dses = ds->ds_session;
		simple_unlock(&ds->ds_slock);
		pool_put(&dmio_state_pool, ds);
	} else {
		dses = NULL;
		simple_unlock(&ds->ds_slock);
	}

	splx(s);

	fp->f_data = NULL;

	if (dses != NULL)
		dmover_session_destroy(dses);

	return (0);
}

static struct fileops dmio_fileops = {
	dmio_read,
	dmio_write,
	dmio_ioctl,
	dmio_fcntl,
	dmio_poll,
	dmio_stat,
	dmio_close,
};

/*
 * dmoverioopen:
 *
 *	Device switch open routine.
 */
int
dmoverioopen(dev_t dev, int flag, int mode, struct proc *p)
{
	struct dmio_state *ds;
	struct file *fp;
	int error, fd, s;

	/* falloc() will use the descriptor for us. */
	if ((error = falloc(p, &fp, &fd)) != 0)
		return (error);

	s = splsoftclock();
	ds = pool_get(&dmio_state_pool, PR_WAITOK);
	splx(s);

	memset(ds, 0, sizeof(*ds));
	TAILQ_INIT(&ds->ds_pending);
	TAILQ_INIT(&ds->ds_complete);

	fp->f_flag = FREAD | FWRITE;
	fp->f_type = DTYPE_MISC;
	fp->f_ops = &dmio_fileops;
	fp->f_data = (caddr_t) ds;

	/*
	 * Returning ENXIO with p_dupfd set makes the open path dup
	 * our new descriptor to the caller (cloning-device trick).
	 */
	p->p_dupfd = fd;
	FILE_SET_MATURE(fp);
	FILE_UNUSE(fp, p);

	return (ENXIO);
}