/*	$NetBSD: dmover_io.c,v 1.38 2010/11/13 13:51:58 uebayasi Exp $	*/

/*
 * Copyright (c) 2002, 2003 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * dmover_io.c: Support for user-space access to dmover-api
 *
 * This interface is quite simple:
 *
 *	1.  The user opens /dev/dmover, which is a cloning device.  This
 *	    allocates internal state for the session.
 *
 *	2.  The user does a DMIO_SETFUNC to select the data movement
 *	    function.  This actually creates the dmover session.
 *
 *	3.  The user writes request messages to its dmover handle.
 *
 *	4.  The user reads request responses from its dmover handle.
 *
 *	5.  The user closes the file descriptor and the session is
 *	    torn down.
 */

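/*
 * A minimal sketch (not compiled here) of a user-space consumer of
 * this interface.  It assumes a registered dmover function named
 * "zero" (no inputs, one output) and omits all error checking; the
 * numbers on the right refer to the steps above.
 *
 *	#include <sys/ioctl.h>
 *	#include <sys/uio.h>
 *	#include <dev/dmover/dmover_io.h>
 *	#include <fcntl.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	char buf[4096];
 *	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
 *	struct dmio_setfunc dsf;
 *	struct dmio_usrreq req;
 *	struct dmio_usrresp resp;
 *
 *	int fd = open("/dev/dmover", O_RDWR);			// (1)
 *	memset(&dsf, 0, sizeof(dsf));
 *	strlcpy(dsf.dsf_name, "zero", sizeof(dsf.dsf_name));
 *	ioctl(fd, DMIO_SETFUNC, &dsf);				// (2)
 *	memset(&req, 0, sizeof(req));
 *	req.req_id = 1;
 *	req.req_outbuf.dmbuf_iov = &iov;
 *	req.req_outbuf.dmbuf_iovcnt = 1;
 *	write(fd, &req, sizeof(req));				// (3)
 *	read(fd, &resp, sizeof(resp));				// (4)
 *	close(fd);						// (5)
 */
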
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: dmover_io.c,v 1.38 2010/11/13 13:51:58 uebayasi Exp $");

#include <sys/param.h>
#include <sys/queue.h>
#include <sys/conf.h>
#include <sys/pool.h>
#include <sys/proc.h>
#include <sys/poll.h>
#include <sys/malloc.h>
#include <sys/simplelock.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/filio.h>
#include <sys/select.h>
#include <sys/systm.h>
#include <sys/workqueue.h>
#include <sys/once.h>
#include <sys/stat.h>
#include <sys/kauth.h>

#include <dev/dmover/dmovervar.h>
#include <dev/dmover/dmover_io.h>

struct dmio_usrreq_state {
	union {
		struct work u_work;
		TAILQ_ENTRY(dmio_usrreq_state) u_q;
	} dus_u;			/* queue entry while pending or
					   complete, work item while being
					   cleaned up; never both at once */
#define	dus_q		dus_u.u_q
#define	dus_work	dus_u.u_work
	struct uio dus_uio_out;		/* output buffer description */
	struct uio *dus_uio_in;		/* array of input buffer descriptions */
	struct dmover_request *dus_req;	/* the underlying dmover request */
	uint32_t dus_id;		/* user-assigned request ID */
	struct vmspace *dus_vmspace;	/* user vmspace, held for the I/O */
};

struct dmio_state {
	struct dmover_session *ds_session;	/* backing dmover session */
	TAILQ_HEAD(, dmio_usrreq_state) ds_pending;	/* submitted requests */
	TAILQ_HEAD(, dmio_usrreq_state) ds_complete;	/* finished requests */
	struct selinfo ds_selq;		/* select/poll bookkeeping */
	volatile int ds_flags;		/* DMIO_STATE_* flags */
	u_int ds_nreqs;			/* requests currently allocated */
	struct simplelock ds_slock;	/* protects all of the above */
	struct timespec ds_atime;	/* access time, for fo_stat */
	struct timespec ds_mtime;	/* modification time, for fo_stat */
	struct timespec ds_btime;	/* birth time, for fo_stat */
};

static ONCE_DECL(dmio_cleaner_control);
static struct workqueue *dmio_cleaner;
static int dmio_cleaner_init(void);
static void dmio_usrreq_fini1(struct work *wk, void *);

#define	DMIO_STATE_SEL		0x0001
#define	DMIO_STATE_DEAD		0x0002
#define	DMIO_STATE_LARVAL	0x0004
#define	DMIO_STATE_READ_WAIT	0x0008
#define	DMIO_STATE_WRITE_WAIT	0x0010

#define	DMIO_NREQS_MAX		64	/* XXX pulled out of a hat */

struct pool dmio_state_pool;
struct pool dmio_usrreq_state_pool;

void	dmoverioattach(int);

dev_type_open(dmoverioopen);

const struct cdevsw dmoverio_cdevsw = {
	dmoverioopen, noclose, noread, nowrite, noioctl,
	nostop, notty, nopoll, nommap, nokqfilter,
	D_OTHER
};

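/*
 * Note that only the open entry in the cdevsw above is real: a
 * successful open clones a per-session file descriptor via fd_clone(),
 * so all further operations on the handle are dispatched through
 * dmio_fileops (defined below) rather than through the cdevsw.
 */
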
/*
 * dmoverioattach:
 *
 *	Pseudo-device attach routine.
 */
void
dmoverioattach(int count)
{

	pool_init(&dmio_state_pool, sizeof(struct dmio_state),
	    0, 0, 0, "dmiostate", NULL, IPL_SOFTCLOCK);
	pool_init(&dmio_usrreq_state_pool, sizeof(struct dmio_usrreq_state),
	    0, 0, 0, "dmiourstate", NULL, IPL_SOFTCLOCK);
}

/*
 * dmio_cleaner_init:
 *
 *	Create cleaner thread.
 */
static int
dmio_cleaner_init(void)
{

	return workqueue_create(&dmio_cleaner, "dmioclean", dmio_usrreq_fini1,
	    NULL, PWAIT, IPL_SOFTCLOCK, 0);
}

/*
 * dmio_usrreq_init:
 *
 *	Build a request structure.
 */
static int
dmio_usrreq_init(struct file *fp, struct dmio_usrreq_state *dus,
    struct dmio_usrreq *req, struct dmover_request *dreq)
{
	struct dmio_state *ds = (struct dmio_state *) fp->f_data;
	struct dmover_session *dses = ds->ds_session;
	struct uio *uio_out = &dus->dus_uio_out;
	struct uio *uio_in;
	dmio_buffer inbuf;
	size_t len;
	int i, error;
	u_int j;

	/* XXX How should malloc interact w/ FNONBLOCK? */

	error = RUN_ONCE(&dmio_cleaner_control, dmio_cleaner_init);
	if (error) {
		return error;
	}

	error = proc_vmspace_getref(curproc, &dus->dus_vmspace);
	if (error) {
		return error;
	}

	if (req->req_outbuf.dmbuf_iovcnt != 0) {
		if (req->req_outbuf.dmbuf_iovcnt > IOV_MAX) {
			uvmspace_free(dus->dus_vmspace);
			return (EINVAL);
		}
		len = sizeof(struct iovec) * req->req_outbuf.dmbuf_iovcnt;
		uio_out->uio_iov = malloc(len, M_TEMP, M_WAITOK);
		error = copyin(req->req_outbuf.dmbuf_iov, uio_out->uio_iov,
		    len);
		if (error) {
			free(uio_out->uio_iov, M_TEMP);
			uvmspace_free(dus->dus_vmspace);
			return (error);
		}

		for (j = 0, len = 0; j < req->req_outbuf.dmbuf_iovcnt; j++) {
			len += uio_out->uio_iov[j].iov_len;
			if (len > SSIZE_MAX) {
				free(uio_out->uio_iov, M_TEMP);
				uvmspace_free(dus->dus_vmspace);
				return (EINVAL);
			}
		}

		uio_out->uio_iovcnt = req->req_outbuf.dmbuf_iovcnt;
		uio_out->uio_resid = len;
		uio_out->uio_rw = UIO_READ;
		uio_out->uio_vmspace = dus->dus_vmspace;

		dreq->dreq_outbuf_type = DMOVER_BUF_UIO;
		dreq->dreq_outbuf.dmbuf_uio = uio_out;
	} else {
		uio_out->uio_iov = NULL;
		uio_out = NULL;
		dreq->dreq_outbuf_type = DMOVER_BUF_NONE;
	}

	memcpy(dreq->dreq_immediate, req->req_immediate,
	    sizeof(dreq->dreq_immediate));

	if (dses->dses_ninputs == 0) {
		/* No inputs; all done. */
		return (0);
	}

	dreq->dreq_inbuf_type = DMOVER_BUF_UIO;

	dus->dus_uio_in = malloc(sizeof(struct uio) * dses->dses_ninputs,
	    M_TEMP, M_WAITOK);
	memset(dus->dus_uio_in, 0, sizeof(struct uio) * dses->dses_ninputs);

	for (i = 0; i < dses->dses_ninputs; i++) {
		uio_in = &dus->dus_uio_in[i];

		error = copyin(&req->req_inbuf[i], &inbuf, sizeof(inbuf));
		if (error)
			goto bad;

		if (inbuf.dmbuf_iovcnt > IOV_MAX) {
			error = EINVAL;
			goto bad;
		}
		len = sizeof(struct iovec) * inbuf.dmbuf_iovcnt;
		if (len == 0) {
			error = EINVAL;
			goto bad;
		}
		uio_in->uio_iov = malloc(len, M_TEMP, M_WAITOK);

		error = copyin(inbuf.dmbuf_iov, uio_in->uio_iov, len);
		if (error) {
			free(uio_in->uio_iov, M_TEMP);
			goto bad;
		}

		for (j = 0, len = 0; j < inbuf.dmbuf_iovcnt; j++) {
			len += uio_in->uio_iov[j].iov_len;
			if (len > SSIZE_MAX) {
				free(uio_in->uio_iov, M_TEMP);
				error = EINVAL;
				goto bad;
			}
		}

		if (uio_out != NULL && len != uio_out->uio_resid) {
			free(uio_in->uio_iov, M_TEMP);
			error = EINVAL;
			goto bad;
		}

		uio_in->uio_iovcnt = inbuf.dmbuf_iovcnt;
		uio_in->uio_resid = len;
		uio_in->uio_rw = UIO_WRITE;
		uio_in->uio_vmspace = dus->dus_vmspace;

		dreq->dreq_inbuf[i].dmbuf_uio = uio_in;
	}

	return (0);

 bad:
	if (i > 0) {
		for (--i; i >= 0; i--) {
			uio_in = &dus->dus_uio_in[i];
			free(uio_in->uio_iov, M_TEMP);
		}
	}
	free(dus->dus_uio_in, M_TEMP);
	if (uio_out != NULL)
		free(uio_out->uio_iov, M_TEMP);
	uvmspace_free(dus->dus_vmspace);
	return (error);
}

/*
 * dmio_usrreq_fini:
 *
 *	Tear down a request.  Must be called at splsoftclock().
 */
static void
dmio_usrreq_fini(struct dmio_state *ds, struct dmio_usrreq_state *dus)
{
	struct dmover_session *dses = ds->ds_session;
	struct uio *uio_out = &dus->dus_uio_out;
	struct uio *uio_in;
	int i;

	if (uio_out->uio_iov != NULL)
		free(uio_out->uio_iov, M_TEMP);

	if (dses->dses_ninputs) {
		for (i = 0; i < dses->dses_ninputs; i++) {
			uio_in = &dus->dus_uio_in[i];
			free(uio_in->uio_iov, M_TEMP);
		}
		free(dus->dus_uio_in, M_TEMP);
	}

	/*
	 * Defer the rest of the teardown to the cleaner workqueue;
	 * uvmspace_free() is not safe to call in softclock context.
	 */
	workqueue_enqueue(dmio_cleaner, &dus->dus_work, NULL);
}

static void
dmio_usrreq_fini1(struct work *wk, void *dummy)
{
	struct dmio_usrreq_state *dus = (void *)wk;
	int s;

	KASSERT(wk == &dus->dus_work);

	uvmspace_free(dus->dus_vmspace);
	s = splsoftclock();
	pool_put(&dmio_usrreq_state_pool, dus);
	splx(s);
}

/*
 * dmio_read:
 *
 *	Read file op.
 */
static int
dmio_read(struct file *fp, off_t *offp, struct uio *uio,
    kauth_cred_t cred, int flags)
{
	struct dmio_state *ds = (struct dmio_state *) fp->f_data;
	struct dmio_usrreq_state *dus;
	struct dmover_request *dreq;
	struct dmio_usrresp resp;
	int s, error = 0, progress = 0;

	if ((uio->uio_resid % sizeof(resp)) != 0)
		return (EINVAL);

	if (ds->ds_session == NULL)
		return (ENXIO);

	getnanotime(&ds->ds_atime);
	s = splsoftclock();
	simple_lock(&ds->ds_slock);

	while (uio->uio_resid != 0) {

		for (;;) {
			dus = TAILQ_FIRST(&ds->ds_complete);
			if (dus == NULL) {
				if (fp->f_flag & FNONBLOCK) {
					error = progress ? 0 : EWOULDBLOCK;
					goto out;
				}
				ds->ds_flags |= DMIO_STATE_READ_WAIT;
				error = ltsleep(&ds->ds_complete,
				    PRIBIO | PCATCH, "dmvrrd", 0,
				    &ds->ds_slock);
				if (error)
					goto out;
				continue;
			}
			/* Have a completed request. */
			TAILQ_REMOVE(&ds->ds_complete, dus, dus_q);
			ds->ds_nreqs--;
			if (ds->ds_flags & DMIO_STATE_WRITE_WAIT) {
				ds->ds_flags &= ~DMIO_STATE_WRITE_WAIT;
				wakeup(&ds->ds_nreqs);
			}
			if (ds->ds_flags & DMIO_STATE_SEL) {
				ds->ds_flags &= ~DMIO_STATE_SEL;
				selnotify(&ds->ds_selq, POLLIN | POLLRDNORM, 0);
			}
			break;
		}

		simple_unlock(&ds->ds_slock);

		dreq = dus->dus_req;
		resp.resp_id = dus->dus_id;
		if (dreq->dreq_flags & DMOVER_REQ_ERROR)
			resp.resp_error = dreq->dreq_error;
		else {
			resp.resp_error = 0;
			memcpy(resp.resp_immediate, dreq->dreq_immediate,
			    sizeof(resp.resp_immediate));
		}

		dmio_usrreq_fini(ds, dus);

		splx(s);

		progress = 1;

		dmover_request_free(dreq);

		error = uiomove(&resp, sizeof(resp), uio);
		if (error)
			return (error);

		s = splsoftclock();
		simple_lock(&ds->ds_slock);
	}

 out:
	simple_unlock(&ds->ds_slock);
	splx(s);

	return (error);
}

/*
 * dmio_usrreq_done:
 *
 *	Dmover completion callback.
 */
static void
dmio_usrreq_done(struct dmover_request *dreq)
{
	struct dmio_usrreq_state *dus = dreq->dreq_cookie;
	struct dmio_state *ds = dreq->dreq_session->dses_cookie;

	/* We're already at splsoftclock(). */

	simple_lock(&ds->ds_slock);
	TAILQ_REMOVE(&ds->ds_pending, dus, dus_q);
	if (ds->ds_flags & DMIO_STATE_DEAD) {
		ds->ds_nreqs--;
		dmio_usrreq_fini(ds, dus);
		dmover_request_free(dreq);
		if (ds->ds_nreqs == 0) {
			simple_unlock(&ds->ds_slock);
			seldestroy(&ds->ds_selq);
			pool_put(&dmio_state_pool, ds);
			return;
		}
	} else {
		TAILQ_INSERT_TAIL(&ds->ds_complete, dus, dus_q);
		if (ds->ds_flags & DMIO_STATE_READ_WAIT) {
			ds->ds_flags &= ~DMIO_STATE_READ_WAIT;
			wakeup(&ds->ds_complete);
		}
		if (ds->ds_flags & DMIO_STATE_SEL) {
			ds->ds_flags &= ~DMIO_STATE_SEL;
			selnotify(&ds->ds_selq, POLLOUT | POLLWRNORM, 0);
		}
	}
	simple_unlock(&ds->ds_slock);
}

/*
 * dmio_write:
 *
 *	Write file op.
 */
static int
dmio_write(struct file *fp, off_t *offp, struct uio *uio,
    kauth_cred_t cred, int flags)
{
	struct dmio_state *ds = (struct dmio_state *) fp->f_data;
	struct dmio_usrreq_state *dus;
	struct dmover_request *dreq;
	struct dmio_usrreq req;
	int error = 0, s, progress = 0;

	if ((uio->uio_resid % sizeof(req)) != 0)
		return (EINVAL);

	if (ds->ds_session == NULL)
		return (ENXIO);

	getnanotime(&ds->ds_mtime);
	s = splsoftclock();
	simple_lock(&ds->ds_slock);

	while (uio->uio_resid != 0) {

		if (ds->ds_nreqs == DMIO_NREQS_MAX) {
			if (fp->f_flag & FNONBLOCK) {
				error = progress ? 0 : EWOULDBLOCK;
				break;
			}
			ds->ds_flags |= DMIO_STATE_WRITE_WAIT;
			error = ltsleep(&ds->ds_nreqs, PRIBIO | PCATCH,
			    "dmiowr", 0, &ds->ds_slock);
			if (error)
				break;
			continue;
		}

		ds->ds_nreqs++;

		simple_unlock(&ds->ds_slock);
		splx(s);

		progress = 1;

		error = uiomove(&req, sizeof(req), uio);
		if (error) {
			s = splsoftclock();
			simple_lock(&ds->ds_slock);
			ds->ds_nreqs--;
			break;
		}

		/* XXX How should this interact with FNONBLOCK? */
		dreq = dmover_request_alloc(ds->ds_session, NULL);
		if (dreq == NULL) {
			/* XXX */
			s = splsoftclock();
			simple_lock(&ds->ds_slock);
			ds->ds_nreqs--;
			error = ENOMEM;
			break;
		}
		s = splsoftclock();
		dus = pool_get(&dmio_usrreq_state_pool, PR_WAITOK);
		splx(s);

		error = dmio_usrreq_init(fp, dus, &req, dreq);
		if (error) {
			dmover_request_free(dreq);
			s = splsoftclock();
			pool_put(&dmio_usrreq_state_pool, dus);
			simple_lock(&ds->ds_slock);
			ds->ds_nreqs--;
			break;
		}

		dreq->dreq_callback = dmio_usrreq_done;
		dreq->dreq_cookie = dus;

		dus->dus_req = dreq;
		dus->dus_id = req.req_id;

		s = splsoftclock();
		simple_lock(&ds->ds_slock);

		TAILQ_INSERT_TAIL(&ds->ds_pending, dus, dus_q);

		simple_unlock(&ds->ds_slock);
		splx(s);

		dmover_process(dreq);

		s = splsoftclock();
		simple_lock(&ds->ds_slock);
	}

	simple_unlock(&ds->ds_slock);
	splx(s);

	return (error);
}

static int
dmio_stat(struct file *fp, struct stat *st)
{
	struct dmio_state *ds = fp->f_data;

	(void)memset(st, 0, sizeof(*st));
	KERNEL_LOCK(1, NULL);
	st->st_dev = makedev(cdevsw_lookup_major(&dmoverio_cdevsw), 0);
	st->st_atimespec = ds->ds_atime;
	st->st_mtimespec = ds->ds_mtime;
	st->st_ctimespec = st->st_birthtimespec = ds->ds_btime;
	st->st_uid = kauth_cred_geteuid(fp->f_cred);
	st->st_gid = kauth_cred_getegid(fp->f_cred);
	KERNEL_UNLOCK_ONE(NULL);
	return 0;
}

/*
 * dmio_ioctl:
 *
 *	Ioctl file op.
 */
static int
dmio_ioctl(struct file *fp, u_long cmd, void *data)
{
	struct dmio_state *ds = (struct dmio_state *) fp->f_data;
	int error, s;

	switch (cmd) {
	case FIONBIO:
	case FIOASYNC:
		return (0);

	case DMIO_SETFUNC:
	    {
		struct dmio_setfunc *dsf = data;
		struct dmover_session *dses;

		s = splsoftclock();
		simple_lock(&ds->ds_slock);

		if (ds->ds_session != NULL ||
		    (ds->ds_flags & DMIO_STATE_LARVAL) != 0) {
			simple_unlock(&ds->ds_slock);
			splx(s);
			return (EBUSY);
		}

		ds->ds_flags |= DMIO_STATE_LARVAL;

		simple_unlock(&ds->ds_slock);
		splx(s);

		dsf->dsf_name[DMIO_MAX_FUNCNAME - 1] = '\0';
		error = dmover_session_create(dsf->dsf_name, &dses);

		s = splsoftclock();
		simple_lock(&ds->ds_slock);

		if (error == 0) {
			dses->dses_cookie = ds;
			ds->ds_session = dses;
		}
		ds->ds_flags &= ~DMIO_STATE_LARVAL;

		simple_unlock(&ds->ds_slock);
		splx(s);
		break;
	    }

	default:
		error = ENOTTY;
	}

	return (error);
}

/*
 * dmio_poll:
 *
 *	Poll file op.
 */
static int
dmio_poll(struct file *fp, int events)
{
	struct dmio_state *ds = (struct dmio_state *) fp->f_data;
	int s, revents = 0;

	if ((events & (POLLIN | POLLRDNORM | POLLOUT | POLLWRNORM)) == 0)
		return (revents);

	s = splsoftclock();
	simple_lock(&ds->ds_slock);

	if (ds->ds_flags & DMIO_STATE_DEAD) {
		/* EOF */
		revents |= events & (POLLIN | POLLRDNORM |
		    POLLOUT | POLLWRNORM);
		goto out;
	}

	/* We can read if there are completed requests. */
	if (events & (POLLIN | POLLRDNORM))
		if (TAILQ_EMPTY(&ds->ds_complete) == 0)
			revents |= events & (POLLIN | POLLRDNORM);

	/*
	 * We can write if there are fewer than DMIO_NREQS_MAX requests
	 * already in the queue.
	 */
	if (events & (POLLOUT | POLLWRNORM))
		if (ds->ds_nreqs < DMIO_NREQS_MAX)
			revents |= events & (POLLOUT | POLLWRNORM);

	if (revents == 0) {
		selrecord(curlwp, &ds->ds_selq);
		ds->ds_flags |= DMIO_STATE_SEL;
	}

 out:
	simple_unlock(&ds->ds_slock);
	splx(s);

	return (revents);
}

/*
 * dmio_close:
 *
 *	Close file op.
 */
static int
dmio_close(struct file *fp)
{
	struct dmio_state *ds = (struct dmio_state *) fp->f_data;
	struct dmio_usrreq_state *dus;
	struct dmover_session *dses;
	int s;

	s = splsoftclock();
	simple_lock(&ds->ds_slock);

	ds->ds_flags |= DMIO_STATE_DEAD;

	/* Garbage-collect all the responses on the queue. */
	while ((dus = TAILQ_FIRST(&ds->ds_complete)) != NULL) {
		TAILQ_REMOVE(&ds->ds_complete, dus, dus_q);
		ds->ds_nreqs--;
		dmover_request_free(dus->dus_req);
		dmio_usrreq_fini(ds, dus);
	}

	/*
	 * If there are any requests pending, we have to wait for
	 * them.  Don't free the dmio_state in this case.
	 */
	if (ds->ds_nreqs == 0) {
		dses = ds->ds_session;
		simple_unlock(&ds->ds_slock);
		seldestroy(&ds->ds_selq);
		pool_put(&dmio_state_pool, ds);
	} else {
		dses = NULL;
		simple_unlock(&ds->ds_slock);
	}

	splx(s);

	fp->f_data = NULL;

	if (dses != NULL)
		dmover_session_destroy(dses);

	return (0);
}

static const struct fileops dmio_fileops = {
	.fo_read = dmio_read,
	.fo_write = dmio_write,
	.fo_ioctl = dmio_ioctl,
	.fo_fcntl = fnullop_fcntl,
	.fo_poll = dmio_poll,
	.fo_stat = dmio_stat,
	.fo_close = dmio_close,
	.fo_kqfilter = fnullop_kqfilter,
	.fo_restart = fnullop_restart,
};

/*
 * dmoverioopen:
 *
 *	Device switch open routine.
 */
int
dmoverioopen(dev_t dev, int flag, int mode, struct lwp *l)
{
	struct dmio_state *ds;
	struct file *fp;
	int error, fd, s;

	/* fd_allocfile() will use the descriptor for us. */
	if ((error = fd_allocfile(&fp, &fd)) != 0)
		return (error);

	s = splsoftclock();
	ds = pool_get(&dmio_state_pool, PR_WAITOK);
	splx(s);

	memset(ds, 0, sizeof(*ds));
	getnanotime(&ds->ds_btime);
	ds->ds_atime = ds->ds_mtime = ds->ds_btime;
	simple_lock_init(&ds->ds_slock);
	TAILQ_INIT(&ds->ds_pending);
	TAILQ_INIT(&ds->ds_complete);
	selinit(&ds->ds_selq);

	return fd_clone(fp, fd, flag, &dmio_fileops, ds);
}