/*	$NetBSD: md.c,v 1.76.2.5 2016/07/27 03:25:00 pgoyette Exp $	*/

/*
 * Copyright (c) 1995 Gordon W. Ross, Leo Weppelman.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * This implements a general-purpose memory-disk.
 * See md.h for notes on the config types.
 *
 * Note that this driver provides the same functionality
 * as the MFS filesystem hack, but this is better because
 * you can use this for any filesystem type you'd like!
 *
 * Credit for most of the kmem ramdisk code goes to:
 *   Leo Weppelman (atari) and Phil Nelson (pc532)
 * Credit for the ideas behind the "user space memory" code goes
 * to the authors of the MFS implementation.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: md.c,v 1.76.2.5 2016/07/27 03:25:00 pgoyette Exp $");

#ifdef _KERNEL_OPT
#include "opt_md.h"
#else
#define MEMORY_DISK_SERVER 1
#endif

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/bufq.h>
#include <sys/device.h>
#include <sys/disk.h>
#include <sys/stat.h>
#include <sys/proc.h>
#include <sys/conf.h>
#include <sys/disklabel.h>
#include <sys/localcount.h>

#include <uvm/uvm_extern.h>

#include <dev/md.h>

#include "ioconf.h"
/*
 * The user-space functionality is included by default.
 * Use `options MEMORY_DISK_SERVER=0' to turn it off.
 */
#ifndef MEMORY_DISK_SERVER
#error MEMORY_DISK_SERVER should be defined by opt_md.h
#endif /* MEMORY_DISK_SERVER */

/*
 * We should use the raw partition for ioctl.
 */
#define MD_UNIT(unit)	DISKUNIT(unit)

/* autoconfig stuff... */

struct md_softc {
	device_t sc_dev;	/* Self. */
	struct disk sc_dkdev;	/* hook for generic disk handling */
	struct md_conf sc_md;
	kmutex_t sc_lock;	/* Protect self. */
	kcondvar_t sc_cv;	/* Wait here for work. */
	struct bufq_state *sc_buflist;
};
/* shorthand for fields in sc_md: */
#define sc_addr sc_md.md_addr
#define sc_size sc_md.md_size
#define sc_type sc_md.md_type

static void md_attach(device_t, device_t, void *);
static int md_detach(device_t, int);

static dev_type_open(mdopen);
static dev_type_close(mdclose);
static dev_type_read(mdread);
static dev_type_write(mdwrite);
static dev_type_ioctl(mdioctl);
static dev_type_strategy(mdstrategy);
static dev_type_size(mdsize);

const struct bdevsw md_bdevsw = {
	DEVSW_MODULE_INIT
	.d_open = mdopen,
	.d_close = mdclose,
	.d_strategy = mdstrategy,
	.d_ioctl = mdioctl,
	.d_dump = nodump,
	.d_psize = mdsize,
	.d_discard = nodiscard,
	.d_flag = D_DISK | D_MPSAFE
};

const struct cdevsw md_cdevsw = {
	DEVSW_MODULE_INIT
	.d_open = mdopen,
	.d_close = mdclose,
	.d_read = mdread,
	.d_write = mdwrite,
	.d_ioctl = mdioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = nopoll,
	.d_mmap = nommap,
	.d_kqfilter = nokqfilter,
	.d_discard = nodiscard,
	.d_flag = D_DISK
};

static struct dkdriver mddkdriver = {
	.d_strategy = mdstrategy
};

extern struct cfdriver md_cd;
CFATTACH_DECL3_NEW(md, sizeof(struct md_softc),
	0, md_attach, md_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

static kmutex_t md_device_lock;	/* Protect unit creation / deletion. */
extern size_t md_root_size;

static void md_set_disklabel(struct md_softc *);

/*
 * This is called if we are configured as a pseudo-device
 */
void
mdattach(int n)
{

	mutex_init(&md_device_lock, MUTEX_DEFAULT, IPL_NONE);
	if (config_cfattach_attach(md_cd.cd_name, &md_ca)) {
		aprint_error("%s: cfattach_attach failed\n", md_cd.cd_name);
		return;
	}
}

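/*
 * Autoconfiguration attach.  The unit starts out MD_UNCONFIGURED; the
 * backing store is supplied later via ioctl (MD_SETCONF) or, with
 * MEMORY_DISK_HOOKS, by md_attach_hook() for a pre-loaded image.
 */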
static void
md_attach(device_t parent, device_t self, void *aux)
{
	struct md_softc *sc = device_private(self);

	sc->sc_dev = self;
	sc->sc_type = MD_UNCONFIGURED;
	mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&sc->sc_cv, "mdidle");
	bufq_alloc(&sc->sc_buflist, "fcfs", 0);

	/* XXX - Could accept aux info here to set the config. */
#ifdef MEMORY_DISK_HOOKS
	/*
	 * This external function might setup a pre-loaded disk.
	 * All it would need to do is setup the md_conf struct.
	 * See sys/dev/md_root.c for an example.
	 */
	md_attach_hook(device_unit(self), &sc->sc_md);
#endif

	/*
	 * Initialize and attach the disk structure.
	 */
	disk_init(&sc->sc_dkdev, device_xname(self), &mddkdriver);
	disk_attach(&sc->sc_dkdev);

	if (sc->sc_type != MD_UNCONFIGURED)
		md_set_disklabel(sc);

	if (!pmf_device_register(self, NULL, NULL))
		aprint_error_dev(self, "couldn't establish power handler\n");
}

/*
 * Caller must hold a reference to the device's localcount. The reference
 * is released if detach is successful.
 */
static int
md_detach(device_t self, int flags)
{
	struct md_softc *sc = device_private(self);
	int rc;

	rc = 0;
	mutex_enter(&sc->sc_dkdev.dk_openlock);
	if (sc->sc_dkdev.dk_openmask == 0 && sc->sc_type == MD_UNCONFIGURED)
		;	/* nothing to do */
	else if ((flags & DETACH_FORCE) == 0)
		rc = EBUSY;
	mutex_exit(&sc->sc_dkdev.dk_openlock);

	if (rc != 0)
		return rc;

	device_release(self);
	pmf_device_deregister(self);
	disk_detach(&sc->sc_dkdev);
	disk_destroy(&sc->sc_dkdev);
	bufq_free(sc->sc_buflist);
	mutex_destroy(&sc->sc_lock);
	cv_destroy(&sc->sc_cv);
	return 0;
}

/*
 * operational routines:
 * open, close, read, write, strategy,
 * ioctl, dump, size
 */

#if MEMORY_DISK_SERVER
static int md_server_loop(struct md_softc *sc);
static int md_ioctl_server(struct md_softc *sc, struct md_conf *umd,
    struct lwp *l);
#endif /* MEMORY_DISK_SERVER */
static int md_ioctl_kalloc(struct md_softc *sc, struct md_conf *umd,
    struct lwp *l);

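/*
 * Return the device size in DEV_BSIZE blocks, or 0 if the unit
 * does not exist or is not yet configured.
 */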
static int
mdsize(dev_t dev)
{
	device_t self;
	struct md_softc *sc;
	int res;

	self = device_lookup_acquire(&md_cd, MD_UNIT(dev));
	if (self == NULL)
		return 0;
	sc = device_private(self);

	mutex_enter(&sc->sc_lock);
	if (sc->sc_type == MD_UNCONFIGURED)
		res = 0;
	else
		res = sc->sc_size >> DEV_BSHIFT;
	mutex_exit(&sc->sc_lock);

	device_release(self);
	return res;
}

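/*
 * Open the device.  A first open of the raw partition on a nonexistent
 * unit attaches a new pseudo-device on the fly; non-raw partitions are
 * only allowed once the unit has been configured.
 */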
static int
mdopen(dev_t dev, int flag, int fmt, struct lwp *l)
{
	device_t self;
	int unit;
	int part = DISKPART(dev);
	int pmask = 1 << part;
	cfdata_t cf;
	struct md_softc *sc;
	struct disk *dk;
#ifdef MEMORY_DISK_HOOKS
	bool configured;
#endif

	mutex_enter(&md_device_lock);
	unit = MD_UNIT(dev);
	sc = NULL;
	self = device_lookup_acquire(&md_cd, unit);
	if (self == NULL) {
		if (part != RAW_PART) {
			mutex_exit(&md_device_lock);
			return ENXIO;
		}
		cf = malloc(sizeof(*cf), M_DEVBUF, M_WAITOK);
		cf->cf_name = md_cd.cd_name;
		cf->cf_atname = md_cd.cd_name;
		cf->cf_unit = unit;
		cf->cf_fstate = FSTATE_STAR;
		self = config_attach_pseudo(cf);
		if (self != NULL) {
			device_acquire(self);
			sc = device_private(self);
		}
		if (sc == NULL) {
			/* Attach failed; there is no reference to release. */
			mutex_exit(&md_device_lock);
			free(cf, M_DEVBUF);
			return ENOMEM;
		}
	} else
		sc = device_private(self);

	dk = &sc->sc_dkdev;

	/*
	 * The raw partition is used for ioctl to configure.
	 */
	if (part == RAW_PART)
		goto ok;

#ifdef MEMORY_DISK_HOOKS
	/* Call the open hook to allow loading the device. */
	configured = (sc->sc_type != MD_UNCONFIGURED);
	md_open_hook(unit, &sc->sc_md);
	/* initialize disklabel if the device is configured in open hook */
	if (!configured && sc->sc_type != MD_UNCONFIGURED)
		md_set_disklabel(sc);
#endif

	/*
	 * This is a normal, "slave" device, so
	 * enforce initialized.
	 */
	if (sc->sc_type == MD_UNCONFIGURED) {
		mutex_exit(&md_device_lock);
		device_release(self);
		return ENXIO;
	}

ok:
	/* XXX duplicates code in dk_open(). Call dk_open(), instead? */
	mutex_enter(&dk->dk_openlock);
	/* Mark our unit as open. */
	switch (fmt) {
	case S_IFCHR:
		dk->dk_copenmask |= pmask;
		break;
	case S_IFBLK:
		dk->dk_bopenmask |= pmask;
		break;
	}

	dk->dk_openmask = dk->dk_copenmask | dk->dk_bopenmask;

	mutex_exit(&dk->dk_openlock);
	mutex_exit(&md_device_lock);
	device_release(self);
	return 0;
}

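/*
 * Close the device.  On the last close the pseudo-device is detached
 * and its cfdata freed.
 */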
static int
mdclose(dev_t dev, int flag, int fmt, struct lwp *l)
{
	device_t self;
	int part = DISKPART(dev);
	int pmask = 1 << part;
	int error;
	cfdata_t cf;
	struct md_softc *sc;
	struct disk *dk;

	self = device_lookup_acquire(&md_cd, MD_UNIT(dev));
	if (self == NULL)
		return ENXIO;
	sc = device_private(self);

	dk = &sc->sc_dkdev;

	mutex_enter(&dk->dk_openlock);

	switch (fmt) {
	case S_IFCHR:
		dk->dk_copenmask &= ~pmask;
		break;
	case S_IFBLK:
		dk->dk_bopenmask &= ~pmask;
		break;
	}
	dk->dk_openmask = dk->dk_copenmask | dk->dk_bopenmask;
	if (dk->dk_openmask != 0) {
		mutex_exit(&dk->dk_openlock);
		device_release(self);
		return 0;
	}

	mutex_exit(&dk->dk_openlock);

	mutex_enter(&md_device_lock);
	cf = device_cfdata(sc->sc_dev);
	error = config_detach(sc->sc_dev, DETACH_QUIET);
	if (! error)
		free(cf, M_DEVBUF);
	mutex_exit(&md_device_lock);
	device_release(self);
	return error;
}

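/* Character device read: done through physio() onto mdstrategy(). */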
static int
mdread(dev_t dev, struct uio *uio, int flags)
{
	device_t self;
	struct md_softc *sc;
	int error;

	self = device_lookup_acquire(&md_cd, MD_UNIT(dev));
	if (self == NULL)
		return ENXIO;

	sc = device_private(self);
	if (sc == NULL || sc->sc_type == MD_UNCONFIGURED) {
		device_release(self);
		return ENXIO;
	}

	error = (physio(mdstrategy, NULL, dev, B_READ, minphys, uio));
	device_release(self);
	return error;
}

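/* Character device write: done through physio() onto mdstrategy(). */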
static int
mdwrite(dev_t dev, struct uio *uio, int flags)
{
	device_t self;
	struct md_softc *sc;
	int error;

	self = device_lookup_acquire(&md_cd, MD_UNIT(dev));
	if (self == NULL)
		return ENXIO;

	sc = device_private(self);
	if (sc == NULL || sc->sc_type == MD_UNCONFIGURED) {
		device_release(self);
		return ENXIO;
	}

	error = (physio(mdstrategy, NULL, dev, B_WRITE, minphys, uio));

	device_release(self);
	return error;
}

/*
 * Handle I/O requests, either directly, or
 * by passing them to the server process.
 */
static void
mdstrategy(struct buf *bp)
{
	device_t self;
	struct md_softc *sc;
	void *addr;
	size_t off, xfer;
	bool is_read;

	self = device_lookup_acquire(&md_cd, MD_UNIT(bp->b_dev));
	if (self == NULL) {
		bp->b_error = ENXIO;
		goto done;
	}

	sc = device_private(self);
	if (sc->sc_type == MD_UNCONFIGURED) {
		bp->b_error = ENXIO;
		goto done;
	}
	mutex_enter(&sc->sc_lock);

	switch (sc->sc_type) {
#if MEMORY_DISK_SERVER
	case MD_UMEM_SERVER:
		/* Just add this job to the server's queue. */
		bufq_put(sc->sc_buflist, bp);
		cv_signal(&sc->sc_cv);
		mutex_exit(&sc->sc_lock);
		/* see md_server_loop() */
		/* no biodone in this case */
		device_release(self);
		return;
#endif /* MEMORY_DISK_SERVER */

	case MD_KMEM_FIXED:
	case MD_KMEM_ALLOCATED:
		/* These are in kernel space. Access directly. */
		is_read = ((bp->b_flags & B_READ) == B_READ);
		bp->b_resid = bp->b_bcount;
		off = (bp->b_blkno << DEV_BSHIFT);
		if (off >= sc->sc_size) {
			if (is_read)
				break;	/* EOF */
			goto set_eio;
		}
		xfer = bp->b_resid;
		if (xfer > (sc->sc_size - off))
			xfer = (sc->sc_size - off);
		addr = (char *)sc->sc_addr + off;
		disk_busy(&sc->sc_dkdev);
		if (is_read)
			memcpy(bp->b_data, addr, xfer);
		else
			memcpy(addr, bp->b_data, xfer);
		disk_unbusy(&sc->sc_dkdev, xfer, is_read);
		bp->b_resid -= xfer;
		break;

	default:
		bp->b_resid = bp->b_bcount;
	set_eio:
		bp->b_error = EIO;
		break;
	}

	mutex_exit(&sc->sc_lock);
done:
	biodone(bp);
	if (self != NULL)
		device_release(self);
}

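/*
 * Ioctl entry point.  Common disk ioctls are handed to disk_ioctl();
 * MD_GETCONF/MD_SETCONF are honoured only on the raw partition, and a
 * unit can be configured just once.
 */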
static int
mdioctl(dev_t dev, u_long cmd, void *data, int flag, struct lwp *l)
{
	device_t self;
	struct md_softc *sc;
	struct md_conf *umd;
	int error;

	self = device_lookup_acquire(&md_cd, MD_UNIT(dev));
	if (self == NULL)
		return ENXIO;

	sc = device_private(self);
	mutex_enter(&sc->sc_lock);
	if (sc->sc_type != MD_UNCONFIGURED) {
		error = disk_ioctl(&sc->sc_dkdev, dev, cmd, data, flag, l);
		if (error != EPASSTHROUGH) {
			mutex_exit(&sc->sc_lock);
			device_release(self);
			return error;
		}
	}

	/* If this is not the raw partition, punt! */
	if (DISKPART(dev) != RAW_PART) {
		mutex_exit(&sc->sc_lock);
		device_release(self);
		return ENOTTY;
	}

	umd = (struct md_conf *)data;
	error = EINVAL;
	switch (cmd) {
	case MD_GETCONF:
		*umd = sc->sc_md;
		error = 0;
		break;

	case MD_SETCONF:
		/* Can only set it once. */
		if (sc->sc_type != MD_UNCONFIGURED)
			break;
		switch (umd->md_type) {
		case MD_KMEM_ALLOCATED:
			error = md_ioctl_kalloc(sc, umd, l);
			break;
#if MEMORY_DISK_SERVER
		case MD_UMEM_SERVER:
			error = md_ioctl_server(sc, umd, l);
			break;
#endif /* MEMORY_DISK_SERVER */
		default:
			break;
		}
		break;
	}
	mutex_exit(&sc->sc_lock);
	device_release(self);
	return error;
}

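/*
 * Build a fake disklabel and geometry for the configured unit:
 * one FFS partition covering the whole device plus the raw partition.
 */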
static void
md_set_disklabel(struct md_softc *sc)
{
	struct disk_geom *dg = &sc->sc_dkdev.dk_geom;
	struct disklabel *lp = sc->sc_dkdev.dk_label;
	struct partition *pp;

	memset(lp, 0, sizeof(*lp));

	lp->d_secsize = DEV_BSIZE;
	lp->d_secperunit = sc->sc_size / DEV_BSIZE;
	if (lp->d_secperunit >= (32*64)) {
		lp->d_nsectors = 32;
		lp->d_ntracks = 64;
		lp->d_ncylinders = lp->d_secperunit / (32*64);
	} else {
		lp->d_nsectors = 1;
		lp->d_ntracks = 1;
		lp->d_ncylinders = lp->d_secperunit;
	}
	lp->d_secpercyl = lp->d_ntracks*lp->d_nsectors;

	strncpy(lp->d_typename, md_cd.cd_name, sizeof(lp->d_typename));
	lp->d_type = DKTYPE_MD;
	strncpy(lp->d_packname, "fictitious", sizeof(lp->d_packname));
	lp->d_rpm = 3600;
	lp->d_interleave = 1;
	lp->d_flags = 0;

	pp = &lp->d_partitions[0];
	pp->p_offset = 0;
	pp->p_size = lp->d_secperunit;
	pp->p_fstype = FS_BSDFFS;

	pp = &lp->d_partitions[RAW_PART];
	pp->p_offset = 0;
	pp->p_size = lp->d_secperunit;
	pp->p_fstype = FS_UNUSED;

	lp->d_npartitions = RAW_PART+1;
	lp->d_magic = DISKMAGIC;
	lp->d_magic2 = DISKMAGIC;
	lp->d_checksum = dkcksum(lp);

	memset(dg, 0, sizeof(*dg));

	dg->dg_secsize = lp->d_secsize;
	dg->dg_secperunit = lp->d_secperunit;
	dg->dg_nsectors = lp->d_nsectors;
	dg->dg_ntracks = lp->d_ntracks;
	dg->dg_ncylinders = lp->d_ncylinders;

	disk_set_info(sc->sc_dev, &sc->sc_dkdev, NULL);
}

/*
 * Handle ioctl MD_SETCONF for (sc_type == MD_KMEM_ALLOCATED)
 * Just allocate some kernel memory and return.
 */
static int
md_ioctl_kalloc(struct md_softc *sc, struct md_conf *umd,
    struct lwp *l)
{
	vaddr_t addr;
	vsize_t size;

	mutex_exit(&sc->sc_lock);

	/* Sanity check the size. */
	size = umd->md_size;
	addr = uvm_km_alloc(kernel_map, size, 0, UVM_KMF_WIRED|UVM_KMF_ZERO);

	mutex_enter(&sc->sc_lock);

	if (!addr)
		return ENOMEM;

	/* If another thread beat us to configure this unit: fail. */
	if (sc->sc_type != MD_UNCONFIGURED) {
		uvm_km_free(kernel_map, addr, size, UVM_KMF_WIRED);
		return EINVAL;
	}

	/* This unit is now configured. */
	sc->sc_addr = (void *)addr;	/* kernel space */
	sc->sc_size = (size_t)size;
	sc->sc_type = MD_KMEM_ALLOCATED;
	md_set_disklabel(sc);
	return 0;
}

#if MEMORY_DISK_SERVER

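/*
 * Illustrative sketch (not part of this driver): a user-space server
 * would roughly configure a unit and become its backing store as shown
 * below.  The device node name and size are assumptions; the raw
 * partition letter is port-dependent, and error handling is omitted.
 *
 *	#include <sys/ioctl.h>
 *	#include <dev/md.h>
 *	#include <fcntl.h>
 *	#include <stdlib.h>
 *
 *	struct md_conf conf;
 *	int fd = open("/dev/rmd0d", O_RDWR);	// raw partition of md0
 *	conf.md_size = 4 * 1024 * 1024;		// 4 MB ramdisk
 *	conf.md_addr = malloc(conf.md_size);	// user-space backing store
 *	conf.md_type = MD_UMEM_SERVER;
 *	// Blocks in md_server_loop(); while blocked here, this process
 *	// services all I/O for the unit until it is signalled.
 *	ioctl(fd, MD_SETCONF, &conf);
 */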
/*
 * Handle ioctl MD_SETCONF for (sc_type == MD_UMEM_SERVER)
 * Set config, then become the I/O server for this unit.
 */
static int
md_ioctl_server(struct md_softc *sc, struct md_conf *umd,
    struct lwp *l)
{
	vaddr_t end;
	int error;

	KASSERT(mutex_owned(&sc->sc_lock));

	/* Sanity check addr, size. */
	end = (vaddr_t) ((char *)umd->md_addr + umd->md_size);

	if ((end >= VM_MAXUSER_ADDRESS) ||
	    (end < ((vaddr_t) umd->md_addr)) )
		return EINVAL;

	/* This unit is now configured. */
	sc->sc_addr = umd->md_addr;	/* user space */
	sc->sc_size = umd->md_size;
	sc->sc_type = MD_UMEM_SERVER;
	md_set_disklabel(sc);

	/* Become the server daemon */
	error = md_server_loop(sc);

	/* This server is now going away! */
	sc->sc_type = MD_UNCONFIGURED;
	sc->sc_addr = 0;
	sc->sc_size = 0;

	return (error);
}

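/*
 * The I/O server loop: runs in the context of the process that issued
 * MD_SETCONF, pulling buffers queued by mdstrategy() and copying data
 * to/from the server's user-space buffer.  Returns (with the error from
 * cv_wait_sig()) only when the process is interrupted by a signal.
 */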
static int
md_server_loop(struct md_softc *sc)
{
	struct buf *bp;
	void *addr;	/* user space address */
	size_t off;	/* offset into "device" */
	size_t xfer;	/* amount to transfer */
	int error;
	bool is_read;

	KASSERT(mutex_owned(&sc->sc_lock));

	for (;;) {
		/* Wait for some work to arrive. */
		while ((bp = bufq_get(sc->sc_buflist)) == NULL) {
			error = cv_wait_sig(&sc->sc_cv, &sc->sc_lock);
			if (error)
				return error;
		}

		/* Do the transfer to/from user space. */
		mutex_exit(&sc->sc_lock);
		error = 0;
		is_read = ((bp->b_flags & B_READ) == B_READ);
		bp->b_resid = bp->b_bcount;
		off = (bp->b_blkno << DEV_BSHIFT);
		if (off >= sc->sc_size) {
			if (is_read)
				goto done;	/* EOF (not an error) */
			error = EIO;
			goto done;
		}
		xfer = bp->b_resid;
		if (xfer > (sc->sc_size - off))
			xfer = (sc->sc_size - off);
		addr = (char *)sc->sc_addr + off;
		disk_busy(&sc->sc_dkdev);
		if (is_read)
			error = copyin(addr, bp->b_data, xfer);
		else
			error = copyout(bp->b_data, addr, xfer);
		disk_unbusy(&sc->sc_dkdev, (error ? 0 : xfer), is_read);
		if (!error)
			bp->b_resid -= xfer;

	done:
		if (error) {
			bp->b_error = error;
		}
		biodone(bp);
		mutex_enter(&sc->sc_lock);
	}
}
#endif /* MEMORY_DISK_SERVER */