/*	$NetBSD: md.c,v 1.78.6.2 2017/04/27 23:18:21 pgoyette Exp $	*/

/*
 * Copyright (c) 1995 Gordon W. Ross, Leo Weppelman.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * This implements a general-purpose memory-disk.
 * See md.h for notes on the config types.
 *
 * Note that this driver provides the same functionality
 * as the MFS filesystem hack, but this is better because
 * you can use this for any filesystem type you'd like!
 *
 * Credit for most of the kmem ramdisk code goes to:
 *   Leo Weppelman (atari) and Phil Nelson (pc532)
 * Credit for the ideas behind the "user space memory" code goes
 * to the authors of the MFS implementation.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: md.c,v 1.78.6.2 2017/04/27 23:18:21 pgoyette Exp $");

#ifdef _KERNEL_OPT
#include "opt_md.h"
#else
#define MEMORY_DISK_SERVER 1
#endif

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/bufq.h>
#include <sys/device.h>
#include <sys/disk.h>
#include <sys/stat.h>
#include <sys/proc.h>
#include <sys/conf.h>
#include <sys/disklabel.h>
#include <sys/localcount.h>

#include <uvm/uvm_extern.h>

#include <dev/md.h>

#include "ioconf.h"
/*
 * The user-space functionality is included by default.
 * Use `options MEMORY_DISK_SERVER=0' to turn it off.
 */
#ifndef MEMORY_DISK_SERVER
#error MEMORY_DISK_SERVER should be defined by opt_md.h
#endif /* MEMORY_DISK_SERVER */

/*
 * We should use the raw partition for ioctl.
 */
#define MD_UNIT(unit)	DISKUNIT(unit)

/* autoconfig stuff... */

struct md_softc {
	device_t sc_dev;	/* Self. */
	struct disk sc_dkdev;	/* hook for generic disk handling */
	struct md_conf sc_md;
	kmutex_t sc_lock;	/* Protect self. */
	kcondvar_t sc_cv;	/* Wait here for work. */
	struct bufq_state *sc_buflist;
};
/* shorthand for fields in sc_md: */
#define sc_addr sc_md.md_addr
#define sc_size sc_md.md_size
#define sc_type sc_md.md_type
static void md_attach(device_t, device_t, void *);
static int md_detach(device_t, int);

static dev_type_open(mdopen);
static dev_type_close(mdclose);
static dev_type_read(mdread);
static dev_type_write(mdwrite);
static dev_type_ioctl(mdioctl);
static dev_type_strategy(mdstrategy);
static dev_type_size(mdsize);

const struct bdevsw md_bdevsw = {
	DEVSW_MODULE_INIT
	.d_open = mdopen,
	.d_close = mdclose,
	.d_strategy = mdstrategy,
	.d_ioctl = mdioctl,
	.d_dump = nodump,
	.d_psize = mdsize,
	.d_discard = nodiscard,
	.d_flag = D_DISK | D_MPSAFE
};

const struct cdevsw md_cdevsw = {
	DEVSW_MODULE_INIT
	.d_open = mdopen,
	.d_close = mdclose,
	.d_read = mdread,
	.d_write = mdwrite,
	.d_ioctl = mdioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = nopoll,
	.d_mmap = nommap,
	.d_kqfilter = nokqfilter,
	.d_discard = nodiscard,
	.d_flag = D_DISK
};

static struct dkdriver mddkdriver = {
	.d_strategy = mdstrategy
};

extern struct cfdriver md_cd;
CFATTACH_DECL3_NEW(md, sizeof(struct md_softc),
    0, md_attach, md_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

static kmutex_t md_device_lock;	/* Protect unit creation / deletion. */
extern size_t md_root_size;

static void md_set_disklabel(struct md_softc *);

/*
 * This is called if we are configured as a pseudo-device
 */
void
mdattach(int n)
{

	mutex_init(&md_device_lock, MUTEX_DEFAULT, IPL_NONE);
	if (config_cfattach_attach(md_cd.cd_name, &md_ca)) {
		aprint_error("%s: cfattach_attach failed\n", md_cd.cd_name);
		return;
	}
}

static void
md_attach(device_t parent, device_t self, void *aux)
{
	struct md_softc *sc = device_private(self);

	sc->sc_dev = self;
	sc->sc_type = MD_UNCONFIGURED;
	mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&sc->sc_cv, "mdidle");
	bufq_alloc(&sc->sc_buflist, "fcfs", 0);

	/* XXX - Could accept aux info here to set the config. */
#ifdef MEMORY_DISK_HOOKS
	/*
	 * This external function might setup a pre-loaded disk.
	 * All it would need to do is setup the md_conf struct.
	 * See sys/dev/md_root.c for an example.
	 */
	md_attach_hook(device_unit(self), &sc->sc_md);
#endif

	/*
	 * Initialize and attach the disk structure.
	 */
	disk_init(&sc->sc_dkdev, device_xname(self), &mddkdriver);
	disk_attach(&sc->sc_dkdev);

	if (sc->sc_type != MD_UNCONFIGURED)
		md_set_disklabel(sc);

	if (!pmf_device_register(self, NULL, NULL))
		aprint_error_dev(self, "couldn't establish power handler\n");
}

/*
 * Caller must hold a reference to the device's localcount. The reference
 * is released if detach is successful.
 */
static int
md_detach(device_t self, int flags)
{
	struct md_softc *sc = device_private(self);
	int rc;

	rc = 0;
	mutex_enter(&sc->sc_dkdev.dk_openlock);
	if (sc->sc_dkdev.dk_openmask == 0 && sc->sc_type == MD_UNCONFIGURED)
		;	/* nothing to do */
	else if ((flags & DETACH_FORCE) == 0)
		rc = EBUSY;
	mutex_exit(&sc->sc_dkdev.dk_openlock);

	if (rc != 0)
		return rc;

	device_release(self);
	pmf_device_deregister(self);
	disk_detach(&sc->sc_dkdev);
	disk_destroy(&sc->sc_dkdev);
	bufq_free(sc->sc_buflist);
	mutex_destroy(&sc->sc_lock);
	cv_destroy(&sc->sc_cv);
	return 0;
}

/*
 * operational routines:
 * open, close, read, write, strategy,
 * ioctl, dump, size
 */
#if MEMORY_DISK_SERVER
static int md_server_loop(struct md_softc *sc);
static int md_ioctl_server(struct md_softc *sc, struct md_conf *umd,
    struct lwp *l);
#endif /* MEMORY_DISK_SERVER */
static int md_ioctl_kalloc(struct md_softc *sc, struct md_conf *umd,
    struct lwp *l);

static int
mdsize(dev_t dev)
{
	struct md_softc *sc;
	int res;

	sc = device_lookup_private_acquire(&md_cd, MD_UNIT(dev));
	if (sc == NULL)
		return 0;

	mutex_enter(&sc->sc_lock);
	if (sc->sc_type == MD_UNCONFIGURED)
		res = 0;
	else
		res = sc->sc_size >> DEV_BSHIFT;
	mutex_exit(&sc->sc_lock);

	device_release(sc->sc_dev);
	return res;
}

static int
mdopen(dev_t dev, int flag, int fmt, struct lwp *l)
{
	device_t self, new_self;
	int unit;
	int part = DISKPART(dev);
	int pmask = 1 << part;
	cfdata_t cf;
	struct md_softc *sc;
	struct disk *dk;
#ifdef MEMORY_DISK_HOOKS
	bool configured;
#endif

	mutex_enter(&md_device_lock);
	unit = MD_UNIT(dev);
	sc = device_lookup_private_acquire(&md_cd, unit);
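	/*
	 * If no softc exists yet, the unit has not been attached.
	 * Units are created on demand here, but only via the raw
	 * partition, which is the one used for configuration ioctls.
	 */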
	if (sc == NULL) {
		if (part != RAW_PART) {
			mutex_exit(&md_device_lock);
			return ENXIO;
		}
		cf = malloc(sizeof(*cf), M_DEVBUF, M_WAITOK);
		cf->cf_name = md_cd.cd_name;
		cf->cf_atname = md_cd.cd_name;
		cf->cf_unit = unit;
		cf->cf_fstate = FSTATE_STAR;
		new_self = config_attach_pseudo(cf);
		self = device_lookup_acquire(&md_cd, unit);
		KASSERT(self == new_self);
		sc = device_private(self);
		if (sc == NULL) {
			mutex_exit(&md_device_lock);
			device_release(self);
			return ENOMEM;
		}
	}

	dk = &sc->sc_dkdev;

	/*
	 * The raw partition is used for ioctl to configure.
	 */
	if (part == RAW_PART)
		goto ok;

#ifdef MEMORY_DISK_HOOKS
	/* Call the open hook to allow loading the device. */
	configured = (sc->sc_type != MD_UNCONFIGURED);
	md_open_hook(unit, &sc->sc_md);
	/* initialize disklabel if the device is configured in open hook */
	if (!configured && sc->sc_type != MD_UNCONFIGURED)
		md_set_disklabel(sc);
#endif

	/*
	 * This is a normal, "slave" device, so
	 * enforce initialized.
	 */
	if (sc->sc_type == MD_UNCONFIGURED) {
		mutex_exit(&md_device_lock);
		device_release(sc->sc_dev);
		return ENXIO;
	}

ok:
	/* XXX duplicates code in dk_open(). Call dk_open(), instead? */
	mutex_enter(&dk->dk_openlock);
	/* Mark our unit as open. */
	switch (fmt) {
	case S_IFCHR:
		dk->dk_copenmask |= pmask;
		break;
	case S_IFBLK:
		dk->dk_bopenmask |= pmask;
		break;
	}

	dk->dk_openmask = dk->dk_copenmask | dk->dk_bopenmask;

	mutex_exit(&dk->dk_openlock);
	mutex_exit(&md_device_lock);
	device_release(sc->sc_dev);
	return 0;
}

static int
mdclose(dev_t dev, int flag, int fmt, struct lwp *l)
{
	int part = DISKPART(dev);
	int pmask = 1 << part;
	int error;
	cfdata_t cf;
	struct md_softc *sc;
	struct disk *dk;

	sc = device_lookup_private_acquire(&md_cd, MD_UNIT(dev));
	if (sc == NULL)
		return ENXIO;

	dk = &sc->sc_dkdev;

	mutex_enter(&dk->dk_openlock);

	switch (fmt) {
	case S_IFCHR:
		dk->dk_copenmask &= ~pmask;
		break;
	case S_IFBLK:
		dk->dk_bopenmask &= ~pmask;
		break;
	}
	dk->dk_openmask = dk->dk_copenmask | dk->dk_bopenmask;
	if (dk->dk_openmask != 0) {
		mutex_exit(&dk->dk_openlock);
		device_release(sc->sc_dev);
		return 0;
	}

	mutex_exit(&dk->dk_openlock);

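	/*
	 * Last close of any partition: detach the pseudo-device that
	 * mdopen() created on demand and free its cfdata.
	 */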
	mutex_enter(&md_device_lock);
	cf = device_cfdata(sc->sc_dev);
	error = config_detach(sc->sc_dev, DETACH_QUIET);
	if (! error)
		free(cf, M_DEVBUF);
	mutex_exit(&md_device_lock);
	if (error)
		device_release(sc->sc_dev);
	return error;
}

static int
mdread(dev_t dev, struct uio *uio, int flags)
{
	struct md_softc *sc;
	int error;

	sc = device_lookup_private_acquire(&md_cd, MD_UNIT(dev));

	if (sc == NULL || sc->sc_type == MD_UNCONFIGURED) {
		if (sc != NULL)
			device_release(sc->sc_dev);
		return ENXIO;
	}

	error = physio(mdstrategy, NULL, dev, B_READ, minphys, uio);
	device_release(sc->sc_dev);
	return error;
}

static int
mdwrite(dev_t dev, struct uio *uio, int flags)
{
	struct md_softc *sc;
	int error;

	sc = device_lookup_private_acquire(&md_cd, MD_UNIT(dev));

	if (sc == NULL || sc->sc_type == MD_UNCONFIGURED) {
		if (sc != NULL) {
			device_release(sc->sc_dev);
		}
		return ENXIO;
	}

	error = physio(mdstrategy, NULL, dev, B_WRITE, minphys, uio);

	device_release(sc->sc_dev);
	return error;
}

/*
 * Handle I/O requests, either directly, or
 * by passing them to the server process.
 */
static void
mdstrategy(struct buf *bp)
{
	struct md_softc *sc;
	void *addr;
	size_t off, xfer;
	bool is_read;

	sc = device_lookup_private_acquire(&md_cd, MD_UNIT(bp->b_dev));
	if (sc == NULL || sc->sc_type == MD_UNCONFIGURED) {
		bp->b_error = ENXIO;
		goto done;
	}

	mutex_enter(&sc->sc_lock);

	switch (sc->sc_type) {
#if MEMORY_DISK_SERVER
	case MD_UMEM_SERVER:
		/* Just add this job to the server's queue. */
		bufq_put(sc->sc_buflist, bp);
		cv_signal(&sc->sc_cv);
		mutex_exit(&sc->sc_lock);
		/* see md_server_loop() */
		/* no biodone in this case */
		device_release(sc->sc_dev);
		return;
#endif /* MEMORY_DISK_SERVER */

	case MD_KMEM_FIXED:
	case MD_KMEM_ALLOCATED:
		/* These are in kernel space. Access directly. */
		is_read = ((bp->b_flags & B_READ) == B_READ);
		bp->b_resid = bp->b_bcount;
		off = (bp->b_blkno << DEV_BSHIFT);
		if (off >= sc->sc_size) {
			if (is_read)
				break;	/* EOF */
			goto set_eio;
		}
		xfer = bp->b_resid;
		if (xfer > (sc->sc_size - off))
			xfer = (sc->sc_size - off);
		addr = (char *)sc->sc_addr + off;
		disk_busy(&sc->sc_dkdev);
		if (is_read)
			memcpy(bp->b_data, addr, xfer);
		else
			memcpy(addr, bp->b_data, xfer);
		disk_unbusy(&sc->sc_dkdev, xfer, is_read);
		bp->b_resid -= xfer;
		break;

	default:
		bp->b_resid = bp->b_bcount;
	set_eio:
		bp->b_error = EIO;
		break;
	}
	mutex_exit(&sc->sc_lock);

done:
	biodone(bp);
	if (sc != NULL)
		device_release(sc->sc_dev);
}

static int
mdioctl(dev_t dev, u_long cmd, void *data, int flag, struct lwp *l)
{
	struct md_softc *sc;
	struct md_conf *umd;
	int error;

	sc = device_lookup_private_acquire(&md_cd, MD_UNIT(dev));
	if (sc == NULL)
		return ENXIO;

	mutex_enter(&sc->sc_lock);
	if (sc->sc_type != MD_UNCONFIGURED) {
		error = disk_ioctl(&sc->sc_dkdev, dev, cmd, data, flag, l);
		if (error != EPASSTHROUGH) {
			mutex_exit(&sc->sc_lock);
			device_release(sc->sc_dev);
			return error;
		}
	}

	/* If this is not the raw partition, punt! */
	if (DISKPART(dev) != RAW_PART) {
		mutex_exit(&sc->sc_lock);
		device_release(sc->sc_dev);
		return ENOTTY;
	}

	umd = (struct md_conf *)data;
	error = EINVAL;
	switch (cmd) {
	case MD_GETCONF:
		*umd = sc->sc_md;
		error = 0;
		break;

	case MD_SETCONF:
		/* Can only set it once. */
		if (sc->sc_type != MD_UNCONFIGURED)
			break;
		switch (umd->md_type) {
		case MD_KMEM_ALLOCATED:
			error = md_ioctl_kalloc(sc, umd, l);
			break;
#if MEMORY_DISK_SERVER
		case MD_UMEM_SERVER:
			error = md_ioctl_server(sc, umd, l);
			break;
#endif /* MEMORY_DISK_SERVER */
		default:
			break;
		}
		break;
	}
	mutex_exit(&sc->sc_lock);
	device_release(sc->sc_dev);
	return error;
}

static void
md_set_disklabel(struct md_softc *sc)
{
	struct disk_geom *dg = &sc->sc_dkdev.dk_geom;
	struct disklabel *lp = sc->sc_dkdev.dk_label;
	struct partition *pp;

	memset(lp, 0, sizeof(*lp));

	lp->d_secsize = DEV_BSIZE;
	lp->d_secperunit = sc->sc_size / DEV_BSIZE;
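	/*
	 * Invent a plausible geometry: 32 sectors per track and 64
	 * tracks per cylinder when the image is large enough, otherwise
	 * a flat one-sector, one-track layout.
	 */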
	if (lp->d_secperunit >= (32*64)) {
		lp->d_nsectors = 32;
		lp->d_ntracks = 64;
		lp->d_ncylinders = lp->d_secperunit / (32*64);
	} else {
		lp->d_nsectors = 1;
		lp->d_ntracks = 1;
		lp->d_ncylinders = lp->d_secperunit;
	}
	lp->d_secpercyl = lp->d_ntracks * lp->d_nsectors;

	strncpy(lp->d_typename, md_cd.cd_name, sizeof(lp->d_typename));
	lp->d_type = DKTYPE_MD;
	strncpy(lp->d_packname, "fictitious", sizeof(lp->d_packname));
	lp->d_rpm = 3600;
	lp->d_interleave = 1;
	lp->d_flags = 0;

	pp = &lp->d_partitions[0];
	pp->p_offset = 0;
	pp->p_size = lp->d_secperunit;
	pp->p_fstype = FS_BSDFFS;

	pp = &lp->d_partitions[RAW_PART];
	pp->p_offset = 0;
	pp->p_size = lp->d_secperunit;
	pp->p_fstype = FS_UNUSED;

	lp->d_npartitions = RAW_PART+1;
	lp->d_magic = DISKMAGIC;
	lp->d_magic2 = DISKMAGIC;
	lp->d_checksum = dkcksum(lp);

	memset(dg, 0, sizeof(*dg));

	dg->dg_secsize = lp->d_secsize;
	dg->dg_secperunit = lp->d_secperunit;
	dg->dg_nsectors = lp->d_nsectors;
	dg->dg_ntracks = lp->d_ntracks;
	dg->dg_ncylinders = lp->d_ncylinders;

	disk_set_info(sc->sc_dev, &sc->sc_dkdev, NULL);
}

/*
 * Handle ioctl MD_SETCONF for (sc_type == MD_KMEM_ALLOCATED)
 * Just allocate some kernel memory and return.
 */
static int
md_ioctl_kalloc(struct md_softc *sc, struct md_conf *umd,
    struct lwp *l)
{
	vaddr_t addr;
	vsize_t size;

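	/*
	 * Drop our lock while allocating: uvm_km_alloc() may sleep,
	 * so another thread could configure the unit in the meantime;
	 * that race is re-checked after the allocation.
	 */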
	mutex_exit(&sc->sc_lock);

	/* Sanity check the size. */
	size = umd->md_size;
	addr = uvm_km_alloc(kernel_map, size, 0, UVM_KMF_WIRED|UVM_KMF_ZERO);

	mutex_enter(&sc->sc_lock);

	if (!addr)
		return ENOMEM;

	/* If another thread beat us to configure this unit: fail. */
	if (sc->sc_type != MD_UNCONFIGURED) {
		uvm_km_free(kernel_map, addr, size, UVM_KMF_WIRED);
		return EINVAL;
	}

	/* This unit is now configured. */
	sc->sc_addr = (void *)addr;	/* kernel space */
	sc->sc_size = (size_t)size;
	sc->sc_type = MD_KMEM_ALLOCATED;
	md_set_disklabel(sc);
	return 0;
}

#if MEMORY_DISK_SERVER

/*
 * Handle ioctl MD_SETCONF for (sc_type == MD_UMEM_SERVER)
 * Set config, then become the I/O server for this unit.
 */
static int
md_ioctl_server(struct md_softc *sc, struct md_conf *umd,
    struct lwp *l)
{
	vaddr_t end;
	int error;

	KASSERT(mutex_owned(&sc->sc_lock));

	/* Sanity check addr, size. */
	end = (vaddr_t) ((char *)umd->md_addr + umd->md_size);

	if ((end >= VM_MAXUSER_ADDRESS) ||
	    (end < ((vaddr_t) umd->md_addr)))
		return EINVAL;

	/* This unit is now configured. */
	sc->sc_addr = umd->md_addr;	/* user space */
	sc->sc_size = umd->md_size;
	sc->sc_type = MD_UMEM_SERVER;
	md_set_disklabel(sc);

	/* Become the server daemon */
	error = md_server_loop(sc);

	/* This server is now going away! */
	sc->sc_type = MD_UNCONFIGURED;
	sc->sc_addr = 0;
	sc->sc_size = 0;

	return (error);
}

static int
md_server_loop(struct md_softc *sc)
{
	struct buf *bp;
	void *addr;	/* user space address */
	size_t off;	/* offset into "device" */
	size_t xfer;	/* amount to transfer */
	int error;
	bool is_read;

	KASSERT(mutex_owned(&sc->sc_lock));

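	/*
	 * Service buffers queued by mdstrategy() until cv_wait_sig()
	 * reports a signal; at that point we return and the caller,
	 * md_ioctl_server(), unconfigures the unit.
	 */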
	for (;;) {
		/* Wait for some work to arrive. */
		while ((bp = bufq_get(sc->sc_buflist)) == NULL) {
			error = cv_wait_sig(&sc->sc_cv, &sc->sc_lock);
			if (error)
				return error;
		}

		/* Do the transfer to/from user space. */
		mutex_exit(&sc->sc_lock);
		error = 0;
		is_read = ((bp->b_flags & B_READ) == B_READ);
		bp->b_resid = bp->b_bcount;
		off = (bp->b_blkno << DEV_BSHIFT);
		if (off >= sc->sc_size) {
			if (is_read)
				goto done;	/* EOF (not an error) */
			error = EIO;
			goto done;
		}
		xfer = bp->b_resid;
		if (xfer > (sc->sc_size - off))
			xfer = (sc->sc_size - off);
		addr = (char *)sc->sc_addr + off;
		disk_busy(&sc->sc_dkdev);
		if (is_read)
			error = copyin(addr, bp->b_data, xfer);
		else
			error = copyout(bp->b_data, addr, xfer);
		disk_unbusy(&sc->sc_dkdev, (error ? 0 : xfer), is_read);
		if (!error)
			bp->b_resid -= xfer;

	done:
		if (error) {
			bp->b_error = error;
		}
		biodone(bp);
		mutex_enter(&sc->sc_lock);
	}
}
#endif /* MEMORY_DISK_SERVER */