/*	$NetBSD: md.c,v 1.76.2.7 2016/08/06 00:19:07 pgoyette Exp $	*/

/*
 * Copyright (c) 1995 Gordon W. Ross, Leo Weppelman.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * This implements a general-purpose memory-disk.
 * See md.h for notes on the config types.
 *
 * Note that this driver provides the same functionality
 * as the MFS filesystem hack, but this is better because
 * you can use this for any filesystem type you'd like!
 *
 * Credit for most of the kmem ramdisk code goes to:
 *   Leo Weppelman (atari) and Phil Nelson (pc532)
 * Credit for the ideas behind the "user space memory" code goes
 * to the authors of the MFS implementation.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: md.c,v 1.76.2.7 2016/08/06 00:19:07 pgoyette Exp $");

#ifdef _KERNEL_OPT
#include "opt_md.h"
#else
#define MEMORY_DISK_SERVER 1
#endif

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/bufq.h>
#include <sys/device.h>
#include <sys/disk.h>
#include <sys/stat.h>
#include <sys/proc.h>
#include <sys/conf.h>
#include <sys/disklabel.h>
#include <sys/localcount.h>

#include <uvm/uvm_extern.h>

#include <dev/md.h>

#include "ioconf.h"

/*
 * The user-space functionality is included by default.
 * Use `options MEMORY_DISK_SERVER=0' to turn it off.
 */
#ifndef MEMORY_DISK_SERVER
#error MEMORY_DISK_SERVER should be defined by opt_md.h
#endif	/* MEMORY_DISK_SERVER */
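
/*
 * Illustrative kernel-config sketch (not part of this file): the usual
 * way to build this driver, here with the user-space server backend
 * turned off via the option named above.  The unit count and comments
 * are assumptions about a typical configuration.
 *
 *	pseudo-device	md	1		# memory disk device (ramdisk)
 *	options 	MEMORY_DISK_SERVER=0	# kernel-backed units only
 */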

/*
 * We should use the raw partition for ioctl.
 */
#define MD_UNIT(unit)	DISKUNIT(unit)

/* autoconfig stuff... */

struct md_softc {
	device_t sc_dev;	/* Self. */
	struct disk sc_dkdev;	/* hook for generic disk handling */
	struct md_conf sc_md;
	kmutex_t sc_lock;	/* Protect self. */
	kcondvar_t sc_cv;	/* Wait here for work. */
	struct bufq_state *sc_buflist;
};
/* shorthand for fields in sc_md: */
#define sc_addr sc_md.md_addr
#define sc_size sc_md.md_size
#define sc_type sc_md.md_type

static void md_attach(device_t, device_t, void *);
static int md_detach(device_t, int);

static dev_type_open(mdopen);
static dev_type_close(mdclose);
static dev_type_read(mdread);
static dev_type_write(mdwrite);
static dev_type_ioctl(mdioctl);
static dev_type_strategy(mdstrategy);
static dev_type_size(mdsize);

const struct bdevsw md_bdevsw = {
	DEVSW_MODULE_INIT
	.d_open = mdopen,
	.d_close = mdclose,
	.d_strategy = mdstrategy,
	.d_ioctl = mdioctl,
	.d_dump = nodump,
	.d_psize = mdsize,
	.d_discard = nodiscard,
	.d_flag = D_DISK | D_MPSAFE
};

const struct cdevsw md_cdevsw = {
	DEVSW_MODULE_INIT
	.d_open = mdopen,
	.d_close = mdclose,
	.d_read = mdread,
	.d_write = mdwrite,
	.d_ioctl = mdioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = nopoll,
	.d_mmap = nommap,
	.d_kqfilter = nokqfilter,
	.d_discard = nodiscard,
	.d_flag = D_DISK
};

static struct dkdriver mddkdriver = {
	.d_strategy = mdstrategy
};

extern struct cfdriver md_cd;
CFATTACH_DECL3_NEW(md, sizeof(struct md_softc),
    0, md_attach, md_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

static kmutex_t md_device_lock;		/* Protect unit creation / deletion. */
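/* Size of the optional built-in root image (see sys/dev/md_root.c). */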
extern size_t md_root_size;

static void md_set_disklabel(struct md_softc *);

/*
 * This is called if we are configured as a pseudo-device
 */
void
mdattach(int n)
{

	mutex_init(&md_device_lock, MUTEX_DEFAULT, IPL_NONE);
	if (config_cfattach_attach(md_cd.cd_name, &md_ca)) {
		aprint_error("%s: cfattach_attach failed\n", md_cd.cd_name);
		return;
	}
}

static void
md_attach(device_t parent, device_t self, void *aux)
{
	struct md_softc *sc = device_private(self);

	sc->sc_dev = self;
	sc->sc_type = MD_UNCONFIGURED;
	mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&sc->sc_cv, "mdidle");
	bufq_alloc(&sc->sc_buflist, "fcfs", 0);

	/* XXX - Could accept aux info here to set the config. */
#ifdef MEMORY_DISK_HOOKS
	/*
	 * This external function might set up a pre-loaded disk.
	 * All it would need to do is set up the md_conf struct.
	 * See sys/dev/md_root.c for an example.
	 */
	md_attach_hook(device_unit(self), &sc->sc_md);
#endif
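
	/*
	 * Hedged sketch (an assumption, not a copy of sys/dev/md_root.c):
	 * such a hook only has to describe an existing image, e.g. a
	 * compiled-in root image named md_root_image:
	 *
	 *	void
	 *	md_attach_hook(int unit, struct md_conf *md)
	 *	{
	 *		if (unit == 0) {
	 *			md->md_addr = md_root_image;
	 *			md->md_size = md_root_size;
	 *			md->md_type = MD_KMEM_FIXED;
	 *		}
	 *	}
	 */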

	/*
	 * Initialize and attach the disk structure.
	 */
	disk_init(&sc->sc_dkdev, device_xname(self), &mddkdriver);
	disk_attach(&sc->sc_dkdev);

	if (sc->sc_type != MD_UNCONFIGURED)
		md_set_disklabel(sc);

	if (!pmf_device_register(self, NULL, NULL))
		aprint_error_dev(self, "couldn't establish power handler\n");
}

/*
 * Caller must hold a reference to the device's localcount.  The reference
 * is released if detach is successful.
 */
static int
md_detach(device_t self, int flags)
{
	struct md_softc *sc = device_private(self);
	int rc;

	rc = 0;
	mutex_enter(&sc->sc_dkdev.dk_openlock);
	if (sc->sc_dkdev.dk_openmask == 0 && sc->sc_type == MD_UNCONFIGURED)
		;	/* nothing to do */
	else if ((flags & DETACH_FORCE) == 0)
		rc = EBUSY;
	mutex_exit(&sc->sc_dkdev.dk_openlock);

	if (rc != 0)
		return rc;

	device_release(self);
	pmf_device_deregister(self);
	disk_detach(&sc->sc_dkdev);
	disk_destroy(&sc->sc_dkdev);
	bufq_free(sc->sc_buflist);
	mutex_destroy(&sc->sc_lock);
	cv_destroy(&sc->sc_cv);
	return 0;
}

/*
 * operational routines:
 * open, close, read, write, strategy,
 * ioctl, dump, size
 */

#if MEMORY_DISK_SERVER
static int md_server_loop(struct md_softc *sc);
static int md_ioctl_server(struct md_softc *sc, struct md_conf *umd,
		    struct lwp *l);
#endif	/* MEMORY_DISK_SERVER */
static int md_ioctl_kalloc(struct md_softc *sc, struct md_conf *umd,
		    struct lwp *l);

static int
mdsize(dev_t dev)
{
	struct md_softc *sc;
	int res;

	sc = device_lookup_private_acquire(&md_cd, MD_UNIT(dev));
	if (sc == NULL)
		return 0;

	mutex_enter(&sc->sc_lock);
	if (sc->sc_type == MD_UNCONFIGURED)
		res = 0;
	else
		res = sc->sc_size >> DEV_BSHIFT;
	mutex_exit(&sc->sc_lock);

	device_release(sc->sc_dev);
	return res;
}

static int
mdopen(dev_t dev, int flag, int fmt, struct lwp *l)
{
	device_t self, new_self;
	int unit;
	int part = DISKPART(dev);
	int pmask = 1 << part;
	cfdata_t cf;
	struct md_softc *sc;
	struct disk *dk;
#ifdef MEMORY_DISK_HOOKS
	bool configured;
#endif

	mutex_enter(&md_device_lock);
	unit = MD_UNIT(dev);
	sc = device_lookup_private_acquire(&md_cd, unit);
	if (sc == NULL) {
		if (part != RAW_PART) {
			mutex_exit(&md_device_lock);
			return ENXIO;
		}
		cf = malloc(sizeof(*cf), M_DEVBUF, M_WAITOK);
		cf->cf_name = md_cd.cd_name;
		cf->cf_atname = md_cd.cd_name;
		cf->cf_unit = unit;
		cf->cf_fstate = FSTATE_STAR;
		new_self = config_attach_pseudo(cf);
		self = device_lookup_acquire(&md_cd, unit);
		KASSERT(self == new_self);
		if (self == NULL) {
			/* Attach failed; nothing to release. */
			mutex_exit(&md_device_lock);
			return ENOMEM;
		}
		sc = device_private(self);
	}

	dk = &sc->sc_dkdev;

	/*
	 * The raw partition is used for ioctl to configure.
	 */
	if (part == RAW_PART)
		goto ok;

#ifdef MEMORY_DISK_HOOKS
	/* Call the open hook to allow loading the device. */
	configured = (sc->sc_type != MD_UNCONFIGURED);
	md_open_hook(unit, &sc->sc_md);
	/* initialize disklabel if the device is configured in open hook */
	if (!configured && sc->sc_type != MD_UNCONFIGURED)
		md_set_disklabel(sc);
#endif

	/*
	 * This is a normal, "slave" device, so
	 * enforce initialized.
	 */
	if (sc->sc_type == MD_UNCONFIGURED) {
		mutex_exit(&md_device_lock);
		device_release(sc->sc_dev);
		return ENXIO;
	}

ok:
	/* XXX duplicates code in dk_open().  Call dk_open(), instead? */
	mutex_enter(&dk->dk_openlock);
	/* Mark our unit as open. */
	switch (fmt) {
	case S_IFCHR:
		dk->dk_copenmask |= pmask;
		break;
	case S_IFBLK:
		dk->dk_bopenmask |= pmask;
		break;
	}

	dk->dk_openmask = dk->dk_copenmask | dk->dk_bopenmask;

	mutex_exit(&dk->dk_openlock);
	mutex_exit(&md_device_lock);
	device_release(sc->sc_dev);
	return 0;
}

static int
mdclose(dev_t dev, int flag, int fmt, struct lwp *l)
{
	int part = DISKPART(dev);
	int pmask = 1 << part;
	int error;
	cfdata_t cf;
	struct md_softc *sc;
	struct disk *dk;

	sc = device_lookup_private_acquire(&md_cd, MD_UNIT(dev));
	if (sc == NULL)
		return ENXIO;

	dk = &sc->sc_dkdev;

	mutex_enter(&dk->dk_openlock);

	switch (fmt) {
	case S_IFCHR:
		dk->dk_copenmask &= ~pmask;
		break;
	case S_IFBLK:
		dk->dk_bopenmask &= ~pmask;
		break;
	}
	dk->dk_openmask = dk->dk_copenmask | dk->dk_bopenmask;
	if (dk->dk_openmask != 0) {
		mutex_exit(&dk->dk_openlock);
		device_release(sc->sc_dev);
		return 0;
	}

	mutex_exit(&dk->dk_openlock);

	mutex_enter(&md_device_lock);
	cf = device_cfdata(sc->sc_dev);
	error = config_detach(sc->sc_dev, DETACH_QUIET);
	if (! error)
		free(cf, M_DEVBUF);
	mutex_exit(&md_device_lock);
	if (error)
		device_release(sc->sc_dev);
	return error;
}

static int
mdread(dev_t dev, struct uio *uio, int flags)
{
	struct md_softc *sc;
	int error;

	sc = device_lookup_private_acquire(&md_cd, MD_UNIT(dev));

	if (sc == NULL || sc->sc_type == MD_UNCONFIGURED) {
		if (sc != NULL)
			device_release(sc->sc_dev);
		return ENXIO;
	}

	error = physio(mdstrategy, NULL, dev, B_READ, minphys, uio);
	device_release(sc->sc_dev);
	return error;
}

static int
mdwrite(dev_t dev, struct uio *uio, int flags)
{
	struct md_softc *sc;
	int error;

	sc = device_lookup_private_acquire(&md_cd, MD_UNIT(dev));

	if (sc == NULL || sc->sc_type == MD_UNCONFIGURED) {
		if (sc != NULL) {
			device_release(sc->sc_dev);
		}
		return ENXIO;
	}

	error = physio(mdstrategy, NULL, dev, B_WRITE, minphys, uio);

	device_release(sc->sc_dev);
	return error;
}

/*
 * Handle I/O requests, either directly, or
 * by passing them to the server process.
 */
static void
mdstrategy(struct buf *bp)
{
	struct md_softc *sc;
	void *addr;
	size_t off, xfer;
	bool is_read;

	sc = device_lookup_private_acquire(&md_cd, MD_UNIT(bp->b_dev));
	if (sc == NULL || sc->sc_type == MD_UNCONFIGURED) {
		bp->b_error = ENXIO;
		goto done;
	}
	mutex_enter(&sc->sc_lock);

	switch (sc->sc_type) {
#if MEMORY_DISK_SERVER
	case MD_UMEM_SERVER:
		/* Just add this job to the server's queue. */
		bufq_put(sc->sc_buflist, bp);
		cv_signal(&sc->sc_cv);
		mutex_exit(&sc->sc_lock);
		/* see md_server_loop() */
		/* no biodone in this case */
		device_release(sc->sc_dev);
		return;
#endif	/* MEMORY_DISK_SERVER */

	case MD_KMEM_FIXED:
	case MD_KMEM_ALLOCATED:
		/* These are in kernel space.  Access directly. */
		is_read = ((bp->b_flags & B_READ) == B_READ);
		bp->b_resid = bp->b_bcount;
		off = (bp->b_blkno << DEV_BSHIFT);
		if (off >= sc->sc_size) {
			if (is_read)
				break;	/* EOF */
			goto set_eio;
		}
		xfer = bp->b_resid;
		if (xfer > (sc->sc_size - off))
			xfer = (sc->sc_size - off);
		addr = (char *)sc->sc_addr + off;
		disk_busy(&sc->sc_dkdev);
		if (is_read)
			memcpy(bp->b_data, addr, xfer);
		else
			memcpy(addr, bp->b_data, xfer);
		disk_unbusy(&sc->sc_dkdev, xfer, is_read);
		bp->b_resid -= xfer;
		break;

	default:
		bp->b_resid = bp->b_bcount;
	set_eio:
		bp->b_error = EIO;
		break;
	}
	mutex_exit(&sc->sc_lock);

done:
	biodone(bp);
	if (sc != NULL)
		device_release(sc->sc_dev);
}

static int
mdioctl(dev_t dev, u_long cmd, void *data, int flag, struct lwp *l)
{
	struct md_softc *sc;
	struct md_conf *umd;
	int error;

	sc = device_lookup_private_acquire(&md_cd, MD_UNIT(dev));
	if (sc == NULL)
		return ENXIO;

	mutex_enter(&sc->sc_lock);
	if (sc->sc_type != MD_UNCONFIGURED) {
		error = disk_ioctl(&sc->sc_dkdev, dev, cmd, data, flag, l);
		if (error != EPASSTHROUGH) {
			mutex_exit(&sc->sc_lock);
			device_release(sc->sc_dev);
			return error;
		}
	}

	/* If this is not the raw partition, punt! */
	if (DISKPART(dev) != RAW_PART) {
		mutex_exit(&sc->sc_lock);
		device_release(sc->sc_dev);
		return ENOTTY;
	}

	umd = (struct md_conf *)data;
	error = EINVAL;
	switch (cmd) {
	case MD_GETCONF:
		*umd = sc->sc_md;
		error = 0;
		break;

	case MD_SETCONF:
		/* Can only set it once. */
		if (sc->sc_type != MD_UNCONFIGURED)
			break;
		switch (umd->md_type) {
		case MD_KMEM_ALLOCATED:
			error = md_ioctl_kalloc(sc, umd, l);
			break;
#if MEMORY_DISK_SERVER
		case MD_UMEM_SERVER:
			error = md_ioctl_server(sc, umd, l);
			break;
#endif	/* MEMORY_DISK_SERVER */
		default:
			break;
		}
		break;
	}
	mutex_exit(&sc->sc_lock);
	device_release(sc->sc_dev);
	return error;
}
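
/*
 * Hedged user-space sketch (not part of this driver): configuring a unit
 * through the raw partition, as mdioctl() above expects.  The device node
 * name and raw-partition letter are assumptions about the target platform;
 * MD_SETCONF, struct md_conf and its fields come from <dev/md.h>.
 *
 *	struct md_conf md = {
 *		.md_size = 8 * 1024 * 1024,	// 8 MB ramdisk
 *		.md_type = MD_KMEM_ALLOCATED,	// kernel allocates the memory
 *	};
 *	int fd = open("/dev/rmd0d", O_RDWR);	// raw partition of unit 0
 *	if (fd != -1 && ioctl(fd, MD_SETCONF, &md) == 0)
 *		;	// the block device (e.g. /dev/md0a) is now usable
 */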

static void
md_set_disklabel(struct md_softc *sc)
{
	struct disk_geom *dg = &sc->sc_dkdev.dk_geom;
	struct disklabel *lp = sc->sc_dkdev.dk_label;
	struct partition *pp;

	memset(lp, 0, sizeof(*lp));

	lp->d_secsize = DEV_BSIZE;
	lp->d_secperunit = sc->sc_size / DEV_BSIZE;
	if (lp->d_secperunit >= (32*64)) {
		lp->d_nsectors = 32;
		lp->d_ntracks = 64;
		lp->d_ncylinders = lp->d_secperunit / (32*64);
	} else {
		lp->d_nsectors = 1;
		lp->d_ntracks = 1;
		lp->d_ncylinders = lp->d_secperunit;
	}
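	/*
	 * Worked example (illustrative): an 8 MB unit has
	 * 8*1024*1024 / 512 = 16384 sectors; with the fake 32x64
	 * geometry above that is 16384 / 2048 = 8 cylinders.
	 */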
	lp->d_secpercyl = lp->d_ntracks * lp->d_nsectors;

	strncpy(lp->d_typename, md_cd.cd_name, sizeof(lp->d_typename));
	lp->d_type = DKTYPE_MD;
	strncpy(lp->d_packname, "fictitious", sizeof(lp->d_packname));
	lp->d_rpm = 3600;
	lp->d_interleave = 1;
	lp->d_flags = 0;

	pp = &lp->d_partitions[0];
	pp->p_offset = 0;
	pp->p_size = lp->d_secperunit;
	pp->p_fstype = FS_BSDFFS;

	pp = &lp->d_partitions[RAW_PART];
	pp->p_offset = 0;
	pp->p_size = lp->d_secperunit;
	pp->p_fstype = FS_UNUSED;

	lp->d_npartitions = RAW_PART + 1;
	lp->d_magic = DISKMAGIC;
	lp->d_magic2 = DISKMAGIC;
	lp->d_checksum = dkcksum(lp);

	memset(dg, 0, sizeof(*dg));

	dg->dg_secsize = lp->d_secsize;
	dg->dg_secperunit = lp->d_secperunit;
	dg->dg_nsectors = lp->d_nsectors;
	dg->dg_ntracks = lp->d_ntracks;
	dg->dg_ncylinders = lp->d_ncylinders;

	disk_set_info(sc->sc_dev, &sc->sc_dkdev, NULL);
}

/*
 * Handle ioctl MD_SETCONF for (sc_type == MD_KMEM_ALLOCATED)
 * Just allocate some kernel memory and return.
 */
static int
md_ioctl_kalloc(struct md_softc *sc, struct md_conf *umd,
    struct lwp *l)
{
	vaddr_t addr;
	vsize_t size;

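	/*
	 * Drop the unit lock across the allocation: uvm_km_alloc() can
	 * sleep waiting for memory, and the "already configured" case is
	 * re-checked once the lock is re-taken below.
	 */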
	mutex_exit(&sc->sc_lock);

	/* Sanity check the size. */
	size = umd->md_size;
	addr = uvm_km_alloc(kernel_map, size, 0, UVM_KMF_WIRED|UVM_KMF_ZERO);

	mutex_enter(&sc->sc_lock);

	if (!addr)
		return ENOMEM;

	/* If another thread beat us to configure this unit: fail. */
	if (sc->sc_type != MD_UNCONFIGURED) {
		uvm_km_free(kernel_map, addr, size, UVM_KMF_WIRED);
		return EINVAL;
	}

	/* This unit is now configured. */
	sc->sc_addr = (void *)addr;	/* kernel space */
	sc->sc_size = (size_t)size;
	sc->sc_type = MD_KMEM_ALLOCATED;
	md_set_disklabel(sc);
	return 0;
}

#if MEMORY_DISK_SERVER

/*
 * Handle ioctl MD_SETCONF for (sc_type == MD_UMEM_SERVER)
 * Set config, then become the I/O server for this unit.
 */
static int
md_ioctl_server(struct md_softc *sc, struct md_conf *umd,
    struct lwp *l)
{
	vaddr_t end;
	int error;

	KASSERT(mutex_owned(&sc->sc_lock));

	/* Sanity check addr, size. */
	end = (vaddr_t) ((char *)umd->md_addr + umd->md_size);

	if ((end >= VM_MAXUSER_ADDRESS) ||
	    (end < ((vaddr_t) umd->md_addr)))
		return EINVAL;

	/* This unit is now configured. */
	sc->sc_addr = umd->md_addr;	/* user space */
	sc->sc_size = umd->md_size;
	sc->sc_type = MD_UMEM_SERVER;
	md_set_disklabel(sc);

	/* Become the server daemon */
	error = md_server_loop(sc);

	/* This server is now going away! */
	sc->sc_type = MD_UNCONFIGURED;
	sc->sc_addr = 0;
	sc->sc_size = 0;

	return (error);
}
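
/*
 * Hedged user-space sketch (an assumption about a typical client, not
 * code shipped with the driver): the server variant hands part of the
 * caller's address space to the driver and then stays inside the ioctl,
 * serving I/O via md_server_loop() below, until a signal or error stops it.
 *
 *	struct md_conf md = {
 *		.md_addr = calloc(1, 8 * 1024 * 1024),
 *		.md_size = 8 * 1024 * 1024,
 *		.md_type = MD_UMEM_SERVER,
 *	};
 *	ioctl(fd, MD_SETCONF, &md);	// fd: raw partition; returns only
 *					// when the unit stops serving
 */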

static int
md_server_loop(struct md_softc *sc)
{
	struct buf *bp;
	void *addr;	/* user space address */
	size_t off;	/* offset into "device" */
	size_t xfer;	/* amount to transfer */
	int error;
	bool is_read;

	KASSERT(mutex_owned(&sc->sc_lock));

	for (;;) {
		/* Wait for some work to arrive. */
		while ((bp = bufq_get(sc->sc_buflist)) == NULL) {
			error = cv_wait_sig(&sc->sc_cv, &sc->sc_lock);
			if (error)
				return error;
		}

		/* Do the transfer to/from user space. */
		mutex_exit(&sc->sc_lock);
		error = 0;
		is_read = ((bp->b_flags & B_READ) == B_READ);
		bp->b_resid = bp->b_bcount;
		off = (bp->b_blkno << DEV_BSHIFT);
		if (off >= sc->sc_size) {
			if (is_read)
				goto done;	/* EOF (not an error) */
			error = EIO;
			goto done;
		}
		xfer = bp->b_resid;
		if (xfer > (sc->sc_size - off))
			xfer = (sc->sc_size - off);
		addr = (char *)sc->sc_addr + off;
		disk_busy(&sc->sc_dkdev);
		if (is_read)
			error = copyin(addr, bp->b_data, xfer);
		else
			error = copyout(bp->b_data, addr, xfer);
		disk_unbusy(&sc->sc_dkdev, (error ? 0 : xfer), is_read);
		if (!error)
			bp->b_resid -= xfer;

	done:
		if (error) {
			bp->b_error = error;
		}
		biodone(bp);
		mutex_enter(&sc->sc_lock);
	}
}
#endif	/* MEMORY_DISK_SERVER */