/*	$NetBSD: ld.c,v 1.13 2002/05/08 15:49:07 drochner Exp $	*/

/*-
 * Copyright (c) 1998, 2000 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran and Charles M. Hannum.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Disk driver for use by RAID controllers.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ld.c,v 1.13 2002/05/08 15:49:07 drochner Exp $");

#include "rnd.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/endian.h>
#include <sys/disklabel.h>
#include <sys/disk.h>
#include <sys/dkio.h>
#include <sys/stat.h>
#include <sys/lock.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/vnode.h>
#include <sys/syslog.h>
#if NRND > 0
#include <sys/rnd.h>
#endif

#include <dev/ldvar.h>

static void	ldgetdefaultlabel(struct ld_softc *, struct disklabel *);
static void	ldgetdisklabel(struct ld_softc *);
static int	ldlock(struct ld_softc *);
static void	ldminphys(struct buf *bp);
static void	ldshutdown(void *);
static int	ldstart(struct ld_softc *, struct buf *);
static void	ldunlock(struct ld_softc *);

extern struct cfdriver ld_cd;

static struct dkdriver lddkdriver = { ldstrategy };
static void *ld_sdh;

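/*
 * Attach a logical disk: set up the disk(9) structure, clamp the maximum
 * transfer size, fabricate a geometry, announce the unit and hook it into
 * the random-source and shutdown paths.
 */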
void
ldattach(struct ld_softc *sc)
{
	char buf[9];

	if ((sc->sc_flags & LDF_ENABLED) == 0) {
		printf("%s: disabled\n", sc->sc_dv.dv_xname);
		return;
	}

	/* Initialise and attach the disk structure. */
	sc->sc_dk.dk_driver = &lddkdriver;
	sc->sc_dk.dk_name = sc->sc_dv.dv_xname;
	disk_attach(&sc->sc_dk);

	if (sc->sc_maxxfer > MAXPHYS)
		sc->sc_maxxfer = MAXPHYS;

	/* Build synthetic geometry. */
	if (sc->sc_secperunit <= 528 * 2048)		/* 528MB */
		sc->sc_nheads = 16;
	else if (sc->sc_secperunit <= 1024 * 2048)	/* 1GB */
		sc->sc_nheads = 32;
	else if (sc->sc_secperunit <= 21504 * 2048)	/* 21GB */
		sc->sc_nheads = 64;
	else if (sc->sc_secperunit <= 43008 * 2048)	/* 42GB */
		sc->sc_nheads = 128;
	else
		sc->sc_nheads = 255;

	sc->sc_nsectors = 63;
	sc->sc_ncylinders = sc->sc_secperunit /
	    (sc->sc_nheads * sc->sc_nsectors);

	format_bytes(buf, sizeof(buf), (u_int64_t)sc->sc_secperunit *
	    sc->sc_secsize);
	printf("%s: %s, %d cyl, %d head, %d sec, %d bytes/sect x %d sectors\n",
	    sc->sc_dv.dv_xname, buf, sc->sc_ncylinders, sc->sc_nheads,
	    sc->sc_nsectors, sc->sc_secsize, sc->sc_secperunit);

#if NRND > 0
	/* Attach the device into the rnd source list. */
	rnd_attach_source(&sc->sc_rnd_source, sc->sc_dv.dv_xname,
	    RND_TYPE_DISK, 0);
#endif

	/* Set the `shutdownhook'. */
	if (ld_sdh == NULL)
		ld_sdh = shutdownhook_establish(ldshutdown, NULL);
	BUFQ_INIT(&sc->sc_bufq);
}

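/*
 * Adjust the maximum number of outstanding commands.  If more than the new
 * maximum are currently in flight, wait up to 30 seconds for the excess to
 * drain.
 */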
int
ldadjqparam(struct ld_softc *sc, int max)
{
	int s, rv;

	s = splbio();
	sc->sc_maxqueuecnt = max;
	if (sc->sc_queuecnt > max) {
		sc->sc_flags |= LDF_DRAIN;
		rv = tsleep(&sc->sc_queuecnt, PRIBIO, "lddrn", 30 * hz);
		sc->sc_flags &= ~LDF_DRAIN;
	} else
		rv = 0;
	splx(s);

	return (rv);
}

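/*
 * Begin detaching the unit: refuse if it is open (unless forced), then mark
 * it as detaching and wait for outstanding commands to drain.
 */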
int
ldbegindetach(struct ld_softc *sc, int flags)
{
	int s, rv;

	if ((sc->sc_flags & LDF_ENABLED) == 0)
		return (0);

	if ((flags & DETACH_FORCE) == 0 && sc->sc_dk.dk_openmask != 0)
		return (EBUSY);

	s = splbio();
	sc->sc_flags |= LDF_DETACH;
	rv = ldadjqparam(sc, 0);
	splx(s);

	return (rv);
}

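/*
 * Finish detaching the unit: wait for hardware-queued commands to complete,
 * error out any deferred buffers, revoke open vnodes, detach the disk(9)
 * structure and flush the controller's cache.
 */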
void
ldenddetach(struct ld_softc *sc)
{
	struct buf *bp;
	int s, bmaj, cmaj, i, mn;

	if ((sc->sc_flags & LDF_ENABLED) == 0)
		return;

	/* Wait for commands queued with the hardware to complete. */
	if (sc->sc_queuecnt != 0)
		if (tsleep(&sc->sc_queuecnt, PRIBIO, "lddtch", 30 * hz))
			printf("%s: not drained\n", sc->sc_dv.dv_xname);

	/* Locate the major numbers. */
	for (bmaj = 0; bmaj < nblkdev; bmaj++)
		if (bdevsw[bmaj].d_open == ldopen)
			break;
	for (cmaj = 0; cmaj < nchrdev; cmaj++)
		if (cdevsw[cmaj].d_open == ldopen)
			break;

	/* Kill off any queued buffers. */
	s = splbio();
	while ((bp = BUFQ_FIRST(&sc->sc_bufq)) != NULL) {
		BUFQ_REMOVE(&sc->sc_bufq, bp);
		bp->b_error = EIO;
		bp->b_flags |= B_ERROR;
		bp->b_resid = bp->b_bcount;
		biodone(bp);
	}
	splx(s);

	/* Nuke the vnodes for any open instances. */
	for (i = 0; i < MAXPARTITIONS; i++) {
		mn = DISKMINOR(sc->sc_dv.dv_unit, i);
		vdevgone(bmaj, mn, mn, VBLK);
		vdevgone(cmaj, mn, mn, VCHR);
	}

	/* Detach from the disk list. */
	disk_detach(&sc->sc_dk);

#if NRND > 0
	/* Unhook the entropy source. */
	rnd_detach_source(&sc->sc_rnd_source);
#endif

	/* Flush the device's cache. */
	if (sc->sc_flush != NULL)
		if ((*sc->sc_flush)(sc) != 0)
			printf("%s: unable to flush cache\n",
			    sc->sc_dv.dv_xname);
}

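/*
 * Shutdown hook: flush the cache of every configured unit before the system
 * goes down.
 */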
/* ARGSUSED */
static void
ldshutdown(void *cookie)
{
	struct ld_softc *sc;
	int i;

	for (i = 0; i < ld_cd.cd_ndevs; i++) {
		if ((sc = device_lookup(&ld_cd, i)) == NULL)
			continue;
		if (sc->sc_flush != NULL && (*sc->sc_flush)(sc) != 0)
			printf("%s: unable to flush cache\n",
			    sc->sc_dv.dv_xname);
	}
}

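/*
 * Handle an open: read the disklabel on first open and note which partition
 * has been opened.
 */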
/* ARGSUSED */
int
ldopen(dev_t dev, int flags, int fmt, struct proc *p)
{
	struct ld_softc *sc;
	int error, unit, part;

	unit = DISKUNIT(dev);
	if ((sc = device_lookup(&ld_cd, unit)) == NULL)
		return (ENXIO);
	if ((sc->sc_flags & LDF_ENABLED) == 0)
		return (ENODEV);
	part = DISKPART(dev);
	if ((error = ldlock(sc)) != 0)
		return (error);

	if (sc->sc_dk.dk_openmask == 0)
		ldgetdisklabel(sc);

	/* Check that the partition exists. */
	if (part != RAW_PART && (part >= sc->sc_dk.dk_label->d_npartitions ||
	    sc->sc_dk.dk_label->d_partitions[part].p_fstype == FS_UNUSED)) {
		ldunlock(sc);
		return (ENXIO);
	}

	/* Note that this partition is now open. */
	switch (fmt) {
	case S_IFCHR:
		sc->sc_dk.dk_copenmask |= (1 << part);
		break;
	case S_IFBLK:
		sc->sc_dk.dk_bopenmask |= (1 << part);
		break;
	}
	sc->sc_dk.dk_openmask =
	    sc->sc_dk.dk_copenmask | sc->sc_dk.dk_bopenmask;

	ldunlock(sc);
	return (0);
}

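/*
 * Handle a close: on last close, flush the controller's cache if a flush
 * routine is provided.
 */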
/* ARGSUSED */
int
ldclose(dev_t dev, int flags, int fmt, struct proc *p)
{
	struct ld_softc *sc;
	int error, part, unit;

	unit = DISKUNIT(dev);
	part = DISKPART(dev);
	sc = device_lookup(&ld_cd, unit);
	if ((error = ldlock(sc)) != 0)
		return (error);

	switch (fmt) {
	case S_IFCHR:
		sc->sc_dk.dk_copenmask &= ~(1 << part);
		break;
	case S_IFBLK:
		sc->sc_dk.dk_bopenmask &= ~(1 << part);
		break;
	}
	sc->sc_dk.dk_openmask =
	    sc->sc_dk.dk_copenmask | sc->sc_dk.dk_bopenmask;

	if (sc->sc_dk.dk_openmask == 0 && sc->sc_flush != NULL)
		if ((*sc->sc_flush)(sc) != 0)
			printf("%s: unable to flush cache\n",
			    sc->sc_dv.dv_xname);

	ldunlock(sc);
	return (0);
}

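/*
 * Read from the raw device via physio(9), bounded by ldminphys().
 */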
/* ARGSUSED */
int
ldread(dev_t dev, struct uio *uio, int ioflag)
{

	return (physio(ldstrategy, NULL, dev, B_READ, ldminphys, uio));
}

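/*
 * Write to the raw device via physio(9), bounded by ldminphys().
 */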
/* ARGSUSED */
int
ldwrite(dev_t dev, struct uio *uio, int ioflag)
{

	return (physio(ldstrategy, NULL, dev, B_WRITE, ldminphys, uio));
}

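/*
 * Handle disk ioctls: get, set and write the disklabel, and toggle label
 * write protection.
 */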
/* ARGSUSED */
int
ldioctl(dev_t dev, u_long cmd, caddr_t addr, int32_t flag, struct proc *p)
{
	struct ld_softc *sc;
	int part, unit, error;
#ifdef __HAVE_OLD_DISKLABEL
	struct disklabel newlabel;
#endif
	struct disklabel *lp;

	unit = DISKUNIT(dev);
	part = DISKPART(dev);
	sc = device_lookup(&ld_cd, unit);
	error = 0;

	switch (cmd) {
	case DIOCGDINFO:
		memcpy(addr, sc->sc_dk.dk_label, sizeof(struct disklabel));
		return (0);

#ifdef __HAVE_OLD_DISKLABEL
	case ODIOCGDINFO:
		newlabel = *(sc->sc_dk.dk_label);
		if (newlabel.d_npartitions > OLDMAXPARTITIONS)
			return ENOTTY;
		memcpy(addr, &newlabel, sizeof(struct olddisklabel));
		return (0);
#endif

	case DIOCGPART:
		((struct partinfo *)addr)->disklab = sc->sc_dk.dk_label;
		((struct partinfo *)addr)->part =
		    &sc->sc_dk.dk_label->d_partitions[part];
		break;

	case DIOCWDINFO:
	case DIOCSDINFO:
#ifdef __HAVE_OLD_DISKLABEL
	case ODIOCWDINFO:
	case ODIOCSDINFO:

		if (cmd == ODIOCSDINFO || cmd == ODIOCWDINFO) {
			memset(&newlabel, 0, sizeof newlabel);
			memcpy(&newlabel, addr, sizeof (struct olddisklabel));
			lp = &newlabel;
		} else
#endif
		lp = (struct disklabel *)addr;

		if ((flag & FWRITE) == 0)
			return (EBADF);

		if ((error = ldlock(sc)) != 0)
			return (error);
		sc->sc_flags |= LDF_LABELLING;

		error = setdisklabel(sc->sc_dk.dk_label,
		    lp, /*sc->sc_dk.dk_openmask : */0,
		    sc->sc_dk.dk_cpulabel);
		if (error == 0 && (cmd == DIOCWDINFO
#ifdef __HAVE_OLD_DISKLABEL
		    || cmd == ODIOCWDINFO
#endif
		    ))
			error = writedisklabel(
			    MAKEDISKDEV(major(dev), DISKUNIT(dev), RAW_PART),
			    ldstrategy, sc->sc_dk.dk_label,
			    sc->sc_dk.dk_cpulabel);

		sc->sc_flags &= ~LDF_LABELLING;
		ldunlock(sc);
		break;

	case DIOCWLABEL:
		if ((flag & FWRITE) == 0)
			return (EBADF);
		if (*(int *)addr)
			sc->sc_flags |= LDF_WLABEL;
		else
			sc->sc_flags &= ~LDF_WLABEL;
		break;

	case DIOCGDEFLABEL:
		ldgetdefaultlabel(sc, (struct disklabel *)addr);
		break;

#ifdef __HAVE_OLD_DISKLABEL
	case ODIOCGDEFLABEL:
		ldgetdefaultlabel(sc, &newlabel);
		if (newlabel.d_npartitions > OLDMAXPARTITIONS)
			return ENOTTY;
		memcpy(addr, &newlabel, sizeof (struct olddisklabel));
		break;
#endif

	default:
		error = ENOTTY;
		break;
	}

	return (error);
}

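/*
 * Accept a buffer from the system: if the controller already has its maximum
 * number of commands outstanding, defer the buffer, otherwise start it.
 */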
void
ldstrategy(struct buf *bp)
{
	struct ld_softc *sc;
	int s;

	sc = device_lookup(&ld_cd, DISKUNIT(bp->b_dev));

	s = splbio();
	if (sc->sc_queuecnt >= sc->sc_maxqueuecnt) {
		BUFQ_INSERT_TAIL(&sc->sc_bufq, bp);
		splx(s);
		return;
	}
	splx(s);
	ldstart(sc, bp);
}

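/*
 * Validate a buffer, translate its block number into a device address and
 * hand it to the controller's start routine.  Returns non-zero if the
 * request was terminated here rather than passed to the controller.
 */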
static int
ldstart(struct ld_softc *sc, struct buf *bp)
{
	struct disklabel *lp;
	int part, s, rv;

	if ((sc->sc_flags & LDF_DETACH) != 0) {
		bp->b_error = EIO;
		bp->b_flags |= B_ERROR;
		bp->b_resid = bp->b_bcount;
		biodone(bp);
		return (-1);
	}

	part = DISKPART(bp->b_dev);
	lp = sc->sc_dk.dk_label;

	/*
	 * The transfer must be a whole number of blocks and the offset must
	 * not be negative.
	 */
	if ((bp->b_bcount % lp->d_secsize) != 0 || bp->b_blkno < 0) {
		bp->b_flags |= B_ERROR;
		biodone(bp);
		return (-1);
	}

	/*
	 * If it's a null transfer, return.
	 */
	if (bp->b_bcount == 0) {
		bp->b_resid = bp->b_bcount;
		biodone(bp);
		return (-1);
	}

	/*
	 * Do bounds checking and adjust the transfer.  If error, process.
	 * If past the end of partition, just return.
	 */
	if (part != RAW_PART &&
	    bounds_check_with_label(bp, lp,
	    (sc->sc_flags & (LDF_WLABEL | LDF_LABELLING)) != 0) <= 0) {
		bp->b_resid = bp->b_bcount;
		biodone(bp);
		return (-1);
	}

	/*
	 * Convert the logical block number to a physical one and put it in
	 * terms of the device's logical block size.
	 */
	if (lp->d_secsize >= DEV_BSIZE)
		bp->b_rawblkno = bp->b_blkno / (lp->d_secsize / DEV_BSIZE);
	else
		bp->b_rawblkno = bp->b_blkno * (DEV_BSIZE / lp->d_secsize);

	if (part != RAW_PART)
		bp->b_rawblkno += lp->d_partitions[part].p_offset;

	s = splbio();
	disk_busy(&sc->sc_dk);
	sc->sc_queuecnt++;
	splx(s);

	if ((rv = (*sc->sc_start)(sc, bp)) != 0) {
		bp->b_error = rv;
		bp->b_flags |= B_ERROR;
		bp->b_resid = bp->b_bcount;
		s = splbio();
		lddone(sc, bp);
		splx(s);
	}

	return (0);
}

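/*
 * A transfer has completed: report any error, update the disk statistics,
 * finish the buffer and, once the queue has drained sufficiently, start the
 * next deferred buffer.
 */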
void
lddone(struct ld_softc *sc, struct buf *bp)
{

	if ((bp->b_flags & B_ERROR) != 0) {
		diskerr(bp, "ld", "error", LOG_PRINTF, 0, sc->sc_dk.dk_label);
		printf("\n");
	}

	disk_unbusy(&sc->sc_dk, bp->b_bcount - bp->b_resid);
#if NRND > 0
	rnd_add_uint32(&sc->sc_rnd_source, bp->b_rawblkno);
#endif
	biodone(bp);

	if (--sc->sc_queuecnt <= sc->sc_maxqueuecnt) {
		if ((sc->sc_flags & LDF_DRAIN) != 0)
			wakeup(&sc->sc_queuecnt);
		while ((bp = BUFQ_FIRST(&sc->sc_bufq)) != NULL) {
			BUFQ_REMOVE(&sc->sc_bufq, bp);
			if (!ldstart(sc, bp))
				break;
		}
	}
}

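/*
 * Return the size of the given partition in DEV_BSIZE units if it is marked
 * as swap, otherwise -1.
 */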
int
ldsize(dev_t dev)
{
	struct ld_softc *sc;
	int part, unit, omask, size;

	unit = DISKUNIT(dev);
	if ((sc = device_lookup(&ld_cd, unit)) == NULL)
		return (ENODEV);
	if ((sc->sc_flags & LDF_ENABLED) == 0)
		return (ENODEV);
	part = DISKPART(dev);

	omask = sc->sc_dk.dk_openmask & (1 << part);

	if (omask == 0 && ldopen(dev, 0, S_IFBLK, NULL) != 0)
		return (-1);
	else if (sc->sc_dk.dk_label->d_partitions[part].p_fstype != FS_SWAP)
		size = -1;
	else
		size = sc->sc_dk.dk_label->d_partitions[part].p_size *
		    (sc->sc_dk.dk_label->d_secsize / DEV_BSIZE);
	if (omask == 0 && ldclose(dev, 0, S_IFBLK, NULL) != 0)
		return (-1);

	return (size);
}

/*
 * Load the label information from the specified device.
 */
static void
ldgetdisklabel(struct ld_softc *sc)
{
	const char *errstring;

	ldgetdefaultlabel(sc, sc->sc_dk.dk_label);

	/* Call the generic disklabel extraction routine. */
	errstring = readdisklabel(MAKEDISKDEV(0, sc->sc_dv.dv_unit, RAW_PART),
	    ldstrategy, sc->sc_dk.dk_label, sc->sc_dk.dk_cpulabel);
	if (errstring != NULL)
		printf("%s: %s\n", sc->sc_dv.dv_xname, errstring);
}

/*
 * Construct a fictitious label.
 */
static void
ldgetdefaultlabel(struct ld_softc *sc, struct disklabel *lp)
{

	memset(lp, 0, sizeof(struct disklabel));

	lp->d_secsize = sc->sc_secsize;
	lp->d_ntracks = sc->sc_nheads;
	lp->d_nsectors = sc->sc_nsectors;
	lp->d_ncylinders = sc->sc_ncylinders;
	lp->d_secpercyl = lp->d_ntracks * lp->d_nsectors;
	lp->d_type = DTYPE_LD;
	strcpy(lp->d_typename, "unknown");
	strcpy(lp->d_packname, "fictitious");
	lp->d_secperunit = sc->sc_secperunit;
	lp->d_rpm = 7200;
	lp->d_interleave = 1;
	lp->d_flags = 0;

	lp->d_partitions[RAW_PART].p_offset = 0;
	lp->d_partitions[RAW_PART].p_size =
	    lp->d_secperunit * (lp->d_secsize / DEV_BSIZE);
	lp->d_partitions[RAW_PART].p_fstype = FS_UNUSED;
	lp->d_npartitions = RAW_PART + 1;

	lp->d_magic = DISKMAGIC;
	lp->d_magic2 = DISKMAGIC;
	lp->d_checksum = dkcksum(lp);
}

/*
 * Wait interruptibly for an exclusive lock.
 *
 * XXX Several drivers do this; it should be abstracted and made MP-safe.
 */
static int
ldlock(struct ld_softc *sc)
{
	int error;

	while ((sc->sc_flags & LDF_LKHELD) != 0) {
		sc->sc_flags |= LDF_LKWANTED;
		if ((error = tsleep(sc, PRIBIO | PCATCH, "ldlck", 0)) != 0)
			return (error);
	}
	sc->sc_flags |= LDF_LKHELD;
	return (0);
}

/*
 * Unlock and wake up any waiters.
 */
static void
ldunlock(struct ld_softc *sc)
{

	sc->sc_flags &= ~LDF_LKHELD;
	if ((sc->sc_flags & LDF_LKWANTED) != 0) {
		sc->sc_flags &= ~LDF_LKWANTED;
		wakeup(sc);
	}
}

/*
 * Take a crash dump: write `size' bytes from `va' to the unit, starting at
 * block `blkno'.
 */
int
lddump(dev_t dev, daddr_t blkno, caddr_t va, size_t size)
{
	struct ld_softc *sc;
	struct disklabel *lp;
	int unit, part, nsects, sectoff, towrt, nblk, maxblkcnt, rv;
	static int dumping;

	unit = DISKUNIT(dev);
	if ((sc = device_lookup(&ld_cd, unit)) == NULL)
		return (ENXIO);
	if ((sc->sc_flags & LDF_ENABLED) == 0)
		return (ENODEV);
	if (sc->sc_dump == NULL)
		return (ENXIO);

	/* Check if recursive dump; if so, punt. */
	if (dumping)
		return (EFAULT);
	dumping = 1;

	/*
	 * Convert to disk sectors.  The size must be a multiple of the
	 * sector size.
	 */
	part = DISKPART(dev);
	lp = sc->sc_dk.dk_label;
	if ((size % lp->d_secsize) != 0) {
		dumping = 0;
		return (EFAULT);
	}
	towrt = size / lp->d_secsize;
	blkno = dbtob(blkno) / lp->d_secsize;	/* blkno in DEV_BSIZE units */

	nsects = lp->d_partitions[part].p_size;
	sectoff = lp->d_partitions[part].p_offset;

	/* Check transfer bounds against partition size. */
	if ((blkno < 0) || ((blkno + towrt) > nsects)) {
		dumping = 0;
		return (EINVAL);
	}

	/* Offset block number to start of partition. */
	blkno += sectoff;

	/* Start dumping and return when done. */
	maxblkcnt = sc->sc_maxxfer / sc->sc_secsize - 1;
	while (towrt > 0) {
		nblk = min(maxblkcnt, towrt);

		if ((rv = (*sc->sc_dump)(sc, va, blkno, nblk)) != 0) {
			dumping = 0;
			return (rv);
		}

		towrt -= nblk;
		blkno += nblk;
		va += nblk * sc->sc_secsize;
	}

	dumping = 0;
	return (0);
}

/*
 * Adjust the size of a transfer.
 */
static void
ldminphys(struct buf *bp)
{
	struct ld_softc *sc;

	sc = device_lookup(&ld_cd, DISKUNIT(bp->b_dev));

	if (bp->b_bcount > sc->sc_maxxfer)
		bp->b_bcount = sc->sc_maxxfer;
	minphys(bp);
}