/*	$NetBSD: ld.c,v 1.31 2004/10/17 17:02:48 jdolecek Exp $	*/

/*-
 * Copyright (c) 1998, 2000 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran and Charles M. Hannum.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Disk driver for use by RAID controllers.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ld.c,v 1.31 2004/10/17 17:02:48 jdolecek Exp $");

#include "rnd.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/endian.h>
#include <sys/disklabel.h>
#include <sys/disk.h>
#include <sys/dkio.h>
#include <sys/stat.h>
#include <sys/lock.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/vnode.h>
#include <sys/syslog.h>
#if NRND > 0
#include <sys/rnd.h>
#endif

#include <dev/ldvar.h>

static void ldgetdefaultlabel(struct ld_softc *, struct disklabel *);
static void ldgetdisklabel(struct ld_softc *);
static void ldminphys(struct buf *bp);
static void ldshutdown(void *);
static void ldstart(struct ld_softc *);

extern struct cfdriver ld_cd;

static dev_type_open(ldopen);
static dev_type_close(ldclose);
static dev_type_read(ldread);
static dev_type_write(ldwrite);
static dev_type_ioctl(ldioctl);
static dev_type_strategy(ldstrategy);
static dev_type_dump(lddump);
static dev_type_size(ldsize);

const struct bdevsw ld_bdevsw = {
        ldopen, ldclose, ldstrategy, ldioctl, lddump, ldsize, D_DISK
};

const struct cdevsw ld_cdevsw = {
        ldopen, ldclose, ldread, ldwrite, ldioctl,
        nostop, notty, nopoll, nommap, nokqfilter, D_DISK
};

static struct dkdriver lddkdriver = { ldstrategy, ldminphys };
static void *ld_sdh;

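/*
 * Attach a logical disk: set up the disk(9) structure, fabricate a
 * geometry if the controller did not supply one, and make the unit
 * ready for I/O.  Called by the controller-specific front-end.
 */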
void
ldattach(struct ld_softc *sc)
{
        char buf[9];

        if ((sc->sc_flags & LDF_ENABLED) == 0) {
                printf("%s: disabled\n", sc->sc_dv.dv_xname);
                return;
        }

        /* Initialise and attach the disk structure. */
        sc->sc_dk.dk_driver = &lddkdriver;
        sc->sc_dk.dk_name = sc->sc_dv.dv_xname;
        disk_attach(&sc->sc_dk);

        if (sc->sc_maxxfer > MAXPHYS)
                sc->sc_maxxfer = MAXPHYS;

        /* Build synthetic geometry if necessary. */
        if (sc->sc_nheads == 0 || sc->sc_nsectors == 0 ||
            sc->sc_ncylinders == 0) {
                uint64_t ncyl;

                if (sc->sc_secperunit <= 528 * 2048)            /* 528MB */
                        sc->sc_nheads = 16;
                else if (sc->sc_secperunit <= 1024 * 2048)      /* 1GB */
                        sc->sc_nheads = 32;
                else if (sc->sc_secperunit <= 21504 * 2048)     /* 21GB */
                        sc->sc_nheads = 64;
                else if (sc->sc_secperunit <= 43008 * 2048)     /* 42GB */
                        sc->sc_nheads = 128;
                else
                        sc->sc_nheads = 255;

                sc->sc_nsectors = 63;
                sc->sc_ncylinders = INT_MAX;
                ncyl = sc->sc_secperunit /
                    (sc->sc_nheads * sc->sc_nsectors);
                if (ncyl < INT_MAX)
                        sc->sc_ncylinders = (int)ncyl;
        }

        format_bytes(buf, sizeof(buf), sc->sc_secperunit *
            sc->sc_secsize);
        printf("%s: %s, %d cyl, %d head, %d sec, %d bytes/sect x %"PRIu64" sectors\n",
            sc->sc_dv.dv_xname, buf, sc->sc_ncylinders, sc->sc_nheads,
            sc->sc_nsectors, sc->sc_secsize, sc->sc_secperunit);

#if NRND > 0
        /* Attach the device into the rnd source list. */
        rnd_attach_source(&sc->sc_rnd_source, sc->sc_dv.dv_xname,
            RND_TYPE_DISK, 0);
#endif

        /* Set the `shutdownhook'. */
        if (ld_sdh == NULL)
                ld_sdh = shutdownhook_establish(ldshutdown, NULL);
        bufq_alloc(&sc->sc_bufq, BUFQ_DISK_DEFAULT_STRAT()|BUFQ_SORT_RAWBLOCK);

        /* Discover wedges on this disk. */
        dkwedge_discover(&sc->sc_dk);
}

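/*
 * Adjust the maximum number of transfers that may be queued to the
 * hardware at once.
 */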
int
ldadjqparam(struct ld_softc *sc, int max)
{
        int s;

        s = splbio();
        sc->sc_maxqueuecnt = max;
        splx(s);

        return (0);
}

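/*
 * Begin detaching the device: refuse new transfers and wait for any
 * outstanding ones to drain.
 */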
int
ldbegindetach(struct ld_softc *sc, int flags)
{
        int s, rv = 0;

        if ((sc->sc_flags & LDF_ENABLED) == 0)
                return (0);

        if ((flags & DETACH_FORCE) == 0 && sc->sc_dk.dk_openmask != 0)
                return (EBUSY);

        s = splbio();
        sc->sc_maxqueuecnt = 0;
        sc->sc_flags |= LDF_DETACH;
        while (sc->sc_queuecnt > 0) {
                sc->sc_flags |= LDF_DRAIN;
                rv = tsleep(&sc->sc_queuecnt, PRIBIO, "lddrn", 0);
                if (rv)
                        break;
        }
        splx(s);

        return (rv);
}

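/*
 * Finish detaching: discard queued buffers, revoke open vnodes and
 * detach the disk structure and entropy source.
 */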
void
ldenddetach(struct ld_softc *sc)
{
        struct buf *bp;
        int s, bmaj, cmaj, i, mn;

        if ((sc->sc_flags & LDF_ENABLED) == 0)
                return;

        /* Wait for commands queued with the hardware to complete. */
        if (sc->sc_queuecnt != 0)
                if (tsleep(&sc->sc_queuecnt, PRIBIO, "lddtch", 30 * hz))
                        printf("%s: not drained\n", sc->sc_dv.dv_xname);

        /* Locate the major numbers. */
        bmaj = bdevsw_lookup_major(&ld_bdevsw);
        cmaj = cdevsw_lookup_major(&ld_cdevsw);

        /* Kill off any queued buffers. */
        s = splbio();
        while ((bp = BUFQ_GET(&sc->sc_bufq)) != NULL) {
                bp->b_error = EIO;
                bp->b_flags |= B_ERROR;
                bp->b_resid = bp->b_bcount;
                biodone(bp);
        }
        bufq_free(&sc->sc_bufq);
        splx(s);

        /* Nuke the vnodes for any open instances. */
        for (i = 0; i < MAXPARTITIONS; i++) {
                mn = DISKMINOR(sc->sc_dv.dv_unit, i);
                vdevgone(bmaj, mn, mn, VBLK);
                vdevgone(cmaj, mn, mn, VCHR);
        }

        /* Delete all of our wedges. */
        dkwedge_delall(&sc->sc_dk);

        /* Detach from the disk list. */
        disk_detach(&sc->sc_dk);

#if NRND > 0
        /* Unhook the entropy source. */
        rnd_detach_source(&sc->sc_rnd_source);
#endif

        /*
         * XXX We can't really flush the cache here, because the
         * XXX device may already be non-existent from the controller's
         * XXX perspective.
         */
#if 0
        /* Flush the device's cache. */
        if (sc->sc_flush != NULL)
                if ((*sc->sc_flush)(sc) != 0)
                        printf("%s: unable to flush cache\n",
                            sc->sc_dv.dv_xname);
#endif
}

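/*
 * Shutdown hook: flush the write-back cache of every attached unit.
 */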
/* ARGSUSED */
static void
ldshutdown(void *cookie)
{
        struct ld_softc *sc;
        int i;

        for (i = 0; i < ld_cd.cd_ndevs; i++) {
                if ((sc = device_lookup(&ld_cd, i)) == NULL)
                        continue;
                if (sc->sc_flush != NULL && (*sc->sc_flush)(sc) != 0)
                        printf("%s: unable to flush cache\n",
                            sc->sc_dv.dv_xname);
        }
}

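/*
 * Handle an open: read in the disklabel on first open and record
 * which partition is now in use.
 */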
/* ARGSUSED */
static int
ldopen(dev_t dev, int flags, int fmt, struct proc *p)
{
        struct ld_softc *sc;
        int error, unit, part;

        unit = DISKUNIT(dev);
        if ((sc = device_lookup(&ld_cd, unit)) == NULL)
                return (ENXIO);
        if ((sc->sc_flags & LDF_ENABLED) == 0)
                return (ENODEV);
        part = DISKPART(dev);

        if ((error = lockmgr(&sc->sc_dk.dk_openlock, LK_EXCLUSIVE, NULL)) != 0)
                return (error);

        if (sc->sc_dk.dk_openmask == 0) {
                /* Load the partition info if not already loaded. */
                if ((sc->sc_flags & LDF_VLABEL) == 0)
                        ldgetdisklabel(sc);
        }

        /* Check that the partition exists. */
        if (part != RAW_PART && (part >= sc->sc_dk.dk_label->d_npartitions ||
            sc->sc_dk.dk_label->d_partitions[part].p_fstype == FS_UNUSED)) {
                error = ENXIO;
                goto bad1;
        }

        /* Ensure only one open at a time. */
        switch (fmt) {
        case S_IFCHR:
                sc->sc_dk.dk_copenmask |= (1 << part);
                break;
        case S_IFBLK:
                sc->sc_dk.dk_bopenmask |= (1 << part);
                break;
        }
        sc->sc_dk.dk_openmask =
            sc->sc_dk.dk_copenmask | sc->sc_dk.dk_bopenmask;

        (void) lockmgr(&sc->sc_dk.dk_openlock, LK_RELEASE, NULL);
        return (0);

 bad1:
        (void) lockmgr(&sc->sc_dk.dk_openlock, LK_RELEASE, NULL);
        return (error);
}

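/*
 * Handle a close: on the last close, flush the cache and forget the
 * in-core label unless DIOCKLABEL made it persistent.
 */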
/* ARGSUSED */
static int
ldclose(dev_t dev, int flags, int fmt, struct proc *p)
{
        struct ld_softc *sc;
        int error, part, unit;

        unit = DISKUNIT(dev);
        part = DISKPART(dev);
        sc = device_lookup(&ld_cd, unit);

        if ((error = lockmgr(&sc->sc_dk.dk_openlock, LK_EXCLUSIVE, NULL)) != 0)
                return (error);

        switch (fmt) {
        case S_IFCHR:
                sc->sc_dk.dk_copenmask &= ~(1 << part);
                break;
        case S_IFBLK:
                sc->sc_dk.dk_bopenmask &= ~(1 << part);
                break;
        }
        sc->sc_dk.dk_openmask =
            sc->sc_dk.dk_copenmask | sc->sc_dk.dk_bopenmask;

        if (sc->sc_dk.dk_openmask == 0) {
                if (sc->sc_flush != NULL && (*sc->sc_flush)(sc) != 0)
                        printf("%s: unable to flush cache\n",
                            sc->sc_dv.dv_xname);
                if ((sc->sc_flags & LDF_KLABEL) == 0)
                        sc->sc_flags &= ~LDF_VLABEL;
        }

        (void) lockmgr(&sc->sc_dk.dk_openlock, LK_RELEASE, NULL);
        return (0);
}

/* ARGSUSED */
static int
ldread(dev_t dev, struct uio *uio, int ioflag)
{

        return (physio(ldstrategy, NULL, dev, B_READ, ldminphys, uio));
}

/* ARGSUSED */
static int
ldwrite(dev_t dev, struct uio *uio, int ioflag)
{

        return (physio(ldstrategy, NULL, dev, B_WRITE, ldminphys, uio));
}

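/*
 * Handle control requests: disklabel get/set, label write enable and
 * wedge operations.
 */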
/* ARGSUSED */
static int
ldioctl(dev_t dev, u_long cmd, caddr_t addr, int32_t flag, struct proc *p)
{
        struct ld_softc *sc;
        int part, unit, error;
#ifdef __HAVE_OLD_DISKLABEL
        struct disklabel newlabel;
#endif
        struct disklabel *lp;

        unit = DISKUNIT(dev);
        part = DISKPART(dev);
        sc = device_lookup(&ld_cd, unit);
        error = 0;

        switch (cmd) {
        case DIOCGDINFO:
                memcpy(addr, sc->sc_dk.dk_label, sizeof(struct disklabel));
                return (0);

#ifdef __HAVE_OLD_DISKLABEL
        case ODIOCGDINFO:
                newlabel = *(sc->sc_dk.dk_label);
                if (newlabel.d_npartitions > OLDMAXPARTITIONS)
                        return ENOTTY;
                memcpy(addr, &newlabel, sizeof(struct olddisklabel));
                return (0);
#endif

        case DIOCGPART:
                ((struct partinfo *)addr)->disklab = sc->sc_dk.dk_label;
                ((struct partinfo *)addr)->part =
                    &sc->sc_dk.dk_label->d_partitions[part];
                break;

        case DIOCWDINFO:
        case DIOCSDINFO:
#ifdef __HAVE_OLD_DISKLABEL
        case ODIOCWDINFO:
        case ODIOCSDINFO:

                if (cmd == ODIOCSDINFO || cmd == ODIOCWDINFO) {
                        memset(&newlabel, 0, sizeof newlabel);
                        memcpy(&newlabel, addr, sizeof (struct olddisklabel));
                        lp = &newlabel;
                } else
#endif
                lp = (struct disklabel *)addr;

                if ((flag & FWRITE) == 0)
                        return (EBADF);

                if ((error = lockmgr(&sc->sc_dk.dk_openlock, LK_EXCLUSIVE,
                    NULL)) != 0)
                        return (error);
                sc->sc_flags |= LDF_LABELLING;

                error = setdisklabel(sc->sc_dk.dk_label,
                    lp, /*sc->sc_dk.dk_openmask : */0,
                    sc->sc_dk.dk_cpulabel);
                if (error == 0 && (cmd == DIOCWDINFO
#ifdef __HAVE_OLD_DISKLABEL
                    || cmd == ODIOCWDINFO
#endif
                    ))
                        error = writedisklabel(
                            MAKEDISKDEV(major(dev), DISKUNIT(dev), RAW_PART),
                            ldstrategy, sc->sc_dk.dk_label,
                            sc->sc_dk.dk_cpulabel);

                sc->sc_flags &= ~LDF_LABELLING;
                (void) lockmgr(&sc->sc_dk.dk_openlock, LK_RELEASE, NULL);
                break;

        case DIOCKLABEL:
                if ((flag & FWRITE) == 0)
                        return (EBADF);
                if (*(int *)addr)
                        sc->sc_flags |= LDF_KLABEL;
                else
                        sc->sc_flags &= ~LDF_KLABEL;
                break;

        case DIOCWLABEL:
                if ((flag & FWRITE) == 0)
                        return (EBADF);
                if (*(int *)addr)
                        sc->sc_flags |= LDF_WLABEL;
                else
                        sc->sc_flags &= ~LDF_WLABEL;
                break;

        case DIOCGDEFLABEL:
                ldgetdefaultlabel(sc, (struct disklabel *)addr);
                break;

#ifdef __HAVE_OLD_DISKLABEL
        case ODIOCGDEFLABEL:
                ldgetdefaultlabel(sc, &newlabel);
                if (newlabel.d_npartitions > OLDMAXPARTITIONS)
                        return ENOTTY;
                memcpy(addr, &newlabel, sizeof (struct olddisklabel));
                break;
#endif

        case DIOCAWEDGE:
            {
                struct dkwedge_info *dkw = (void *) addr;

                if ((flag & FWRITE) == 0)
                        return (EBADF);

                /* If the ioctl happens here, the parent is us. */
                strcpy(dkw->dkw_parent, sc->sc_dv.dv_xname);
                return (dkwedge_add(dkw));
            }

        case DIOCDWEDGE:
            {
                struct dkwedge_info *dkw = (void *) addr;

                if ((flag & FWRITE) == 0)
                        return (EBADF);

                /* If the ioctl happens here, the parent is us. */
                strcpy(dkw->dkw_parent, sc->sc_dv.dv_xname);
                return (dkwedge_del(dkw));
            }

        case DIOCLWEDGES:
            {
                struct dkwedge_list *dkwl = (void *) addr;

                return (dkwedge_list(&sc->sc_dk, dkwl, p));
            }

        default:
                error = ENOTTY;
                break;
        }

        return (error);
}

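/*
 * Validate a transfer, convert the block number to an absolute device
 * address in the device's block size, and queue it for the back-end.
 */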
static void
ldstrategy(struct buf *bp)
{
        struct ld_softc *sc;
        struct disklabel *lp;
        daddr_t blkno;
        int s, part;

        sc = device_lookup(&ld_cd, DISKUNIT(bp->b_dev));
        part = DISKPART(bp->b_dev);

        if ((sc->sc_flags & LDF_DETACH) != 0) {
                bp->b_error = EIO;
                goto bad;
        }

        lp = sc->sc_dk.dk_label;

        /*
         * The transfer must be a whole number of blocks and the offset must
         * not be negative.
         */
        if ((bp->b_bcount % lp->d_secsize) != 0 || bp->b_blkno < 0) {
                bp->b_error = EINVAL;
                goto bad;
        }

        /* If it's a null transfer, return immediately. */
        if (bp->b_bcount == 0)
                goto done;

        /*
         * Do bounds checking and adjust the transfer.  If error, process.
         * If past the end of partition, just return.
         */
        if (part != RAW_PART &&
            bounds_check_with_label(&sc->sc_dk, bp,
            (sc->sc_flags & (LDF_WLABEL | LDF_LABELLING)) != 0) <= 0) {
                goto done;
        }

        /*
         * Convert the block number to absolute and put it in terms
         * of the device's logical block size.
         */
        if (lp->d_secsize == DEV_BSIZE)
                blkno = bp->b_blkno;
        else if (lp->d_secsize > DEV_BSIZE)
                blkno = bp->b_blkno / (lp->d_secsize / DEV_BSIZE);
        else
                blkno = bp->b_blkno * (DEV_BSIZE / lp->d_secsize);

        if (part != RAW_PART)
                blkno += lp->d_partitions[part].p_offset;

        bp->b_rawblkno = blkno;

        s = splbio();
        BUFQ_PUT(&sc->sc_bufq, bp);
        ldstart(sc);
        splx(s);
        return;

 bad:
        bp->b_flags |= B_ERROR;
 done:
        bp->b_resid = bp->b_bcount;
        biodone(bp);
}

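/*
 * Hand queued buffers to the back-end until its queue is full or the
 * buffer queue is empty.  Called at splbio() from ldstrategy() and
 * lddone().
 */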
static void
ldstart(struct ld_softc *sc)
{
        struct buf *bp;
        int error;

        while (sc->sc_queuecnt < sc->sc_maxqueuecnt) {
                /* See if there is work to do. */
                if ((bp = BUFQ_PEEK(&sc->sc_bufq)) == NULL)
                        break;

                disk_busy(&sc->sc_dk);
                sc->sc_queuecnt++;

                if (__predict_true((error = (*sc->sc_start)(sc, bp)) == 0)) {
                        /*
                         * The back-end is running the job; remove it from
                         * the queue.
                         */
                        (void) BUFQ_GET(&sc->sc_bufq);
                } else {
                        disk_unbusy(&sc->sc_dk, 0, (bp->b_flags & B_READ));
                        sc->sc_queuecnt--;
                        if (error == EAGAIN) {
                                /*
                                 * Temporary resource shortage in the
                                 * back-end; just defer the job until
                                 * later.
                                 *
                                 * XXX We might consider a watchdog timer
                                 * XXX to make sure we are kicked into action.
                                 */
                                break;
                        } else {
                                (void) BUFQ_GET(&sc->sc_bufq);
                                bp->b_error = error;
                                bp->b_flags |= B_ERROR;
                                bp->b_resid = bp->b_bcount;
                                biodone(bp);
                        }
                }
        }
}

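/*
 * Called by the back-end when a transfer completes: account for the
 * I/O, wake up anyone draining the queue and restart the queue.
 */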
void
lddone(struct ld_softc *sc, struct buf *bp)
{

        if ((bp->b_flags & B_ERROR) != 0) {
                diskerr(bp, "ld", "error", LOG_PRINTF, 0, sc->sc_dk.dk_label);
                printf("\n");
        }

        disk_unbusy(&sc->sc_dk, bp->b_bcount - bp->b_resid,
            (bp->b_flags & B_READ));
#if NRND > 0
        rnd_add_uint32(&sc->sc_rnd_source, bp->b_rawblkno);
#endif
        biodone(bp);

        if (--sc->sc_queuecnt <= sc->sc_maxqueuecnt) {
                if ((sc->sc_flags & LDF_DRAIN) != 0) {
                        sc->sc_flags &= ~LDF_DRAIN;
                        wakeup(&sc->sc_queuecnt);
                }
                ldstart(sc);
        }
}

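/*
 * Return the size of a partition in DEV_BSIZE blocks for swap/dump
 * configuration, or -1 if it cannot be determined.
 */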
static int
ldsize(dev_t dev)
{
        struct ld_softc *sc;
        int part, unit, omask, size;

        unit = DISKUNIT(dev);
        if ((sc = device_lookup(&ld_cd, unit)) == NULL)
                return (ENODEV);
        if ((sc->sc_flags & LDF_ENABLED) == 0)
                return (ENODEV);
        part = DISKPART(dev);

        omask = sc->sc_dk.dk_openmask & (1 << part);

        if (omask == 0 && ldopen(dev, 0, S_IFBLK, NULL) != 0)
                return (-1);
        else if (sc->sc_dk.dk_label->d_partitions[part].p_fstype != FS_SWAP)
                size = -1;
        else
                size = sc->sc_dk.dk_label->d_partitions[part].p_size *
                    (sc->sc_dk.dk_label->d_secsize / DEV_BSIZE);
        if (omask == 0 && ldclose(dev, 0, S_IFBLK, NULL) != 0)
                return (-1);

        return (size);
}

/*
 * Load the label information from the specified device.
 */
static void
ldgetdisklabel(struct ld_softc *sc)
{
        const char *errstring;

        ldgetdefaultlabel(sc, sc->sc_dk.dk_label);

        /* Call the generic disklabel extraction routine. */
        errstring = readdisklabel(MAKEDISKDEV(0, sc->sc_dv.dv_unit, RAW_PART),
            ldstrategy, sc->sc_dk.dk_label, sc->sc_dk.dk_cpulabel);
        if (errstring != NULL)
                printf("%s: %s\n", sc->sc_dv.dv_xname, errstring);

        /* In-core label now valid. */
        sc->sc_flags |= LDF_VLABEL;
}

/*
 * Construct a fictitious label.
 */
static void
ldgetdefaultlabel(struct ld_softc *sc, struct disklabel *lp)
{

        memset(lp, 0, sizeof(struct disklabel));

        lp->d_secsize = sc->sc_secsize;
        lp->d_ntracks = sc->sc_nheads;
        lp->d_nsectors = sc->sc_nsectors;
        lp->d_ncylinders = sc->sc_ncylinders;
        lp->d_secpercyl = lp->d_ntracks * lp->d_nsectors;
        lp->d_type = DTYPE_LD;
        strlcpy(lp->d_typename, "unknown", sizeof(lp->d_typename));
        strlcpy(lp->d_packname, "fictitious", sizeof(lp->d_packname));
        lp->d_secperunit = sc->sc_secperunit;
        lp->d_rpm = 7200;
        lp->d_interleave = 1;
        lp->d_flags = 0;

        lp->d_partitions[RAW_PART].p_offset = 0;
        lp->d_partitions[RAW_PART].p_size =
            lp->d_secperunit * (lp->d_secsize / DEV_BSIZE);
        lp->d_partitions[RAW_PART].p_fstype = FS_UNUSED;
        lp->d_npartitions = RAW_PART + 1;

        lp->d_magic = DISKMAGIC;
        lp->d_magic2 = DISKMAGIC;
        lp->d_checksum = dkcksum(lp);
}

/*
 * Take a dump.
 */
static int
lddump(dev_t dev, daddr_t blkno, caddr_t va, size_t size)
{
        struct ld_softc *sc;
        struct disklabel *lp;
        int unit, part, nsects, sectoff, towrt, nblk, maxblkcnt, rv;
        static int dumping;

        unit = DISKUNIT(dev);
        if ((sc = device_lookup(&ld_cd, unit)) == NULL)
                return (ENXIO);
        if ((sc->sc_flags & LDF_ENABLED) == 0)
                return (ENODEV);
        if (sc->sc_dump == NULL)
                return (ENXIO);

        /* Check if recursive dump; if so, punt. */
        if (dumping)
                return (EFAULT);
        dumping = 1;

        /*
         * Convert to disk sectors.  The request must be a multiple of
         * the sector size.
         */
        part = DISKPART(dev);
        lp = sc->sc_dk.dk_label;
        if ((size % lp->d_secsize) != 0) {
                dumping = 0;
                return (EFAULT);
        }
        towrt = size / lp->d_secsize;
        blkno = dbtob(blkno) / lp->d_secsize;   /* blkno in DEV_BSIZE units */

        nsects = lp->d_partitions[part].p_size;
        sectoff = lp->d_partitions[part].p_offset;

        /* Check transfer bounds against partition size. */
        if ((blkno < 0) || ((blkno + towrt) > nsects)) {
                dumping = 0;
                return (EINVAL);
        }

        /* Offset block number to start of partition. */
        blkno += sectoff;

        /* Start dumping and return when done. */
        maxblkcnt = sc->sc_maxxfer / sc->sc_secsize - 1;
        while (towrt > 0) {
                nblk = min(maxblkcnt, towrt);

                if ((rv = (*sc->sc_dump)(sc, va, blkno, nblk)) != 0) {
                        dumping = 0;
                        return (rv);
                }

                towrt -= nblk;
                blkno += nblk;
                va += nblk * sc->sc_secsize;
        }

        dumping = 0;
        return (0);
}

/*
 * Adjust the size of a transfer.
 */
static void
ldminphys(struct buf *bp)
{
        struct ld_softc *sc;

        sc = device_lookup(&ld_cd, DISKUNIT(bp->b_dev));

        if (bp->b_bcount > sc->sc_maxxfer)
                bp->b_bcount = sc->sc_maxxfer;
        minphys(bp);
}