/*	$NetBSD: ld.c,v 1.24 2003/06/13 02:32:27 thorpej Exp $	*/

/*-
 * Copyright (c) 1998, 2000 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran and Charles M. Hannum.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Disk driver for use by RAID controllers.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ld.c,v 1.24 2003/06/13 02:32:27 thorpej Exp $");

#include "rnd.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/endian.h>
#include <sys/disklabel.h>
#include <sys/disk.h>
#include <sys/dkio.h>
#include <sys/stat.h>
#include <sys/lock.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/vnode.h>
#include <sys/syslog.h>
#if NRND > 0
#include <sys/rnd.h>
#endif

#include <dev/ldvar.h>

static void	ldgetdefaultlabel(struct ld_softc *, struct disklabel *);
static void	ldgetdisklabel(struct ld_softc *);
static int	ldlock(struct ld_softc *);
static void	ldminphys(struct buf *bp);
static void	ldshutdown(void *);
static void	ldstart(struct ld_softc *);
static void	ldunlock(struct ld_softc *);

extern struct cfdriver ld_cd;

dev_type_open(ldopen);
dev_type_close(ldclose);
dev_type_read(ldread);
dev_type_write(ldwrite);
dev_type_ioctl(ldioctl);
dev_type_strategy(ldstrategy);
dev_type_dump(lddump);
dev_type_size(ldsize);

const struct bdevsw ld_bdevsw = {
	ldopen, ldclose, ldstrategy, ldioctl, lddump, ldsize, D_DISK
};

const struct cdevsw ld_cdevsw = {
	ldopen, ldclose, ldread, ldwrite, ldioctl,
	nostop, notty, nopoll, nommap, nokqfilter, D_DISK
};

static struct dkdriver lddkdriver = { ldstrategy };
static void *ld_sdh;

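/*
 * Attach a logical disk: initialize the disk(9) structure, synthesize a
 * geometry if the controller did not supply one, and allocate the buffer
 * queue.  Called by the controller-specific attachment code.
 */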
void
ldattach(struct ld_softc *sc)
{
	char buf[9];

	if ((sc->sc_flags & LDF_ENABLED) == 0) {
		printf("%s: disabled\n", sc->sc_dv.dv_xname);
		return;
	}

	/* Initialise and attach the disk structure. */
	sc->sc_dk.dk_driver = &lddkdriver;
	sc->sc_dk.dk_name = sc->sc_dv.dv_xname;
	disk_attach(&sc->sc_dk);

	if (sc->sc_maxxfer > MAXPHYS)
		sc->sc_maxxfer = MAXPHYS;

	/* Build synthetic geometry if necessary. */
	if (sc->sc_nheads == 0 || sc->sc_nsectors == 0 ||
	    sc->sc_ncylinders == 0) {
		if (sc->sc_secperunit <= 528 * 2048)		/* 528MB */
			sc->sc_nheads = 16;
		else if (sc->sc_secperunit <= 1024 * 2048)	/* 1GB */
			sc->sc_nheads = 32;
		else if (sc->sc_secperunit <= 21504 * 2048)	/* 21GB */
			sc->sc_nheads = 64;
		else if (sc->sc_secperunit <= 43008 * 2048)	/* 42GB */
			sc->sc_nheads = 128;
		else
			sc->sc_nheads = 255;

		sc->sc_nsectors = 63;
		sc->sc_ncylinders = sc->sc_secperunit /
		    (sc->sc_nheads * sc->sc_nsectors);
	}

	format_bytes(buf, sizeof(buf), (u_int64_t)sc->sc_secperunit *
	    sc->sc_secsize);
	printf("%s: %s, %d cyl, %d head, %d sec, %d bytes/sect x %d sectors\n",
	    sc->sc_dv.dv_xname, buf, sc->sc_ncylinders, sc->sc_nheads,
	    sc->sc_nsectors, sc->sc_secsize, sc->sc_secperunit);

#if NRND > 0
	/* Attach the device into the rnd source list. */
	rnd_attach_source(&sc->sc_rnd_source, sc->sc_dv.dv_xname,
	    RND_TYPE_DISK, 0);
#endif

	/* Set the `shutdownhook'. */
	if (ld_sdh == NULL)
		ld_sdh = shutdownhook_establish(ldshutdown, NULL);
	bufq_alloc(&sc->sc_bufq, BUFQ_FCFS);
}

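/*
 * Adjust the maximum number of transfers that may be queued with the
 * controller at any one time.
 */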
int
ldadjqparam(struct ld_softc *sc, int max)
{
	int s;

	s = splbio();
	sc->sc_maxqueuecnt = max;
	splx(s);

	return (0);
}

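/*
 * First stage of detach: refuse if the unit is open (unless forced),
 * then block new transfers and wait for outstanding ones to drain.
 */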
int
ldbegindetach(struct ld_softc *sc, int flags)
{
	int s, rv = 0;

	if ((sc->sc_flags & LDF_ENABLED) == 0)
		return (0);

	if ((flags & DETACH_FORCE) == 0 && sc->sc_dk.dk_openmask != 0)
		return (EBUSY);

	s = splbio();
	sc->sc_maxqueuecnt = 0;
	sc->sc_flags |= LDF_DETACH;
	while (sc->sc_queuecnt > 0) {
		sc->sc_flags |= LDF_DRAIN;
		rv = tsleep(&sc->sc_queuecnt, PRIBIO, "lddrn", 0);
		if (rv)
			break;
	}
	splx(s);

	return (rv);
}

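/*
 * Second stage of detach: abort any queued buffers, revoke open vnodes,
 * and detach from the disk list and the entropy pool.
 */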
void
ldenddetach(struct ld_softc *sc)
{
	struct buf *bp;
	int s, bmaj, cmaj, i, mn;

	if ((sc->sc_flags & LDF_ENABLED) == 0)
		return;

	/* Wait for commands queued with the hardware to complete. */
	if (sc->sc_queuecnt != 0)
		if (tsleep(&sc->sc_queuecnt, PRIBIO, "lddtch", 30 * hz))
			printf("%s: not drained\n", sc->sc_dv.dv_xname);

	/* Locate the major numbers. */
	bmaj = bdevsw_lookup_major(&ld_bdevsw);
	cmaj = cdevsw_lookup_major(&ld_cdevsw);

	/* Kill off any queued buffers. */
	s = splbio();
	while ((bp = BUFQ_GET(&sc->sc_bufq)) != NULL) {
		bp->b_error = EIO;
		bp->b_flags |= B_ERROR;
		bp->b_resid = bp->b_bcount;
		biodone(bp);
	}
	bufq_free(&sc->sc_bufq);
	splx(s);

	/* Nuke the vnodes for any open instances. */
	for (i = 0; i < MAXPARTITIONS; i++) {
		mn = DISKMINOR(sc->sc_dv.dv_unit, i);
		vdevgone(bmaj, mn, mn, VBLK);
		vdevgone(cmaj, mn, mn, VCHR);
	}

	/* Detach from the disk list. */
	disk_detach(&sc->sc_dk);

#if NRND > 0
	/* Unhook the entropy source. */
	rnd_detach_source(&sc->sc_rnd_source);
#endif

	/*
	 * XXX We can't really flush the cache here, because the
	 * XXX device may already be non-existent from the controller's
	 * XXX perspective.
	 */
#if 0
	/* Flush the device's cache. */
	if (sc->sc_flush != NULL)
		if ((*sc->sc_flush)(sc) != 0)
			printf("%s: unable to flush cache\n",
			    sc->sc_dv.dv_xname);
#endif
}

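/*
 * Shutdown hook: flush the write-back cache on every configured unit.
 */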
/* ARGSUSED */
static void
ldshutdown(void *cookie)
{
	struct ld_softc *sc;
	int i;

	for (i = 0; i < ld_cd.cd_ndevs; i++) {
		if ((sc = device_lookup(&ld_cd, i)) == NULL)
			continue;
		if (sc->sc_flush != NULL && (*sc->sc_flush)(sc) != 0)
			printf("%s: unable to flush cache\n",
			    sc->sc_dv.dv_xname);
	}
}

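/*
 * Handle an open: read the disklabel on first open and verify that the
 * requested partition exists.
 */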
/* ARGSUSED */
int
ldopen(dev_t dev, int flags, int fmt, struct proc *p)
{
	struct ld_softc *sc;
	int error, unit, part;

	unit = DISKUNIT(dev);
	if ((sc = device_lookup(&ld_cd, unit)) == NULL)
		return (ENXIO);
	if ((sc->sc_flags & LDF_ENABLED) == 0)
		return (ENODEV);
	part = DISKPART(dev);

	if ((error = ldlock(sc)) != 0)
		return (error);

	if (sc->sc_dk.dk_openmask == 0) {
		/* Load the partition info if not already loaded. */
		if ((sc->sc_flags & LDF_VLABEL) == 0)
			ldgetdisklabel(sc);
	}

	/* Check that the partition exists. */
	if (part != RAW_PART && (part >= sc->sc_dk.dk_label->d_npartitions ||
	    sc->sc_dk.dk_label->d_partitions[part].p_fstype == FS_UNUSED)) {
		ldunlock(sc);
		return (ENXIO);
	}

	/* Record this open in the relevant open mask. */
	switch (fmt) {
	case S_IFCHR:
		sc->sc_dk.dk_copenmask |= (1 << part);
		break;
	case S_IFBLK:
		sc->sc_dk.dk_bopenmask |= (1 << part);
		break;
	}
	sc->sc_dk.dk_openmask =
	    sc->sc_dk.dk_copenmask | sc->sc_dk.dk_bopenmask;

	ldunlock(sc);
	return (0);
}

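/*
 * Handle a close; on the last close, flush the cache and, unless the
 * label has been pinned with DIOCKLABEL, invalidate the in-core label.
 */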
/* ARGSUSED */
int
ldclose(dev_t dev, int flags, int fmt, struct proc *p)
{
	struct ld_softc *sc;
	int error, part, unit;

	unit = DISKUNIT(dev);
	part = DISKPART(dev);
	sc = device_lookup(&ld_cd, unit);

	if ((error = ldlock(sc)) != 0)
		return (error);

	switch (fmt) {
	case S_IFCHR:
		sc->sc_dk.dk_copenmask &= ~(1 << part);
		break;
	case S_IFBLK:
		sc->sc_dk.dk_bopenmask &= ~(1 << part);
		break;
	}
	sc->sc_dk.dk_openmask =
	    sc->sc_dk.dk_copenmask | sc->sc_dk.dk_bopenmask;

	if (sc->sc_dk.dk_openmask == 0) {
		if (sc->sc_flush != NULL && (*sc->sc_flush)(sc) != 0)
			printf("%s: unable to flush cache\n",
			    sc->sc_dv.dv_xname);
		if ((sc->sc_flags & LDF_KLABEL) == 0)
			sc->sc_flags &= ~LDF_VLABEL;
	}

	ldunlock(sc);
	return (0);
}

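/*
 * Raw device read/write, performed through physio(); ldminphys() clamps
 * each transfer to the controller's limit.
 */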
/* ARGSUSED */
int
ldread(dev_t dev, struct uio *uio, int ioflag)
{

	return (physio(ldstrategy, NULL, dev, B_READ, ldminphys, uio));
}

/* ARGSUSED */
int
ldwrite(dev_t dev, struct uio *uio, int ioflag)
{

	return (physio(ldstrategy, NULL, dev, B_WRITE, ldminphys, uio));
}

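/*
 * Handle the disk ioctls: get/set the disklabel and control label
 * write-protection.
 */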
/* ARGSUSED */
int
ldioctl(dev_t dev, u_long cmd, caddr_t addr, int32_t flag, struct proc *p)
{
	struct ld_softc *sc;
	int part, unit, error;
#ifdef __HAVE_OLD_DISKLABEL
	struct disklabel newlabel;
#endif
	struct disklabel *lp;

	unit = DISKUNIT(dev);
	part = DISKPART(dev);
	sc = device_lookup(&ld_cd, unit);
	error = 0;

	switch (cmd) {
	case DIOCGDINFO:
		memcpy(addr, sc->sc_dk.dk_label, sizeof(struct disklabel));
		return (0);

#ifdef __HAVE_OLD_DISKLABEL
	case ODIOCGDINFO:
		newlabel = *(sc->sc_dk.dk_label);
		if (newlabel.d_npartitions > OLDMAXPARTITIONS)
			return (ENOTTY);
		memcpy(addr, &newlabel, sizeof(struct olddisklabel));
		return (0);
#endif

	case DIOCGPART:
		((struct partinfo *)addr)->disklab = sc->sc_dk.dk_label;
		((struct partinfo *)addr)->part =
		    &sc->sc_dk.dk_label->d_partitions[part];
		break;

	case DIOCWDINFO:
	case DIOCSDINFO:
#ifdef __HAVE_OLD_DISKLABEL
	case ODIOCWDINFO:
	case ODIOCSDINFO:

		if (cmd == ODIOCSDINFO || cmd == ODIOCWDINFO) {
			memset(&newlabel, 0, sizeof newlabel);
			memcpy(&newlabel, addr, sizeof (struct olddisklabel));
			lp = &newlabel;
		} else
#endif
		lp = (struct disklabel *)addr;

		if ((flag & FWRITE) == 0)
			return (EBADF);

		if ((error = ldlock(sc)) != 0)
			return (error);
		sc->sc_flags |= LDF_LABELLING;

		error = setdisklabel(sc->sc_dk.dk_label,
		    lp, /*sc->sc_dk.dk_openmask : */0,
		    sc->sc_dk.dk_cpulabel);
		if (error == 0 && (cmd == DIOCWDINFO
#ifdef __HAVE_OLD_DISKLABEL
		    || cmd == ODIOCWDINFO
#endif
		    ))
			error = writedisklabel(
			    MAKEDISKDEV(major(dev), DISKUNIT(dev), RAW_PART),
			    ldstrategy, sc->sc_dk.dk_label,
			    sc->sc_dk.dk_cpulabel);

		sc->sc_flags &= ~LDF_LABELLING;
		ldunlock(sc);
		break;

	case DIOCKLABEL:
		if ((flag & FWRITE) == 0)
			return (EBADF);
		if (*(int *)addr)
			sc->sc_flags |= LDF_KLABEL;
		else
			sc->sc_flags &= ~LDF_KLABEL;
		break;

	case DIOCWLABEL:
		if ((flag & FWRITE) == 0)
			return (EBADF);
		if (*(int *)addr)
			sc->sc_flags |= LDF_WLABEL;
		else
			sc->sc_flags &= ~LDF_WLABEL;
		break;

	case DIOCGDEFLABEL:
		ldgetdefaultlabel(sc, (struct disklabel *)addr);
		break;

#ifdef __HAVE_OLD_DISKLABEL
	case ODIOCGDEFLABEL:
		ldgetdefaultlabel(sc, &newlabel);
		if (newlabel.d_npartitions > OLDMAXPARTITIONS)
			return (ENOTTY);
		memcpy(addr, &newlabel, sizeof (struct olddisklabel));
		break;
#endif

	default:
		error = ENOTTY;
		break;
	}

	return (error);
}

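/*
 * Queue a transfer: validate it, bounds-check it against the partition,
 * convert the block number to an absolute device address and hand it
 * to ldstart().
 */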
void
ldstrategy(struct buf *bp)
{
	struct ld_softc *sc;
	struct disklabel *lp;
	daddr_t blkno;
	int s, part;

	sc = device_lookup(&ld_cd, DISKUNIT(bp->b_dev));
	part = DISKPART(bp->b_dev);

	if ((sc->sc_flags & LDF_DETACH) != 0) {
		bp->b_error = EIO;
		goto bad;
	}

	lp = sc->sc_dk.dk_label;

	/*
	 * The transfer must be a whole number of blocks and the offset must
	 * not be negative.
	 */
	if ((bp->b_bcount % lp->d_secsize) != 0 || bp->b_blkno < 0) {
		bp->b_error = EINVAL;
		goto bad;
	}

	/* If it's a null transfer, return immediately. */
	if (bp->b_bcount == 0)
		goto done;

	/*
	 * Do bounds checking and adjust the transfer.  If error, process.
	 * If past the end of partition, just return.
	 */
	if (part != RAW_PART &&
	    bounds_check_with_label(&sc->sc_dk, bp,
	    (sc->sc_flags & (LDF_WLABEL | LDF_LABELLING)) != 0) <= 0) {
		goto done;
	}

	/*
	 * Convert the block number to absolute and put it in terms
	 * of the device's logical block size.
	 */
	if (lp->d_secsize == DEV_BSIZE)
		blkno = bp->b_blkno;
	else if (lp->d_secsize > DEV_BSIZE)
		blkno = bp->b_blkno / (lp->d_secsize / DEV_BSIZE);
	else
		blkno = bp->b_blkno * (DEV_BSIZE / lp->d_secsize);

	if (part != RAW_PART)
		blkno += lp->d_partitions[part].p_offset;

	bp->b_rawblkno = blkno;

	s = splbio();
	BUFQ_PUT(&sc->sc_bufq, bp);
	ldstart(sc);
	splx(s);
	return;

 bad:
	bp->b_flags |= B_ERROR;
 done:
	bp->b_resid = bp->b_bcount;
	biodone(bp);
}

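/*
 * Issue queued transfers to the controller back-end until the queue
 * limit is reached or the back-end reports a temporary resource
 * shortage.  Called at splbio().
 */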
static void
ldstart(struct ld_softc *sc)
{
	struct buf *bp;
	int error;

	while (sc->sc_queuecnt < sc->sc_maxqueuecnt) {
		/* See if there is work to do. */
		if ((bp = BUFQ_PEEK(&sc->sc_bufq)) == NULL)
			break;

		disk_busy(&sc->sc_dk);
		sc->sc_queuecnt++;

		if (__predict_true((error = (*sc->sc_start)(sc, bp)) == 0)) {
			/*
			 * The back-end is running the job; remove it from
			 * the queue.
			 */
			(void) BUFQ_GET(&sc->sc_bufq);
		} else {
			disk_unbusy(&sc->sc_dk, 0, (bp->b_flags & B_READ));
			sc->sc_queuecnt--;
			if (error == EAGAIN) {
				/*
				 * Temporary resource shortage in the
				 * back-end; just defer the job until
				 * later.
				 *
				 * XXX We might consider a watchdog timer
				 * XXX to make sure we are kicked into action.
				 */
				break;
			} else {
				(void) BUFQ_GET(&sc->sc_bufq);
				bp->b_error = error;
				bp->b_flags |= B_ERROR;
				bp->b_resid = bp->b_bcount;
				biodone(bp);
			}
		}
	}
}

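/*
 * Called by the controller back-end when a transfer completes; accounts
 * for the transfer, wakes any drain waiters and restarts the queue.
 */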
void
lddone(struct ld_softc *sc, struct buf *bp)
{

	if ((bp->b_flags & B_ERROR) != 0) {
		diskerr(bp, "ld", "error", LOG_PRINTF, 0, sc->sc_dk.dk_label);
		printf("\n");
	}

	disk_unbusy(&sc->sc_dk, bp->b_bcount - bp->b_resid,
	    (bp->b_flags & B_READ));
#if NRND > 0
	rnd_add_uint32(&sc->sc_rnd_source, bp->b_rawblkno);
#endif
	biodone(bp);

	if (--sc->sc_queuecnt <= sc->sc_maxqueuecnt) {
		if ((sc->sc_flags & LDF_DRAIN) != 0) {
			sc->sc_flags &= ~LDF_DRAIN;
			wakeup(&sc->sc_queuecnt);
		}
		ldstart(sc);
	}
}

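/*
 * Return the size of the given partition in DEV_BSIZE blocks, for use
 * when dumping; only swap partitions are eligible.
 */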
int
ldsize(dev_t dev)
{
	struct ld_softc *sc;
	int part, unit, omask, size;

	unit = DISKUNIT(dev);
	if ((sc = device_lookup(&ld_cd, unit)) == NULL)
		return (ENODEV);
	if ((sc->sc_flags & LDF_ENABLED) == 0)
		return (ENODEV);
	part = DISKPART(dev);

	omask = sc->sc_dk.dk_openmask & (1 << part);

	if (omask == 0 && ldopen(dev, 0, S_IFBLK, NULL) != 0)
		return (-1);
	else if (sc->sc_dk.dk_label->d_partitions[part].p_fstype != FS_SWAP)
		size = -1;
	else
		size = sc->sc_dk.dk_label->d_partitions[part].p_size *
		    (sc->sc_dk.dk_label->d_secsize / DEV_BSIZE);
	if (omask == 0 && ldclose(dev, 0, S_IFBLK, NULL) != 0)
		return (-1);

	return (size);
}

/*
 * Load the label information from the specified device.
 */
static void
ldgetdisklabel(struct ld_softc *sc)
{
	const char *errstring;

	ldgetdefaultlabel(sc, sc->sc_dk.dk_label);

	/* Call the generic disklabel extraction routine. */
	errstring = readdisklabel(MAKEDISKDEV(0, sc->sc_dv.dv_unit, RAW_PART),
	    ldstrategy, sc->sc_dk.dk_label, sc->sc_dk.dk_cpulabel);
	if (errstring != NULL)
		printf("%s: %s\n", sc->sc_dv.dv_xname, errstring);

	/* In-core label now valid. */
	sc->sc_flags |= LDF_VLABEL;
}

/*
 * Construct a fictitious label.
 */
static void
ldgetdefaultlabel(struct ld_softc *sc, struct disklabel *lp)
{

	memset(lp, 0, sizeof(struct disklabel));

	lp->d_secsize = sc->sc_secsize;
	lp->d_ntracks = sc->sc_nheads;
	lp->d_nsectors = sc->sc_nsectors;
	lp->d_ncylinders = sc->sc_ncylinders;
	lp->d_secpercyl = lp->d_ntracks * lp->d_nsectors;
	lp->d_type = DTYPE_LD;
	strlcpy(lp->d_typename, "unknown", sizeof(lp->d_typename));
	strlcpy(lp->d_packname, "fictitious", sizeof(lp->d_packname));
	lp->d_secperunit = sc->sc_secperunit;
	lp->d_rpm = 7200;
	lp->d_interleave = 1;
	lp->d_flags = 0;

	lp->d_partitions[RAW_PART].p_offset = 0;
	lp->d_partitions[RAW_PART].p_size =
	    lp->d_secperunit * (lp->d_secsize / DEV_BSIZE);
	lp->d_partitions[RAW_PART].p_fstype = FS_UNUSED;
	lp->d_npartitions = RAW_PART + 1;

	lp->d_magic = DISKMAGIC;
	lp->d_magic2 = DISKMAGIC;
	lp->d_checksum = dkcksum(lp);
}

/*
 * Wait interruptibly for an exclusive lock.
 *
 * XXX Several drivers do this; it should be abstracted and made MP-safe.
 */
static int
ldlock(struct ld_softc *sc)
{
	int error;

	while ((sc->sc_flags & LDF_LKHELD) != 0) {
		sc->sc_flags |= LDF_LKWANTED;
		if ((error = tsleep(sc, PRIBIO | PCATCH, "ldlck", 0)) != 0)
			return (error);
	}
	sc->sc_flags |= LDF_LKHELD;
	return (0);
}

/*
 * Unlock and wake up any waiters.
 */
static void
ldunlock(struct ld_softc *sc)
{

	sc->sc_flags &= ~LDF_LKHELD;
	if ((sc->sc_flags & LDF_LKWANTED) != 0) {
		sc->sc_flags &= ~LDF_LKWANTED;
		wakeup(sc);
	}
}

/*
 * Take a dump.
 */
int
lddump(dev_t dev, daddr_t blkno, caddr_t va, size_t size)
{
	struct ld_softc *sc;
	struct disklabel *lp;
	int unit, part, nsects, sectoff, towrt, nblk, maxblkcnt, rv;
	static int dumping;

	unit = DISKUNIT(dev);
	if ((sc = device_lookup(&ld_cd, unit)) == NULL)
		return (ENXIO);
	if ((sc->sc_flags & LDF_ENABLED) == 0)
		return (ENODEV);
	if (sc->sc_dump == NULL)
		return (ENXIO);

	/* Check if recursive dump; if so, punt. */
	if (dumping)
		return (EFAULT);
	dumping = 1;

	/*
	 * Convert to disk sectors.  The request must be a multiple of
	 * the sector size.
	 */
	part = DISKPART(dev);
	lp = sc->sc_dk.dk_label;
	if ((size % lp->d_secsize) != 0) {
		dumping = 0;
		return (EFAULT);
	}
	towrt = size / lp->d_secsize;
	blkno = dbtob(blkno) / lp->d_secsize;	/* blkno in DEV_BSIZE units */

	nsects = lp->d_partitions[part].p_size;
	sectoff = lp->d_partitions[part].p_offset;

	/* Check transfer bounds against partition size. */
	if ((blkno < 0) || ((blkno + towrt) > nsects)) {
		dumping = 0;
		return (EINVAL);
	}

	/* Offset block number to start of partition. */
	blkno += sectoff;

	/* Start dumping and return when done. */
	maxblkcnt = sc->sc_maxxfer / sc->sc_secsize - 1;
	while (towrt > 0) {
		nblk = min(maxblkcnt, towrt);

		if ((rv = (*sc->sc_dump)(sc, va, blkno, nblk)) != 0) {
			dumping = 0;
			return (rv);
		}

		towrt -= nblk;
		blkno += nblk;
		va += nblk * sc->sc_secsize;
	}

	dumping = 0;
	return (0);
}

/*
 * Adjust the size of a transfer.
 */
static void
ldminphys(struct buf *bp)
{
	struct ld_softc *sc;

	sc = device_lookup(&ld_cd, DISKUNIT(bp->b_dev));

	if (bp->b_bcount > sc->sc_maxxfer)
		bp->b_bcount = sc->sc_maxxfer;
	minphys(bp);
}