/*	$NetBSD: ld.c,v 1.73 2014/07/25 08:02:19 dholland Exp $	*/

/*-
 * Copyright (c) 1998, 2000 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran and Charles M. Hannum.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Disk driver for use by RAID controllers.
 */
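
/*
 * Hardware front-ends (for example ld_cac or ld_twe) embed a struct
 * ld_softc at the start of their own softc, fill in the geometry and
 * transfer limits together with the sc_start/sc_dump/sc_flush hooks,
 * and then call ldattach().  The sketch below is only illustrative; the
 * "xyz" controller and its helper functions are hypothetical, and error
 * handling is omitted:
 *
 *	struct ld_xyz_softc {
 *		struct ld_softc	sc_ld;		// must come first
 *		...				// controller state
 *	};
 *
 *	static void
 *	ld_xyz_attach(device_t parent, device_t self, void *aux)
 *	{
 *		struct ld_xyz_softc *sc = device_private(self);
 *		struct ld_softc *ld = &sc->sc_ld;
 *
 *		ld->sc_dv = self;
 *		ld->sc_secsize = 512;
 *		ld->sc_secperunit = ...;	// reported by the controller
 *		ld->sc_maxxfer = MAXPHYS;
 *		ld->sc_maxqueuecnt = ...;	// max outstanding commands
 *		ld->sc_start = ld_xyz_start;
 *		ld->sc_dump = ld_xyz_dump;
 *		ld->sc_flush = ld_xyz_flush;	// optional, may be NULL
 *		ld->sc_flags = LDF_ENABLED;
 *		ldattach(ld);
 *	}
 */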

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ld.c,v 1.73 2014/07/25 08:02:19 dholland Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/bufq.h>
#include <sys/endian.h>
#include <sys/disklabel.h>
#include <sys/disk.h>
#include <sys/dkio.h>
#include <sys/stat.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/vnode.h>
#include <sys/syslog.h>
#include <sys/mutex.h>
#include <sys/rnd.h>

#include <dev/ldvar.h>

#include <prop/proplib.h>

static void	ldgetdefaultlabel(struct ld_softc *, struct disklabel *);
static void	ldgetdisklabel(struct ld_softc *);
static void	ldminphys(struct buf *bp);
static bool	ld_suspend(device_t, const pmf_qual_t *);
static bool	ld_shutdown(device_t, int);
static void	ldstart(struct ld_softc *, struct buf *);
static void	ld_set_geometry(struct ld_softc *);
static void	ld_config_interrupts (device_t);
static int	ldlastclose(device_t);

extern struct	cfdriver ld_cd;

static dev_type_open(ldopen);
static dev_type_close(ldclose);
static dev_type_read(ldread);
static dev_type_write(ldwrite);
static dev_type_ioctl(ldioctl);
static dev_type_strategy(ldstrategy);
static dev_type_dump(lddump);
static dev_type_size(ldsize);

const struct bdevsw ld_bdevsw = {
	.d_open = ldopen,
	.d_close = ldclose,
	.d_strategy = ldstrategy,
	.d_ioctl = ldioctl,
	.d_dump = lddump,
	.d_psize = ldsize,
	.d_discard = nodiscard,
	.d_flag = D_DISK
};

const struct cdevsw ld_cdevsw = {
	.d_open = ldopen,
	.d_close = ldclose,
	.d_read = ldread,
	.d_write = ldwrite,
	.d_ioctl = ldioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = nopoll,
	.d_mmap = nommap,
	.d_kqfilter = nokqfilter,
	.d_flag = D_DISK
};

static struct	dkdriver lddkdriver = { ldstrategy, ldminphys };

void
ldattach(struct ld_softc *sc)
{
	char tbuf[9];

	mutex_init(&sc->sc_mutex, MUTEX_DEFAULT, IPL_VM);

	if ((sc->sc_flags & LDF_ENABLED) == 0) {
		aprint_normal_dev(sc->sc_dv, "disabled\n");
		return;
	}

	/* Initialise and attach the disk structure. */
	disk_init(&sc->sc_dk, device_xname(sc->sc_dv), &lddkdriver);
	disk_attach(&sc->sc_dk);

	if (sc->sc_maxxfer > MAXPHYS)
		sc->sc_maxxfer = MAXPHYS;

	/* Build synthetic geometry if necessary. */
	if (sc->sc_nheads == 0 || sc->sc_nsectors == 0 ||
	    sc->sc_ncylinders == 0) {
		uint64_t ncyl;

		if (sc->sc_secperunit <= 528 * 2048)		/* 528MB */
			sc->sc_nheads = 16;
		else if (sc->sc_secperunit <= 1024 * 2048)	/* 1GB */
			sc->sc_nheads = 32;
		else if (sc->sc_secperunit <= 21504 * 2048)	/* 21GB */
			sc->sc_nheads = 64;
		else if (sc->sc_secperunit <= 43008 * 2048)	/* 42GB */
			sc->sc_nheads = 128;
		else
			sc->sc_nheads = 255;

		sc->sc_nsectors = 63;
		sc->sc_ncylinders = INT_MAX;
		ncyl = sc->sc_secperunit /
		    (sc->sc_nheads * sc->sc_nsectors);
		if (ncyl < INT_MAX)
			sc->sc_ncylinders = (int)ncyl;
	}

	format_bytes(tbuf, sizeof(tbuf), sc->sc_secperunit *
	    sc->sc_secsize);
	aprint_normal_dev(sc->sc_dv, "%s, %d cyl, %d head, %d sec, "
	    "%d bytes/sect x %"PRIu64" sectors\n",
	    tbuf, sc->sc_ncylinders, sc->sc_nheads,
	    sc->sc_nsectors, sc->sc_secsize, sc->sc_secperunit);
	sc->sc_disksize512 = sc->sc_secperunit * sc->sc_secsize / DEV_BSIZE;

	ld_set_geometry(sc);

	/* Attach the device into the rnd source list. */
	rnd_attach_source(&sc->sc_rnd_source, device_xname(sc->sc_dv),
	    RND_TYPE_DISK, 0);

	/* Register with PMF */
	if (!pmf_device_register1(sc->sc_dv, ld_suspend, NULL, ld_shutdown))
		aprint_error_dev(sc->sc_dv,
		    "couldn't establish power handler\n");

	bufq_alloc(&sc->sc_bufq, BUFQ_DISK_DEFAULT_STRAT, BUFQ_SORT_RAWBLOCK);

	/* Discover wedges on this disk. */
	config_interrupts(sc->sc_dv, ld_config_interrupts);
}

int
ldadjqparam(struct ld_softc *sc, int xmax)
{
	int s;

	s = splbio();
	sc->sc_maxqueuecnt = xmax;
	splx(s);

	return (0);
}

int
ldbegindetach(struct ld_softc *sc, int flags)
{
	int s, rv = 0;

	if ((sc->sc_flags & LDF_ENABLED) == 0)
		return (0);

	rv = disk_begindetach(&sc->sc_dk, ldlastclose, sc->sc_dv, flags);

	if (rv != 0)
		return rv;

	s = splbio();
	sc->sc_maxqueuecnt = 0;
	sc->sc_flags |= LDF_DETACH;
	while (sc->sc_queuecnt > 0) {
		sc->sc_flags |= LDF_DRAIN;
		rv = tsleep(&sc->sc_queuecnt, PRIBIO, "lddrn", 0);
		if (rv)
			break;
	}
	splx(s);

	return (rv);
}

void
ldenddetach(struct ld_softc *sc)
{
	int s, bmaj, cmaj, i, mn;

	if ((sc->sc_flags & LDF_ENABLED) == 0)
		return;

	/* Wait for commands queued with the hardware to complete. */
	if (sc->sc_queuecnt != 0)
		if (tsleep(&sc->sc_queuecnt, PRIBIO, "lddtch", 30 * hz))
			printf("%s: not drained\n", device_xname(sc->sc_dv));

	/* Locate the major numbers. */
	bmaj = bdevsw_lookup_major(&ld_bdevsw);
	cmaj = cdevsw_lookup_major(&ld_cdevsw);

	/* Kill off any queued buffers. */
	s = splbio();
	bufq_drain(sc->sc_bufq);
	splx(s);

	bufq_free(sc->sc_bufq);

	/* Nuke the vnodes for any open instances. */
	for (i = 0; i < MAXPARTITIONS; i++) {
		mn = DISKMINOR(device_unit(sc->sc_dv), i);
		vdevgone(bmaj, mn, mn, VBLK);
		vdevgone(cmaj, mn, mn, VCHR);
	}

	/* Delete all of our wedges. */
	dkwedge_delall(&sc->sc_dk);

	/* Detach from the disk list. */
	disk_detach(&sc->sc_dk);
	disk_destroy(&sc->sc_dk);

	/* Unhook the entropy source. */
	rnd_detach_source(&sc->sc_rnd_source);

	/* Deregister with PMF */
	pmf_device_deregister(sc->sc_dv);

	/*
	 * XXX We can't really flush the cache here, because the
	 * XXX device may already be non-existent from the controller's
	 * XXX perspective.
	 */
#if 0
	/* Flush the device's cache. */
	if (sc->sc_flush != NULL)
		if ((*sc->sc_flush)(sc, 0) != 0)
			aprint_error_dev(sc->sc_dv, "unable to flush cache\n");
#endif
	mutex_destroy(&sc->sc_mutex);
}

/* ARGSUSED */
static bool
ld_suspend(device_t dev, const pmf_qual_t *qual)
{
	return ld_shutdown(dev, 0);
}

/* ARGSUSED */
static bool
ld_shutdown(device_t dev, int flags)
{
	struct ld_softc *sc = device_private(dev);

	if (sc->sc_flush != NULL && (*sc->sc_flush)(sc, LDFL_POLL) != 0) {
		printf("%s: unable to flush cache\n", device_xname(dev));
		return false;
	}

	return true;
}

/* ARGSUSED */
static int
ldopen(dev_t dev, int flags, int fmt, struct lwp *l)
{
	struct ld_softc *sc;
	int error, unit, part;

	unit = DISKUNIT(dev);
	if ((sc = device_lookup_private(&ld_cd, unit)) == NULL)
		return (ENXIO);
	if ((sc->sc_flags & LDF_ENABLED) == 0)
		return (ENODEV);
	part = DISKPART(dev);

	mutex_enter(&sc->sc_dk.dk_openlock);

	if (sc->sc_dk.dk_openmask == 0) {
		/* Load the partition info if not already loaded. */
		if ((sc->sc_flags & LDF_VLABEL) == 0)
			ldgetdisklabel(sc);
	}

	/* Check that the partition exists. */
	if (part != RAW_PART && (part >= sc->sc_dk.dk_label->d_npartitions ||
	    sc->sc_dk.dk_label->d_partitions[part].p_fstype == FS_UNUSED)) {
		error = ENXIO;
		goto bad1;
	}

	/* Ensure only one open at a time. */
	switch (fmt) {
	case S_IFCHR:
		sc->sc_dk.dk_copenmask |= (1 << part);
		break;
	case S_IFBLK:
		sc->sc_dk.dk_bopenmask |= (1 << part);
		break;
	}
	sc->sc_dk.dk_openmask =
	    sc->sc_dk.dk_copenmask | sc->sc_dk.dk_bopenmask;

	error = 0;
 bad1:
	mutex_exit(&sc->sc_dk.dk_openlock);
	return (error);
}

static int
ldlastclose(device_t self)
{
	struct ld_softc *sc = device_private(self);

	if (sc->sc_flush != NULL && (*sc->sc_flush)(sc, 0) != 0)
		aprint_error_dev(self, "unable to flush cache\n");
	if ((sc->sc_flags & LDF_KLABEL) == 0)
		sc->sc_flags &= ~LDF_VLABEL;

	return 0;
}

/* ARGSUSED */
static int
ldclose(dev_t dev, int flags, int fmt, struct lwp *l)
{
	struct ld_softc *sc;
	int part, unit;

	unit = DISKUNIT(dev);
	part = DISKPART(dev);
	sc = device_lookup_private(&ld_cd, unit);

	mutex_enter(&sc->sc_dk.dk_openlock);

	switch (fmt) {
	case S_IFCHR:
		sc->sc_dk.dk_copenmask &= ~(1 << part);
		break;
	case S_IFBLK:
		sc->sc_dk.dk_bopenmask &= ~(1 << part);
		break;
	}
	sc->sc_dk.dk_openmask =
	    sc->sc_dk.dk_copenmask | sc->sc_dk.dk_bopenmask;

	if (sc->sc_dk.dk_openmask == 0)
		ldlastclose(sc->sc_dv);

	mutex_exit(&sc->sc_dk.dk_openlock);
	return (0);
}

/* ARGSUSED */
static int
ldread(dev_t dev, struct uio *uio, int ioflag)
{

	return (physio(ldstrategy, NULL, dev, B_READ, ldminphys, uio));
}

/* ARGSUSED */
static int
ldwrite(dev_t dev, struct uio *uio, int ioflag)
{

	return (physio(ldstrategy, NULL, dev, B_WRITE, ldminphys, uio));
}

/* ARGSUSED */
static int
ldioctl(dev_t dev, u_long cmd, void *addr, int32_t flag, struct lwp *l)
{
	struct ld_softc *sc;
	int part, unit, error;
#ifdef __HAVE_OLD_DISKLABEL
	struct disklabel newlabel;
#endif
	struct disklabel *lp;

	unit = DISKUNIT(dev);
	part = DISKPART(dev);
	sc = device_lookup_private(&ld_cd, unit);

	error = disk_ioctl(&sc->sc_dk, cmd, addr, flag, l);
	if (error != EPASSTHROUGH)
		return (error);

	error = 0;
	switch (cmd) {
	case DIOCGDINFO:
		memcpy(addr, sc->sc_dk.dk_label, sizeof(struct disklabel));
		return (0);

#ifdef __HAVE_OLD_DISKLABEL
	case ODIOCGDINFO:
		newlabel = *(sc->sc_dk.dk_label);
		if (newlabel.d_npartitions > OLDMAXPARTITIONS)
			return ENOTTY;
		memcpy(addr, &newlabel, sizeof(struct olddisklabel));
		return (0);
#endif

	case DIOCGPART:
		((struct partinfo *)addr)->disklab = sc->sc_dk.dk_label;
		((struct partinfo *)addr)->part =
		    &sc->sc_dk.dk_label->d_partitions[part];
		break;

	case DIOCWDINFO:
	case DIOCSDINFO:
#ifdef __HAVE_OLD_DISKLABEL
	case ODIOCWDINFO:
	case ODIOCSDINFO:

		if (cmd == ODIOCSDINFO || cmd == ODIOCWDINFO) {
			memset(&newlabel, 0, sizeof newlabel);
			memcpy(&newlabel, addr, sizeof (struct olddisklabel));
			lp = &newlabel;
		} else
#endif
		lp = (struct disklabel *)addr;

		if ((flag & FWRITE) == 0)
			return (EBADF);

		mutex_enter(&sc->sc_dk.dk_openlock);
		sc->sc_flags |= LDF_LABELLING;

		error = setdisklabel(sc->sc_dk.dk_label,
		    lp, /*sc->sc_dk.dk_openmask : */0,
		    sc->sc_dk.dk_cpulabel);
		if (error == 0 && (cmd == DIOCWDINFO
#ifdef __HAVE_OLD_DISKLABEL
		    || cmd == ODIOCWDINFO
#endif
		    ))
			error = writedisklabel(
			    MAKEDISKDEV(major(dev), DISKUNIT(dev), RAW_PART),
			    ldstrategy, sc->sc_dk.dk_label,
			    sc->sc_dk.dk_cpulabel);

		sc->sc_flags &= ~LDF_LABELLING;
		mutex_exit(&sc->sc_dk.dk_openlock);
		break;

	case DIOCKLABEL:
		if ((flag & FWRITE) == 0)
			return (EBADF);
		if (*(int *)addr)
			sc->sc_flags |= LDF_KLABEL;
		else
			sc->sc_flags &= ~LDF_KLABEL;
		break;

	case DIOCWLABEL:
		if ((flag & FWRITE) == 0)
			return (EBADF);
		if (*(int *)addr)
			sc->sc_flags |= LDF_WLABEL;
		else
			sc->sc_flags &= ~LDF_WLABEL;
		break;

	case DIOCGDEFLABEL:
		ldgetdefaultlabel(sc, (struct disklabel *)addr);
		break;

#ifdef __HAVE_OLD_DISKLABEL
	case ODIOCGDEFLABEL:
		ldgetdefaultlabel(sc, &newlabel);
		if (newlabel.d_npartitions > OLDMAXPARTITIONS)
			return ENOTTY;
		memcpy(addr, &newlabel, sizeof (struct olddisklabel));
		break;
#endif

	case DIOCCACHESYNC:
		/*
		 * XXX Do we really need to care about having a writable
		 * file descriptor here?
		 */
		if ((flag & FWRITE) == 0)
			error = EBADF;
		else if (sc->sc_flush)
			error = (*sc->sc_flush)(sc, 0);
		else
			error = 0;	/* XXX Error out instead? */
		break;

	case DIOCAWEDGE:
	    {
		struct dkwedge_info *dkw = (void *) addr;

		if ((flag & FWRITE) == 0)
			return (EBADF);

		/* If the ioctl happens here, the parent is us. */
		strlcpy(dkw->dkw_parent, device_xname(sc->sc_dv),
		    sizeof(dkw->dkw_parent));
		return (dkwedge_add(dkw));
	    }

	case DIOCDWEDGE:
	    {
		struct dkwedge_info *dkw = (void *) addr;

		if ((flag & FWRITE) == 0)
			return (EBADF);

		/* If the ioctl happens here, the parent is us. */
		strlcpy(dkw->dkw_parent, device_xname(sc->sc_dv),
		    sizeof(dkw->dkw_parent));
		return (dkwedge_del(dkw));
	    }

	case DIOCLWEDGES:
	    {
		struct dkwedge_list *dkwl = (void *) addr;

		return (dkwedge_list(&sc->sc_dk, dkwl, l));
	    }
	case DIOCGSTRATEGY:
	    {
		struct disk_strategy *dks = (void *)addr;

		mutex_enter(&sc->sc_mutex);
		strlcpy(dks->dks_name, bufq_getstrategyname(sc->sc_bufq),
		    sizeof(dks->dks_name));
		mutex_exit(&sc->sc_mutex);
		dks->dks_paramlen = 0;

		return 0;
	    }
	case DIOCSSTRATEGY:
	    {
		struct disk_strategy *dks = (void *)addr;
		struct bufq_state *new, *old;

		if ((flag & FWRITE) == 0)
			return EPERM;

		if (dks->dks_param != NULL)
			return EINVAL;

		dks->dks_name[sizeof(dks->dks_name) - 1] = 0; /* ensure term */
		error = bufq_alloc(&new, dks->dks_name,
		    BUFQ_EXACT|BUFQ_SORT_RAWBLOCK);
		if (error)
			return error;

		mutex_enter(&sc->sc_mutex);
		old = sc->sc_bufq;
		bufq_move(new, old);
		sc->sc_bufq = new;
		mutex_exit(&sc->sc_mutex);
		bufq_free(old);

		return 0;
	    }
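
	/*
	 * Note: the two strategy ioctls above are normally exercised
	 * from userland rather than by other kernel code; dkctl(8)'s
	 * "strategy" command is the usual consumer, e.g. something
	 * like "dkctl ld0 strategy priocscan" to switch the sort
	 * policy of the queue.
	 */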
	default:
		error = ENOTTY;
		break;
	}

	return (error);
}

static void
ldstrategy(struct buf *bp)
{
	struct ld_softc *sc;
	struct disklabel *lp;
	daddr_t blkno;
	int s, part;

	sc = device_lookup_private(&ld_cd, DISKUNIT(bp->b_dev));
	part = DISKPART(bp->b_dev);

	if ((sc->sc_flags & LDF_DETACH) != 0) {
		bp->b_error = EIO;
		goto done;
	}

	lp = sc->sc_dk.dk_label;

	/*
	 * The transfer must be a whole number of blocks and the offset must
	 * not be negative.
	 */
	if ((bp->b_bcount % lp->d_secsize) != 0 || bp->b_blkno < 0) {
		bp->b_error = EINVAL;
		goto done;
	}

	/* If it's a null transfer, return immediately. */
	if (bp->b_bcount == 0)
		goto done;

	/*
	 * Do bounds checking and adjust the transfer.  If error, process.
	 * If past the end of partition, just return.
	 */
	if (part == RAW_PART) {
		if (bounds_check_with_mediasize(bp, DEV_BSIZE,
		    sc->sc_disksize512) <= 0)
			goto done;
	} else {
		if (bounds_check_with_label(&sc->sc_dk, bp,
		    (sc->sc_flags & (LDF_WLABEL | LDF_LABELLING)) != 0) <= 0)
			goto done;
	}

	/*
	 * Convert the block number to absolute and put it in terms
	 * of the device's logical block size.
	 */
	if (lp->d_secsize == DEV_BSIZE)
		blkno = bp->b_blkno;
	else if (lp->d_secsize > DEV_BSIZE)
		blkno = bp->b_blkno / (lp->d_secsize / DEV_BSIZE);
	else
		blkno = bp->b_blkno * (DEV_BSIZE / lp->d_secsize);

	if (part != RAW_PART)
		blkno += lp->d_partitions[part].p_offset;

	bp->b_rawblkno = blkno;

	s = splbio();
	ldstart(sc, bp);
	splx(s);
	return;

 done:
	bp->b_resid = bp->b_bcount;
	biodone(bp);
}

static void
ldstart(struct ld_softc *sc, struct buf *bp)
{
	int error;

	mutex_enter(&sc->sc_mutex);

	if (bp != NULL)
		bufq_put(sc->sc_bufq, bp);

	while (sc->sc_queuecnt < sc->sc_maxqueuecnt) {
		/* See if there is work to do. */
		if ((bp = bufq_peek(sc->sc_bufq)) == NULL)
			break;

		disk_busy(&sc->sc_dk);
		sc->sc_queuecnt++;

		if (__predict_true((error = (*sc->sc_start)(sc, bp)) == 0)) {
			/*
			 * The back-end is running the job; remove it from
			 * the queue.
			 */
			(void) bufq_get(sc->sc_bufq);
		} else {
			disk_unbusy(&sc->sc_dk, 0, (bp->b_flags & B_READ));
			sc->sc_queuecnt--;
			if (error == EAGAIN) {
				/*
				 * Temporary resource shortage in the
				 * back-end; just defer the job until
				 * later.
				 *
				 * XXX We might consider a watchdog timer
				 * XXX to make sure we are kicked into action.
				 */
				break;
			} else {
				(void) bufq_get(sc->sc_bufq);
				bp->b_error = error;
				bp->b_resid = bp->b_bcount;
				mutex_exit(&sc->sc_mutex);
				biodone(bp);
				mutex_enter(&sc->sc_mutex);
			}
		}
	}

	mutex_exit(&sc->sc_mutex);
}

void
lddone(struct ld_softc *sc, struct buf *bp)
{

	if (bp->b_error != 0) {
		diskerr(bp, "ld", "error", LOG_PRINTF, 0, sc->sc_dk.dk_label);
		printf("\n");
	}

	disk_unbusy(&sc->sc_dk, bp->b_bcount - bp->b_resid,
	    (bp->b_flags & B_READ));
	rnd_add_uint32(&sc->sc_rnd_source, bp->b_rawblkno);
	biodone(bp);

	mutex_enter(&sc->sc_mutex);
	if (--sc->sc_queuecnt <= sc->sc_maxqueuecnt) {
		if ((sc->sc_flags & LDF_DRAIN) != 0) {
			sc->sc_flags &= ~LDF_DRAIN;
			wakeup(&sc->sc_queuecnt);
		}
		mutex_exit(&sc->sc_mutex);
		ldstart(sc, NULL);
	} else
		mutex_exit(&sc->sc_mutex);
}
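
/*
 * For reference, the contract between the queueing code above and a
 * hardware back-end is roughly this (an illustrative summary of the
 * ldstart()/lddone() interplay, not taken from any particular
 * front-end):
 *
 *	- ldstart() hands buffers to (*sc_start)(sc, bp) while fewer than
 *	  sc_maxqueuecnt transfers are outstanding.  The back-end returns
 *	  0 once it has accepted the job, EAGAIN on a temporary resource
 *	  shortage (the buffer stays queued and is retried later), or any
 *	  other errno to fail the transfer outright.
 *
 *	- When the hardware completes a transfer, the back-end fills in
 *	  bp->b_resid (and bp->b_error on failure) and calls lddone(),
 *	  which does the disk accounting, wakes any drain waiters and
 *	  restarts the queue.  A hypothetical completion handler might
 *	  finish with:
 *
 *		bp->b_resid = 0;
 *		lddone(&sc->sc_ld, bp);
 */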

static int
ldsize(dev_t dev)
{
	struct ld_softc *sc;
	int part, unit, omask, size;

	unit = DISKUNIT(dev);
	if ((sc = device_lookup_private(&ld_cd, unit)) == NULL)
		return (ENODEV);
	if ((sc->sc_flags & LDF_ENABLED) == 0)
		return (ENODEV);
	part = DISKPART(dev);

	omask = sc->sc_dk.dk_openmask & (1 << part);

	if (omask == 0 && ldopen(dev, 0, S_IFBLK, NULL) != 0)
		return (-1);
	else if (sc->sc_dk.dk_label->d_partitions[part].p_fstype != FS_SWAP)
		size = -1;
	else
		size = sc->sc_dk.dk_label->d_partitions[part].p_size *
		    (sc->sc_dk.dk_label->d_secsize / DEV_BSIZE);
	if (omask == 0 && ldclose(dev, 0, S_IFBLK, NULL) != 0)
		return (-1);

	return (size);
}

/*
 * Load the label information from the specified device.
 */
static void
ldgetdisklabel(struct ld_softc *sc)
{
	const char *errstring;

	ldgetdefaultlabel(sc, sc->sc_dk.dk_label);

	/* Call the generic disklabel extraction routine. */
	errstring = readdisklabel(MAKEDISKDEV(0, device_unit(sc->sc_dv),
	    RAW_PART), ldstrategy, sc->sc_dk.dk_label, sc->sc_dk.dk_cpulabel);
	if (errstring != NULL)
		printf("%s: %s\n", device_xname(sc->sc_dv), errstring);

	/* In-core label now valid. */
	sc->sc_flags |= LDF_VLABEL;
}

/*
 * Construct a fictitious label.
 */
static void
ldgetdefaultlabel(struct ld_softc *sc, struct disklabel *lp)
{

	memset(lp, 0, sizeof(struct disklabel));

	lp->d_secsize = sc->sc_secsize;
	lp->d_ntracks = sc->sc_nheads;
	lp->d_nsectors = sc->sc_nsectors;
	lp->d_ncylinders = sc->sc_ncylinders;
	lp->d_secpercyl = lp->d_ntracks * lp->d_nsectors;
	lp->d_type = DTYPE_LD;
	strlcpy(lp->d_typename, "unknown", sizeof(lp->d_typename));
	strlcpy(lp->d_packname, "fictitious", sizeof(lp->d_packname));
	lp->d_secperunit = sc->sc_secperunit;
	lp->d_rpm = 7200;
	lp->d_interleave = 1;
	lp->d_flags = 0;

	lp->d_partitions[RAW_PART].p_offset = 0;
	lp->d_partitions[RAW_PART].p_size =
	    lp->d_secperunit * (lp->d_secsize / DEV_BSIZE);
	lp->d_partitions[RAW_PART].p_fstype = FS_UNUSED;
	lp->d_npartitions = RAW_PART + 1;

	lp->d_magic = DISKMAGIC;
	lp->d_magic2 = DISKMAGIC;
	lp->d_checksum = dkcksum(lp);
}

/*
 * Take a dump.
 */
static int
lddump(dev_t dev, daddr_t blkno, void *vav, size_t size)
{
	char *va = vav;
	struct ld_softc *sc;
	struct disklabel *lp;
	int unit, part, nsects, sectoff, towrt, nblk, maxblkcnt, rv;
	static int dumping;

	unit = DISKUNIT(dev);
	if ((sc = device_lookup_private(&ld_cd, unit)) == NULL)
		return (ENXIO);
	if ((sc->sc_flags & LDF_ENABLED) == 0)
		return (ENODEV);
	if (sc->sc_dump == NULL)
		return (ENXIO);

	/* Check if recursive dump; if so, punt. */
	if (dumping)
		return (EFAULT);
	dumping = 1;

	/* Convert to disk sectors.  Request must be a multiple of size. */
	part = DISKPART(dev);
	lp = sc->sc_dk.dk_label;
	if ((size % lp->d_secsize) != 0) {
		dumping = 0;		/* clear the recursion guard */
		return (EFAULT);
	}
	towrt = size / lp->d_secsize;
	blkno = dbtob(blkno) / lp->d_secsize;	/* blkno in DEV_BSIZE units */

	nsects = lp->d_partitions[part].p_size;
	sectoff = lp->d_partitions[part].p_offset;

	/* Check transfer bounds against partition size. */
	if ((blkno < 0) || ((blkno + towrt) > nsects)) {
		dumping = 0;		/* clear the recursion guard */
		return (EINVAL);
	}

	/* Offset block number to start of partition. */
	blkno += sectoff;

	/* Start dumping and return when done. */
	maxblkcnt = sc->sc_maxxfer / sc->sc_secsize - 1;
	while (towrt > 0) {
		nblk = min(maxblkcnt, towrt);

		if ((rv = (*sc->sc_dump)(sc, va, blkno, nblk)) != 0) {
			dumping = 0;	/* clear the recursion guard */
			return (rv);
		}

		towrt -= nblk;
		blkno += nblk;
		va += nblk * sc->sc_secsize;
	}

	dumping = 0;
	return (0);
}

/*
 * Adjust the size of a transfer.
 */
static void
ldminphys(struct buf *bp)
{
	struct ld_softc *sc;

	sc = device_lookup_private(&ld_cd, DISKUNIT(bp->b_dev));

	if (bp->b_bcount > sc->sc_maxxfer)
		bp->b_bcount = sc->sc_maxxfer;
	minphys(bp);
}

static void
ld_set_geometry(struct ld_softc *ld)
{
	struct disk_geom *dg = &ld->sc_dk.dk_geom;

	memset(dg, 0, sizeof(*dg));

	dg->dg_secperunit = ld->sc_secperunit;
	dg->dg_secsize = ld->sc_secsize;
	dg->dg_nsectors = ld->sc_nsectors;
	dg->dg_ntracks = ld->sc_nheads;
	dg->dg_ncylinders = ld->sc_ncylinders;

	disk_set_info(ld->sc_dv, &ld->sc_dk, NULL);
}

static void
ld_config_interrupts(device_t d)
{
	struct ld_softc *sc = device_private(d);
	dkwedge_discover(&sc->sc_dk);
}