/*	$NetBSD: ld.c,v 1.94.2.6 2016/11/04 14:49:08 pgoyette Exp $	*/

/*-
 * Copyright (c) 1998, 2000 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran and Charles M. Hannum.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Disk driver for use by RAID controllers.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ld.c,v 1.94.2.6 2016/11/04 14:49:08 pgoyette Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/bufq.h>
#include <sys/endian.h>
#include <sys/disklabel.h>
#include <sys/disk.h>
#include <sys/dkio.h>
#include <sys/stat.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/vnode.h>
#include <sys/syslog.h>
#include <sys/mutex.h>
#include <sys/localcount.h>
#include <sys/module.h>
#include <sys/reboot.h>

#include <dev/ldvar.h>

static void	ldminphys(struct buf *bp);
static bool	ld_suspend(device_t, const pmf_qual_t *);
static bool	ld_shutdown(device_t, int);
static int	ld_diskstart(device_t, struct buf *bp);
static void	ld_iosize(device_t, int *);
static int	ld_dumpblocks(device_t, void *, daddr_t, int);
static void	ld_fake_geometry(struct ld_softc *);
static void	ld_set_geometry(struct ld_softc *);
static void	ld_config_interrupts(device_t);
static int	ld_lastclose(device_t);
static int	ld_discard(device_t, off_t, off_t);

extern struct cfdriver ld_cd;

static dev_type_open(ldopen);
static dev_type_close(ldclose);
static dev_type_read(ldread);
static dev_type_write(ldwrite);
static dev_type_ioctl(ldioctl);
static dev_type_strategy(ldstrategy);
static dev_type_dump(lddump);
static dev_type_size(ldsize);
static dev_type_discard(lddiscard);

const struct bdevsw ld_bdevsw = {
	DEVSW_MODULE_INIT
	.d_open = ldopen,
	.d_close = ldclose,
	.d_strategy = ldstrategy,
	.d_ioctl = ldioctl,
	.d_dump = lddump,
	.d_psize = ldsize,
	.d_discard = lddiscard,
	.d_flag = D_DISK | D_MPSAFE
};

const struct cdevsw ld_cdevsw = {
	DEVSW_MODULE_INIT
	.d_open = ldopen,
	.d_close = ldclose,
	.d_read = ldread,
	.d_write = ldwrite,
	.d_ioctl = ldioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = nopoll,
	.d_mmap = nommap,
	.d_kqfilter = nokqfilter,
	.d_discard = lddiscard,
	.d_flag = D_DISK | D_MPSAFE
};

static struct dkdriver lddkdriver = {
	.d_open = ldopen,
	.d_close = ldclose,
	.d_strategy = ldstrategy,
	.d_iosize = ld_iosize,
	.d_minphys = ldminphys,
	.d_diskstart = ld_diskstart,
	.d_dumpblocks = ld_dumpblocks,
	.d_lastclose = ld_lastclose,
	.d_discard = ld_discard
};

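/*
 * Attach a logical disk.  Called by the hardware-specific front-end
 * once it has filled in sc_dv, the geometry and the transfer limits in
 * the softc.  Sets up the dk(9) and disk(9) state, the buffer queue,
 * the power hooks and schedules wedge discovery.
 */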
void
ldattach(struct ld_softc *sc, const char *default_strategy)
{
	device_t self = sc->sc_dv;
	struct dk_softc *dksc = &sc->sc_dksc;

	mutex_init(&sc->sc_mutex, MUTEX_DEFAULT, IPL_VM);
	cv_init(&sc->sc_drain, "lddrain");

	if ((sc->sc_flags & LDF_ENABLED) == 0) {
		return;
	}

	/* Initialise dk and disk structure. */
	dk_init(dksc, self, DKTYPE_LD);
	disk_init(&dksc->sc_dkdev, dksc->sc_xname, &lddkdriver);

	if (sc->sc_maxxfer > MAXPHYS)
		sc->sc_maxxfer = MAXPHYS;

	/* Build synthetic geometry if necessary. */
	if (sc->sc_nheads == 0 || sc->sc_nsectors == 0 ||
	    sc->sc_ncylinders == 0)
		ld_fake_geometry(sc);

	sc->sc_disksize512 = sc->sc_secperunit * sc->sc_secsize / DEV_BSIZE;

	/* Attach dk and disk subsystems */
	dk_attach(dksc);
	disk_attach(&dksc->sc_dkdev);
	ld_set_geometry(sc);

	bufq_alloc(&dksc->sc_bufq, default_strategy, BUFQ_SORT_RAWBLOCK);

	/* Register with PMF */
	if (!pmf_device_register1(dksc->sc_dev, ld_suspend, NULL, ld_shutdown))
		aprint_error_dev(dksc->sc_dev,
		    "couldn't establish power handler\n");

	/* Discover wedges on this disk. */
	config_interrupts(sc->sc_dv, ld_config_interrupts);
}

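/*
 * Allow the controller front-end to change the maximum number of
 * outstanding commands at run time.
 */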
int
ldadjqparam(struct ld_softc *sc, int xmax)
{

	mutex_enter(&sc->sc_mutex);
	sc->sc_maxqueuecnt = xmax;
	mutex_exit(&sc->sc_mutex);

	return (0);
}

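/*
 * First stage of detach: refuse new opens, then block new transfers
 * and wait for the command queue to drain.
 */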
int
ldbegindetach(struct ld_softc *sc, int flags)
{
	struct dk_softc *dksc = &sc->sc_dksc;
	int rv = 0;

	if ((sc->sc_flags & LDF_ENABLED) == 0)
		return (0);

	rv = disk_begindetach(&dksc->sc_dkdev, ld_lastclose, dksc->sc_dev, flags);

	if (rv != 0)
		return rv;

	mutex_enter(&sc->sc_mutex);
	sc->sc_maxqueuecnt = 0;

	while (sc->sc_queuecnt > 0) {
		sc->sc_flags |= LDF_DRAIN;
		cv_wait(&sc->sc_drain, &sc->sc_mutex);
	}
	mutex_exit(&sc->sc_mutex);

	return (rv);
}

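/*
 * Second stage of detach: wait for any commands still queued with the
 * hardware, flush the buffer queue, revoke open vnodes and tear down
 * the dk(9), disk(9) and PMF state set up by ldattach().
 */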
void
ldenddetach(struct ld_softc *sc)
{
	struct dk_softc *dksc = &sc->sc_dksc;
	int bmaj, cmaj, i, mn;

	if ((sc->sc_flags & LDF_ENABLED) == 0)
		return;

	mutex_enter(&sc->sc_mutex);

	/* Wait for commands queued with the hardware to complete. */
	if (sc->sc_queuecnt != 0) {
		if (cv_timedwait(&sc->sc_drain, &sc->sc_mutex, 30 * hz))
			printf("%s: not drained\n", dksc->sc_xname);
	}
	mutex_exit(&sc->sc_mutex);

	/* Kill off any queued buffers. */
	dk_drain(dksc);
	bufq_free(dksc->sc_bufq);

	/* Locate the major numbers. */
	bmaj = bdevsw_lookup_major(&ld_bdevsw);
	cmaj = cdevsw_lookup_major(&ld_cdevsw);

	/* Nuke the vnodes for any open instances. */
	for (i = 0; i < MAXPARTITIONS; i++) {
		mn = DISKMINOR(device_unit(dksc->sc_dev), i);
		vdevgone(bmaj, mn, mn, VBLK);
		vdevgone(cmaj, mn, mn, VCHR);
	}

	/* Delete all of our wedges. */
	dkwedge_delall(&dksc->sc_dkdev);

	/* Detach from the disk list. */
	disk_detach(&dksc->sc_dkdev);
	disk_destroy(&dksc->sc_dkdev);

	dk_detach(dksc);

	/* Deregister with PMF */
	pmf_device_deregister(dksc->sc_dev);

	/*
	 * XXX We can't really flush the cache here, because the
	 * XXX device may already be non-existent from the controller's
	 * XXX perspective.
	 */
#if 0
	/* Flush the device's cache. */
	if (sc->sc_flush != NULL)
		if ((*sc->sc_flush)(sc, 0) != 0)
			device_printf(dksc->sc_dev, "unable to flush cache\n");
#endif
	cv_destroy(&sc->sc_drain);
	mutex_destroy(&sc->sc_mutex);
}

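/*
 * PMF suspend and shutdown hooks: flush the controller's write cache
 * (polled) before power goes away.  Suspend simply reuses the
 * shutdown path.
 */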
/* ARGSUSED */
static bool
ld_suspend(device_t dev, const pmf_qual_t *qual)
{
	return ld_shutdown(dev, 0);
}

/* ARGSUSED */
static bool
ld_shutdown(device_t dev, int flags)
{
	struct ld_softc *sc = device_private(dev);
	struct dk_softc *dksc = &sc->sc_dksc;

	if ((flags & RB_NOSYNC) == 0 && sc->sc_flush != NULL
	    && (*sc->sc_flush)(sc, LDFL_POLL) != 0) {
		device_printf(dksc->sc_dev, "unable to flush cache\n");
		return false;
	}

	return true;
}

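/*
 * Open the device: look up and pin the softc for this unit, then hand
 * off to the common dk(9) open code.  ldclose() below is the mirror
 * image of this routine.
 */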
/* ARGSUSED */
static int
ldopen(dev_t dev, int flags, int fmt, struct lwp *l)
{
	device_t self;
	struct ld_softc *sc;
	struct dk_softc *dksc;
	int unit;
	int error;

	unit = DISKUNIT(dev);
	self = device_lookup_acquire(&ld_cd, unit);
	if (self == NULL)
		return ENXIO;
	sc = device_private(self);
	dksc = &sc->sc_dksc;

	error = dk_open(dksc, dev, flags, fmt, l);
	device_release(self);
	return error;
}

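/*
 * Called by dk(9) when the last partition is closed: flush the
 * controller's write cache so data reaches stable storage.
 */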
static int
ld_lastclose(device_t self)
{
	struct ld_softc *sc = device_private(self);

	if (sc->sc_flush != NULL && (*sc->sc_flush)(sc, 0) != 0)
		device_printf(self, "unable to flush cache\n");

	return 0;
}

/* ARGSUSED */
static int
ldclose(dev_t dev, int flags, int fmt, struct lwp *l)
{
	device_t self;
	struct ld_softc *sc;
	struct dk_softc *dksc;
	int unit;
	int error;

	unit = DISKUNIT(dev);
	self = device_lookup_acquire(&ld_cd, unit);
	if (self == NULL)
		return ENXIO;
	sc = device_private(self);
	dksc = &sc->sc_dksc;

	error = dk_close(dksc, dev, flags, fmt, l);
	device_release(self);
	return error;
}

/* ARGSUSED */
static int
ldread(dev_t dev, struct uio *uio, int ioflag)
{

	return (physio(ldstrategy, NULL, dev, B_READ, ldminphys, uio));
}

/* ARGSUSED */
static int
ldwrite(dev_t dev, struct uio *uio, int ioflag)
{

	return (physio(ldstrategy, NULL, dev, B_WRITE, ldminphys, uio));
}

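/*
 * Handle ioctls.  Most requests are dealt with by the common dk(9)
 * code; the only command handled here is DIOCCACHESYNC, which flushes
 * the controller's write cache.
 */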
/* ARGSUSED */
static int
ldioctl(dev_t dev, u_long cmd, void *addr, int32_t flag, struct lwp *l)
{
	device_t self;
	struct ld_softc *sc;
	struct dk_softc *dksc;
	int unit, error;

	unit = DISKUNIT(dev);
	self = device_lookup_acquire(&ld_cd, unit);
	if (self == NULL)
		return ENXIO;
	sc = device_private(self);
	dksc = &sc->sc_dksc;

	error = dk_ioctl(dksc, dev, cmd, addr, flag, l);
	if (error != EPASSTHROUGH) {
		device_release(self);
		return (error);
	}

	error = 0;

	switch (cmd) {
	case DIOCCACHESYNC:
		/*
		 * XXX Do we really need to care about having a writable
		 * file descriptor here?
		 */
		if ((flag & FWRITE) == 0)
			error = EBADF;
		else if (sc->sc_flush)
			error = (*sc->sc_flush)(sc, 0);
		else
			error = 0;	/* XXX Error out instead? */
		break;

	default:
		error = dk_ioctl(dksc, dev, cmd, addr, flag, l);
		break;
	}

	device_release(self);
	return (error);
}

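/*
 * Queue a transfer: translate the minor number back to a unit and let
 * the common dk(9) strategy routine do the work.
 */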
static void
ldstrategy(struct buf *bp)
{
	device_t self;
	struct ld_softc *sc;
	struct dk_softc *dksc;
	int unit;

	unit = DISKUNIT(bp->b_dev);
	self = device_lookup_acquire(&ld_cd, unit);
	if (self == NULL)
		return;
	sc = device_private(self);
	dksc = &sc->sc_dksc;

	dk_strategy(dksc, bp);
	device_release(self);
}

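/*
 * Start a transfer on behalf of dk(9).  Returns EAGAIN when the
 * controller's queue is full; dk(9) then keeps the buffer on its queue
 * and the transfer is retried from lddone() once a slot frees up.
 */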
static int
ld_diskstart(device_t dev, struct buf *bp)
{
	struct ld_softc *sc;
	int error;

	device_acquire(dev);
	sc = device_private(dev);
	if (sc->sc_queuecnt >= sc->sc_maxqueuecnt) {
		device_release(dev);
		return EAGAIN;
	}

	mutex_enter(&sc->sc_mutex);

	if (sc->sc_queuecnt >= sc->sc_maxqueuecnt)
		error = EAGAIN;
	else {
		error = (*sc->sc_start)(sc, bp);
		if (error == 0)
			sc->sc_queuecnt++;
	}

	mutex_exit(&sc->sc_mutex);

	device_release(dev);
	return error;
}

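/*
 * Called by the controller front-end when a transfer completes: hand
 * the buffer back to dk(9), wake anybody draining the queue and
 * restart queued transfers now that a command slot is free.
 */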
void
lddone(struct ld_softc *sc, struct buf *bp)
{
	struct dk_softc *dksc = &sc->sc_dksc;

	dk_done(dksc, bp);

	mutex_enter(&sc->sc_mutex);
	if (--sc->sc_queuecnt <= sc->sc_maxqueuecnt) {
		if ((sc->sc_flags & LDF_DRAIN) != 0) {
			sc->sc_flags &= ~LDF_DRAIN;
			cv_broadcast(&sc->sc_drain);
		}
		mutex_exit(&sc->sc_mutex);
		dk_start(dksc, NULL);
	} else
		mutex_exit(&sc->sc_mutex);
}

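/*
 * Return the size of the device, for swap and dump purposes.
 */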
static int
ldsize(dev_t dev)
{
	device_t self;
	struct ld_softc *sc;
	struct dk_softc *dksc;
	int unit;
	int error;

	unit = DISKUNIT(dev);
	self = device_lookup_acquire(&ld_cd, unit);
	if (self == NULL)
		return ENODEV;
	sc = device_private(self);
	dksc = &sc->sc_dksc;

	if ((sc->sc_flags & LDF_ENABLED) == 0)
		error = (ENODEV);
	else
		error = dk_size(dksc, dev);

	device_release(self);
	return error;
}

/*
 * Take a dump.
 */
static int
lddump(dev_t dev, daddr_t blkno, void *va, size_t size)
{
	device_t self;
	struct ld_softc *sc;
	struct dk_softc *dksc;
	int unit;
	int error;

	unit = DISKUNIT(dev);
	self = device_lookup_acquire(&ld_cd, unit);
	if (self == NULL)
		return ENXIO;
	sc = device_private(self);
	dksc = &sc->sc_dksc;

	if ((sc->sc_flags & LDF_ENABLED) == 0)
		error = (ENODEV);
	else
		error = dk_dump(dksc, dev, blkno, va, size);

	device_release(self);
	return error;
}

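/*
 * Write a range of blocks for dk(9)'s dump path, using the controller
 * front-end's polled dump routine if it provides one.
 */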
static int
ld_dumpblocks(device_t dev, void *va, daddr_t blkno, int nblk)
{
	struct ld_softc *sc;
	int error;

	device_acquire(dev);
	sc = device_private(dev);
	if (sc->sc_dump == NULL)
		error = ENODEV;
	else
		error = (*sc->sc_dump)(sc, va, blkno, nblk);

	device_release(dev);
	return error;
}

/*
 * Adjust the size of a transfer.
 */
static void
ldminphys(struct buf *bp)
{
	device_t self;
	int unit;
	struct ld_softc *sc;

	unit = DISKUNIT(bp->b_dev);
	self = device_lookup_acquire(&ld_cd, unit);
	if (self == NULL)
		return;
	sc = device_private(self);

	ld_iosize(sc->sc_dv, &bp->b_bcount);
	minphys(bp);
	device_release(self);
}

static void
ld_iosize(device_t d, int *countp)
{
	struct ld_softc *sc;

	device_acquire(d);
	sc = device_private(d);

	if (*countp > sc->sc_maxxfer)
		*countp = sc->sc_maxxfer;

	device_release(d);
}

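/*
 * Synthesize a CHS geometry when the controller does not supply one:
 * pick a head count based on the size of the unit, fix the sectors per
 * track at 63 and derive the cylinder count from the total capacity.
 */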
static void
ld_fake_geometry(struct ld_softc *sc)
{
	uint64_t ncyl;

	if (sc->sc_secperunit <= 528 * 2048)		/* 528MB */
		sc->sc_nheads = 16;
	else if (sc->sc_secperunit <= 1024 * 2048)	/* 1GB */
		sc->sc_nheads = 32;
	else if (sc->sc_secperunit <= 21504 * 2048)	/* 21GB */
		sc->sc_nheads = 64;
	else if (sc->sc_secperunit <= 43008 * 2048)	/* 42GB */
		sc->sc_nheads = 128;
	else
		sc->sc_nheads = 255;

	sc->sc_nsectors = 63;
	sc->sc_ncylinders = INT_MAX;
	ncyl = sc->sc_secperunit /
	    (sc->sc_nheads * sc->sc_nsectors);
	if (ncyl < INT_MAX)
		sc->sc_ncylinders = (int)ncyl;
}

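/*
 * Announce the disk and publish its geometry to the disk(9) layer.
 */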
static void
ld_set_geometry(struct ld_softc *sc)
{
	struct dk_softc *dksc = &sc->sc_dksc;
	struct disk_geom *dg = &dksc->sc_dkdev.dk_geom;
	char tbuf[9];

	format_bytes(tbuf, sizeof(tbuf), sc->sc_secperunit *
	    sc->sc_secsize);
	aprint_normal_dev(dksc->sc_dev, "%s, %d cyl, %d head, %d sec, "
	    "%d bytes/sect x %"PRIu64" sectors\n",
	    tbuf, sc->sc_ncylinders, sc->sc_nheads,
	    sc->sc_nsectors, sc->sc_secsize, sc->sc_secperunit);

	memset(dg, 0, sizeof(*dg));
	dg->dg_secperunit = sc->sc_secperunit;
	dg->dg_secsize = sc->sc_secsize;
	dg->dg_nsectors = sc->sc_nsectors;
	dg->dg_ntracks = sc->sc_nheads;
	dg->dg_ncylinders = sc->sc_ncylinders;

	disk_set_info(dksc->sc_dev, &dksc->sc_dkdev, NULL);
}

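/*
 * Deferred from ldattach(): discover wedges once interrupts are
 * enabled and the disk can actually be read.
 */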
static void
ld_config_interrupts(device_t d)
{
	struct ld_softc *sc;
	struct dk_softc *dksc;

	device_acquire(d);
	sc = device_private(d);
	dksc = &sc->sc_dksc;
	dkwedge_discover(&dksc->sc_dkdev);
	device_release(d);
}

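/*
 * Discard a byte range, if the controller front-end supports it.
 */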
static int
ld_discard(device_t dev, off_t pos, off_t len)
{
	struct ld_softc *sc;
	int error;

	device_acquire(dev);
	sc = device_private(dev);
	if (sc->sc_discard == NULL)
		error = (ENODEV);
	else
		error = (*sc->sc_discard)(sc, pos, len);
	device_release(dev);
	return error;
}

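/*
 * Device switch entry point for discard requests; the common dk(9)
 * code ends up calling back into ld_discard() above.
 */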
static int
lddiscard(dev_t dev, off_t pos, off_t len)
{
	device_t self;
	struct ld_softc *sc;
	struct dk_softc *dksc;
	int unit;
	int error;

	unit = DISKUNIT(dev);
	self = device_lookup_acquire(&ld_cd, unit);
	if (self == NULL)
		return ENXIO;
	sc = device_private(self);
	dksc = &sc->sc_dksc;

	error = dk_discard(dksc, dev, pos, len);
	device_release(self);
	return error;
}

MODULE(MODULE_CLASS_DRIVER, ld, "dk_subr");

#ifdef _MODULE
CFDRIVER_DECL(ld, DV_DISK, NULL);
#endif

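/*
 * Module control: when built as a module, attach the block/character
 * device switch entries and the cfdriver on load, and detach them
 * again on unload.
 */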
static int
ld_modcmd(modcmd_t cmd, void *opaque)
{
#ifdef _MODULE
	devmajor_t bmajor, cmajor;
#endif
	int error = 0;

#ifdef _MODULE
	switch (cmd) {
	case MODULE_CMD_INIT:
		bmajor = cmajor = -1;
		error = devsw_attach(ld_cd.cd_name, &ld_bdevsw, &bmajor,
		    &ld_cdevsw, &cmajor);
		if (error)
			break;
		error = config_cfdriver_attach(&ld_cd);
		break;
	case MODULE_CMD_FINI:
		error = config_cfdriver_detach(&ld_cd);
		if (error)
			break;
		devsw_detach(&ld_bdevsw, &ld_cdevsw);
		break;
	default:
		error = ENOTTY;
		break;
	}
#endif

	return error;
}