/*	$NetBSD: ld.c,v 1.88 2015/08/16 17:32:31 mlelstv Exp $	*/

/*-
 * Copyright (c) 1998, 2000 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran and Charles M. Hannum.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Disk driver for use by RAID controllers.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ld.c,v 1.88 2015/08/16 17:32:31 mlelstv Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/bufq.h>
#include <sys/endian.h>
#include <sys/disklabel.h>
#include <sys/disk.h>
#include <sys/dkio.h>
#include <sys/stat.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/vnode.h>
#include <sys/syslog.h>
#include <sys/mutex.h>
#include <sys/rndsource.h>

#include <dev/ldvar.h>

#include <prop/proplib.h>

static void ldminphys(struct buf *bp);
static bool ld_suspend(device_t, const pmf_qual_t *);
static bool ld_shutdown(device_t, int);
static void ld_start(device_t);
static void ld_iosize(device_t, int *);
static int ld_dumpblocks(device_t, void *, daddr_t, int);
static void ld_fake_geometry(struct ld_softc *);
static void ld_set_geometry(struct ld_softc *);
static void ld_config_interrupts (device_t);
static int ld_lastclose(device_t);

extern struct cfdriver ld_cd;

static dev_type_open(ldopen);
static dev_type_close(ldclose);
static dev_type_read(ldread);
static dev_type_write(ldwrite);
static dev_type_ioctl(ldioctl);
static dev_type_strategy(ldstrategy);
static dev_type_dump(lddump);
static dev_type_size(ldsize);

const struct bdevsw ld_bdevsw = {
	.d_open = ldopen,
	.d_close = ldclose,
	.d_strategy = ldstrategy,
	.d_ioctl = ldioctl,
	.d_dump = lddump,
	.d_psize = ldsize,
	.d_discard = nodiscard,
	.d_flag = D_DISK
};

const struct cdevsw ld_cdevsw = {
	.d_open = ldopen,
	.d_close = ldclose,
	.d_read = ldread,
	.d_write = ldwrite,
	.d_ioctl = ldioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = nopoll,
	.d_mmap = nommap,
	.d_kqfilter = nokqfilter,
	.d_discard = nodiscard,
	.d_flag = D_DISK
};

static struct dkdriver lddkdriver = {
	.d_open = ldopen,
	.d_close = ldclose,
	.d_strategy = ldstrategy,
	.d_iosize = ld_iosize,
	.d_minphys = ldminphys,
	.d_diskstart = ld_start,
	.d_dumpblocks = ld_dumpblocks,
	.d_lastclose = ld_lastclose
};

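/*
 * Attach a logical disk.  Called by the hardware-specific back-end once
 * it has filled in the softc (capacity, sector size, queue depth and
 * callbacks).
 */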
void
ldattach(struct ld_softc *sc)
{
	device_t self = sc->sc_dv;
	struct dk_softc *dksc = &sc->sc_dksc;

	mutex_init(&sc->sc_mutex, MUTEX_DEFAULT, IPL_VM);
	cv_init(&sc->sc_drain, "lddrain");

	if ((sc->sc_flags & LDF_ENABLED) == 0) {
		return;
	}

	/* Initialise dk and disk structure. */
	dk_init(dksc, self, DKTYPE_LD);
	disk_init(&dksc->sc_dkdev, dksc->sc_xname, &lddkdriver);

	/* Attach the device into the rnd source list. */
	rnd_attach_source(&sc->sc_rnd_source, dksc->sc_xname,
	    RND_TYPE_DISK, RND_FLAG_DEFAULT);

	if (sc->sc_maxxfer > MAXPHYS)
		sc->sc_maxxfer = MAXPHYS;

	/* Build synthetic geometry if necessary. */
	if (sc->sc_nheads == 0 || sc->sc_nsectors == 0 ||
	    sc->sc_ncylinders == 0)
		ld_fake_geometry(sc);

	sc->sc_disksize512 = sc->sc_secperunit * sc->sc_secsize / DEV_BSIZE;

	/* Attach dk and disk subsystems */
	dk_attach(dksc);
	disk_attach(&dksc->sc_dkdev);
	ld_set_geometry(sc);

	bufq_alloc(&dksc->sc_bufq, BUFQ_DISK_DEFAULT_STRAT, BUFQ_SORT_RAWBLOCK);

	/* Register with PMF */
	if (!pmf_device_register1(dksc->sc_dev, ld_suspend, NULL, ld_shutdown))
		aprint_error_dev(dksc->sc_dev,
		    "couldn't establish power handler\n");

	/* Discover wedges on this disk. */
	config_interrupts(sc->sc_dv, ld_config_interrupts);
}

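/*
 * Adjust the maximum number of transfers that may be outstanding in the
 * hardware at any one time.
 */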
int
ldadjqparam(struct ld_softc *sc, int xmax)
{

	mutex_enter(&sc->sc_mutex);
	sc->sc_maxqueuecnt = xmax;
	mutex_exit(&sc->sc_mutex);

	return (0);
}

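/*
 * Begin detaching the disk: refuse new transfers and wait for those
 * already queued with the hardware to drain.
 */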
int
ldbegindetach(struct ld_softc *sc, int flags)
{
	struct dk_softc *dksc = &sc->sc_dksc;
	int rv = 0;

	if ((sc->sc_flags & LDF_ENABLED) == 0)
		return (0);

	rv = disk_begindetach(&dksc->sc_dkdev, ld_lastclose, dksc->sc_dev, flags);

	if (rv != 0)
		return rv;

	mutex_enter(&sc->sc_mutex);
	sc->sc_maxqueuecnt = 0;

	dk_detach(dksc);

	while (sc->sc_queuecnt > 0) {
		sc->sc_flags |= LDF_DRAIN;
		cv_wait(&sc->sc_drain, &sc->sc_mutex);
	}
	mutex_exit(&sc->sc_mutex);

	return (rv);
}

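/*
 * Finish detaching: discard any remaining buffers, revoke open vnodes,
 * delete wedges and detach from the disk, entropy and power frameworks.
 */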
void
ldenddetach(struct ld_softc *sc)
{
	struct dk_softc *dksc = &sc->sc_dksc;
	int bmaj, cmaj, i, mn;

	if ((sc->sc_flags & LDF_ENABLED) == 0)
		return;

	mutex_enter(&sc->sc_mutex);

	/* Wait for commands queued with the hardware to complete. */
	if (sc->sc_queuecnt != 0) {
		if (cv_timedwait(&sc->sc_drain, &sc->sc_mutex, 30 * hz))
			printf("%s: not drained\n", dksc->sc_xname);
	}

	/* Kill off any queued buffers. */
	bufq_drain(dksc->sc_bufq);
	mutex_exit(&sc->sc_mutex);

	bufq_free(dksc->sc_bufq);

	/* Locate the major numbers. */
	bmaj = bdevsw_lookup_major(&ld_bdevsw);
	cmaj = cdevsw_lookup_major(&ld_cdevsw);

	/* Nuke the vnodes for any open instances. */
	for (i = 0; i < MAXPARTITIONS; i++) {
		mn = DISKMINOR(device_unit(dksc->sc_dev), i);
		vdevgone(bmaj, mn, mn, VBLK);
		vdevgone(cmaj, mn, mn, VCHR);
	}

	/* Delete all of our wedges. */
	dkwedge_delall(&dksc->sc_dkdev);

	/* Detach from the disk list. */
	disk_detach(&dksc->sc_dkdev);
	disk_destroy(&dksc->sc_dkdev);

	/* Unhook the entropy source. */
	rnd_detach_source(&sc->sc_rnd_source);

	/* Deregister with PMF */
	pmf_device_deregister(dksc->sc_dev);

	/*
	 * XXX We can't really flush the cache here, because the
	 * XXX device may already be non-existent from the controller's
	 * XXX perspective.
	 */
#if 0
	/* Flush the device's cache. */
	if (sc->sc_flush != NULL)
		if ((*sc->sc_flush)(sc, 0) != 0)
			device_printf(dksc->sc_dev, "unable to flush cache\n");
#endif
	cv_destroy(&sc->sc_drain);
	mutex_destroy(&sc->sc_mutex);
}
269
270 /* ARGSUSED */
271 static bool
272 ld_suspend(device_t dev, const pmf_qual_t *qual)
273 {
274 return ld_shutdown(dev, 0);
275 }
276
277 /* ARGSUSED */
278 static bool
279 ld_shutdown(device_t dev, int flags)
280 {
281 struct ld_softc *sc = device_private(dev);
282 struct dk_softc *dksc = &sc->sc_dksc;
283
284 if (sc->sc_flush != NULL && (*sc->sc_flush)(sc, LDFL_POLL) != 0) {
285 device_printf(dksc->sc_dev, "unable to flush cache\n");
286 return false;
287 }
288
289 return true;
290 }
291
292 /* ARGSUSED */
293 static int
294 ldopen(dev_t dev, int flags, int fmt, struct lwp *l)
295 {
296 struct ld_softc *sc;
297 struct dk_softc *dksc;
298 int unit;
299
300 unit = DISKUNIT(dev);
301 if ((sc = device_lookup_private(&ld_cd, unit)) == NULL)
302 return (ENXIO);
303 dksc = &sc->sc_dksc;
304
305 return dk_open(dksc, dev, flags, fmt, l);
306 }
307
308 static int
309 ld_lastclose(device_t self)
310 {
311 struct ld_softc *sc = device_private(self);
312
313 if (sc->sc_flush != NULL && (*sc->sc_flush)(sc, 0) != 0)
314 device_printf(self, "unable to flush cache\n");
315
316 return 0;
317 }
318
319 /* ARGSUSED */
320 static int
321 ldclose(dev_t dev, int flags, int fmt, struct lwp *l)
322 {
323 struct ld_softc *sc;
324 struct dk_softc *dksc;
325 int unit;
326
327 unit = DISKUNIT(dev);
328 sc = device_lookup_private(&ld_cd, unit);
329 dksc = &sc->sc_dksc;
330
331 return dk_close(dksc, dev, flags, fmt, l);
332 }
333
334 /* ARGSUSED */
335 static int
336 ldread(dev_t dev, struct uio *uio, int ioflag)
337 {
338
339 return (physio(ldstrategy, NULL, dev, B_READ, ldminphys, uio));
340 }
341
342 /* ARGSUSED */
343 static int
344 ldwrite(dev_t dev, struct uio *uio, int ioflag)
345 {
346
347 return (physio(ldstrategy, NULL, dev, B_WRITE, ldminphys, uio));
348 }
349
350 /* ARGSUSED */
351 static int
352 ldioctl(dev_t dev, u_long cmd, void *addr, int32_t flag, struct lwp *l)
353 {
354 struct ld_softc *sc;
355 struct dk_softc *dksc;
356 int unit, error;
357
358 unit = DISKUNIT(dev);
359 sc = device_lookup_private(&ld_cd, unit);
360 dksc = &sc->sc_dksc;
361
362 error = disk_ioctl(&dksc->sc_dkdev, dev, cmd, addr, flag, l);
363 if (error != EPASSTHROUGH)
364 return (error);
365
366 error = dk_ioctl(dksc, dev, cmd, addr, flag, l);
367 if (error != EPASSTHROUGH)
368 return (error);
369
370 error = 0;
371
372 switch (cmd) {
373 case DIOCCACHESYNC:
374 /*
375 * XXX Do we really need to care about having a writable
376 * file descriptor here?
377 */
378 if ((flag & FWRITE) == 0)
379 error = EBADF;
380 else if (sc->sc_flush)
381 error = (*sc->sc_flush)(sc, 0);
382 else
383 error = 0; /* XXX Error out instead? */
384 break;
385 default:
386 error = ENOTTY;
387 break;
388 }
389
390 return (error);
391 }
392
393 static void
394 ldstrategy(struct buf *bp)
395 {
396 struct ld_softc *sc;
397 struct dk_softc *dksc;
398 int unit;
399
400 unit = DISKUNIT(bp->b_dev);
401 sc = device_lookup_private(&ld_cd, unit);
402 dksc = &sc->sc_dksc;
403
404 return dk_strategy(dksc, bp);
405 }
406
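/*
 * Hand queued buffers to the back-end's start routine until its queue
 * is full.  On a transient resource shortage (EAGAIN) the buffer is
 * left on the queue and retried later; any other error fails the I/O.
 */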
static void
ld_start(device_t dev)
{
	struct ld_softc *sc = device_private(dev);
	struct dk_softc *dksc = &sc->sc_dksc;
	struct buf *bp;
	int error;

	mutex_enter(&sc->sc_mutex);

	while (sc->sc_queuecnt < sc->sc_maxqueuecnt) {
		/* See if there is work to do. */
		if ((bp = bufq_peek(dksc->sc_bufq)) == NULL)
			break;

		disk_busy(&dksc->sc_dkdev);
		sc->sc_queuecnt++;

		if (__predict_true((error = (*sc->sc_start)(sc, bp)) == 0)) {
			/*
			 * The back-end is running the job; remove it from
			 * the queue.
			 */
			(void) bufq_get(dksc->sc_bufq);
		} else {
			disk_unbusy(&dksc->sc_dkdev, 0, (bp->b_flags & B_READ));
			sc->sc_queuecnt--;
			if (error == EAGAIN) {
				/*
				 * Temporary resource shortage in the
				 * back-end; just defer the job until
				 * later.
				 *
				 * XXX We might consider a watchdog timer
				 * XXX to make sure we are kicked into action.
				 */
				break;
			} else {
				(void) bufq_get(dksc->sc_bufq);
				bp->b_error = error;
				bp->b_resid = bp->b_bcount;
				mutex_exit(&sc->sc_mutex);
				biodone(bp);
				mutex_enter(&sc->sc_mutex);
			}
		}
	}

	mutex_exit(&sc->sc_mutex);
}

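/*
 * Called by the back-end when a transfer completes.  Account for the
 * completion, wake anyone waiting for the queue to drain, and try to
 * start more work.
 */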
void
lddone(struct ld_softc *sc, struct buf *bp)
{
	struct dk_softc *dksc = &sc->sc_dksc;

	dk_done(dksc, bp);

	mutex_enter(&sc->sc_mutex);
	if (--sc->sc_queuecnt <= sc->sc_maxqueuecnt) {
		if ((sc->sc_flags & LDF_DRAIN) != 0) {
			sc->sc_flags &= ~LDF_DRAIN;
			cv_broadcast(&sc->sc_drain);
		}
		mutex_exit(&sc->sc_mutex);
		ld_start(dksc->sc_dev);
	} else
		mutex_exit(&sc->sc_mutex);
}

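/*
 * Return the size of the partition, for swap and dump purposes
 * (d_psize entry point).
 */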
static int
ldsize(dev_t dev)
{
	struct ld_softc *sc;
	struct dk_softc *dksc;
	int unit;

	unit = DISKUNIT(dev);
	if ((sc = device_lookup_private(&ld_cd, unit)) == NULL)
		return (ENODEV);
	dksc = &sc->sc_dksc;

	if ((sc->sc_flags & LDF_ENABLED) == 0)
		return (ENODEV);

	return dk_size(dksc, dev);
}

/*
 * Take a dump.
 */
static int
lddump(dev_t dev, daddr_t blkno, void *va, size_t size)
{
	struct ld_softc *sc;
	struct dk_softc *dksc;
	int unit;

	unit = DISKUNIT(dev);
	if ((sc = device_lookup_private(&ld_cd, unit)) == NULL)
		return (ENXIO);
	dksc = &sc->sc_dksc;

	if ((sc->sc_flags & LDF_ENABLED) == 0)
		return (ENODEV);

	return dk_dump(dksc, dev, blkno, va, size);
}

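/*
 * dkdriver callback used by dk_dump(): write crash-dump blocks through
 * the back-end's dump routine.
 */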
static int
ld_dumpblocks(device_t dev, void *va, daddr_t blkno, int nblk)
{
	struct ld_softc *sc = device_private(dev);

	if (sc->sc_dump == NULL)
		return (ENXIO);

	return (*sc->sc_dump)(sc, va, blkno, nblk);
}

/*
 * Adjust the size of a transfer.
 */
static void
ldminphys(struct buf *bp)
{
	int unit;
	struct ld_softc *sc;

	unit = DISKUNIT(bp->b_dev);
	sc = device_lookup_private(&ld_cd, unit);

	ld_iosize(sc->sc_dv, &bp->b_bcount);
	minphys(bp);
}

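/*
 * Clamp a transfer length to the back-end's maximum transfer size.
 */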
static void
ld_iosize(device_t d, int *countp)
{
	struct ld_softc *sc = device_private(d);

	if (*countp > sc->sc_maxxfer)
		*countp = sc->sc_maxxfer;
}

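/*
 * Invent a plausible CHS geometry for controllers that do not report
 * one: 63 sectors per track, with the head count scaled to the
 * capacity of the volume.
 */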
static void
ld_fake_geometry(struct ld_softc *sc)
{
	uint64_t ncyl;

	if (sc->sc_secperunit <= 528 * 2048)		/* 528MB */
		sc->sc_nheads = 16;
	else if (sc->sc_secperunit <= 1024 * 2048)	/* 1GB */
		sc->sc_nheads = 32;
	else if (sc->sc_secperunit <= 21504 * 2048)	/* 21GB */
		sc->sc_nheads = 64;
	else if (sc->sc_secperunit <= 43008 * 2048)	/* 42GB */
		sc->sc_nheads = 128;
	else
		sc->sc_nheads = 255;

	sc->sc_nsectors = 63;
	sc->sc_ncylinders = INT_MAX;
	ncyl = sc->sc_secperunit /
	    (sc->sc_nheads * sc->sc_nsectors);
	if (ncyl < INT_MAX)
		sc->sc_ncylinders = (int)ncyl;
}

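/*
 * Announce the disk and publish its geometry to the disk subsystem.
 */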
static void
ld_set_geometry(struct ld_softc *sc)
{
	struct dk_softc *dksc = &sc->sc_dksc;
	struct disk_geom *dg = &dksc->sc_dkdev.dk_geom;
	char tbuf[9];

	format_bytes(tbuf, sizeof(tbuf), sc->sc_secperunit *
	    sc->sc_secsize);
	aprint_normal_dev(dksc->sc_dev, "%s, %d cyl, %d head, %d sec, "
	    "%d bytes/sect x %"PRIu64" sectors\n",
	    tbuf, sc->sc_ncylinders, sc->sc_nheads,
	    sc->sc_nsectors, sc->sc_secsize, sc->sc_secperunit);

	memset(dg, 0, sizeof(*dg));
	dg->dg_secperunit = sc->sc_secperunit;
	dg->dg_secsize = sc->sc_secsize;
	dg->dg_nsectors = sc->sc_nsectors;
	dg->dg_ntracks = sc->sc_nheads;
	dg->dg_ncylinders = sc->sc_ncylinders;

	disk_set_info(dksc->sc_dev, &dksc->sc_dkdev, NULL);
}

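/*
 * Deferred until interrupts are enabled: discover wedges on the new
 * disk.
 */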
static void
ld_config_interrupts(device_t d)
{
	struct ld_softc *sc = device_private(d);
	struct dk_softc *dksc = &sc->sc_dksc;

	dkwedge_discover(&dksc->sc_dkdev);
}