/*	$NetBSD: ld.c,v 1.85 2015/08/16 14:02:52 mlelstv Exp $	*/

/*-
 * Copyright (c) 1998, 2000 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran and Charles M. Hannum.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Disk driver for use by RAID controllers.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ld.c,v 1.85 2015/08/16 14:02:52 mlelstv Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/bufq.h>
#include <sys/endian.h>
#include <sys/disklabel.h>
#include <sys/disk.h>
#include <sys/dkio.h>
#include <sys/stat.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/vnode.h>
#include <sys/syslog.h>
#include <sys/mutex.h>
#include <sys/rndsource.h>

#include <dev/ldvar.h>

#include <prop/proplib.h>

static void	ldminphys(struct buf *bp);
static bool	ld_suspend(device_t, const pmf_qual_t *);
static bool	ld_shutdown(device_t, int);
static void	ld_start(device_t);
static void	ld_iosize(device_t, int *);
static int	ld_dumpblocks(device_t, void *, daddr_t, int);
static void	ld_fake_geometry(struct ld_softc *);
static void	ld_set_geometry(struct ld_softc *);
static void	ld_config_interrupts (device_t);
static int	ld_lastclose(device_t);

extern struct	cfdriver ld_cd;

static dev_type_open(ldopen);
static dev_type_close(ldclose);
static dev_type_read(ldread);
static dev_type_write(ldwrite);
static dev_type_ioctl(ldioctl);
static dev_type_strategy(ldstrategy);
static dev_type_dump(lddump);
static dev_type_size(ldsize);

const struct bdevsw ld_bdevsw = {
	.d_open = ldopen,
	.d_close = ldclose,
	.d_strategy = ldstrategy,
	.d_ioctl = ldioctl,
	.d_dump = lddump,
	.d_psize = ldsize,
	.d_discard = nodiscard,
	.d_flag = D_DISK
};

const struct cdevsw ld_cdevsw = {
	.d_open = ldopen,
	.d_close = ldclose,
	.d_read = ldread,
	.d_write = ldwrite,
	.d_ioctl = ldioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = nopoll,
	.d_mmap = nommap,
	.d_kqfilter = nokqfilter,
	.d_discard = nodiscard,
	.d_flag = D_DISK
};

static struct	dkdriver lddkdriver = {
	.d_open = ldopen,
	.d_close = ldclose,
	.d_strategy = ldstrategy,
	.d_iosize = ld_iosize,
	.d_minphys = ldminphys,
	.d_diskstart = ld_start,
	.d_dumpblocks = ld_dumpblocks,
	.d_lastclose = ld_lastclose
};

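/*
 * Attach a logical disk: set up the dk(9) and disk(9) glue, register
 * the entropy source and power handlers, and schedule wedge discovery
 * for when interrupts are enabled.
 */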
void
ldattach(struct ld_softc *sc)
{
	device_t self = sc->sc_dv;
	struct dk_softc *dksc = &sc->sc_dksc;

	mutex_init(&sc->sc_mutex, MUTEX_DEFAULT, IPL_VM);
	cv_init(&sc->sc_drain, "lddrain");

	if ((sc->sc_flags & LDF_ENABLED) == 0) {
		return;
	}

	/* Initialise dk and disk structure. */
	dk_init(dksc, self, DKTYPE_LD);
	disk_init(&dksc->sc_dkdev, dksc->sc_xname, &lddkdriver);

	/* Attach the device into the rnd source list. */
	rnd_attach_source(&sc->sc_rnd_source, dksc->sc_xname,
	    RND_TYPE_DISK, RND_FLAG_DEFAULT);

	if (sc->sc_maxxfer > MAXPHYS)
		sc->sc_maxxfer = MAXPHYS;

	/* Build synthetic geometry if necessary. */
	if (sc->sc_nheads == 0 || sc->sc_nsectors == 0 ||
	    sc->sc_ncylinders == 0)
		ld_fake_geometry(sc);

	sc->sc_disksize512 = sc->sc_secperunit * sc->sc_secsize / DEV_BSIZE;

	/* Attach dk and disk subsystems */
	dk_attach(dksc);
	disk_attach(&dksc->sc_dkdev);
	ld_set_geometry(sc);

	bufq_alloc(&dksc->sc_bufq, BUFQ_DISK_DEFAULT_STRAT, BUFQ_SORT_RAWBLOCK);

	/* Register with PMF */
	if (!pmf_device_register1(dksc->sc_dev, ld_suspend, NULL, ld_shutdown))
		aprint_error_dev(dksc->sc_dev,
		    "couldn't establish power handler\n");

	/* Discover wedges on this disk. */
	config_interrupts(sc->sc_dv, ld_config_interrupts);
}

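/*
 * Adjust the maximum number of transfers that may be queued with
 * the back-end at once.
 */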
int
ldadjqparam(struct ld_softc *sc, int xmax)
{

	mutex_enter(&sc->sc_mutex);
	sc->sc_maxqueuecnt = xmax;
	mutex_exit(&sc->sc_mutex);

	return (0);
}

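/*
 * Begin detaching the device: refuse new transfers and wait for
 * transfers already queued with the back-end to drain.
 */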
int
ldbegindetach(struct ld_softc *sc, int flags)
{
	struct dk_softc *dksc = &sc->sc_dksc;
	int rv = 0;

	if ((sc->sc_flags & LDF_ENABLED) == 0)
		return (0);

	rv = disk_begindetach(&dksc->sc_dkdev, ld_lastclose, dksc->sc_dev, flags);

	if (rv != 0)
		return rv;

	mutex_enter(&sc->sc_mutex);
	sc->sc_maxqueuecnt = 0;

	dk_detach(dksc);

	while (sc->sc_queuecnt > 0) {
		sc->sc_flags |= LDF_DRAIN;
		cv_wait(&sc->sc_drain, &sc->sc_mutex);
	}
	mutex_exit(&sc->sc_mutex);

	return (rv);
}

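/*
 * Finish detaching: tear down the buffer queue, revoke vnodes and
 * wedges, and release the disk, entropy and PMF registrations.
 */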
void
ldenddetach(struct ld_softc *sc)
{
	struct dk_softc *dksc = &sc->sc_dksc;
	int bmaj, cmaj, i, mn;

	if ((sc->sc_flags & LDF_ENABLED) == 0)
		return;

	mutex_enter(&sc->sc_mutex);

	/* Wait for commands queued with the hardware to complete. */
	if (sc->sc_queuecnt != 0)
		if (tsleep(&sc->sc_queuecnt, PRIBIO, "lddtch", 30 * hz))
			printf("%s: not drained\n", dksc->sc_xname);

	/* Kill off any queued buffers. */
	bufq_drain(dksc->sc_bufq);
	mutex_exit(&sc->sc_mutex);

	bufq_free(dksc->sc_bufq);

	/* Locate the major numbers. */
	bmaj = bdevsw_lookup_major(&ld_bdevsw);
	cmaj = cdevsw_lookup_major(&ld_cdevsw);

	/* Nuke the vnodes for any open instances. */
	for (i = 0; i < MAXPARTITIONS; i++) {
		mn = DISKMINOR(device_unit(dksc->sc_dev), i);
		vdevgone(bmaj, mn, mn, VBLK);
		vdevgone(cmaj, mn, mn, VCHR);
	}

	/* Delete all of our wedges. */
	dkwedge_delall(&dksc->sc_dkdev);

	/* Detach from the disk list. */
	disk_detach(&dksc->sc_dkdev);
	disk_destroy(&dksc->sc_dkdev);

	/* Unhook the entropy source. */
	rnd_detach_source(&sc->sc_rnd_source);

	/* Deregister with PMF */
	pmf_device_deregister(dksc->sc_dev);

	/*
	 * XXX We can't really flush the cache here, because the
	 * XXX device may already be non-existent from the controller's
	 * XXX perspective.
	 */
#if 0
	/* Flush the device's cache. */
	if (sc->sc_flush != NULL)
		if ((*sc->sc_flush)(sc, 0) != 0)
			aprint_error_dev(dksc->sc_dev, "unable to flush cache\n");
#endif
	cv_destroy(&sc->sc_drain);
	mutex_destroy(&sc->sc_mutex);
}

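/*
 * PMF suspend handler; flush the write cache as for shutdown.
 */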
/* ARGSUSED */
static bool
ld_suspend(device_t dev, const pmf_qual_t *qual)
{
	return ld_shutdown(dev, 0);
}

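/*
 * PMF shutdown handler; flush the volatile write cache with a polled
 * (LDFL_POLL) request before power-down.
 */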
/* ARGSUSED */
static bool
ld_shutdown(device_t dev, int flags)
{
	struct ld_softc *sc = device_private(dev);
	struct dk_softc *dksc = &sc->sc_dksc;

	if (sc->sc_flush != NULL && (*sc->sc_flush)(sc, LDFL_POLL) != 0) {
		printf("%s: unable to flush cache\n", dksc->sc_xname);
		return false;
	}

	return true;
}

/* ARGSUSED */
static int
ldopen(dev_t dev, int flags, int fmt, struct lwp *l)
{
	struct ld_softc *sc;
	struct dk_softc *dksc;
	int unit;

	unit = DISKUNIT(dev);
	if ((sc = device_lookup_private(&ld_cd, unit)) == NULL)
		return (ENXIO);
	dksc = &sc->sc_dksc;

	return dk_open(dksc, dev, flags, fmt, l);
}

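/*
 * Called by dk(9) when the last reference to the device is closed;
 * flush the write cache if the back-end provides a flush routine.
 */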
static int
ld_lastclose(device_t self)
{
	struct ld_softc *sc = device_private(self);

	if (sc->sc_flush != NULL && (*sc->sc_flush)(sc, 0) != 0)
		aprint_error_dev(self, "unable to flush cache\n");

	return 0;
}

/* ARGSUSED */
static int
ldclose(dev_t dev, int flags, int fmt, struct lwp *l)
{
	struct ld_softc *sc;
	struct dk_softc *dksc;
	int unit;

	unit = DISKUNIT(dev);
	sc = device_lookup_private(&ld_cd, unit);
	dksc = &sc->sc_dksc;

	return dk_close(dksc, dev, flags, fmt, l);
}

/* ARGSUSED */
static int
ldread(dev_t dev, struct uio *uio, int ioflag)
{

	return (physio(ldstrategy, NULL, dev, B_READ, ldminphys, uio));
}

/* ARGSUSED */
static int
ldwrite(dev_t dev, struct uio *uio, int ioflag)
{

	return (physio(ldstrategy, NULL, dev, B_WRITE, ldminphys, uio));
}

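/*
 * Handle ioctls.  Generic disk(9) and dk(9) handling is tried first;
 * only DIOCCACHESYNC is serviced here directly.
 */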
/* ARGSUSED */
static int
ldioctl(dev_t dev, u_long cmd, void *addr, int32_t flag, struct lwp *l)
{
	struct ld_softc *sc;
	struct dk_softc *dksc;
	int unit, error;

	unit = DISKUNIT(dev);
	sc = device_lookup_private(&ld_cd, unit);
	dksc = &sc->sc_dksc;

	error = disk_ioctl(&dksc->sc_dkdev, dev, cmd, addr, flag, l);
	if (error != EPASSTHROUGH)
		return (error);

	error = dk_ioctl(dksc, dev, cmd, addr, flag, l);
	if (error != EPASSTHROUGH)
		return (error);

	error = 0;

	switch (cmd) {
	case DIOCCACHESYNC:
		/*
		 * XXX Do we really need to care about having a writable
		 * file descriptor here?
		 */
		if ((flag & FWRITE) == 0)
			error = EBADF;
		else if (sc->sc_flush)
			error = (*sc->sc_flush)(sc, 0);
		else
			error = 0;	/* XXX Error out instead? */
		break;
	default:
		error = ENOTTY;
		break;
	}

	return (error);
}

static void
ldstrategy(struct buf *bp)
{
	struct ld_softc *sc;
	struct dk_softc *dksc;
	int unit;

	unit = DISKUNIT(bp->b_dev);
	sc = device_lookup_private(&ld_cd, unit);
	dksc = &sc->sc_dksc;

	return dk_strategy(dksc, bp);
}

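/*
 * Queued transfer start routine, called by dk(9): pass buffers from
 * the bufq to the back-end until it is full or the queue is empty.
 */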
static void
ld_start(device_t dev)
{
	struct ld_softc *sc = device_private(dev);
	struct dk_softc *dksc = &sc->sc_dksc;
	struct buf *bp;
	int error;

	mutex_enter(&sc->sc_mutex);

	while (sc->sc_queuecnt < sc->sc_maxqueuecnt) {
		/* See if there is work to do. */
		if ((bp = bufq_peek(dksc->sc_bufq)) == NULL)
			break;

		disk_busy(&dksc->sc_dkdev);
		sc->sc_queuecnt++;

		if (__predict_true((error = (*sc->sc_start)(sc, bp)) == 0)) {
			/*
			 * The back-end is running the job; remove it from
			 * the queue.
			 */
			(void) bufq_get(dksc->sc_bufq);
		} else {
			disk_unbusy(&dksc->sc_dkdev, 0, (bp->b_flags & B_READ));
			sc->sc_queuecnt--;
			if (error == EAGAIN) {
				/*
				 * Temporary resource shortage in the
				 * back-end; just defer the job until
				 * later.
				 *
				 * XXX We might consider a watchdog timer
				 * XXX to make sure we are kicked into action.
				 */
				break;
			} else {
				(void) bufq_get(dksc->sc_bufq);
				bp->b_error = error;
				bp->b_resid = bp->b_bcount;
				mutex_exit(&sc->sc_mutex);
				biodone(bp);
				mutex_enter(&sc->sc_mutex);
			}
		}
	}

	mutex_exit(&sc->sc_mutex);
}

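/*
 * Called by the back-end when a transfer completes.  Hand the buffer
 * back to dk(9) and restart the queue if there is room again.
 */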
void
lddone(struct ld_softc *sc, struct buf *bp)
{
	struct dk_softc *dksc = &sc->sc_dksc;

	dk_done(dksc, bp);

	mutex_enter(&sc->sc_mutex);
	if (--sc->sc_queuecnt <= sc->sc_maxqueuecnt) {
		if ((sc->sc_flags & LDF_DRAIN) != 0) {
			sc->sc_flags &= ~LDF_DRAIN;
			wakeup(&sc->sc_queuecnt);
		}
		mutex_exit(&sc->sc_mutex);
		ld_start(dksc->sc_dev);
	} else
		mutex_exit(&sc->sc_mutex);
}

static int
ldsize(dev_t dev)
{
	struct ld_softc *sc;
	struct dk_softc *dksc;
	int unit;

	unit = DISKUNIT(dev);
	if ((sc = device_lookup_private(&ld_cd, unit)) == NULL)
		return (ENODEV);
	dksc = &sc->sc_dksc;

	if ((sc->sc_flags & LDF_ENABLED) == 0)
		return (ENODEV);

	return dk_size(dksc, dev);
}

/*
 * Take a dump.
 */
static int
lddump(dev_t dev, daddr_t blkno, void *va, size_t size)
{
	struct ld_softc *sc;
	struct dk_softc *dksc;
	int unit;

	unit = DISKUNIT(dev);
	if ((sc = device_lookup_private(&ld_cd, unit)) == NULL)
		return (ENXIO);
	dksc = &sc->sc_dksc;

	if ((sc->sc_flags & LDF_ENABLED) == 0)
		return (ENODEV);

	return dk_dump(dksc, dev, blkno, va, size);
}

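/*
 * Write blocks for a crash dump via the back-end's dump routine.
 */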
static int
ld_dumpblocks(device_t dev, void *va, daddr_t blkno, int nblk)
{
	struct ld_softc *sc = device_private(dev);

	if (sc->sc_dump == NULL)
		return (ENXIO);

	return (*sc->sc_dump)(sc, va, blkno, nblk);
}

/*
 * Adjust the size of a transfer.
 */
static void
ldminphys(struct buf *bp)
{
	int unit;
	struct ld_softc *sc;

	unit = DISKUNIT(bp->b_dev);
	sc = device_lookup_private(&ld_cd, unit);

	ld_iosize(sc->sc_dv, &bp->b_bcount);
	minphys(bp);
}

static void
ld_iosize(device_t d, int *countp)
{
	struct ld_softc *sc = device_private(d);

	if (*countp > sc->sc_maxxfer)
		*countp = sc->sc_maxxfer;
}

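/*
 * Invent a CHS geometry for volumes that do not report one.
 */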
static void
ld_fake_geometry(struct ld_softc *sc)
{
	uint64_t ncyl;

	if (sc->sc_secperunit <= 528 * 2048)		/* 528MB */
		sc->sc_nheads = 16;
	else if (sc->sc_secperunit <= 1024 * 2048)	/* 1GB */
		sc->sc_nheads = 32;
	else if (sc->sc_secperunit <= 21504 * 2048)	/* 21GB */
		sc->sc_nheads = 64;
	else if (sc->sc_secperunit <= 43008 * 2048)	/* 42GB */
		sc->sc_nheads = 128;
	else
		sc->sc_nheads = 255;

	sc->sc_nsectors = 63;
	sc->sc_ncylinders = INT_MAX;
	ncyl = sc->sc_secperunit /
	    (sc->sc_nheads * sc->sc_nsectors);
	if (ncyl < INT_MAX)
		sc->sc_ncylinders = (int)ncyl;
}

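/*
 * Announce the volume and pass its geometry to the disk(9) layer.
 */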
static void
ld_set_geometry(struct ld_softc *sc)
{
	struct dk_softc *dksc = &sc->sc_dksc;
	struct disk_geom *dg = &dksc->sc_dkdev.dk_geom;
	char tbuf[9];

	format_bytes(tbuf, sizeof(tbuf), sc->sc_secperunit *
	    sc->sc_secsize);
	aprint_normal_dev(dksc->sc_dev, "%s, %d cyl, %d head, %d sec, "
	    "%d bytes/sect x %"PRIu64" sectors\n",
	    tbuf, sc->sc_ncylinders, sc->sc_nheads,
	    sc->sc_nsectors, sc->sc_secsize, sc->sc_secperunit);

	memset(dg, 0, sizeof(*dg));
	dg->dg_secperunit = sc->sc_secperunit;
	dg->dg_secsize = sc->sc_secsize;
	dg->dg_nsectors = sc->sc_nsectors;
	dg->dg_ntracks = sc->sc_nheads;
	dg->dg_ncylinders = sc->sc_ncylinders;

	disk_set_info(dksc->sc_dev, &dksc->sc_dkdev, NULL);
}

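/*
 * Deferred until interrupts are enabled: discover wedges on this disk.
 */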
static void
ld_config_interrupts(device_t d)
{
	struct ld_softc *sc = device_private(d);
	struct dk_softc *dksc = &sc->sc_dksc;

	dkwedge_discover(&dksc->sc_dkdev);
}