     1 /*	$NetBSD: wd.c,v 1.438.2.2 2018/09/06 06:55:48 pgoyette Exp $ */
2
3 /*
4 * Copyright (c) 1998, 2001 Manuel Bouyer. All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 */
26
27 /*-
28 * Copyright (c) 1998, 2003, 2004 The NetBSD Foundation, Inc.
29 * All rights reserved.
30 *
31 * This code is derived from software contributed to The NetBSD Foundation
32 * by Charles M. Hannum and by Onno van der Linden.
33 *
34 * Redistribution and use in source and binary forms, with or without
35 * modification, are permitted provided that the following conditions
36 * are met:
37 * 1. Redistributions of source code must retain the above copyright
38 * notice, this list of conditions and the following disclaimer.
39 * 2. Redistributions in binary form must reproduce the above copyright
40 * notice, this list of conditions and the following disclaimer in the
41 * documentation and/or other materials provided with the distribution.
42 *
43 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
44 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
45 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
46 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
47 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
48 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
49 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
50 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
51 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
52 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
53 * POSSIBILITY OF SUCH DAMAGE.
54 */
55
56 #include <sys/cdefs.h>
57 __KERNEL_RCSID(0, "$NetBSD: wd.c,v 1.438.2.2 2018/09/06 06:55:48 pgoyette Exp $");
58
59 #include "opt_ata.h"
60 #include "opt_wd.h"
61
62 #include <sys/param.h>
63 #include <sys/systm.h>
64 #include <sys/kernel.h>
65 #include <sys/conf.h>
66 #include <sys/file.h>
67 #include <sys/stat.h>
68 #include <sys/ioctl.h>
69 #include <sys/buf.h>
70 #include <sys/bufq.h>
71 #include <sys/uio.h>
72 #include <sys/malloc.h>
73 #include <sys/device.h>
74 #include <sys/disklabel.h>
75 #include <sys/disk.h>
76 #include <sys/syslog.h>
77 #include <sys/proc.h>
78 #include <sys/reboot.h>
79 #include <sys/vnode.h>
80 #include <sys/rndsource.h>
81
82 #include <sys/intr.h>
83 #include <sys/bus.h>
84
85 #include <dev/ata/atareg.h>
86 #include <dev/ata/atavar.h>
87 #include <dev/ata/wdvar.h>
88 #include <dev/ic/wdcreg.h>
89 #include <sys/ataio.h>
90 #include "locators.h"
91
92 #include <prop/proplib.h>
93
94 #define WDIORETRIES_SINGLE 4 /* number of retries for single-sector */
95 #define WDIORETRIES 5 /* number of retries before giving up */
96 #define RECOVERYTIME hz/2 /* time to wait before retrying a cmd */
97
98 #define WDUNIT(dev) DISKUNIT(dev)
99 #define WDPART(dev) DISKPART(dev)
100 #define WDMINOR(unit, part) DISKMINOR(unit, part)
101 #define MAKEWDDEV(maj, unit, part) MAKEDISKDEV(maj, unit, part)
102
103 #define WDLABELDEV(dev) (MAKEWDDEV(major(dev), WDUNIT(dev), RAW_PART))
104
105 #define DEBUG_FUNCS 0x08
106 #define DEBUG_PROBE 0x10
107 #define DEBUG_DETACH 0x20
108 #define DEBUG_XFERS 0x40
109 #ifdef ATADEBUG
110 #ifndef ATADEBUG_WD_MASK
111 #define ATADEBUG_WD_MASK 0x0
112 #endif
113 int wdcdebug_wd_mask = ATADEBUG_WD_MASK;
114 #define ATADEBUG_PRINT(args, level) \
115 if (wdcdebug_wd_mask & (level)) \
116 printf args
117 #else
118 #define ATADEBUG_PRINT(args, level)
119 #endif
120
121 static int wdprobe(device_t, cfdata_t, void *);
122 static void wdattach(device_t, device_t, void *);
123 static int wddetach(device_t, int);
124 static void wdperror(const struct wd_softc *, struct ata_xfer *);
125
126 static void wdminphys(struct buf *);
127
128 static int wd_firstopen(device_t, dev_t, int, int);
129 static int wd_lastclose(device_t);
130 static bool wd_suspend(device_t, const pmf_qual_t *);
131 static int wd_standby(struct wd_softc *, int);
132
133 CFATTACH_DECL3_NEW(wd, sizeof(struct wd_softc),
134 wdprobe, wdattach, wddetach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);
135
136 extern struct cfdriver wd_cd;
137
138 static dev_type_open(wdopen);
139 static dev_type_close(wdclose);
140 static dev_type_read(wdread);
141 static dev_type_write(wdwrite);
142 static dev_type_ioctl(wdioctl);
143 static dev_type_strategy(wdstrategy);
144 static dev_type_dump(wddump);
145 static dev_type_size(wdsize);
146 static dev_type_discard(wddiscard);
147
148 const struct bdevsw wd_bdevsw = {
149 .d_open = wdopen,
150 .d_close = wdclose,
151 .d_strategy = wdstrategy,
152 .d_ioctl = wdioctl,
153 .d_dump = wddump,
154 .d_psize = wdsize,
155 .d_discard = wddiscard,
156 .d_flag = D_DISK
157 };
158
159 const struct cdevsw wd_cdevsw = {
160 .d_open = wdopen,
161 .d_close = wdclose,
162 .d_read = wdread,
163 .d_write = wdwrite,
164 .d_ioctl = wdioctl,
165 .d_stop = nostop,
166 .d_tty = notty,
167 .d_poll = nopoll,
168 .d_mmap = nommap,
169 .d_kqfilter = nokqfilter,
170 .d_discard = wddiscard,
171 .d_flag = D_DISK
172 };
173
174 /* #define WD_DUMP_NOT_TRUSTED if you just want to watch */
175 static int wddoingadump = 0;
176 static int wddumprecalibrated = 0;
177
178 /*
   179  * Glue necessary to hook ATAIOCCOMMAND into physio
180 */
181
182 struct wd_ioctl {
183 LIST_ENTRY(wd_ioctl) wi_list;
184 struct buf wi_bp;
185 struct uio wi_uio;
186 struct iovec wi_iov;
187 atareq_t wi_atareq;
188 struct wd_softc *wi_softc;
189 };
190
191 static struct wd_ioctl *wi_find(struct buf *);
192 static void wi_free(struct wd_ioctl *);
193 static struct wd_ioctl *wi_get(struct wd_softc *);
194 static void wdioctlstrategy(struct buf *);
195
196 static void wdstart(device_t);
197 static void wdstart1(struct wd_softc *, struct buf *, struct ata_xfer *);
198 static int wd_diskstart(device_t, struct buf *);
199 static int wd_dumpblocks(device_t, void *, daddr_t, int);
200 static void wd_iosize(device_t, int *);
201 static int wd_discard(device_t, off_t, off_t);
202 static void wdbiorestart(void *);
203 static void wddone(device_t, struct ata_xfer *);
204 static int wd_get_params(struct wd_softc *, uint8_t, struct ataparams *);
205 static void wd_set_geometry(struct wd_softc *);
206 static int wd_flushcache(struct wd_softc *, int, bool);
207 static int wd_trim(struct wd_softc *, daddr_t, long);
208 static bool wd_shutdown(device_t, int);
209
210 static int wd_getcache(struct wd_softc *, int *);
211 static int wd_setcache(struct wd_softc *, int);
212
213 static void wd_sysctl_attach(struct wd_softc *);
214 static void wd_sysctl_detach(struct wd_softc *);
215
216 struct dkdriver wddkdriver = {
217 .d_open = wdopen,
218 .d_close = wdclose,
219 .d_strategy = wdstrategy,
220 .d_minphys = wdminphys,
221 .d_diskstart = wd_diskstart,
222 .d_dumpblocks = wd_dumpblocks,
223 .d_iosize = wd_iosize,
224 .d_firstopen = wd_firstopen,
225 .d_lastclose = wd_lastclose,
226 .d_discard = wd_discard
227 };
228
229 #ifdef HAS_BAD144_HANDLING
230 static void bad144intern(struct wd_softc *);
231 #endif
232
233 #define WD_QUIRK_SPLIT_MOD15_WRITE 0x0001 /* must split certain writes */
234
235 #define WD_QUIRK_FMT "\20\1SPLIT_MOD15_WRITE\2FORCE_LBA48"
236
237 /*
238 * Quirk table for IDE drives. Put more-specific matches first, since
   239  * a simple globbing routine is used for matching.
240 */
241 static const struct wd_quirk {
242 const char *wdq_match; /* inquiry pattern to match */
243 int wdq_quirks; /* drive quirks */
244 } wd_quirk_table[] = {
245 /*
246 * Some Seagate S-ATA drives have a PHY which can get confused
247 * with the way data is packetized by some S-ATA controllers.
248 *
249 * The work-around is to split in two any write transfer whose
250 * sector count % 15 == 1 (assuming 512 byte sectors).
251 *
252 * XXX This is an incomplete list. There are at least a couple
253 * XXX more model numbers. If you have trouble with such transfers
254 * XXX (8K is the most common) on Seagate S-ATA drives, please
255 * XXX notify thorpej (at) NetBSD.org.
256 *
257 * The ST360015AS has not yet been confirmed to have this
   258  * issue; however, it is the only other drive in the
259 * Seagate Barracuda Serial ATA V family.
260 *
261 */
262 { "ST3120023AS",
263 WD_QUIRK_SPLIT_MOD15_WRITE },
264 { "ST380023AS",
265 WD_QUIRK_SPLIT_MOD15_WRITE },
266 { "ST360015AS",
267 WD_QUIRK_SPLIT_MOD15_WRITE },
268 { NULL,
269 0 }
270 };
271
272 static const struct wd_quirk *
273 wd_lookup_quirks(const char *name)
274 {
275 const struct wd_quirk *wdq;
276 const char *estr;
277
278 for (wdq = wd_quirk_table; wdq->wdq_match != NULL; wdq++) {
279 /*
280 * We only want exact matches (which include matches
281 * against globbing characters).
282 */
283 if (pmatch(name, wdq->wdq_match, &estr) == 2)
284 return (wdq);
285 }
286 return (NULL);
287 }
288
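/*
 * Autoconfiguration match routine: accept any ATA drive whose "drive"
 * locator is either wildcarded or matches the drive being probed.
 */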
289 static int
290 wdprobe(device_t parent, cfdata_t match, void *aux)
291 {
292 struct ata_device *adev = aux;
293
294 if (adev == NULL)
295 return 0;
296 if (adev->adev_bustype->bustype_type != SCSIPI_BUSTYPE_ATA)
297 return 0;
298
299 if (match->cf_loc[ATA_HLCF_DRIVE] != ATA_HLCF_DRIVE_DEFAULT &&
300 match->cf_loc[ATA_HLCF_DRIVE] != adev->adev_drv_data->drive)
301 return 0;
302 return 1;
303 }
304
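/*
 * Attach routine: IDENTIFY the drive, apply quirks, work out the
 * addressing mode (CHS/LBA/LBA48), block size and capacity, then
 * register with the dk(9)/disk(9) layers and discover wedges.
 */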
305 static void
306 wdattach(device_t parent, device_t self, void *aux)
307 {
308 struct wd_softc *wd = device_private(self);
309 struct dk_softc *dksc = &wd->sc_dksc;
310 struct ata_device *adev= aux;
311 int i, blank;
312 char tbuf[41], pbuf[9], c, *p, *q;
313 const struct wd_quirk *wdq;
314 int dtype = DKTYPE_UNKNOWN;
315
316 dksc->sc_dev = self;
317
318 ATADEBUG_PRINT(("wdattach\n"), DEBUG_FUNCS | DEBUG_PROBE);
319 mutex_init(&wd->sc_lock, MUTEX_DEFAULT, IPL_BIO);
320 #ifdef WD_SOFTBADSECT
321 SLIST_INIT(&wd->sc_bslist);
322 #endif
323 wd->atabus = adev->adev_bustype;
324 wd->drvp = adev->adev_drv_data;
325
326 wd->drvp->drv_openings = 1;
327 wd->drvp->drv_start = wdstart;
328 wd->drvp->drv_done = wddone;
329 wd->drvp->drv_softc = dksc->sc_dev; /* done in atabusconfig_thread()
330 but too late */
331
332 aprint_naive("\n");
333 aprint_normal("\n");
334
335 /* read our drive info */
336 if (wd_get_params(wd, AT_WAIT, &wd->sc_params) != 0) {
337 aprint_error_dev(self, "IDENTIFY failed\n");
338 goto out;
339 }
340
341 for (blank = 0, p = wd->sc_params.atap_model, q = tbuf, i = 0;
342 i < sizeof(wd->sc_params.atap_model); i++) {
343 c = *p++;
344 if (c == '\0')
345 break;
346 if (c != ' ') {
347 if (blank) {
348 *q++ = ' ';
349 blank = 0;
350 }
351 *q++ = c;
352 } else
353 blank = 1;
354 }
355 *q++ = '\0';
356
357 aprint_normal_dev(self, "<%s>\n", tbuf);
358
359 wdq = wd_lookup_quirks(tbuf);
360 if (wdq != NULL)
361 wd->sc_quirks = wdq->wdq_quirks;
362
363 if (wd->sc_quirks != 0) {
364 char sbuf[sizeof(WD_QUIRK_FMT) + 64];
365 snprintb(sbuf, sizeof(sbuf), WD_QUIRK_FMT, wd->sc_quirks);
366 aprint_normal_dev(self, "quirks %s\n", sbuf);
367
368 if (wd->sc_quirks & WD_QUIRK_SPLIT_MOD15_WRITE) {
369 aprint_error_dev(self, "drive corrupts write transfers with certain controllers, consider replacing\n");
370 }
371 }
372
373 if ((wd->sc_params.atap_multi & 0xff) > 1) {
374 wd->drvp->multi = wd->sc_params.atap_multi & 0xff;
375 } else {
376 wd->drvp->multi = 1;
377 }
378
379 aprint_verbose_dev(self, "drive supports %d-sector PIO transfers,",
380 wd->drvp->multi);
381
382 /* 48-bit LBA addressing */
383 if ((wd->sc_params.atap_cmd2_en & ATA_CMD2_LBA48) != 0)
384 wd->sc_flags |= WDF_LBA48;
385
386 /* Prior to ATA-4, LBA was optional. */
387 if ((wd->sc_params.atap_capabilities1 & WDC_CAP_LBA) != 0)
388 wd->sc_flags |= WDF_LBA;
389 #if 0
390 /* ATA-4 requires LBA. */
391 if (wd->sc_params.atap_ataversion != 0xffff &&
392 wd->sc_params.atap_ataversion >= WDC_VER_ATA4)
393 wd->sc_flags |= WDF_LBA;
394 #endif
395
396 if ((wd->sc_flags & WDF_LBA48) != 0) {
397 aprint_verbose(" LBA48 addressing\n");
398 wd->sc_capacity =
399 ((uint64_t) wd->sc_params.atap_max_lba[3] << 48) |
400 ((uint64_t) wd->sc_params.atap_max_lba[2] << 32) |
401 ((uint64_t) wd->sc_params.atap_max_lba[1] << 16) |
402 ((uint64_t) wd->sc_params.atap_max_lba[0] << 0);
403 wd->sc_capacity28 =
404 (wd->sc_params.atap_capacity[1] << 16) |
405 wd->sc_params.atap_capacity[0];
406 } else if ((wd->sc_flags & WDF_LBA) != 0) {
407 aprint_verbose(" LBA addressing\n");
408 wd->sc_capacity28 = wd->sc_capacity =
409 (wd->sc_params.atap_capacity[1] << 16) |
410 wd->sc_params.atap_capacity[0];
411 } else {
412 aprint_verbose(" chs addressing\n");
413 wd->sc_capacity28 = wd->sc_capacity =
414 wd->sc_params.atap_cylinders *
415 wd->sc_params.atap_heads *
416 wd->sc_params.atap_sectors;
417 }
418 if ((wd->sc_params.atap_secsz & ATA_SECSZ_VALID_MASK) == ATA_SECSZ_VALID
419 && ((wd->sc_params.atap_secsz & ATA_SECSZ_LLS) != 0)) {
420 wd->sc_blksize = 2ULL *
421 ((uint32_t)((wd->sc_params.atap_lls_secsz[1] << 16) |
422 wd->sc_params.atap_lls_secsz[0]));
423 } else {
424 wd->sc_blksize = 512;
425 }
426 wd->sc_capacity512 = (wd->sc_capacity * wd->sc_blksize) / DEV_BSIZE;
427 format_bytes(pbuf, sizeof(pbuf), wd->sc_capacity * wd->sc_blksize);
428 aprint_normal_dev(self, "%s, %d cyl, %d head, %d sec, "
429 "%d bytes/sect x %llu sectors\n",
430 pbuf,
431 (wd->sc_flags & WDF_LBA) ? (int)(wd->sc_capacity /
432 (wd->sc_params.atap_heads * wd->sc_params.atap_sectors)) :
433 wd->sc_params.atap_cylinders,
434 wd->sc_params.atap_heads, wd->sc_params.atap_sectors,
435 wd->sc_blksize, (unsigned long long)wd->sc_capacity);
436
437 ATADEBUG_PRINT(("%s: atap_dmatiming_mimi=%d, atap_dmatiming_recom=%d\n",
438 device_xname(self), wd->sc_params.atap_dmatiming_mimi,
439 wd->sc_params.atap_dmatiming_recom), DEBUG_PROBE);
440
441 if (wd->sc_blksize <= 0 || !powerof2(wd->sc_blksize) ||
442 wd->sc_blksize < DEV_BSIZE || wd->sc_blksize > MAXPHYS) {
443 aprint_normal_dev(self, "WARNING: block size %u "
444 "might not actually work\n", wd->sc_blksize);
445 }
446
447 if (strcmp(wd->sc_params.atap_model, "ST506") == 0)
448 dtype = DKTYPE_ST506;
449 else
450 dtype = DKTYPE_ESDI;
451
452 out:
453 /*
454 * Initialize and attach the disk structure.
455 */
456 dk_init(dksc, self, dtype);
457 disk_init(&dksc->sc_dkdev, dksc->sc_xname, &wddkdriver);
458
459 /* Attach dk and disk subsystems */
460 dk_attach(dksc);
461 disk_attach(&dksc->sc_dkdev);
462 wd_set_geometry(wd);
463
464 bufq_alloc(&dksc->sc_bufq, BUFQ_DISK_DEFAULT_STRAT, BUFQ_SORT_RAWBLOCK);
465
466 /* reference to label structure, used by ata code */
467 wd->drvp->lp = dksc->sc_dkdev.dk_label;
468
469 /* Discover wedges on this disk. */
470 dkwedge_discover(&dksc->sc_dkdev);
471
472 if (!pmf_device_register1(self, wd_suspend, NULL, wd_shutdown))
473 aprint_error_dev(self, "couldn't establish power handler\n");
474
475 wd_sysctl_attach(wd);
476 }
477
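/*
 * pmf(9) suspend handler: flush the write cache and spin the drive
 * down before the system sleeps.
 */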
478 static bool
479 wd_suspend(device_t dv, const pmf_qual_t *qual)
480 {
481 struct wd_softc *sc = device_private(dv);
482
483 /* the adapter needs to be enabled */
484 if (sc->atabus->ata_addref(sc->drvp))
485 return true; /* no need to complain */
486
487 wd_flushcache(sc, AT_WAIT, false);
488 wd_standby(sc, AT_WAIT);
489
490 sc->atabus->ata_delref(sc->drvp);
491 return true;
492 }
493
494 static int
495 wddetach(device_t self, int flags)
496 {
497 struct wd_softc *wd = device_private(self);
498 struct dk_softc *dksc = &wd->sc_dksc;
499 int bmaj, cmaj, i, mn, rc;
500
501 if ((rc = disk_begindetach(&dksc->sc_dkdev, wd_lastclose, self, flags)) != 0)
502 return rc;
503
504 /* locate the major number */
505 bmaj = bdevsw_lookup_major(&wd_bdevsw);
506 cmaj = cdevsw_lookup_major(&wd_cdevsw);
507
508 /* Nuke the vnodes for any open instances. */
509 for (i = 0; i < MAXPARTITIONS; i++) {
510 mn = WDMINOR(device_unit(self), i);
511 vdevgone(bmaj, mn, mn, VBLK);
512 vdevgone(cmaj, mn, mn, VCHR);
513 }
514
515 dk_drain(dksc);
516
517 /* Kill off any pending commands. */
518 mutex_enter(&wd->sc_lock);
519 wd->atabus->ata_killpending(wd->drvp);
520 mutex_exit(&wd->sc_lock);
521
522 bufq_free(dksc->sc_bufq);
523
524 if (flags & DETACH_POWEROFF)
525 wd_standby(wd, AT_POLL);
526
527 /* Delete all of our wedges. */
528 dkwedge_delall(&dksc->sc_dkdev);
529
530 /* Detach from the disk list. */
531 disk_detach(&dksc->sc_dkdev);
532 disk_destroy(&dksc->sc_dkdev);
533
534 dk_detach(dksc);
535
536 #ifdef WD_SOFTBADSECT
537 /* Clean out the bad sector list */
538 while (!SLIST_EMPTY(&wd->sc_bslist)) {
539 void *head = SLIST_FIRST(&wd->sc_bslist);
540 SLIST_REMOVE_HEAD(&wd->sc_bslist, dbs_next);
541 free(head, M_TEMP);
542 }
543 wd->sc_bscount = 0;
544 #endif
545
546 pmf_device_deregister(self);
547
548 wd_sysctl_detach(wd);
549
550 mutex_destroy(&wd->sc_lock);
551
552 wd->drvp->drive_type = ATA_DRIVET_NONE; /* no drive any more here */
553 wd->drvp->drive_flags = 0;
554
555 return (0);
556 }
557
558 /*
559 * Read/write routine for a buffer. Validates the arguments and schedules the
560 * transfer. Does not wait for the transfer to complete.
561 */
562 static void
563 wdstrategy(struct buf *bp)
564 {
565 struct wd_softc *wd =
566 device_lookup_private(&wd_cd, WDUNIT(bp->b_dev));
567 struct dk_softc *dksc = &wd->sc_dksc;
568
569 ATADEBUG_PRINT(("wdstrategy (%s)\n", dksc->sc_xname),
570 DEBUG_XFERS);
571
   572 	/* If the device has been invalidated (e.g. media change, door
   573 	 * open, device detachment), then fail the transfer.
574 */
575 if ((wd->sc_flags & WDF_LOADED) == 0 ||
576 !device_is_enabled(dksc->sc_dev))
577 goto err;
578
579 #ifdef WD_SOFTBADSECT
580 /*
   581 	 * If the transfer about to be attempted contains only blocks that
   582 	 * are known to be bad, return an error for the transfer without
   583 	 * even attempting to start it, under the premise that we would
   584 	 * just end up doing more retries for a transfer that will end
   585 	 * up failing again.
586 */
587 if (__predict_false(!SLIST_EMPTY(&wd->sc_bslist))) {
588 struct disklabel *lp = dksc->sc_dkdev.dk_label;
589 struct disk_badsectors *dbs;
590 daddr_t blkno, maxblk;
591
592 /* convert the block number to absolute */
593 if (lp->d_secsize >= DEV_BSIZE)
594 blkno = bp->b_blkno / (lp->d_secsize / DEV_BSIZE);
595 else
596 blkno = bp->b_blkno * (DEV_BSIZE / lp->d_secsize);
597 if (WDPART(bp->b_dev) != RAW_PART)
598 blkno += lp->d_partitions[WDPART(bp->b_dev)].p_offset;
599 maxblk = blkno + (bp->b_bcount / wd->sc_blksize) - 1;
600
601 mutex_enter(&wd->sc_lock);
602 SLIST_FOREACH(dbs, &wd->sc_bslist, dbs_next)
603 if ((dbs->dbs_min <= bp->b_rawblkno &&
604 bp->b_rawblkno <= dbs->dbs_max) ||
605 (dbs->dbs_min <= maxblk && maxblk <= dbs->dbs_max)){
606 mutex_exit(&wd->sc_lock);
607 goto err;
608 }
609 mutex_exit(&wd->sc_lock);
610 }
611 #endif
612
613 dk_strategy(dksc, bp);
614 return;
615
616 err:
617 bp->b_error = EIO;
618 bp->b_resid = bp->b_bcount;
619 biodone(bp);
620 }
621
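/*
 * Set up and issue one bio transfer.  Called with sc_lock held; fills
 * in xfer->c_bio from the buf and hands the xfer to the bus driver.
 */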
622 static void
623 wdstart1(struct wd_softc *wd, struct buf *bp, struct ata_xfer *xfer)
624 {
625 struct dk_softc *dksc = &wd->sc_dksc;
626
627 KASSERT(bp == xfer->c_bio.bp || xfer->c_bio.bp == NULL);
628 KASSERT((xfer->c_flags & (C_WAITACT|C_FREE)) == 0);
629
630 /* Reset state, so that retries don't use stale info */
631 if (__predict_false(xfer->c_retries > 0)) {
632 xfer->c_flags = 0;
633 memset(&xfer->c_bio, 0, sizeof(xfer->c_bio));
634 }
635
636 xfer->c_bio.blkno = bp->b_rawblkno;
637 xfer->c_bio.bcount = bp->b_bcount;
638 xfer->c_bio.databuf = bp->b_data;
639 xfer->c_bio.blkdone = 0;
640 xfer->c_bio.bp = bp;
641
642 #ifdef WD_CHAOS_MONKEY
643 /*
   644 	 * Override blkno to be past the device capacity to trigger an error,
   645 	 * but only for reads, to avoid trashing disk contents should
646 * the command be clipped, or otherwise misinterpreted, by the
647 * driver or controller.
648 */
649 if (BUF_ISREAD(bp) && xfer->c_retries == 0 && wd->drv_chaos_freq > 0 &&
650 (++wd->drv_chaos_cnt % wd->drv_chaos_freq) == 0) {
651 aprint_normal_dev(dksc->sc_dev, "%s: chaos xfer %d\n",
652 __func__, xfer->c_slot);
653 xfer->c_bio.blkno = 7777777 + wd->sc_capacity;
654 xfer->c_flags |= C_CHAOS;
655 }
656 #endif
657
658 /*
659 * If we're retrying, retry in single-sector mode. This will give us
660 * the sector number of the problem, and will eventually allow the
661 * transfer to succeed. If FUA is requested, we can't actually
   662 	 * do this, as ATA_SINGLE is usually executed as a PIO transfer by
   663 	 * drivers which support it, and that isn't compatible with NCQ/FUA.
664 */
665 if (xfer->c_retries >= WDIORETRIES_SINGLE &&
666 (bp->b_flags & B_MEDIA_FUA) == 0)
667 xfer->c_bio.flags = ATA_SINGLE;
668 else
669 xfer->c_bio.flags = 0;
670
671 /*
672 * request LBA48 transfers when supported by the controller
673 * and needed by transfer offset or size.
674 */
675 if (wd->sc_flags & WDF_LBA48 &&
676 (((xfer->c_bio.blkno +
677 xfer->c_bio.bcount / dksc->sc_dkdev.dk_geom.dg_secsize) >
678 wd->sc_capacity28) ||
679 ((xfer->c_bio.bcount / dksc->sc_dkdev.dk_geom.dg_secsize) > 128)))
680 xfer->c_bio.flags |= ATA_LBA48;
681
682 /*
683 * If NCQ was negotiated, always use it for the first several attempts.
   684 	 * Since the device cancels all outstanding requests on error, downgrade
   685 	 * to non-NCQ on retry, so that the retried transfer does not cause
   686 	 * a cascading failure for the other transfers if it fails again.
687 * If FUA was requested, we can't downgrade, as that would violate
688 * the semantics - FUA would not be honored. In that case, continue
689 * retrying with NCQ.
690 */
691 if (WD_USE_NCQ(wd) && (xfer->c_retries < WDIORETRIES_SINGLE ||
692 (bp->b_flags & B_MEDIA_FUA) != 0)) {
693 xfer->c_bio.flags |= ATA_LBA48;
694 xfer->c_flags |= C_NCQ;
695
696 if (WD_USE_NCQ_PRIO(wd) &&
697 BIO_GETPRIO(bp) == BPRIO_TIMECRITICAL)
698 xfer->c_bio.flags |= ATA_PRIO_HIGH;
699 }
700
701 if (wd->sc_flags & WDF_LBA)
702 xfer->c_bio.flags |= ATA_LBA;
703 if (bp->b_flags & B_READ)
704 xfer->c_bio.flags |= ATA_READ;
705 if (bp->b_flags & B_MEDIA_FUA) {
706 /* If not using NCQ, the command WRITE DMA FUA EXT is LBA48 */
707 KASSERT((wd->sc_flags & WDF_LBA48) != 0);
708 if ((xfer->c_flags & C_NCQ) == 0)
709 xfer->c_bio.flags |= ATA_LBA48;
710
711 xfer->c_bio.flags |= ATA_FUA;
712 }
713
714 switch (wd->atabus->ata_bio(wd->drvp, xfer)) {
715 case ATACMD_TRY_AGAIN:
716 panic("wdstart1: try again");
717 break;
718 case ATACMD_QUEUED:
719 case ATACMD_COMPLETE:
720 break;
721 default:
722 panic("wdstart1: bad return code from ata_bio()");
723 }
724 }
725
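/*
 * dk(9) d_diskstart callback: grab an xfer slot (limited by the NCQ
 * openings when NCQ is in use) and start the transfer, or return
 * EAGAIN so the request stays on the bufq.
 */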
726 static int
727 wd_diskstart(device_t dev, struct buf *bp)
728 {
729 struct wd_softc *wd = device_private(dev);
730 #ifdef ATADEBUG
731 struct dk_softc *dksc = &wd->sc_dksc;
732 #endif
733 struct ata_xfer *xfer;
734
735 mutex_enter(&wd->sc_lock);
736
737 xfer = ata_get_xfer_ext(wd->drvp->chnl_softc, 0,
738 WD_USE_NCQ(wd) ? WD_MAX_OPENINGS(wd) : 0);
739 if (xfer == NULL) {
740 ATADEBUG_PRINT(("wd_diskstart %s no xfer\n",
741 dksc->sc_xname), DEBUG_XFERS);
742 mutex_exit(&wd->sc_lock);
743 return EAGAIN;
744 }
745
746 wdstart1(wd, bp, xfer);
747
748 mutex_exit(&wd->sc_lock);
749
750 return 0;
751 }
752
753 /*
754 * Queue a drive for I/O.
755 */
756 static void
757 wdstart(device_t self)
758 {
759 struct wd_softc *wd = device_private(self);
760 struct dk_softc *dksc = &wd->sc_dksc;
761
762 ATADEBUG_PRINT(("wdstart %s\n", dksc->sc_xname),
763 DEBUG_XFERS);
764
765 if (!device_is_active(dksc->sc_dev))
766 return;
767
768 mutex_enter(&wd->sc_lock);
769
770 /*
   771 	 * Do not queue any transfers until the flush is finished, so that
   772 	 * once a flush is pending, it will be handled as soon as an xfer
   773 	 * is available.
774 */
775 if (ISSET(wd->sc_flags, WDF_FLUSH_PEND)) {
776 ATADEBUG_PRINT(("wdstart %s flush pend\n",
777 dksc->sc_xname), DEBUG_XFERS);
778 mutex_exit(&wd->sc_lock);
779 return;
780 }
781
782 mutex_exit(&wd->sc_lock);
783
784 dk_start(dksc, NULL);
785 }
786
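/*
 * Transfer completion handler: classify the error, retry with a reset
 * and eventual single-sector fallback, optionally record soft bad
 * sectors, and finally hand the buf back to the dk(9) layer.
 */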
787 static void
788 wddone(device_t self, struct ata_xfer *xfer)
789 {
790 struct wd_softc *wd = device_private(self);
791 struct dk_softc *dksc = &wd->sc_dksc;
792 const char *errmsg;
793 int do_perror = 0;
794 struct buf *bp;
795
796 ATADEBUG_PRINT(("wddone %s\n", dksc->sc_xname),
797 DEBUG_XFERS);
798
799 if (__predict_false(wddoingadump)) {
800 /* just drop it to the floor */
801 ata_free_xfer(wd->drvp->chnl_softc, xfer);
802 return;
803 }
804
805 bp = xfer->c_bio.bp;
806 KASSERT(bp != NULL);
807
808 bp->b_resid = xfer->c_bio.bcount;
809 switch (xfer->c_bio.error) {
810 case ERR_DMA:
811 errmsg = "DMA error";
812 goto retry;
813 case ERR_DF:
814 errmsg = "device fault";
815 goto retry;
816 case TIMEOUT:
817 errmsg = "device timeout";
818 goto retry;
819 case REQUEUE:
820 errmsg = "requeue";
821 goto retry2;
822 case ERR_RESET:
823 errmsg = "channel reset";
824 goto retry2;
825 case ERROR:
826 /* Don't care about media change bits */
827 if (xfer->c_bio.r_error != 0 &&
828 (xfer->c_bio.r_error & ~(WDCE_MC | WDCE_MCR)) == 0)
829 goto noerror;
830 errmsg = "error";
831 do_perror = 1;
   832 retry:		/* Just reset and retry. Can we do more? */
833 if ((xfer->c_flags & C_RECOVERED) == 0) {
834 int wflags = (xfer->c_flags & C_POLL) ? AT_POLL : 0;
835 (*wd->atabus->ata_reset_drive)(wd->drvp, wflags, NULL);
836 }
837 retry2:
838 mutex_enter(&wd->sc_lock);
839
840 diskerr(bp, "wd", errmsg, LOG_PRINTF,
841 xfer->c_bio.blkdone, dksc->sc_dkdev.dk_label);
842 if (xfer->c_retries < WDIORETRIES)
843 printf(", slot %d, retry %d", xfer->c_slot,
844 xfer->c_retries + 1);
845 printf("\n");
846 if (do_perror)
847 wdperror(wd, xfer);
848
849 if (xfer->c_retries < WDIORETRIES) {
850 xfer->c_retries++;
851
852 /* Rerun ASAP if just requeued */
853 callout_reset(&xfer->c_retry_callout,
854 (xfer->c_bio.error == REQUEUE) ? 1 : RECOVERYTIME,
855 wdbiorestart, xfer);
856
857 mutex_exit(&wd->sc_lock);
858 return;
859 }
860
861 mutex_exit(&wd->sc_lock);
862
863 #ifdef WD_SOFTBADSECT
864 /*
   865 	 * Not all errors indicate a failed block, but for those that do,
   866 	 * put the block on the bad-block list for the device.  Only
   867 	 * do this for reads, because the drive should do it itself for
   868 	 * writes, according to Manuel.
869 */
870 if ((bp->b_flags & B_READ) &&
871 ((wd->drvp->ata_vers >= 4 && xfer->c_bio.r_error & 64) ||
872 (wd->drvp->ata_vers < 4 && xfer->c_bio.r_error & 192))) {
873 struct disk_badsectors *dbs;
874
875 dbs = malloc(sizeof *dbs, M_TEMP, M_NOWAIT);
876 if (dbs == NULL) {
877 aprint_error_dev(dksc->sc_dev,
878 "failed to add bad block to list\n");
879 goto out;
880 }
881
882 dbs->dbs_min = bp->b_rawblkno;
883 dbs->dbs_max = dbs->dbs_min +
884 (bp->b_bcount /wd->sc_blksize) - 1;
885 microtime(&dbs->dbs_failedat);
886
887 mutex_enter(&wd->sc_lock);
888 SLIST_INSERT_HEAD(&wd->sc_bslist, dbs, dbs_next);
889 wd->sc_bscount++;
890 mutex_exit(&wd->sc_lock);
891 }
892 out:
893 #endif
894 bp->b_error = EIO;
895 break;
896 case NOERROR:
897 noerror: if ((xfer->c_bio.flags & ATA_CORR) || xfer->c_retries > 0)
898 aprint_error_dev(dksc->sc_dev,
899 "soft error (corrected) slot %d\n", xfer->c_slot);
900 #ifdef WD_CHAOS_MONKEY
901 KASSERT((xfer->c_flags & C_CHAOS) == 0);
902 #endif
903 break;
904 case ERR_NODEV:
905 bp->b_error = EIO;
906 break;
907 }
908 if (__predict_false(bp->b_error != 0) && bp->b_resid == 0) {
909 /*
   910 		 * The disk or controller sometimes reports a complete
   911 		 * xfer when there has been an error.  This is wrong;
   912 		 * assume nothing got transferred in this case.
913 */
914 bp->b_resid = bp->b_bcount;
915 }
916
917 ata_free_xfer(wd->drvp->chnl_softc, xfer);
918
919 dk_done(dksc, bp);
920 ata_channel_start(wd->drvp->chnl_softc, wd->drvp->drive, true);
921 }
922
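/* Callout handler used to re-issue a transfer that is being retried. */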
923 static void
924 wdbiorestart(void *v)
925 {
926 struct ata_xfer *xfer = v;
927 struct buf *bp = xfer->c_bio.bp;
928 struct wd_softc *wd = device_lookup_private(&wd_cd, WDUNIT(bp->b_dev));
929 #ifdef ATADEBUG
930 struct dk_softc *dksc = &wd->sc_dksc;
931 #endif
932
933 ATADEBUG_PRINT(("wdbiorestart %s\n", dksc->sc_xname),
934 DEBUG_XFERS);
935
936 mutex_enter(&wd->sc_lock);
937 wdstart1(wd, bp, xfer);
938 mutex_exit(&wd->sc_lock);
939 }
940
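/*
 * Clamp a transfer to the per-command sector count limit, then apply
 * the generic minphys() clamp.
 */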
941 static void
942 wdminphys(struct buf *bp)
943 {
944 const struct wd_softc * const wd =
945 device_lookup_private(&wd_cd, WDUNIT(bp->b_dev));
946 int maxsectors;
947
948 /*
949 * The limit is actually 65536 for LBA48 and 256 for non-LBA48,
   950 	 * but that requires setting the count for the ATA command
   951 	 * to 0, which is somewhat error prone, so better to stay safe.
952 */
953 if (wd->sc_flags & WDF_LBA48)
954 maxsectors = 65535;
955 else
956 maxsectors = 128;
957
958 if (bp->b_bcount > (wd->sc_blksize * maxsectors))
959 bp->b_bcount = (wd->sc_blksize * maxsectors);
960
961 minphys(bp);
962 }
963
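/*
 * dk(9) d_iosize callback: report the maximum transfer size by running
 * a dummy buf through wdminphys().
 */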
964 static void
965 wd_iosize(device_t dev, int *count)
966 {
967 struct buf B;
968 int bmaj;
969
970 bmaj = bdevsw_lookup_major(&wd_bdevsw);
971 B.b_dev = MAKEWDDEV(bmaj,device_unit(dev),RAW_PART);
972 B.b_bcount = *count;
973
974 wdminphys(&B);
975
976 *count = B.b_bcount;
977 }
978
979 static int
980 wdread(dev_t dev, struct uio *uio, int flags)
981 {
982
983 ATADEBUG_PRINT(("wdread\n"), DEBUG_XFERS);
984 return (physio(wdstrategy, NULL, dev, B_READ, wdminphys, uio));
985 }
986
987 static int
988 wdwrite(dev_t dev, struct uio *uio, int flags)
989 {
990
991 ATADEBUG_PRINT(("wdwrite\n"), DEBUG_XFERS);
992 return (physio(wdstrategy, NULL, dev, B_WRITE, wdminphys, uio));
993 }
994
995 static int
996 wdopen(dev_t dev, int flag, int fmt, struct lwp *l)
997 {
998 struct wd_softc *wd;
999 struct dk_softc *dksc;
1000 int unit, part, error;
1001
1002 ATADEBUG_PRINT(("wdopen\n"), DEBUG_FUNCS);
1003 unit = WDUNIT(dev);
1004 wd = device_lookup_private(&wd_cd, unit);
1005 if (wd == NULL)
1006 return (ENXIO);
1007 dksc = &wd->sc_dksc;
1008
1009 if (! device_is_active(dksc->sc_dev))
1010 return (ENODEV);
1011
1012 part = WDPART(dev);
1013
1014 if (wd->sc_capacity == 0)
1015 return (ENODEV);
1016
1017 /*
1018 * If any partition is open, but the disk has been invalidated,
1019 * disallow further opens.
1020 */
1021 if ((wd->sc_flags & (WDF_OPEN | WDF_LOADED)) == WDF_OPEN) {
1022 if (part != RAW_PART || fmt != S_IFCHR)
1023 return EIO;
1024 }
1025
1026 error = dk_open(dksc, dev, flag, fmt, l);
1027
1028 return error;
1029 }
1030
1031 /*
1032 * Serialized by caller
1033 */
1034 static int
1035 wd_firstopen(device_t self, dev_t dev, int flag, int fmt)
1036 {
1037 struct wd_softc *wd = device_private(self);
1038 struct dk_softc *dksc = &wd->sc_dksc;
1039 int error;
1040
1041 error = wd->atabus->ata_addref(wd->drvp);
1042 if (error)
1043 return error;
1044
1045 if ((wd->sc_flags & WDF_LOADED) == 0) {
1046 int param_error;
1047
1048 /* Load the physical device parameters. */
1049 param_error = wd_get_params(wd, AT_WAIT, &wd->sc_params);
1050 if (param_error != 0) {
1051 aprint_error_dev(dksc->sc_dev, "IDENTIFY failed\n");
1052 error = EIO;
1053 goto bad;
1054 }
1055 wd_set_geometry(wd);
1056 wd->sc_flags |= WDF_LOADED;
1057 }
1058
1059 wd->sc_flags |= WDF_OPEN;
1060 return 0;
1061
1062 bad:
1063 wd->atabus->ata_delref(wd->drvp);
1064 return error;
1065 }
1066
1067 /*
1068 * Caller must hold wd->sc_dk.dk_openlock.
1069 */
1070 static int
1071 wd_lastclose(device_t self)
1072 {
1073 struct wd_softc *wd = device_private(self);
1074
1075 KASSERTMSG(bufq_peek(wd->sc_dksc.sc_bufq) == NULL, "bufq not empty");
1076
1077 wd_flushcache(wd, AT_WAIT, false);
1078
1079 wd->atabus->ata_delref(wd->drvp);
1080 wd->sc_flags &= ~WDF_OPEN;
1081
1082 return 0;
1083 }
1084
1085 static int
1086 wdclose(dev_t dev, int flag, int fmt, struct lwp *l)
1087 {
1088 struct wd_softc *wd;
1089 struct dk_softc *dksc;
1090 int unit;
1091
1092 unit = WDUNIT(dev);
1093 wd = device_lookup_private(&wd_cd, unit);
1094 dksc = &wd->sc_dksc;
1095
1096 return dk_close(dksc, dev, flag, fmt, l);
1097 }
1098
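/* Decode and print the ATA error register bits for a failed transfer. */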
1099 void
1100 wdperror(const struct wd_softc *wd, struct ata_xfer *xfer)
1101 {
1102 static const char *const errstr0_3[] = {"address mark not found",
1103 "track 0 not found", "aborted command", "media change requested",
1104 "id not found", "media changed", "uncorrectable data error",
1105 "bad block detected"};
1106 static const char *const errstr4_5[] = {
1107 "obsolete (address mark not found)",
1108 "no media/write protected", "aborted command",
1109 "media change requested", "id not found", "media changed",
1110 "uncorrectable data error", "interface CRC error"};
1111 const char *const *errstr;
1112 int i;
1113 const char *sep = "";
1114
1115 const struct dk_softc *dksc = &wd->sc_dksc;
1116 const char *devname = dksc->sc_xname;
1117 struct ata_drive_datas *drvp = wd->drvp;
1118 int errno = xfer->c_bio.r_error;
1119
1120 if (drvp->ata_vers >= 4)
1121 errstr = errstr4_5;
1122 else
1123 errstr = errstr0_3;
1124
1125 printf("%s: (", devname);
1126
1127 if (errno == 0)
1128 printf("error not notified");
1129
1130 for (i = 0; i < 8; i++) {
1131 if (errno & (1 << i)) {
1132 printf("%s%s", sep, errstr[i]);
1133 sep = ", ";
1134 }
1135 }
1136 printf(")\n");
1137 }
1138
1139 int
1140 wdioctl(dev_t dev, u_long cmd, void *addr, int flag, struct lwp *l)
1141 {
1142 struct wd_softc *wd =
1143 device_lookup_private(&wd_cd, WDUNIT(dev));
1144 struct dk_softc *dksc = &wd->sc_dksc;
1145
1146 ATADEBUG_PRINT(("wdioctl\n"), DEBUG_FUNCS);
1147
1148 if ((wd->sc_flags & WDF_LOADED) == 0)
1149 return EIO;
1150
1151 switch (cmd) {
1152 #ifdef HAS_BAD144_HANDLING
1153 case DIOCSBAD:
1154 if ((flag & FWRITE) == 0)
1155 return EBADF;
1156 dksc->sc_dkdev.dk_cpulabel->bad = *(struct dkbad *)addr;
1157 dksc->sc_dkdev.dk_label->d_flags |= D_BADSECT;
1158 bad144intern(wd);
1159 return 0;
1160 #endif
1161 #ifdef WD_SOFTBADSECT
1162 case DIOCBSLIST :
1163 {
1164 uint32_t count, missing, skip;
1165 struct disk_badsecinfo dbsi;
1166 struct disk_badsectors *dbs;
1167 size_t available;
1168 uint8_t *laddr;
1169
1170 dbsi = *(struct disk_badsecinfo *)addr;
1171 missing = wd->sc_bscount;
1172 count = 0;
1173 available = dbsi.dbsi_bufsize;
1174 skip = dbsi.dbsi_skip;
1175 laddr = (uint8_t *)dbsi.dbsi_buffer;
1176
1177 /*
  1178 		 * We start this loop with the expectation that all of the
  1179 		 * entries will be missing and decrement this counter each
  1180 		 * time we either skip over one (already copied out) or
  1181 		 * actually copy it back to user space.  The structs
1182 * holding the bad sector information are copied directly
1183 * back to user space whilst the summary is returned via
1184 * the struct passed in via the ioctl.
1185 */
1186 SLIST_FOREACH(dbs, &wd->sc_bslist, dbs_next) {
1187 if (skip > 0) {
1188 missing--;
1189 skip--;
1190 continue;
1191 }
1192 if (available < sizeof(*dbs))
1193 break;
1194 available -= sizeof(*dbs);
1195 copyout(dbs, laddr, sizeof(*dbs));
1196 laddr += sizeof(*dbs);
1197 missing--;
1198 count++;
1199 }
1200 dbsi.dbsi_left = missing;
1201 dbsi.dbsi_copied = count;
1202 *(struct disk_badsecinfo *)addr = dbsi;
1203 return 0;
1204 }
1205
1206 case DIOCBSFLUSH :
1207 /* Clean out the bad sector list */
1208 while (!SLIST_EMPTY(&wd->sc_bslist)) {
1209 void *head = SLIST_FIRST(&wd->sc_bslist);
1210 SLIST_REMOVE_HEAD(&wd->sc_bslist, dbs_next);
1211 free(head, M_TEMP);
1212 }
1213 wd->sc_bscount = 0;
1214 return 0;
1215 #endif
1216
1217 #ifdef notyet
1218 case DIOCWFORMAT:
1219 if ((flag & FWRITE) == 0)
1220 return EBADF;
1221 {
1222 register struct format_op *fop;
1223 struct iovec aiov;
1224 struct uio auio;
1225 int error1;
1226
1227 fop = (struct format_op *)addr;
1228 aiov.iov_base = fop->df_buf;
1229 aiov.iov_len = fop->df_count;
1230 auio.uio_iov = &aiov;
1231 auio.uio_iovcnt = 1;
1232 auio.uio_resid = fop->df_count;
1233 auio.uio_offset =
1234 fop->df_startblk * wd->sc_dk.dk_label->d_secsize;
1235 auio.uio_vmspace = l->l_proc->p_vmspace;
1236 error1 = physio(wdformat, NULL, dev, B_WRITE, wdminphys,
1237 &auio);
1238 fop->df_count -= auio.uio_resid;
1239 fop->df_reg[0] = wdc->sc_status;
1240 fop->df_reg[1] = wdc->sc_error;
1241 return error1;
1242 }
1243 #endif
1244 case DIOCGCACHE:
1245 return wd_getcache(wd, (int *)addr);
1246
1247 case DIOCSCACHE:
1248 return wd_setcache(wd, *(int *)addr);
1249
1250 case DIOCCACHESYNC:
1251 return wd_flushcache(wd, AT_WAIT, true);
1252
1253 case ATAIOCCOMMAND:
1254 /*
1255 * Make sure this command is (relatively) safe first
1256 */
1257 if ((((atareq_t *) addr)->flags & ATACMD_READ) == 0 &&
1258 (flag & FWRITE) == 0)
1259 return (EBADF);
1260 {
1261 struct wd_ioctl *wi;
1262 atareq_t *atareq = (atareq_t *) addr;
1263 int error1;
1264
1265 wi = wi_get(wd);
1266 wi->wi_atareq = *atareq;
1267
1268 if (atareq->datalen && atareq->flags &
1269 (ATACMD_READ | ATACMD_WRITE)) {
1270 void *tbuf;
1271 if (atareq->datalen < DEV_BSIZE
1272 && atareq->command == WDCC_IDENTIFY) {
1273 tbuf = malloc(DEV_BSIZE, M_TEMP, M_WAITOK);
1274 wi->wi_iov.iov_base = tbuf;
1275 wi->wi_iov.iov_len = DEV_BSIZE;
1276 UIO_SETUP_SYSSPACE(&wi->wi_uio);
1277 } else {
1278 tbuf = NULL;
1279 wi->wi_iov.iov_base = atareq->databuf;
1280 wi->wi_iov.iov_len = atareq->datalen;
1281 wi->wi_uio.uio_vmspace = l->l_proc->p_vmspace;
1282 }
1283 wi->wi_uio.uio_iov = &wi->wi_iov;
1284 wi->wi_uio.uio_iovcnt = 1;
1285 wi->wi_uio.uio_resid = atareq->datalen;
1286 wi->wi_uio.uio_offset = 0;
1287 wi->wi_uio.uio_rw =
1288 (atareq->flags & ATACMD_READ) ? B_READ : B_WRITE;
1289 error1 = physio(wdioctlstrategy, &wi->wi_bp, dev,
1290 (atareq->flags & ATACMD_READ) ? B_READ : B_WRITE,
1291 wdminphys, &wi->wi_uio);
1292 if (tbuf != NULL && error1 == 0) {
1293 error1 = copyout(tbuf, atareq->databuf,
1294 atareq->datalen);
1295 free(tbuf, M_TEMP);
1296 }
1297 } else {
1298 /* No need to call physio if we don't have any
1299 user data */
1300 wi->wi_bp.b_flags = 0;
1301 wi->wi_bp.b_data = 0;
1302 wi->wi_bp.b_bcount = 0;
1303 wi->wi_bp.b_dev = dev;
1304 wi->wi_bp.b_proc = l->l_proc;
1305 wdioctlstrategy(&wi->wi_bp);
1306 error1 = wi->wi_bp.b_error;
1307 }
1308 *atareq = wi->wi_atareq;
1309 wi_free(wi);
1310 return(error1);
1311 }
1312
1313 default:
1314 return dk_ioctl(dksc, dev, cmd, addr, flag, l);
1315 }
1316
1317 #ifdef DIAGNOSTIC
1318 panic("wdioctl: impossible");
1319 #endif
1320 }
1321
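/*
 * dk(9) d_discard callback: convert the byte range to device blocks
 * (rounding the start up and the end down) and issue DSM TRIM in
 * chunks of at most 0xffff blocks.
 */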
1322 static int
1323 wd_discard(device_t dev, off_t pos, off_t len)
1324 {
1325 struct wd_softc *wd = device_private(dev);
1326 daddr_t bno;
1327 long size, done;
1328 long maxatonce, amount;
1329 int result;
1330
1331 if (!(wd->sc_params.atap_ata_major & WDC_VER_ATA7)
1332 || !(wd->sc_params.support_dsm & ATA_SUPPORT_DSM_TRIM)) {
1333 /* not supported; ignore request */
1334 ATADEBUG_PRINT(("wddiscard (unsupported)\n"), DEBUG_FUNCS);
1335 return 0;
1336 }
1337 maxatonce = 0xffff; /*wd->sc_params.max_dsm_blocks*/
1338
1339 ATADEBUG_PRINT(("wddiscard\n"), DEBUG_FUNCS);
1340
1341 if ((wd->sc_flags & WDF_LOADED) == 0)
1342 return EIO;
1343
1344 /* round the start up and the end down */
1345 bno = (pos + wd->sc_blksize - 1) / wd->sc_blksize;
1346 size = ((pos + len) / wd->sc_blksize) - bno;
1347
1348 done = 0;
1349 while (done < size) {
1350 amount = size - done;
1351 if (amount > maxatonce) {
1352 amount = maxatonce;
1353 }
1354 result = wd_trim(wd, bno + done, amount);
1355 if (result) {
1356 return result;
1357 }
1358 done += amount;
1359 }
1360 return 0;
1361 }
1362
1363 static int
1364 wddiscard(dev_t dev, off_t pos, off_t len)
1365 {
1366 struct wd_softc *wd;
1367 struct dk_softc *dksc;
1368 int unit;
1369
1370 unit = WDUNIT(dev);
1371 wd = device_lookup_private(&wd_cd, unit);
1372 dksc = &wd->sc_dksc;
1373
1374 return dk_discard(dksc, dev, pos, len);
1375 }
1376
1377 #ifdef B_FORMAT
1378 int
1379 wdformat(struct buf *bp)
1380 {
1381
1382 bp->b_flags |= B_FORMAT;
1383 return wdstrategy(bp);
1384 }
1385 #endif
1386
1387 int
1388 wdsize(dev_t dev)
1389 {
1390 struct wd_softc *wd;
1391 struct dk_softc *dksc;
1392 int unit;
1393
1394 ATADEBUG_PRINT(("wdsize\n"), DEBUG_FUNCS);
1395
1396 unit = WDUNIT(dev);
1397 wd = device_lookup_private(&wd_cd, unit);
1398 if (wd == NULL)
1399 return (-1);
1400 dksc = &wd->sc_dksc;
1401
1402 if (!device_is_active(dksc->sc_dev))
1403 return (-1);
1404
1405 return dk_size(dksc, dev);
1406 }
1407
1408 /*
1409 * Dump core after a system crash.
1410 */
1411 static int
1412 wddump(dev_t dev, daddr_t blkno, void *va, size_t size)
1413 {
1414 struct wd_softc *wd;
1415 struct dk_softc *dksc;
1416 int unit;
1417
1418 /* Check if recursive dump; if so, punt. */
1419 if (wddoingadump)
1420 return EFAULT;
1421 wddoingadump = 1;
1422
1423 unit = WDUNIT(dev);
1424 wd = device_lookup_private(&wd_cd, unit);
1425 if (wd == NULL)
1426 return (ENXIO);
1427 dksc = &wd->sc_dksc;
1428
1429 return dk_dump(dksc, dev, blkno, va, size);
1430 }
1431
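/*
 * dk(9) d_dumpblocks callback: write dump blocks with polled transfers,
 * resetting the drive before the first one.
 */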
1432 static int
1433 wd_dumpblocks(device_t dev, void *va, daddr_t blkno, int nblk)
1434 {
1435 struct wd_softc *wd = device_private(dev);
1436 struct dk_softc *dksc = &wd->sc_dksc;
1437 struct disk_geom *dg = &dksc->sc_dkdev.dk_geom;
1438 struct ata_xfer *xfer;
1439 int err;
1440
1441 /* Recalibrate, if first dump transfer. */
1442 if (wddumprecalibrated == 0) {
1443 wddumprecalibrated = 1;
1444 (*wd->atabus->ata_reset_drive)(wd->drvp,
1445 AT_POLL | AT_RST_EMERG, NULL);
1446 wd->drvp->state = RESET;
1447 }
1448
1449 xfer = ata_get_xfer_ext(wd->drvp->chnl_softc, 0, 0);
1450 if (xfer == NULL) {
1451 printf("%s: no xfer\n", __func__);
1452 return EAGAIN;
1453 }
1454
1455 xfer->c_bio.blkno = blkno;
1456 xfer->c_bio.flags = ATA_POLL;
1457 if (wd->sc_flags & WDF_LBA48 &&
1458 (xfer->c_bio.blkno + nblk) > wd->sc_capacity28)
1459 xfer->c_bio.flags |= ATA_LBA48;
1460 if (wd->sc_flags & WDF_LBA)
1461 xfer->c_bio.flags |= ATA_LBA;
1462 xfer->c_bio.bcount = nblk * dg->dg_secsize;
1463 xfer->c_bio.databuf = va;
1464 #ifndef WD_DUMP_NOT_TRUSTED
1465 switch (err = wd->atabus->ata_bio(wd->drvp, xfer)) {
1466 case ATACMD_TRY_AGAIN:
1467 panic("wddump: try again");
1468 break;
1469 case ATACMD_QUEUED:
1470 panic("wddump: polled command has been queued");
1471 break;
1472 case ATACMD_COMPLETE:
1473 break;
1474 default:
1475 panic("wddump: unknown atacmd code %d", err);
1476 }
1477 switch(err = xfer->c_bio.error) {
1478 case TIMEOUT:
1479 printf("wddump: device timed out");
1480 err = EIO;
1481 break;
1482 case ERR_DF:
1483 printf("wddump: drive fault");
1484 err = EIO;
1485 break;
1486 case ERR_DMA:
1487 printf("wddump: DMA error");
1488 err = EIO;
1489 break;
1490 case ERROR:
1491 printf("wddump: ");
1492 wdperror(wd, xfer);
1493 err = EIO;
1494 break;
1495 case NOERROR:
1496 err = 0;
1497 break;
1498 default:
1499 panic("wddump: unknown error type %d", err);
1500 }
1501
1502 if (err != 0) {
1503 printf("\n");
1504 return err;
1505 }
1506 #else /* WD_DUMP_NOT_TRUSTED */
1507 /* Let's just talk about this first... */
1508 printf("wd%d: dump addr 0x%x, cylin %d, head %d, sector %d\n",
1509 unit, va, cylin, head, sector);
1510 delay(500 * 1000); /* half a second */
1511 #endif
1512
1513 wddoingadump = 0;
1514 return 0;
1515 }
1516
1517 #ifdef HAS_BAD144_HANDLING
1518 /*
1519 * Internalize the bad sector table.
1520 */
1521 void
1522 bad144intern(struct wd_softc *wd)
1523 {
1524 struct dk_softc *dksc = &wd->sc_dksc;
1525 struct dkbad *bt = &dksc->sc_dkdev.dk_cpulabel->bad;
1526 struct disklabel *lp = dksc->sc_dkdev.dk_label;
1527 int i = 0;
1528
1529 ATADEBUG_PRINT(("bad144intern\n"), DEBUG_XFERS);
1530
1531 for (; i < NBT_BAD; i++) {
1532 if (bt->bt_bad[i].bt_cyl == 0xffff)
1533 break;
1534 wd->drvp->badsect[i] =
1535 bt->bt_bad[i].bt_cyl * lp->d_secpercyl +
1536 (bt->bt_bad[i].bt_trksec >> 8) * lp->d_nsectors +
1537 (bt->bt_bad[i].bt_trksec & 0xff);
1538 }
1539 for (; i < NBT_BAD+1; i++)
1540 wd->drvp->badsect[i] = -1;
1541 }
1542 #endif
1543
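/*
 * Fill in the disk geometry from the IDENTIFY data and push it to the
 * disk(9) layer.
 */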
1544 static void
1545 wd_set_geometry(struct wd_softc *wd)
1546 {
1547 struct dk_softc *dksc = &wd->sc_dksc;
1548 struct disk_geom *dg = &dksc->sc_dkdev.dk_geom;
1549
1550 memset(dg, 0, sizeof(*dg));
1551
1552 dg->dg_secperunit = wd->sc_capacity;
1553 dg->dg_secsize = wd->sc_blksize;
1554 dg->dg_nsectors = wd->sc_params.atap_sectors;
1555 dg->dg_ntracks = wd->sc_params.atap_heads;
1556 if ((wd->sc_flags & WDF_LBA) == 0)
1557 dg->dg_ncylinders = wd->sc_params.atap_cylinders;
1558
1559 disk_set_info(dksc->sc_dev, &dksc->sc_dkdev, NULL);
1560 }
1561
1562 int
1563 wd_get_params(struct wd_softc *wd, uint8_t flags, struct ataparams *params)
1564 {
1565
1566 switch (wd->atabus->ata_get_params(wd->drvp, flags, params)) {
1567 case CMD_AGAIN:
1568 return 1;
1569 case CMD_ERR:
1570 if (wd->drvp->drive_type != ATA_DRIVET_OLD)
1571 return 1;
1572 /*
1573 * We `know' there's a drive here; just assume it's old.
1574 * This geometry is only used to read the MBR and print a
1575 * (false) attach message.
1576 */
1577 strncpy(params->atap_model, "ST506",
1578 sizeof params->atap_model);
1579 params->atap_config = ATA_CFG_FIXED;
1580 params->atap_cylinders = 1024;
1581 params->atap_heads = 8;
1582 params->atap_sectors = 17;
1583 params->atap_multi = 1;
1584 params->atap_capabilities1 = params->atap_capabilities2 = 0;
1585 wd->drvp->ata_vers = -1; /* Mark it as pre-ATA */
1586 /* FALLTHROUGH */
1587 case CMD_OK:
1588 return 0;
1589 default:
1590 panic("wd_get_params: bad return code from ata_get_params");
1591 /* NOTREACHED */
1592 }
1593 }
1594
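/* DIOCGCACHE backend: report cache capabilities and write-cache state. */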
1595 int
1596 wd_getcache(struct wd_softc *wd, int *bitsp)
1597 {
1598 struct ataparams params;
1599
  1600 	if (wd_get_params(wd, AT_WAIT, &params) != 0)
1601 return EIO;
1602 if (params.atap_cmd_set1 == 0x0000 ||
1603 params.atap_cmd_set1 == 0xffff ||
1604 (params.atap_cmd_set1 & WDC_CMD1_CACHE) == 0) {
1605 *bitsp = 0;
1606 return 0;
1607 }
1608 *bitsp = DKCACHE_WCHANGE | DKCACHE_READ;
1609 if (params.atap_cmd1_en & WDC_CMD1_CACHE)
1610 *bitsp |= DKCACHE_WRITE;
1611
1612 if (WD_USE_NCQ(wd) || (wd->drvp->drive_flags & ATA_DRIVE_WFUA))
1613 *bitsp |= DKCACHE_FUA;
1614
1615 return 0;
1616 }
1617
1618 const char at_errbits[] = "\20\10ERROR\11TIMEOU\12DF";
1619
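/* DIOCSCACHE backend: enable or disable the write cache via SET FEATURES. */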
1620 int
1621 wd_setcache(struct wd_softc *wd, int bits)
1622 {
1623 struct dk_softc *dksc = &wd->sc_dksc;
1624 struct ataparams params;
1625 struct ata_xfer *xfer;
1626 int error;
1627
  1628 	if (wd_get_params(wd, AT_WAIT, &params) != 0)
1629 return EIO;
1630
1631 if (params.atap_cmd_set1 == 0x0000 ||
1632 params.atap_cmd_set1 == 0xffff ||
1633 (params.atap_cmd_set1 & WDC_CMD1_CACHE) == 0)
1634 return EOPNOTSUPP;
1635
1636 if ((bits & DKCACHE_READ) == 0 ||
1637 (bits & DKCACHE_SAVE) != 0)
1638 return EOPNOTSUPP;
1639
1640 xfer = ata_get_xfer(wd->drvp->chnl_softc);
1641 if (xfer == NULL)
1642 return EINTR;
1643
1644 xfer->c_ata_c.r_command = SET_FEATURES;
1645 xfer->c_ata_c.r_st_bmask = 0;
1646 xfer->c_ata_c.r_st_pmask = 0;
1647 xfer->c_ata_c.timeout = 30000; /* 30s timeout */
1648 xfer->c_ata_c.flags = AT_WAIT;
1649 if (bits & DKCACHE_WRITE)
1650 xfer->c_ata_c.r_features = WDSF_WRITE_CACHE_EN;
1651 else
1652 xfer->c_ata_c.r_features = WDSF_WRITE_CACHE_DS;
1653 if (wd->atabus->ata_exec_command(wd->drvp, xfer) != ATACMD_COMPLETE) {
1654 aprint_error_dev(dksc->sc_dev,
1655 "wd_setcache command not complete\n");
1656 error = EIO;
1657 goto out;
1658 }
1659
1660 if (xfer->c_ata_c.flags & (AT_ERROR | AT_TIMEOU | AT_DF)) {
1661 char sbuf[sizeof(at_errbits) + 64];
1662 snprintb(sbuf, sizeof(sbuf), at_errbits, xfer->c_ata_c.flags);
1663 aprint_error_dev(dksc->sc_dev, "wd_setcache: status=%s\n", sbuf);
1664 error = EIO;
1665 goto out;
1666 }
1667
1668 error = 0;
1669
1670 out:
1671 ata_free_xfer(wd->drvp->chnl_softc, xfer);
1672 ata_channel_start(wd->drvp->chnl_softc, wd->drvp->drive, true);
1673 return error;
1674 }
1675
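/* Issue STANDBY IMMEDIATE to spin the drive down. */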
1676 static int
1677 wd_standby(struct wd_softc *wd, int flags)
1678 {
1679 struct dk_softc *dksc = &wd->sc_dksc;
1680 struct ata_xfer *xfer;
1681 int error;
1682
1683 xfer = ata_get_xfer(wd->drvp->chnl_softc);
1684 if (xfer == NULL)
1685 return EINTR;
1686
1687 xfer->c_ata_c.r_command = WDCC_STANDBY_IMMED;
1688 xfer->c_ata_c.r_st_bmask = WDCS_DRDY;
1689 xfer->c_ata_c.r_st_pmask = WDCS_DRDY;
1690 xfer->c_ata_c.flags = flags;
1691 xfer->c_ata_c.timeout = 30000; /* 30s timeout */
1692 if (wd->atabus->ata_exec_command(wd->drvp, xfer) != ATACMD_COMPLETE) {
1693 aprint_error_dev(dksc->sc_dev,
1694 "standby immediate command didn't complete\n");
1695 error = EIO;
1696 goto out;
1697 }
1698 if (xfer->c_ata_c.flags & AT_ERROR) {
1699 if (xfer->c_ata_c.r_error == WDCE_ABRT) {
1700 /* command not supported */
1701 error = ENODEV;
1702 goto out;
1703 }
1704 }
1705 if (xfer->c_ata_c.flags & (AT_ERROR | AT_TIMEOU | AT_DF)) {
1706 char sbuf[sizeof(at_errbits) + 64];
1707 snprintb(sbuf, sizeof(sbuf), at_errbits, xfer->c_ata_c.flags);
1708 aprint_error_dev(dksc->sc_dev, "wd_standby: status=%s\n", sbuf);
1709 error = EIO;
1710 goto out;
1711 }
1712 error = 0;
1713
1714 out:
1715 ata_free_xfer(wd->drvp->chnl_softc, xfer);
1716
1717 /*
  1718 	 * The drive is supposed to go idle, so start only the other drives.
  1719 	 * Our bufq might actually already be freed at this point.
1720 */
1721 ata_channel_start(wd->drvp->chnl_softc, wd->drvp->drive, false);
1722
1723 return error;
1724 }
1725
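/*
 * Issue FLUSH CACHE (or FLUSH CACHE EXT when supported).  WDF_FLUSH_PEND
 * keeps new transfers from being queued while we wait for an xfer slot.
 */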
1726 int
1727 wd_flushcache(struct wd_softc *wd, int flags, bool start_self)
1728 {
1729 struct dk_softc *dksc = &wd->sc_dksc;
1730 struct ata_xfer *xfer;
1731 int error;
1732
1733 /*
  1734 	 * WDCC_FLUSHCACHE has been part of the spec since ATA-4, but some
  1735 	 * drives report only ATA-2 and still support it.
1736 */
1737 if (wd->drvp->ata_vers < 4 &&
1738 ((wd->sc_params.atap_cmd_set2 & WDC_CMD2_FC) == 0 ||
1739 wd->sc_params.atap_cmd_set2 == 0xffff))
1740 return ENODEV;
1741
1742 mutex_enter(&wd->sc_lock);
1743 SET(wd->sc_flags, WDF_FLUSH_PEND);
1744 mutex_exit(&wd->sc_lock);
1745
1746 xfer = ata_get_xfer(wd->drvp->chnl_softc);
1747
1748 mutex_enter(&wd->sc_lock);
1749 CLR(wd->sc_flags, WDF_FLUSH_PEND);
1750 mutex_exit(&wd->sc_lock);
1751
1752 if (xfer == NULL) {
1753 error = EINTR;
1754 goto out;
1755 }
1756
1757 if ((wd->sc_params.atap_cmd2_en & ATA_CMD2_LBA48) != 0 &&
1758 (wd->sc_params.atap_cmd2_en & ATA_CMD2_FCE) != 0) {
1759 xfer->c_ata_c.r_command = WDCC_FLUSHCACHE_EXT;
1760 flags |= AT_LBA48;
1761 } else
1762 xfer->c_ata_c.r_command = WDCC_FLUSHCACHE;
1763 xfer->c_ata_c.r_st_bmask = WDCS_DRDY;
1764 xfer->c_ata_c.r_st_pmask = WDCS_DRDY;
1765 xfer->c_ata_c.flags = flags | AT_READREG;
1766 xfer->c_ata_c.timeout = 300000; /* 5m timeout */
1767 if (wd->atabus->ata_exec_command(wd->drvp, xfer) != ATACMD_COMPLETE) {
1768 aprint_error_dev(dksc->sc_dev,
1769 "flush cache command didn't complete\n");
1770 error = EIO;
1771 goto out_xfer;
1772 }
1773 if (xfer->c_ata_c.flags & AT_ERROR) {
1774 if (xfer->c_ata_c.r_error == WDCE_ABRT) {
1775 /* command not supported */
1776 error = ENODEV;
1777 goto out_xfer;
1778 }
1779 }
1780 if (xfer->c_ata_c.flags & (AT_ERROR | AT_TIMEOU | AT_DF)) {
1781 char sbuf[sizeof(at_errbits) + 64];
1782 snprintb(sbuf, sizeof(sbuf), at_errbits, xfer->c_ata_c.flags);
1783 aprint_error_dev(dksc->sc_dev, "wd_flushcache: status=%s\n",
1784 sbuf);
1785 error = EIO;
1786 goto out_xfer;
1787 }
1788 error = 0;
1789
1790 out_xfer:
1791 ata_free_xfer(wd->drvp->chnl_softc, xfer);
1792
1793 out:
1794 /* start again I/O processing possibly stopped due to no xfer */
1795 ata_channel_start(wd->drvp->chnl_softc, wd->drvp->drive, start_self);
1796
1797 return error;
1798 }
1799
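/*
 * Issue a single DATA SET MANAGEMENT (TRIM) command.  The 512-byte
 * payload carries one range entry: a 48-bit starting block followed
 * by a 16-bit block count.
 */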
1800 static int
1801 wd_trim(struct wd_softc *wd, daddr_t bno, long size)
1802 {
1803 struct dk_softc *dksc = &wd->sc_dksc;
1804 struct ata_xfer *xfer;
1805 int error;
1806 unsigned char *req;
1807
1808 xfer = ata_get_xfer(wd->drvp->chnl_softc);
1809 if (xfer == NULL)
1810 return EINTR;
1811
1812 req = kmem_zalloc(512, KM_SLEEP);
1813 req[0] = bno & 0xff;
1814 req[1] = (bno >> 8) & 0xff;
1815 req[2] = (bno >> 16) & 0xff;
1816 req[3] = (bno >> 24) & 0xff;
1817 req[4] = (bno >> 32) & 0xff;
1818 req[5] = (bno >> 40) & 0xff;
1819 req[6] = size & 0xff;
1820 req[7] = (size >> 8) & 0xff;
1821
1822 xfer->c_ata_c.r_command = ATA_DATA_SET_MANAGEMENT;
1823 xfer->c_ata_c.r_count = 1;
1824 xfer->c_ata_c.r_features = ATA_SUPPORT_DSM_TRIM;
1825 xfer->c_ata_c.r_st_bmask = WDCS_DRDY;
1826 xfer->c_ata_c.r_st_pmask = WDCS_DRDY;
1827 xfer->c_ata_c.timeout = 30000; /* 30s timeout */
1828 xfer->c_ata_c.data = req;
1829 xfer->c_ata_c.bcount = 512;
1830 xfer->c_ata_c.flags |= AT_WRITE | AT_WAIT;
1831 if (wd->atabus->ata_exec_command(wd->drvp, xfer) != ATACMD_COMPLETE) {
1832 aprint_error_dev(dksc->sc_dev,
1833 "trim command didn't complete\n");
1834 kmem_free(req, 512);
1835 error = EIO;
1836 goto out;
1837 }
1838 kmem_free(req, 512);
1839 if (xfer->c_ata_c.flags & AT_ERROR) {
1840 if (xfer->c_ata_c.r_error == WDCE_ABRT) {
1841 /* command not supported */
1842 error = ENODEV;
1843 goto out;
1844 }
1845 }
1846 if (xfer->c_ata_c.flags & (AT_ERROR | AT_TIMEOU | AT_DF)) {
1847 char sbuf[sizeof(at_errbits) + 64];
1848 snprintb(sbuf, sizeof(sbuf), at_errbits, xfer->c_ata_c.flags);
1849 aprint_error_dev(dksc->sc_dev, "wd_trim: status=%s\n",
1850 sbuf);
1851 error = EIO;
1852 goto out;
1853 }
1854 error = 0;
1855
1856 out:
1857 ata_free_xfer(wd->drvp->chnl_softc, xfer);
1858 ata_channel_start(wd->drvp->chnl_softc, wd->drvp->drive, true);
1859 return error;
1860 }
1861
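/*
 * pmf(9) shutdown handler: flush the write cache and, when powering
 * down, spin the drive down as well.
 */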
1862 bool
1863 wd_shutdown(device_t dev, int how)
1864 {
1865 struct wd_softc *wd = device_private(dev);
1866
1867 /* the adapter needs to be enabled */
1868 if (wd->atabus->ata_addref(wd->drvp))
1869 return true; /* no need to complain */
1870
1871 wd_flushcache(wd, AT_POLL, false);
1872 if ((how & RB_POWERDOWN) == RB_POWERDOWN)
1873 wd_standby(wd, AT_POLL);
1874 return true;
1875 }
1876
1877 /*
  1878  * Allocate space for an ioctl queue structure.  Mostly taken from
1879 * scsipi_ioctl.c
1880 */
1881 struct wd_ioctl *
1882 wi_get(struct wd_softc *wd)
1883 {
1884 struct wd_ioctl *wi;
1885
1886 wi = malloc(sizeof(struct wd_ioctl), M_TEMP, M_WAITOK|M_ZERO);
1887 wi->wi_softc = wd;
1888 buf_init(&wi->wi_bp);
1889
1890 return (wi);
1891 }
1892
1893 /*
1894 * Free an ioctl structure and remove it from our list
1895 */
1896
1897 void
1898 wi_free(struct wd_ioctl *wi)
1899 {
1900 buf_destroy(&wi->wi_bp);
1901 free(wi, M_TEMP);
1902 }
1903
1904 /*
1905 * Find a wd_ioctl structure based on the struct buf.
1906 */
1907
1908 struct wd_ioctl *
1909 wi_find(struct buf *bp)
1910 {
1911 return container_of(bp, struct wd_ioctl, wi_bp);
1912 }
1913
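/*
 * Return the transfer unit for an ATAIOCCOMMAND request: the drive's
 * logical sector size for the regular read/write commands, 512 bytes
 * for everything else (e.g. IDENTIFY).
 */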
1914 static uint
1915 wi_sector_size(const struct wd_ioctl * const wi)
1916 {
1917 switch (wi->wi_atareq.command) {
1918 case WDCC_READ:
1919 case WDCC_WRITE:
1920 case WDCC_READMULTI:
1921 case WDCC_WRITEMULTI:
1922 case WDCC_READDMA:
1923 case WDCC_WRITEDMA:
1924 case WDCC_READ_EXT:
1925 case WDCC_WRITE_EXT:
1926 case WDCC_READMULTI_EXT:
1927 case WDCC_WRITEMULTI_EXT:
1928 case WDCC_READDMA_EXT:
1929 case WDCC_WRITEDMA_EXT:
1930 case WDCC_READ_FPDMA_QUEUED:
1931 case WDCC_WRITE_FPDMA_QUEUED:
1932 return wi->wi_softc->sc_blksize;
1933 default:
1934 return 512;
1935 }
1936 }
1937
1938 /*
1939 * Ioctl pseudo strategy routine
1940 *
1941 * This is mostly stolen from scsipi_ioctl.c:scsistrategy(). What
1942 * happens here is:
1943 *
1944 * - wdioctl() queues a wd_ioctl structure.
1945 *
1946 * - wdioctl() calls physio/wdioctlstrategy based on whether or not
1947 * user space I/O is required. If physio() is called, physio() eventually
1948 * calls wdioctlstrategy().
1949 *
1950 * - In either case, wdioctlstrategy() calls wd->atabus->ata_exec_command()
1951 * to perform the actual command
1952 *
  1953  * The reason for using the pseudo strategy routine is that
1954 * when doing I/O to/from user space, physio _really_ wants to be in
1955 * the loop. We could put the entire buffer into the ioctl request
1956 * structure, but that won't scale if we want to do things like download
1957 * microcode.
1958 */
1959
1960 void
1961 wdioctlstrategy(struct buf *bp)
1962 {
1963 struct wd_ioctl *wi;
1964 struct ata_xfer *xfer;
1965 int error = 0;
1966
1967 wi = wi_find(bp);
1968 if (wi == NULL) {
1969 printf("wdioctlstrategy: "
1970 "No matching ioctl request found in queue\n");
1971 error = EINVAL;
1972 goto out2;
1973 }
1974
1975 xfer = ata_get_xfer(wi->wi_softc->drvp->chnl_softc);
1976 if (xfer == NULL) {
1977 error = EINTR;
1978 goto out2;
1979 }
1980
1981 /*
1982 * Abort if physio broke up the transfer
1983 */
1984
1985 if (bp->b_bcount != wi->wi_atareq.datalen) {
1986 printf("physio split wd ioctl request... cannot proceed\n");
1987 error = EIO;
1988 goto out;
1989 }
1990
1991 /*
1992 * Abort if we didn't get a buffer size that was a multiple of
  1993 	 * our sector size (or that overflows the CHS/LBA28 sector count).
1994 */
1995
1996 if ((bp->b_bcount % wi_sector_size(wi)) != 0 ||
1997 (bp->b_bcount / wi_sector_size(wi)) >=
1998 (1 << NBBY)) {
1999 error = EINVAL;
2000 goto out;
2001 }
2002
2003 /*
2004 * Make sure a timeout was supplied in the ioctl request
2005 */
2006
2007 if (wi->wi_atareq.timeout == 0) {
2008 error = EINVAL;
2009 goto out;
2010 }
2011
2012 if (wi->wi_atareq.flags & ATACMD_READ)
2013 xfer->c_ata_c.flags |= AT_READ;
2014 else if (wi->wi_atareq.flags & ATACMD_WRITE)
2015 xfer->c_ata_c.flags |= AT_WRITE;
2016
2017 if (wi->wi_atareq.flags & ATACMD_READREG)
2018 xfer->c_ata_c.flags |= AT_READREG;
2019
2020 if ((wi->wi_atareq.flags & ATACMD_LBA) != 0)
2021 xfer->c_ata_c.flags |= AT_LBA;
2022
2023 xfer->c_ata_c.flags |= AT_WAIT;
2024
2025 xfer->c_ata_c.timeout = wi->wi_atareq.timeout;
2026 xfer->c_ata_c.r_command = wi->wi_atareq.command;
2027 xfer->c_ata_c.r_lba = ((wi->wi_atareq.head & 0x0f) << 24) |
2028 (wi->wi_atareq.cylinder << 8) |
2029 wi->wi_atareq.sec_num;
2030 xfer->c_ata_c.r_count = wi->wi_atareq.sec_count;
2031 xfer->c_ata_c.r_features = wi->wi_atareq.features;
2032 xfer->c_ata_c.r_st_bmask = WDCS_DRDY;
2033 xfer->c_ata_c.r_st_pmask = WDCS_DRDY;
2034 xfer->c_ata_c.data = wi->wi_bp.b_data;
2035 xfer->c_ata_c.bcount = wi->wi_bp.b_bcount;
2036
2037 if (wi->wi_softc->atabus->ata_exec_command(wi->wi_softc->drvp, xfer)
2038 != ATACMD_COMPLETE) {
2039 wi->wi_atareq.retsts = ATACMD_ERROR;
2040 error = EIO;
2041 goto out;
2042 }
2043
2044 if (xfer->c_ata_c.flags & (AT_ERROR | AT_TIMEOU | AT_DF)) {
2045 if (xfer->c_ata_c.flags & AT_ERROR) {
2046 wi->wi_atareq.retsts = ATACMD_ERROR;
2047 wi->wi_atareq.error = xfer->c_ata_c.r_error;
2048 } else if (xfer->c_ata_c.flags & AT_DF)
2049 wi->wi_atareq.retsts = ATACMD_DF;
2050 else
2051 wi->wi_atareq.retsts = ATACMD_TIMEOUT;
2052 } else {
2053 wi->wi_atareq.retsts = ATACMD_OK;
2054 if (wi->wi_atareq.flags & ATACMD_READREG) {
2055 wi->wi_atareq.command = xfer->c_ata_c.r_status;
2056 wi->wi_atareq.features = xfer->c_ata_c.r_error;
2057 wi->wi_atareq.sec_count = xfer->c_ata_c.r_count;
2058 wi->wi_atareq.sec_num = xfer->c_ata_c.r_lba & 0xff;
2059 wi->wi_atareq.head = (xfer->c_ata_c.r_device & 0xf0) |
2060 ((xfer->c_ata_c.r_lba >> 24) & 0x0f);
2061 wi->wi_atareq.cylinder =
2062 (xfer->c_ata_c.r_lba >> 8) & 0xffff;
2063 wi->wi_atareq.error = xfer->c_ata_c.r_error;
2064 }
2065 }
2066
2067 out:
2068 ata_free_xfer(wi->wi_softc->drvp->chnl_softc, xfer);
2069 ata_channel_start(wi->wi_softc->drvp->chnl_softc,
2070 wi->wi_softc->drvp->drive, true);
2071 out2:
2072 bp->b_error = error;
2073 if (error)
2074 bp->b_resid = bp->b_bcount;
2075 biodone(bp);
2076 }
2077
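/*
 * Create the per-drive hw.<wdN> sysctl subtree: NCQ tuning knobs and,
 * with WD_CHAOS_MONKEY, the fault-injection counters.
 */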
2078 static void
2079 wd_sysctl_attach(struct wd_softc *wd)
2080 {
2081 struct dk_softc *dksc = &wd->sc_dksc;
2082 const struct sysctlnode *node;
2083 int error;
2084
2085 /* sysctl set-up */
2086 if (sysctl_createv(&wd->nodelog, 0, NULL, &node,
2087 0, CTLTYPE_NODE, dksc->sc_xname,
2088 SYSCTL_DESCR("wd driver settings"),
2089 NULL, 0, NULL, 0,
2090 CTL_HW, CTL_CREATE, CTL_EOL) != 0) {
2091 aprint_error_dev(dksc->sc_dev,
2092 "could not create %s.%s sysctl node\n",
2093 "hw", dksc->sc_xname);
2094 return;
2095 }
2096
2097 wd->drv_max_tags = ATA_MAX_OPENINGS;
2098 if ((error = sysctl_createv(&wd->nodelog, 0, NULL, NULL,
2099 CTLFLAG_READWRITE, CTLTYPE_INT, "max_tags",
2100 SYSCTL_DESCR("max number of NCQ tags to use"),
2101 NULL, 0, &wd->drv_max_tags, 0,
2102 CTL_HW, node->sysctl_num, CTL_CREATE, CTL_EOL))
2103 != 0) {
2104 aprint_error_dev(dksc->sc_dev,
2105 "could not create %s.%s.max_tags sysctl - error %d\n",
2106 "hw", dksc->sc_xname, error);
2107 return;
2108 }
2109
2110 wd->drv_ncq = true;
2111 if ((error = sysctl_createv(&wd->nodelog, 0, NULL, NULL,
2112 CTLFLAG_READWRITE, CTLTYPE_BOOL, "use_ncq",
2113 SYSCTL_DESCR("use NCQ if supported"),
2114 NULL, 0, &wd->drv_ncq, 0,
2115 CTL_HW, node->sysctl_num, CTL_CREATE, CTL_EOL))
2116 != 0) {
2117 aprint_error_dev(dksc->sc_dev,
2118 "could not create %s.%s.use_ncq sysctl - error %d\n",
2119 "hw", dksc->sc_xname, error);
2120 return;
2121 }
2122
2123 wd->drv_ncq_prio = false;
2124 if ((error = sysctl_createv(&wd->nodelog, 0, NULL, NULL,
2125 CTLFLAG_READWRITE, CTLTYPE_BOOL, "use_ncq_prio",
2126 SYSCTL_DESCR("use NCQ PRIORITY if supported"),
2127 NULL, 0, &wd->drv_ncq_prio, 0,
2128 CTL_HW, node->sysctl_num, CTL_CREATE, CTL_EOL))
2129 != 0) {
2130 aprint_error_dev(dksc->sc_dev,
2131 "could not create %s.%s.use_ncq_prio sysctl - error %d\n",
2132 "hw", dksc->sc_xname, error);
2133 return;
2134 }
2135
2136 #ifdef WD_CHAOS_MONKEY
2137 wd->drv_chaos_freq = 0;
2138 if ((error = sysctl_createv(&wd->nodelog, 0, NULL, NULL,
2139 CTLFLAG_READWRITE, CTLTYPE_INT, "chaos_freq",
2140 SYSCTL_DESCR("simulated bio read error rate"),
2141 NULL, 0, &wd->drv_chaos_freq, 0,
2142 CTL_HW, node->sysctl_num, CTL_CREATE, CTL_EOL))
2143 != 0) {
2144 aprint_error_dev(dksc->sc_dev,
2145 "could not create %s.%s.chaos_freq sysctl - error %d\n",
2146 "hw", dksc->sc_xname, error);
2147 return;
2148 }
2149
2150 wd->drv_chaos_cnt = 0;
2151 if ((error = sysctl_createv(&wd->nodelog, 0, NULL, NULL,
2152 CTLFLAG_READONLY, CTLTYPE_INT, "chaos_cnt",
2153 SYSCTL_DESCR("number of processed bio reads"),
2154 NULL, 0, &wd->drv_chaos_cnt, 0,
2155 CTL_HW, node->sysctl_num, CTL_CREATE, CTL_EOL))
2156 != 0) {
2157 aprint_error_dev(dksc->sc_dev,
2158 "could not create %s.%s.chaos_cnt sysctl - error %d\n",
2159 "hw", dksc->sc_xname, error);
2160 return;
2161 }
2162 #endif
2163
2164 }
2165
2166 static void
2167 wd_sysctl_detach(struct wd_softc *wd)
2168 {
2169 sysctl_teardown(&wd->nodelog);
2170 }
2171
2172 #ifdef ATADEBUG
2173 int wddebug(void);
2174
2175 int
2176 wddebug(void)
2177 {
2178 struct wd_softc *wd;
2179 struct dk_softc *dksc;
2180 int unit;
2181
2182 for (unit = 0; unit <= 3; unit++) {
2183 wd = device_lookup_private(&wd_cd, unit);
2184 if (wd == NULL)
2185 continue;
2186 dksc = &wd->sc_dksc;
2187 printf("%s fl %x bufq %p:\n",
2188 dksc->sc_xname, wd->sc_flags, bufq_peek(dksc->sc_bufq));
2189
2190 atachannel_debug(wd->drvp->chnl_softc);
2191 }
2192 return 0;
2193 }
2194 #endif /* ATADEBUG */
2195