/*	$NetBSD: wd.c,v 1.446 2019/03/19 16:56:29 mlelstv Exp $ */

/*
 * Copyright (c) 1998, 2001 Manuel Bouyer.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 1998, 2003, 2004 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum and by Onno van der Linden.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: wd.c,v 1.446 2019/03/19 16:56:29 mlelstv Exp $");

#include "opt_ata.h"
#include "opt_wd.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/conf.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/ioctl.h>
#include <sys/buf.h>
#include <sys/bufq.h>
#include <sys/uio.h>
#include <sys/device.h>
#include <sys/disklabel.h>
#include <sys/disk.h>
#include <sys/syslog.h>
#include <sys/proc.h>
#include <sys/reboot.h>
#include <sys/vnode.h>
#include <sys/rndsource.h>

#include <sys/intr.h>
#include <sys/bus.h>

#include <dev/ata/atareg.h>
#include <dev/ata/atavar.h>
#include <dev/ata/wdvar.h>
#include <dev/ic/wdcreg.h>
#include <sys/ataio.h>
#include "locators.h"

#include <prop/proplib.h>

#define	WDIORETRIES_SINGLE 4	/* number of retries for single-sector */
#define	WDIORETRIES	5	/* number of retries before giving up */
#define	RECOVERYTIME hz/2	/* time to wait before retrying a cmd */

#define	WDUNIT(dev)		DISKUNIT(dev)
#define	WDPART(dev)		DISKPART(dev)
#define	WDMINOR(unit, part)	DISKMINOR(unit, part)
#define	MAKEWDDEV(maj, unit, part)	MAKEDISKDEV(maj, unit, part)

#define	WDLABELDEV(dev)	(MAKEWDDEV(major(dev), WDUNIT(dev), RAW_PART))

#define	DEBUG_FUNCS  0x08
#define	DEBUG_PROBE  0x10
#define	DEBUG_DETACH 0x20
#define	DEBUG_XFERS  0x40
#ifdef ATADEBUG
#ifndef ATADEBUG_WD_MASK
#define ATADEBUG_WD_MASK 0x0
#endif
int wdcdebug_wd_mask = ATADEBUG_WD_MASK;
#define ATADEBUG_PRINT(args, level) \
	if (wdcdebug_wd_mask & (level)) \
		printf args
#else
#define ATADEBUG_PRINT(args, level)
#endif

static int	wdprobe(device_t, cfdata_t, void *);
static void	wdattach(device_t, device_t, void *);
static int	wddetach(device_t, int);
static void	wdperror(const struct wd_softc *, struct ata_xfer *);

static void	wdminphys(struct buf *);

static int	wd_firstopen(device_t, dev_t, int, int);
static int	wd_lastclose(device_t);
static bool	wd_suspend(device_t, const pmf_qual_t *);
static int	wd_standby(struct wd_softc *, int);

CFATTACH_DECL3_NEW(wd, sizeof(struct wd_softc),
    wdprobe, wdattach, wddetach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

extern struct cfdriver wd_cd;

static dev_type_open(wdopen);
static dev_type_close(wdclose);
static dev_type_read(wdread);
static dev_type_write(wdwrite);
static dev_type_ioctl(wdioctl);
static dev_type_strategy(wdstrategy);
static dev_type_dump(wddump);
static dev_type_size(wdsize);
static dev_type_discard(wddiscard);

const struct bdevsw wd_bdevsw = {
	.d_open = wdopen,
	.d_close = wdclose,
	.d_strategy = wdstrategy,
	.d_ioctl = wdioctl,
	.d_dump = wddump,
	.d_psize = wdsize,
	.d_discard = wddiscard,
	.d_flag = D_DISK
};

const struct cdevsw wd_cdevsw = {
	.d_open = wdopen,
	.d_close = wdclose,
	.d_read = wdread,
	.d_write = wdwrite,
	.d_ioctl = wdioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = nopoll,
	.d_mmap = nommap,
	.d_kqfilter = nokqfilter,
	.d_discard = wddiscard,
	.d_flag = D_DISK
};

/* #define WD_DUMP_NOT_TRUSTED if you just want to watch */
static int wddoingadump = 0;
static int wddumprecalibrated = 0;

/*
 * Glue necessary to hook WDCIOCCOMMAND into physio
 */

struct wd_ioctl {
	LIST_ENTRY(wd_ioctl) wi_list;
	struct buf wi_bp;
	struct uio wi_uio;
	struct iovec wi_iov;
	atareq_t wi_atareq;
	struct wd_softc *wi_softc;
};

static struct wd_ioctl *wi_find(struct buf *);
static void wi_free(struct wd_ioctl *);
static struct wd_ioctl *wi_get(struct wd_softc *);
static void wdioctlstrategy(struct buf *);

static void	wdrestart(void *);
static void	wdstart1(struct wd_softc *, struct buf *, struct ata_xfer *);
static int	wd_diskstart(device_t, struct buf *);
static int	wd_dumpblocks(device_t, void *, daddr_t, int);
static void	wd_iosize(device_t, int *);
static int	wd_discard(device_t, off_t, off_t);
static void	wdbioretry(void *);
static void	wdbiorequeue(void *);
static void	wddone(device_t, struct ata_xfer *);
static int	wd_get_params(struct wd_softc *, uint8_t, struct ataparams *);
static void	wd_set_geometry(struct wd_softc *);
static int	wd_flushcache(struct wd_softc *, int, bool);
static int	wd_trim(struct wd_softc *, daddr_t, long);
static bool	wd_shutdown(device_t, int);

static int	wd_getcache(struct wd_softc *, int *);
static int	wd_setcache(struct wd_softc *, int);

static void	wd_sysctl_attach(struct wd_softc *);
static void	wd_sysctl_detach(struct wd_softc *);

struct dkdriver wddkdriver = {
	.d_open = wdopen,
	.d_close = wdclose,
	.d_strategy = wdstrategy,
	.d_minphys = wdminphys,
	.d_diskstart = wd_diskstart,
	.d_dumpblocks = wd_dumpblocks,
	.d_iosize = wd_iosize,
	.d_firstopen = wd_firstopen,
	.d_lastclose = wd_lastclose,
	.d_discard = wd_discard
};

#ifdef HAS_BAD144_HANDLING
static void bad144intern(struct wd_softc *);
#endif

#define	WD_QUIRK_SPLIT_MOD15_WRITE	0x0001	/* must split certain writes */

#define	WD_QUIRK_FMT "\20\1SPLIT_MOD15_WRITE\2FORCE_LBA48"

/*
 * Quirk table for IDE drives.  Put more-specific matches first, since
 * a simple globbing routine is used for matching.
 */
static const struct wd_quirk {
	const char *wdq_match;		/* inquiry pattern to match */
	int wdq_quirks;			/* drive quirks */
} wd_quirk_table[] = {
	/*
	 * Some Seagate S-ATA drives have a PHY which can get confused
	 * with the way data is packetized by some S-ATA controllers.
	 *
	 * The work-around is to split in two any write transfer whose
	 * sector count % 15 == 1 (assuming 512 byte sectors).
	 *
	 * XXX This is an incomplete list.  There are at least a couple
	 * XXX more model numbers.  If you have trouble with such transfers
	 * XXX (8K is the most common) on Seagate S-ATA drives, please
	 * XXX notify thorpej (at) NetBSD.org.
	 *
	 * The ST360015AS has not yet been confirmed to have this
	 * issue; however, it is the only other drive in the
	 * Seagate Barracuda Serial ATA V family.
	 *
	 */
	{ "ST3120023AS",
	  WD_QUIRK_SPLIT_MOD15_WRITE },
	{ "ST380023AS",
	  WD_QUIRK_SPLIT_MOD15_WRITE },
	{ "ST360015AS",
	  WD_QUIRK_SPLIT_MOD15_WRITE },
	{ NULL,
	  0 }
};

static const struct wd_quirk *
wd_lookup_quirks(const char *name)
{
	const struct wd_quirk *wdq;
	const char *estr;

	for (wdq = wd_quirk_table; wdq->wdq_match != NULL; wdq++) {
		/*
		 * We only want exact matches (which include matches
		 * against globbing characters).
		 */
		if (pmatch(name, wdq->wdq_match, &estr) == 2)
			return (wdq);
	}
	return (NULL);
}

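/*
 * Autoconfiguration match: accept any drive on an ATA bus, honouring
 * the "drive" locator if one was given in the kernel configuration.
 */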
static int
wdprobe(device_t parent, cfdata_t match, void *aux)
{
	struct ata_device *adev = aux;

	if (adev == NULL)
		return 0;
	if (adev->adev_bustype->bustype_type != SCSIPI_BUSTYPE_ATA)
		return 0;

	if (match->cf_loc[ATA_HLCF_DRIVE] != ATA_HLCF_DRIVE_DEFAULT &&
	    match->cf_loc[ATA_HLCF_DRIVE] != adev->adev_drv_data->drive)
		return 0;
	return 1;
}

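/*
 * Attach: identify the drive, apply quirks, work out addressing mode,
 * capacity and block size, then hook the disk into the dk(4)/disk(9)
 * framework and discover wedges.
 */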
static void
wdattach(device_t parent, device_t self, void *aux)
{
	struct wd_softc *wd = device_private(self);
	struct dk_softc *dksc = &wd->sc_dksc;
	struct ata_device *adev = aux;
	int i, blank;
	char tbuf[41], pbuf[9], c, *p, *q;
	const struct wd_quirk *wdq;
	int dtype = DKTYPE_UNKNOWN;

	dksc->sc_dev = self;

	ATADEBUG_PRINT(("wdattach\n"), DEBUG_FUNCS | DEBUG_PROBE);
	mutex_init(&wd->sc_lock, MUTEX_DEFAULT, IPL_BIO);
#ifdef WD_SOFTBADSECT
	SLIST_INIT(&wd->sc_bslist);
#endif
	wd->atabus = adev->adev_bustype;
	wd->inflight = 0;
	wd->drvp = adev->adev_drv_data;

	wd->drvp->drv_openings = 1;
	wd->drvp->drv_done = wddone;
	wd->drvp->drv_softc = dksc->sc_dev; /* also set in
						atabusconfig_thread(),
						but that is too late */

	SLIST_INIT(&wd->sc_retry_list);
	SLIST_INIT(&wd->sc_requeue_list);
	callout_init(&wd->sc_retry_callout, 0);		/* XXX MPSAFE */
	callout_init(&wd->sc_requeue_callout, 0);	/* XXX MPSAFE */
	callout_init(&wd->sc_restart_diskqueue, 0);	/* XXX MPSAFE */

	aprint_naive("\n");
	aprint_normal("\n");

	/* read our drive info */
	if (wd_get_params(wd, AT_WAIT, &wd->sc_params) != 0) {
		aprint_error_dev(self, "IDENTIFY failed\n");
		goto out;
	}

	for (blank = 0, p = wd->sc_params.atap_model, q = tbuf, i = 0;
	    i < sizeof(wd->sc_params.atap_model); i++) {
		c = *p++;
		if (c == '\0')
			break;
		if (c != ' ') {
			if (blank) {
				*q++ = ' ';
				blank = 0;
			}
			*q++ = c;
		} else
			blank = 1;
	}
	*q++ = '\0';

	wd->sc_typename = kmem_asprintf("%s", tbuf);
	aprint_normal_dev(self, "<%s>\n", wd->sc_typename);

	wdq = wd_lookup_quirks(tbuf);
	if (wdq != NULL)
		wd->sc_quirks = wdq->wdq_quirks;

	if (wd->sc_quirks != 0) {
		char sbuf[sizeof(WD_QUIRK_FMT) + 64];
		snprintb(sbuf, sizeof(sbuf), WD_QUIRK_FMT, wd->sc_quirks);
		aprint_normal_dev(self, "quirks %s\n", sbuf);

		if (wd->sc_quirks & WD_QUIRK_SPLIT_MOD15_WRITE) {
			aprint_error_dev(self, "drive corrupts write transfers with certain controllers, consider replacing\n");
		}
	}

	if ((wd->sc_params.atap_multi & 0xff) > 1) {
		wd->drvp->multi = wd->sc_params.atap_multi & 0xff;
	} else {
		wd->drvp->multi = 1;
	}

	aprint_verbose_dev(self, "drive supports %d-sector PIO transfers,",
	    wd->drvp->multi);

	/* 48-bit LBA addressing */
	if ((wd->sc_params.atap_cmd2_en & ATA_CMD2_LBA48) != 0)
		wd->sc_flags |= WDF_LBA48;

	/* Prior to ATA-4, LBA was optional. */
	if ((wd->sc_params.atap_capabilities1 & WDC_CAP_LBA) != 0)
		wd->sc_flags |= WDF_LBA;
#if 0
	/* ATA-4 requires LBA. */
	if (wd->sc_params.atap_ataversion != 0xffff &&
	    wd->sc_params.atap_ataversion >= WDC_VER_ATA4)
		wd->sc_flags |= WDF_LBA;
#endif

	if ((wd->sc_flags & WDF_LBA48) != 0) {
		aprint_verbose(" LBA48 addressing\n");
		wd->sc_capacity =
		    ((uint64_t) wd->sc_params.atap_max_lba[3] << 48) |
		    ((uint64_t) wd->sc_params.atap_max_lba[2] << 32) |
		    ((uint64_t) wd->sc_params.atap_max_lba[1] << 16) |
		    ((uint64_t) wd->sc_params.atap_max_lba[0] <<  0);
		wd->sc_capacity28 =
		    (wd->sc_params.atap_capacity[1] << 16) |
		    wd->sc_params.atap_capacity[0];
	} else if ((wd->sc_flags & WDF_LBA) != 0) {
		aprint_verbose(" LBA addressing\n");
		wd->sc_capacity28 = wd->sc_capacity =
		    (wd->sc_params.atap_capacity[1] << 16) |
		    wd->sc_params.atap_capacity[0];
	} else {
		aprint_verbose(" chs addressing\n");
		wd->sc_capacity28 = wd->sc_capacity =
		    wd->sc_params.atap_cylinders *
		    wd->sc_params.atap_heads *
		    wd->sc_params.atap_sectors;
	}
	if ((wd->sc_params.atap_secsz & ATA_SECSZ_VALID_MASK) == ATA_SECSZ_VALID
	    && ((wd->sc_params.atap_secsz & ATA_SECSZ_LLS) != 0)) {
		wd->sc_blksize = 2ULL *
		    ((uint32_t)((wd->sc_params.atap_lls_secsz[1] << 16) |
		    wd->sc_params.atap_lls_secsz[0]));
	} else {
		wd->sc_blksize = 512;
	}
	wd->sc_capacity512 = (wd->sc_capacity * wd->sc_blksize) / DEV_BSIZE;
	format_bytes(pbuf, sizeof(pbuf), wd->sc_capacity * wd->sc_blksize);
	aprint_normal_dev(self, "%s, %d cyl, %d head, %d sec, "
	    "%d bytes/sect x %llu sectors\n",
	    pbuf,
	    (wd->sc_flags & WDF_LBA) ? (int)(wd->sc_capacity /
		(wd->sc_params.atap_heads * wd->sc_params.atap_sectors)) :
		wd->sc_params.atap_cylinders,
	    wd->sc_params.atap_heads, wd->sc_params.atap_sectors,
	    wd->sc_blksize, (unsigned long long)wd->sc_capacity);

	ATADEBUG_PRINT(("%s: atap_dmatiming_mimi=%d, atap_dmatiming_recom=%d\n",
	    device_xname(self), wd->sc_params.atap_dmatiming_mimi,
	    wd->sc_params.atap_dmatiming_recom), DEBUG_PROBE);

	if (wd->sc_blksize <= 0 || !powerof2(wd->sc_blksize) ||
	    wd->sc_blksize < DEV_BSIZE || wd->sc_blksize > MAXPHYS) {
		aprint_normal_dev(self, "WARNING: block size %u "
		    "might not actually work\n", wd->sc_blksize);
	}

	if (strcmp(wd->sc_params.atap_model, "ST506") == 0)
		dtype = DKTYPE_ST506;
	else
		dtype = DKTYPE_ESDI;

out:
	/*
	 * Initialize and attach the disk structure.
	 */
	dk_init(dksc, self, dtype);
	disk_init(&dksc->sc_dkdev, dksc->sc_xname, &wddkdriver);

	/* Attach dk and disk subsystems */
	dk_attach(dksc);
	disk_attach(&dksc->sc_dkdev);
	wd_set_geometry(wd);

	bufq_alloc(&dksc->sc_bufq, BUFQ_DISK_DEFAULT_STRAT, BUFQ_SORT_RAWBLOCK);

	/* reference to label structure, used by ata code */
	wd->drvp->lp = dksc->sc_dkdev.dk_label;

	/* Discover wedges on this disk. */
	dkwedge_discover(&dksc->sc_dkdev);

	if (!pmf_device_register1(self, wd_suspend, NULL, wd_shutdown))
		aprint_error_dev(self, "couldn't establish power handler\n");

	wd_sysctl_attach(wd);
}

static bool
wd_suspend(device_t dv, const pmf_qual_t *qual)
{
	struct wd_softc *sc = device_private(dv);

	/* the adapter needs to be enabled */
	if (sc->atabus->ata_addref(sc->drvp))
		return true; /* no need to complain */

	wd_flushcache(sc, AT_WAIT, false);
	wd_standby(sc, AT_WAIT);

	sc->atabus->ata_delref(sc->drvp);
	return true;
}

static int
wddetach(device_t self, int flags)
{
	struct wd_softc *wd = device_private(self);
	struct dk_softc *dksc = &wd->sc_dksc;
	int bmaj, cmaj, i, mn, rc;

	if ((rc = disk_begindetach(&dksc->sc_dkdev, wd_lastclose, self, flags)) != 0)
		return rc;

	/* locate the major number */
	bmaj = bdevsw_lookup_major(&wd_bdevsw);
	cmaj = cdevsw_lookup_major(&wd_cdevsw);

	/* Nuke the vnodes for any open instances. */
	for (i = 0; i < MAXPARTITIONS; i++) {
		mn = WDMINOR(device_unit(self), i);
		vdevgone(bmaj, mn, mn, VBLK);
		vdevgone(cmaj, mn, mn, VCHR);
	}

	dk_drain(dksc);

	/* Kill off any pending commands. */
	mutex_enter(&wd->sc_lock);
	wd->atabus->ata_killpending(wd->drvp);

	callout_halt(&wd->sc_retry_callout, &wd->sc_lock);
	callout_destroy(&wd->sc_retry_callout);
	callout_halt(&wd->sc_requeue_callout, &wd->sc_lock);
	callout_destroy(&wd->sc_requeue_callout);
	callout_halt(&wd->sc_restart_diskqueue, &wd->sc_lock);
	callout_destroy(&wd->sc_restart_diskqueue);

	mutex_exit(&wd->sc_lock);

	bufq_free(dksc->sc_bufq);

	/* Delete all of our wedges. */
	dkwedge_delall(&dksc->sc_dkdev);

	if (flags & DETACH_POWEROFF)
		wd_standby(wd, AT_POLL);

	/* Detach from the disk list. */
	disk_detach(&dksc->sc_dkdev);
	disk_destroy(&dksc->sc_dkdev);

	dk_detach(dksc);

#ifdef WD_SOFTBADSECT
	/* Clean out the bad sector list */
	while (!SLIST_EMPTY(&wd->sc_bslist)) {
		struct disk_badsectors *dbs = SLIST_FIRST(&wd->sc_bslist);
		SLIST_REMOVE_HEAD(&wd->sc_bslist, dbs_next);
		kmem_free(dbs, sizeof(*dbs));
	}
	wd->sc_bscount = 0;
#endif
	if (wd->sc_typename != NULL) {
		kmem_free(wd->sc_typename, strlen(wd->sc_typename) + 1);
		wd->sc_typename = NULL;
	}

	pmf_device_deregister(self);

	wd_sysctl_detach(wd);

	mutex_destroy(&wd->sc_lock);

	wd->drvp->drive_type = ATA_DRIVET_NONE; /* no drive any more here */
	wd->drvp->drive_flags = 0;

	return (0);
}

/*
 * Read/write routine for a buffer.  Validates the arguments and schedules the
 * transfer.  Does not wait for the transfer to complete.
 */
static void
wdstrategy(struct buf *bp)
{
	struct wd_softc *wd =
	    device_lookup_private(&wd_cd, WDUNIT(bp->b_dev));
	struct dk_softc *dksc = &wd->sc_dksc;

	ATADEBUG_PRINT(("wdstrategy (%s)\n", dksc->sc_xname),
	    DEBUG_XFERS);

	/* If device invalidated (e.g. media change, door open,
	 * device detachment), then error.
	 */
	if ((wd->sc_flags & WDF_LOADED) == 0 ||
	    !device_is_enabled(dksc->sc_dev))
		goto err;

#ifdef WD_SOFTBADSECT
	/*
	 * If the transfer about to be attempted contains a block that
	 * is known to be bad, return an error for the transfer without
	 * even attempting to start it, on the premise that we would
	 * just end up doing more retries for a transfer that will end
	 * up failing again.
	 */
	if (__predict_false(!SLIST_EMPTY(&wd->sc_bslist))) {
		struct disklabel *lp = dksc->sc_dkdev.dk_label;
		struct disk_badsectors *dbs;
		daddr_t blkno, maxblk;

		/* convert the block number to absolute */
		if (lp->d_secsize >= DEV_BSIZE)
			blkno = bp->b_blkno / (lp->d_secsize / DEV_BSIZE);
		else
			blkno = bp->b_blkno * (DEV_BSIZE / lp->d_secsize);
		if (WDPART(bp->b_dev) != RAW_PART)
			blkno += lp->d_partitions[WDPART(bp->b_dev)].p_offset;
		maxblk = blkno + (bp->b_bcount / wd->sc_blksize) - 1;

		mutex_enter(&wd->sc_lock);
		SLIST_FOREACH(dbs, &wd->sc_bslist, dbs_next)
			if ((dbs->dbs_min <= bp->b_rawblkno &&
			     bp->b_rawblkno <= dbs->dbs_max) ||
			    (dbs->dbs_min <= maxblk && maxblk <= dbs->dbs_max)){
				mutex_exit(&wd->sc_lock);
				goto err;
			}
		mutex_exit(&wd->sc_lock);
	}
#endif

	dk_strategy(dksc, bp);
	return;

err:
	bp->b_error = EIO;
	bp->b_resid = bp->b_bcount;
	biodone(bp);
}

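/*
 * Fill in the ata_bio part of an xfer from a buf and hand it to the
 * ATA bus.  Called with sc_lock held; also used to re-issue transfers
 * from the retry/requeue callouts.
 */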
static void
wdstart1(struct wd_softc *wd, struct buf *bp, struct ata_xfer *xfer)
{
	struct dk_softc *dksc = &wd->sc_dksc;

	KASSERT(bp == xfer->c_bio.bp || xfer->c_bio.bp == NULL);
	KASSERT((xfer->c_flags & (C_WAITACT|C_FREE)) == 0);

	/* Reset state, so that retries don't use stale info */
	if (__predict_false(xfer->c_retries > 0)) {
		xfer->c_flags = 0;
		memset(&xfer->c_bio, 0, sizeof(xfer->c_bio));
	}

	xfer->c_bio.blkno = bp->b_rawblkno;
	xfer->c_bio.bcount = bp->b_bcount;
	xfer->c_bio.databuf = bp->b_data;
	xfer->c_bio.blkdone = 0;
	xfer->c_bio.bp = bp;

#ifdef WD_CHAOS_MONKEY
	/*
	 * Override blkno to be over device capacity to trigger error,
	 * but only if it's a read, to avoid trashing disk contents should
	 * the command be clipped, or otherwise misinterpreted, by the
	 * driver or controller.
	 */
	if (BUF_ISREAD(bp) && xfer->c_retries == 0 && wd->drv_chaos_freq > 0 &&
	    (++wd->drv_chaos_cnt % wd->drv_chaos_freq) == 0) {
		device_printf(dksc->sc_dev, "%s: chaos xfer %"PRIxPTR"\n",
		    __func__, (intptr_t)xfer & PAGE_MASK);
		xfer->c_bio.blkno = 7777777 + wd->sc_capacity;
		xfer->c_flags |= C_CHAOS;
	}
#endif

	/*
	 * If we're retrying, retry in single-sector mode.  This will give us
	 * the sector number of the problem, and will eventually allow the
	 * transfer to succeed.  If FUA is requested, we can't actually
	 * do this, as ATA_SINGLE is usually executed as PIO transfer by drivers
	 * which support it, and that isn't compatible with NCQ/FUA.
	 */
	if (xfer->c_retries >= WDIORETRIES_SINGLE &&
	    (bp->b_flags & B_MEDIA_FUA) == 0)
		xfer->c_bio.flags = ATA_SINGLE;
	else
		xfer->c_bio.flags = 0;

	/*
	 * request LBA48 transfers when supported by the controller
	 * and needed by transfer offset or size.
	 */
	if (wd->sc_flags & WDF_LBA48 &&
	    (((xfer->c_bio.blkno +
	     xfer->c_bio.bcount / dksc->sc_dkdev.dk_geom.dg_secsize) >
	    wd->sc_capacity28) ||
	    ((xfer->c_bio.bcount / dksc->sc_dkdev.dk_geom.dg_secsize) > 128)))
		xfer->c_bio.flags |= ATA_LBA48;

	/*
	 * If NCQ was negotiated, always use it for the first several attempts.
	 * Since the device cancels all outstanding requests on error, downgrade
	 * to non-NCQ on retry, so that the retried transfer would not cause
	 * cascade failure for the other transfers if it fails again.
	 * If FUA was requested, we can't downgrade, as that would violate
	 * the semantics - FUA would not be honored.  In that case, continue
	 * retrying with NCQ.
	 */
	if (WD_USE_NCQ(wd) && (xfer->c_retries < WDIORETRIES_SINGLE ||
	    (bp->b_flags & B_MEDIA_FUA) != 0)) {
		xfer->c_bio.flags |= ATA_LBA48;
		xfer->c_flags |= C_NCQ;

		if (WD_USE_NCQ_PRIO(wd) &&
		    BIO_GETPRIO(bp) == BPRIO_TIMECRITICAL)
			xfer->c_bio.flags |= ATA_PRIO_HIGH;
	}

	if (wd->sc_flags & WDF_LBA)
		xfer->c_bio.flags |= ATA_LBA;
	if (bp->b_flags & B_READ)
		xfer->c_bio.flags |= ATA_READ;
	if (bp->b_flags & B_MEDIA_FUA) {
		/* If not using NCQ, the command WRITE DMA FUA EXT is LBA48 */
		KASSERT((wd->sc_flags & WDF_LBA48) != 0);
		if ((xfer->c_flags & C_NCQ) == 0)
			xfer->c_bio.flags |= ATA_LBA48;

		xfer->c_bio.flags |= ATA_FUA;
	}

	wd->inflight++;
	switch (wd->atabus->ata_bio(wd->drvp, xfer)) {
	case ATACMD_TRY_AGAIN:
		panic("wdstart1: try again");
		break;
	case ATACMD_QUEUED:
	case ATACMD_COMPLETE:
		break;
	default:
		panic("wdstart1: bad return code from ata_bio()");
	}
}

static int
wd_diskstart(device_t dev, struct buf *bp)
{
	struct wd_softc *wd = device_private(dev);
#ifdef ATADEBUG
	struct dk_softc *dksc = &wd->sc_dksc;
#endif
	struct ata_xfer *xfer;
	struct ata_channel *chp;
	unsigned openings;

	mutex_enter(&wd->sc_lock);

	chp = wd->drvp->chnl_softc;

	ata_channel_lock(chp);
	openings = ata_queue_openings(chp);
	ata_channel_unlock(chp);

	openings = uimin(openings, wd->drvp->drv_openings);

	if (wd->inflight >= openings) {
		mutex_exit(&wd->sc_lock);
		return EAGAIN;
	}

	xfer = ata_get_xfer(chp, false);
	if (xfer == NULL) {
		ATADEBUG_PRINT(("wd_diskstart %s no xfer\n",
		    dksc->sc_xname), DEBUG_XFERS);

		/*
		 * No available memory, retry later.  This happens very rarely
		 * and only under memory pressure, so wait relatively long
		 * before retrying.
		 */
		if (!callout_pending(&wd->sc_restart_diskqueue)) {
			callout_reset(&wd->sc_restart_diskqueue, hz / 2,
			    wdrestart, dev);
		}

		mutex_exit(&wd->sc_lock);
		return EAGAIN;
	}

	wdstart1(wd, bp, xfer);

	mutex_exit(&wd->sc_lock);

	return 0;
}

/*
 * Restart the disk queue after a deferred start (e.g. when no xfer
 * memory was available).
 */
static void
wdrestart(void *x)
{
	device_t self = x;
	struct wd_softc *wd = device_private(self);
	struct dk_softc *dksc = &wd->sc_dksc;

	ATADEBUG_PRINT(("wdstart %s\n", dksc->sc_xname),
	    DEBUG_XFERS);

	if (!device_is_active(dksc->sc_dev))
		return;

	dk_start(dksc, NULL);
}

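/*
 * Transfer completion handler: classify the result, schedule retries or
 * requeues for recoverable errors, record soft bad blocks for failed
 * reads, and finally hand the buf back to the dk(4) layer.
 */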
static void
wddone(device_t self, struct ata_xfer *xfer)
{
	struct wd_softc *wd = device_private(self);
	struct dk_softc *dksc = &wd->sc_dksc;
	const char *errmsg;
	int do_perror = 0;
	struct buf *bp;

	ATADEBUG_PRINT(("wddone %s\n", dksc->sc_xname),
	    DEBUG_XFERS);

	if (__predict_false(wddoingadump)) {
		/* just drop it to the floor */
		ata_free_xfer(wd->drvp->chnl_softc, xfer);
		return;
	}

	bp = xfer->c_bio.bp;
	KASSERT(bp != NULL);

	bp->b_resid = xfer->c_bio.bcount;
	switch (xfer->c_bio.error) {
	case ERR_DMA:
		errmsg = "DMA error";
		goto retry;
	case ERR_DF:
		errmsg = "device fault";
		goto retry;
	case TIMEOUT:
		errmsg = "device timeout";
		goto retry;
	case REQUEUE:
		errmsg = "requeue";
		goto retry2;
	case ERR_RESET:
		errmsg = "channel reset";
		goto retry2;
	case ERROR:
		/* Don't care about media change bits */
		if (xfer->c_bio.r_error != 0 &&
		    (xfer->c_bio.r_error & ~(WDCE_MC | WDCE_MCR)) == 0)
			goto noerror;
		errmsg = "error";
		do_perror = 1;
retry:		/* Just reset and retry.  Can we do more? */
		if ((xfer->c_flags & C_RECOVERED) == 0) {
			int wflags = (xfer->c_flags & C_POLL) ? AT_POLL : 0;
			ata_channel_lock(wd->drvp->chnl_softc);
			ata_thread_run(wd->drvp->chnl_softc, wflags,
			    ATACH_TH_DRIVE_RESET, wd->drvp->drive);
			ata_channel_unlock(wd->drvp->chnl_softc);
		}
retry2:
		mutex_enter(&wd->sc_lock);

		diskerr(bp, "wd", errmsg, LOG_PRINTF,
		    xfer->c_bio.blkdone, dksc->sc_dkdev.dk_label);
		if (xfer->c_retries < WDIORETRIES)
			printf(", xfer %"PRIxPTR", retry %d",
			    (intptr_t)xfer & PAGE_MASK,
			    xfer->c_retries);
		printf("\n");
		if (do_perror)
			wdperror(wd, xfer);

		if (xfer->c_retries < WDIORETRIES) {
			xfer->c_retries++;

			/* Rerun ASAP if just requeued */
			if (xfer->c_bio.error == REQUEUE) {
				SLIST_INSERT_HEAD(&wd->sc_requeue_list, xfer,
				    c_retrychain);
				callout_reset(&wd->sc_requeue_callout,
				    1, wdbiorequeue, wd);
			} else {
				SLIST_INSERT_HEAD(&wd->sc_retry_list, xfer,
				    c_retrychain);
				callout_reset(&wd->sc_retry_callout,
				    RECOVERYTIME, wdbioretry, wd);
			}

			mutex_exit(&wd->sc_lock);
			return;
		}

		mutex_exit(&wd->sc_lock);

#ifdef WD_SOFTBADSECT
		/*
		 * Not all errors indicate a failed block, but for those that
		 * do, put the block on the bad-block list for the device.
		 * Only do this for reads, because the drive should do it
		 * itself for writes, according to Manuel.
		 */
		if ((bp->b_flags & B_READ) &&
		    ((wd->drvp->ata_vers >= 4 && xfer->c_bio.r_error & 64) ||
		     (wd->drvp->ata_vers < 4 && xfer->c_bio.r_error & 192))) {
			struct disk_badsectors *dbs;

			dbs = kmem_zalloc(sizeof *dbs, KM_NOSLEEP);
			if (dbs == NULL) {
				aprint_error_dev(dksc->sc_dev,
				    "failed to add bad block to list\n");
				goto out;
			}

			dbs->dbs_min = bp->b_rawblkno;
			dbs->dbs_max = dbs->dbs_min +
			    (bp->b_bcount / wd->sc_blksize) - 1;
			microtime(&dbs->dbs_failedat);

			mutex_enter(&wd->sc_lock);
			SLIST_INSERT_HEAD(&wd->sc_bslist, dbs, dbs_next);
			wd->sc_bscount++;
			mutex_exit(&wd->sc_lock);
		}
out:
#endif
		bp->b_error = EIO;
		break;
	case NOERROR:
#ifdef WD_CHAOS_MONKEY
		/*
		 * For example Parallels AHCI emulation doesn't actually
		 * return an error for the invalid I/O, so just re-run
		 * the request and do not panic.
		 */
		if (__predict_false(xfer->c_flags & C_CHAOS)) {
			xfer->c_bio.error = REQUEUE;
			errmsg = "chaos noerror";
			goto retry2;
		}
#endif

noerror:	if ((xfer->c_bio.flags & ATA_CORR) || xfer->c_retries > 0)
			device_printf(dksc->sc_dev,
			    "soft error (corrected) xfer %"PRIxPTR"\n",
			    (intptr_t)xfer & PAGE_MASK);
		break;
	case ERR_NODEV:
		bp->b_error = EIO;
		break;
	}
	if (__predict_false(bp->b_error != 0) && bp->b_resid == 0) {
		/*
		 * The disk or controller sometimes reports a complete
		 * xfer when there has been an error.  This is wrong;
		 * assume nothing got transferred in this case.
		 */
		bp->b_resid = bp->b_bcount;
	}

	ata_free_xfer(wd->drvp->chnl_softc, xfer);

	wd->inflight--;
	dk_done(dksc, bp);
	dk_start(dksc, NULL);
}

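/*
 * Callout handlers: re-issue the transfers that wddone() put on the
 * retry or requeue lists.  The requeue list is rerun almost immediately,
 * the retry list after RECOVERYTIME.
 */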
static void
wdbioretry(void *v)
{
	struct wd_softc *wd = v;
	struct ata_xfer *xfer;

	ATADEBUG_PRINT(("%s %s\n", __func__, wd->sc_dksc.sc_xname),
	    DEBUG_XFERS);

	mutex_enter(&wd->sc_lock);
	while ((xfer = SLIST_FIRST(&wd->sc_retry_list))) {
		SLIST_REMOVE_HEAD(&wd->sc_retry_list, c_retrychain);
		wdstart1(wd, xfer->c_bio.bp, xfer);
	}
	mutex_exit(&wd->sc_lock);
}

static void
wdbiorequeue(void *v)
{
	struct wd_softc *wd = v;
	struct ata_xfer *xfer;

	ATADEBUG_PRINT(("%s %s\n", __func__, wd->sc_dksc.sc_xname),
	    DEBUG_XFERS);

	mutex_enter(&wd->sc_lock);
	while ((xfer = SLIST_FIRST(&wd->sc_requeue_list))) {
		SLIST_REMOVE_HEAD(&wd->sc_requeue_list, c_retrychain);
		wdstart1(wd, xfer->c_bio.bp, xfer);
	}
	mutex_exit(&wd->sc_lock);
}

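/*
 * Clamp a transfer to the per-command sector limit for this drive
 * before applying the generic minphys() limit.
 */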
static void
wdminphys(struct buf *bp)
{
	const struct wd_softc * const wd =
	    device_lookup_private(&wd_cd, WDUNIT(bp->b_dev));
	int maxsectors;

	/*
	 * The limit is actually 65536 for LBA48 and 256 for non-LBA48,
	 * but that requires setting the count for the ATA command
	 * to 0, which is somewhat error prone, so better to stay safe.
	 */
	if (wd->sc_flags & WDF_LBA48)
		maxsectors = 65535;
	else
		maxsectors = 128;

	if (bp->b_bcount > (wd->sc_blksize * maxsectors))
		bp->b_bcount = (wd->sc_blksize * maxsectors);

	minphys(bp);
}


static void
wd_iosize(device_t dev, int *count)
{
	struct buf B;
	int bmaj;

	bmaj = bdevsw_lookup_major(&wd_bdevsw);
	B.b_dev = MAKEWDDEV(bmaj, device_unit(dev), RAW_PART);
	B.b_bcount = *count;

	wdminphys(&B);

	*count = B.b_bcount;
}

static int
wdread(dev_t dev, struct uio *uio, int flags)
{

	ATADEBUG_PRINT(("wdread\n"), DEBUG_XFERS);
	return (physio(wdstrategy, NULL, dev, B_READ, wdminphys, uio));
}

static int
wdwrite(dev_t dev, struct uio *uio, int flags)
{

	ATADEBUG_PRINT(("wdwrite\n"), DEBUG_XFERS);
	return (physio(wdstrategy, NULL, dev, B_WRITE, wdminphys, uio));
}

static int
wdopen(dev_t dev, int flag, int fmt, struct lwp *l)
{
	struct wd_softc *wd;
	struct dk_softc *dksc;
	int unit, part, error;

	ATADEBUG_PRINT(("wdopen\n"), DEBUG_FUNCS);
	unit = WDUNIT(dev);
	wd = device_lookup_private(&wd_cd, unit);
	if (wd == NULL)
		return (ENXIO);
	dksc = &wd->sc_dksc;

	if (! device_is_active(dksc->sc_dev))
		return (ENODEV);

	part = WDPART(dev);

	if (wd->sc_capacity == 0)
		return (ENODEV);

	/*
	 * If any partition is open, but the disk has been invalidated,
	 * disallow further opens.
	 */
	if ((wd->sc_flags & (WDF_OPEN | WDF_LOADED)) == WDF_OPEN) {
		if (part != RAW_PART || fmt != S_IFCHR)
			return EIO;
	}

	error = dk_open(dksc, dev, flag, fmt, l);

	return error;
}

/*
 * Serialized by caller
 */
static int
wd_firstopen(device_t self, dev_t dev, int flag, int fmt)
{
	struct wd_softc *wd = device_private(self);
	struct dk_softc *dksc = &wd->sc_dksc;
	int error;

	error = wd->atabus->ata_addref(wd->drvp);
	if (error)
		return error;

	if ((wd->sc_flags & WDF_LOADED) == 0) {
		int param_error;

		/* Load the physical device parameters. */
		param_error = wd_get_params(wd, AT_WAIT, &wd->sc_params);
		if (param_error != 0) {
			aprint_error_dev(dksc->sc_dev, "IDENTIFY failed\n");
			error = EIO;
			goto bad;
		}
		wd_set_geometry(wd);
		wd->sc_flags |= WDF_LOADED;
	}

	wd->sc_flags |= WDF_OPEN;
	return 0;

bad:
	wd->atabus->ata_delref(wd->drvp);
	return error;
}

/*
 * Caller must hold wd->sc_dk.dk_openlock.
 */
static int
wd_lastclose(device_t self)
{
	struct wd_softc *wd = device_private(self);

	KASSERTMSG(bufq_peek(wd->sc_dksc.sc_bufq) == NULL, "bufq not empty");

	wd_flushcache(wd, AT_WAIT, false);

	wd->atabus->ata_delref(wd->drvp);
	wd->sc_flags &= ~WDF_OPEN;

	return 0;
}

static int
wdclose(dev_t dev, int flag, int fmt, struct lwp *l)
{
	struct wd_softc *wd;
	struct dk_softc *dksc;
	int unit;

	unit = WDUNIT(dev);
	wd = device_lookup_private(&wd_cd, unit);
	dksc = &wd->sc_dksc;

	return dk_close(dksc, dev, flag, fmt, l);
}

void
wdperror(const struct wd_softc *wd, struct ata_xfer *xfer)
{
	static const char *const errstr0_3[] = {"address mark not found",
	    "track 0 not found", "aborted command", "media change requested",
	    "id not found", "media changed", "uncorrectable data error",
	    "bad block detected"};
	static const char *const errstr4_5[] = {
	    "obsolete (address mark not found)",
	    "no media/write protected", "aborted command",
	    "media change requested", "id not found", "media changed",
	    "uncorrectable data error", "interface CRC error"};
	const char *const *errstr;
	int i;
	const char *sep = "";

	const struct dk_softc *dksc = &wd->sc_dksc;
	const char *devname = dksc->sc_xname;
	struct ata_drive_datas *drvp = wd->drvp;
	int errno = xfer->c_bio.r_error;

	if (drvp->ata_vers >= 4)
		errstr = errstr4_5;
	else
		errstr = errstr0_3;

	printf("%s: (", devname);

	if (errno == 0)
		printf("error not notified");

	for (i = 0; i < 8; i++) {
		if (errno & (1 << i)) {
			printf("%s%s", sep, errstr[i]);
			sep = ", ";
		}
	}
	printf(")\n");
}

int
wdioctl(dev_t dev, u_long cmd, void *addr, int flag, struct lwp *l)
{
	struct wd_softc *wd =
	    device_lookup_private(&wd_cd, WDUNIT(dev));
	struct dk_softc *dksc = &wd->sc_dksc;

	ATADEBUG_PRINT(("wdioctl\n"), DEBUG_FUNCS);

	if ((wd->sc_flags & WDF_LOADED) == 0)
		return EIO;

	switch (cmd) {
#ifdef HAS_BAD144_HANDLING
	case DIOCSBAD:
		if ((flag & FWRITE) == 0)
			return EBADF;
		dksc->sc_dkdev.dk_cpulabel->bad = *(struct dkbad *)addr;
		dksc->sc_dkdev.dk_label->d_flags |= D_BADSECT;
		bad144intern(wd);
		return 0;
#endif
#ifdef WD_SOFTBADSECT
	case DIOCBSLIST:
	{
		uint32_t count, missing, skip;
		struct disk_badsecinfo dbsi;
		struct disk_badsectors *dbs;
		size_t available;
		uint8_t *laddr;

		dbsi = *(struct disk_badsecinfo *)addr;
		missing = wd->sc_bscount;
		count = 0;
		available = dbsi.dbsi_bufsize;
		skip = dbsi.dbsi_skip;
		laddr = (uint8_t *)dbsi.dbsi_buffer;

		/*
		 * We start this loop with the expectation that all of the
		 * entries will be missed and decrement this counter each
		 * time we either skip over one (already copied out) or
		 * we actually copy it back to user space.  The structs
		 * holding the bad sector information are copied directly
		 * back to user space whilst the summary is returned via
		 * the struct passed in via the ioctl.
		 */
		mutex_enter(&wd->sc_lock);
		SLIST_FOREACH(dbs, &wd->sc_bslist, dbs_next) {
			if (skip > 0) {
				missing--;
				skip--;
				continue;
			}
			if (available < sizeof(*dbs))
				break;
			available -= sizeof(*dbs);
			copyout(dbs, laddr, sizeof(*dbs));
			laddr += sizeof(*dbs);
			missing--;
			count++;
		}
		mutex_exit(&wd->sc_lock);
		dbsi.dbsi_left = missing;
		dbsi.dbsi_copied = count;
		*(struct disk_badsecinfo *)addr = dbsi;
		return 0;
	}

	case DIOCBSFLUSH:
		/* Clean out the bad sector list */
		mutex_enter(&wd->sc_lock);
		while (!SLIST_EMPTY(&wd->sc_bslist)) {
			struct disk_badsectors *dbs =
			    SLIST_FIRST(&wd->sc_bslist);
			SLIST_REMOVE_HEAD(&wd->sc_bslist, dbs_next);
			kmem_free(dbs, sizeof(*dbs));
		}
		mutex_exit(&wd->sc_lock);
		wd->sc_bscount = 0;
		return 0;
#endif

#ifdef notyet
	case DIOCWFORMAT:
		if ((flag & FWRITE) == 0)
			return EBADF;
		{
		register struct format_op *fop;
		struct iovec aiov;
		struct uio auio;
		int error1;

		fop = (struct format_op *)addr;
		aiov.iov_base = fop->df_buf;
		aiov.iov_len = fop->df_count;
		auio.uio_iov = &aiov;
		auio.uio_iovcnt = 1;
		auio.uio_resid = fop->df_count;
		auio.uio_offset =
		    fop->df_startblk * wd->sc_dk.dk_label->d_secsize;
		auio.uio_vmspace = l->l_proc->p_vmspace;
		error1 = physio(wdformat, NULL, dev, B_WRITE, wdminphys,
		    &auio);
		fop->df_count -= auio.uio_resid;
		fop->df_reg[0] = wdc->sc_status;
		fop->df_reg[1] = wdc->sc_error;
		return error1;
		}
#endif
	case DIOCGCACHE:
		return wd_getcache(wd, (int *)addr);

	case DIOCSCACHE:
		return wd_setcache(wd, *(int *)addr);

	case DIOCCACHESYNC:
		return wd_flushcache(wd, AT_WAIT, true);

	case ATAIOCCOMMAND:
		/*
		 * Make sure this command is (relatively) safe first
		 */
		if ((((atareq_t *) addr)->flags & ATACMD_READ) == 0 &&
		    (flag & FWRITE) == 0)
			return (EBADF);
		{
		struct wd_ioctl *wi;
		atareq_t *atareq = (atareq_t *) addr;
		int error1;

		wi = wi_get(wd);
		wi->wi_atareq = *atareq;

		if (atareq->datalen && atareq->flags &
		    (ATACMD_READ | ATACMD_WRITE)) {
			void *tbuf;
			if (atareq->datalen < DEV_BSIZE
			    && atareq->command == WDCC_IDENTIFY) {
				tbuf = kmem_zalloc(DEV_BSIZE, KM_SLEEP);
				wi->wi_iov.iov_base = tbuf;
				wi->wi_iov.iov_len = DEV_BSIZE;
				UIO_SETUP_SYSSPACE(&wi->wi_uio);
			} else {
				tbuf = NULL;
				wi->wi_iov.iov_base = atareq->databuf;
				wi->wi_iov.iov_len = atareq->datalen;
				wi->wi_uio.uio_vmspace = l->l_proc->p_vmspace;
			}
			wi->wi_uio.uio_iov = &wi->wi_iov;
			wi->wi_uio.uio_iovcnt = 1;
			wi->wi_uio.uio_resid = atareq->datalen;
			wi->wi_uio.uio_offset = 0;
			wi->wi_uio.uio_rw =
			    (atareq->flags & ATACMD_READ) ? B_READ : B_WRITE;
			error1 = physio(wdioctlstrategy, &wi->wi_bp, dev,
			    (atareq->flags & ATACMD_READ) ? B_READ : B_WRITE,
			    wdminphys, &wi->wi_uio);
			if (tbuf != NULL && error1 == 0) {
				error1 = copyout(tbuf, atareq->databuf,
				    atareq->datalen);
				kmem_free(tbuf, DEV_BSIZE);
			}
		} else {
			/* No need to call physio if we don't have any
			   user data */
			wi->wi_bp.b_flags = 0;
			wi->wi_bp.b_data = 0;
			wi->wi_bp.b_bcount = 0;
			wi->wi_bp.b_dev = dev;
			wi->wi_bp.b_proc = l->l_proc;
			wdioctlstrategy(&wi->wi_bp);
			error1 = wi->wi_bp.b_error;
		}
		*atareq = wi->wi_atareq;
		wi_free(wi);
		return(error1);
		}

	default:
		return dk_ioctl(dksc, dev, cmd, addr, flag, l);
	}

#ifdef DIAGNOSTIC
	panic("wdioctl: impossible");
#endif
}

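/*
 * dk(4) discard backend: translate a byte range into sector-aligned
 * DSM TRIM requests, split into chunks the command can express.
 */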
static int
wd_discard(device_t dev, off_t pos, off_t len)
{
	struct wd_softc *wd = device_private(dev);
	daddr_t bno;
	long size, done;
	long maxatonce, amount;
	int result;

	if (!(wd->sc_params.atap_ata_major & WDC_VER_ATA7)
	    || !(wd->sc_params.support_dsm & ATA_SUPPORT_DSM_TRIM)) {
		/* not supported; ignore request */
		ATADEBUG_PRINT(("wddiscard (unsupported)\n"), DEBUG_FUNCS);
		return 0;
	}
	maxatonce = 0xffff;	/*wd->sc_params.max_dsm_blocks*/

	ATADEBUG_PRINT(("wddiscard\n"), DEBUG_FUNCS);

	if ((wd->sc_flags & WDF_LOADED) == 0)
		return EIO;

	/* round the start up and the end down */
	bno = (pos + wd->sc_blksize - 1) / wd->sc_blksize;
	size = ((pos + len) / wd->sc_blksize) - bno;

	done = 0;
	while (done < size) {
		amount = size - done;
		if (amount > maxatonce) {
			amount = maxatonce;
		}
		result = wd_trim(wd, bno + done, amount);
		if (result) {
			return result;
		}
		done += amount;
	}
	return 0;
}

static int
wddiscard(dev_t dev, off_t pos, off_t len)
{
	struct wd_softc *wd;
	struct dk_softc *dksc;
	int unit;

	unit = WDUNIT(dev);
	wd = device_lookup_private(&wd_cd, unit);
	dksc = &wd->sc_dksc;

	return dk_discard(dksc, dev, pos, len);
}

#ifdef B_FORMAT
int
wdformat(struct buf *bp)
{

	bp->b_flags |= B_FORMAT;
	return wdstrategy(bp);
}
#endif

int
wdsize(dev_t dev)
{
	struct wd_softc *wd;
	struct dk_softc *dksc;
	int unit;

	ATADEBUG_PRINT(("wdsize\n"), DEBUG_FUNCS);

	unit = WDUNIT(dev);
	wd = device_lookup_private(&wd_cd, unit);
	if (wd == NULL)
		return (-1);
	dksc = &wd->sc_dksc;

	if (!device_is_active(dksc->sc_dev))
		return (-1);

	return dk_size(dksc, dev);
}

/*
 * Dump core after a system crash.
 */
static int
wddump(dev_t dev, daddr_t blkno, void *va, size_t size)
{
	struct wd_softc *wd;
	struct dk_softc *dksc;
	int unit;

	/* Check if recursive dump; if so, punt. */
	if (wddoingadump)
		return EFAULT;
	wddoingadump = 1;

	unit = WDUNIT(dev);
	wd = device_lookup_private(&wd_cd, unit);
	if (wd == NULL)
		return (ENXIO);
	dksc = &wd->sc_dksc;

	return dk_dump(dksc, dev, blkno, va, size);
}

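/*
 * dk(4) dump backend: write nblk blocks from va at blkno using a
 * polled transfer and the statically allocated dump_xfer, so no
 * memory has to be allocated at dump time.
 */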
static int
wd_dumpblocks(device_t dev, void *va, daddr_t blkno, int nblk)
{
	struct wd_softc *wd = device_private(dev);
	struct dk_softc *dksc = &wd->sc_dksc;
	struct disk_geom *dg = &dksc->sc_dkdev.dk_geom;
	struct ata_xfer *xfer = &wd->dump_xfer;
	int err;

	/* Recalibrate, if first dump transfer. */
	if (wddumprecalibrated == 0) {
		wddumprecalibrated = 1;
		ata_channel_lock(wd->drvp->chnl_softc);
		/* This will directly execute the reset due to AT_POLL */
		ata_thread_run(wd->drvp->chnl_softc, AT_POLL,
		    ATACH_TH_DRIVE_RESET, wd->drvp->drive);

		wd->drvp->state = RESET;
		ata_channel_unlock(wd->drvp->chnl_softc);
	}

	memset(xfer, 0, sizeof(*xfer));
	xfer->c_flags |= C_PRIVATE_ALLOC | C_SKIP_QUEUE;

	xfer->c_bio.blkno = blkno;
	xfer->c_bio.flags = ATA_POLL;
	if (wd->sc_flags & WDF_LBA48 &&
	    (xfer->c_bio.blkno + nblk) > wd->sc_capacity28)
		xfer->c_bio.flags |= ATA_LBA48;
	if (wd->sc_flags & WDF_LBA)
		xfer->c_bio.flags |= ATA_LBA;
	xfer->c_bio.bcount = nblk * dg->dg_secsize;
	xfer->c_bio.databuf = va;
#ifndef WD_DUMP_NOT_TRUSTED
	switch (err = wd->atabus->ata_bio(wd->drvp, xfer)) {
	case ATACMD_TRY_AGAIN:
		panic("wddump: try again");
		break;
	case ATACMD_QUEUED:
		panic("wddump: polled command has been queued");
		break;
	case ATACMD_COMPLETE:
		break;
	default:
		panic("wddump: unknown atacmd code %d", err);
	}
	switch (err = xfer->c_bio.error) {
	case TIMEOUT:
		printf("wddump: device timed out");
		err = EIO;
		break;
	case ERR_DF:
		printf("wddump: drive fault");
		err = EIO;
		break;
	case ERR_DMA:
		printf("wddump: DMA error");
		err = EIO;
		break;
	case ERROR:
		printf("wddump: ");
		wdperror(wd, xfer);
		err = EIO;
		break;
	case NOERROR:
		err = 0;
		break;
	default:
		panic("wddump: unknown error type %x", err);
	}

	if (err != 0) {
		printf("\n");
		return err;
	}
#else	/* WD_DUMP_NOT_TRUSTED */
	/* Let's just talk about this first... */
	printf("wd%d: dump addr 0x%x, cylin %d, head %d, sector %d\n",
	    unit, va, cylin, head, sector);
	delay(500 * 1000);	/* half a second */
#endif

	wddoingadump = 0;
	return 0;
}

#ifdef HAS_BAD144_HANDLING
/*
 * Internalize the bad sector table.
 */
void
bad144intern(struct wd_softc *wd)
{
	struct dk_softc *dksc = &wd->sc_dksc;
	struct dkbad *bt = &dksc->sc_dkdev.dk_cpulabel->bad;
	struct disklabel *lp = dksc->sc_dkdev.dk_label;
	int i = 0;

	ATADEBUG_PRINT(("bad144intern\n"), DEBUG_XFERS);

	for (; i < NBT_BAD; i++) {
		if (bt->bt_bad[i].bt_cyl == 0xffff)
			break;
		wd->drvp->badsect[i] =
		    bt->bt_bad[i].bt_cyl * lp->d_secpercyl +
		    (bt->bt_bad[i].bt_trksec >> 8) * lp->d_nsectors +
		    (bt->bt_bad[i].bt_trksec & 0xff);
	}
	for (; i < NBT_BAD+1; i++)
		wd->drvp->badsect[i] = -1;
}
#endif

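/*
 * Publish the drive's geometry and capacity to the disk(9) layer.
 */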
static void
wd_set_geometry(struct wd_softc *wd)
{
	struct dk_softc *dksc = &wd->sc_dksc;
	struct disk_geom *dg = &dksc->sc_dkdev.dk_geom;

	memset(dg, 0, sizeof(*dg));

	dg->dg_secperunit = wd->sc_capacity;
	dg->dg_secsize = wd->sc_blksize;
	dg->dg_nsectors = wd->sc_params.atap_sectors;
	dg->dg_ntracks = wd->sc_params.atap_heads;
	if ((wd->sc_flags & WDF_LBA) == 0)
		dg->dg_ncylinders = wd->sc_params.atap_cylinders;

	disk_set_info(dksc->sc_dev, &dksc->sc_dkdev, wd->sc_typename);
}

int
wd_get_params(struct wd_softc *wd, uint8_t flags, struct ataparams *params)
{

	switch (wd->atabus->ata_get_params(wd->drvp, flags, params)) {
	case CMD_AGAIN:
		return 1;
	case CMD_ERR:
		if (wd->drvp->drive_type != ATA_DRIVET_OLD)
			return 1;
		/*
		 * We `know' there's a drive here; just assume it's old.
		 * This geometry is only used to read the MBR and print a
		 * (false) attach message.
		 */
		strncpy(params->atap_model, "ST506",
		    sizeof params->atap_model);
		params->atap_config = ATA_CFG_FIXED;
		params->atap_cylinders = 1024;
		params->atap_heads = 8;
		params->atap_sectors = 17;
		params->atap_multi = 1;
		params->atap_capabilities1 = params->atap_capabilities2 = 0;
		wd->drvp->ata_vers = -1; /* Mark it as pre-ATA */
		/* FALLTHROUGH */
	case CMD_OK:
		return 0;
	default:
		panic("wd_get_params: bad return code from ata_get_params");
		/* NOTREACHED */
	}
}

int
wd_getcache(struct wd_softc *wd, int *bitsp)
{
	struct ataparams params;

	if (wd_get_params(wd, AT_WAIT, &params) != 0)
		return EIO;
	if (params.atap_cmd_set1 == 0x0000 ||
	    params.atap_cmd_set1 == 0xffff ||
	    (params.atap_cmd_set1 & WDC_CMD1_CACHE) == 0) {
		*bitsp = 0;
		return 0;
	}
	*bitsp = DKCACHE_WCHANGE | DKCACHE_READ;
	if (params.atap_cmd1_en & WDC_CMD1_CACHE)
		*bitsp |= DKCACHE_WRITE;

	if (WD_USE_NCQ(wd) || (wd->drvp->drive_flags & ATA_DRIVE_WFUA))
		*bitsp |= DKCACHE_FUA;

	return 0;
}

const char at_errbits[] = "\20\10ERROR\11TIMEOU\12DF";

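/*
 * Enable or disable the drive's write cache via SET FEATURES.
 * Read caching cannot be disabled and the setting cannot be saved;
 * such requests are rejected with EOPNOTSUPP.
 */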
int
wd_setcache(struct wd_softc *wd, int bits)
{
	struct dk_softc *dksc = &wd->sc_dksc;
	struct ataparams params;
	struct ata_xfer *xfer;
	int error;

	if (wd_get_params(wd, AT_WAIT, &params) != 0)
		return EIO;

	if (params.atap_cmd_set1 == 0x0000 ||
	    params.atap_cmd_set1 == 0xffff ||
	    (params.atap_cmd_set1 & WDC_CMD1_CACHE) == 0)
		return EOPNOTSUPP;

	if ((bits & DKCACHE_READ) == 0 ||
	    (bits & DKCACHE_SAVE) != 0)
		return EOPNOTSUPP;

	xfer = ata_get_xfer(wd->drvp->chnl_softc, true);

	xfer->c_ata_c.r_command = SET_FEATURES;
	xfer->c_ata_c.r_st_bmask = 0;
	xfer->c_ata_c.r_st_pmask = 0;
	xfer->c_ata_c.timeout = 30000; /* 30s timeout */
	xfer->c_ata_c.flags = AT_WAIT;
	if (bits & DKCACHE_WRITE)
		xfer->c_ata_c.r_features = WDSF_WRITE_CACHE_EN;
	else
		xfer->c_ata_c.r_features = WDSF_WRITE_CACHE_DS;
	if (wd->atabus->ata_exec_command(wd->drvp, xfer) != ATACMD_COMPLETE) {
		aprint_error_dev(dksc->sc_dev,
		    "wd_setcache command not complete\n");
		error = EIO;
		goto out;
	}

	if (xfer->c_ata_c.flags & (AT_ERROR | AT_TIMEOU | AT_DF)) {
		char sbuf[sizeof(at_errbits) + 64];
		snprintb(sbuf, sizeof(sbuf), at_errbits, xfer->c_ata_c.flags);
		aprint_error_dev(dksc->sc_dev, "wd_setcache: status=%s\n", sbuf);
		error = EIO;
		goto out;
	}

	error = 0;

out:
	ata_free_xfer(wd->drvp->chnl_softc, xfer);
	return error;
}

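/*
 * Issue STANDBY IMMEDIATE; used before suspend, shutdown and detach
 * to spin the drive down cleanly.
 */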
static int
wd_standby(struct wd_softc *wd, int flags)
{
	struct dk_softc *dksc = &wd->sc_dksc;
	struct ata_xfer *xfer;
	int error;

	aprint_debug_dev(dksc->sc_dev, "standby immediate\n");
	xfer = ata_get_xfer(wd->drvp->chnl_softc, true);

	xfer->c_ata_c.r_command = WDCC_STANDBY_IMMED;
	xfer->c_ata_c.r_st_bmask = WDCS_DRDY;
	xfer->c_ata_c.r_st_pmask = WDCS_DRDY;
	xfer->c_ata_c.flags = flags;
	xfer->c_ata_c.timeout = 30000; /* 30s timeout */
	if (wd->atabus->ata_exec_command(wd->drvp, xfer) != ATACMD_COMPLETE) {
		aprint_error_dev(dksc->sc_dev,
		    "standby immediate command didn't complete\n");
		error = EIO;
		goto out;
	}
	if (xfer->c_ata_c.flags & AT_ERROR) {
		if (xfer->c_ata_c.r_error == WDCE_ABRT) {
			/* command not supported */
			aprint_debug_dev(dksc->sc_dev,
			    "standby immediate not supported\n");
			error = ENODEV;
			goto out;
		}
	}
	if (xfer->c_ata_c.flags & (AT_ERROR | AT_TIMEOU | AT_DF)) {
		char sbuf[sizeof(at_errbits) + 64];
		snprintb(sbuf, sizeof(sbuf), at_errbits, xfer->c_ata_c.flags);
		aprint_error_dev(dksc->sc_dev, "wd_standby: status=%s\n", sbuf);
		error = EIO;
		goto out;
	}
	error = 0;

out:
	ata_free_xfer(wd->drvp->chnl_softc, xfer);
	return error;
}

int
wd_flushcache(struct wd_softc *wd, int flags, bool start_self)
{
	struct dk_softc *dksc = &wd->sc_dksc;
	struct ata_xfer *xfer;
	int error;

	/*
	 * WDCC_FLUSHCACHE has been standard since ATA-4, but some drives
	 * report only ATA-2 and still support it.
	 */
	if (wd->drvp->ata_vers < 4 &&
	    ((wd->sc_params.atap_cmd_set2 & WDC_CMD2_FC) == 0 ||
	    wd->sc_params.atap_cmd_set2 == 0xffff))
		return ENODEV;

	xfer = ata_get_xfer(wd->drvp->chnl_softc, true);

	if ((wd->sc_params.atap_cmd2_en & ATA_CMD2_LBA48) != 0 &&
	    (wd->sc_params.atap_cmd2_en & ATA_CMD2_FCE) != 0) {
		xfer->c_ata_c.r_command = WDCC_FLUSHCACHE_EXT;
		flags |= AT_LBA48;
	} else
		xfer->c_ata_c.r_command = WDCC_FLUSHCACHE;
	xfer->c_ata_c.r_st_bmask = WDCS_DRDY;
	xfer->c_ata_c.r_st_pmask = WDCS_DRDY;
	xfer->c_ata_c.flags = flags | AT_READREG;
	xfer->c_ata_c.timeout = 300000; /* 5m timeout */
	if (wd->atabus->ata_exec_command(wd->drvp, xfer) != ATACMD_COMPLETE) {
		aprint_error_dev(dksc->sc_dev,
		    "flush cache command didn't complete\n");
		error = EIO;
		goto out_xfer;
	}
	if (xfer->c_ata_c.flags & AT_ERROR) {
		if (xfer->c_ata_c.r_error == WDCE_ABRT) {
			/* command not supported */
			error = ENODEV;
			goto out_xfer;
		}
	}
	if (xfer->c_ata_c.flags & (AT_ERROR | AT_TIMEOU | AT_DF)) {
		char sbuf[sizeof(at_errbits) + 64];
		snprintb(sbuf, sizeof(sbuf), at_errbits, xfer->c_ata_c.flags);
		aprint_error_dev(dksc->sc_dev, "wd_flushcache: status=%s\n",
		    sbuf);
		error = EIO;
		goto out_xfer;
	}
	error = 0;

out_xfer:
	ata_free_xfer(wd->drvp->chnl_softc, xfer);
	return error;
}

/*
 * Execute TRIM command, assumes sleep context.
 */
static int
wd_trim(struct wd_softc *wd, daddr_t bno, long size)
{
	struct dk_softc *dksc = &wd->sc_dksc;
	struct ata_xfer *xfer;
	int error;
	unsigned char *req;

	xfer = ata_get_xfer(wd->drvp->chnl_softc, true);

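	/*
	 * Build the 512-byte DATA SET MANAGEMENT payload.  Each 8-byte
	 * entry is a 48-bit starting LBA followed by a 16-bit sector
	 * count; only the first entry is filled in here, the rest of
	 * the block stays zero.
	 */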
1862 req = kmem_zalloc(512, KM_SLEEP);
1863 req[0] = bno & 0xff;
1864 req[1] = (bno >> 8) & 0xff;
1865 req[2] = (bno >> 16) & 0xff;
1866 req[3] = (bno >> 24) & 0xff;
1867 req[4] = (bno >> 32) & 0xff;
1868 req[5] = (bno >> 40) & 0xff;
1869 req[6] = size & 0xff;
1870 req[7] = (size >> 8) & 0xff;
1871
1872 /*
1873 * XXX We could possibly use NCQ TRIM, which supports executing
1874 * this command concurrently. It would need some investigation, some
1875 * early or not so early disk firmware caused data loss with NCQ TRIM.
1876 * atastart() et.al would need to be adjusted to allow and support
1877 * running several non-I/O ATA commands in parallel.
1878 */
1879
1880 xfer->c_ata_c.r_command = ATA_DATA_SET_MANAGEMENT;
1881 xfer->c_ata_c.r_count = 1;
1882 xfer->c_ata_c.r_features = ATA_SUPPORT_DSM_TRIM;
1883 xfer->c_ata_c.r_st_bmask = WDCS_DRDY;
1884 xfer->c_ata_c.r_st_pmask = WDCS_DRDY;
1885 xfer->c_ata_c.timeout = 30000; /* 30s timeout */
1886 xfer->c_ata_c.data = req;
1887 xfer->c_ata_c.bcount = 512;
1888 xfer->c_ata_c.flags |= AT_WRITE | AT_WAIT;
1889 if (wd->atabus->ata_exec_command(wd->drvp, xfer) != ATACMD_COMPLETE) {
1890 aprint_error_dev(dksc->sc_dev,
1891 "trim command didn't complete\n");
1892 kmem_free(req, 512);
1893 error = EIO;
1894 goto out;
1895 }
1896 kmem_free(req, 512);
1897 if (xfer->c_ata_c.flags & AT_ERROR) {
1898 if (xfer->c_ata_c.r_error == WDCE_ABRT) {
1899 /* command not supported */
1900 error = ENODEV;
1901 goto out;
1902 }
1903 }
1904 if (xfer->c_ata_c.flags & (AT_ERROR | AT_TIMEOU | AT_DF)) {
1905 char sbuf[sizeof(at_errbits) + 64];
1906 snprintb(sbuf, sizeof(sbuf), at_errbits, xfer->c_ata_c.flags);
1907 aprint_error_dev(dksc->sc_dev, "wd_trim: status=%s\n",
1908 sbuf);
1909 error = EIO;
1910 goto out;
1911 }
1912 error = 0;
1913
1914 out:
1915 ata_free_xfer(wd->drvp->chnl_softc, xfer);
1916 return error;
1917 }
1918
1919 bool
1920 wd_shutdown(device_t dev, int how)
1921 {
1922 struct wd_softc *wd = device_private(dev);
1923
1924 /* the adapter needs to be enabled */
1925 if (wd->atabus->ata_addref(wd->drvp))
1926 return true; /* no need to complain */
1927
1928 wd_flushcache(wd, AT_POLL, false);
1929 if ((how & RB_POWERDOWN) == RB_POWERDOWN)
1930 wd_standby(wd, AT_POLL);
1931 return true;
1932 }
1933
1934 /*
1935 * Allocate space for a ioctl queue structure. Mostly taken from
1936 * scsipi_ioctl.c
1937 */
1938 struct wd_ioctl *
1939 wi_get(struct wd_softc *wd)
1940 {
1941 struct wd_ioctl *wi;
1942
1943 wi = kmem_zalloc(sizeof(struct wd_ioctl), KM_SLEEP);
1944 wi->wi_softc = wd;
1945 buf_init(&wi->wi_bp);
1946
1947 return (wi);
1948 }
1949
1950 /*
1951 * Free an ioctl structure and remove it from our list
1952 */
1953
1954 void
1955 wi_free(struct wd_ioctl *wi)
1956 {
1957 buf_destroy(&wi->wi_bp);
1958 kmem_free(wi, sizeof(*wi));
1959 }
1960
1961 /*
1962 * Find a wd_ioctl structure based on the struct buf.
1963 */
1964
1965 struct wd_ioctl *
1966 wi_find(struct buf *bp)
1967 {
1968 return container_of(bp, struct wd_ioctl, wi_bp);
1969 }
1970
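/*
 * Media read/write commands transfer data in units of the drive's logical
 * sector size; other data-in/out commands (IDENTIFY, SMART, ...) use fixed
 * 512-byte blocks.
 */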
1971 static uint
1972 wi_sector_size(const struct wd_ioctl * const wi)
1973 {
1974 switch (wi->wi_atareq.command) {
1975 case WDCC_READ:
1976 case WDCC_WRITE:
1977 case WDCC_READMULTI:
1978 case WDCC_WRITEMULTI:
1979 case WDCC_READDMA:
1980 case WDCC_WRITEDMA:
1981 case WDCC_READ_EXT:
1982 case WDCC_WRITE_EXT:
1983 case WDCC_READMULTI_EXT:
1984 case WDCC_WRITEMULTI_EXT:
1985 case WDCC_READDMA_EXT:
1986 case WDCC_WRITEDMA_EXT:
1987 case WDCC_READ_FPDMA_QUEUED:
1988 case WDCC_WRITE_FPDMA_QUEUED:
1989 return wi->wi_softc->sc_blksize;
1990 default:
1991 return 512;
1992 }
1993 }
1994
1995 /*
1996 * Ioctl pseudo strategy routine
1997 *
1998 * This is mostly stolen from scsipi_ioctl.c:scsistrategy(). What
1999 * happens here is:
2000 *
2001 * - wdioctl() queues a wd_ioctl structure.
2002 *
2003 * - wdioctl() calls physio/wdioctlstrategy based on whether or not
2004 * user space I/O is required. If physio() is called, physio() eventually
2005 * calls wdioctlstrategy().
2006 *
2007 * - In either case, wdioctlstrategy() calls wd->atabus->ata_exec_command()
2008 * to perform the actual command
2009 *
2010 	 * The reason for using the pseudo strategy routine is that, when
2011 	 * doing I/O to/from user space, physio _really_ wants to be in
2012 	 * the loop. We could put the entire buffer into the ioctl request
2013 	 * structure, but that won't scale if we want to do things like
2014 	 * download microcode.
2015 */
2016
2017 void
2018 wdioctlstrategy(struct buf *bp)
2019 {
2020 struct wd_ioctl *wi;
2021 struct ata_xfer *xfer;
2022 int error = 0;
2023
2024 wi = wi_find(bp);
2025 if (wi == NULL) {
2026 printf("wdioctlstrategy: "
2027 "No matching ioctl request found in queue\n");
2028 error = EINVAL;
2029 goto out2;
2030 }
2031
2032 xfer = ata_get_xfer(wi->wi_softc->drvp->chnl_softc, true);
2033
2034 /*
2035 * Abort if physio broke up the transfer
2036 */
2037
2038 if (bp->b_bcount != wi->wi_atareq.datalen) {
2039 printf("physio split wd ioctl request... cannot proceed\n");
2040 error = EIO;
2041 goto out;
2042 }
2043
2044 /*
2045 * Abort if we didn't get a buffer size that was a multiple of
2046 * our sector size (or overflows CHS/LBA28 sector count)
2047 */
2048
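	/* (1 << NBBY) == 256: the transfer must fit the 8-bit sector count register */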
2049 if ((bp->b_bcount % wi_sector_size(wi)) != 0 ||
2050 (bp->b_bcount / wi_sector_size(wi)) >=
2051 (1 << NBBY)) {
2052 error = EINVAL;
2053 goto out;
2054 }
2055
2056 /*
2057 * Make sure a timeout was supplied in the ioctl request
2058 */
2059
2060 if (wi->wi_atareq.timeout == 0) {
2061 error = EINVAL;
2062 goto out;
2063 }
2064
2065 if (wi->wi_atareq.flags & ATACMD_READ)
2066 xfer->c_ata_c.flags |= AT_READ;
2067 else if (wi->wi_atareq.flags & ATACMD_WRITE)
2068 xfer->c_ata_c.flags |= AT_WRITE;
2069
2070 if (wi->wi_atareq.flags & ATACMD_READREG)
2071 xfer->c_ata_c.flags |= AT_READREG;
2072
2073 if ((wi->wi_atareq.flags & ATACMD_LBA) != 0)
2074 xfer->c_ata_c.flags |= AT_LBA;
2075
2076 xfer->c_ata_c.flags |= AT_WAIT;
2077
2078 xfer->c_ata_c.timeout = wi->wi_atareq.timeout;
2079 xfer->c_ata_c.r_command = wi->wi_atareq.command;
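	/*
	 * Pack the CHS-style ioctl registers into a 28-bit LBA: bits 0-7
	 * sector number, bits 8-23 cylinder, bits 24-27 the low nibble of
	 * the head register.
	 */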
2080 xfer->c_ata_c.r_lba = ((wi->wi_atareq.head & 0x0f) << 24) |
2081 (wi->wi_atareq.cylinder << 8) |
2082 wi->wi_atareq.sec_num;
2083 xfer->c_ata_c.r_count = wi->wi_atareq.sec_count;
2084 xfer->c_ata_c.r_features = wi->wi_atareq.features;
2085 xfer->c_ata_c.r_st_bmask = WDCS_DRDY;
2086 xfer->c_ata_c.r_st_pmask = WDCS_DRDY;
2087 xfer->c_ata_c.data = wi->wi_bp.b_data;
2088 xfer->c_ata_c.bcount = wi->wi_bp.b_bcount;
2089
2090 if (wi->wi_softc->atabus->ata_exec_command(wi->wi_softc->drvp, xfer)
2091 != ATACMD_COMPLETE) {
2092 wi->wi_atareq.retsts = ATACMD_ERROR;
2093 error = EIO;
2094 goto out;
2095 }
2096
2097 if (xfer->c_ata_c.flags & (AT_ERROR | AT_TIMEOU | AT_DF)) {
2098 if (xfer->c_ata_c.flags & AT_ERROR) {
2099 wi->wi_atareq.retsts = ATACMD_ERROR;
2100 wi->wi_atareq.error = xfer->c_ata_c.r_error;
2101 } else if (xfer->c_ata_c.flags & AT_DF)
2102 wi->wi_atareq.retsts = ATACMD_DF;
2103 else
2104 wi->wi_atareq.retsts = ATACMD_TIMEOUT;
2105 } else {
2106 wi->wi_atareq.retsts = ATACMD_OK;
2107 if (wi->wi_atareq.flags & ATACMD_READREG) {
2108 wi->wi_atareq.command = xfer->c_ata_c.r_status;
2109 wi->wi_atareq.features = xfer->c_ata_c.r_error;
2110 wi->wi_atareq.sec_count = xfer->c_ata_c.r_count;
2111 wi->wi_atareq.sec_num = xfer->c_ata_c.r_lba & 0xff;
2112 wi->wi_atareq.head = (xfer->c_ata_c.r_device & 0xf0) |
2113 ((xfer->c_ata_c.r_lba >> 24) & 0x0f);
2114 wi->wi_atareq.cylinder =
2115 (xfer->c_ata_c.r_lba >> 8) & 0xffff;
2116 wi->wi_atareq.error = xfer->c_ata_c.r_error;
2117 }
2118 }
2119
2120 out:
2121 ata_free_xfer(wi->wi_softc->drvp->chnl_softc, xfer);
2122 out2:
2123 bp->b_error = error;
2124 if (error)
2125 bp->b_resid = bp->b_bcount;
2126 biodone(bp);
2127 }
2128
2129 static void
2130 wd_sysctl_attach(struct wd_softc *wd)
2131 {
2132 struct dk_softc *dksc = &wd->sc_dksc;
2133 const struct sysctlnode *node;
2134 int error;
2135
2136 /* sysctl set-up */
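	/*
	 * Nodes are created as hw.<xname>.use_ncq and hw.<xname>.use_ncq_prio
	 * (plus chaos_freq/chaos_cnt with WD_CHAOS_MONKEY), e.g. hw.wd0.use_ncq.
	 */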
2137 if (sysctl_createv(&wd->nodelog, 0, NULL, &node,
2138 0, CTLTYPE_NODE, dksc->sc_xname,
2139 SYSCTL_DESCR("wd driver settings"),
2140 NULL, 0, NULL, 0,
2141 CTL_HW, CTL_CREATE, CTL_EOL) != 0) {
2142 aprint_error_dev(dksc->sc_dev,
2143 "could not create %s.%s sysctl node\n",
2144 "hw", dksc->sc_xname);
2145 return;
2146 }
2147
2148 wd->drv_ncq = true;
2149 if ((error = sysctl_createv(&wd->nodelog, 0, NULL, NULL,
2150 CTLFLAG_READWRITE, CTLTYPE_BOOL, "use_ncq",
2151 SYSCTL_DESCR("use NCQ if supported"),
2152 NULL, 0, &wd->drv_ncq, 0,
2153 CTL_HW, node->sysctl_num, CTL_CREATE, CTL_EOL))
2154 != 0) {
2155 aprint_error_dev(dksc->sc_dev,
2156 "could not create %s.%s.use_ncq sysctl - error %d\n",
2157 "hw", dksc->sc_xname, error);
2158 return;
2159 }
2160
2161 wd->drv_ncq_prio = false;
2162 if ((error = sysctl_createv(&wd->nodelog, 0, NULL, NULL,
2163 CTLFLAG_READWRITE, CTLTYPE_BOOL, "use_ncq_prio",
2164 SYSCTL_DESCR("use NCQ PRIORITY if supported"),
2165 NULL, 0, &wd->drv_ncq_prio, 0,
2166 CTL_HW, node->sysctl_num, CTL_CREATE, CTL_EOL))
2167 != 0) {
2168 aprint_error_dev(dksc->sc_dev,
2169 "could not create %s.%s.use_ncq_prio sysctl - error %d\n",
2170 "hw", dksc->sc_xname, error);
2171 return;
2172 }
2173
2174 #ifdef WD_CHAOS_MONKEY
2175 wd->drv_chaos_freq = 0;
2176 if ((error = sysctl_createv(&wd->nodelog, 0, NULL, NULL,
2177 CTLFLAG_READWRITE, CTLTYPE_INT, "chaos_freq",
2178 SYSCTL_DESCR("simulated bio read error rate"),
2179 NULL, 0, &wd->drv_chaos_freq, 0,
2180 CTL_HW, node->sysctl_num, CTL_CREATE, CTL_EOL))
2181 != 0) {
2182 aprint_error_dev(dksc->sc_dev,
2183 "could not create %s.%s.chaos_freq sysctl - error %d\n",
2184 "hw", dksc->sc_xname, error);
2185 return;
2186 }
2187
2188 wd->drv_chaos_cnt = 0;
2189 if ((error = sysctl_createv(&wd->nodelog, 0, NULL, NULL,
2190 CTLFLAG_READONLY, CTLTYPE_INT, "chaos_cnt",
2191 SYSCTL_DESCR("number of processed bio reads"),
2192 NULL, 0, &wd->drv_chaos_cnt, 0,
2193 CTL_HW, node->sysctl_num, CTL_CREATE, CTL_EOL))
2194 != 0) {
2195 aprint_error_dev(dksc->sc_dev,
2196 "could not create %s.%s.chaos_cnt sysctl - error %d\n",
2197 "hw", dksc->sc_xname, error);
2198 return;
2199 }
2200 #endif
2201
2202 }
2203
2204 static void
2205 wd_sysctl_detach(struct wd_softc *wd)
2206 {
2207 sysctl_teardown(&wd->nodelog);
2208 }
2209
2210 #ifdef ATADEBUG
2211 int wddebug(void);
2212
2213 int
2214 wddebug(void)
2215 {
2216 struct wd_softc *wd;
2217 struct dk_softc *dksc;
2218 int unit;
2219
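	/* dump state for the first four wd units only */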
2220 for (unit = 0; unit <= 3; unit++) {
2221 wd = device_lookup_private(&wd_cd, unit);
2222 if (wd == NULL)
2223 continue;
2224 dksc = &wd->sc_dksc;
2225 printf("%s fl %x bufq %p:\n",
2226 dksc->sc_xname, wd->sc_flags, bufq_peek(dksc->sc_bufq));
2227
2228 atachannel_debug(wd->drvp->chnl_softc);
2229 }
2230 return 0;
2231 }
2232 #endif /* ATADEBUG */
2233