wd.c revision 1.473 1 /* $NetBSD: wd.c,v 1.473 2025/02/27 01:34:43 jakllsch Exp $ */
2
3 /*
4 * Copyright (c) 1998, 2001 Manuel Bouyer. All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 */
26
27 /*-
28 * Copyright (c) 1998, 2003, 2004 The NetBSD Foundation, Inc.
29 * All rights reserved.
30 *
31 * This code is derived from software contributed to The NetBSD Foundation
32 * by Charles M. Hannum and by Onno van der Linden.
33 *
34 * Redistribution and use in source and binary forms, with or without
35 * modification, are permitted provided that the following conditions
36 * are met:
37 * 1. Redistributions of source code must retain the above copyright
38 * notice, this list of conditions and the following disclaimer.
39 * 2. Redistributions in binary form must reproduce the above copyright
40 * notice, this list of conditions and the following disclaimer in the
41 * documentation and/or other materials provided with the distribution.
42 *
43 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
44 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
45 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
46 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
47 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
48 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
49 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
50 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
51 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
52 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
53 * POSSIBILITY OF SUCH DAMAGE.
54 */
55
56 #include <sys/cdefs.h>
57 __KERNEL_RCSID(0, "$NetBSD: wd.c,v 1.473 2025/02/27 01:34:43 jakllsch Exp $");
58
59 #include "opt_ata.h"
60 #include "opt_wd.h"
61
62 #include <sys/param.h>
63 #include <sys/systm.h>
64 #include <sys/kernel.h>
65 #include <sys/conf.h>
66 #include <sys/file.h>
67 #include <sys/stat.h>
68 #include <sys/ioctl.h>
69 #include <sys/buf.h>
70 #include <sys/bufq.h>
71 #include <sys/uio.h>
72 #include <sys/device.h>
73 #include <sys/disklabel.h>
74 #include <sys/disk.h>
75 #include <sys/syslog.h>
76 #include <sys/proc.h>
77 #include <sys/reboot.h>
78 #include <sys/vnode.h>
79 #include <sys/rndsource.h>
80
81 #include <sys/intr.h>
82 #include <sys/bus.h>
83
84 #include <dev/ata/atareg.h>
85 #include <dev/ata/atavar.h>
86 #include <dev/ata/wdvar.h>
87 #include <dev/ic/wdcreg.h>
88 #include <sys/ataio.h>
89 #include "locators.h"
90
91 #include <prop/proplib.h>
92
93 #define WDIORETRIES_SINGLE 4 /* number of retries for single-sector */
94 #define WDIORETRIES 5 /* number of retries before giving up */
95 #define RECOVERYTIME hz/2 /* time to wait before retrying a cmd */
96
97 #define WDUNIT(dev) DISKUNIT(dev)
98 #define WDPART(dev) DISKPART(dev)
99 #define WDMINOR(unit, part) DISKMINOR(unit, part)
100 #define MAKEWDDEV(maj, unit, part) MAKEDISKDEV(maj, unit, part)
101
102 #define WDLABELDEV(dev) (MAKEWDDEV(major(dev), WDUNIT(dev), RAW_PART))
103
104 #define DEBUG_FUNCS 0x08
105 #define DEBUG_PROBE 0x10
106 #define DEBUG_DETACH 0x20
107 #define DEBUG_XFERS 0x40
108 #ifdef ATADEBUG
109 #ifndef ATADEBUG_WD_MASK
110 #define ATADEBUG_WD_MASK 0x0
111 #endif
112 int wdcdebug_wd_mask = ATADEBUG_WD_MASK;
113 #define ATADEBUG_PRINT(args, level) \
114 if (wdcdebug_wd_mask & (level)) \
115 printf args
116 #else
117 #define ATADEBUG_PRINT(args, level)
118 #endif
119
120 static int wdprobe(device_t, cfdata_t, void *);
121 static void wdattach(device_t, device_t, void *);
122 static int wddetach(device_t, int);
123 static void wdperror(const struct wd_softc *, struct ata_xfer *);
124
125 static void wdminphys(struct buf *);
126
127 static int wd_firstopen(device_t, dev_t, int, int);
128 static int wd_lastclose(device_t);
129 static bool wd_suspend(device_t, const pmf_qual_t *);
130 static int wd_standby(struct wd_softc *, int);
131
132 CFATTACH_DECL3_NEW(wd, sizeof(struct wd_softc),
133 wdprobe, wdattach, wddetach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);
134
135 extern struct cfdriver wd_cd;
136
137 static dev_type_open(wdopen);
138 static dev_type_close(wdclose);
139 static dev_type_read(wdread);
140 static dev_type_write(wdwrite);
141 static dev_type_ioctl(wdioctl);
142 static dev_type_strategy(wdstrategy);
143 static dev_type_dump(wddump);
144 static dev_type_size(wdsize);
145 static dev_type_discard(wddiscard);
146
/*
 * Block device switch: entry points for the wd block device nodes.
 * Character and block switches share open/close/ioctl/discard handlers.
 */
const struct bdevsw wd_bdevsw = {
	.d_open = wdopen,
	.d_close = wdclose,
	.d_strategy = wdstrategy,
	.d_ioctl = wdioctl,
	.d_dump = wddump,		/* crash-dump entry point */
	.d_psize = wdsize,		/* partition size for dumps */
	.d_discard = wddiscard,		/* TRIM support */
	.d_cfdriver = &wd_cd,
	.d_devtounit = disklabel_dev_unit,
	.d_flag = D_DISK
};
159
/*
 * Character (raw) device switch for the wd driver.  Terminal/poll/mmap
 * entries are the standard no-op stubs since this is a disk.
 */
const struct cdevsw wd_cdevsw = {
	.d_open = wdopen,
	.d_close = wdclose,
	.d_read = wdread,
	.d_write = wdwrite,
	.d_ioctl = wdioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = nopoll,
	.d_mmap = nommap,
	.d_kqfilter = nokqfilter,
	.d_discard = wddiscard,
	.d_cfdriver = &wd_cd,
	.d_devtounit = disklabel_dev_unit,
	.d_flag = D_DISK
};
176
177 /* #define WD_DUMP_NOT_TRUSTED if you just want to watch */
178 static int wddoingadump = 0;
179 static int wddumprecalibrated = 0;
180
181 /*
182 * Glue necessary to hook WDCIOCCOMMAND into physio
183 */
184
/*
 * Per-request state used to funnel a WDCIOCCOMMAND ioctl through physio
 * (see the "Glue necessary to hook WDCIOCCOMMAND into physio" note above).
 * Allocated by wi_get(), looked up from the buf by wi_find(), released
 * by wi_free().
 */
struct wd_ioctl {
	LIST_ENTRY(wd_ioctl) wi_list;	/* linkage for wi_find() lookup list */
	struct buf wi_bp;		/* buf handed to physio */
	struct uio wi_uio;		/* describes the user data buffer */
	struct iovec wi_iov;		/* single iovec backing wi_uio */
	atareq_t wi_atareq;		/* the user's ATA request */
	struct wd_softc *wi_softc;	/* back-pointer to owning softc */
};
193
194 static struct wd_ioctl *wi_find(struct buf *);
195 static void wi_free(struct wd_ioctl *);
196 static struct wd_ioctl *wi_get(struct wd_softc *);
197 static void wdioctlstrategy(struct buf *);
198
199 static void wdrestart(void *);
200 static void wdstart1(struct wd_softc *, struct buf *, struct ata_xfer *);
201 static int wd_diskstart(device_t, struct buf *);
202 static int wd_dumpblocks(device_t, void *, daddr_t, int);
203 static void wd_iosize(device_t, int *);
204 static int wd_discard(device_t, off_t, off_t);
205 static void wdbioretry(void *);
206 static void wdbiorequeue(void *);
207 static void wddone(device_t, struct ata_xfer *);
208 static int wd_get_params(struct wd_softc *, struct ataparams *);
209 static void wd_set_geometry(struct wd_softc *);
210 static int wd_flushcache(struct wd_softc *, int);
211 static int wd_trim(struct wd_softc *, daddr_t, long);
212 static bool wd_shutdown(device_t, int);
213
214 static int wd_getcache(struct wd_softc *, int *);
215 static int wd_setcache(struct wd_softc *, int);
216
217 static void wd_sysctl_attach(struct wd_softc *);
218 static void wd_sysctl_detach(struct wd_softc *);
219
/*
 * Hooks handed to the dk(9) framework; dk_start() calls d_diskstart to
 * push queued bufs into the ATA layer.
 */
static const struct dkdriver wddkdriver = {
	.d_open = wdopen,
	.d_close = wdclose,
	.d_strategy = wdstrategy,
	.d_minphys = wdminphys,		/* clamp transfer size per drive */
	.d_diskstart = wd_diskstart,	/* issue one buf as an ata_xfer */
	.d_dumpblocks = wd_dumpblocks,	/* polled writes for crash dumps */
	.d_iosize = wd_iosize,
	.d_firstopen = wd_firstopen,
	.d_lastclose = wd_lastclose,
	.d_discard = wd_discard
};
232
233 #ifdef HAS_BAD144_HANDLING
234 static void bad144intern(struct wd_softc *);
235 #endif
236
237 #define WD_QUIRK_SPLIT_MOD15_WRITE 0x0001 /* must split certain writes */
238
239 #define WD_QUIRK_FMT "\20\1SPLIT_MOD15_WRITE"
240
241 /*
242 * Quirk table for IDE drives. Put more-specific matches first, since
243 * a simple globing routine is used for matching.
244 */
245 static const struct wd_quirk {
246 const char *wdq_match; /* inquiry pattern to match */
247 int wdq_quirks; /* drive quirks */
248 } wd_quirk_table[] = {
249 /*
250 * Some Seagate S-ATA drives have a PHY which can get confused
251 * with the way data is packetized by some S-ATA controllers.
252 *
253 * The work-around is to split in two any write transfer whose
254 * sector count % 15 == 1 (assuming 512 byte sectors).
255 *
256 * XXX This is an incomplete list. There are at least a couple
257 * XXX more model numbers. If you have trouble with such transfers
258 * XXX (8K is the most common) on Seagate S-ATA drives, please
259 * XXX notify thorpej (at) NetBSD.org.
260 *
261 * The ST360015AS has not yet been confirmed to have this
262 * issue, however, it is the only other drive in the
263 * Seagate Barracuda Serial ATA V family.
264 *
265 */
266 { "ST3120023AS", WD_QUIRK_SPLIT_MOD15_WRITE },
267 { "ST380023AS", WD_QUIRK_SPLIT_MOD15_WRITE },
268 { "ST360015AS", WD_QUIRK_SPLIT_MOD15_WRITE },
269 { NULL,
270 0 }
271 };
272
273 static const struct wd_quirk *
274 wd_lookup_quirks(const char *name)
275 {
276 const struct wd_quirk *wdq;
277 const char *estr;
278
279 for (wdq = wd_quirk_table; wdq->wdq_match != NULL; wdq++) {
280 /*
281 * We only want exact matches (which include matches
282 * against globbing characters).
283 */
284 if (pmatch(name, wdq->wdq_match, &estr) == 2)
285 return (wdq);
286 }
287 return (NULL);
288 }
289
290 static int
291 wdprobe(device_t parent, cfdata_t match, void *aux)
292 {
293 struct ata_device *adev = aux;
294
295 if (adev == NULL)
296 return 0;
297 if (adev->adev_bustype->bustype_type != SCSIPI_BUSTYPE_ATA)
298 return 0;
299
300 if (match->cf_loc[ATA_HLCF_DRIVE] != ATA_HLCF_DRIVE_DEFAULT &&
301 match->cf_loc[ATA_HLCF_DRIVE] != adev->adev_drv_data->drive)
302 return 0;
303 return 1;
304 }
305
/*
 * Attach a drive: run IDENTIFY, decode the model string, quirks,
 * transfer modes and capacity, then hook the drive into the dk(9)/disk(9)
 * frameworks, discover wedges and register power/sysctl handlers.
 *
 * Note: even if IDENTIFY fails we still fall through to "out:" and
 * attach the dk/disk structures, so the device node exists (with
 * DKTYPE_UNKNOWN and zero capacity).
 */
static void
wdattach(device_t parent, device_t self, void *aux)
{
	struct wd_softc *wd = device_private(self);
	struct dk_softc *dksc = &wd->sc_dksc;
	struct ata_device *adev= aux;
	int i, blank;
	char tbuf[41],pbuf[9], c, *p, *q;
	const struct wd_quirk *wdq;
	int dtype = DKTYPE_UNKNOWN;

	dksc->sc_dev = self;

	ATADEBUG_PRINT(("wdattach\n"), DEBUG_FUNCS | DEBUG_PROBE);
	mutex_init(&wd->sc_lock, MUTEX_DEFAULT, IPL_BIO);
#ifdef WD_SOFTBADSECT
	SLIST_INIT(&wd->sc_bslist);
	cv_init(&wd->sc_bslist_cv, "wdbadsect");
#endif
	wd->atabus = adev->adev_bustype;
	wd->inflight = 0;
	wd->drvp = adev->adev_drv_data;

	/* Single opening until NCQ parameters are known. */
	wd->drvp->drv_openings = 1;
	wd->drvp->drv_done = wddone;
	wd->drvp->drv_softc = dksc->sc_dev; /* done in atabusconfig_thread()
					       but too late */

	SLIST_INIT(&wd->sc_retry_list);
	SLIST_INIT(&wd->sc_requeue_list);
	callout_init(&wd->sc_retry_callout, 0);		/* XXX MPSAFE */
	callout_init(&wd->sc_requeue_callout, 0);	/* XXX MPSAFE */
	callout_init(&wd->sc_restart_diskqueue, 0);	/* XXX MPSAFE */

	aprint_naive("\n");
	aprint_normal("\n");

	/* read our drive info */
	if (wd_get_params(wd, &wd->sc_params) != 0) {
		aprint_error_dev(self, "IDENTIFY failed\n");
		goto out;
	}

	/*
	 * Squeeze runs of blanks out of the (space-padded) IDENTIFY model
	 * string into tbuf; single blanks between words are kept.
	 */
	for (blank = 0, p = wd->sc_params.atap_model, q = tbuf, i = 0;
	    i < sizeof(wd->sc_params.atap_model); i++) {
		c = *p++;
		if (c == '\0')
			break;
		if (c != ' ') {
			if (blank) {
				*q++ = ' ';
				blank = 0;
			}
			*q++ = c;
		} else
			blank = 1;
	}
	*q++ = '\0';

	wd->sc_typename = kmem_asprintf("%s", tbuf);
	aprint_normal_dev(self, "<%s>\n", wd->sc_typename);

	/* Check the cleaned-up model name against the quirk table. */
	wdq = wd_lookup_quirks(tbuf);
	if (wdq != NULL)
		wd->sc_quirks = wdq->wdq_quirks;

	if (wd->sc_quirks != 0) {
		char sbuf[sizeof(WD_QUIRK_FMT) + 64];
		snprintb(sbuf, sizeof(sbuf), WD_QUIRK_FMT, wd->sc_quirks);
		aprint_normal_dev(self, "quirks %s\n", sbuf);

		if (wd->sc_quirks & WD_QUIRK_SPLIT_MOD15_WRITE) {
			aprint_error_dev(self, "drive corrupts write transfers with certain controllers, consider replacing\n");
		}
	}

	/* Multi-sector PIO count, low byte of IDENTIFY word 47. */
	if ((wd->sc_params.atap_multi & 0xff) > 1) {
		wd->drvp->multi = wd->sc_params.atap_multi & 0xff;
	} else {
		wd->drvp->multi = 1;
	}

	aprint_verbose_dev(self, "drive supports %d-sector PIO transfers,",
	    wd->drvp->multi);

	/* 48-bit LBA addressing */
	if ((wd->sc_params.atap_cmd2_en & ATA_CMD2_LBA48) != 0)
		wd->sc_flags |= WDF_LBA48;

	/* Prior to ATA-4, LBA was optional. */
	if ((wd->sc_params.atap_capabilities1 & WDC_CAP_LBA) != 0)
		wd->sc_flags |= WDF_LBA;
#if 0
	/* ATA-4 requires LBA. */
	if (wd->sc_params.atap_ataversion != 0xffff &&
	    wd->sc_params.atap_ataversion >= WDC_VER_ATA4)
		wd->sc_flags |= WDF_LBA;
#endif

	/*
	 * Capacity decoding, in decreasing order of preference:
	 * LBA48 (four 16-bit words), LBA28 (two words), then CHS as
	 * a last resort.  sc_capacity28 is always kept <= 0xfffffff
	 * so the 28-bit command path can never address past it.
	 */
	if ((wd->sc_flags & WDF_LBA48) != 0) {
		aprint_verbose(" LBA48 addressing\n");
		wd->sc_capacity =
		    ((uint64_t) wd->sc_params.atap_max_lba[3] << 48) |
		    ((uint64_t) wd->sc_params.atap_max_lba[2] << 32) |
		    ((uint64_t) wd->sc_params.atap_max_lba[1] << 16) |
		    ((uint64_t) wd->sc_params.atap_max_lba[0] <<  0);
		wd->sc_capacity28 =
		    (wd->sc_params.atap_capacity[1] << 16) |
		    wd->sc_params.atap_capacity[0];
		/*
		 * Force LBA48 addressing for invalid numbers.
		 */
		if (wd->sc_capacity28 > 0xfffffff)
			wd->sc_capacity28 = 0xfffffff;
	} else if ((wd->sc_flags & WDF_LBA) != 0) {
		aprint_verbose(" LBA addressing\n");
		wd->sc_capacity28 =
		    (wd->sc_params.atap_capacity[1] << 16) |
		    wd->sc_params.atap_capacity[0];
		/*
		 * Limit capacity to LBA28 numbers to avoid overflow.
		 */
		if (wd->sc_capacity28 > 0xfffffff)
			wd->sc_capacity28 = 0xfffffff;
		wd->sc_capacity = wd->sc_capacity28;
	} else {
		aprint_verbose(" chs addressing\n");
		wd->sc_capacity =
		    wd->sc_params.atap_cylinders *
		    wd->sc_params.atap_heads *
		    wd->sc_params.atap_sectors;
		/*
		 * LBA28 size is ignored for CHS addressing. Use a reasonable
		 * value for debugging. The CHS values may be artificial and
		 * are mostly ignored.
		 */
		if (wd->sc_capacity < 0xfffffff)
			wd->sc_capacity28 = wd->sc_capacity;
		else
			wd->sc_capacity28 = 0xfffffff;
	}
	/*
	 * Logical sector size: if the drive reports "long logical
	 * sectors", the size is given in 16-bit words, hence the * 2.
	 * Otherwise assume the classic 512 bytes.
	 */
	if ((wd->sc_params.atap_secsz & ATA_SECSZ_VALID_MASK) == ATA_SECSZ_VALID
	    && ((wd->sc_params.atap_secsz & ATA_SECSZ_LLS) != 0)) {
		wd->sc_blksize = 2ULL *
		    ((uint32_t)((wd->sc_params.atap_lls_secsz[1] << 16) |
		    wd->sc_params.atap_lls_secsz[0]));
	} else {
		wd->sc_blksize = 512;
	}
	/* Physical-sector alignment (4Kn/512e drives). */
	wd->sc_sectoralign.dsa_firstaligned = 0;
	wd->sc_sectoralign.dsa_alignment = 1;
	if ((wd->sc_params.atap_secsz & ATA_SECSZ_VALID_MASK) == ATA_SECSZ_VALID
	    && ((wd->sc_params.atap_secsz & ATA_SECSZ_LPS) != 0)) {
		wd->sc_sectoralign.dsa_alignment = 1 <<
		    (wd->sc_params.atap_secsz & ATA_SECSZ_LPS_SZMSK);
		if ((wd->sc_params.atap_logical_align & ATA_LA_VALID_MASK) ==
		    ATA_LA_VALID) {
			wd->sc_sectoralign.dsa_firstaligned =
			    wd->sc_params.atap_logical_align & ATA_LA_MASK;
		}
	}
	/* Capacity expressed in DEV_BSIZE (512-byte) units. */
	wd->sc_capacity512 = (wd->sc_capacity * wd->sc_blksize) / DEV_BSIZE;
	format_bytes(pbuf, sizeof(pbuf), wd->sc_capacity * wd->sc_blksize);
	aprint_normal_dev(self, "%s, %d cyl, %d head, %d sec, "
	    "%d bytes/sect x %llu sectors",
	    pbuf,
	    (wd->sc_flags & WDF_LBA) ? (int)(wd->sc_capacity /
		(wd->sc_params.atap_heads * wd->sc_params.atap_sectors)) :
		wd->sc_params.atap_cylinders,
	    wd->sc_params.atap_heads, wd->sc_params.atap_sectors,
	    wd->sc_blksize, (unsigned long long)wd->sc_capacity);
	if (wd->sc_sectoralign.dsa_alignment != 1) {
		aprint_normal(" (%d bytes/physsect",
		    wd->sc_sectoralign.dsa_alignment * wd->sc_blksize);
		if (wd->sc_sectoralign.dsa_firstaligned != 0) {
			aprint_normal("; first aligned sector: %jd",
			    (intmax_t)wd->sc_sectoralign.dsa_firstaligned);
		}
		aprint_normal(")");
	}
	aprint_normal("\n");

	ATADEBUG_PRINT(("%s: atap_dmatiming_mimi=%d, atap_dmatiming_recom=%d\n",
	    device_xname(self), wd->sc_params.atap_dmatiming_mimi,
	    wd->sc_params.atap_dmatiming_recom), DEBUG_PROBE);

	/* Warn about sector sizes the rest of the kernel may not cope with. */
	if (wd->sc_blksize <= 0 || !powerof2(wd->sc_blksize) ||
	    wd->sc_blksize < DEV_BSIZE || wd->sc_blksize > MAXPHYS) {
		aprint_normal_dev(self, "WARNING: block size %u "
		    "might not actually work\n", wd->sc_blksize);
	}

	/* NOTE(review): compares the raw (unsqueezed) model field here,
	 * not tbuf -- presumably intentional for a literal "ST506" match. */
	if (strcmp(wd->sc_params.atap_model, "ST506") == 0)
		dtype = DKTYPE_ST506;
	else
		dtype = DKTYPE_ESDI;

out:
	/*
	 * Initialize and attach the disk structure.
	 */
	dk_init(dksc, self, dtype);
	disk_init(&dksc->sc_dkdev, dksc->sc_xname, &wddkdriver);

	/* Attach dk and disk subsystems */
	dk_attach(dksc);
	disk_attach(&dksc->sc_dkdev);
	wd_set_geometry(wd);

	bufq_alloc(&dksc->sc_bufq, BUFQ_DISK_DEFAULT_STRAT, BUFQ_SORT_RAWBLOCK);

	/* reference to label structure, used by ata code */
	wd->drvp->lp = dksc->sc_dkdev.dk_label;

	/* Discover wedges on this disk. */
	dkwedge_discover(&dksc->sc_dkdev);

	if (!pmf_device_register1(self, wd_suspend, NULL, wd_shutdown))
		aprint_error_dev(self, "couldn't establish power handler\n");

	wd_sysctl_attach(wd);
}
528
529 static bool
530 wd_suspend(device_t dv, const pmf_qual_t *qual)
531 {
532 struct wd_softc *sc = device_private(dv);
533
534 /* the adapter needs to be enabled */
535 if (sc->atabus->ata_addref(sc->drvp))
536 return true; /* no need to complain */
537
538 wd_flushcache(sc, AT_WAIT);
539 wd_standby(sc, AT_WAIT);
540
541 sc->atabus->ata_delref(sc->drvp);
542 return true;
543 }
544
/*
 * Detach the drive: revoke open vnodes, drain and kill pending I/O,
 * tear down callouts, wedges and the dk/disk attachment, then release
 * per-softc resources.  Order matters: vnodes and queued I/O must be
 * gone before the bufq and locks are destroyed.
 */
static int
wddetach(device_t self, int flags)
{
	struct wd_softc *wd = device_private(self);
	struct dk_softc *dksc = &wd->sc_dksc;
	int bmaj, cmaj, i, mn, rc;

	/* Refuse detach while the disk is open, unless forced. */
	if ((rc = disk_begindetach(&dksc->sc_dkdev, wd_lastclose, self, flags)) != 0)
		return rc;

	/* locate the major number */
	bmaj = bdevsw_lookup_major(&wd_bdevsw);
	cmaj = cdevsw_lookup_major(&wd_cdevsw);

	/* Nuke the vnodes for any open instances. */
	for (i = 0; i < MAXPARTITIONS; i++) {
		mn = WDMINOR(device_unit(self), i);
		vdevgone(bmaj, mn, mn, VBLK);
		vdevgone(cmaj, mn, mn, VCHR);
	}

	/* Wait for queued transfers to complete or fail. */
	dk_drain(dksc);

	/* Kill off any pending commands. */
	mutex_enter(&wd->sc_lock);
	wd->atabus->ata_killpending(wd->drvp);

	/* Stop the retry/requeue/restart callouts; halting with sc_lock
	 * held ensures no callout is still running when we destroy them. */
	callout_halt(&wd->sc_retry_callout, &wd->sc_lock);
	callout_destroy(&wd->sc_retry_callout);
	callout_halt(&wd->sc_requeue_callout, &wd->sc_lock);
	callout_destroy(&wd->sc_requeue_callout);
	callout_halt(&wd->sc_restart_diskqueue, &wd->sc_lock);
	callout_destroy(&wd->sc_restart_diskqueue);

	mutex_exit(&wd->sc_lock);

	bufq_free(dksc->sc_bufq);

	/* Delete all of our wedges. */
	dkwedge_delall(&dksc->sc_dkdev);

	/* Spin the drive down if we are powering off. */
	if (flags & DETACH_POWEROFF)
		wd_standby(wd, AT_POLL);

	/* Detach from the disk list. */
	disk_detach(&dksc->sc_dkdev);
	disk_destroy(&dksc->sc_dkdev);

	dk_detach(dksc);

#ifdef WD_SOFTBADSECT
	/* Clean out the bad sector list */
	while (!SLIST_EMPTY(&wd->sc_bslist)) {
		struct disk_badsectors *dbs = SLIST_FIRST(&wd->sc_bslist);
		SLIST_REMOVE_HEAD(&wd->sc_bslist, dbs_next);
		kmem_free(dbs, sizeof(*dbs));
	}
	wd->sc_bscount = 0;
#endif
	if (wd->sc_typename != NULL) {
		kmem_free(wd->sc_typename, strlen(wd->sc_typename) + 1);
		wd->sc_typename = NULL;
	}

	pmf_device_deregister(self);

	wd_sysctl_detach(wd);

#ifdef WD_SOFTBADSECT
	KASSERT(SLIST_EMPTY(&wd->sc_bslist));
	cv_destroy(&wd->sc_bslist_cv);
#endif

	mutex_destroy(&wd->sc_lock);

	wd->drvp->drive_type = ATA_DRIVET_NONE; /* no drive any more here */
	wd->drvp->drive_flags = 0;

	return (0);
}
625
/*
 * Read/write routine for a buffer. Validates the arguments and schedules the
 * transfer. Does not wait for the transfer to complete.
 */
static void
wdstrategy(struct buf *bp)
{
	struct wd_softc *wd =
	    device_lookup_private(&wd_cd, WDUNIT(bp->b_dev));
	struct dk_softc *dksc = &wd->sc_dksc;

	ATADEBUG_PRINT(("wdstrategy (%s)\n", dksc->sc_xname),
	    DEBUG_XFERS);

	/* If device invalidated (e.g. media change, door open,
	 * device detachment), then error.
	 */
	if ((wd->sc_flags & WDF_LOADED) == 0 ||
	    !device_is_enabled(dksc->sc_dev))
		goto err;

#ifdef WD_SOFTBADSECT
	/*
	 * If the transfer about to be attempted contains only a block that
	 * is known to be bad then return an error for the transfer without
	 * even attempting to start a transfer up under the premis that we
	 * will just end up doing more retries for a transfer that will end
	 * up failing again.
	 */
	if (__predict_false(!SLIST_EMPTY(&wd->sc_bslist))) {
		struct disklabel *lp = dksc->sc_dkdev.dk_label;
		struct disk_badsectors *dbs;
		daddr_t blkno, maxblk;

		/* convert the block number to absolute */
		if (lp->d_secsize >= DEV_BSIZE)
			blkno = bp->b_blkno / (lp->d_secsize / DEV_BSIZE);
		else
			blkno = bp->b_blkno * (DEV_BSIZE / lp->d_secsize);
		if (WDPART(bp->b_dev) != RAW_PART)
			blkno += lp->d_partitions[WDPART(bp->b_dev)].p_offset;
		maxblk = blkno + (bp->b_bcount / wd->sc_blksize) - 1;

		/*
		 * Reject the transfer if either end falls inside a
		 * recorded bad range.  NOTE(review): the low end is
		 * tested via bp->b_rawblkno while the high end uses the
		 * locally computed maxblk -- confirm this asymmetry is
		 * intentional.
		 */
		mutex_enter(&wd->sc_lock);
		SLIST_FOREACH(dbs, &wd->sc_bslist, dbs_next)
			if ((dbs->dbs_min <= bp->b_rawblkno &&
			    bp->b_rawblkno <= dbs->dbs_max) ||
			    (dbs->dbs_min <= maxblk && maxblk <= dbs->dbs_max)){
				mutex_exit(&wd->sc_lock);
				goto err;
			}
		mutex_exit(&wd->sc_lock);
	}
#endif

	/* Hand the buf to the dk(9) framework for queueing. */
	dk_strategy(dksc, bp);
	return;

err:
	bp->b_error = EIO;
	bp->b_resid = bp->b_bcount;
	biodone(bp);
}
689
/*
 * Fill in and issue one ata_xfer for the given buf.  Called (and
 * returns) with sc_lock held; the lock is dropped around the actual
 * ata_bio() submission.  Also used to re-issue a transfer on retry,
 * in which case stale per-attempt state is reset first.
 */
static void
wdstart1(struct wd_softc *wd, struct buf *bp, struct ata_xfer *xfer)
{
	struct dk_softc *dksc = &wd->sc_dksc;
	const uint32_t secsize = dksc->sc_dkdev.dk_geom.dg_secsize;

	KASSERT(bp == xfer->c_bio.bp || xfer->c_bio.bp == NULL);
	KASSERT((xfer->c_flags & (C_WAITACT|C_FREE)) == 0);
	KASSERT(mutex_owned(&wd->sc_lock));

	/* Reset state, so that retries don't use stale info */
	if (__predict_false(xfer->c_retries > 0)) {
		xfer->c_flags = 0;
		memset(&xfer->c_bio, 0, sizeof(xfer->c_bio));
	}

	xfer->c_bio.blkno = bp->b_rawblkno;
	xfer->c_bio.bcount = bp->b_bcount;
	xfer->c_bio.databuf = bp->b_data;
	xfer->c_bio.blkdone = 0;
	xfer->c_bio.bp = bp;

	/* Adjust blkno and bcount if xfer has been already partially done */
	if (__predict_false(xfer->c_skip > 0)) {
		KASSERT(xfer->c_skip < xfer->c_bio.bcount);
		KASSERT((xfer->c_skip % secsize) == 0);
		xfer->c_bio.bcount -= xfer->c_skip;
		xfer->c_bio.blkno += xfer->c_skip / secsize;
	}

#ifdef WD_CHAOS_MONKEY
	/*
	 * Override blkno to be over device capacity to trigger error,
	 * but only if it's read, to avoid trashing disk contents should
	 * the command be clipped, or otherwise misinterpreted, by the
	 * driver or controller.
	 */
	if (BUF_ISREAD(bp) && xfer->c_retries == 0 && wd->drv_chaos_freq > 0 &&
	    (++wd->drv_chaos_cnt % wd->drv_chaos_freq) == 0) {
		device_printf(dksc->sc_dev, "%s: chaos xfer %"PRIxPTR"\n",
		    __func__, (intptr_t)xfer & PAGE_MASK);
		xfer->c_bio.blkno = 7777777 + wd->sc_capacity;
		xfer->c_flags |= C_CHAOS;
	}
#endif

	/*
	 * If we're retrying, retry in single-sector mode. This will give us
	 * the sector number of the problem, and will eventually allow the
	 * transfer to succeed. If FUA is requested, we can't actually
	 * do this, as ATA_SINGLE is usually executed as PIO transfer by drivers
	 * which support it, and that isn't compatible with NCQ/FUA.
	 */
	if (xfer->c_retries >= WDIORETRIES_SINGLE &&
	    (bp->b_flags & B_MEDIA_FUA) == 0)
		xfer->c_bio.flags = ATA_SINGLE;
	else
		xfer->c_bio.flags = 0;

	/*
	 * request LBA48 transfers when supported by the controller
	 * and needed by transfer offset or size.
	 */
	if (wd->sc_flags & WDF_LBA48 &&
	    (((xfer->c_bio.blkno + xfer->c_bio.bcount / secsize) >
	    wd->sc_capacity28) ||
	    ((xfer->c_bio.bcount / secsize) > 128)))
		xfer->c_bio.flags |= ATA_LBA48;

	/*
	 * If NCQ was negotiated, always use it for the first several attempts.
	 * Since device cancels all outstanding requests on error, downgrade
	 * to non-NCQ on retry, so that the retried transfer would not cause
	 * cascade failure for the other transfers if it fails again.
	 * If FUA was requested, we can't downgrade, as that would violate
	 * the semantics - FUA would not be honored. In that case, continue
	 * retrying with NCQ.
	 */
	if (WD_USE_NCQ(wd) && (xfer->c_retries < WDIORETRIES_SINGLE ||
	    (bp->b_flags & B_MEDIA_FUA) != 0)) {
		xfer->c_bio.flags |= ATA_LBA48;
		xfer->c_flags |= C_NCQ;

		/* Boost command priority for time-critical I/O if supported. */
		if (WD_USE_NCQ_PRIO(wd) &&
		    BIO_GETPRIO(bp) == BPRIO_TIMECRITICAL)
			xfer->c_bio.flags |= ATA_PRIO_HIGH;
	}

	if (wd->sc_flags & WDF_LBA)
		xfer->c_bio.flags |= ATA_LBA;
	if (bp->b_flags & B_READ) {
		xfer->c_bio.flags |= ATA_READ;
	} else {
		/* it's a write */
		wd->sc_flags |= WDF_DIRTY;
	}
	if (bp->b_flags & B_MEDIA_FUA) {
		/* If not using NCQ, the command WRITE DMA FUA EXT is LBA48 */
		KASSERT((wd->sc_flags & WDF_LBA48) != 0);
		if ((xfer->c_flags & C_NCQ) == 0)
			xfer->c_bio.flags |= ATA_LBA48;

		xfer->c_bio.flags |= ATA_FUA;
	}

	/* Count the transfer in-flight only on its first attempt. */
	if (xfer->c_retries == 0)
		wd->inflight++;
	/* Drop the lock across the submission; ata_bio may complete
	 * the transfer synchronously and call back into wddone(). */
	mutex_exit(&wd->sc_lock);

	/* Queue the xfer */
	wd->atabus->ata_bio(wd->drvp, xfer);

	mutex_enter(&wd->sc_lock);
}
804
/*
 * dk(9) diskstart hook: try to issue one buf.  Returns 0 on success or
 * EAGAIN when no xfer slot (or memory) is available, in which case a
 * callout is armed to restart the queue later.
 */
static int
wd_diskstart(device_t dev, struct buf *bp)
{
	struct wd_softc *wd = device_private(dev);
#ifdef ATADEBUG
	struct dk_softc *dksc = &wd->sc_dksc;
#endif
	struct ata_xfer *xfer;
	struct ata_channel *chp;
	unsigned openings;
	int ticks;

	mutex_enter(&wd->sc_lock);

	chp = wd->drvp->chnl_softc;

	/* Channel-wide queue depth, clamped by this drive's openings. */
	ata_channel_lock(chp);
	openings = ata_queue_openings(chp);
	ata_channel_unlock(chp);

	openings = uimin(openings, wd->drvp->drv_openings);

	if (wd->inflight >= openings) {
		/*
		 * pretend we run out of memory when the queue is full,
		 * so that the operation is retried after a minimal
		 * delay.
		 */
		xfer = NULL;
		ticks = 1;
	} else {
		/*
		 * If there is no available memory, retry later. This
		 * happens very rarely and only under memory pressure,
		 * so wait relatively long before retry.
		 */
		xfer = ata_get_xfer(chp, false);
		ticks = hz/2;
	}

	if (xfer == NULL) {
		ATADEBUG_PRINT(("wd_diskstart %s no xfer\n",
		    dksc->sc_xname), DEBUG_XFERS);

		/*
		 * The disk queue is pushed automatically when an I/O
		 * operation finishes or another one is queued. We
		 * need this extra timeout because an ATA channel
		 * might be shared by more than one disk queue and
		 * all queues need to be restarted when another slot
		 * becomes available.
		 */
		if (!callout_pending(&wd->sc_restart_diskqueue)) {
			callout_reset(&wd->sc_restart_diskqueue, ticks,
			    wdrestart, dev);
		}

		mutex_exit(&wd->sc_lock);
		return EAGAIN;
	}

	/* Got a slot: build and submit the transfer. */
	wdstart1(wd, bp, xfer);

	mutex_exit(&wd->sc_lock);

	return 0;
}
872
873 /*
874 * Queue a drive for I/O.
875 */
876 static void
877 wdrestart(void *x)
878 {
879 device_t self = x;
880 struct wd_softc *wd = device_private(self);
881 struct dk_softc *dksc = &wd->sc_dksc;
882
883 ATADEBUG_PRINT(("wdstart %s\n", dksc->sc_xname),
884 DEBUG_XFERS);
885
886 if (!device_is_active(dksc->sc_dev))
887 return;
888
889 dk_start(dksc, NULL);
890 }
891
/*
 * Completion callback for an ata_xfer.  Classifies the bio error:
 * hard errors (DMA/device fault/timeout/ERROR) trigger a drive reset
 * and a delayed retry; REQUEUE/channel reset just re-issue the xfer;
 * after WDIORETRIES attempts the buf fails with EIO.  On success (or
 * exhaustion) the xfer is freed and the dk queue is restarted.
 */
static void
wddone(device_t self, struct ata_xfer *xfer)
{
	struct wd_softc *wd = device_private(self);
	struct dk_softc *dksc = &wd->sc_dksc;
	const char *errmsg;
	int do_perror = 0;
	struct buf *bp;

	ATADEBUG_PRINT(("wddone %s\n", dksc->sc_xname),
	    DEBUG_XFERS);

	/* Crash-dump in progress: completions are meaningless now. */
	if (__predict_false(wddoingadump)) {
		/* just drop it to the floor */
		ata_free_xfer(wd->drvp->chnl_softc, xfer);
		return;
	}

	bp = xfer->c_bio.bp;
	KASSERT(bp != NULL);

	bp->b_resid = xfer->c_bio.bcount;
	switch (xfer->c_bio.error) {
	case ERR_DMA:
		errmsg = "DMA error";
		goto retry;
	case ERR_DF:
		errmsg = "device fault";
		goto retry;
	case TIMEOUT:
		errmsg = "device timeout";
		goto retry;
	case REQUEUE:
		/* Requeue/reset cases skip the drive reset (retry2). */
		errmsg = "requeue";
		goto retry2;
	case ERR_RESET:
		errmsg = "channel reset";
		goto retry2;
	case ERROR:
		/* Don't care about media change bits */
		if (xfer->c_bio.r_error != 0 &&
		    (xfer->c_bio.r_error & ~(WDCE_MC | WDCE_MCR)) == 0)
			goto noerror;
		errmsg = "error";
		do_perror = 1;	/* decode the ATA error register below */

	retry:		/* Just reset and retry. Can we do more ? */
		/* Only reset once per xfer; recovery already did it otherwise. */
		if ((xfer->c_flags & C_RECOVERED) == 0) {
			int wflags = (xfer->c_flags & C_POLL) ? AT_POLL : 0;
			ata_channel_lock(wd->drvp->chnl_softc);
			ata_thread_run(wd->drvp->chnl_softc, wflags,
			    ATACH_TH_DRIVE_RESET, wd->drvp->drive);
			ata_channel_unlock(wd->drvp->chnl_softc);
		}
	retry2:
		mutex_enter(&wd->sc_lock);

		diskerr(bp, "wd", errmsg, LOG_PRINTF,
		    xfer->c_bio.blkdone, dksc->sc_dkdev.dk_label);
		if (xfer->c_retries < WDIORETRIES)
			printf(", xfer %"PRIxPTR", retry %d",
			    (intptr_t)xfer & PAGE_MASK,
			    xfer->c_retries);
		printf("\n");
		if (do_perror)
			wdperror(wd, xfer);

		if (xfer->c_retries < WDIORETRIES) {
			xfer->c_retries++;

			/* Rerun ASAP if just requeued */
			if (xfer->c_bio.error == REQUEUE) {
				SLIST_INSERT_HEAD(&wd->sc_requeue_list, xfer,
				    c_retrychain);
				callout_reset(&wd->sc_requeue_callout,
				    1, wdbiorequeue, wd);
			} else {
				/* Hard error: give the drive RECOVERYTIME. */
				SLIST_INSERT_HEAD(&wd->sc_retry_list, xfer,
				    c_retrychain);
				callout_reset(&wd->sc_retry_callout,
				    RECOVERYTIME, wdbioretry, wd);
			}

			mutex_exit(&wd->sc_lock);
			return;
		}

		mutex_exit(&wd->sc_lock);

#ifdef WD_SOFTBADSECT
		/*
		 * Not all errors indicate a failed block but those that do,
		 * put the block on the bad-block list for the device. Only
		 * do this for reads because the drive should do it for writes,
		 * itself, according to Manuel.
		 */
		if ((bp->b_flags & B_READ) &&
		    ((wd->drvp->ata_vers >= 4 && xfer->c_bio.r_error & 64) ||
		     (wd->drvp->ata_vers < 4 && xfer->c_bio.r_error & 192))) {
			struct disk_badsectors *dbs;

			dbs = kmem_zalloc(sizeof *dbs, KM_NOSLEEP);
			if (dbs == NULL) {
				device_printf(dksc->sc_dev,
				    "failed to add bad block to list\n");
				goto out;
			}

			dbs->dbs_min = bp->b_rawblkno;
			dbs->dbs_max = dbs->dbs_min +
			    (bp->b_bcount /wd->sc_blksize) - 1;
			microtime(&dbs->dbs_failedat);

			mutex_enter(&wd->sc_lock);
			SLIST_INSERT_HEAD(&wd->sc_bslist, dbs, dbs_next);
			wd->sc_bscount++;
			mutex_exit(&wd->sc_lock);
		}
out:
#endif
		bp->b_error = EIO;
		break;
	case NOERROR:
#ifdef WD_CHAOS_MONKEY
		/*
		 * For example Parallels AHCI emulation doesn't actually
		 * return error for the invalid I/O, so just re-run
		 * the request and do not panic.
		 */
		if (__predict_false(xfer->c_flags & C_CHAOS)) {
			xfer->c_bio.error = REQUEUE;
			errmsg = "chaos noerror";
			goto retry2;
		}
#endif

noerror:	if ((xfer->c_bio.flags & ATA_CORR) || xfer->c_retries > 0)
			device_printf(dksc->sc_dev,
			    "soft error (corrected) xfer %"PRIxPTR"\n",
			    (intptr_t)xfer & PAGE_MASK);
		break;
	case ERR_NODEV:
		/* Drive went away: fail the buf without retrying. */
		bp->b_error = EIO;
		break;
	}
	if (__predict_false(bp->b_error != 0) && bp->b_resid == 0) {
		/*
		 * the disk or controller sometimes report a complete
		 * xfer, when there has been an error. This is wrong,
		 * assume nothing got transferred in this case
		 */
		bp->b_resid = bp->b_bcount;
	}

	ata_free_xfer(wd->drvp->chnl_softc, xfer);

	mutex_enter(&wd->sc_lock);
	wd->inflight--;
	mutex_exit(&wd->sc_lock);
	dk_done(dksc, bp);
	/* A slot just freed up: push the disk queue again. */
	dk_start(dksc, NULL);
}
1053
1054 static void
1055 wdbioretry(void *v)
1056 {
1057 struct wd_softc *wd = v;
1058 struct ata_xfer *xfer;
1059
1060 ATADEBUG_PRINT(("%s %s\n", __func__, wd->sc_dksc.sc_xname),
1061 DEBUG_XFERS);
1062
1063 mutex_enter(&wd->sc_lock);
1064 while ((xfer = SLIST_FIRST(&wd->sc_retry_list))) {
1065 SLIST_REMOVE_HEAD(&wd->sc_retry_list, c_retrychain);
1066 wdstart1(wd, xfer->c_bio.bp, xfer);
1067 }
1068 mutex_exit(&wd->sc_lock);
1069 }
1070
1071 static void
1072 wdbiorequeue(void *v)
1073 {
1074 struct wd_softc *wd = v;
1075 struct ata_xfer *xfer;
1076
1077 ATADEBUG_PRINT(("%s %s\n", __func__, wd->sc_dksc.sc_xname),
1078 DEBUG_XFERS);
1079
1080 mutex_enter(&wd->sc_lock);
1081 while ((xfer = SLIST_FIRST(&wd->sc_requeue_list))) {
1082 SLIST_REMOVE_HEAD(&wd->sc_requeue_list, c_retrychain);
1083 wdstart1(wd, xfer->c_bio.bp, xfer);
1084 }
1085 mutex_exit(&wd->sc_lock);
1086 }
1087
1088 static void
1089 wdminphys(struct buf *bp)
1090 {
1091 const struct wd_softc * const wd =
1092 device_lookup_private(&wd_cd, WDUNIT(bp->b_dev));
1093 int maxsectors;
1094
1095 /*
1096 * The limit is actually 65536 for LBA48 and 256 for non-LBA48,
1097 * but that requires to set the count for the ATA command
1098 * to 0, which is somewhat error prone, so better stay safe.
1099 */
1100 if (wd->sc_flags & WDF_LBA48)
1101 maxsectors = 65535;
1102 else
1103 maxsectors = 128;
1104
1105 if (bp->b_bcount > (wd->sc_blksize * maxsectors))
1106 bp->b_bcount = (wd->sc_blksize * maxsectors);
1107
1108 minphys(bp);
1109 }
1110
1111 static void
1112 wd_iosize(device_t dev, int *count)
1113 {
1114 struct buf B;
1115 int bmaj;
1116
1117 bmaj = bdevsw_lookup_major(&wd_bdevsw);
1118 B.b_dev = MAKEWDDEV(bmaj,device_unit(dev),RAW_PART);
1119 B.b_bcount = *count;
1120
1121 wdminphys(&B);
1122
1123 *count = B.b_bcount;
1124 }
1125
1126 static int
1127 wdread(dev_t dev, struct uio *uio, int flags)
1128 {
1129
1130 ATADEBUG_PRINT(("wdread\n"), DEBUG_XFERS);
1131 return (physio(wdstrategy, NULL, dev, B_READ, wdminphys, uio));
1132 }
1133
1134 static int
1135 wdwrite(dev_t dev, struct uio *uio, int flags)
1136 {
1137
1138 ATADEBUG_PRINT(("wdwrite\n"), DEBUG_XFERS);
1139 return (physio(wdstrategy, NULL, dev, B_WRITE, wdminphys, uio));
1140 }
1141
1142 static int
1143 wdopen(dev_t dev, int flag, int fmt, struct lwp *l)
1144 {
1145 struct wd_softc *wd;
1146 struct dk_softc *dksc;
1147 int unit, part, error;
1148
1149 ATADEBUG_PRINT(("wdopen\n"), DEBUG_FUNCS);
1150 unit = WDUNIT(dev);
1151 wd = device_lookup_private(&wd_cd, unit);
1152 if (wd == NULL)
1153 return (ENXIO);
1154 dksc = &wd->sc_dksc;
1155
1156 if (! device_is_active(dksc->sc_dev))
1157 return (ENODEV);
1158
1159 part = WDPART(dev);
1160
1161 if (wd->sc_capacity == 0)
1162 return (ENODEV);
1163
1164 /*
1165 * If any partition is open, but the disk has been invalidated,
1166 * disallow further opens.
1167 */
1168 if ((wd->sc_flags & (WDF_OPEN | WDF_LOADED)) == WDF_OPEN) {
1169 if (part != RAW_PART || fmt != S_IFCHR)
1170 return EIO;
1171 }
1172
1173 error = dk_open(dksc, dev, flag, fmt, l);
1174
1175 return error;
1176 }
1177
1178 /*
1179 * Serialized by caller
1180 */
1181 static int
1182 wd_firstopen(device_t self, dev_t dev, int flag, int fmt)
1183 {
1184 struct wd_softc *wd = device_private(self);
1185 struct dk_softc *dksc = &wd->sc_dksc;
1186 int error;
1187
1188 error = wd->atabus->ata_addref(wd->drvp);
1189 if (error)
1190 return error;
1191
1192 if ((wd->sc_flags & WDF_LOADED) == 0) {
1193 int param_error;
1194
1195 /* Load the physical device parameters. */
1196 param_error = wd_get_params(wd, &wd->sc_params);
1197 if (param_error != 0) {
1198 aprint_error_dev(dksc->sc_dev, "IDENTIFY failed\n");
1199 error = EIO;
1200 goto bad;
1201 }
1202 wd_set_geometry(wd);
1203 wd->sc_flags |= WDF_LOADED;
1204 }
1205
1206 wd->sc_flags |= WDF_OPEN;
1207 return 0;
1208
1209 bad:
1210 wd->atabus->ata_delref(wd->drvp);
1211 return error;
1212 }
1213
1214 /*
1215 * Caller must hold wd->sc_dk.dk_openlock.
1216 */
1217 static int
1218 wd_lastclose(device_t self)
1219 {
1220 struct wd_softc *wd = device_private(self);
1221
1222 KASSERTMSG(bufq_peek(wd->sc_dksc.sc_bufq) == NULL, "bufq not empty");
1223
1224 if (wd->sc_flags & WDF_DIRTY)
1225 wd_flushcache(wd, AT_WAIT);
1226
1227 wd->atabus->ata_delref(wd->drvp);
1228 wd->sc_flags &= ~WDF_OPEN;
1229
1230 return 0;
1231 }
1232
1233 static int
1234 wdclose(dev_t dev, int flag, int fmt, struct lwp *l)
1235 {
1236 struct wd_softc *wd;
1237 struct dk_softc *dksc;
1238 int unit;
1239
1240 unit = WDUNIT(dev);
1241 wd = device_lookup_private(&wd_cd, unit);
1242 dksc = &wd->sc_dksc;
1243
1244 return dk_close(dksc, dev, flag, fmt, l);
1245 }
1246
1247 void
1248 wdperror(const struct wd_softc *wd, struct ata_xfer *xfer)
1249 {
1250 static const char *const errstr0_3[] = {"address mark not found",
1251 "track 0 not found", "aborted command", "media change requested",
1252 "id not found", "media changed", "uncorrectable data error",
1253 "bad block detected"};
1254 static const char *const errstr4_5[] = {
1255 "obsolete (address mark not found)",
1256 "no media/write protected", "aborted command",
1257 "media change requested", "id not found", "media changed",
1258 "uncorrectable data error", "interface CRC error"};
1259 const char *const *errstr;
1260 int i;
1261 const char *sep = "";
1262
1263 const struct dk_softc *dksc = &wd->sc_dksc;
1264 const char *devname = dksc->sc_xname;
1265 struct ata_drive_datas *drvp = wd->drvp;
1266 int errno = xfer->c_bio.r_error;
1267
1268 if (drvp->ata_vers >= 4)
1269 errstr = errstr4_5;
1270 else
1271 errstr = errstr0_3;
1272
1273 printf("%s: (", devname);
1274
1275 if (errno == 0)
1276 printf("error not notified");
1277
1278 for (i = 0; i < 8; i++) {
1279 if (errno & (1 << i)) {
1280 printf("%s%s", sep, errstr[i]);
1281 sep = ", ";
1282 }
1283 }
1284 printf(")\n");
1285 }
1286
1287 int
1288 wdioctl(dev_t dev, u_long cmd, void *addr, int flag, struct lwp *l)
1289 {
1290 struct wd_softc *wd =
1291 device_lookup_private(&wd_cd, WDUNIT(dev));
1292 struct dk_softc *dksc = &wd->sc_dksc;
1293
1294 ATADEBUG_PRINT(("wdioctl\n"), DEBUG_FUNCS);
1295
1296 if ((wd->sc_flags & WDF_LOADED) == 0)
1297 return EIO;
1298
1299 switch (cmd) {
1300 #ifdef HAS_BAD144_HANDLING
1301 case DIOCSBAD:
1302 if ((flag & FWRITE) == 0)
1303 return EBADF;
1304 dksc->sc_dkdev.dk_cpulabel->bad = *(struct dkbad *)addr;
1305 dksc->sc_dkdev.dk_label->d_flags |= D_BADSECT;
1306 bad144intern(wd);
1307 return 0;
1308 #endif
1309 #ifdef WD_SOFTBADSECT
1310 case DIOCBSLIST: {
1311 uint32_t count, missing, skip;
1312 struct disk_badsecinfo dbsi;
1313 struct disk_badsectors *dbs, dbsbuf;
1314 size_t available;
1315 uint8_t *laddr;
1316 int error;
1317
1318 dbsi = *(struct disk_badsecinfo *)addr;
1319 missing = wd->sc_bscount;
1320 count = 0;
1321 available = dbsi.dbsi_bufsize;
1322 skip = dbsi.dbsi_skip;
1323 laddr = (uint8_t *)dbsi.dbsi_buffer;
1324
1325 /*
1326 * We start this loop with the expectation that all of the
1327 * entries will be missed and decrement this counter each
1328 * time we either skip over one (already copied out) or
1329 * we actually copy it back to user space. The structs
1330 * holding the bad sector information are copied directly
1331 * back to user space whilst the summary is returned via
1332 * the struct passed in via the ioctl.
1333 */
1334 error = 0;
1335 mutex_enter(&wd->sc_lock);
1336 wd->sc_bslist_inuse++;
1337 SLIST_FOREACH(dbs, &wd->sc_bslist, dbs_next) {
1338 if (skip > 0) {
1339 missing--;
1340 skip--;
1341 continue;
1342 }
1343 if (available < sizeof(*dbs))
1344 break;
1345 available -= sizeof(*dbs);
1346 memset(&dbsbuf, 0, sizeof(dbsbuf));
1347 dbsbuf.dbs_min = dbs->dbs_min;
1348 dbsbuf.dbs_max = dbs->dbs_max;
1349 dbsbuf.dbs_failedat = dbs->dbs_failedat;
1350 mutex_exit(&wd->sc_lock);
1351 error = copyout(&dbsbuf, laddr, sizeof(dbsbuf));
1352 mutex_enter(&wd->sc_lock);
1353 if (error)
1354 break;
1355 laddr += sizeof(*dbs);
1356 missing--;
1357 count++;
1358 }
1359 if (--wd->sc_bslist_inuse == 0)
1360 cv_broadcast(&wd->sc_bslist_cv);
1361 mutex_exit(&wd->sc_lock);
1362 dbsi.dbsi_left = missing;
1363 dbsi.dbsi_copied = count;
1364 *(struct disk_badsecinfo *)addr = dbsi;
1365
1366 /*
1367 * If we copied anything out, ignore error and return
1368 * success -- can't back it out.
1369 */
1370 return count ? 0 : error;
1371 }
1372
1373 case DIOCBSFLUSH: {
1374 int error;
1375
1376 /* Clean out the bad sector list */
1377 mutex_enter(&wd->sc_lock);
1378 while (wd->sc_bslist_inuse) {
1379 error = cv_wait_sig(&wd->sc_bslist_cv, &wd->sc_lock);
1380 if (error) {
1381 mutex_exit(&wd->sc_lock);
1382 return error;
1383 }
1384 }
1385 while (!SLIST_EMPTY(&wd->sc_bslist)) {
1386 struct disk_badsectors *dbs =
1387 SLIST_FIRST(&wd->sc_bslist);
1388 SLIST_REMOVE_HEAD(&wd->sc_bslist, dbs_next);
1389 mutex_exit(&wd->sc_lock);
1390 kmem_free(dbs, sizeof(*dbs));
1391 mutex_enter(&wd->sc_lock);
1392 }
1393 mutex_exit(&wd->sc_lock);
1394 wd->sc_bscount = 0;
1395 return 0;
1396 }
1397 #endif
1398
1399 #ifdef notyet
1400 case DIOCWFORMAT:
1401 if ((flag & FWRITE) == 0)
1402 return EBADF;
1403 {
1404 register struct format_op *fop;
1405 struct iovec aiov;
1406 struct uio auio;
1407 int error1;
1408
1409 fop = (struct format_op *)addr;
1410 aiov.iov_base = fop->df_buf;
1411 aiov.iov_len = fop->df_count;
1412 auio.uio_iov = &aiov;
1413 auio.uio_iovcnt = 1;
1414 auio.uio_resid = fop->df_count;
1415 auio.uio_offset =
1416 fop->df_startblk * wd->sc_dk.dk_label->d_secsize;
1417 auio.uio_vmspace = l->l_proc->p_vmspace;
1418 error1 = physio(wdformat, NULL, dev, B_WRITE, wdminphys,
1419 &auio);
1420 fop->df_count -= auio.uio_resid;
1421 fop->df_reg[0] = wdc->sc_status;
1422 fop->df_reg[1] = wdc->sc_error;
1423 return error1;
1424 }
1425 #endif
1426 case DIOCGCACHE:
1427 return wd_getcache(wd, (int *)addr);
1428
1429 case DIOCSCACHE:
1430 return wd_setcache(wd, *(int *)addr);
1431
1432 case DIOCCACHESYNC:
1433 return wd_flushcache(wd, AT_WAIT);
1434
1435 case ATAIOCCOMMAND:
1436 /*
1437 * Make sure this command is (relatively) safe first
1438 */
1439 if ((((atareq_t *) addr)->flags & ATACMD_READ) == 0 &&
1440 (flag & FWRITE) == 0)
1441 return (EBADF);
1442 {
1443 struct wd_ioctl *wi;
1444 atareq_t *atareq = (atareq_t *) addr;
1445 int error1;
1446
1447 wi = wi_get(wd);
1448 wi->wi_atareq = *atareq;
1449
1450 if (atareq->datalen && atareq->flags &
1451 (ATACMD_READ | ATACMD_WRITE)) {
1452 void *tbuf;
1453 if (atareq->datalen < DEV_BSIZE
1454 && atareq->command == WDCC_IDENTIFY) {
1455 tbuf = kmem_zalloc(DEV_BSIZE, KM_SLEEP);
1456 wi->wi_iov.iov_base = tbuf;
1457 wi->wi_iov.iov_len = DEV_BSIZE;
1458 UIO_SETUP_SYSSPACE(&wi->wi_uio);
1459 } else {
1460 tbuf = NULL;
1461 wi->wi_iov.iov_base = atareq->databuf;
1462 wi->wi_iov.iov_len = atareq->datalen;
1463 wi->wi_uio.uio_vmspace = l->l_proc->p_vmspace;
1464 }
1465 wi->wi_uio.uio_iov = &wi->wi_iov;
1466 wi->wi_uio.uio_iovcnt = 1;
1467 wi->wi_uio.uio_resid = atareq->datalen;
1468 wi->wi_uio.uio_offset = 0;
1469 wi->wi_uio.uio_rw =
1470 (atareq->flags & ATACMD_READ) ? B_READ : B_WRITE;
1471 error1 = physio(wdioctlstrategy, &wi->wi_bp, dev,
1472 (atareq->flags & ATACMD_READ) ? B_READ : B_WRITE,
1473 wdminphys, &wi->wi_uio);
1474 if (tbuf != NULL && error1 == 0) {
1475 error1 = copyout(tbuf, atareq->databuf,
1476 atareq->datalen);
1477 kmem_free(tbuf, DEV_BSIZE);
1478 }
1479 } else {
1480 /* No need to call physio if we don't have any
1481 user data */
1482 wi->wi_bp.b_flags = 0;
1483 wi->wi_bp.b_data = 0;
1484 wi->wi_bp.b_bcount = 0;
1485 wi->wi_bp.b_dev = dev;
1486 wi->wi_bp.b_proc = l->l_proc;
1487 wdioctlstrategy(&wi->wi_bp);
1488 error1 = wi->wi_bp.b_error;
1489 }
1490 *atareq = wi->wi_atareq;
1491 wi_free(wi);
1492 return(error1);
1493 }
1494
1495 case DIOCGSECTORALIGN: {
1496 struct disk_sectoralign *dsa = addr;
1497 int part = WDPART(dev);
1498
1499 *dsa = wd->sc_sectoralign;
1500 if (part != RAW_PART) {
1501 struct disklabel *lp = dksc->sc_dkdev.dk_label;
1502 daddr_t offset = lp->d_partitions[part].p_offset;
1503 uint32_t r = offset % dsa->dsa_alignment;
1504
1505 if (r < dsa->dsa_firstaligned)
1506 dsa->dsa_firstaligned = dsa->dsa_firstaligned
1507 - r;
1508 else
1509 dsa->dsa_firstaligned = (dsa->dsa_firstaligned
1510 + dsa->dsa_alignment) - r;
1511 }
1512 dsa->dsa_firstaligned %= dsa->dsa_alignment;
1513
1514 return 0;
1515 }
1516
1517 default:
1518 return dk_ioctl(dksc, dev, cmd, addr, flag, l);
1519 }
1520
1521 #ifdef DIAGNOSTIC
1522 panic("wdioctl: impossible");
1523 #endif
1524 }
1525
1526 static int
1527 wd_discard(device_t dev, off_t pos, off_t len)
1528 {
1529 struct wd_softc *wd = device_private(dev);
1530 daddr_t bno;
1531 long size, done;
1532 long maxatonce, amount;
1533 int result;
1534
1535 if (!(wd->sc_params.atap_ata_major & WDC_VER_ATA7)
1536 || !(wd->sc_params.support_dsm & ATA_SUPPORT_DSM_TRIM)) {
1537 /* not supported; ignore request */
1538 ATADEBUG_PRINT(("wddiscard (unsupported)\n"), DEBUG_FUNCS);
1539 return 0;
1540 }
1541 maxatonce = 0xffff; /*wd->sc_params.max_dsm_blocks*/
1542
1543 ATADEBUG_PRINT(("wddiscard\n"), DEBUG_FUNCS);
1544
1545 if ((wd->sc_flags & WDF_LOADED) == 0)
1546 return EIO;
1547
1548 /* round the start up and the end down */
1549 bno = (pos + wd->sc_blksize - 1) / wd->sc_blksize;
1550 size = ((pos + len) / wd->sc_blksize) - bno;
1551
1552 done = 0;
1553 while (done < size) {
1554 amount = size - done;
1555 if (amount > maxatonce) {
1556 amount = maxatonce;
1557 }
1558 result = wd_trim(wd, bno + done, amount);
1559 if (result) {
1560 return result;
1561 }
1562 done += amount;
1563 }
1564 return 0;
1565 }
1566
1567 static int
1568 wddiscard(dev_t dev, off_t pos, off_t len)
1569 {
1570 struct wd_softc *wd;
1571 struct dk_softc *dksc;
1572 int unit;
1573
1574 unit = WDUNIT(dev);
1575 wd = device_lookup_private(&wd_cd, unit);
1576 dksc = &wd->sc_dksc;
1577
1578 return dk_discard(dksc, dev, pos, len);
1579 }
1580
#ifdef B_FORMAT
/*
 * Format entry point: tag the buffer as a format request and push it
 * through the normal strategy routine.
 */
int
wdformat(struct buf *bp)
{

	bp->b_flags |= B_FORMAT;
	return wdstrategy(bp);
}
#endif
1590
1591 int
1592 wdsize(dev_t dev)
1593 {
1594 struct wd_softc *wd;
1595 struct dk_softc *dksc;
1596 int unit;
1597
1598 ATADEBUG_PRINT(("wdsize\n"), DEBUG_FUNCS);
1599
1600 unit = WDUNIT(dev);
1601 wd = device_lookup_private(&wd_cd, unit);
1602 if (wd == NULL)
1603 return (-1);
1604 dksc = &wd->sc_dksc;
1605
1606 if (!device_is_active(dksc->sc_dev))
1607 return (-1);
1608
1609 return dk_size(dksc, dev);
1610 }
1611
1612 /*
1613 * Dump core after a system crash.
1614 */
1615 static int
1616 wddump(dev_t dev, daddr_t blkno, void *va, size_t size)
1617 {
1618 struct wd_softc *wd;
1619 struct dk_softc *dksc;
1620 int unit;
1621
1622 /* Check if recursive dump; if so, punt. */
1623 if (wddoingadump)
1624 return EFAULT;
1625 wddoingadump = 1;
1626
1627 unit = WDUNIT(dev);
1628 wd = device_lookup_private(&wd_cd, unit);
1629 if (wd == NULL)
1630 return (ENXIO);
1631 dksc = &wd->sc_dksc;
1632
1633 return dk_dump(dksc, dev, blkno, va, size, 0);
1634 }
1635
/*
 * Write one batch of dump blocks with a polled ATA bio transfer.
 * Runs at crash time: no interrupts, no sleeping, uses the statically
 * allocated wd->dump_xfer instead of the normal xfer pool.
 */
static int
wd_dumpblocks(device_t dev, void *va, daddr_t blkno, int nblk)
{
	struct wd_softc *wd = device_private(dev);
	struct dk_softc *dksc = &wd->sc_dksc;
	struct disk_geom *dg = &dksc->sc_dkdev.dk_geom;
	struct ata_xfer *xfer = &wd->dump_xfer;
	int err;

	/* Recalibrate, if first dump transfer. */
	if (wddumprecalibrated == 0) {
		wddumprecalibrated = 1;
		ata_channel_lock(wd->drvp->chnl_softc);
		/* This will directly execute the reset due to AT_POLL */
		ata_thread_run(wd->drvp->chnl_softc, AT_POLL,
		    ATACH_TH_DRIVE_RESET, wd->drvp->drive);

		wd->drvp->state = RESET;
		ata_channel_unlock(wd->drvp->chnl_softc);
	}

	/* Build the polled request in the pre-allocated dump xfer. */
	memset(xfer, 0, sizeof(*xfer));
	xfer->c_flags |= C_PRIVATE_ALLOC | C_SKIP_QUEUE;

	xfer->c_bio.blkno = blkno;
	xfer->c_bio.flags = ATA_POLL;
	/* Use LBA48 addressing only when the range actually needs it. */
	if (wd->sc_flags & WDF_LBA48 &&
	    (xfer->c_bio.blkno + nblk) > wd->sc_capacity28)
		xfer->c_bio.flags |= ATA_LBA48;
	if (wd->sc_flags & WDF_LBA)
		xfer->c_bio.flags |= ATA_LBA;
	xfer->c_bio.bcount = nblk * dg->dg_secsize;
	xfer->c_bio.databuf = va;
#ifndef WD_DUMP_NOT_TRUSTED
	/* This will poll until the bio is complete */
	wd->atabus->ata_bio(wd->drvp, xfer);

	/* Map the bio status to an errno, printing a diagnostic. */
	switch(err = xfer->c_bio.error) {
	case TIMEOUT:
		printf("wddump: device timed out");
		err = EIO;
		break;
	case ERR_DF:
		printf("wddump: drive fault");
		err = EIO;
		break;
	case ERR_DMA:
		printf("wddump: DMA error");
		err = EIO;
		break;
	case ERROR:
		printf("wddump: ");
		wdperror(wd, xfer);
		err = EIO;
		break;
	case NOERROR:
		err = 0;
		break;
	default:
		panic("wddump: unknown error type %x", err);
	}

	if (err != 0) {
		printf("\n");
		return err;
	}
#else	/* WD_DUMP_NOT_TRUSTED */
	/* Let's just talk about this first... */
	printf("wd%d: dump addr 0x%x, cylin %d, head %d, sector %d\n",
	    unit, va, cylin, head, sector);
	delay(500 * 1000);	/* half a second */
#endif

	/* Allow the next wddump() entry. */
	wddoingadump = 0;
	return 0;
}
1712
#ifdef HAS_BAD144_HANDLING
/*
 * Internalize the bad sector table.
 */
void
bad144intern(struct wd_softc *wd)
{
	struct dk_softc *dksc = &wd->sc_dksc;
	struct dkbad *bt = &dksc->sc_dkdev.dk_cpulabel->bad;
	struct disklabel *lp = dksc->sc_dkdev.dk_label;
	int i;

	ATADEBUG_PRINT(("bad144intern\n"), DEBUG_XFERS);

	/* Convert each CHS table entry to an absolute sector number. */
	for (i = 0; i < NBT_BAD; i++) {
		if (bt->bt_bad[i].bt_cyl == 0xffff)
			break;
		wd->drvp->badsect[i] =
		    bt->bt_bad[i].bt_cyl * lp->d_secpercyl +
		    (bt->bt_bad[i].bt_trksec >> 8) * lp->d_nsectors +
		    (bt->bt_bad[i].bt_trksec & 0xff);
	}
	/* Pad the remainder (including the extra slot) with -1 sentinels. */
	for (; i < NBT_BAD+1; i++)
		wd->drvp->badsect[i] = -1;
}
#endif
1739
1740 static void
1741 wd_set_geometry(struct wd_softc *wd)
1742 {
1743 struct dk_softc *dksc = &wd->sc_dksc;
1744 struct disk_geom *dg = &dksc->sc_dkdev.dk_geom;
1745
1746 memset(dg, 0, sizeof(*dg));
1747
1748 dg->dg_secperunit = wd->sc_capacity;
1749 dg->dg_secsize = wd->sc_blksize;
1750 dg->dg_nsectors = wd->sc_params.atap_sectors;
1751 dg->dg_ntracks = wd->sc_params.atap_heads;
1752 if ((wd->sc_flags & WDF_LBA) == 0)
1753 dg->dg_ncylinders = wd->sc_params.atap_cylinders;
1754
1755 disk_set_info(dksc->sc_dev, &dksc->sc_dkdev, wd->sc_typename);
1756 }
1757
/*
 * Fetch the drive's IDENTIFY data into *params.  Returns 0 on success,
 * 1 on failure.  On a command error the drive is reset and the IDENTIFY
 * retried once; if that also fails and the drive is a pre-ATA (ST506)
 * type, a fake geometry is synthesized instead of failing.
 */
int
wd_get_params(struct wd_softc *wd, struct ataparams *params)
{
	int retry = 0;
	struct ata_channel *chp = wd->drvp->chnl_softc;
	const int flags = AT_WAIT;

again:
	switch (wd->atabus->ata_get_params(wd->drvp, flags, params)) {
	case CMD_AGAIN:
		return 1;
	case CMD_ERR:
		/* Reset the drive and retry the IDENTIFY exactly once. */
		if (retry == 0) {
			retry++;
			ata_channel_lock(chp);
			(*wd->atabus->ata_reset_drive)(wd->drvp, flags, NULL);
			ata_channel_unlock(chp);
			goto again;
		}

		if (wd->drvp->drive_type != ATA_DRIVET_OLD)
			return 1;
		/*
		 * We `know' there's a drive here; just assume it's old.
		 * This geometry is only used to read the MBR and print a
		 * (false) attach message.
		 */
		strncpy(params->atap_model, "ST506",
		    sizeof params->atap_model);
		params->atap_config = ATA_CFG_FIXED;
		params->atap_cylinders = 1024;
		params->atap_heads = 8;
		params->atap_sectors = 17;
		params->atap_multi = 1;
		params->atap_capabilities1 = params->atap_capabilities2 = 0;
		wd->drvp->ata_vers = -1; /* Mark it as pre-ATA */
		/* FALLTHROUGH */
	case CMD_OK:
		return 0;
	default:
		panic("wd_get_params: bad return code from ata_get_params");
		/* NOTREACHED */
	}
}
1802
1803 int
1804 wd_getcache(struct wd_softc *wd, int *bitsp)
1805 {
1806 struct ataparams params;
1807
1808 if (wd_get_params(wd, ¶ms) != 0)
1809 return EIO;
1810 if (params.atap_cmd_set1 == 0x0000 ||
1811 params.atap_cmd_set1 == 0xffff ||
1812 (params.atap_cmd_set1 & WDC_CMD1_CACHE) == 0) {
1813 *bitsp = 0;
1814 return 0;
1815 }
1816 *bitsp = DKCACHE_WCHANGE | DKCACHE_READ;
1817 if (params.atap_cmd1_en & WDC_CMD1_CACHE)
1818 *bitsp |= DKCACHE_WRITE;
1819
1820 if (WD_USE_NCQ(wd) || (wd->drvp->drive_flags & ATA_DRIVE_WFUA))
1821 *bitsp |= DKCACHE_FUA;
1822
1823 return 0;
1824 }
1825
1826
1827 static int
1828 wd_check_error(const struct dk_softc *dksc, const struct ata_xfer *xfer,
1829 const char *func)
1830 {
1831 static const char at_errbits[] = "\20\10ERROR\11TIMEOU\12DF";
1832
1833 int flags = xfer->c_ata_c.flags;
1834
1835 if ((flags & AT_ERROR) != 0 && xfer->c_ata_c.r_error == WDCE_ABRT) {
1836 /* command not supported */
1837 aprint_debug_dev(dksc->sc_dev, "%s: not supported\n", func);
1838 return ENODEV;
1839 }
1840 if (flags & (AT_ERROR | AT_TIMEOU | AT_DF)) {
1841 char sbuf[sizeof(at_errbits) + 64];
1842 snprintb(sbuf, sizeof(sbuf), at_errbits, flags);
1843 device_printf(dksc->sc_dev, "%s: status=%s\n", func, sbuf);
1844 return EIO;
1845 }
1846 return 0;
1847 }
1848
1849 int
1850 wd_setcache(struct wd_softc *wd, int bits)
1851 {
1852 struct dk_softc *dksc = &wd->sc_dksc;
1853 struct ataparams params;
1854 struct ata_xfer *xfer;
1855 int error;
1856
1857 if (wd_get_params(wd, ¶ms) != 0)
1858 return EIO;
1859
1860 if (params.atap_cmd_set1 == 0x0000 ||
1861 params.atap_cmd_set1 == 0xffff ||
1862 (params.atap_cmd_set1 & WDC_CMD1_CACHE) == 0)
1863 return EOPNOTSUPP;
1864
1865 if ((bits & DKCACHE_READ) == 0 ||
1866 (bits & DKCACHE_SAVE) != 0)
1867 return EOPNOTSUPP;
1868
1869 xfer = ata_get_xfer(wd->drvp->chnl_softc, true);
1870
1871 xfer->c_ata_c.r_command = SET_FEATURES;
1872 xfer->c_ata_c.r_st_bmask = 0;
1873 xfer->c_ata_c.r_st_pmask = 0;
1874 xfer->c_ata_c.timeout = 30000; /* 30s timeout */
1875 xfer->c_ata_c.flags = AT_WAIT;
1876 if (bits & DKCACHE_WRITE)
1877 xfer->c_ata_c.r_features = WDSF_WRITE_CACHE_EN;
1878 else
1879 xfer->c_ata_c.r_features = WDSF_WRITE_CACHE_DS;
1880
1881 wd->atabus->ata_exec_command(wd->drvp, xfer);
1882 ata_wait_cmd(wd->drvp->chnl_softc, xfer);
1883
1884 error = wd_check_error(dksc, xfer, __func__);
1885 ata_free_xfer(wd->drvp->chnl_softc, xfer);
1886 return error;
1887 }
1888
1889 static int
1890 wd_standby(struct wd_softc *wd, int flags)
1891 {
1892 struct dk_softc *dksc = &wd->sc_dksc;
1893 struct ata_xfer *xfer;
1894 int error;
1895
1896 aprint_debug_dev(dksc->sc_dev, "standby immediate\n");
1897 xfer = ata_get_xfer(wd->drvp->chnl_softc, true);
1898
1899 xfer->c_ata_c.r_command = WDCC_STANDBY_IMMED;
1900 xfer->c_ata_c.r_st_bmask = WDCS_DRDY;
1901 xfer->c_ata_c.r_st_pmask = WDCS_DRDY;
1902 xfer->c_ata_c.flags = flags;
1903 xfer->c_ata_c.timeout = 30000; /* 30s timeout */
1904
1905 wd->atabus->ata_exec_command(wd->drvp, xfer);
1906 ata_wait_cmd(wd->drvp->chnl_softc, xfer);
1907
1908 error = wd_check_error(dksc, xfer, __func__);
1909 ata_free_xfer(wd->drvp->chnl_softc, xfer);
1910 return error;
1911 }
1912
1913 int
1914 wd_flushcache(struct wd_softc *wd, int flags)
1915 {
1916 struct dk_softc *dksc = &wd->sc_dksc;
1917 struct ata_xfer *xfer;
1918 int error;
1919
1920 /*
1921 * WDCC_FLUSHCACHE is here since ATA-4, but some drives report
1922 * only ATA-2 and still support it.
1923 */
1924 if (wd->drvp->ata_vers < 4 &&
1925 ((wd->sc_params.atap_cmd_set2 & WDC_CMD2_FC) == 0 ||
1926 wd->sc_params.atap_cmd_set2 == 0xffff))
1927 return ENODEV;
1928
1929 xfer = ata_get_xfer(wd->drvp->chnl_softc, true);
1930
1931 if ((wd->sc_params.atap_cmd2_en & ATA_CMD2_LBA48) != 0 &&
1932 (wd->sc_params.atap_cmd2_en & ATA_CMD2_FCE) != 0) {
1933 xfer->c_ata_c.r_command = WDCC_FLUSHCACHE_EXT;
1934 flags |= AT_LBA48;
1935 } else
1936 xfer->c_ata_c.r_command = WDCC_FLUSHCACHE;
1937 xfer->c_ata_c.r_st_bmask = WDCS_DRDY;
1938 xfer->c_ata_c.r_st_pmask = WDCS_DRDY;
1939 xfer->c_ata_c.flags = flags | AT_READREG;
1940 xfer->c_ata_c.timeout = 300000; /* 5m timeout */
1941
1942 wd->atabus->ata_exec_command(wd->drvp, xfer);
1943 ata_wait_cmd(wd->drvp->chnl_softc, xfer);
1944
1945 error = wd_check_error(dksc, xfer, __func__);
1946 wd->sc_flags &= ~WDF_DIRTY;
1947 ata_free_xfer(wd->drvp->chnl_softc, xfer);
1948 return error;
1949 }
1950
1951 /*
1952 * Execute TRIM command, assumes sleep context.
1953 */
1954 static int
1955 wd_trim(struct wd_softc *wd, daddr_t bno, long size)
1956 {
1957 struct dk_softc *dksc = &wd->sc_dksc;
1958 struct ata_xfer *xfer;
1959 int error;
1960 unsigned char *req;
1961
1962 xfer = ata_get_xfer(wd->drvp->chnl_softc, true);
1963
1964 req = kmem_zalloc(512, KM_SLEEP);
1965 req[0] = bno & 0xff;
1966 req[1] = (bno >> 8) & 0xff;
1967 req[2] = (bno >> 16) & 0xff;
1968 req[3] = (bno >> 24) & 0xff;
1969 req[4] = (bno >> 32) & 0xff;
1970 req[5] = (bno >> 40) & 0xff;
1971 req[6] = size & 0xff;
1972 req[7] = (size >> 8) & 0xff;
1973
1974 /*
1975 * XXX We could possibly use NCQ TRIM, which supports executing
1976 * this command concurrently. It would need some investigation, some
1977 * early or not so early disk firmware caused data loss with NCQ TRIM.
1978 * atastart() et.al would need to be adjusted to allow and support
1979 * running several non-I/O ATA commands in parallel.
1980 */
1981
1982 xfer->c_ata_c.r_command = ATA_DATA_SET_MANAGEMENT;
1983 xfer->c_ata_c.r_count = 1;
1984 xfer->c_ata_c.r_features = ATA_SUPPORT_DSM_TRIM;
1985 xfer->c_ata_c.r_st_bmask = WDCS_DRDY;
1986 xfer->c_ata_c.r_st_pmask = WDCS_DRDY;
1987 xfer->c_ata_c.timeout = 30000; /* 30s timeout */
1988 xfer->c_ata_c.data = req;
1989 xfer->c_ata_c.bcount = 512;
1990 xfer->c_ata_c.flags |= AT_WRITE | AT_WAIT;
1991
1992 wd->atabus->ata_exec_command(wd->drvp, xfer);
1993 ata_wait_cmd(wd->drvp->chnl_softc, xfer);
1994
1995 kmem_free(req, 512);
1996 error = wd_check_error(dksc, xfer, __func__);
1997 ata_free_xfer(wd->drvp->chnl_softc, xfer);
1998 return error;
1999 }
2000
2001 bool
2002 wd_shutdown(device_t dev, int how)
2003 {
2004 struct wd_softc *wd = device_private(dev);
2005
2006 /* the adapter needs to be enabled */
2007 if (wd->atabus->ata_addref(wd->drvp))
2008 return true; /* no need to complain */
2009
2010 wd_flushcache(wd, AT_POLL);
2011 if ((how & RB_POWERDOWN) == RB_POWERDOWN)
2012 wd_standby(wd, AT_POLL);
2013 return true;
2014 }
2015
2016 /*
2017 * Allocate space for a ioctl queue structure. Mostly taken from
2018 * scsipi_ioctl.c
2019 */
2020 struct wd_ioctl *
2021 wi_get(struct wd_softc *wd)
2022 {
2023 struct wd_ioctl *wi;
2024
2025 wi = kmem_zalloc(sizeof(struct wd_ioctl), KM_SLEEP);
2026 wi->wi_softc = wd;
2027 buf_init(&wi->wi_bp);
2028
2029 return (wi);
2030 }
2031
2032 /*
2033 * Free an ioctl structure and remove it from our list
2034 */
2035
2036 void
2037 wi_free(struct wd_ioctl *wi)
2038 {
2039 buf_destroy(&wi->wi_bp);
2040 kmem_free(wi, sizeof(*wi));
2041 }
2042
2043 /*
2044 * Find a wd_ioctl structure based on the struct buf.
2045 */
2046
2047 struct wd_ioctl *
2048 wi_find(struct buf *bp)
2049 {
2050 return container_of(bp, struct wd_ioctl, wi_bp);
2051 }
2052
/*
 * Return the transfer granularity for a pass-through command:
 * data-bearing read/write commands move whole device sectors,
 * everything else (IDENTIFY, SMART, ...) uses 512-byte blocks.
 */
static uint
wi_sector_size(const struct wd_ioctl * const wi)
{
	switch (wi->wi_atareq.command) {
	case WDCC_READ:
	case WDCC_WRITE:
	case WDCC_READMULTI:
	case WDCC_WRITEMULTI:
	case WDCC_READDMA:
	case WDCC_WRITEDMA:
	case WDCC_READ_EXT:
	case WDCC_WRITE_EXT:
	case WDCC_READMULTI_EXT:
	case WDCC_WRITEMULTI_EXT:
	case WDCC_READDMA_EXT:
	case WDCC_WRITEDMA_EXT:
	case WDCC_READ_FPDMA_QUEUED:
	case WDCC_WRITE_FPDMA_QUEUED:
		return wi->wi_softc->sc_blksize;
	default:
		return 512;
	}
}
2076
2077 /*
2078 * Ioctl pseudo strategy routine
2079 *
2080 * This is mostly stolen from scsipi_ioctl.c:scsistrategy(). What
2081 * happens here is:
2082 *
2083 * - wdioctl() queues a wd_ioctl structure.
2084 *
2085 * - wdioctl() calls physio/wdioctlstrategy based on whether or not
2086 * user space I/O is required. If physio() is called, physio() eventually
2087 * calls wdioctlstrategy().
2088 *
2089 * - In either case, wdioctlstrategy() calls wd->atabus->ata_exec_command()
2090 * to perform the actual command
2091 *
2092 * The reason for the use of the pseudo strategy routine is because
2093 * when doing I/O to/from user space, physio _really_ wants to be in
2094 * the loop. We could put the entire buffer into the ioctl request
2095 * structure, but that won't scale if we want to do things like download
2096 * microcode.
2097 */
2098
2099 void
2100 wdioctlstrategy(struct buf *bp)
2101 {
2102 struct wd_ioctl *wi;
2103 struct ata_xfer *xfer;
2104 int error = 0;
2105
2106 wi = wi_find(bp);
2107 if (wi == NULL) {
2108 printf("wdioctlstrategy: "
2109 "No matching ioctl request found in queue\n");
2110 error = EINVAL;
2111 goto out2;
2112 }
2113
2114 xfer = ata_get_xfer(wi->wi_softc->drvp->chnl_softc, true);
2115
2116 /*
2117 * Abort if physio broke up the transfer
2118 */
2119
2120 if (bp->b_bcount != wi->wi_atareq.datalen) {
2121 printf("physio split wd ioctl request... cannot proceed\n");
2122 error = EIO;
2123 goto out;
2124 }
2125
2126 /*
2127 * Abort if we didn't get a buffer size that was a multiple of
2128 * our sector size (or overflows CHS/LBA28 sector count)
2129 */
2130
2131 if ((bp->b_bcount % wi_sector_size(wi)) != 0 ||
2132 (bp->b_bcount / wi_sector_size(wi)) >=
2133 (1 << NBBY)) {
2134 error = EINVAL;
2135 goto out;
2136 }
2137
2138 /*
2139 * Make sure a timeout was supplied in the ioctl request
2140 */
2141
2142 if (wi->wi_atareq.timeout == 0) {
2143 error = EINVAL;
2144 goto out;
2145 }
2146
2147 if (wi->wi_atareq.flags & ATACMD_READ)
2148 xfer->c_ata_c.flags |= AT_READ;
2149 else if (wi->wi_atareq.flags & ATACMD_WRITE)
2150 xfer->c_ata_c.flags |= AT_WRITE;
2151
2152 if (wi->wi_atareq.flags & ATACMD_READREG)
2153 xfer->c_ata_c.flags |= AT_READREG;
2154
2155 if ((wi->wi_atareq.flags & ATACMD_LBA) != 0)
2156 xfer->c_ata_c.flags |= AT_LBA;
2157
2158 xfer->c_ata_c.flags |= AT_WAIT;
2159
2160 xfer->c_ata_c.timeout = wi->wi_atareq.timeout;
2161 xfer->c_ata_c.r_command = wi->wi_atareq.command;
2162 xfer->c_ata_c.r_lba = ((wi->wi_atareq.head & 0x0f) << 24) |
2163 (wi->wi_atareq.cylinder << 8) |
2164 wi->wi_atareq.sec_num;
2165 xfer->c_ata_c.r_count = wi->wi_atareq.sec_count;
2166 xfer->c_ata_c.r_features = wi->wi_atareq.features;
2167 xfer->c_ata_c.r_st_bmask = WDCS_DRDY;
2168 xfer->c_ata_c.r_st_pmask = WDCS_DRDY;
2169 xfer->c_ata_c.data = wi->wi_bp.b_data;
2170 xfer->c_ata_c.bcount = wi->wi_bp.b_bcount;
2171
2172 wi->wi_softc->atabus->ata_exec_command(wi->wi_softc->drvp, xfer);
2173 ata_wait_cmd(wi->wi_softc->drvp->chnl_softc, xfer);
2174
2175 if (xfer->c_ata_c.flags & (AT_ERROR | AT_TIMEOU | AT_DF)) {
2176 if (xfer->c_ata_c.flags & AT_ERROR) {
2177 wi->wi_atareq.retsts = ATACMD_ERROR;
2178 wi->wi_atareq.error = xfer->c_ata_c.r_error;
2179 } else if (xfer->c_ata_c.flags & AT_DF)
2180 wi->wi_atareq.retsts = ATACMD_DF;
2181 else
2182 wi->wi_atareq.retsts = ATACMD_TIMEOUT;
2183 } else {
2184 wi->wi_atareq.retsts = ATACMD_OK;
2185 if (wi->wi_atareq.flags & ATACMD_READREG) {
2186 wi->wi_atareq.command = xfer->c_ata_c.r_status;
2187 wi->wi_atareq.features = xfer->c_ata_c.r_error;
2188 wi->wi_atareq.sec_count = xfer->c_ata_c.r_count;
2189 wi->wi_atareq.sec_num = xfer->c_ata_c.r_lba & 0xff;
2190 wi->wi_atareq.head = (xfer->c_ata_c.r_device & 0xf0) |
2191 ((xfer->c_ata_c.r_lba >> 24) & 0x0f);
2192 wi->wi_atareq.cylinder =
2193 (xfer->c_ata_c.r_lba >> 8) & 0xffff;
2194 wi->wi_atareq.error = xfer->c_ata_c.r_error;
2195 }
2196 }
2197
2198 out:
2199 ata_free_xfer(wi->wi_softc->drvp->chnl_softc, xfer);
2200 out2:
2201 bp->b_error = error;
2202 if (error)
2203 bp->b_resid = bp->b_bcount;
2204 biodone(bp);
2205 }
2206
2207 static void
2208 wd_sysctl_attach(struct wd_softc *wd)
2209 {
2210 struct dk_softc *dksc = &wd->sc_dksc;
2211 const struct sysctlnode *node;
2212 int error;
2213
2214 /* sysctl set-up */
2215 if (sysctl_createv(&wd->nodelog, 0, NULL, &node,
2216 0, CTLTYPE_NODE, dksc->sc_xname,
2217 SYSCTL_DESCR("wd driver settings"),
2218 NULL, 0, NULL, 0,
2219 CTL_HW, CTL_CREATE, CTL_EOL) != 0) {
2220 aprint_error_dev(dksc->sc_dev,
2221 "could not create %s.%s sysctl node\n",
2222 "hw", dksc->sc_xname);
2223 return;
2224 }
2225
2226 wd->drv_ncq = true;
2227 if ((error = sysctl_createv(&wd->nodelog, 0, NULL, NULL,
2228 CTLFLAG_READWRITE, CTLTYPE_BOOL, "use_ncq",
2229 SYSCTL_DESCR("use NCQ if supported"),
2230 NULL, 0, &wd->drv_ncq, 0,
2231 CTL_HW, node->sysctl_num, CTL_CREATE, CTL_EOL))
2232 != 0) {
2233 aprint_error_dev(dksc->sc_dev,
2234 "could not create %s.%s.use_ncq sysctl - error %d\n",
2235 "hw", dksc->sc_xname, error);
2236 return;
2237 }
2238
2239 wd->drv_ncq_prio = false;
2240 if ((error = sysctl_createv(&wd->nodelog, 0, NULL, NULL,
2241 CTLFLAG_READWRITE, CTLTYPE_BOOL, "use_ncq_prio",
2242 SYSCTL_DESCR("use NCQ PRIORITY if supported"),
2243 NULL, 0, &wd->drv_ncq_prio, 0,
2244 CTL_HW, node->sysctl_num, CTL_CREATE, CTL_EOL))
2245 != 0) {
2246 aprint_error_dev(dksc->sc_dev,
2247 "could not create %s.%s.use_ncq_prio sysctl - error %d\n",
2248 "hw", dksc->sc_xname, error);
2249 return;
2250 }
2251
2252 #ifdef WD_CHAOS_MONKEY
2253 wd->drv_chaos_freq = 0;
2254 if ((error = sysctl_createv(&wd->nodelog, 0, NULL, NULL,
2255 CTLFLAG_READWRITE, CTLTYPE_INT, "chaos_freq",
2256 SYSCTL_DESCR("simulated bio read error rate"),
2257 NULL, 0, &wd->drv_chaos_freq, 0,
2258 CTL_HW, node->sysctl_num, CTL_CREATE, CTL_EOL))
2259 != 0) {
2260 aprint_error_dev(dksc->sc_dev,
2261 "could not create %s.%s.chaos_freq sysctl - error %d\n",
2262 "hw", dksc->sc_xname, error);
2263 return;
2264 }
2265
2266 wd->drv_chaos_cnt = 0;
2267 if ((error = sysctl_createv(&wd->nodelog, 0, NULL, NULL,
2268 CTLFLAG_READONLY, CTLTYPE_INT, "chaos_cnt",
2269 SYSCTL_DESCR("number of processed bio reads"),
2270 NULL, 0, &wd->drv_chaos_cnt, 0,
2271 CTL_HW, node->sysctl_num, CTL_CREATE, CTL_EOL))
2272 != 0) {
2273 aprint_error_dev(dksc->sc_dev,
2274 "could not create %s.%s.chaos_cnt sysctl - error %d\n",
2275 "hw", dksc->sc_xname, error);
2276 return;
2277 }
2278 #endif
2279
2280 }
2281
/*
 * Remove every sysctl node recorded in wd->nodelog, i.e. the whole
 * subtree built by wd_sysctl_attach().  Safe to call even if attach
 * bailed out early: teardown simply unwinds whatever was logged.
 */
static void
wd_sysctl_detach(struct wd_softc *wd)
{
	sysctl_teardown(&wd->nodelog);
}
2287
2288 #ifdef ATADEBUG
2289 int wddebug(void);
2290
2291 int
2292 wddebug(void)
2293 {
2294 struct wd_softc *wd;
2295 struct dk_softc *dksc;
2296 int unit;
2297
2298 for (unit = 0; unit <= 3; unit++) {
2299 wd = device_lookup_private(&wd_cd, unit);
2300 if (wd == NULL)
2301 continue;
2302 dksc = &wd->sc_dksc;
2303 printf("%s fl %x bufq %p:\n",
2304 dksc->sc_xname, wd->sc_flags, bufq_peek(dksc->sc_bufq));
2305
2306 atachannel_debug(wd->drvp->chnl_softc);
2307 }
2308 return 0;
2309 }
2310 #endif /* ATADEBUG */
2311