      1 /*	$NetBSD: wd.c,v 1.472 2025/02/23 02:07:35 jakllsch Exp $ */
      2 
      3 /*
      4  * Copyright (c) 1998, 2001 Manuel Bouyer.  All rights reserved.
      5  *
      6  * Redistribution and use in source and binary forms, with or without
      7  * modification, are permitted provided that the following conditions
      8  * are met:
      9  * 1. Redistributions of source code must retain the above copyright
     10  *	notice, this list of conditions and the following disclaimer.
     11  * 2. Redistributions in binary form must reproduce the above copyright
     12  *	notice, this list of conditions and the following disclaimer in the
     13  *	documentation and/or other materials provided with the distribution.
     14  *
     15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
     16  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
     17  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
     18  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
     19  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
     20  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
     21  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
     22  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
     23  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
     24  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
     25  */
     26 
     27 /*-
     28  * Copyright (c) 1998, 2003, 2004 The NetBSD Foundation, Inc.
     29  * All rights reserved.
     30  *
     31  * This code is derived from software contributed to The NetBSD Foundation
     32  * by Charles M. Hannum and by Onno van der Linden.
     33  *
     34  * Redistribution and use in source and binary forms, with or without
     35  * modification, are permitted provided that the following conditions
     36  * are met:
     37  * 1. Redistributions of source code must retain the above copyright
     38  *    notice, this list of conditions and the following disclaimer.
     39  * 2. Redistributions in binary form must reproduce the above copyright
     40  *    notice, this list of conditions and the following disclaimer in the
     41  *    documentation and/or other materials provided with the distribution.
     42  *
     43  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     44  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     45  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     46  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     47  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     48  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     49  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     50  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     51  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     52  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     53  * POSSIBILITY OF SUCH DAMAGE.
     54  */
     55 
     56 #include <sys/cdefs.h>
     57 __KERNEL_RCSID(0, "$NetBSD: wd.c,v 1.472 2025/02/23 02:07:35 jakllsch Exp $");
     58 
     59 #include "opt_ata.h"
     60 #include "opt_wd.h"
     61 
     62 #include <sys/param.h>
     63 #include <sys/systm.h>
     64 #include <sys/kernel.h>
     65 #include <sys/conf.h>
     66 #include <sys/file.h>
     67 #include <sys/stat.h>
     68 #include <sys/ioctl.h>
     69 #include <sys/buf.h>
     70 #include <sys/bufq.h>
     71 #include <sys/uio.h>
     72 #include <sys/device.h>
     73 #include <sys/disklabel.h>
     74 #include <sys/disk.h>
     75 #include <sys/syslog.h>
     76 #include <sys/proc.h>
     77 #include <sys/reboot.h>
     78 #include <sys/vnode.h>
     79 #include <sys/rndsource.h>
     80 
     81 #include <sys/intr.h>
     82 #include <sys/bus.h>
     83 
     84 #include <dev/ata/atareg.h>
     85 #include <dev/ata/atavar.h>
     86 #include <dev/ata/wdvar.h>
     87 #include <dev/ic/wdcreg.h>
     88 #include <sys/ataio.h>
     89 #include "locators.h"
     90 
     91 #include <prop/proplib.h>
     92 
     93 #define	WDIORETRIES_SINGLE 4	/* number of retries for single-sector */
     94 #define	WDIORETRIES	5	/* number of retries before giving up */
     95 #define	RECOVERYTIME hz/2	/* time to wait before retrying a cmd */
     96 
     97 #define	WDUNIT(dev)		DISKUNIT(dev)
     98 #define	WDPART(dev)		DISKPART(dev)
     99 #define	WDMINOR(unit, part)	DISKMINOR(unit, part)
    100 #define	MAKEWDDEV(maj, unit, part)	MAKEDISKDEV(maj, unit, part)
    101 
    102 #define	WDLABELDEV(dev)	(MAKEWDDEV(major(dev), WDUNIT(dev), RAW_PART))
    103 
    104 #define DEBUG_FUNCS  0x08
    105 #define DEBUG_PROBE  0x10
    106 #define DEBUG_DETACH 0x20
    107 #define DEBUG_XFERS  0x40
    108 #ifdef ATADEBUG
    109 #ifndef ATADEBUG_WD_MASK
    110 #define ATADEBUG_WD_MASK 0x0
    111 #endif
    112 int wdcdebug_wd_mask = ATADEBUG_WD_MASK;
    113 #define ATADEBUG_PRINT(args, level) \
    114 	if (wdcdebug_wd_mask & (level)) \
    115 		printf args
    116 #else
    117 #define ATADEBUG_PRINT(args, level)
    118 #endif
    119 
    120 static int	wdprobe(device_t, cfdata_t, void *);
    121 static void	wdattach(device_t, device_t, void *);
    122 static int	wddetach(device_t, int);
    123 static void	wdperror(const struct wd_softc *, struct ata_xfer *);
    124 
    125 static void	wdminphys(struct buf *);
    126 
    127 static int	wd_firstopen(device_t, dev_t, int, int);
    128 static int	wd_lastclose(device_t);
    129 static bool	wd_suspend(device_t, const pmf_qual_t *);
    130 static int	wd_standby(struct wd_softc *, int);
    131 
    132 CFATTACH_DECL3_NEW(wd, sizeof(struct wd_softc),
    133     wdprobe, wdattach, wddetach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);
    134 
    135 extern struct cfdriver wd_cd;
    136 
    137 static dev_type_open(wdopen);
    138 static dev_type_close(wdclose);
    139 static dev_type_read(wdread);
    140 static dev_type_write(wdwrite);
    141 static dev_type_ioctl(wdioctl);
    142 static dev_type_strategy(wdstrategy);
    143 static dev_type_dump(wddump);
    144 static dev_type_size(wdsize);
    145 static dev_type_discard(wddiscard);
    146 
    147 const struct bdevsw wd_bdevsw = {
    148 	.d_open = wdopen,
    149 	.d_close = wdclose,
    150 	.d_strategy = wdstrategy,
    151 	.d_ioctl = wdioctl,
    152 	.d_dump = wddump,
    153 	.d_psize = wdsize,
    154 	.d_discard = wddiscard,
    155 	.d_cfdriver = &wd_cd,
    156 	.d_devtounit = disklabel_dev_unit,
    157 	.d_flag = D_DISK
    158 };
    159 
    160 const struct cdevsw wd_cdevsw = {
    161 	.d_open = wdopen,
    162 	.d_close = wdclose,
    163 	.d_read = wdread,
    164 	.d_write = wdwrite,
    165 	.d_ioctl = wdioctl,
    166 	.d_stop = nostop,
    167 	.d_tty = notty,
    168 	.d_poll = nopoll,
    169 	.d_mmap = nommap,
    170 	.d_kqfilter = nokqfilter,
    171 	.d_discard = wddiscard,
    172 	.d_cfdriver = &wd_cd,
    173 	.d_devtounit = disklabel_dev_unit,
    174 	.d_flag = D_DISK
    175 };
    176 
    177 /* #define WD_DUMP_NOT_TRUSTED if you just want to watch */
    178 static int wddoingadump = 0;
    179 static int wddumprecalibrated = 0;
    180 
    181 /*
    182  * Glue necessary to hook WDCIOCCOMMAND into physio
    183  */
    184 
    185 struct wd_ioctl {
    186 	LIST_ENTRY(wd_ioctl) wi_list;
    187 	struct buf wi_bp;
    188 	struct uio wi_uio;
    189 	struct iovec wi_iov;
    190 	atareq_t wi_atareq;
    191 	struct wd_softc *wi_softc;
    192 };
    193 
    194 static struct	wd_ioctl *wi_find(struct buf *);
    195 static void	wi_free(struct wd_ioctl *);
    196 static struct	wd_ioctl *wi_get(struct wd_softc *);
    197 static void	wdioctlstrategy(struct buf *);
    198 
    199 static void	wdrestart(void *);
    200 static void	wdstart1(struct wd_softc *, struct buf *, struct ata_xfer *);
    201 static int	wd_diskstart(device_t, struct buf *);
    202 static int	wd_dumpblocks(device_t, void *, daddr_t, int);
    203 static void	wd_iosize(device_t, int *);
    204 static int	wd_discard(device_t, off_t, off_t);
    205 static void	wdbioretry(void *);
    206 static void	wdbiorequeue(void *);
    207 static void	wddone(device_t, struct ata_xfer *);
    208 static int	wd_get_params(struct wd_softc *, struct ataparams *);
    209 static void	wd_set_geometry(struct wd_softc *);
    210 static int	wd_flushcache(struct wd_softc *, int);
    211 static int	wd_trim(struct wd_softc *, daddr_t, long);
    212 static bool	wd_shutdown(device_t, int);
    213 
    214 static int wd_getcache(struct wd_softc *, int *);
    215 static int wd_setcache(struct wd_softc *, int);
    216 
    217 static void wd_sysctl_attach(struct wd_softc *);
    218 static void wd_sysctl_detach(struct wd_softc *);
    219 
    220 static const struct dkdriver wddkdriver = {
    221 	.d_open = wdopen,
    222 	.d_close = wdclose,
    223 	.d_strategy = wdstrategy,
    224 	.d_minphys = wdminphys,
    225 	.d_diskstart = wd_diskstart,
    226 	.d_dumpblocks = wd_dumpblocks,
    227 	.d_iosize = wd_iosize,
    228 	.d_firstopen = wd_firstopen,
    229 	.d_lastclose = wd_lastclose,
    230 	.d_discard = wd_discard
    231 };
    232 
    233 #ifdef HAS_BAD144_HANDLING
    234 static void bad144intern(struct wd_softc *);
    235 #endif
    236 
    237 #define	WD_QUIRK_SPLIT_MOD15_WRITE	0x0001	/* must split certain writes */
    238 
    239 #define	WD_QUIRK_FMT "\20\1SPLIT_MOD15_WRITE"
    240 
    241 /*
    242  * Quirk table for IDE drives.  Put more-specific matches first, since
     243  * a simple globbing routine is used for matching.
    244  */
    245 static const struct wd_quirk {
    246 	const char *wdq_match;		/* inquiry pattern to match */
    247 	int wdq_quirks;			/* drive quirks */
    248 } wd_quirk_table[] = {
    249 	/*
    250 	 * Some Seagate S-ATA drives have a PHY which can get confused
    251 	 * with the way data is packetized by some S-ATA controllers.
    252 	 *
    253 	 * The work-around is to split in two any write transfer whose
    254 	 * sector count % 15 == 1 (assuming 512 byte sectors).
    255 	 *
    256 	 * XXX This is an incomplete list.  There are at least a couple
    257 	 * XXX more model numbers.  If you have trouble with such transfers
    258 	 * XXX (8K is the most common) on Seagate S-ATA drives, please
    259 	 * XXX notify thorpej (at) NetBSD.org.
    260 	 *
    261 	 * The ST360015AS has not yet been confirmed to have this
    262 	 * issue, however, it is the only other drive in the
    263 	 * Seagate Barracuda Serial ATA V family.
    264 	 *
    265 	 */
    266 	{ "ST3120023AS", WD_QUIRK_SPLIT_MOD15_WRITE },
    267 	{ "ST380023AS", WD_QUIRK_SPLIT_MOD15_WRITE },
    268 	{ "ST360015AS", WD_QUIRK_SPLIT_MOD15_WRITE },
    269 	{ NULL,
    270 	  0 }
    271 };
    272 
    273 static const struct wd_quirk *
    274 wd_lookup_quirks(const char *name)
    275 {
    276 	const struct wd_quirk *wdq;
    277 	const char *estr;
    278 
    279 	for (wdq = wd_quirk_table; wdq->wdq_match != NULL; wdq++) {
    280 		/*
    281 		 * We only want exact matches (which include matches
    282 		 * against globbing characters).
    283 		 */
    284 		if (pmatch(name, wdq->wdq_match, &estr) == 2)
    285 			return (wdq);
    286 	}
    287 	return (NULL);
    288 }
    289 
    290 static int
    291 wdprobe(device_t parent, cfdata_t match, void *aux)
    292 {
    293 	struct ata_device *adev = aux;
    294 
    295 	if (adev == NULL)
    296 		return 0;
    297 	if (adev->adev_bustype->bustype_type != SCSIPI_BUSTYPE_ATA)
    298 		return 0;
    299 
    300 	if (match->cf_loc[ATA_HLCF_DRIVE] != ATA_HLCF_DRIVE_DEFAULT &&
    301 	    match->cf_loc[ATA_HLCF_DRIVE] != adev->adev_drv_data->drive)
    302 		return 0;
    303 	return 1;
    304 }
    305 
    306 static void
    307 wdattach(device_t parent, device_t self, void *aux)
    308 {
    309 	struct wd_softc *wd = device_private(self);
    310 	struct dk_softc *dksc = &wd->sc_dksc;
    311 	struct ata_device *adev= aux;
    312 	int i, blank;
    313 	char tbuf[41],pbuf[9], c, *p, *q;
    314 	const struct wd_quirk *wdq;
    315 	int dtype = DKTYPE_UNKNOWN;
    316 
    317 	dksc->sc_dev = self;
    318 
    319 	ATADEBUG_PRINT(("wdattach\n"), DEBUG_FUNCS | DEBUG_PROBE);
    320 	mutex_init(&wd->sc_lock, MUTEX_DEFAULT, IPL_BIO);
    321 #ifdef WD_SOFTBADSECT
    322 	SLIST_INIT(&wd->sc_bslist);
    323 	cv_init(&wd->sc_bslist_cv, "wdbadsect");
    324 #endif
    325 	wd->atabus = adev->adev_bustype;
    326 	wd->inflight = 0;
    327 	wd->drvp = adev->adev_drv_data;
    328 
    329 	wd->drvp->drv_openings = 1;
    330 	wd->drvp->drv_done = wddone;
    331 	wd->drvp->drv_softc = dksc->sc_dev; /* done in atabusconfig_thread()
    332 					     but too late */
    333 
    334 	SLIST_INIT(&wd->sc_retry_list);
    335 	SLIST_INIT(&wd->sc_requeue_list);
    336 	callout_init(&wd->sc_retry_callout, 0);		/* XXX MPSAFE */
    337 	callout_init(&wd->sc_requeue_callout, 0);	/* XXX MPSAFE */
    338 	callout_init(&wd->sc_restart_diskqueue, 0);	/* XXX MPSAFE */
    339 
    340 	aprint_naive("\n");
    341 	aprint_normal("\n");
    342 
    343 	/* read our drive info */
    344 	if (wd_get_params(wd, &wd->sc_params) != 0) {
    345 		aprint_error_dev(self, "IDENTIFY failed\n");
    346 		goto out;
    347 	}
    348 
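	/*
	 * Copy the IDENTIFY model string into tbuf, collapsing runs of
	 * spaces into a single space and stopping at the first NUL.
	 */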
    349 	for (blank = 0, p = wd->sc_params.atap_model, q = tbuf, i = 0;
    350 	    i < sizeof(wd->sc_params.atap_model); i++) {
    351 		c = *p++;
    352 		if (c == '\0')
    353 			break;
    354 		if (c != ' ') {
    355 			if (blank) {
    356 				*q++ = ' ';
    357 				blank = 0;
    358 			}
    359 			*q++ = c;
    360 		} else
    361 			blank = 1;
    362 	}
    363 	*q++ = '\0';
    364 
    365 	wd->sc_typename = kmem_asprintf("%s", tbuf);
    366 	aprint_normal_dev(self, "<%s>\n", wd->sc_typename);
    367 
    368 	wdq = wd_lookup_quirks(tbuf);
    369 	if (wdq != NULL)
    370 		wd->sc_quirks = wdq->wdq_quirks;
    371 
    372 	if (wd->sc_quirks != 0) {
    373 		char sbuf[sizeof(WD_QUIRK_FMT) + 64];
    374 		snprintb(sbuf, sizeof(sbuf), WD_QUIRK_FMT, wd->sc_quirks);
    375 		aprint_normal_dev(self, "quirks %s\n", sbuf);
    376 
    377 		if (wd->sc_quirks & WD_QUIRK_SPLIT_MOD15_WRITE) {
    378 			aprint_error_dev(self, "drive corrupts write transfers with certain controllers, consider replacing\n");
    379 		}
    380 	}
    381 
    382 	if ((wd->sc_params.atap_multi & 0xff) > 1) {
    383 		wd->drvp->multi = wd->sc_params.atap_multi & 0xff;
    384 	} else {
    385 		wd->drvp->multi = 1;
    386 	}
    387 
    388 	aprint_verbose_dev(self, "drive supports %d-sector PIO transfers,",
    389 	    wd->drvp->multi);
    390 
    391 	/* 48-bit LBA addressing */
    392 	if ((wd->sc_params.atap_cmd2_en & ATA_CMD2_LBA48) != 0)
    393 		wd->sc_flags |= WDF_LBA48;
    394 
    395 	/* Prior to ATA-4, LBA was optional. */
    396 	if ((wd->sc_params.atap_capabilities1 & WDC_CAP_LBA) != 0)
    397 		wd->sc_flags |= WDF_LBA;
    398 #if 0
    399 	/* ATA-4 requires LBA. */
    400 	if (wd->sc_params.atap_ataversion != 0xffff &&
    401 	    wd->sc_params.atap_ataversion >= WDC_VER_ATA4)
    402 		wd->sc_flags |= WDF_LBA;
    403 #endif
    404 
    405 	if ((wd->sc_flags & WDF_LBA48) != 0) {
    406 		aprint_verbose(" LBA48 addressing\n");
    407 		wd->sc_capacity =
    408 		    ((uint64_t) wd->sc_params.atap_max_lba[3] << 48) |
    409 		    ((uint64_t) wd->sc_params.atap_max_lba[2] << 32) |
    410 		    ((uint64_t) wd->sc_params.atap_max_lba[1] << 16) |
    411 		    ((uint64_t) wd->sc_params.atap_max_lba[0] <<  0);
    412 		wd->sc_capacity28 =
    413 		    (wd->sc_params.atap_capacity[1] << 16) |
    414 		    wd->sc_params.atap_capacity[0];
    415 		/*
     416 		 * Clamp an invalid LBA28 capacity so LBA48 is used beyond it.
    417 		 */
    418 		if (wd->sc_capacity28 > 0xfffffff)
    419 			wd->sc_capacity28 = 0xfffffff;
    420 	} else if ((wd->sc_flags & WDF_LBA) != 0) {
    421 		aprint_verbose(" LBA addressing\n");
    422 		wd->sc_capacity28 =
    423 		    (wd->sc_params.atap_capacity[1] << 16) |
    424 		    wd->sc_params.atap_capacity[0];
    425 		/*
    426 		 * Limit capacity to LBA28 numbers to avoid overflow.
    427 		 */
    428 		if (wd->sc_capacity28 > 0xfffffff)
    429 			wd->sc_capacity28 = 0xfffffff;
    430 		wd->sc_capacity = wd->sc_capacity28;
    431 	} else {
    432 		aprint_verbose(" chs addressing\n");
    433 		wd->sc_capacity =
    434 		    wd->sc_params.atap_cylinders *
    435 		    wd->sc_params.atap_heads *
    436 		    wd->sc_params.atap_sectors;
    437 		/*
    438 		 * LBA28 size is ignored for CHS addressing. Use a reasonable
    439 		 * value for debugging. The CHS values may be artificial and
    440 		 * are mostly ignored.
    441 		 */
    442 		if (wd->sc_capacity < 0xfffffff)
    443 			wd->sc_capacity28 = wd->sc_capacity;
    444 		else
    445 			wd->sc_capacity28 = 0xfffffff;
    446 	}
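	/*
	 * When the drive reports a long logical sector (ATA_SECSZ_LLS), the
	 * IDENTIFY data gives the logical sector size as a count of 16-bit
	 * words, hence the multiplication by 2 below to get bytes; otherwise
	 * fall back to the traditional 512-byte sector.
	 */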
    447 	if ((wd->sc_params.atap_secsz & ATA_SECSZ_VALID_MASK) == ATA_SECSZ_VALID
    448 	    && ((wd->sc_params.atap_secsz & ATA_SECSZ_LLS) != 0)) {
    449 		wd->sc_blksize = 2ULL *
    450 		    ((uint32_t)((wd->sc_params.atap_lls_secsz[1] << 16) |
    451 		    wd->sc_params.atap_lls_secsz[0]));
    452 	} else {
    453 		wd->sc_blksize = 512;
    454 	}
    455 	wd->sc_sectoralign.dsa_firstaligned = 0;
    456 	wd->sc_sectoralign.dsa_alignment = 1;
    457 	if ((wd->sc_params.atap_secsz & ATA_SECSZ_VALID_MASK) == ATA_SECSZ_VALID
    458 	    && ((wd->sc_params.atap_secsz & ATA_SECSZ_LPS) != 0)) {
    459 		wd->sc_sectoralign.dsa_alignment = 1 <<
    460 		    (wd->sc_params.atap_secsz & ATA_SECSZ_LPS_SZMSK);
    461 		if ((wd->sc_params.atap_logical_align & ATA_LA_VALID_MASK) ==
    462 		    ATA_LA_VALID) {
    463 			wd->sc_sectoralign.dsa_firstaligned =
    464 			    wd->sc_params.atap_logical_align & ATA_LA_MASK;
    465 		}
    466 	}
    467 	wd->sc_capacity512 = (wd->sc_capacity * wd->sc_blksize) / DEV_BSIZE;
    468 	format_bytes(pbuf, sizeof(pbuf), wd->sc_capacity * wd->sc_blksize);
    469 	aprint_normal_dev(self, "%s, %d cyl, %d head, %d sec, "
    470 	    "%d bytes/sect x %llu sectors",
    471 	    pbuf,
    472 	    (wd->sc_flags & WDF_LBA) ? (int)(wd->sc_capacity /
    473 		(wd->sc_params.atap_heads * wd->sc_params.atap_sectors)) :
    474 		wd->sc_params.atap_cylinders,
    475 	    wd->sc_params.atap_heads, wd->sc_params.atap_sectors,
    476 	    wd->sc_blksize, (unsigned long long)wd->sc_capacity);
    477 	if (wd->sc_sectoralign.dsa_alignment != 1) {
    478 		aprint_normal(" (%d bytes/physsect",
    479 		    wd->sc_sectoralign.dsa_alignment * wd->sc_blksize);
    480 		if (wd->sc_sectoralign.dsa_firstaligned != 0) {
    481 			aprint_normal("; first aligned sector: %jd",
    482 			    (intmax_t)wd->sc_sectoralign.dsa_firstaligned);
    483 		}
    484 		aprint_normal(")");
    485 	}
    486 	aprint_normal("\n");
    487 
    488 	ATADEBUG_PRINT(("%s: atap_dmatiming_mimi=%d, atap_dmatiming_recom=%d\n",
    489 	    device_xname(self), wd->sc_params.atap_dmatiming_mimi,
    490 	    wd->sc_params.atap_dmatiming_recom), DEBUG_PROBE);
    491 
    492 	if (wd->sc_blksize <= 0 || !powerof2(wd->sc_blksize) ||
    493 	    wd->sc_blksize < DEV_BSIZE || wd->sc_blksize > MAXPHYS) {
    494 		aprint_normal_dev(self, "WARNING: block size %u "
    495 		    "might not actually work\n", wd->sc_blksize);
    496 	}
    497 
    498 	if (strcmp(wd->sc_params.atap_model, "ST506") == 0)
    499 		dtype = DKTYPE_ST506;
    500 	else
    501 		dtype = DKTYPE_ESDI;
    502 
    503 out:
    504 	/*
    505 	 * Initialize and attach the disk structure.
    506 	 */
    507 	dk_init(dksc, self, dtype);
    508 	disk_init(&dksc->sc_dkdev, dksc->sc_xname, &wddkdriver);
    509 
    510 	/* Attach dk and disk subsystems */
    511 	dk_attach(dksc);
    512 	disk_attach(&dksc->sc_dkdev);
    513 	wd_set_geometry(wd);
    514 
    515 	bufq_alloc(&dksc->sc_bufq, BUFQ_DISK_DEFAULT_STRAT, BUFQ_SORT_RAWBLOCK);
    516 
    517 	/* reference to label structure, used by ata code */
    518 	wd->drvp->lp = dksc->sc_dkdev.dk_label;
    519 
    520 	/* Discover wedges on this disk. */
    521 	dkwedge_discover(&dksc->sc_dkdev);
    522 
    523 	if (!pmf_device_register1(self, wd_suspend, NULL, wd_shutdown))
    524 		aprint_error_dev(self, "couldn't establish power handler\n");
    525 
    526 	wd_sysctl_attach(wd);
    527 }
    528 
    529 static bool
    530 wd_suspend(device_t dv, const pmf_qual_t *qual)
    531 {
    532 	struct wd_softc *sc = device_private(dv);
    533 
    534 	/* the adapter needs to be enabled */
    535 	if (sc->atabus->ata_addref(sc->drvp))
    536 		return true; /* no need to complain */
    537 
    538 	wd_flushcache(sc, AT_WAIT);
    539 	wd_standby(sc, AT_WAIT);
    540 
    541 	sc->atabus->ata_delref(sc->drvp);
    542 	return true;
    543 }
    544 
    545 static int
    546 wddetach(device_t self, int flags)
    547 {
    548 	struct wd_softc *wd = device_private(self);
    549 	struct dk_softc *dksc = &wd->sc_dksc;
    550 	int bmaj, cmaj, i, mn, rc;
    551 
    552 	if ((rc = disk_begindetach(&dksc->sc_dkdev, wd_lastclose, self, flags)) != 0)
    553 		return rc;
    554 
    555 	/* locate the major number */
    556 	bmaj = bdevsw_lookup_major(&wd_bdevsw);
    557 	cmaj = cdevsw_lookup_major(&wd_cdevsw);
    558 
    559 	/* Nuke the vnodes for any open instances. */
    560 	for (i = 0; i < MAXPARTITIONS; i++) {
    561 		mn = WDMINOR(device_unit(self), i);
    562 		vdevgone(bmaj, mn, mn, VBLK);
    563 		vdevgone(cmaj, mn, mn, VCHR);
    564 	}
    565 
    566 	dk_drain(dksc);
    567 
    568 	/* Kill off any pending commands. */
    569 	mutex_enter(&wd->sc_lock);
    570 	wd->atabus->ata_killpending(wd->drvp);
    571 
    572 	callout_halt(&wd->sc_retry_callout, &wd->sc_lock);
    573 	callout_destroy(&wd->sc_retry_callout);
    574 	callout_halt(&wd->sc_requeue_callout, &wd->sc_lock);
    575 	callout_destroy(&wd->sc_requeue_callout);
    576 	callout_halt(&wd->sc_restart_diskqueue, &wd->sc_lock);
    577 	callout_destroy(&wd->sc_restart_diskqueue);
    578 
    579 	mutex_exit(&wd->sc_lock);
    580 
    581 	bufq_free(dksc->sc_bufq);
    582 
    583 	/* Delete all of our wedges. */
    584 	dkwedge_delall(&dksc->sc_dkdev);
    585 
    586 	if (flags & DETACH_POWEROFF)
    587 		wd_standby(wd, AT_POLL);
    588 
    589 	/* Detach from the disk list. */
    590 	disk_detach(&dksc->sc_dkdev);
    591 	disk_destroy(&dksc->sc_dkdev);
    592 
    593 	dk_detach(dksc);
    594 
    595 #ifdef WD_SOFTBADSECT
    596 	/* Clean out the bad sector list */
    597 	while (!SLIST_EMPTY(&wd->sc_bslist)) {
    598 		struct disk_badsectors *dbs = SLIST_FIRST(&wd->sc_bslist);
    599 		SLIST_REMOVE_HEAD(&wd->sc_bslist, dbs_next);
    600 		kmem_free(dbs, sizeof(*dbs));
    601 	}
    602 	wd->sc_bscount = 0;
    603 #endif
    604 	if (wd->sc_typename != NULL) {
    605 		kmem_free(wd->sc_typename, strlen(wd->sc_typename) + 1);
    606 		wd->sc_typename = NULL;
    607 	}
    608 
    609 	pmf_device_deregister(self);
    610 
    611 	wd_sysctl_detach(wd);
    612 
    613 #ifdef WD_SOFTBADSECT
    614 	KASSERT(SLIST_EMPTY(&wd->sc_bslist));
    615 	cv_destroy(&wd->sc_bslist_cv);
    616 #endif
    617 
    618 	mutex_destroy(&wd->sc_lock);
    619 
    620 	wd->drvp->drive_type = ATA_DRIVET_NONE; /* no drive any more here */
    621 	wd->drvp->drive_flags = 0;
    622 
    623 	return (0);
    624 }
    625 
    626 /*
    627  * Read/write routine for a buffer.  Validates the arguments and schedules the
    628  * transfer.  Does not wait for the transfer to complete.
    629  */
    630 static void
    631 wdstrategy(struct buf *bp)
    632 {
    633 	struct wd_softc *wd =
    634 	    device_lookup_private(&wd_cd, WDUNIT(bp->b_dev));
    635 	struct dk_softc *dksc = &wd->sc_dksc;
    636 
    637 	ATADEBUG_PRINT(("wdstrategy (%s)\n", dksc->sc_xname),
    638 	    DEBUG_XFERS);
    639 
    640 	/* If device invalidated (e.g. media change, door open,
    641 	 * device detachment), then error.
    642 	 */
    643 	if ((wd->sc_flags & WDF_LOADED) == 0 ||
    644 	    !device_is_enabled(dksc->sc_dev))
    645 		goto err;
    646 
    647 #ifdef WD_SOFTBADSECT
    648 	/*
     649 	 * If the transfer about to be attempted touches a block that is
     650 	 * known to be bad, return an error for the transfer without even
     651 	 * starting it, on the premise that we would otherwise just end up
     652 	 * doing more retries for a transfer that will end up failing
     653 	 * again.
    654 	 */
    655 	if (__predict_false(!SLIST_EMPTY(&wd->sc_bslist))) {
    656 		struct disklabel *lp = dksc->sc_dkdev.dk_label;
    657 		struct disk_badsectors *dbs;
    658 		daddr_t blkno, maxblk;
    659 
    660 		/* convert the block number to absolute */
    661 		if (lp->d_secsize >= DEV_BSIZE)
    662 			blkno = bp->b_blkno / (lp->d_secsize / DEV_BSIZE);
    663 		else
    664 			blkno = bp->b_blkno * (DEV_BSIZE / lp->d_secsize);
    665 		if (WDPART(bp->b_dev) != RAW_PART)
    666 			blkno += lp->d_partitions[WDPART(bp->b_dev)].p_offset;
    667 		maxblk = blkno + (bp->b_bcount / wd->sc_blksize) - 1;
    668 
    669 		mutex_enter(&wd->sc_lock);
    670 		SLIST_FOREACH(dbs, &wd->sc_bslist, dbs_next)
    671 			if ((dbs->dbs_min <= bp->b_rawblkno &&
    672 			     bp->b_rawblkno <= dbs->dbs_max) ||
    673 			    (dbs->dbs_min <= maxblk && maxblk <= dbs->dbs_max)){
    674 				mutex_exit(&wd->sc_lock);
    675 				goto err;
    676 			}
    677 		mutex_exit(&wd->sc_lock);
    678 	}
    679 #endif
    680 
    681 	dk_strategy(dksc, bp);
    682 	return;
    683 
    684 err:
    685 	bp->b_error = EIO;
    686 	bp->b_resid = bp->b_bcount;
    687 	biodone(bp);
    688 }
    689 
    690 static void
    691 wdstart1(struct wd_softc *wd, struct buf *bp, struct ata_xfer *xfer)
    692 {
    693 	struct dk_softc *dksc = &wd->sc_dksc;
    694 	const uint32_t secsize = dksc->sc_dkdev.dk_geom.dg_secsize;
    695 
    696 	KASSERT(bp == xfer->c_bio.bp || xfer->c_bio.bp == NULL);
    697 	KASSERT((xfer->c_flags & (C_WAITACT|C_FREE)) == 0);
    698 	KASSERT(mutex_owned(&wd->sc_lock));
    699 
    700 	/* Reset state, so that retries don't use stale info */
    701 	if (__predict_false(xfer->c_retries > 0)) {
    702 		xfer->c_flags = 0;
    703 		memset(&xfer->c_bio, 0, sizeof(xfer->c_bio));
    704 	}
    705 
    706 	xfer->c_bio.blkno = bp->b_rawblkno;
    707 	xfer->c_bio.bcount = bp->b_bcount;
    708 	xfer->c_bio.databuf = bp->b_data;
    709 	xfer->c_bio.blkdone = 0;
    710 	xfer->c_bio.bp = bp;
    711 
    712 	/* Adjust blkno and bcount if xfer has been already partially done */
    713 	if (__predict_false(xfer->c_skip > 0)) {
    714 		KASSERT(xfer->c_skip < xfer->c_bio.bcount);
    715 		KASSERT((xfer->c_skip % secsize) == 0);
    716 		xfer->c_bio.bcount -= xfer->c_skip;
    717 		xfer->c_bio.blkno += xfer->c_skip / secsize;
    718 	}
    719 
    720 #ifdef WD_CHAOS_MONKEY
    721 	/*
    722 	 * Override blkno to be over device capacity to trigger error,
     723 	 * but only if it's a read, to avoid trashing disk contents should
    724 	 * the command be clipped, or otherwise misinterpreted, by the
    725 	 * driver or controller.
    726 	 */
    727 	if (BUF_ISREAD(bp) && xfer->c_retries == 0 && wd->drv_chaos_freq > 0 &&
    728 	    (++wd->drv_chaos_cnt % wd->drv_chaos_freq) == 0) {
    729 		device_printf(dksc->sc_dev, "%s: chaos xfer %"PRIxPTR"\n",
    730 		    __func__, (intptr_t)xfer & PAGE_MASK);
    731 		xfer->c_bio.blkno = 7777777 + wd->sc_capacity;
    732 		xfer->c_flags |= C_CHAOS;
    733 	}
    734 #endif
    735 
    736 	/*
    737 	 * If we're retrying, retry in single-sector mode. This will give us
    738 	 * the sector number of the problem, and will eventually allow the
     739 	 * transfer to succeed. If FUA is requested, we can't actually do
     740 	 * this, as ATA_SINGLE is usually executed as a PIO transfer by the
     741 	 * drivers which support it, and that isn't compatible with NCQ/FUA.
    742 	 */
    743 	if (xfer->c_retries >= WDIORETRIES_SINGLE &&
    744 	    (bp->b_flags & B_MEDIA_FUA) == 0)
    745 		xfer->c_bio.flags = ATA_SINGLE;
    746 	else
    747 		xfer->c_bio.flags = 0;
    748 
    749 	/*
    750 	 * request LBA48 transfers when supported by the controller
    751 	 * and needed by transfer offset or size.
    752 	 */
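	/*
	 * (wdminphys() limits non-LBA48 transfers to 128 sectors, so
	 * anything larger is issued as an LBA48 command.)
	 */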
    753 	if (wd->sc_flags & WDF_LBA48 &&
    754 	    (((xfer->c_bio.blkno + xfer->c_bio.bcount / secsize) >
    755 	    wd->sc_capacity28) ||
    756 	    ((xfer->c_bio.bcount / secsize) > 128)))
    757 		xfer->c_bio.flags |= ATA_LBA48;
    758 
    759 	/*
    760 	 * If NCQ was negotiated, always use it for the first several attempts.
     761 	 * Since the device cancels all outstanding requests on error,
     762 	 * downgrade to non-NCQ on retry, so that the retried transfer does
     763 	 * not cause a cascade failure for the other transfers if it fails
     764 	 * again. If FUA was requested, we can't downgrade, as that would
     765 	 * violate the semantics (FUA would not be honored). In that case,
     766 	 * continue retrying with NCQ.
    767 	 */
    768 	if (WD_USE_NCQ(wd) && (xfer->c_retries < WDIORETRIES_SINGLE ||
    769 	    (bp->b_flags & B_MEDIA_FUA) != 0)) {
    770 		xfer->c_bio.flags |= ATA_LBA48;
    771 		xfer->c_flags |= C_NCQ;
    772 
    773 		if (WD_USE_NCQ_PRIO(wd) &&
    774 		    BIO_GETPRIO(bp) == BPRIO_TIMECRITICAL)
    775 			xfer->c_bio.flags |= ATA_PRIO_HIGH;
    776 	}
    777 
    778 	if (wd->sc_flags & WDF_LBA)
    779 		xfer->c_bio.flags |= ATA_LBA;
    780 	if (bp->b_flags & B_READ) {
    781 		xfer->c_bio.flags |= ATA_READ;
    782 	} else {
    783 		/* it's a write */
    784 		wd->sc_flags |= WDF_DIRTY;
    785 	}
    786 	if (bp->b_flags & B_MEDIA_FUA) {
    787 		/* If not using NCQ, the command WRITE DMA FUA EXT is LBA48 */
    788 		KASSERT((wd->sc_flags & WDF_LBA48) != 0);
    789 		if ((xfer->c_flags & C_NCQ) == 0)
    790 			xfer->c_bio.flags |= ATA_LBA48;
    791 
    792 		xfer->c_bio.flags |= ATA_FUA;
    793 	}
    794 
    795 	if (xfer->c_retries == 0)
    796 		wd->inflight++;
    797 	mutex_exit(&wd->sc_lock);
    798 
    799 	/* Queue the xfer */
    800 	wd->atabus->ata_bio(wd->drvp, xfer);
    801 
    802 	mutex_enter(&wd->sc_lock);
    803 }
    804 
    805 static int
    806 wd_diskstart(device_t dev, struct buf *bp)
    807 {
    808 	struct wd_softc *wd = device_private(dev);
    809 #ifdef ATADEBUG
    810 	struct dk_softc *dksc = &wd->sc_dksc;
    811 #endif
    812 	struct ata_xfer *xfer;
    813 	struct ata_channel *chp;
    814 	unsigned openings;
    815 	int ticks;
    816 
    817 	mutex_enter(&wd->sc_lock);
    818 
    819 	chp = wd->drvp->chnl_softc;
    820 
    821 	ata_channel_lock(chp);
    822 	openings = ata_queue_openings(chp);
    823 	ata_channel_unlock(chp);
    824 
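	/*
	 * Limit the number of in-flight commands to the smaller of the
	 * channel queue openings and the per-drive openings.
	 */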
    825 	openings = uimin(openings, wd->drvp->drv_openings);
    826 
    827 	if (wd->inflight >= openings) {
    828 		/*
     829 		 * pretend we ran out of memory when the queue is full,
    830 		 * so that the operation is retried after a minimal
    831 		 * delay.
    832 		 */
    833 		xfer = NULL;
    834 		ticks = 1;
    835 	} else {
    836 		/*
    837 		 * If there is no available memory, retry later. This
    838 		 * happens very rarely and only under memory pressure,
    839 		 * so wait relatively long before retry.
    840 		 */
    841 		xfer = ata_get_xfer(chp, false);
    842 		ticks = hz/2;
    843 	}
    844 
    845 	if (xfer == NULL) {
    846 		ATADEBUG_PRINT(("wd_diskstart %s no xfer\n",
    847 		    dksc->sc_xname), DEBUG_XFERS);
    848 
    849 		/*
    850 		 * The disk queue is pushed automatically when an I/O
    851 		 * operation finishes or another one is queued. We
    852 		 * need this extra timeout because an ATA channel
    853 		 * might be shared by more than one disk queue and
    854 		 * all queues need to be restarted when another slot
    855 		 * becomes available.
    856 		 */
    857 		if (!callout_pending(&wd->sc_restart_diskqueue)) {
    858 			callout_reset(&wd->sc_restart_diskqueue, ticks,
    859 			    wdrestart, dev);
    860 		}
    861 
    862 		mutex_exit(&wd->sc_lock);
    863 		return EAGAIN;
    864 	}
    865 
    866 	wdstart1(wd, bp, xfer);
    867 
    868 	mutex_exit(&wd->sc_lock);
    869 
    870 	return 0;
    871 }
    872 
    873 /*
    874  * Queue a drive for I/O.
    875  */
    876 static void
    877 wdrestart(void *x)
    878 {
    879 	device_t self = x;
    880 	struct wd_softc *wd = device_private(self);
    881 	struct dk_softc *dksc = &wd->sc_dksc;
    882 
    883 	ATADEBUG_PRINT(("wdstart %s\n", dksc->sc_xname),
    884 	    DEBUG_XFERS);
    885 
    886 	if (!device_is_active(dksc->sc_dev))
    887 		return;
    888 
    889 	dk_start(dksc, NULL);
    890 }
    891 
    892 static void
    893 wddone(device_t self, struct ata_xfer *xfer)
    894 {
    895 	struct wd_softc *wd = device_private(self);
    896 	struct dk_softc *dksc = &wd->sc_dksc;
    897 	const char *errmsg;
    898 	int do_perror = 0;
    899 	struct buf *bp;
    900 
    901 	ATADEBUG_PRINT(("wddone %s\n", dksc->sc_xname),
    902 	    DEBUG_XFERS);
    903 
    904 	if (__predict_false(wddoingadump)) {
    905 		/* just drop it to the floor */
    906 		ata_free_xfer(wd->drvp->chnl_softc, xfer);
    907 		return;
    908 	}
    909 
    910 	bp = xfer->c_bio.bp;
    911 	KASSERT(bp != NULL);
    912 
    913 	bp->b_resid = xfer->c_bio.bcount;
    914 	switch (xfer->c_bio.error) {
    915 	case ERR_DMA:
    916 		errmsg = "DMA error";
    917 		goto retry;
    918 	case ERR_DF:
    919 		errmsg = "device fault";
    920 		goto retry;
    921 	case TIMEOUT:
    922 		errmsg = "device timeout";
    923 		goto retry;
    924 	case REQUEUE:
    925 		errmsg = "requeue";
    926 		goto retry2;
    927 	case ERR_RESET:
    928 		errmsg = "channel reset";
    929 		goto retry2;
    930 	case ERROR:
    931 		/* Don't care about media change bits */
    932 		if (xfer->c_bio.r_error != 0 &&
    933 		    (xfer->c_bio.r_error & ~(WDCE_MC | WDCE_MCR)) == 0)
    934 			goto noerror;
    935 		errmsg = "error";
    936 		do_perror = 1;
    937 retry:		/* Just reset and retry. Can we do more ? */
    938 		if ((xfer->c_flags & C_RECOVERED) == 0) {
    939 			int wflags = (xfer->c_flags & C_POLL) ? AT_POLL : 0;
    940 			ata_channel_lock(wd->drvp->chnl_softc);
    941 			ata_thread_run(wd->drvp->chnl_softc, wflags,
    942 			    ATACH_TH_DRIVE_RESET, wd->drvp->drive);
    943 			ata_channel_unlock(wd->drvp->chnl_softc);
    944 		}
    945 retry2:
    946 		mutex_enter(&wd->sc_lock);
    947 
    948 		diskerr(bp, "wd", errmsg, LOG_PRINTF,
    949 		    xfer->c_bio.blkdone, dksc->sc_dkdev.dk_label);
    950 		if (xfer->c_retries < WDIORETRIES)
    951 			printf(", xfer %"PRIxPTR", retry %d",
    952 			    (intptr_t)xfer & PAGE_MASK,
    953 			    xfer->c_retries);
    954 		printf("\n");
    955 		if (do_perror)
    956 			wdperror(wd, xfer);
    957 
    958 		if (xfer->c_retries < WDIORETRIES) {
    959 			xfer->c_retries++;
    960 
    961 			/* Rerun ASAP if just requeued */
    962 			if (xfer->c_bio.error == REQUEUE) {
    963 				SLIST_INSERT_HEAD(&wd->sc_requeue_list, xfer,
    964 				    c_retrychain);
    965 				callout_reset(&wd->sc_requeue_callout,
    966 				    1, wdbiorequeue, wd);
    967 			} else {
    968 				SLIST_INSERT_HEAD(&wd->sc_retry_list, xfer,
    969 				    c_retrychain);
    970 				callout_reset(&wd->sc_retry_callout,
    971 				    RECOVERYTIME, wdbioretry, wd);
    972 			}
    973 
    974 			mutex_exit(&wd->sc_lock);
    975 			return;
    976 		}
    977 
    978 		mutex_exit(&wd->sc_lock);
    979 
    980 #ifdef WD_SOFTBADSECT
    981 		/*
     982 		 * Not all errors indicate a failed block; for those that do,
     983 		 * put the block on the bad-block list for the device.  Only
     984 		 * do this for reads, because the drive should do it itself
     985 		 * for writes, according to Manuel.
    986 		 */
    987 		if ((bp->b_flags & B_READ) &&
    988 		    ((wd->drvp->ata_vers >= 4 && xfer->c_bio.r_error & 64) ||
    989 		     (wd->drvp->ata_vers < 4 && xfer->c_bio.r_error & 192))) {
    990 			struct disk_badsectors *dbs;
    991 
    992 			dbs = kmem_zalloc(sizeof *dbs, KM_NOSLEEP);
    993 			if (dbs == NULL) {
    994 				device_printf(dksc->sc_dev,
    995 				    "failed to add bad block to list\n");
    996 				goto out;
    997 			}
    998 
    999 			dbs->dbs_min = bp->b_rawblkno;
   1000 			dbs->dbs_max = dbs->dbs_min +
   1001 			    (bp->b_bcount /wd->sc_blksize) - 1;
   1002 			microtime(&dbs->dbs_failedat);
   1003 
   1004 			mutex_enter(&wd->sc_lock);
   1005 			SLIST_INSERT_HEAD(&wd->sc_bslist, dbs, dbs_next);
   1006 			wd->sc_bscount++;
   1007 			mutex_exit(&wd->sc_lock);
   1008 		}
   1009 out:
   1010 #endif
   1011 		bp->b_error = EIO;
   1012 		break;
   1013 	case NOERROR:
   1014 #ifdef WD_CHAOS_MONKEY
   1015 		/*
    1016 		 * For example, the Parallels AHCI emulation doesn't actually
    1017 		 * return an error for the invalid I/O, so just re-run
    1018 		 * the request and do not panic.
   1019 		 */
   1020 		if (__predict_false(xfer->c_flags & C_CHAOS)) {
   1021 			xfer->c_bio.error = REQUEUE;
   1022 			errmsg = "chaos noerror";
   1023 			goto retry2;
   1024 		}
   1025 #endif
   1026 
   1027 noerror:	if ((xfer->c_bio.flags & ATA_CORR) || xfer->c_retries > 0)
   1028 			device_printf(dksc->sc_dev,
   1029 			    "soft error (corrected) xfer %"PRIxPTR"\n",
   1030 			    (intptr_t)xfer & PAGE_MASK);
   1031 		break;
   1032 	case ERR_NODEV:
   1033 		bp->b_error = EIO;
   1034 		break;
   1035 	}
   1036 	if (__predict_false(bp->b_error != 0) && bp->b_resid == 0) {
   1037 		/*
    1038 		 * The disk or controller sometimes reports a complete
    1039 		 * xfer when there has been an error. This is wrong;
    1040 		 * assume nothing got transferred in that case.
   1041 		 */
   1042 		bp->b_resid = bp->b_bcount;
   1043 	}
   1044 
   1045 	ata_free_xfer(wd->drvp->chnl_softc, xfer);
   1046 
   1047 	mutex_enter(&wd->sc_lock);
   1048 	wd->inflight--;
   1049 	mutex_exit(&wd->sc_lock);
   1050 	dk_done(dksc, bp);
   1051 	dk_start(dksc, NULL);
   1052 }
   1053 
   1054 static void
   1055 wdbioretry(void *v)
   1056 {
   1057 	struct wd_softc *wd = v;
   1058 	struct ata_xfer *xfer;
   1059 
   1060 	ATADEBUG_PRINT(("%s %s\n", __func__, wd->sc_dksc.sc_xname),
   1061 	    DEBUG_XFERS);
   1062 
   1063 	mutex_enter(&wd->sc_lock);
   1064 	while ((xfer = SLIST_FIRST(&wd->sc_retry_list))) {
   1065 		SLIST_REMOVE_HEAD(&wd->sc_retry_list, c_retrychain);
   1066 		wdstart1(wd, xfer->c_bio.bp, xfer);
   1067 	}
   1068 	mutex_exit(&wd->sc_lock);
   1069 }
   1070 
   1071 static void
   1072 wdbiorequeue(void *v)
   1073 {
   1074 	struct wd_softc *wd = v;
   1075 	struct ata_xfer *xfer;
   1076 
   1077 	ATADEBUG_PRINT(("%s %s\n", __func__, wd->sc_dksc.sc_xname),
   1078 	    DEBUG_XFERS);
   1079 
   1080 	mutex_enter(&wd->sc_lock);
   1081 	while ((xfer = SLIST_FIRST(&wd->sc_requeue_list))) {
   1082 		SLIST_REMOVE_HEAD(&wd->sc_requeue_list, c_retrychain);
   1083 		wdstart1(wd, xfer->c_bio.bp, xfer);
   1084 	}
   1085 	mutex_exit(&wd->sc_lock);
   1086 }
   1087 
   1088 static void
   1089 wdminphys(struct buf *bp)
   1090 {
   1091 	const struct wd_softc * const wd =
   1092 	    device_lookup_private(&wd_cd, WDUNIT(bp->b_dev));
   1093 	int maxsectors;
   1094 
   1095 	/*
   1096 	 * The limit is actually 65536 for LBA48 and 256 for non-LBA48,
    1097 	 * but that requires setting the count for the ATA command
    1098 	 * to 0, which is somewhat error prone, so better to stay safe.
   1099 	 */
   1100 	if (wd->sc_flags & WDF_LBA48)
   1101 		maxsectors = 65535;
   1102 	else
   1103 		maxsectors = 128;
   1104 
   1105 	if (bp->b_bcount > (wd->sc_blksize * maxsectors))
   1106 		bp->b_bcount = (wd->sc_blksize * maxsectors);
   1107 
   1108 	minphys(bp);
   1109 }
   1110 
   1111 static void
   1112 wd_iosize(device_t dev, int *count)
   1113 {
   1114 	struct buf B;
   1115 	int bmaj;
   1116 
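	/*
	 * Build a throw-away buf for the raw partition and run it through
	 * wdminphys() to find the largest byte count a single transfer may
	 * carry.
	 */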
   1117 	bmaj       = bdevsw_lookup_major(&wd_bdevsw);
   1118 	B.b_dev    = MAKEWDDEV(bmaj,device_unit(dev),RAW_PART);
   1119 	B.b_bcount = *count;
   1120 
   1121 	wdminphys(&B);
   1122 
   1123 	*count = B.b_bcount;
   1124 }
   1125 
   1126 static int
   1127 wdread(dev_t dev, struct uio *uio, int flags)
   1128 {
   1129 
   1130 	ATADEBUG_PRINT(("wdread\n"), DEBUG_XFERS);
   1131 	return (physio(wdstrategy, NULL, dev, B_READ, wdminphys, uio));
   1132 }
   1133 
   1134 static int
   1135 wdwrite(dev_t dev, struct uio *uio, int flags)
   1136 {
   1137 
   1138 	ATADEBUG_PRINT(("wdwrite\n"), DEBUG_XFERS);
   1139 	return (physio(wdstrategy, NULL, dev, B_WRITE, wdminphys, uio));
   1140 }
   1141 
   1142 static int
   1143 wdopen(dev_t dev, int flag, int fmt, struct lwp *l)
   1144 {
   1145 	struct wd_softc *wd;
   1146 	struct dk_softc *dksc;
   1147 	int unit, part, error;
   1148 
   1149 	ATADEBUG_PRINT(("wdopen\n"), DEBUG_FUNCS);
   1150 	unit = WDUNIT(dev);
   1151 	wd = device_lookup_private(&wd_cd, unit);
   1152 	if (wd == NULL)
   1153 		return (ENXIO);
   1154 	dksc = &wd->sc_dksc;
   1155 
   1156 	if (! device_is_active(dksc->sc_dev))
   1157 		return (ENODEV);
   1158 
   1159 	part = WDPART(dev);
   1160 
   1161 	if (wd->sc_capacity == 0)
   1162 		return (ENODEV);
   1163 
   1164 	/*
   1165 	 * If any partition is open, but the disk has been invalidated,
   1166 	 * disallow further opens.
   1167 	 */
   1168 	if ((wd->sc_flags & (WDF_OPEN | WDF_LOADED)) == WDF_OPEN) {
   1169 		if (part != RAW_PART || fmt != S_IFCHR)
   1170 			return EIO;
   1171 	}
   1172 
   1173 	error = dk_open(dksc, dev, flag, fmt, l);
   1174 
   1175 	return error;
   1176 }
   1177 
   1178 /*
   1179  * Serialized by caller
   1180  */
   1181 static int
   1182 wd_firstopen(device_t self, dev_t dev, int flag, int fmt)
   1183 {
   1184 	struct wd_softc *wd = device_private(self);
   1185 	struct dk_softc *dksc = &wd->sc_dksc;
   1186 	int error;
   1187 
   1188 	error = wd->atabus->ata_addref(wd->drvp);
   1189 	if (error)
   1190 		return error;
   1191 
   1192 	if ((wd->sc_flags & WDF_LOADED) == 0) {
   1193 		int param_error;
   1194 
   1195 		/* Load the physical device parameters. */
   1196 		param_error = wd_get_params(wd, &wd->sc_params);
   1197 		if (param_error != 0) {
   1198 			aprint_error_dev(dksc->sc_dev, "IDENTIFY failed\n");
   1199 			error = EIO;
   1200 			goto bad;
   1201 		}
   1202 		wd_set_geometry(wd);
   1203 		wd->sc_flags |= WDF_LOADED;
   1204 	}
   1205 
   1206 	wd->sc_flags |= WDF_OPEN;
   1207 	return 0;
   1208 
   1209 bad:
   1210 	wd->atabus->ata_delref(wd->drvp);
   1211 	return error;
   1212 }
   1213 
   1214 /*
   1215  * Caller must hold wd->sc_dk.dk_openlock.
   1216  */
   1217 static int
   1218 wd_lastclose(device_t self)
   1219 {
   1220 	struct wd_softc *wd = device_private(self);
   1221 
   1222 	KASSERTMSG(bufq_peek(wd->sc_dksc.sc_bufq) == NULL, "bufq not empty");
   1223 
   1224 	if (wd->sc_flags & WDF_DIRTY)
   1225 		wd_flushcache(wd, AT_WAIT);
   1226 
   1227 	wd->atabus->ata_delref(wd->drvp);
   1228 	wd->sc_flags &= ~WDF_OPEN;
   1229 
   1230 	return 0;
   1231 }
   1232 
   1233 static int
   1234 wdclose(dev_t dev, int flag, int fmt, struct lwp *l)
   1235 {
   1236 	struct wd_softc *wd;
   1237 	struct dk_softc *dksc;
   1238 	int unit;
   1239 
   1240 	unit = WDUNIT(dev);
   1241 	wd = device_lookup_private(&wd_cd, unit);
   1242 	dksc = &wd->sc_dksc;
   1243 
   1244 	return dk_close(dksc, dev, flag, fmt, l);
   1245 }
   1246 
   1247 void
   1248 wdperror(const struct wd_softc *wd, struct ata_xfer *xfer)
   1249 {
   1250 	static const char *const errstr0_3[] = {"address mark not found",
   1251 	    "track 0 not found", "aborted command", "media change requested",
   1252 	    "id not found", "media changed", "uncorrectable data error",
   1253 	    "bad block detected"};
   1254 	static const char *const errstr4_5[] = {
   1255 	    "obsolete (address mark not found)",
   1256 	    "no media/write protected", "aborted command",
   1257 	    "media change requested", "id not found", "media changed",
   1258 	    "uncorrectable data error", "interface CRC error"};
   1259 	const char *const *errstr;
   1260 	int i;
   1261 	const char *sep = "";
   1262 
   1263 	const struct dk_softc *dksc = &wd->sc_dksc;
   1264 	const char *devname = dksc->sc_xname;
   1265 	struct ata_drive_datas *drvp = wd->drvp;
   1266 	int errno = xfer->c_bio.r_error;
   1267 
   1268 	if (drvp->ata_vers >= 4)
   1269 		errstr = errstr4_5;
   1270 	else
   1271 		errstr = errstr0_3;
   1272 
   1273 	printf("%s: (", devname);
   1274 
   1275 	if (errno == 0)
   1276 		printf("error not notified");
   1277 
   1278 	for (i = 0; i < 8; i++) {
   1279 		if (errno & (1 << i)) {
   1280 			printf("%s%s", sep, errstr[i]);
   1281 			sep = ", ";
   1282 		}
   1283 	}
   1284 	printf(")\n");
   1285 }
   1286 
   1287 int
   1288 wdioctl(dev_t dev, u_long cmd, void *addr, int flag, struct lwp *l)
   1289 {
   1290 	struct wd_softc *wd =
   1291 	    device_lookup_private(&wd_cd, WDUNIT(dev));
   1292 	struct dk_softc *dksc = &wd->sc_dksc;
   1293 
   1294 	ATADEBUG_PRINT(("wdioctl\n"), DEBUG_FUNCS);
   1295 
   1296 	if ((wd->sc_flags & WDF_LOADED) == 0)
   1297 		return EIO;
   1298 
   1299 	switch (cmd) {
   1300 #ifdef HAS_BAD144_HANDLING
   1301 	case DIOCSBAD:
   1302 		if ((flag & FWRITE) == 0)
   1303 			return EBADF;
   1304 		dksc->sc_dkdev.dk_cpulabel->bad = *(struct dkbad *)addr;
   1305 		dksc->sc_dkdev.dk_label->d_flags |= D_BADSECT;
   1306 		bad144intern(wd);
   1307 		return 0;
   1308 #endif
   1309 #ifdef WD_SOFTBADSECT
   1310 	case DIOCBSLIST: {
   1311 		uint32_t count, missing, skip;
   1312 		struct disk_badsecinfo dbsi;
   1313 		struct disk_badsectors *dbs, dbsbuf;
   1314 		size_t available;
   1315 		uint8_t *laddr;
   1316 		int error;
   1317 
   1318 		dbsi = *(struct disk_badsecinfo *)addr;
   1319 		missing = wd->sc_bscount;
   1320 		count = 0;
   1321 		available = dbsi.dbsi_bufsize;
   1322 		skip = dbsi.dbsi_skip;
   1323 		laddr = (uint8_t *)dbsi.dbsi_buffer;
   1324 
   1325 		/*
    1326 		 * We start this loop assuming that every entry will be
    1327 		 * missed, and decrement the missing counter each time we
    1328 		 * either skip over one (already copied out) or actually
    1329 		 * copy it back to user space.  The structs holding the bad
    1330 		 * sector information are copied directly back to user
    1331 		 * space, while the summary is returned via the struct
    1332 		 * passed in with the ioctl.
   1333 		 */
   1334 		error = 0;
   1335 		mutex_enter(&wd->sc_lock);
   1336 		wd->sc_bslist_inuse++;
   1337 		SLIST_FOREACH(dbs, &wd->sc_bslist, dbs_next) {
   1338 			if (skip > 0) {
   1339 				missing--;
   1340 				skip--;
   1341 				continue;
   1342 			}
   1343 			if (available < sizeof(*dbs))
   1344 				break;
   1345 			available -= sizeof(*dbs);
   1346 			memset(&dbsbuf, 0, sizeof(dbsbuf));
   1347 			dbsbuf.dbs_min = dbs->dbs_min;
   1348 			dbsbuf.dbs_max = dbs->dbs_max;
   1349 			dbsbuf.dbs_failedat = dbs->dbs_failedat;
   1350 			mutex_exit(&wd->sc_lock);
   1351 			error = copyout(&dbsbuf, laddr, sizeof(dbsbuf));
   1352 			mutex_enter(&wd->sc_lock);
   1353 			if (error)
   1354 				break;
   1355 			laddr += sizeof(*dbs);
   1356 			missing--;
   1357 			count++;
   1358 		}
   1359 		if (--wd->sc_bslist_inuse == 0)
   1360 			cv_broadcast(&wd->sc_bslist_cv);
   1361 		mutex_exit(&wd->sc_lock);
   1362 		dbsi.dbsi_left = missing;
   1363 		dbsi.dbsi_copied = count;
   1364 		*(struct disk_badsecinfo *)addr = dbsi;
   1365 
   1366 		/*
   1367 		 * If we copied anything out, ignore error and return
   1368 		 * success -- can't back it out.
   1369 		 */
   1370 		return count ? 0 : error;
   1371 	}
   1372 
   1373 	case DIOCBSFLUSH: {
   1374 		int error;
   1375 
   1376 		/* Clean out the bad sector list */
   1377 		mutex_enter(&wd->sc_lock);
   1378 		while (wd->sc_bslist_inuse) {
   1379 			error = cv_wait_sig(&wd->sc_bslist_cv, &wd->sc_lock);
   1380 			if (error) {
   1381 				mutex_exit(&wd->sc_lock);
   1382 				return error;
   1383 			}
   1384 		}
   1385 		while (!SLIST_EMPTY(&wd->sc_bslist)) {
   1386 			struct disk_badsectors *dbs =
   1387 			    SLIST_FIRST(&wd->sc_bslist);
   1388 			SLIST_REMOVE_HEAD(&wd->sc_bslist, dbs_next);
   1389 			mutex_exit(&wd->sc_lock);
   1390 			kmem_free(dbs, sizeof(*dbs));
   1391 			mutex_enter(&wd->sc_lock);
   1392 		}
   1393 		mutex_exit(&wd->sc_lock);
   1394 		wd->sc_bscount = 0;
   1395 		return 0;
   1396 	}
   1397 #endif
   1398 
   1399 #ifdef notyet
   1400 	case DIOCWFORMAT:
   1401 		if ((flag & FWRITE) == 0)
   1402 			return EBADF;
   1403 		{
   1404 		register struct format_op *fop;
   1405 		struct iovec aiov;
   1406 		struct uio auio;
   1407 		int error1;
   1408 
   1409 		fop = (struct format_op *)addr;
   1410 		aiov.iov_base = fop->df_buf;
   1411 		aiov.iov_len = fop->df_count;
   1412 		auio.uio_iov = &aiov;
   1413 		auio.uio_iovcnt = 1;
   1414 		auio.uio_resid = fop->df_count;
   1415 		auio.uio_offset =
   1416 			fop->df_startblk * wd->sc_dk.dk_label->d_secsize;
   1417 		auio.uio_vmspace = l->l_proc->p_vmspace;
   1418 		error1 = physio(wdformat, NULL, dev, B_WRITE, wdminphys,
   1419 		    &auio);
   1420 		fop->df_count -= auio.uio_resid;
   1421 		fop->df_reg[0] = wdc->sc_status;
   1422 		fop->df_reg[1] = wdc->sc_error;
   1423 		return error1;
   1424 		}
   1425 #endif
   1426 	case DIOCGCACHE:
   1427 		return wd_getcache(wd, (int *)addr);
   1428 
   1429 	case DIOCSCACHE:
   1430 		return wd_setcache(wd, *(int *)addr);
   1431 
   1432 	case DIOCCACHESYNC:
   1433 		return wd_flushcache(wd, AT_WAIT);
   1434 
   1435 	case ATAIOCCOMMAND:
   1436 		/*
   1437 		 * Make sure this command is (relatively) safe first
   1438 		 */
   1439 		if ((((atareq_t *) addr)->flags & ATACMD_READ) == 0 &&
   1440 		    (flag & FWRITE) == 0)
   1441 			return (EBADF);
   1442 		{
   1443 		struct wd_ioctl *wi;
   1444 		atareq_t *atareq = (atareq_t *) addr;
   1445 		int error1;
   1446 
   1447 		wi = wi_get(wd);
   1448 		wi->wi_atareq = *atareq;
   1449 
   1450 		if (atareq->datalen && atareq->flags &
   1451 		    (ATACMD_READ | ATACMD_WRITE)) {
   1452 			void *tbuf;
   1453 			if (atareq->datalen < DEV_BSIZE
   1454 			    && atareq->command == WDCC_IDENTIFY) {
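				/*
				 * For short IDENTIFY requests, bounce the
				 * data through a full DEV_BSIZE kernel buffer
				 * so the transfer stays sector-sized, and
				 * copy back only the requested amount.
				 */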
   1455 				tbuf = kmem_zalloc(DEV_BSIZE, KM_SLEEP);
   1456 				wi->wi_iov.iov_base = tbuf;
   1457 				wi->wi_iov.iov_len = DEV_BSIZE;
   1458 				UIO_SETUP_SYSSPACE(&wi->wi_uio);
   1459 			} else {
   1460 				tbuf = NULL;
   1461 				wi->wi_iov.iov_base = atareq->databuf;
   1462 				wi->wi_iov.iov_len = atareq->datalen;
   1463 				wi->wi_uio.uio_vmspace = l->l_proc->p_vmspace;
   1464 			}
   1465 			wi->wi_uio.uio_iov = &wi->wi_iov;
   1466 			wi->wi_uio.uio_iovcnt = 1;
   1467 			wi->wi_uio.uio_resid = atareq->datalen;
   1468 			wi->wi_uio.uio_offset = 0;
   1469 			wi->wi_uio.uio_rw =
   1470 			    (atareq->flags & ATACMD_READ) ? B_READ : B_WRITE;
   1471 			error1 = physio(wdioctlstrategy, &wi->wi_bp, dev,
   1472 			    (atareq->flags & ATACMD_READ) ? B_READ : B_WRITE,
   1473 			    wdminphys, &wi->wi_uio);
   1474 			if (tbuf != NULL && error1 == 0) {
   1475 				error1 = copyout(tbuf, atareq->databuf,
   1476 				    atareq->datalen);
   1477 				kmem_free(tbuf, DEV_BSIZE);
   1478 			}
   1479 		} else {
   1480 			/* No need to call physio if we don't have any
   1481 			   user data */
   1482 			wi->wi_bp.b_flags = 0;
   1483 			wi->wi_bp.b_data = 0;
   1484 			wi->wi_bp.b_bcount = 0;
   1485 			wi->wi_bp.b_dev = dev;
   1486 			wi->wi_bp.b_proc = l->l_proc;
   1487 			wdioctlstrategy(&wi->wi_bp);
   1488 			error1 = wi->wi_bp.b_error;
   1489 		}
   1490 		*atareq = wi->wi_atareq;
   1491 		wi_free(wi);
   1492 		return(error1);
   1493 		}
   1494 
   1495 	case DIOCGSECTORALIGN: {
   1496 		struct disk_sectoralign *dsa = addr;
   1497 		int part = WDPART(dev);
   1498 
   1499 		*dsa = wd->sc_sectoralign;
   1500 		if (part != RAW_PART) {
   1501 			struct disklabel *lp = dksc->sc_dkdev.dk_label;
   1502 			daddr_t offset = lp->d_partitions[part].p_offset;
   1503 			uint32_t r = offset % dsa->dsa_alignment;
   1504 
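			/*
			 * Translate the first aligned sector from an absolute
			 * LBA to a partition-relative one.  Illustrative
			 * (hypothetical) numbers: with dsa_alignment 8,
			 * dsa_firstaligned 1 and a partition offset of 63,
			 * r = 63 % 8 = 7 and 7 >= 1, so the result is
			 * (1 + 8) - 7 = 2; absolute sector 63 + 2 = 65 is
			 * indeed aligned, since 65 % 8 == 1.
			 */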
   1505 			if (r < dsa->dsa_firstaligned)
   1506 				dsa->dsa_firstaligned = dsa->dsa_firstaligned
   1507 				    - r;
   1508 			else
   1509 				dsa->dsa_firstaligned = (dsa->dsa_firstaligned
   1510 				    + dsa->dsa_alignment) - r;
   1511 		}
   1512 
   1513 		return 0;
   1514 	}
   1515 
   1516 	default:
   1517 		return dk_ioctl(dksc, dev, cmd, addr, flag, l);
   1518 	}
   1519 
   1520 #ifdef DIAGNOSTIC
   1521 	panic("wdioctl: impossible");
   1522 #endif
   1523 }
   1524 
   1525 static int
   1526 wd_discard(device_t dev, off_t pos, off_t len)
   1527 {
   1528 	struct wd_softc *wd = device_private(dev);
   1529 	daddr_t bno;
   1530 	long size, done;
   1531 	long maxatonce, amount;
   1532 	int result;
   1533 
   1534 	if (!(wd->sc_params.atap_ata_major & WDC_VER_ATA7)
   1535 	    || !(wd->sc_params.support_dsm & ATA_SUPPORT_DSM_TRIM)) {
   1536 		/* not supported; ignore request */
   1537 		ATADEBUG_PRINT(("wddiscard (unsupported)\n"), DEBUG_FUNCS);
   1538 		return 0;
   1539 	}
   1540 	maxatonce = 0xffff; /*wd->sc_params.max_dsm_blocks*/
   1541 
   1542 	ATADEBUG_PRINT(("wddiscard\n"), DEBUG_FUNCS);
   1543 
   1544 	if ((wd->sc_flags & WDF_LOADED) == 0)
   1545 		return EIO;
   1546 
   1547 	/* round the start up and the end down */
   1548 	bno = (pos + wd->sc_blksize - 1) / wd->sc_blksize;
   1549 	size = ((pos + len) / wd->sc_blksize) - bno;
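	/*
	 * Illustrative (hypothetical) numbers: pos = 1000, len = 10000 and a
	 * 512-byte block size give bno = (1000 + 511) / 512 = 2 and
	 * size = 11000 / 512 - 2 = 19, so only whole blocks lying completely
	 * inside the byte range [1000, 11000) are trimmed.
	 */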
   1550 
   1551 	done = 0;
    1552 	while (done < size) {
    1553 		amount = size - done;
    1554 		if (amount > maxatonce) {
    1555 			amount = maxatonce;
    1556 		}
    1557 		result = wd_trim(wd, bno + done, amount);
    1558 		if (result) {
    1559 			return result;
    1560 		}
    1561 		done += amount;
    1562 	}
   1563 	return 0;
   1564 }
   1565 
   1566 static int
   1567 wddiscard(dev_t dev, off_t pos, off_t len)
   1568 {
   1569 	struct wd_softc *wd;
   1570 	struct dk_softc *dksc;
   1571 	int unit;
   1572 
   1573 	unit = WDUNIT(dev);
   1574 	wd = device_lookup_private(&wd_cd, unit);
   1575 	dksc = &wd->sc_dksc;
   1576 
   1577 	return dk_discard(dksc, dev, pos, len);
   1578 }
   1579 
   1580 #ifdef B_FORMAT
   1581 int
   1582 wdformat(struct buf *bp)
   1583 {
   1584 
   1585 	bp->b_flags |= B_FORMAT;
   1586 	return wdstrategy(bp);
   1587 }
   1588 #endif
   1589 
   1590 int
   1591 wdsize(dev_t dev)
   1592 {
   1593 	struct wd_softc *wd;
   1594 	struct dk_softc *dksc;
   1595 	int unit;
   1596 
   1597 	ATADEBUG_PRINT(("wdsize\n"), DEBUG_FUNCS);
   1598 
   1599 	unit = WDUNIT(dev);
   1600 	wd = device_lookup_private(&wd_cd, unit);
   1601 	if (wd == NULL)
   1602 		return (-1);
   1603 	dksc = &wd->sc_dksc;
   1604 
   1605 	if (!device_is_active(dksc->sc_dev))
   1606 		return (-1);
   1607 
   1608 	return dk_size(dksc, dev);
   1609 }
   1610 
   1611 /*
   1612  * Dump core after a system crash.
   1613  */
   1614 static int
   1615 wddump(dev_t dev, daddr_t blkno, void *va, size_t size)
   1616 {
   1617 	struct wd_softc *wd;
   1618 	struct dk_softc *dksc;
   1619 	int unit;
   1620 
   1621 	/* Check if recursive dump; if so, punt. */
   1622 	if (wddoingadump)
   1623 		return EFAULT;
   1624 	wddoingadump = 1;
   1625 
   1626 	unit = WDUNIT(dev);
   1627 	wd = device_lookup_private(&wd_cd, unit);
   1628 	if (wd == NULL)
   1629 		return (ENXIO);
   1630 	dksc = &wd->sc_dksc;
   1631 
   1632 	return dk_dump(dksc, dev, blkno, va, size, 0);
   1633 }
   1634 
   1635 static int
   1636 wd_dumpblocks(device_t dev, void *va, daddr_t blkno, int nblk)
   1637 {
   1638 	struct wd_softc *wd = device_private(dev);
   1639 	struct dk_softc *dksc = &wd->sc_dksc;
   1640 	struct disk_geom *dg = &dksc->sc_dkdev.dk_geom;
   1641 	struct ata_xfer *xfer = &wd->dump_xfer;
   1642 	int err;
   1643 
   1644 	/* Recalibrate, if first dump transfer. */
   1645 	if (wddumprecalibrated == 0) {
   1646 		wddumprecalibrated = 1;
   1647 		ata_channel_lock(wd->drvp->chnl_softc);
   1648 		/* This will directly execute the reset due to AT_POLL */
   1649 		ata_thread_run(wd->drvp->chnl_softc, AT_POLL,
   1650 		    ATACH_TH_DRIVE_RESET, wd->drvp->drive);
   1651 
   1652 		wd->drvp->state = RESET;
   1653 		ata_channel_unlock(wd->drvp->chnl_softc);
   1654 	}
   1655 
   1656 	memset(xfer, 0, sizeof(*xfer));
   1657 	xfer->c_flags |= C_PRIVATE_ALLOC | C_SKIP_QUEUE;
   1658 
   1659 	xfer->c_bio.blkno = blkno;
   1660 	xfer->c_bio.flags = ATA_POLL;
   1661 	if (wd->sc_flags & WDF_LBA48 &&
   1662 	    (xfer->c_bio.blkno + nblk) > wd->sc_capacity28)
   1663 		xfer->c_bio.flags |= ATA_LBA48;
   1664 	if (wd->sc_flags & WDF_LBA)
   1665 		xfer->c_bio.flags |= ATA_LBA;
   1666 	xfer->c_bio.bcount = nblk * dg->dg_secsize;
   1667 	xfer->c_bio.databuf = va;
   1668 #ifndef WD_DUMP_NOT_TRUSTED
   1669 	/* This will poll until the bio is complete */
   1670 	wd->atabus->ata_bio(wd->drvp, xfer);
   1671 
   1672 	switch(err = xfer->c_bio.error) {
   1673 	case TIMEOUT:
   1674 		printf("wddump: device timed out");
   1675 		err = EIO;
   1676 		break;
   1677 	case ERR_DF:
   1678 		printf("wddump: drive fault");
   1679 		err = EIO;
   1680 		break;
   1681 	case ERR_DMA:
   1682 		printf("wddump: DMA error");
   1683 		err = EIO;
   1684 		break;
   1685 	case ERROR:
   1686 		printf("wddump: ");
   1687 		wdperror(wd, xfer);
   1688 		err = EIO;
   1689 		break;
   1690 	case NOERROR:
   1691 		err = 0;
   1692 		break;
   1693 	default:
   1694 		panic("wddump: unknown error type %x", err);
   1695 	}
   1696 
   1697 	if (err != 0) {
   1698 		printf("\n");
   1699 		return err;
   1700 	}
   1701 #else	/* WD_DUMP_NOT_TRUSTED */
   1702 	/* Let's just talk about this first... */
    1703 	printf("%s: dump addr %p, blkno %lld, nblk %d\n",
    1704 	    dksc->sc_xname, va, (long long)blkno, nblk);
   1705 	delay(500 * 1000);	/* half a second */
   1706 #endif
   1707 
   1708 	wddoingadump = 0;
   1709 	return 0;
   1710 }
   1711 
   1712 #ifdef HAS_BAD144_HANDLING
   1713 /*
   1714  * Internalize the bad sector table.
   1715  */
   1716 void
   1717 bad144intern(struct wd_softc *wd)
   1718 {
   1719 	struct dk_softc *dksc = &wd->sc_dksc;
   1720 	struct dkbad *bt = &dksc->sc_dkdev.dk_cpulabel->bad;
   1721 	struct disklabel *lp = dksc->sc_dkdev.dk_label;
   1722 	int i = 0;
   1723 
   1724 	ATADEBUG_PRINT(("bad144intern\n"), DEBUG_XFERS);
   1725 
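         	/*
         	 * Each bad144 table entry holds a cylinder/track/sector triple;
         	 * convert it to an absolute sector number using the disklabel
         	 * geometry.  A cylinder value of 0xffff terminates the table.
         	 */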
   1726 	for (; i < NBT_BAD; i++) {
   1727 		if (bt->bt_bad[i].bt_cyl == 0xffff)
   1728 			break;
   1729 		wd->drvp->badsect[i] =
   1730 		    bt->bt_bad[i].bt_cyl * lp->d_secpercyl +
   1731 		    (bt->bt_bad[i].bt_trksec >> 8) * lp->d_nsectors +
   1732 		    (bt->bt_bad[i].bt_trksec & 0xff);
   1733 	}
   1734 	for (; i < NBT_BAD+1; i++)
   1735 		wd->drvp->badsect[i] = -1;
   1736 }
   1737 #endif
   1738 
   1739 static void
   1740 wd_set_geometry(struct wd_softc *wd)
   1741 {
   1742 	struct dk_softc *dksc = &wd->sc_dksc;
   1743 	struct disk_geom *dg = &dksc->sc_dkdev.dk_geom;
   1744 
   1745 	memset(dg, 0, sizeof(*dg));
   1746 
   1747 	dg->dg_secperunit = wd->sc_capacity;
   1748 	dg->dg_secsize = wd->sc_blksize;
   1749 	dg->dg_nsectors = wd->sc_params.atap_sectors;
   1750 	dg->dg_ntracks = wd->sc_params.atap_heads;
   1751 	if ((wd->sc_flags & WDF_LBA) == 0)
   1752 		dg->dg_ncylinders = wd->sc_params.atap_cylinders;
   1753 
   1754 	disk_set_info(dksc->sc_dev, &dksc->sc_dkdev, wd->sc_typename);
   1755 }
   1756 
   1757 int
   1758 wd_get_params(struct wd_softc *wd, struct ataparams *params)
   1759 {
   1760 	int retry = 0;
   1761 	struct ata_channel *chp = wd->drvp->chnl_softc;
   1762 	const int flags = AT_WAIT;
   1763 
   1764 again:
   1765 	switch (wd->atabus->ata_get_params(wd->drvp, flags, params)) {
   1766 	case CMD_AGAIN:
   1767 		return 1;
   1768 	case CMD_ERR:
   1769 		if (retry == 0) {
   1770 			retry++;
   1771 			ata_channel_lock(chp);
   1772 			(*wd->atabus->ata_reset_drive)(wd->drvp, flags, NULL);
   1773 			ata_channel_unlock(chp);
   1774 			goto again;
   1775 		}
   1776 
   1777 		if (wd->drvp->drive_type != ATA_DRIVET_OLD)
   1778 			return 1;
   1779 		/*
   1780 		 * We `know' there's a drive here; just assume it's old.
   1781 		 * This geometry is only used to read the MBR and print a
   1782 		 * (false) attach message.
   1783 		 */
   1784 		strncpy(params->atap_model, "ST506",
   1785 		    sizeof params->atap_model);
   1786 		params->atap_config = ATA_CFG_FIXED;
   1787 		params->atap_cylinders = 1024;
   1788 		params->atap_heads = 8;
   1789 		params->atap_sectors = 17;
   1790 		params->atap_multi = 1;
   1791 		params->atap_capabilities1 = params->atap_capabilities2 = 0;
   1792 		wd->drvp->ata_vers = -1; /* Mark it as pre-ATA */
   1793 		/* FALLTHROUGH */
   1794 	case CMD_OK:
   1795 		return 0;
   1796 	default:
   1797 		panic("wd_get_params: bad return code from ata_get_params");
   1798 		/* NOTREACHED */
   1799 	}
   1800 }
   1801 
   1802 int
   1803 wd_getcache(struct wd_softc *wd, int *bitsp)
   1804 {
   1805 	struct ataparams params;
   1806 
   1807 	if (wd_get_params(wd, &params) != 0)
   1808 		return EIO;
   1809 	if (params.atap_cmd_set1 == 0x0000 ||
   1810 	    params.atap_cmd_set1 == 0xffff ||
   1811 	    (params.atap_cmd_set1 & WDC_CMD1_CACHE) == 0) {
   1812 		*bitsp = 0;
   1813 		return 0;
   1814 	}
   1815 	*bitsp = DKCACHE_WCHANGE | DKCACHE_READ;
   1816 	if (params.atap_cmd1_en & WDC_CMD1_CACHE)
   1817 		*bitsp |= DKCACHE_WRITE;
   1818 
   1819 	if (WD_USE_NCQ(wd) || (wd->drvp->drive_flags & ATA_DRIVE_WFUA))
   1820 		*bitsp |= DKCACHE_FUA;
   1821 
   1822 	return 0;
   1823 }
   1824 
   1825 
   1826 static int
   1827 wd_check_error(const struct dk_softc *dksc, const struct ata_xfer *xfer,
   1828     const char *func)
   1829 {
   1830 	static const char at_errbits[] = "\20\10ERROR\11TIMEOU\12DF";
   1831 
   1832 	int flags = xfer->c_ata_c.flags;
   1833 
   1834 	if ((flags & AT_ERROR) != 0 && xfer->c_ata_c.r_error == WDCE_ABRT) {
   1835 		/* command not supported */
   1836 		aprint_debug_dev(dksc->sc_dev, "%s: not supported\n", func);
   1837 		return ENODEV;
   1838 	}
   1839 	if (flags & (AT_ERROR | AT_TIMEOU | AT_DF)) {
   1840 		char sbuf[sizeof(at_errbits) + 64];
   1841 		snprintb(sbuf, sizeof(sbuf), at_errbits, flags);
   1842 		device_printf(dksc->sc_dev, "%s: status=%s\n", func, sbuf);
   1843 		return EIO;
   1844 	}
   1845 	return 0;
   1846 }
   1847 
   1848 int
   1849 wd_setcache(struct wd_softc *wd, int bits)
   1850 {
   1851 	struct dk_softc *dksc = &wd->sc_dksc;
   1852 	struct ataparams params;
   1853 	struct ata_xfer *xfer;
   1854 	int error;
   1855 
   1856 	if (wd_get_params(wd, &params) != 0)
   1857 		return EIO;
   1858 
   1859 	if (params.atap_cmd_set1 == 0x0000 ||
   1860 	    params.atap_cmd_set1 == 0xffff ||
   1861 	    (params.atap_cmd_set1 & WDC_CMD1_CACHE) == 0)
   1862 		return EOPNOTSUPP;
   1863 
   1864 	if ((bits & DKCACHE_READ) == 0 ||
   1865 	    (bits & DKCACHE_SAVE) != 0)
   1866 		return EOPNOTSUPP;
   1867 
   1868 	xfer = ata_get_xfer(wd->drvp->chnl_softc, true);
   1869 
   1870 	xfer->c_ata_c.r_command = SET_FEATURES;
   1871 	xfer->c_ata_c.r_st_bmask = 0;
   1872 	xfer->c_ata_c.r_st_pmask = 0;
   1873 	xfer->c_ata_c.timeout = 30000; /* 30s timeout */
   1874 	xfer->c_ata_c.flags = AT_WAIT;
   1875 	if (bits & DKCACHE_WRITE)
   1876 		xfer->c_ata_c.r_features = WDSF_WRITE_CACHE_EN;
   1877 	else
   1878 		xfer->c_ata_c.r_features = WDSF_WRITE_CACHE_DS;
   1879 
   1880 	wd->atabus->ata_exec_command(wd->drvp, xfer);
   1881 	ata_wait_cmd(wd->drvp->chnl_softc, xfer);
   1882 
   1883 	error = wd_check_error(dksc, xfer, __func__);
   1884 	ata_free_xfer(wd->drvp->chnl_softc, xfer);
   1885 	return error;
   1886 }
   1887 
   1888 static int
   1889 wd_standby(struct wd_softc *wd, int flags)
   1890 {
   1891 	struct dk_softc *dksc = &wd->sc_dksc;
   1892 	struct ata_xfer *xfer;
   1893 	int error;
   1894 
   1895 	aprint_debug_dev(dksc->sc_dev, "standby immediate\n");
   1896 	xfer = ata_get_xfer(wd->drvp->chnl_softc, true);
   1897 
   1898 	xfer->c_ata_c.r_command = WDCC_STANDBY_IMMED;
   1899 	xfer->c_ata_c.r_st_bmask = WDCS_DRDY;
   1900 	xfer->c_ata_c.r_st_pmask = WDCS_DRDY;
   1901 	xfer->c_ata_c.flags = flags;
   1902 	xfer->c_ata_c.timeout = 30000; /* 30s timeout */
   1903 
   1904 	wd->atabus->ata_exec_command(wd->drvp, xfer);
   1905 	ata_wait_cmd(wd->drvp->chnl_softc, xfer);
   1906 
   1907 	error = wd_check_error(dksc, xfer, __func__);
   1908 	ata_free_xfer(wd->drvp->chnl_softc, xfer);
   1909 	return error;
   1910 }
   1911 
   1912 int
   1913 wd_flushcache(struct wd_softc *wd, int flags)
   1914 {
   1915 	struct dk_softc *dksc = &wd->sc_dksc;
   1916 	struct ata_xfer *xfer;
   1917 	int error;
   1918 
   1919 	/*
    1920 	 * WDCC_FLUSHCACHE has been part of the spec since ATA-4, but some
    1921 	 * drives report only ATA-2 and still support it.
   1922 	 */
   1923 	if (wd->drvp->ata_vers < 4 &&
   1924 	    ((wd->sc_params.atap_cmd_set2 & WDC_CMD2_FC) == 0 ||
   1925 	    wd->sc_params.atap_cmd_set2 == 0xffff))
   1926 		return ENODEV;
   1927 
   1928 	xfer = ata_get_xfer(wd->drvp->chnl_softc, true);
   1929 
   1930 	if ((wd->sc_params.atap_cmd2_en & ATA_CMD2_LBA48) != 0 &&
   1931 	    (wd->sc_params.atap_cmd2_en & ATA_CMD2_FCE) != 0) {
   1932 		xfer->c_ata_c.r_command = WDCC_FLUSHCACHE_EXT;
   1933 		flags |= AT_LBA48;
   1934 	} else
   1935 		xfer->c_ata_c.r_command = WDCC_FLUSHCACHE;
   1936 	xfer->c_ata_c.r_st_bmask = WDCS_DRDY;
   1937 	xfer->c_ata_c.r_st_pmask = WDCS_DRDY;
   1938 	xfer->c_ata_c.flags = flags | AT_READREG;
   1939 	xfer->c_ata_c.timeout = 300000; /* 5m timeout */
   1940 
   1941 	wd->atabus->ata_exec_command(wd->drvp, xfer);
   1942 	ata_wait_cmd(wd->drvp->chnl_softc, xfer);
   1943 
   1944 	error = wd_check_error(dksc, xfer, __func__);
   1945 	wd->sc_flags &= ~WDF_DIRTY;
   1946 	ata_free_xfer(wd->drvp->chnl_softc, xfer);
   1947 	return error;
   1948 }
   1949 
   1950 /*
   1951  * Execute TRIM command, assumes sleep context.
   1952  */
   1953 static int
   1954 wd_trim(struct wd_softc *wd, daddr_t bno, long size)
   1955 {
   1956 	struct dk_softc *dksc = &wd->sc_dksc;
   1957 	struct ata_xfer *xfer;
   1958 	int error;
   1959 	unsigned char *req;
   1960 
   1961 	xfer = ata_get_xfer(wd->drvp->chnl_softc, true);
   1962 
   1963 	req = kmem_zalloc(512, KM_SLEEP);
   1964 	req[0] = bno & 0xff;
   1965 	req[1] = (bno >> 8) & 0xff;
   1966 	req[2] = (bno >> 16) & 0xff;
   1967 	req[3] = (bno >> 24) & 0xff;
   1968 	req[4] = (bno >> 32) & 0xff;
   1969 	req[5] = (bno >> 40) & 0xff;
   1970 	req[6] = size & 0xff;
   1971 	req[7] = (size >> 8) & 0xff;
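         	/*
         	 * This fills in a single DATA SET MANAGEMENT range entry: each
         	 * entry is 8 bytes, with the low 48 bits holding the starting
         	 * LBA and the high 16 bits the range length in sectors (hence
         	 * the 0xffff cap per call in wd_discard()).  The remaining
         	 * entries in the 512-byte payload stay zero, marking them unused.
         	 */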
   1972 
   1973 	/*
    1974 	 * XXX We could possibly use NCQ TRIM, which supports executing
    1975 	 * this command concurrently.  That would need some investigation:
    1976 	 * some early (and not so early) disk firmware caused data loss
    1977 	 * with NCQ TRIM.  atastart() et al. would also need to be adjusted
    1978 	 * to allow and support running several non-I/O ATA commands in parallel.
   1979 	 */
   1980 
   1981 	xfer->c_ata_c.r_command = ATA_DATA_SET_MANAGEMENT;
   1982 	xfer->c_ata_c.r_count = 1;
   1983 	xfer->c_ata_c.r_features = ATA_SUPPORT_DSM_TRIM;
   1984 	xfer->c_ata_c.r_st_bmask = WDCS_DRDY;
   1985 	xfer->c_ata_c.r_st_pmask = WDCS_DRDY;
   1986 	xfer->c_ata_c.timeout = 30000; /* 30s timeout */
   1987 	xfer->c_ata_c.data = req;
   1988 	xfer->c_ata_c.bcount = 512;
   1989 	xfer->c_ata_c.flags |= AT_WRITE | AT_WAIT;
   1990 
   1991 	wd->atabus->ata_exec_command(wd->drvp, xfer);
   1992 	ata_wait_cmd(wd->drvp->chnl_softc, xfer);
   1993 
   1994 	kmem_free(req, 512);
   1995 	error = wd_check_error(dksc, xfer, __func__);
   1996 	ata_free_xfer(wd->drvp->chnl_softc, xfer);
   1997 	return error;
   1998 }
   1999 
   2000 bool
   2001 wd_shutdown(device_t dev, int how)
   2002 {
   2003 	struct wd_softc *wd = device_private(dev);
   2004 
   2005 	/* the adapter needs to be enabled */
   2006 	if (wd->atabus->ata_addref(wd->drvp))
   2007 		return true; /* no need to complain */
   2008 
   2009 	wd_flushcache(wd, AT_POLL);
   2010 	if ((how & RB_POWERDOWN) == RB_POWERDOWN)
   2011 		wd_standby(wd, AT_POLL);
   2012 	return true;
   2013 }
   2014 
   2015 /*
    2016  * Allocate space for an ioctl queue structure.  Mostly taken from
   2017  * scsipi_ioctl.c
   2018  */
   2019 struct wd_ioctl *
   2020 wi_get(struct wd_softc *wd)
   2021 {
   2022 	struct wd_ioctl *wi;
   2023 
   2024 	wi = kmem_zalloc(sizeof(struct wd_ioctl), KM_SLEEP);
   2025 	wi->wi_softc = wd;
   2026 	buf_init(&wi->wi_bp);
   2027 
   2028 	return (wi);
   2029 }
   2030 
   2031 /*
   2032  * Free an ioctl structure and remove it from our list
   2033  */
   2034 
   2035 void
   2036 wi_free(struct wd_ioctl *wi)
   2037 {
   2038 	buf_destroy(&wi->wi_bp);
   2039 	kmem_free(wi, sizeof(*wi));
   2040 }
   2041 
   2042 /*
   2043  * Find a wd_ioctl structure based on the struct buf.
   2044  */
   2045 
   2046 struct wd_ioctl *
   2047 wi_find(struct buf *bp)
   2048 {
   2049 	return container_of(bp, struct wd_ioctl, wi_bp);
   2050 }
   2051 
   2052 static uint
   2053 wi_sector_size(const struct wd_ioctl * const wi)
   2054 {
   2055 	switch (wi->wi_atareq.command) {
   2056 	case WDCC_READ:
   2057 	case WDCC_WRITE:
   2058 	case WDCC_READMULTI:
   2059 	case WDCC_WRITEMULTI:
   2060 	case WDCC_READDMA:
   2061 	case WDCC_WRITEDMA:
   2062 	case WDCC_READ_EXT:
   2063 	case WDCC_WRITE_EXT:
   2064 	case WDCC_READMULTI_EXT:
   2065 	case WDCC_WRITEMULTI_EXT:
   2066 	case WDCC_READDMA_EXT:
   2067 	case WDCC_WRITEDMA_EXT:
   2068 	case WDCC_READ_FPDMA_QUEUED:
   2069 	case WDCC_WRITE_FPDMA_QUEUED:
   2070 		return wi->wi_softc->sc_blksize;
   2071 	default:
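         		/*
         		 * Non-I/O data commands (IDENTIFY, SMART, etc.) transfer
         		 * data in 512-byte blocks regardless of the logical
         		 * sector size.
         		 */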
   2072 		return 512;
   2073 	}
   2074 }
   2075 
   2076 /*
   2077  * Ioctl pseudo strategy routine
   2078  *
   2079  * This is mostly stolen from scsipi_ioctl.c:scsistrategy().  What
   2080  * happens here is:
   2081  *
   2082  * - wdioctl() queues a wd_ioctl structure.
   2083  *
   2084  * - wdioctl() calls physio/wdioctlstrategy based on whether or not
   2085  *   user space I/O is required.  If physio() is called, physio() eventually
   2086  *   calls wdioctlstrategy().
   2087  *
   2088  * - In either case, wdioctlstrategy() calls wd->atabus->ata_exec_command()
   2089  *   to perform the actual command
   2090  *
    2091  * We use a pseudo strategy routine because, when doing I/O to/from
    2092  * user space, physio _really_ wants to be in the loop.  We could put
    2093  * the entire buffer into the ioctl request
   2094  * structure, but that won't scale if we want to do things like download
   2095  * microcode.
   2096  */
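
         /*
          * A rough sketch of how this path is exercised from user space,
          * assuming the ATAIOCCOMMAND ioctl and struct atareq from
          * <sys/ataio.h> (as used by atactl(8)); fd is an open descriptor
          * for the raw wd device, and any field or constant not handled in
          * the code above is illustrative only:
          *
          *	struct atareq req;
          *	char buf[512];
          *
          *	memset(&req, 0, sizeof(req));
          *	req.flags = ATACMD_READ | ATACMD_READREG;
          *	req.command = WDCC_IDENTIFY;	(0xec, ATA IDENTIFY DEVICE)
          *	req.databuf = buf;
          *	req.datalen = sizeof(buf);
          *	req.timeout = 1000;
          *	if (ioctl(fd, ATAIOCCOMMAND, &req) == -1 || req.retsts != ATACMD_OK)
          *		err(1, "ATAIOCCOMMAND");
          */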
   2097 
   2098 void
   2099 wdioctlstrategy(struct buf *bp)
   2100 {
   2101 	struct wd_ioctl *wi;
   2102 	struct ata_xfer *xfer;
   2103 	int error = 0;
   2104 
   2105 	wi = wi_find(bp);
   2106 	if (wi == NULL) {
   2107 		printf("wdioctlstrategy: "
   2108 		    "No matching ioctl request found in queue\n");
   2109 		error = EINVAL;
   2110 		goto out2;
   2111 	}
   2112 
   2113 	xfer = ata_get_xfer(wi->wi_softc->drvp->chnl_softc, true);
   2114 
   2115 	/*
   2116 	 * Abort if physio broke up the transfer
   2117 	 */
   2118 
   2119 	if (bp->b_bcount != wi->wi_atareq.datalen) {
   2120 		printf("physio split wd ioctl request... cannot proceed\n");
   2121 		error = EIO;
   2122 		goto out;
   2123 	}
   2124 
   2125 	/*
   2126 	 * Abort if we didn't get a buffer size that was a multiple of
   2127 	 * our sector size (or overflows CHS/LBA28 sector count)
   2128 	 */
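         	/*
         	 * (1 << NBBY) is 256: the legacy sector count register is only
         	 * 8 bits wide, so this ioctl path accepts at most 255 sectors
         	 * per command.
         	 */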
   2129 
   2130 	if ((bp->b_bcount % wi_sector_size(wi)) != 0 ||
   2131 	    (bp->b_bcount / wi_sector_size(wi)) >=
   2132 	     (1 << NBBY)) {
   2133 		error = EINVAL;
   2134 		goto out;
   2135 	}
   2136 
   2137 	/*
   2138 	 * Make sure a timeout was supplied in the ioctl request
   2139 	 */
   2140 
   2141 	if (wi->wi_atareq.timeout == 0) {
   2142 		error = EINVAL;
   2143 		goto out;
   2144 	}
   2145 
   2146 	if (wi->wi_atareq.flags & ATACMD_READ)
   2147 		xfer->c_ata_c.flags |= AT_READ;
   2148 	else if (wi->wi_atareq.flags & ATACMD_WRITE)
   2149 		xfer->c_ata_c.flags |= AT_WRITE;
   2150 
   2151 	if (wi->wi_atareq.flags & ATACMD_READREG)
   2152 		xfer->c_ata_c.flags |= AT_READREG;
   2153 
   2154 	if ((wi->wi_atareq.flags & ATACMD_LBA) != 0)
   2155 		xfer->c_ata_c.flags |= AT_LBA;
   2156 
   2157 	xfer->c_ata_c.flags |= AT_WAIT;
   2158 
   2159 	xfer->c_ata_c.timeout = wi->wi_atareq.timeout;
   2160 	xfer->c_ata_c.r_command = wi->wi_atareq.command;
   2161 	xfer->c_ata_c.r_lba = ((wi->wi_atareq.head & 0x0f) << 24) |
   2162 	    (wi->wi_atareq.cylinder << 8) |
   2163 	    wi->wi_atareq.sec_num;
   2164 	xfer->c_ata_c.r_count = wi->wi_atareq.sec_count;
   2165 	xfer->c_ata_c.r_features = wi->wi_atareq.features;
   2166 	xfer->c_ata_c.r_st_bmask = WDCS_DRDY;
   2167 	xfer->c_ata_c.r_st_pmask = WDCS_DRDY;
   2168 	xfer->c_ata_c.data = wi->wi_bp.b_data;
   2169 	xfer->c_ata_c.bcount = wi->wi_bp.b_bcount;
   2170 
   2171 	wi->wi_softc->atabus->ata_exec_command(wi->wi_softc->drvp, xfer);
   2172 	ata_wait_cmd(wi->wi_softc->drvp->chnl_softc, xfer);
   2173 
   2174 	if (xfer->c_ata_c.flags & (AT_ERROR | AT_TIMEOU | AT_DF)) {
   2175 		if (xfer->c_ata_c.flags & AT_ERROR) {
   2176 			wi->wi_atareq.retsts = ATACMD_ERROR;
   2177 			wi->wi_atareq.error = xfer->c_ata_c.r_error;
   2178 		} else if (xfer->c_ata_c.flags & AT_DF)
   2179 			wi->wi_atareq.retsts = ATACMD_DF;
   2180 		else
   2181 			wi->wi_atareq.retsts = ATACMD_TIMEOUT;
   2182 	} else {
   2183 		wi->wi_atareq.retsts = ATACMD_OK;
   2184 		if (wi->wi_atareq.flags & ATACMD_READREG) {
   2185 			wi->wi_atareq.command = xfer->c_ata_c.r_status;
   2186 			wi->wi_atareq.features = xfer->c_ata_c.r_error;
   2187 			wi->wi_atareq.sec_count = xfer->c_ata_c.r_count;
   2188 			wi->wi_atareq.sec_num = xfer->c_ata_c.r_lba & 0xff;
   2189 			wi->wi_atareq.head = (xfer->c_ata_c.r_device & 0xf0) |
   2190 			    ((xfer->c_ata_c.r_lba >> 24) & 0x0f);
   2191 			wi->wi_atareq.cylinder =
   2192 			    (xfer->c_ata_c.r_lba >> 8) & 0xffff;
   2193 			wi->wi_atareq.error = xfer->c_ata_c.r_error;
   2194 		}
   2195 	}
   2196 
   2197 out:
   2198 	ata_free_xfer(wi->wi_softc->drvp->chnl_softc, xfer);
   2199 out2:
   2200 	bp->b_error = error;
   2201 	if (error)
   2202 		bp->b_resid = bp->b_bcount;
   2203 	biodone(bp);
   2204 }
   2205 
   2206 static void
   2207 wd_sysctl_attach(struct wd_softc *wd)
   2208 {
   2209 	struct dk_softc *dksc = &wd->sc_dksc;
   2210 	const struct sysctlnode *node;
   2211 	int error;
   2212 
   2213 	/* sysctl set-up */
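         	/*
         	 * The nodes are created under hw.<xname>, so with a unit named
         	 * wd0 (illustrative) the knobs end up as hw.wd0.use_ncq and
         	 * hw.wd0.use_ncq_prio and can be flipped at run time, e.g.
         	 * "sysctl -w hw.wd0.use_ncq=0".
         	 */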
   2214 	if (sysctl_createv(&wd->nodelog, 0, NULL, &node,
   2215 				0, CTLTYPE_NODE, dksc->sc_xname,
   2216 				SYSCTL_DESCR("wd driver settings"),
   2217 				NULL, 0, NULL, 0,
   2218 				CTL_HW, CTL_CREATE, CTL_EOL) != 0) {
   2219 		aprint_error_dev(dksc->sc_dev,
   2220 		    "could not create %s.%s sysctl node\n",
   2221 		    "hw", dksc->sc_xname);
   2222 		return;
   2223 	}
   2224 
   2225 	wd->drv_ncq = true;
   2226 	if ((error = sysctl_createv(&wd->nodelog, 0, NULL, NULL,
   2227 				CTLFLAG_READWRITE, CTLTYPE_BOOL, "use_ncq",
   2228 				SYSCTL_DESCR("use NCQ if supported"),
   2229 				NULL, 0, &wd->drv_ncq, 0,
   2230 				CTL_HW, node->sysctl_num, CTL_CREATE, CTL_EOL))
   2231 				!= 0) {
   2232 		aprint_error_dev(dksc->sc_dev,
   2233 		    "could not create %s.%s.use_ncq sysctl - error %d\n",
   2234 		    "hw", dksc->sc_xname, error);
   2235 		return;
   2236 	}
   2237 
   2238 	wd->drv_ncq_prio = false;
   2239 	if ((error = sysctl_createv(&wd->nodelog, 0, NULL, NULL,
   2240 				CTLFLAG_READWRITE, CTLTYPE_BOOL, "use_ncq_prio",
   2241 				SYSCTL_DESCR("use NCQ PRIORITY if supported"),
   2242 				NULL, 0, &wd->drv_ncq_prio, 0,
   2243 				CTL_HW, node->sysctl_num, CTL_CREATE, CTL_EOL))
   2244 				!= 0) {
   2245 		aprint_error_dev(dksc->sc_dev,
   2246 		    "could not create %s.%s.use_ncq_prio sysctl - error %d\n",
   2247 		    "hw", dksc->sc_xname, error);
   2248 		return;
   2249 	}
   2250 
   2251 #ifdef WD_CHAOS_MONKEY
   2252 	wd->drv_chaos_freq = 0;
   2253 	if ((error = sysctl_createv(&wd->nodelog, 0, NULL, NULL,
   2254 				CTLFLAG_READWRITE, CTLTYPE_INT, "chaos_freq",
   2255 				SYSCTL_DESCR("simulated bio read error rate"),
   2256 				NULL, 0, &wd->drv_chaos_freq, 0,
   2257 				CTL_HW, node->sysctl_num, CTL_CREATE, CTL_EOL))
   2258 				!= 0) {
   2259 		aprint_error_dev(dksc->sc_dev,
   2260 		    "could not create %s.%s.chaos_freq sysctl - error %d\n",
   2261 		    "hw", dksc->sc_xname, error);
   2262 		return;
   2263 	}
   2264 
   2265 	wd->drv_chaos_cnt = 0;
   2266 	if ((error = sysctl_createv(&wd->nodelog, 0, NULL, NULL,
   2267 				CTLFLAG_READONLY, CTLTYPE_INT, "chaos_cnt",
   2268 				SYSCTL_DESCR("number of processed bio reads"),
   2269 				NULL, 0, &wd->drv_chaos_cnt, 0,
   2270 				CTL_HW, node->sysctl_num, CTL_CREATE, CTL_EOL))
   2271 				!= 0) {
   2272 		aprint_error_dev(dksc->sc_dev,
   2273 		    "could not create %s.%s.chaos_cnt sysctl - error %d\n",
   2274 		    "hw", dksc->sc_xname, error);
   2275 		return;
   2276 	}
   2277 #endif
   2278 
   2279 }
   2280 
   2281 static void
   2282 wd_sysctl_detach(struct wd_softc *wd)
   2283 {
   2284 	sysctl_teardown(&wd->nodelog);
   2285 }
   2286 
   2287 #ifdef ATADEBUG
   2288 int wddebug(void);
   2289 
   2290 int
   2291 wddebug(void)
   2292 {
   2293 	struct wd_softc *wd;
    2294 	struct dk_softc *dksc;
    2295 	int unit;
    2296 
    2297 	for (unit = 0; unit <= 3; unit++) {
    2298 		wd = device_lookup_private(&wd_cd, unit);
    2299 		if (wd == NULL)
    2300 			continue;
    2301 		dksc = &wd->sc_dksc;
   2302 		printf("%s fl %x bufq %p:\n",
   2303 		    dksc->sc_xname, wd->sc_flags, bufq_peek(dksc->sc_bufq));
   2304 
   2305 		atachannel_debug(wd->drvp->chnl_softc);
   2306 	}
   2307 	return 0;
   2308 }
   2309 #endif /* ATADEBUG */
   2310