Home | History | Annotate | Line # | Download | only in ata
ata.c revision 1.137
      1 /*	$NetBSD: ata.c,v 1.137 2017/10/15 14:41:06 jdolecek Exp $	*/
      2 
      3 /*
      4  * Copyright (c) 1998, 2001 Manuel Bouyer.  All rights reserved.
      5  *
      6  * Redistribution and use in source and binary forms, with or without
      7  * modification, are permitted provided that the following conditions
      8  * are met:
      9  * 1. Redistributions of source code must retain the above copyright
     10  *    notice, this list of conditions and the following disclaimer.
     11  * 2. Redistributions in binary form must reproduce the above copyright
     12  *    notice, this list of conditions and the following disclaimer in the
     13  *    documentation and/or other materials provided with the distribution.
     14  *
     15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
     16  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
     17  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
     18  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
     19  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
     20  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
     21  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
     22  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
     23  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
     24  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
     25  */
     26 
     27 #include <sys/cdefs.h>
     28 __KERNEL_RCSID(0, "$NetBSD: ata.c,v 1.137 2017/10/15 14:41:06 jdolecek Exp $");
     29 
     30 #include "opt_ata.h"
     31 
     32 #include <sys/param.h>
     33 #include <sys/systm.h>
     34 #include <sys/kernel.h>
     35 #include <sys/malloc.h>
     36 #include <sys/device.h>
     37 #include <sys/conf.h>
     38 #include <sys/fcntl.h>
     39 #include <sys/proc.h>
     40 #include <sys/kthread.h>
     41 #include <sys/errno.h>
     42 #include <sys/ataio.h>
     43 #include <sys/kmem.h>
     44 #include <sys/intr.h>
     45 #include <sys/bus.h>
     46 #include <sys/once.h>
     47 #include <sys/bitops.h>
     48 
     49 #define ATABUS_PRIVATE
     50 
     51 #include <dev/ata/ataconf.h>
     52 #include <dev/ata/atareg.h>
     53 #include <dev/ata/atavar.h>
     54 #include <dev/ic/wdcvar.h>	/* for PIOBM */
     55 
     56 #include "locators.h"
     57 
     58 #include "atapibus.h"
     59 #include "ataraid.h"
     60 #include "sata_pmp.h"
     61 
     62 #if NATARAID > 0
     63 #include <dev/ata/ata_raidvar.h>
     64 #endif
     65 #if NSATA_PMP > 0
     66 #include <dev/ata/satapmpvar.h>
     67 #endif
     68 #include <dev/ata/satapmpreg.h>
     69 
     70 #define DEBUG_FUNCS  0x08
     71 #define DEBUG_PROBE  0x10
     72 #define DEBUG_DETACH 0x20
     73 #define	DEBUG_XFERS  0x40
     74 #ifdef ATADEBUG
     75 #ifndef ATADEBUG_MASK
     76 #define ATADEBUG_MASK 0
     77 #endif
     78 int atadebug_mask = ATADEBUG_MASK;
     79 #define ATADEBUG_PRINT(args, level) \
     80 	if (atadebug_mask & (level)) \
     81 		printf args
     82 #else
     83 #define ATADEBUG_PRINT(args, level)
     84 #endif
     85 
     86 static ONCE_DECL(ata_init_ctrl);
     87 
     88 /*
     89  * A queue of atabus instances, used to ensure the same bus probe order
     90  * for a given hardware configuration at each boot.  Kthread probing
     91  * devices on a atabus.  Only one probing at once.
     92  */
     93 static TAILQ_HEAD(, atabus_initq)	atabus_initq_head;
     94 static kmutex_t				atabus_qlock;
     95 static kcondvar_t			atabus_qcv;
     96 static lwp_t *				atabus_cfg_lwp;
     97 
     98 /*****************************************************************************
     99  * ATA bus layer.
    100  *
    101  * ATA controllers attach an atabus instance, which handles probing the bus
    102  * for drives, etc.
    103  *****************************************************************************/
    104 
    105 dev_type_open(atabusopen);
    106 dev_type_close(atabusclose);
    107 dev_type_ioctl(atabusioctl);
    108 
    109 const struct cdevsw atabus_cdevsw = {
    110 	.d_open = atabusopen,
    111 	.d_close = atabusclose,
    112 	.d_read = noread,
    113 	.d_write = nowrite,
    114 	.d_ioctl = atabusioctl,
    115 	.d_stop = nostop,
    116 	.d_tty = notty,
    117 	.d_poll = nopoll,
    118 	.d_mmap = nommap,
    119 	.d_kqfilter = nokqfilter,
    120 	.d_discard = nodiscard,
    121 	.d_flag = D_OTHER
    122 };
    123 
    124 extern struct cfdriver atabus_cd;
    125 
    126 static void atabus_childdetached(device_t, device_t);
    127 static int atabus_rescan(device_t, const char *, const int *);
    128 static bool atabus_resume(device_t, const pmf_qual_t *);
    129 static bool atabus_suspend(device_t, const pmf_qual_t *);
    130 static void atabusconfig_thread(void *);
    131 
    132 static void ata_channel_idle(struct ata_channel *);
    133 static void ata_channel_thaw_locked(struct ata_channel *);
    134 static void ata_activate_xfer_locked(struct ata_channel *, struct ata_xfer *);
    135 static void ata_channel_freeze_locked(struct ata_channel *);
    136 static void ata_thread_wake_locked(struct ata_channel *);
    137 
    138 /*
    139  * atabus_init:
    140  *
    141  *	Initialize ATA subsystem structures.
    142  */
    143 static int
    144 atabus_init(void)
    145 {
    146 
    147 	TAILQ_INIT(&atabus_initq_head);
    148 	mutex_init(&atabus_qlock, MUTEX_DEFAULT, IPL_NONE);
    149 	cv_init(&atabus_qcv, "atainitq");
    150 	return 0;
    151 }
    152 
    153 /*
    154  * atabusprint:
    155  *
    156  *	Autoconfiguration print routine used by ATA controllers when
    157  *	attaching an atabus instance.
    158  */
    159 int
    160 atabusprint(void *aux, const char *pnp)
    161 {
    162 	struct ata_channel *chan = aux;
    163 
    164 	if (pnp)
    165 		aprint_normal("atabus at %s", pnp);
    166 	aprint_normal(" channel %d", chan->ch_channel);
    167 
    168 	return (UNCONF);
    169 }
    170 
    171 /*
    172  * ataprint:
    173  *
    174  *	Autoconfiguration print routine.
    175  */
    176 int
    177 ataprint(void *aux, const char *pnp)
    178 {
    179 	struct ata_device *adev = aux;
    180 
    181 	if (pnp)
    182 		aprint_normal("wd at %s", pnp);
    183 	aprint_normal(" drive %d", adev->adev_drv_data->drive);
    184 
    185 	return (UNCONF);
    186 }
    187 
    188 /*
    189  * ata_channel_attach:
    190  *
    191  *	Common parts of attaching an atabus to an ATA controller channel.
    192  */
    193 void
    194 ata_channel_attach(struct ata_channel *chp)
    195 {
    196 	if (chp->ch_flags & ATACH_DISABLED)
    197 		return;
    198 
    199 	KASSERT(chp->ch_queue != NULL);
    200 
    201 	ata_channel_init(chp);
    202 
    203 	chp->atabus = config_found_ia(chp->ch_atac->atac_dev, "ata", chp,
    204 		atabusprint);
    205 }
    206 
    207 /*
    208  * ata_channel_detach:
    209  *
    210  *	Common parts of detaching an atabus to an ATA controller channel.
    211  */
    212 void
    213 ata_channel_detach(struct ata_channel *chp)
    214 {
    215 	if (chp->ch_flags & ATACH_DISABLED)
    216 		return;
    217 
    218 	ata_channel_destroy(chp);
    219 }
    220 
    221 static void
    222 atabusconfig(struct atabus_softc *atabus_sc)
    223 {
    224 	struct ata_channel *chp = atabus_sc->sc_chan;
    225 	struct atac_softc *atac = chp->ch_atac;
    226 	struct atabus_initq *atabus_initq = NULL;
    227 	int i, error;
    228 
    229 	/* we are in the atabus's thread context */
    230 	ata_channel_lock(chp);
    231 	chp->ch_flags |= ATACH_TH_RUN;
    232 	ata_channel_unlock(chp);
    233 
    234 	/*
    235 	 * Probe for the drives attached to controller, unless a PMP
    236 	 * is already known
    237 	 */
    238 	/* XXX for SATA devices we will power up all drives at once */
    239 	if (chp->ch_satapmp_nports == 0)
    240 		(*atac->atac_probe)(chp);
    241 
    242 	if (chp->ch_ndrives >= 2) {
    243 		ATADEBUG_PRINT(("atabusattach: ch_drive_type 0x%x 0x%x\n",
    244 		    chp->ch_drive[0].drive_type, chp->ch_drive[1].drive_type),
    245 		    DEBUG_PROBE);
    246 	}
    247 
    248 	/* next operations will occurs in a separate thread */
    249 	ata_channel_lock(chp);
    250 	chp->ch_flags &= ~ATACH_TH_RUN;
    251 	ata_channel_unlock(chp);
    252 
    253 	/* Make sure the devices probe in atabus order to avoid jitter. */
    254 	mutex_enter(&atabus_qlock);
    255 	for (;;) {
    256 		atabus_initq = TAILQ_FIRST(&atabus_initq_head);
    257 		if (atabus_initq->atabus_sc == atabus_sc)
    258 			break;
    259 		cv_wait(&atabus_qcv, &atabus_qlock);
    260 	}
    261 	mutex_exit(&atabus_qlock);
    262 
    263 	ata_channel_lock(chp);
    264 
    265 	/* If no drives, abort here */
    266 	if (chp->ch_drive == NULL)
    267 		goto out;
    268 	KASSERT(chp->ch_ndrives == 0 || chp->ch_drive != NULL);
    269 	for (i = 0; i < chp->ch_ndrives; i++)
    270 		if (chp->ch_drive[i].drive_type != ATA_DRIVET_NONE)
    271 			break;
    272 	if (i == chp->ch_ndrives)
    273 		goto out;
    274 
    275 	/* Shortcut in case we've been shutdown */
    276 	if (chp->ch_flags & ATACH_SHUTDOWN)
    277 		goto out;
    278 
    279 	ata_channel_unlock(chp);
    280 
    281 	if ((error = kthread_create(PRI_NONE, 0, NULL, atabusconfig_thread,
    282 	    atabus_sc, &atabus_cfg_lwp,
    283 	    "%scnf", device_xname(atac->atac_dev))) != 0)
    284 		aprint_error_dev(atac->atac_dev,
    285 		    "unable to create config thread: error %d\n", error);
    286 	return;
    287 
    288  out:
    289 	ata_channel_unlock(chp);
    290 
    291 	mutex_enter(&atabus_qlock);
    292 	TAILQ_REMOVE(&atabus_initq_head, atabus_initq, atabus_initq);
    293 	cv_broadcast(&atabus_qcv);
    294 	mutex_exit(&atabus_qlock);
    295 
    296 	free(atabus_initq, M_DEVBUF);
    297 
    298 	ata_delref(chp);
    299 
    300 	config_pending_decr(atac->atac_dev);
    301 }
    302 
    303 /*
    304  * atabus_configthread: finish attach of atabus's childrens, in a separate
    305  * kernel thread.
    306  */
    307 static void
    308 atabusconfig_thread(void *arg)
    309 {
    310 	struct atabus_softc *atabus_sc = arg;
    311 	struct ata_channel *chp = atabus_sc->sc_chan;
    312 	struct atac_softc *atac = chp->ch_atac;
    313 	struct atabus_initq *atabus_initq = NULL;
    314 	int i, s;
    315 
    316 	/* XXX seems wrong */
    317 	mutex_enter(&atabus_qlock);
    318 	atabus_initq = TAILQ_FIRST(&atabus_initq_head);
    319 	KASSERT(atabus_initq->atabus_sc == atabus_sc);
    320 	mutex_exit(&atabus_qlock);
    321 
    322 	/*
    323 	 * First look for a port multiplier
    324 	 */
    325 	if (chp->ch_ndrives == PMP_MAX_DRIVES &&
    326 	    chp->ch_drive[PMP_PORT_CTL].drive_type == ATA_DRIVET_PM) {
    327 #if NSATA_PMP > 0
    328 		satapmp_attach(chp);
    329 #else
    330 		aprint_error_dev(atabus_sc->sc_dev,
    331 		    "SATA port multiplier not supported\n");
    332 		/* no problems going on, all drives are ATA_DRIVET_NONE */
    333 #endif
    334 	}
    335 
    336 	/*
    337 	 * Attach an ATAPI bus, if needed.
    338 	 */
    339 	KASSERT(chp->ch_ndrives == 0 || chp->ch_drive != NULL);
    340 	for (i = 0; i < chp->ch_ndrives && chp->atapibus == NULL; i++) {
    341 		if (chp->ch_drive[i].drive_type == ATA_DRIVET_ATAPI) {
    342 #if NATAPIBUS > 0
    343 			(*atac->atac_atapibus_attach)(atabus_sc);
    344 #else
    345 			/*
    346 			 * Fake the autoconfig "not configured" message
    347 			 */
    348 			aprint_normal("atapibus at %s not configured\n",
    349 			    device_xname(atac->atac_dev));
    350 			chp->atapibus = NULL;
    351 			s = splbio();
    352 			for (i = 0; i < chp->ch_ndrives; i++) {
    353 				if (chp->ch_drive[i].drive_type == ATA_DRIVET_ATAPI)
    354 					chp->ch_drive[i].drive_type = ATA_DRIVET_NONE;
    355 			}
    356 			splx(s);
    357 #endif
    358 			break;
    359 		}
    360 	}
    361 
    362 	for (i = 0; i < chp->ch_ndrives; i++) {
    363 		struct ata_device adev;
    364 		if (chp->ch_drive[i].drive_type != ATA_DRIVET_ATA &&
    365 		    chp->ch_drive[i].drive_type != ATA_DRIVET_OLD) {
    366 			continue;
    367 		}
    368 		if (chp->ch_drive[i].drv_softc != NULL)
    369 			continue;
    370 		memset(&adev, 0, sizeof(struct ata_device));
    371 		adev.adev_bustype = atac->atac_bustype_ata;
    372 		adev.adev_channel = chp->ch_channel;
    373 		adev.adev_drv_data = &chp->ch_drive[i];
    374 		chp->ch_drive[i].drv_softc = config_found_ia(atabus_sc->sc_dev,
    375 		    "ata_hl", &adev, ataprint);
    376 		if (chp->ch_drive[i].drv_softc != NULL) {
    377 			ata_probe_caps(&chp->ch_drive[i]);
    378 		} else {
    379 			s = splbio();
    380 			chp->ch_drive[i].drive_type = ATA_DRIVET_NONE;
    381 			splx(s);
    382 		}
    383 	}
    384 
    385 	/* now that we know the drives, the controller can set its modes */
    386 	if (atac->atac_set_modes) {
    387 		(*atac->atac_set_modes)(chp);
    388 		ata_print_modes(chp);
    389 	}
    390 #if NATARAID > 0
    391 	if (atac->atac_cap & ATAC_CAP_RAID) {
    392 		for (i = 0; i < chp->ch_ndrives; i++) {
    393 			if (chp->ch_drive[i].drive_type == ATA_DRIVET_ATA) {
    394 				ata_raid_check_component(
    395 				    chp->ch_drive[i].drv_softc);
    396 			}
    397 		}
    398 	}
    399 #endif /* NATARAID > 0 */
    400 
    401 	/*
    402 	 * reset drive_flags for unattached devices, reset state for attached
    403 	 * ones
    404 	 */
    405 	s = splbio();
    406 	for (i = 0; i < chp->ch_ndrives; i++) {
    407 		if (chp->ch_drive[i].drive_type == ATA_DRIVET_PM)
    408 			continue;
    409 		if (chp->ch_drive[i].drv_softc == NULL) {
    410 			chp->ch_drive[i].drive_flags = 0;
    411 			chp->ch_drive[i].drive_type = ATA_DRIVET_NONE;
    412 		} else
    413 			chp->ch_drive[i].state = 0;
    414 	}
    415 	splx(s);
    416 
    417 	mutex_enter(&atabus_qlock);
    418 	TAILQ_REMOVE(&atabus_initq_head, atabus_initq, atabus_initq);
    419 	cv_broadcast(&atabus_qcv);
    420 	mutex_exit(&atabus_qlock);
    421 
    422 	free(atabus_initq, M_DEVBUF);
    423 
    424 	ata_delref(chp);
    425 
    426 	config_pending_decr(atac->atac_dev);
    427 	kthread_exit(0);
    428 }
    429 
    430 /*
    431  * atabus_thread:
    432  *
    433  *	Worker thread for the ATA bus.
    434  */
    435 static void
    436 atabus_thread(void *arg)
    437 {
    438 	struct atabus_softc *sc = arg;
    439 	struct ata_channel *chp = sc->sc_chan;
    440 	struct ata_queue *chq = chp->ch_queue;
    441 	struct ata_xfer *xfer;
    442 	int i, rv, s;
    443 
    444 	ata_channel_lock(chp);
    445 	chp->ch_flags |= ATACH_TH_RUN;
    446 
    447 	/*
    448 	 * Probe the drives.  Reset type to indicate to controllers
    449 	 * that can re-probe that all drives must be probed..
    450 	 *
    451 	 * Note: ch_ndrives may be changed during the probe.
    452 	 */
    453 	KASSERT(chp->ch_ndrives == 0 || chp->ch_drive != NULL);
    454 	for (i = 0; i < chp->ch_ndrives; i++) {
    455 		chp->ch_drive[i].drive_flags = 0;
    456 		chp->ch_drive[i].drive_type = ATA_DRIVET_NONE;
    457 	}
    458 	ata_channel_unlock(chp);
    459 
    460 	atabusconfig(sc);
    461 
    462 	ata_channel_lock(chp);
    463 	for (;;) {
    464 		if ((chp->ch_flags & (ATACH_TH_RESET | ATACH_SHUTDOWN)) == 0 &&
    465 		    (chq->queue_active == 0 || chq->queue_freeze == 0)) {
    466 			chp->ch_flags &= ~ATACH_TH_RUN;
    467 			cv_wait(&chp->ch_thr_idle, &chp->ch_lock);
    468 			chp->ch_flags |= ATACH_TH_RUN;
    469 		}
    470 		if (chp->ch_flags & ATACH_SHUTDOWN) {
    471 			break;
    472 		}
    473 		if (chp->ch_flags & ATACH_TH_RESCAN) {
    474 			chp->ch_flags &= ~ATACH_TH_RESCAN;
    475 			ata_channel_unlock(chp);
    476 			atabusconfig(sc);
    477 			ata_channel_lock(chp);
    478 		}
    479 		if (chp->ch_flags & ATACH_TH_RESET) {
    480 			/* ata_reset_channel() will unfreeze the channel */
    481 			ata_channel_unlock(chp);
    482 			s = splbio();
    483 			ata_reset_channel(chp, AT_WAIT | chp->ch_reset_flags);
    484 			splx(s);
    485 			ata_channel_lock(chp);
    486 		} else if (chq->queue_active > 0 && chq->queue_freeze == 1) {
    487 			/*
    488 			 * Caller has bumped queue_freeze, decrease it. This
    489 			 * flow shalt never be executed for NCQ commands.
    490 			 */
    491 			KASSERT((chp->ch_flags & ATACH_NCQ) == 0);
    492 			KASSERT(chq->queue_active == 1);
    493 
    494 			ata_channel_thaw_locked(chp);
    495 			xfer = ata_queue_get_active_xfer_locked(chp);
    496 
    497 			KASSERT(xfer != NULL);
    498 			KASSERT((xfer->c_flags & C_POLL) == 0);
    499 
    500 			switch ((rv = ata_xfer_start(xfer))) {
    501 			case ATASTART_STARTED:
    502 			case ATASTART_POLL:
    503 			case ATASTART_ABORT:
    504 				break;
    505 			case ATASTART_TH:
    506 			default:
    507 				panic("%s: ata_xfer_start() unexpected rv %d",
    508 				    __func__, rv);
    509 				/* NOTREACHED */
    510 			}
    511 		} else if (chq->queue_freeze > 1)
    512 			panic("%s: queue_freeze", __func__);
    513 	}
    514 	chp->ch_thread = NULL;
    515 	cv_signal(&chp->ch_thr_idle);
    516 	ata_channel_unlock(chp);
    517 	kthread_exit(0);
    518 }
    519 
    520 static void
    521 ata_thread_wake_locked(struct ata_channel *chp)
    522 {
    523 	KASSERT(mutex_owned(&chp->ch_lock));
    524 	ata_channel_freeze_locked(chp);
    525 	cv_signal(&chp->ch_thr_idle);
    526 }
    527 
    528 /*
    529  * atabus_match:
    530  *
    531  *	Autoconfiguration match routine.
    532  */
    533 static int
    534 atabus_match(device_t parent, cfdata_t cf, void *aux)
    535 {
    536 	struct ata_channel *chp = aux;
    537 
    538 	if (chp == NULL)
    539 		return (0);
    540 
    541 	if (cf->cf_loc[ATACF_CHANNEL] != chp->ch_channel &&
    542 	    cf->cf_loc[ATACF_CHANNEL] != ATACF_CHANNEL_DEFAULT)
    543 		return (0);
    544 
    545 	return (1);
    546 }
    547 
    548 /*
    549  * atabus_attach:
    550  *
    551  *	Autoconfiguration attach routine.
    552  */
    553 static void
    554 atabus_attach(device_t parent, device_t self, void *aux)
    555 {
    556 	struct atabus_softc *sc = device_private(self);
    557 	struct ata_channel *chp = aux;
    558 	struct atabus_initq *initq;
    559 	int error;
    560 
    561 	sc->sc_chan = chp;
    562 
    563 	aprint_normal("\n");
    564 	aprint_naive("\n");
    565 
    566 	sc->sc_dev = self;
    567 
    568 	if (ata_addref(chp))
    569 		return;
    570 
    571 	RUN_ONCE(&ata_init_ctrl, atabus_init);
    572 
    573 	initq = malloc(sizeof(*initq), M_DEVBUF, M_WAITOK);
    574 	initq->atabus_sc = sc;
    575 	mutex_enter(&atabus_qlock);
    576 	TAILQ_INSERT_TAIL(&atabus_initq_head, initq, atabus_initq);
    577 	mutex_exit(&atabus_qlock);
    578 	config_pending_incr(sc->sc_dev);
    579 
    580 	if ((error = kthread_create(PRI_NONE, 0, NULL, atabus_thread, sc,
    581 	    &chp->ch_thread, "%s", device_xname(self))) != 0)
    582 		aprint_error_dev(self,
    583 		    "unable to create kernel thread: error %d\n", error);
    584 
    585 	if (!pmf_device_register(self, atabus_suspend, atabus_resume))
    586 		aprint_error_dev(self, "couldn't establish power handler\n");
    587 }
    588 
    589 /*
    590  * atabus_detach:
    591  *
    592  *	Autoconfiguration detach routine.
    593  */
    594 static int
    595 atabus_detach(device_t self, int flags)
    596 {
    597 	struct atabus_softc *sc = device_private(self);
    598 	struct ata_channel *chp = sc->sc_chan;
    599 	device_t dev = NULL;
    600 	int i, error = 0;
    601 
    602 	/* Shutdown the channel. */
    603 	ata_channel_lock(chp);
    604 	chp->ch_flags |= ATACH_SHUTDOWN;
    605 	while (chp->ch_thread != NULL) {
    606 		cv_signal(&chp->ch_thr_idle);
    607 		cv_wait(&chp->ch_thr_idle, &chp->ch_lock);
    608 	}
    609 	ata_channel_unlock(chp);
    610 
    611 	/*
    612 	 * Detach atapibus and its children.
    613 	 */
    614 	if ((dev = chp->atapibus) != NULL) {
    615 		ATADEBUG_PRINT(("atabus_detach: %s: detaching %s\n",
    616 		    device_xname(self), device_xname(dev)), DEBUG_DETACH);
    617 
    618 		error = config_detach(dev, flags);
    619 		if (error)
    620 			goto out;
    621 		KASSERT(chp->atapibus == NULL);
    622 	}
    623 
    624 	KASSERT(chp->ch_ndrives == 0 || chp->ch_drive != NULL);
    625 
    626 	/*
    627 	 * Detach our other children.
    628 	 */
    629 	for (i = 0; i < chp->ch_ndrives; i++) {
    630 		if (chp->ch_drive[i].drive_type == ATA_DRIVET_ATAPI)
    631 			continue;
    632 		if (chp->ch_drive[i].drive_type == ATA_DRIVET_PM)
    633 			chp->ch_drive[i].drive_type = ATA_DRIVET_NONE;
    634 		if ((dev = chp->ch_drive[i].drv_softc) != NULL) {
    635 			ATADEBUG_PRINT(("%s.%d: %s: detaching %s\n", __func__,
    636 			    __LINE__, device_xname(self), device_xname(dev)),
    637 			    DEBUG_DETACH);
    638 			error = config_detach(dev, flags);
    639 			if (error)
    640 				goto out;
    641 			KASSERT(chp->ch_drive[i].drv_softc == NULL);
    642 			KASSERT(chp->ch_drive[i].drive_type == 0);
    643 		}
    644 	}
    645 	atabus_free_drives(chp);
    646 
    647  out:
    648 #ifdef ATADEBUG
    649 	if (dev != NULL && error != 0)
    650 		ATADEBUG_PRINT(("%s: %s: error %d detaching %s\n", __func__,
    651 		    device_xname(self), error, device_xname(dev)),
    652 		    DEBUG_DETACH);
    653 #endif /* ATADEBUG */
    654 
    655 	return (error);
    656 }
    657 
    658 void
    659 atabus_childdetached(device_t self, device_t child)
    660 {
    661 	bool found = false;
    662 	struct atabus_softc *sc = device_private(self);
    663 	struct ata_channel *chp = sc->sc_chan;
    664 	int i;
    665 
    666 	KASSERT(chp->ch_ndrives == 0 || chp->ch_drive != NULL);
    667 	/*
    668 	 * atapibus detached.
    669 	 */
    670 	if (child == chp->atapibus) {
    671 		chp->atapibus = NULL;
    672 		found = true;
    673 		for (i = 0; i < chp->ch_ndrives; i++) {
    674 			if (chp->ch_drive[i].drive_type != ATA_DRIVET_ATAPI)
    675 				continue;
    676 			KASSERT(chp->ch_drive[i].drv_softc != NULL);
    677 			chp->ch_drive[i].drv_softc = NULL;
    678 			chp->ch_drive[i].drive_flags = 0;
    679 			chp->ch_drive[i].drive_type = ATA_DRIVET_NONE;
    680 		}
    681 	}
    682 
    683 	/*
    684 	 * Detach our other children.
    685 	 */
    686 	for (i = 0; i < chp->ch_ndrives; i++) {
    687 		if (chp->ch_drive[i].drive_type == ATA_DRIVET_ATAPI)
    688 			continue;
    689 		if (child == chp->ch_drive[i].drv_softc) {
    690 			chp->ch_drive[i].drv_softc = NULL;
    691 			chp->ch_drive[i].drive_flags = 0;
    692 			if (chp->ch_drive[i].drive_type == ATA_DRIVET_PM)
    693 				chp->ch_satapmp_nports = 0;
    694 			chp->ch_drive[i].drive_type = ATA_DRIVET_NONE;
    695 			found = true;
    696 		}
    697 	}
    698 
    699 	if (!found)
    700 		panic("%s: unknown child %p", device_xname(self),
    701 		    (const void *)child);
    702 }
    703 
    704 CFATTACH_DECL3_NEW(atabus, sizeof(struct atabus_softc),
    705     atabus_match, atabus_attach, atabus_detach, NULL, atabus_rescan,
    706     atabus_childdetached, DVF_DETACH_SHUTDOWN);
    707 
    708 /*****************************************************************************
    709  * Common ATA bus operations.
    710  *****************************************************************************/
    711 
    712 /* allocate/free the channel's ch_drive[] array */
    713 int
    714 atabus_alloc_drives(struct ata_channel *chp, int ndrives)
    715 {
    716 	int i;
    717 	if (chp->ch_ndrives != ndrives)
    718 		atabus_free_drives(chp);
    719 	if (chp->ch_drive == NULL) {
    720 		chp->ch_drive = malloc(
    721 		    sizeof(struct ata_drive_datas) * ndrives,
    722 		    M_DEVBUF, M_NOWAIT | M_ZERO);
    723 	}
    724 	if (chp->ch_drive == NULL) {
    725 	    aprint_error_dev(chp->ch_atac->atac_dev,
    726 		"can't alloc drive array\n");
    727 	    chp->ch_ndrives = 0;
    728 	    return ENOMEM;
    729 	};
    730 	for (i = 0; i < ndrives; i++) {
    731 		chp->ch_drive[i].chnl_softc = chp;
    732 		chp->ch_drive[i].drive = i;
    733 	}
    734 	chp->ch_ndrives = ndrives;
    735 	return 0;
    736 }
    737 
    738 void
    739 atabus_free_drives(struct ata_channel *chp)
    740 {
    741 #ifdef DIAGNOSTIC
    742 	int i;
    743 	int dopanic = 0;
    744 	KASSERT(chp->ch_ndrives == 0 || chp->ch_drive != NULL);
    745 	for (i = 0; i < chp->ch_ndrives; i++) {
    746 		if (chp->ch_drive[i].drive_type != ATA_DRIVET_NONE) {
    747 			printf("%s: ch_drive[%d] type %d != ATA_DRIVET_NONE\n",
    748 			    device_xname(chp->atabus), i,
    749 			    chp->ch_drive[i].drive_type);
    750 			dopanic = 1;
    751 		}
    752 		if (chp->ch_drive[i].drv_softc != NULL) {
    753 			printf("%s: ch_drive[%d] attached to %s\n",
    754 			    device_xname(chp->atabus), i,
    755 			    device_xname(chp->ch_drive[i].drv_softc));
    756 			dopanic = 1;
    757 		}
    758 	}
    759 	if (dopanic)
    760 		panic("atabus_free_drives");
    761 #endif
    762 
    763 	if (chp->ch_drive == NULL)
    764 		return;
    765 	chp->ch_ndrives = 0;
    766 	free(chp->ch_drive, M_DEVBUF);
    767 	chp->ch_drive = NULL;
    768 }
    769 
    770 /* Get the disk's parameters */
    771 int
    772 ata_get_params(struct ata_drive_datas *drvp, uint8_t flags,
    773     struct ataparams *prms)
    774 {
    775 	struct ata_xfer *xfer;
    776 	struct ata_channel *chp = drvp->chnl_softc;
    777 	struct atac_softc *atac = chp->ch_atac;
    778 	char *tb;
    779 	int i, rv;
    780 	uint16_t *p;
    781 
    782 	ATADEBUG_PRINT(("%s\n", __func__), DEBUG_FUNCS);
    783 
    784 	xfer = ata_get_xfer(chp);
    785 	if (xfer == NULL) {
    786 		ATADEBUG_PRINT(("%s: no xfer\n", __func__),
    787 		    DEBUG_FUNCS|DEBUG_PROBE);
    788 		return CMD_AGAIN;
    789 	}
    790 
    791 	tb = kmem_zalloc(ATA_BSIZE, KM_SLEEP);
    792 	memset(prms, 0, sizeof(struct ataparams));
    793 
    794 	if (drvp->drive_type == ATA_DRIVET_ATA) {
    795 		xfer->c_ata_c.r_command = WDCC_IDENTIFY;
    796 		xfer->c_ata_c.r_st_bmask = WDCS_DRDY;
    797 		xfer->c_ata_c.r_st_pmask = WDCS_DRQ;
    798 		xfer->c_ata_c.timeout = 3000; /* 3s */
    799 	} else if (drvp->drive_type == ATA_DRIVET_ATAPI) {
    800 		xfer->c_ata_c.r_command = ATAPI_IDENTIFY_DEVICE;
    801 		xfer->c_ata_c.r_st_bmask = 0;
    802 		xfer->c_ata_c.r_st_pmask = WDCS_DRQ;
    803 		xfer->c_ata_c.timeout = 10000; /* 10s */
    804 	} else {
    805 		ATADEBUG_PRINT(("ata_get_parms: no disks\n"),
    806 		    DEBUG_FUNCS|DEBUG_PROBE);
    807 		rv = CMD_ERR;
    808 		goto out;
    809 	}
    810 	xfer->c_ata_c.flags = AT_READ | flags;
    811 	xfer->c_ata_c.data = tb;
    812 	xfer->c_ata_c.bcount = ATA_BSIZE;
    813 	if ((*atac->atac_bustype_ata->ata_exec_command)(drvp,
    814 						xfer) != ATACMD_COMPLETE) {
    815 		ATADEBUG_PRINT(("ata_get_parms: wdc_exec_command failed\n"),
    816 		    DEBUG_FUNCS|DEBUG_PROBE);
    817 		rv = CMD_AGAIN;
    818 		goto out;
    819 	}
    820 	if (xfer->c_ata_c.flags & (AT_ERROR | AT_TIMEOU | AT_DF)) {
    821 		ATADEBUG_PRINT(("ata_get_parms: ata_c.flags=0x%x\n",
    822 		    xfer->c_ata_c.flags), DEBUG_FUNCS|DEBUG_PROBE);
    823 		rv = CMD_ERR;
    824 		goto out;
    825 	}
    826 	/* if we didn't read any data something is wrong */
    827 	if ((xfer->c_ata_c.flags & AT_XFDONE) == 0) {
    828 		rv = CMD_ERR;
    829 		goto out;
    830 	}
    831 
    832 	/* Read in parameter block. */
    833 	memcpy(prms, tb, sizeof(struct ataparams));
    834 
    835 	/*
    836 	 * Shuffle string byte order.
    837 	 * ATAPI NEC, Mitsumi and Pioneer drives and
    838 	 * old ATA TDK CompactFlash cards
    839 	 * have different byte order.
    840 	 */
    841 #if BYTE_ORDER == BIG_ENDIAN
    842 # define M(n)	prms->atap_model[(n) ^ 1]
    843 #else
    844 # define M(n)	prms->atap_model[n]
    845 #endif
    846 	if (
    847 #if BYTE_ORDER == BIG_ENDIAN
    848 	    !
    849 #endif
    850 	    ((drvp->drive_type == ATA_DRIVET_ATAPI) ?
    851 	     ((M(0) == 'N' && M(1) == 'E') ||
    852 	      (M(0) == 'F' && M(1) == 'X') ||
    853 	      (M(0) == 'P' && M(1) == 'i')) :
    854 	     ((M(0) == 'T' && M(1) == 'D' && M(2) == 'K')))) {
    855 		rv = CMD_OK;
    856 		goto out;
    857 	     }
    858 #undef M
    859 	for (i = 0; i < sizeof(prms->atap_model); i += 2) {
    860 		p = (uint16_t *)(prms->atap_model + i);
    861 		*p = bswap16(*p);
    862 	}
    863 	for (i = 0; i < sizeof(prms->atap_serial); i += 2) {
    864 		p = (uint16_t *)(prms->atap_serial + i);
    865 		*p = bswap16(*p);
    866 	}
    867 	for (i = 0; i < sizeof(prms->atap_revision); i += 2) {
    868 		p = (uint16_t *)(prms->atap_revision + i);
    869 		*p = bswap16(*p);
    870 	}
    871 
    872 	rv = CMD_OK;
    873  out:
    874 	kmem_free(tb, ATA_BSIZE);
    875 	ata_free_xfer(chp, xfer);
    876 	return rv;
    877 }
    878 
    879 int
    880 ata_set_mode(struct ata_drive_datas *drvp, uint8_t mode, uint8_t flags)
    881 {
    882 	struct ata_xfer *xfer;
    883 	int rv;
    884 	struct ata_channel *chp = drvp->chnl_softc;
    885 	struct atac_softc *atac = chp->ch_atac;
    886 
    887 	ATADEBUG_PRINT(("ata_set_mode=0x%x\n", mode), DEBUG_FUNCS);
    888 
    889 	xfer = ata_get_xfer(chp);
    890 	if (xfer == NULL) {
    891 		ATADEBUG_PRINT(("%s: no xfer\n", __func__),
    892 		    DEBUG_FUNCS|DEBUG_PROBE);
    893 		return CMD_AGAIN;
    894 	}
    895 
    896 	xfer->c_ata_c.r_command = SET_FEATURES;
    897 	xfer->c_ata_c.r_st_bmask = 0;
    898 	xfer->c_ata_c.r_st_pmask = 0;
    899 	xfer->c_ata_c.r_features = WDSF_SET_MODE;
    900 	xfer->c_ata_c.r_count = mode;
    901 	xfer->c_ata_c.flags = flags;
    902 	xfer->c_ata_c.timeout = 1000; /* 1s */
    903 	if ((*atac->atac_bustype_ata->ata_exec_command)(drvp,
    904 						xfer) != ATACMD_COMPLETE) {
    905 		rv = CMD_AGAIN;
    906 		goto out;
    907 	}
    908 	if (xfer->c_ata_c.flags & (AT_ERROR | AT_TIMEOU | AT_DF)) {
    909 		rv = CMD_ERR;
    910 		goto out;
    911 	}
    912 
    913 	rv = CMD_OK;
    914 
    915 out:
    916 	ata_free_xfer(chp, xfer);
    917 	return rv;
    918 }
    919 
    920 int
    921 ata_read_log_ext_ncq(struct ata_drive_datas *drvp, uint8_t flags,
    922     uint8_t *slot, uint8_t *status, uint8_t *err)
    923 {
    924 	struct ata_xfer *xfer;
    925 	int rv;
    926 	struct ata_channel *chp = drvp->chnl_softc;
    927 	struct atac_softc *atac = chp->ch_atac;
    928 	uint8_t *tb, cksum, page;
    929 
    930 	ATADEBUG_PRINT(("%s\n", __func__), DEBUG_FUNCS);
    931 
    932 	/* Only NCQ ATA drives support/need this */
    933 	if (drvp->drive_type != ATA_DRIVET_ATA ||
    934 	    (drvp->drive_flags & ATA_DRIVE_NCQ) == 0)
    935 		return EOPNOTSUPP;
    936 
    937 	xfer = ata_get_xfer_ext(chp, C_RECOVERY, 0);
    938 
    939 	tb = drvp->recovery_blk;
    940 	memset(tb, 0, sizeof(drvp->recovery_blk));
    941 
    942 	/*
    943 	 * We could use READ LOG DMA EXT if drive supports it (i.e.
    944 	 * when it supports Streaming feature) to avoid PIO command,
    945 	 * and to make this a little faster. Realistically, it
    946 	 * should not matter.
    947 	 */
    948 	xfer->c_flags |= C_RECOVERY;
    949 	xfer->c_ata_c.r_command = WDCC_READ_LOG_EXT;
    950 	xfer->c_ata_c.r_lba = page = WDCC_LOG_PAGE_NCQ;
    951 	xfer->c_ata_c.r_st_bmask = WDCS_DRDY;
    952 	xfer->c_ata_c.r_st_pmask = WDCS_DRDY;
    953 	xfer->c_ata_c.r_count = 1;
    954 	xfer->c_ata_c.r_device = WDSD_LBA;
    955 	xfer->c_ata_c.flags = AT_READ | AT_LBA | AT_LBA48 | flags;
    956 	xfer->c_ata_c.timeout = 1000; /* 1s */
    957 	xfer->c_ata_c.data = tb;
    958 	xfer->c_ata_c.bcount = sizeof(drvp->recovery_blk);
    959 
    960 	if ((*atac->atac_bustype_ata->ata_exec_command)(drvp,
    961 						xfer) != ATACMD_COMPLETE) {
    962 		rv = EAGAIN;
    963 		goto out;
    964 	}
    965 	if (xfer->c_ata_c.flags & (AT_ERROR | AT_TIMEOU | AT_DF)) {
    966 		rv = EINVAL;
    967 		goto out;
    968 	}
    969 
    970 	cksum = 0;
    971 	for (int i = 0; i < sizeof(drvp->recovery_blk); i++)
    972 		cksum += tb[i];
    973 	if (cksum != 0) {
    974 		aprint_error_dev(drvp->drv_softc,
    975 		    "invalid checksum %x for READ LOG EXT page %x\n",
    976 		    cksum, page);
    977 		rv = EINVAL;
    978 		goto out;
    979 	}
    980 
    981 	if (tb[0] & WDCC_LOG_NQ) {
    982 		/* not queued command */
    983 		rv = EOPNOTSUPP;
    984 		goto out;
    985 	}
    986 
    987 	*slot = tb[0] & 0x1f;
    988 	*status = tb[2];
    989 	*err = tb[3];
    990 
    991 	KASSERTMSG((*status & WDCS_ERR),
    992 	    "%s: non-error command slot %d reported by READ LOG EXT page %x: "
    993 	    "err %x status %x\n",
    994 	    device_xname(drvp->drv_softc), *slot, page, *err, *status);
    995 
    996 	rv = 0;
    997 
    998 out:
    999 	ata_free_xfer(chp, xfer);
   1000 	return rv;
   1001 }
   1002 
   1003 #if NATA_DMA
   1004 void
   1005 ata_dmaerr(struct ata_drive_datas *drvp, int flags)
   1006 {
   1007 	/*
   1008 	 * Downgrade decision: if we get NERRS_MAX in NXFER.
   1009 	 * We start with n_dmaerrs set to NERRS_MAX-1 so that the
   1010 	 * first error within the first NXFER ops will immediatly trigger
   1011 	 * a downgrade.
   1012 	 * If we got an error and n_xfers is bigger than NXFER reset counters.
   1013 	 */
   1014 	drvp->n_dmaerrs++;
   1015 	if (drvp->n_dmaerrs >= NERRS_MAX && drvp->n_xfers <= NXFER) {
   1016 		ata_downgrade_mode(drvp, flags);
   1017 		drvp->n_dmaerrs = NERRS_MAX-1;
   1018 		drvp->n_xfers = 0;
   1019 		return;
   1020 	}
   1021 	if (drvp->n_xfers > NXFER) {
   1022 		drvp->n_dmaerrs = 1; /* just got an error */
   1023 		drvp->n_xfers = 1; /* restart counting from this error */
   1024 	}
   1025 }
   1026 #endif	/* NATA_DMA */
   1027 
   1028 /*
   1029  * freeze the queue and wait for the controller to be idle. Caller has to
   1030  * unfreeze/restart the queue
   1031  */
   1032 static void
   1033 ata_channel_idle(struct ata_channel *chp)
   1034 {
   1035 	ata_channel_lock(chp);
   1036 	ata_channel_freeze_locked(chp);
   1037 	while (chp->ch_queue->queue_active > 0) {
   1038 		chp->ch_queue->queue_flags |= QF_IDLE_WAIT;
   1039 		cv_timedwait(&chp->ch_queue->queue_idle, &chp->ch_lock, 1);
   1040 	}
   1041 	ata_channel_unlock(chp);
   1042 }
   1043 
   1044 /*
   1045  * Add a command to the queue and start controller.
   1046  *
   1047  * MUST BE CALLED AT splbio()!
   1048  */
   1049 void
   1050 ata_exec_xfer(struct ata_channel *chp, struct ata_xfer *xfer)
   1051 {
   1052 
   1053 	ATADEBUG_PRINT(("ata_exec_xfer %p channel %d drive %d\n", xfer,
   1054 	    chp->ch_channel, xfer->c_drive), DEBUG_XFERS);
   1055 
   1056 	/* complete xfer setup */
   1057 	xfer->c_chp = chp;
   1058 
   1059 	ata_channel_lock(chp);
   1060 
   1061 	/*
   1062 	 * Standard commands are added to the end of command list, but
   1063 	 * recovery commands must be run immediatelly.
   1064 	 */
   1065 	if ((xfer->c_flags & C_RECOVERY) == 0)
   1066 		TAILQ_INSERT_TAIL(&chp->ch_queue->queue_xfer, xfer,
   1067 		    c_xferchain);
   1068 	else
   1069 		TAILQ_INSERT_HEAD(&chp->ch_queue->queue_xfer, xfer,
   1070 		    c_xferchain);
   1071 
   1072 	/*
   1073 	 * if polling and can sleep, wait for the xfer to be at head of queue
   1074 	 */
   1075 	if ((xfer->c_flags & (C_POLL | C_WAIT)) ==  (C_POLL | C_WAIT)) {
   1076 		while (chp->ch_queue->queue_active > 0 ||
   1077 		    TAILQ_FIRST(&chp->ch_queue->queue_xfer) != xfer) {
   1078 			xfer->c_flags |= C_WAITACT;
   1079 			cv_wait(&xfer->c_active, &chp->ch_lock);
   1080 			xfer->c_flags &= ~C_WAITACT;
   1081 
   1082 			/*
   1083 			 * Free xfer now if it there was attempt to free it
   1084 			 * while we were waiting.
   1085 			 */
   1086 			if ((xfer->c_flags & (C_FREE|C_WAITTIMO)) == C_FREE) {
   1087 				ata_channel_unlock(chp);
   1088 
   1089 				ata_free_xfer(chp, xfer);
   1090 				return;
   1091 			}
   1092 		}
   1093 	}
   1094 
   1095 	ata_channel_unlock(chp);
   1096 
   1097 	ATADEBUG_PRINT(("atastart from ata_exec_xfer, flags 0x%x\n",
   1098 	    chp->ch_flags), DEBUG_XFERS);
   1099 	atastart(chp);
   1100 }
   1101 
   1102 /*
   1103  * Start I/O on a controller, for the given channel.
   1104  * The first xfer may be not for our channel if the channel queues
   1105  * are shared.
   1106  *
   1107  * MUST BE CALLED AT splbio()!
   1108  */
   1109 void
   1110 atastart(struct ata_channel *chp)
   1111 {
   1112 	struct atac_softc *atac = chp->ch_atac;
   1113 	struct ata_queue *chq = chp->ch_queue;
   1114 	struct ata_xfer *xfer, *axfer;
   1115 	bool recovery;
   1116 
   1117 #ifdef ATA_DEBUG
   1118 	int spl1, spl2;
   1119 
   1120 	spl1 = splbio();
   1121 	spl2 = splbio();
   1122 	if (spl2 != spl1) {
   1123 		printf("atastart: not at splbio()\n");
   1124 		panic("atastart");
   1125 	}
   1126 	splx(spl2);
   1127 	splx(spl1);
   1128 #endif /* ATA_DEBUG */
   1129 
   1130 	ata_channel_lock(chp);
   1131 
   1132 again:
   1133 	KASSERT(chq->queue_active <= chq->queue_openings);
   1134 	if (chq->queue_active == chq->queue_openings) {
   1135 		ATADEBUG_PRINT(("%s: channel completely busy", __func__),
   1136 		    DEBUG_XFERS);
   1137 		goto out;
   1138 	}
   1139 
   1140 	/* is there a xfer ? */
   1141 	if ((xfer = TAILQ_FIRST(&chp->ch_queue->queue_xfer)) == NULL) {
   1142 		ATADEBUG_PRINT(("%s: queue_xfer is empty", __func__),
   1143 		    DEBUG_XFERS);
   1144 		goto out;
   1145 	}
   1146 
   1147 	recovery = ISSET(xfer->c_flags, C_RECOVERY);
   1148 
   1149 	/* is the queue frozen? */
   1150 	if (__predict_false(!recovery && chq->queue_freeze > 0)) {
   1151 		if (chq->queue_flags & QF_IDLE_WAIT) {
   1152 			chq->queue_flags &= ~QF_IDLE_WAIT;
   1153 			cv_signal(&chp->ch_queue->queue_idle);
   1154 		}
   1155 		ATADEBUG_PRINT(("%s(chp=%p): channel %d drive %d "
   1156 		    "queue frozen: %d (recovery: %d)\n",
   1157 		    __func__, chp, chp->ch_channel, xfer->c_drive,
   1158 		    chq->queue_freeze, recovery),
   1159 		    DEBUG_XFERS);
   1160 		goto out;
   1161 	}
   1162 
   1163 	/* all xfers on same queue must belong to the same channel */
   1164 	KASSERT(xfer->c_chp == chp);
   1165 
   1166 	/*
   1167 	 * Can only take the command if there are no current active
   1168 	 * commands, or if the command is NCQ and the active commands are also
   1169 	 * NCQ. If PM is in use and HBA driver doesn't support/use FIS-based
   1170 	 * switching, can only send commands to single drive.
   1171 	 * Need only check first xfer.
   1172 	 * XXX FIS-based switching - revisit
   1173 	 */
   1174 	if (!recovery && (axfer = TAILQ_FIRST(&chp->ch_queue->active_xfers))) {
   1175 		if (!ISSET(xfer->c_flags, C_NCQ) ||
   1176 		    !ISSET(axfer->c_flags, C_NCQ) ||
   1177 		    xfer->c_drive != axfer->c_drive)
   1178 			goto out;
   1179 	}
   1180 
   1181 	struct ata_drive_datas * const drvp = &chp->ch_drive[xfer->c_drive];
   1182 
   1183 	/*
   1184 	 * if someone is waiting for the command to be active, wake it up
   1185 	 * and let it process the command
   1186 	 */
   1187 	if (xfer->c_flags & C_WAITACT) {
   1188 		ATADEBUG_PRINT(("atastart: xfer %p channel %d drive %d "
   1189 		    "wait active\n", xfer, chp->ch_channel, xfer->c_drive),
   1190 		    DEBUG_XFERS);
   1191 		cv_signal(&xfer->c_active);
   1192 		goto out;
   1193 	}
   1194 
   1195 	if (atac->atac_claim_hw)
   1196 		if (!atac->atac_claim_hw(chp, 0))
   1197 			goto out;
   1198 
   1199 	ATADEBUG_PRINT(("%s(chp=%p): xfer %p channel %d drive %d\n",
   1200 	    __func__, chp, xfer, chp->ch_channel, xfer->c_drive), DEBUG_XFERS);
   1201 	if (drvp->drive_flags & ATA_DRIVE_RESET) {
   1202 		drvp->drive_flags &= ~ATA_DRIVE_RESET;
   1203 		drvp->state = 0;
   1204 	}
   1205 
   1206 	if (ISSET(xfer->c_flags, C_NCQ))
   1207 		SET(chp->ch_flags, ATACH_NCQ);
   1208 	else
   1209 		CLR(chp->ch_flags, ATACH_NCQ);
   1210 
   1211 	ata_activate_xfer_locked(chp, xfer);
   1212 
   1213 	if (atac->atac_cap & ATAC_CAP_NOIRQ)
   1214 		KASSERT(xfer->c_flags & C_POLL);
   1215 
   1216 	switch (ata_xfer_start(xfer)) {
   1217 	case ATASTART_TH:
   1218 	case ATASTART_ABORT:
   1219 		/* don't start any further commands in this case */
   1220 		goto out;
   1221 	default:
   1222 		/* nothing to do */
   1223 		break;
   1224 	}
   1225 
   1226 	/* Queue more commands if possible, but not during recovery */
   1227 	if (!recovery && chq->queue_active < chq->queue_openings)
   1228 		goto again;
   1229 
   1230 out:
   1231 	ata_channel_unlock(chp);
   1232 }
   1233 
   1234 int
   1235 ata_xfer_start(struct ata_xfer *xfer)
   1236 {
   1237 	struct ata_channel *chp = xfer->c_chp;
   1238 	int rv;
   1239 
   1240 	KASSERT(mutex_owned(&chp->ch_lock));
   1241 
   1242 	rv = xfer->c_start(chp, xfer);
   1243 	switch (rv) {
   1244 	case ATASTART_STARTED:
   1245 		/* nothing to do */
   1246 		break;
   1247 	case ATASTART_TH:
   1248 		/* postpone xfer to thread */
   1249 		ata_thread_wake_locked(chp);
   1250 		break;
   1251 	case ATASTART_POLL:
   1252 		/* can happen even in thread context for some ATAPI devices */
   1253 		ata_channel_unlock(chp);
   1254 		KASSERT(xfer->c_poll != NULL);
   1255 		xfer->c_poll(chp, xfer);
   1256 		ata_channel_lock(chp);
   1257 		break;
   1258 	case ATASTART_ABORT:
   1259 		ata_channel_unlock(chp);
   1260 		KASSERT(xfer->c_abort != NULL);
   1261 		xfer->c_abort(chp, xfer);
   1262 		ata_channel_lock(chp);
   1263 		break;
   1264 	}
   1265 
   1266 	return rv;
   1267 }
   1268 
   1269 static void
   1270 ata_activate_xfer_locked(struct ata_channel *chp, struct ata_xfer *xfer)
   1271 {
   1272 	struct ata_queue * const chq = chp->ch_queue;
   1273 
   1274 	KASSERT(mutex_owned(&chp->ch_lock));
   1275 
   1276 	KASSERT(chq->queue_active < chq->queue_openings);
   1277 	KASSERT((chq->active_xfers_used & __BIT(xfer->c_slot)) == 0);
   1278 
   1279 	TAILQ_REMOVE(&chq->queue_xfer, xfer, c_xferchain);
   1280 	if ((xfer->c_flags & C_RECOVERY) == 0)
   1281 		TAILQ_INSERT_TAIL(&chq->active_xfers, xfer, c_activechain);
   1282 	else {
   1283 		/*
   1284 		 * Must go to head, so that ata_queue_get_active_xfer()
   1285 		 * returns the recovery command, and not some other
   1286 		 * random active transfer.
   1287 		 */
   1288 		TAILQ_INSERT_HEAD(&chq->active_xfers, xfer, c_activechain);
   1289 	}
   1290 	chq->active_xfers_used |= __BIT(xfer->c_slot);
   1291 	chq->queue_active++;
   1292 }
   1293 
   1294 void
   1295 ata_deactivate_xfer(struct ata_channel *chp, struct ata_xfer *xfer)
   1296 {
   1297 	struct ata_queue * const chq = chp->ch_queue;
   1298 
   1299 	ata_channel_lock(chp);
   1300 
   1301 	KASSERT(chq->queue_active > 0);
   1302 	KASSERT((chq->active_xfers_used & __BIT(xfer->c_slot)) != 0);
   1303 
   1304 	callout_stop(&xfer->c_timo_callout);
   1305 
   1306 	if (callout_invoking(&xfer->c_timo_callout))
   1307 		xfer->c_flags |= C_WAITTIMO;
   1308 
   1309 	TAILQ_REMOVE(&chq->active_xfers, xfer, c_activechain);
   1310 	chq->active_xfers_used &= ~__BIT(xfer->c_slot);
   1311 	chq->queue_active--;
   1312 
   1313 	ata_channel_unlock(chp);
   1314 }
   1315 
   1316 /*
   1317  * Called in c_intr hook. Must be called before before any deactivations
   1318  * are done - if there is drain pending, it calls c_kill_xfer hook which
   1319  * deactivates the xfer.
   1320  * Calls c_kill_xfer with channel lock free.
   1321  * Returns true if caller should just exit without further processing.
   1322  * Caller must not further access any part of xfer or any related controller
   1323  * structures in that case, it should just return.
   1324  */
   1325 bool
   1326 ata_waitdrain_xfer_check(struct ata_channel *chp, struct ata_xfer *xfer)
   1327 {
   1328 	int drive = xfer->c_drive;
   1329 	bool draining = false;
   1330 
   1331 	ata_channel_lock(chp);
   1332 
   1333 	if (chp->ch_drive[drive].drive_flags & ATA_DRIVE_WAITDRAIN) {
   1334 		ata_channel_unlock(chp);
   1335 
   1336 		(*xfer->c_kill_xfer)(chp, xfer, KILL_GONE);
   1337 
   1338 		ata_channel_lock(chp);
   1339 		chp->ch_drive[drive].drive_flags &= ~ATA_DRIVE_WAITDRAIN;
   1340 		cv_signal(&chp->ch_queue->queue_drain);
   1341 		draining = true;
   1342 	}
   1343 
   1344 	ata_channel_unlock(chp);
   1345 
   1346 	return draining;
   1347 }
   1348 
   1349 /*
   1350  * Check for race of normal transfer handling vs. timeout.
   1351  */
   1352 bool
   1353 ata_timo_xfer_check(struct ata_xfer *xfer)
   1354 {
   1355 	struct ata_channel *chp = xfer->c_chp;
   1356 	struct ata_drive_datas *drvp = &chp->ch_drive[xfer->c_drive];
   1357 
   1358 	ata_channel_lock(chp);
   1359 
   1360 	callout_ack(&xfer->c_timo_callout);
   1361 
   1362 	if (xfer->c_flags & C_WAITTIMO) {
   1363 		xfer->c_flags &= ~C_WAITTIMO;
   1364 
   1365 		/* Handle race vs. ata_free_xfer() */
   1366 		if (xfer->c_flags & C_FREE) {
   1367 			xfer->c_flags &= ~C_FREE;
   1368 			ata_channel_unlock(chp);
   1369 
   1370 	    		aprint_normal_dev(drvp->drv_softc,
   1371 			    "xfer %d freed while invoking timeout\n",
   1372 			    xfer->c_slot);
   1373 
   1374 			ata_free_xfer(chp, xfer);
   1375 			return true;
   1376 		}
   1377 
   1378 		/* Race vs. callout_stop() in ata_deactivate_xfer() */
   1379 		ata_channel_unlock(chp);
   1380 
   1381 	    	aprint_normal_dev(drvp->drv_softc,
   1382 		    "xfer %d deactivated while invoking timeout\n",
   1383 		    xfer->c_slot);
   1384 		return true;
   1385 	}
   1386 
   1387 	ata_channel_unlock(chp);
   1388 
   1389 	/* No race, proceed with timeout handling */
   1390 	return false;
   1391 }
   1392 
   1393 void
   1394 ata_timeout(void *v)
   1395 {
   1396 	struct ata_xfer *xfer = v;
   1397 	int s;
   1398 
   1399 	ATADEBUG_PRINT(("%s: slot %d\n", __func__, xfer->c_slot),
   1400 	    DEBUG_FUNCS|DEBUG_XFERS);
   1401 
   1402 	s = splbio();				/* XXX MPSAFE */
   1403 
   1404 	if (ata_timo_xfer_check(xfer)) {
   1405 		/* Already logged */
   1406 		goto out;
   1407 	}
   1408 
   1409 	/* Mark as timed out. Do not print anything, wd(4) will. */
   1410 	xfer->c_flags |= C_TIMEOU;
   1411 	xfer->c_intr(xfer->c_chp, xfer, 0);
   1412 
   1413 out:
   1414 	splx(s);
   1415 }
   1416 
   1417 /*
   1418  * Kill off all active xfers for a ata_channel.
   1419  *
   1420  * Must be called with channel lock held.
   1421  */
   1422 void
   1423 ata_kill_active(struct ata_channel *chp, int reason, int flags)
   1424 {
   1425 	struct ata_queue * const chq = chp->ch_queue;
   1426 	struct ata_xfer *xfer, *xfernext;
   1427 
   1428 	KASSERT(mutex_owned(&chp->ch_lock));
   1429 
   1430 	TAILQ_FOREACH_SAFE(xfer, &chq->active_xfers, c_activechain, xfernext) {
   1431 		(*xfer->c_kill_xfer)(xfer->c_chp, xfer, reason);
   1432 	}
   1433 
   1434 	if (flags & AT_RST_EMERG)
   1435 		ata_queue_reset(chq);
   1436 }
   1437 
   1438 /*
   1439  * Kill off all pending xfers for a drive.
   1440  */
   1441 void
   1442 ata_kill_pending(struct ata_drive_datas *drvp)
   1443 {
   1444 	struct ata_channel * const chp = drvp->chnl_softc;
   1445 	struct ata_queue * const chq = chp->ch_queue;
   1446 	struct ata_xfer *xfer, *xfernext;
   1447 
   1448 	ata_channel_lock(chp);
   1449 
   1450 	/* Kill all pending transfers */
   1451 	TAILQ_FOREACH_SAFE(xfer, &chq->queue_xfer, c_xferchain, xfernext) {
   1452 		KASSERT(xfer->c_chp == chp);
   1453 
   1454 		if (xfer->c_drive != drvp->drive)
   1455 			continue;
   1456 
   1457 		TAILQ_REMOVE(&chp->ch_queue->queue_xfer, xfer, c_xferchain);
   1458 
   1459 		/*
   1460 		 * Keep the lock, so that we get deadlock (and 'locking against
   1461 		 * myself' with LOCKDEBUG), instead of silent
   1462 		 * data corruption, if the hook tries to call back into
   1463 		 * middle layer for inactive xfer.
   1464 		 */
   1465 		(*xfer->c_kill_xfer)(chp, xfer, KILL_GONE_INACTIVE);
   1466 	}
   1467 
   1468 	/* Wait until all active transfers on the drive finish */
   1469 	while (chq->queue_active > 0) {
   1470 		bool drv_active = false;
   1471 
   1472 		TAILQ_FOREACH(xfer, &chq->active_xfers, c_activechain) {
   1473 			KASSERT(xfer->c_chp == chp);
   1474 
   1475 			if (xfer->c_drive == drvp->drive) {
   1476 				drv_active = true;
   1477 				break;
   1478 			}
   1479 		}
   1480 
   1481 		if (!drv_active) {
   1482 			/* all finished */
   1483 			break;
   1484 		}
   1485 
   1486 		drvp->drive_flags |= ATA_DRIVE_WAITDRAIN;
   1487 		cv_wait(&chq->queue_drain, &chp->ch_lock);
   1488 	}
   1489 
   1490 	ata_channel_unlock(chp);
   1491 }
   1492 
   1493 static void
   1494 ata_channel_freeze_locked(struct ata_channel *chp)
   1495 {
   1496 	chp->ch_queue->queue_freeze++;
   1497 
   1498 	ATADEBUG_PRINT(("%s(chp=%p) -> %d\n", __func__, chp,
   1499 	    chp->ch_queue->queue_freeze), DEBUG_FUNCS | DEBUG_XFERS);
   1500 }
   1501 
   1502 void
   1503 ata_channel_freeze(struct ata_channel *chp)
   1504 {
   1505 	ata_channel_lock(chp);
   1506 	ata_channel_freeze_locked(chp);
   1507 	ata_channel_unlock(chp);
   1508 }
   1509 
   1510 static void
   1511 ata_channel_thaw_locked(struct ata_channel *chp)
   1512 {
   1513 	KASSERT(mutex_owned(&chp->ch_lock));
   1514 	KASSERT(chp->ch_queue->queue_freeze > 0);
   1515 
   1516 	chp->ch_queue->queue_freeze--;
   1517 
   1518 	ATADEBUG_PRINT(("%s(chp=%p) -> %d\n", __func__, chp,
   1519 	    chp->ch_queue->queue_freeze), DEBUG_FUNCS | DEBUG_XFERS);
   1520 }
   1521 
   1522 void
   1523 ata_channel_thaw(struct ata_channel *chp)
   1524 {
   1525 	ata_channel_lock(chp);
   1526 	ata_channel_thaw_locked(chp);
   1527 	ata_channel_unlock(chp);
   1528 }
   1529 
   1530 /*
   1531  * ata_reset_channel:
   1532  *
   1533  *	Reset and ATA channel.
   1534  *
   1535  *	MUST BE CALLED AT splbio()!
   1536  */
   1537 void
   1538 ata_reset_channel(struct ata_channel *chp, int flags)
   1539 {
   1540 	struct atac_softc *atac = chp->ch_atac;
   1541 	int drive;
   1542 	bool threset = false;
   1543 
   1544 #ifdef ATA_DEBUG
   1545 	int spl1, spl2;
   1546 
   1547 	spl1 = splbio();
   1548 	spl2 = splbio();
   1549 	if (spl2 != spl1) {
   1550 		printf("ata_reset_channel: not at splbio()\n");
   1551 		panic("ata_reset_channel");
   1552 	}
   1553 	splx(spl2);
   1554 	splx(spl1);
   1555 #endif /* ATA_DEBUG */
   1556 
   1557 	ata_channel_lock(chp);
   1558 
   1559 	/*
   1560 	 * If we can poll or wait it's OK, otherwise wake up the
   1561 	 * kernel thread to do it for us.
   1562 	 */
   1563 	ATADEBUG_PRINT(("ata_reset_channel flags 0x%x ch_flags 0x%x\n",
   1564 	    flags, chp->ch_flags), DEBUG_FUNCS | DEBUG_XFERS);
   1565 	if ((flags & (AT_POLL | AT_WAIT)) == 0) {
   1566 		if (chp->ch_flags & ATACH_TH_RESET) {
   1567 			/* No need to schedule a reset more than one time. */
   1568 			ata_channel_unlock(chp);
   1569 			return;
   1570 		}
   1571 
   1572 		/*
   1573 		 * Block execution of other commands while reset is scheduled
   1574 		 * to a thread.
   1575 		 */
   1576 		ata_channel_freeze_locked(chp);
   1577 		chp->ch_flags |= ATACH_TH_RESET;
   1578 		chp->ch_reset_flags = flags & AT_RST_EMERG;
   1579 		cv_signal(&chp->ch_thr_idle);
   1580 		ata_channel_unlock(chp);
   1581 		return;
   1582 	}
   1583 
   1584 	/* Block execution of other commands during reset */
   1585 	ata_channel_freeze_locked(chp);
   1586 
   1587 	/*
   1588 	 * If reset has been scheduled to a thread, then clear
   1589 	 * the flag now so that the thread won't try to execute it if
   1590 	 * we happen to sleep, and thaw one more time after the reset.
   1591 	 */
   1592 	if (chp->ch_flags & ATACH_TH_RESET) {
   1593 		chp->ch_flags &= ~ATACH_TH_RESET;
   1594 		threset = true;
   1595 	}
   1596 
   1597 	ata_channel_unlock(chp);
   1598 
   1599 	(*atac->atac_bustype_ata->ata_reset_channel)(chp, flags);
   1600 
   1601 	ata_channel_lock(chp);
   1602 	KASSERT(chp->ch_ndrives == 0 || chp->ch_drive != NULL);
   1603 	for (drive = 0; drive < chp->ch_ndrives; drive++)
   1604 		chp->ch_drive[drive].state = 0;
   1605 
   1606 	/*
   1607 	 * Thaw one extra time to clear the freeze done when the reset has
   1608 	 * been scheduled to the thread.
   1609 	 */
   1610 	if (threset)
   1611 		ata_channel_thaw_locked(chp);
   1612 
   1613 	/* Allow commands to run again */
   1614 	ata_channel_thaw_locked(chp);
   1615 
   1616 	/* Signal the thread in case there is an xfer to run */
   1617 	cv_signal(&chp->ch_thr_idle);
   1618 
   1619 	ata_channel_unlock(chp);
   1620 
   1621 	if (flags & AT_RST_EMERG) {
   1622 		/* make sure that we can use polled commands */
   1623 		ata_queue_reset(chp->ch_queue);
   1624 	} else {
   1625 		atastart(chp);
   1626 	}
   1627 }
   1628 
   1629 int
   1630 ata_addref(struct ata_channel *chp)
   1631 {
   1632 	struct atac_softc *atac = chp->ch_atac;
   1633 	struct scsipi_adapter *adapt = &atac->atac_atapi_adapter._generic;
   1634 	int s, error = 0;
   1635 
   1636 	s = splbio();
   1637 	if (adapt->adapt_refcnt++ == 0 &&
   1638 	    adapt->adapt_enable != NULL) {
   1639 		error = (*adapt->adapt_enable)(atac->atac_dev, 1);
   1640 		if (error)
   1641 			adapt->adapt_refcnt--;
   1642 	}
   1643 	splx(s);
   1644 	return (error);
   1645 }
   1646 
   1647 void
   1648 ata_delref(struct ata_channel *chp)
   1649 {
   1650 	struct atac_softc *atac = chp->ch_atac;
   1651 	struct scsipi_adapter *adapt = &atac->atac_atapi_adapter._generic;
   1652 	int s;
   1653 
   1654 	s = splbio();
   1655 	if (adapt->adapt_refcnt-- == 1 &&
   1656 	    adapt->adapt_enable != NULL)
   1657 		(void) (*adapt->adapt_enable)(atac->atac_dev, 0);
   1658 	splx(s);
   1659 }
   1660 
   1661 void
   1662 ata_print_modes(struct ata_channel *chp)
   1663 {
   1664 	struct atac_softc *atac = chp->ch_atac;
   1665 	int drive;
   1666 	struct ata_drive_datas *drvp;
   1667 
   1668 	KASSERT(chp->ch_ndrives == 0 || chp->ch_drive != NULL);
   1669 	for (drive = 0; drive < chp->ch_ndrives; drive++) {
   1670 		drvp = &chp->ch_drive[drive];
   1671 		if (drvp->drive_type == ATA_DRIVET_NONE ||
   1672 		    drvp->drv_softc == NULL)
   1673 			continue;
   1674 		aprint_verbose("%s(%s:%d:%d): using PIO mode %d",
   1675 			device_xname(drvp->drv_softc),
   1676 			device_xname(atac->atac_dev),
   1677 			chp->ch_channel, drvp->drive, drvp->PIO_mode);
   1678 #if NATA_DMA
   1679 		if (drvp->drive_flags & ATA_DRIVE_DMA)
   1680 			aprint_verbose(", DMA mode %d", drvp->DMA_mode);
   1681 #if NATA_UDMA
   1682 		if (drvp->drive_flags & ATA_DRIVE_UDMA) {
   1683 			aprint_verbose(", Ultra-DMA mode %d", drvp->UDMA_mode);
   1684 			if (drvp->UDMA_mode == 2)
   1685 				aprint_verbose(" (Ultra/33)");
   1686 			else if (drvp->UDMA_mode == 4)
   1687 				aprint_verbose(" (Ultra/66)");
   1688 			else if (drvp->UDMA_mode == 5)
   1689 				aprint_verbose(" (Ultra/100)");
   1690 			else if (drvp->UDMA_mode == 6)
   1691 				aprint_verbose(" (Ultra/133)");
   1692 		}
   1693 #endif	/* NATA_UDMA */
   1694 #endif	/* NATA_DMA */
   1695 #if NATA_DMA || NATA_PIOBM
   1696 		if (0
   1697 #if NATA_DMA
   1698 		    || (drvp->drive_flags & (ATA_DRIVE_DMA | ATA_DRIVE_UDMA))
   1699 #endif
   1700 #if NATA_PIOBM
   1701 		    /* PIOBM capable controllers use DMA for PIO commands */
   1702 		    || (atac->atac_cap & ATAC_CAP_PIOBM)
   1703 #endif
   1704 		    )
   1705 			aprint_verbose(" (using DMA)");
   1706 
   1707 		if (drvp->drive_flags & ATA_DRIVE_NCQ) {
   1708 			aprint_verbose(", NCQ (%d tags)%s",
   1709 			    ATA_REAL_OPENINGS(chp->ch_queue->queue_openings),
   1710 			    (drvp->drive_flags & ATA_DRIVE_NCQ_PRIO)
   1711 			    ? " w/PRIO" : "");
   1712 		} else if (drvp->drive_flags & ATA_DRIVE_WFUA)
   1713 			aprint_verbose(", WRITE DMA FUA EXT");
   1714 
   1715 #endif	/* NATA_DMA || NATA_PIOBM */
   1716 		aprint_verbose("\n");
   1717 	}
   1718 }
   1719 
   1720 #if NATA_DMA
   1721 /*
   1722  * downgrade the transfer mode of a drive after an error. return 1 if
   1723  * downgrade was possible, 0 otherwise.
   1724  *
   1725  * MUST BE CALLED AT splbio()!
   1726  */
   1727 int
   1728 ata_downgrade_mode(struct ata_drive_datas *drvp, int flags)
   1729 {
   1730 	struct ata_channel *chp = drvp->chnl_softc;
   1731 	struct atac_softc *atac = chp->ch_atac;
   1732 	device_t drv_dev = drvp->drv_softc;
   1733 	int cf_flags = device_cfdata(drv_dev)->cf_flags;
   1734 
   1735 	/* if drive or controller don't know its mode, we can't do much */
   1736 	if ((drvp->drive_flags & ATA_DRIVE_MODE) == 0 ||
   1737 	    (atac->atac_set_modes == NULL))
   1738 		return 0;
   1739 	/* current drive mode was set by a config flag, let it this way */
   1740 	if ((cf_flags & ATA_CONFIG_PIO_SET) ||
   1741 	    (cf_flags & ATA_CONFIG_DMA_SET) ||
   1742 	    (cf_flags & ATA_CONFIG_UDMA_SET))
   1743 		return 0;
   1744 
   1745 #if NATA_UDMA
   1746 	/*
   1747 	 * If we were using Ultra-DMA mode, downgrade to the next lower mode.
   1748 	 */
   1749 	if ((drvp->drive_flags & ATA_DRIVE_UDMA) && drvp->UDMA_mode >= 2) {
   1750 		drvp->UDMA_mode--;
   1751 		aprint_error_dev(drv_dev,
   1752 		    "transfer error, downgrading to Ultra-DMA mode %d\n",
   1753 		    drvp->UDMA_mode);
   1754 	}
   1755 #endif
   1756 
   1757 	/*
   1758 	 * If we were using ultra-DMA, don't downgrade to multiword DMA.
   1759 	 */
   1760 	else if (drvp->drive_flags & (ATA_DRIVE_DMA | ATA_DRIVE_UDMA)) {
   1761 		drvp->drive_flags &= ~(ATA_DRIVE_DMA | ATA_DRIVE_UDMA);
   1762 		drvp->PIO_mode = drvp->PIO_cap;
   1763 		aprint_error_dev(drv_dev,
   1764 		    "transfer error, downgrading to PIO mode %d\n",
   1765 		    drvp->PIO_mode);
   1766 	} else /* already using PIO, can't downgrade */
   1767 		return 0;
   1768 
   1769 	(*atac->atac_set_modes)(chp);
   1770 	ata_print_modes(chp);
   1771 	/* reset the channel, which will schedule all drives for setup */
   1772 	ata_reset_channel(chp, flags);
   1773 	return 1;
   1774 }
   1775 #endif	/* NATA_DMA */
   1776 
   1777 /*
   1778  * Probe drive's capabilities, for use by the controller later
   1779  * Assumes drvp points to an existing drive.
   1780  */
   1781 void
   1782 ata_probe_caps(struct ata_drive_datas *drvp)
   1783 {
   1784 	struct ataparams params, params2;
   1785 	struct ata_channel *chp = drvp->chnl_softc;
   1786 	struct atac_softc *atac = chp->ch_atac;
   1787 	device_t drv_dev = drvp->drv_softc;
   1788 	int i, printed = 0;
   1789 	const char *sep = "";
   1790 	int cf_flags;
   1791 
   1792 	if (ata_get_params(drvp, AT_WAIT, &params) != CMD_OK) {
   1793 		/* IDENTIFY failed. Can't tell more about the device */
   1794 		return;
   1795 	}
   1796 	if ((atac->atac_cap & (ATAC_CAP_DATA16 | ATAC_CAP_DATA32)) ==
   1797 	    (ATAC_CAP_DATA16 | ATAC_CAP_DATA32)) {
   1798 		/*
   1799 		 * Controller claims 16 and 32 bit transfers.
   1800 		 * Re-do an IDENTIFY with 32-bit transfers,
   1801 		 * and compare results.
   1802 		 */
   1803 		ata_channel_lock(chp);
   1804 		drvp->drive_flags |= ATA_DRIVE_CAP32;
   1805 		ata_channel_unlock(chp);
   1806 		ata_get_params(drvp, AT_WAIT, &params2);
   1807 		if (memcmp(&params, &params2, sizeof(struct ataparams)) != 0) {
   1808 			/* Not good. fall back to 16bits */
   1809 			ata_channel_lock(chp);
   1810 			drvp->drive_flags &= ~ATA_DRIVE_CAP32;
   1811 			ata_channel_unlock(chp);
   1812 		} else {
   1813 			aprint_verbose_dev(drv_dev, "32-bit data port\n");
   1814 		}
   1815 	}
   1816 #if 0 /* Some ultra-DMA drives claims to only support ATA-3. sigh */
   1817 	if (params.atap_ata_major > 0x01 &&
   1818 	    params.atap_ata_major != 0xffff) {
   1819 		for (i = 14; i > 0; i--) {
   1820 			if (params.atap_ata_major & (1 << i)) {
   1821 				aprint_verbose_dev(drv_dev,
   1822 				    "ATA version %d\n", i);
   1823 				drvp->ata_vers = i;
   1824 				break;
   1825 			}
   1826 		}
   1827 	}
   1828 #endif
   1829 
   1830 	/* An ATAPI device is at last PIO mode 3 */
   1831 	if (drvp->drive_type == ATA_DRIVET_ATAPI)
   1832 		drvp->PIO_mode = 3;
   1833 
   1834 	/*
   1835 	 * It's not in the specs, but it seems that some drive
   1836 	 * returns 0xffff in atap_extensions when this field is invalid
   1837 	 */
   1838 	if (params.atap_extensions != 0xffff &&
   1839 	    (params.atap_extensions & WDC_EXT_MODES)) {
   1840 		/*
   1841 		 * XXX some drives report something wrong here (they claim to
   1842 		 * support PIO mode 8 !). As mode is coded on 3 bits in
   1843 		 * SET FEATURE, limit it to 7 (so limit i to 4).
   1844 		 * If higher mode than 7 is found, abort.
   1845 		 */
   1846 		for (i = 7; i >= 0; i--) {
   1847 			if ((params.atap_piomode_supp & (1 << i)) == 0)
   1848 				continue;
   1849 			if (i > 4)
   1850 				return;
   1851 			/*
   1852 			 * See if mode is accepted.
   1853 			 * If the controller can't set its PIO mode,
   1854 			 * assume the defaults are good, so don't try
   1855 			 * to set it
   1856 			 */
   1857 			if (atac->atac_set_modes)
   1858 				/*
   1859 				 * It's OK to pool here, it's fast enough
   1860 				 * to not bother waiting for interrupt
   1861 				 */
   1862 				if (ata_set_mode(drvp, 0x08 | (i + 3),
   1863 				   AT_WAIT) != CMD_OK)
   1864 					continue;
   1865 			if (!printed) {
   1866 				aprint_verbose_dev(drv_dev,
   1867 				    "drive supports PIO mode %d", i + 3);
   1868 				sep = ",";
   1869 				printed = 1;
   1870 			}
   1871 			/*
   1872 			 * If controller's driver can't set its PIO mode,
   1873 			 * get the highter one for the drive.
   1874 			 */
   1875 			if (atac->atac_set_modes == NULL ||
   1876 			    atac->atac_pio_cap >= i + 3) {
   1877 				drvp->PIO_mode = i + 3;
   1878 				drvp->PIO_cap = i + 3;
   1879 				break;
   1880 			}
   1881 		}
   1882 		if (!printed) {
   1883 			/*
   1884 			 * We didn't find a valid PIO mode.
   1885 			 * Assume the values returned for DMA are buggy too
   1886 			 */
   1887 			return;
   1888 		}
   1889 		ata_channel_lock(chp);
   1890 		drvp->drive_flags |= ATA_DRIVE_MODE;
   1891 		ata_channel_unlock(chp);
   1892 		printed = 0;
   1893 		for (i = 7; i >= 0; i--) {
   1894 			if ((params.atap_dmamode_supp & (1 << i)) == 0)
   1895 				continue;
   1896 #if NATA_DMA
   1897 			if ((atac->atac_cap & ATAC_CAP_DMA) &&
   1898 			    atac->atac_set_modes != NULL)
   1899 				if (ata_set_mode(drvp, 0x20 | i, AT_WAIT)
   1900 				    != CMD_OK)
   1901 					continue;
   1902 #endif
   1903 			if (!printed) {
   1904 				aprint_verbose("%s DMA mode %d", sep, i);
   1905 				sep = ",";
   1906 				printed = 1;
   1907 			}
   1908 #if NATA_DMA
   1909 			if (atac->atac_cap & ATAC_CAP_DMA) {
   1910 				if (atac->atac_set_modes != NULL &&
   1911 				    atac->atac_dma_cap < i)
   1912 					continue;
   1913 				drvp->DMA_mode = i;
   1914 				drvp->DMA_cap = i;
   1915 				ata_channel_lock(chp);
   1916 				drvp->drive_flags |= ATA_DRIVE_DMA;
   1917 				ata_channel_unlock(chp);
   1918 			}
   1919 #endif
   1920 			break;
   1921 		}
   1922 		if (params.atap_extensions & WDC_EXT_UDMA_MODES) {
   1923 			printed = 0;
   1924 			for (i = 7; i >= 0; i--) {
   1925 				if ((params.atap_udmamode_supp & (1 << i))
   1926 				    == 0)
   1927 					continue;
   1928 #if NATA_UDMA
   1929 				if (atac->atac_set_modes != NULL &&
   1930 				    (atac->atac_cap & ATAC_CAP_UDMA))
   1931 					if (ata_set_mode(drvp, 0x40 | i,
   1932 					    AT_WAIT) != CMD_OK)
   1933 						continue;
   1934 #endif
   1935 				if (!printed) {
   1936 					aprint_verbose("%s Ultra-DMA mode %d",
   1937 					    sep, i);
   1938 					if (i == 2)
   1939 						aprint_verbose(" (Ultra/33)");
   1940 					else if (i == 4)
   1941 						aprint_verbose(" (Ultra/66)");
   1942 					else if (i == 5)
   1943 						aprint_verbose(" (Ultra/100)");
   1944 					else if (i == 6)
   1945 						aprint_verbose(" (Ultra/133)");
   1946 					sep = ",";
   1947 					printed = 1;
   1948 				}
   1949 #if NATA_UDMA
   1950 				if (atac->atac_cap & ATAC_CAP_UDMA) {
   1951 					if (atac->atac_set_modes != NULL &&
   1952 					    atac->atac_udma_cap < i)
   1953 						continue;
   1954 					drvp->UDMA_mode = i;
   1955 					drvp->UDMA_cap = i;
   1956 					ata_channel_lock(chp);
   1957 					drvp->drive_flags |= ATA_DRIVE_UDMA;
   1958 					ata_channel_unlock(chp);
   1959 				}
   1960 #endif
   1961 				break;
   1962 			}
   1963 		}
   1964 	}
   1965 
   1966 	ata_channel_lock(chp);
   1967 	drvp->drive_flags &= ~ATA_DRIVE_NOSTREAM;
   1968 	if (drvp->drive_type == ATA_DRIVET_ATAPI) {
   1969 		if (atac->atac_cap & ATAC_CAP_ATAPI_NOSTREAM)
   1970 			drvp->drive_flags |= ATA_DRIVE_NOSTREAM;
   1971 	} else {
   1972 		if (atac->atac_cap & ATAC_CAP_ATA_NOSTREAM)
   1973 			drvp->drive_flags |= ATA_DRIVE_NOSTREAM;
   1974 	}
   1975 	ata_channel_unlock(chp);
   1976 
   1977 	/* Try to guess ATA version here, if it didn't get reported */
   1978 	if (drvp->ata_vers == 0) {
   1979 #if NATA_UDMA
   1980 		if (drvp->drive_flags & ATA_DRIVE_UDMA)
   1981 			drvp->ata_vers = 4; /* should be at last ATA-4 */
   1982 		else
   1983 #endif
   1984 		if (drvp->PIO_cap > 2)
   1985 			drvp->ata_vers = 2; /* should be at last ATA-2 */
   1986 	}
   1987 	cf_flags = device_cfdata(drv_dev)->cf_flags;
   1988 	if (cf_flags & ATA_CONFIG_PIO_SET) {
   1989 		ata_channel_lock(chp);
   1990 		drvp->PIO_mode =
   1991 		    (cf_flags & ATA_CONFIG_PIO_MODES) >> ATA_CONFIG_PIO_OFF;
   1992 		drvp->drive_flags |= ATA_DRIVE_MODE;
   1993 		ata_channel_unlock(chp);
   1994 	}
   1995 #if NATA_DMA
   1996 	if ((atac->atac_cap & ATAC_CAP_DMA) == 0) {
   1997 		/* don't care about DMA modes */
   1998 		return;
   1999 	}
   2000 	if (cf_flags & ATA_CONFIG_DMA_SET) {
   2001 		ata_channel_lock(chp);
   2002 		if ((cf_flags & ATA_CONFIG_DMA_MODES) ==
   2003 		    ATA_CONFIG_DMA_DISABLE) {
   2004 			drvp->drive_flags &= ~ATA_DRIVE_DMA;
   2005 		} else {
   2006 			drvp->DMA_mode = (cf_flags & ATA_CONFIG_DMA_MODES) >>
   2007 			    ATA_CONFIG_DMA_OFF;
   2008 			drvp->drive_flags |= ATA_DRIVE_DMA | ATA_DRIVE_MODE;
   2009 		}
   2010 		ata_channel_unlock(chp);
   2011 	}
   2012 
   2013 	/*
   2014 	 * Probe WRITE DMA FUA EXT. Support is mandatory for devices
   2015 	 * supporting LBA48, but nevertheless confirm with the feature flag.
   2016 	 */
   2017 	if (drvp->drive_flags & ATA_DRIVE_DMA) {
   2018 		if ((params.atap_cmd2_en & ATA_CMD2_LBA48) != 0
   2019 		    && (params.atap_cmd_def & ATA_CMDE_WFE)) {
   2020 			drvp->drive_flags |= ATA_DRIVE_WFUA;
   2021 			aprint_verbose("%s WRITE DMA FUA", sep);
   2022 			sep = ",";
   2023 		}
   2024 	}
   2025 
   2026 	/* Probe NCQ support - READ/WRITE FPDMA QUEUED command support */
   2027 	ata_channel_lock(chp);
   2028 	drvp->drv_openings = 1;
   2029 	if (params.atap_sata_caps & SATA_NATIVE_CMDQ) {
   2030 		if (atac->atac_cap & ATAC_CAP_NCQ)
   2031 			drvp->drive_flags |= ATA_DRIVE_NCQ;
   2032 		drvp->drv_openings =
   2033 		    (params.atap_queuedepth & WDC_QUEUE_DEPTH_MASK) + 1;
   2034 		aprint_verbose("%s NCQ (%d tags)", sep, drvp->drv_openings);
   2035 		sep = ",";
   2036 
   2037 		if (params.atap_sata_caps & SATA_NCQ_PRIO) {
   2038 			drvp->drive_flags |= ATA_DRIVE_NCQ_PRIO;
   2039 			aprint_verbose(" w/PRIO");
   2040 		}
   2041 	}
   2042 	ata_channel_unlock(chp);
   2043 
   2044 	if (printed)
   2045 		aprint_verbose("\n");
   2046 
   2047 #if NATA_UDMA
   2048 	if ((atac->atac_cap & ATAC_CAP_UDMA) == 0) {
   2049 		/* don't care about UDMA modes */
   2050 		return;
   2051 	}
   2052 	if (cf_flags & ATA_CONFIG_UDMA_SET) {
   2053 		ata_channel_lock(chp);
   2054 		if ((cf_flags & ATA_CONFIG_UDMA_MODES) ==
   2055 		    ATA_CONFIG_UDMA_DISABLE) {
   2056 			drvp->drive_flags &= ~ATA_DRIVE_UDMA;
   2057 		} else {
   2058 			drvp->UDMA_mode = (cf_flags & ATA_CONFIG_UDMA_MODES) >>
   2059 			    ATA_CONFIG_UDMA_OFF;
   2060 			drvp->drive_flags |= ATA_DRIVE_UDMA | ATA_DRIVE_MODE;
   2061 		}
   2062 		ata_channel_unlock(chp);
   2063 	}
   2064 #endif	/* NATA_UDMA */
   2065 #endif	/* NATA_DMA */
   2066 }
   2067 
   2068 /* management of the /dev/atabus* devices */
   2069 int
   2070 atabusopen(dev_t dev, int flag, int fmt, struct lwp *l)
   2071 {
   2072 	struct atabus_softc *sc;
   2073 	int error;
   2074 
   2075 	sc = device_lookup_private(&atabus_cd, minor(dev));
   2076 	if (sc == NULL)
   2077 		return (ENXIO);
   2078 
   2079 	if (sc->sc_flags & ATABUSCF_OPEN)
   2080 		return (EBUSY);
   2081 
   2082 	if ((error = ata_addref(sc->sc_chan)) != 0)
   2083 		return (error);
   2084 
   2085 	sc->sc_flags |= ATABUSCF_OPEN;
   2086 
   2087 	return (0);
   2088 }
   2089 
   2090 
   2091 int
   2092 atabusclose(dev_t dev, int flag, int fmt, struct lwp *l)
   2093 {
   2094 	struct atabus_softc *sc =
   2095 	    device_lookup_private(&atabus_cd, minor(dev));
   2096 
   2097 	ata_delref(sc->sc_chan);
   2098 
   2099 	sc->sc_flags &= ~ATABUSCF_OPEN;
   2100 
   2101 	return (0);
   2102 }
   2103 
   2104 int
   2105 atabusioctl(dev_t dev, u_long cmd, void *addr, int flag, struct lwp *l)
   2106 {
   2107 	struct atabus_softc *sc =
   2108 	    device_lookup_private(&atabus_cd, minor(dev));
   2109 	struct ata_channel *chp = sc->sc_chan;
   2110 	int min_drive, max_drive, drive;
   2111 	int error;
   2112 	int s;
   2113 
   2114 	/*
   2115 	 * Enforce write permission for ioctls that change the
   2116 	 * state of the bus.  Host adapter specific ioctls must
   2117 	 * be checked by the adapter driver.
   2118 	 */
   2119 	switch (cmd) {
   2120 	case ATABUSIOSCAN:
   2121 	case ATABUSIODETACH:
   2122 	case ATABUSIORESET:
   2123 		if ((flag & FWRITE) == 0)
   2124 			return (EBADF);
   2125 	}
   2126 
   2127 	switch (cmd) {
   2128 	case ATABUSIORESET:
   2129 		s = splbio();
   2130 		ata_reset_channel(sc->sc_chan, AT_WAIT | AT_POLL);
   2131 		splx(s);
   2132 		return 0;
   2133 	case ATABUSIOSCAN:
   2134 	{
   2135 #if 0
   2136 		struct atabusioscan_args *a=
   2137 		    (struct atabusioscan_args *)addr;
   2138 #endif
   2139 		if ((chp->ch_drive[0].drive_type == ATA_DRIVET_OLD) ||
   2140 		    (chp->ch_drive[1].drive_type == ATA_DRIVET_OLD))
   2141 			return (EOPNOTSUPP);
   2142 		return (EOPNOTSUPP);
   2143 	}
   2144 	case ATABUSIODETACH:
   2145 	{
   2146 		struct atabusiodetach_args *a=
   2147 		    (struct atabusiodetach_args *)addr;
   2148 		if ((chp->ch_drive[0].drive_type == ATA_DRIVET_OLD) ||
   2149 		    (chp->ch_drive[1].drive_type == ATA_DRIVET_OLD))
   2150 			return (EOPNOTSUPP);
   2151 		switch (a->at_dev) {
   2152 		case -1:
   2153 			min_drive = 0;
   2154 			max_drive = 1;
   2155 			break;
   2156 		case 0:
   2157 		case 1:
   2158 			min_drive = max_drive = a->at_dev;
   2159 			break;
   2160 		default:
   2161 			return (EINVAL);
   2162 		}
   2163 		for (drive = min_drive; drive <= max_drive; drive++) {
   2164 			if (chp->ch_drive[drive].drv_softc != NULL) {
   2165 				error = config_detach(
   2166 				    chp->ch_drive[drive].drv_softc, 0);
   2167 				if (error)
   2168 					return (error);
   2169 				KASSERT(chp->ch_drive[drive].drv_softc == NULL);
   2170 			}
   2171 		}
   2172 		return 0;
   2173 	}
   2174 	default:
   2175 		return ENOTTY;
   2176 	}
   2177 }
   2178 
   2179 static bool
   2180 atabus_suspend(device_t dv, const pmf_qual_t *qual)
   2181 {
   2182 	struct atabus_softc *sc = device_private(dv);
   2183 	struct ata_channel *chp = sc->sc_chan;
   2184 
   2185 	ata_channel_idle(chp);
   2186 
   2187 	return true;
   2188 }
   2189 
   2190 static bool
   2191 atabus_resume(device_t dv, const pmf_qual_t *qual)
   2192 {
   2193 	struct atabus_softc *sc = device_private(dv);
   2194 	struct ata_channel *chp = sc->sc_chan;
   2195 
   2196 	/*
   2197 	 * XXX joerg: with wdc, the first channel unfreezes the controler.
   2198 	 * Move this the reset and queue idling into wdc.
   2199 	 */
   2200 	ata_channel_lock(chp);
   2201 	if (chp->ch_queue->queue_freeze == 0) {
   2202 		ata_channel_unlock(chp);
   2203 		goto out;
   2204 	}
   2205 
   2206 	/* unfreeze the queue and reset drives */
   2207 	ata_channel_thaw_locked(chp);
   2208 
   2209 	ata_channel_unlock(chp);
   2210 
   2211 	/* reset channel only if there are drives attached */
   2212 	if (chp->ch_ndrives > 0)
   2213 		ata_reset_channel(chp, AT_WAIT);
   2214 
   2215 out:
   2216 	return true;
   2217 }
   2218 
   2219 static int
   2220 atabus_rescan(device_t self, const char *ifattr, const int *locators)
   2221 {
   2222 	struct atabus_softc *sc = device_private(self);
   2223 	struct ata_channel *chp = sc->sc_chan;
   2224 	struct atabus_initq *initq;
   2225 	int i;
   2226 
   2227 	/*
   2228 	 * we can rescan a port multiplier atabus, even if some devices are
   2229 	 * still attached
   2230 	 */
   2231 	if (chp->ch_satapmp_nports == 0) {
   2232 		if (chp->atapibus != NULL) {
   2233 			return EBUSY;
   2234 		}
   2235 
   2236 		KASSERT(chp->ch_ndrives == 0 || chp->ch_drive != NULL);
   2237 		for (i = 0; i < chp->ch_ndrives; i++) {
   2238 			if (chp->ch_drive[i].drv_softc != NULL) {
   2239 				return EBUSY;
   2240 			}
   2241 		}
   2242 	}
   2243 
   2244 	initq = malloc(sizeof(*initq), M_DEVBUF, M_WAITOK);
   2245 	initq->atabus_sc = sc;
   2246 	mutex_enter(&atabus_qlock);
   2247 	TAILQ_INSERT_TAIL(&atabus_initq_head, initq, atabus_initq);
   2248 	mutex_exit(&atabus_qlock);
   2249 	config_pending_incr(sc->sc_dev);
   2250 
   2251 	ata_channel_lock(chp);
   2252 	chp->ch_flags |= ATACH_TH_RESCAN;
   2253 	cv_signal(&chp->ch_thr_idle);
   2254 	ata_channel_unlock(chp);
   2255 
   2256 	return 0;
   2257 }
   2258 
   2259 void
   2260 ata_delay(struct ata_channel *chp, int ms, const char *msg, int flags)
   2261 {
   2262 
   2263 	if ((flags & (AT_WAIT | AT_POLL)) == AT_POLL) {
   2264 		/*
   2265 		 * can't use kpause(), we may be in interrupt context
   2266 		 * or taking a crash dump
   2267 		 */
   2268 		delay(ms * 1000);
   2269 	} else {
   2270 		int pause = mstohz(ms);
   2271 
   2272 		KASSERT(mutex_owned(&chp->ch_lock));
   2273 		kpause(msg, false, pause > 0 ? pause : 1, &chp->ch_lock);
   2274 	}
   2275 }
   2276 
   2277 void
   2278 atacmd_toncq(struct ata_xfer *xfer, uint8_t *cmd, uint16_t *count,
   2279     uint16_t *features, uint8_t *device)
   2280 {
   2281 	if ((xfer->c_flags & C_NCQ) == 0) {
   2282 		/* FUA handling for non-NCQ drives */
   2283 		if (xfer->c_bio.flags & ATA_FUA
   2284 		    && *cmd == WDCC_WRITEDMA_EXT)
   2285 			*cmd = WDCC_WRITEDMA_FUA_EXT;
   2286 
   2287 		return;
   2288 	}
   2289 
   2290 	*cmd = (xfer->c_bio.flags & ATA_READ) ?
   2291 	    WDCC_READ_FPDMA_QUEUED : WDCC_WRITE_FPDMA_QUEUED;
   2292 
   2293 	/* for FPDMA the block count is in features */
   2294 	*features = *count;
   2295 
   2296 	/* NCQ tag */
   2297 	*count = (xfer->c_slot << 3);
   2298 
   2299 	if (xfer->c_bio.flags & ATA_PRIO_HIGH)
   2300 		*count |= WDSC_PRIO_HIGH;
   2301 
   2302 	/* other device flags */
   2303 	if (xfer->c_bio.flags & ATA_FUA)
   2304 		*device |= WDSD_FUA;
   2305 }
   2306 
   2307 void
   2308 ata_wait_xfer(struct ata_channel *chp, struct ata_xfer *xfer)
   2309 {
   2310 	KASSERT(mutex_owned(&chp->ch_lock));
   2311 
   2312 	cv_wait(&xfer->c_finish, &chp->ch_lock);
   2313 }
   2314 
   2315 void
   2316 ata_wake_xfer(struct ata_channel *chp, struct ata_xfer *xfer)
   2317 {
   2318 	KASSERT(mutex_owned(&chp->ch_lock));
   2319 
   2320 	cv_signal(&xfer->c_finish);
   2321 }
   2322