Home | History | Annotate | Line # | Download | only in ata
ata.c revision 1.134
      1 /*	$NetBSD: ata.c,v 1.134 2017/10/08 04:52:33 mlelstv Exp $	*/
      2 
      3 /*
      4  * Copyright (c) 1998, 2001 Manuel Bouyer.  All rights reserved.
      5  *
      6  * Redistribution and use in source and binary forms, with or without
      7  * modification, are permitted provided that the following conditions
      8  * are met:
      9  * 1. Redistributions of source code must retain the above copyright
     10  *    notice, this list of conditions and the following disclaimer.
     11  * 2. Redistributions in binary form must reproduce the above copyright
     12  *    notice, this list of conditions and the following disclaimer in the
     13  *    documentation and/or other materials provided with the distribution.
     14  *
     15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
     16  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
     17  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
     18  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
     19  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
     20  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
     21  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
     22  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
     23  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
     24  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
     25  */
     26 
     27 #include <sys/cdefs.h>
     28 __KERNEL_RCSID(0, "$NetBSD: ata.c,v 1.134 2017/10/08 04:52:33 mlelstv Exp $");
     29 
     30 #include "opt_ata.h"
     31 
     32 #include <sys/param.h>
     33 #include <sys/systm.h>
     34 #include <sys/kernel.h>
     35 #include <sys/malloc.h>
     36 #include <sys/device.h>
     37 #include <sys/conf.h>
     38 #include <sys/fcntl.h>
     39 #include <sys/proc.h>
     40 #include <sys/kthread.h>
     41 #include <sys/errno.h>
     42 #include <sys/ataio.h>
     43 #include <sys/kmem.h>
     44 #include <sys/intr.h>
     45 #include <sys/bus.h>
     46 #include <sys/once.h>
     47 #include <sys/bitops.h>
     48 
     49 #define ATABUS_PRIVATE
     50 
     51 #include <dev/ata/ataconf.h>
     52 #include <dev/ata/atareg.h>
     53 #include <dev/ata/atavar.h>
     54 #include <dev/ic/wdcvar.h>	/* for PIOBM */
     55 
     56 #include "locators.h"
     57 
     58 #include "atapibus.h"
     59 #include "ataraid.h"
     60 #include "sata_pmp.h"
     61 
     62 #if NATARAID > 0
     63 #include <dev/ata/ata_raidvar.h>
     64 #endif
     65 #if NSATA_PMP > 0
     66 #include <dev/ata/satapmpvar.h>
     67 #endif
     68 #include <dev/ata/satapmpreg.h>
     69 
     70 #define DEBUG_FUNCS  0x08
     71 #define DEBUG_PROBE  0x10
     72 #define DEBUG_DETACH 0x20
     73 #define	DEBUG_XFERS  0x40
     74 #ifdef ATADEBUG
     75 #ifndef ATADEBUG_MASK
     76 #define ATADEBUG_MASK 0
     77 #endif
     78 int atadebug_mask = ATADEBUG_MASK;
     79 #define ATADEBUG_PRINT(args, level) \
     80 	if (atadebug_mask & (level)) \
     81 		printf args
     82 #else
     83 #define ATADEBUG_PRINT(args, level)
     84 #endif
     85 
     86 static ONCE_DECL(ata_init_ctrl);
     87 
     88 /*
     89  * A queue of atabus instances, used to ensure the same bus probe order
     90  * for a given hardware configuration at each boot.  Kthread probing
     91  * devices on a atabus.  Only one probing at once.
     92  */
     93 static TAILQ_HEAD(, atabus_initq)	atabus_initq_head;
     94 static kmutex_t				atabus_qlock;
     95 static kcondvar_t			atabus_qcv;
     96 static lwp_t *				atabus_cfg_lwp;
     97 
     98 /*****************************************************************************
     99  * ATA bus layer.
    100  *
    101  * ATA controllers attach an atabus instance, which handles probing the bus
    102  * for drives, etc.
    103  *****************************************************************************/
    104 
    105 dev_type_open(atabusopen);
    106 dev_type_close(atabusclose);
    107 dev_type_ioctl(atabusioctl);
    108 
    109 const struct cdevsw atabus_cdevsw = {
    110 	.d_open = atabusopen,
    111 	.d_close = atabusclose,
    112 	.d_read = noread,
    113 	.d_write = nowrite,
    114 	.d_ioctl = atabusioctl,
    115 	.d_stop = nostop,
    116 	.d_tty = notty,
    117 	.d_poll = nopoll,
    118 	.d_mmap = nommap,
    119 	.d_kqfilter = nokqfilter,
    120 	.d_discard = nodiscard,
    121 	.d_flag = D_OTHER
    122 };
    123 
    124 extern struct cfdriver atabus_cd;
    125 
    126 static void atabus_childdetached(device_t, device_t);
    127 static int atabus_rescan(device_t, const char *, const int *);
    128 static bool atabus_resume(device_t, const pmf_qual_t *);
    129 static bool atabus_suspend(device_t, const pmf_qual_t *);
    130 static void atabusconfig_thread(void *);
    131 
    132 static void ata_channel_idle(struct ata_channel *);
    133 static void ata_channel_thaw_locked(struct ata_channel *);
    134 static void ata_activate_xfer_locked(struct ata_channel *, struct ata_xfer *);
    135 static void ata_channel_freeze_locked(struct ata_channel *);
    136 static struct ata_xfer *ata_queue_get_active_xfer_locked(struct ata_channel *);
    137 static void ata_thread_wake_locked(struct ata_channel *);
    138 
    139 /*
    140  * atabus_init:
    141  *
    142  *	Initialize ATA subsystem structures.
    143  */
    144 static int
    145 atabus_init(void)
    146 {
    147 
    148 	TAILQ_INIT(&atabus_initq_head);
    149 	mutex_init(&atabus_qlock, MUTEX_DEFAULT, IPL_NONE);
    150 	cv_init(&atabus_qcv, "atainitq");
    151 	return 0;
    152 }
    153 
    154 /*
    155  * atabusprint:
    156  *
    157  *	Autoconfiguration print routine used by ATA controllers when
    158  *	attaching an atabus instance.
    159  */
    160 int
    161 atabusprint(void *aux, const char *pnp)
    162 {
    163 	struct ata_channel *chan = aux;
    164 
    165 	if (pnp)
    166 		aprint_normal("atabus at %s", pnp);
    167 	aprint_normal(" channel %d", chan->ch_channel);
    168 
    169 	return (UNCONF);
    170 }
    171 
    172 /*
    173  * ataprint:
    174  *
    175  *	Autoconfiguration print routine.
    176  */
    177 int
    178 ataprint(void *aux, const char *pnp)
    179 {
    180 	struct ata_device *adev = aux;
    181 
    182 	if (pnp)
    183 		aprint_normal("wd at %s", pnp);
    184 	aprint_normal(" drive %d", adev->adev_drv_data->drive);
    185 
    186 	return (UNCONF);
    187 }
    188 
/*
 * ata_queue_reset:
 *
 *	Reset a command queue to its empty, fully-thawed state: no pending
 *	or active transfers, and every transfer slot marked available.
 */
static void
ata_queue_reset(struct ata_queue *chq)
{
	/* make sure that we can use polled commands */
	TAILQ_INIT(&chq->queue_xfer);
	TAILQ_INIT(&chq->active_xfers);
	chq->queue_freeze = 0;
	chq->queue_active = 0;
	chq->active_xfers_used = 0;
	/* one bit per opening: set the low queue_openings bits (all free) */
	chq->queue_xfers_avail = __BIT(chq->queue_openings) - 1;
}
    200 
    201 struct ata_xfer *
    202 ata_queue_hwslot_to_xfer(struct ata_channel *chp, int hwslot)
    203 {
    204 	struct ata_queue *chq = chp->ch_queue;
    205 	struct ata_xfer *xfer = NULL;
    206 
    207 	ata_channel_lock(chp);
    208 
    209 	KASSERTMSG(hwslot < chq->queue_openings, "hwslot %d > openings %d",
    210 	    hwslot, chq->queue_openings);
    211 	KASSERTMSG((chq->active_xfers_used & __BIT(hwslot)) != 0,
    212 	    "hwslot %d not active", hwslot);
    213 
    214 	/* Usually the first entry will be the one */
    215 	TAILQ_FOREACH(xfer, &chq->active_xfers, c_activechain) {
    216 		if (xfer->c_slot == hwslot)
    217 			break;
    218 	}
    219 
    220 	ata_channel_unlock(chp);
    221 
    222 	KASSERTMSG((xfer != NULL),
    223 	    "%s: xfer with slot %d not found (active %x)", __func__,
    224 	    hwslot, chq->active_xfers_used);
    225 
    226 	return xfer;
    227 }
    228 
    229 static struct ata_xfer *
    230 ata_queue_get_active_xfer_locked(struct ata_channel *chp)
    231 {
    232 	struct ata_xfer *xfer;
    233 
    234 	KASSERT(mutex_owned(&chp->ch_lock));
    235 	xfer = TAILQ_FIRST(&chp->ch_queue->active_xfers);
    236 
    237 	if (xfer && ISSET(xfer->c_flags, C_NCQ)) {
    238 		/* Spurious call, never return NCQ xfer from this interface */
    239 		xfer = NULL;
    240 	}
    241 
    242 	return xfer;
    243 }
    244 
/*
 * ata_queue_get_active_xfer:
 *
 *	Locked wrapper around ata_queue_get_active_xfer_locked().  This
 *	interface is supposed only to be used when there is exactly one
 *	outstanding command and there is no information about the slot
 *	which triggered the command.  ata_queue_hwslot_to_xfer() is
 *	preferred in all NCQ cases.
 */
struct ata_xfer *
ata_queue_get_active_xfer(struct ata_channel *chp)
{
	struct ata_xfer *res;

	ata_channel_lock(chp);
	res = ata_queue_get_active_xfer_locked(chp);
	ata_channel_unlock(chp);

	return res;
}
    262 
/*
 * ata_queue_drive_active_xfer:
 *
 *	Return the active transfer belonging to the given drive.  The
 *	caller must know such a transfer exists; it is a bug to call this
 *	when the drive has none (the KASSERT fires).
 */
struct ata_xfer *
ata_queue_drive_active_xfer(struct ata_channel *chp, int drive)
{
	struct ata_xfer *xfer = NULL;

	ata_channel_lock(chp);

	TAILQ_FOREACH(xfer, &chp->ch_queue->active_xfers, c_activechain) {
		if (xfer->c_drive == drive)
			break;
	}
	/* TAILQ_FOREACH leaves xfer NULL when no entry matched */
	KASSERT(xfer != NULL);

	ata_channel_unlock(chp);

	return xfer;
}
    280 
/*
 * ata_xfer_init:
 *
 *	Zero a transfer structure and bind it to its queue slot, setting
 *	up its condition variables and timeout/retry callouts.  Called
 *	once per slot from ata_queue_alloc().
 */
static void
ata_xfer_init(struct ata_xfer *xfer, uint8_t slot)
{
	memset(xfer, 0, sizeof(*xfer));

	xfer->c_slot = slot;

	cv_init(&xfer->c_active, "ataact");
	cv_init(&xfer->c_finish, "atafin");
	callout_init(&xfer->c_timo_callout, 0); 	/* XXX MPSAFE */
	callout_init(&xfer->c_retry_callout, 0); 	/* XXX MPSAFE */
}
    293 
/*
 * ata_xfer_destroy:
 *
 *	Counterpart of ata_xfer_init(): halt (and wait out) both callouts,
 *	then release the callouts and condition variables.
 */
static void
ata_xfer_destroy(struct ata_xfer *xfer)
{
	callout_halt(&xfer->c_timo_callout, NULL);	/* XXX MPSAFE */
	callout_destroy(&xfer->c_timo_callout);
	callout_halt(&xfer->c_retry_callout, NULL);	/* XXX MPSAFE */
	callout_destroy(&xfer->c_retry_callout);
	cv_destroy(&xfer->c_active);
	cv_destroy(&xfer->c_finish);
}
    304 
/*
 * ata_queue_alloc:
 *
 *	Allocate and initialize a command queue with the requested number
 *	of openings, clamped to [1, ATA_MAX_OPENINGS].  The queue header
 *	and its per-slot ata_xfer structures are one allocation, via the
 *	queue_xfers trailing array (hence the offsetof() sizing).
 *	M_WAITOK: may sleep, never returns NULL.
 */
struct ata_queue *
ata_queue_alloc(uint8_t openings)
{
	if (openings == 0)
		openings = 1;

	if (openings > ATA_MAX_OPENINGS)
		openings = ATA_MAX_OPENINGS;

	struct ata_queue *chq = malloc(offsetof(struct ata_queue, queue_xfers[openings]),
	    M_DEVBUF, M_WAITOK | M_ZERO);

	chq->queue_openings = openings;
	ata_queue_reset(chq);

	cv_init(&chq->queue_busy, "ataqbusy");
	cv_init(&chq->queue_drain, "atdrn");
	cv_init(&chq->queue_idle, "qidl");

	for (uint8_t i = 0; i < openings; i++)
		ata_xfer_init(&chq->queue_xfers[i], i);

	return chq;
}
    329 
/*
 * ata_queue_free:
 *
 *	Counterpart of ata_queue_alloc(): destroy every per-slot transfer
 *	structure and the queue's condition variables, then free the
 *	single backing allocation.
 */
void
ata_queue_free(struct ata_queue *chq)
{
	for (uint8_t i = 0; i < chq->queue_openings; i++)
		ata_xfer_destroy(&chq->queue_xfers[i]);

	cv_destroy(&chq->queue_busy);
	cv_destroy(&chq->queue_drain);
	cv_destroy(&chq->queue_idle);

	free(chq, M_DEVBUF);
}
    342 
/*
 * ata_channel_init:
 *
 *	Set up the channel's lock (IPL_BIO, taken from interrupt paths)
 *	and the condvar used to wake/idle the per-channel worker thread.
 */
void
ata_channel_init(struct ata_channel *chp)
{
	mutex_init(&chp->ch_lock, MUTEX_DEFAULT, IPL_BIO);
	cv_init(&chp->ch_thr_idle, "atath");
}
    349 
/*
 * ata_channel_attach:
 *
 *	Common parts of attaching an atabus to an ATA controller channel.
 *	No-op for disabled channels; otherwise initializes the channel's
 *	synchronization primitives and attaches the atabus child device.
 */
void
ata_channel_attach(struct ata_channel *chp)
{
	if (chp->ch_flags & ATACH_DISABLED)
		return;

	/* The controller must have provided a queue before attaching. */
	KASSERT(chp->ch_queue != NULL);

	ata_channel_init(chp);

	chp->atabus = config_found_ia(chp->ch_atac->atac_dev, "ata", chp,
		atabusprint);
}
    368 
/*
 * ata_channel_destroy:
 *
 *	Release the synchronization primitives set up by ata_channel_init().
 */
void
ata_channel_destroy(struct ata_channel *chp)
{
	mutex_destroy(&chp->ch_lock);
	cv_destroy(&chp->ch_thr_idle);
}
    375 
/*
 * ata_channel_detach:
 *
 *	Common parts of detaching an atabus from an ATA controller channel.
 *	Mirrors ata_channel_attach(); no-op for disabled channels.
 */
void
ata_channel_detach(struct ata_channel *chp)
{
	if (chp->ch_flags & ATACH_DISABLED)
		return;

	ata_channel_destroy(chp);
}
    389 
/*
 * atabusconfig:
 *
 *	Probe the drives on a channel and, when at least one was found,
 *	hand the remaining configuration off to atabusconfig_thread().
 *	Runs in the atabus worker thread.  Instances are serialized on
 *	atabus_initq_head so device probe order is stable across boots.
 */
static void
atabusconfig(struct atabus_softc *atabus_sc)
{
	struct ata_channel *chp = atabus_sc->sc_chan;
	struct atac_softc *atac = chp->ch_atac;
	struct atabus_initq *atabus_initq = NULL;
	int i, error;

	/* we are in the atabus's thread context */
	ata_channel_lock(chp);
	chp->ch_flags |= ATACH_TH_RUN;
	ata_channel_unlock(chp);

	/*
	 * Probe for the drives attached to controller, unless a PMP
	 * is already known
	 */
	/* XXX for SATA devices we will power up all drives at once */
	if (chp->ch_satapmp_nports == 0)
		(*atac->atac_probe)(chp);

	if (chp->ch_ndrives >= 2) {
		ATADEBUG_PRINT(("atabusattach: ch_drive_type 0x%x 0x%x\n",
		    chp->ch_drive[0].drive_type, chp->ch_drive[1].drive_type),
		    DEBUG_PROBE);
	}

	/* next operations will occurs in a separate thread */
	ata_channel_lock(chp);
	chp->ch_flags &= ~ATACH_TH_RUN;
	ata_channel_unlock(chp);

	/* Make sure the devices probe in atabus order to avoid jitter. */
	mutex_enter(&atabus_qlock);
	for (;;) {
		/* Wait until this instance reaches the head of the queue. */
		atabus_initq = TAILQ_FIRST(&atabus_initq_head);
		if (atabus_initq->atabus_sc == atabus_sc)
			break;
		cv_wait(&atabus_qcv, &atabus_qlock);
	}
	mutex_exit(&atabus_qlock);

	ata_channel_lock(chp);

	/* If no drives, abort here */
	if (chp->ch_drive == NULL)
		goto out;
	KASSERT(chp->ch_ndrives == 0 || chp->ch_drive != NULL);
	/* Also abort if the probe turned up no usable drive at all. */
	for (i = 0; i < chp->ch_ndrives; i++)
		if (chp->ch_drive[i].drive_type != ATA_DRIVET_NONE)
			break;
	if (i == chp->ch_ndrives)
		goto out;

	/* Shortcut in case we've been shutdown */
	if (chp->ch_flags & ATACH_SHUTDOWN)
		goto out;

	ata_channel_unlock(chp);

	/* Child attachment continues in atabusconfig_thread(). */
	if ((error = kthread_create(PRI_NONE, 0, NULL, atabusconfig_thread,
	    atabus_sc, &atabus_cfg_lwp,
	    "%scnf", device_xname(atac->atac_dev))) != 0)
		aprint_error_dev(atac->atac_dev,
		    "unable to create config thread: error %d\n", error);
	return;

 out:
	/* Abort path: dequeue ourselves, wake waiters, drop references. */
	ata_channel_unlock(chp);

	mutex_enter(&atabus_qlock);
	TAILQ_REMOVE(&atabus_initq_head, atabus_initq, atabus_initq);
	cv_broadcast(&atabus_qcv);
	mutex_exit(&atabus_qlock);

	free(atabus_initq, M_DEVBUF);

	ata_delref(chp);

	config_pending_decr(atac->atac_dev);
}
    471 
/*
 * atabusconfig_thread:
 *
 *	Finish attach of atabus's children, in a separate kernel thread.
 *	Attaches (in order) a SATA port multiplier, an atapibus for any
 *	ATAPI drives, then a child device per ATA/old drive; finally lets
 *	the controller set transfer modes and releases the probe-ordering
 *	queue entry taken in atabus_attach().
 */
static void
atabusconfig_thread(void *arg)
{
	struct atabus_softc *atabus_sc = arg;
	struct ata_channel *chp = atabus_sc->sc_chan;
	struct atac_softc *atac = chp->ch_atac;
	struct atabus_initq *atabus_initq = NULL;
	int i, s;

	/* XXX seems wrong */
	mutex_enter(&atabus_qlock);
	atabus_initq = TAILQ_FIRST(&atabus_initq_head);
	KASSERT(atabus_initq->atabus_sc == atabus_sc);
	mutex_exit(&atabus_qlock);

	/*
	 * First look for a port multiplier
	 */
	if (chp->ch_ndrives == PMP_MAX_DRIVES &&
	    chp->ch_drive[PMP_PORT_CTL].drive_type == ATA_DRIVET_PM) {
#if NSATA_PMP > 0
		satapmp_attach(chp);
#else
		aprint_error_dev(atabus_sc->sc_dev,
		    "SATA port multiplier not supported\n");
		/* no problems going on, all drives are ATA_DRIVET_NONE */
#endif
	}

	/*
	 * Attach an ATAPI bus, if needed.
	 */
	KASSERT(chp->ch_ndrives == 0 || chp->ch_drive != NULL);
	for (i = 0; i < chp->ch_ndrives && chp->atapibus == NULL; i++) {
		if (chp->ch_drive[i].drive_type == ATA_DRIVET_ATAPI) {
#if NATAPIBUS > 0
			(*atac->atac_atapibus_attach)(atabus_sc);
#else
			/*
			 * Fake the autoconfig "not configured" message
			 */
			aprint_normal("atapibus at %s not configured\n",
			    device_xname(atac->atac_dev));
			chp->atapibus = NULL;
			s = splbio();
			/* NOTE: reuses outer 'i'; safe only because of the
			 * unconditional break below. */
			for (i = 0; i < chp->ch_ndrives; i++) {
				if (chp->ch_drive[i].drive_type == ATA_DRIVET_ATAPI)
					chp->ch_drive[i].drive_type = ATA_DRIVET_NONE;
			}
			splx(s);
#endif
			break;
		}
	}

	/* Attach a child device for each ATA/old-style drive. */
	for (i = 0; i < chp->ch_ndrives; i++) {
		struct ata_device adev;
		if (chp->ch_drive[i].drive_type != ATA_DRIVET_ATA &&
		    chp->ch_drive[i].drive_type != ATA_DRIVET_OLD) {
			continue;
		}
		if (chp->ch_drive[i].drv_softc != NULL)
			continue;
		memset(&adev, 0, sizeof(struct ata_device));
		adev.adev_bustype = atac->atac_bustype_ata;
		adev.adev_channel = chp->ch_channel;
		adev.adev_drv_data = &chp->ch_drive[i];
		chp->ch_drive[i].drv_softc = config_found_ia(atabus_sc->sc_dev,
		    "ata_hl", &adev, ataprint);
		if (chp->ch_drive[i].drv_softc != NULL) {
			ata_probe_caps(&chp->ch_drive[i]);
		} else {
			/* No driver attached: forget the drive. */
			s = splbio();
			chp->ch_drive[i].drive_type = ATA_DRIVET_NONE;
			splx(s);
		}
	}

	/* now that we know the drives, the controller can set its modes */
	if (atac->atac_set_modes) {
		(*atac->atac_set_modes)(chp);
		ata_print_modes(chp);
	}
#if NATARAID > 0
	if (atac->atac_cap & ATAC_CAP_RAID) {
		for (i = 0; i < chp->ch_ndrives; i++) {
			if (chp->ch_drive[i].drive_type == ATA_DRIVET_ATA) {
				ata_raid_check_component(
				    chp->ch_drive[i].drv_softc);
			}
		}
	}
#endif /* NATARAID > 0 */

	/*
	 * reset drive_flags for unattached devices, reset state for attached
	 * ones
	 */
	s = splbio();
	for (i = 0; i < chp->ch_ndrives; i++) {
		if (chp->ch_drive[i].drive_type == ATA_DRIVET_PM)
			continue;
		if (chp->ch_drive[i].drv_softc == NULL) {
			chp->ch_drive[i].drive_flags = 0;
			chp->ch_drive[i].drive_type = ATA_DRIVET_NONE;
		} else
			chp->ch_drive[i].state = 0;
	}
	splx(s);

	/* Done probing: hand the ordering queue to the next instance. */
	mutex_enter(&atabus_qlock);
	TAILQ_REMOVE(&atabus_initq_head, atabus_initq, atabus_initq);
	cv_broadcast(&atabus_qcv);
	mutex_exit(&atabus_qlock);

	free(atabus_initq, M_DEVBUF);

	ata_delref(chp);

	config_pending_decr(atac->atac_dev);
	kthread_exit(0);
}
    598 
/*
 * atabus_thread:
 *
 *	Worker thread for the ATA bus.  After an initial probe via
 *	atabusconfig(), it sleeps on ch_thr_idle and services, in order:
 *	shutdown (exit), rescan requests, channel reset requests, and
 *	restart of a single frozen non-NCQ transfer.
 */
static void
atabus_thread(void *arg)
{
	struct atabus_softc *sc = arg;
	struct ata_channel *chp = sc->sc_chan;
	struct ata_queue *chq = chp->ch_queue;
	struct ata_xfer *xfer;
	int i, rv, s;

	ata_channel_lock(chp);
	chp->ch_flags |= ATACH_TH_RUN;

	/*
	 * Probe the drives.  Reset type to indicate to controllers
	 * that can re-probe that all drives must be probed..
	 *
	 * Note: ch_ndrives may be changed during the probe.
	 */
	KASSERT(chp->ch_ndrives == 0 || chp->ch_drive != NULL);
	for (i = 0; i < chp->ch_ndrives; i++) {
		chp->ch_drive[i].drive_flags = 0;
		chp->ch_drive[i].drive_type = ATA_DRIVET_NONE;
	}
	ata_channel_unlock(chp);

	atabusconfig(sc);

	ata_channel_lock(chp);
	for (;;) {
		/* Sleep until there is work: reset/shutdown flag set, or
		 * an active queue that has been frozen by a caller. */
		if ((chp->ch_flags & (ATACH_TH_RESET | ATACH_SHUTDOWN)) == 0 &&
		    (chq->queue_active == 0 || chq->queue_freeze == 0)) {
			chp->ch_flags &= ~ATACH_TH_RUN;
			cv_wait(&chp->ch_thr_idle, &chp->ch_lock);
			chp->ch_flags |= ATACH_TH_RUN;
		}
		if (chp->ch_flags & ATACH_SHUTDOWN) {
			break;
		}
		if (chp->ch_flags & ATACH_TH_RESCAN) {
			chp->ch_flags &= ~ATACH_TH_RESCAN;
			ata_channel_unlock(chp);
			atabusconfig(sc);
			ata_channel_lock(chp);
		}
		if (chp->ch_flags & ATACH_TH_RESET) {
			/*
			 * ata_reset_channel() will freeze 2 times, so
			 * unfreeze one time. Not a problem as we're at splbio
			 */
			ata_channel_thaw_locked(chp);
			ata_channel_unlock(chp);
			s = splbio();
			ata_reset_channel(chp, AT_WAIT | chp->ch_reset_flags);
			splx(s);
			ata_channel_lock(chp);
		} else if (chq->queue_active > 0 && chq->queue_freeze == 1) {
			/*
			 * Caller has bumped queue_freeze, decrease it. This
			 * flow shalt never be executed for NCQ commands.
			 */
			KASSERT((chp->ch_flags & ATACH_NCQ) == 0);
			KASSERT(chq->queue_active == 1);

			ata_channel_thaw_locked(chp);
			xfer = ata_queue_get_active_xfer_locked(chp);

			KASSERT(xfer != NULL);
			KASSERT((xfer->c_flags & C_POLL) == 0);

			switch ((rv = ata_xfer_start(xfer))) {
			case ATASTART_STARTED:
			case ATASTART_POLL:
			case ATASTART_ABORT:
				break;
			case ATASTART_TH:
			default:
				/* ATASTART_TH would loop back to us forever */
				panic("%s: ata_xfer_start() unexpected rv %d",
				    __func__, rv);
				/* NOTREACHED */
			}
		} else if (chq->queue_freeze > 1)
			panic("%s: queue_freeze", __func__);
	}
	/* Shutdown handshake: see atabus_detach(), which waits on this. */
	chp->ch_thread = NULL;
	cv_signal(&chp->ch_thr_idle);
	ata_channel_unlock(chp);
	kthread_exit(0);
}
    692 
/*
 * ata_thread_wake_locked:
 *
 *	Freeze the channel's queue and wake the per-channel worker thread.
 *	Caller must hold the channel lock.
 */
static void
ata_thread_wake_locked(struct ata_channel *chp)
{
	KASSERT(mutex_owned(&chp->ch_lock));
	/* freeze first so the thread sees queue_freeze != 0 when it runs */
	ata_channel_freeze_locked(chp);
	cv_signal(&chp->ch_thr_idle);
}
    700 
    701 /*
    702  * atabus_match:
    703  *
    704  *	Autoconfiguration match routine.
    705  */
    706 static int
    707 atabus_match(device_t parent, cfdata_t cf, void *aux)
    708 {
    709 	struct ata_channel *chp = aux;
    710 
    711 	if (chp == NULL)
    712 		return (0);
    713 
    714 	if (cf->cf_loc[ATACF_CHANNEL] != chp->ch_channel &&
    715 	    cf->cf_loc[ATACF_CHANNEL] != ATACF_CHANNEL_DEFAULT)
    716 		return (0);
    717 
    718 	return (1);
    719 }
    720 
/*
 * atabus_attach:
 *
 *	Autoconfiguration attach routine.  Queues this instance on the
 *	ordered probe list and starts the per-channel worker thread
 *	(atabus_thread), which performs the actual drive probing.
 */
static void
atabus_attach(device_t parent, device_t self, void *aux)
{
	struct atabus_softc *sc = device_private(self);
	struct ata_channel *chp = aux;
	struct atabus_initq *initq;
	int error;

	sc->sc_chan = chp;

	aprint_normal("\n");
	aprint_naive("\n");

	sc->sc_dev = self;

	if (ata_addref(chp))
		return;

	/* First attach ever initializes the global ordering structures. */
	RUN_ONCE(&ata_init_ctrl, atabus_init);

	initq = malloc(sizeof(*initq), M_DEVBUF, M_WAITOK);
	initq->atabus_sc = sc;
	mutex_enter(&atabus_qlock);
	TAILQ_INSERT_TAIL(&atabus_initq_head, initq, atabus_initq);
	mutex_exit(&atabus_qlock);
	/*
	 * NOTE(review): incremented on sc->sc_dev here, but the matching
	 * config_pending_decr() in atabusconfig*() is on atac->atac_dev —
	 * verify the pairing is intentional.
	 */
	config_pending_incr(sc->sc_dev);

	if ((error = kthread_create(PRI_NONE, 0, NULL, atabus_thread, sc,
	    &chp->ch_thread, "%s", device_xname(self))) != 0)
		aprint_error_dev(self,
		    "unable to create kernel thread: error %d\n", error);

	if (!pmf_device_register(self, atabus_suspend, atabus_resume))
		aprint_error_dev(self, "couldn't establish power handler\n");
}
    761 
/*
 * atabus_detach:
 *
 *	Autoconfiguration detach routine.  Stops the worker thread, then
 *	detaches the atapibus child (if any) followed by the individual
 *	drive children, and finally frees the drive array.
 */
static int
atabus_detach(device_t self, int flags)
{
	struct atabus_softc *sc = device_private(self);
	struct ata_channel *chp = sc->sc_chan;
	device_t dev = NULL;
	int i, error = 0;

	/* Shutdown the channel. */
	ata_channel_lock(chp);
	chp->ch_flags |= ATACH_SHUTDOWN;
	while (chp->ch_thread != NULL) {
		/* atabus_thread() clears ch_thread and signals before exit */
		cv_signal(&chp->ch_thr_idle);
		cv_wait(&chp->ch_thr_idle, &chp->ch_lock);
	}
	ata_channel_unlock(chp);

	/*
	 * Detach atapibus and its children.
	 */
	if ((dev = chp->atapibus) != NULL) {
		ATADEBUG_PRINT(("atabus_detach: %s: detaching %s\n",
		    device_xname(self), device_xname(dev)), DEBUG_DETACH);

		error = config_detach(dev, flags);
		if (error)
			goto out;
		/* atabus_childdetached() clears chp->atapibus */
		KASSERT(chp->atapibus == NULL);
	}

	KASSERT(chp->ch_ndrives == 0 || chp->ch_drive != NULL);

	/*
	 * Detach our other children.
	 */
	for (i = 0; i < chp->ch_ndrives; i++) {
		if (chp->ch_drive[i].drive_type == ATA_DRIVET_ATAPI)
			continue;
		/* Port-multiplier slots have no child device; just clear. */
		if (chp->ch_drive[i].drive_type == ATA_DRIVET_PM)
			chp->ch_drive[i].drive_type = ATA_DRIVET_NONE;
		if ((dev = chp->ch_drive[i].drv_softc) != NULL) {
			ATADEBUG_PRINT(("%s.%d: %s: detaching %s\n", __func__,
			    __LINE__, device_xname(self), device_xname(dev)),
			    DEBUG_DETACH);
			error = config_detach(dev, flags);
			if (error)
				goto out;
			/* cleared by atabus_childdetached() */
			KASSERT(chp->ch_drive[i].drv_softc == NULL);
			KASSERT(chp->ch_drive[i].drive_type == 0);
		}
	}
	atabus_free_drives(chp);

 out:
#ifdef ATADEBUG
	if (dev != NULL && error != 0)
		ATADEBUG_PRINT(("%s: %s: error %d detaching %s\n", __func__,
		    device_xname(self), error, device_xname(dev)),
		    DEBUG_DETACH);
#endif /* ATADEBUG */

	return (error);
}
    830 
/*
 * atabus_childdetached:
 *
 *	Autoconfiguration child-detached notification.  Clears the
 *	bookkeeping for whichever child (atapibus or an individual drive)
 *	went away; panics if the child is unknown to this channel.
 */
void
atabus_childdetached(device_t self, device_t child)
{
	bool found = false;
	struct atabus_softc *sc = device_private(self);
	struct ata_channel *chp = sc->sc_chan;
	int i;

	KASSERT(chp->ch_ndrives == 0 || chp->ch_drive != NULL);
	/*
	 * atapibus detached.
	 */
	if (child == chp->atapibus) {
		chp->atapibus = NULL;
		found = true;
		/* all ATAPI drives hung off the atapibus; forget them too */
		for (i = 0; i < chp->ch_ndrives; i++) {
			if (chp->ch_drive[i].drive_type != ATA_DRIVET_ATAPI)
				continue;
			KASSERT(chp->ch_drive[i].drv_softc != NULL);
			chp->ch_drive[i].drv_softc = NULL;
			chp->ch_drive[i].drive_flags = 0;
			chp->ch_drive[i].drive_type = ATA_DRIVET_NONE;
		}
	}

	/*
	 * Detach our other children.
	 */
	for (i = 0; i < chp->ch_ndrives; i++) {
		if (chp->ch_drive[i].drive_type == ATA_DRIVET_ATAPI)
			continue;
		if (child == chp->ch_drive[i].drv_softc) {
			chp->ch_drive[i].drv_softc = NULL;
			chp->ch_drive[i].drive_flags = 0;
			/* losing the port multiplier invalidates its ports */
			if (chp->ch_drive[i].drive_type == ATA_DRIVET_PM)
				chp->ch_satapmp_nports = 0;
			chp->ch_drive[i].drive_type = ATA_DRIVET_NONE;
			found = true;
		}
	}

	if (!found)
		panic("%s: unknown child %p", device_xname(self),
		    (const void *)child);
}
    876 
/*
 * Autoconfiguration glue for the atabus device: match/attach/detach
 * entry points plus rescan and child-detached hooks.
 */
CFATTACH_DECL3_NEW(atabus, sizeof(struct atabus_softc),
    atabus_match, atabus_attach, atabus_detach, NULL, atabus_rescan,
    atabus_childdetached, DVF_DETACH_SHUTDOWN);
    880 
    881 /*****************************************************************************
    882  * Common ATA bus operations.
    883  *****************************************************************************/
    884 
    885 /* allocate/free the channel's ch_drive[] array */
    886 int
    887 atabus_alloc_drives(struct ata_channel *chp, int ndrives)
    888 {
    889 	int i;
    890 	if (chp->ch_ndrives != ndrives)
    891 		atabus_free_drives(chp);
    892 	if (chp->ch_drive == NULL) {
    893 		chp->ch_drive = malloc(
    894 		    sizeof(struct ata_drive_datas) * ndrives,
    895 		    M_DEVBUF, M_NOWAIT | M_ZERO);
    896 	}
    897 	if (chp->ch_drive == NULL) {
    898 	    aprint_error_dev(chp->ch_atac->atac_dev,
    899 		"can't alloc drive array\n");
    900 	    chp->ch_ndrives = 0;
    901 	    return ENOMEM;
    902 	};
    903 	for (i = 0; i < ndrives; i++) {
    904 		chp->ch_drive[i].chnl_softc = chp;
    905 		chp->ch_drive[i].drive = i;
    906 	}
    907 	chp->ch_ndrives = ndrives;
    908 	return 0;
    909 }
    910 
/*
 * atabus_free_drives:
 *
 *	Free the channel's ch_drive[] array.  DIAGNOSTIC kernels panic
 *	if any entry is still marked present or still has an attached
 *	driver softc — both indicate a detach-ordering bug.
 */
void
atabus_free_drives(struct ata_channel *chp)
{
#ifdef DIAGNOSTIC
	int i;
	int dopanic = 0;
	KASSERT(chp->ch_ndrives == 0 || chp->ch_drive != NULL);
	for (i = 0; i < chp->ch_ndrives; i++) {
		if (chp->ch_drive[i].drive_type != ATA_DRIVET_NONE) {
			printf("%s: ch_drive[%d] type %d != ATA_DRIVET_NONE\n",
			    device_xname(chp->atabus), i,
			    chp->ch_drive[i].drive_type);
			dopanic = 1;
		}
		if (chp->ch_drive[i].drv_softc != NULL) {
			printf("%s: ch_drive[%d] attached to %s\n",
			    device_xname(chp->atabus), i,
			    device_xname(chp->ch_drive[i].drv_softc));
			dopanic = 1;
		}
	}
	if (dopanic)
		panic("atabus_free_drives");
#endif

	if (chp->ch_drive == NULL)
		return;
	chp->ch_ndrives = 0;
	free(chp->ch_drive, M_DEVBUF);
	chp->ch_drive = NULL;
}
    942 
/*
 * Get the disk's parameters.
 *
 * Issues IDENTIFY (ATA) or IDENTIFY DEVICE (ATAPI) and copies the
 * resulting parameter block into *prms.  The model/serial/revision
 * ASCII fields are byte-swapped into host order, except for a few
 * drives known to report them already in readable order.
 *
 * flags: additional AT_* flags for the command (AT_READ always added).
 * Returns CMD_OK on success, CMD_AGAIN if no xfer was available or the
 * command could not be submitted, CMD_ERR on device error.
 */
int
ata_get_params(struct ata_drive_datas *drvp, uint8_t flags,
    struct ataparams *prms)
{
	struct ata_xfer *xfer;
	struct ata_channel *chp = drvp->chnl_softc;
	struct atac_softc *atac = chp->ch_atac;
	char *tb;
	int i, rv;
	uint16_t *p;

	ATADEBUG_PRINT(("%s\n", __func__), DEBUG_FUNCS);

	xfer = ata_get_xfer(chp);
	if (xfer == NULL) {
		ATADEBUG_PRINT(("%s: no xfer\n", __func__),
		    DEBUG_FUNCS|DEBUG_PROBE);
		return CMD_AGAIN;
	}

	/* Bounce buffer for the identify data; freed at "out". */
	tb = kmem_zalloc(ATA_BSIZE, KM_SLEEP);
	memset(prms, 0, sizeof(struct ataparams));

	if (drvp->drive_type == ATA_DRIVET_ATA) {
		xfer->c_ata_c.r_command = WDCC_IDENTIFY;
		xfer->c_ata_c.r_st_bmask = WDCS_DRDY;
		xfer->c_ata_c.r_st_pmask = WDCS_DRQ;
		xfer->c_ata_c.timeout = 3000; /* 3s */
	} else if (drvp->drive_type == ATA_DRIVET_ATAPI) {
		xfer->c_ata_c.r_command = ATAPI_IDENTIFY_DEVICE;
		xfer->c_ata_c.r_st_bmask = 0;
		xfer->c_ata_c.r_st_pmask = WDCS_DRQ;
		xfer->c_ata_c.timeout = 10000; /* 10s */
	} else {
		ATADEBUG_PRINT(("ata_get_parms: no disks\n"),
		    DEBUG_FUNCS|DEBUG_PROBE);
		rv = CMD_ERR;
		goto out;
	}
	xfer->c_ata_c.flags = AT_READ | flags;
	xfer->c_ata_c.data = tb;
	xfer->c_ata_c.bcount = ATA_BSIZE;
	if ((*atac->atac_bustype_ata->ata_exec_command)(drvp,
						xfer) != ATACMD_COMPLETE) {
		ATADEBUG_PRINT(("ata_get_parms: wdc_exec_command failed\n"),
		    DEBUG_FUNCS|DEBUG_PROBE);
		rv = CMD_AGAIN;
		goto out;
	}
	if (xfer->c_ata_c.flags & (AT_ERROR | AT_TIMEOU | AT_DF)) {
		ATADEBUG_PRINT(("ata_get_parms: ata_c.flags=0x%x\n",
		    xfer->c_ata_c.flags), DEBUG_FUNCS|DEBUG_PROBE);
		rv = CMD_ERR;
		goto out;
	}
	/* if we didn't read any data something is wrong */
	if ((xfer->c_ata_c.flags & AT_XFDONE) == 0) {
		rv = CMD_ERR;
		goto out;
	}

	/* Read in parameter block. */
	memcpy(prms, tb, sizeof(struct ataparams));

	/*
	 * Shuffle string byte order.
	 * ATAPI NEC, Mitsumi and Pioneer drives and
	 * old ATA TDK CompactFlash cards
	 * have different byte order.
	 */
#if BYTE_ORDER == BIG_ENDIAN
# define M(n)	prms->atap_model[(n) ^ 1]
#else
# define M(n)	prms->atap_model[n]
#endif
	/*
	 * M() indexes the model string so the quirk check reads the same
	 * bytes on both endiannesses; on big-endian the whole condition
	 * is negated since the data arrives already swapped.  If a
	 * quirky drive is matched, skip the swap loops below.
	 */
	if (
#if BYTE_ORDER == BIG_ENDIAN
	    !
#endif
	    ((drvp->drive_type == ATA_DRIVET_ATAPI) ?
	     ((M(0) == 'N' && M(1) == 'E') ||
	      (M(0) == 'F' && M(1) == 'X') ||
	      (M(0) == 'P' && M(1) == 'i')) :
	     ((M(0) == 'T' && M(1) == 'D' && M(2) == 'K')))) {
		rv = CMD_OK;
		goto out;
	     }
#undef M
	/* Swap each 16-bit word of the ASCII string fields. */
	for (i = 0; i < sizeof(prms->atap_model); i += 2) {
		p = (uint16_t *)(prms->atap_model + i);
		*p = bswap16(*p);
	}
	for (i = 0; i < sizeof(prms->atap_serial); i += 2) {
		p = (uint16_t *)(prms->atap_serial + i);
		*p = bswap16(*p);
	}
	for (i = 0; i < sizeof(prms->atap_revision); i += 2) {
		p = (uint16_t *)(prms->atap_revision + i);
		*p = bswap16(*p);
	}

	rv = CMD_OK;
 out:
	kmem_free(tb, ATA_BSIZE);
	ata_free_xfer(chp, xfer);
	return rv;
}
   1051 
   1052 int
   1053 ata_set_mode(struct ata_drive_datas *drvp, uint8_t mode, uint8_t flags)
   1054 {
   1055 	struct ata_xfer *xfer;
   1056 	int rv;
   1057 	struct ata_channel *chp = drvp->chnl_softc;
   1058 	struct atac_softc *atac = chp->ch_atac;
   1059 
   1060 	ATADEBUG_PRINT(("ata_set_mode=0x%x\n", mode), DEBUG_FUNCS);
   1061 
   1062 	xfer = ata_get_xfer(chp);
   1063 	if (xfer == NULL) {
   1064 		ATADEBUG_PRINT(("%s: no xfer\n", __func__),
   1065 		    DEBUG_FUNCS|DEBUG_PROBE);
   1066 		return CMD_AGAIN;
   1067 	}
   1068 
   1069 	xfer->c_ata_c.r_command = SET_FEATURES;
   1070 	xfer->c_ata_c.r_st_bmask = 0;
   1071 	xfer->c_ata_c.r_st_pmask = 0;
   1072 	xfer->c_ata_c.r_features = WDSF_SET_MODE;
   1073 	xfer->c_ata_c.r_count = mode;
   1074 	xfer->c_ata_c.flags = flags;
   1075 	xfer->c_ata_c.timeout = 1000; /* 1s */
   1076 	if ((*atac->atac_bustype_ata->ata_exec_command)(drvp,
   1077 						xfer) != ATACMD_COMPLETE) {
   1078 		rv = CMD_AGAIN;
   1079 		goto out;
   1080 	}
   1081 	if (xfer->c_ata_c.flags & (AT_ERROR | AT_TIMEOU | AT_DF)) {
   1082 		rv = CMD_ERR;
   1083 		goto out;
   1084 	}
   1085 
   1086 	rv = CMD_OK;
   1087 
   1088 out:
   1089 	ata_free_xfer(chp, xfer);
   1090 	return rv;
   1091 }
   1092 
/*
 * Read the NCQ command error log (READ LOG EXT page 0x10) to find out
 * which queued command failed, together with its status and error
 * register values.
 *
 * Returns 0 on success and fills *slot, *status and *err; EOPNOTSUPP
 * if the drive is not an NCQ-capable ATA drive or the log entry is for
 * a non-queued command; EAGAIN if the command could not be submitted;
 * EINVAL on command failure or bad log checksum.
 */
int
ata_read_log_ext_ncq(struct ata_drive_datas *drvp, uint8_t flags,
    uint8_t *slot, uint8_t *status, uint8_t *err)
{
	struct ata_xfer *xfer;
	int rv;
	struct ata_channel *chp = drvp->chnl_softc;
	struct atac_softc *atac = chp->ch_atac;
	uint8_t *tb, cksum, page;

	ATADEBUG_PRINT(("%s\n", __func__), DEBUG_FUNCS);

	/* Only NCQ ATA drives support/need this */
	if (drvp->drive_type != ATA_DRIVET_ATA ||
	    (drvp->drive_flags & ATA_DRIVE_NCQ) == 0)
		return EOPNOTSUPP;

	/* Use the slot reserved for recovery; expected to be available. */
	xfer = ata_get_xfer_ext(chp, C_RECOVERY, 0);

	/* Per-drive recovery buffer, so no allocation is needed here. */
	tb = drvp->recovery_blk;
	memset(tb, 0, sizeof(drvp->recovery_blk));

	/*
	 * We could use READ LOG DMA EXT if drive supports it (i.e.
	 * when it supports Streaming feature) to avoid PIO command,
	 * and to make this a little faster. Realistically, it
	 * should not matter.
	 */
	xfer->c_flags |= C_RECOVERY;
	xfer->c_ata_c.r_command = WDCC_READ_LOG_EXT;
	xfer->c_ata_c.r_lba = page = WDCC_LOG_PAGE_NCQ;
	xfer->c_ata_c.r_st_bmask = WDCS_DRDY;
	xfer->c_ata_c.r_st_pmask = WDCS_DRDY;
	xfer->c_ata_c.r_count = 1;
	xfer->c_ata_c.r_device = WDSD_LBA;
	xfer->c_ata_c.flags = AT_READ | AT_LBA | AT_LBA48 | flags;
	xfer->c_ata_c.timeout = 1000; /* 1s */
	xfer->c_ata_c.data = tb;
	xfer->c_ata_c.bcount = sizeof(drvp->recovery_blk);

	if ((*atac->atac_bustype_ata->ata_exec_command)(drvp,
						xfer) != ATACMD_COMPLETE) {
		rv = EAGAIN;
		goto out;
	}
	if (xfer->c_ata_c.flags & (AT_ERROR | AT_TIMEOU | AT_DF)) {
		rv = EINVAL;
		goto out;
	}

	/* All bytes of the log page must sum to zero (mod 256). */
	cksum = 0;
	for (int i = 0; i < sizeof(drvp->recovery_blk); i++)
		cksum += tb[i];
	if (cksum != 0) {
		aprint_error_dev(drvp->drv_softc,
		    "invalid checksum %x for READ LOG EXT page %x\n",
		    cksum, page);
		rv = EINVAL;
		goto out;
	}

	if (tb[0] & WDCC_LOG_NQ) {
		/* not queued command */
		rv = EOPNOTSUPP;
		goto out;
	}

	/* Byte 0 bits 0-4 hold the failed tag; bytes 2/3 status/error. */
	*slot = tb[0] & 0x1f;
	*status = tb[2];
	*err = tb[3];

	KASSERTMSG((*status & WDCS_ERR),
	    "%s: non-error command slot %d reported by READ LOG EXT page %x: "
	    "err %x status %x\n",
	    device_xname(drvp->drv_softc), *slot, page, *err, *status);

	rv = 0;

out:
	ata_free_xfer(chp, xfer);
	return rv;
}
   1175 
   1176 #if NATA_DMA
   1177 void
   1178 ata_dmaerr(struct ata_drive_datas *drvp, int flags)
   1179 {
   1180 	/*
   1181 	 * Downgrade decision: if we get NERRS_MAX in NXFER.
   1182 	 * We start with n_dmaerrs set to NERRS_MAX-1 so that the
   1183 	 * first error within the first NXFER ops will immediatly trigger
   1184 	 * a downgrade.
   1185 	 * If we got an error and n_xfers is bigger than NXFER reset counters.
   1186 	 */
   1187 	drvp->n_dmaerrs++;
   1188 	if (drvp->n_dmaerrs >= NERRS_MAX && drvp->n_xfers <= NXFER) {
   1189 		ata_downgrade_mode(drvp, flags);
   1190 		drvp->n_dmaerrs = NERRS_MAX-1;
   1191 		drvp->n_xfers = 0;
   1192 		return;
   1193 	}
   1194 	if (drvp->n_xfers > NXFER) {
   1195 		drvp->n_dmaerrs = 1; /* just got an error */
   1196 		drvp->n_xfers = 1; /* restart counting from this error */
   1197 	}
   1198 }
   1199 #endif	/* NATA_DMA */
   1200 
   1201 /*
   1202  * freeze the queue and wait for the controller to be idle. Caller has to
   1203  * unfreeze/restart the queue
   1204  */
   1205 static void
   1206 ata_channel_idle(struct ata_channel *chp)
   1207 {
   1208 	ata_channel_lock(chp);
   1209 	ata_channel_freeze_locked(chp);
   1210 	while (chp->ch_queue->queue_active > 0) {
   1211 		chp->ch_queue->queue_flags |= QF_IDLE_WAIT;
   1212 		cv_timedwait(&chp->ch_queue->queue_idle, &chp->ch_lock, 1);
   1213 	}
   1214 	ata_channel_unlock(chp);
   1215 }
   1216 
   1217 /*
   1218  * Add a command to the queue and start controller.
   1219  *
   1220  * MUST BE CALLED AT splbio()!
   1221  */
   1222 void
   1223 ata_exec_xfer(struct ata_channel *chp, struct ata_xfer *xfer)
   1224 {
   1225 
   1226 	ATADEBUG_PRINT(("ata_exec_xfer %p channel %d drive %d\n", xfer,
   1227 	    chp->ch_channel, xfer->c_drive), DEBUG_XFERS);
   1228 
   1229 	/* complete xfer setup */
   1230 	xfer->c_chp = chp;
   1231 
   1232 	ata_channel_lock(chp);
   1233 
   1234 	/*
   1235 	 * Standard commands are added to the end of command list, but
   1236 	 * recovery commands must be run immediatelly.
   1237 	 */
   1238 	if ((xfer->c_flags & C_RECOVERY) == 0)
   1239 		TAILQ_INSERT_TAIL(&chp->ch_queue->queue_xfer, xfer,
   1240 		    c_xferchain);
   1241 	else
   1242 		TAILQ_INSERT_HEAD(&chp->ch_queue->queue_xfer, xfer,
   1243 		    c_xferchain);
   1244 
   1245 	/*
   1246 	 * if polling and can sleep, wait for the xfer to be at head of queue
   1247 	 */
   1248 	if ((xfer->c_flags & (C_POLL | C_WAIT)) ==  (C_POLL | C_WAIT)) {
   1249 		while (chp->ch_queue->queue_active > 0 ||
   1250 		    TAILQ_FIRST(&chp->ch_queue->queue_xfer) != xfer) {
   1251 			xfer->c_flags |= C_WAITACT;
   1252 			cv_wait(&xfer->c_active, &chp->ch_lock);
   1253 			xfer->c_flags &= ~C_WAITACT;
   1254 
   1255 			/*
   1256 			 * Free xfer now if it there was attempt to free it
   1257 			 * while we were waiting.
   1258 			 */
   1259 			if ((xfer->c_flags & (C_FREE|C_WAITTIMO)) == C_FREE) {
   1260 				ata_channel_unlock(chp);
   1261 
   1262 				ata_free_xfer(chp, xfer);
   1263 				return;
   1264 			}
   1265 		}
   1266 	}
   1267 
   1268 	ata_channel_unlock(chp);
   1269 
   1270 	ATADEBUG_PRINT(("atastart from ata_exec_xfer, flags 0x%x\n",
   1271 	    chp->ch_flags), DEBUG_XFERS);
   1272 	atastart(chp);
   1273 }
   1274 
   1275 /*
   1276  * Start I/O on a controller, for the given channel.
   1277  * The first xfer may be not for our channel if the channel queues
   1278  * are shared.
   1279  *
   1280  * MUST BE CALLED AT splbio()!
   1281  */
   1282 void
   1283 atastart(struct ata_channel *chp)
   1284 {
   1285 	struct atac_softc *atac = chp->ch_atac;
   1286 	struct ata_queue *chq = chp->ch_queue;
   1287 	struct ata_xfer *xfer, *axfer;
   1288 	bool recovery;
   1289 
   1290 #ifdef ATA_DEBUG
   1291 	int spl1, spl2;
   1292 
   1293 	spl1 = splbio();
   1294 	spl2 = splbio();
   1295 	if (spl2 != spl1) {
   1296 		printf("atastart: not at splbio()\n");
   1297 		panic("atastart");
   1298 	}
   1299 	splx(spl2);
   1300 	splx(spl1);
   1301 #endif /* ATA_DEBUG */
   1302 
   1303 	ata_channel_lock(chp);
   1304 
   1305 again:
   1306 	KASSERT(chq->queue_active <= chq->queue_openings);
   1307 	if (chq->queue_active == chq->queue_openings) {
   1308 		goto out; /* channel completely busy */
   1309 	}
   1310 
   1311 	/* is there a xfer ? */
   1312 	if ((xfer = TAILQ_FIRST(&chp->ch_queue->queue_xfer)) == NULL)
   1313 		goto out;
   1314 
   1315 	recovery = ISSET(xfer->c_flags, C_RECOVERY);
   1316 
   1317 	/* is the queue frozen? */
   1318 	if (__predict_false(!recovery && chq->queue_freeze > 0)) {
   1319 		if (chq->queue_flags & QF_IDLE_WAIT) {
   1320 			chq->queue_flags &= ~QF_IDLE_WAIT;
   1321 			cv_signal(&chp->ch_queue->queue_idle);
   1322 		}
   1323 		goto out; /* queue frozen */
   1324 	}
   1325 
   1326 	/* all xfers on same queue must belong to the same channel */
   1327 	KASSERT(xfer->c_chp == chp);
   1328 
   1329 	/*
   1330 	 * Can only take the command if there are no current active
   1331 	 * commands, or if the command is NCQ and the active commands are also
   1332 	 * NCQ. If PM is in use and HBA driver doesn't support/use FIS-based
   1333 	 * switching, can only send commands to single drive.
   1334 	 * Need only check first xfer.
   1335 	 * XXX FIS-based switching - revisit
   1336 	 */
   1337 	if (!recovery && (axfer = TAILQ_FIRST(&chp->ch_queue->active_xfers))) {
   1338 		if (!ISSET(xfer->c_flags, C_NCQ) ||
   1339 		    !ISSET(axfer->c_flags, C_NCQ) ||
   1340 		    xfer->c_drive != axfer->c_drive)
   1341 			goto out;
   1342 	}
   1343 
   1344 	struct ata_drive_datas * const drvp = &chp->ch_drive[xfer->c_drive];
   1345 
   1346 	/*
   1347 	 * if someone is waiting for the command to be active, wake it up
   1348 	 * and let it process the command
   1349 	 */
   1350 	if (xfer->c_flags & C_WAITACT) {
   1351 		ATADEBUG_PRINT(("atastart: xfer %p channel %d drive %d "
   1352 		    "wait active\n", xfer, chp->ch_channel, xfer->c_drive),
   1353 		    DEBUG_XFERS);
   1354 		cv_signal(&xfer->c_active);
   1355 		goto out;
   1356 	}
   1357 
   1358 	if (atac->atac_claim_hw)
   1359 		if (!atac->atac_claim_hw(chp, 0))
   1360 			goto out;
   1361 
   1362 	ATADEBUG_PRINT(("atastart: xfer %p channel %d drive %d\n", xfer,
   1363 	    chp->ch_channel, xfer->c_drive), DEBUG_XFERS);
   1364 	if (drvp->drive_flags & ATA_DRIVE_RESET) {
   1365 		drvp->drive_flags &= ~ATA_DRIVE_RESET;
   1366 		drvp->state = 0;
   1367 	}
   1368 
   1369 	if (ISSET(xfer->c_flags, C_NCQ))
   1370 		SET(chp->ch_flags, ATACH_NCQ);
   1371 	else
   1372 		CLR(chp->ch_flags, ATACH_NCQ);
   1373 
   1374 	ata_activate_xfer_locked(chp, xfer);
   1375 
   1376 	if (atac->atac_cap & ATAC_CAP_NOIRQ)
   1377 		KASSERT(xfer->c_flags & C_POLL);
   1378 
   1379 	switch (ata_xfer_start(xfer)) {
   1380 	case ATASTART_TH:
   1381 	case ATASTART_ABORT:
   1382 		/* don't start any further commands in this case */
   1383 		goto out;
   1384 	default:
   1385 		/* nothing to do */
   1386 		break;
   1387 	}
   1388 
   1389 	/* Queue more commands if possible, but not during recovery */
   1390 	if (!recovery && chq->queue_active < chq->queue_openings)
   1391 		goto again;
   1392 
   1393 out:
   1394 	ata_channel_unlock(chp);
   1395 }
   1396 
/*
 * Dispatch an xfer via its c_start hook and post-process the result.
 * Called (and returns) with the channel lock held; the lock is
 * temporarily dropped around the polling and abort callbacks.
 * Returns the ATASTART_* value from c_start.
 */
int
ata_xfer_start(struct ata_xfer *xfer)
{
	struct ata_channel *chp = xfer->c_chp;
	int rv;

	KASSERT(mutex_owned(&chp->ch_lock));

	rv = xfer->c_start(chp, xfer);
	switch (rv) {
	case ATASTART_STARTED:
		/* nothing to do */
		break;
	case ATASTART_TH:
		/* postpone xfer to thread */
		ata_thread_wake_locked(chp);
		break;
	case ATASTART_POLL:
		/* can happen even in thread context for some ATAPI devices */
		ata_channel_unlock(chp);
		KASSERT(xfer->c_poll != NULL);
		xfer->c_poll(chp, xfer);
		ata_channel_lock(chp);
		break;
	case ATASTART_ABORT:
		ata_channel_unlock(chp);
		KASSERT(xfer->c_abort != NULL);
		xfer->c_abort(chp, xfer);
		ata_channel_lock(chp);
		break;
	}

	return rv;
}
   1431 
   1432 /*
   1433  * Does it's own locking, does not require splbio().
   1434  * flags - whether to block waiting for free xfer
   1435  * openings - limit of openings supported by device, <= 0 means tag not
   1436  *     relevant, and any available xfer can be returned
   1437  */
   1438 struct ata_xfer *
   1439 ata_get_xfer_ext(struct ata_channel *chp, int flags, uint8_t openings)
   1440 {
   1441 	struct ata_queue *chq = chp->ch_queue;
   1442 	struct ata_xfer *xfer = NULL;
   1443 	uint32_t avail, slot, mask;
   1444 	int error;
   1445 
   1446 	ATADEBUG_PRINT(("%s: channel %d flags %x openings %d\n",
   1447 	    __func__, chp->ch_channel, flags, openings),
   1448 	    DEBUG_XFERS);
   1449 
   1450 	ata_channel_lock(chp);
   1451 
   1452 	/*
   1453 	 * When openings is just 1, can't reserve anything for
   1454 	 * recovery. KASSERT() here is to catch code which naively
   1455 	 * relies on C_RECOVERY to work under this condition.
   1456 	 */
   1457 	KASSERT((flags & C_RECOVERY) == 0 || chq->queue_openings > 1);
   1458 
   1459 	if (flags & C_RECOVERY) {
   1460 		mask = UINT32_MAX;
   1461 	} else {
   1462 		if (openings <= 0 || openings > chq->queue_openings)
   1463 			openings = chq->queue_openings;
   1464 
   1465 		if (openings > 1) {
   1466 			mask = __BIT(openings - 1) - 1;
   1467 		} else {
   1468 			mask = UINT32_MAX;
   1469 		}
   1470 	}
   1471 
   1472 retry:
   1473 	avail = ffs32(chq->queue_xfers_avail & mask);
   1474 	if (avail == 0) {
   1475 		/*
   1476 		 * Catch code which tries to get another recovery xfer while
   1477 		 * already holding one (wrong recursion).
   1478 		 */
   1479 		KASSERTMSG((flags & C_RECOVERY) == 0,
   1480 		    "recovery xfer busy openings %d mask %x avail %x",
   1481 		    openings, mask, chq->queue_xfers_avail);
   1482 
   1483 		if (flags & C_WAIT) {
   1484 			chq->queue_flags |= QF_NEED_XFER;
   1485 			error = cv_wait_sig(&chq->queue_busy, &chp->ch_lock);
   1486 			if (error == 0)
   1487 				goto retry;
   1488 		}
   1489 
   1490 		goto out;
   1491 	}
   1492 
   1493 	slot = avail - 1;
   1494 	xfer = &chq->queue_xfers[slot];
   1495 	chq->queue_xfers_avail &= ~__BIT(slot);
   1496 
   1497 	KASSERT((chq->active_xfers_used & __BIT(slot)) == 0);
   1498 
   1499 	/* zero everything after the callout member */
   1500 	memset(&xfer->c_startzero, 0,
   1501 	    sizeof(struct ata_xfer) - offsetof(struct ata_xfer, c_startzero));
   1502 
   1503 out:
   1504 	ata_channel_unlock(chp);
   1505 	return xfer;
   1506 }
   1507 
   1508 /*
   1509  * ata_deactivate_xfer() must be always called prior to ata_free_xfer()
   1510  */
   1511 void
   1512 ata_free_xfer(struct ata_channel *chp, struct ata_xfer *xfer)
   1513 {
   1514 	struct ata_queue *chq = chp->ch_queue;
   1515 
   1516 	ata_channel_lock(chp);
   1517 
   1518 	if (xfer->c_flags & (C_WAITACT|C_WAITTIMO)) {
   1519 		/* Someone is waiting for this xfer, so we can't free now */
   1520 		xfer->c_flags |= C_FREE;
   1521 		cv_signal(&xfer->c_active);
   1522 		goto out;
   1523 	}
   1524 
   1525 #if NATA_PIOBM		/* XXX wdc dependent code */
   1526 	if (xfer->c_flags & C_PIOBM) {
   1527 		struct wdc_softc *wdc = CHAN_TO_WDC(chp);
   1528 
   1529 		/* finish the busmastering PIO */
   1530 		(*wdc->piobm_done)(wdc->dma_arg,
   1531 		    chp->ch_channel, xfer->c_drive);
   1532 		chp->ch_flags &= ~(ATACH_DMA_WAIT | ATACH_PIOBM_WAIT);
   1533 	}
   1534 #endif
   1535 
   1536 	if (chp->ch_atac->atac_free_hw)
   1537 		chp->ch_atac->atac_free_hw(chp);
   1538 
   1539 	KASSERT((chq->active_xfers_used & __BIT(xfer->c_slot)) == 0);
   1540 	KASSERT((chq->queue_xfers_avail & __BIT(xfer->c_slot)) == 0);
   1541 	chq->queue_xfers_avail |= __BIT(xfer->c_slot);
   1542 
   1543 out:
   1544 	if (chq->queue_flags & QF_NEED_XFER) {
   1545 		chq->queue_flags &= ~QF_NEED_XFER;
   1546 		cv_broadcast(&chq->queue_busy);
   1547 	}
   1548 
   1549 	ata_channel_unlock(chp);
   1550 }
   1551 
/*
 * Move an xfer from the pending queue to the active list and account
 * for it.  Channel lock must be held.
 */
static void
ata_activate_xfer_locked(struct ata_channel *chp, struct ata_xfer *xfer)
{
	struct ata_queue * const chq = chp->ch_queue;

	KASSERT(mutex_owned(&chp->ch_lock));

	KASSERT(chq->queue_active < chq->queue_openings);
	KASSERT((chq->active_xfers_used & __BIT(xfer->c_slot)) == 0);

	TAILQ_REMOVE(&chq->queue_xfer, xfer, c_xferchain);
	if ((xfer->c_flags & C_RECOVERY) == 0)
		TAILQ_INSERT_TAIL(&chq->active_xfers, xfer, c_activechain);
	else {
		/*
		 * Must go to head, so that ata_queue_get_active_xfer()
		 * returns the recovery command, and not some other
		 * random active transfer.
		 */
		TAILQ_INSERT_HEAD(&chq->active_xfers, xfer, c_activechain);
	}
	chq->active_xfers_used |= __BIT(xfer->c_slot);
	chq->queue_active++;
}
   1576 
/*
 * Remove a finished xfer from the active list.  Counterpart of
 * ata_activate_xfer_locked(); must be called before ata_free_xfer().
 */
void
ata_deactivate_xfer(struct ata_channel *chp, struct ata_xfer *xfer)
{
	struct ata_queue * const chq = chp->ch_queue;

	ata_channel_lock(chp);

	KASSERT(chq->queue_active > 0);
	KASSERT((chq->active_xfers_used & __BIT(xfer->c_slot)) != 0);

	callout_stop(&xfer->c_timo_callout);

	/*
	 * If the timeout callout is already running, flag the race so
	 * ata_timo_xfer_check()/ata_free_xfer() can resolve it.
	 */
	if (callout_invoking(&xfer->c_timo_callout))
		xfer->c_flags |= C_WAITTIMO;

	TAILQ_REMOVE(&chq->active_xfers, xfer, c_activechain);
	chq->active_xfers_used &= ~__BIT(xfer->c_slot);
	chq->queue_active--;

	ata_channel_unlock(chp);
}
   1598 
   1599 /*
   1600  * Called in c_intr hook. Must be called before before any deactivations
   1601  * are done - if there is drain pending, it calls c_kill_xfer hook which
   1602  * deactivates the xfer.
   1603  * Calls c_kill_xfer with channel lock free.
   1604  * Returns true if caller should just exit without further processing.
   1605  * Caller must not further access any part of xfer or any related controller
   1606  * structures in that case, it should just return.
   1607  */
   1608 bool
   1609 ata_waitdrain_xfer_check(struct ata_channel *chp, struct ata_xfer *xfer)
   1610 {
   1611 	int drive = xfer->c_drive;
   1612 	bool draining = false;
   1613 
   1614 	ata_channel_lock(chp);
   1615 
   1616 	if (chp->ch_drive[drive].drive_flags & ATA_DRIVE_WAITDRAIN) {
   1617 		ata_channel_unlock(chp);
   1618 
   1619 		(*xfer->c_kill_xfer)(chp, xfer, KILL_GONE);
   1620 
   1621 		ata_channel_lock(chp);
   1622 		chp->ch_drive[drive].drive_flags &= ~ATA_DRIVE_WAITDRAIN;
   1623 		cv_signal(&chp->ch_queue->queue_drain);
   1624 		draining = true;
   1625 	}
   1626 
   1627 	ata_channel_unlock(chp);
   1628 
   1629 	return draining;
   1630 }
   1631 
   1632 /*
   1633  * Check for race of normal transfer handling vs. timeout.
   1634  */
   1635 bool
   1636 ata_timo_xfer_check(struct ata_xfer *xfer)
   1637 {
   1638 	struct ata_channel *chp = xfer->c_chp;
   1639 	struct ata_drive_datas *drvp = &chp->ch_drive[xfer->c_drive];
   1640 
   1641 	ata_channel_lock(chp);
   1642 
   1643 	callout_ack(&xfer->c_timo_callout);
   1644 
   1645 	if (xfer->c_flags & C_WAITTIMO) {
   1646 		xfer->c_flags &= ~C_WAITTIMO;
   1647 
   1648 		/* Handle race vs. ata_free_xfer() */
   1649 		if (xfer->c_flags & C_FREE) {
   1650 			xfer->c_flags &= ~C_FREE;
   1651 			ata_channel_unlock(chp);
   1652 
   1653 	    		aprint_normal_dev(drvp->drv_softc,
   1654 			    "xfer %d freed while invoking timeout\n",
   1655 			    xfer->c_slot);
   1656 
   1657 			ata_free_xfer(chp, xfer);
   1658 			return true;
   1659 		}
   1660 
   1661 		/* Race vs. callout_stop() in ata_deactivate_xfer() */
   1662 		ata_channel_unlock(chp);
   1663 
   1664 	    	aprint_normal_dev(drvp->drv_softc,
   1665 		    "xfer %d deactivated while invoking timeout\n",
   1666 		    xfer->c_slot);
   1667 		return true;
   1668 	}
   1669 
   1670 	ata_channel_unlock(chp);
   1671 
   1672 	/* No race, proceed with timeout handling */
   1673 	return false;
   1674 }
   1675 
   1676 void
   1677 ata_timeout(void *v)
   1678 {
   1679 	struct ata_xfer *xfer = v;
   1680 	int s;
   1681 
   1682 	ATADEBUG_PRINT(("%s: slot %d\n", __func__, xfer->c_slot),
   1683 	    DEBUG_FUNCS|DEBUG_XFERS);
   1684 
   1685 	s = splbio();				/* XXX MPSAFE */
   1686 
   1687 	if (ata_timo_xfer_check(xfer)) {
   1688 		/* Already logged */
   1689 		goto out;
   1690 	}
   1691 
   1692 	/* Mark as timed out. Do not print anything, wd(4) will. */
   1693 	xfer->c_flags |= C_TIMEOU;
   1694 	xfer->c_intr(xfer->c_chp, xfer, 0);
   1695 
   1696 out:
   1697 	splx(s);
   1698 }
   1699 
   1700 /*
   1701  * Kill off all active xfers for a ata_channel.
   1702  *
   1703  * Must be called with channel lock held.
   1704  */
   1705 void
   1706 ata_kill_active(struct ata_channel *chp, int reason, int flags)
   1707 {
   1708 	struct ata_queue * const chq = chp->ch_queue;
   1709 	struct ata_xfer *xfer, *xfernext;
   1710 
   1711 	KASSERT(mutex_owned(&chp->ch_lock));
   1712 
   1713 	TAILQ_FOREACH_SAFE(xfer, &chq->active_xfers, c_activechain, xfernext) {
   1714 		(*xfer->c_kill_xfer)(xfer->c_chp, xfer, reason);
   1715 	}
   1716 
   1717 	if (flags & AT_RST_EMERG)
   1718 		ata_queue_reset(chq);
   1719 }
   1720 
   1721 /*
   1722  * Kill off all pending xfers for a drive.
   1723  */
   1724 void
   1725 ata_kill_pending(struct ata_drive_datas *drvp)
   1726 {
   1727 	struct ata_channel * const chp = drvp->chnl_softc;
   1728 	struct ata_queue * const chq = chp->ch_queue;
   1729 	struct ata_xfer *xfer, *xfernext;
   1730 
   1731 	ata_channel_lock(chp);
   1732 
   1733 	/* Kill all pending transfers */
   1734 	TAILQ_FOREACH_SAFE(xfer, &chq->queue_xfer, c_xferchain, xfernext) {
   1735 		KASSERT(xfer->c_chp == chp);
   1736 
   1737 		if (xfer->c_drive != drvp->drive)
   1738 			continue;
   1739 
   1740 		TAILQ_REMOVE(&chp->ch_queue->queue_xfer, xfer, c_xferchain);
   1741 
   1742 		/*
   1743 		 * Keep the lock, so that we get deadlock (and 'locking against
   1744 		 * myself' with LOCKDEBUG), instead of silent
   1745 		 * data corruption, if the hook tries to call back into
   1746 		 * middle layer for inactive xfer.
   1747 		 */
   1748 		(*xfer->c_kill_xfer)(chp, xfer, KILL_GONE_INACTIVE);
   1749 	}
   1750 
   1751 	/* Wait until all active transfers on the drive finish */
   1752 	while (chq->queue_active > 0) {
   1753 		bool drv_active = false;
   1754 
   1755 		TAILQ_FOREACH(xfer, &chq->active_xfers, c_activechain) {
   1756 			KASSERT(xfer->c_chp == chp);
   1757 
   1758 			if (xfer->c_drive == drvp->drive) {
   1759 				drv_active = true;
   1760 				break;
   1761 			}
   1762 		}
   1763 
   1764 		if (!drv_active) {
   1765 			/* all finished */
   1766 			break;
   1767 		}
   1768 
   1769 		drvp->drive_flags |= ATA_DRIVE_WAITDRAIN;
   1770 		cv_wait(&chq->queue_drain, &chp->ch_lock);
   1771 	}
   1772 
   1773 	ata_channel_unlock(chp);
   1774 }
   1775 
   1776 static void
   1777 ata_channel_freeze_locked(struct ata_channel *chp)
   1778 {
   1779 	chp->ch_queue->queue_freeze++;
   1780 }
   1781 
/*
 * Freeze the channel queue, taking and releasing the channel lock.
 * Unfreeze with ata_channel_thaw(); freezes nest via the counter.
 */
void
ata_channel_freeze(struct ata_channel *chp)
{
	ata_channel_lock(chp);
	ata_channel_freeze_locked(chp);
	ata_channel_unlock(chp);
}
   1789 
/*
 * Drop one level of queue freeze.  Channel lock must be held.
 */
static void
ata_channel_thaw_locked(struct ata_channel *chp)
{
	KASSERT(mutex_owned(&chp->ch_lock));

	chp->ch_queue->queue_freeze--;
}
   1797 
/*
 * Thaw the channel queue, taking and releasing the channel lock.
 * Counterpart of ata_channel_freeze().
 */
void
ata_channel_thaw(struct ata_channel *chp)
{
	ata_channel_lock(chp);
	ata_channel_thaw_locked(chp);
	ata_channel_unlock(chp);
}
   1805 
   1806 /*
   1807  * ata_reset_channel:
   1808  *
   1809  *	Reset and ATA channel.
   1810  *
   1811  *	MUST BE CALLED AT splbio()!
   1812  */
   1813 void
   1814 ata_reset_channel(struct ata_channel *chp, int flags)
   1815 {
   1816 	struct atac_softc *atac = chp->ch_atac;
   1817 	int drive;
   1818 
   1819 #ifdef ATA_DEBUG
   1820 	int spl1, spl2;
   1821 
   1822 	spl1 = splbio();
   1823 	spl2 = splbio();
   1824 	if (spl2 != spl1) {
   1825 		printf("ata_reset_channel: not at splbio()\n");
   1826 		panic("ata_reset_channel");
   1827 	}
   1828 	splx(spl2);
   1829 	splx(spl1);
   1830 #endif /* ATA_DEBUG */
   1831 
   1832 	ata_channel_freeze(chp);
   1833 
   1834 	/*
   1835 	 * If we can poll or wait it's OK, otherwise wake up the
   1836 	 * kernel thread to do it for us.
   1837 	 */
   1838 	ATADEBUG_PRINT(("ata_reset_channel flags 0x%x ch_flags 0x%x\n",
   1839 	    flags, chp->ch_flags), DEBUG_FUNCS | DEBUG_XFERS);
   1840 	if ((flags & (AT_POLL | AT_WAIT)) == 0) {
   1841 		if (chp->ch_flags & ATACH_TH_RESET) {
   1842 			/* No need to schedule a reset more than one time. */
   1843 			ata_channel_thaw(chp);
   1844 			return;
   1845 		}
   1846 		ata_channel_lock(chp);
   1847 		chp->ch_flags |= ATACH_TH_RESET;
   1848 		chp->ch_reset_flags = flags & AT_RST_EMERG;
   1849 		cv_signal(&chp->ch_thr_idle);
   1850 		ata_channel_unlock(chp);
   1851 		return;
   1852 	}
   1853 
   1854 	(*atac->atac_bustype_ata->ata_reset_channel)(chp, flags);
   1855 
   1856 	ata_channel_lock(chp);
   1857 	KASSERT(chp->ch_ndrives == 0 || chp->ch_drive != NULL);
   1858 	for (drive = 0; drive < chp->ch_ndrives; drive++)
   1859 		chp->ch_drive[drive].state = 0;
   1860 
   1861 	chp->ch_flags &= ~ATACH_TH_RESET;
   1862 	ata_channel_unlock(chp);
   1863 
   1864 	if (flags & AT_RST_EMERG) {
   1865 		/* make sure that we can use polled commands */
   1866 		ata_queue_reset(chp->ch_queue);
   1867 	} else {
   1868 		ata_channel_thaw(chp);
   1869 		atastart(chp);
   1870 	}
   1871 }
   1872 
   1873 int
   1874 ata_addref(struct ata_channel *chp)
   1875 {
   1876 	struct atac_softc *atac = chp->ch_atac;
   1877 	struct scsipi_adapter *adapt = &atac->atac_atapi_adapter._generic;
   1878 	int s, error = 0;
   1879 
   1880 	s = splbio();
   1881 	if (adapt->adapt_refcnt++ == 0 &&
   1882 	    adapt->adapt_enable != NULL) {
   1883 		error = (*adapt->adapt_enable)(atac->atac_dev, 1);
   1884 		if (error)
   1885 			adapt->adapt_refcnt--;
   1886 	}
   1887 	splx(s);
   1888 	return (error);
   1889 }
   1890 
/*
 * Drop a reference on the adapter; when the last reference goes away,
 * disable the controller via the adapt_enable hook if one is provided.
 * Counterpart of ata_addref().
 */
void
ata_delref(struct ata_channel *chp)
{
	struct atac_softc *atac = chp->ch_atac;
	struct scsipi_adapter *adapt = &atac->atac_atapi_adapter._generic;
	int s;

	s = splbio();
	if (adapt->adapt_refcnt-- == 1 &&
	    adapt->adapt_enable != NULL)
		(void) (*adapt->adapt_enable)(atac->atac_dev, 0);
	splx(s);
}
   1904 
/*
 * Print (via aprint_verbose) the transfer modes in use for each
 * attached drive on the channel: PIO mode and, where configured in,
 * DMA/Ultra-DMA mode, NCQ tag count and FUA support.
 */
void
ata_print_modes(struct ata_channel *chp)
{
	struct atac_softc *atac = chp->ch_atac;
	int drive;
	struct ata_drive_datas *drvp;

	KASSERT(chp->ch_ndrives == 0 || chp->ch_drive != NULL);
	for (drive = 0; drive < chp->ch_ndrives; drive++) {
		drvp = &chp->ch_drive[drive];
		/* skip empty slots and drives without an attached child */
		if (drvp->drive_type == ATA_DRIVET_NONE ||
		    drvp->drv_softc == NULL)
			continue;
		aprint_verbose("%s(%s:%d:%d): using PIO mode %d",
			device_xname(drvp->drv_softc),
			device_xname(atac->atac_dev),
			chp->ch_channel, drvp->drive, drvp->PIO_mode);
#if NATA_DMA
		if (drvp->drive_flags & ATA_DRIVE_DMA)
			aprint_verbose(", DMA mode %d", drvp->DMA_mode);
#if NATA_UDMA
		if (drvp->drive_flags & ATA_DRIVE_UDMA) {
			aprint_verbose(", Ultra-DMA mode %d", drvp->UDMA_mode);
			if (drvp->UDMA_mode == 2)
				aprint_verbose(" (Ultra/33)");
			else if (drvp->UDMA_mode == 4)
				aprint_verbose(" (Ultra/66)");
			else if (drvp->UDMA_mode == 5)
				aprint_verbose(" (Ultra/100)");
			else if (drvp->UDMA_mode == 6)
				aprint_verbose(" (Ultra/133)");
		}
#endif	/* NATA_UDMA */
#endif	/* NATA_DMA */
#if NATA_DMA || NATA_PIOBM
		if (0
#if NATA_DMA
		    || (drvp->drive_flags & (ATA_DRIVE_DMA | ATA_DRIVE_UDMA))
#endif
#if NATA_PIOBM
		    /* PIOBM capable controllers use DMA for PIO commands */
		    || (atac->atac_cap & ATAC_CAP_PIOBM)
#endif
		    )
			aprint_verbose(" (using DMA)");

		if (drvp->drive_flags & ATA_DRIVE_NCQ) {
			aprint_verbose(", NCQ (%d tags)%s",
			    ATA_REAL_OPENINGS(chp->ch_queue->queue_openings),
			    (drvp->drive_flags & ATA_DRIVE_NCQ_PRIO)
			    ? " w/PRIO" : "");
		} else if (drvp->drive_flags & ATA_DRIVE_WFUA)
			aprint_verbose(", WRITE DMA FUA EXT");

#endif	/* NATA_DMA || NATA_PIOBM */
		aprint_verbose("\n");
	}
}
   1963 
#if NATA_DMA
/*
 * Downgrade the transfer mode of a drive after an error.  Returns 1 if
 * a downgrade was possible, 0 otherwise.
 *
 * MUST BE CALLED AT splbio()!
 */
int
ata_downgrade_mode(struct ata_drive_datas *drvp, int flags)
{
	struct ata_channel *chp = drvp->chnl_softc;
	struct atac_softc *atac = chp->ch_atac;
	device_t drv_dev = drvp->drv_softc;
	int cf_flags = device_cfdata(drv_dev)->cf_flags;

	/* if drive or controller don't know its mode, we can't do much */
	if ((drvp->drive_flags & ATA_DRIVE_MODE) == 0 ||
	    (atac->atac_set_modes == NULL))
		return 0;
	/* current drive mode was set by a config flag, leave it that way */
	if ((cf_flags & ATA_CONFIG_PIO_SET) ||
	    (cf_flags & ATA_CONFIG_DMA_SET) ||
	    (cf_flags & ATA_CONFIG_UDMA_SET))
		return 0;

#if NATA_UDMA
	/*
	 * If we were using Ultra-DMA mode, downgrade to the next lower mode.
	 *
	 * NB: the "else" is kept inside the conditional block so the
	 * following "if" still parses when NATA_UDMA is 0; previously the
	 * dangling "else if" would not compile in that configuration.
	 */
	if ((drvp->drive_flags & ATA_DRIVE_UDMA) && drvp->UDMA_mode >= 2) {
		drvp->UDMA_mode--;
		aprint_error_dev(drv_dev,
		    "transfer error, downgrading to Ultra-DMA mode %d\n",
		    drvp->UDMA_mode);
	} else
#endif
	/*
	 * If we were using ultra-DMA, don't downgrade to multiword DMA;
	 * fall straight back to the best PIO mode the drive supports.
	 */
	if (drvp->drive_flags & (ATA_DRIVE_DMA | ATA_DRIVE_UDMA)) {
		drvp->drive_flags &= ~(ATA_DRIVE_DMA | ATA_DRIVE_UDMA);
		drvp->PIO_mode = drvp->PIO_cap;
		aprint_error_dev(drv_dev,
		    "transfer error, downgrading to PIO mode %d\n",
		    drvp->PIO_mode);
	} else /* already using PIO, can't downgrade */
		return 0;

	(*atac->atac_set_modes)(chp);
	ata_print_modes(chp);
	/* reset the channel, which will schedule all drives for setup */
	ata_reset_channel(chp, flags);
	return 1;
}
#endif	/* NATA_DMA */
   2020 
   2021 /*
   2022  * Probe drive's capabilities, for use by the controller later
   2023  * Assumes drvp points to an existing drive.
   2024  */
   2025 void
   2026 ata_probe_caps(struct ata_drive_datas *drvp)
   2027 {
   2028 	struct ataparams params, params2;
   2029 	struct ata_channel *chp = drvp->chnl_softc;
   2030 	struct atac_softc *atac = chp->ch_atac;
   2031 	device_t drv_dev = drvp->drv_softc;
   2032 	int i, printed = 0;
   2033 	const char *sep = "";
   2034 	int cf_flags;
   2035 
   2036 	if (ata_get_params(drvp, AT_WAIT, &params) != CMD_OK) {
   2037 		/* IDENTIFY failed. Can't tell more about the device */
   2038 		return;
   2039 	}
   2040 	if ((atac->atac_cap & (ATAC_CAP_DATA16 | ATAC_CAP_DATA32)) ==
   2041 	    (ATAC_CAP_DATA16 | ATAC_CAP_DATA32)) {
   2042 		/*
   2043 		 * Controller claims 16 and 32 bit transfers.
   2044 		 * Re-do an IDENTIFY with 32-bit transfers,
   2045 		 * and compare results.
   2046 		 */
   2047 		ata_channel_lock(chp);
   2048 		drvp->drive_flags |= ATA_DRIVE_CAP32;
   2049 		ata_channel_unlock(chp);
   2050 		ata_get_params(drvp, AT_WAIT, &params2);
   2051 		if (memcmp(&params, &params2, sizeof(struct ataparams)) != 0) {
   2052 			/* Not good. fall back to 16bits */
   2053 			ata_channel_lock(chp);
   2054 			drvp->drive_flags &= ~ATA_DRIVE_CAP32;
   2055 			ata_channel_unlock(chp);
   2056 		} else {
   2057 			aprint_verbose_dev(drv_dev, "32-bit data port\n");
   2058 		}
   2059 	}
   2060 #if 0 /* Some ultra-DMA drives claims to only support ATA-3. sigh */
   2061 	if (params.atap_ata_major > 0x01 &&
   2062 	    params.atap_ata_major != 0xffff) {
   2063 		for (i = 14; i > 0; i--) {
   2064 			if (params.atap_ata_major & (1 << i)) {
   2065 				aprint_verbose_dev(drv_dev,
   2066 				    "ATA version %d\n", i);
   2067 				drvp->ata_vers = i;
   2068 				break;
   2069 			}
   2070 		}
   2071 	}
   2072 #endif
   2073 
   2074 	/* An ATAPI device is at last PIO mode 3 */
   2075 	if (drvp->drive_type == ATA_DRIVET_ATAPI)
   2076 		drvp->PIO_mode = 3;
   2077 
   2078 	/*
   2079 	 * It's not in the specs, but it seems that some drive
   2080 	 * returns 0xffff in atap_extensions when this field is invalid
   2081 	 */
   2082 	if (params.atap_extensions != 0xffff &&
   2083 	    (params.atap_extensions & WDC_EXT_MODES)) {
   2084 		/*
   2085 		 * XXX some drives report something wrong here (they claim to
   2086 		 * support PIO mode 8 !). As mode is coded on 3 bits in
   2087 		 * SET FEATURE, limit it to 7 (so limit i to 4).
   2088 		 * If higher mode than 7 is found, abort.
   2089 		 */
   2090 		for (i = 7; i >= 0; i--) {
   2091 			if ((params.atap_piomode_supp & (1 << i)) == 0)
   2092 				continue;
   2093 			if (i > 4)
   2094 				return;
   2095 			/*
   2096 			 * See if mode is accepted.
   2097 			 * If the controller can't set its PIO mode,
   2098 			 * assume the defaults are good, so don't try
   2099 			 * to set it
   2100 			 */
   2101 			if (atac->atac_set_modes)
   2102 				/*
   2103 				 * It's OK to pool here, it's fast enough
   2104 				 * to not bother waiting for interrupt
   2105 				 */
   2106 				if (ata_set_mode(drvp, 0x08 | (i + 3),
   2107 				   AT_WAIT) != CMD_OK)
   2108 					continue;
   2109 			if (!printed) {
   2110 				aprint_verbose_dev(drv_dev,
   2111 				    "drive supports PIO mode %d", i + 3);
   2112 				sep = ",";
   2113 				printed = 1;
   2114 			}
   2115 			/*
   2116 			 * If controller's driver can't set its PIO mode,
   2117 			 * get the highter one for the drive.
   2118 			 */
   2119 			if (atac->atac_set_modes == NULL ||
   2120 			    atac->atac_pio_cap >= i + 3) {
   2121 				drvp->PIO_mode = i + 3;
   2122 				drvp->PIO_cap = i + 3;
   2123 				break;
   2124 			}
   2125 		}
   2126 		if (!printed) {
   2127 			/*
   2128 			 * We didn't find a valid PIO mode.
   2129 			 * Assume the values returned for DMA are buggy too
   2130 			 */
   2131 			return;
   2132 		}
   2133 		ata_channel_lock(chp);
   2134 		drvp->drive_flags |= ATA_DRIVE_MODE;
   2135 		ata_channel_unlock(chp);
   2136 		printed = 0;
   2137 		for (i = 7; i >= 0; i--) {
   2138 			if ((params.atap_dmamode_supp & (1 << i)) == 0)
   2139 				continue;
   2140 #if NATA_DMA
   2141 			if ((atac->atac_cap & ATAC_CAP_DMA) &&
   2142 			    atac->atac_set_modes != NULL)
   2143 				if (ata_set_mode(drvp, 0x20 | i, AT_WAIT)
   2144 				    != CMD_OK)
   2145 					continue;
   2146 #endif
   2147 			if (!printed) {
   2148 				aprint_verbose("%s DMA mode %d", sep, i);
   2149 				sep = ",";
   2150 				printed = 1;
   2151 			}
   2152 #if NATA_DMA
   2153 			if (atac->atac_cap & ATAC_CAP_DMA) {
   2154 				if (atac->atac_set_modes != NULL &&
   2155 				    atac->atac_dma_cap < i)
   2156 					continue;
   2157 				drvp->DMA_mode = i;
   2158 				drvp->DMA_cap = i;
   2159 				ata_channel_lock(chp);
   2160 				drvp->drive_flags |= ATA_DRIVE_DMA;
   2161 				ata_channel_unlock(chp);
   2162 			}
   2163 #endif
   2164 			break;
   2165 		}
   2166 		if (params.atap_extensions & WDC_EXT_UDMA_MODES) {
   2167 			printed = 0;
   2168 			for (i = 7; i >= 0; i--) {
   2169 				if ((params.atap_udmamode_supp & (1 << i))
   2170 				    == 0)
   2171 					continue;
   2172 #if NATA_UDMA
   2173 				if (atac->atac_set_modes != NULL &&
   2174 				    (atac->atac_cap & ATAC_CAP_UDMA))
   2175 					if (ata_set_mode(drvp, 0x40 | i,
   2176 					    AT_WAIT) != CMD_OK)
   2177 						continue;
   2178 #endif
   2179 				if (!printed) {
   2180 					aprint_verbose("%s Ultra-DMA mode %d",
   2181 					    sep, i);
   2182 					if (i == 2)
   2183 						aprint_verbose(" (Ultra/33)");
   2184 					else if (i == 4)
   2185 						aprint_verbose(" (Ultra/66)");
   2186 					else if (i == 5)
   2187 						aprint_verbose(" (Ultra/100)");
   2188 					else if (i == 6)
   2189 						aprint_verbose(" (Ultra/133)");
   2190 					sep = ",";
   2191 					printed = 1;
   2192 				}
   2193 #if NATA_UDMA
   2194 				if (atac->atac_cap & ATAC_CAP_UDMA) {
   2195 					if (atac->atac_set_modes != NULL &&
   2196 					    atac->atac_udma_cap < i)
   2197 						continue;
   2198 					drvp->UDMA_mode = i;
   2199 					drvp->UDMA_cap = i;
   2200 					ata_channel_lock(chp);
   2201 					drvp->drive_flags |= ATA_DRIVE_UDMA;
   2202 					ata_channel_unlock(chp);
   2203 				}
   2204 #endif
   2205 				break;
   2206 			}
   2207 		}
   2208 	}
   2209 
   2210 	ata_channel_lock(chp);
   2211 	drvp->drive_flags &= ~ATA_DRIVE_NOSTREAM;
   2212 	if (drvp->drive_type == ATA_DRIVET_ATAPI) {
   2213 		if (atac->atac_cap & ATAC_CAP_ATAPI_NOSTREAM)
   2214 			drvp->drive_flags |= ATA_DRIVE_NOSTREAM;
   2215 	} else {
   2216 		if (atac->atac_cap & ATAC_CAP_ATA_NOSTREAM)
   2217 			drvp->drive_flags |= ATA_DRIVE_NOSTREAM;
   2218 	}
   2219 	ata_channel_unlock(chp);
   2220 
   2221 	/* Try to guess ATA version here, if it didn't get reported */
   2222 	if (drvp->ata_vers == 0) {
   2223 #if NATA_UDMA
   2224 		if (drvp->drive_flags & ATA_DRIVE_UDMA)
   2225 			drvp->ata_vers = 4; /* should be at last ATA-4 */
   2226 		else
   2227 #endif
   2228 		if (drvp->PIO_cap > 2)
   2229 			drvp->ata_vers = 2; /* should be at last ATA-2 */
   2230 	}
   2231 	cf_flags = device_cfdata(drv_dev)->cf_flags;
   2232 	if (cf_flags & ATA_CONFIG_PIO_SET) {
   2233 		ata_channel_lock(chp);
   2234 		drvp->PIO_mode =
   2235 		    (cf_flags & ATA_CONFIG_PIO_MODES) >> ATA_CONFIG_PIO_OFF;
   2236 		drvp->drive_flags |= ATA_DRIVE_MODE;
   2237 		ata_channel_unlock(chp);
   2238 	}
   2239 #if NATA_DMA
   2240 	if ((atac->atac_cap & ATAC_CAP_DMA) == 0) {
   2241 		/* don't care about DMA modes */
   2242 		return;
   2243 	}
   2244 	if (cf_flags & ATA_CONFIG_DMA_SET) {
   2245 		ata_channel_lock(chp);
   2246 		if ((cf_flags & ATA_CONFIG_DMA_MODES) ==
   2247 		    ATA_CONFIG_DMA_DISABLE) {
   2248 			drvp->drive_flags &= ~ATA_DRIVE_DMA;
   2249 		} else {
   2250 			drvp->DMA_mode = (cf_flags & ATA_CONFIG_DMA_MODES) >>
   2251 			    ATA_CONFIG_DMA_OFF;
   2252 			drvp->drive_flags |= ATA_DRIVE_DMA | ATA_DRIVE_MODE;
   2253 		}
   2254 		ata_channel_unlock(chp);
   2255 	}
   2256 
   2257 	/*
   2258 	 * Probe WRITE DMA FUA EXT. Support is mandatory for devices
   2259 	 * supporting LBA48, but nevertheless confirm with the feature flag.
   2260 	 */
   2261 	if (drvp->drive_flags & ATA_DRIVE_DMA) {
   2262 		if ((params.atap_cmd2_en & ATA_CMD2_LBA48) != 0
   2263 		    && (params.atap_cmd_def & ATA_CMDE_WFE)) {
   2264 			drvp->drive_flags |= ATA_DRIVE_WFUA;
   2265 			aprint_verbose("%s WRITE DMA FUA", sep);
   2266 			sep = ",";
   2267 		}
   2268 	}
   2269 
   2270 	/* Probe NCQ support - READ/WRITE FPDMA QUEUED command support */
   2271 	ata_channel_lock(chp);
   2272 	drvp->drv_openings = 1;
   2273 	if (params.atap_sata_caps & SATA_NATIVE_CMDQ) {
   2274 		if (atac->atac_cap & ATAC_CAP_NCQ)
   2275 			drvp->drive_flags |= ATA_DRIVE_NCQ;
   2276 		drvp->drv_openings =
   2277 		    (params.atap_queuedepth & WDC_QUEUE_DEPTH_MASK) + 1;
   2278 		aprint_verbose("%s NCQ (%d tags)", sep, drvp->drv_openings);
   2279 		sep = ",";
   2280 
   2281 		if (params.atap_sata_caps & SATA_NCQ_PRIO) {
   2282 			drvp->drive_flags |= ATA_DRIVE_NCQ_PRIO;
   2283 			aprint_verbose(" w/PRIO");
   2284 		}
   2285 	}
   2286 	ata_channel_unlock(chp);
   2287 
   2288 	if (printed)
   2289 		aprint_verbose("\n");
   2290 
   2291 #if NATA_UDMA
   2292 	if ((atac->atac_cap & ATAC_CAP_UDMA) == 0) {
   2293 		/* don't care about UDMA modes */
   2294 		return;
   2295 	}
   2296 	if (cf_flags & ATA_CONFIG_UDMA_SET) {
   2297 		ata_channel_lock(chp);
   2298 		if ((cf_flags & ATA_CONFIG_UDMA_MODES) ==
   2299 		    ATA_CONFIG_UDMA_DISABLE) {
   2300 			drvp->drive_flags &= ~ATA_DRIVE_UDMA;
   2301 		} else {
   2302 			drvp->UDMA_mode = (cf_flags & ATA_CONFIG_UDMA_MODES) >>
   2303 			    ATA_CONFIG_UDMA_OFF;
   2304 			drvp->drive_flags |= ATA_DRIVE_UDMA | ATA_DRIVE_MODE;
   2305 		}
   2306 		ata_channel_unlock(chp);
   2307 	}
   2308 #endif	/* NATA_UDMA */
   2309 #endif	/* NATA_DMA */
   2310 }
   2311 
   2312 /* management of the /dev/atabus* devices */
   2313 int
   2314 atabusopen(dev_t dev, int flag, int fmt, struct lwp *l)
   2315 {
   2316 	struct atabus_softc *sc;
   2317 	int error;
   2318 
   2319 	sc = device_lookup_private(&atabus_cd, minor(dev));
   2320 	if (sc == NULL)
   2321 		return (ENXIO);
   2322 
   2323 	if (sc->sc_flags & ATABUSCF_OPEN)
   2324 		return (EBUSY);
   2325 
   2326 	if ((error = ata_addref(sc->sc_chan)) != 0)
   2327 		return (error);
   2328 
   2329 	sc->sc_flags |= ATABUSCF_OPEN;
   2330 
   2331 	return (0);
   2332 }
   2333 
   2334 
   2335 int
   2336 atabusclose(dev_t dev, int flag, int fmt, struct lwp *l)
   2337 {
   2338 	struct atabus_softc *sc =
   2339 	    device_lookup_private(&atabus_cd, minor(dev));
   2340 
   2341 	ata_delref(sc->sc_chan);
   2342 
   2343 	sc->sc_flags &= ~ATABUSCF_OPEN;
   2344 
   2345 	return (0);
   2346 }
   2347 
   2348 int
   2349 atabusioctl(dev_t dev, u_long cmd, void *addr, int flag, struct lwp *l)
   2350 {
   2351 	struct atabus_softc *sc =
   2352 	    device_lookup_private(&atabus_cd, minor(dev));
   2353 	struct ata_channel *chp = sc->sc_chan;
   2354 	int min_drive, max_drive, drive;
   2355 	int error;
   2356 	int s;
   2357 
   2358 	/*
   2359 	 * Enforce write permission for ioctls that change the
   2360 	 * state of the bus.  Host adapter specific ioctls must
   2361 	 * be checked by the adapter driver.
   2362 	 */
   2363 	switch (cmd) {
   2364 	case ATABUSIOSCAN:
   2365 	case ATABUSIODETACH:
   2366 	case ATABUSIORESET:
   2367 		if ((flag & FWRITE) == 0)
   2368 			return (EBADF);
   2369 	}
   2370 
   2371 	switch (cmd) {
   2372 	case ATABUSIORESET:
   2373 		s = splbio();
   2374 		ata_reset_channel(sc->sc_chan, AT_WAIT | AT_POLL);
   2375 		splx(s);
   2376 		return 0;
   2377 	case ATABUSIOSCAN:
   2378 	{
   2379 #if 0
   2380 		struct atabusioscan_args *a=
   2381 		    (struct atabusioscan_args *)addr;
   2382 #endif
   2383 		if ((chp->ch_drive[0].drive_type == ATA_DRIVET_OLD) ||
   2384 		    (chp->ch_drive[1].drive_type == ATA_DRIVET_OLD))
   2385 			return (EOPNOTSUPP);
   2386 		return (EOPNOTSUPP);
   2387 	}
   2388 	case ATABUSIODETACH:
   2389 	{
   2390 		struct atabusiodetach_args *a=
   2391 		    (struct atabusiodetach_args *)addr;
   2392 		if ((chp->ch_drive[0].drive_type == ATA_DRIVET_OLD) ||
   2393 		    (chp->ch_drive[1].drive_type == ATA_DRIVET_OLD))
   2394 			return (EOPNOTSUPP);
   2395 		switch (a->at_dev) {
   2396 		case -1:
   2397 			min_drive = 0;
   2398 			max_drive = 1;
   2399 			break;
   2400 		case 0:
   2401 		case 1:
   2402 			min_drive = max_drive = a->at_dev;
   2403 			break;
   2404 		default:
   2405 			return (EINVAL);
   2406 		}
   2407 		for (drive = min_drive; drive <= max_drive; drive++) {
   2408 			if (chp->ch_drive[drive].drv_softc != NULL) {
   2409 				error = config_detach(
   2410 				    chp->ch_drive[drive].drv_softc, 0);
   2411 				if (error)
   2412 					return (error);
   2413 				KASSERT(chp->ch_drive[drive].drv_softc == NULL);
   2414 			}
   2415 		}
   2416 		return 0;
   2417 	}
   2418 	default:
   2419 		return ENOTTY;
   2420 	}
   2421 }
   2422 
   2423 static bool
   2424 atabus_suspend(device_t dv, const pmf_qual_t *qual)
   2425 {
   2426 	struct atabus_softc *sc = device_private(dv);
   2427 	struct ata_channel *chp = sc->sc_chan;
   2428 
   2429 	ata_channel_idle(chp);
   2430 
   2431 	return true;
   2432 }
   2433 
   2434 static bool
   2435 atabus_resume(device_t dv, const pmf_qual_t *qual)
   2436 {
   2437 	struct atabus_softc *sc = device_private(dv);
   2438 	struct ata_channel *chp = sc->sc_chan;
   2439 
   2440 	/*
   2441 	 * XXX joerg: with wdc, the first channel unfreezes the controler.
   2442 	 * Move this the reset and queue idling into wdc.
   2443 	 */
   2444 	ata_channel_lock(chp);
   2445 	if (chp->ch_queue->queue_freeze == 0) {
   2446 		ata_channel_unlock(chp);
   2447 		goto out;
   2448 	}
   2449 	KASSERT(chp->ch_queue->queue_freeze > 0);
   2450 	ata_channel_unlock(chp);
   2451 
   2452 	/* unfreeze the queue and reset drives */
   2453 	ata_channel_thaw(chp);
   2454 
   2455 	/* reset channel only if there are drives attached */
   2456 	if (chp->ch_ndrives > 0)
   2457 		ata_reset_channel(chp, AT_WAIT);
   2458 
   2459 out:
   2460 	return true;
   2461 }
   2462 
   2463 static int
   2464 atabus_rescan(device_t self, const char *ifattr, const int *locators)
   2465 {
   2466 	struct atabus_softc *sc = device_private(self);
   2467 	struct ata_channel *chp = sc->sc_chan;
   2468 	struct atabus_initq *initq;
   2469 	int i;
   2470 
   2471 	/*
   2472 	 * we can rescan a port multiplier atabus, even if some devices are
   2473 	 * still attached
   2474 	 */
   2475 	if (chp->ch_satapmp_nports == 0) {
   2476 		if (chp->atapibus != NULL) {
   2477 			return EBUSY;
   2478 		}
   2479 
   2480 		KASSERT(chp->ch_ndrives == 0 || chp->ch_drive != NULL);
   2481 		for (i = 0; i < chp->ch_ndrives; i++) {
   2482 			if (chp->ch_drive[i].drv_softc != NULL) {
   2483 				return EBUSY;
   2484 			}
   2485 		}
   2486 	}
   2487 
   2488 	initq = malloc(sizeof(*initq), M_DEVBUF, M_WAITOK);
   2489 	initq->atabus_sc = sc;
   2490 	mutex_enter(&atabus_qlock);
   2491 	TAILQ_INSERT_TAIL(&atabus_initq_head, initq, atabus_initq);
   2492 	mutex_exit(&atabus_qlock);
   2493 	config_pending_incr(sc->sc_dev);
   2494 
   2495 	ata_channel_lock(chp);
   2496 	chp->ch_flags |= ATACH_TH_RESCAN;
   2497 	cv_signal(&chp->ch_thr_idle);
   2498 	ata_channel_unlock(chp);
   2499 
   2500 	return 0;
   2501 }
   2502 
   2503 void
   2504 ata_delay(struct ata_channel *chp, int ms, const char *msg, int flags)
   2505 {
   2506 
   2507 	if ((flags & (AT_WAIT | AT_POLL)) == AT_POLL) {
   2508 		/*
   2509 		 * can't use kpause(), we may be in interrupt context
   2510 		 * or taking a crash dump
   2511 		 */
   2512 		delay(ms * 1000);
   2513 	} else {
   2514 		int pause = mstohz(ms);
   2515 
   2516 		KASSERT(mutex_owned(&chp->ch_lock));
   2517 		kpause(msg, false, pause > 0 ? pause : 1, &chp->ch_lock);
   2518 	}
   2519 }
   2520 
   2521 void
   2522 atacmd_toncq(struct ata_xfer *xfer, uint8_t *cmd, uint16_t *count,
   2523     uint16_t *features, uint8_t *device)
   2524 {
   2525 	if ((xfer->c_flags & C_NCQ) == 0) {
   2526 		/* FUA handling for non-NCQ drives */
   2527 		if (xfer->c_bio.flags & ATA_FUA
   2528 		    && *cmd == WDCC_WRITEDMA_EXT)
   2529 			*cmd = WDCC_WRITEDMA_FUA_EXT;
   2530 
   2531 		return;
   2532 	}
   2533 
   2534 	*cmd = (xfer->c_bio.flags & ATA_READ) ?
   2535 	    WDCC_READ_FPDMA_QUEUED : WDCC_WRITE_FPDMA_QUEUED;
   2536 
   2537 	/* for FPDMA the block count is in features */
   2538 	*features = *count;
   2539 
   2540 	/* NCQ tag */
   2541 	*count = (xfer->c_slot << 3);
   2542 
   2543 	if (xfer->c_bio.flags & ATA_PRIO_HIGH)
   2544 		*count |= WDSC_PRIO_HIGH;
   2545 
   2546 	/* other device flags */
   2547 	if (xfer->c_bio.flags & ATA_FUA)
   2548 		*device |= WDSD_FUA;
   2549 }
   2550 
   2551 /*
   2552  * Must be called without any locks, i.e. with both drive and channel locks
   2553  * released.
   2554  */
   2555 void
   2556 ata_channel_start(struct ata_channel *chp, int drive)
   2557 {
   2558 	int i, s;
   2559 	struct ata_drive_datas *drvp;
   2560 
   2561 	s = splbio();
   2562 
   2563 	KASSERT(chp->ch_ndrives > 0);
   2564 
   2565 #define ATA_DRIVE_START(chp, drive) \
   2566 	do {							\
   2567 		KASSERT(drive < chp->ch_ndrives);		\
   2568 		drvp = &chp->ch_drive[drive];			\
   2569 								\
   2570 		if (drvp->drive_type != ATA_DRIVET_ATA &&	\
   2571 		    drvp->drive_type != ATA_DRIVET_ATAPI &&	\
   2572 		    drvp->drive_type != ATA_DRIVET_OLD)		\
   2573 			continue;				\
   2574 								\
   2575 		if (drvp->drv_start != NULL)			\
   2576 			(*drvp->drv_start)(drvp->drv_softc);	\
   2577 	} while (0)
   2578 
   2579 	/*
   2580 	 * Process drives in round robin fashion starting with next one after
   2581 	 * the one which finished transfer. Thus no single drive would
   2582 	 * completely starve other drives on same channel.
   2583 	 * This loop processes all but the current drive, so won't do anything
   2584 	 * if there is only one drive in channel.
   2585 	 */
   2586 	for (i = (drive + 1) % chp->ch_ndrives; i != drive;
   2587 	    i = (i + 1) % chp->ch_ndrives) {
   2588 		ATA_DRIVE_START(chp, i);
   2589 	}
   2590 
   2591 	/* Now try to kick off xfers on the current drive */
   2592 	ATA_DRIVE_START(chp, drive);
   2593 
   2594 	splx(s);
   2595 #undef ATA_DRIVE_START
   2596 }
   2597 
/*
 * Acquire the per-channel mutex (chp->ch_lock).
 */
void
ata_channel_lock(struct ata_channel *chp)
{
	mutex_enter(&chp->ch_lock);
}
   2603 
/*
 * Release the per-channel mutex (chp->ch_lock).
 */
void
ata_channel_unlock(struct ata_channel *chp)
{
	mutex_exit(&chp->ch_lock);
}
   2609 
/*
 * Assert that the caller holds the channel lock (effective on
 * DIAGNOSTIC kernels only; otherwise a no-op).
 */
void
ata_channel_lock_owned(struct ata_channel *chp)
{
	KASSERT(mutex_owned(&chp->ch_lock));
}
   2615 
/*
 * Sleep until the given xfer is signalled as finished via
 * ata_wake_xfer().  Called and returns with the channel lock held
 * (the lock is dropped while sleeping in cv_wait()).
 */
void
ata_wait_xfer(struct ata_channel *chp, struct ata_xfer *xfer)
{
	KASSERT(mutex_owned(&chp->ch_lock));

	cv_wait(&xfer->c_finish, &chp->ch_lock);
}
   2623 
/*
 * Wake a thread sleeping in ata_wait_xfer() on this xfer.
 * Must be called with the channel lock held.
 */
void
ata_wake_xfer(struct ata_channel *chp, struct ata_xfer *xfer)
{
	KASSERT(mutex_owned(&chp->ch_lock));

	cv_signal(&xfer->c_finish);
}
   2631