Lines Matching refs:chp
199 ata_channel_attach(struct ata_channel *chp)
201 if (chp->ch_flags & ATACH_DISABLED)
204 ata_channel_init(chp);
206 KASSERT(chp->ch_queue != NULL);
208 chp->atabus = config_found(chp->ch_atac->atac_dev, chp, atabusprint,
218 ata_channel_detach(struct ata_channel *chp)
220 if (chp->ch_flags & ATACH_DISABLED)
223 ata_channel_destroy(chp);
225 chp->ch_flags |= ATACH_DETACHED;
231 struct ata_channel *chp = atabus_sc->sc_chan;
232 struct atac_softc *atac = chp->ch_atac;
243 if (chp->ch_satapmp_nports == 0)
244 (*atac->atac_probe)(chp);
246 if (chp->ch_ndrives >= 2) {
248 chp->ch_drive[0].drive_type, chp->ch_drive[1].drive_type),
262 ata_channel_lock(chp);
264 KASSERT(ata_is_thread_run(chp));
267 if (chp->ch_drive == NULL)
269 KASSERT(chp->ch_ndrives == 0 || chp->ch_drive != NULL);
270 for (i = 0; i < chp->ch_ndrives; i++)
271 if (chp->ch_drive[i].drive_type != ATA_DRIVET_NONE)
273 if (i == chp->ch_ndrives)
277 if (chp->ch_flags & ATACH_SHUTDOWN)
280 ata_channel_unlock(chp);
290 ata_channel_unlock(chp);
299 ata_delref(chp);
312 struct ata_channel *chp = atabus_sc->sc_chan;
313 struct atac_softc *atac = chp->ch_atac;
326 if (chp->ch_ndrives == PMP_MAX_DRIVES &&
327 chp->ch_drive[PMP_PORT_CTL].drive_type == ATA_DRIVET_PM) {
329 satapmp_attach(chp);
340 KASSERT(chp->ch_ndrives == 0 || chp->ch_drive != NULL);
341 for (i = 0; i < chp->ch_ndrives && chp->atapibus == NULL; i++) {
342 if (chp->ch_drive[i].drive_type == ATA_DRIVET_ATAPI) {
351 chp->atapibus = NULL;
353 for (i = 0; i < chp->ch_ndrives; i++) {
354 if (chp->ch_drive[i].drive_type == ATA_DRIVET_ATAPI)
355 chp->ch_drive[i].drive_type = ATA_DRIVET_NONE;
363 for (i = 0; i < chp->ch_ndrives; i++) {
365 if (chp->ch_drive[i].drive_type != ATA_DRIVET_ATA &&
366 chp->ch_drive[i].drive_type != ATA_DRIVET_OLD) {
369 if (chp->ch_drive[i].drv_softc != NULL)
373 adev.adev_channel = chp->ch_channel;
374 adev.adev_drv_data = &chp->ch_drive[i];
375 chp->ch_drive[i].drv_softc = config_found(atabus_sc->sc_dev,
378 if (chp->ch_drive[i].drv_softc != NULL) {
379 ata_probe_caps(&chp->ch_drive[i]);
382 chp->ch_drive[i].drive_type = ATA_DRIVET_NONE;
389 (*atac->atac_set_modes)(chp);
390 ata_print_modes(chp);
394 for (i = 0; i < chp->ch_ndrives; i++) {
395 if (chp->ch_drive[i].drive_type == ATA_DRIVET_ATA) {
397 chp->ch_drive[i].drv_softc);
408 for (i = 0; i < chp->ch_ndrives; i++) {
409 if (chp->ch_drive[i].drive_type == ATA_DRIVET_PM)
411 if (chp->ch_drive[i].drv_softc == NULL) {
412 chp->ch_drive[i].drive_flags = 0;
413 chp->ch_drive[i].drive_type = ATA_DRIVET_NONE;
415 chp->ch_drive[i].state = 0;
426 ata_delref(chp);
441 struct ata_channel *chp = sc->sc_chan;
442 struct ata_queue *chq = chp->ch_queue;
446 ata_channel_lock(chp);
447 KASSERT(ata_is_thread_run(chp));
455 KASSERT(chp->ch_ndrives == 0 || chp->ch_drive != NULL);
456 for (i = 0; i < chp->ch_ndrives; i++) {
457 chp->ch_drive[i].drive_flags = 0;
458 chp->ch_drive[i].drive_type = ATA_DRIVET_NONE;
460 ata_channel_unlock(chp);
464 ata_channel_lock(chp);
466 if ((chp->ch_flags & (ATACH_TH_RESET | ATACH_TH_DRIVE_RESET
469 cv_wait(&chp->ch_thr_idle, &chp->ch_lock);
471 if (chp->ch_flags & ATACH_SHUTDOWN) {
474 if (chp->ch_flags & ATACH_TH_RESCAN) {
475 chp->ch_flags &= ~ATACH_TH_RESCAN;
476 ata_channel_unlock(chp);
478 ata_channel_lock(chp);
480 if (chp->ch_flags & ATACH_TH_RESET) {
482 ata_thread_run(chp, AT_WAIT,
484 } else if (chp->ch_flags & ATACH_TH_DRIVE_RESET) {
486 for (i = 0; i < chp->ch_ndrives; i++) {
489 drvp = &chp->ch_drive[i];
492 ata_thread_run(chp,
496 chp->ch_flags &= ~ATACH_TH_DRIVE_RESET;
497 } else if (chp->ch_flags & ATACH_TH_RECOVERY) {
504 ata_thread_run(chp, AT_WAIT, ATACH_TH_RECOVERY,
505 chp->recovery_tfd);
512 KASSERT((chp->ch_flags & ATACH_NCQ) == 0);
515 ata_channel_thaw_locked(chp);
516 xfer = ata_queue_get_active_xfer_locked(chp);
537 ata_channel_unlock(chp);
538 atastart(chp);
539 ata_channel_lock(chp);
542 chp->ch_thread = NULL;
543 cv_signal(&chp->ch_thr_idle);
544 ata_channel_unlock(chp);
549 ata_is_thread_run(struct ata_channel *chp)
551 KASSERT(mutex_owned(&chp->ch_lock));
553 return (chp->ch_thread == curlwp && !cpu_intr_p());
557 ata_thread_wake_locked(struct ata_channel *chp)
559 KASSERT(mutex_owned(&chp->ch_lock));
560 ata_channel_freeze_locked(chp);
561 cv_signal(&chp->ch_thr_idle);
572 struct ata_channel *chp = aux;
574 if (chp == NULL)
577 if (cf->cf_loc[ATACF_CHANNEL] != chp->ch_channel &&
593 struct ata_channel *chp = aux;
597 sc->sc_chan = chp;
604 if (ata_addref(chp))
618 &chp->ch_thread, "%s", device_xname(self))) != 0)
635 struct ata_channel *chp = sc->sc_chan;
642 if ((dev = chp->atapibus) != NULL) {
649 KASSERT(chp->atapibus == NULL);
652 KASSERT(chp->ch_ndrives == 0 || chp->ch_drive != NULL);
657 for (i = 0; i < chp->ch_ndrives; i++) {
658 if (chp->ch_drive[i].drive_type == ATA_DRIVET_ATAPI)
660 if (chp->ch_drive[i].drive_type == ATA_DRIVET_PM)
661 chp->ch_drive[i].drive_type = ATA_DRIVET_NONE;
662 if ((dev = chp->ch_drive[i].drv_softc) != NULL) {
669 KASSERT(chp->ch_drive[i].drv_softc == NULL);
670 KASSERT(chp->ch_drive[i].drive_type == 0);
675 ata_channel_lock(chp);
676 chp->ch_flags |= ATACH_SHUTDOWN;
677 while (chp->ch_thread != NULL) {
678 cv_signal(&chp->ch_thr_idle);
679 cv_wait(&chp->ch_thr_idle, &chp->ch_lock);
681 ata_channel_unlock(chp);
683 atabus_free_drives(chp);
701 struct ata_channel *chp = sc->sc_chan;
704 KASSERT(chp->ch_ndrives == 0 || chp->ch_drive != NULL);
708 if (child == chp->atapibus) {
709 chp->atapibus = NULL;
711 for (i = 0; i < chp->ch_ndrives; i++) {
712 if (chp->ch_drive[i].drive_type != ATA_DRIVET_ATAPI)
714 KASSERT(chp->ch_drive[i].drv_softc != NULL);
715 chp->ch_drive[i].drv_softc = NULL;
716 chp->ch_drive[i].drive_flags = 0;
717 chp->ch_drive[i].drive_type = ATA_DRIVET_NONE;
724 for (i = 0; i < chp->ch_ndrives; i++) {
725 if (chp->ch_drive[i].drive_type == ATA_DRIVET_ATAPI)
727 if (child == chp->ch_drive[i].drv_softc) {
728 chp->ch_drive[i].drv_softc = NULL;
729 chp->ch_drive[i].drive_flags = 0;
730 if (chp->ch_drive[i].drive_type == ATA_DRIVET_PM)
731 chp->ch_satapmp_nports = 0;
732 chp->ch_drive[i].drive_type = ATA_DRIVET_NONE;
752 atabus_alloc_drives(struct ata_channel *chp, int ndrives)
755 if (chp->ch_ndrives != ndrives)
756 atabus_free_drives(chp);
757 if (chp->ch_drive == NULL) {
760 ata_channel_unlock(chp);
761 drv = kmem_zalloc(sizeof(*chp->ch_drive) * ndrives, KM_SLEEP);
762 ata_channel_lock(chp);
764 if (chp->ch_drive != NULL) {
766 kmem_free(drv, sizeof(*chp->ch_drive) * ndrives);
769 chp->ch_drive = drv;
772 chp->ch_drive[i].chnl_softc = chp;
773 chp->ch_drive[i].drive = i;
775 chp->ch_ndrives = ndrives;
780 atabus_free_drives(struct ata_channel *chp)
785 KASSERT(chp->ch_ndrives == 0 || chp->ch_drive != NULL);
786 for (i = 0; i < chp->ch_ndrives; i++) {
787 if (chp->ch_drive[i].drive_type != ATA_DRIVET_NONE) {
789 device_xname(chp->atabus), i,
790 chp->ch_drive[i].drive_type);
793 if (chp->ch_drive[i].drv_softc != NULL) {
795 device_xname(chp->atabus), i,
796 device_xname(chp->ch_drive[i].drv_softc));
804 if (chp->ch_drive == NULL)
806 kmem_free(chp->ch_drive,
807 sizeof(struct ata_drive_datas) * chp->ch_ndrives);
808 chp->ch_ndrives = 0;
809 chp->ch_drive = NULL;
818 struct ata_channel *chp = drvp->chnl_softc;
819 struct atac_softc *atac = chp->ch_atac;
826 xfer = ata_get_xfer(chp, false);
856 ata_wait_cmd(chp, xfer);
912 ata_free_xfer(chp, xfer);
921 struct ata_channel *chp = drvp->chnl_softc;
922 struct atac_softc *atac = chp->ch_atac;
926 xfer = ata_get_xfer(chp, false);
941 ata_wait_cmd(chp, xfer);
950 ata_free_xfer(chp, xfer);
997 ata_channel_idle(struct ata_channel *chp)
999 ata_channel_lock(chp);
1000 ata_channel_freeze_locked(chp);
1001 while (chp->ch_queue->queue_active > 0) {
1002 chp->ch_queue->queue_flags |= QF_IDLE_WAIT;
1003 cv_timedwait(&chp->ch_queue->queue_idle, &chp->ch_lock, 1);
1005 ata_channel_unlock(chp);
1014 ata_exec_xfer(struct ata_channel *chp, struct ata_xfer *xfer)
1018 chp->ch_channel, xfer->c_drive), DEBUG_XFERS);
1021 xfer->c_chp = chp;
1023 ata_channel_lock(chp);
1030 SIMPLEQ_INSERT_TAIL(&chp->ch_queue->queue_xfer, xfer,
1033 SIMPLEQ_INSERT_HEAD(&chp->ch_queue->queue_xfer, xfer,
1040 while (chp->ch_queue->queue_active > 0 ||
1041 SIMPLEQ_FIRST(&chp->ch_queue->queue_xfer) != xfer) {
1043 cv_wait(&chp->ch_queue->c_active, &chp->ch_lock);
1052 ata_channel_unlock(chp);
1054 ata_free_xfer(chp, xfer);
1059 ata_channel_unlock(chp);
1062 chp->ch_flags), DEBUG_XFERS);
1063 atastart(chp);
1082 atastart(struct ata_channel *chp)
1084 struct atac_softc *atac = chp->ch_atac;
1085 struct ata_queue *chq = chp->ch_queue;
1102 ata_channel_lock(chp);
1106 if ((xfer = SIMPLEQ_FIRST(&chp->ch_queue->queue_xfer)) == NULL) {
1107 ATADEBUG_PRINT(("%s(chp=%p): channel %d queue_xfer is empty\n",
1108 __func__, chp, chp->ch_channel), DEBUG_XFERS);
1118 "wait active\n", xfer, chp->ch_channel, xfer->c_drive),
1120 cv_broadcast(&chp->ch_queue->c_active);
1130 cv_signal(&chp->ch_queue->queue_idle);
1132 ATADEBUG_PRINT(("%s(chp=%p): channel %d drive %d "
1134 __func__, chp, chp->ch_channel, xfer->c_drive,
1141 KASSERT(xfer->c_chp == chp);
1151 if (!skipq && (axfer = TAILQ_FIRST(&chp->ch_queue->active_xfers))) {
1158 struct ata_drive_datas * const drvp = &chp->ch_drive[xfer->c_drive];
1172 __func__, chp->ch_channel);
1175 ATADEBUG_PRINT(("%s(chp=%p): channel %d completely busy\n",
1176 __func__, chp, chp->ch_channel), DEBUG_XFERS);
1181 if (!ata_queue_alloc_slot(chp, &xfer->c_slot, drv_openings))
1185 if (!atac->atac_claim_hw(chp, 0)) {
1186 ata_queue_free_slot(chp, xfer->c_slot);
1193 ATADEBUG_PRINT(("%s(chp=%p): xfer %p channel %d drive %d\n",
1194 __func__, chp, xfer, chp->ch_channel, xfer->c_drive), DEBUG_XFERS);
1201 SET(chp->ch_flags, ATACH_NCQ);
1203 CLR(chp->ch_flags, ATACH_NCQ);
1207 ata_activate_xfer_locked(chp, xfer);
1227 ata_channel_unlock(chp);
1233 struct ata_channel *chp = xfer->c_chp;
1236 KASSERT(mutex_owned(&chp->ch_lock));
1239 rv = xfer->ops->c_start(chp, xfer);
1246 ata_thread_wake_locked(chp);
1250 ata_channel_unlock(chp);
1252 status = xfer->ops->c_poll(chp, xfer);
1253 ata_channel_lock(chp);
1258 ata_channel_unlock(chp);
1260 xfer->ops->c_abort(chp, xfer);
1261 ata_channel_lock(chp);
1269 ata_activate_xfer_locked(struct ata_channel *chp, struct ata_xfer *xfer)
1271 struct ata_queue * const chq = chp->ch_queue;
1273 KASSERT(mutex_owned(&chp->ch_lock));
1295 ata_get_xfer(struct ata_channel *chp, bool waitok)
1305 ata_free_xfer(struct ata_channel *chp, struct ata_xfer *xfer)
1307 struct ata_queue *chq = chp->ch_queue;
1309 ata_channel_lock(chp);
1315 ata_channel_unlock(chp);
1322 struct wdc_softc *wdc = CHAN_TO_WDC(chp);
1326 chp->ch_channel, xfer->c_drive);
1327 chp->ch_flags &= ~(ATACH_DMA_WAIT | ATACH_PIOBM_WAIT | ATACH_IRQ_WAIT);
1331 if (__predict_false(chp->ch_atac->atac_free_hw))
1332 chp->ch_atac->atac_free_hw(chp);
1334 ata_channel_unlock(chp);
1341 ata_deactivate_xfer(struct ata_channel *chp, struct ata_xfer *xfer)
1343 struct ata_queue * const chq = chp->ch_queue;
1345 ata_channel_lock(chp);
1352 callout_stop(&chp->c_timo_callout);
1354 if (callout_invoking(&chp->c_timo_callout))
1361 ata_queue_free_slot(chp, xfer->c_slot);
1366 ata_channel_unlock(chp);
1379 ata_waitdrain_xfer_check(struct ata_channel *chp, struct ata_xfer *xfer)
1384 ata_channel_lock(chp);
1386 if (chp->ch_drive[drive].drive_flags & ATA_DRIVE_WAITDRAIN) {
1387 ata_channel_unlock(chp);
1389 xfer->ops->c_kill_xfer(chp, xfer, KILL_GONE);
1391 ata_channel_lock(chp);
1392 chp->ch_drive[drive].drive_flags &= ~ATA_DRIVE_WAITDRAIN;
1393 cv_signal(&chp->ch_queue->queue_drain);
1397 ata_channel_unlock(chp);
1408 struct ata_channel *chp = xfer->c_chp;
1409 struct ata_drive_datas *drvp = &chp->ch_drive[xfer->c_drive];
1411 ata_channel_lock(chp);
1419 ata_channel_unlock(chp);
1425 ata_free_xfer(chp, xfer);
1430 ata_channel_unlock(chp);
1438 ata_channel_unlock(chp);
1450 ata_kill_active(struct ata_channel *chp, int reason, int flags)
1452 struct ata_queue * const chq = chp->ch_queue;
1455 KASSERT(mutex_owned(&chp->ch_lock));
1458 ata_channel_unlock(chp);
1460 ata_channel_lock(chp);
1470 struct ata_channel * const chp = drvp->chnl_softc;
1471 struct ata_queue * const chq = chp->ch_queue;
1474 ata_channel_lock(chp);
1478 KASSERT(xfer->c_chp == chp);
1483 SIMPLEQ_REMOVE_HEAD(&chp->ch_queue->queue_xfer, c_xferchain);
1491 xfer->ops->c_kill_xfer(chp, xfer, KILL_GONE_INACTIVE);
1499 KASSERT(xfer->c_chp == chp);
1513 cv_wait(&chq->queue_drain, &chp->ch_lock);
1516 ata_channel_unlock(chp);
1520 ata_channel_freeze_locked(struct ata_channel *chp)
1522 chp->ch_queue->queue_freeze++;
1524 ATADEBUG_PRINT(("%s(chp=%p) -> %d\n", __func__, chp,
1525 chp->ch_queue->queue_freeze), DEBUG_FUNCS | DEBUG_XFERS);
1529 ata_channel_freeze(struct ata_channel *chp)
1531 ata_channel_lock(chp);
1532 ata_channel_freeze_locked(chp);
1533 ata_channel_unlock(chp);
1537 ata_channel_thaw_locked(struct ata_channel *chp)
1539 KASSERT(mutex_owned(&chp->ch_lock));
1540 KASSERT(chp->ch_queue->queue_freeze > 0);
1542 chp->ch_queue->queue_freeze--;
1544 ATADEBUG_PRINT(("%s(chp=%p) -> %d\n", __func__, chp,
1545 chp->ch_queue->queue_freeze), DEBUG_FUNCS | DEBUG_XFERS);
1554 ata_thread_run(struct ata_channel *chp, int flags, int type, int arg)
1556 struct atac_softc *atac = chp->ch_atac;
1560 ata_channel_lock_owned(chp);
1567 __func__, flags, chp->ch_flags), DEBUG_FUNCS | DEBUG_XFERS);
1571 if (chp->ch_flags & ATACH_TH_RESET) {
1580 KASSERT(drive <= chp->ch_ndrives);
1581 drvp = &chp->ch_drive[drive];
1594 KASSERT((chp->ch_flags & ATACH_RECOVERING) == 0);
1595 chp->recovery_tfd = tfd;
1603 if (!(chp->ch_flags & type)) {
1608 ata_channel_freeze_locked(chp);
1609 chp->ch_flags |= type;
1612 cv_signal(&chp->ch_thr_idle);
1617 ata_channel_freeze_locked(chp);
1624 if (chp->ch_flags & type) {
1625 chp->ch_flags &= ~type;
1631 (*atac->atac_bustype_ata->ata_reset_channel)(chp, flags);
1633 KASSERT(chp->ch_ndrives == 0 || chp->ch_drive != NULL);
1634 for (int drive = 0; drive < chp->ch_ndrives; drive++)
1635 chp->ch_drive[drive].state = 0;
1642 KASSERT(drive <= chp->ch_ndrives);
1643 drvp = &chp->ch_drive[drive];
1653 KASSERT((chp->ch_flags & ATACH_RECOVERING) == 0);
1656 SET(chp->ch_flags, ATACH_RECOVERING);
1657 (*atac->atac_bustype_ata->ata_recovery)(chp, flags, tfd);
1658 CLR(chp->ch_flags, ATACH_RECOVERING);
1672 ata_channel_thaw_locked(chp);
1675 ata_channel_thaw_locked(chp);
1678 cv_signal(&chp->ch_thr_idle);
1682 ata_addref(struct ata_channel *chp)
1684 struct atac_softc *atac = chp->ch_atac;
1700 ata_delref(struct ata_channel *chp)
1702 struct atac_softc *atac = chp->ch_atac;
1714 ata_print_modes(struct ata_channel *chp)
1716 struct atac_softc *atac = chp->ch_atac;
1720 KASSERT(chp->ch_ndrives == 0 || chp->ch_drive != NULL);
1721 for (drive = 0; drive < chp->ch_ndrives; drive++) {
1722 drvp = &chp->ch_drive[drive];
1729 chp->ch_channel, drvp->drive, drvp->PIO_mode);
1761 ATA_REAL_OPENINGS(chp->ch_queue->queue_openings),
1782 struct ata_channel *chp = drvp->chnl_softc;
1783 struct atac_softc *atac = chp->ch_atac;
1823 (*atac->atac_set_modes)(chp);
1824 ata_print_modes(chp);
1826 ata_thread_run(chp, flags, ATACH_TH_RESET, ATACH_NODRIVE);
1839 struct ata_channel *chp = drvp->chnl_softc;
1840 struct atac_softc *atac = chp->ch_atac;
1857 ata_channel_lock(chp);
1859 ata_channel_unlock(chp);
1863 ata_channel_lock(chp);
1865 ata_channel_unlock(chp);
1943 ata_channel_lock(chp);
1945 ata_channel_unlock(chp);
1969 ata_channel_lock(chp);
1971 ata_channel_unlock(chp);
2010 ata_channel_lock(chp);
2012 ata_channel_unlock(chp);
2020 ata_channel_lock(chp);
2029 ata_channel_unlock(chp);
2043 ata_channel_lock(chp);
2047 ata_channel_unlock(chp);
2055 ata_channel_lock(chp);
2064 ata_channel_unlock(chp);
2081 ata_channel_lock(chp);
2096 ata_channel_unlock(chp);
2104 ata_channel_lock(chp);
2113 ata_channel_unlock(chp);
2163 struct ata_channel *chp = sc->sc_chan;
2182 ata_channel_lock(chp);
2185 ata_channel_unlock(chp);
2193 if ((chp->ch_drive[0].drive_type == ATA_DRIVET_OLD) ||
2194 (chp->ch_drive[1].drive_type == ATA_DRIVET_OLD))
2202 if ((chp->ch_drive[0].drive_type == ATA_DRIVET_OLD) ||
2203 (chp->ch_drive[1].drive_type == ATA_DRIVET_OLD))
2218 if (chp->ch_drive[drive].drv_softc != NULL) {
2220 chp->ch_drive[drive].drv_softc, 0);
2223 KASSERT(chp->ch_drive[drive].drv_softc == NULL);
2237 struct ata_channel *chp = sc->sc_chan;
2239 ata_channel_idle(chp);
2248 struct ata_channel *chp = sc->sc_chan;
2254 ata_channel_lock(chp);
2255 if (chp->ch_queue->queue_freeze == 0) {
2256 ata_channel_unlock(chp);
2261 ata_channel_thaw_locked(chp);
2264 if (chp->ch_ndrives > 0)
2265 ata_thread_run(chp, AT_WAIT, ATACH_TH_RESET, ATACH_NODRIVE);
2267 ata_channel_unlock(chp);
2277 struct ata_channel *chp = sc->sc_chan;
2285 if (chp->ch_satapmp_nports == 0) {
2286 if (chp->atapibus != NULL) {
2290 KASSERT(chp->ch_ndrives == 0 || chp->ch_drive != NULL);
2291 for (i = 0; i < chp->ch_ndrives; i++) {
2292 if (chp->ch_drive[i].drv_softc != NULL) {
2305 ata_channel_lock(chp);
2306 chp->ch_flags |= ATACH_TH_RESCAN;
2307 cv_signal(&chp->ch_thr_idle);
2308 ata_channel_unlock(chp);
2314 ata_delay(struct ata_channel *chp, int ms, const char *msg, int flags)
2316 KASSERT(mutex_owned(&chp->ch_lock));
2327 kpause(msg, false, pause > 0 ? pause : 1, &chp->ch_lock);
2362 ata_wait_cmd(struct ata_channel *chp, struct ata_xfer *xfer)
2364 struct ata_queue *chq = chp->ch_queue;
2367 ata_channel_lock(chp);
2370 cv_wait(&chq->c_cmd_finish, &chp->ch_lock);
2372 ata_channel_unlock(chp);
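
A recurring idiom in the matches above (see the fragments around source lines 557-561 and 2305-2308) is: take the channel lock, post a request by setting an ATACH_TH_* bit in ch_flags (or by freezing the channel), then cv_signal(&chp->ch_thr_idle) so the per-channel kernel thread (its body is the block at source lines 441-544) picks the work up. A minimal sketch of that pattern, assuming the NetBSD kernel context shown in the listing; the helper name is hypothetical, not part of the source:

    /*
     * Hypothetical helper: ask the channel's kernel thread to rescan.
     * Mirrors the fragments at source lines 2305-2308 above.
     */
    static void
    example_request_rescan(struct ata_channel *chp)
    {
            ata_channel_lock(chp);                  /* serializes ch_flags updates */
            chp->ch_flags |= ATACH_TH_RESCAN;       /* post the request */
            cv_signal(&chp->ch_thr_idle);           /* wake the channel thread */
            ata_channel_unlock(chp);
    }

Because the thread re-checks the ATACH_TH_* bits under the same ch_lock before it cv_wait()s on ch_thr_idle (source lines 464-469), a request posted this way cannot be lost between the flag set and the wakeup.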
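The fragments at source lines 997-1005 show the quiesce idiom used before suspend and similar operations: freeze the queue so no new transfer is started, then sleep until the active count drains to zero. Reassembled here as an illustrative sketch (the function name is hypothetical; field and helper names are taken verbatim from the matches above):

    static void
    example_channel_quiesce(struct ata_channel *chp)
    {
            ata_channel_lock(chp);
            ata_channel_freeze_locked(chp);         /* queue_freeze++, lines 1520-1525 */
            while (chp->ch_queue->queue_active > 0) {
                    chp->ch_queue->queue_flags |= QF_IDLE_WAIT;
                    cv_timedwait(&chp->ch_queue->queue_idle, &chp->ch_lock, 1);
            }
            ata_channel_unlock(chp);
    }

The matching thaw (queue_freeze--, lines 1537-1545) is left to the caller once it is done, as the resume path does at source lines 2255-2267 before kicking off a channel reset.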