1 /* $NetBSD: ata.c,v 1.161 2021/04/24 23:36:52 thorpej Exp $ */
2
3 /*
4 * Copyright (c) 1998, 2001 Manuel Bouyer. All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 */
26
27 #include <sys/cdefs.h>
28 __KERNEL_RCSID(0, "$NetBSD: ata.c,v 1.161 2021/04/24 23:36:52 thorpej Exp $");
29
30 #include "opt_ata.h"
31
32 #include <sys/param.h>
33 #include <sys/systm.h>
34 #include <sys/kernel.h>
35 #include <sys/device.h>
36 #include <sys/conf.h>
37 #include <sys/fcntl.h>
38 #include <sys/proc.h>
39 #include <sys/kthread.h>
40 #include <sys/errno.h>
41 #include <sys/ataio.h>
42 #include <sys/kmem.h>
43 #include <sys/intr.h>
44 #include <sys/bus.h>
45 #include <sys/once.h>
46 #include <sys/bitops.h>
47 #include <sys/cpu.h>
48
49 #define ATABUS_PRIVATE
50
51 #include <dev/ata/ataconf.h>
52 #include <dev/ata/atareg.h>
53 #include <dev/ata/atavar.h>
54 #include <dev/ic/wdcvar.h> /* for PIOBM */
55
56 #include "ioconf.h"
57 #include "locators.h"
58
59 #include "atapibus.h"
60 #include "ataraid.h"
61 #include "sata_pmp.h"
62
63 #if NATARAID > 0
64 #include <dev/ata/ata_raidvar.h>
65 #endif
66 #if NSATA_PMP > 0
67 #include <dev/ata/satapmpvar.h>
68 #endif
69 #include <dev/ata/satapmpreg.h>
70
71 #define DEBUG_FUNCS 0x08
72 #define DEBUG_PROBE 0x10
73 #define DEBUG_DETACH 0x20
74 #define DEBUG_XFERS 0x40
75 #ifdef ATADEBUG
76 #ifndef ATADEBUG_MASK
77 #define ATADEBUG_MASK 0
78 #endif
79 int atadebug_mask = ATADEBUG_MASK;
80 #define ATADEBUG_PRINT(args, level) \
81 if (atadebug_mask & (level)) \
82 printf args
83 #else
84 #define ATADEBUG_PRINT(args, level)
85 #endif
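
/*
 * Usage sketch (assumes a kernel built with options ATADEBUG): the
 * debug output can be selected at build time via ATADEBUG_MASK, or
 * at run time by patching the variable, e.g. from ddb:
 *
 *	atadebug_mask = DEBUG_PROBE | DEBUG_XFERS;
 */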
86
87 #if defined(ATA_DOWNGRADE_MODE) && NATA_DMA
88 static int ata_downgrade_mode(struct ata_drive_datas *, int);
89 #endif
90
91 static ONCE_DECL(ata_init_ctrl);
92 static struct pool ata_xfer_pool;
93
94 /*
95 * A queue of atabus instances, used to ensure the same bus probe order
96 * for a given hardware configuration at each boot. The kthread probing
97 * devices on an atabus takes the head entry; only one atabus probes at once.
98 */
99 static TAILQ_HEAD(, atabus_initq) atabus_initq_head;
100 static kmutex_t atabus_qlock;
101 static kcondvar_t atabus_qcv;
102 static lwp_t * atabus_cfg_lwp;
103
104 /*****************************************************************************
105 * ATA bus layer.
106 *
107 * ATA controllers attach an atabus instance, which handles probing the bus
108 * for drives, etc.
109 *****************************************************************************/
110
111 dev_type_open(atabusopen);
112 dev_type_close(atabusclose);
113 dev_type_ioctl(atabusioctl);
114
115 const struct cdevsw atabus_cdevsw = {
116 .d_open = atabusopen,
117 .d_close = atabusclose,
118 .d_read = noread,
119 .d_write = nowrite,
120 .d_ioctl = atabusioctl,
121 .d_stop = nostop,
122 .d_tty = notty,
123 .d_poll = nopoll,
124 .d_mmap = nommap,
125 .d_kqfilter = nokqfilter,
126 .d_discard = nodiscard,
127 .d_flag = D_OTHER
128 };
129
130 static void atabus_childdetached(device_t, device_t);
131 static int atabus_rescan(device_t, const char *, const int *);
132 static bool atabus_resume(device_t, const pmf_qual_t *);
133 static bool atabus_suspend(device_t, const pmf_qual_t *);
134 static void atabusconfig_thread(void *);
135
136 static void ata_channel_idle(struct ata_channel *);
137 static void ata_activate_xfer_locked(struct ata_channel *, struct ata_xfer *);
138 static void ata_channel_freeze_locked(struct ata_channel *);
139 static void ata_thread_wake_locked(struct ata_channel *);
140
141 /*
142 * atabus_init:
143 *
144 * Initialize ATA subsystem structures.
145 */
146 static int
147 atabus_init(void)
148 {
149
150 pool_init(&ata_xfer_pool, sizeof(struct ata_xfer), 0, 0, 0,
151 "ataspl", NULL, IPL_BIO);
152 TAILQ_INIT(&atabus_initq_head);
153 mutex_init(&atabus_qlock, MUTEX_DEFAULT, IPL_NONE);
154 cv_init(&atabus_qcv, "atainitq");
155 return 0;
156 }
157
158 /*
159 * atabusprint:
160 *
161 * Autoconfiguration print routine used by ATA controllers when
162 * attaching an atabus instance.
163 */
164 int
165 atabusprint(void *aux, const char *pnp)
166 {
167 struct ata_channel *chan = aux;
168
169 if (pnp)
170 aprint_normal("atabus at %s", pnp);
171 aprint_normal(" channel %d", chan->ch_channel);
172
173 return (UNCONF);
174 }
175
176 /*
177 * ataprint:
178 *
179 * Autoconfiguration print routine.
180 */
181 int
182 ataprint(void *aux, const char *pnp)
183 {
184 struct ata_device *adev = aux;
185
186 if (pnp)
187 aprint_normal("wd at %s", pnp);
188 aprint_normal(" drive %d", adev->adev_drv_data->drive);
189
190 return (UNCONF);
191 }
192
193 /*
194 * ata_channel_attach:
195 *
196 * Common parts of attaching an atabus to an ATA controller channel.
197 */
198 void
199 ata_channel_attach(struct ata_channel *chp)
200 {
201 if (chp->ch_flags & ATACH_DISABLED)
202 return;
203
204 ata_channel_init(chp);
205
206 KASSERT(chp->ch_queue != NULL);
207
208 chp->atabus = config_found(chp->ch_atac->atac_dev, chp, atabusprint,
209 CFARG_IATTR, "ata",
210 CFARG_EOL);
211 }
212
213 /*
214 * ata_channel_detach:
215 *
216 * Common parts of detaching an atabus from an ATA controller channel.
217 */
218 void
219 ata_channel_detach(struct ata_channel *chp)
220 {
221 if (chp->ch_flags & ATACH_DISABLED)
222 return;
223
224 ata_channel_destroy(chp);
225
226 chp->ch_flags |= ATACH_DETACHED;
227 }
228
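/*
 * atabusconfig:
 *
 * Probe the drives on the channel and, if any were found, hand the
 * rest of the configuration over to atabusconfig_thread(). Runs in
 * the atabus worker thread context.
 */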
229 static void
230 atabusconfig(struct atabus_softc *atabus_sc)
231 {
232 struct ata_channel *chp = atabus_sc->sc_chan;
233 struct atac_softc *atac = chp->ch_atac;
234 struct atabus_initq *atabus_initq = NULL;
235 int i, error;
236
237 /* we are in the atabus's thread context */
238
239 /*
240 * Probe for the drives attached to the controller, unless a PMP
241 * is already known.
242 */
243 /* XXX for SATA devices we will power up all drives at once */
244 if (chp->ch_satapmp_nports == 0)
245 (*atac->atac_probe)(chp);
246
247 if (chp->ch_ndrives >= 2) {
248 ATADEBUG_PRINT(("atabusattach: ch_drive_type 0x%x 0x%x\n",
249 chp->ch_drive[0].drive_type, chp->ch_drive[1].drive_type),
250 DEBUG_PROBE);
251 }
252
253 /* Make sure the devices probe in atabus order to avoid jitter. */
254 mutex_enter(&atabus_qlock);
255 for (;;) {
256 atabus_initq = TAILQ_FIRST(&atabus_initq_head);
257 if (atabus_initq->atabus_sc == atabus_sc)
258 break;
259 cv_wait(&atabus_qcv, &atabus_qlock);
260 }
261 mutex_exit(&atabus_qlock);
262
263 ata_channel_lock(chp);
264
265 KASSERT(ata_is_thread_run(chp));
266
267 /* If no drives, abort here */
268 if (chp->ch_drive == NULL)
269 goto out;
270 KASSERT(chp->ch_ndrives == 0 || chp->ch_drive != NULL);
271 for (i = 0; i < chp->ch_ndrives; i++)
272 if (chp->ch_drive[i].drive_type != ATA_DRIVET_NONE)
273 break;
274 if (i == chp->ch_ndrives)
275 goto out;
276
277 /* Shortcut in case we've been shut down */
278 if (chp->ch_flags & ATACH_SHUTDOWN)
279 goto out;
280
281 ata_channel_unlock(chp);
282
283 if ((error = kthread_create(PRI_NONE, 0, NULL, atabusconfig_thread,
284 atabus_sc, &atabus_cfg_lwp,
285 "%scnf", device_xname(atac->atac_dev))) != 0)
286 aprint_error_dev(atac->atac_dev,
287 "unable to create config thread: error %d\n", error);
288 return;
289
290 out:
291 ata_channel_unlock(chp);
292
293 mutex_enter(&atabus_qlock);
294 TAILQ_REMOVE(&atabus_initq_head, atabus_initq, atabus_initq);
295 cv_broadcast(&atabus_qcv);
296 mutex_exit(&atabus_qlock);
297
298 kmem_free(atabus_initq, sizeof(*atabus_initq));
299
300 ata_delref(chp);
301
302 config_pending_decr(atabus_sc->sc_dev);
303 }
304
305 /*
306 * atabusconfig_thread: finish attaching the atabus's children, in a
307 * separate kernel thread.
308 */
309 static void
310 atabusconfig_thread(void *arg)
311 {
312 struct atabus_softc *atabus_sc = arg;
313 struct ata_channel *chp = atabus_sc->sc_chan;
314 struct atac_softc *atac = chp->ch_atac;
315 struct atabus_initq *atabus_initq = NULL;
316 int i, s;
317
318 /* XXX seems wrong */
319 mutex_enter(&atabus_qlock);
320 atabus_initq = TAILQ_FIRST(&atabus_initq_head);
321 KASSERT(atabus_initq->atabus_sc == atabus_sc);
322 mutex_exit(&atabus_qlock);
323
324 /*
325 * First look for a port multiplier
326 */
327 if (chp->ch_ndrives == PMP_MAX_DRIVES &&
328 chp->ch_drive[PMP_PORT_CTL].drive_type == ATA_DRIVET_PM) {
329 #if NSATA_PMP > 0
330 satapmp_attach(chp);
331 #else
332 aprint_error_dev(atabus_sc->sc_dev,
333 "SATA port multiplier not supported\n");
334 /* no problem to continue, all drives stay ATA_DRIVET_NONE */
335 #endif
336 }
337
338 /*
339 * Attach an ATAPI bus, if needed.
340 */
341 KASSERT(chp->ch_ndrives == 0 || chp->ch_drive != NULL);
342 for (i = 0; i < chp->ch_ndrives && chp->atapibus == NULL; i++) {
343 if (chp->ch_drive[i].drive_type == ATA_DRIVET_ATAPI) {
344 #if NATAPIBUS > 0
345 (*atac->atac_atapibus_attach)(atabus_sc);
346 #else
347 /*
348 * Fake the autoconfig "not configured" message
349 */
350 aprint_normal("atapibus at %s not configured\n",
351 device_xname(atac->atac_dev));
352 chp->atapibus = NULL;
353 s = splbio();
354 for (i = 0; i < chp->ch_ndrives; i++) {
355 if (chp->ch_drive[i].drive_type == ATA_DRIVET_ATAPI)
356 chp->ch_drive[i].drive_type = ATA_DRIVET_NONE;
357 }
358 splx(s);
359 #endif
360 break;
361 }
362 }
363
364 for (i = 0; i < chp->ch_ndrives; i++) {
365 struct ata_device adev;
366 if (chp->ch_drive[i].drive_type != ATA_DRIVET_ATA &&
367 chp->ch_drive[i].drive_type != ATA_DRIVET_OLD) {
368 continue;
369 }
370 if (chp->ch_drive[i].drv_softc != NULL)
371 continue;
372 memset(&adev, 0, sizeof(struct ata_device));
373 adev.adev_bustype = atac->atac_bustype_ata;
374 adev.adev_channel = chp->ch_channel;
375 adev.adev_drv_data = &chp->ch_drive[i];
376 chp->ch_drive[i].drv_softc = config_found(atabus_sc->sc_dev,
377 &adev, ataprint,
378 CFARG_IATTR, "ata_hl",
379 CFARG_EOL);
380 if (chp->ch_drive[i].drv_softc != NULL) {
381 ata_probe_caps(&chp->ch_drive[i]);
382 } else {
383 s = splbio();
384 chp->ch_drive[i].drive_type = ATA_DRIVET_NONE;
385 splx(s);
386 }
387 }
388
389 /* now that we know the drives, the controller can set its modes */
390 if (atac->atac_set_modes) {
391 (*atac->atac_set_modes)(chp);
392 ata_print_modes(chp);
393 }
394 #if NATARAID > 0
395 if (atac->atac_cap & ATAC_CAP_RAID) {
396 for (i = 0; i < chp->ch_ndrives; i++) {
397 if (chp->ch_drive[i].drive_type == ATA_DRIVET_ATA) {
398 ata_raid_check_component(
399 chp->ch_drive[i].drv_softc);
400 }
401 }
402 }
403 #endif /* NATARAID > 0 */
404
405 /*
406 * reset drive_flags for unattached devices, reset state for attached
407 * ones
408 */
409 s = splbio();
410 for (i = 0; i < chp->ch_ndrives; i++) {
411 if (chp->ch_drive[i].drive_type == ATA_DRIVET_PM)
412 continue;
413 if (chp->ch_drive[i].drv_softc == NULL) {
414 chp->ch_drive[i].drive_flags = 0;
415 chp->ch_drive[i].drive_type = ATA_DRIVET_NONE;
416 } else
417 chp->ch_drive[i].state = 0;
418 }
419 splx(s);
420
421 mutex_enter(&atabus_qlock);
422 TAILQ_REMOVE(&atabus_initq_head, atabus_initq, atabus_initq);
423 cv_broadcast(&atabus_qcv);
424 mutex_exit(&atabus_qlock);
425
426 kmem_free(atabus_initq, sizeof(*atabus_initq));
427
428 ata_delref(chp);
429
430 config_pending_decr(atabus_sc->sc_dev);
431 kthread_exit(0);
432 }
433
434 /*
435 * atabus_thread:
436 *
437 * Worker thread for the ATA bus.
438 */
439 static void
440 atabus_thread(void *arg)
441 {
442 struct atabus_softc *sc = arg;
443 struct ata_channel *chp = sc->sc_chan;
444 struct ata_queue *chq = chp->ch_queue;
445 struct ata_xfer *xfer;
446 int i, rv;
447
448 ata_channel_lock(chp);
449 KASSERT(ata_is_thread_run(chp));
450
451 /*
452 * Probe the drives. Reset the drive type to indicate to controllers
453 * that can re-probe that all drives must be probed.
454 *
455 * Note: ch_ndrives may be changed during the probe.
456 */
457 KASSERT(chp->ch_ndrives == 0 || chp->ch_drive != NULL);
458 for (i = 0; i < chp->ch_ndrives; i++) {
459 chp->ch_drive[i].drive_flags = 0;
460 chp->ch_drive[i].drive_type = ATA_DRIVET_NONE;
461 }
462 ata_channel_unlock(chp);
463
464 atabusconfig(sc);
465
466 ata_channel_lock(chp);
467 for (;;) {
468 if ((chp->ch_flags & (ATACH_TH_RESET | ATACH_TH_DRIVE_RESET
469 | ATACH_TH_RECOVERY | ATACH_SHUTDOWN)) == 0 &&
470 (chq->queue_active == 0 || chq->queue_freeze == 0)) {
471 cv_wait(&chp->ch_thr_idle, &chp->ch_lock);
472 }
473 if (chp->ch_flags & ATACH_SHUTDOWN) {
474 break;
475 }
476 if (chp->ch_flags & ATACH_TH_RESCAN) {
477 chp->ch_flags &= ~ATACH_TH_RESCAN;
478 ata_channel_unlock(chp);
479 atabusconfig(sc);
480 ata_channel_lock(chp);
481 }
482 if (chp->ch_flags & ATACH_TH_RESET) {
483 /* this will unfreeze the channel */
484 ata_thread_run(chp, AT_WAIT,
485 ATACH_TH_RESET, ATACH_NODRIVE);
486 } else if (chp->ch_flags & ATACH_TH_DRIVE_RESET) {
487 /* this will unfreeze the channel */
488 for (i = 0; i < chp->ch_ndrives; i++) {
489 struct ata_drive_datas *drvp;
490
491 drvp = &chp->ch_drive[i];
492
493 if (drvp->drive_flags & ATA_DRIVE_TH_RESET) {
494 ata_thread_run(chp,
495 AT_WAIT, ATACH_TH_DRIVE_RESET, i);
496 }
497 }
498 chp->ch_flags &= ~ATACH_TH_DRIVE_RESET;
499 } else if (chp->ch_flags & ATACH_TH_RECOVERY) {
500 /*
501 * This will unfreeze the channel; drops locks during
502 * run, so must wrap in splbio()/splx() to avoid
503 * spurious interrupts. XXX MPSAFE
504 */
505 int s = splbio();
506 ata_thread_run(chp, AT_WAIT, ATACH_TH_RECOVERY,
507 chp->recovery_tfd);
508 splx(s);
509 } else if (chq->queue_active > 0 && chq->queue_freeze == 1) {
510 /*
511 * Caller has bumped queue_freeze, decrease it. This
512 * flow shalt never be executed for NCQ commands.
513 */
514 KASSERT((chp->ch_flags & ATACH_NCQ) == 0);
515 KASSERT(chq->queue_active == 1);
516
517 ata_channel_thaw_locked(chp);
518 xfer = ata_queue_get_active_xfer_locked(chp);
519
520 KASSERT(xfer != NULL);
521 KASSERT((xfer->c_flags & C_POLL) == 0);
522
523 switch ((rv = ata_xfer_start(xfer))) {
524 case ATASTART_STARTED:
525 case ATASTART_POLL:
526 case ATASTART_ABORT:
527 break;
528 case ATASTART_TH:
529 default:
530 panic("%s: ata_xfer_start() unexpected rv %d",
531 __func__, rv);
532 /* NOTREACHED */
533 }
534 } else if (chq->queue_freeze > 1)
535 panic("%s: queue_freeze", __func__);
536
537 /* Try to run down the queue once channel is unfrozen */
538 if (chq->queue_freeze == 0) {
539 ata_channel_unlock(chp);
540 atastart(chp);
541 ata_channel_lock(chp);
542 }
543 }
544 chp->ch_thread = NULL;
545 cv_signal(&chp->ch_thr_idle);
546 ata_channel_unlock(chp);
547 kthread_exit(0);
548 }
549
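/*
 * ata_is_thread_run:
 *
 * Return true if we are executing in the channel's worker thread
 * (and not in interrupt context). Channel lock must be held.
 */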
550 bool
551 ata_is_thread_run(struct ata_channel *chp)
552 {
553 KASSERT(mutex_owned(&chp->ch_lock));
554
555 return (chp->ch_thread == curlwp && !cpu_intr_p());
556 }
557
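/*
 * ata_thread_wake_locked:
 *
 * Freeze the channel and wake up its worker thread; the thread is
 * expected to thaw the channel again once the work is done.
 * Channel lock must be held.
 */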
558 static void
559 ata_thread_wake_locked(struct ata_channel *chp)
560 {
561 KASSERT(mutex_owned(&chp->ch_lock));
562 ata_channel_freeze_locked(chp);
563 cv_signal(&chp->ch_thr_idle);
564 }
565
566 /*
567 * atabus_match:
568 *
569 * Autoconfiguration match routine.
570 */
571 static int
572 atabus_match(device_t parent, cfdata_t cf, void *aux)
573 {
574 struct ata_channel *chp = aux;
575
576 if (chp == NULL)
577 return (0);
578
579 if (cf->cf_loc[ATACF_CHANNEL] != chp->ch_channel &&
580 cf->cf_loc[ATACF_CHANNEL] != ATACF_CHANNEL_DEFAULT)
581 return (0);
582
583 return (1);
584 }
585
586 /*
587 * atabus_attach:
588 *
589 * Autoconfiguration attach routine.
590 */
591 static void
592 atabus_attach(device_t parent, device_t self, void *aux)
593 {
594 struct atabus_softc *sc = device_private(self);
595 struct ata_channel *chp = aux;
596 struct atabus_initq *initq;
597 int error;
598
599 sc->sc_chan = chp;
600
601 aprint_normal("\n");
602 aprint_naive("\n");
603
604 sc->sc_dev = self;
605
606 if (ata_addref(chp))
607 return;
608
609 RUN_ONCE(&ata_init_ctrl, atabus_init);
610
611 initq = kmem_zalloc(sizeof(*initq), KM_SLEEP);
612 initq->atabus_sc = sc;
613 mutex_enter(&atabus_qlock);
614 TAILQ_INSERT_TAIL(&atabus_initq_head, initq, atabus_initq);
615 mutex_exit(&atabus_qlock);
616 config_pending_incr(sc->sc_dev);
617
618 /* XXX MPSAFE - no KTHREAD_MPSAFE, so protected by KERNEL_LOCK() */
619 if ((error = kthread_create(PRI_NONE, 0, NULL, atabus_thread, sc,
620 &chp->ch_thread, "%s", device_xname(self))) != 0)
621 aprint_error_dev(self,
622 "unable to create kernel thread: error %d\n", error);
623
624 if (!pmf_device_register(self, atabus_suspend, atabus_resume))
625 aprint_error_dev(self, "couldn't establish power handler\n");
626 }
627
628 /*
629 * atabus_detach:
630 *
631 * Autoconfiguration detach routine.
632 */
633 static int
634 atabus_detach(device_t self, int flags)
635 {
636 struct atabus_softc *sc = device_private(self);
637 struct ata_channel *chp = sc->sc_chan;
638 device_t dev = NULL;
639 int i, error = 0;
640
641 /*
642 * Detach atapibus and its children.
643 */
644 if ((dev = chp->atapibus) != NULL) {
645 ATADEBUG_PRINT(("atabus_detach: %s: detaching %s\n",
646 device_xname(self), device_xname(dev)), DEBUG_DETACH);
647
648 error = config_detach(dev, flags);
649 if (error)
650 goto out;
651 KASSERT(chp->atapibus == NULL);
652 }
653
654 KASSERT(chp->ch_ndrives == 0 || chp->ch_drive != NULL);
655
656 /*
657 * Detach our other children.
658 */
659 for (i = 0; i < chp->ch_ndrives; i++) {
660 if (chp->ch_drive[i].drive_type == ATA_DRIVET_ATAPI)
661 continue;
662 if (chp->ch_drive[i].drive_type == ATA_DRIVET_PM)
663 chp->ch_drive[i].drive_type = ATA_DRIVET_NONE;
664 if ((dev = chp->ch_drive[i].drv_softc) != NULL) {
665 ATADEBUG_PRINT(("%s.%d: %s: detaching %s\n", __func__,
666 __LINE__, device_xname(self), device_xname(dev)),
667 DEBUG_DETACH);
668 error = config_detach(dev, flags);
669 if (error)
670 goto out;
671 KASSERT(chp->ch_drive[i].drv_softc == NULL);
672 KASSERT(chp->ch_drive[i].drive_type == 0);
673 }
674 }
675
676 /* Shutdown the channel. */
677 ata_channel_lock(chp);
678 chp->ch_flags |= ATACH_SHUTDOWN;
679 while (chp->ch_thread != NULL) {
680 cv_signal(&chp->ch_thr_idle);
681 cv_wait(&chp->ch_thr_idle, &chp->ch_lock);
682 }
683 ata_channel_unlock(chp);
684
685 atabus_free_drives(chp);
686
687 out:
688 #ifdef ATADEBUG
689 if (dev != NULL && error != 0)
690 ATADEBUG_PRINT(("%s: %s: error %d detaching %s\n", __func__,
691 device_xname(self), error, device_xname(dev)),
692 DEBUG_DETACH);
693 #endif /* ATADEBUG */
694
695 return (error);
696 }
697
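/*
 * atabus_childdetached:
 *
 * Autoconfiguration notification that one of our children is gone;
 * clear the corresponding drive's bookkeeping.
 */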
698 void
699 atabus_childdetached(device_t self, device_t child)
700 {
701 bool found = false;
702 struct atabus_softc *sc = device_private(self);
703 struct ata_channel *chp = sc->sc_chan;
704 int i;
705
706 KASSERT(chp->ch_ndrives == 0 || chp->ch_drive != NULL);
707 /*
708 * atapibus detached.
709 */
710 if (child == chp->atapibus) {
711 chp->atapibus = NULL;
712 found = true;
713 for (i = 0; i < chp->ch_ndrives; i++) {
714 if (chp->ch_drive[i].drive_type != ATA_DRIVET_ATAPI)
715 continue;
716 KASSERT(chp->ch_drive[i].drv_softc != NULL);
717 chp->ch_drive[i].drv_softc = NULL;
718 chp->ch_drive[i].drive_flags = 0;
719 chp->ch_drive[i].drive_type = ATA_DRIVET_NONE;
720 }
721 }
722
723 /*
724 * Detach our other children.
725 */
726 for (i = 0; i < chp->ch_ndrives; i++) {
727 if (chp->ch_drive[i].drive_type == ATA_DRIVET_ATAPI)
728 continue;
729 if (child == chp->ch_drive[i].drv_softc) {
730 chp->ch_drive[i].drv_softc = NULL;
731 chp->ch_drive[i].drive_flags = 0;
732 if (chp->ch_drive[i].drive_type == ATA_DRIVET_PM)
733 chp->ch_satapmp_nports = 0;
734 chp->ch_drive[i].drive_type = ATA_DRIVET_NONE;
735 found = true;
736 }
737 }
738
739 if (!found)
740 panic("%s: unknown child %p", device_xname(self),
741 (const void *)child);
742 }
743
744 CFATTACH_DECL3_NEW(atabus, sizeof(struct atabus_softc),
745 atabus_match, atabus_attach, atabus_detach, NULL, atabus_rescan,
746 atabus_childdetached, DVF_DETACH_SHUTDOWN);
747
748 /*****************************************************************************
749 * Common ATA bus operations.
750 *****************************************************************************/
751
752 /* allocate/free the channel's ch_drive[] array */
753 int
754 atabus_alloc_drives(struct ata_channel *chp, int ndrives)
755 {
756 int i;
757 if (chp->ch_ndrives != ndrives)
758 atabus_free_drives(chp);
759 if (chp->ch_drive == NULL) {
760 void *drv;
761
762 ata_channel_unlock(chp);
763 drv = kmem_zalloc(sizeof(*chp->ch_drive) * ndrives, KM_SLEEP);
764 ata_channel_lock(chp);
765
766 if (chp->ch_drive != NULL) {
767 /* lost the race */
768 kmem_free(drv, sizeof(*chp->ch_drive) * ndrives);
769 return 0;
770 }
771 chp->ch_drive = drv;
772 }
773 for (i = 0; i < ndrives; i++) {
774 chp->ch_drive[i].chnl_softc = chp;
775 chp->ch_drive[i].drive = i;
776 }
777 chp->ch_ndrives = ndrives;
778 return 0;
779 }
780
781 void
782 atabus_free_drives(struct ata_channel *chp)
783 {
784 #ifdef DIAGNOSTIC
785 int i;
786 int dopanic = 0;
787 KASSERT(chp->ch_ndrives == 0 || chp->ch_drive != NULL);
788 for (i = 0; i < chp->ch_ndrives; i++) {
789 if (chp->ch_drive[i].drive_type != ATA_DRIVET_NONE) {
790 printf("%s: ch_drive[%d] type %d != ATA_DRIVET_NONE\n",
791 device_xname(chp->atabus), i,
792 chp->ch_drive[i].drive_type);
793 dopanic = 1;
794 }
795 if (chp->ch_drive[i].drv_softc != NULL) {
796 printf("%s: ch_drive[%d] attached to %s\n",
797 device_xname(chp->atabus), i,
798 device_xname(chp->ch_drive[i].drv_softc));
799 dopanic = 1;
800 }
801 }
802 if (dopanic)
803 panic("atabus_free_drives");
804 #endif
805
806 if (chp->ch_drive == NULL)
807 return;
808 kmem_free(chp->ch_drive,
809 sizeof(struct ata_drive_datas) * chp->ch_ndrives);
810 chp->ch_ndrives = 0;
811 chp->ch_drive = NULL;
812 }
813
814 /* Get the disk's parameters */
815 int
816 ata_get_params(struct ata_drive_datas *drvp, uint8_t flags,
817 struct ataparams *prms)
818 {
819 struct ata_xfer *xfer;
820 struct ata_channel *chp = drvp->chnl_softc;
821 struct atac_softc *atac = chp->ch_atac;
822 char *tb;
823 int i, rv;
824 uint16_t *p;
825
826 ATADEBUG_PRINT(("%s\n", __func__), DEBUG_FUNCS);
827
828 xfer = ata_get_xfer(chp, false);
829 if (xfer == NULL) {
830 ATADEBUG_PRINT(("%s: no xfer\n", __func__),
831 DEBUG_FUNCS|DEBUG_PROBE);
832 return CMD_AGAIN;
833 }
834
835 tb = kmem_zalloc(ATA_BSIZE, KM_SLEEP);
836 memset(prms, 0, sizeof(struct ataparams));
837
838 if (drvp->drive_type == ATA_DRIVET_ATA) {
839 xfer->c_ata_c.r_command = WDCC_IDENTIFY;
840 xfer->c_ata_c.r_st_bmask = WDCS_DRDY;
841 xfer->c_ata_c.r_st_pmask = WDCS_DRQ;
842 xfer->c_ata_c.timeout = 3000; /* 3s */
843 } else if (drvp->drive_type == ATA_DRIVET_ATAPI) {
844 xfer->c_ata_c.r_command = ATAPI_IDENTIFY_DEVICE;
845 xfer->c_ata_c.r_st_bmask = 0;
846 xfer->c_ata_c.r_st_pmask = WDCS_DRQ;
847 xfer->c_ata_c.timeout = 10000; /* 10s */
848 } else {
849 ATADEBUG_PRINT(("ata_get_parms: no disks\n"),
850 DEBUG_FUNCS|DEBUG_PROBE);
851 rv = CMD_ERR;
852 goto out;
853 }
854 xfer->c_ata_c.flags = AT_READ | flags;
855 xfer->c_ata_c.data = tb;
856 xfer->c_ata_c.bcount = ATA_BSIZE;
857 (*atac->atac_bustype_ata->ata_exec_command)(drvp, xfer);
858 ata_wait_cmd(chp, xfer);
859 if (xfer->c_ata_c.flags & (AT_ERROR | AT_TIMEOU | AT_DF)) {
860 ATADEBUG_PRINT(("ata_get_parms: ata_c.flags=0x%x\n",
861 xfer->c_ata_c.flags), DEBUG_FUNCS|DEBUG_PROBE);
862 rv = CMD_ERR;
863 goto out;
864 }
865 /* if we didn't read any data, something is wrong */
866 if ((xfer->c_ata_c.flags & AT_XFDONE) == 0) {
867 rv = CMD_ERR;
868 goto out;
869 }
870
871 /* Read in parameter block. */
872 memcpy(prms, tb, sizeof(struct ataparams));
873
874 /*
875 * Shuffle string byte order.
876 * ATAPI NEC, Mitsumi and Pioneer drives and
877 * old ATA TDK CompactFlash cards
878 * have different byte order.
879 */
880 #if BYTE_ORDER == BIG_ENDIAN
881 # define M(n) prms->atap_model[(n) ^ 1]
882 #else
883 # define M(n) prms->atap_model[n]
884 #endif
885 if (
886 #if BYTE_ORDER == BIG_ENDIAN
887 !
888 #endif
889 ((drvp->drive_type == ATA_DRIVET_ATAPI) ?
890 ((M(0) == 'N' && M(1) == 'E') ||
891 (M(0) == 'F' && M(1) == 'X') ||
892 (M(0) == 'P' && M(1) == 'i')) :
893 ((M(0) == 'T' && M(1) == 'D' && M(2) == 'K')))) {
894 rv = CMD_OK;
895 goto out;
896 }
897 #undef M
898 for (i = 0; i < sizeof(prms->atap_model); i += 2) {
899 p = (uint16_t *)(prms->atap_model + i);
900 *p = bswap16(*p);
901 }
902 for (i = 0; i < sizeof(prms->atap_serial); i += 2) {
903 p = (uint16_t *)(prms->atap_serial + i);
904 *p = bswap16(*p);
905 }
906 for (i = 0; i < sizeof(prms->atap_revision); i += 2) {
907 p = (uint16_t *)(prms->atap_revision + i);
908 *p = bswap16(*p);
909 }
910
911 rv = CMD_OK;
912 out:
913 kmem_free(tb, ATA_BSIZE);
914 ata_free_xfer(chp, xfer);
915 return rv;
916 }
917
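/*
 * ata_set_mode:
 *
 * Issue SET FEATURES to program the drive's transfer mode. mode is
 * the transfer-mode encoding passed in the count register (see the
 * callers in ata_probe_caps(): 0x08|pio, 0x20|dma, 0x40|udma).
 */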
918 int
919 ata_set_mode(struct ata_drive_datas *drvp, uint8_t mode, uint8_t flags)
920 {
921 struct ata_xfer *xfer;
922 int rv;
923 struct ata_channel *chp = drvp->chnl_softc;
924 struct atac_softc *atac = chp->ch_atac;
925
926 ATADEBUG_PRINT(("ata_set_mode=0x%x\n", mode), DEBUG_FUNCS);
927
928 xfer = ata_get_xfer(chp, false);
929 if (xfer == NULL) {
930 ATADEBUG_PRINT(("%s: no xfer\n", __func__),
931 DEBUG_FUNCS|DEBUG_PROBE);
932 return CMD_AGAIN;
933 }
934
935 xfer->c_ata_c.r_command = SET_FEATURES;
936 xfer->c_ata_c.r_st_bmask = 0;
937 xfer->c_ata_c.r_st_pmask = 0;
938 xfer->c_ata_c.r_features = WDSF_SET_MODE;
939 xfer->c_ata_c.r_count = mode;
940 xfer->c_ata_c.flags = flags;
941 xfer->c_ata_c.timeout = 1000; /* 1s */
942 (*atac->atac_bustype_ata->ata_exec_command)(drvp, xfer);
943 ata_wait_cmd(chp, xfer);
944 if (xfer->c_ata_c.flags & (AT_ERROR | AT_TIMEOU | AT_DF)) {
945 rv = CMD_ERR;
946 goto out;
947 }
948
949 rv = CMD_OK;
950
951 out:
952 ata_free_xfer(chp, xfer);
953 return rv;
954 }
955
956 #if NATA_DMA
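/*
 * ata_dmaerr:
 *
 * Note a DMA error for the drive. If NERRS_MAX errors accumulate
 * within NXFER transfers, either downgrade the transfer mode (with
 * options ATA_DOWNGRADE_MODE) or print a rate-limited warning.
 */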
957 void
958 ata_dmaerr(struct ata_drive_datas *drvp, int flags)
959 {
960 ata_channel_lock_owned(drvp->chnl_softc);
961
962 /*
963 * Downgrade decision: downgrade if we get NERRS_MAX errors within
964 * NXFER transfers. We start with n_dmaerrs set to NERRS_MAX-1 so
965 * that the first error within the first NXFER ops will immediately
966 * trigger a downgrade.
967 * If we got an error and n_xfers is bigger than NXFER, reset the counters.
968 */
969 drvp->n_dmaerrs++;
970 if (drvp->n_dmaerrs >= NERRS_MAX && drvp->n_xfers <= NXFER) {
971 #ifdef ATA_DOWNGRADE_MODE
972 ata_downgrade_mode(drvp, flags);
973 drvp->n_dmaerrs = NERRS_MAX-1;
974 #else
975 static struct timeval last;
976 static const struct timeval serrintvl = { 300, 0 };
977
978 if (ratecheck(&last, &serrintvl)) {
979 aprint_error_dev(drvp->drv_softc,
980 "excessive DMA errors - %d in last %d transfers\n",
981 drvp->n_dmaerrs, drvp->n_xfers);
982 }
983 #endif
984 drvp->n_xfers = 0;
985 return;
986 }
987 if (drvp->n_xfers > NXFER) {
988 drvp->n_dmaerrs = 1; /* just got an error */
989 drvp->n_xfers = 1; /* restart counting from this error */
990 }
991 }
992 #endif /* NATA_DMA */
993
994 /*
995 * Freeze the queue and wait for the controller to be idle. The caller has
996 * to unfreeze/restart the queue.
997 */
998 static void
999 ata_channel_idle(struct ata_channel *chp)
1000 {
1001 ata_channel_lock(chp);
1002 ata_channel_freeze_locked(chp);
1003 while (chp->ch_queue->queue_active > 0) {
1004 chp->ch_queue->queue_flags |= QF_IDLE_WAIT;
1005 cv_timedwait(&chp->ch_queue->queue_idle, &chp->ch_lock, 1);
1006 }
1007 ata_channel_unlock(chp);
1008 }
1009
1010 /*
1011 * Add a command to the queue and start controller.
1012 *
1013 * MUST BE CALLED AT splbio()!
1014 */
1015 void
1016 ata_exec_xfer(struct ata_channel *chp, struct ata_xfer *xfer)
1017 {
1018
1019 ATADEBUG_PRINT(("ata_exec_xfer %p channel %d drive %d\n", xfer,
1020 chp->ch_channel, xfer->c_drive), DEBUG_XFERS);
1021
1022 /* complete xfer setup */
1023 xfer->c_chp = chp;
1024
1025 ata_channel_lock(chp);
1026
1027 /*
1028 * Standard commands are added to the end of the command list, but
1029 * recovery commands must be run immediately.
1030 */
1031 if ((xfer->c_flags & C_SKIP_QUEUE) == 0)
1032 SIMPLEQ_INSERT_TAIL(&chp->ch_queue->queue_xfer, xfer,
1033 c_xferchain);
1034 else
1035 SIMPLEQ_INSERT_HEAD(&chp->ch_queue->queue_xfer, xfer,
1036 c_xferchain);
1037
1038 /*
1039 * if polling and we can sleep, wait for the xfer to reach the head of the queue
1040 */
1041 if ((xfer->c_flags & (C_POLL | C_WAIT)) == (C_POLL | C_WAIT)) {
1042 while (chp->ch_queue->queue_active > 0 ||
1043 SIMPLEQ_FIRST(&chp->ch_queue->queue_xfer) != xfer) {
1044 xfer->c_flags |= C_WAITACT;
1045 cv_wait(&chp->ch_queue->c_active, &chp->ch_lock);
1046 xfer->c_flags &= ~C_WAITACT;
1047 }
1048
1049 /*
1050 * Free the xfer now if there was an attempt to free it
1051 * while we were waiting.
1052 */
1053 if ((xfer->c_flags & (C_FREE|C_WAITTIMO)) == C_FREE) {
1054 ata_channel_unlock(chp);
1055
1056 ata_free_xfer(chp, xfer);
1057 return;
1058 }
1059 }
1060
1061 ata_channel_unlock(chp);
1062
1063 ATADEBUG_PRINT(("atastart from ata_exec_xfer, flags 0x%x\n",
1064 chp->ch_flags), DEBUG_XFERS);
1065 atastart(chp);
1066 }
1067
1068 /*
1069 * Start I/O on a controller, for the given channel.
1070 * The first xfer may not be for our channel if the channel queues
1071 * are shared.
1072 *
1073 * MUST BE CALLED AT splbio()!
1074 *
1075 * XXX FIS-based switching with PMP
1076 * Currently atastart() never schedules concurrent NCQ transfers to more than
1077 * one drive, even when channel has several SATA drives attached via PMP.
1078 * To support concurrent transfers to different drives with PMP, it would be
1079 * necessary to implement FIS-based switching support in controller driver,
1080 * and then adjust error handling and recovery to stop assuming at most
1081 * one active drive.
1082 */
1083 void
1084 atastart(struct ata_channel *chp)
1085 {
1086 struct atac_softc *atac = chp->ch_atac;
1087 struct ata_queue *chq = chp->ch_queue;
1088 struct ata_xfer *xfer, *axfer;
1089 bool skipq;
1090
1091 #ifdef ATA_DEBUG
1092 int spl1, spl2;
1093
1094 spl1 = splbio();
1095 spl2 = splbio();
1096 if (spl2 != spl1) {
1097 printf("atastart: not at splbio()\n");
1098 panic("atastart");
1099 }
1100 splx(spl2);
1101 splx(spl1);
1102 #endif /* ATA_DEBUG */
1103
1104 ata_channel_lock(chp);
1105
1106 again:
1107 /* is there an xfer? */
1108 if ((xfer = SIMPLEQ_FIRST(&chp->ch_queue->queue_xfer)) == NULL) {
1109 ATADEBUG_PRINT(("%s(chp=%p): channel %d queue_xfer is empty\n",
1110 __func__, chp, chp->ch_channel), DEBUG_XFERS);
1111 goto out;
1112 }
1113
1114 /*
1115 * if someone is waiting for the command to be active, wake it up
1116 * and let it process the command
1117 */
1118 if (__predict_false(xfer->c_flags & C_WAITACT)) {
1119 ATADEBUG_PRINT(("atastart: xfer %p channel %d drive %d "
1120 "wait active\n", xfer, chp->ch_channel, xfer->c_drive),
1121 DEBUG_XFERS);
1122 cv_broadcast(&chp->ch_queue->c_active);
1123 goto out;
1124 }
1125
1126 skipq = ISSET(xfer->c_flags, C_SKIP_QUEUE);
1127
1128 /* is the queue frozen? */
1129 if (__predict_false(!skipq && chq->queue_freeze > 0)) {
1130 if (chq->queue_flags & QF_IDLE_WAIT) {
1131 chq->queue_flags &= ~QF_IDLE_WAIT;
1132 cv_signal(&chp->ch_queue->queue_idle);
1133 }
1134 ATADEBUG_PRINT(("%s(chp=%p): channel %d drive %d "
1135 "queue frozen: %d\n",
1136 __func__, chp, chp->ch_channel, xfer->c_drive,
1137 chq->queue_freeze),
1138 DEBUG_XFERS);
1139 goto out;
1140 }
1141
1142 /* all xfers on same queue must belong to the same channel */
1143 KASSERT(xfer->c_chp == chp);
1144
1145 /*
1146 * Can only take the command if there are no current active
1147 * commands, or if the command is NCQ and the active commands are also
1148 * NCQ. If PM is in use and HBA driver doesn't support/use FIS-based
1149 * switching, can only send commands to single drive.
1150 * Need only check first xfer.
1151 * XXX FIS-based switching - revisit
1152 */
1153 if (!skipq && (axfer = TAILQ_FIRST(&chp->ch_queue->active_xfers))) {
1154 if (!ISSET(xfer->c_flags, C_NCQ) ||
1155 !ISSET(axfer->c_flags, C_NCQ) ||
1156 xfer->c_drive != axfer->c_drive)
1157 goto out;
1158 }
1159
1160 struct ata_drive_datas * const drvp = &chp->ch_drive[xfer->c_drive];
1161
1162 /*
1163 * Are we at the limit of active xfers? If the queue has more
1164 * than one opening, we keep one slot reserved for recovery or dump.
1165 */
1166 KASSERT(chq->queue_active <= chq->queue_openings);
1167 const uint8_t chq_openings = (!skipq && chq->queue_openings > 1)
1168 ? (chq->queue_openings - 1) : chq->queue_openings;
1169 const uint8_t drv_openings = ISSET(xfer->c_flags, C_NCQ)
1170 ? drvp->drv_openings : ATA_MAX_OPENINGS;
1171 if (chq->queue_active >= MIN(chq_openings, drv_openings)) {
1172 if (skipq) {
1173 panic("%s: channel %d busy, xfer not possible",
1174 __func__, chp->ch_channel);
1175 }
1176
1177 ATADEBUG_PRINT(("%s(chp=%p): channel %d completely busy\n",
1178 __func__, chp, chp->ch_channel), DEBUG_XFERS);
1179 goto out;
1180 }
1181
1182 /* Slot allocation can fail if drv_openings < ch_openings */
1183 if (!ata_queue_alloc_slot(chp, &xfer->c_slot, drv_openings))
1184 goto out;
1185
1186 if (__predict_false(atac->atac_claim_hw)) {
1187 if (!atac->atac_claim_hw(chp, 0)) {
1188 ata_queue_free_slot(chp, xfer->c_slot);
1189 goto out;
1190 }
1191 }
1192
1193 /* Now committed to start the xfer */
1194
1195 ATADEBUG_PRINT(("%s(chp=%p): xfer %p channel %d drive %d\n",
1196 __func__, chp, xfer, chp->ch_channel, xfer->c_drive), DEBUG_XFERS);
1197 if (drvp->drive_flags & ATA_DRIVE_RESET) {
1198 drvp->drive_flags &= ~ATA_DRIVE_RESET;
1199 drvp->state = 0;
1200 }
1201
1202 if (ISSET(xfer->c_flags, C_NCQ))
1203 SET(chp->ch_flags, ATACH_NCQ);
1204 else
1205 CLR(chp->ch_flags, ATACH_NCQ);
1206
1207 SIMPLEQ_REMOVE_HEAD(&chq->queue_xfer, c_xferchain);
1208
1209 ata_activate_xfer_locked(chp, xfer);
1210
1211 if (atac->atac_cap & ATAC_CAP_NOIRQ)
1212 KASSERT(xfer->c_flags & C_POLL);
1213
1214 switch (ata_xfer_start(xfer)) {
1215 case ATASTART_TH:
1216 case ATASTART_ABORT:
1217 /* don't start any further commands in this case */
1218 goto out;
1219 default:
1220 /* nothing to do */
1221 break;
1222 }
1223
1224 /* Queue more commands if possible, but not during recovery or dump */
1225 if (!skipq && chq->queue_active < chq->queue_openings)
1226 goto again;
1227
1228 out:
1229 ata_channel_unlock(chp);
1230 }
1231
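/*
 * ata_xfer_start:
 *
 * Call the xfer's c_start hook and dispatch on its result:
 * ATASTART_POLL and ATASTART_ABORT are handled here (dropping the
 * channel lock around the callback), ATASTART_TH wakes the worker
 * thread. Returns the ATASTART_* value. Channel lock must be held.
 */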
1232 int
1233 ata_xfer_start(struct ata_xfer *xfer)
1234 {
1235 struct ata_channel *chp = xfer->c_chp;
1236 int rv;
1237
1238 KASSERT(mutex_owned(&chp->ch_lock));
1239
1240 rv = xfer->ops->c_start(chp, xfer);
1241 switch (rv) {
1242 case ATASTART_STARTED:
1243 /* nothing to do */
1244 break;
1245 case ATASTART_TH:
1246 /* postpone xfer to thread */
1247 ata_thread_wake_locked(chp);
1248 break;
1249 case ATASTART_POLL:
1250 /* can happen even in thread context for some ATAPI devices */
1251 ata_channel_unlock(chp);
1252 KASSERT(xfer->ops != NULL && xfer->ops->c_poll != NULL);
1253 xfer->ops->c_poll(chp, xfer);
1254 ata_channel_lock(chp);
1255 break;
1256 case ATASTART_ABORT:
1257 ata_channel_unlock(chp);
1258 KASSERT(xfer->ops != NULL && xfer->ops->c_abort != NULL);
1259 xfer->ops->c_abort(chp, xfer);
1260 ata_channel_lock(chp);
1261 break;
1262 }
1263
1264 return rv;
1265 }
1266
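/*
 * ata_activate_xfer_locked:
 *
 * Account the xfer as active: put it on the active list, mark its
 * slot used and bump queue_active. Channel lock must be held.
 */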
1267 static void
1268 ata_activate_xfer_locked(struct ata_channel *chp, struct ata_xfer *xfer)
1269 {
1270 struct ata_queue * const chq = chp->ch_queue;
1271
1272 KASSERT(mutex_owned(&chp->ch_lock));
1273 KASSERT((chq->active_xfers_used & __BIT(xfer->c_slot)) == 0);
1274
1275 if ((xfer->c_flags & C_SKIP_QUEUE) == 0)
1276 TAILQ_INSERT_TAIL(&chq->active_xfers, xfer, c_activechain);
1277 else {
1278 /*
1279 * Must go to head, so that ata_queue_get_active_xfer()
1280 * returns the recovery command, and not some other
1281 * random active transfer.
1282 */
1283 TAILQ_INSERT_HEAD(&chq->active_xfers, xfer, c_activechain);
1284 }
1285 chq->active_xfers_used |= __BIT(xfer->c_slot);
1286 chq->queue_active++;
1287 }
1288
1289 /*
1290 * Does its own locking, does not require splbio().
1291 * waitok - whether to block waiting for a free xfer
1292 */
1293 struct ata_xfer *
1294 ata_get_xfer(struct ata_channel *chp, bool waitok)
1295 {
1296 return pool_get(&ata_xfer_pool,
1297 PR_ZERO | (waitok ? PR_WAITOK : PR_NOWAIT));
1298 }
1299
1300 /*
1301 * ata_deactivate_xfer() must always be called prior to ata_free_xfer()
1302 */
1303 void
1304 ata_free_xfer(struct ata_channel *chp, struct ata_xfer *xfer)
1305 {
1306 struct ata_queue *chq = chp->ch_queue;
1307
1308 ata_channel_lock(chp);
1309
1310 if (__predict_false(xfer->c_flags & (C_WAITACT|C_WAITTIMO))) {
1311 /* Someone is waiting for this xfer, so we can't free now */
1312 xfer->c_flags |= C_FREE;
1313 cv_broadcast(&chq->c_active);
1314 ata_channel_unlock(chp);
1315 return;
1316 }
1317
1318 /* XXX move PIOBM and free_hw to deactivate? */
1319 #if NATA_PIOBM /* XXX wdc dependent code */
1320 if (__predict_false(xfer->c_flags & C_PIOBM)) {
1321 struct wdc_softc *wdc = CHAN_TO_WDC(chp);
1322
1323 /* finish the busmastering PIO */
1324 (*wdc->piobm_done)(wdc->dma_arg,
1325 chp->ch_channel, xfer->c_drive);
1326 chp->ch_flags &= ~(ATACH_DMA_WAIT | ATACH_PIOBM_WAIT | ATACH_IRQ_WAIT);
1327 }
1328 #endif
1329
1330 if (__predict_false(chp->ch_atac->atac_free_hw))
1331 chp->ch_atac->atac_free_hw(chp);
1332
1333 ata_channel_unlock(chp);
1334
1335 if (__predict_true(!ISSET(xfer->c_flags, C_PRIVATE_ALLOC)))
1336 pool_put(&ata_xfer_pool, xfer);
1337 }
1338
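/*
 * ata_deactivate_xfer:
 *
 * Remove the xfer from the channel's active list and release its
 * queue slot; must always precede ata_free_xfer().
 */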
1339 void
1340 ata_deactivate_xfer(struct ata_channel *chp, struct ata_xfer *xfer)
1341 {
1342 struct ata_queue * const chq = chp->ch_queue;
1343
1344 ata_channel_lock(chp);
1345
1346 KASSERT(chq->queue_active > 0);
1347 KASSERT((chq->active_xfers_used & __BIT(xfer->c_slot)) != 0);
1348
1349 /* Stop only when this is the last active xfer */
1350 if (chq->queue_active == 1)
1351 callout_stop(&chp->c_timo_callout);
1352
1353 if (callout_invoking(&chp->c_timo_callout))
1354 xfer->c_flags |= C_WAITTIMO;
1355
1356 TAILQ_REMOVE(&chq->active_xfers, xfer, c_activechain);
1357 chq->active_xfers_used &= ~__BIT(xfer->c_slot);
1358 chq->queue_active--;
1359
1360 ata_queue_free_slot(chp, xfer->c_slot);
1361
1362 if (xfer->c_flags & C_WAIT)
1363 cv_broadcast(&chq->c_cmd_finish);
1364
1365 ata_channel_unlock(chp);
1366 }
1367
1368 /*
1369 * Called in the c_intr hook. Must be called before any deactivations
1370 * are done - if there is a drain pending, it calls the c_kill_xfer hook which
1371 * deactivates the xfer.
1372 * Calls c_kill_xfer with channel lock free.
1373 * Returns true if caller should just exit without further processing.
1374 * Caller must not further access any part of xfer or any related controller
1375 * structures in that case, it should just return.
1376 */
1377 bool
1378 ata_waitdrain_xfer_check(struct ata_channel *chp, struct ata_xfer *xfer)
1379 {
1380 int drive = xfer->c_drive;
1381 bool draining = false;
1382
1383 ata_channel_lock(chp);
1384
1385 if (chp->ch_drive[drive].drive_flags & ATA_DRIVE_WAITDRAIN) {
1386 ata_channel_unlock(chp);
1387
1388 xfer->ops->c_kill_xfer(chp, xfer, KILL_GONE);
1389
1390 ata_channel_lock(chp);
1391 chp->ch_drive[drive].drive_flags &= ~ATA_DRIVE_WAITDRAIN;
1392 cv_signal(&chp->ch_queue->queue_drain);
1393 draining = true;
1394 }
1395
1396 ata_channel_unlock(chp);
1397
1398 return draining;
1399 }
1400
1401 /*
1402 * Check for race of normal transfer handling vs. timeout.
1403 */
1404 bool
1405 ata_timo_xfer_check(struct ata_xfer *xfer)
1406 {
1407 struct ata_channel *chp = xfer->c_chp;
1408 struct ata_drive_datas *drvp = &chp->ch_drive[xfer->c_drive];
1409
1410 ata_channel_lock(chp);
1411
1412 if (xfer->c_flags & C_WAITTIMO) {
1413 xfer->c_flags &= ~C_WAITTIMO;
1414
1415 /* Handle race vs. ata_free_xfer() */
1416 if (xfer->c_flags & C_FREE) {
1417 xfer->c_flags &= ~C_FREE;
1418 ata_channel_unlock(chp);
1419
1420 device_printf(drvp->drv_softc,
1421 "xfer %"PRIxPTR" freed while invoking timeout\n",
1422 (intptr_t)xfer & PAGE_MASK);
1423
1424 ata_free_xfer(chp, xfer);
1425 return true;
1426 }
1427
1428 /* Race vs. callout_stop() in ata_deactivate_xfer() */
1429 ata_channel_unlock(chp);
1430
1431 device_printf(drvp->drv_softc,
1432 "xfer %"PRIxPTR" deactivated while invoking timeout\n",
1433 (intptr_t)xfer & PAGE_MASK);
1434 return true;
1435 }
1436
1437 ata_channel_unlock(chp);
1438
1439 /* No race, proceed with timeout handling */
1440 return false;
1441 }
1442
1443 /*
1444 * Kill off all active xfers for a ata_channel.
1445 *
1446 * Must be called with channel lock held.
1447 */
1448 void
1449 ata_kill_active(struct ata_channel *chp, int reason, int flags)
1450 {
1451 struct ata_queue * const chq = chp->ch_queue;
1452 struct ata_xfer *xfer, *xfernext;
1453
1454 KASSERT(mutex_owned(&chp->ch_lock));
1455
1456 TAILQ_FOREACH_SAFE(xfer, &chq->active_xfers, c_activechain, xfernext) {
1457 ata_channel_unlock(chp);
1458 xfer->ops->c_kill_xfer(xfer->c_chp, xfer, reason);
1459 ata_channel_lock(chp);
1460 }
1461 }
1462
1463 /*
1464 * Kill off all pending xfers for a drive.
1465 */
1466 void
1467 ata_kill_pending(struct ata_drive_datas *drvp)
1468 {
1469 struct ata_channel * const chp = drvp->chnl_softc;
1470 struct ata_queue * const chq = chp->ch_queue;
1471 struct ata_xfer *xfer;
1472
1473 ata_channel_lock(chp);
1474
1475 /* Kill all pending transfers */
1476 while ((xfer = SIMPLEQ_FIRST(&chq->queue_xfer))) {
1477 KASSERT(xfer->c_chp == chp);
1478
1479 if (xfer->c_drive != drvp->drive)
1480 continue;
1481
1482 SIMPLEQ_REMOVE_HEAD(&chp->ch_queue->queue_xfer, c_xferchain);
1483
1484 /*
1485 * Keep the lock, so that we get a deadlock (and 'locking against
1486 * myself' with LOCKDEBUG) instead of silent
1487 * data corruption if the hook tries to call back into the
1488 * middle layer for an inactive xfer.
1489 */
1490 xfer->ops->c_kill_xfer(chp, xfer, KILL_GONE_INACTIVE);
1491 }
1492
1493 /* Wait until all active transfers on the drive finish */
1494 while (chq->queue_active > 0) {
1495 bool drv_active = false;
1496
1497 TAILQ_FOREACH(xfer, &chq->active_xfers, c_activechain) {
1498 KASSERT(xfer->c_chp == chp);
1499
1500 if (xfer->c_drive == drvp->drive) {
1501 drv_active = true;
1502 break;
1503 }
1504 }
1505
1506 if (!drv_active) {
1507 /* all finished */
1508 break;
1509 }
1510
1511 drvp->drive_flags |= ATA_DRIVE_WAITDRAIN;
1512 cv_wait(&chq->queue_drain, &chp->ch_lock);
1513 }
1514
1515 ata_channel_unlock(chp);
1516 }
1517
1518 static void
1519 ata_channel_freeze_locked(struct ata_channel *chp)
1520 {
1521 chp->ch_queue->queue_freeze++;
1522
1523 ATADEBUG_PRINT(("%s(chp=%p) -> %d\n", __func__, chp,
1524 chp->ch_queue->queue_freeze), DEBUG_FUNCS | DEBUG_XFERS);
1525 }
1526
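/*
 * ata_channel_freeze:
 *
 * Bump the channel's freeze count, holding off execution of further
 * queued commands; each freeze must be paired with a thaw.
 */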
1527 void
1528 ata_channel_freeze(struct ata_channel *chp)
1529 {
1530 ata_channel_lock(chp);
1531 ata_channel_freeze_locked(chp);
1532 ata_channel_unlock(chp);
1533 }
1534
1535 void
1536 ata_channel_thaw_locked(struct ata_channel *chp)
1537 {
1538 KASSERT(mutex_owned(&chp->ch_lock));
1539 KASSERT(chp->ch_queue->queue_freeze > 0);
1540
1541 chp->ch_queue->queue_freeze--;
1542
1543 ATADEBUG_PRINT(("%s(chp=%p) -> %d\n", __func__, chp,
1544 chp->ch_queue->queue_freeze), DEBUG_FUNCS | DEBUG_XFERS);
1545 }
1546
1547 /*
1548 * ata_thread_run:
1549 *
1550 * Reset an ATA channel. Channel lock must be held. arg is type-specific.
1551 */
1552 void
1553 ata_thread_run(struct ata_channel *chp, int flags, int type, int arg)
1554 {
1555 struct atac_softc *atac = chp->ch_atac;
1556 bool threset = false;
1557 struct ata_drive_datas *drvp;
1558
1559 ata_channel_lock_owned(chp);
1560
1561 /*
1562 * If we can poll or wait, it's OK; otherwise wake up the
1563 * kernel thread to do it for us.
1564 */
1565 ATADEBUG_PRINT(("%s flags 0x%x ch_flags 0x%x\n",
1566 __func__, flags, chp->ch_flags), DEBUG_FUNCS | DEBUG_XFERS);
1567 if ((flags & (AT_POLL | AT_WAIT)) == 0) {
1568 switch (type) {
1569 case ATACH_TH_RESET:
1570 if (chp->ch_flags & ATACH_TH_RESET) {
1571 /* No need to schedule another reset */
1572 return;
1573 }
1574 break;
1575 case ATACH_TH_DRIVE_RESET:
1576 {
1577 int drive = arg;
1578
1579 KASSERT(drive <= chp->ch_ndrives);
1580 drvp = &chp->ch_drive[drive];
1581
1582 if (drvp->drive_flags & ATA_DRIVE_TH_RESET) {
1583 /* No need to schedule another reset */
1584 return;
1585 }
1586 drvp->drive_flags |= ATA_DRIVE_TH_RESET;
1587 break;
1588 }
1589 case ATACH_TH_RECOVERY:
1590 {
1591 uint32_t tfd = (uint32_t)arg;
1592
1593 KASSERT((chp->ch_flags & ATACH_RECOVERING) == 0);
1594 chp->recovery_tfd = tfd;
1595 break;
1596 }
1597 default:
1598 panic("%s: unknown type: %x", __func__, type);
1599 /* NOTREACHED */
1600 }
1601
1602 /*
1603 * Block execution of other commands while reset is scheduled
1604 * to a thread.
1605 */
1606 ata_channel_freeze_locked(chp);
1607 chp->ch_flags |= type;
1608
1609 cv_signal(&chp->ch_thr_idle);
1610 return;
1611 }
1612
1613 /* Block execution of other commands during reset */
1614 ata_channel_freeze_locked(chp);
1615
1616 /*
1617 * If reset has been scheduled to a thread, then clear
1618 * the flag now so that the thread won't try to execute it if
1619 * we happen to sleep, and thaw one more time after the reset.
1620 */
1621 if (chp->ch_flags & type) {
1622 chp->ch_flags &= ~type;
1623 threset = true;
1624 }
1625
1626 switch (type) {
1627 case ATACH_TH_RESET:
1628 (*atac->atac_bustype_ata->ata_reset_channel)(chp, flags);
1629
1630 KASSERT(chp->ch_ndrives == 0 || chp->ch_drive != NULL);
1631 for (int drive = 0; drive < chp->ch_ndrives; drive++)
1632 chp->ch_drive[drive].state = 0;
1633 break;
1634
1635 case ATACH_TH_DRIVE_RESET:
1636 {
1637 int drive = arg;
1638
1639 KASSERT(drive <= chp->ch_ndrives);
1640 drvp = &chp->ch_drive[drive];
1641 (*atac->atac_bustype_ata->ata_reset_drive)(drvp, flags, NULL);
1642 drvp->state = 0;
1643 break;
1644 }
1645
1646 case ATACH_TH_RECOVERY:
1647 {
1648 uint32_t tfd = (uint32_t)arg;
1649
1650 KASSERT((chp->ch_flags & ATACH_RECOVERING) == 0);
1651 KASSERT(atac->atac_bustype_ata->ata_recovery != NULL);
1652
1653 SET(chp->ch_flags, ATACH_RECOVERING);
1654 (*atac->atac_bustype_ata->ata_recovery)(chp, flags, tfd);
1655 CLR(chp->ch_flags, ATACH_RECOVERING);
1656 break;
1657 }
1658
1659 default:
1660 panic("%s: unknown type: %x", __func__, type);
1661 /* NOTREACHED */
1662 }
1663
1664 /*
1665 * Thaw one extra time to clear the freeze done when the reset has
1666 * been scheduled to the thread.
1667 */
1668 if (threset)
1669 ata_channel_thaw_locked(chp);
1670
1671 /* Allow commands to run again */
1672 ata_channel_thaw_locked(chp);
1673
1674 /* Signal the thread in case there is an xfer to run */
1675 cv_signal(&chp->ch_thr_idle);
1676 }
1677
1678 int
1679 ata_addref(struct ata_channel *chp)
1680 {
1681 struct atac_softc *atac = chp->ch_atac;
1682 struct scsipi_adapter *adapt = &atac->atac_atapi_adapter._generic;
1683 int s, error = 0;
1684
1685 s = splbio();
1686 if (adapt->adapt_refcnt++ == 0 &&
1687 adapt->adapt_enable != NULL) {
1688 error = (*adapt->adapt_enable)(atac->atac_dev, 1);
1689 if (error)
1690 adapt->adapt_refcnt--;
1691 }
1692 splx(s);
1693 return (error);
1694 }
1695
1696 void
1697 ata_delref(struct ata_channel *chp)
1698 {
1699 struct atac_softc *atac = chp->ch_atac;
1700 struct scsipi_adapter *adapt = &atac->atac_atapi_adapter._generic;
1701 int s;
1702
1703 s = splbio();
1704 if (adapt->adapt_refcnt-- == 1 &&
1705 adapt->adapt_enable != NULL)
1706 (void) (*adapt->adapt_enable)(atac->atac_dev, 0);
1707 splx(s);
1708 }
1709
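/*
 * ata_print_modes:
 *
 * Report the PIO/DMA/Ultra-DMA modes and NCQ settings negotiated for
 * each drive on the channel, via aprint_verbose().
 */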
1710 void
1711 ata_print_modes(struct ata_channel *chp)
1712 {
1713 struct atac_softc *atac = chp->ch_atac;
1714 int drive;
1715 struct ata_drive_datas *drvp;
1716
1717 KASSERT(chp->ch_ndrives == 0 || chp->ch_drive != NULL);
1718 for (drive = 0; drive < chp->ch_ndrives; drive++) {
1719 drvp = &chp->ch_drive[drive];
1720 if (drvp->drive_type == ATA_DRIVET_NONE ||
1721 drvp->drv_softc == NULL)
1722 continue;
1723 aprint_verbose("%s(%s:%d:%d): using PIO mode %d",
1724 device_xname(drvp->drv_softc),
1725 device_xname(atac->atac_dev),
1726 chp->ch_channel, drvp->drive, drvp->PIO_mode);
1727 #if NATA_DMA
1728 if (drvp->drive_flags & ATA_DRIVE_DMA)
1729 aprint_verbose(", DMA mode %d", drvp->DMA_mode);
1730 #if NATA_UDMA
1731 if (drvp->drive_flags & ATA_DRIVE_UDMA) {
1732 aprint_verbose(", Ultra-DMA mode %d", drvp->UDMA_mode);
1733 if (drvp->UDMA_mode == 2)
1734 aprint_verbose(" (Ultra/33)");
1735 else if (drvp->UDMA_mode == 4)
1736 aprint_verbose(" (Ultra/66)");
1737 else if (drvp->UDMA_mode == 5)
1738 aprint_verbose(" (Ultra/100)");
1739 else if (drvp->UDMA_mode == 6)
1740 aprint_verbose(" (Ultra/133)");
1741 }
1742 #endif /* NATA_UDMA */
1743 #endif /* NATA_DMA */
1744 #if NATA_DMA || NATA_PIOBM
1745 if (0
1746 #if NATA_DMA
1747 || (drvp->drive_flags & (ATA_DRIVE_DMA | ATA_DRIVE_UDMA))
1748 #endif
1749 #if NATA_PIOBM
1750 /* PIOBM capable controllers use DMA for PIO commands */
1751 || (atac->atac_cap & ATAC_CAP_PIOBM)
1752 #endif
1753 )
1754 aprint_verbose(" (using DMA)");
1755
1756 if (drvp->drive_flags & ATA_DRIVE_NCQ) {
1757 aprint_verbose(", NCQ (%d tags)%s",
1758 ATA_REAL_OPENINGS(chp->ch_queue->queue_openings),
1759 (drvp->drive_flags & ATA_DRIVE_NCQ_PRIO)
1760 ? " w/PRIO" : "");
1761 } else if (drvp->drive_flags & ATA_DRIVE_WFUA)
1762 aprint_verbose(", WRITE DMA FUA EXT");
1763
1764 #endif /* NATA_DMA || NATA_PIOBM */
1765 aprint_verbose("\n");
1766 }
1767 }
1768
1769 #if defined(ATA_DOWNGRADE_MODE) && NATA_DMA
1770 /*
1771 * Downgrade the transfer mode of a drive after an error. Return 1 if
1772 * a downgrade was possible, 0 otherwise.
1773 *
1774 * MUST BE CALLED AT splbio()!
1775 */
1776 static int
1777 ata_downgrade_mode(struct ata_drive_datas *drvp, int flags)
1778 {
1779 struct ata_channel *chp = drvp->chnl_softc;
1780 struct atac_softc *atac = chp->ch_atac;
1781 device_t drv_dev = drvp->drv_softc;
1782 int cf_flags = device_cfdata(drv_dev)->cf_flags;
1783
1784 ata_channel_lock_owned(drvp->chnl_softc);
1785
1786 /* if the drive or controller doesn't know its mode, we can't do much */
1787 if ((drvp->drive_flags & ATA_DRIVE_MODE) == 0 ||
1788 (atac->atac_set_modes == NULL))
1789 return 0;
1790 /* current drive mode was set by a config flag, leave it that way */
1791 if ((cf_flags & ATA_CONFIG_PIO_SET) ||
1792 (cf_flags & ATA_CONFIG_DMA_SET) ||
1793 (cf_flags & ATA_CONFIG_UDMA_SET))
1794 return 0;
1795
1796 #if NATA_UDMA
1797 /*
1798 * If we were using Ultra-DMA mode, downgrade to the next lower mode.
1799 */
1800 if ((drvp->drive_flags & ATA_DRIVE_UDMA) && drvp->UDMA_mode >= 2) {
1801 drvp->UDMA_mode--;
1802 aprint_error_dev(drv_dev,
1803 "transfer error, downgrading to Ultra-DMA mode %d\n",
1804 drvp->UDMA_mode);
1805 }
1806 #endif
1807
1808 /*
1809 * If we were using ultra-DMA, don't downgrade to multiword DMA.
1810 */
1811 else if (drvp->drive_flags & (ATA_DRIVE_DMA | ATA_DRIVE_UDMA)) {
1812 drvp->drive_flags &= ~(ATA_DRIVE_DMA | ATA_DRIVE_UDMA);
1813 drvp->PIO_mode = drvp->PIO_cap;
1814 aprint_error_dev(drv_dev,
1815 "transfer error, downgrading to PIO mode %d\n",
1816 drvp->PIO_mode);
1817 } else /* already using PIO, can't downgrade */
1818 return 0;
1819
1820 (*atac->atac_set_modes)(chp);
1821 ata_print_modes(chp);
1822 /* reset the channel, which will schedule all drives for setup */
1823 ata_thread_run(chp, flags, ATACH_TH_RESET, ATACH_NODRIVE);
1824 return 1;
1825 }
1826 #endif /* ATA_DOWNGRADE_MODE && NATA_DMA */
1827
1828 /*
1829 * Probe the drive's capabilities, for use by the controller later.
1830 * Assumes drvp points to an existing drive.
1831 */
1832 void
1833 ata_probe_caps(struct ata_drive_datas *drvp)
1834 {
1835 struct ataparams params, params2;
1836 struct ata_channel *chp = drvp->chnl_softc;
1837 struct atac_softc *atac = chp->ch_atac;
1838 device_t drv_dev = drvp->drv_softc;
1839 int i, printed = 0;
1840 const char *sep = "";
1841 int cf_flags;
1842
1843 if (ata_get_params(drvp, AT_WAIT, &params) != CMD_OK) {
1844 /* IDENTIFY failed. Can't tell more about the device */
1845 return;
1846 }
1847 if ((atac->atac_cap & (ATAC_CAP_DATA16 | ATAC_CAP_DATA32)) ==
1848 (ATAC_CAP_DATA16 | ATAC_CAP_DATA32)) {
1849 /*
1850 * Controller claims 16 and 32 bit transfers.
1851 * Re-do an IDENTIFY with 32-bit transfers,
1852 * and compare results.
1853 */
1854 ata_channel_lock(chp);
1855 drvp->drive_flags |= ATA_DRIVE_CAP32;
1856 ata_channel_unlock(chp);
1857 ata_get_params(drvp, AT_WAIT, &params2);
1858 if (memcmp(&params, &params2, sizeof(struct ataparams)) != 0) {
1859 /* Not good. fall back to 16bits */
1860 ata_channel_lock(chp);
1861 drvp->drive_flags &= ~ATA_DRIVE_CAP32;
1862 ata_channel_unlock(chp);
1863 } else {
1864 aprint_verbose_dev(drv_dev, "32-bit data port\n");
1865 }
1866 }
1867 #if 0 /* Some ultra-DMA drives claim to only support ATA-3. sigh */
1868 if (params.atap_ata_major > 0x01 &&
1869 params.atap_ata_major != 0xffff) {
1870 for (i = 14; i > 0; i--) {
1871 if (params.atap_ata_major & (1 << i)) {
1872 aprint_verbose_dev(drv_dev,
1873 "ATA version %d\n", i);
1874 drvp->ata_vers = i;
1875 break;
1876 }
1877 }
1878 }
1879 #endif
1880
1881 /* An ATAPI device is at least PIO mode 3 */
1882 if (drvp->drive_type == ATA_DRIVET_ATAPI)
1883 drvp->PIO_mode = 3;
1884
1885 /*
1886 * It's not in the specs, but it seems that some drives
1887 * return 0xffff in atap_extensions when this field is invalid
1888 */
1889 if (params.atap_extensions != 0xffff &&
1890 (params.atap_extensions & WDC_EXT_MODES)) {
1891 /*
1892 * XXX some drives report something wrong here (they claim to
1893 * support PIO mode 8!). As the mode is coded on 3 bits in
1894 * SET FEATURES, limit it to 7 (so limit i to 4).
1895 * If a mode higher than 7 is found, abort.
1896 */
1897 for (i = 7; i >= 0; i--) {
1898 if ((params.atap_piomode_supp & (1 << i)) == 0)
1899 continue;
1900 if (i > 4)
1901 return;
1902 /*
1903 * See if mode is accepted.
1904 * If the controller can't set its PIO mode,
1905 * assume the defaults are good, so don't try
1906 * to set it.
1907 */
1908 if (atac->atac_set_modes)
1909 /*
1910 * It's OK to poll here, it's fast enough
1911 * to not bother waiting for an interrupt
1912 */
1913 if (ata_set_mode(drvp, 0x08 | (i + 3),
1914 AT_WAIT) != CMD_OK)
1915 continue;
1916 if (!printed) {
1917 aprint_verbose_dev(drv_dev,
1918 "drive supports PIO mode %d", i + 3);
1919 sep = ",";
1920 printed = 1;
1921 }
1922 /*
1923 * If the controller's driver can't set its PIO mode,
1924 * record the highest one for the drive.
1925 */
1926 if (atac->atac_set_modes == NULL ||
1927 atac->atac_pio_cap >= i + 3) {
1928 drvp->PIO_mode = i + 3;
1929 drvp->PIO_cap = i + 3;
1930 break;
1931 }
1932 }
1933 if (!printed) {
1934 /*
1935 * We didn't find a valid PIO mode.
1936 * Assume the values returned for DMA are buggy too.
1937 */
1938 return;
1939 }
1940 ata_channel_lock(chp);
1941 drvp->drive_flags |= ATA_DRIVE_MODE;
1942 ata_channel_unlock(chp);
1943 printed = 0;
1944 for (i = 7; i >= 0; i--) {
1945 if ((params.atap_dmamode_supp & (1 << i)) == 0)
1946 continue;
1947 #if NATA_DMA
1948 if ((atac->atac_cap & ATAC_CAP_DMA) &&
1949 atac->atac_set_modes != NULL)
1950 if (ata_set_mode(drvp, 0x20 | i, AT_WAIT)
1951 != CMD_OK)
1952 continue;
1953 #endif
1954 if (!printed) {
1955 aprint_verbose("%s DMA mode %d", sep, i);
1956 sep = ",";
1957 printed = 1;
1958 }
1959 #if NATA_DMA
1960 if (atac->atac_cap & ATAC_CAP_DMA) {
1961 if (atac->atac_set_modes != NULL &&
1962 atac->atac_dma_cap < i)
1963 continue;
1964 drvp->DMA_mode = i;
1965 drvp->DMA_cap = i;
1966 ata_channel_lock(chp);
1967 drvp->drive_flags |= ATA_DRIVE_DMA;
1968 ata_channel_unlock(chp);
1969 }
1970 #endif
1971 break;
1972 }
1973 if (params.atap_extensions & WDC_EXT_UDMA_MODES) {
1974 printed = 0;
1975 for (i = 7; i >= 0; i--) {
1976 if ((params.atap_udmamode_supp & (1 << i))
1977 == 0)
1978 continue;
1979 #if NATA_UDMA
1980 if (atac->atac_set_modes != NULL &&
1981 (atac->atac_cap & ATAC_CAP_UDMA))
1982 if (ata_set_mode(drvp, 0x40 | i,
1983 AT_WAIT) != CMD_OK)
1984 continue;
1985 #endif
1986 if (!printed) {
1987 aprint_verbose("%s Ultra-DMA mode %d",
1988 sep, i);
1989 if (i == 2)
1990 aprint_verbose(" (Ultra/33)");
1991 else if (i == 4)
1992 aprint_verbose(" (Ultra/66)");
1993 else if (i == 5)
1994 aprint_verbose(" (Ultra/100)");
1995 else if (i == 6)
1996 aprint_verbose(" (Ultra/133)");
1997 sep = ",";
1998 printed = 1;
1999 }
2000 #if NATA_UDMA
2001 if (atac->atac_cap & ATAC_CAP_UDMA) {
2002 if (atac->atac_set_modes != NULL &&
2003 atac->atac_udma_cap < i)
2004 continue;
2005 drvp->UDMA_mode = i;
2006 drvp->UDMA_cap = i;
2007 ata_channel_lock(chp);
2008 drvp->drive_flags |= ATA_DRIVE_UDMA;
2009 ata_channel_unlock(chp);
2010 }
2011 #endif
2012 break;
2013 }
2014 }
2015 }
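
/*
 * Reference sketch (not part of the driver): the transfer-mode
 * arguments handed to SET FEATURES by the ata_set_mode() calls
 * above.  The helper below is hypothetical and only restates the
 * encodings used in this function.
 */
#if 0
static inline uint8_t
ata_xfer_mode_arg(int is_udma, int is_dma, uint8_t mode)
{
	if (is_udma)
		return 0x40 | mode;	/* Ultra-DMA mode "mode" */
	if (is_dma)
		return 0x20 | mode;	/* multiword DMA mode "mode" */
	return 0x08 | mode;		/* PIO flow control; mode >= 3 */
}
#endif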
2016
2017 ata_channel_lock(chp);
2018 drvp->drive_flags &= ~ATA_DRIVE_NOSTREAM;
2019 if (drvp->drive_type == ATA_DRIVET_ATAPI) {
2020 if (atac->atac_cap & ATAC_CAP_ATAPI_NOSTREAM)
2021 drvp->drive_flags |= ATA_DRIVE_NOSTREAM;
2022 } else {
2023 if (atac->atac_cap & ATAC_CAP_ATA_NOSTREAM)
2024 drvp->drive_flags |= ATA_DRIVE_NOSTREAM;
2025 }
2026 ata_channel_unlock(chp);
2027
2028 /* Try to guess ATA version here, if it didn't get reported */
2029 if (drvp->ata_vers == 0) {
2030 #if NATA_UDMA
2031 if (drvp->drive_flags & ATA_DRIVE_UDMA)
2032 			drvp->ata_vers = 4; /* should be at least ATA-4 */
2033 else
2034 #endif
2035 if (drvp->PIO_cap > 2)
2036 			drvp->ata_vers = 2; /* should be at least ATA-2 */
2037 }
2038 cf_flags = device_cfdata(drv_dev)->cf_flags;
2039 if (cf_flags & ATA_CONFIG_PIO_SET) {
2040 ata_channel_lock(chp);
2041 drvp->PIO_mode =
2042 (cf_flags & ATA_CONFIG_PIO_MODES) >> ATA_CONFIG_PIO_OFF;
2043 drvp->drive_flags |= ATA_DRIVE_MODE;
2044 ata_channel_unlock(chp);
2045 }
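
/*
 * Config-file override sketch (illustrative; assumes the nibble
 * layout of the ATA_CONFIG_* macros in atavar.h): each mode class
 * uses one nibble of cf_flags, with a "set" bit plus the mode number,
 * decoded by the shifts above and below.
 */
#if 0
int flags = ATA_CONFIG_PIO_SET | (2 << ATA_CONFIG_PIO_OFF); /* force PIO 2 */
#endif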
2046 #if NATA_DMA
2047 if ((atac->atac_cap & ATAC_CAP_DMA) == 0) {
2048 /* don't care about DMA modes */
2049 if (*sep != '\0')
2050 aprint_verbose("\n");
2051 return;
2052 }
2053 if (cf_flags & ATA_CONFIG_DMA_SET) {
2054 ata_channel_lock(chp);
2055 if ((cf_flags & ATA_CONFIG_DMA_MODES) ==
2056 ATA_CONFIG_DMA_DISABLE) {
2057 drvp->drive_flags &= ~ATA_DRIVE_DMA;
2058 } else {
2059 drvp->DMA_mode = (cf_flags & ATA_CONFIG_DMA_MODES) >>
2060 ATA_CONFIG_DMA_OFF;
2061 drvp->drive_flags |= ATA_DRIVE_DMA | ATA_DRIVE_MODE;
2062 }
2063 ata_channel_unlock(chp);
2064 }
2065
2066 /*
2067 * Probe WRITE DMA FUA EXT. Support is mandatory for devices
2068 * supporting LBA48, but nevertheless confirm with the feature flag.
2069 */
2070 if (drvp->drive_flags & ATA_DRIVE_DMA) {
2071 if ((params.atap_cmd2_en & ATA_CMD2_LBA48) != 0
2072 && (params.atap_cmd_def & ATA_CMDE_WFE)) {
2073 drvp->drive_flags |= ATA_DRIVE_WFUA;
2074 aprint_verbose("%s WRITE DMA FUA", sep);
2075 sep = ",";
2076 }
2077 }
2078
2079 	/* Probe NCQ - READ/WRITE FPDMA QUEUED command support */
2080 ata_channel_lock(chp);
2081 drvp->drv_openings = 1;
2082 if (params.atap_sata_caps & SATA_NATIVE_CMDQ) {
2083 if (atac->atac_cap & ATAC_CAP_NCQ)
2084 drvp->drive_flags |= ATA_DRIVE_NCQ;
2085 drvp->drv_openings =
2086 (params.atap_queuedepth & WDC_QUEUE_DEPTH_MASK) + 1;
2087 aprint_verbose("%s NCQ (%d tags)", sep, drvp->drv_openings);
2088 sep = ",";
2089
2090 if (params.atap_sata_caps & SATA_NCQ_PRIO) {
2091 drvp->drive_flags |= ATA_DRIVE_NCQ_PRIO;
2092 aprint_verbose(" w/PRIO");
2093 }
2094 }
2095 ata_channel_unlock(chp);
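
/*
 * Worked example (illustrative) of the tag accounting above: a drive
 * whose IDENTIFY queue-depth field (atap_queuedepth &
 * WDC_QUEUE_DEPTH_MASK) reads 31 gets drv_openings = 32, i.e. all
 * 32 NCQ tags may be in flight at once.
 */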
2096
2097 if (*sep != '\0')
2098 aprint_verbose("\n");
2099
2100 #if NATA_UDMA
2101 if ((atac->atac_cap & ATAC_CAP_UDMA) == 0) {
2102 /* don't care about UDMA modes */
2103 return;
2104 }
2105 if (cf_flags & ATA_CONFIG_UDMA_SET) {
2106 ata_channel_lock(chp);
2107 if ((cf_flags & ATA_CONFIG_UDMA_MODES) ==
2108 ATA_CONFIG_UDMA_DISABLE) {
2109 drvp->drive_flags &= ~ATA_DRIVE_UDMA;
2110 } else {
2111 drvp->UDMA_mode = (cf_flags & ATA_CONFIG_UDMA_MODES) >>
2112 ATA_CONFIG_UDMA_OFF;
2113 drvp->drive_flags |= ATA_DRIVE_UDMA | ATA_DRIVE_MODE;
2114 }
2115 ata_channel_unlock(chp);
2116 }
2117 #endif /* NATA_UDMA */
2118 #endif /* NATA_DMA */
2119 }
2120
2121 /* management of the /dev/atabus* devices */
2122 int
2123 atabusopen(dev_t dev, int flag, int fmt, struct lwp *l)
2124 {
2125 struct atabus_softc *sc;
2126 int error;
2127
2128 sc = device_lookup_private(&atabus_cd, minor(dev));
2129 if (sc == NULL)
2130 return (ENXIO);
2131
2132 if (sc->sc_flags & ATABUSCF_OPEN)
2133 return (EBUSY);
2134
2135 if ((error = ata_addref(sc->sc_chan)) != 0)
2136 return (error);
2137
2138 sc->sc_flags |= ATABUSCF_OPEN;
2139
2140 return (0);
2141 }
2142
2143
2144 int
2145 atabusclose(dev_t dev, int flag, int fmt, struct lwp *l)
2146 {
2147 struct atabus_softc *sc =
2148 device_lookup_private(&atabus_cd, minor(dev));
2149
2150 ata_delref(sc->sc_chan);
2151
2152 sc->sc_flags &= ~ATABUSCF_OPEN;
2153
2154 return (0);
2155 }
2156
2157 int
2158 atabusioctl(dev_t dev, u_long cmd, void *addr, int flag, struct lwp *l)
2159 {
2160 struct atabus_softc *sc =
2161 device_lookup_private(&atabus_cd, minor(dev));
2162 struct ata_channel *chp = sc->sc_chan;
2163 int min_drive, max_drive, drive;
2164 int error;
2165
2166 /*
2167 * Enforce write permission for ioctls that change the
2168 * state of the bus. Host adapter specific ioctls must
2169 * be checked by the adapter driver.
2170 */
2171 switch (cmd) {
2172 case ATABUSIOSCAN:
2173 case ATABUSIODETACH:
2174 case ATABUSIORESET:
2175 if ((flag & FWRITE) == 0)
2176 return (EBADF);
2177 }
2178
2179 switch (cmd) {
2180 case ATABUSIORESET:
2181 ata_channel_lock(chp);
2182 ata_thread_run(sc->sc_chan, AT_WAIT | AT_POLL,
2183 ATACH_TH_RESET, ATACH_NODRIVE);
2184 ata_channel_unlock(chp);
2185 return 0;
2186 case ATABUSIOSCAN:
2187 {
2188 #if 0
2189 struct atabusioscan_args *a=
2190 (struct atabusioscan_args *)addr;
2191 #endif
2192 if ((chp->ch_drive[0].drive_type == ATA_DRIVET_OLD) ||
2193 (chp->ch_drive[1].drive_type == ATA_DRIVET_OLD))
2194 return (EOPNOTSUPP);
2195 		return (EOPNOTSUPP);	/* scan via this ioctl not implemented */
2196 }
2197 case ATABUSIODETACH:
2198 {
2199 struct atabusiodetach_args *a=
2200 (struct atabusiodetach_args *)addr;
2201 if ((chp->ch_drive[0].drive_type == ATA_DRIVET_OLD) ||
2202 (chp->ch_drive[1].drive_type == ATA_DRIVET_OLD))
2203 return (EOPNOTSUPP);
2204 switch (a->at_dev) {
2205 case -1:
2206 min_drive = 0;
2207 max_drive = 1;
2208 break;
2209 case 0:
2210 case 1:
2211 min_drive = max_drive = a->at_dev;
2212 break;
2213 default:
2214 return (EINVAL);
2215 }
2216 for (drive = min_drive; drive <= max_drive; drive++) {
2217 if (chp->ch_drive[drive].drv_softc != NULL) {
2218 error = config_detach(
2219 chp->ch_drive[drive].drv_softc, 0);
2220 if (error)
2221 return (error);
2222 KASSERT(chp->ch_drive[drive].drv_softc == NULL);
2223 }
2224 }
2225 return 0;
2226 }
2227 default:
2228 return ENOTTY;
2229 }
2230 }
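
/*
 * Userland usage sketch for the ioctls above (illustrative only,
 * error handling trimmed; the device must be opened for writing
 * since these ioctls require FWRITE):
 */
#if 0
#include <sys/ataio.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <unistd.h>

static int
atabus_reset_and_detach(const char *path)	/* e.g. "/dev/atabus0" */
{
	struct atabusiodetach_args da;
	int fd;

	if ((fd = open(path, O_RDWR)) == -1)
		return -1;
	(void)ioctl(fd, ATABUSIORESET);		/* reset the channel */
	da.at_dev = -1;				/* -1 = all drives */
	(void)ioctl(fd, ATABUSIODETACH, &da);
	return close(fd);
}
#endif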
2231
2232 static bool
2233 atabus_suspend(device_t dv, const pmf_qual_t *qual)
2234 {
2235 struct atabus_softc *sc = device_private(dv);
2236 struct ata_channel *chp = sc->sc_chan;
2237
2238 ata_channel_idle(chp);
2239
2240 return true;
2241 }
2242
2243 static bool
2244 atabus_resume(device_t dv, const pmf_qual_t *qual)
2245 {
2246 struct atabus_softc *sc = device_private(dv);
2247 struct ata_channel *chp = sc->sc_chan;
2248
2249 	/*
2250 	 * XXX joerg: with wdc, the first channel unfreezes the controller.
2251 	 * Move the reset and queue idling into wdc.
2252 	 */
2253 ata_channel_lock(chp);
2254 if (chp->ch_queue->queue_freeze == 0) {
2255 ata_channel_unlock(chp);
2256 goto out;
2257 }
2258
2259 /* unfreeze the queue and reset drives */
2260 ata_channel_thaw_locked(chp);
2261
2262 /* reset channel only if there are drives attached */
2263 if (chp->ch_ndrives > 0)
2264 ata_thread_run(chp, AT_WAIT, ATACH_TH_RESET, ATACH_NODRIVE);
2265
2266 ata_channel_unlock(chp);
2267
2268 out:
2269 return true;
2270 }
2271
2272 static int
2273 atabus_rescan(device_t self, const char *ifattr, const int *locators)
2274 {
2275 struct atabus_softc *sc = device_private(self);
2276 struct ata_channel *chp = sc->sc_chan;
2277 struct atabus_initq *initq;
2278 int i;
2279
2280 /*
2281 * we can rescan a port multiplier atabus, even if some devices are
2282 * still attached
2283 */
2284 if (chp->ch_satapmp_nports == 0) {
2285 if (chp->atapibus != NULL) {
2286 return EBUSY;
2287 }
2288
2289 KASSERT(chp->ch_ndrives == 0 || chp->ch_drive != NULL);
2290 for (i = 0; i < chp->ch_ndrives; i++) {
2291 if (chp->ch_drive[i].drv_softc != NULL) {
2292 return EBUSY;
2293 }
2294 }
2295 }
2296
2297 initq = kmem_zalloc(sizeof(*initq), KM_SLEEP);
2298 initq->atabus_sc = sc;
2299 mutex_enter(&atabus_qlock);
2300 TAILQ_INSERT_TAIL(&atabus_initq_head, initq, atabus_initq);
2301 mutex_exit(&atabus_qlock);
2302 config_pending_incr(sc->sc_dev);
2303
2304 ata_channel_lock(chp);
2305 chp->ch_flags |= ATACH_TH_RESCAN;
2306 cv_signal(&chp->ch_thr_idle);
2307 ata_channel_unlock(chp);
2308
2309 return 0;
2310 }
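
/*
 * The rescan above is typically reached from userland via drvctl(8),
 * e.g. "drvctl -r atabus2" after hot-plugging a disk behind a SATA
 * port multiplier; the checks above only allow rescanning a busy bus
 * when a port multiplier is present.
 */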
2311
2312 void
2313 ata_delay(struct ata_channel *chp, int ms, const char *msg, int flags)
2314 {
2315 KASSERT(mutex_owned(&chp->ch_lock));
2316
2317 if ((flags & (AT_WAIT | AT_POLL)) == AT_POLL) {
2318 /*
2319 * can't use kpause(), we may be in interrupt context
2320 * or taking a crash dump
2321 */
2322 delay(ms * 1000);
2323 } else {
2324 int pause = mstohz(ms);
2325
2326 kpause(msg, false, pause > 0 ? pause : 1, &chp->ch_lock);
2327 }
2328 }
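
/*
 * Caller sketch (illustrative): ata_delay() must be called with the
 * channel lock held; AT_POLL busy-waits (safe in interrupt context),
 * while AT_WAIT sleeps via kpause(), which releases and retakes
 * ch_lock.
 */
#if 0
ata_channel_lock(chp);
ata_delay(chp, 100, "atadel", AT_WAIT);	/* "atadel" is a made-up wmesg */
ata_channel_unlock(chp);
#endif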
2329
2330 void
2331 atacmd_toncq(struct ata_xfer *xfer, uint8_t *cmd, uint16_t *count,
2332 uint16_t *features, uint8_t *device)
2333 {
2334 if ((xfer->c_flags & C_NCQ) == 0) {
2335 /* FUA handling for non-NCQ drives */
2336 if (xfer->c_bio.flags & ATA_FUA
2337 && *cmd == WDCC_WRITEDMA_EXT)
2338 *cmd = WDCC_WRITEDMA_FUA_EXT;
2339
2340 return;
2341 }
2342
2343 *cmd = (xfer->c_bio.flags & ATA_READ) ?
2344 WDCC_READ_FPDMA_QUEUED : WDCC_WRITE_FPDMA_QUEUED;
2345
2346 /* for FPDMA the block count is in features */
2347 *features = *count;
2348
2349 /* NCQ tag */
2350 *count = (xfer->c_slot << 3);
2351
2352 if (xfer->c_bio.flags & ATA_PRIO_HIGH)
2353 *count |= WDSC_PRIO_HIGH;
2354
2355 /* other device flags */
2356 if (xfer->c_bio.flags & ATA_FUA)
2357 *device |= WDSD_FUA;
2358 }
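
/*
 * Worked example (illustrative) of the conversion above: a queued
 * write of 8 sectors issued on slot 5 enters with
 *	cmd = WDCC_WRITEDMA_EXT, count = 8
 * and leaves with
 *	cmd      = WDCC_WRITE_FPDMA_QUEUED
 *	features = 8		(sector count moved to features)
 *	count    = 5 << 3	(NCQ tag in bits 7:3)
 * plus WDSD_FUA set in device if ATA_FUA was requested.
 */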
2359
2360 void
2361 ata_wait_cmd(struct ata_channel *chp, struct ata_xfer *xfer)
2362 {
2363 struct ata_queue *chq = chp->ch_queue;
2364 struct ata_command *ata_c = &xfer->c_ata_c;
2365
2366 ata_channel_lock(chp);
2367
2368 while ((ata_c->flags & AT_DONE) == 0)
2369 cv_wait(&chq->c_cmd_finish, &chp->ch_lock);
2370
2371 ata_channel_unlock(chp);
2372
2373 KASSERT((ata_c->flags & AT_DONE) != 0);
2374 }
2375