/*	$NetBSD: ata.c,v 1.158 2020/05/25 18:29:25 jdolecek Exp $	*/

/*
 * Copyright (c) 1998, 2001 Manuel Bouyer.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ata.c,v 1.158 2020/05/25 18:29:25 jdolecek Exp $");

#include "opt_ata.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/proc.h>
#include <sys/kthread.h>
#include <sys/errno.h>
#include <sys/ataio.h>
#include <sys/kmem.h>
#include <sys/intr.h>
#include <sys/bus.h>
#include <sys/once.h>
#include <sys/bitops.h>
#include <sys/cpu.h>

#define ATABUS_PRIVATE

#include <dev/ata/ataconf.h>
#include <dev/ata/atareg.h>
#include <dev/ata/atavar.h>
#include <dev/ic/wdcvar.h>	/* for PIOBM */

#include "ioconf.h"
#include "locators.h"

#include "atapibus.h"
#include "ataraid.h"
#include "sata_pmp.h"

#if NATARAID > 0
#include <dev/ata/ata_raidvar.h>
#endif
#if NSATA_PMP > 0
#include <dev/ata/satapmpvar.h>
#endif
#include <dev/ata/satapmpreg.h>

#define DEBUG_FUNCS	0x08
#define DEBUG_PROBE	0x10
#define DEBUG_DETACH	0x20
#define DEBUG_XFERS	0x40
#ifdef ATADEBUG
#ifndef ATADEBUG_MASK
#define ATADEBUG_MASK 0
#endif
int atadebug_mask = ATADEBUG_MASK;
#define ATADEBUG_PRINT(args, level) \
	if (atadebug_mask & (level)) \
		printf args
#else
#define ATADEBUG_PRINT(args, level)
#endif
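
/*
 * Example of how the ATADEBUG machinery is used (a sketch for readers;
 * the kernel must be built with the ATADEBUG option for the prints to
 * exist, and atadebug_mask is an ordinary global that can also be
 * patched at run time, e.g. from ddb).  The extra parentheses around
 * the printf-style arguments are required because the macro pastes
 * them directly after printf:
 *
 *	ATADEBUG_PRINT(("%s: channel %d reset\n", __func__, chan),
 *	    DEBUG_FUNCS | DEBUG_XFERS);
 *
 * expands (when ATADEBUG is defined) to a printf guarded by
 * "atadebug_mask & (DEBUG_FUNCS | DEBUG_XFERS)", and to nothing
 * otherwise.
 */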

#if NATA_DMA
static int	ata_downgrade_mode(struct ata_drive_datas *, int);
#endif

static ONCE_DECL(ata_init_ctrl);
static struct pool ata_xfer_pool;

/*
 * A queue of atabus instances, used to ensure the same bus probe order
 * for a given hardware configuration at each boot.  The kthread probing
 * the devices on an atabus serializes on this queue; only one atabus is
 * probed at a time.
 */
static TAILQ_HEAD(, atabus_initq) atabus_initq_head;
static kmutex_t atabus_qlock;
static kcondvar_t atabus_qcv;
static lwp_t * atabus_cfg_lwp;

/*****************************************************************************
 * ATA bus layer.
 *
 * ATA controllers attach an atabus instance, which handles probing the bus
 * for drives, etc.
 *****************************************************************************/

dev_type_open(atabusopen);
dev_type_close(atabusclose);
dev_type_ioctl(atabusioctl);

const struct cdevsw atabus_cdevsw = {
	.d_open = atabusopen,
	.d_close = atabusclose,
	.d_read = noread,
	.d_write = nowrite,
	.d_ioctl = atabusioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = nopoll,
	.d_mmap = nommap,
	.d_kqfilter = nokqfilter,
	.d_discard = nodiscard,
	.d_flag = D_OTHER
};

static void atabus_childdetached(device_t, device_t);
static int atabus_rescan(device_t, const char *, const int *);
static bool atabus_resume(device_t, const pmf_qual_t *);
static bool atabus_suspend(device_t, const pmf_qual_t *);
static void atabusconfig_thread(void *);

static void ata_channel_idle(struct ata_channel *);
static void ata_activate_xfer_locked(struct ata_channel *, struct ata_xfer *);
static void ata_channel_freeze_locked(struct ata_channel *);
static void ata_thread_wake_locked(struct ata_channel *);

/*
 * atabus_init:
 *
 *	Initialize ATA subsystem structures.
 */
static int
atabus_init(void)
{

	pool_init(&ata_xfer_pool, sizeof(struct ata_xfer), 0, 0, 0,
	    "ataspl", NULL, IPL_BIO);
	TAILQ_INIT(&atabus_initq_head);
	mutex_init(&atabus_qlock, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&atabus_qcv, "atainitq");
	return 0;
}

/*
 * atabusprint:
 *
 *	Autoconfiguration print routine used by ATA controllers when
 *	attaching an atabus instance.
 */
int
atabusprint(void *aux, const char *pnp)
{
	struct ata_channel *chan = aux;

	if (pnp)
		aprint_normal("atabus at %s", pnp);
	aprint_normal(" channel %d", chan->ch_channel);

	return (UNCONF);
}

/*
 * ataprint:
 *
 *	Autoconfiguration print routine.
 */
int
ataprint(void *aux, const char *pnp)
{
	struct ata_device *adev = aux;

	if (pnp)
		aprint_normal("wd at %s", pnp);
	aprint_normal(" drive %d", adev->adev_drv_data->drive);

	return (UNCONF);
}

/*
 * ata_channel_attach:
 *
 *	Common parts of attaching an atabus to an ATA controller channel.
 */
void
ata_channel_attach(struct ata_channel *chp)
{
	if (chp->ch_flags & ATACH_DISABLED)
		return;

	ata_channel_init(chp);

	KASSERT(chp->ch_queue != NULL);

	chp->atabus = config_found_ia(chp->ch_atac->atac_dev, "ata", chp,
	    atabusprint);
}

/*
 * ata_channel_detach:
 *
 *	Common parts of detaching an atabus from an ATA controller channel.
 */
void
ata_channel_detach(struct ata_channel *chp)
{
	if (chp->ch_flags & ATACH_DISABLED)
		return;

	ata_channel_destroy(chp);

	chp->ch_flags |= ATACH_DETACHED;
}

static void
atabusconfig(struct atabus_softc *atabus_sc)
{
	struct ata_channel *chp = atabus_sc->sc_chan;
	struct atac_softc *atac = chp->ch_atac;
	struct atabus_initq *atabus_initq = NULL;
	int i, error;

	/* we are in the atabus's thread context */

	/*
	 * Probe for the drives attached to the controller, unless a PMP
	 * is already known.
	 */
	/* XXX for SATA devices we will power up all drives at once */
	if (chp->ch_satapmp_nports == 0)
		(*atac->atac_probe)(chp);

	if (chp->ch_ndrives >= 2) {
		ATADEBUG_PRINT(("atabusattach: ch_drive_type 0x%x 0x%x\n",
		    chp->ch_drive[0].drive_type, chp->ch_drive[1].drive_type),
		    DEBUG_PROBE);
	}

	/* Make sure the devices probe in atabus order to avoid jitter. */
	mutex_enter(&atabus_qlock);
	for (;;) {
		atabus_initq = TAILQ_FIRST(&atabus_initq_head);
		if (atabus_initq->atabus_sc == atabus_sc)
			break;
		cv_wait(&atabus_qcv, &atabus_qlock);
	}
	mutex_exit(&atabus_qlock);

	ata_channel_lock(chp);

	KASSERT(ata_is_thread_run(chp));

	/* If no drives, abort here */
	if (chp->ch_drive == NULL)
		goto out;
	KASSERT(chp->ch_ndrives == 0 || chp->ch_drive != NULL);
	for (i = 0; i < chp->ch_ndrives; i++)
		if (chp->ch_drive[i].drive_type != ATA_DRIVET_NONE)
			break;
	if (i == chp->ch_ndrives)
		goto out;

	/* Shortcut in case we've been shutdown */
	if (chp->ch_flags & ATACH_SHUTDOWN)
		goto out;

	ata_channel_unlock(chp);

	if ((error = kthread_create(PRI_NONE, 0, NULL, atabusconfig_thread,
	    atabus_sc, &atabus_cfg_lwp,
	    "%scnf", device_xname(atac->atac_dev))) != 0)
		aprint_error_dev(atac->atac_dev,
		    "unable to create config thread: error %d\n", error);
	return;

out:
	ata_channel_unlock(chp);

	mutex_enter(&atabus_qlock);
	TAILQ_REMOVE(&atabus_initq_head, atabus_initq, atabus_initq);
	cv_broadcast(&atabus_qcv);
	mutex_exit(&atabus_qlock);

	kmem_free(atabus_initq, sizeof(*atabus_initq));

	ata_delref(chp);

	config_pending_decr(atac->atac_dev);
}

/*
 * atabusconfig_thread: finish attaching the atabus's children, in a
 * separate kernel thread.
 */
static void
atabusconfig_thread(void *arg)
{
	struct atabus_softc *atabus_sc = arg;
	struct ata_channel *chp = atabus_sc->sc_chan;
	struct atac_softc *atac = chp->ch_atac;
	struct atabus_initq *atabus_initq = NULL;
	int i, s;

	/* XXX seems wrong */
	mutex_enter(&atabus_qlock);
	atabus_initq = TAILQ_FIRST(&atabus_initq_head);
	KASSERT(atabus_initq->atabus_sc == atabus_sc);
	mutex_exit(&atabus_qlock);

	/*
	 * First look for a port multiplier
	 */
	if (chp->ch_ndrives == PMP_MAX_DRIVES &&
	    chp->ch_drive[PMP_PORT_CTL].drive_type == ATA_DRIVET_PM) {
#if NSATA_PMP > 0
		satapmp_attach(chp);
#else
		aprint_error_dev(atabus_sc->sc_dev,
		    "SATA port multiplier not supported\n");
		/* no problems going on, all drives are ATA_DRIVET_NONE */
#endif
	}

	/*
	 * Attach an ATAPI bus, if needed.
	 */
	KASSERT(chp->ch_ndrives == 0 || chp->ch_drive != NULL);
	for (i = 0; i < chp->ch_ndrives && chp->atapibus == NULL; i++) {
		if (chp->ch_drive[i].drive_type == ATA_DRIVET_ATAPI) {
#if NATAPIBUS > 0
			(*atac->atac_atapibus_attach)(atabus_sc);
#else
			/*
			 * Fake the autoconfig "not configured" message
			 */
			aprint_normal("atapibus at %s not configured\n",
			    device_xname(atac->atac_dev));
			chp->atapibus = NULL;
			s = splbio();
			for (i = 0; i < chp->ch_ndrives; i++) {
				if (chp->ch_drive[i].drive_type == ATA_DRIVET_ATAPI)
					chp->ch_drive[i].drive_type = ATA_DRIVET_NONE;
			}
			splx(s);
#endif
			break;
		}
	}

	for (i = 0; i < chp->ch_ndrives; i++) {
		struct ata_device adev;
		if (chp->ch_drive[i].drive_type != ATA_DRIVET_ATA &&
		    chp->ch_drive[i].drive_type != ATA_DRIVET_OLD) {
			continue;
		}
		if (chp->ch_drive[i].drv_softc != NULL)
			continue;
		memset(&adev, 0, sizeof(struct ata_device));
		adev.adev_bustype = atac->atac_bustype_ata;
		adev.adev_channel = chp->ch_channel;
		adev.adev_drv_data = &chp->ch_drive[i];
		chp->ch_drive[i].drv_softc = config_found_ia(atabus_sc->sc_dev,
		    "ata_hl", &adev, ataprint);
		if (chp->ch_drive[i].drv_softc != NULL) {
			ata_probe_caps(&chp->ch_drive[i]);
		} else {
			s = splbio();
			chp->ch_drive[i].drive_type = ATA_DRIVET_NONE;
			splx(s);
		}
	}

	/* now that we know the drives, the controller can set its modes */
	if (atac->atac_set_modes) {
		(*atac->atac_set_modes)(chp);
		ata_print_modes(chp);
	}
#if NATARAID > 0
	if (atac->atac_cap & ATAC_CAP_RAID) {
		for (i = 0; i < chp->ch_ndrives; i++) {
			if (chp->ch_drive[i].drive_type == ATA_DRIVET_ATA) {
				ata_raid_check_component(
				    chp->ch_drive[i].drv_softc);
			}
		}
	}
#endif /* NATARAID > 0 */

	/*
	 * reset drive_flags for unattached devices, reset state for attached
	 * ones
	 */
	s = splbio();
	for (i = 0; i < chp->ch_ndrives; i++) {
		if (chp->ch_drive[i].drive_type == ATA_DRIVET_PM)
			continue;
		if (chp->ch_drive[i].drv_softc == NULL) {
			chp->ch_drive[i].drive_flags = 0;
			chp->ch_drive[i].drive_type = ATA_DRIVET_NONE;
		} else
			chp->ch_drive[i].state = 0;
	}
	splx(s);

	mutex_enter(&atabus_qlock);
	TAILQ_REMOVE(&atabus_initq_head, atabus_initq, atabus_initq);
	cv_broadcast(&atabus_qcv);
	mutex_exit(&atabus_qlock);

	kmem_free(atabus_initq, sizeof(*atabus_initq));

	ata_delref(chp);

	config_pending_decr(atac->atac_dev);
	kthread_exit(0);
}

/*
 * atabus_thread:
 *
 *	Worker thread for the ATA bus.
 */
static void
atabus_thread(void *arg)
{
	struct atabus_softc *sc = arg;
	struct ata_channel *chp = sc->sc_chan;
	struct ata_queue *chq = chp->ch_queue;
	struct ata_xfer *xfer;
	int i, rv;

	ata_channel_lock(chp);
	KASSERT(ata_is_thread_run(chp));

	/*
	 * Probe the drives.  Reset type to indicate to controllers
	 * that can re-probe that all drives must be probed.
	 *
	 * Note: ch_ndrives may be changed during the probe.
	 */
	KASSERT(chp->ch_ndrives == 0 || chp->ch_drive != NULL);
	for (i = 0; i < chp->ch_ndrives; i++) {
		chp->ch_drive[i].drive_flags = 0;
		chp->ch_drive[i].drive_type = ATA_DRIVET_NONE;
	}
	ata_channel_unlock(chp);

	atabusconfig(sc);

	ata_channel_lock(chp);
	for (;;) {
		if ((chp->ch_flags & (ATACH_TH_RESET | ATACH_TH_DRIVE_RESET
		    | ATACH_TH_RECOVERY | ATACH_SHUTDOWN)) == 0 &&
		    (chq->queue_active == 0 || chq->queue_freeze == 0)) {
			cv_wait(&chp->ch_thr_idle, &chp->ch_lock);
		}
		if (chp->ch_flags & ATACH_SHUTDOWN) {
			break;
		}
		if (chp->ch_flags & ATACH_TH_RESCAN) {
			chp->ch_flags &= ~ATACH_TH_RESCAN;
			ata_channel_unlock(chp);
			atabusconfig(sc);
			ata_channel_lock(chp);
		}
		if (chp->ch_flags & ATACH_TH_RESET) {
			/* this will unfreeze the channel */
			ata_thread_run(chp, AT_WAIT,
			    ATACH_TH_RESET, ATACH_NODRIVE);
		} else if (chp->ch_flags & ATACH_TH_DRIVE_RESET) {
			/* this will unfreeze the channel */
			for (i = 0; i < chp->ch_ndrives; i++) {
				struct ata_drive_datas *drvp;

				drvp = &chp->ch_drive[i];

				if (drvp->drive_flags & ATA_DRIVE_TH_RESET) {
					ata_thread_run(chp,
					    AT_WAIT, ATACH_TH_DRIVE_RESET, i);
				}
			}
			chp->ch_flags &= ~ATACH_TH_DRIVE_RESET;
		} else if (chp->ch_flags & ATACH_TH_RECOVERY) {
			/*
			 * This will unfreeze the channel; drops locks during
			 * run, so must wrap in splbio()/splx() to avoid
			 * spurious interrupts.  XXX MPSAFE
			 */
			int s = splbio();
			ata_thread_run(chp, AT_WAIT, ATACH_TH_RECOVERY,
			    chp->recovery_tfd);
			splx(s);
		} else if (chq->queue_active > 0 && chq->queue_freeze == 1) {
			/*
			 * Caller has bumped queue_freeze, decrease it.  This
			 * path must never be executed for NCQ commands.
			 */
			KASSERT((chp->ch_flags & ATACH_NCQ) == 0);
			KASSERT(chq->queue_active == 1);

			ata_channel_thaw_locked(chp);
			xfer = ata_queue_get_active_xfer_locked(chp);

			KASSERT(xfer != NULL);
			KASSERT((xfer->c_flags & C_POLL) == 0);

			switch ((rv = ata_xfer_start(xfer))) {
			case ATASTART_STARTED:
			case ATASTART_POLL:
			case ATASTART_ABORT:
				break;
			case ATASTART_TH:
			default:
				panic("%s: ata_xfer_start() unexpected rv %d",
				    __func__, rv);
				/* NOTREACHED */
			}
		} else if (chq->queue_freeze > 1)
			panic("%s: queue_freeze", __func__);

		/* Try to run down the queue once channel is unfrozen */
		if (chq->queue_freeze == 0) {
			ata_channel_unlock(chp);
			atastart(chp);
			ata_channel_lock(chp);
		}
	}
	chp->ch_thread = NULL;
	cv_signal(&chp->ch_thr_idle);
	ata_channel_unlock(chp);
	kthread_exit(0);
}

bool
ata_is_thread_run(struct ata_channel *chp)
{
	KASSERT(mutex_owned(&chp->ch_lock));

	return (chp->ch_thread == curlwp && !cpu_intr_p());
}

static void
ata_thread_wake_locked(struct ata_channel *chp)
{
	KASSERT(mutex_owned(&chp->ch_lock));
	ata_channel_freeze_locked(chp);
	cv_signal(&chp->ch_thr_idle);
}

/*
 * atabus_match:
 *
 *	Autoconfiguration match routine.
 */
static int
atabus_match(device_t parent, cfdata_t cf, void *aux)
{
	struct ata_channel *chp = aux;

	if (chp == NULL)
		return (0);

	if (cf->cf_loc[ATACF_CHANNEL] != chp->ch_channel &&
	    cf->cf_loc[ATACF_CHANNEL] != ATACF_CHANNEL_DEFAULT)
		return (0);

	return (1);
}

/*
 * atabus_attach:
 *
 *	Autoconfiguration attach routine.
 */
static void
atabus_attach(device_t parent, device_t self, void *aux)
{
	struct atabus_softc *sc = device_private(self);
	struct ata_channel *chp = aux;
	struct atabus_initq *initq;
	int error;

	sc->sc_chan = chp;

	aprint_normal("\n");
	aprint_naive("\n");

	sc->sc_dev = self;

	if (ata_addref(chp))
		return;

	RUN_ONCE(&ata_init_ctrl, atabus_init);

	initq = kmem_zalloc(sizeof(*initq), KM_SLEEP);
	initq->atabus_sc = sc;
	mutex_enter(&atabus_qlock);
	TAILQ_INSERT_TAIL(&atabus_initq_head, initq, atabus_initq);
	mutex_exit(&atabus_qlock);
	config_pending_incr(sc->sc_dev);

	/* XXX MPSAFE - no KTHREAD_MPSAFE, so protected by KERNEL_LOCK() */
	if ((error = kthread_create(PRI_NONE, 0, NULL, atabus_thread, sc,
	    &chp->ch_thread, "%s", device_xname(self))) != 0)
		aprint_error_dev(self,
		    "unable to create kernel thread: error %d\n", error);

	if (!pmf_device_register(self, atabus_suspend, atabus_resume))
		aprint_error_dev(self, "couldn't establish power handler\n");
}

/*
 * atabus_detach:
 *
 *	Autoconfiguration detach routine.
 */
static int
atabus_detach(device_t self, int flags)
{
	struct atabus_softc *sc = device_private(self);
	struct ata_channel *chp = sc->sc_chan;
	device_t dev = NULL;
	int i, error = 0;

	/*
	 * Detach atapibus and its children.
	 */
	if ((dev = chp->atapibus) != NULL) {
		ATADEBUG_PRINT(("atabus_detach: %s: detaching %s\n",
		    device_xname(self), device_xname(dev)), DEBUG_DETACH);

		error = config_detach(dev, flags);
		if (error)
			goto out;
		KASSERT(chp->atapibus == NULL);
	}

	KASSERT(chp->ch_ndrives == 0 || chp->ch_drive != NULL);

	/*
	 * Detach our other children.
	 */
	for (i = 0; i < chp->ch_ndrives; i++) {
		if (chp->ch_drive[i].drive_type == ATA_DRIVET_ATAPI)
			continue;
		if (chp->ch_drive[i].drive_type == ATA_DRIVET_PM)
			chp->ch_drive[i].drive_type = ATA_DRIVET_NONE;
		if ((dev = chp->ch_drive[i].drv_softc) != NULL) {
			ATADEBUG_PRINT(("%s.%d: %s: detaching %s\n", __func__,
			    __LINE__, device_xname(self), device_xname(dev)),
			    DEBUG_DETACH);
			error = config_detach(dev, flags);
			if (error)
				goto out;
			KASSERT(chp->ch_drive[i].drv_softc == NULL);
			KASSERT(chp->ch_drive[i].drive_type == 0);
		}
	}

	/* Shutdown the channel. */
	ata_channel_lock(chp);
	chp->ch_flags |= ATACH_SHUTDOWN;
	while (chp->ch_thread != NULL) {
		cv_signal(&chp->ch_thr_idle);
		cv_wait(&chp->ch_thr_idle, &chp->ch_lock);
	}
	ata_channel_unlock(chp);

	atabus_free_drives(chp);

out:
#ifdef ATADEBUG
	if (dev != NULL && error != 0)
		ATADEBUG_PRINT(("%s: %s: error %d detaching %s\n", __func__,
		    device_xname(self), error, device_xname(dev)),
		    DEBUG_DETACH);
#endif /* ATADEBUG */

	return (error);
}

void
atabus_childdetached(device_t self, device_t child)
{
	bool found = false;
	struct atabus_softc *sc = device_private(self);
	struct ata_channel *chp = sc->sc_chan;
	int i;

	KASSERT(chp->ch_ndrives == 0 || chp->ch_drive != NULL);
	/*
	 * atapibus detached.
	 */
	if (child == chp->atapibus) {
		chp->atapibus = NULL;
		found = true;
		for (i = 0; i < chp->ch_ndrives; i++) {
			if (chp->ch_drive[i].drive_type != ATA_DRIVET_ATAPI)
				continue;
			KASSERT(chp->ch_drive[i].drv_softc != NULL);
			chp->ch_drive[i].drv_softc = NULL;
			chp->ch_drive[i].drive_flags = 0;
			chp->ch_drive[i].drive_type = ATA_DRIVET_NONE;
		}
	}

	/*
	 * Detach our other children.
	 */
	for (i = 0; i < chp->ch_ndrives; i++) {
		if (chp->ch_drive[i].drive_type == ATA_DRIVET_ATAPI)
			continue;
		if (child == chp->ch_drive[i].drv_softc) {
			chp->ch_drive[i].drv_softc = NULL;
			chp->ch_drive[i].drive_flags = 0;
			if (chp->ch_drive[i].drive_type == ATA_DRIVET_PM)
				chp->ch_satapmp_nports = 0;
			chp->ch_drive[i].drive_type = ATA_DRIVET_NONE;
			found = true;
		}
	}

	if (!found)
		panic("%s: unknown child %p", device_xname(self),
		    (const void *)child);
}

CFATTACH_DECL3_NEW(atabus, sizeof(struct atabus_softc),
    atabus_match, atabus_attach, atabus_detach, NULL, atabus_rescan,
    atabus_childdetached, DVF_DETACH_SHUTDOWN);

/*****************************************************************************
 * Common ATA bus operations.
 *****************************************************************************/

/* allocate/free the channel's ch_drive[] array */
int
atabus_alloc_drives(struct ata_channel *chp, int ndrives)
{
	int i;
	if (chp->ch_ndrives != ndrives)
		atabus_free_drives(chp);
	if (chp->ch_drive == NULL) {
		void *drv;

		ata_channel_unlock(chp);
		drv = kmem_zalloc(sizeof(*chp->ch_drive) * ndrives, KM_SLEEP);
		ata_channel_lock(chp);

		if (chp->ch_drive != NULL) {
			/* lost the race */
			kmem_free(drv, sizeof(*chp->ch_drive) * ndrives);
			return 0;
		}
		chp->ch_drive = drv;
	}
	for (i = 0; i < ndrives; i++) {
		chp->ch_drive[i].chnl_softc = chp;
		chp->ch_drive[i].drive = i;
	}
	chp->ch_ndrives = ndrives;
	return 0;
}

void
atabus_free_drives(struct ata_channel *chp)
{
#ifdef DIAGNOSTIC
	int i;
	int dopanic = 0;
	KASSERT(chp->ch_ndrives == 0 || chp->ch_drive != NULL);
	for (i = 0; i < chp->ch_ndrives; i++) {
		if (chp->ch_drive[i].drive_type != ATA_DRIVET_NONE) {
			printf("%s: ch_drive[%d] type %d != ATA_DRIVET_NONE\n",
			    device_xname(chp->atabus), i,
			    chp->ch_drive[i].drive_type);
			dopanic = 1;
		}
		if (chp->ch_drive[i].drv_softc != NULL) {
			printf("%s: ch_drive[%d] attached to %s\n",
			    device_xname(chp->atabus), i,
			    device_xname(chp->ch_drive[i].drv_softc));
			dopanic = 1;
		}
	}
	if (dopanic)
		panic("atabus_free_drives");
#endif

	if (chp->ch_drive == NULL)
		return;
	kmem_free(chp->ch_drive,
	    sizeof(struct ata_drive_datas) * chp->ch_ndrives);
	chp->ch_ndrives = 0;
	chp->ch_drive = NULL;
}

/* Get the disk's parameters */
int
ata_get_params(struct ata_drive_datas *drvp, uint8_t flags,
    struct ataparams *prms)
{
	struct ata_xfer *xfer;
	struct ata_channel *chp = drvp->chnl_softc;
	struct atac_softc *atac = chp->ch_atac;
	char *tb;
	int i, rv;
	uint16_t *p;

	ATADEBUG_PRINT(("%s\n", __func__), DEBUG_FUNCS);

	xfer = ata_get_xfer(chp, false);
	if (xfer == NULL) {
		ATADEBUG_PRINT(("%s: no xfer\n", __func__),
		    DEBUG_FUNCS|DEBUG_PROBE);
		return CMD_AGAIN;
	}

	tb = kmem_zalloc(ATA_BSIZE, KM_SLEEP);
	memset(prms, 0, sizeof(struct ataparams));

	if (drvp->drive_type == ATA_DRIVET_ATA) {
		xfer->c_ata_c.r_command = WDCC_IDENTIFY;
		xfer->c_ata_c.r_st_bmask = WDCS_DRDY;
		xfer->c_ata_c.r_st_pmask = WDCS_DRQ;
		xfer->c_ata_c.timeout = 3000; /* 3s */
	} else if (drvp->drive_type == ATA_DRIVET_ATAPI) {
		xfer->c_ata_c.r_command = ATAPI_IDENTIFY_DEVICE;
		xfer->c_ata_c.r_st_bmask = 0;
		xfer->c_ata_c.r_st_pmask = WDCS_DRQ;
		xfer->c_ata_c.timeout = 10000; /* 10s */
	} else {
		ATADEBUG_PRINT(("ata_get_parms: no disks\n"),
		    DEBUG_FUNCS|DEBUG_PROBE);
		rv = CMD_ERR;
		goto out;
	}
	xfer->c_ata_c.flags = AT_READ | flags;
	xfer->c_ata_c.data = tb;
	xfer->c_ata_c.bcount = ATA_BSIZE;
	(*atac->atac_bustype_ata->ata_exec_command)(drvp, xfer);
	ata_wait_cmd(chp, xfer);
	if (xfer->c_ata_c.flags & (AT_ERROR | AT_TIMEOU | AT_DF)) {
		ATADEBUG_PRINT(("ata_get_parms: ata_c.flags=0x%x\n",
		    xfer->c_ata_c.flags), DEBUG_FUNCS|DEBUG_PROBE);
		rv = CMD_ERR;
		goto out;
	}
	/* if we didn't read any data, something is wrong */
	if ((xfer->c_ata_c.flags & AT_XFDONE) == 0) {
		rv = CMD_ERR;
		goto out;
	}

	/* Read in parameter block. */
	memcpy(prms, tb, sizeof(struct ataparams));

	/*
	 * Shuffle string byte order.
	 * ATAPI NEC, Mitsumi and Pioneer drives and
	 * old ATA TDK CompactFlash cards
	 * have different byte order.
	 */
#if BYTE_ORDER == BIG_ENDIAN
# define M(n)	prms->atap_model[(n) ^ 1]
#else
# define M(n)	prms->atap_model[n]
#endif
	if (
#if BYTE_ORDER == BIG_ENDIAN
	    !
#endif
	    ((drvp->drive_type == ATA_DRIVET_ATAPI) ?
	     ((M(0) == 'N' && M(1) == 'E') ||
	      (M(0) == 'F' && M(1) == 'X') ||
	      (M(0) == 'P' && M(1) == 'i')) :
	     ((M(0) == 'T' && M(1) == 'D' && M(2) == 'K')))) {
		rv = CMD_OK;
		goto out;
	}
#undef M
	for (i = 0; i < sizeof(prms->atap_model); i += 2) {
		p = (uint16_t *)(prms->atap_model + i);
		*p = bswap16(*p);
	}
	for (i = 0; i < sizeof(prms->atap_serial); i += 2) {
		p = (uint16_t *)(prms->atap_serial + i);
		*p = bswap16(*p);
	}
	for (i = 0; i < sizeof(prms->atap_revision); i += 2) {
		p = (uint16_t *)(prms->atap_revision + i);
		*p = bswap16(*p);
	}

	rv = CMD_OK;
out:
	kmem_free(tb, ATA_BSIZE);
	ata_free_xfer(chp, xfer);
	return rv;
}
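
/*
 * Byte-order note for the string shuffling above: IDENTIFY strings are
 * packed two ASCII characters per 16-bit word, most significant byte
 * first.  A worked example of the transformation (illustration only,
 * not code from this driver): the first word of a model string "WD..."
 * arrives as 0x5744, which a little-endian host reads byte-wise as
 * "DW"; after bswap16() it becomes 0x4457, which reads as "WD", as
 * intended.
 */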

int
ata_set_mode(struct ata_drive_datas *drvp, uint8_t mode, uint8_t flags)
{
	struct ata_xfer *xfer;
	int rv;
	struct ata_channel *chp = drvp->chnl_softc;
	struct atac_softc *atac = chp->ch_atac;

	ATADEBUG_PRINT(("ata_set_mode=0x%x\n", mode), DEBUG_FUNCS);

	xfer = ata_get_xfer(chp, false);
	if (xfer == NULL) {
		ATADEBUG_PRINT(("%s: no xfer\n", __func__),
		    DEBUG_FUNCS|DEBUG_PROBE);
		return CMD_AGAIN;
	}

	xfer->c_ata_c.r_command = SET_FEATURES;
	xfer->c_ata_c.r_st_bmask = 0;
	xfer->c_ata_c.r_st_pmask = 0;
	xfer->c_ata_c.r_features = WDSF_SET_MODE;
	xfer->c_ata_c.r_count = mode;
	xfer->c_ata_c.flags = flags;
	xfer->c_ata_c.timeout = 1000; /* 1s */
	(*atac->atac_bustype_ata->ata_exec_command)(drvp, xfer);
	ata_wait_cmd(chp, xfer);
	if (xfer->c_ata_c.flags & (AT_ERROR | AT_TIMEOU | AT_DF)) {
		rv = CMD_ERR;
		goto out;
	}

	rv = CMD_OK;

out:
	ata_free_xfer(chp, xfer);
	return rv;
}
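
/*
 * The "mode" byte passed above is the standard ATA SET FEATURES
 * transfer mode encoding, as used by ata_probe_caps() below:
 * 0x08 | n selects PIO flow-control mode n, 0x20 | n selects
 * multiword DMA mode n, and 0x40 | n selects Ultra-DMA mode n.
 * For example (an illustrative sketch only, with a made-up drvp):
 *
 *	ata_set_mode(drvp, 0x40 | 5, AT_WAIT);	// request Ultra-DMA 5
 */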

#if NATA_DMA
void
ata_dmaerr(struct ata_drive_datas *drvp, int flags)
{
	ata_channel_lock_owned(drvp->chnl_softc);

	/*
	 * Downgrade decision: if we get NERRS_MAX in NXFER.
	 * We start with n_dmaerrs set to NERRS_MAX-1 so that the
	 * first error within the first NXFER ops will immediately trigger
	 * a downgrade.
	 * If we get an error and n_xfers is bigger than NXFER, reset the
	 * counters.
	 */
	drvp->n_dmaerrs++;
	if (drvp->n_dmaerrs >= NERRS_MAX && drvp->n_xfers <= NXFER) {
		ata_downgrade_mode(drvp, flags);
		drvp->n_dmaerrs = NERRS_MAX-1;
		drvp->n_xfers = 0;
		return;
	}
	if (drvp->n_xfers > NXFER) {
		drvp->n_dmaerrs = 1; /* just got an error */
		drvp->n_xfers = 1; /* restart counting from this error */
	}
}
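
/*
 * Worked example of the counting above (illustrative numbers only; the
 * real NERRS_MAX/NXFER values are defined elsewhere).  Assume
 * NERRS_MAX = 4: a fresh drive starts at n_dmaerrs = 3, so a single
 * error within the first NXFER transfers reaches 4 and downgrades
 * immediately.  After a downgrade the counters reset to 3 and 0, so
 * the next error window behaves the same way.  Once more than NXFER
 * transfers complete without reaching NERRS_MAX errors, counting
 * restarts at 1 from the next error.
 */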
#endif	/* NATA_DMA */

/*
 * Freeze the queue and wait for the controller to be idle.  The caller
 * has to unfreeze/restart the queue.
 */
static void
ata_channel_idle(struct ata_channel *chp)
{
	ata_channel_lock(chp);
	ata_channel_freeze_locked(chp);
	while (chp->ch_queue->queue_active > 0) {
		chp->ch_queue->queue_flags |= QF_IDLE_WAIT;
		cv_timedwait(&chp->ch_queue->queue_idle, &chp->ch_lock, 1);
	}
	ata_channel_unlock(chp);
}

/*
 * Add a command to the queue and start controller.
 *
 * MUST BE CALLED AT splbio()!
 */
void
ata_exec_xfer(struct ata_channel *chp, struct ata_xfer *xfer)
{

	ATADEBUG_PRINT(("ata_exec_xfer %p channel %d drive %d\n", xfer,
	    chp->ch_channel, xfer->c_drive), DEBUG_XFERS);

	/* complete xfer setup */
	xfer->c_chp = chp;

	ata_channel_lock(chp);

	/*
	 * Standard commands are added to the end of command list, but
	 * recovery commands must be run immediately.
	 */
	if ((xfer->c_flags & C_SKIP_QUEUE) == 0)
		SIMPLEQ_INSERT_TAIL(&chp->ch_queue->queue_xfer, xfer,
		    c_xferchain);
	else
		SIMPLEQ_INSERT_HEAD(&chp->ch_queue->queue_xfer, xfer,
		    c_xferchain);

	/*
	 * if polling and can sleep, wait for the xfer to be at head of queue
	 */
	if ((xfer->c_flags & (C_POLL | C_WAIT)) == (C_POLL | C_WAIT)) {
		while (chp->ch_queue->queue_active > 0 ||
		    SIMPLEQ_FIRST(&chp->ch_queue->queue_xfer) != xfer) {
			xfer->c_flags |= C_WAITACT;
			cv_wait(&chp->ch_queue->c_active, &chp->ch_lock);
			xfer->c_flags &= ~C_WAITACT;
		}

		/*
		 * Free the xfer now if there was an attempt to free it
		 * while we were waiting.
		 */
		if ((xfer->c_flags & (C_FREE|C_WAITTIMO)) == C_FREE) {
			ata_channel_unlock(chp);

			ata_free_xfer(chp, xfer);
			return;
		}
	}

	ata_channel_unlock(chp);

	ATADEBUG_PRINT(("atastart from ata_exec_xfer, flags 0x%x\n",
	    chp->ch_flags), DEBUG_XFERS);
	atastart(chp);
}
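
/*
 * Typical synchronous submission pattern, as used by ata_get_params()
 * and ata_set_mode() above (a sketch, not additional driver code):
 * allocate an xfer, fill in c_ata_c, hand it to the bus-type exec hook
 * (which ends up queueing it here), wait, check the status flags, and
 * free it:
 *
 *	xfer = ata_get_xfer(chp, false);
 *	xfer->c_ata_c.r_command = ...;
 *	xfer->c_ata_c.flags = AT_READ | AT_WAIT;
 *	(*atac->atac_bustype_ata->ata_exec_command)(drvp, xfer);
 *	ata_wait_cmd(chp, xfer);
 *	if (xfer->c_ata_c.flags & (AT_ERROR | AT_TIMEOU | AT_DF))
 *		... handle the error ...
 *	ata_free_xfer(chp, xfer);
 */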

/*
 * Start I/O on a controller, for the given channel.
 * The first xfer may not be for our channel if the channel queues
 * are shared.
 *
 * MUST BE CALLED AT splbio()!
 *
 * XXX FIS-based switching with PMP
 * Currently atastart() never schedules concurrent NCQ transfers to more than
 * one drive, even when channel has several SATA drives attached via PMP.
 * To support concurrent transfers to different drives with PMP, it would be
 * necessary to implement FIS-based switching support in controller driver,
 * and then adjust error handling and recovery to stop assuming at most
 * one active drive.
 */
void
atastart(struct ata_channel *chp)
{
	struct atac_softc *atac = chp->ch_atac;
	struct ata_queue *chq = chp->ch_queue;
	struct ata_xfer *xfer, *axfer;
	bool skipq;

#ifdef ATA_DEBUG
	int spl1, spl2;

	spl1 = splbio();
	spl2 = splbio();
	if (spl2 != spl1) {
		printf("atastart: not at splbio()\n");
		panic("atastart");
	}
	splx(spl2);
	splx(spl1);
#endif /* ATA_DEBUG */

	ata_channel_lock(chp);

again:
	/* is there an xfer? */
	if ((xfer = SIMPLEQ_FIRST(&chp->ch_queue->queue_xfer)) == NULL) {
		ATADEBUG_PRINT(("%s(chp=%p): channel %d queue_xfer is empty\n",
		    __func__, chp, chp->ch_channel), DEBUG_XFERS);
		goto out;
	}

	/*
	 * if someone is waiting for the command to be active, wake it up
	 * and let it process the command
	 */
	if (__predict_false(xfer->c_flags & C_WAITACT)) {
		ATADEBUG_PRINT(("atastart: xfer %p channel %d drive %d "
		    "wait active\n", xfer, chp->ch_channel, xfer->c_drive),
		    DEBUG_XFERS);
		cv_broadcast(&chp->ch_queue->c_active);
		goto out;
	}

	skipq = ISSET(xfer->c_flags, C_SKIP_QUEUE);

	/* is the queue frozen? */
	if (__predict_false(!skipq && chq->queue_freeze > 0)) {
		if (chq->queue_flags & QF_IDLE_WAIT) {
			chq->queue_flags &= ~QF_IDLE_WAIT;
			cv_signal(&chp->ch_queue->queue_idle);
		}
		ATADEBUG_PRINT(("%s(chp=%p): channel %d drive %d "
		    "queue frozen: %d\n",
		    __func__, chp, chp->ch_channel, xfer->c_drive,
		    chq->queue_freeze),
		    DEBUG_XFERS);
		goto out;
	}

	/* all xfers on same queue must belong to the same channel */
	KASSERT(xfer->c_chp == chp);

	/*
	 * Can only take the command if there are no current active
	 * commands, or if the command is NCQ and the active commands are also
	 * NCQ.  If PM is in use and HBA driver doesn't support/use FIS-based
	 * switching, can only send commands to single drive.
	 * Need only check first xfer.
	 * XXX FIS-based switching - revisit
	 */
	if (!skipq && (axfer = TAILQ_FIRST(&chp->ch_queue->active_xfers))) {
		if (!ISSET(xfer->c_flags, C_NCQ) ||
		    !ISSET(axfer->c_flags, C_NCQ) ||
		    xfer->c_drive != axfer->c_drive)
			goto out;
	}

	struct ata_drive_datas * const drvp = &chp->ch_drive[xfer->c_drive];

	/*
	 * Are we at the limit of active xfers?  If the queue has more
	 * than 1 opening, we keep one slot reserved for recovery or dump.
	 */
	KASSERT(chq->queue_active <= chq->queue_openings);
	const uint8_t chq_openings = (!skipq && chq->queue_openings > 1)
	    ? (chq->queue_openings - 1) : chq->queue_openings;
	const uint8_t drv_openings = ISSET(xfer->c_flags, C_NCQ)
	    ? drvp->drv_openings : ATA_MAX_OPENINGS;
	if (chq->queue_active >= MIN(chq_openings, drv_openings)) {
		if (skipq) {
			panic("%s: channel %d busy, xfer not possible",
			    __func__, chp->ch_channel);
		}

		ATADEBUG_PRINT(("%s(chp=%p): channel %d completely busy\n",
		    __func__, chp, chp->ch_channel), DEBUG_XFERS);
		goto out;
	}

	/* Slot allocation can fail if drv_openings < ch_openings */
	if (!ata_queue_alloc_slot(chp, &xfer->c_slot, drv_openings))
		goto out;

	if (__predict_false(atac->atac_claim_hw)) {
		if (!atac->atac_claim_hw(chp, 0)) {
			ata_queue_free_slot(chp, xfer->c_slot);
			goto out;
		}
	}

	/* Now committed to start the xfer */

	ATADEBUG_PRINT(("%s(chp=%p): xfer %p channel %d drive %d\n",
	    __func__, chp, xfer, chp->ch_channel, xfer->c_drive), DEBUG_XFERS);
	if (drvp->drive_flags & ATA_DRIVE_RESET) {
		drvp->drive_flags &= ~ATA_DRIVE_RESET;
		drvp->state = 0;
	}

	if (ISSET(xfer->c_flags, C_NCQ))
		SET(chp->ch_flags, ATACH_NCQ);
	else
		CLR(chp->ch_flags, ATACH_NCQ);

	SIMPLEQ_REMOVE_HEAD(&chq->queue_xfer, c_xferchain);

	ata_activate_xfer_locked(chp, xfer);

	if (atac->atac_cap & ATAC_CAP_NOIRQ)
		KASSERT(xfer->c_flags & C_POLL);

	switch (ata_xfer_start(xfer)) {
	case ATASTART_TH:
	case ATASTART_ABORT:
		/* don't start any further commands in this case */
		goto out;
	default:
		/* nothing to do */
		break;
	}

	/* Queue more commands if possible, but not during recovery or dump */
	if (!skipq && chq->queue_active < chq->queue_openings)
		goto again;

out:
	ata_channel_unlock(chp);
}
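
/*
 * Worked example for the openings computation in atastart() (numbers
 * are illustrative): with queue_openings = 32 and an NCQ-capable drive
 * advertising drv_openings = 32, a normal (non-C_SKIP_QUEUE) xfer sees
 * chq_openings = 31, so at most 31 commands run concurrently and one
 * slot stays free for a recovery or dump command, which bypasses the
 * reservation via skipq.  For a non-NCQ xfer drv_openings is replaced
 * by ATA_MAX_OPENINGS, so only the channel limit applies.
 */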

int
ata_xfer_start(struct ata_xfer *xfer)
{
	struct ata_channel *chp = xfer->c_chp;
	int rv;

	KASSERT(mutex_owned(&chp->ch_lock));

	rv = xfer->ops->c_start(chp, xfer);
	switch (rv) {
	case ATASTART_STARTED:
		/* nothing to do */
		break;
	case ATASTART_TH:
		/* postpone xfer to thread */
		ata_thread_wake_locked(chp);
		break;
	case ATASTART_POLL:
		/* can happen even in thread context for some ATAPI devices */
		ata_channel_unlock(chp);
		KASSERT(xfer->ops != NULL && xfer->ops->c_poll != NULL);
		xfer->ops->c_poll(chp, xfer);
		ata_channel_lock(chp);
		break;
	case ATASTART_ABORT:
		ata_channel_unlock(chp);
		KASSERT(xfer->ops != NULL && xfer->ops->c_abort != NULL);
		xfer->ops->c_abort(chp, xfer);
		ata_channel_lock(chp);
		break;
	}

	return rv;
}

static void
ata_activate_xfer_locked(struct ata_channel *chp, struct ata_xfer *xfer)
{
	struct ata_queue * const chq = chp->ch_queue;

	KASSERT(mutex_owned(&chp->ch_lock));
	KASSERT((chq->active_xfers_used & __BIT(xfer->c_slot)) == 0);

	if ((xfer->c_flags & C_SKIP_QUEUE) == 0)
		TAILQ_INSERT_TAIL(&chq->active_xfers, xfer, c_activechain);
	else {
		/*
		 * Must go to head, so that ata_queue_get_active_xfer()
		 * returns the recovery command, and not some other
		 * random active transfer.
		 */
		TAILQ_INSERT_HEAD(&chq->active_xfers, xfer, c_activechain);
	}
	chq->active_xfers_used |= __BIT(xfer->c_slot);
	chq->queue_active++;
}

/*
 * Does its own locking, does not require splbio().
 * waitok - whether to block waiting for a free xfer
 */
struct ata_xfer *
ata_get_xfer(struct ata_channel *chp, bool waitok)
{
	return pool_get(&ata_xfer_pool,
	    PR_ZERO | (waitok ? PR_WAITOK : PR_NOWAIT));
}

/*
 * ata_deactivate_xfer() must be always called prior to ata_free_xfer()
 */
void
ata_free_xfer(struct ata_channel *chp, struct ata_xfer *xfer)
{
	struct ata_queue *chq = chp->ch_queue;

	ata_channel_lock(chp);

	if (__predict_false(xfer->c_flags & (C_WAITACT|C_WAITTIMO))) {
		/* Someone is waiting for this xfer, so we can't free now */
		xfer->c_flags |= C_FREE;
		cv_broadcast(&chq->c_active);
		ata_channel_unlock(chp);
		return;
	}

	/* XXX move PIOBM and free_gw to deactivate? */
#if NATA_PIOBM /* XXX wdc dependent code */
	if (__predict_false(xfer->c_flags & C_PIOBM)) {
		struct wdc_softc *wdc = CHAN_TO_WDC(chp);

		/* finish the busmastering PIO */
		(*wdc->piobm_done)(wdc->dma_arg,
		    chp->ch_channel, xfer->c_drive);
		chp->ch_flags &= ~(ATACH_DMA_WAIT | ATACH_PIOBM_WAIT | ATACH_IRQ_WAIT);
	}
#endif

	if (__predict_false(chp->ch_atac->atac_free_hw))
		chp->ch_atac->atac_free_hw(chp);

	ata_channel_unlock(chp);

	if (__predict_true(!ISSET(xfer->c_flags, C_PRIVATE_ALLOC)))
		pool_put(&ata_xfer_pool, xfer);
}

void
ata_deactivate_xfer(struct ata_channel *chp, struct ata_xfer *xfer)
{
	struct ata_queue * const chq = chp->ch_queue;

	ata_channel_lock(chp);

	KASSERT(chq->queue_active > 0);
	KASSERT((chq->active_xfers_used & __BIT(xfer->c_slot)) != 0);

	/* Stop only when this is last active xfer */
	if (chq->queue_active == 1)
		callout_stop(&chp->c_timo_callout);

	if (callout_invoking(&chp->c_timo_callout))
		xfer->c_flags |= C_WAITTIMO;

	TAILQ_REMOVE(&chq->active_xfers, xfer, c_activechain);
	chq->active_xfers_used &= ~__BIT(xfer->c_slot);
	chq->queue_active--;

	ata_queue_free_slot(chp, xfer->c_slot);

	if (xfer->c_flags & C_WAIT)
		cv_broadcast(&chq->c_cmd_finish);

	ata_channel_unlock(chp);
}

/*
 * Called in c_intr hook.  Must be called before any deactivations
 * are done - if there is drain pending, it calls c_kill_xfer hook which
 * deactivates the xfer.
 * Calls c_kill_xfer with channel lock free.
 * Returns true if caller should just exit without further processing.
 * Caller must not further access any part of xfer or any related controller
 * structures in that case, it should just return.
 */
bool
ata_waitdrain_xfer_check(struct ata_channel *chp, struct ata_xfer *xfer)
{
	int drive = xfer->c_drive;
	bool draining = false;

	ata_channel_lock(chp);

	if (chp->ch_drive[drive].drive_flags & ATA_DRIVE_WAITDRAIN) {
		ata_channel_unlock(chp);

		xfer->ops->c_kill_xfer(chp, xfer, KILL_GONE);

		ata_channel_lock(chp);
		chp->ch_drive[drive].drive_flags &= ~ATA_DRIVE_WAITDRAIN;
		cv_signal(&chp->ch_queue->queue_drain);
		draining = true;
	}

	ata_channel_unlock(chp);

	return draining;
}

/*
 * Check for race of normal transfer handling vs. timeout.
 */
bool
ata_timo_xfer_check(struct ata_xfer *xfer)
{
	struct ata_channel *chp = xfer->c_chp;
	struct ata_drive_datas *drvp = &chp->ch_drive[xfer->c_drive];

	ata_channel_lock(chp);

	if (xfer->c_flags & C_WAITTIMO) {
		xfer->c_flags &= ~C_WAITTIMO;

		/* Handle race vs. ata_free_xfer() */
		if (xfer->c_flags & C_FREE) {
			xfer->c_flags &= ~C_FREE;
			ata_channel_unlock(chp);

			device_printf(drvp->drv_softc,
			    "xfer %"PRIxPTR" freed while invoking timeout\n",
			    (intptr_t)xfer & PAGE_MASK);

			ata_free_xfer(chp, xfer);
			return true;
		}

		/* Race vs. callout_stop() in ata_deactivate_xfer() */
		ata_channel_unlock(chp);

		device_printf(drvp->drv_softc,
		    "xfer %"PRIxPTR" deactivated while invoking timeout\n",
		    (intptr_t)xfer & PAGE_MASK);
		return true;
	}

	ata_channel_unlock(chp);

	/* No race, proceed with timeout handling */
	return false;
}

/*
 * Kill off all active xfers for an ata_channel.
 *
 * Must be called with channel lock held.
 */
void
ata_kill_active(struct ata_channel *chp, int reason, int flags)
{
	struct ata_queue * const chq = chp->ch_queue;
	struct ata_xfer *xfer, *xfernext;

	KASSERT(mutex_owned(&chp->ch_lock));

	TAILQ_FOREACH_SAFE(xfer, &chq->active_xfers, c_activechain, xfernext) {
		ata_channel_unlock(chp);
		xfer->ops->c_kill_xfer(xfer->c_chp, xfer, reason);
		ata_channel_lock(chp);
	}
}

/*
 * Kill off all pending xfers for a drive.
 */
void
ata_kill_pending(struct ata_drive_datas *drvp)
{
	struct ata_channel * const chp = drvp->chnl_softc;
	struct ata_queue * const chq = chp->ch_queue;
	struct ata_xfer *xfer;

	ata_channel_lock(chp);

	/* Kill all pending transfers */
	while ((xfer = SIMPLEQ_FIRST(&chq->queue_xfer))) {
		KASSERT(xfer->c_chp == chp);

		if (xfer->c_drive != drvp->drive)
			continue;

		SIMPLEQ_REMOVE_HEAD(&chp->ch_queue->queue_xfer, c_xferchain);

		/*
		 * Keep the lock, so that we get deadlock (and 'locking against
		 * myself' with LOCKDEBUG), instead of silent
		 * data corruption, if the hook tries to call back into
		 * middle layer for inactive xfer.
		 */
		xfer->ops->c_kill_xfer(chp, xfer, KILL_GONE_INACTIVE);
	}

	/* Wait until all active transfers on the drive finish */
	while (chq->queue_active > 0) {
		bool drv_active = false;

		TAILQ_FOREACH(xfer, &chq->active_xfers, c_activechain) {
			KASSERT(xfer->c_chp == chp);

			if (xfer->c_drive == drvp->drive) {
				drv_active = true;
				break;
			}
		}

		if (!drv_active) {
			/* all finished */
			break;
		}

		drvp->drive_flags |= ATA_DRIVE_WAITDRAIN;
		cv_wait(&chq->queue_drain, &chp->ch_lock);
	}

	ata_channel_unlock(chp);
}

static void
ata_channel_freeze_locked(struct ata_channel *chp)
{
	chp->ch_queue->queue_freeze++;

	ATADEBUG_PRINT(("%s(chp=%p) -> %d\n", __func__, chp,
	    chp->ch_queue->queue_freeze), DEBUG_FUNCS | DEBUG_XFERS);
}

void
ata_channel_freeze(struct ata_channel *chp)
{
	ata_channel_lock(chp);
	ata_channel_freeze_locked(chp);
	ata_channel_unlock(chp);
}
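
/*
 * Freeze/thaw are counted: each freeze increments queue_freeze and
 * each thaw decrements it, and atastart() only issues new commands
 * once the count drops back to zero.  A typical paired use, modelled
 * on ata_channel_idle() above (sketch only):
 *
 *	ata_channel_lock(chp);
 *	ata_channel_freeze_locked(chp);
 *	... quiesce or reconfigure the channel ...
 *	ata_channel_thaw_locked(chp);
 *	ata_channel_unlock(chp);
 */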

void
ata_channel_thaw_locked(struct ata_channel *chp)
{
	KASSERT(mutex_owned(&chp->ch_lock));
	KASSERT(chp->ch_queue->queue_freeze > 0);

	chp->ch_queue->queue_freeze--;

	ATADEBUG_PRINT(("%s(chp=%p) -> %d\n", __func__, chp,
	    chp->ch_queue->queue_freeze), DEBUG_FUNCS | DEBUG_XFERS);
}

/*
 * ata_thread_run:
 *
 *	Reset an ATA channel.  Channel lock must be held.  arg is
 *	type-specific.
 */
void
ata_thread_run(struct ata_channel *chp, int flags, int type, int arg)
{
	struct atac_softc *atac = chp->ch_atac;
	bool threset = false;
	struct ata_drive_datas *drvp;

	ata_channel_lock_owned(chp);

	/*
	 * If we can poll or wait it's OK, otherwise wake up the
	 * kernel thread to do it for us.
	 */
	ATADEBUG_PRINT(("%s flags 0x%x ch_flags 0x%x\n",
	    __func__, flags, chp->ch_flags), DEBUG_FUNCS | DEBUG_XFERS);
	if ((flags & (AT_POLL | AT_WAIT)) == 0) {
		switch (type) {
		case ATACH_TH_RESET:
			if (chp->ch_flags & ATACH_TH_RESET) {
				/* No need to schedule another reset */
				return;
			}
			break;
		case ATACH_TH_DRIVE_RESET:
		    {
			int drive = arg;

			KASSERT(drive <= chp->ch_ndrives);
			drvp = &chp->ch_drive[drive];

			if (drvp->drive_flags & ATA_DRIVE_TH_RESET) {
				/* No need to schedule another reset */
				return;
			}
			drvp->drive_flags |= ATA_DRIVE_TH_RESET;
			break;
		    }
		case ATACH_TH_RECOVERY:
		    {
			uint32_t tfd = (uint32_t)arg;

			KASSERT((chp->ch_flags & ATACH_RECOVERING) == 0);
			chp->recovery_tfd = tfd;
			break;
		    }
		default:
			panic("%s: unknown type: %x", __func__, type);
			/* NOTREACHED */
		}

		/*
		 * Block execution of other commands while reset is scheduled
		 * to a thread.
		 */
		ata_channel_freeze_locked(chp);
		chp->ch_flags |= type;

		cv_signal(&chp->ch_thr_idle);
		return;
	}

	/* Block execution of other commands during reset */
	ata_channel_freeze_locked(chp);

	/*
	 * If reset has been scheduled to a thread, then clear
	 * the flag now so that the thread won't try to execute it if
	 * we happen to sleep, and thaw one more time after the reset.
	 */
	if (chp->ch_flags & type) {
		chp->ch_flags &= ~type;
		threset = true;
	}

	switch (type) {
	case ATACH_TH_RESET:
		(*atac->atac_bustype_ata->ata_reset_channel)(chp, flags);

		KASSERT(chp->ch_ndrives == 0 || chp->ch_drive != NULL);
		for (int drive = 0; drive < chp->ch_ndrives; drive++)
			chp->ch_drive[drive].state = 0;
		break;

	case ATACH_TH_DRIVE_RESET:
	    {
		int drive = arg;

		KASSERT(drive <= chp->ch_ndrives);
		drvp = &chp->ch_drive[drive];
		(*atac->atac_bustype_ata->ata_reset_drive)(drvp, flags, NULL);
		drvp->state = 0;
		break;
	    }

	case ATACH_TH_RECOVERY:
	    {
		uint32_t tfd = (uint32_t)arg;

		KASSERT((chp->ch_flags & ATACH_RECOVERING) == 0);
		KASSERT(atac->atac_bustype_ata->ata_recovery != NULL);

		SET(chp->ch_flags, ATACH_RECOVERING);
		(*atac->atac_bustype_ata->ata_recovery)(chp, flags, tfd);
		CLR(chp->ch_flags, ATACH_RECOVERING);
		break;
	    }

	default:
		panic("%s: unknown type: %x", __func__, type);
		/* NOTREACHED */
	}

	/*
	 * Thaw one extra time to clear the freeze done when the reset has
	 * been scheduled to the thread.
	 */
	if (threset)
		ata_channel_thaw_locked(chp);

	/* Allow commands to run again */
	ata_channel_thaw_locked(chp);

	/* Signal the thread in case there is an xfer to run */
	cv_signal(&chp->ch_thr_idle);
}

int
ata_addref(struct ata_channel *chp)
{
	struct atac_softc *atac = chp->ch_atac;
	struct scsipi_adapter *adapt = &atac->atac_atapi_adapter._generic;
	int s, error = 0;

	s = splbio();
	if (adapt->adapt_refcnt++ == 0 &&
	    adapt->adapt_enable != NULL) {
		error = (*adapt->adapt_enable)(atac->atac_dev, 1);
		if (error)
			adapt->adapt_refcnt--;
	}
	splx(s);
	return (error);
}

void
ata_delref(struct ata_channel *chp)
{
	struct atac_softc *atac = chp->ch_atac;
	struct scsipi_adapter *adapt = &atac->atac_atapi_adapter._generic;
	int s;

	s = splbio();
	if (adapt->adapt_refcnt-- == 1 &&
	    adapt->adapt_enable != NULL)
		(void) (*adapt->adapt_enable)(atac->atac_dev, 0);
	splx(s);
}

void
ata_print_modes(struct ata_channel *chp)
{
	struct atac_softc *atac = chp->ch_atac;
	int drive;
	struct ata_drive_datas *drvp;

	KASSERT(chp->ch_ndrives == 0 || chp->ch_drive != NULL);
	for (drive = 0; drive < chp->ch_ndrives; drive++) {
		drvp = &chp->ch_drive[drive];
		if (drvp->drive_type == ATA_DRIVET_NONE ||
		    drvp->drv_softc == NULL)
			continue;
		aprint_verbose("%s(%s:%d:%d): using PIO mode %d",
		    device_xname(drvp->drv_softc),
		    device_xname(atac->atac_dev),
		    chp->ch_channel, drvp->drive, drvp->PIO_mode);
#if NATA_DMA
		if (drvp->drive_flags & ATA_DRIVE_DMA)
			aprint_verbose(", DMA mode %d", drvp->DMA_mode);
#if NATA_UDMA
		if (drvp->drive_flags & ATA_DRIVE_UDMA) {
			aprint_verbose(", Ultra-DMA mode %d", drvp->UDMA_mode);
			if (drvp->UDMA_mode == 2)
				aprint_verbose(" (Ultra/33)");
			else if (drvp->UDMA_mode == 4)
				aprint_verbose(" (Ultra/66)");
			else if (drvp->UDMA_mode == 5)
				aprint_verbose(" (Ultra/100)");
			else if (drvp->UDMA_mode == 6)
				aprint_verbose(" (Ultra/133)");
		}
#endif	/* NATA_UDMA */
#endif	/* NATA_DMA */
#if NATA_DMA || NATA_PIOBM
		if (0
#if NATA_DMA
		    || (drvp->drive_flags & (ATA_DRIVE_DMA | ATA_DRIVE_UDMA))
#endif
#if NATA_PIOBM
		    /* PIOBM capable controllers use DMA for PIO commands */
		    || (atac->atac_cap & ATAC_CAP_PIOBM)
#endif
		    )
			aprint_verbose(" (using DMA)");

		if (drvp->drive_flags & ATA_DRIVE_NCQ) {
			aprint_verbose(", NCQ (%d tags)%s",
			    ATA_REAL_OPENINGS(chp->ch_queue->queue_openings),
			    (drvp->drive_flags & ATA_DRIVE_NCQ_PRIO)
			    ? " w/PRIO" : "");
		} else if (drvp->drive_flags & ATA_DRIVE_WFUA)
			aprint_verbose(", WRITE DMA FUA EXT");

#endif	/* NATA_DMA || NATA_PIOBM */
		aprint_verbose("\n");
	}
}

#if NATA_DMA
/*
 * Downgrade the transfer mode of a drive after an error.  Return 1 if
 * the downgrade was possible, 0 otherwise.
 *
 * MUST BE CALLED AT splbio()!
 */
static int
ata_downgrade_mode(struct ata_drive_datas *drvp, int flags)
{
	struct ata_channel *chp = drvp->chnl_softc;
	struct atac_softc *atac = chp->ch_atac;
	device_t drv_dev = drvp->drv_softc;
	int cf_flags = device_cfdata(drv_dev)->cf_flags;

	ata_channel_lock_owned(drvp->chnl_softc);

	/* if the drive or controller doesn't know its mode, we can't do much */
	if ((drvp->drive_flags & ATA_DRIVE_MODE) == 0 ||
	    (atac->atac_set_modes == NULL))
		return 0;
	/* the current drive mode was set by a config flag, leave it that way */
	if ((cf_flags & ATA_CONFIG_PIO_SET) ||
	    (cf_flags & ATA_CONFIG_DMA_SET) ||
	    (cf_flags & ATA_CONFIG_UDMA_SET))
		return 0;

#if NATA_UDMA
	/*
	 * If we were using Ultra-DMA mode, downgrade to the next lower mode.
	 */
	if ((drvp->drive_flags & ATA_DRIVE_UDMA) && drvp->UDMA_mode >= 2) {
		drvp->UDMA_mode--;
		aprint_error_dev(drv_dev,
		    "transfer error, downgrading to Ultra-DMA mode %d\n",
		    drvp->UDMA_mode);
	}
#endif

	/*
	 * If we were using ultra-DMA, don't downgrade to multiword DMA.
	 */
	else if (drvp->drive_flags & (ATA_DRIVE_DMA | ATA_DRIVE_UDMA)) {
		drvp->drive_flags &= ~(ATA_DRIVE_DMA | ATA_DRIVE_UDMA);
		drvp->PIO_mode = drvp->PIO_cap;
		aprint_error_dev(drv_dev,
		    "transfer error, downgrading to PIO mode %d\n",
		    drvp->PIO_mode);
	} else /* already using PIO, can't downgrade */
		return 0;

	(*atac->atac_set_modes)(chp);
	ata_print_modes(chp);
	/* reset the channel, which will schedule all drives for setup */
	ata_thread_run(chp, flags, ATACH_TH_RESET, ATACH_NODRIVE);
	return 1;
}
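
/*
 * Example downgrade sequence (illustrative): a drive running Ultra-DMA
 * mode 5 that keeps erroring is stepped down one Ultra-DMA mode per
 * call (5 -> 4 -> ... -> 1).  Once below Ultra-DMA mode 2 (or if the
 * drive was using multiword DMA to begin with), the next call abandons
 * (Ultra-)DMA entirely and falls back to the drive's best PIO mode;
 * a drive already in PIO cannot be downgraded further and the function
 * returns 0.
 */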
#endif	/* NATA_DMA */

/*
 * Probe drive's capabilities, for use by the controller later.
 * Assumes drvp points to an existing drive.
 */
void
ata_probe_caps(struct ata_drive_datas *drvp)
{
	struct ataparams params, params2;
	struct ata_channel *chp = drvp->chnl_softc;
	struct atac_softc *atac = chp->ch_atac;
	device_t drv_dev = drvp->drv_softc;
	int i, printed = 0;
	const char *sep = "";
	int cf_flags;

	if (ata_get_params(drvp, AT_WAIT, &params) != CMD_OK) {
		/* IDENTIFY failed.  Can't tell more about the device */
		return;
	}
	if ((atac->atac_cap & (ATAC_CAP_DATA16 | ATAC_CAP_DATA32)) ==
	    (ATAC_CAP_DATA16 | ATAC_CAP_DATA32)) {
		/*
		 * Controller claims 16 and 32 bit transfers.
		 * Re-do an IDENTIFY with 32-bit transfers,
		 * and compare results.
		 */
		ata_channel_lock(chp);
		drvp->drive_flags |= ATA_DRIVE_CAP32;
		ata_channel_unlock(chp);
		ata_get_params(drvp, AT_WAIT, &params2);
		if (memcmp(&params, &params2, sizeof(struct ataparams)) != 0) {
			/* Not good.  fall back to 16bits */
			ata_channel_lock(chp);
			drvp->drive_flags &= ~ATA_DRIVE_CAP32;
			ata_channel_unlock(chp);
		} else {
			aprint_verbose_dev(drv_dev, "32-bit data port\n");
		}
	}
#if 0 /* Some ultra-DMA drives claim to only support ATA-3.  sigh */
	if (params.atap_ata_major > 0x01 &&
	    params.atap_ata_major != 0xffff) {
		for (i = 14; i > 0; i--) {
			if (params.atap_ata_major & (1 << i)) {
				aprint_verbose_dev(drv_dev,
				    "ATA version %d\n", i);
				drvp->ata_vers = i;
				break;
			}
		}
	}
#endif

	/* An ATAPI device is at least PIO mode 3 */
	if (drvp->drive_type == ATA_DRIVET_ATAPI)
		drvp->PIO_mode = 3;

	/*
	 * It's not in the specs, but it seems that some drives
	 * return 0xffff in atap_extensions when this field is invalid
	 */
	if (params.atap_extensions != 0xffff &&
	    (params.atap_extensions & WDC_EXT_MODES)) {
		/*
		 * XXX some drives report something wrong here (they claim to
		 * support PIO mode 8 !).  As mode is coded on 3 bits in
		 * SET FEATURE, limit it to 7 (so limit i to 4).
		 * If higher mode than 7 is found, abort.
		 */
		for (i = 7; i >= 0; i--) {
			if ((params.atap_piomode_supp & (1 << i)) == 0)
				continue;
			if (i > 4)
				return;
			/*
			 * See if mode is accepted.
			 * If the controller can't set its PIO mode,
			 * assume the defaults are good, so don't try
			 * to set it
			 */
			if (atac->atac_set_modes)
				/*
				 * It's OK to poll here, it's fast enough
				 * to not bother waiting for interrupt
				 */
				if (ata_set_mode(drvp, 0x08 | (i + 3),
				    AT_WAIT) != CMD_OK)
					continue;
			if (!printed) {
				aprint_verbose_dev(drv_dev,
				    "drive supports PIO mode %d", i + 3);
				sep = ",";
				printed = 1;
			}
			/*
			 * If the controller's driver can't set its PIO mode,
			 * or the controller can handle this mode, record the
			 * highest one for the drive.
			 */
			if (atac->atac_set_modes == NULL ||
			    atac->atac_pio_cap >= i + 3) {
				drvp->PIO_mode = i + 3;
				drvp->PIO_cap = i + 3;
				break;
			}
		}
		if (!printed) {
			/*
			 * We didn't find a valid PIO mode.
			 * Assume the values returned for DMA are buggy too
			 */
			return;
		}
		ata_channel_lock(chp);
		drvp->drive_flags |= ATA_DRIVE_MODE;
		ata_channel_unlock(chp);
		printed = 0;
		for (i = 7; i >= 0; i--) {
			if ((params.atap_dmamode_supp & (1 << i)) == 0)
				continue;
#if NATA_DMA
			if ((atac->atac_cap & ATAC_CAP_DMA) &&
			    atac->atac_set_modes != NULL)
				if (ata_set_mode(drvp, 0x20 | i, AT_WAIT)
				    != CMD_OK)
					continue;
#endif
			if (!printed) {
				aprint_verbose("%s DMA mode %d", sep, i);
				sep = ",";
				printed = 1;
			}
#if NATA_DMA
			if (atac->atac_cap & ATAC_CAP_DMA) {
				if (atac->atac_set_modes != NULL &&
				    atac->atac_dma_cap < i)
					continue;
				drvp->DMA_mode = i;
				drvp->DMA_cap = i;
				ata_channel_lock(chp);
				drvp->drive_flags |= ATA_DRIVE_DMA;
				ata_channel_unlock(chp);
			}
#endif
			break;
		}
		if (params.atap_extensions & WDC_EXT_UDMA_MODES) {
			printed = 0;
			for (i = 7; i >= 0; i--) {
				if ((params.atap_udmamode_supp & (1 << i))
				    == 0)
					continue;
#if NATA_UDMA
				if (atac->atac_set_modes != NULL &&
				    (atac->atac_cap & ATAC_CAP_UDMA))
					if (ata_set_mode(drvp, 0x40 | i,
					    AT_WAIT) != CMD_OK)
						continue;
#endif
				if (!printed) {
					aprint_verbose("%s Ultra-DMA mode %d",
					    sep, i);
					if (i == 2)
						aprint_verbose(" (Ultra/33)");
					else if (i == 4)
						aprint_verbose(" (Ultra/66)");
					else if (i == 5)
						aprint_verbose(" (Ultra/100)");
					else if (i == 6)
						aprint_verbose(" (Ultra/133)");
					sep = ",";
					printed = 1;
				}
#if NATA_UDMA
				if (atac->atac_cap & ATAC_CAP_UDMA) {
					if (atac->atac_set_modes != NULL &&
					    atac->atac_udma_cap < i)
						continue;
					drvp->UDMA_mode = i;
					drvp->UDMA_cap = i;
					ata_channel_lock(chp);
					drvp->drive_flags |= ATA_DRIVE_UDMA;
					ata_channel_unlock(chp);
				}
#endif
				break;
			}
		}
	}

	ata_channel_lock(chp);
	drvp->drive_flags &= ~ATA_DRIVE_NOSTREAM;
	if (drvp->drive_type == ATA_DRIVET_ATAPI) {
		if (atac->atac_cap & ATAC_CAP_ATAPI_NOSTREAM)
			drvp->drive_flags |= ATA_DRIVE_NOSTREAM;
	} else {
		if (atac->atac_cap & ATAC_CAP_ATA_NOSTREAM)
			drvp->drive_flags |= ATA_DRIVE_NOSTREAM;
	}
	ata_channel_unlock(chp);

	/* Try to guess ATA version here, if it didn't get reported */
2015 if (drvp->ata_vers == 0) {
2016 #if NATA_UDMA
2017 if (drvp->drive_flags & ATA_DRIVE_UDMA)
2018 			drvp->ata_vers = 4; /* should be at least ATA-4 */
2019 else
2020 #endif
2021 if (drvp->PIO_cap > 2)
2022 			drvp->ata_vers = 2; /* should be at least ATA-2 */
2023 }
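	/*
	 * cf_flags carry manual transfer-mode overrides from the kernel
	 * config file; when set they take precedence over probed modes.
	 */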
2024 cf_flags = device_cfdata(drv_dev)->cf_flags;
2025 if (cf_flags & ATA_CONFIG_PIO_SET) {
2026 ata_channel_lock(chp);
2027 drvp->PIO_mode =
2028 (cf_flags & ATA_CONFIG_PIO_MODES) >> ATA_CONFIG_PIO_OFF;
2029 drvp->drive_flags |= ATA_DRIVE_MODE;
2030 ata_channel_unlock(chp);
2031 }
2032 #if NATA_DMA
2033 if ((atac->atac_cap & ATAC_CAP_DMA) == 0) {
2034 /* don't care about DMA modes */
2035 if (*sep != '\0')
2036 aprint_verbose("\n");
2037 return;
2038 }
2039 if (cf_flags & ATA_CONFIG_DMA_SET) {
2040 ata_channel_lock(chp);
2041 if ((cf_flags & ATA_CONFIG_DMA_MODES) ==
2042 ATA_CONFIG_DMA_DISABLE) {
2043 drvp->drive_flags &= ~ATA_DRIVE_DMA;
2044 } else {
2045 drvp->DMA_mode = (cf_flags & ATA_CONFIG_DMA_MODES) >>
2046 ATA_CONFIG_DMA_OFF;
2047 drvp->drive_flags |= ATA_DRIVE_DMA | ATA_DRIVE_MODE;
2048 }
2049 ata_channel_unlock(chp);
2050 }
2051
2052 /*
2053 * Probe WRITE DMA FUA EXT. Support is mandatory for devices
2054 * supporting LBA48, but nevertheless confirm with the feature flag.
2055 */
2056 if (drvp->drive_flags & ATA_DRIVE_DMA) {
2057 if ((params.atap_cmd2_en & ATA_CMD2_LBA48) != 0
2058 && (params.atap_cmd_def & ATA_CMDE_WFE)) {
2059 drvp->drive_flags |= ATA_DRIVE_WFUA;
2060 aprint_verbose("%s WRITE DMA FUA", sep);
2061 sep = ",";
2062 }
2063 }
2064
2065 	/* Probe NCQ - READ/WRITE FPDMA QUEUED command support */
2066 ata_channel_lock(chp);
2067 drvp->drv_openings = 1;
2068 if (params.atap_sata_caps & SATA_NATIVE_CMDQ) {
2069 if (atac->atac_cap & ATAC_CAP_NCQ)
2070 drvp->drive_flags |= ATA_DRIVE_NCQ;
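		/*
		 * IDENTIFY word 75 bits 4:0 hold the maximum queue
		 * depth minus one, hence the + 1 below.
		 */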
2071 drvp->drv_openings =
2072 (params.atap_queuedepth & WDC_QUEUE_DEPTH_MASK) + 1;
2073 aprint_verbose("%s NCQ (%d tags)", sep, drvp->drv_openings);
2074 sep = ",";
2075
2076 if (params.atap_sata_caps & SATA_NCQ_PRIO) {
2077 drvp->drive_flags |= ATA_DRIVE_NCQ_PRIO;
2078 aprint_verbose(" w/PRIO");
2079 }
2080 }
2081 ata_channel_unlock(chp);
2082
2083 if (*sep != '\0')
2084 aprint_verbose("\n");
2085
2086 #if NATA_UDMA
2087 if ((atac->atac_cap & ATAC_CAP_UDMA) == 0) {
2088 /* don't care about UDMA modes */
2089 return;
2090 }
2091 if (cf_flags & ATA_CONFIG_UDMA_SET) {
2092 ata_channel_lock(chp);
2093 if ((cf_flags & ATA_CONFIG_UDMA_MODES) ==
2094 ATA_CONFIG_UDMA_DISABLE) {
2095 drvp->drive_flags &= ~ATA_DRIVE_UDMA;
2096 } else {
2097 drvp->UDMA_mode = (cf_flags & ATA_CONFIG_UDMA_MODES) >>
2098 ATA_CONFIG_UDMA_OFF;
2099 drvp->drive_flags |= ATA_DRIVE_UDMA | ATA_DRIVE_MODE;
2100 }
2101 ata_channel_unlock(chp);
2102 }
2103 #endif /* NATA_UDMA */
2104 #endif /* NATA_DMA */
2105 }
2106
2107 /* management of the /dev/atabus* devices */
2108 int
2109 atabusopen(dev_t dev, int flag, int fmt, struct lwp *l)
2110 {
2111 struct atabus_softc *sc;
2112 int error;
2113
2114 sc = device_lookup_private(&atabus_cd, minor(dev));
2115 if (sc == NULL)
2116 return (ENXIO);
2117
2118 if (sc->sc_flags & ATABUSCF_OPEN)
2119 return (EBUSY);
2120
2121 if ((error = ata_addref(sc->sc_chan)) != 0)
2122 return (error);
2123
2124 sc->sc_flags |= ATABUSCF_OPEN;
2125
2126 return (0);
2127 }
2128
2129
2130 int
2131 atabusclose(dev_t dev, int flag, int fmt, struct lwp *l)
2132 {
2133 struct atabus_softc *sc =
2134 device_lookup_private(&atabus_cd, minor(dev));
2135
2136 ata_delref(sc->sc_chan);
2137
2138 sc->sc_flags &= ~ATABUSCF_OPEN;
2139
2140 return (0);
2141 }
2142
2143 int
2144 atabusioctl(dev_t dev, u_long cmd, void *addr, int flag, struct lwp *l)
2145 {
2146 struct atabus_softc *sc =
2147 device_lookup_private(&atabus_cd, minor(dev));
2148 struct ata_channel *chp = sc->sc_chan;
2149 int min_drive, max_drive, drive;
2150 int error;
2151
2152 /*
2153 * Enforce write permission for ioctls that change the
2154 * state of the bus. Host adapter specific ioctls must
2155 * be checked by the adapter driver.
2156 */
2157 switch (cmd) {
2158 case ATABUSIOSCAN:
2159 case ATABUSIODETACH:
2160 case ATABUSIORESET:
2161 if ((flag & FWRITE) == 0)
2162 return (EBADF);
2163 }
2164
2165 switch (cmd) {
2166 case ATABUSIORESET:
2167 ata_channel_lock(chp);
2168 ata_thread_run(sc->sc_chan, AT_WAIT | AT_POLL,
2169 ATACH_TH_RESET, ATACH_NODRIVE);
2170 ata_channel_unlock(chp);
2171 return 0;
2172 case ATABUSIOSCAN:
2173 {
2174 #if 0
2175 		struct atabusioscan_args *a =
2176 		    (struct atabusioscan_args *)addr;
2177 #endif
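		/*
		 * XXX scanning is not implemented yet; this always
		 * returns EOPNOTSUPP for now.
		 */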
2178 if ((chp->ch_drive[0].drive_type == ATA_DRIVET_OLD) ||
2179 (chp->ch_drive[1].drive_type == ATA_DRIVET_OLD))
2180 return (EOPNOTSUPP);
2181 return (EOPNOTSUPP);
2182 }
2183 case ATABUSIODETACH:
2184 {
2185 		struct atabusiodetach_args *a =
2186 		    (struct atabusiodetach_args *)addr;
2187 if ((chp->ch_drive[0].drive_type == ATA_DRIVET_OLD) ||
2188 (chp->ch_drive[1].drive_type == ATA_DRIVET_OLD))
2189 return (EOPNOTSUPP);
2190 switch (a->at_dev) {
2191 case -1:
2192 min_drive = 0;
2193 max_drive = 1;
2194 break;
2195 case 0:
2196 case 1:
2197 min_drive = max_drive = a->at_dev;
2198 break;
2199 default:
2200 return (EINVAL);
2201 }
2202 for (drive = min_drive; drive <= max_drive; drive++) {
2203 if (chp->ch_drive[drive].drv_softc != NULL) {
2204 error = config_detach(
2205 chp->ch_drive[drive].drv_softc, 0);
2206 if (error)
2207 return (error);
2208 KASSERT(chp->ch_drive[drive].drv_softc == NULL);
2209 }
2210 }
2211 return 0;
2212 }
2213 default:
2214 return ENOTTY;
2215 }
2216 }
2217
2218 static bool
2219 atabus_suspend(device_t dv, const pmf_qual_t *qual)
2220 {
2221 struct atabus_softc *sc = device_private(dv);
2222 struct ata_channel *chp = sc->sc_chan;
2223
2224 ata_channel_idle(chp);
2225
2226 return true;
2227 }
2228
2229 static bool
2230 atabus_resume(device_t dv, const pmf_qual_t *qual)
2231 {
2232 struct atabus_softc *sc = device_private(dv);
2233 struct ata_channel *chp = sc->sc_chan;
2234
2235 	/*
2236 	 * XXX joerg: with wdc, the first channel unfreezes the controller.
2237 	 * Move the reset and queue idling into wdc.
2238 	 */
2239 ata_channel_lock(chp);
2240 if (chp->ch_queue->queue_freeze == 0) {
2241 ata_channel_unlock(chp);
2242 goto out;
2243 }
2244
2245 /* unfreeze the queue and reset drives */
2246 ata_channel_thaw_locked(chp);
2247
2248 /* reset channel only if there are drives attached */
2249 if (chp->ch_ndrives > 0)
2250 ata_thread_run(chp, AT_WAIT, ATACH_TH_RESET, ATACH_NODRIVE);
2251
2252 ata_channel_unlock(chp);
2253
2254 out:
2255 return true;
2256 }
2257
2258 static int
2259 atabus_rescan(device_t self, const char *ifattr, const int *locators)
2260 {
2261 struct atabus_softc *sc = device_private(self);
2262 struct ata_channel *chp = sc->sc_chan;
2263 struct atabus_initq *initq;
2264 int i;
2265
2266 /*
2267 * we can rescan a port multiplier atabus, even if some devices are
2268 * still attached
2269 */
2270 if (chp->ch_satapmp_nports == 0) {
2271 if (chp->atapibus != NULL) {
2272 return EBUSY;
2273 }
2274
2275 KASSERT(chp->ch_ndrives == 0 || chp->ch_drive != NULL);
2276 for (i = 0; i < chp->ch_ndrives; i++) {
2277 if (chp->ch_drive[i].drv_softc != NULL) {
2278 return EBUSY;
2279 }
2280 }
2281 }
2282
2283 initq = kmem_zalloc(sizeof(*initq), KM_SLEEP);
2284 initq->atabus_sc = sc;
2285 mutex_enter(&atabus_qlock);
2286 TAILQ_INSERT_TAIL(&atabus_initq_head, initq, atabus_initq);
2287 mutex_exit(&atabus_qlock);
2288 config_pending_incr(sc->sc_dev);
2289
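	/* wake the channel's kthread; it performs the actual rescan */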
2290 ata_channel_lock(chp);
2291 chp->ch_flags |= ATACH_TH_RESCAN;
2292 cv_signal(&chp->ch_thr_idle);
2293 ata_channel_unlock(chp);
2294
2295 return 0;
2296 }
2297
2298 void
2299 ata_delay(struct ata_channel *chp, int ms, const char *msg, int flags)
2300 {
2301 KASSERT(mutex_owned(&chp->ch_lock));
2302
2303 if ((flags & (AT_WAIT | AT_POLL)) == AT_POLL) {
2304 		/*
2305 		 * can't use kpause(); we may be in interrupt context
2306 		 * or taking a crash dump
2307 		 */
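		/* delay() busy-waits; its argument is in microseconds */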
2308 delay(ms * 1000);
2309 } else {
2310 int pause = mstohz(ms);
2311
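		/* sleep at least one tick; kpause() drops ch_lock while waiting */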
2312 kpause(msg, false, pause > 0 ? pause : 1, &chp->ch_lock);
2313 }
2314 }
2315
2316 void
2317 atacmd_toncq(struct ata_xfer *xfer, uint8_t *cmd, uint16_t *count,
2318 uint16_t *features, uint8_t *device)
2319 {
2320 if ((xfer->c_flags & C_NCQ) == 0) {
2321 /* FUA handling for non-NCQ drives */
2322 if (xfer->c_bio.flags & ATA_FUA
2323 && *cmd == WDCC_WRITEDMA_EXT)
2324 *cmd = WDCC_WRITEDMA_FUA_EXT;
2325
2326 return;
2327 }
2328
2329 *cmd = (xfer->c_bio.flags & ATA_READ) ?
2330 WDCC_READ_FPDMA_QUEUED : WDCC_WRITE_FPDMA_QUEUED;
2331
2332 	/* for FPDMA QUEUED the sector count is carried in the features field */
2333 *features = *count;
2334
2335 	/* NCQ tag, in bits 7:3 of the count field */
2336 *count = (xfer->c_slot << 3);
2337
2338 if (xfer->c_bio.flags & ATA_PRIO_HIGH)
2339 *count |= WDSC_PRIO_HIGH;
2340
2341 /* other device flags */
2342 if (xfer->c_bio.flags & ATA_FUA)
2343 *device |= WDSD_FUA;
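	/*
	 * Illustrative example: a high-priority FUA write of 16 sectors
	 * using slot 5 ends up with WDCC_WRITE_FPDMA_QUEUED in cmd,
	 * 16 in features, (5 << 3) | WDSC_PRIO_HIGH in count, and
	 * WDSD_FUA set in device.
	 */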
2344 }
2345
2346 void
2347 ata_wait_cmd(struct ata_channel *chp, struct ata_xfer *xfer)
2348 {
2349 struct ata_queue *chq = chp->ch_queue;
2350 struct ata_command *ata_c = &xfer->c_ata_c;
2351
2352 ata_channel_lock(chp);
2353
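	/*
	 * cv_wait() may wake spuriously, so re-check AT_DONE in a loop;
	 * the command completion path sets the flag before waking us.
	 */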
2354 while ((ata_c->flags & AT_DONE) == 0)
2355 cv_wait(&chq->c_cmd_finish, &chp->ch_lock);
2356
2357 ata_channel_unlock(chp);
2358
2359 KASSERT((ata_c->flags & AT_DONE) != 0);
2360 }
2361