/*	$NetBSD: ahcisata_core.c,v 1.98.2.1 2021/08/01 22:42:23 thorpej Exp $	*/

/*
 * Copyright (c) 2006 Manuel Bouyer.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ahcisata_core.c,v 1.98.2.1 2021/08/01 22:42:23 thorpej Exp $");

#include <sys/types.h>
#include <sys/malloc.h>
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/disklabel.h>
#include <sys/proc.h>
#include <sys/buf.h>

#include <dev/ata/atareg.h>
#include <dev/ata/satavar.h>
#include <dev/ata/satareg.h>
#include <dev/ata/satafisvar.h>
#include <dev/ata/satafisreg.h>
#include <dev/ata/satapmpreg.h>
#include <dev/ic/ahcisatavar.h>
#include <dev/ic/wdcreg.h>

#include <dev/scsipi/scsi_all.h> /* for SCSI status */

#include "atapibus.h"

#ifdef AHCI_DEBUG
int ahcidebug_mask = 0;
#endif

static void ahci_probe_drive(struct ata_channel *);
static void ahci_setup_channel(struct ata_channel *);

static void ahci_ata_bio(struct ata_drive_datas *, struct ata_xfer *);
static int ahci_do_reset_drive(struct ata_channel *, int, int, uint32_t *,
	uint8_t);
static void ahci_reset_drive(struct ata_drive_datas *, int, uint32_t *);
static void ahci_reset_channel(struct ata_channel *, int);
static void ahci_exec_command(struct ata_drive_datas *, struct ata_xfer *);
static int ahci_ata_addref(struct ata_drive_datas *);
static void ahci_ata_delref(struct ata_drive_datas *);
static void ahci_killpending(struct ata_drive_datas *);

static int ahci_cmd_start(struct ata_channel *, struct ata_xfer *);
static int ahci_cmd_complete(struct ata_channel *, struct ata_xfer *, int);
static void ahci_cmd_poll(struct ata_channel *, struct ata_xfer *);
static void ahci_cmd_abort(struct ata_channel *, struct ata_xfer *);
static void ahci_cmd_done(struct ata_channel *, struct ata_xfer *);
static void ahci_cmd_done_end(struct ata_channel *, struct ata_xfer *);
static void ahci_cmd_kill_xfer(struct ata_channel *, struct ata_xfer *, int);
static int ahci_bio_start(struct ata_channel *, struct ata_xfer *);
static void ahci_bio_poll(struct ata_channel *, struct ata_xfer *);
static void ahci_bio_abort(struct ata_channel *, struct ata_xfer *);
static int ahci_bio_complete(struct ata_channel *, struct ata_xfer *, int);
static void ahci_bio_kill_xfer(struct ata_channel *, struct ata_xfer *, int);
static void ahci_channel_stop(struct ahci_softc *, struct ata_channel *, int);
static void ahci_channel_start(struct ahci_softc *, struct ata_channel *,
	int, int);
static void ahci_channel_recover(struct ata_channel *, int, uint32_t);
static int ahci_dma_setup(struct ata_channel *, int, void *, size_t, int);
static int ahci_intr_port_common(struct ata_channel *);

#if NATAPIBUS > 0
static void ahci_atapibus_attach(struct atabus_softc *);
static void ahci_atapi_kill_pending(struct scsipi_periph *);
static void ahci_atapi_minphys(struct buf *);
static void ahci_atapi_scsipi_request(struct scsipi_channel *,
	scsipi_adapter_req_t, void *);
static int ahci_atapi_start(struct ata_channel *, struct ata_xfer *);
static void ahci_atapi_poll(struct ata_channel *, struct ata_xfer *);
static void ahci_atapi_abort(struct ata_channel *, struct ata_xfer *);
static int ahci_atapi_complete(struct ata_channel *, struct ata_xfer *, int);
static void ahci_atapi_kill_xfer(struct ata_channel *, struct ata_xfer *, int);
static void ahci_atapi_probe_device(struct atapibus_softc *, int);

static const struct scsipi_bustype ahci_atapi_bustype = {
	.bustype_type = SCSIPI_BUSTYPE_ATAPI,
	.bustype_cmd = atapi_scsipi_cmd,
	.bustype_interpret_sense = atapi_interpret_sense,
	.bustype_printaddr = atapi_print_addr,
	.bustype_kill_pending = ahci_atapi_kill_pending,
	.bustype_async_event_xfer_mode = NULL,
};
#endif /* NATAPIBUS */

#define ATA_DELAY	10000	/* 10s for a drive I/O */
#define ATA_RESET_DELAY	31000	/* 31s for a drive reset */
#define AHCI_RST_WAIT	(ATA_RESET_DELAY / 10)

const struct ata_bustype ahci_ata_bustype = {
	.bustype_type = SCSIPI_BUSTYPE_ATA,
	.ata_bio = ahci_ata_bio,
	.ata_reset_drive = ahci_reset_drive,
	.ata_reset_channel = ahci_reset_channel,
	.ata_exec_command = ahci_exec_command,
	.ata_get_params = ata_get_params,
	.ata_addref = ahci_ata_addref,
	.ata_delref = ahci_ata_delref,
	.ata_killpending = ahci_killpending,
	.ata_recovery = ahci_channel_recover,
};

static void ahci_setup_port(struct ahci_softc *sc, int i);

static void
ahci_enable(struct ahci_softc *sc)
{
	uint32_t ghc;

	ghc = AHCI_READ(sc, AHCI_GHC);
	if (!(ghc & AHCI_GHC_AE)) {
		ghc |= AHCI_GHC_AE;
		AHCI_WRITE(sc, AHCI_GHC, ghc);
	}
}

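/*
 * Reset the HBA (GHC.HR) and re-enable AHCI mode.  Returns 0 on success,
 * -1 if the controller never clears the reset bit within the timeout.
 */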
static int
ahci_reset(struct ahci_softc *sc)
{
	int i;
	uint32_t timeout_ms = 1000;	/* default to 1s timeout */
	prop_dictionary_t dict;

	/* reset controller */
	AHCI_WRITE(sc, AHCI_GHC, AHCI_GHC_HR);

	/* some systems (rockchip rk3399) need extra reset time for ahcisata. */
	dict = device_properties(sc->sc_atac.atac_dev);
	if (dict)
		prop_dictionary_get_uint32(dict, "ahci-reset-ms", &timeout_ms);

	/* wait for reset to complete */
	for (i = 0; i < timeout_ms; i++) {
		delay(1000);
		if ((AHCI_READ(sc, AHCI_GHC) & AHCI_GHC_HR) == 0)
			break;
	}
	if ((AHCI_READ(sc, AHCI_GHC) & AHCI_GHC_HR) != 0) {
		aprint_error_dev(sc->sc_atac.atac_dev, "reset failed\n");
		return -1;
	}
	if (i > 1000) {
		aprint_normal_dev(sc->sc_atac.atac_dev,
		    "reset took %d milliseconds\n", i);
	}
	/* enable ahci mode */
	ahci_enable(sc);

	if (sc->sc_save_init_data) {
		AHCI_WRITE(sc, AHCI_CAP, sc->sc_init_data.cap);
		if (sc->sc_init_data.cap2)
			AHCI_WRITE(sc, AHCI_CAP2, sc->sc_init_data.cap2);
		AHCI_WRITE(sc, AHCI_PI, sc->sc_init_data.ports);
	}

	/* Check if hardware reverted to single message MSI */
	sc->sc_ghc_mrsm = ISSET(AHCI_READ(sc, AHCI_GHC), AHCI_GHC_MRSM);

	return 0;
}

static void
ahci_setup_ports(struct ahci_softc *sc)
{
	int i, port;

	for (i = 0, port = 0; i < AHCI_MAX_PORTS; i++) {
		if ((sc->sc_ahci_ports & (1U << i)) == 0)
			continue;
		if (port >= sc->sc_atac.atac_nchannels) {
			aprint_error("%s: more ports than announced\n",
			    AHCINAME(sc));
			break;
		}
		ahci_setup_port(sc, i);
		port++;
	}
}

static void
ahci_reprobe_drives(struct ahci_softc *sc)
{
	int i, port;
	struct ahci_channel *achp;
	struct ata_channel *chp;

	for (i = 0, port = 0; i < AHCI_MAX_PORTS; i++) {
		if ((sc->sc_ahci_ports & (1U << i)) == 0)
			continue;
		if (port >= sc->sc_atac.atac_nchannels) {
			aprint_error("%s: more ports than announced\n",
			    AHCINAME(sc));
			break;
		}
		achp = &sc->sc_channels[i];
		chp = &achp->ata_channel;

		ahci_probe_drive(chp);
		port++;
	}
}

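/*
 * Program the per-port command list base (CLB/CLBU) and received FIS
 * base (FB/FBU) registers from the DMA addresses set up at attach time.
 */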
static void
ahci_setup_port(struct ahci_softc *sc, int i)
{
	struct ahci_channel *achp;

	achp = &sc->sc_channels[i];

	AHCI_WRITE(sc, AHCI_P_CLB(i), BUS_ADDR_LO32(achp->ahcic_bus_cmdh));
	AHCI_WRITE(sc, AHCI_P_CLBU(i), BUS_ADDR_HI32(achp->ahcic_bus_cmdh));
	AHCI_WRITE(sc, AHCI_P_FB(i), BUS_ADDR_LO32(achp->ahcic_bus_rfis));
	AHCI_WRITE(sc, AHCI_P_FBU(i), BUS_ADDR_HI32(achp->ahcic_bus_rfis));
}

static void
ahci_enable_intrs(struct ahci_softc *sc)
{

	/* clear interrupts */
	AHCI_WRITE(sc, AHCI_IS, AHCI_READ(sc, AHCI_IS));
	/* enable interrupts */
	AHCI_WRITE(sc, AHCI_GHC, AHCI_READ(sc, AHCI_GHC) | AHCI_GHC_IE);
}

void
ahci_attach(struct ahci_softc *sc)
{
	uint32_t ahci_rev;
	int i, j, port;
	struct ahci_channel *achp;
	struct ata_channel *chp;
	int error;
	int dmasize;
	char buf[128];
	void *cmdhp;
	void *cmdtblp;

	if (sc->sc_save_init_data) {
		ahci_enable(sc);

		sc->sc_init_data.cap = AHCI_READ(sc, AHCI_CAP);
		sc->sc_init_data.ports = AHCI_READ(sc, AHCI_PI);

		ahci_rev = AHCI_READ(sc, AHCI_VS);
		if (AHCI_VS_MJR(ahci_rev) > 1 ||
		    (AHCI_VS_MJR(ahci_rev) == 1 && AHCI_VS_MNR(ahci_rev) >= 20)) {
			sc->sc_init_data.cap2 = AHCI_READ(sc, AHCI_CAP2);
		} else {
			sc->sc_init_data.cap2 = 0;
		}
		if (sc->sc_init_data.ports == 0) {
			sc->sc_init_data.ports = sc->sc_ahci_ports;
		}
	}

	if (ahci_reset(sc) != 0)
		return;

	sc->sc_ahci_cap = AHCI_READ(sc, AHCI_CAP);
	if (sc->sc_ahci_quirks & AHCI_QUIRK_BADPMP) {
		aprint_verbose_dev(sc->sc_atac.atac_dev,
		    "ignoring broken port multiplier support\n");
		sc->sc_ahci_cap &= ~AHCI_CAP_SPM;
	}
	if (sc->sc_ahci_quirks & AHCI_QUIRK_BADNCQ) {
		aprint_verbose_dev(sc->sc_atac.atac_dev,
		    "ignoring broken NCQ support\n");
		sc->sc_ahci_cap &= ~AHCI_CAP_NCQ;
	}
	sc->sc_atac.atac_nchannels = (sc->sc_ahci_cap & AHCI_CAP_NPMASK) + 1;
	sc->sc_ncmds = ((sc->sc_ahci_cap & AHCI_CAP_NCS) >> 8) + 1;
	ahci_rev = AHCI_READ(sc, AHCI_VS);
	snprintb(buf, sizeof(buf), "\177\020"
	    /* "f\000\005NP\0" */
	    "b\005SXS\0"
	    "b\006EMS\0"
	    "b\007CCCS\0"
	    /* "f\010\005NCS\0" */
	    "b\015PSC\0"
	    "b\016SSC\0"
	    "b\017PMD\0"
	    "b\020FBSS\0"
	    "b\021SPM\0"
	    "b\022SAM\0"
	    "b\023SNZO\0"
	    "f\024\003ISS\0"
		"=\001Gen1\0"
		"=\002Gen2\0"
		"=\003Gen3\0"
	    "b\030SCLO\0"
	    "b\031SAL\0"
	    "b\032SALP\0"
	    "b\033SSS\0"
	    "b\034SMPS\0"
	    "b\035SSNTF\0"
	    "b\036SNCQ\0"
	    "b\037S64A\0"
	    "\0", sc->sc_ahci_cap);
	aprint_normal_dev(sc->sc_atac.atac_dev, "AHCI revision %u.%u"
	    ", %d port%s, %d slot%s, CAP %s\n",
	    AHCI_VS_MJR(ahci_rev), AHCI_VS_MNR(ahci_rev),
	    sc->sc_atac.atac_nchannels,
	    (sc->sc_atac.atac_nchannels == 1 ? "" : "s"),
	    sc->sc_ncmds, (sc->sc_ncmds == 1 ? "" : "s"), buf);

	sc->sc_atac.atac_cap = ATAC_CAP_DATA16 | ATAC_CAP_DMA | ATAC_CAP_UDMA
	    | ((sc->sc_ahci_cap & AHCI_CAP_NCQ) ? ATAC_CAP_NCQ : 0);
	sc->sc_atac.atac_cap |= sc->sc_atac_capflags;
	sc->sc_atac.atac_pio_cap = 4;
	sc->sc_atac.atac_dma_cap = 2;
	sc->sc_atac.atac_udma_cap = 6;
	sc->sc_atac.atac_channels = sc->sc_chanarray;
	sc->sc_atac.atac_probe = ahci_probe_drive;
	sc->sc_atac.atac_bustype_ata = &ahci_ata_bustype;
	sc->sc_atac.atac_set_modes = ahci_setup_channel;
#if NATAPIBUS > 0
	sc->sc_atac.atac_atapibus_attach = ahci_atapibus_attach;
#endif

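	/*
	 * Allocate one contiguous DMA area shared by all ports: the command
	 * list headers for every channel come first, followed by the
	 * received FIS area for every channel.  The per-port pointers into
	 * this area are set up in the loop below.
	 */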
	dmasize =
	    (AHCI_RFIS_SIZE + AHCI_CMDH_SIZE) * sc->sc_atac.atac_nchannels;
	error = bus_dmamem_alloc(sc->sc_dmat, dmasize, PAGE_SIZE, 0,
	    &sc->sc_cmd_hdr_seg, 1, &sc->sc_cmd_hdr_nseg, BUS_DMA_NOWAIT);
	if (error) {
		aprint_error("%s: unable to allocate command header memory"
		    ", error=%d\n", AHCINAME(sc), error);
		return;
	}
	error = bus_dmamem_map(sc->sc_dmat, &sc->sc_cmd_hdr_seg,
	    sc->sc_cmd_hdr_nseg, dmasize,
	    &cmdhp, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
	if (error) {
		aprint_error("%s: unable to map command header memory"
		    ", error=%d\n", AHCINAME(sc), error);
		return;
	}
	error = bus_dmamap_create(sc->sc_dmat, dmasize, 1, dmasize, 0,
	    BUS_DMA_NOWAIT, &sc->sc_cmd_hdrd);
	if (error) {
		aprint_error("%s: unable to create command header map"
		    ", error=%d\n", AHCINAME(sc), error);
		return;
	}
	error = bus_dmamap_load(sc->sc_dmat, sc->sc_cmd_hdrd,
	    cmdhp, dmasize, NULL, BUS_DMA_NOWAIT);
	if (error) {
		aprint_error("%s: unable to load command header map"
		    ", error=%d\n", AHCINAME(sc), error);
		return;
	}
	sc->sc_cmd_hdr = cmdhp;
	memset(cmdhp, 0, dmasize);
	bus_dmamap_sync(sc->sc_dmat, sc->sc_cmd_hdrd, 0, dmasize,
	    BUS_DMASYNC_PREWRITE);

	ahci_enable_intrs(sc);

	if (sc->sc_ahci_ports == 0) {
		sc->sc_ahci_ports = AHCI_READ(sc, AHCI_PI);
		AHCIDEBUG_PRINT(("active ports %#x\n", sc->sc_ahci_ports),
		    DEBUG_PROBE);
	}
	for (i = 0, port = 0; i < AHCI_MAX_PORTS; i++) {
		if ((sc->sc_ahci_ports & (1U << i)) == 0)
			continue;
		if (port >= sc->sc_atac.atac_nchannels) {
			aprint_error("%s: more ports than announced\n",
			    AHCINAME(sc));
			break;
		}

		/* Optional intr establish per active port */
		if (sc->sc_intr_establish && sc->sc_intr_establish(sc, i) != 0){
			aprint_error("%s: intr establish hook failed\n",
			    AHCINAME(sc));
			break;
		}

		achp = &sc->sc_channels[i];
		chp = &achp->ata_channel;
		sc->sc_chanarray[i] = chp;
		chp->ch_channel = i;
		chp->ch_atac = &sc->sc_atac;
		chp->ch_queue = ata_queue_alloc(sc->sc_ncmds);
		if (chp->ch_queue == NULL) {
			aprint_error("%s port %d: can't allocate memory for "
			    "command queue", AHCINAME(sc), i);
			break;
		}
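		/*
		 * Allocate this port's command tables (one AHCI_CMDTBL_SIZE
		 * slot per supported command) and hook each one up to its
		 * command header below.
		 */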
		dmasize = AHCI_CMDTBL_SIZE * sc->sc_ncmds;
		error = bus_dmamem_alloc(sc->sc_dmat, dmasize, PAGE_SIZE, 0,
		    &achp->ahcic_cmd_tbl_seg, 1, &achp->ahcic_cmd_tbl_nseg,
		    BUS_DMA_NOWAIT);
		if (error) {
			aprint_error("%s: unable to allocate command table "
			    "memory, error=%d\n", AHCINAME(sc), error);
			break;
		}
		error = bus_dmamem_map(sc->sc_dmat, &achp->ahcic_cmd_tbl_seg,
		    achp->ahcic_cmd_tbl_nseg, dmasize,
		    &cmdtblp, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
		if (error) {
			aprint_error("%s: unable to map command table memory"
			    ", error=%d\n", AHCINAME(sc), error);
			break;
		}
		error = bus_dmamap_create(sc->sc_dmat, dmasize, 1, dmasize, 0,
		    BUS_DMA_NOWAIT, &achp->ahcic_cmd_tbld);
		if (error) {
			aprint_error("%s: unable to create command table map"
			    ", error=%d\n", AHCINAME(sc), error);
			break;
		}
		error = bus_dmamap_load(sc->sc_dmat, achp->ahcic_cmd_tbld,
		    cmdtblp, dmasize, NULL, BUS_DMA_NOWAIT);
		if (error) {
			aprint_error("%s: unable to load command table map"
			    ", error=%d\n", AHCINAME(sc), error);
			break;
		}
		memset(cmdtblp, 0, dmasize);
		bus_dmamap_sync(sc->sc_dmat, achp->ahcic_cmd_tbld, 0,
		    dmasize, BUS_DMASYNC_PREWRITE);
		achp->ahcic_cmdh = (struct ahci_cmd_header *)
		    ((char *)cmdhp + AHCI_CMDH_SIZE * port);
		achp->ahcic_bus_cmdh = sc->sc_cmd_hdrd->dm_segs[0].ds_addr +
		    AHCI_CMDH_SIZE * port;
		achp->ahcic_rfis = (struct ahci_r_fis *)
		    ((char *)cmdhp +
		    AHCI_CMDH_SIZE * sc->sc_atac.atac_nchannels +
		    AHCI_RFIS_SIZE * port);
		achp->ahcic_bus_rfis = sc->sc_cmd_hdrd->dm_segs[0].ds_addr +
		    AHCI_CMDH_SIZE * sc->sc_atac.atac_nchannels +
		    AHCI_RFIS_SIZE * port;
		AHCIDEBUG_PRINT(("port %d cmdh %p (0x%" PRIx64 ") "
		    "rfis %p (0x%" PRIx64 ")\n", i,
		    achp->ahcic_cmdh, (uint64_t)achp->ahcic_bus_cmdh,
		    achp->ahcic_rfis, (uint64_t)achp->ahcic_bus_rfis),
		    DEBUG_PROBE);

		for (j = 0; j < sc->sc_ncmds; j++) {
			achp->ahcic_cmd_tbl[j] = (struct ahci_cmd_tbl *)
			    ((char *)cmdtblp + AHCI_CMDTBL_SIZE * j);
			achp->ahcic_bus_cmd_tbl[j] =
			    achp->ahcic_cmd_tbld->dm_segs[0].ds_addr +
			    AHCI_CMDTBL_SIZE * j;
			achp->ahcic_cmdh[j].cmdh_cmdtba =
			    htole64(achp->ahcic_bus_cmd_tbl[j]);
			AHCIDEBUG_PRINT(("port %d/%d tbl %p (0x%" PRIx64 ")\n", i, j,
			    achp->ahcic_cmd_tbl[j],
			    (uint64_t)achp->ahcic_bus_cmd_tbl[j]), DEBUG_PROBE);
			/* The xfer DMA map */
			error = bus_dmamap_create(sc->sc_dmat, MAXPHYS,
			    AHCI_NPRD, 0x400000 /* 4MB */, 0,
			    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
			    &achp->ahcic_datad[j]);
			if (error) {
				aprint_error("%s: couldn't alloc xfer DMA map, "
				    "error=%d\n", AHCINAME(sc), error);
				goto end;
			}
		}
		ahci_setup_port(sc, i);
		if (bus_space_subregion(sc->sc_ahcit, sc->sc_ahcih,
		    AHCI_P_SSTS(i), 4, &achp->ahcic_sstatus) != 0) {
			aprint_error("%s: couldn't map port %d "
			    "sata_status regs\n", AHCINAME(sc), i);
			break;
		}
		if (bus_space_subregion(sc->sc_ahcit, sc->sc_ahcih,
		    AHCI_P_SCTL(i), 4, &achp->ahcic_scontrol) != 0) {
			aprint_error("%s: couldn't map port %d "
			    "sata_control regs\n", AHCINAME(sc), i);
			break;
		}
		if (bus_space_subregion(sc->sc_ahcit, sc->sc_ahcih,
		    AHCI_P_SERR(i), 4, &achp->ahcic_serror) != 0) {
			aprint_error("%s: couldn't map port %d "
			    "sata_error regs\n", AHCINAME(sc), i);
			break;
		}
		ata_channel_attach(chp);
		port++;
end:
		continue;
	}
}

void
ahci_childdetached(struct ahci_softc *sc, device_t child)
{
	struct ahci_channel *achp;
	struct ata_channel *chp;

	for (int i = 0; i < AHCI_MAX_PORTS; i++) {
		achp = &sc->sc_channels[i];
		chp = &achp->ata_channel;

		if ((sc->sc_ahci_ports & (1U << i)) == 0)
			continue;

		if (child == chp->atabus)
			chp->atabus = NULL;
	}
}

int
ahci_detach(struct ahci_softc *sc, int flags)
{
	struct atac_softc *atac;
	struct ahci_channel *achp;
	struct ata_channel *chp;
	struct scsipi_adapter *adapt;
	int i, j, port;
	int error;

	atac = &sc->sc_atac;
	adapt = &atac->atac_atapi_adapter._generic;

	for (i = 0, port = 0; i < AHCI_MAX_PORTS; i++) {
		achp = &sc->sc_channels[i];
		chp = &achp->ata_channel;

		if ((sc->sc_ahci_ports & (1U << i)) == 0)
			continue;
		if (port >= sc->sc_atac.atac_nchannels) {
			aprint_error("%s: more ports than announced\n",
			    AHCINAME(sc));
			break;
		}

		if (chp->atabus != NULL) {
			if ((error = config_detach(chp->atabus, flags)) != 0)
				return error;

			KASSERT(chp->atabus == NULL);
		}

		if (chp->ch_flags & ATACH_DETACHED)
			continue;

		for (j = 0; j < sc->sc_ncmds; j++)
			bus_dmamap_destroy(sc->sc_dmat, achp->ahcic_datad[j]);

		bus_dmamap_unload(sc->sc_dmat, achp->ahcic_cmd_tbld);
		bus_dmamap_destroy(sc->sc_dmat, achp->ahcic_cmd_tbld);
		bus_dmamem_unmap(sc->sc_dmat, achp->ahcic_cmd_tbl[0],
		    AHCI_CMDTBL_SIZE * sc->sc_ncmds);
		bus_dmamem_free(sc->sc_dmat, &achp->ahcic_cmd_tbl_seg,
		    achp->ahcic_cmd_tbl_nseg);

		ata_channel_detach(chp);
		port++;
	}

	bus_dmamap_unload(sc->sc_dmat, sc->sc_cmd_hdrd);
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cmd_hdrd);
	bus_dmamem_unmap(sc->sc_dmat, sc->sc_cmd_hdr,
	    (AHCI_RFIS_SIZE + AHCI_CMDH_SIZE) * sc->sc_atac.atac_nchannels);
	bus_dmamem_free(sc->sc_dmat, &sc->sc_cmd_hdr_seg, sc->sc_cmd_hdr_nseg);

	if (adapt->adapt_refcnt != 0)
		return EBUSY;

	return 0;
}

void
ahci_resume(struct ahci_softc *sc)
{
	ahci_reset(sc);
	ahci_setup_ports(sc);
	ahci_reprobe_drives(sc);
	ahci_enable_intrs(sc);
}

int
ahci_intr(void *v)
{
	struct ahci_softc *sc = v;
	uint32_t is, ports;
	int bit, r = 0;

	while ((is = AHCI_READ(sc, AHCI_IS))) {
		AHCIDEBUG_PRINT(("%s ahci_intr 0x%x\n", AHCINAME(sc), is),
		    DEBUG_INTR);
		r = 1;
		ports = is;
		while ((bit = ffs(ports)) != 0) {
			bit--;
			ahci_intr_port_common(&sc->sc_channels[bit].ata_channel);
			ports &= ~__BIT(bit);
		}
		AHCI_WRITE(sc, AHCI_IS, is);
	}

	return r;
}

int
ahci_intr_port(void *v)
{
	struct ahci_channel *achp = v;
	struct ata_channel *chp = &achp->ata_channel;
	struct ahci_softc *sc = (struct ahci_softc *)chp->ch_atac;
	int ret;

	ret = ahci_intr_port_common(chp);
	if (ret) {
		AHCI_WRITE(sc, AHCI_IS, 1U << chp->ch_channel);
	}

	return ret;
}

static int
ahci_intr_port_common(struct ata_channel *chp)
{
	struct ahci_softc *sc = (struct ahci_softc *)chp->ch_atac;
	uint32_t is, tfd, sact;
	struct ata_xfer *xfer;
	int slot = -1;
	bool recover = false;
	uint32_t aslots;

	is = AHCI_READ(sc, AHCI_P_IS(chp->ch_channel));
	AHCI_WRITE(sc, AHCI_P_IS(chp->ch_channel), is);

	AHCIDEBUG_PRINT(("ahci_intr_port_common %s port %d "
	    "is 0x%x CI 0x%x SACT 0x%x TFD 0x%x\n",
	    AHCINAME(sc),
	    chp->ch_channel, is,
	    AHCI_READ(sc, AHCI_P_CI(chp->ch_channel)),
	    AHCI_READ(sc, AHCI_P_SACT(chp->ch_channel)),
	    AHCI_READ(sc, AHCI_P_TFD(chp->ch_channel))),
	    DEBUG_INTR);

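	/*
	 * Snapshot the still-busy slot mask: CI for non-NCQ commands,
	 * SACT for NCQ.  Slots that are active in the queue but no longer
	 * set here have completed.
	 */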
	if ((chp->ch_flags & ATACH_NCQ) == 0) {
		/* Non-NCQ operation */
		sact = AHCI_READ(sc, AHCI_P_CI(chp->ch_channel));
	} else {
		/* NCQ operation */
		sact = AHCI_READ(sc, AHCI_P_SACT(chp->ch_channel));
	}

	/* Handle errors */
	if (is & (AHCI_P_IX_TFES | AHCI_P_IX_HBFS | AHCI_P_IX_HBDS |
	    AHCI_P_IX_IFS | AHCI_P_IX_OFS | AHCI_P_IX_UFS)) {
		/* Fatal errors */
		if (is & AHCI_P_IX_TFES) {
			tfd = AHCI_READ(sc, AHCI_P_TFD(chp->ch_channel));

			if ((chp->ch_flags & ATACH_NCQ) == 0) {
				/* Slot valid only for Non-NCQ operation */
				slot = (AHCI_READ(sc,
				    AHCI_P_CMD(chp->ch_channel))
				    & AHCI_P_CMD_CCS_MASK)
				    >> AHCI_P_CMD_CCS_SHIFT;
			}

			AHCIDEBUG_PRINT((
			    "%s port %d: TFE: sact 0x%x is 0x%x tfd 0x%x\n",
			    AHCINAME(sc), chp->ch_channel, sact, is, tfd),
			    DEBUG_INTR);
		} else {
			/* mark an error, and set BSY */
			tfd = (WDCE_ABRT << AHCI_P_TFD_ERR_SHIFT) |
			    WDCS_ERR | WDCS_BSY;
		}

		if (is & AHCI_P_IX_IFS) {
			AHCIDEBUG_PRINT(("%s port %d: SERR 0x%x\n",
			    AHCINAME(sc), chp->ch_channel,
			    AHCI_READ(sc, AHCI_P_SERR(chp->ch_channel))),
			    DEBUG_INTR);
		}

		if (!ISSET(chp->ch_flags, ATACH_RECOVERING))
			recover = true;
	} else if (is & (AHCI_P_IX_DHRS|AHCI_P_IX_SDBS)) {
		tfd = AHCI_READ(sc, AHCI_P_TFD(chp->ch_channel));

		/* D2H Register FIS or Set Device Bits */
		if ((tfd & WDCS_ERR) != 0) {
			if (!ISSET(chp->ch_flags, ATACH_RECOVERING))
				recover = true;

			AHCIDEBUG_PRINT(("%s port %d: transfer aborted 0x%x\n",
			    AHCINAME(sc), chp->ch_channel, tfd), DEBUG_INTR);
		}
	} else {
		tfd = 0;
	}

	if (__predict_false(recover))
		ata_channel_freeze(chp);

	aslots = ata_queue_active(chp);

	if (slot >= 0) {
		if ((aslots & __BIT(slot)) != 0 &&
		    (sact & __BIT(slot)) == 0) {
			xfer = ata_queue_hwslot_to_xfer(chp, slot);
			xfer->ops->c_intr(chp, xfer, tfd);
		}
	} else {
		/*
		 * For NCQ, HBA halts processing when error is notified,
		 * and any further D2H FISes are ignored until the error
		 * condition is cleared. Hence if a command is inactive,
		 * it means it actually already finished successfully.
		 * Note: active slots can change as c_intr() callback
		 * can activate another command(s), so must only process
		 * commands active before we start processing.
		 */

		for (slot = 0; slot < sc->sc_ncmds; slot++) {
			if ((aslots & __BIT(slot)) != 0 &&
			    (sact & __BIT(slot)) == 0) {
				xfer = ata_queue_hwslot_to_xfer(chp, slot);
				xfer->ops->c_intr(chp, xfer, tfd);
			}
		}
	}

	if (__predict_false(recover)) {
		ata_channel_lock(chp);
		ata_channel_thaw_locked(chp);
		ata_thread_run(chp, 0, ATACH_TH_RECOVERY, tfd);
		ata_channel_unlock(chp);
	}

	return 1;
}

static void
ahci_reset_drive(struct ata_drive_datas *drvp, int flags, uint32_t *sigp)
{
	struct ata_channel *chp = drvp->chnl_softc;
	struct ahci_softc *sc = (struct ahci_softc *)chp->ch_atac;
	uint8_t c_slot;

	ata_channel_lock_owned(chp);

	/* get a slot for running the command on */
	if (!ata_queue_alloc_slot(chp, &c_slot, ATA_MAX_OPENINGS)) {
		panic("%s: %s: failed to get xfer for reset, port %d\n",
		    device_xname(sc->sc_atac.atac_dev),
		    __func__, chp->ch_channel);
		/* NOTREACHED */
	}

	AHCI_WRITE(sc, AHCI_GHC,
	    AHCI_READ(sc, AHCI_GHC) & ~AHCI_GHC_IE);
	ahci_channel_stop(sc, chp, flags);
	ahci_do_reset_drive(chp, drvp->drive, flags, sigp, c_slot);
	AHCI_WRITE(sc, AHCI_GHC, AHCI_READ(sc, AHCI_GHC) | AHCI_GHC_IE);

	ata_queue_free_slot(chp, c_slot);
}

/* return error code from ata_bio */
static int
ahci_exec_fis(struct ata_channel *chp, int timeout, int flags, int slot)
{
	struct ahci_channel *achp = (struct ahci_channel *)chp;
	struct ahci_softc *sc = (struct ahci_softc *)chp->ch_atac;
	int i;
	uint32_t is;

	/*
	 * Base timeout is specified in ms. Delay for 10ms
	 * on each round.
	 */
	timeout = timeout / 10;

	AHCI_CMDTBL_SYNC(sc, achp, slot, BUS_DMASYNC_PREWRITE);
	AHCI_CMDH_SYNC(sc, achp, slot,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	/* start command */
	AHCI_WRITE(sc, AHCI_P_CI(chp->ch_channel), 1U << slot);
	for (i = 0; i < timeout; i++) {
		if ((AHCI_READ(sc, AHCI_P_CI(chp->ch_channel)) & (1U << slot)) ==
		    0)
			return 0;
		is = AHCI_READ(sc, AHCI_P_IS(chp->ch_channel));
		if (is & (AHCI_P_IX_TFES | AHCI_P_IX_HBFS | AHCI_P_IX_HBDS |
		    AHCI_P_IX_IFS |
		    AHCI_P_IX_OFS | AHCI_P_IX_UFS)) {
			if ((is & (AHCI_P_IX_DHRS|AHCI_P_IX_TFES)) ==
			    (AHCI_P_IX_DHRS|AHCI_P_IX_TFES)) {
				/*
				 * we got the D2H FIS anyway,
				 * assume sig is valid.
				 * channel is restarted later
				 */
				return ERROR;
			}
			aprint_debug("%s port %d: error 0x%x sending FIS\n",
			    AHCINAME(sc), chp->ch_channel, is);
			return ERR_DF;
		}
		ata_delay(chp, 10, "ahcifis", flags);
	}

	aprint_debug("%s port %d: timeout sending FIS\n",
	    AHCINAME(sc), chp->ch_channel);
	return TIMEOUT;
}

static int
ahci_do_reset_drive(struct ata_channel *chp, int drive, int flags,
    uint32_t *sigp, uint8_t c_slot)
{
	struct ahci_channel *achp = (struct ahci_channel *)chp;
	struct ahci_softc *sc = (struct ahci_softc *)chp->ch_atac;
	struct ahci_cmd_tbl *cmd_tbl;
	struct ahci_cmd_header *cmd_h;
	int i, error = 0;
	uint32_t sig, cmd;
	int noclo_retry = 0, retry;

	ata_channel_lock_owned(chp);

again:
	/* clear port interrupt register */
	AHCI_WRITE(sc, AHCI_P_IS(chp->ch_channel), 0xffffffff);
	/* clear SErrors and start operations */
	if ((sc->sc_ahci_cap & AHCI_CAP_CLO) == AHCI_CAP_CLO) {
		/*
		 * issue a command list override to clear BSY.
		 * This is needed if there's a PMP with no drive
		 * on port 0
		 */
		ahci_channel_start(sc, chp, flags, 1);
	} else {
		/* Can't handle command still running without CLO */
		cmd = AHCI_READ(sc, AHCI_P_CMD(chp->ch_channel));
		if ((cmd & AHCI_P_CMD_CR) != 0) {
			ahci_channel_stop(sc, chp, flags);
			cmd = AHCI_READ(sc, AHCI_P_CMD(chp->ch_channel));
			if ((cmd & AHCI_P_CMD_CR) != 0) {
				aprint_error("%s port %d: DMA engine busy "
				    "for drive %d\n", AHCINAME(sc),
				    chp->ch_channel, drive);
				error = EBUSY;
				goto end;
			}
		}

		KASSERT((AHCI_READ(sc, AHCI_P_CMD(chp->ch_channel)) & AHCI_P_CMD_CR) == 0);

		ahci_channel_start(sc, chp, flags, 0);
	}
	if (drive > 0) {
		KASSERT(sc->sc_ahci_cap & AHCI_CAP_SPM);
	}

	/* polled command, assume interrupts are disabled */

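	/*
	 * SATA software reset: send a Register H2D control FIS with SRST
	 * set, wait, then send a second FIS with SRST cleared (see the
	 * retry loop below).
	 */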
	cmd_h = &achp->ahcic_cmdh[c_slot];
	cmd_tbl = achp->ahcic_cmd_tbl[c_slot];
	cmd_h->cmdh_flags = htole16(AHCI_CMDH_F_RST | AHCI_CMDH_F_CBSY |
	    RHD_FISLEN / 4 | (drive << AHCI_CMDH_F_PMP_SHIFT));
	cmd_h->cmdh_prdtl = 0;
	cmd_h->cmdh_prdbc = 0;
	memset(cmd_tbl->cmdt_cfis, 0, 64);
	cmd_tbl->cmdt_cfis[fis_type] = RHD_FISTYPE;
	cmd_tbl->cmdt_cfis[rhd_c] = drive;
	cmd_tbl->cmdt_cfis[rhd_control] = WDCTL_RST | WDCTL_4BIT;
	switch (ahci_exec_fis(chp, 100, flags, c_slot)) {
	case ERR_DF:
	case TIMEOUT:
		/*
		 * without CLO we can't make sure a software reset will
		 * succeed, as the drive may still have BSY or DRQ set.
		 * in this case, reset the whole channel and retry the
		 * drive reset. The channel reset should clear BSY and DRQ
		 */
		if ((sc->sc_ahci_cap & AHCI_CAP_CLO) == 0 && noclo_retry == 0) {
			noclo_retry++;
			ahci_reset_channel(chp, flags);
			goto again;
		}
		aprint_error("%s port %d: setting WDCTL_RST failed "
		    "for drive %d\n", AHCINAME(sc), chp->ch_channel, drive);
		error = EBUSY;
		goto end;
	default:
		break;
	}

	/*
	 * SATA specification has toggle period for SRST bit of 5 usec. Some
	 * controllers fail to process the SRST clear operation unless
	 * we wait for at least this period between the set and clear commands.
	 */
	ata_delay(chp, 10, "ahcirstw", flags);

	/*
	 * Try to clear WDCTL_RST a few times before giving up.
	 */
	for (error = EBUSY, retry = 0; error != 0 && retry < 5; retry++) {
		cmd_h->cmdh_flags = htole16(RHD_FISLEN / 4 |
		    (drive << AHCI_CMDH_F_PMP_SHIFT));
		cmd_h->cmdh_prdbc = 0;
		memset(cmd_tbl->cmdt_cfis, 0, 64);
		cmd_tbl->cmdt_cfis[fis_type] = RHD_FISTYPE;
		cmd_tbl->cmdt_cfis[rhd_c] = drive;
		cmd_tbl->cmdt_cfis[rhd_control] = WDCTL_4BIT;
		switch (ahci_exec_fis(chp, 310, flags, c_slot)) {
		case ERR_DF:
		case TIMEOUT:
			error = EBUSY;
			break;
		default:
			error = 0;
			break;
		}
		if (error == 0) {
			break;
		}
	}
	if (error == EBUSY) {
		aprint_error("%s port %d: clearing WDCTL_RST failed "
		    "for drive %d\n", AHCINAME(sc), chp->ch_channel, drive);
		goto end;
	}

	/*
	 * wait 31s for BSY to clear
	 * This should not be needed, but some controllers clear the
	 * command slot before receiving the D2H FIS ...
	 */
	for (i = 0; i < AHCI_RST_WAIT; i++) {
		sig = AHCI_READ(sc, AHCI_P_TFD(chp->ch_channel));
		if ((__SHIFTOUT(sig, AHCI_P_TFD_ST) & WDCS_BSY) == 0)
			break;
		ata_delay(chp, 10, "ahcid2h", flags);
	}
	if (i == AHCI_RST_WAIT) {
		aprint_error("%s: BSY never cleared, TD 0x%x\n",
		    AHCINAME(sc), sig);
		goto end;
	}
	AHCIDEBUG_PRINT(("%s: BSY took %d ms\n", AHCINAME(sc), i * 10),
	    DEBUG_PROBE);
	sig = AHCI_READ(sc, AHCI_P_SIG(chp->ch_channel));
	if (sigp)
		*sigp = sig;
	AHCIDEBUG_PRINT(("%s: port %d: sig=0x%x CMD=0x%x\n",
	    AHCINAME(sc), chp->ch_channel, sig,
	    AHCI_READ(sc, AHCI_P_CMD(chp->ch_channel))), DEBUG_PROBE);
end:
	ahci_channel_stop(sc, chp, flags);
	ata_delay(chp, 500, "ahcirst", flags);
	/* clear port interrupt register */
	AHCI_WRITE(sc, AHCI_P_IS(chp->ch_channel), 0xffffffff);
	ahci_channel_start(sc, chp, flags,
	    (sc->sc_ahci_cap & AHCI_CAP_CLO) ? 1 : 0);
	return error;
}

static void
ahci_reset_channel(struct ata_channel *chp, int flags)
{
	struct ahci_softc *sc = (struct ahci_softc *)chp->ch_atac;
	struct ahci_channel *achp = (struct ahci_channel *)chp;
	int i, tfd;

	ata_channel_lock_owned(chp);

	ahci_channel_stop(sc, chp, flags);
	if (sata_reset_interface(chp, sc->sc_ahcit, achp->ahcic_scontrol,
	    achp->ahcic_sstatus, flags) != SStatus_DET_DEV) {
		printf("%s: port %d reset failed\n", AHCINAME(sc), chp->ch_channel);
		/* XXX and then ? */
	}
	ata_kill_active(chp, KILL_RESET, flags);
	ata_delay(chp, 500, "ahcirst", flags);
	/* clear port interrupt register */
	AHCI_WRITE(sc, AHCI_P_IS(chp->ch_channel), 0xffffffff);
	/* clear SErrors and start operations */
	ahci_channel_start(sc, chp, flags,
	    (sc->sc_ahci_cap & AHCI_CAP_CLO) ? 1 : 0);
	/* wait 31s for BSY to clear */
	for (i = 0; i < AHCI_RST_WAIT; i++) {
		tfd = AHCI_READ(sc, AHCI_P_TFD(chp->ch_channel));
		if ((AHCI_TFD_ST(tfd) & WDCS_BSY) == 0)
			break;
		ata_delay(chp, 10, "ahcid2h", flags);
	}
	if ((AHCI_TFD_ST(tfd) & WDCS_BSY) != 0)
		aprint_error("%s: BSY never cleared, TD 0x%x\n",
		    AHCINAME(sc), tfd);
	AHCIDEBUG_PRINT(("%s: BSY took %d ms\n", AHCINAME(sc), i * 10),
	    DEBUG_PROBE);
	/* clear port interrupt register */
	AHCI_WRITE(sc, AHCI_P_IS(chp->ch_channel), 0xffffffff);

	return;
}

static int
ahci_ata_addref(struct ata_drive_datas *drvp)
{
	return 0;
}

static void
ahci_ata_delref(struct ata_drive_datas *drvp)
{
	return;
}

static void
ahci_killpending(struct ata_drive_datas *drvp)
{
	return;
}

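/*
 * Probe a port at attach/resume time: bring the interface and device up,
 * reset the PHY, soft-reset the drive (or the PMP control port when port
 * multipliers are supported), read the signature and enable the per-port
 * interrupts used during normal operation.
 */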
static void
ahci_probe_drive(struct ata_channel *chp)
{
	struct ahci_softc *sc = (struct ahci_softc *)chp->ch_atac;
	struct ahci_channel *achp = (struct ahci_channel *)chp;
	uint32_t sig;
	uint8_t c_slot;
	int error;

	ata_channel_lock(chp);

	/* get a slot for running the command on */
	if (!ata_queue_alloc_slot(chp, &c_slot, ATA_MAX_OPENINGS)) {
		aprint_error_dev(sc->sc_atac.atac_dev,
		    "%s: failed to get xfer port %d\n",
		    __func__, chp->ch_channel);
		ata_channel_unlock(chp);
		return;
	}

	/* bring interface up, accept FISs, power up and spin up device */
	AHCI_WRITE(sc, AHCI_P_CMD(chp->ch_channel),
	    AHCI_P_CMD_ICC_AC | AHCI_P_CMD_FRE |
	    AHCI_P_CMD_POD | AHCI_P_CMD_SUD);
	/* reset the PHY and bring online */
	switch (sata_reset_interface(chp, sc->sc_ahcit, achp->ahcic_scontrol,
	    achp->ahcic_sstatus, AT_WAIT)) {
	case SStatus_DET_DEV:
		ata_delay(chp, 500, "ahcidv", AT_WAIT);

		/* Initial value, used in case the soft reset fails */
		sig = AHCI_READ(sc, AHCI_P_SIG(chp->ch_channel));

		if (sc->sc_ahci_cap & AHCI_CAP_SPM) {
			error = ahci_do_reset_drive(chp, PMP_PORT_CTL, AT_WAIT,
			    &sig, c_slot);

			/* If probe for PMP failed, just fallback to drive 0 */
			if (error) {
				aprint_error("%s port %d: drive %d reset "
				    "failed, disabling PMP\n",
				    AHCINAME(sc), chp->ch_channel,
				    PMP_PORT_CTL);

				sc->sc_ahci_cap &= ~AHCI_CAP_SPM;
				ahci_reset_channel(chp, AT_WAIT);
			}
		} else {
			ahci_do_reset_drive(chp, 0, AT_WAIT, &sig, c_slot);
		}
		sata_interpret_sig(chp, 0, sig);
		/* if we have a PMP attached, inform the controller */
		if (chp->ch_ndrives > PMP_PORT_CTL &&
		    chp->ch_drive[PMP_PORT_CTL].drive_type == ATA_DRIVET_PM) {
			AHCI_WRITE(sc, AHCI_P_CMD(chp->ch_channel),
			    AHCI_READ(sc, AHCI_P_CMD(chp->ch_channel)) |
			    AHCI_P_CMD_PMA);
		}
		/* clear port interrupt register */
		AHCI_WRITE(sc, AHCI_P_IS(chp->ch_channel), 0xffffffff);

		/* and enable interrupts */
		AHCI_WRITE(sc, AHCI_P_IE(chp->ch_channel),
		    AHCI_P_IX_TFES | AHCI_P_IX_HBFS | AHCI_P_IX_HBDS |
		    AHCI_P_IX_IFS |
		    AHCI_P_IX_OFS | AHCI_P_IX_DPS | AHCI_P_IX_UFS |
		    AHCI_P_IX_PSS | AHCI_P_IX_DHRS | AHCI_P_IX_SDBS);
		/* wait 500ms before actually starting operations */
		ata_delay(chp, 500, "ahciprb", AT_WAIT);
		break;

	default:
		break;
	}

	ata_queue_free_slot(chp, c_slot);

	ata_channel_unlock(chp);
}

static void
ahci_setup_channel(struct ata_channel *chp)
{
	return;
}

static const struct ata_xfer_ops ahci_cmd_xfer_ops = {
	.c_start = ahci_cmd_start,
	.c_poll = ahci_cmd_poll,
	.c_abort = ahci_cmd_abort,
	.c_intr = ahci_cmd_complete,
	.c_kill_xfer = ahci_cmd_kill_xfer,
};

static void
ahci_exec_command(struct ata_drive_datas *drvp, struct ata_xfer *xfer)
{
	struct ata_channel *chp = drvp->chnl_softc;
	struct ata_command *ata_c = &xfer->c_ata_c;

	AHCIDEBUG_PRINT(("ahci_exec_command port %d CI 0x%x\n",
	    chp->ch_channel,
	    AHCI_READ(AHCI_CH2SC(chp), AHCI_P_CI(chp->ch_channel))),
	    DEBUG_XFERS);
	if (ata_c->flags & AT_POLL)
		xfer->c_flags |= C_POLL;
	if (ata_c->flags & AT_WAIT)
		xfer->c_flags |= C_WAIT;
	xfer->c_drive = drvp->drive;
	xfer->c_databuf = ata_c->data;
	xfer->c_bcount = ata_c->bcount;
	xfer->ops = &ahci_cmd_xfer_ops;

	ata_exec_xfer(chp, xfer);
}

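/*
 * Start an ATA command xfer: build the command FIS in the slot's command
 * table, set up the data DMA map, fill in the command header and issue
 * the slot through the port CI register.
 */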
static int
ahci_cmd_start(struct ata_channel *chp, struct ata_xfer *xfer)
{
	struct ahci_softc *sc = AHCI_CH2SC(chp);
	struct ahci_channel *achp = (struct ahci_channel *)chp;
	struct ata_command *ata_c = &xfer->c_ata_c;
	int slot = xfer->c_slot;
	struct ahci_cmd_tbl *cmd_tbl;
	struct ahci_cmd_header *cmd_h;

	AHCIDEBUG_PRINT(("ahci_cmd_start CI 0x%x timo %d\n slot %d",
	    AHCI_READ(sc, AHCI_P_CI(chp->ch_channel)),
	    ata_c->timeout, slot),
	    DEBUG_XFERS);

	ata_channel_lock_owned(chp);

	cmd_tbl = achp->ahcic_cmd_tbl[slot];
	AHCIDEBUG_PRINT(("%s port %d tbl %p\n", AHCINAME(sc), chp->ch_channel,
	    cmd_tbl), DEBUG_XFERS);

	satafis_rhd_construct_cmd(ata_c, cmd_tbl->cmdt_cfis);
	cmd_tbl->cmdt_cfis[rhd_c] |= xfer->c_drive;

	cmd_h = &achp->ahcic_cmdh[slot];
	AHCIDEBUG_PRINT(("%s port %d header %p\n", AHCINAME(sc),
	    chp->ch_channel, cmd_h), DEBUG_XFERS);
	if (ahci_dma_setup(chp, slot,
	    (ata_c->flags & (AT_READ|AT_WRITE) && ata_c->bcount > 0) ?
	    ata_c->data : NULL,
	    ata_c->bcount,
	    (ata_c->flags & AT_READ) ? BUS_DMA_READ : BUS_DMA_WRITE)) {
		ata_c->flags |= AT_DF;
		return ATASTART_ABORT;
	}
	cmd_h->cmdh_flags = htole16(
	    ((ata_c->flags & AT_WRITE) ? AHCI_CMDH_F_WR : 0) |
	    RHD_FISLEN / 4 | (xfer->c_drive << AHCI_CMDH_F_PMP_SHIFT));
	cmd_h->cmdh_prdbc = 0;
	AHCI_CMDH_SYNC(sc, achp, slot,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	if (ata_c->flags & AT_POLL) {
		/* polled command, disable interrupts */
		AHCI_WRITE(sc, AHCI_GHC,
		    AHCI_READ(sc, AHCI_GHC) & ~AHCI_GHC_IE);
	}
	/* start command */
	AHCI_WRITE(sc, AHCI_P_CI(chp->ch_channel), 1U << slot);

	if ((ata_c->flags & AT_POLL) == 0) {
		callout_reset(&chp->c_timo_callout, mstohz(ata_c->timeout),
		    ata_timeout, chp);
		return ATASTART_STARTED;
	} else
		return ATASTART_POLL;
}

static void
ahci_cmd_poll(struct ata_channel *chp, struct ata_xfer *xfer)
{
	struct ahci_softc *sc = AHCI_CH2SC(chp);
	struct ahci_channel *achp = (struct ahci_channel *)chp;

	ata_channel_lock(chp);

	/*
	 * Polled command.
	 */
	for (int i = 0; i < xfer->c_ata_c.timeout / 10; i++) {
		if (xfer->c_ata_c.flags & AT_DONE)
			break;
		ata_channel_unlock(chp);
		ahci_intr_port(achp);
		ata_channel_lock(chp);
		ata_delay(chp, 10, "ahcipl", xfer->c_ata_c.flags);
	}
	AHCIDEBUG_PRINT(("%s port %d poll end GHC 0x%x IS 0x%x list 0x%x%x fis 0x%x%x CMD 0x%x CI 0x%x\n", AHCINAME(sc), chp->ch_channel,
	    AHCI_READ(sc, AHCI_GHC), AHCI_READ(sc, AHCI_IS),
	    AHCI_READ(sc, AHCI_P_CLBU(chp->ch_channel)),
	    AHCI_READ(sc, AHCI_P_CLB(chp->ch_channel)),
	    AHCI_READ(sc, AHCI_P_FBU(chp->ch_channel)),
	    AHCI_READ(sc, AHCI_P_FB(chp->ch_channel)),
	    AHCI_READ(sc, AHCI_P_CMD(chp->ch_channel)),
	    AHCI_READ(sc, AHCI_P_CI(chp->ch_channel))),
	    DEBUG_XFERS);

	ata_channel_unlock(chp);

	if ((xfer->c_ata_c.flags & AT_DONE) == 0) {
		xfer->c_ata_c.flags |= AT_TIMEOU;
		xfer->ops->c_intr(chp, xfer, 0);
	}
	/* reenable interrupts */
	AHCI_WRITE(sc, AHCI_GHC, AHCI_READ(sc, AHCI_GHC) | AHCI_GHC_IE);
}

static void
ahci_cmd_abort(struct ata_channel *chp, struct ata_xfer *xfer)
{
	ahci_cmd_complete(chp, xfer, 0);
}

static void
ahci_cmd_kill_xfer(struct ata_channel *chp, struct ata_xfer *xfer, int reason)
{
	struct ata_command *ata_c = &xfer->c_ata_c;
	bool deactivate = true;

	AHCIDEBUG_PRINT(("ahci_cmd_kill_xfer port %d\n", chp->ch_channel),
	    DEBUG_FUNCS);

	switch (reason) {
	case KILL_GONE_INACTIVE:
		deactivate = false;
		/* FALLTHROUGH */
	case KILL_GONE:
		ata_c->flags |= AT_GONE;
		break;
	case KILL_RESET:
		ata_c->flags |= AT_RESET;
		break;
	case KILL_REQUEUE:
		panic("%s: not supposed to be requeued\n", __func__);
		break;
	default:
		printf("ahci_cmd_kill_xfer: unknown reason %d\n", reason);
		panic("ahci_cmd_kill_xfer");
	}

	ahci_cmd_done_end(chp, xfer);

	if (deactivate)
		ata_deactivate_xfer(chp, xfer);
}

static int
ahci_cmd_complete(struct ata_channel *chp, struct ata_xfer *xfer, int tfd)
{
	struct ata_command *ata_c = &xfer->c_ata_c;
	struct ahci_channel *achp = (struct ahci_channel *)chp;
	struct ahci_softc *sc = AHCI_CH2SC(chp);

	AHCIDEBUG_PRINT(("ahci_cmd_complete port %d CMD 0x%x CI 0x%x\n",
	    chp->ch_channel,
	    AHCI_READ(AHCI_CH2SC(chp), AHCI_P_CMD(chp->ch_channel)),
	    AHCI_READ(AHCI_CH2SC(chp), AHCI_P_CI(chp->ch_channel))),
	    DEBUG_FUNCS);

	if (ata_waitdrain_xfer_check(chp, xfer))
		return 0;

	if (xfer->c_flags & C_TIMEOU) {
		ata_c->flags |= AT_TIMEOU;
	}

	if (AHCI_TFD_ST(tfd) & WDCS_BSY) {
		ata_c->flags |= AT_TIMEOU;
	} else if (AHCI_TFD_ST(tfd) & WDCS_ERR) {
		ata_c->r_error = AHCI_TFD_ERR(tfd);
		ata_c->flags |= AT_ERROR;
	}

	if (ata_c->flags & AT_READREG) {
		AHCI_RFIS_SYNC(sc, achp, BUS_DMASYNC_POSTREAD);
		satafis_rdh_cmd_readreg(ata_c, achp->ahcic_rfis->rfis_rfis);
	}

	ahci_cmd_done(chp, xfer);

	ata_deactivate_xfer(chp, xfer);

	if ((ata_c->flags & (AT_TIMEOU|AT_ERROR)) == 0)
		atastart(chp);

	return 0;
}

static void
ahci_cmd_done(struct ata_channel *chp, struct ata_xfer *xfer)
{
	struct ahci_softc *sc = (struct ahci_softc *)chp->ch_atac;
	struct ahci_channel *achp = (struct ahci_channel *)chp;
	struct ata_command *ata_c = &xfer->c_ata_c;
	uint16_t *idwordbuf;
	int i;

	AHCIDEBUG_PRINT(("ahci_cmd_done port %d flags %#x/%#x\n",
	    chp->ch_channel, xfer->c_flags, ata_c->flags), DEBUG_FUNCS);

	if (ata_c->flags & (AT_READ|AT_WRITE) && ata_c->bcount > 0) {
		bus_dmamap_t map = achp->ahcic_datad[xfer->c_slot];
		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    (ata_c->flags & AT_READ) ? BUS_DMASYNC_POSTREAD :
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, map);
	}

	AHCI_CMDH_SYNC(sc, achp, xfer->c_slot,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	/* ata(4) expects IDENTIFY data to be in host endianness */
	if (ata_c->r_command == WDCC_IDENTIFY ||
	    ata_c->r_command == ATAPI_IDENTIFY_DEVICE) {
		idwordbuf = xfer->c_databuf;
		for (i = 0; i < (xfer->c_bcount / sizeof(*idwordbuf)); i++) {
			idwordbuf[i] = le16toh(idwordbuf[i]);
		}
	}

	if (achp->ahcic_cmdh[xfer->c_slot].cmdh_prdbc)
		ata_c->flags |= AT_XFDONE;

	ahci_cmd_done_end(chp, xfer);
}

static void
ahci_cmd_done_end(struct ata_channel *chp, struct ata_xfer *xfer)
{
	struct ata_command *ata_c = &xfer->c_ata_c;

	ata_c->flags |= AT_DONE;
}

static const struct ata_xfer_ops ahci_bio_xfer_ops = {
	.c_start = ahci_bio_start,
	.c_poll = ahci_bio_poll,
	.c_abort = ahci_bio_abort,
	.c_intr = ahci_bio_complete,
	.c_kill_xfer = ahci_bio_kill_xfer,
};

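/*
 * Queue a block I/O transfer; the command itself is issued from
 * ahci_bio_start() once the queue hands the xfer a free slot.
 */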
static void
ahci_ata_bio(struct ata_drive_datas *drvp, struct ata_xfer *xfer)
{
	struct ata_channel *chp = drvp->chnl_softc;
	struct ata_bio *ata_bio = &xfer->c_bio;

	AHCIDEBUG_PRINT(("ahci_ata_bio port %d CI 0x%x\n",
	    chp->ch_channel,
	    AHCI_READ(AHCI_CH2SC(chp), AHCI_P_CI(chp->ch_channel))),
	    DEBUG_XFERS);
	if (ata_bio->flags & ATA_POLL)
		xfer->c_flags |= C_POLL;
	xfer->c_drive = drvp->drive;
	xfer->c_databuf = ata_bio->databuf;
	xfer->c_bcount = ata_bio->bcount;
	xfer->ops = &ahci_bio_xfer_ops;
	ata_exec_xfer(chp, xfer);
}

static int
ahci_bio_start(struct ata_channel *chp, struct ata_xfer *xfer)
{
	struct ahci_softc *sc = (struct ahci_softc *)chp->ch_atac;
	struct ahci_channel *achp = (struct ahci_channel *)chp;
	struct ata_bio *ata_bio = &xfer->c_bio;
	struct ahci_cmd_tbl *cmd_tbl;
	struct ahci_cmd_header *cmd_h;

	AHCIDEBUG_PRINT(("ahci_bio_start CI 0x%x\n",
	    AHCI_READ(sc, AHCI_P_CI(chp->ch_channel))), DEBUG_XFERS);

	ata_channel_lock_owned(chp);

	cmd_tbl = achp->ahcic_cmd_tbl[xfer->c_slot];
	AHCIDEBUG_PRINT(("%s port %d tbl %p\n", AHCINAME(sc), chp->ch_channel,
	    cmd_tbl), DEBUG_XFERS);

	satafis_rhd_construct_bio(xfer, cmd_tbl->cmdt_cfis);
	cmd_tbl->cmdt_cfis[rhd_c] |= xfer->c_drive;

	cmd_h = &achp->ahcic_cmdh[xfer->c_slot];
	AHCIDEBUG_PRINT(("%s port %d header %p\n", AHCINAME(sc),
	    chp->ch_channel, cmd_h), DEBUG_XFERS);
	if (ahci_dma_setup(chp, xfer->c_slot, ata_bio->databuf, ata_bio->bcount,
	    (ata_bio->flags & ATA_READ) ? BUS_DMA_READ : BUS_DMA_WRITE)) {
		ata_bio->error = ERR_DMA;
		ata_bio->r_error = 0;
		return ATASTART_ABORT;
	}
	cmd_h->cmdh_flags = htole16(
	    ((ata_bio->flags & ATA_READ) ? 0 : AHCI_CMDH_F_WR) |
	    RHD_FISLEN / 4 | (xfer->c_drive << AHCI_CMDH_F_PMP_SHIFT));
	cmd_h->cmdh_prdbc = 0;
	AHCI_CMDH_SYNC(sc, achp, xfer->c_slot,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	if (xfer->c_flags & C_POLL) {
		/* polled command, disable interrupts */
		AHCI_WRITE(sc, AHCI_GHC,
		    AHCI_READ(sc, AHCI_GHC) & ~AHCI_GHC_IE);
	}
	if (xfer->c_flags & C_NCQ)
		AHCI_WRITE(sc, AHCI_P_SACT(chp->ch_channel), 1U << xfer->c_slot);
	/* start command */
	AHCI_WRITE(sc, AHCI_P_CI(chp->ch_channel), 1U << xfer->c_slot);

	if ((xfer->c_flags & C_POLL) == 0) {
		callout_reset(&chp->c_timo_callout, mstohz(ATA_DELAY),
		    ata_timeout, chp);
		return ATASTART_STARTED;
	} else
		return ATASTART_POLL;
}

static void
ahci_bio_poll(struct ata_channel *chp, struct ata_xfer *xfer)
{
	struct ahci_softc *sc = (struct ahci_softc *)chp->ch_atac;
	struct ahci_channel *achp = (struct ahci_channel *)chp;

	/*
	 * Polled command.
	 */
	for (int i = 0; i < ATA_DELAY * 10; i++) {
		if (xfer->c_bio.flags & ATA_ITSDONE)
			break;
		ahci_intr_port(achp);
		delay(100);
	}
	AHCIDEBUG_PRINT(("%s port %d poll end GHC 0x%x IS 0x%x list 0x%x%x fis 0x%x%x CMD 0x%x CI 0x%x\n", AHCINAME(sc), chp->ch_channel,
	    AHCI_READ(sc, AHCI_GHC), AHCI_READ(sc, AHCI_IS),
	    AHCI_READ(sc, AHCI_P_CLBU(chp->ch_channel)),
	    AHCI_READ(sc, AHCI_P_CLB(chp->ch_channel)),
	    AHCI_READ(sc, AHCI_P_FBU(chp->ch_channel)),
	    AHCI_READ(sc, AHCI_P_FB(chp->ch_channel)),
	    AHCI_READ(sc, AHCI_P_CMD(chp->ch_channel)),
	    AHCI_READ(sc, AHCI_P_CI(chp->ch_channel))),
	    DEBUG_XFERS);
	if ((xfer->c_bio.flags & ATA_ITSDONE) == 0) {
		xfer->c_bio.error = TIMEOUT;
		xfer->ops->c_intr(chp, xfer, 0);
	}
	/* reenable interrupts */
	AHCI_WRITE(sc, AHCI_GHC, AHCI_READ(sc, AHCI_GHC) | AHCI_GHC_IE);
}

static void
ahci_bio_abort(struct ata_channel *chp, struct ata_xfer *xfer)
{
	ahci_bio_complete(chp, xfer, 0);
}

static void
ahci_bio_kill_xfer(struct ata_channel *chp, struct ata_xfer *xfer, int reason)
{
	int drive = xfer->c_drive;
	struct ata_bio *ata_bio = &xfer->c_bio;
	bool deactivate = true;

	AHCIDEBUG_PRINT(("ahci_bio_kill_xfer port %d\n", chp->ch_channel),
	    DEBUG_FUNCS);

	ata_bio->flags |= ATA_ITSDONE;
	switch (reason) {
	case KILL_GONE_INACTIVE:
		deactivate = false;
		/* FALLTHROUGH */
	case KILL_GONE:
		ata_bio->error = ERR_NODEV;
		break;
	case KILL_RESET:
		ata_bio->error = ERR_RESET;
		break;
	case KILL_REQUEUE:
		ata_bio->error = REQUEUE;
		break;
	default:
		printf("ahci_bio_kill_xfer: unknown reason %d\n", reason);
		panic("ahci_bio_kill_xfer");
	}
	ata_bio->r_error = WDCE_ABRT;

	if (deactivate)
		ata_deactivate_xfer(chp, xfer);

	(*chp->ch_drive[drive].drv_done)(chp->ch_drive[drive].drv_softc, xfer);
}

static int
ahci_bio_complete(struct ata_channel *chp, struct ata_xfer *xfer, int tfd)
{
	struct ata_bio *ata_bio = &xfer->c_bio;
	int drive = xfer->c_drive;
	struct ahci_channel *achp = (struct ahci_channel *)chp;
	struct ahci_softc *sc = (struct ahci_softc *)chp->ch_atac;

	AHCIDEBUG_PRINT(("ahci_bio_complete port %d\n", chp->ch_channel),
	    DEBUG_FUNCS);

	if (ata_waitdrain_xfer_check(chp, xfer))
		return 0;

	if (xfer->c_flags & C_TIMEOU) {
		ata_bio->error = TIMEOUT;
	}

	bus_dmamap_sync(sc->sc_dmat, achp->ahcic_datad[xfer->c_slot], 0,
	    achp->ahcic_datad[xfer->c_slot]->dm_mapsize,
	    (ata_bio->flags & ATA_READ) ? BUS_DMASYNC_POSTREAD :
	    BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_dmat, achp->ahcic_datad[xfer->c_slot]);

	ata_bio->flags |= ATA_ITSDONE;
	if (AHCI_TFD_ERR(tfd) & WDCS_DWF) {
		ata_bio->error = ERR_DF;
	} else if (AHCI_TFD_ST(tfd) & WDCS_ERR) {
		ata_bio->error = ERROR;
		ata_bio->r_error = AHCI_TFD_ERR(tfd);
	} else if (AHCI_TFD_ST(tfd) & WDCS_CORR)
		ata_bio->flags |= ATA_CORR;

	AHCI_CMDH_SYNC(sc, achp, xfer->c_slot,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	AHCIDEBUG_PRINT(("ahci_bio_complete bcount %ld",
	    ata_bio->bcount), DEBUG_XFERS);
	/*
	 * If it was a write, complete data buffer may have been transferred
	 * before error detection; in this case don't use cmdh_prdbc
	 * as it won't reflect what was written to media. Assume nothing
	 * was transferred and leave bcount as-is.
	 * For queued commands, PRD Byte Count should not be used, and is
	 * not required to be valid; in that case underflow is always illegal.
	 */
	if ((xfer->c_flags & C_NCQ) != 0) {
		if (ata_bio->error == NOERROR)
			ata_bio->bcount = 0;
	} else {
		if ((ata_bio->flags & ATA_READ) || ata_bio->error == NOERROR)
			ata_bio->bcount -=
			    le32toh(achp->ahcic_cmdh[xfer->c_slot].cmdh_prdbc);
	}
	AHCIDEBUG_PRINT((" now %ld\n", ata_bio->bcount), DEBUG_XFERS);

	ata_deactivate_xfer(chp, xfer);

	(*chp->ch_drive[drive].drv_done)(chp->ch_drive[drive].drv_softc, xfer);
	if ((AHCI_TFD_ST(tfd) & WDCS_ERR) == 0)
		atastart(chp);
	return 0;
}

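/*
 * Stop command processing on a port: clear PxCMD.ST and wait up to 1s
 * for the command list DMA engine (PxCMD.CR) to idle.
 */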
static void
ahci_channel_stop(struct ahci_softc *sc, struct ata_channel *chp, int flags)
{
	int i;
	/* stop channel */
	AHCI_WRITE(sc, AHCI_P_CMD(chp->ch_channel),
	    AHCI_READ(sc, AHCI_P_CMD(chp->ch_channel)) & ~AHCI_P_CMD_ST);
	/* wait 1s for channel to stop */
	for (i = 0; i < 100; i++) {
		if ((AHCI_READ(sc, AHCI_P_CMD(chp->ch_channel)) & AHCI_P_CMD_CR)
		    == 0)
			break;
		ata_delay(chp, 10, "ahcistop", flags);
	}
	if (AHCI_READ(sc, AHCI_P_CMD(chp->ch_channel)) & AHCI_P_CMD_CR) {
		printf("%s: channel wouldn't stop\n", AHCINAME(sc));
		/* XXX controller reset ? */
		return;
	}

	if (sc->sc_channel_stop)
		sc->sc_channel_stop(sc, chp);
}

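/*
 * (Re)start a port: clear SError, optionally issue a command list
 * override (CLO) to clear a stuck BSY/DRQ, run any chip-specific start
 * hook, then set PxCMD.ST (plus PMA when a port multiplier is attached).
 */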
static void
ahci_channel_start(struct ahci_softc *sc, struct ata_channel *chp,
    int flags, int clo)
{
	int i;
	uint32_t p_cmd;
	/* clear error */
	AHCI_WRITE(sc, AHCI_P_SERR(chp->ch_channel),
	    AHCI_READ(sc, AHCI_P_SERR(chp->ch_channel)));

	if (clo) {
		/* issue command list override */
		KASSERT(sc->sc_ahci_cap & AHCI_CAP_CLO);
		AHCI_WRITE(sc, AHCI_P_CMD(chp->ch_channel),
		    AHCI_READ(sc, AHCI_P_CMD(chp->ch_channel)) | AHCI_P_CMD_CLO);
		/* wait 1s for AHCI_CAP_CLO to clear */
		for (i = 0; i < 100; i++) {
			if ((AHCI_READ(sc, AHCI_P_CMD(chp->ch_channel)) &
			    AHCI_P_CMD_CLO) == 0)
				break;
			ata_delay(chp, 10, "ahciclo", flags);
		}
		if (AHCI_READ(sc, AHCI_P_CMD(chp->ch_channel)) & AHCI_P_CMD_CLO) {
			printf("%s: channel wouldn't CLO\n", AHCINAME(sc));
			/* XXX controller reset ? */
			return;
		}
	}

	if (sc->sc_channel_start)
		sc->sc_channel_start(sc, chp);

	/* and start controller */
	p_cmd = AHCI_P_CMD_ICC_AC | AHCI_P_CMD_POD | AHCI_P_CMD_SUD |
	    AHCI_P_CMD_FRE | AHCI_P_CMD_ST;
	if (chp->ch_ndrives > PMP_PORT_CTL &&
	    chp->ch_drive[PMP_PORT_CTL].drive_type == ATA_DRIVET_PM) {
		p_cmd |= AHCI_P_CMD_PMA;
	}
	AHCI_WRITE(sc, AHCI_P_CMD(chp->ch_channel), p_cmd);
}

/* Recover channel after command failure */
static void
ahci_channel_recover(struct ata_channel *chp, int flags, uint32_t tfd)
{
	struct ahci_softc *sc = (struct ahci_softc *)chp->ch_atac;
	int drive = ATACH_NODRIVE;
	bool reset = false;

	ata_channel_lock_owned(chp);

	/*
	 * Read FBS to get the drive which caused the error, if a PM is in
	 * use.  According to the AHCI 1.3 spec, this register is available
	 * regardless of whether the FIS-based switching (FBSS) feature is
	 * supported or enabled.  If FIS-based switching is not in use, it
	 * merely maintains a single pair of DRQ/BSY state, but that is
	 * enough since in that case we never issue commands for more than
	 * one device at a time anyway.
	 * XXX untested
	 */
	if (chp->ch_ndrives > PMP_PORT_CTL) {
		uint32_t fbs = AHCI_READ(sc, AHCI_P_FBS(chp->ch_channel));
		if (fbs & AHCI_P_FBS_SDE) {
			drive = (fbs & AHCI_P_FBS_DWE) >> AHCI_P_FBS_DWE_SHIFT;

			/*
			 * Tell HBA to reset PM port X (value in DWE) state,
			 * and resume processing commands for other ports.
			 */
			fbs |= AHCI_P_FBS_DEC;
			AHCI_WRITE(sc, AHCI_P_FBS(chp->ch_channel), fbs);
			for (int i = 0; i < 1000; i++) {
				fbs = AHCI_READ(sc,
				    AHCI_P_FBS(chp->ch_channel));
				if ((fbs & AHCI_P_FBS_DEC) == 0)
					break;
				DELAY(1000);
			}
			if ((fbs & AHCI_P_FBS_DEC) != 0) {
				/* follow non-device specific recovery */
				drive = ATACH_NODRIVE;
				reset = true;
			}
		} else {
			/* not device specific, reset channel */
			drive = ATACH_NODRIVE;
			reset = true;
		}
	} else
		drive = 0;

	/*
	 * If BSY or DRQ bits are set, must execute COMRESET to return
	 * device to idle state. If drive is idle, it's enough to just
	 * reset CMD.ST, it's not necessary to do software reset.
	 * After resetting CMD.ST, need to execute READ LOG EXT for NCQ
	 * to unblock device processing if COMRESET was not done.
	 */
	if (reset || (AHCI_TFD_ST(tfd) & (WDCS_BSY|WDCS_DRQ)) != 0) {
		ahci_reset_channel(chp, flags);
		goto out;
	}

	KASSERT(drive != ATACH_NODRIVE && drive >= 0);
	ahci_channel_stop(sc, chp, flags);
	ahci_channel_start(sc, chp, flags,
	    (sc->sc_ahci_cap & AHCI_CAP_CLO) ? 1 : 0);

	ata_recovery_resume(chp, drive, tfd, flags);

out:
	/* Drive unblocked, back to normal operation */
	return;
}

static int
ahci_dma_setup(struct ata_channel *chp, int slot, void *data,
    size_t count, int op)
{
	int error, seg;
	struct ahci_softc *sc = (struct ahci_softc *)chp->ch_atac;
	struct ahci_channel *achp = (struct ahci_channel *)chp;
	struct ahci_cmd_tbl *cmd_tbl;
	struct ahci_cmd_header *cmd_h;

	cmd_h = &achp->ahcic_cmdh[slot];
	cmd_tbl = achp->ahcic_cmd_tbl[slot];

	if (data == NULL) {
		cmd_h->cmdh_prdtl = 0;
		goto end;
	}

	error = bus_dmamap_load(sc->sc_dmat, achp->ahcic_datad[slot],
	    data, count, NULL,
	    BUS_DMA_NOWAIT | BUS_DMA_STREAMING | op);
	if (error) {
		printf("%s port %d: failed to load xfer: %d\n",
		    AHCINAME(sc), chp->ch_channel, error);
		return error;
	}
	bus_dmamap_sync(sc->sc_dmat, achp->ahcic_datad[slot], 0,
	    achp->ahcic_datad[slot]->dm_mapsize,
	    (op == BUS_DMA_READ) ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
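	/*
	 * Fill the PRD table: one entry per DMA segment, each with the
	 * 64-bit buffer address and the segment length minus one; the last
	 * entry also requests an interrupt on completion (IPC).
	 */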
1777 for (seg = 0; seg < achp->ahcic_datad[slot]->dm_nsegs; seg++) {
1778 cmd_tbl->cmdt_prd[seg].prd_dba = htole64(
1779 achp->ahcic_datad[slot]->dm_segs[seg].ds_addr);
1780 cmd_tbl->cmdt_prd[seg].prd_dbc = htole32(
1781 achp->ahcic_datad[slot]->dm_segs[seg].ds_len - 1);
1782 }
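	/*
	 * prd_dbc holds the byte count minus one; additionally flag the
	 * last PRD entry to raise an interrupt when it completes.
	 */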
1783 cmd_tbl->cmdt_prd[seg - 1].prd_dbc |= htole32(AHCI_PRD_DBC_IPC);
1784 cmd_h->cmdh_prdtl = htole16(achp->ahcic_datad[slot]->dm_nsegs);
1785 end:
1786 AHCI_CMDTBL_SYNC(sc, achp, slot, BUS_DMASYNC_PREWRITE);
1787 return 0;
1788 }
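
/*
 * Illustrative sketch (not part of the driver): the pairing a command
 * start/completion path follows around ahci_dma_setup().  The names and
 * calls mirror ahci_atapi_start()/ahci_atapi_complete() below; this is a
 * usage outline only, under the assumption of a read transfer, and the
 * sketch function itself is hypothetical.
 */
#if 0
static void
ahci_dma_usage_sketch(struct ahci_softc *sc, struct ahci_channel *achp,
    struct ata_channel *chp, struct ata_xfer *xfer, void *data, size_t count)
{
	/* Start path: map the buffer and build the PRD table. */
	if (ahci_dma_setup(chp, xfer->c_slot, data, count, BUS_DMA_READ))
		return;			/* map failed, abort the xfer */

	/* ... issue the command by writing the slot bit to AHCI_P_CI ... */

	/* Completion path: finish the DMA and tear down the mapping. */
	bus_dmamap_sync(sc->sc_dmat, achp->ahcic_datad[xfer->c_slot], 0,
	    achp->ahcic_datad[xfer->c_slot]->dm_mapsize,
	    BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(sc->sc_dmat, achp->ahcic_datad[xfer->c_slot]);
}
#endif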
1789
1790 #if NATAPIBUS > 0
1791 static void
1792 ahci_atapibus_attach(struct atabus_softc * ata_sc)
1793 {
1794 struct ata_channel *chp = ata_sc->sc_chan;
1795 struct atac_softc *atac = chp->ch_atac;
1796 struct scsipi_adapter *adapt = &atac->atac_atapi_adapter._generic;
1797 struct scsipi_channel *chan = &chp->ch_atapi_channel;
1798 /*
1799 * Fill in the scsipi_adapter.
1800 */
1801 adapt->adapt_dev = atac->atac_dev;
1802 adapt->adapt_nchannels = atac->atac_nchannels;
1803 adapt->adapt_request = ahci_atapi_scsipi_request;
1804 adapt->adapt_minphys = ahci_atapi_minphys;
1805 atac->atac_atapi_adapter.atapi_probe_device = ahci_atapi_probe_device;
1806
1807 /*
1808 * Fill in the scsipi_channel.
1809 */
1810 memset(chan, 0, sizeof(*chan));
1811 chan->chan_adapter = adapt;
1812 chan->chan_bustype = &ahci_atapi_bustype;
1813 chan->chan_channel = chp->ch_channel;
1814 chan->chan_flags = SCSIPI_CHAN_OPENINGS;
1815 chan->chan_openings = 1;
1816 chan->chan_max_periph = 1;
1817 chan->chan_ntargets = 1;
1818 chan->chan_nluns = 1;
1819 chp->atapibus = config_found(ata_sc->sc_dev, chan, atapiprint,
1820 CFARG_IATTR, "atapi",
1821 CFARG_EOL);
1822 }
1823
1824 static void
1825 ahci_atapi_minphys(struct buf *bp)
1826 {
1827 if (bp->b_bcount > MAXPHYS)
1828 bp->b_bcount = MAXPHYS;
1829 minphys(bp);
1830 }
1831
1832 /*
1833 * Kill off all pending xfers for a periph.
1834 *
1835 * Must be called at splbio().
1836 */
1837 static void
1838 ahci_atapi_kill_pending(struct scsipi_periph *periph)
1839 {
1840 struct atac_softc *atac =
1841 device_private(periph->periph_channel->chan_adapter->adapt_dev);
1842 struct ata_channel *chp =
1843 atac->atac_channels[periph->periph_channel->chan_channel];
1844
1845 ata_kill_pending(&chp->ch_drive[periph->periph_target]);
1846 }
1847
1848 static const struct ata_xfer_ops ahci_atapi_xfer_ops = {
1849 .c_start = ahci_atapi_start,
1850 .c_poll = ahci_atapi_poll,
1851 .c_abort = ahci_atapi_abort,
1852 .c_intr = ahci_atapi_complete,
1853 .c_kill_xfer = ahci_atapi_kill_xfer,
1854 };
1855
1856 static void
1857 ahci_atapi_scsipi_request(struct scsipi_channel *chan,
1858 scsipi_adapter_req_t req, void *arg)
1859 {
1860 struct scsipi_adapter *adapt = chan->chan_adapter;
1861 struct scsipi_periph *periph;
1862 struct scsipi_xfer *sc_xfer;
1863 struct ahci_softc *sc = device_private(adapt->adapt_dev);
1864 struct atac_softc *atac = &sc->sc_atac;
1865 struct ata_xfer *xfer;
1866 int channel = chan->chan_channel;
1867 int drive, s;
1868
1869 switch (req) {
1870 case ADAPTER_REQ_RUN_XFER:
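		/*
		 * Translate the scsipi request into an ata_xfer and hand
		 * it to the channel queue; completion is reported back
		 * through scsipi_done() from ahci_atapi_complete().
		 */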
1871 sc_xfer = arg;
1872 periph = sc_xfer->xs_periph;
1873 drive = periph->periph_target;
1874 if (!device_is_active(atac->atac_dev)) {
1875 sc_xfer->error = XS_DRIVER_STUFFUP;
1876 scsipi_done(sc_xfer);
1877 return;
1878 }
1879 xfer = ata_get_xfer(atac->atac_channels[channel], false);
1880 if (xfer == NULL) {
1881 sc_xfer->error = XS_RESOURCE_SHORTAGE;
1882 scsipi_done(sc_xfer);
1883 return;
1884 }
1885
1886 if (sc_xfer->xs_control & XS_CTL_POLL)
1887 xfer->c_flags |= C_POLL;
1888 xfer->c_drive = drive;
1889 xfer->c_flags |= C_ATAPI;
1890 xfer->c_databuf = sc_xfer->data;
1891 xfer->c_bcount = sc_xfer->datalen;
1892 xfer->ops = &ahci_atapi_xfer_ops;
1893 xfer->c_scsipi = sc_xfer;
1894 xfer->c_atapi.c_dscpoll = 0;
1895 s = splbio();
1896 ata_exec_xfer(atac->atac_channels[channel], xfer);
1897 #ifdef DIAGNOSTIC
1898 if ((sc_xfer->xs_control & XS_CTL_POLL) != 0 &&
1899 (sc_xfer->xs_status & XS_STS_DONE) == 0)
1900 panic("ahci_atapi_scsipi_request: polled command "
1901 "not done");
1902 #endif
1903 splx(s);
1904 return;
1905 default:
1906 /* Not supported, nothing to do. */
1907 ;
1908 }
1909 }
1910
1911 static int
1912 ahci_atapi_start(struct ata_channel *chp, struct ata_xfer *xfer)
1913 {
1914 struct ahci_softc *sc = (struct ahci_softc *)chp->ch_atac;
1915 struct ahci_channel *achp = (struct ahci_channel *)chp;
1916 struct scsipi_xfer *sc_xfer = xfer->c_scsipi;
1917 struct ahci_cmd_tbl *cmd_tbl;
1918 struct ahci_cmd_header *cmd_h;
1919
1920 AHCIDEBUG_PRINT(("ahci_atapi_start CI 0x%x\n",
1921 AHCI_READ(sc, AHCI_P_CI(chp->ch_channel))), DEBUG_XFERS);
1922
1923 ata_channel_lock_owned(chp);
1924
1925 cmd_tbl = achp->ahcic_cmd_tbl[xfer->c_slot];
1926 AHCIDEBUG_PRINT(("%s port %d tbl %p\n", AHCINAME(sc), chp->ch_channel,
1927 cmd_tbl), DEBUG_XFERS);
1928
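	/*
	 * Build the host-to-device register FIS for an ATA PACKET command
	 * (with the PM port in the FIS) and copy the SCSI CDB into the
	 * ATAPI command area of the command table.
	 */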
1929 satafis_rhd_construct_atapi(xfer, cmd_tbl->cmdt_cfis);
1930 cmd_tbl->cmdt_cfis[rhd_c] |= xfer->c_drive;
1931 memset(&cmd_tbl->cmdt_acmd, 0, sizeof(cmd_tbl->cmdt_acmd));
1932 memcpy(cmd_tbl->cmdt_acmd, sc_xfer->cmd, sc_xfer->cmdlen);
1933
1934 cmd_h = &achp->ahcic_cmdh[xfer->c_slot];
1935 AHCIDEBUG_PRINT(("%s port %d header %p\n", AHCINAME(sc),
1936 chp->ch_channel, cmd_h), DEBUG_XFERS);
1937 if (ahci_dma_setup(chp, xfer->c_slot,
1938 sc_xfer->datalen ? sc_xfer->data : NULL,
1939 sc_xfer->datalen,
1940 (sc_xfer->xs_control & XS_CTL_DATA_IN) ?
1941 BUS_DMA_READ : BUS_DMA_WRITE)) {
1942 sc_xfer->error = XS_DRIVER_STUFFUP;
1943 return ATASTART_ABORT;
1944 }
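	/*
	 * Command header: FIS length in dwords, the ATAPI bit so the HBA
	 * transmits the packet from cmdt_acmd, the write-direction flag,
	 * and the PM port the device sits behind.
	 */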
1945 cmd_h->cmdh_flags = htole16(
1946 ((sc_xfer->xs_control & XS_CTL_DATA_OUT) ? AHCI_CMDH_F_WR : 0) |
1947 RHD_FISLEN / 4 | AHCI_CMDH_F_A |
1948 (xfer->c_drive << AHCI_CMDH_F_PMP_SHIFT));
1949 cmd_h->cmdh_prdbc = 0;
1950 AHCI_CMDH_SYNC(sc, achp, xfer->c_slot,
1951 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1952
1953 if (xfer->c_flags & C_POLL) {
1954 /* polled command, disable interrupts */
1955 AHCI_WRITE(sc, AHCI_GHC,
1956 AHCI_READ(sc, AHCI_GHC) & ~AHCI_GHC_IE);
1957 }
1958 /* start command */
1959 AHCI_WRITE(sc, AHCI_P_CI(chp->ch_channel), 1U << xfer->c_slot);
1960
1961 if ((xfer->c_flags & C_POLL) == 0) {
1962 callout_reset(&chp->c_timo_callout, mstohz(sc_xfer->timeout),
1963 ata_timeout, chp);
1964 return ATASTART_STARTED;
1965 } else
1966 return ATASTART_POLL;
1967 }
1968
1969 static void
1970 ahci_atapi_poll(struct ata_channel *chp, struct ata_xfer *xfer)
1971 {
1972 struct ahci_softc *sc = (struct ahci_softc *)chp->ch_atac;
1973 struct ahci_channel *achp = (struct ahci_channel *)chp;
1974
1975 	/*
1976 	 * Polled command: interrupts are off; poll ahci_intr_port() directly.
1977 	 */
1978 for (int i = 0; i < ATA_DELAY / 10; i++) {
1979 if (xfer->c_scsipi->xs_status & XS_STS_DONE)
1980 break;
1981 ahci_intr_port(achp);
1982 delay(10000);
1983 }
1984 AHCIDEBUG_PRINT(("%s port %d poll end GHC 0x%x IS 0x%x list 0x%x%x fis 0x%x%x CMD 0x%x CI 0x%x\n", AHCINAME(sc), chp->ch_channel,
1985 AHCI_READ(sc, AHCI_GHC), AHCI_READ(sc, AHCI_IS),
1986 AHCI_READ(sc, AHCI_P_CLBU(chp->ch_channel)),
1987 AHCI_READ(sc, AHCI_P_CLB(chp->ch_channel)),
1988 AHCI_READ(sc, AHCI_P_FBU(chp->ch_channel)),
1989 AHCI_READ(sc, AHCI_P_FB(chp->ch_channel)),
1990 AHCI_READ(sc, AHCI_P_CMD(chp->ch_channel)),
1991 AHCI_READ(sc, AHCI_P_CI(chp->ch_channel))),
1992 DEBUG_XFERS);
1993 if ((xfer->c_scsipi->xs_status & XS_STS_DONE) == 0) {
1994 xfer->c_scsipi->error = XS_TIMEOUT;
1995 xfer->ops->c_intr(chp, xfer, 0);
1996 }
1997 /* reenable interrupts */
1998 AHCI_WRITE(sc, AHCI_GHC, AHCI_READ(sc, AHCI_GHC) | AHCI_GHC_IE);
1999 }
2000
2001 static void
2002 ahci_atapi_abort(struct ata_channel *chp, struct ata_xfer *xfer)
2003 {
2004 ahci_atapi_complete(chp, xfer, 0);
2005 }
2006
2007 static int
2008 ahci_atapi_complete(struct ata_channel *chp, struct ata_xfer *xfer, int tfd)
2009 {
2010 struct scsipi_xfer *sc_xfer = xfer->c_scsipi;
2011 struct ahci_channel *achp = (struct ahci_channel *)chp;
2012 struct ahci_softc *sc = (struct ahci_softc *)chp->ch_atac;
2013
2014 AHCIDEBUG_PRINT(("ahci_atapi_complete port %d\n", chp->ch_channel),
2015 DEBUG_FUNCS);
2016
2017 if (ata_waitdrain_xfer_check(chp, xfer))
2018 return 0;
2019
2020 if (xfer->c_flags & C_TIMEOU) {
2021 sc_xfer->error = XS_TIMEOUT;
2022 }
2023
2024 if (xfer->c_bcount > 0) {
2025 bus_dmamap_sync(sc->sc_dmat, achp->ahcic_datad[xfer->c_slot], 0,
2026 achp->ahcic_datad[xfer->c_slot]->dm_mapsize,
2027 (sc_xfer->xs_control & XS_CTL_DATA_IN) ?
2028 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
2029 bus_dmamap_unload(sc->sc_dmat, achp->ahcic_datad[xfer->c_slot]);
2030 }
2031
2032 AHCI_CMDH_SYNC(sc, achp, xfer->c_slot,
2033 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
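	/*
	 * The HBA updates cmdh_prdbc with the number of bytes actually
	 * transferred; use it to compute the residual.
	 */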
2034 sc_xfer->resid = sc_xfer->datalen;
2035 sc_xfer->resid -= le32toh(achp->ahcic_cmdh[xfer->c_slot].cmdh_prdbc);
2036 AHCIDEBUG_PRINT(("ahci_atapi_complete datalen %d resid %d\n",
2037 sc_xfer->datalen, sc_xfer->resid), DEBUG_XFERS);
2038 if (AHCI_TFD_ST(tfd) & WDCS_ERR &&
2039 ((sc_xfer->xs_control & XS_CTL_REQSENSE) == 0 ||
2040 sc_xfer->resid == sc_xfer->datalen)) {
2041 sc_xfer->error = XS_SHORTSENSE;
2042 sc_xfer->sense.atapi_sense = AHCI_TFD_ERR(tfd);
2043 if ((sc_xfer->xs_periph->periph_quirks &
2044 PQUIRK_NOSENSE) == 0) {
2045 /* ask scsipi to send a REQUEST_SENSE */
2046 sc_xfer->error = XS_BUSY;
2047 sc_xfer->status = SCSI_CHECK;
2048 }
2049 }
2050
2051 ata_deactivate_xfer(chp, xfer);
2052
2053 ata_free_xfer(chp, xfer);
2054 scsipi_done(sc_xfer);
2055 if ((AHCI_TFD_ST(tfd) & WDCS_ERR) == 0)
2056 atastart(chp);
2057 return 0;
2058 }
2059
2060 static void
2061 ahci_atapi_kill_xfer(struct ata_channel *chp, struct ata_xfer *xfer, int reason)
2062 {
2063 struct scsipi_xfer *sc_xfer = xfer->c_scsipi;
2064 bool deactivate = true;
2065
2066 /* remove this command from xfer queue */
2067 switch (reason) {
2068 case KILL_GONE_INACTIVE:
2069 deactivate = false;
2070 /* FALLTHROUGH */
2071 case KILL_GONE:
2072 sc_xfer->error = XS_DRIVER_STUFFUP;
2073 break;
2074 case KILL_RESET:
2075 sc_xfer->error = XS_RESET;
2076 break;
2077 case KILL_REQUEUE:
2078 sc_xfer->error = XS_REQUEUE;
2079 break;
2080 default:
2081 		printf("ahci_atapi_kill_xfer: unknown reason %d\n", reason);
2082 		panic("ahci_atapi_kill_xfer");
2083 }
2084
2085 if (deactivate)
2086 ata_deactivate_xfer(chp, xfer);
2087
2088 ata_free_xfer(chp, xfer);
2089 scsipi_done(sc_xfer);
2090 }
2091
2092 static void
2093 ahci_atapi_probe_device(struct atapibus_softc *sc, int target)
2094 {
2095 struct scsipi_channel *chan = sc->sc_channel;
2096 struct scsipi_periph *periph;
2097 struct ataparams ids;
2098 struct ataparams *id = &ids;
2099 struct ahci_softc *ahcic =
2100 device_private(chan->chan_adapter->adapt_dev);
2101 struct atac_softc *atac = &ahcic->sc_atac;
2102 struct ata_channel *chp = atac->atac_channels[chan->chan_channel];
2103 struct ata_drive_datas *drvp = &chp->ch_drive[target];
2104 struct scsipibus_attach_args sa;
2105 char serial_number[21], model[41], firmware_revision[9];
2106 int s;
2107
2108 /* skip if already attached */
2109 if (scsipi_lookup_periph(chan, target, 0) != NULL)
2110 return;
2111
2112 /* if no ATAPI device detected at attach time, skip */
2113 if (drvp->drive_type != ATA_DRIVET_ATAPI) {
2114 AHCIDEBUG_PRINT(("ahci_atapi_probe_device: drive %d "
2115 "not present\n", target), DEBUG_PROBE);
2116 return;
2117 }
2118
2119 /* Some ATAPI devices need a bit more time after software reset. */
2120 delay(5000);
2121 if (ata_get_params(drvp, AT_WAIT, id) == 0) {
2122 #ifdef ATAPI_DEBUG_PROBE
2123 printf("%s drive %d: cmdsz 0x%x drqtype 0x%x\n",
2124 AHCINAME(ahcic), target,
2125 id->atap_config & ATAPI_CFG_CMD_MASK,
2126 id->atap_config & ATAPI_CFG_DRQ_MASK);
2127 #endif
2128 periph = scsipi_alloc_periph(M_NOWAIT);
2129 if (periph == NULL) {
2130 aprint_error_dev(sc->sc_dev,
2131 "unable to allocate periph for drive %d\n",
2132 target);
2133 return;
2134 }
2135 periph->periph_dev = NULL;
2136 periph->periph_channel = chan;
2137 periph->periph_switch = &atapi_probe_periphsw;
2138 periph->periph_target = target;
2139 periph->periph_lun = 0;
2140 periph->periph_quirks = PQUIRK_ONLYBIG;
2141
2142 #ifdef SCSIPI_DEBUG
2143 if (SCSIPI_DEBUG_TYPE == SCSIPI_BUSTYPE_ATAPI &&
2144 SCSIPI_DEBUG_TARGET == target)
2145 periph->periph_dbflags |= SCSIPI_DEBUG_FLAGS;
2146 #endif
2147 periph->periph_type = ATAPI_CFG_TYPE(id->atap_config);
2148 if (id->atap_config & ATAPI_CFG_REMOV)
2149 periph->periph_flags |= PERIPH_REMOVABLE;
2150 if (periph->periph_type == T_SEQUENTIAL) {
2151 s = splbio();
2152 drvp->drive_flags |= ATA_DRIVE_ATAPIDSCW;
2153 splx(s);
2154 }
2155
2156 sa.sa_periph = periph;
2157 sa.sa_inqbuf.type = ATAPI_CFG_TYPE(id->atap_config);
2158 sa.sa_inqbuf.removable = id->atap_config & ATAPI_CFG_REMOV ?
2159 T_REMOV : T_FIXED;
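		/* IDENTIFY strings are space-padded; sanitize them. */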
2160 strnvisx(model, sizeof(model), id->atap_model, 40,
2161 VIS_TRIM|VIS_SAFE|VIS_OCTAL);
2162 strnvisx(serial_number, sizeof(serial_number), id->atap_serial,
2163 20, VIS_TRIM|VIS_SAFE|VIS_OCTAL);
2164 strnvisx(firmware_revision, sizeof(firmware_revision),
2165 id->atap_revision, 8, VIS_TRIM|VIS_SAFE|VIS_OCTAL);
2166 sa.sa_inqbuf.vendor = model;
2167 sa.sa_inqbuf.product = serial_number;
2168 sa.sa_inqbuf.revision = firmware_revision;
2169
2170 /*
2171 * Determine the operating mode capabilities of the device.
2172 */
2173 if ((id->atap_config & ATAPI_CFG_CMD_MASK) == ATAPI_CFG_CMD_16)
2174 periph->periph_cap |= PERIPH_CAP_CMD16;
2175 /* XXX This is gross. */
2176 periph->periph_cap |= (id->atap_config & ATAPI_CFG_DRQ_MASK);
2177
2178 drvp->drv_softc = atapi_probe_device(sc, target, periph, &sa);
2179
2180 if (drvp->drv_softc)
2181 ata_probe_caps(drvp);
2182 else {
2183 s = splbio();
2184 drvp->drive_type = ATA_DRIVET_NONE;
2185 splx(s);
2186 }
2187 } else {
2188 		AHCIDEBUG_PRINT(("ahci_atapi_probe_device: ATAPI_IDENTIFY_DEVICE "
2189 		    "failed for drive %s:%d:%d\n",
2190 		    AHCINAME(ahcic), chp->ch_channel, target), DEBUG_PROBE);
2191 s = splbio();
2192 drvp->drive_type = ATA_DRIVET_NONE;
2193 splx(s);
2194 }
2195 }
2196 #endif /* NATAPIBUS */
2197