1 /* $NetBSD: pciide_common.c,v 1.72 2025/04/16 17:53:04 andvar Exp $ */ 2 3 4 /* 5 * Copyright (c) 1999, 2000, 2001, 2003 Manuel Bouyer. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 18 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 19 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 21 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 22 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 23 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 24 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 25 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 26 * 27 */ 28 29 30 /* 31 * Copyright (c) 1996, 1998 Christopher G. Demetriou. All rights reserved. 32 * 33 * Redistribution and use in source and binary forms, with or without 34 * modification, are permitted provided that the following conditions 35 * are met: 36 * 1. Redistributions of source code must retain the above copyright 37 * notice, this list of conditions and the following disclaimer. 38 * 2. 
Redistributions in binary form must reproduce the above copyright 39 * notice, this list of conditions and the following disclaimer in the 40 * documentation and/or other materials provided with the distribution. 41 * 3. All advertising materials mentioning features or use of this software 42 * must display the following acknowledgement: 43 * This product includes software developed by Christopher G. Demetriou 44 * for the NetBSD Project. 45 * 4. The name of the author may not be used to endorse or promote products 46 * derived from this software without specific prior written permission 47 * 48 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 49 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 50 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 51 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 52 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 53 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 54 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 55 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 56 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 57 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 58 */ 59 60 /* 61 * PCI IDE controller driver. 62 * 63 * Author: Christopher G. Demetriou, March 2, 1998 (derived from NetBSD 64 * sys/dev/pci/ppb.c, revision 1.16). 65 * 66 * See "PCI IDE Controller Specification, Revision 1.0 3/4/94" and 67 * "Programming Interface for Bus Master IDE Controller, Revision 1.0 68 * 5/16/94" from the PCI SIG. 
 *
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: pciide_common.c,v 1.72 2025/04/16 17:53:04 andvar Exp $");

#include <sys/param.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>
#include <dev/pci/pciidereg.h>
#include <dev/pci/pciidevar.h>

#include <dev/ic/wdcreg.h>

#ifdef ATADEBUG
#ifndef ATADEBUG_PCIIDE_MASK
#define ATADEBUG_PCIIDE_MASK 0
#endif
int atadebug_pciide_mask = ATADEBUG_PCIIDE_MASK;
#endif

#if NATA_DMA
/* Shared format for the per-drive DMA-map setup failure messages below. */
static const char dmaerrfmt[] =
    "%s:%d: unable to %s table DMA map for drive %d, error=%d\n";
#endif

/* Default product description for devices not known from this controller */
const struct pciide_product_desc default_product_desc = {
	0,
	0,
	"Generic PCI IDE controller",
	default_chip_map,
};

/*
 * Search the product table pp (terminated by an entry with a NULL
 * chip_map) for the PCI product of id; return the matching entry,
 * or NULL if the product is not in the table.
 */
const struct pciide_product_desc *
pciide_lookup_product(pcireg_t id, const struct pciide_product_desc *pp)
{
	for (; pp->chip_map != NULL; pp++)
		if (PCI_PRODUCT(id) == pp->ide_product)
			break;

	if (pp->chip_map == NULL)
		return NULL;
	return pp;
}

/*
 * Common controller attach: announce the device, record the PCI
 * handles, set DMA defaults, and invoke the chip-specific map
 * function from the product description.
 */
void
pciide_common_attach(struct pciide_softc *sc, const struct pci_attach_args *pa,
    const struct pciide_product_desc *pp)
{
	pci_chipset_tag_t pc = pa->pa_pc;
	pcitag_t tag = pa->pa_tag;
#if NATA_DMA
	pcireg_t csr;
#endif
	const char *displaydev = NULL;
	int dontprint = 0;

	sc->sc_pci_id = pa->pa_id;
	if (pp == NULL) {
		/* should only happen for generic pciide devices */
		sc->sc_pp = &default_product_desc;
	} else {
		sc->sc_pp = pp;
		/* if ide_name == NULL, printf is done in chip-specific map */
		if (pp->ide_name)
			displaydev = pp->ide_name;
		else
			dontprint = 1;
	}

	if (dontprint) {
		aprint_naive("disk controller\n");
		aprint_normal("\n"); /* ???
 */
	} else
		pci_aprint_devinfo_fancy(pa, "disk controller", displaydev, 1);

	sc->sc_pc = pa->pa_pc;
	sc->sc_tag = pa->pa_tag;

#if NATA_DMA
	/* Set up DMA defaults; these might be adjusted by chip_map. */
	sc->sc_dma_maxsegsz = IDEDMA_BYTE_COUNT_MAX;
	sc->sc_dma_boundary = IDEDMA_BYTE_COUNT_ALIGN;
#endif

#ifdef ATADEBUG
	if (atadebug_pciide_mask & DEBUG_PROBE)
		pci_conf_print(sc->sc_pc, sc->sc_tag, NULL);
#endif
	sc->sc_pp->chip_map(sc, pa);

#if NATA_DMA
	/* chip_map decided whether DMA is usable; enable bus mastering then */
	if (sc->sc_dma_ok) {
		csr = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
		csr |= PCI_COMMAND_MASTER_ENABLE;
		pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, csr);
	}
#endif
	ATADEBUG_PRINT(("pciide: command/status register=%x\n",
	    pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG)), DEBUG_PROBE);
}

/*
 * Common controller detach: detach the wdc children first, then
 * unmap each enabled channel's command/control register blocks and
 * tear down the per-drive DMA tables.  Returns 0 on success or the
 * error from wdcdetach().
 */
int
pciide_common_detach(struct pciide_softc *sc, int flags)
{
	struct pciide_channel *cp;
	struct ata_channel *wdc_cp;
	struct wdc_regs *wdr;
	int channel, drive;
	int rv;

	rv = wdcdetach(sc->sc_wdcdev.sc_atac.atac_dev, flags);
	if (rv)
		return rv;

	for (channel = 0; channel < sc->sc_wdcdev.sc_atac.atac_nchannels;
	    channel++) {
		cp = &sc->pciide_channels[channel];
		wdc_cp = &cp->ata_channel;
		wdr = CHAN_TO_WDC_REGS(wdc_cp);

		if (wdc_cp->ch_flags & ATACH_DISABLED)
			continue;

		if (wdr->cmd_ios != 0)
			bus_space_unmap(wdr->cmd_iot,
			    wdr->cmd_baseioh, wdr->cmd_ios);
		/*
		 * Compat channels mapped the ctl registers directly;
		 * native channels keep the base handle/size in cp and
		 * hand the generic code a 1-byte subregion, so the two
		 * cases are unmapped differently.
		 */
		if (cp->compat != 0) {
			if (wdr->ctl_ios != 0)
				bus_space_unmap(wdr->ctl_iot,
				    wdr->ctl_ioh, wdr->ctl_ios);
		} else {
			if (cp->ctl_ios != 0)
				bus_space_unmap(wdr->ctl_iot,
				    cp->ctl_baseioh, cp->ctl_ios);
		}

		for (drive = 0; drive < sc->sc_wdcdev.wdc_maxdrives; drive++) {
#if NATA_DMA
			pciide_dma_table_teardown(sc, channel, drive);
#endif
		}
	}

#if NATA_DMA
	if (sc->sc_dma_ios != 0)
		bus_space_unmap(sc->sc_dma_iot,
		    sc->sc_dma_ioh, sc->sc_dma_ios);
	if (sc->sc_ba5_ss != 0)
		bus_space_unmap(sc->sc_ba5_st, sc->sc_ba5_sh, sc->sc_ba5_ss);
#endif

	return 0;
}

/*
 * Autoconf detach entry point.  On platforms without a machdep hook
 * to disestablish compat interrupts, refuse to detach (EBUSY) while
 * any compatibility channel exists; otherwise release the compat and
 * native interrupt handlers and fall through to the common detach.
 */
int
pciide_detach(device_t self, int flags)
{
	struct pciide_softc *sc = device_private(self);
	struct pciide_channel *cp;
	int channel;
#ifndef __HAVE_PCIIDE_MACHDEP_COMPAT_INTR_DISESTABLISH
	bool has_compat_chan;

	has_compat_chan = false;
	for (channel = 0; channel < sc->sc_wdcdev.sc_atac.atac_nchannels;
	    channel++) {
		cp = &sc->pciide_channels[channel];
		if (cp->compat != 0) {
			has_compat_chan = true;
		}
	}

	if (has_compat_chan != false)
		return EBUSY;
#endif

	for (channel = 0; channel < sc->sc_wdcdev.sc_atac.atac_nchannels;
	    channel++) {
		cp = &sc->pciide_channels[channel];
		if (cp->compat != 0)
			if (cp->ih != NULL) {
				pciide_unmap_compat_intr(sc->sc_pc, cp, channel);
				cp->ih = NULL;
			}
	}

	if (sc->sc_pci_ih != NULL) {
		pci_intr_disestablish(sc->sc_pc, sc->sc_pci_ih);
		sc->sc_pci_ih = NULL;
	}

	return pciide_common_detach(sc, flags);
}

/* tell whether the chip is enabled or not */
int
pciide_chipen(struct pciide_softc *sc, const struct pci_attach_args *pa)
{
	pcireg_t csr;

	/* I/O must be enabled both at any upstream bridge... */
	if ((pa->pa_flags & PCI_FLAGS_IO_OKAY) == 0) {
		aprint_normal_dev(sc->sc_wdcdev.sc_atac.atac_dev,
		    "I/O access disabled at bridge\n");
		return 0;
	}
	/* ...and in the device's own command register. */
	csr = pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_COMMAND_STATUS_REG);
	if ((csr & PCI_COMMAND_IO_ENABLE) == 0) {
		aprint_normal_dev(sc->sc_wdcdev.sc_atac.atac_dev,
		    "I/O access disabled at device\n");
		return 0;
	}
	return 1;
}

/*
 * Map the fixed ISA compatibility-mode command and control register
 * blocks for a channel, and subregion the individual command
 * registers.  On any failure the channel is marked ATACH_DISABLED.
 */
void
pciide_mapregs_compat(const struct pci_attach_args *pa,
    struct pciide_channel *cp, int compatchan)
{
	struct pciide_softc *sc = CHAN_TO_PCIIDE(&cp->ata_channel);
	struct ata_channel *wdc_cp = &cp->ata_channel;
	struct wdc_regs *wdr =
	    CHAN_TO_WDC_REGS(wdc_cp);
	int i;

	cp->compat = 1;

	wdr->cmd_iot = pa->pa_iot;
	if (bus_space_map(wdr->cmd_iot, PCIIDE_COMPAT_CMD_BASE(compatchan),
	    PCIIDE_COMPAT_CMD_SIZE, 0, &wdr->cmd_baseioh) != 0) {
		aprint_error_dev(sc->sc_wdcdev.sc_atac.atac_dev,
		    "couldn't map %s channel cmd regs\n", cp->name);
		goto bad;
	}
	wdr->cmd_ios = PCIIDE_COMPAT_CMD_SIZE;

	wdr->ctl_iot = pa->pa_iot;
	if (bus_space_map(wdr->ctl_iot, PCIIDE_COMPAT_CTL_BASE(compatchan),
	    PCIIDE_COMPAT_CTL_SIZE, 0, &wdr->ctl_ioh) != 0) {
		aprint_error_dev(sc->sc_wdcdev.sc_atac.atac_dev,
		    "couldn't map %s channel ctl regs\n", cp->name);
		bus_space_unmap(wdr->cmd_iot, wdr->cmd_baseioh, wdr->cmd_ios);
		goto bad;
	}
	wdr->ctl_ios = PCIIDE_COMPAT_CTL_SIZE;

	/* register 0 is the 4-byte-wide data port, the rest are 1 byte */
	for (i = 0; i < WDC_NREG; i++) {
		if (bus_space_subregion(wdr->cmd_iot, wdr->cmd_baseioh, i,
		    i == 0 ? 4 : 1, &wdr->cmd_iohs[i]) != 0) {
			aprint_error_dev(sc->sc_wdcdev.sc_atac.atac_dev,
			    "couldn't subregion %s channel cmd regs\n",
			    cp->name);
			goto bad;
		}
	}
	wdc_init_shadow_regs(wdr);
	wdr->data32iot = wdr->cmd_iot;
	wdr->data32ioh = wdr->cmd_iohs[0];
	return;

bad:
	cp->ata_channel.ch_flags |= ATACH_DISABLED;
	return;
}

/*
 * Map a channel's registers from the PCI BARs (native mode) and
 * establish the controller's shared native-PCI interrupt the first
 * time through.  On any failure the channel is marked ATACH_DISABLED.
 */
void
pciide_mapregs_native(const struct pci_attach_args *pa,
    struct pciide_channel *cp, int (*pci_intr)(void *))
{
	struct pciide_softc *sc = CHAN_TO_PCIIDE(&cp->ata_channel);
	struct ata_channel *wdc_cp = &cp->ata_channel;
	struct wdc_regs *wdr = CHAN_TO_WDC_REGS(wdc_cp);
	const char *intrstr;
	pci_intr_handle_t intrhandle;
	int i;
	char intrbuf[PCI_INTRSTR_LEN];

	cp->compat = 0;

	/* one interrupt handler is shared by all native channels */
	if (sc->sc_pci_ih == NULL) {
		if (pci_intr_map(pa, &intrhandle) != 0) {
			aprint_error_dev(sc->sc_wdcdev.sc_atac.atac_dev,
			    "couldn't map native-PCI interrupt\n");
			goto bad;
		}
		intrstr = pci_intr_string(pa->pa_pc, intrhandle, intrbuf, sizeof(intrbuf));
sc->sc_pci_ih = pci_intr_establish_xname(pa->pa_pc, 357 intrhandle, IPL_BIO, pci_intr, sc, 358 device_xname(sc->sc_wdcdev.sc_atac.atac_dev)); 359 if (sc->sc_pci_ih != NULL) { 360 aprint_normal_dev(sc->sc_wdcdev.sc_atac.atac_dev, 361 "using %s for native-PCI interrupt\n", 362 intrstr ? intrstr : "unknown interrupt"); 363 } else { 364 aprint_error_dev(sc->sc_wdcdev.sc_atac.atac_dev, 365 "couldn't establish native-PCI interrupt"); 366 if (intrstr != NULL) 367 aprint_error(" at %s", intrstr); 368 aprint_error("\n"); 369 goto bad; 370 } 371 } 372 cp->ih = sc->sc_pci_ih; 373 if (pci_mapreg_map(pa, PCIIDE_REG_CMD_BASE(wdc_cp->ch_channel), 374 PCI_MAPREG_TYPE_IO, 0, 375 &wdr->cmd_iot, &wdr->cmd_baseioh, NULL, &wdr->cmd_ios) != 0) { 376 aprint_error_dev(sc->sc_wdcdev.sc_atac.atac_dev, 377 "couldn't map %s channel cmd regs\n", cp->name); 378 goto bad; 379 } 380 381 if (pci_mapreg_map(pa, PCIIDE_REG_CTL_BASE(wdc_cp->ch_channel), 382 PCI_MAPREG_TYPE_IO, 0, 383 &wdr->ctl_iot, &cp->ctl_baseioh, NULL, &cp->ctl_ios) != 0) { 384 aprint_error_dev(sc->sc_wdcdev.sc_atac.atac_dev, 385 "couldn't map %s channel ctl regs\n", cp->name); 386 bus_space_unmap(wdr->cmd_iot, wdr->cmd_baseioh, wdr->cmd_ios); 387 goto bad; 388 } 389 /* 390 * In native mode, 4 bytes of I/O space are mapped for the control 391 * register, the control register is at offset 2. Pass the generic 392 * code a handle for only one byte at the right offset. 393 */ 394 if (bus_space_subregion(wdr->ctl_iot, cp->ctl_baseioh, 2, 1, 395 &wdr->ctl_ioh) != 0) { 396 aprint_error_dev(sc->sc_wdcdev.sc_atac.atac_dev, 397 "unable to subregion %s channel ctl regs\n", cp->name); 398 bus_space_unmap(wdr->cmd_iot, wdr->cmd_baseioh, wdr->cmd_ios); 399 bus_space_unmap(wdr->cmd_iot, cp->ctl_baseioh, cp->ctl_ios); 400 goto bad; 401 } 402 403 for (i = 0; i < WDC_NREG; i++) { 404 if (bus_space_subregion(wdr->cmd_iot, wdr->cmd_baseioh, i, 405 i == 0 ? 
		    4 : 1, &wdr->cmd_iohs[i]) != 0) {
			aprint_error_dev(sc->sc_wdcdev.sc_atac.atac_dev,
			    "couldn't subregion %s channel cmd regs\n",
			    cp->name);
			goto bad;
		}
	}
	wdc_init_shadow_regs(wdr);
	wdr->data32iot = wdr->cmd_iot;
	wdr->data32ioh = wdr->cmd_iohs[0];
	return;

bad:
	cp->ata_channel.ch_flags |= ATACH_DISABLED;
	return;
}

#if NATA_DMA
/*
 * Map the controller's bus-master DMA registers and, on success,
 * hook the pciide DMA entry points into the wdc driver.
 */
void
pciide_mapreg_dma(struct pciide_softc *sc, const struct pci_attach_args *pa)
{
	pcireg_t maptype;
	bus_addr_t addr;
	struct pciide_channel *pc;
	int reg, chan;
	bus_size_t size;

	/*
	 * Map DMA registers
	 *
	 * Note that sc_dma_ok is the right variable to test to see if
	 * DMA can be done. If the interface doesn't support DMA,
	 * sc_dma_ok will never be non-zero. If the DMA regs couldn't
	 * be mapped, it'll be zero. I.e., sc_dma_ok will only be
	 * non-zero if the interface supports DMA and the registers
	 * could be mapped.
	 *
	 * XXX Note that despite the fact that the Bus Master IDE specs
	 * XXX say that "The bus master IDE function uses 16 bytes of IO
	 * XXX space," some controllers (at least the United
	 * XXX Microelectronics UM8886BF) place it in memory space.
	 */
	maptype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,
	    PCIIDE_REG_BUS_MASTER_DMA);

	switch (maptype) {
	case PCI_MAPREG_TYPE_IO:
		sc->sc_dma_ok = (pci_mapreg_info(pa->pa_pc, pa->pa_tag,
		    PCIIDE_REG_BUS_MASTER_DMA, PCI_MAPREG_TYPE_IO,
		    &addr, NULL, NULL) == 0);
		if (sc->sc_dma_ok == 0) {
			aprint_verbose(
			    ", but unused (couldn't query registers)");
			break;
		}
		/*
		 * Some chips can only address 16 bits of I/O space for
		 * the bus-master registers; reject addresses beyond that.
		 */
		if ((sc->sc_pp->ide_flags & IDE_16BIT_IOSPACE)
		    && addr >= 0x10000) {
			sc->sc_dma_ok = 0;
			aprint_verbose(
			    ", but unused (registers at unsafe address "
			    "%#lx)", (unsigned long)addr);
			break;
		}
		/* FALLTHROUGH */

	case PCI_MAPREG_MEM_TYPE_32BIT:
		sc->sc_dma_ok = (pci_mapreg_map(pa,
		    PCIIDE_REG_BUS_MASTER_DMA, maptype, 0,
		    &sc->sc_dma_iot, &sc->sc_dma_ioh, NULL, &sc->sc_dma_ios)
		    == 0);
		sc->sc_dmat = pa->pa_dmat;
		if (sc->sc_dma_ok == 0) {
			aprint_verbose(", but unused (couldn't map registers)");
		} else {
			sc->sc_wdcdev.dma_arg = sc;
			sc->sc_wdcdev.dma_init = pciide_dma_init;
			sc->sc_wdcdev.dma_start = pciide_dma_start;
			sc->sc_wdcdev.dma_finish = pciide_dma_finish;
		}

		/* DMA can be vetoed via config-file flags... */
		if (device_cfdata(sc->sc_wdcdev.sc_atac.atac_dev)->cf_flags &
		    PCIIDE_OPTIONS_NODMA) {
			aprint_verbose(
			    ", but unused (forced off by config file)");
			sc->sc_dma_ok = 0;
		} else {
			bool disable;

			/* ...or via a platform device property. */
			if (prop_dictionary_get_bool(
			    device_properties(sc->sc_wdcdev.sc_atac.atac_dev),
			    "pciide-disable-dma", &disable) && disable) {
				aprint_verbose(
				    ", but unused (disabled by platform)");
				sc->sc_dma_ok = 0;
			}
		}
		break;

	default:
		sc->sc_dma_ok = 0;
		aprint_verbose(
		    ", but unsupported register maptype (0x%x)", maptype);
	}

	if (sc->sc_dma_ok == 0)
		return;

	/*
	 * Set up the default handles for the DMA registers.
	 * Just reserve 32 bits for each handle, unless space
	 * doesn't permit it.
	 */
	for (chan = 0; chan < PCIIDE_NUM_CHANNELS; chan++) {
		pc = &sc->pciide_channels[chan];
		for (reg = 0; reg < IDEDMA_NREGS; reg++) {
			size = 4;
			if (size > (IDEDMA_SCH_OFFSET - reg))
				size = IDEDMA_SCH_OFFSET - reg;
			if (bus_space_subregion(sc->sc_dma_iot, sc->sc_dma_ioh,
			    IDEDMA_SCH_OFFSET * chan + reg, size,
			    &pc->dma_iohs[reg]) != 0) {
				sc->sc_dma_ok = 0;
				aprint_verbose(", but can't subregion offset %d "
				    "size %lu", reg, (u_long)size);
				return;
			}
		}
	}
}
#endif	/* NATA_DMA */

/*
 * Interrupt handler for a compatibility channel: simply forward to
 * the generic wdc interrupt handler for that channel.
 */
int
pciide_compat_intr(void *arg)
{
	struct pciide_channel *cp = arg;

#ifdef DIAGNOSTIC
	/* should only be called for a compat channel */
	if (cp->compat == 0)
		panic("pciide compat intr called for non-compat chan %p", cp);
#endif
	return (wdcintr(&cp->ata_channel));
}

/*
 * Shared interrupt handler for native-PCI channels: poll every
 * native channel that is waiting for an interrupt and combine the
 * per-channel results into a single claimed/unclaimed return value.
 */
int
pciide_pci_intr(void *arg)
{
	struct pciide_softc *sc = arg;
	struct pciide_channel *cp;
	struct ata_channel *wdc_cp;
	int i, rv, crv;

	rv = 0;
	for (i = 0; i < sc->sc_wdcdev.sc_atac.atac_nchannels; i++) {
		cp = &sc->pciide_channels[i];
		wdc_cp = &cp->ata_channel;

		/* If a compat channel skip.
		 */
		if (cp->compat)
			continue;

		/* if this channel not waiting for intr, skip */
		if ((wdc_cp->ch_flags & ATACH_IRQ_WAIT) == 0)
			continue;

		crv = wdcintr(wdc_cp);
		if (crv == 0)
			;		/* leave rv alone */
		else if (crv == 1)
			rv = 1;		/* claim the intr */
		else if (rv == 0)	/* crv should be -1 in this case */
			rv = crv;	/* if we've done no better, take it */
	}
	return (rv);
}

#if NATA_DMA
/*
 * For each drive on the channel, clear the (U)DMA flags if DMA is
 * unavailable, or allocate the drive's DMA tables; on table-setup
 * failure the drive falls back to non-DMA operation.
 */
void
pciide_channel_dma_setup(struct pciide_channel *cp)
{
	int drive, s;
	struct pciide_softc *sc = CHAN_TO_PCIIDE(&cp->ata_channel);
	struct ata_drive_datas *drvp;

	KASSERT(cp->ata_channel.ch_ndrives != 0);

	for (drive = 0; drive < cp->ata_channel.ch_ndrives; drive++) {
		drvp = &cp->ata_channel.ch_drive[drive];
		/* If no drive, skip */
		if (drvp->drive_type == ATA_DRIVET_NONE)
			continue;
		/* setup DMA if needed */
		if (((drvp->drive_flags & ATA_DRIVE_DMA) == 0 &&
		    (drvp->drive_flags & ATA_DRIVE_UDMA) == 0) ||
		    sc->sc_dma_ok == 0) {
			s = splbio();
			drvp->drive_flags &= ~(ATA_DRIVE_DMA | ATA_DRIVE_UDMA);
			splx(s);
			continue;
		}
		if (pciide_dma_table_setup(sc, cp->ata_channel.ch_channel,
		    drive) != 0) {
			/* Abort DMA setup */
			s = splbio();
			drvp->drive_flags &= ~(ATA_DRIVE_DMA | ATA_DRIVE_UDMA);
			splx(s);
			continue;
		}
	}
}

/* Worst-case number of descriptor-table entries for one MAXPHYS transfer. */
#define NIDEDMA_TABLES(sc) \
	(MAXPHYS/(uimin((sc)->sc_dma_maxsegsz, PAGE_SIZE)) + 1)

/*
 * Allocate, map and load the descriptor table for one drive, and
 * create the xfer DMA map used to load data buffers.  Idempotent:
 * returns 0 immediately if the table already exists.
 */
int
pciide_dma_table_setup(struct pciide_softc *sc, int channel, int drive)
{
	int error;
	const bus_size_t dma_table_size =
	    sizeof(struct idedma_table) * NIDEDMA_TABLES(sc);
	struct pciide_dma_maps *dma_maps =
	    &sc->pciide_channels[channel].dma_maps[drive];

	/* If table was already allocated, just return */
	if (dma_maps->dma_table)
		return 0;

	/* Allocate memory for the DMA tables and map it */
	if ((error = bus_dmamem_alloc(sc->sc_dmat,
	    dma_table_size,
	    IDEDMA_TBL_ALIGN, IDEDMA_TBL_ALIGN, &dma_maps->dmamap_table_seg,
	    1, &dma_maps->dmamap_table_nseg, BUS_DMA_NOWAIT)) != 0) {
		aprint_error(dmaerrfmt,
		    device_xname(sc->sc_wdcdev.sc_atac.atac_dev), channel,
		    "allocate", drive, error);
		return error;
	}
	if ((error = bus_dmamem_map(sc->sc_dmat, &dma_maps->dmamap_table_seg,
	    dma_maps->dmamap_table_nseg, dma_table_size,
	    (void **)&dma_maps->dma_table,
	    BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
		aprint_error(dmaerrfmt,
		    device_xname(sc->sc_wdcdev.sc_atac.atac_dev), channel,
		    "map", drive, error);
		return error;
	}
	ATADEBUG_PRINT(("pciide_dma_table_setup: table at %p len %lu, "
	    "phy 0x%lx\n", dma_maps->dma_table, (u_long)dma_table_size,
	    (unsigned long)dma_maps->dmamap_table_seg.ds_addr), DEBUG_PROBE);
	/* Create and load table DMA map for this disk */
	if ((error = bus_dmamap_create(sc->sc_dmat, dma_table_size,
	    1, dma_table_size, IDEDMA_TBL_ALIGN, BUS_DMA_NOWAIT,
	    &dma_maps->dmamap_table)) != 0) {
		aprint_error(dmaerrfmt,
		    device_xname(sc->sc_wdcdev.sc_atac.atac_dev), channel,
		    "create", drive, error);
		return error;
	}
	if ((error = bus_dmamap_load(sc->sc_dmat,
	    dma_maps->dmamap_table,
	    dma_maps->dma_table,
	    dma_table_size, NULL, BUS_DMA_NOWAIT)) != 0) {
		aprint_error(dmaerrfmt,
		    device_xname(sc->sc_wdcdev.sc_atac.atac_dev), channel,
		    "load", drive, error);
		return error;
	}
	ATADEBUG_PRINT(("pciide_dma_table_setup: phy addr of table 0x%lx\n",
	    (unsigned long)dma_maps->dmamap_table->dm_segs[0].ds_addr),
	    DEBUG_PROBE);
	/* Create a xfer DMA map for this drive */
	if ((error = bus_dmamap_create(sc->sc_dmat, MAXPHYS,
	    NIDEDMA_TABLES(sc), sc->sc_dma_maxsegsz, sc->sc_dma_boundary,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
	    &dma_maps->dmamap_xfer)) != 0) {
		aprint_error(dmaerrfmt,
		    device_xname(sc->sc_wdcdev.sc_atac.atac_dev), channel,
		    "create xfer", drive, error);
		return error;
	}
	return 0;
}

/*
 * Release everything allocated by pciide_dma_table_setup() for one
 * drive; a no-op if no table was ever allocated.
 */
void
pciide_dma_table_teardown(struct pciide_softc *sc, int channel, int drive)
{
	struct pciide_channel *cp;
	struct pciide_dma_maps *dma_maps;

	cp = &sc->pciide_channels[channel];
	dma_maps = &cp->dma_maps[drive];

	if (dma_maps->dma_table == NULL)
		return;

	/* tear down in the reverse order of the setup path */
	bus_dmamap_destroy(sc->sc_dmat, dma_maps->dmamap_xfer);
	bus_dmamap_unload(sc->sc_dmat, dma_maps->dmamap_table);
	bus_dmamap_destroy(sc->sc_dmat, dma_maps->dmamap_table);
	bus_dmamem_unmap(sc->sc_dmat, dma_maps->dma_table,
	    sizeof(struct idedma_table) * NIDEDMA_TABLES(sc));
	bus_dmamem_free(sc->sc_dmat, &dma_maps->dmamap_table_seg,
	    dma_maps->dmamap_table_nseg);

	/* mark the table as gone so a later setup reallocates it */
	dma_maps->dma_table = NULL;

	return;
}

/*
 * Load the xfer DMA map for a data buffer and fill in the descriptor
 * table for the transfer.  Returns EINVAL for segments with odd
 * addresses or lengths (callers then fall back to PIO), or the
 * bus_dma error on load failure.
 */
int
pciide_dma_dmamap_setup(struct pciide_softc *sc, int channel, int drive,
    void *databuf, size_t datalen, int flags)
{
	int error, seg;
	struct pciide_channel *cp = &sc->pciide_channels[channel];
	struct pciide_dma_maps *dma_maps = &cp->dma_maps[drive];

	error = bus_dmamap_load(sc->sc_dmat,
	    dma_maps->dmamap_xfer,
	    databuf, datalen, NULL, BUS_DMA_NOWAIT | BUS_DMA_STREAMING |
	    ((flags & WDC_DMA_READ) ? BUS_DMA_READ : BUS_DMA_WRITE));
	if (error) {
		aprint_error(dmaerrfmt,
		    device_xname(sc->sc_wdcdev.sc_atac.atac_dev), channel,
		    "load xfer", drive, error);
		return error;
	}

	bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0,
	    dma_maps->dmamap_xfer->dm_mapsize,
	    (flags & WDC_DMA_READ) ?
	    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);

	for (seg = 0; seg < dma_maps->dmamap_xfer->dm_nsegs; seg++) {
		bus_addr_t phys = dma_maps->dmamap_xfer->dm_segs[seg].ds_addr;
		bus_size_t len = dma_maps->dmamap_xfer->dm_segs[seg].ds_len;

#ifdef DIAGNOSTIC
		/* A segment must not cross a 64k boundary */
		{
		if ((phys & ~IDEDMA_BYTE_COUNT_MASK) !=
		    ((phys + len - 1) & ~IDEDMA_BYTE_COUNT_MASK)) {
			printf("pciide_dma: seg %d addr 0x%" PRIx64
			    " len 0x%" PRIx64 " not properly aligned\n",
			    seg, (uint64_t)phys, (uint64_t)len);
			panic("pciide_dma: buf align");
		}
		}
#endif
		/*
		 * Some controllers get really upset if the length
		 * of any DMA segment is odd. This isn't something
		 * that's going to happen in normal steady-state
		 * operation (reading VM pages, etc.), but physio users
		 * don't have as many guard rails.
		 *
		 * Consider an 8K read request that starts at an odd
		 * offset within a page. At first blush, all of the
		 * checks pass because it's a sector-rounded size, but
		 * unless the buffer spans 2 physically contiguous pages,
		 * it's going to result in 2 odd-length DMA segments.
		 *
		 * Odd start addresses are also frowned upon, so we
		 * catch those here, too.
		 *
		 * Returning EINVAL here will cause the upper layers to
		 * fall back onto PIO.
		 */
		if ((phys & 1) != 0 || (len & 1) != 0) {
			aprint_verbose_dev(sc->sc_wdcdev.sc_atac.atac_dev,
			    "Invalid DMA segment: "
			    "seg %d addr 0x%" PRIx64 " len 0x%" PRIx64 "\n",
			    seg, (uint64_t)phys, (uint64_t)len);
			bus_dmamap_unload(sc->sc_dmat, dma_maps->dmamap_xfer);
			return EINVAL;
		}
		/* descriptor entries are little-endian on the wire */
		dma_maps->dma_table[seg].base_addr = htole32(phys);
		dma_maps->dma_table[seg].byte_count =
		    htole32(len & IDEDMA_BYTE_COUNT_MASK);
		ATADEBUG_PRINT(("\t seg %d len %d addr 0x%x\n",
		    seg, le32toh(dma_maps->dma_table[seg].byte_count),
		    le32toh(dma_maps->dma_table[seg].base_addr)), DEBUG_DMA);

	}
	/* flag the last entry as end-of-table */
	dma_maps->dma_table[dma_maps->dmamap_xfer->dm_nsegs -1].byte_count |=
	    htole32(IDEDMA_BYTE_COUNT_EOT);

	bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_table, 0,
	    dma_maps->dmamap_table->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

#ifdef DIAGNOSTIC
	if (dma_maps->dmamap_table->dm_segs[0].ds_addr & ~IDEDMA_TBL_MASK) {
		printf("pciide_dma_dmamap_setup: addr 0x%lx "
		    "not properly aligned\n",
		    (u_long)dma_maps->dmamap_table->dm_segs[0].ds_addr);
		panic("pciide_dma_init: table align");
	}
#endif
	/* remember flags */
	dma_maps->dma_flags = flags;

	return 0;
}

/*
 * dma_init entry point (hooked into sc_wdcdev in pciide_mapreg_dma):
 * build the DMA maps for the transfer and program the bus-master
 * status, table-pointer and command registers.
 */
int
pciide_dma_init(void *v, int channel, int drive, void *databuf, size_t datalen,
    int flags)
{
	struct pciide_softc *sc = v;
	int error;
	struct pciide_channel *cp = &sc->pciide_channels[channel];
	struct pciide_dma_maps *dma_maps = &cp->dma_maps[drive];

	if ((error = pciide_dma_dmamap_setup(sc, channel, drive,
	    databuf, datalen, flags)) != 0)
		return error;
	/* Maps are ready.
	   Start DMA function */
	/* Clear status bits */
	bus_space_write_1(sc->sc_dma_iot, cp->dma_iohs[IDEDMA_CTL], 0,
	    bus_space_read_1(sc->sc_dma_iot, cp->dma_iohs[IDEDMA_CTL], 0));
	/* Write table addr */
	bus_space_write_4(sc->sc_dma_iot, cp->dma_iohs[IDEDMA_TBL], 0,
	    dma_maps->dmamap_table->dm_segs[0].ds_addr);
	/* set read/write */
	bus_space_write_1(sc->sc_dma_iot, cp->dma_iohs[IDEDMA_CMD], 0,
	    ((flags & WDC_DMA_READ) ? IDEDMA_CMD_WRITE : 0) | cp->idedma_cmd);
	return 0;
}

/*
 * dma_start entry point: set the start bit in the channel's
 * bus-master command register, preserving the other bits.
 */
void
pciide_dma_start(void *v, int channel, int drive)
{
	struct pciide_softc *sc = v;
	struct pciide_channel *cp = &sc->pciide_channels[channel];

	ATADEBUG_PRINT(("pciide_dma_start\n"),DEBUG_XFERS);
	bus_space_write_1(sc->sc_dma_iot, cp->dma_iohs[IDEDMA_CMD], 0,
	    bus_space_read_1(sc->sc_dma_iot, cp->dma_iohs[IDEDMA_CMD], 0)
	    | IDEDMA_CMD_START);
}

/*
 * dma_finish entry point: stop the DMA engine, unload the xfer map
 * and translate the controller status bits into WDC_DMAST_* flags.
 */
int
pciide_dma_finish(void *v, int channel, int drive, int force)
{
	struct pciide_softc *sc = v;
	u_int8_t status;
	int error = 0;
	struct pciide_channel *cp = &sc->pciide_channels[channel];
	struct pciide_dma_maps *dma_maps = &cp->dma_maps[drive];

	status = bus_space_read_1(sc->sc_dma_iot, cp->dma_iohs[IDEDMA_CTL], 0);
	ATADEBUG_PRINT(("pciide_dma_finish: status 0x%x\n", status),
	    DEBUG_XFERS);

	/* at end-of-transfer, nothing to do yet if no interrupt is pending */
	if (force == WDC_DMAEND_END && (status & IDEDMA_CTL_INTR) == 0)
		return WDC_DMAST_NOIRQ;

	/* stop DMA channel */
	bus_space_write_1(sc->sc_dma_iot, cp->dma_iohs[IDEDMA_CMD], 0,
	    bus_space_read_1(sc->sc_dma_iot, cp->dma_iohs[IDEDMA_CMD], 0)
	    & ~IDEDMA_CMD_START);

	/* Unload the map of the data buffer */
	bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0,
	    dma_maps->dmamap_xfer->dm_mapsize,
	    (dma_maps->dma_flags & WDC_DMA_READ) ?
	    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_dmat, dma_maps->dmamap_xfer);

	if ((status & IDEDMA_CTL_ERR) != 0 && force != WDC_DMAEND_ABRT_QUIET) {
		aprint_error("%s:%d:%d: bus-master DMA error: status=0x%x\n",
		    device_xname(sc->sc_wdcdev.sc_atac.atac_dev), channel,
		    drive, status);
		error |= WDC_DMAST_ERR;
	}

	if ((status & IDEDMA_CTL_INTR) == 0 && force != WDC_DMAEND_ABRT_QUIET) {
		aprint_error("%s:%d:%d: bus-master DMA error: missing "
		    "interrupt, status=0x%x\n",
		    device_xname(sc->sc_wdcdev.sc_atac.atac_dev),
		    channel, drive, status);
		error |= WDC_DMAST_NOIRQ;
	}

	if ((status & IDEDMA_CTL_ACT) != 0 && force != WDC_DMAEND_ABRT_QUIET) {
		/* data underrun, may be a valid condition for ATAPI */
		error |= WDC_DMAST_UNDER;
	}
	return error;
}

/*
 * Acknowledge a channel interrupt by writing the bus-master status
 * register back to itself (its status bits are write-1-to-clear).
 */
void
pciide_irqack(struct ata_channel *chp)
{
	struct pciide_channel *cp = CHAN_TO_PCHAN(chp);
	struct pciide_softc *sc = CHAN_TO_PCIIDE(chp);

	/* clear status bits in IDE DMA registers */
	bus_space_write_1(sc->sc_dma_iot, cp->dma_iohs[IDEDMA_CTL], 0,
	    bus_space_read_1(sc->sc_dma_iot, cp->dma_iohs[IDEDMA_CTL], 0));
}
#endif	/* NATA_DMA */

/* some common code used by several chip_map */
int
pciide_chansetup(struct pciide_softc *sc, int channel, pcireg_t interface)
{
	struct pciide_channel *cp = &sc->pciide_channels[channel];
	sc->wdc_chanarray[channel] = &cp->ata_channel;
	cp->name = PCIIDE_CHANNEL_NAME(channel);
	cp->ata_channel.ch_channel = channel;
	cp->ata_channel.ch_atac = &sc->sc_wdcdev.sc_atac;

	aprint_verbose_dev(sc->sc_wdcdev.sc_atac.atac_dev,
	    "%s channel %s to %s mode\n", cp->name,
	    (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ?
	    "configured" : "wired",
	    (interface & PCIIDE_INTERFACE_PCI(channel)) ?
922 "native-PCI" : "compatibility"); 923 return 1; 924 } 925 926 /* some common code used by several chip channel_map */ 927 void 928 pciide_mapchan(const struct pci_attach_args *pa, struct pciide_channel *cp, 929 pcireg_t interface, int (*pci_intr)(void *)) 930 { 931 struct ata_channel *wdc_cp = &cp->ata_channel; 932 933 if (interface & PCIIDE_INTERFACE_PCI(wdc_cp->ch_channel)) 934 pciide_mapregs_native(pa, cp, pci_intr); 935 else { 936 pciide_mapregs_compat(pa, cp, wdc_cp->ch_channel); 937 if ((cp->ata_channel.ch_flags & ATACH_DISABLED) == 0) 938 pciide_map_compat_intr(pa, cp, wdc_cp->ch_channel); 939 } 940 wdcattach(wdc_cp); 941 } 942 943 /* 944 * generic code to map the compat intr. 945 */ 946 void 947 pciide_map_compat_intr(const struct pci_attach_args *pa, 948 struct pciide_channel *cp, int compatchan) 949 { 950 struct pciide_softc *sc = CHAN_TO_PCIIDE(&cp->ata_channel); 951 952 #ifdef __HAVE_PCIIDE_MACHDEP_COMPAT_INTR_ESTABLISH 953 cp->ih = 954 pciide_machdep_compat_intr_establish(sc->sc_wdcdev.sc_atac.atac_dev, 955 pa, compatchan, pciide_compat_intr, cp); 956 if (cp->ih == NULL) { 957 #endif 958 aprint_error_dev(sc->sc_wdcdev.sc_atac.atac_dev, 959 "no compatibility interrupt for use by %s " 960 "channel\n", cp->name); 961 cp->ata_channel.ch_flags |= ATACH_DISABLED; 962 #ifdef __HAVE_PCIIDE_MACHDEP_COMPAT_INTR_ESTABLISH 963 } 964 #endif 965 } 966 967 void 968 pciide_unmap_compat_intr(pci_chipset_tag_t pc, struct pciide_channel *cp, 969 int compatchan) 970 { 971 #ifdef __HAVE_PCIIDE_MACHDEP_COMPAT_INTR_DISESTABLISH 972 struct pciide_softc *sc = CHAN_TO_PCIIDE(&cp->ata_channel); 973 974 pciide_machdep_compat_intr_disestablish(sc->sc_wdcdev.sc_atac.atac_dev, 975 sc->sc_pc, compatchan, cp->ih); 976 #endif 977 } 978 979 void 980 default_chip_map(struct pciide_softc *sc, const struct pci_attach_args *pa) 981 { 982 struct pciide_channel *cp; 983 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 984 pcireg_t csr; 985 int channel; 986 #if NATA_DMA 987 int drive; 988 
u_int8_t idedma_ctl; 989 #endif 990 const char *failreason; 991 struct wdc_regs *wdr; 992 993 if (pciide_chipen(sc, pa) == 0) 994 return; 995 996 if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) { 997 #if NATA_DMA 998 aprint_verbose_dev(sc->sc_wdcdev.sc_atac.atac_dev, 999 "bus-master DMA support present"); 1000 if (sc->sc_pp == &default_product_desc && 1001 (device_cfdata(sc->sc_wdcdev.sc_atac.atac_dev)->cf_flags & 1002 PCIIDE_OPTIONS_DMA) == 0) { 1003 aprint_verbose(", but unused (no driver support)"); 1004 sc->sc_dma_ok = 0; 1005 } else { 1006 pciide_mapreg_dma(sc, pa); 1007 if (sc->sc_dma_ok != 0) 1008 aprint_verbose(", used without full driver " 1009 "support"); 1010 } 1011 #else 1012 aprint_verbose_dev(sc->sc_wdcdev.sc_atac.atac_dev, 1013 "bus-master DMA support present, but unused (no driver " 1014 "support)"); 1015 #endif /* NATA_DMA */ 1016 } else { 1017 aprint_verbose_dev(sc->sc_wdcdev.sc_atac.atac_dev, 1018 "hardware does not support DMA"); 1019 #if NATA_DMA 1020 sc->sc_dma_ok = 0; 1021 #endif 1022 } 1023 aprint_verbose("\n"); 1024 #if NATA_DMA 1025 if (sc->sc_dma_ok) { 1026 sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_DMA; 1027 sc->sc_wdcdev.irqack = pciide_irqack; 1028 } 1029 #endif 1030 sc->sc_wdcdev.sc_atac.atac_pio_cap = 0; 1031 #if NATA_DMA 1032 sc->sc_wdcdev.sc_atac.atac_dma_cap = 0; 1033 #endif 1034 1035 sc->sc_wdcdev.sc_atac.atac_channels = sc->wdc_chanarray; 1036 sc->sc_wdcdev.sc_atac.atac_nchannels = PCIIDE_NUM_CHANNELS; 1037 sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_DATA16; 1038 sc->sc_wdcdev.wdc_maxdrives = 2; 1039 1040 wdc_allocate_regs(&sc->sc_wdcdev); 1041 1042 for (channel = 0; channel < sc->sc_wdcdev.sc_atac.atac_nchannels; 1043 channel++) { 1044 cp = &sc->pciide_channels[channel]; 1045 if (pciide_chansetup(sc, channel, interface) == 0) 1046 continue; 1047 wdr = CHAN_TO_WDC_REGS(&cp->ata_channel); 1048 if (interface & PCIIDE_INTERFACE_PCI(channel)) 1049 pciide_mapregs_native(pa, cp, pciide_pci_intr); 1050 else 1051 
pciide_mapregs_compat(pa, cp, 1052 cp->ata_channel.ch_channel); 1053 if (cp->ata_channel.ch_flags & ATACH_DISABLED) 1054 continue; 1055 /* 1056 * Check to see if something appears to be there. 1057 */ 1058 failreason = NULL; 1059 /* 1060 * In native mode, always enable the controller. It's 1061 * not possible to have an ISA board using the same address 1062 * anyway. 1063 */ 1064 if (interface & PCIIDE_INTERFACE_PCI(channel)) { 1065 wdcattach(&cp->ata_channel); 1066 continue; 1067 } 1068 if (!wdcprobe(CHAN_TO_WDC_REGS(&cp->ata_channel))) { 1069 failreason = "not responding; disabled or no drives?"; 1070 goto next; 1071 } 1072 /* 1073 * Now, make sure it's actually attributable to this PCI IDE 1074 * channel by trying to access the channel again while the 1075 * PCI IDE controller's I/O space is disabled. (If the 1076 * channel no longer appears to be there, it belongs to 1077 * this controller.) YUCK! 1078 */ 1079 csr = pci_conf_read(sc->sc_pc, sc->sc_tag, 1080 PCI_COMMAND_STATUS_REG); 1081 pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_COMMAND_STATUS_REG, 1082 csr & ~PCI_COMMAND_IO_ENABLE); 1083 if (wdcprobe(CHAN_TO_WDC_REGS(&cp->ata_channel))) 1084 failreason = "other hardware responding at addresses"; 1085 pci_conf_write(sc->sc_pc, sc->sc_tag, 1086 PCI_COMMAND_STATUS_REG, csr); 1087 next: 1088 if (failreason) { 1089 aprint_error_dev(sc->sc_wdcdev.sc_atac.atac_dev, 1090 "%s channel ignored (%s)\n", cp->name, failreason); 1091 cp->ata_channel.ch_flags |= ATACH_DISABLED; 1092 bus_space_unmap(wdr->cmd_iot, wdr->cmd_baseioh, 1093 wdr->cmd_ios); 1094 bus_space_unmap(wdr->ctl_iot, wdr->ctl_ioh, 1095 wdr->ctl_ios); 1096 } else { 1097 pciide_map_compat_intr(pa, cp, 1098 cp->ata_channel.ch_channel); 1099 wdcattach(&cp->ata_channel); 1100 } 1101 } 1102 1103 #if NATA_DMA 1104 if (sc->sc_dma_ok == 0) 1105 return; 1106 1107 /* Allocate DMA maps */ 1108 for (channel = 0; channel < sc->sc_wdcdev.sc_atac.atac_nchannels; 1109 channel++) { 1110 idedma_ctl = 0; 1111 cp = 
&sc->pciide_channels[channel]; 1112 for (drive = 0; drive < sc->sc_wdcdev.wdc_maxdrives; drive++) { 1113 /* 1114 * we have not probed the drives yet, allocate 1115 * resources for all of them. 1116 */ 1117 if (pciide_dma_table_setup(sc, channel, drive) != 0) { 1118 /* Abort DMA setup */ 1119 aprint_error( 1120 "%s:%d:%d: can't allocate DMA maps, " 1121 "using PIO transfers\n", 1122 device_xname( 1123 sc->sc_wdcdev.sc_atac.atac_dev), 1124 channel, drive); 1125 sc->sc_dma_ok = 0; 1126 sc->sc_wdcdev.sc_atac.atac_cap &= ~ATAC_CAP_DMA; 1127 sc->sc_wdcdev.irqack = NULL; 1128 break; 1129 } 1130 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 1131 } 1132 if (idedma_ctl != 0) { 1133 /* Add software bits in status register */ 1134 bus_space_write_1(sc->sc_dma_iot, 1135 cp->dma_iohs[IDEDMA_CTL], 0, idedma_ctl); 1136 } 1137 } 1138 #endif /* NATA_DMA */ 1139 } 1140 1141 void 1142 sata_setup_channel(struct ata_channel *chp) 1143 { 1144 #if NATA_DMA 1145 struct ata_drive_datas *drvp; 1146 int drive; 1147 #if NATA_UDMA 1148 int s; 1149 #endif 1150 u_int32_t idedma_ctl; 1151 struct pciide_channel *cp = CHAN_TO_PCHAN(chp); 1152 struct pciide_softc *sc = CHAN_TO_PCIIDE(chp); 1153 1154 /* setup DMA if needed */ 1155 pciide_channel_dma_setup(cp); 1156 1157 idedma_ctl = 0; 1158 1159 KASSERT(cp->ata_channel.ch_ndrives != 0); 1160 for (drive = 0; drive < cp->ata_channel.ch_ndrives; drive++) { 1161 drvp = &chp->ch_drive[drive]; 1162 /* If no drive, skip */ 1163 if (drvp->drive_type == ATA_DRIVET_NONE) 1164 continue; 1165 #if NATA_UDMA 1166 if (drvp->drive_flags & ATA_DRIVE_UDMA) { 1167 /* use Ultra/DMA */ 1168 s = splbio(); 1169 drvp->drive_flags &= ~ATA_DRIVE_DMA; 1170 splx(s); 1171 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 1172 } else 1173 #endif /* NATA_UDMA */ 1174 if (drvp->drive_flags & ATA_DRIVE_DMA) { 1175 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 1176 } 1177 } 1178 1179 /* 1180 * Nothing to do to setup modes; it is meaningless in S-ATA 1181 * (but many S-ATA drives still want to get the 
SET_FEATURE 1182 * command). 1183 */ 1184 if (idedma_ctl != 0) { 1185 /* Add software bits in status register */ 1186 bus_space_write_1(sc->sc_dma_iot, cp->dma_iohs[IDEDMA_CTL], 0, 1187 idedma_ctl); 1188 } 1189 #endif /* NATA_DMA */ 1190 } 1191