/*	$NetBSD: sdhc.c,v 1.122 2025/09/02 22:22:14 jmcneill Exp $	*/
/*	$OpenBSD: sdhc.c,v 1.25 2009/01/13 19:44:20 grange Exp $	*/

/*
 * Copyright (c) 2006 Uwe Stuehler <uwe (at) openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * SD Host Controller driver based on the SD Host Controller Standard
 * Simplified Specification Version 1.00 (www.sdcard.org).
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sdhc.c,v 1.122 2025/09/02 22:22:14 jmcneill Exp $");

#ifdef _KERNEL_OPT
#include "opt_sdmmc.h"
#endif

#include <sys/param.h>
#include <sys/device.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/systm.h>
#include <sys/mutex.h>
#include <sys/condvar.h>
#include <sys/atomic.h>

#include <dev/sdmmc/sdhcreg.h>
#include <dev/sdmmc/sdhcvar.h>
#include <dev/sdmmc/sdmmcchip.h>
#include <dev/sdmmc/sdmmcreg.h>
#include <dev/sdmmc/sdmmcvar.h>

#ifdef SDHC_DEBUG
int sdhcdebug = 1;
#define DPRINTF(n,s)	do { if ((n) <= sdhcdebug) printf s; } while (0)
void	sdhc_dump_regs(struct sdhc_host *);
#else
#define DPRINTF(n,s)	do {} while (0)
#endif

#define SDHC_COMMAND_TIMEOUT	hz
#define SDHC_BUFFER_TIMEOUT	hz
#define SDHC_TRANSFER_TIMEOUT	hz
#define SDHC_DMA_TIMEOUT	(hz*3)
#define SDHC_TUNING_TIMEOUT	hz

struct sdhc_host {
	struct sdhc_softc *sc;		/* host controller device */

	bus_space_tag_t iot;		/* host register set tag */
	bus_space_handle_t ioh;		/* host register set handle */
	bus_size_t ios;			/* host register space size */
	bus_dma_tag_t dmat;		/* host DMA tag */

	device_t sdmmc;			/* generic SD/MMC device */

	u_int clkbase;			/* base clock frequency in kHz */
	int maxblklen;			/* maximum block length */
	uint32_t ocr;			/* OCR value from capabilities */

	uint8_t regs[14];		/* host controller state */

	uint16_t intr_status;		/* soft interrupt status */
	uint16_t intr_error_status;	/* soft error status */
	kmutex_t intr_lock;
	kmutex_t bus_clock_lock;
	kcondvar_t intr_cv;

	callout_t tuning_timer;
	int tuning_timing;
	u_int tuning_timer_count;
	u_int tuning_timer_pending;

	int specver;			/* spec.
version */ 89 90 uint32_t flags; /* flags for this host */ 91 #define SHF_USE_DMA 0x0001 92 #define SHF_USE_4BIT_MODE 0x0002 93 #define SHF_USE_8BIT_MODE 0x0004 94 #define SHF_MODE_DMAEN 0x0008 /* needs SDHC_DMA_ENABLE in mode */ 95 #define SHF_USE_ADMA2_32 0x0010 96 #define SHF_USE_ADMA2_64 0x0020 97 #define SHF_USE_ADMA2_MASK 0x0030 98 99 bus_dmamap_t adma_map; 100 bus_dma_segment_t adma_segs[1]; 101 void *adma2; 102 103 uint8_t vdd; /* last vdd setting */ 104 }; 105 106 #define HDEVNAME(hp) (device_xname((hp)->sc->sc_dev)) 107 108 static uint8_t 109 hread1(struct sdhc_host *hp, bus_size_t reg) 110 { 111 112 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_32BIT_ACCESS)) 113 return bus_space_read_1(hp->iot, hp->ioh, reg); 114 return bus_space_read_4(hp->iot, hp->ioh, reg & -4) >> (8 * (reg & 3)); 115 } 116 117 static uint16_t 118 hread2(struct sdhc_host *hp, bus_size_t reg) 119 { 120 121 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_32BIT_ACCESS)) 122 return bus_space_read_2(hp->iot, hp->ioh, reg); 123 return bus_space_read_4(hp->iot, hp->ioh, reg & -4) >> (8 * (reg & 2)); 124 } 125 126 #define HREAD1(hp, reg) hread1(hp, reg) 127 #define HREAD2(hp, reg) hread2(hp, reg) 128 #define HREAD4(hp, reg) \ 129 (bus_space_read_4((hp)->iot, (hp)->ioh, (reg))) 130 131 132 static void 133 hwrite1(struct sdhc_host *hp, bus_size_t o, uint8_t val) 134 { 135 136 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_32BIT_ACCESS)) { 137 bus_space_write_1(hp->iot, hp->ioh, o, val); 138 } else { 139 const size_t shift = 8 * (o & 3); 140 o &= -4; 141 uint32_t tmp = bus_space_read_4(hp->iot, hp->ioh, o); 142 tmp = (val << shift) | (tmp & ~(0xffU << shift)); 143 bus_space_write_4(hp->iot, hp->ioh, o, tmp); 144 } 145 } 146 147 static void 148 hwrite2(struct sdhc_host *hp, bus_size_t o, uint16_t val) 149 { 150 151 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_32BIT_ACCESS)) { 152 bus_space_write_2(hp->iot, hp->ioh, o, val); 153 } else { 154 const size_t shift = 8 * (o & 2); 155 o &= -4; 156 uint32_t tmp = bus_space_read_4(hp->iot, hp->ioh, o); 157 tmp = (val << shift) | (tmp & ~(0xffffU << shift)); 158 bus_space_write_4(hp->iot, hp->ioh, o, tmp); 159 } 160 } 161 162 static void 163 hwrite4(struct sdhc_host *hp, bus_size_t o, uint32_t val) 164 { 165 166 bus_space_write_4(hp->iot, hp->ioh, o, val); 167 } 168 169 #define HWRITE1(hp, reg, val) hwrite1(hp, reg, val) 170 #define HWRITE2(hp, reg, val) hwrite2(hp, reg, val) 171 #define HWRITE4(hp, reg, val) hwrite4(hp, reg, val) 172 173 #define HCLR1(hp, reg, bits) \ 174 do if ((bits) != 0) HWRITE1((hp), (reg), HREAD1((hp), (reg)) & ~(bits)); while (0) 175 #define HCLR2(hp, reg, bits) \ 176 do if ((bits) != 0) HWRITE2((hp), (reg), HREAD2((hp), (reg)) & ~(bits)); while (0) 177 #define HCLR4(hp, reg, bits) \ 178 do if ((bits) != 0) HWRITE4((hp), (reg), HREAD4((hp), (reg)) & ~(bits)); while (0) 179 #define HSET1(hp, reg, bits) \ 180 do if ((bits) != 0) HWRITE1((hp), (reg), HREAD1((hp), (reg)) | (bits)); while (0) 181 #define HSET2(hp, reg, bits) \ 182 do if ((bits) != 0) HWRITE2((hp), (reg), HREAD2((hp), (reg)) | (bits)); while (0) 183 #define HSET4(hp, reg, bits) \ 184 do if ((bits) != 0) HWRITE4((hp), (reg), HREAD4((hp), (reg)) | (bits)); while (0) 185 186 static int sdhc_host_reset(sdmmc_chipset_handle_t); 187 static int sdhc_host_reset1(sdmmc_chipset_handle_t); 188 static uint32_t sdhc_host_ocr(sdmmc_chipset_handle_t); 189 static int sdhc_host_maxblklen(sdmmc_chipset_handle_t); 190 static int sdhc_card_detect(sdmmc_chipset_handle_t); 191 static int sdhc_write_protect(sdmmc_chipset_handle_t); 192 static int 
sdhc_bus_power(sdmmc_chipset_handle_t, uint32_t); 193 static int sdhc_bus_clock_ddr(sdmmc_chipset_handle_t, int, bool); 194 static int sdhc_bus_width(sdmmc_chipset_handle_t, int); 195 static int sdhc_bus_rod(sdmmc_chipset_handle_t, int); 196 static void sdhc_card_enable_intr(sdmmc_chipset_handle_t, int); 197 static void sdhc_card_intr_ack(sdmmc_chipset_handle_t); 198 static void sdhc_exec_command(sdmmc_chipset_handle_t, 199 struct sdmmc_command *); 200 static int sdhc_signal_voltage(sdmmc_chipset_handle_t, int); 201 static int sdhc_execute_tuning1(struct sdhc_host *, int); 202 static int sdhc_execute_tuning(sdmmc_chipset_handle_t, int); 203 static void sdhc_tuning_timer(void *); 204 static void sdhc_hw_reset(sdmmc_chipset_handle_t); 205 static int sdhc_start_command(struct sdhc_host *, struct sdmmc_command *); 206 static int sdhc_wait_state(struct sdhc_host *, uint32_t, uint32_t); 207 static int sdhc_soft_reset(struct sdhc_host *, int); 208 static int sdhc_wait_intr(struct sdhc_host *, int, int, bool); 209 static void sdhc_transfer_data(struct sdhc_host *, struct sdmmc_command *); 210 static int sdhc_transfer_data_dma(struct sdhc_host *, struct sdmmc_command *); 211 static int sdhc_transfer_data_pio(struct sdhc_host *, struct sdmmc_command *); 212 static void sdhc_read_data_pio(struct sdhc_host *, uint8_t *, u_int); 213 static void sdhc_write_data_pio(struct sdhc_host *, uint8_t *, u_int); 214 static void esdhc_read_data_pio(struct sdhc_host *, uint8_t *, u_int); 215 static void esdhc_write_data_pio(struct sdhc_host *, uint8_t *, u_int); 216 217 static struct sdmmc_chip_functions sdhc_functions = { 218 /* host controller reset */ 219 .host_reset = sdhc_host_reset, 220 221 /* host controller capabilities */ 222 .host_ocr = sdhc_host_ocr, 223 .host_maxblklen = sdhc_host_maxblklen, 224 225 /* card detection */ 226 .card_detect = sdhc_card_detect, 227 228 /* write protect */ 229 .write_protect = sdhc_write_protect, 230 231 /* bus power, clock frequency, width and ROD(OpenDrain/PushPull) */ 232 .bus_power = sdhc_bus_power, 233 .bus_clock = NULL, /* see sdhc_bus_clock_ddr */ 234 .bus_width = sdhc_bus_width, 235 .bus_rod = sdhc_bus_rod, 236 237 /* command execution */ 238 .exec_command = sdhc_exec_command, 239 240 /* card interrupt */ 241 .card_enable_intr = sdhc_card_enable_intr, 242 .card_intr_ack = sdhc_card_intr_ack, 243 244 /* UHS functions */ 245 .signal_voltage = sdhc_signal_voltage, 246 .bus_clock_ddr = sdhc_bus_clock_ddr, 247 .execute_tuning = sdhc_execute_tuning, 248 .hw_reset = sdhc_hw_reset, 249 }; 250 251 static int 252 sdhc_cfprint(void *aux, const char *pnp) 253 { 254 const struct sdmmcbus_attach_args * const saa = aux; 255 const struct sdhc_host * const hp = saa->saa_sch; 256 257 if (pnp) { 258 aprint_normal("sdmmc at %s", pnp); 259 } 260 for (size_t host = 0; host < hp->sc->sc_nhosts; host++) { 261 if (hp->sc->sc_host[host] == hp) { 262 aprint_normal(" slot %zu", host); 263 } 264 } 265 266 return UNCONF; 267 } 268 269 /* 270 * Called by attachment driver. For each SD card slot there is one SD 271 * host controller standard register set. (1.3) 272 */ 273 int 274 sdhc_host_found(struct sdhc_softc *sc, bus_space_tag_t iot, 275 bus_space_handle_t ioh, bus_size_t iosize) 276 { 277 struct sdmmcbus_attach_args saa; 278 struct sdhc_host *hp; 279 uint32_t caps, caps2; 280 uint16_t sdhcver; 281 int error; 282 283 /* Allocate one more host structure. 
*/ 284 hp = malloc(sizeof(struct sdhc_host), M_DEVBUF, M_WAITOK|M_ZERO); 285 if (hp == NULL) { 286 aprint_error_dev(sc->sc_dev, 287 "couldn't alloc memory (sdhc host)\n"); 288 goto err1; 289 } 290 sc->sc_host[sc->sc_nhosts++] = hp; 291 292 /* Fill in the new host structure. */ 293 hp->sc = sc; 294 hp->iot = iot; 295 hp->ioh = ioh; 296 hp->ios = iosize; 297 hp->dmat = sc->sc_dmat; 298 299 mutex_init(&hp->intr_lock, MUTEX_DEFAULT, IPL_SDMMC); 300 mutex_init(&hp->bus_clock_lock, MUTEX_DEFAULT, IPL_NONE); 301 cv_init(&hp->intr_cv, "sdhcintr"); 302 callout_init(&hp->tuning_timer, CALLOUT_MPSAFE); 303 callout_setfunc(&hp->tuning_timer, sdhc_tuning_timer, hp); 304 305 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_USDHC)) { 306 sdhcver = SDHC_SPEC_VERS_300 << SDHC_SPEC_VERS_SHIFT; 307 } else if (ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED)) { 308 sdhcver = HREAD4(hp, SDHC_ESDHC_HOST_CTL_VERSION); 309 } else if (iosize <= SDHC_HOST_CTL_VERSION) { 310 sdhcver = SDHC_SPEC_NOVERS << SDHC_SPEC_VERS_SHIFT; 311 } else { 312 sdhcver = HREAD2(hp, SDHC_HOST_CTL_VERSION); 313 } 314 aprint_normal_dev(sc->sc_dev, "SDHC "); 315 hp->specver = SDHC_SPEC_VERSION(sdhcver); 316 switch (SDHC_SPEC_VERSION(sdhcver)) { 317 case SDHC_SPEC_VERS_100: 318 aprint_normal("1.0"); 319 break; 320 case SDHC_SPEC_VERS_200: 321 aprint_normal("2.0"); 322 break; 323 case SDHC_SPEC_VERS_300: 324 aprint_normal("3.0"); 325 break; 326 case SDHC_SPEC_VERS_400: 327 aprint_normal("4.0"); 328 break; 329 case SDHC_SPEC_VERS_410: 330 aprint_normal("4.1"); 331 break; 332 case SDHC_SPEC_VERS_420: 333 aprint_normal("4.2"); 334 break; 335 case SDHC_SPEC_NOVERS: 336 hp->specver = -1; 337 aprint_normal("NO-VERS"); 338 break; 339 default: 340 aprint_normal("unknown version(0x%x)", 341 SDHC_SPEC_VERSION(sdhcver)); 342 break; 343 } 344 if (SDHC_SPEC_VERSION(sdhcver) != SDHC_SPEC_NOVERS) 345 aprint_normal(", rev %u", SDHC_VENDOR_VERSION(sdhcver)); 346 347 /* 348 * Reset the host controller and enable interrupts. 349 */ 350 (void)sdhc_host_reset(hp); 351 352 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_USDHC)) { 353 /* init uSDHC registers */ 354 HWRITE4(hp, SDHC_MMC_BOOT, 0); 355 HWRITE4(hp, SDHC_HOST_CTL, SDHC_USDHC_BURST_LEN_EN | 356 SDHC_USDHC_HOST_CTL_RESV23 | SDHC_USDHC_EMODE_LE); 357 HWRITE4(hp, SDHC_WATERMARK_LEVEL, 358 (0x10 << SDHC_WATERMARK_WR_BRST_SHIFT) | 359 (0x40 << SDHC_WATERMARK_WRITE_SHIFT) | 360 (0x10 << SDHC_WATERMARK_RD_BRST_SHIFT) | 361 (0x40 << SDHC_WATERMARK_READ_SHIFT)); 362 HSET4(hp, SDHC_VEND_SPEC, 363 SDHC_VEND_SPEC_MBO | 364 SDHC_VEND_SPEC_CARD_CLK_SOFT_EN | 365 SDHC_VEND_SPEC_IPG_PERCLK_SOFT_EN | 366 SDHC_VEND_SPEC_HCLK_SOFT_EN | 367 SDHC_VEND_SPEC_IPG_CLK_SOFT_EN | 368 SDHC_VEND_SPEC_AC12_WR_CHKBUSY_EN | 369 SDHC_VEND_SPEC_FRC_SDCLK_ON); 370 } 371 372 /* Determine host capabilities. 
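	 * Capabilities normally come from the controller's CAPABILITIES
	 * register(s); SDHC_FLAG_HOSTCAPS lets the attachment driver supply
	 * fixed values (sc_caps/sc_caps2) instead, and the uSDHC path below
	 * patches up the bits its register reports differently.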
	 */
	if (ISSET(sc->sc_flags, SDHC_FLAG_HOSTCAPS)) {
		caps = sc->sc_caps;
		caps2 = sc->sc_caps2;
	} else if (ISSET(hp->sc->sc_flags, SDHC_FLAG_USDHC)) {
		/* the uSDHC capability register is a little bit different */
		caps = HREAD4(hp, SDHC_CAPABILITIES);
		caps |= SDHC_8BIT_SUPP;
		if (caps & SDHC_ADMA1_SUPP)
			caps |= SDHC_ADMA2_SUPP;
		sc->sc_caps = caps;
		/* uSDHC has no SDHC_CAPABILITIES2 register */
		caps2 = sc->sc_caps2 = SDHC_SDR50_SUPP | SDHC_DDR50_SUPP;
	} else {
		caps = sc->sc_caps = HREAD4(hp, SDHC_CAPABILITIES);
		if (hp->specver >= SDHC_SPEC_VERS_300) {
			caps2 = sc->sc_caps2 = HREAD4(hp, SDHC_CAPABILITIES2);
		} else {
			caps2 = sc->sc_caps2 = 0;
		}
	}

	aprint_verbose(", caps <%08x/%08x>", caps, caps2);

	const u_int retuning_mode = (caps2 >> SDHC_RETUNING_MODES_SHIFT) &
	    SDHC_RETUNING_MODES_MASK;
	if (retuning_mode == SDHC_RETUNING_MODE_1) {
		hp->tuning_timer_count = (caps2 >> SDHC_TIMER_COUNT_SHIFT) &
		    SDHC_TIMER_COUNT_MASK;
		if (hp->tuning_timer_count == 0xf)
			hp->tuning_timer_count = 0;
		if (hp->tuning_timer_count)
			hp->tuning_timer_count =
			    1 << (hp->tuning_timer_count - 1);
	}

	/*
	 * Use DMA if the host system and the controller support it.
	 * Supports an integrated or external DMA engine, with or without
	 * SDHC_DMA_ENABLE in the command.
	 */
	if (ISSET(sc->sc_flags, SDHC_FLAG_FORCE_DMA) ||
	    (ISSET(sc->sc_flags, SDHC_FLAG_USE_DMA) &&
	     ISSET(caps, SDHC_DMA_SUPPORT))) {
		SET(hp->flags, SHF_USE_DMA);

		if (ISSET(caps, SDHC_ADMA2_SUPP) &&
		    !ISSET(sc->sc_flags, SDHC_FLAG_BROKEN_ADMA)) {
			SET(hp->flags, SHF_MODE_DMAEN);
			/*
			 * 64-bit mode was present in the 2.00 spec, removed
			 * from 3.00, and re-added in 4.00 with a different
			 * descriptor layout. We only support 2.00 and 3.00
			 * descriptors for now.
			 */
			if (hp->specver == SDHC_SPEC_VERS_200 &&
			    ISSET(caps, SDHC_64BIT_SYS_BUS)) {
				SET(hp->flags, SHF_USE_ADMA2_64);
				aprint_normal(", 64-bit ADMA2");
			} else {
				SET(hp->flags, SHF_USE_ADMA2_32);
				aprint_normal(", 32-bit ADMA2");
			}
		} else {
			if (!ISSET(sc->sc_flags, SDHC_FLAG_EXTERNAL_DMA) ||
			    ISSET(sc->sc_flags, SDHC_FLAG_EXTDMA_DMAEN))
				SET(hp->flags, SHF_MODE_DMAEN);
			if (sc->sc_vendor_transfer_data_dma) {
				aprint_normal(", platform DMA");
			} else {
				aprint_normal(", SDMA");
			}
		}
	} else {
		aprint_normal(", PIO");
	}

	/*
	 * Determine the base clock frequency. (2.2.24)
	 */
	if (hp->specver >= SDHC_SPEC_VERS_300) {
		hp->clkbase = SDHC_BASE_V3_FREQ_KHZ(caps);
	} else {
		hp->clkbase = SDHC_BASE_FREQ_KHZ(caps);
	}
	if (hp->clkbase == 0 ||
	    ISSET(sc->sc_flags, SDHC_FLAG_NO_CLKBASE)) {
		if (sc->sc_clkbase == 0) {
			/* The attachment driver must tell us. */
			aprint_error_dev(sc->sc_dev,
			    "unknown base clock frequency\n");
			goto err;
		}
		hp->clkbase = sc->sc_clkbase;
	}
	if (hp->clkbase < 10000 || hp->clkbase > 10000 * 256) {
		/* SDHC 1.0 supports only 10-63 MHz. */
		aprint_error_dev(sc->sc_dev,
		    "base clock frequency out of range: %u MHz\n",
		    hp->clkbase / 1000);
		goto err;
	}
	aprint_normal(", %u kHz", hp->clkbase);

	/*
	 * XXX Set the data timeout counter value according to
	 * capabilities.
(2.2.15) 479 */ 480 HWRITE1(hp, SDHC_TIMEOUT_CTL, SDHC_TIMEOUT_MAX); 481 #if 1 482 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED)) 483 HWRITE4(hp, SDHC_NINTR_STATUS, SDHC_CMD_TIMEOUT_ERROR << 16); 484 #endif 485 486 if (ISSET(caps, SDHC_EMBEDDED_SLOT)) 487 aprint_normal(", embedded slot"); 488 489 /* 490 * Determine SD bus voltage levels supported by the controller. 491 */ 492 aprint_normal(","); 493 if (ISSET(caps, SDHC_HIGH_SPEED_SUPP)) { 494 SET(hp->ocr, MMC_OCR_HCS); 495 aprint_normal(" HS"); 496 } 497 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_NO_1_8_V)) { 498 if (ISSET(caps2, SDHC_SDR50_SUPP)) { 499 SET(hp->ocr, MMC_OCR_S18A); 500 aprint_normal(" SDR50"); 501 } 502 if (ISSET(caps2, SDHC_DDR50_SUPP)) { 503 SET(hp->ocr, MMC_OCR_S18A); 504 aprint_normal(" DDR50"); 505 } 506 if (ISSET(caps2, SDHC_SDR104_SUPP)) { 507 SET(hp->ocr, MMC_OCR_S18A); 508 aprint_normal(" SDR104 HS200"); 509 } 510 if (ISSET(caps, SDHC_VOLTAGE_SUPP_1_8V)) { 511 SET(hp->ocr, MMC_OCR_1_65V_1_95V); 512 aprint_normal(" 1.8V"); 513 } 514 } 515 if (ISSET(caps, SDHC_VOLTAGE_SUPP_3_0V)) { 516 SET(hp->ocr, MMC_OCR_2_9V_3_0V | MMC_OCR_3_0V_3_1V); 517 aprint_normal(" 3.0V"); 518 } 519 if (ISSET(caps, SDHC_VOLTAGE_SUPP_3_3V)) { 520 SET(hp->ocr, MMC_OCR_3_2V_3_3V | MMC_OCR_3_3V_3_4V); 521 aprint_normal(" 3.3V"); 522 } 523 if (hp->specver >= SDHC_SPEC_VERS_300) { 524 aprint_normal(", re-tuning mode %d", retuning_mode + 1); 525 if (hp->tuning_timer_count) 526 aprint_normal(" (%us timer)", hp->tuning_timer_count); 527 } 528 529 /* 530 * Determine the maximum block length supported by the host 531 * controller. (2.2.24) 532 */ 533 switch((caps >> SDHC_MAX_BLK_LEN_SHIFT) & SDHC_MAX_BLK_LEN_MASK) { 534 case SDHC_MAX_BLK_LEN_512: 535 hp->maxblklen = 512; 536 break; 537 538 case SDHC_MAX_BLK_LEN_1024: 539 hp->maxblklen = 1024; 540 break; 541 542 case SDHC_MAX_BLK_LEN_2048: 543 hp->maxblklen = 2048; 544 break; 545 546 case SDHC_MAX_BLK_LEN_4096: 547 hp->maxblklen = 4096; 548 break; 549 550 default: 551 aprint_error_dev(sc->sc_dev, "max block length unknown\n"); 552 goto err; 553 } 554 aprint_normal(", %u byte blocks", hp->maxblklen); 555 aprint_normal("\n"); 556 557 if (ISSET(hp->flags, SHF_USE_ADMA2_MASK)) { 558 int rseg; 559 560 /* Allocate ADMA2 descriptor memory */ 561 error = bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE, PAGE_SIZE, 562 PAGE_SIZE, hp->adma_segs, 1, &rseg, BUS_DMA_WAITOK); 563 if (error) { 564 aprint_error_dev(sc->sc_dev, 565 "ADMA2 dmamem_alloc failed (%d)\n", error); 566 goto adma_done; 567 } 568 error = bus_dmamem_map(sc->sc_dmat, hp->adma_segs, rseg, 569 PAGE_SIZE, (void **)&hp->adma2, BUS_DMA_WAITOK); 570 if (error) { 571 aprint_error_dev(sc->sc_dev, 572 "ADMA2 dmamem_map failed (%d)\n", error); 573 goto adma_done; 574 } 575 error = bus_dmamap_create(sc->sc_dmat, PAGE_SIZE, 1, PAGE_SIZE, 576 0, BUS_DMA_WAITOK, &hp->adma_map); 577 if (error) { 578 aprint_error_dev(sc->sc_dev, 579 "ADMA2 dmamap_create failed (%d)\n", error); 580 goto adma_done; 581 } 582 error = bus_dmamap_load(sc->sc_dmat, hp->adma_map, 583 hp->adma2, PAGE_SIZE, NULL, 584 BUS_DMA_WAITOK|BUS_DMA_WRITE); 585 if (error) { 586 aprint_error_dev(sc->sc_dev, 587 "ADMA2 dmamap_load failed (%d)\n", error); 588 goto adma_done; 589 } 590 591 memset(hp->adma2, 0, PAGE_SIZE); 592 593 adma_done: 594 if (error) 595 CLR(hp->flags, SHF_USE_ADMA2_MASK); 596 } 597 598 /* 599 * Attach the generic SD/MMC bus driver. (The bus driver must 600 * not invoke any chipset functions before it is attached.) 
601 */ 602 memset(&saa, 0, sizeof(saa)); 603 saa.saa_busname = "sdmmc"; 604 saa.saa_sct = &sdhc_functions; 605 saa.saa_sch = hp; 606 saa.saa_dmat = hp->dmat; 607 saa.saa_clkmax = hp->clkbase; 608 if (ISSET(sc->sc_flags, SDHC_FLAG_HAVE_CGM)) 609 saa.saa_clkmin = hp->clkbase / 256 / 2046; 610 else if (ISSET(sc->sc_flags, SDHC_FLAG_HAVE_DVS)) 611 saa.saa_clkmin = hp->clkbase / 256 / 16; 612 else if (hp->sc->sc_clkmsk != 0) 613 saa.saa_clkmin = hp->clkbase / (hp->sc->sc_clkmsk >> 614 (ffs(hp->sc->sc_clkmsk) - 1)); 615 else if (hp->specver >= SDHC_SPEC_VERS_300) 616 saa.saa_clkmin = hp->clkbase / 0x3ff; 617 else 618 saa.saa_clkmin = hp->clkbase / 256; 619 if (!ISSET(sc->sc_flags, SDHC_FLAG_NO_AUTO_STOP)) 620 saa.saa_caps |= SMC_CAPS_AUTO_STOP; 621 saa.saa_caps |= SMC_CAPS_4BIT_MODE; 622 if (ISSET(sc->sc_flags, SDHC_FLAG_8BIT_MODE)) 623 saa.saa_caps |= SMC_CAPS_8BIT_MODE; 624 if (ISSET(caps, SDHC_HIGH_SPEED_SUPP)) 625 saa.saa_caps |= SMC_CAPS_SD_HIGHSPEED | 626 SMC_CAPS_MMC_HIGHSPEED; 627 if (ISSET(caps2, SDHC_SDR104_SUPP)) 628 saa.saa_caps |= SMC_CAPS_UHS_SDR104 | 629 SMC_CAPS_UHS_SDR50 | 630 SMC_CAPS_MMC_HS200; 631 if (ISSET(caps2, SDHC_SDR50_SUPP)) 632 saa.saa_caps |= SMC_CAPS_UHS_SDR50; 633 if (ISSET(caps2, SDHC_DDR50_SUPP)) 634 saa.saa_caps |= SMC_CAPS_UHS_DDR50; 635 if (ISSET(hp->flags, SHF_USE_DMA)) { 636 saa.saa_caps |= SMC_CAPS_DMA; 637 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED)) 638 saa.saa_caps |= SMC_CAPS_MULTI_SEG_DMA; 639 } 640 if (ISSET(sc->sc_flags, SDHC_FLAG_SINGLE_ONLY)) 641 saa.saa_caps |= SMC_CAPS_SINGLE_ONLY; 642 if (ISSET(sc->sc_flags, SDHC_FLAG_POLL_CARD_DET)) 643 saa.saa_caps |= SMC_CAPS_POLL_CARD_DET; 644 645 if (ISSET(sc->sc_flags, SDHC_FLAG_BROKEN_ADMA2_ZEROLEN)) 646 saa.saa_max_seg = 65535; 647 648 hp->sdmmc = config_found(sc->sc_dev, &saa, sdhc_cfprint, CFARGS_NONE); 649 650 return 0; 651 652 err: 653 callout_destroy(&hp->tuning_timer); 654 cv_destroy(&hp->intr_cv); 655 mutex_destroy(&hp->bus_clock_lock); 656 mutex_destroy(&hp->intr_lock); 657 free(hp, M_DEVBUF); 658 sc->sc_host[--sc->sc_nhosts] = NULL; 659 err1: 660 return 1; 661 } 662 663 int 664 sdhc_detach(struct sdhc_softc *sc, int flags) 665 { 666 struct sdhc_host *hp; 667 int rv = 0; 668 669 for (size_t n = 0; n < sc->sc_nhosts; n++) { 670 hp = sc->sc_host[n]; 671 if (hp == NULL) 672 continue; 673 if (hp->sdmmc != NULL) { 674 rv = config_detach(hp->sdmmc, flags); 675 if (rv) 676 break; 677 hp->sdmmc = NULL; 678 } 679 /* disable interrupts */ 680 if ((flags & DETACH_FORCE) == 0) { 681 mutex_enter(&hp->intr_lock); 682 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_32BIT_ACCESS)) { 683 HWRITE4(hp, SDHC_NINTR_SIGNAL_EN, 0); 684 } else { 685 HWRITE2(hp, SDHC_NINTR_SIGNAL_EN, 0); 686 } 687 sdhc_soft_reset(hp, SDHC_RESET_ALL); 688 mutex_exit(&hp->intr_lock); 689 } 690 callout_halt(&hp->tuning_timer, NULL); 691 callout_destroy(&hp->tuning_timer); 692 cv_destroy(&hp->intr_cv); 693 mutex_destroy(&hp->intr_lock); 694 if (hp->ios > 0) { 695 bus_space_unmap(hp->iot, hp->ioh, hp->ios); 696 hp->ios = 0; 697 } 698 if (ISSET(hp->flags, SHF_USE_ADMA2_MASK)) { 699 bus_dmamap_unload(sc->sc_dmat, hp->adma_map); 700 bus_dmamap_destroy(sc->sc_dmat, hp->adma_map); 701 bus_dmamem_unmap(sc->sc_dmat, hp->adma2, PAGE_SIZE); 702 bus_dmamem_free(sc->sc_dmat, hp->adma_segs, 1); 703 } 704 free(hp, M_DEVBUF); 705 sc->sc_host[n] = NULL; 706 } 707 708 return rv; 709 } 710 711 bool 712 sdhc_suspend(device_t dev, const pmf_qual_t *qual) 713 { 714 struct sdhc_softc *sc = device_private(dev); 715 struct sdhc_host *hp; 716 size_t i; 717 718 /* XXX poll 
for command completion or suspend command 719 * in progress */ 720 721 /* Save the host controller state. */ 722 for (size_t n = 0; n < sc->sc_nhosts; n++) { 723 hp = sc->sc_host[n]; 724 if (ISSET(sc->sc_flags, SDHC_FLAG_32BIT_ACCESS)) { 725 for (i = 0; i < sizeof hp->regs; i += 4) { 726 uint32_t v = HREAD4(hp, i); 727 hp->regs[i + 0] = (v >> 0); 728 hp->regs[i + 1] = (v >> 8); 729 if (i + 3 < sizeof hp->regs) { 730 hp->regs[i + 2] = (v >> 16); 731 hp->regs[i + 3] = (v >> 24); 732 } 733 } 734 } else { 735 for (i = 0; i < sizeof hp->regs; i++) { 736 hp->regs[i] = HREAD1(hp, i); 737 } 738 } 739 } 740 return true; 741 } 742 743 bool 744 sdhc_resume(device_t dev, const pmf_qual_t *qual) 745 { 746 struct sdhc_softc *sc = device_private(dev); 747 struct sdhc_host *hp; 748 size_t i; 749 750 /* Restore the host controller state. */ 751 for (size_t n = 0; n < sc->sc_nhosts; n++) { 752 hp = sc->sc_host[n]; 753 (void)sdhc_host_reset(hp); 754 if (ISSET(sc->sc_flags, SDHC_FLAG_32BIT_ACCESS)) { 755 for (i = 0; i < sizeof hp->regs; i += 4) { 756 if (i + 3 < sizeof hp->regs) { 757 HWRITE4(hp, i, 758 (hp->regs[i + 0] << 0) 759 | (hp->regs[i + 1] << 8) 760 | (hp->regs[i + 2] << 16) 761 | (hp->regs[i + 3] << 24)); 762 } else { 763 HWRITE4(hp, i, 764 (hp->regs[i + 0] << 0) 765 | (hp->regs[i + 1] << 8)); 766 } 767 } 768 } else { 769 for (i = 0; i < sizeof hp->regs; i++) { 770 HWRITE1(hp, i, hp->regs[i]); 771 } 772 } 773 } 774 return true; 775 } 776 777 bool 778 sdhc_shutdown(device_t dev, int flags) 779 { 780 struct sdhc_softc *sc = device_private(dev); 781 struct sdhc_host *hp; 782 783 /* XXX chip locks up if we don't disable it before reboot. */ 784 for (size_t i = 0; i < sc->sc_nhosts; i++) { 785 hp = sc->sc_host[i]; 786 (void)sdhc_host_reset(hp); 787 } 788 return true; 789 } 790 791 /* 792 * Reset the host controller. Called during initialization, when 793 * cards are removed, upon resume, and during error recovery. 794 */ 795 static int 796 sdhc_host_reset1(sdmmc_chipset_handle_t sch) 797 { 798 struct sdhc_host *hp = (struct sdhc_host *)sch; 799 uint32_t sdhcimask; 800 int error; 801 802 KASSERT(mutex_owned(&hp->intr_lock)); 803 804 /* Disable all interrupts. */ 805 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_32BIT_ACCESS)) { 806 HWRITE4(hp, SDHC_NINTR_SIGNAL_EN, 0); 807 } else { 808 HWRITE2(hp, SDHC_NINTR_SIGNAL_EN, 0); 809 } 810 811 /* Let sdhc_bus_power restore power */ 812 hp->vdd = 0; 813 814 /* 815 * Reset the entire host controller and wait up to 100ms for 816 * the controller to clear the reset bit. 817 */ 818 error = sdhc_soft_reset(hp, SDHC_RESET_ALL); 819 if (error) 820 goto out; 821 822 /* Set data timeout counter value to max for now. */ 823 HWRITE1(hp, SDHC_TIMEOUT_CTL, SDHC_TIMEOUT_MAX); 824 #if 1 825 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED)) 826 HWRITE4(hp, SDHC_NINTR_STATUS, SDHC_CMD_TIMEOUT_ERROR << 16); 827 #endif 828 829 /* Enable interrupts. 
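	 * Note on the masks below: the buffer read/write ready bits are
	 * enabled in the status-enable register, but the XOR with
	 * (SDHC_BUFFER_READ_READY ^ SDHC_BUFFER_WRITE_READY) strips them
	 * from the value written to the signal-enable register, so they are
	 * only signalled once a PIO transfer (or the tuning code) enables
	 * them explicitly.  In the combined 32-bit layout, the first XOR
	 * likewise converts the upper (error) half of the word from the
	 * status-enable mask to the signal-enable mask.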
*/ 830 sdhcimask = SDHC_CARD_REMOVAL | SDHC_CARD_INSERTION | 831 SDHC_BUFFER_READ_READY | SDHC_BUFFER_WRITE_READY | 832 SDHC_DMA_INTERRUPT | SDHC_BLOCK_GAP_EVENT | 833 SDHC_TRANSFER_COMPLETE | SDHC_COMMAND_COMPLETE; 834 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_32BIT_ACCESS)) { 835 sdhcimask |= SDHC_EINTR_STATUS_MASK << 16; 836 HWRITE4(hp, SDHC_NINTR_STATUS_EN, sdhcimask); 837 sdhcimask ^= 838 (SDHC_EINTR_STATUS_MASK ^ SDHC_EINTR_SIGNAL_MASK) << 16; 839 sdhcimask ^= SDHC_BUFFER_READ_READY ^ SDHC_BUFFER_WRITE_READY; 840 HWRITE4(hp, SDHC_NINTR_SIGNAL_EN, sdhcimask); 841 } else { 842 HWRITE2(hp, SDHC_NINTR_STATUS_EN, sdhcimask); 843 HWRITE2(hp, SDHC_EINTR_STATUS_EN, SDHC_EINTR_STATUS_MASK); 844 sdhcimask ^= SDHC_BUFFER_READ_READY ^ SDHC_BUFFER_WRITE_READY; 845 HWRITE2(hp, SDHC_NINTR_SIGNAL_EN, sdhcimask); 846 HWRITE2(hp, SDHC_EINTR_SIGNAL_EN, SDHC_EINTR_SIGNAL_MASK); 847 } 848 849 out: 850 return error; 851 } 852 853 static int 854 sdhc_host_reset(sdmmc_chipset_handle_t sch) 855 { 856 struct sdhc_host *hp = (struct sdhc_host *)sch; 857 int error; 858 859 mutex_enter(&hp->intr_lock); 860 error = sdhc_host_reset1(sch); 861 mutex_exit(&hp->intr_lock); 862 863 return error; 864 } 865 866 static uint32_t 867 sdhc_host_ocr(sdmmc_chipset_handle_t sch) 868 { 869 struct sdhc_host *hp = (struct sdhc_host *)sch; 870 871 return hp->ocr; 872 } 873 874 static int 875 sdhc_host_maxblklen(sdmmc_chipset_handle_t sch) 876 { 877 struct sdhc_host *hp = (struct sdhc_host *)sch; 878 879 return hp->maxblklen; 880 } 881 882 /* 883 * Return non-zero if the card is currently inserted. 884 */ 885 static int 886 sdhc_card_detect(sdmmc_chipset_handle_t sch) 887 { 888 struct sdhc_host *hp = (struct sdhc_host *)sch; 889 int r; 890 891 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_NON_REMOVABLE)) 892 return 1; 893 894 if (hp->sc->sc_vendor_card_detect) 895 return (*hp->sc->sc_vendor_card_detect)(hp->sc); 896 897 r = ISSET(HREAD4(hp, SDHC_PRESENT_STATE), SDHC_CARD_INSERTED); 898 899 return r ? 1 : 0; 900 } 901 902 /* 903 * Return non-zero if the card is currently write-protected. 904 */ 905 static int 906 sdhc_write_protect(sdmmc_chipset_handle_t sch) 907 { 908 struct sdhc_host *hp = (struct sdhc_host *)sch; 909 int r; 910 911 if (hp->sc->sc_vendor_write_protect) 912 return (*hp->sc->sc_vendor_write_protect)(hp->sc); 913 914 r = ISSET(HREAD4(hp, SDHC_PRESENT_STATE), SDHC_WRITE_PROTECT_SWITCH); 915 916 return r ? 0 : 1; 917 } 918 919 /* 920 * Set or change SD bus voltage and enable or disable SD bus power. 921 * Return zero on success. 922 */ 923 static int 924 sdhc_bus_power(sdmmc_chipset_handle_t sch, uint32_t ocr) 925 { 926 struct sdhc_host *hp = (struct sdhc_host *)sch; 927 uint8_t vdd; 928 int error = 0; 929 const uint32_t pcmask = 930 ~(SDHC_BUS_POWER | (SDHC_VOLTAGE_MASK << SDHC_VOLTAGE_SHIFT)); 931 uint32_t reg; 932 933 mutex_enter(&hp->intr_lock); 934 935 /* 936 * Disable bus power before voltage change. 937 */ 938 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_NO_PWR0)) { 939 hp->vdd = 0; 940 HWRITE1(hp, SDHC_POWER_CTL, 0); 941 } 942 943 /* If power is disabled, reset the host and return now. */ 944 if (ocr == 0) { 945 (void)sdhc_host_reset1(hp); 946 callout_halt(&hp->tuning_timer, &hp->intr_lock); 947 goto out; 948 } 949 950 /* 951 * Select the lowest voltage according to capabilities. 
952 */ 953 ocr &= hp->ocr; 954 if (ISSET(ocr, MMC_OCR_1_65V_1_95V)) { 955 vdd = SDHC_VOLTAGE_1_8V; 956 } else if (ISSET(ocr, MMC_OCR_2_9V_3_0V|MMC_OCR_3_0V_3_1V)) { 957 vdd = SDHC_VOLTAGE_3_0V; 958 } else if (ISSET(ocr, MMC_OCR_3_2V_3_3V|MMC_OCR_3_3V_3_4V)) { 959 vdd = SDHC_VOLTAGE_3_3V; 960 } else { 961 /* Unsupported voltage level requested. */ 962 error = EINVAL; 963 goto out; 964 } 965 966 /* 967 * Did voltage change ? 968 */ 969 if (vdd == hp->vdd) 970 goto out; 971 972 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED)) { 973 /* 974 * Enable bus power. Wait at least 1 ms (or 74 clocks) plus 975 * voltage ramp until power rises. 976 */ 977 978 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_SINGLE_POWER_WRITE)) { 979 HWRITE1(hp, SDHC_POWER_CTL, 980 (vdd << SDHC_VOLTAGE_SHIFT) | SDHC_BUS_POWER); 981 } else { 982 reg = HREAD1(hp, SDHC_POWER_CTL) & pcmask; 983 HWRITE1(hp, SDHC_POWER_CTL, reg); 984 sdmmc_delay(1); 985 reg |= (vdd << SDHC_VOLTAGE_SHIFT); 986 HWRITE1(hp, SDHC_POWER_CTL, reg); 987 sdmmc_delay(1); 988 reg |= SDHC_BUS_POWER; 989 HWRITE1(hp, SDHC_POWER_CTL, reg); 990 sdmmc_delay(10000); 991 } 992 993 /* 994 * The host system may not power the bus due to battery low, 995 * etc. In that case, the host controller should clear the 996 * bus power bit. 997 */ 998 if (!ISSET(HREAD1(hp, SDHC_POWER_CTL), SDHC_BUS_POWER)) { 999 error = ENXIO; 1000 goto out; 1001 } 1002 } 1003 1004 /* power successfully changed */ 1005 hp->vdd = vdd; 1006 1007 out: 1008 mutex_exit(&hp->intr_lock); 1009 1010 return error; 1011 } 1012 1013 /* 1014 * Return the smallest possible base clock frequency divisor value 1015 * for the CLOCK_CTL register to produce `freq' (KHz). 1016 */ 1017 static bool 1018 sdhc_clock_divisor(struct sdhc_host *hp, u_int freq, u_int *divp) 1019 { 1020 u_int div; 1021 1022 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_HAVE_CGM)) { 1023 for (div = hp->clkbase / freq; div <= 0x3ff; div++) { 1024 if ((hp->clkbase / div) <= freq) { 1025 *divp = SDHC_SDCLK_CGM 1026 | ((div & 0x300) << SDHC_SDCLK_XDIV_SHIFT) 1027 | ((div & 0x0ff) << SDHC_SDCLK_DIV_SHIFT); 1028 //freq = hp->clkbase / div; 1029 return true; 1030 } 1031 } 1032 /* No divisor found. */ 1033 return false; 1034 } 1035 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_HAVE_DVS)) { 1036 u_int dvs = (hp->clkbase + freq - 1) / freq; 1037 u_int roundup = dvs & 1; 1038 for (dvs >>= 1, div = 1; div <= 256; div <<= 1, dvs >>= 1) { 1039 if (dvs + roundup <= 16) { 1040 dvs += roundup - 1; 1041 *divp = (div << SDHC_SDCLK_DIV_SHIFT) 1042 | (dvs << SDHC_SDCLK_DVS_SHIFT); 1043 DPRINTF(2, 1044 ("%s: divisor for freq %u is %u * %u\n", 1045 HDEVNAME(hp), freq, div * 2, dvs + 1)); 1046 //freq = hp->clkbase / (div * 2) * (dvs + 1); 1047 return true; 1048 } 1049 /* 1050 * If we drop bits, we need to round up the divisor. 1051 */ 1052 roundup |= dvs & 1; 1053 } 1054 /* No divisor found. */ 1055 return false; 1056 } 1057 if (hp->sc->sc_clkmsk != 0) { 1058 div = howmany(hp->clkbase, freq); 1059 if (div > (hp->sc->sc_clkmsk >> (ffs(hp->sc->sc_clkmsk) - 1))) 1060 return false; 1061 *divp = div << (ffs(hp->sc->sc_clkmsk) - 1); 1062 //freq = hp->clkbase / div; 1063 return true; 1064 } 1065 if (hp->specver >= SDHC_SPEC_VERS_300) { 1066 div = howmany(hp->clkbase, freq); 1067 div = div > 1 ? howmany(div, 2) : 0; 1068 if (div > 0x3ff) 1069 return false; 1070 *divp = (((div >> 8) & SDHC_SDCLK_XDIV_MASK) 1071 << SDHC_SDCLK_XDIV_SHIFT) | 1072 (((div >> 0) & SDHC_SDCLK_DIV_MASK) 1073 << SDHC_SDCLK_DIV_SHIFT); 1074 //freq = hp->clkbase / (div ? 
div * 2 : 1); 1075 return true; 1076 } else { 1077 for (div = 1; div <= 256; div *= 2) { 1078 if ((hp->clkbase / div) <= freq) { 1079 *divp = (div / 2) << SDHC_SDCLK_DIV_SHIFT; 1080 //freq = hp->clkbase / div; 1081 return true; 1082 } 1083 } 1084 /* No divisor found. */ 1085 return false; 1086 } 1087 /* No divisor found. */ 1088 return false; 1089 } 1090 1091 /* 1092 * Set or change SDCLK frequency or disable the SD clock. 1093 * Return zero on success. 1094 */ 1095 static int 1096 sdhc_bus_clock_ddr(sdmmc_chipset_handle_t sch, int freq, bool ddr) 1097 { 1098 struct sdhc_host *hp = (struct sdhc_host *)sch; 1099 u_int div; 1100 u_int timo; 1101 int16_t reg; 1102 int error = 0; 1103 bool present __diagused; 1104 1105 #ifdef DIAGNOSTIC 1106 present = ISSET(HREAD4(hp, SDHC_PRESENT_STATE), SDHC_CMD_INHIBIT_MASK); 1107 1108 /* Must not stop the clock if commands are in progress. */ 1109 if (present && sdhc_card_detect(hp)) { 1110 aprint_normal_dev(hp->sc->sc_dev, 1111 "%s: command in progress\n", __func__); 1112 } 1113 #endif 1114 1115 if (hp->sc->sc_vendor_bus_clock) { 1116 mutex_enter(&hp->bus_clock_lock); 1117 error = (*hp->sc->sc_vendor_bus_clock)(hp->sc, freq); 1118 mutex_exit(&hp->bus_clock_lock); 1119 if (error != 0) 1120 return error; 1121 } 1122 1123 mutex_enter(&hp->intr_lock); 1124 1125 /* 1126 * Stop SD clock before changing the frequency. 1127 */ 1128 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_USDHC)) { 1129 HCLR4(hp, SDHC_VEND_SPEC, 1130 SDHC_VEND_SPEC_CARD_CLK_SOFT_EN | 1131 SDHC_VEND_SPEC_FRC_SDCLK_ON); 1132 if (freq == SDMMC_SDCLK_OFF) { 1133 goto out; 1134 } 1135 } else if (ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED)) { 1136 HCLR4(hp, SDHC_CLOCK_CTL, 0xfff8); 1137 if (freq == SDMMC_SDCLK_OFF) { 1138 HSET4(hp, SDHC_CLOCK_CTL, 0x80f0); 1139 goto out; 1140 } 1141 } else { 1142 HCLR2(hp, SDHC_CLOCK_CTL, SDHC_SDCLK_ENABLE); 1143 if (freq == SDMMC_SDCLK_OFF) 1144 goto out; 1145 } 1146 1147 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_USDHC)) { 1148 if (ddr) 1149 HSET4(hp, SDHC_MIX_CTRL, SDHC_USDHC_DDR_EN); 1150 else 1151 HCLR4(hp, SDHC_MIX_CTRL, SDHC_USDHC_DDR_EN); 1152 } else if (hp->specver >= SDHC_SPEC_VERS_300) { 1153 HCLR2(hp, SDHC_HOST_CTL2, SDHC_UHS_MODE_SELECT_MASK); 1154 if (freq > 100000) { 1155 HSET2(hp, SDHC_HOST_CTL2, SDHC_UHS_MODE_SELECT_SDR104); 1156 } else if (freq > 50000) { 1157 if (ddr) { 1158 HSET2(hp, SDHC_HOST_CTL2, 1159 SDHC_UHS_MODE_SELECT_DDR50); 1160 } else { 1161 HSET2(hp, SDHC_HOST_CTL2, 1162 SDHC_UHS_MODE_SELECT_SDR50); 1163 } 1164 } else if (freq > 25000) { 1165 if (ddr) { 1166 HSET2(hp, SDHC_HOST_CTL2, 1167 SDHC_UHS_MODE_SELECT_DDR50); 1168 } else { 1169 HSET2(hp, SDHC_HOST_CTL2, 1170 SDHC_UHS_MODE_SELECT_SDR25); 1171 } 1172 } else if (freq > 400) { 1173 HSET2(hp, SDHC_HOST_CTL2, SDHC_UHS_MODE_SELECT_SDR12); 1174 } 1175 } 1176 1177 /* 1178 * Slow down Ricoh 5U823 controller that isn't reliable 1179 * at 100MHz bus clock. 1180 */ 1181 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_SLOW_SDR50)) { 1182 if (freq == 100000) 1183 --freq; 1184 } 1185 1186 /* 1187 * Set the minimum base clock frequency divisor. 1188 */ 1189 if (!sdhc_clock_divisor(hp, freq, &div)) { 1190 /* Invalid base clock frequency or `freq' value. 
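		 * (sdhc_clock_divisor() exhausted its divisor range; e.g. a
		 * 3.0 host divides by at most 2 * 0x3ff, so a 200 MHz base
		 * clock cannot be brought below roughly 98 kHz.)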
*/ 1191 aprint_error_dev(hp->sc->sc_dev, 1192 "Invalid bus clock %d kHz\n", freq); 1193 error = EINVAL; 1194 goto out; 1195 } 1196 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_USDHC)) { 1197 if (ddr) { 1198 /* in ddr mode, divisor >>= 1 */ 1199 div = ((div >> 1) & (SDHC_SDCLK_DIV_MASK << 1200 SDHC_SDCLK_DIV_SHIFT)) | 1201 (div & (SDHC_SDCLK_DVS_MASK << 1202 SDHC_SDCLK_DVS_SHIFT)); 1203 } 1204 for (timo = 1000; timo > 0; timo--) { 1205 if (ISSET(HREAD4(hp, SDHC_PRESENT_STATE), SDHC_SDSTB)) 1206 break; 1207 sdmmc_delay(10); 1208 } 1209 HWRITE4(hp, SDHC_CLOCK_CTL, 1210 div | (SDHC_TIMEOUT_MAX << 16) | 0x0f); 1211 } else if (ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED)) { 1212 HWRITE4(hp, SDHC_CLOCK_CTL, 1213 div | (SDHC_TIMEOUT_MAX << 16)); 1214 } else { 1215 reg = HREAD2(hp, SDHC_CLOCK_CTL); 1216 reg &= (SDHC_INTCLK_STABLE | SDHC_INTCLK_ENABLE); 1217 HWRITE2(hp, SDHC_CLOCK_CTL, reg | div); 1218 } 1219 1220 /* 1221 * Start internal clock. Wait 10ms for stabilization. 1222 */ 1223 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_USDHC)) { 1224 HSET4(hp, SDHC_VEND_SPEC, 1225 SDHC_VEND_SPEC_CARD_CLK_SOFT_EN | 1226 SDHC_VEND_SPEC_FRC_SDCLK_ON); 1227 } else if (ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED)) { 1228 sdmmc_delay(10000); 1229 HSET4(hp, SDHC_CLOCK_CTL, 1230 8 | SDHC_INTCLK_ENABLE | SDHC_INTCLK_STABLE); 1231 } else { 1232 HSET2(hp, SDHC_CLOCK_CTL, SDHC_INTCLK_ENABLE); 1233 for (timo = 1000; timo > 0; timo--) { 1234 if (ISSET(HREAD2(hp, SDHC_CLOCK_CTL), 1235 SDHC_INTCLK_STABLE)) 1236 break; 1237 sdmmc_delay(10); 1238 } 1239 if (timo == 0) { 1240 error = ETIMEDOUT; 1241 DPRINTF(1,("%s: timeout\n", __func__)); 1242 goto out; 1243 } 1244 } 1245 1246 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED | SDHC_FLAG_USDHC)) { 1247 HSET1(hp, SDHC_SOFTWARE_RESET, SDHC_INIT_ACTIVE); 1248 /* 1249 * Sending 80 clocks at 400kHz takes 200us. 1250 * So delay for that time + slop and then 1251 * check a few times for completion. 1252 */ 1253 sdmmc_delay(210); 1254 for (timo = 10; timo > 0; timo--) { 1255 if (!ISSET(HREAD1(hp, SDHC_SOFTWARE_RESET), 1256 SDHC_INIT_ACTIVE)) 1257 break; 1258 sdmmc_delay(10); 1259 } 1260 DPRINTF(2,("%s: %u init spins\n", __func__, 10 - timo)); 1261 1262 /* 1263 * Enable SD clock. 1264 */ 1265 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_USDHC)) { 1266 HSET4(hp, SDHC_VEND_SPEC, 1267 SDHC_VEND_SPEC_CARD_CLK_SOFT_EN | 1268 SDHC_VEND_SPEC_FRC_SDCLK_ON); 1269 } else { 1270 HSET4(hp, SDHC_CLOCK_CTL, SDHC_SDCLK_ENABLE); 1271 } 1272 } else { 1273 /* 1274 * Enable SD clock. 
1275 */ 1276 HSET2(hp, SDHC_CLOCK_CTL, SDHC_SDCLK_ENABLE); 1277 1278 if (freq > 25000 && 1279 !ISSET(hp->sc->sc_flags, SDHC_FLAG_NO_HS_BIT)) 1280 HSET1(hp, SDHC_HOST_CTL, SDHC_HIGH_SPEED); 1281 else 1282 HCLR1(hp, SDHC_HOST_CTL, SDHC_HIGH_SPEED); 1283 } 1284 1285 mutex_exit(&hp->intr_lock); 1286 1287 if (hp->sc->sc_vendor_bus_clock_post) { 1288 mutex_enter(&hp->bus_clock_lock); 1289 error = (*hp->sc->sc_vendor_bus_clock_post)(hp->sc, freq); 1290 mutex_exit(&hp->bus_clock_lock); 1291 } 1292 return error; 1293 1294 out: 1295 mutex_exit(&hp->intr_lock); 1296 1297 return error; 1298 } 1299 1300 static int 1301 sdhc_bus_width(sdmmc_chipset_handle_t sch, int width) 1302 { 1303 struct sdhc_host *hp = (struct sdhc_host *)sch; 1304 int reg; 1305 1306 switch (width) { 1307 case 1: 1308 case 4: 1309 break; 1310 1311 case 8: 1312 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_8BIT_MODE)) 1313 break; 1314 /* FALLTHROUGH */ 1315 default: 1316 DPRINTF(0,("%s: unsupported bus width (%d)\n", 1317 HDEVNAME(hp), width)); 1318 return 1; 1319 } 1320 1321 if (hp->sc->sc_vendor_bus_width) { 1322 const int error = hp->sc->sc_vendor_bus_width(hp->sc, width); 1323 if (error != 0) 1324 return error; 1325 } 1326 1327 mutex_enter(&hp->intr_lock); 1328 1329 reg = HREAD1(hp, SDHC_HOST_CTL); 1330 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED | SDHC_FLAG_USDHC)) { 1331 reg &= ~(SDHC_4BIT_MODE|SDHC_ESDHC_8BIT_MODE); 1332 if (width == 4) 1333 reg |= SDHC_4BIT_MODE; 1334 else if (width == 8) 1335 reg |= SDHC_ESDHC_8BIT_MODE; 1336 } else { 1337 reg &= ~SDHC_4BIT_MODE; 1338 if (hp->specver >= SDHC_SPEC_VERS_300) { 1339 reg &= ~SDHC_8BIT_MODE; 1340 } 1341 if (width == 4) { 1342 reg |= SDHC_4BIT_MODE; 1343 } else if (width == 8 && hp->specver >= SDHC_SPEC_VERS_300) { 1344 reg |= SDHC_8BIT_MODE; 1345 } 1346 } 1347 HWRITE1(hp, SDHC_HOST_CTL, reg); 1348 1349 mutex_exit(&hp->intr_lock); 1350 1351 return 0; 1352 } 1353 1354 static int 1355 sdhc_bus_rod(sdmmc_chipset_handle_t sch, int on) 1356 { 1357 struct sdhc_host *hp = (struct sdhc_host *)sch; 1358 1359 if (hp->sc->sc_vendor_rod) 1360 return (*hp->sc->sc_vendor_rod)(hp->sc, on); 1361 1362 return 0; 1363 } 1364 1365 static void 1366 sdhc_card_enable_intr(sdmmc_chipset_handle_t sch, int enable) 1367 { 1368 struct sdhc_host *hp = (struct sdhc_host *)sch; 1369 1370 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED | SDHC_FLAG_USDHC)) { 1371 mutex_enter(&hp->intr_lock); 1372 if (enable) { 1373 HSET2(hp, SDHC_NINTR_STATUS_EN, SDHC_CARD_INTERRUPT); 1374 HSET2(hp, SDHC_NINTR_SIGNAL_EN, SDHC_CARD_INTERRUPT); 1375 } else { 1376 HCLR2(hp, SDHC_NINTR_SIGNAL_EN, SDHC_CARD_INTERRUPT); 1377 HCLR2(hp, SDHC_NINTR_STATUS_EN, SDHC_CARD_INTERRUPT); 1378 } 1379 mutex_exit(&hp->intr_lock); 1380 } 1381 } 1382 1383 static void 1384 sdhc_card_intr_ack(sdmmc_chipset_handle_t sch) 1385 { 1386 struct sdhc_host *hp = (struct sdhc_host *)sch; 1387 1388 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED | SDHC_FLAG_USDHC)) { 1389 mutex_enter(&hp->intr_lock); 1390 HSET2(hp, SDHC_NINTR_STATUS_EN, SDHC_CARD_INTERRUPT); 1391 mutex_exit(&hp->intr_lock); 1392 } 1393 } 1394 1395 static int 1396 sdhc_signal_voltage(sdmmc_chipset_handle_t sch, int signal_voltage) 1397 { 1398 struct sdhc_host *hp = (struct sdhc_host *)sch; 1399 int error = 0; 1400 1401 if (hp->specver < SDHC_SPEC_VERS_300) 1402 return EINVAL; 1403 1404 mutex_enter(&hp->intr_lock); 1405 switch (signal_voltage) { 1406 case SDMMC_SIGNAL_VOLTAGE_180: 1407 if (hp->sc->sc_vendor_signal_voltage != NULL) { 1408 error = hp->sc->sc_vendor_signal_voltage(hp->sc, 1409 
signal_voltage); 1410 if (error != 0) 1411 break; 1412 } 1413 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_USDHC)) 1414 HSET2(hp, SDHC_HOST_CTL2, SDHC_1_8V_SIGNAL_EN); 1415 break; 1416 case SDMMC_SIGNAL_VOLTAGE_330: 1417 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_USDHC)) 1418 HCLR2(hp, SDHC_HOST_CTL2, SDHC_1_8V_SIGNAL_EN); 1419 if (hp->sc->sc_vendor_signal_voltage != NULL) { 1420 error = hp->sc->sc_vendor_signal_voltage(hp->sc, 1421 signal_voltage); 1422 if (error != 0) 1423 break; 1424 } 1425 break; 1426 default: 1427 error = EINVAL; 1428 break; 1429 } 1430 mutex_exit(&hp->intr_lock); 1431 1432 return error; 1433 } 1434 1435 /* 1436 * Sampling clock tuning procedure (UHS) 1437 */ 1438 static int 1439 sdhc_execute_tuning1(struct sdhc_host *hp, int timing) 1440 { 1441 struct sdmmc_command cmd; 1442 uint8_t hostctl; 1443 int opcode, error, retry = 40; 1444 1445 KASSERT(mutex_owned(&hp->intr_lock)); 1446 1447 hp->tuning_timing = timing; 1448 1449 switch (timing) { 1450 case SDMMC_TIMING_MMC_HS200: 1451 opcode = MMC_SEND_TUNING_BLOCK_HS200; 1452 break; 1453 case SDMMC_TIMING_UHS_SDR50: 1454 if (!ISSET(hp->sc->sc_caps2, SDHC_TUNING_SDR50)) 1455 return 0; 1456 /* FALLTHROUGH */ 1457 case SDMMC_TIMING_UHS_SDR104: 1458 opcode = MMC_SEND_TUNING_BLOCK; 1459 break; 1460 default: 1461 return EINVAL; 1462 } 1463 1464 hostctl = HREAD1(hp, SDHC_HOST_CTL); 1465 1466 /* enable buffer read ready interrupt */ 1467 HSET2(hp, SDHC_NINTR_SIGNAL_EN, SDHC_BUFFER_READ_READY); 1468 HSET2(hp, SDHC_NINTR_STATUS_EN, SDHC_BUFFER_READ_READY); 1469 1470 /* disable DMA */ 1471 HCLR1(hp, SDHC_HOST_CTL, SDHC_DMA_SELECT); 1472 1473 /* reset tuning circuit */ 1474 HCLR2(hp, SDHC_HOST_CTL2, SDHC_SAMPLING_CLOCK_SEL); 1475 1476 /* start of tuning */ 1477 HWRITE2(hp, SDHC_HOST_CTL2, SDHC_EXECUTE_TUNING); 1478 1479 do { 1480 memset(&cmd, 0, sizeof(cmd)); 1481 cmd.c_opcode = opcode; 1482 cmd.c_arg = 0; 1483 cmd.c_flags = SCF_CMD_ADTC | SCF_CMD_READ | SCF_RSP_R1; 1484 if (ISSET(hostctl, SDHC_8BIT_MODE)) { 1485 cmd.c_blklen = cmd.c_datalen = 128; 1486 } else { 1487 cmd.c_blklen = cmd.c_datalen = 64; 1488 } 1489 1490 error = sdhc_start_command(hp, &cmd); 1491 if (error) 1492 break; 1493 1494 if (!sdhc_wait_intr(hp, SDHC_BUFFER_READ_READY, 1495 SDHC_TUNING_TIMEOUT, false)) { 1496 break; 1497 } 1498 1499 delay(1000); 1500 } while (HREAD2(hp, SDHC_HOST_CTL2) & SDHC_EXECUTE_TUNING && --retry); 1501 1502 /* disable buffer read ready interrupt */ 1503 HCLR2(hp, SDHC_NINTR_SIGNAL_EN, SDHC_BUFFER_READ_READY); 1504 HCLR2(hp, SDHC_NINTR_STATUS_EN, SDHC_BUFFER_READ_READY); 1505 1506 if (HREAD2(hp, SDHC_HOST_CTL2) & SDHC_EXECUTE_TUNING) { 1507 HCLR2(hp, SDHC_HOST_CTL2, 1508 SDHC_SAMPLING_CLOCK_SEL|SDHC_EXECUTE_TUNING); 1509 sdhc_soft_reset(hp, SDHC_RESET_DAT|SDHC_RESET_CMD); 1510 aprint_error_dev(hp->sc->sc_dev, 1511 "tuning did not complete, using fixed sampling clock\n"); 1512 return 0; /* tuning did not complete */ 1513 } 1514 1515 if ((HREAD2(hp, SDHC_HOST_CTL2) & SDHC_SAMPLING_CLOCK_SEL) == 0) { 1516 HCLR2(hp, SDHC_HOST_CTL2, 1517 SDHC_SAMPLING_CLOCK_SEL|SDHC_EXECUTE_TUNING); 1518 sdhc_soft_reset(hp, SDHC_RESET_DAT|SDHC_RESET_CMD); 1519 aprint_error_dev(hp->sc->sc_dev, 1520 "tuning failed, using fixed sampling clock\n"); 1521 return 0; /* tuning failed */ 1522 } 1523 1524 if (hp->tuning_timer_count) { 1525 callout_schedule(&hp->tuning_timer, 1526 hz * hp->tuning_timer_count); 1527 } 1528 1529 return 0; /* tuning completed */ 1530 } 1531 1532 static int 1533 sdhc_execute_tuning(sdmmc_chipset_handle_t sch, int timing) 1534 { 1535 struct sdhc_host *hp = 
(struct sdhc_host *)sch; 1536 int error; 1537 1538 mutex_enter(&hp->intr_lock); 1539 error = sdhc_execute_tuning1(hp, timing); 1540 mutex_exit(&hp->intr_lock); 1541 return error; 1542 } 1543 1544 static void 1545 sdhc_tuning_timer(void *arg) 1546 { 1547 struct sdhc_host *hp = arg; 1548 1549 atomic_swap_uint(&hp->tuning_timer_pending, 1); 1550 } 1551 1552 static void 1553 sdhc_hw_reset(sdmmc_chipset_handle_t sch) 1554 { 1555 struct sdhc_host *hp = (struct sdhc_host *)sch; 1556 struct sdhc_softc *sc = hp->sc; 1557 1558 if (sc->sc_vendor_hw_reset != NULL) 1559 sc->sc_vendor_hw_reset(sc, hp); 1560 } 1561 1562 static int 1563 sdhc_wait_state(struct sdhc_host *hp, uint32_t mask, uint32_t value) 1564 { 1565 struct timeval start, diff; 1566 uint32_t state; 1567 1568 microuptime(&start); 1569 for (;;) { 1570 state = HREAD4(hp, SDHC_PRESENT_STATE); 1571 if ((state & mask) == value) { 1572 return 0; 1573 } 1574 microuptime(&diff); 1575 timersub(&diff, &start, &diff); 1576 if (diff.tv_sec != 0) { 1577 aprint_error_dev(hp->sc->sc_dev, 1578 "timeout waiting for mask %#x value %#x " 1579 "(state=%#x)\n", 1580 mask, value, state); 1581 return ETIMEDOUT; 1582 } 1583 } 1584 } 1585 1586 static void 1587 sdhc_exec_command(sdmmc_chipset_handle_t sch, struct sdmmc_command *cmd) 1588 { 1589 struct sdhc_host *hp = (struct sdhc_host *)sch; 1590 int error; 1591 bool probing; 1592 1593 mutex_enter(&hp->intr_lock); 1594 1595 if (atomic_cas_uint(&hp->tuning_timer_pending, 1, 0) == 1) { 1596 (void)sdhc_execute_tuning1(hp, hp->tuning_timing); 1597 } 1598 1599 if (cmd->c_data && 1600 ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED | SDHC_FLAG_USDHC)) { 1601 const uint16_t ready = SDHC_BUFFER_READ_READY | SDHC_BUFFER_WRITE_READY; 1602 if (ISSET(hp->flags, SHF_USE_DMA)) { 1603 HCLR2(hp, SDHC_NINTR_SIGNAL_EN, ready); 1604 HCLR2(hp, SDHC_NINTR_STATUS_EN, ready); 1605 } else { 1606 HSET2(hp, SDHC_NINTR_SIGNAL_EN, ready); 1607 HSET2(hp, SDHC_NINTR_STATUS_EN, ready); 1608 } 1609 } 1610 1611 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_NO_TIMEOUT)) { 1612 const uint16_t eintr = SDHC_CMD_TIMEOUT_ERROR; 1613 if (cmd->c_data != NULL) { 1614 HCLR2(hp, SDHC_EINTR_SIGNAL_EN, eintr); 1615 HCLR2(hp, SDHC_EINTR_STATUS_EN, eintr); 1616 } else { 1617 HSET2(hp, SDHC_EINTR_SIGNAL_EN, eintr); 1618 HSET2(hp, SDHC_EINTR_STATUS_EN, eintr); 1619 } 1620 } 1621 1622 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_STOP_WITH_TC)) { 1623 if (cmd->c_opcode == MMC_STOP_TRANSMISSION) 1624 SET(cmd->c_flags, SCF_RSP_BSY); 1625 } 1626 1627 /* 1628 * Start the MMC command, or mark `cmd' as failed and return. 1629 */ 1630 error = sdhc_start_command(hp, cmd); 1631 if (error) { 1632 cmd->c_error = error; 1633 goto out; 1634 } 1635 1636 /* 1637 * Wait until the command phase is done, or until the command 1638 * is marked done for any other reason. 1639 */ 1640 probing = (cmd->c_flags & SCF_TOUT_OK) != 0; 1641 if (!sdhc_wait_intr(hp, SDHC_COMMAND_COMPLETE, SDHC_COMMAND_TIMEOUT*3, probing)) { 1642 DPRINTF(1,("%s: timeout for command\n", __func__)); 1643 sdmmc_delay(50); 1644 cmd->c_error = ETIMEDOUT; 1645 goto out; 1646 } 1647 1648 /* 1649 * The host controller removes bits [0:7] from the response 1650 * data (CRC) and we pass the data up unchanged to the bus 1651 * driver (without padding). 
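	 * Controllers flagged SDHC_FLAG_RSP136_CRC keep that CRC byte in the
	 * low bits of the 136-bit response registers, so the shifts below
	 * slide the four words down by 8 bits to recover the standard layout.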
1652 */ 1653 if (cmd->c_error == 0 && ISSET(cmd->c_flags, SCF_RSP_PRESENT)) { 1654 cmd->c_resp[0] = HREAD4(hp, SDHC_RESPONSE + 0); 1655 if (ISSET(cmd->c_flags, SCF_RSP_136)) { 1656 cmd->c_resp[1] = HREAD4(hp, SDHC_RESPONSE + 4); 1657 cmd->c_resp[2] = HREAD4(hp, SDHC_RESPONSE + 8); 1658 cmd->c_resp[3] = HREAD4(hp, SDHC_RESPONSE + 12); 1659 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_RSP136_CRC)) { 1660 cmd->c_resp[0] = (cmd->c_resp[0] >> 8) | 1661 (cmd->c_resp[1] << 24); 1662 cmd->c_resp[1] = (cmd->c_resp[1] >> 8) | 1663 (cmd->c_resp[2] << 24); 1664 cmd->c_resp[2] = (cmd->c_resp[2] >> 8) | 1665 (cmd->c_resp[3] << 24); 1666 cmd->c_resp[3] = (cmd->c_resp[3] >> 8); 1667 } 1668 } 1669 } 1670 DPRINTF(1,("%s: resp = %08x\n", HDEVNAME(hp), cmd->c_resp[0])); 1671 1672 /* 1673 * If the command has data to transfer in any direction, 1674 * execute the transfer now. 1675 */ 1676 if (cmd->c_error == 0 && cmd->c_data != NULL) 1677 sdhc_transfer_data(hp, cmd); 1678 else if (ISSET(cmd->c_flags, SCF_RSP_BSY)) { 1679 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_NO_BUSY_INTR) && 1680 !sdhc_wait_intr(hp, SDHC_TRANSFER_COMPLETE, hz * 10, false)) { 1681 DPRINTF(1,("%s: sdhc_exec_command: RSP_BSY\n", 1682 HDEVNAME(hp))); 1683 cmd->c_error = ETIMEDOUT; 1684 goto out; 1685 } 1686 } 1687 1688 out: 1689 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED) 1690 && !ISSET(hp->sc->sc_flags, SDHC_FLAG_NO_LED_ON)) { 1691 /* Turn off the LED. */ 1692 HCLR1(hp, SDHC_HOST_CTL, SDHC_LED_ON); 1693 } 1694 SET(cmd->c_flags, SCF_ITSDONE); 1695 1696 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_NO_AUTO_STOP) && 1697 cmd->c_opcode == MMC_STOP_TRANSMISSION) 1698 (void)sdhc_soft_reset(hp, SDHC_RESET_CMD|SDHC_RESET_DAT); 1699 1700 mutex_exit(&hp->intr_lock); 1701 1702 DPRINTF(1,("%s: cmd %d %s (flags=%08x error=%d)\n", HDEVNAME(hp), 1703 cmd->c_opcode, (cmd->c_error == 0) ? "done" : "abort", 1704 cmd->c_flags, cmd->c_error)); 1705 } 1706 1707 static int 1708 sdhc_start_command(struct sdhc_host *hp, struct sdmmc_command *cmd) 1709 { 1710 struct sdhc_softc * const sc = hp->sc; 1711 uint16_t blksize = 0; 1712 uint16_t blkcount = 0; 1713 uint16_t mode; 1714 uint16_t command; 1715 uint32_t pmask; 1716 int error; 1717 1718 KASSERT(mutex_owned(&hp->intr_lock)); 1719 1720 DPRINTF(1,("%s: start cmd %d arg=%08x data=%p dlen=%d flags=%08x, status=%#x\n", 1721 HDEVNAME(hp), cmd->c_opcode, cmd->c_arg, cmd->c_data, 1722 cmd->c_datalen, cmd->c_flags, HREAD4(hp, SDHC_NINTR_STATUS))); 1723 1724 /* 1725 * The maximum block length for commands should be the minimum 1726 * of the host buffer size and the card buffer size. (1.7.2) 1727 */ 1728 1729 /* Fragment the data into proper blocks. */ 1730 if (cmd->c_datalen > 0) { 1731 blksize = MIN(cmd->c_datalen, cmd->c_blklen); 1732 blkcount = cmd->c_datalen / blksize; 1733 if (cmd->c_datalen % blksize > 0) { 1734 /* XXX: Split this command. (1.7.4) */ 1735 aprint_error_dev(sc->sc_dev, 1736 "data not a multiple of %u bytes\n", blksize); 1737 return EINVAL; 1738 } 1739 } 1740 1741 /* Check limit imposed by 9-bit block count. (1.7.2) */ 1742 if (blkcount > SDHC_BLOCK_COUNT_MAX) { 1743 aprint_error_dev(sc->sc_dev, "too much data\n"); 1744 return EINVAL; 1745 } 1746 1747 /* Prepare transfer mode register value. 
(2.2.5) */ 1748 mode = 0; 1749 if (ISSET(cmd->c_flags, SCF_CMD_READ)) 1750 mode |= SDHC_READ_MODE; 1751 if (blkcount > 0) { 1752 mode |= SDHC_BLOCK_COUNT_ENABLE; 1753 if (blkcount > 1) { 1754 mode |= SDHC_MULTI_BLOCK_MODE; 1755 if (!ISSET(sc->sc_flags, SDHC_FLAG_NO_AUTO_STOP) 1756 && !ISSET(cmd->c_flags, SCF_NO_STOP)) 1757 mode |= SDHC_AUTO_CMD12_ENABLE; 1758 } 1759 } 1760 if (cmd->c_dmamap != NULL && cmd->c_datalen > 0 && 1761 ISSET(hp->flags, SHF_MODE_DMAEN)) { 1762 mode |= SDHC_DMA_ENABLE; 1763 } 1764 1765 /* 1766 * Prepare command register value. (2.2.6) 1767 */ 1768 command = (cmd->c_opcode & SDHC_COMMAND_INDEX_MASK) << SDHC_COMMAND_INDEX_SHIFT; 1769 1770 if (ISSET(cmd->c_flags, SCF_RSP_CRC)) 1771 command |= SDHC_CRC_CHECK_ENABLE; 1772 if (ISSET(cmd->c_flags, SCF_RSP_IDX)) 1773 command |= SDHC_INDEX_CHECK_ENABLE; 1774 if (cmd->c_datalen > 0) 1775 command |= SDHC_DATA_PRESENT_SELECT; 1776 1777 if (!ISSET(cmd->c_flags, SCF_RSP_PRESENT)) 1778 command |= SDHC_NO_RESPONSE; 1779 else if (ISSET(cmd->c_flags, SCF_RSP_136)) 1780 command |= SDHC_RESP_LEN_136; 1781 else if (ISSET(cmd->c_flags, SCF_RSP_BSY)) 1782 command |= SDHC_RESP_LEN_48_CHK_BUSY; 1783 else 1784 command |= SDHC_RESP_LEN_48; 1785 1786 /* Wait until command and optionally data inhibit bits are clear. (1.5) */ 1787 pmask = SDHC_CMD_INHIBIT_CMD; 1788 if (cmd->c_flags & (SCF_CMD_ADTC|SCF_RSP_BSY)) 1789 pmask |= SDHC_CMD_INHIBIT_DAT; 1790 error = sdhc_wait_state(hp, pmask, 0); 1791 if (error) { 1792 (void) sdhc_soft_reset(hp, SDHC_RESET_DAT|SDHC_RESET_CMD); 1793 device_printf(sc->sc_dev, "command or data phase inhibited\n"); 1794 return error; 1795 } 1796 1797 DPRINTF(1,("%s: writing cmd: blksize=%d blkcnt=%d mode=%04x cmd=%04x\n", 1798 HDEVNAME(hp), blksize, blkcount, mode, command)); 1799 1800 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED | SDHC_FLAG_USDHC)) { 1801 blksize |= (MAX(0, PAGE_SHIFT - 12) & SDHC_DMA_BOUNDARY_MASK) << 1802 SDHC_DMA_BOUNDARY_SHIFT; /* PAGE_SIZE DMA boundary */ 1803 } 1804 1805 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED)) { 1806 /* Alert the user not to remove the card. */ 1807 HSET1(hp, SDHC_HOST_CTL, SDHC_LED_ON); 1808 } 1809 1810 /* Set DMA start address. */ 1811 if (ISSET(hp->flags, SHF_USE_ADMA2_MASK) && cmd->c_data != NULL) { 1812 for (int seg = 0; seg < cmd->c_dmamap->dm_nsegs; seg++) { 1813 bus_addr_t paddr = 1814 cmd->c_dmamap->dm_segs[seg].ds_addr; 1815 uint16_t len = 1816 cmd->c_dmamap->dm_segs[seg].ds_len == 65536 ? 
1817 0 : cmd->c_dmamap->dm_segs[seg].ds_len; 1818 uint16_t attr = 1819 SDHC_ADMA2_VALID | SDHC_ADMA2_ACT_TRANS; 1820 if (seg == cmd->c_dmamap->dm_nsegs - 1) { 1821 attr |= SDHC_ADMA2_END; 1822 } 1823 if (ISSET(hp->flags, SHF_USE_ADMA2_32)) { 1824 struct sdhc_adma2_descriptor32 *desc = 1825 hp->adma2; 1826 desc[seg].attribute = htole16(attr); 1827 desc[seg].length = htole16(len); 1828 desc[seg].address = htole32(paddr); 1829 } else { 1830 struct sdhc_adma2_descriptor64 *desc = 1831 hp->adma2; 1832 desc[seg].attribute = htole16(attr); 1833 desc[seg].length = htole16(len); 1834 desc[seg].address = htole32(paddr & 0xffffffff); 1835 desc[seg].address_hi = htole32( 1836 (uint64_t)paddr >> 32); 1837 } 1838 } 1839 if (ISSET(hp->flags, SHF_USE_ADMA2_32)) { 1840 struct sdhc_adma2_descriptor32 *desc = hp->adma2; 1841 desc[cmd->c_dmamap->dm_nsegs].attribute = htole16(0); 1842 } else { 1843 struct sdhc_adma2_descriptor64 *desc = hp->adma2; 1844 desc[cmd->c_dmamap->dm_nsegs].attribute = htole16(0); 1845 } 1846 bus_dmamap_sync(sc->sc_dmat, hp->adma_map, 0, PAGE_SIZE, 1847 BUS_DMASYNC_PREWRITE); 1848 1849 const bus_addr_t desc_addr = hp->adma_map->dm_segs[0].ds_addr; 1850 HWRITE4(hp, SDHC_ADMA_SYSTEM_ADDR, desc_addr & 0xffffffff); 1851 if (ISSET(hp->flags, SHF_USE_ADMA2_64)) { 1852 HWRITE4(hp, SDHC_ADMA_SYSTEM_ADDR + 4, 1853 (uint64_t)desc_addr >> 32); 1854 } 1855 1856 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_USDHC)) { 1857 HCLR4(hp, SDHC_HOST_CTL, SDHC_USDHC_DMA_SELECT); 1858 HSET4(hp, SDHC_HOST_CTL, SDHC_USDHC_DMA_SELECT_ADMA2); 1859 } else { 1860 HCLR1(hp, SDHC_HOST_CTL, SDHC_DMA_SELECT); 1861 HSET1(hp, SDHC_HOST_CTL, SDHC_DMA_SELECT_ADMA2); 1862 } 1863 } else if (ISSET(mode, SDHC_DMA_ENABLE) && 1864 !ISSET(sc->sc_flags, SDHC_FLAG_EXTERNAL_DMA)) { 1865 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_USDHC)) { 1866 HCLR4(hp, SDHC_HOST_CTL, SDHC_USDHC_DMA_SELECT); 1867 } 1868 HWRITE4(hp, SDHC_DMA_ADDR, cmd->c_dmamap->dm_segs[0].ds_addr); 1869 } 1870 1871 /* 1872 * Start a CPU data transfer. Writing to the high order byte 1873 * of the SDHC_COMMAND register triggers the SD command. 
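	 * On SDHC_FLAG_32BIT_ACCESS controllers the block size/count pair
	 * and the transfer mode/command pair are each written as a single
	 * 32-bit store, so the store carrying the command index still comes
	 * last; uSDHC additionally keeps the mode bits in its MIX_CTRL
	 * register.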
(1.5) 1874 */ 1875 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_32BIT_ACCESS)) { 1876 HWRITE4(hp, SDHC_BLOCK_SIZE, blksize | (blkcount << 16)); 1877 HWRITE4(hp, SDHC_ARGUMENT, cmd->c_arg); 1878 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_USDHC)) { 1879 /* mode bits is in MIX_CTRL register on uSDHC */ 1880 HWRITE4(hp, SDHC_MIX_CTRL, mode | 1881 (HREAD4(hp, SDHC_MIX_CTRL) & ~SDHC_TRANSFER_MODE_MASK)); 1882 if (cmd->c_opcode == MMC_STOP_TRANSMISSION) 1883 command |= SDHC_COMMAND_TYPE_ABORT; 1884 HWRITE4(hp, SDHC_TRANSFER_MODE, command << 16); 1885 } else { 1886 HWRITE4(hp, SDHC_TRANSFER_MODE, mode | (command << 16)); 1887 } 1888 } else { 1889 HWRITE2(hp, SDHC_BLOCK_SIZE, blksize); 1890 HWRITE2(hp, SDHC_BLOCK_COUNT, blkcount); 1891 HWRITE4(hp, SDHC_ARGUMENT, cmd->c_arg); 1892 HWRITE2(hp, SDHC_TRANSFER_MODE, mode); 1893 HWRITE2(hp, SDHC_COMMAND, command); 1894 } 1895 1896 return 0; 1897 } 1898 1899 static void 1900 sdhc_transfer_data(struct sdhc_host *hp, struct sdmmc_command *cmd) 1901 { 1902 struct sdhc_softc *sc = hp->sc; 1903 int error; 1904 1905 KASSERT(mutex_owned(&hp->intr_lock)); 1906 1907 DPRINTF(1,("%s: data transfer: resp=%08x datalen=%u\n", HDEVNAME(hp), 1908 MMC_R1(cmd->c_resp), cmd->c_datalen)); 1909 1910 #ifdef SDHC_DEBUG 1911 /* XXX I forgot why I wanted to know when this happens :-( */ 1912 if ((cmd->c_opcode == 52 || cmd->c_opcode == 53) && 1913 ISSET(MMC_R1(cmd->c_resp), 0xcb00)) { 1914 aprint_error_dev(hp->sc->sc_dev, 1915 "CMD52/53 error response flags %#x\n", 1916 MMC_R1(cmd->c_resp) & 0xff00); 1917 } 1918 #endif 1919 1920 if (cmd->c_dmamap != NULL) { 1921 if (hp->sc->sc_vendor_transfer_data_dma != NULL) { 1922 error = hp->sc->sc_vendor_transfer_data_dma(sc, cmd); 1923 if (error == 0 && !sdhc_wait_intr(hp, 1924 SDHC_TRANSFER_COMPLETE, SDHC_DMA_TIMEOUT, false)) { 1925 DPRINTF(1,("%s: timeout\n", __func__)); 1926 error = ETIMEDOUT; 1927 } 1928 } else { 1929 error = sdhc_transfer_data_dma(hp, cmd); 1930 } 1931 } else 1932 error = sdhc_transfer_data_pio(hp, cmd); 1933 if (error) 1934 cmd->c_error = error; 1935 SET(cmd->c_flags, SCF_ITSDONE); 1936 1937 DPRINTF(1,("%s: data transfer done (error=%d)\n", 1938 HDEVNAME(hp), cmd->c_error)); 1939 } 1940 1941 static int 1942 sdhc_transfer_data_dma(struct sdhc_host *hp, struct sdmmc_command *cmd) 1943 { 1944 bus_dma_segment_t *dm_segs = cmd->c_dmamap->dm_segs; 1945 bus_addr_t posaddr; 1946 bus_addr_t segaddr; 1947 bus_size_t seglen; 1948 u_int seg = 0; 1949 int error = 0; 1950 int status; 1951 1952 KASSERT(mutex_owned(&hp->intr_lock)); 1953 KASSERT(HREAD2(hp, SDHC_NINTR_STATUS_EN) & SDHC_DMA_INTERRUPT); 1954 KASSERT(HREAD2(hp, SDHC_NINTR_SIGNAL_EN) & SDHC_DMA_INTERRUPT); 1955 KASSERT(HREAD2(hp, SDHC_NINTR_STATUS_EN) & SDHC_TRANSFER_COMPLETE); 1956 KASSERT(HREAD2(hp, SDHC_NINTR_SIGNAL_EN) & SDHC_TRANSFER_COMPLETE); 1957 1958 for (;;) { 1959 status = sdhc_wait_intr(hp, 1960 SDHC_DMA_INTERRUPT|SDHC_TRANSFER_COMPLETE, 1961 SDHC_DMA_TIMEOUT, false); 1962 1963 if (status & SDHC_TRANSFER_COMPLETE) { 1964 break; 1965 } 1966 if (!status) { 1967 DPRINTF(1,("%s: timeout\n", __func__)); 1968 error = ETIMEDOUT; 1969 break; 1970 } 1971 1972 if (ISSET(hp->flags, SHF_USE_ADMA2_MASK)) { 1973 continue; 1974 } 1975 1976 if ((status & SDHC_DMA_INTERRUPT) == 0) { 1977 continue; 1978 } 1979 1980 /* DMA Interrupt (boundary crossing) */ 1981 1982 segaddr = dm_segs[seg].ds_addr; 1983 seglen = dm_segs[seg].ds_len; 1984 posaddr = HREAD4(hp, SDHC_DMA_ADDR); 1985 1986 if ((seg == (cmd->c_dmamap->dm_nsegs-1)) && (posaddr == (segaddr + seglen))) { 1987 continue; 1988 } 1989 if 
((posaddr >= segaddr) && (posaddr < (segaddr + seglen))) 1990 HWRITE4(hp, SDHC_DMA_ADDR, posaddr); 1991 else if ((posaddr >= segaddr) && (posaddr == (segaddr + seglen)) && (seg + 1) < cmd->c_dmamap->dm_nsegs) 1992 HWRITE4(hp, SDHC_DMA_ADDR, dm_segs[++seg].ds_addr); 1993 KASSERT(seg < cmd->c_dmamap->dm_nsegs); 1994 } 1995 1996 if (ISSET(hp->flags, SHF_USE_ADMA2_MASK)) { 1997 bus_dmamap_sync(hp->sc->sc_dmat, hp->adma_map, 0, 1998 PAGE_SIZE, BUS_DMASYNC_POSTWRITE); 1999 } 2000 2001 return error; 2002 } 2003 2004 static int 2005 sdhc_transfer_data_pio(struct sdhc_host *hp, struct sdmmc_command *cmd) 2006 { 2007 uint8_t *data = cmd->c_data; 2008 void (*pio_func)(struct sdhc_host *, uint8_t *, u_int); 2009 u_int len, datalen; 2010 u_int imask; 2011 u_int pmask; 2012 int error = 0; 2013 2014 KASSERT(mutex_owned(&hp->intr_lock)); 2015 2016 if (ISSET(cmd->c_flags, SCF_CMD_READ)) { 2017 imask = SDHC_BUFFER_READ_READY; 2018 pmask = SDHC_BUFFER_READ_ENABLE; 2019 if (ISSET(hp->sc->sc_flags, 2020 SDHC_FLAG_ENHANCED | SDHC_FLAG_USDHC)) { 2021 pio_func = esdhc_read_data_pio; 2022 } else { 2023 pio_func = sdhc_read_data_pio; 2024 } 2025 } else { 2026 imask = SDHC_BUFFER_WRITE_READY; 2027 pmask = SDHC_BUFFER_WRITE_ENABLE; 2028 if (ISSET(hp->sc->sc_flags, 2029 SDHC_FLAG_ENHANCED | SDHC_FLAG_USDHC)) { 2030 pio_func = esdhc_write_data_pio; 2031 } else { 2032 pio_func = sdhc_write_data_pio; 2033 } 2034 } 2035 datalen = cmd->c_datalen; 2036 2037 KASSERT(mutex_owned(&hp->intr_lock)); 2038 KASSERT(HREAD2(hp, SDHC_NINTR_STATUS_EN) & imask); 2039 KASSERT(HREAD2(hp, SDHC_NINTR_STATUS_EN) & SDHC_TRANSFER_COMPLETE); 2040 KASSERT(HREAD2(hp, SDHC_NINTR_SIGNAL_EN) & SDHC_TRANSFER_COMPLETE); 2041 2042 while (datalen > 0) { 2043 if (!ISSET(HREAD4(hp, SDHC_PRESENT_STATE), pmask)) { 2044 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_32BIT_ACCESS)) { 2045 HSET4(hp, SDHC_NINTR_SIGNAL_EN, imask); 2046 } else { 2047 HSET2(hp, SDHC_NINTR_SIGNAL_EN, imask); 2048 } 2049 if (!sdhc_wait_intr(hp, imask, SDHC_BUFFER_TIMEOUT, false)) { 2050 DPRINTF(1,("%s: timeout\n", __func__)); 2051 error = ETIMEDOUT; 2052 break; 2053 } 2054 2055 error = sdhc_wait_state(hp, pmask, pmask); 2056 if (error) 2057 break; 2058 } 2059 2060 len = MIN(datalen, cmd->c_blklen); 2061 (*pio_func)(hp, data, len); 2062 DPRINTF(2,("%s: pio data transfer %u @ %p\n", 2063 HDEVNAME(hp), len, data)); 2064 2065 data += len; 2066 datalen -= len; 2067 } 2068 2069 if (error == 0 && !sdhc_wait_intr(hp, SDHC_TRANSFER_COMPLETE, 2070 SDHC_TRANSFER_TIMEOUT, false)) { 2071 DPRINTF(1,("%s: timeout for transfer\n", __func__)); 2072 error = ETIMEDOUT; 2073 } 2074 2075 return error; 2076 } 2077 2078 static void 2079 sdhc_read_data_pio(struct sdhc_host *hp, uint8_t *data, u_int datalen) 2080 { 2081 2082 if (((__uintptr_t)data & 3) == 0) { 2083 while (datalen > 3) { 2084 *(uint32_t *)data = le32toh(HREAD4(hp, SDHC_DATA)); 2085 data += 4; 2086 datalen -= 4; 2087 } 2088 if (datalen > 1) { 2089 *(uint16_t *)data = le16toh(HREAD2(hp, SDHC_DATA)); 2090 data += 2; 2091 datalen -= 2; 2092 } 2093 if (datalen > 0) { 2094 *data = HREAD1(hp, SDHC_DATA); 2095 data += 1; 2096 datalen -= 1; 2097 } 2098 } else if (((__uintptr_t)data & 1) == 0) { 2099 while (datalen > 1) { 2100 *(uint16_t *)data = le16toh(HREAD2(hp, SDHC_DATA)); 2101 data += 2; 2102 datalen -= 2; 2103 } 2104 if (datalen > 0) { 2105 *data = HREAD1(hp, SDHC_DATA); 2106 data += 1; 2107 datalen -= 1; 2108 } 2109 } else { 2110 while (datalen > 0) { 2111 *data = HREAD1(hp, SDHC_DATA); 2112 data += 1; 2113 datalen -= 1; 2114 } 2115 } 2116 } 2117 2118 
static void 2119 sdhc_write_data_pio(struct sdhc_host *hp, uint8_t *data, u_int datalen) 2120 { 2121 2122 if (((__uintptr_t)data & 3) == 0) { 2123 while (datalen > 3) { 2124 HWRITE4(hp, SDHC_DATA, htole32(*(uint32_t *)data)); 2125 data += 4; 2126 datalen -= 4; 2127 } 2128 if (datalen > 1) { 2129 HWRITE2(hp, SDHC_DATA, htole16(*(uint16_t *)data)); 2130 data += 2; 2131 datalen -= 2; 2132 } 2133 if (datalen > 0) { 2134 HWRITE1(hp, SDHC_DATA, *data); 2135 data += 1; 2136 datalen -= 1; 2137 } 2138 } else if (((__uintptr_t)data & 1) == 0) { 2139 while (datalen > 1) { 2140 HWRITE2(hp, SDHC_DATA, htole16(*(uint16_t *)data)); 2141 data += 2; 2142 datalen -= 2; 2143 } 2144 if (datalen > 0) { 2145 HWRITE1(hp, SDHC_DATA, *data); 2146 data += 1; 2147 datalen -= 1; 2148 } 2149 } else { 2150 while (datalen > 0) { 2151 HWRITE1(hp, SDHC_DATA, *data); 2152 data += 1; 2153 datalen -= 1; 2154 } 2155 } 2156 } 2157 2158 static void 2159 esdhc_read_data_pio(struct sdhc_host *hp, uint8_t *data, u_int datalen) 2160 { 2161 uint16_t status = HREAD2(hp, SDHC_NINTR_STATUS); 2162 uint32_t v; 2163 2164 const size_t watermark = (HREAD4(hp, SDHC_WATERMARK_LEVEL) >> SDHC_WATERMARK_READ_SHIFT) & SDHC_WATERMARK_READ_MASK; 2165 size_t count = 0; 2166 2167 while (datalen > 3 && !ISSET(status, SDHC_TRANSFER_COMPLETE)) { 2168 if (count == 0) { 2169 /* 2170 * If we've drained "watermark" words, we need to wait 2171 * a little bit so the read FIFO can refill. 2172 */ 2173 sdmmc_delay(10); 2174 count = watermark; 2175 } 2176 v = HREAD4(hp, SDHC_DATA); 2177 v = le32toh(v); 2178 *(uint32_t *)data = v; 2179 data += 4; 2180 datalen -= 4; 2181 status = HREAD2(hp, SDHC_NINTR_STATUS); 2182 count--; 2183 } 2184 if (datalen > 0 && !ISSET(status, SDHC_TRANSFER_COMPLETE)) { 2185 if (count == 0) { 2186 sdmmc_delay(10); 2187 } 2188 v = HREAD4(hp, SDHC_DATA); 2189 v = le32toh(v); 2190 do { 2191 *data++ = v; 2192 v >>= 8; 2193 } while (--datalen > 0); 2194 } 2195 } 2196 2197 static void 2198 esdhc_write_data_pio(struct sdhc_host *hp, uint8_t *data, u_int datalen) 2199 { 2200 uint16_t status = HREAD2(hp, SDHC_NINTR_STATUS); 2201 uint32_t v; 2202 2203 const size_t watermark = (HREAD4(hp, SDHC_WATERMARK_LEVEL) >> SDHC_WATERMARK_WRITE_SHIFT) & SDHC_WATERMARK_WRITE_MASK; 2204 size_t count = watermark; 2205 2206 while (datalen > 3 && !ISSET(status, SDHC_TRANSFER_COMPLETE)) { 2207 if (count == 0) { 2208 sdmmc_delay(10); 2209 count = watermark; 2210 } 2211 v = *(uint32_t *)data; 2212 v = htole32(v); 2213 HWRITE4(hp, SDHC_DATA, v); 2214 data += 4; 2215 datalen -= 4; 2216 status = HREAD2(hp, SDHC_NINTR_STATUS); 2217 count--; 2218 } 2219 if (datalen > 0 && !ISSET(status, SDHC_TRANSFER_COMPLETE)) { 2220 if (count == 0) { 2221 sdmmc_delay(10); 2222 } 2223 v = *(uint32_t *)data; 2224 v = htole32(v); 2225 HWRITE4(hp, SDHC_DATA, v); 2226 } 2227 } 2228 2229 /* Prepare for another command. */ 2230 static int 2231 sdhc_soft_reset(struct sdhc_host *hp, int mask) 2232 { 2233 int timo; 2234 2235 KASSERT(mutex_owned(&hp->intr_lock)); 2236 2237 DPRINTF(1,("%s: software reset reg=%08x\n", HDEVNAME(hp), mask)); 2238 2239 /* Request the reset. */ 2240 HWRITE1(hp, SDHC_SOFTWARE_RESET, mask); 2241 2242 /* 2243 * If necessary, wait for the controller to set the bits to 2244 * acknowledge the reset. 
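 * Controllers flagged with SDHC_FLAG_WAIT_RESET do not latch the
 * reset request immediately: for CMD/DAT resets the loop below first
 * polls, with a short busy-wait delay per iteration, until the reset
 * bits read back as set, and only then falls through to wait for them
 * to clear.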
2245 */ 2246 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_WAIT_RESET) && 2247 ISSET(mask, (SDHC_RESET_DAT | SDHC_RESET_CMD))) { 2248 for (timo = 10000; timo > 0; timo--) { 2249 if (ISSET(HREAD1(hp, SDHC_SOFTWARE_RESET), mask)) 2250 break; 2251 /* Short delay because I worry we may miss it... */ 2252 sdmmc_delay(1); 2253 } 2254 if (timo == 0) { 2255 DPRINTF(1,("%s: timeout for reset on\n", __func__)); 2256 return ETIMEDOUT; 2257 } 2258 } 2259 2260 /* 2261 * Wait for the controller to clear the bits to indicate that 2262 * the reset has completed. 2263 */ 2264 for (timo = 10; timo > 0; timo--) { 2265 if (!ISSET(HREAD1(hp, SDHC_SOFTWARE_RESET), mask)) 2266 break; 2267 sdmmc_delay(10000); 2268 } 2269 if (timo == 0) { 2270 DPRINTF(1,("%s: timeout reg=%08x\n", HDEVNAME(hp), 2271 HREAD1(hp, SDHC_SOFTWARE_RESET))); 2272 return ETIMEDOUT; 2273 } 2274 2275 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED)) { 2276 HSET4(hp, SDHC_DMA_CTL, SDHC_DMA_SNOOP); 2277 } 2278 2279 return 0; 2280 } 2281 2282 static int 2283 sdhc_wait_intr(struct sdhc_host *hp, int mask, int timo, bool probing) 2284 { 2285 int status, error, nointr; 2286 2287 KASSERT(mutex_owned(&hp->intr_lock)); 2288 2289 mask |= SDHC_ERROR_INTERRUPT; 2290 2291 nointr = 0; 2292 status = hp->intr_status & mask; 2293 while (status == 0) { 2294 if (cv_timedwait(&hp->intr_cv, &hp->intr_lock, timo) 2295 == EWOULDBLOCK) { 2296 nointr = 1; 2297 break; 2298 } 2299 status = hp->intr_status & mask; 2300 } 2301 error = hp->intr_error_status; 2302 2303 DPRINTF(2,("%s: intr status %#x error %#x\n", HDEVNAME(hp), status, 2304 error)); 2305 2306 hp->intr_status &= ~status; 2307 hp->intr_error_status &= ~error; 2308 2309 if (ISSET(status, SDHC_ERROR_INTERRUPT)) { 2310 if (ISSET(error, SDHC_DMA_ERROR)) 2311 device_printf(hp->sc->sc_dev,"dma error\n"); 2312 if (ISSET(error, SDHC_ADMA_ERROR)) 2313 device_printf(hp->sc->sc_dev,"adma error\n"); 2314 if (ISSET(error, SDHC_AUTO_CMD12_ERROR)) 2315 device_printf(hp->sc->sc_dev,"auto_cmd12 error\n"); 2316 if (ISSET(error, SDHC_CURRENT_LIMIT_ERROR)) 2317 device_printf(hp->sc->sc_dev,"current limit error\n"); 2318 if (ISSET(error, SDHC_DATA_END_BIT_ERROR)) 2319 device_printf(hp->sc->sc_dev,"data end bit error\n"); 2320 if (ISSET(error, SDHC_DATA_CRC_ERROR)) 2321 device_printf(hp->sc->sc_dev,"data crc error\n"); 2322 if (ISSET(error, SDHC_DATA_TIMEOUT_ERROR)) 2323 device_printf(hp->sc->sc_dev,"data timeout error\n"); 2324 if (ISSET(error, SDHC_CMD_INDEX_ERROR)) 2325 device_printf(hp->sc->sc_dev,"cmd index error\n"); 2326 if (ISSET(error, SDHC_CMD_END_BIT_ERROR)) 2327 device_printf(hp->sc->sc_dev,"cmd end bit error\n"); 2328 if (ISSET(error, SDHC_CMD_CRC_ERROR)) 2329 device_printf(hp->sc->sc_dev,"cmd crc error\n"); 2330 if (ISSET(error, SDHC_CMD_TIMEOUT_ERROR)) { 2331 if (!probing) 2332 device_printf(hp->sc->sc_dev,"cmd timeout error\n"); 2333 #ifdef SDHC_DEBUG 2334 else if (sdhcdebug > 0) 2335 device_printf(hp->sc->sc_dev,"cmd timeout (expected)\n"); 2336 #endif 2337 } 2338 if ((error & ~SDHC_EINTR_STATUS_MASK) != 0) 2339 device_printf(hp->sc->sc_dev,"vendor error %#x\n", 2340 (error & ~SDHC_EINTR_STATUS_MASK)); 2341 if (error == 0) 2342 device_printf(hp->sc->sc_dev,"no error\n"); 2343 2344 /* Command timeout has higher priority than command complete. */ 2345 if (ISSET(error, SDHC_CMD_TIMEOUT_ERROR)) 2346 CLR(status, SDHC_COMMAND_COMPLETE); 2347 2348 /* Transfer complete has higher priority than data timeout. 
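 * A data timeout reported alongside SDHC_TRANSFER_COMPLETE is
 * discarded below, just as a command timeout above discards
 * SDHC_COMMAND_COMPLETE; if an error (or no interrupt at all)
 * remains, the CMD/DAT lines are reset on non-ENHANCED controllers
 * and the caller sees a zero status.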
*/ 2349 if (ISSET(status, SDHC_TRANSFER_COMPLETE)) 2350 CLR(error, SDHC_DATA_TIMEOUT_ERROR); 2351 } 2352 2353 if (nointr || 2354 (ISSET(status, SDHC_ERROR_INTERRUPT) && error)) { 2355 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED)) 2356 (void)sdhc_soft_reset(hp, SDHC_RESET_CMD|SDHC_RESET_DAT); 2357 hp->intr_error_status = 0; 2358 status = 0; 2359 } 2360 2361 return status; 2362 } 2363 2364 /* 2365 * Established by attachment driver at interrupt priority IPL_SDMMC. 2366 */ 2367 int 2368 sdhc_intr(void *arg) 2369 { 2370 struct sdhc_softc *sc = (struct sdhc_softc *)arg; 2371 struct sdhc_host *hp; 2372 int done = 0; 2373 uint16_t status; 2374 uint16_t error; 2375 2376 /* We got an interrupt, but we don't know from which slot. */ 2377 for (size_t host = 0; host < sc->sc_nhosts; host++) { 2378 hp = sc->sc_host[host]; 2379 if (hp == NULL) 2380 continue; 2381 2382 mutex_enter(&hp->intr_lock); 2383 2384 if (ISSET(sc->sc_flags, SDHC_FLAG_32BIT_ACCESS)) { 2385 /* Find out which interrupts are pending. */ 2386 uint32_t xstatus = HREAD4(hp, SDHC_NINTR_STATUS); 2387 status = xstatus; 2388 error = xstatus >> 16; 2389 if (ISSET(sc->sc_flags, SDHC_FLAG_USDHC) && 2390 (xstatus & SDHC_TRANSFER_COMPLETE) && 2391 !(xstatus & SDHC_DMA_INTERRUPT)) { 2392 /* read again due to uSDHC errata */ 2393 status = xstatus = HREAD4(hp, 2394 SDHC_NINTR_STATUS); 2395 error = xstatus >> 16; 2396 } 2397 if (ISSET(sc->sc_flags, 2398 SDHC_FLAG_ENHANCED | SDHC_FLAG_USDHC)) { 2399 if ((error & SDHC_NINTR_STATUS_MASK) != 0) 2400 SET(status, SDHC_ERROR_INTERRUPT); 2401 } 2402 if (error) 2403 xstatus |= SDHC_ERROR_INTERRUPT; 2404 else if (!ISSET(status, SDHC_NINTR_STATUS_MASK)) 2405 goto next_port; /* no interrupt for us */ 2406 /* Acknowledge the interrupts we are about to handle. */ 2407 HWRITE4(hp, SDHC_NINTR_STATUS, xstatus); 2408 } else { 2409 /* Find out which interrupts are pending. */ 2410 error = 0; 2411 status = HREAD2(hp, SDHC_NINTR_STATUS); 2412 if (!ISSET(status, SDHC_NINTR_STATUS_MASK)) 2413 goto next_port; /* no interrupt for us */ 2414 /* Acknowledge the interrupts we are about to handle. */ 2415 HWRITE2(hp, SDHC_NINTR_STATUS, status); 2416 if (ISSET(status, SDHC_ERROR_INTERRUPT)) { 2417 /* Acknowledge error interrupts. */ 2418 error = HREAD2(hp, SDHC_EINTR_STATUS); 2419 HWRITE2(hp, SDHC_EINTR_STATUS, error); 2420 } 2421 } 2422 2423 DPRINTF(2,("%s: interrupt status=%x error=%x\n", HDEVNAME(hp), 2424 status, error)); 2425 2426 /* Claim this interrupt. */ 2427 done = 1; 2428 2429 if (ISSET(status, SDHC_ERROR_INTERRUPT) && 2430 ISSET(error, SDHC_ADMA_ERROR)) { 2431 uint8_t adma_err = HREAD1(hp, SDHC_ADMA_ERROR_STATUS); 2432 printf("%s: ADMA error, status %02x\n", HDEVNAME(hp), 2433 adma_err); 2434 } 2435 2436 /* 2437 * Wake up the sdmmc event thread to scan for cards. 2438 */ 2439 if (ISSET(status, SDHC_CARD_REMOVAL|SDHC_CARD_INSERTION)) { 2440 if (hp->sdmmc != NULL) { 2441 sdmmc_needs_discover(hp->sdmmc); 2442 } 2443 if (ISSET(sc->sc_flags, 2444 SDHC_FLAG_ENHANCED | SDHC_FLAG_USDHC)) { 2445 HCLR4(hp, SDHC_NINTR_STATUS_EN, 2446 status & (SDHC_CARD_REMOVAL|SDHC_CARD_INSERTION)); 2447 HCLR4(hp, SDHC_NINTR_SIGNAL_EN, 2448 status & (SDHC_CARD_REMOVAL|SDHC_CARD_INSERTION)); 2449 } 2450 } 2451 2452 /* 2453 * Schedule re-tuning process (UHS). 2454 */ 2455 if (ISSET(status, SDHC_RETUNING_EVENT)) { 2456 atomic_swap_uint(&hp->tuning_timer_pending, 1); 2457 } 2458 2459 /* 2460 * Wake up the blocking process to service command 2461 * related interrupt(s). 
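 * The raw status and error bits are accumulated under intr_lock in
 * hp->intr_status and hp->intr_error_status for sdhc_wait_intr() to
 * consume; on ENHANCED/uSDHC controllers the buffer-ready signals are
 * also masked here and re-enabled by the PIO path when it next needs
 * them.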
2462 */ 2463 if (ISSET(status, SDHC_COMMAND_COMPLETE|SDHC_ERROR_INTERRUPT| 2464 SDHC_BUFFER_READ_READY|SDHC_BUFFER_WRITE_READY| 2465 SDHC_TRANSFER_COMPLETE|SDHC_DMA_INTERRUPT)) { 2466 hp->intr_error_status |= error; 2467 hp->intr_status |= status; 2468 if (ISSET(sc->sc_flags, 2469 SDHC_FLAG_ENHANCED | SDHC_FLAG_USDHC)) { 2470 HCLR4(hp, SDHC_NINTR_SIGNAL_EN, 2471 status & (SDHC_BUFFER_READ_READY|SDHC_BUFFER_WRITE_READY)); 2472 } 2473 cv_broadcast(&hp->intr_cv); 2474 } 2475 2476 /* 2477 * Service SD card interrupts. 2478 */ 2479 if (!ISSET(sc->sc_flags, SDHC_FLAG_ENHANCED | SDHC_FLAG_USDHC) 2480 && ISSET(status, SDHC_CARD_INTERRUPT)) { 2481 DPRINTF(0,("%s: card interrupt\n", HDEVNAME(hp))); 2482 HCLR2(hp, SDHC_NINTR_STATUS_EN, SDHC_CARD_INTERRUPT); 2483 sdmmc_card_intr(hp->sdmmc); 2484 } 2485 next_port: 2486 mutex_exit(&hp->intr_lock); 2487 } 2488 2489 return done; 2490 } 2491 2492 kmutex_t * 2493 sdhc_host_lock(struct sdhc_host *hp) 2494 { 2495 return &hp->intr_lock; 2496 } 2497 2498 uint8_t 2499 sdhc_host_read_1(struct sdhc_host *hp, int reg) 2500 { 2501 return HREAD1(hp, reg); 2502 } 2503 2504 uint16_t 2505 sdhc_host_read_2(struct sdhc_host *hp, int reg) 2506 { 2507 return HREAD2(hp, reg); 2508 } 2509 2510 uint32_t 2511 sdhc_host_read_4(struct sdhc_host *hp, int reg) 2512 { 2513 return HREAD4(hp, reg); 2514 } 2515 2516 void 2517 sdhc_host_write_1(struct sdhc_host *hp, int reg, uint8_t val) 2518 { 2519 HWRITE1(hp, reg, val); 2520 } 2521 2522 void 2523 sdhc_host_write_2(struct sdhc_host *hp, int reg, uint16_t val) 2524 { 2525 HWRITE2(hp, reg, val); 2526 } 2527 2528 void 2529 sdhc_host_write_4(struct sdhc_host *hp, int reg, uint32_t val) 2530 { 2531 HWRITE4(hp, reg, val); 2532 } 2533 2534 #ifdef SDHC_DEBUG 2535 void 2536 sdhc_dump_regs(struct sdhc_host *hp) 2537 { 2538 2539 printf("0x%02x PRESENT_STATE: %x\n", SDHC_PRESENT_STATE, 2540 HREAD4(hp, SDHC_PRESENT_STATE)); 2541 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED)) 2542 printf("0x%02x POWER_CTL: %x\n", SDHC_POWER_CTL, 2543 HREAD1(hp, SDHC_POWER_CTL)); 2544 printf("0x%02x NINTR_STATUS: %x\n", SDHC_NINTR_STATUS, 2545 HREAD2(hp, SDHC_NINTR_STATUS)); 2546 printf("0x%02x EINTR_STATUS: %x\n", SDHC_EINTR_STATUS, 2547 HREAD2(hp, SDHC_EINTR_STATUS)); 2548 printf("0x%02x NINTR_STATUS_EN: %x\n", SDHC_NINTR_STATUS_EN, 2549 HREAD2(hp, SDHC_NINTR_STATUS_EN)); 2550 printf("0x%02x EINTR_STATUS_EN: %x\n", SDHC_EINTR_STATUS_EN, 2551 HREAD2(hp, SDHC_EINTR_STATUS_EN)); 2552 printf("0x%02x NINTR_SIGNAL_EN: %x\n", SDHC_NINTR_SIGNAL_EN, 2553 HREAD2(hp, SDHC_NINTR_SIGNAL_EN)); 2554 printf("0x%02x EINTR_SIGNAL_EN: %x\n", SDHC_EINTR_SIGNAL_EN, 2555 HREAD2(hp, SDHC_EINTR_SIGNAL_EN)); 2556 printf("0x%02x CAPABILITIES: %x\n", SDHC_CAPABILITIES, 2557 HREAD4(hp, SDHC_CAPABILITIES)); 2558 printf("0x%02x MAX_CAPABILITIES: %x\n", SDHC_MAX_CAPABILITIES, 2559 HREAD4(hp, SDHC_MAX_CAPABILITIES)); 2560 } 2561 #endif 2562
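/*
 * Illustrative sketch, not part of the driver: how a single 4-byte
 * read at SDHC_NINTR_STATUS splits into the normal and error halves,
 * as sdhc_intr() does on controllers with SDHC_FLAG_32BIT_ACCESS, and
 * how the exported sdhc_host_*() accessors are meant to be used under
 * the host lock.  The names sdhc_split_intr_status and
 * example_vendor_quirk are hypothetical and the block is compiled out.
 */
#if 0
static void
sdhc_split_intr_status(uint32_t xstatus, uint16_t *status, uint16_t *error)
{

	/* Low 16 bits mirror SDHC_NINTR_STATUS. */
	*status = (uint16_t)xstatus;
	/* High 16 bits mirror SDHC_EINTR_STATUS. */
	*error = (uint16_t)(xstatus >> 16);
}

static void
example_vendor_quirk(struct sdhc_host *hp)
{
	kmutex_t *lk = sdhc_host_lock(hp);
	uint16_t status, error;

	/* Register access is serialized by the host's interrupt lock. */
	mutex_enter(lk);
	sdhc_split_intr_status(sdhc_host_read_4(hp, SDHC_NINTR_STATUS),
	    &status, &error);
	if (error != 0)
		printf("pending error interrupt status %#x\n", error);
	mutex_exit(lk);
}
#endif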