/*	$NetBSD: sdhc.c,v 1.97 2017/01/07 15:05:08 kiyohara Exp $	*/
2 /* $OpenBSD: sdhc.c,v 1.25 2009/01/13 19:44:20 grange Exp $ */
3
4 /*
5 * Copyright (c) 2006 Uwe Stuehler <uwe (at) openbsd.org>
6 *
7 * Permission to use, copy, modify, and distribute this software for any
8 * purpose with or without fee is hereby granted, provided that the above
9 * copyright notice and this permission notice appear in all copies.
10 *
11 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18 */
19
20 /*
21 * SD Host Controller driver based on the SD Host Controller Standard
22 * Simplified Specification Version 1.00 (www.sdcard.com).
23 */
24
25 #include <sys/cdefs.h>
26 __KERNEL_RCSID(0, "$NetBSD: sdhc.c,v 1.97 2017/01/07 15:05:08 kiyohara Exp $");
27
28 #ifdef _KERNEL_OPT
29 #include "opt_sdmmc.h"
30 #endif
31
32 #include <sys/param.h>
33 #include <sys/device.h>
34 #include <sys/kernel.h>
35 #include <sys/malloc.h>
36 #include <sys/systm.h>
37 #include <sys/mutex.h>
38 #include <sys/condvar.h>
39 #include <sys/atomic.h>
40
41 #include <dev/sdmmc/sdhcreg.h>
42 #include <dev/sdmmc/sdhcvar.h>
43 #include <dev/sdmmc/sdmmcchip.h>
44 #include <dev/sdmmc/sdmmcreg.h>
45 #include <dev/sdmmc/sdmmcvar.h>
46
47 #ifdef SDHC_DEBUG
48 int sdhcdebug = 1;
49 #define DPRINTF(n,s) do { if ((n) <= sdhcdebug) printf s; } while (0)
50 void sdhc_dump_regs(struct sdhc_host *);
51 #else
52 #define DPRINTF(n,s) do {} while (0)
53 #endif
54
55 #define SDHC_COMMAND_TIMEOUT hz
56 #define SDHC_BUFFER_TIMEOUT hz
57 #define SDHC_TRANSFER_TIMEOUT hz
58 #define SDHC_DMA_TIMEOUT (hz*3)
59 #define SDHC_TUNING_TIMEOUT hz
60
/* Per-slot state; one instance for each standard register set (slot). */
struct sdhc_host {
	struct sdhc_softc *sc;		/* host controller device */

	bus_space_tag_t iot;		/* host register set tag */
	bus_space_handle_t ioh;		/* host register set handle */
	bus_size_t ios;			/* host register space size */
	bus_dma_tag_t dmat;		/* host DMA tag */

	device_t sdmmc;			/* generic SD/MMC device */

	u_int clkbase;			/* base clock frequency in KHz */
	int maxblklen;			/* maximum block length */
	uint32_t ocr;			/* OCR value from capabilities */

	uint8_t regs[14];		/* host controller state */
					/* (saved/restored by suspend/resume) */

	uint16_t intr_status;		/* soft interrupt status */
	uint16_t intr_error_status;	/* soft error status */
	kmutex_t intr_lock;		/* serializes register access;
					 * presumably also guards intr_status
					 * — TODO confirm against intr path */
	kcondvar_t intr_cv;		/* waited on for interrupt delivery */

	callout_t tuning_timer;		/* re-tuning mode 1 periodic timer */
	int tuning_timing;		/* timing value used for tuning */
	u_int tuning_timer_count;	/* re-tuning interval (seconds) */
	u_int tuning_timer_pending;	/* nonzero when re-tuning is due */

	int specver;			/* spec. version, -1 if unknown */

	uint32_t flags;			/* flags for this host */
#define SHF_USE_DMA		0x0001
#define SHF_USE_4BIT_MODE	0x0002
#define SHF_USE_8BIT_MODE	0x0004
#define SHF_MODE_DMAEN		0x0008 /* needs SDHC_DMA_ENABLE in mode */
#define SHF_USE_ADMA2_32	0x0010
#define SHF_USE_ADMA2_64	0x0020
#define SHF_USE_ADMA2_MASK	0x0030

	bus_dmamap_t adma_map;		/* ADMA2 descriptor page DMA map */
	bus_dma_segment_t adma_segs[1];	/* ADMA2 descriptor page segment */
	void *adma2;			/* ADMA2 descriptor page (KVA) */
};
102
103 #define HDEVNAME(hp) (device_xname((hp)->sc->sc_dev))
104
105 static uint8_t
106 hread1(struct sdhc_host *hp, bus_size_t reg)
107 {
108
109 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_32BIT_ACCESS))
110 return bus_space_read_1(hp->iot, hp->ioh, reg);
111 return bus_space_read_4(hp->iot, hp->ioh, reg & -4) >> (8 * (reg & 3));
112 }
113
114 static uint16_t
115 hread2(struct sdhc_host *hp, bus_size_t reg)
116 {
117
118 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_32BIT_ACCESS))
119 return bus_space_read_2(hp->iot, hp->ioh, reg);
120 return bus_space_read_4(hp->iot, hp->ioh, reg & -4) >> (8 * (reg & 2));
121 }
122
123 #define HREAD1(hp, reg) hread1(hp, reg)
124 #define HREAD2(hp, reg) hread2(hp, reg)
125 #define HREAD4(hp, reg) \
126 (bus_space_read_4((hp)->iot, (hp)->ioh, (reg)))
127
128
129 static void
130 hwrite1(struct sdhc_host *hp, bus_size_t o, uint8_t val)
131 {
132
133 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_32BIT_ACCESS)) {
134 bus_space_write_1(hp->iot, hp->ioh, o, val);
135 } else {
136 const size_t shift = 8 * (o & 3);
137 o &= -4;
138 uint32_t tmp = bus_space_read_4(hp->iot, hp->ioh, o);
139 tmp = (val << shift) | (tmp & ~(0xff << shift));
140 bus_space_write_4(hp->iot, hp->ioh, o, tmp);
141 }
142 }
143
144 static void
145 hwrite2(struct sdhc_host *hp, bus_size_t o, uint16_t val)
146 {
147
148 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_32BIT_ACCESS)) {
149 bus_space_write_2(hp->iot, hp->ioh, o, val);
150 } else {
151 const size_t shift = 8 * (o & 2);
152 o &= -4;
153 uint32_t tmp = bus_space_read_4(hp->iot, hp->ioh, o);
154 tmp = (val << shift) | (tmp & ~(0xffff << shift));
155 bus_space_write_4(hp->iot, hp->ioh, o, tmp);
156 }
157 }
158
159 #define HWRITE1(hp, reg, val) hwrite1(hp, reg, val)
160 #define HWRITE2(hp, reg, val) hwrite2(hp, reg, val)
161 #define HWRITE4(hp, reg, val) \
162 bus_space_write_4((hp)->iot, (hp)->ioh, (reg), (val))
163
164 #define HCLR1(hp, reg, bits) \
165 do if (bits) HWRITE1((hp), (reg), HREAD1((hp), (reg)) & ~(bits)); while (0)
166 #define HCLR2(hp, reg, bits) \
167 do if (bits) HWRITE2((hp), (reg), HREAD2((hp), (reg)) & ~(bits)); while (0)
168 #define HCLR4(hp, reg, bits) \
169 do if (bits) HWRITE4((hp), (reg), HREAD4((hp), (reg)) & ~(bits)); while (0)
170 #define HSET1(hp, reg, bits) \
171 do if (bits) HWRITE1((hp), (reg), HREAD1((hp), (reg)) | (bits)); while (0)
172 #define HSET2(hp, reg, bits) \
173 do if (bits) HWRITE2((hp), (reg), HREAD2((hp), (reg)) | (bits)); while (0)
174 #define HSET4(hp, reg, bits) \
175 do if (bits) HWRITE4((hp), (reg), HREAD4((hp), (reg)) | (bits)); while (0)
176
177 static int sdhc_host_reset(sdmmc_chipset_handle_t);
178 static int sdhc_host_reset1(sdmmc_chipset_handle_t);
179 static uint32_t sdhc_host_ocr(sdmmc_chipset_handle_t);
180 static int sdhc_host_maxblklen(sdmmc_chipset_handle_t);
181 static int sdhc_card_detect(sdmmc_chipset_handle_t);
182 static int sdhc_write_protect(sdmmc_chipset_handle_t);
183 static int sdhc_bus_power(sdmmc_chipset_handle_t, uint32_t);
184 static int sdhc_bus_clock_ddr(sdmmc_chipset_handle_t, int, bool);
185 static int sdhc_bus_width(sdmmc_chipset_handle_t, int);
186 static int sdhc_bus_rod(sdmmc_chipset_handle_t, int);
187 static void sdhc_card_enable_intr(sdmmc_chipset_handle_t, int);
188 static void sdhc_card_intr_ack(sdmmc_chipset_handle_t);
189 static void sdhc_exec_command(sdmmc_chipset_handle_t,
190 struct sdmmc_command *);
191 static int sdhc_signal_voltage(sdmmc_chipset_handle_t, int);
192 static int sdhc_execute_tuning1(struct sdhc_host *, int);
193 static int sdhc_execute_tuning(sdmmc_chipset_handle_t, int);
194 static void sdhc_tuning_timer(void *);
195 static int sdhc_start_command(struct sdhc_host *, struct sdmmc_command *);
196 static int sdhc_wait_state(struct sdhc_host *, uint32_t, uint32_t);
197 static int sdhc_soft_reset(struct sdhc_host *, int);
198 static int sdhc_wait_intr(struct sdhc_host *, int, int, bool);
199 static void sdhc_transfer_data(struct sdhc_host *, struct sdmmc_command *);
200 static int sdhc_transfer_data_dma(struct sdhc_host *, struct sdmmc_command *);
201 static int sdhc_transfer_data_pio(struct sdhc_host *, struct sdmmc_command *);
202 static void sdhc_read_data_pio(struct sdhc_host *, uint8_t *, u_int);
203 static void sdhc_write_data_pio(struct sdhc_host *, uint8_t *, u_int);
204 static void esdhc_read_data_pio(struct sdhc_host *, uint8_t *, u_int);
205 static void esdhc_write_data_pio(struct sdhc_host *, uint8_t *, u_int);
206
/*
 * Chipset function switch handed to the generic sdmmc(4) bus driver;
 * these are the entry points through which the bus drives this
 * controller.
 */
static struct sdmmc_chip_functions sdhc_functions = {
	/* host controller reset */
	.host_reset = sdhc_host_reset,

	/* host controller capabilities */
	.host_ocr = sdhc_host_ocr,
	.host_maxblklen = sdhc_host_maxblklen,

	/* card detection */
	.card_detect = sdhc_card_detect,

	/* write protect */
	.write_protect = sdhc_write_protect,

	/* bus power, clock frequency, width and ROD(OpenDrain/PushPull) */
	.bus_power = sdhc_bus_power,
	.bus_clock = NULL,	/* see sdhc_bus_clock_ddr */
	.bus_width = sdhc_bus_width,
	.bus_rod = sdhc_bus_rod,

	/* command execution */
	.exec_command = sdhc_exec_command,

	/* card interrupt */
	.card_enable_intr = sdhc_card_enable_intr,
	.card_intr_ack = sdhc_card_intr_ack,

	/* UHS functions */
	.signal_voltage = sdhc_signal_voltage,
	.bus_clock_ddr = sdhc_bus_clock_ddr,
	.execute_tuning = sdhc_execute_tuning,
};
239
240 static int
241 sdhc_cfprint(void *aux, const char *pnp)
242 {
243 const struct sdmmcbus_attach_args * const saa = aux;
244 const struct sdhc_host * const hp = saa->saa_sch;
245
246 if (pnp) {
247 aprint_normal("sdmmc at %s", pnp);
248 }
249 for (size_t host = 0; host < hp->sc->sc_nhosts; host++) {
250 if (hp->sc->sc_host[host] == hp) {
251 aprint_normal(" slot %zu", host);
252 }
253 }
254
255 return UNCONF;
256 }
257
258 /*
259 * Called by attachment driver. For each SD card slot there is one SD
260 * host controller standard register set. (1.3)
261 */
262 int
263 sdhc_host_found(struct sdhc_softc *sc, bus_space_tag_t iot,
264 bus_space_handle_t ioh, bus_size_t iosize)
265 {
266 struct sdmmcbus_attach_args saa;
267 struct sdhc_host *hp;
268 uint32_t caps, caps2;
269 uint16_t sdhcver;
270 int error;
271
272 /* Allocate one more host structure. */
273 hp = malloc(sizeof(struct sdhc_host), M_DEVBUF, M_WAITOK|M_ZERO);
274 if (hp == NULL) {
275 aprint_error_dev(sc->sc_dev,
276 "couldn't alloc memory (sdhc host)\n");
277 goto err1;
278 }
279 sc->sc_host[sc->sc_nhosts++] = hp;
280
281 /* Fill in the new host structure. */
282 hp->sc = sc;
283 hp->iot = iot;
284 hp->ioh = ioh;
285 hp->ios = iosize;
286 hp->dmat = sc->sc_dmat;
287
288 mutex_init(&hp->intr_lock, MUTEX_DEFAULT, IPL_SDMMC);
289 cv_init(&hp->intr_cv, "sdhcintr");
290 callout_init(&hp->tuning_timer, CALLOUT_MPSAFE);
291 callout_setfunc(&hp->tuning_timer, sdhc_tuning_timer, hp);
292
293 if (iosize <= SDHC_HOST_CTL_VERSION) {
294 aprint_normal_dev(sc->sc_dev, "SDHC NO-VERS");
295 hp->specver = -1;
296 } else {
297 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_USDHC)) {
298 sdhcver = SDHC_SPEC_VERS_300 << SDHC_SPEC_VERS_SHIFT;
299 } else if (ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED)) {
300 sdhcver = HREAD4(hp, SDHC_ESDHC_HOST_CTL_VERSION);
301 } else
302 sdhcver = HREAD2(hp, SDHC_HOST_CTL_VERSION);
303 aprint_normal_dev(sc->sc_dev, "SDHC ");
304 hp->specver = SDHC_SPEC_VERSION(sdhcver);
305 switch (SDHC_SPEC_VERSION(sdhcver)) {
306 case SDHC_SPEC_VERS_100:
307 aprint_normal("1.0");
308 break;
309
310 case SDHC_SPEC_VERS_200:
311 aprint_normal("2.0");
312 break;
313
314 case SDHC_SPEC_VERS_300:
315 aprint_normal("3.0");
316 break;
317
318 case SDHC_SPEC_VERS_400:
319 aprint_normal("4.0");
320 break;
321
322 default:
323 aprint_normal("unknown version(0x%x)",
324 SDHC_SPEC_VERSION(sdhcver));
325 break;
326 }
327 aprint_normal(", rev %u", SDHC_VENDOR_VERSION(sdhcver));
328 }
329
330 /*
331 * Reset the host controller and enable interrupts.
332 */
333 (void)sdhc_host_reset(hp);
334
335 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_USDHC)) {
336 /* init uSDHC registers */
337 HWRITE4(hp, SDHC_MMC_BOOT, 0);
338 HWRITE4(hp, SDHC_HOST_CTL, SDHC_USDHC_BURST_LEN_EN |
339 SDHC_USDHC_HOST_CTL_RESV23 | SDHC_USDHC_EMODE_LE);
340 HWRITE4(hp, SDHC_WATERMARK_LEVEL,
341 (0x10 << SDHC_WATERMARK_WR_BRST_SHIFT) |
342 (0x40 << SDHC_WATERMARK_WRITE_SHIFT) |
343 (0x10 << SDHC_WATERMARK_RD_BRST_SHIFT) |
344 (0x40 << SDHC_WATERMARK_READ_SHIFT));
345 HSET4(hp, SDHC_VEND_SPEC,
346 SDHC_VEND_SPEC_MBO |
347 SDHC_VEND_SPEC_CARD_CLK_SOFT_EN |
348 SDHC_VEND_SPEC_IPG_PERCLK_SOFT_EN |
349 SDHC_VEND_SPEC_HCLK_SOFT_EN |
350 SDHC_VEND_SPEC_IPG_CLK_SOFT_EN |
351 SDHC_VEND_SPEC_AC12_WR_CHKBUSY_EN |
352 SDHC_VEND_SPEC_FRC_SDCLK_ON);
353 }
354
355 /* Determine host capabilities. */
356 if (ISSET(sc->sc_flags, SDHC_FLAG_HOSTCAPS)) {
357 caps = sc->sc_caps;
358 caps2 = sc->sc_caps2;
359 } else if (ISSET(hp->sc->sc_flags, SDHC_FLAG_USDHC)) {
360 /* uSDHC capability register is little bit different */
361 caps = HREAD4(hp, SDHC_CAPABILITIES);
362 caps |= SDHC_8BIT_SUPP;
363 if (caps & SDHC_ADMA1_SUPP)
364 caps |= SDHC_ADMA2_SUPP;
365 sc->sc_caps = caps;
366 /* uSDHC has no SDHC_CAPABILITIES2 register */
367 caps2 = sc->sc_caps2 = SDHC_SDR50_SUPP | SDHC_DDR50_SUPP;
368 } else {
369 caps = sc->sc_caps = HREAD4(hp, SDHC_CAPABILITIES);
370 if (hp->specver >= SDHC_SPEC_VERS_300) {
371 caps2 = sc->sc_caps2 = HREAD4(hp, SDHC_CAPABILITIES2);
372 } else {
373 caps2 = sc->sc_caps2 = 0;
374 }
375 }
376
377 const u_int retuning_mode = (caps2 >> SDHC_RETUNING_MODES_SHIFT) &
378 SDHC_RETUNING_MODES_MASK;
379 if (retuning_mode == SDHC_RETUNING_MODE_1) {
380 hp->tuning_timer_count = (caps2 >> SDHC_TIMER_COUNT_SHIFT) &
381 SDHC_TIMER_COUNT_MASK;
382 if (hp->tuning_timer_count == 0xf)
383 hp->tuning_timer_count = 0;
384 if (hp->tuning_timer_count)
385 hp->tuning_timer_count =
386 1 << (hp->tuning_timer_count - 1);
387 }
388
389 /*
390 * Use DMA if the host system and the controller support it.
391 * Suports integrated or external DMA egine, with or without
392 * SDHC_DMA_ENABLE in the command.
393 */
394 if (ISSET(sc->sc_flags, SDHC_FLAG_FORCE_DMA) ||
395 (ISSET(sc->sc_flags, SDHC_FLAG_USE_DMA &&
396 ISSET(caps, SDHC_DMA_SUPPORT)))) {
397 SET(hp->flags, SHF_USE_DMA);
398
399 if (ISSET(sc->sc_flags, SDHC_FLAG_USE_ADMA2) &&
400 ISSET(caps, SDHC_ADMA2_SUPP)) {
401 SET(hp->flags, SHF_MODE_DMAEN);
402 /*
403 * 64-bit mode was present in the 2.00 spec, removed
404 * from 3.00, and re-added in 4.00 with a different
405 * descriptor layout. We only support 2.00 and 3.00
406 * descriptors for now.
407 */
408 if (hp->specver == SDHC_SPEC_VERS_200 &&
409 ISSET(caps, SDHC_64BIT_SYS_BUS)) {
410 SET(hp->flags, SHF_USE_ADMA2_64);
411 aprint_normal(", 64-bit ADMA2");
412 } else {
413 SET(hp->flags, SHF_USE_ADMA2_32);
414 aprint_normal(", 32-bit ADMA2");
415 }
416 } else {
417 if (!ISSET(sc->sc_flags, SDHC_FLAG_EXTERNAL_DMA) ||
418 ISSET(sc->sc_flags, SDHC_FLAG_EXTDMA_DMAEN))
419 SET(hp->flags, SHF_MODE_DMAEN);
420 if (sc->sc_vendor_transfer_data_dma) {
421 aprint_normal(", platform DMA");
422 } else {
423 aprint_normal(", SDMA");
424 }
425 }
426 } else {
427 aprint_normal(", PIO");
428 }
429
430 /*
431 * Determine the base clock frequency. (2.2.24)
432 */
433 if (hp->specver >= SDHC_SPEC_VERS_300) {
434 hp->clkbase = SDHC_BASE_V3_FREQ_KHZ(caps);
435 } else {
436 hp->clkbase = SDHC_BASE_FREQ_KHZ(caps);
437 }
438 if (hp->clkbase == 0 ||
439 ISSET(sc->sc_flags, SDHC_FLAG_NO_CLKBASE)) {
440 if (sc->sc_clkbase == 0) {
441 /* The attachment driver must tell us. */
442 aprint_error_dev(sc->sc_dev,
443 "unknown base clock frequency\n");
444 goto err;
445 }
446 hp->clkbase = sc->sc_clkbase;
447 }
448 if (hp->clkbase < 10000 || hp->clkbase > 10000 * 256) {
449 /* SDHC 1.0 supports only 10-63 MHz. */
450 aprint_error_dev(sc->sc_dev,
451 "base clock frequency out of range: %u MHz\n",
452 hp->clkbase / 1000);
453 goto err;
454 }
455 aprint_normal(", %u kHz", hp->clkbase);
456
457 /*
458 * XXX Set the data timeout counter value according to
459 * capabilities. (2.2.15)
460 */
461 HWRITE1(hp, SDHC_TIMEOUT_CTL, SDHC_TIMEOUT_MAX);
462 #if 1
463 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED))
464 HWRITE4(hp, SDHC_NINTR_STATUS, SDHC_CMD_TIMEOUT_ERROR << 16);
465 #endif
466
467 if (ISSET(caps, SDHC_EMBEDDED_SLOT))
468 aprint_normal(", embedded slot");
469
470 /*
471 * Determine SD bus voltage levels supported by the controller.
472 */
473 aprint_normal(",");
474 if (ISSET(caps, SDHC_HIGH_SPEED_SUPP)) {
475 SET(hp->ocr, MMC_OCR_HCS);
476 aprint_normal(" HS");
477 }
478 if (ISSET(caps2, SDHC_SDR50_SUPP)) {
479 SET(hp->ocr, MMC_OCR_S18A);
480 aprint_normal(" SDR50");
481 }
482 if (ISSET(caps2, SDHC_DDR50_SUPP)) {
483 SET(hp->ocr, MMC_OCR_S18A);
484 aprint_normal(" DDR50");
485 }
486 if (ISSET(caps2, SDHC_SDR104_SUPP)) {
487 SET(hp->ocr, MMC_OCR_S18A);
488 aprint_normal(" SDR104 HS200");
489 }
490 if (ISSET(caps, SDHC_VOLTAGE_SUPP_1_8V)) {
491 SET(hp->ocr, MMC_OCR_1_65V_1_95V);
492 aprint_normal(" 1.8V");
493 }
494 if (ISSET(caps, SDHC_VOLTAGE_SUPP_3_0V)) {
495 SET(hp->ocr, MMC_OCR_2_9V_3_0V | MMC_OCR_3_0V_3_1V);
496 aprint_normal(" 3.0V");
497 }
498 if (ISSET(caps, SDHC_VOLTAGE_SUPP_3_3V)) {
499 SET(hp->ocr, MMC_OCR_3_2V_3_3V | MMC_OCR_3_3V_3_4V);
500 aprint_normal(" 3.3V");
501 }
502 if (hp->specver >= SDHC_SPEC_VERS_300) {
503 aprint_normal(", re-tuning mode %d", retuning_mode + 1);
504 if (hp->tuning_timer_count)
505 aprint_normal(" (%us timer)", hp->tuning_timer_count);
506 }
507
508 /*
509 * Determine the maximum block length supported by the host
510 * controller. (2.2.24)
511 */
512 switch((caps >> SDHC_MAX_BLK_LEN_SHIFT) & SDHC_MAX_BLK_LEN_MASK) {
513 case SDHC_MAX_BLK_LEN_512:
514 hp->maxblklen = 512;
515 break;
516
517 case SDHC_MAX_BLK_LEN_1024:
518 hp->maxblklen = 1024;
519 break;
520
521 case SDHC_MAX_BLK_LEN_2048:
522 hp->maxblklen = 2048;
523 break;
524
525 case SDHC_MAX_BLK_LEN_4096:
526 hp->maxblklen = 4096;
527 break;
528
529 default:
530 aprint_error_dev(sc->sc_dev, "max block length unknown\n");
531 goto err;
532 }
533 aprint_normal(", %u byte blocks", hp->maxblklen);
534 aprint_normal("\n");
535
536 if (ISSET(hp->flags, SHF_USE_ADMA2_MASK)) {
537 int rseg;
538
539 /* Allocate ADMA2 descriptor memory */
540 error = bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE, PAGE_SIZE,
541 PAGE_SIZE, hp->adma_segs, 1, &rseg, BUS_DMA_WAITOK);
542 if (error) {
543 aprint_error_dev(sc->sc_dev,
544 "ADMA2 dmamem_alloc failed (%d)\n", error);
545 goto adma_done;
546 }
547 error = bus_dmamem_map(sc->sc_dmat, hp->adma_segs, rseg,
548 PAGE_SIZE, (void **)&hp->adma2, BUS_DMA_WAITOK);
549 if (error) {
550 aprint_error_dev(sc->sc_dev,
551 "ADMA2 dmamem_map failed (%d)\n", error);
552 goto adma_done;
553 }
554 error = bus_dmamap_create(sc->sc_dmat, PAGE_SIZE, 1, PAGE_SIZE,
555 0, BUS_DMA_WAITOK, &hp->adma_map);
556 if (error) {
557 aprint_error_dev(sc->sc_dev,
558 "ADMA2 dmamap_create failed (%d)\n", error);
559 goto adma_done;
560 }
561 error = bus_dmamap_load(sc->sc_dmat, hp->adma_map,
562 hp->adma2, PAGE_SIZE, NULL,
563 BUS_DMA_WAITOK|BUS_DMA_WRITE);
564 if (error) {
565 aprint_error_dev(sc->sc_dev,
566 "ADMA2 dmamap_load failed (%d)\n", error);
567 goto adma_done;
568 }
569
570 memset(hp->adma2, 0, PAGE_SIZE);
571
572 adma_done:
573 if (error)
574 CLR(hp->flags, SHF_USE_ADMA2_MASK);
575 }
576
577 /*
578 * Attach the generic SD/MMC bus driver. (The bus driver must
579 * not invoke any chipset functions before it is attached.)
580 */
581 memset(&saa, 0, sizeof(saa));
582 saa.saa_busname = "sdmmc";
583 saa.saa_sct = &sdhc_functions;
584 saa.saa_sch = hp;
585 saa.saa_dmat = hp->dmat;
586 saa.saa_clkmax = hp->clkbase;
587 if (ISSET(sc->sc_flags, SDHC_FLAG_HAVE_CGM))
588 saa.saa_clkmin = hp->clkbase / 256 / 2046;
589 else if (ISSET(sc->sc_flags, SDHC_FLAG_HAVE_DVS))
590 saa.saa_clkmin = hp->clkbase / 256 / 16;
591 else if (hp->sc->sc_clkmsk != 0)
592 saa.saa_clkmin = hp->clkbase / (hp->sc->sc_clkmsk >>
593 (ffs(hp->sc->sc_clkmsk) - 1));
594 else if (hp->specver >= SDHC_SPEC_VERS_300)
595 saa.saa_clkmin = hp->clkbase / 0x3ff;
596 else
597 saa.saa_clkmin = hp->clkbase / 256;
598 if (!ISSET(sc->sc_flags, SDHC_FLAG_NO_AUTO_STOP))
599 saa.saa_caps |= SMC_CAPS_AUTO_STOP;
600 saa.saa_caps |= SMC_CAPS_4BIT_MODE;
601 if (ISSET(sc->sc_flags, SDHC_FLAG_8BIT_MODE))
602 saa.saa_caps |= SMC_CAPS_8BIT_MODE;
603 if (ISSET(caps, SDHC_HIGH_SPEED_SUPP))
604 saa.saa_caps |= SMC_CAPS_SD_HIGHSPEED;
605 if (ISSET(caps2, SDHC_SDR104_SUPP))
606 saa.saa_caps |= SMC_CAPS_UHS_SDR104 |
607 SMC_CAPS_UHS_SDR50 |
608 SMC_CAPS_MMC_HS200;
609 if (ISSET(caps2, SDHC_SDR50_SUPP))
610 saa.saa_caps |= SMC_CAPS_UHS_SDR50;
611 if (ISSET(caps2, SDHC_DDR50_SUPP))
612 saa.saa_caps |= SMC_CAPS_UHS_DDR50;
613 if (ISSET(hp->flags, SHF_USE_DMA)) {
614 saa.saa_caps |= SMC_CAPS_DMA;
615 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED))
616 saa.saa_caps |= SMC_CAPS_MULTI_SEG_DMA;
617 }
618 if (ISSET(sc->sc_flags, SDHC_FLAG_SINGLE_ONLY))
619 saa.saa_caps |= SMC_CAPS_SINGLE_ONLY;
620 if (ISSET(sc->sc_flags, SDHC_FLAG_POLL_CARD_DET))
621 saa.saa_caps |= SMC_CAPS_POLL_CARD_DET;
622 hp->sdmmc = config_found(sc->sc_dev, &saa, sdhc_cfprint);
623
624 return 0;
625
626 err:
627 callout_destroy(&hp->tuning_timer);
628 cv_destroy(&hp->intr_cv);
629 mutex_destroy(&hp->intr_lock);
630 free(hp, M_DEVBUF);
631 sc->sc_host[--sc->sc_nhosts] = NULL;
632 err1:
633 return 1;
634 }
635
/*
 * Detach the sdmmc child of every slot and release all per-host
 * resources (interrupts, callouts, register mapping, ADMA2 memory).
 * Returns the first config_detach() error, or 0 on success.
 */
int
sdhc_detach(struct sdhc_softc *sc, int flags)
{
	struct sdhc_host *hp;
	int rv = 0;

	for (size_t n = 0; n < sc->sc_nhosts; n++) {
		hp = sc->sc_host[n];
		if (hp == NULL)
			continue;
		/* Detach the child first; abort the loop if it refuses. */
		if (hp->sdmmc != NULL) {
			rv = config_detach(hp->sdmmc, flags);
			if (rv)
				break;
			hp->sdmmc = NULL;
		}
		/* disable interrupts */
		if ((flags & DETACH_FORCE) == 0) {
			mutex_enter(&hp->intr_lock);
			if (ISSET(hp->sc->sc_flags, SDHC_FLAG_32BIT_ACCESS)) {
				HWRITE4(hp, SDHC_NINTR_SIGNAL_EN, 0);
			} else {
				HWRITE2(hp, SDHC_NINTR_SIGNAL_EN, 0);
			}
			sdhc_soft_reset(hp, SDHC_RESET_ALL);
			mutex_exit(&hp->intr_lock);
		}
		/* Stop the re-tuning timer before destroying its lock. */
		callout_halt(&hp->tuning_timer, NULL);
		callout_destroy(&hp->tuning_timer);
		cv_destroy(&hp->intr_cv);
		mutex_destroy(&hp->intr_lock);
		if (hp->ios > 0) {
			bus_space_unmap(hp->iot, hp->ioh, hp->ios);
			hp->ios = 0;
		}
		/* Release ADMA2 descriptor memory, if it was set up. */
		if (ISSET(hp->flags, SHF_USE_ADMA2_MASK)) {
			bus_dmamap_unload(sc->sc_dmat, hp->adma_map);
			bus_dmamap_destroy(sc->sc_dmat, hp->adma_map);
			bus_dmamem_unmap(sc->sc_dmat, hp->adma2, PAGE_SIZE);
			bus_dmamem_free(sc->sc_dmat, hp->adma_segs, 1);
		}
		free(hp, M_DEVBUF);
		sc->sc_host[n] = NULL;
	}

	return rv;
}
683
/*
 * pmf suspend hook: save the first 14 bytes of each slot's register
 * file into hp->regs so sdhc_resume() can restore them.
 */
bool
sdhc_suspend(device_t dev, const pmf_qual_t *qual)
{
	struct sdhc_softc *sc = device_private(dev);
	struct sdhc_host *hp;
	size_t i;

	/* XXX poll for command completion or suspend command
	 * in progress */

	/* Save the host controller state. */
	for (size_t n = 0; n < sc->sc_nhosts; n++) {
		hp = sc->sc_host[n];
		if (ISSET(sc->sc_flags, SDHC_FLAG_32BIT_ACCESS)) {
			/* 32-bit-only controllers: read whole words. */
			for (i = 0; i < sizeof hp->regs; i += 4) {
				uint32_t v = HREAD4(hp, i);
				hp->regs[i + 0] = (v >> 0);
				hp->regs[i + 1] = (v >> 8);
				/*
				 * The save area is 14 bytes, so the
				 * last word contributes only 2 bytes.
				 */
				if (i + 3 < sizeof hp->regs) {
					hp->regs[i + 2] = (v >> 16);
					hp->regs[i + 3] = (v >> 24);
				}
			}
		} else {
			for (i = 0; i < sizeof hp->regs; i++) {
				hp->regs[i] = HREAD1(hp, i);
			}
		}
	}
	return true;
}
715
/*
 * pmf resume hook: reset each slot, then write back the register
 * bytes saved by sdhc_suspend().
 */
bool
sdhc_resume(device_t dev, const pmf_qual_t *qual)
{
	struct sdhc_softc *sc = device_private(dev);
	struct sdhc_host *hp;
	size_t i;

	/* Restore the host controller state. */
	for (size_t n = 0; n < sc->sc_nhosts; n++) {
		hp = sc->sc_host[n];
		(void)sdhc_host_reset(hp);
		if (ISSET(sc->sc_flags, SDHC_FLAG_32BIT_ACCESS)) {
			for (i = 0; i < sizeof hp->regs; i += 4) {
				/*
				 * Mirror of the suspend path: the last
				 * word has only 2 saved bytes.
				 */
				if (i + 3 < sizeof hp->regs) {
					HWRITE4(hp, i,
					    (hp->regs[i + 0] << 0)
					    | (hp->regs[i + 1] << 8)
					    | (hp->regs[i + 2] << 16)
					    | (hp->regs[i + 3] << 24));
				} else {
					HWRITE4(hp, i,
					    (hp->regs[i + 0] << 0)
					    | (hp->regs[i + 1] << 8));
				}
			}
		} else {
			for (i = 0; i < sizeof hp->regs; i++) {
				HWRITE1(hp, i, hp->regs[i]);
			}
		}
	}
	return true;
}
749
750 bool
751 sdhc_shutdown(device_t dev, int flags)
752 {
753 struct sdhc_softc *sc = device_private(dev);
754 struct sdhc_host *hp;
755
756 /* XXX chip locks up if we don't disable it before reboot. */
757 for (size_t i = 0; i < sc->sc_nhosts; i++) {
758 hp = sc->sc_host[i];
759 (void)sdhc_host_reset(hp);
760 }
761 return true;
762 }
763
/*
 * Reset the host controller. Called during initialization, when
 * cards are removed, upon resume, and during error recovery.
 *
 * Caller must hold hp->intr_lock.  Returns 0, or the error from
 * sdhc_soft_reset() if the controller fails to leave reset.
 */
static int
sdhc_host_reset1(sdmmc_chipset_handle_t sch)
{
	struct sdhc_host *hp = (struct sdhc_host *)sch;
	uint32_t sdhcimask;
	int error;

	KASSERT(mutex_owned(&hp->intr_lock));

	/* Disable all interrupts. */
	if (ISSET(hp->sc->sc_flags, SDHC_FLAG_32BIT_ACCESS)) {
		HWRITE4(hp, SDHC_NINTR_SIGNAL_EN, 0);
	} else {
		HWRITE2(hp, SDHC_NINTR_SIGNAL_EN, 0);
	}

	/*
	 * Reset the entire host controller and wait up to 100ms for
	 * the controller to clear the reset bit.
	 */
	error = sdhc_soft_reset(hp, SDHC_RESET_ALL);
	if (error)
		goto out;

	/* Set data timeout counter value to max for now. */
	HWRITE1(hp, SDHC_TIMEOUT_CTL, SDHC_TIMEOUT_MAX);
#if 1
	if (ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED))
		HWRITE4(hp, SDHC_NINTR_STATUS, SDHC_CMD_TIMEOUT_ERROR << 16);
#endif

	/* Enable interrupts. */
	sdhcimask = SDHC_CARD_REMOVAL | SDHC_CARD_INSERTION |
	    SDHC_BUFFER_READ_READY | SDHC_BUFFER_WRITE_READY |
	    SDHC_DMA_INTERRUPT | SDHC_BLOCK_GAP_EVENT |
	    SDHC_TRANSFER_COMPLETE | SDHC_COMMAND_COMPLETE;
	if (ISSET(hp->sc->sc_flags, SDHC_FLAG_32BIT_ACCESS)) {
		/*
		 * 32-bit access: status-enable and error-status-enable
		 * share one register (errors in the upper half).
		 */
		sdhcimask |= SDHC_EINTR_STATUS_MASK << 16;
		HWRITE4(hp, SDHC_NINTR_STATUS_EN, sdhcimask);
		/*
		 * XOR swaps the error status-enable bits in the upper
		 * half for the error signal-enable bits before writing
		 * the signal-enable register.
		 */
		sdhcimask ^=
		    (SDHC_EINTR_STATUS_MASK ^ SDHC_EINTR_SIGNAL_MASK) << 16;
		/*
		 * Both buffer-ready bits were set above; XOR with their
		 * union clears them, so they are status-enabled but do
		 * not signal an interrupt.
		 */
		sdhcimask ^= SDHC_BUFFER_READ_READY ^ SDHC_BUFFER_WRITE_READY;
		HWRITE4(hp, SDHC_NINTR_SIGNAL_EN, sdhcimask);
	} else {
		HWRITE2(hp, SDHC_NINTR_STATUS_EN, sdhcimask);
		HWRITE2(hp, SDHC_EINTR_STATUS_EN, SDHC_EINTR_STATUS_MASK);
		/* As above: buffer-ready bits are not signal-enabled. */
		sdhcimask ^= SDHC_BUFFER_READ_READY ^ SDHC_BUFFER_WRITE_READY;
		HWRITE2(hp, SDHC_NINTR_SIGNAL_EN, sdhcimask);
		HWRITE2(hp, SDHC_EINTR_SIGNAL_EN, SDHC_EINTR_SIGNAL_MASK);
	}

out:
	return error;
}
822
823 static int
824 sdhc_host_reset(sdmmc_chipset_handle_t sch)
825 {
826 struct sdhc_host *hp = (struct sdhc_host *)sch;
827 int error;
828
829 mutex_enter(&hp->intr_lock);
830 error = sdhc_host_reset1(sch);
831 mutex_exit(&hp->intr_lock);
832
833 return error;
834 }
835
836 static uint32_t
837 sdhc_host_ocr(sdmmc_chipset_handle_t sch)
838 {
839 struct sdhc_host *hp = (struct sdhc_host *)sch;
840
841 return hp->ocr;
842 }
843
844 static int
845 sdhc_host_maxblklen(sdmmc_chipset_handle_t sch)
846 {
847 struct sdhc_host *hp = (struct sdhc_host *)sch;
848
849 return hp->maxblklen;
850 }
851
852 /*
853 * Return non-zero if the card is currently inserted.
854 */
855 static int
856 sdhc_card_detect(sdmmc_chipset_handle_t sch)
857 {
858 struct sdhc_host *hp = (struct sdhc_host *)sch;
859 int r;
860
861 if (hp->sc->sc_vendor_card_detect)
862 return (*hp->sc->sc_vendor_card_detect)(hp->sc);
863
864 r = ISSET(HREAD4(hp, SDHC_PRESENT_STATE), SDHC_CARD_INSERTED);
865
866 return r ? 1 : 0;
867 }
868
869 /*
870 * Return non-zero if the card is currently write-protected.
871 */
872 static int
873 sdhc_write_protect(sdmmc_chipset_handle_t sch)
874 {
875 struct sdhc_host *hp = (struct sdhc_host *)sch;
876 int r;
877
878 if (hp->sc->sc_vendor_write_protect)
879 return (*hp->sc->sc_vendor_write_protect)(hp->sc);
880
881 r = ISSET(HREAD4(hp, SDHC_PRESENT_STATE), SDHC_WRITE_PROTECT_SWITCH);
882
883 return r ? 0 : 1;
884 }
885
/*
 * Set or change SD bus voltage and enable or disable SD bus power.
 * Return zero on success.
 *
 * ocr == 0 means "power off": the host is reset and any pending
 * re-tuning timer is cancelled.
 */
static int
sdhc_bus_power(sdmmc_chipset_handle_t sch, uint32_t ocr)
{
	struct sdhc_host *hp = (struct sdhc_host *)sch;
	uint8_t vdd;
	int error = 0;
	/* Mask that clears the power-enable and voltage-select fields. */
	const uint32_t pcmask =
	    ~(SDHC_BUS_POWER | (SDHC_VOLTAGE_MASK << SDHC_VOLTAGE_SHIFT));

	mutex_enter(&hp->intr_lock);

	/*
	 * Disable bus power before voltage change.
	 */
	if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_32BIT_ACCESS)
	    && !ISSET(hp->sc->sc_flags, SDHC_FLAG_NO_PWR0))
		HWRITE1(hp, SDHC_POWER_CTL, 0);

	/* If power is disabled, reset the host and return now. */
	if (ocr == 0) {
		(void)sdhc_host_reset1(hp);
		callout_halt(&hp->tuning_timer, &hp->intr_lock);
		goto out;
	}

	/*
	 * Select the lowest voltage according to capabilities.
	 */
	ocr &= hp->ocr;
	if (ISSET(ocr, MMC_OCR_1_65V_1_95V)) {
		vdd = SDHC_VOLTAGE_1_8V;
	} else if (ISSET(ocr, MMC_OCR_2_9V_3_0V|MMC_OCR_3_0V_3_1V)) {
		vdd = SDHC_VOLTAGE_3_0V;
	} else if (ISSET(ocr, MMC_OCR_3_2V_3_3V|MMC_OCR_3_3V_3_4V)) {
		vdd = SDHC_VOLTAGE_3_3V;
	} else {
		/* Unsupported voltage level requested. */
		error = EINVAL;
		goto out;
	}

	if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED)) {
		/*
		 * Enable bus power.  Wait at least 1 ms (or 74 clocks) plus
		 * voltage ramp until power rises.
		 */

		if (ISSET(hp->sc->sc_flags, SDHC_FLAG_SINGLE_POWER_WRITE)) {
			/* Quirk: voltage and power must be set together. */
			HWRITE1(hp, SDHC_POWER_CTL,
			    (vdd << SDHC_VOLTAGE_SHIFT) | SDHC_BUS_POWER);
		} else {
			/* Clear, set voltage, then enable — in order. */
			HWRITE1(hp, SDHC_POWER_CTL,
			    HREAD1(hp, SDHC_POWER_CTL) & pcmask);
			sdmmc_delay(1);
			HWRITE1(hp, SDHC_POWER_CTL,
			    (vdd << SDHC_VOLTAGE_SHIFT));
			sdmmc_delay(1);
			HSET1(hp, SDHC_POWER_CTL, SDHC_BUS_POWER);
			sdmmc_delay(10000);
		}

		/*
		 * The host system may not power the bus due to battery low,
		 * etc.  In that case, the host controller should clear the
		 * bus power bit.
		 */
		if (!ISSET(HREAD1(hp, SDHC_POWER_CTL), SDHC_BUS_POWER)) {
			error = ENXIO;
			goto out;
		}
	}

out:
	mutex_exit(&hp->intr_lock);

	return error;
}
967
968 /*
969 * Return the smallest possible base clock frequency divisor value
970 * for the CLOCK_CTL register to produce `freq' (KHz).
971 */
972 static bool
973 sdhc_clock_divisor(struct sdhc_host *hp, u_int freq, u_int *divp)
974 {
975 u_int div;
976
977 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_HAVE_CGM)) {
978 for (div = hp->clkbase / freq; div <= 0x3ff; div++) {
979 if ((hp->clkbase / div) <= freq) {
980 *divp = SDHC_SDCLK_CGM
981 | ((div & 0x300) << SDHC_SDCLK_XDIV_SHIFT)
982 | ((div & 0x0ff) << SDHC_SDCLK_DIV_SHIFT);
983 //freq = hp->clkbase / div;
984 return true;
985 }
986 }
987 /* No divisor found. */
988 return false;
989 }
990 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_HAVE_DVS)) {
991 u_int dvs = (hp->clkbase + freq - 1) / freq;
992 u_int roundup = dvs & 1;
993 for (dvs >>= 1, div = 1; div <= 256; div <<= 1, dvs >>= 1) {
994 if (dvs + roundup <= 16) {
995 dvs += roundup - 1;
996 *divp = (div << SDHC_SDCLK_DIV_SHIFT)
997 | (dvs << SDHC_SDCLK_DVS_SHIFT);
998 DPRINTF(2,
999 ("%s: divisor for freq %u is %u * %u\n",
1000 HDEVNAME(hp), freq, div * 2, dvs + 1));
1001 //freq = hp->clkbase / (div * 2) * (dvs + 1);
1002 return true;
1003 }
1004 /*
1005 * If we drop bits, we need to round up the divisor.
1006 */
1007 roundup |= dvs & 1;
1008 }
1009 /* No divisor found. */
1010 return false;
1011 }
1012 if (hp->sc->sc_clkmsk != 0) {
1013 div = howmany(hp->clkbase, freq);
1014 if (div > (hp->sc->sc_clkmsk >> (ffs(hp->sc->sc_clkmsk) - 1)))
1015 return false;
1016 *divp = div << (ffs(hp->sc->sc_clkmsk) - 1);
1017 //freq = hp->clkbase / div;
1018 return true;
1019 }
1020 if (hp->specver >= SDHC_SPEC_VERS_300) {
1021 div = howmany(hp->clkbase, freq);
1022 div = div > 1 ? howmany(div, 2) : 0;
1023 if (div > 0x3ff)
1024 return false;
1025 *divp = (((div >> 8) & SDHC_SDCLK_XDIV_MASK)
1026 << SDHC_SDCLK_XDIV_SHIFT) |
1027 (((div >> 0) & SDHC_SDCLK_DIV_MASK)
1028 << SDHC_SDCLK_DIV_SHIFT);
1029 //freq = hp->clkbase / (div ? div * 2 : 1);
1030 return true;
1031 } else {
1032 for (div = 1; div <= 256; div *= 2) {
1033 if ((hp->clkbase / div) <= freq) {
1034 *divp = (div / 2) << SDHC_SDCLK_DIV_SHIFT;
1035 //freq = hp->clkbase / div;
1036 return true;
1037 }
1038 }
1039 /* No divisor found. */
1040 return false;
1041 }
1042 /* No divisor found. */
1043 return false;
1044 }
1045
1046 /*
1047 * Set or change SDCLK frequency or disable the SD clock.
1048 * Return zero on success.
1049 */
static int
sdhc_bus_clock_ddr(sdmmc_chipset_handle_t sch, int freq, bool ddr)
{
	struct sdhc_host *hp = (struct sdhc_host *)sch;
	u_int div;
	u_int timo;
	int16_t reg;	/* NOTE(review): int16_t looks odd for a register
			 * copy, but it is masked to two low bits below */
	int error = 0;
	bool present __diagused;

	mutex_enter(&hp->intr_lock);

#ifdef DIAGNOSTIC
	present = ISSET(HREAD4(hp, SDHC_PRESENT_STATE), SDHC_CMD_INHIBIT_MASK);

	/* Must not stop the clock if commands are in progress. */
	if (present && sdhc_card_detect(hp)) {
		aprint_normal_dev(hp->sc->sc_dev,
		    "%s: command in progress\n", __func__);
	}
#endif

	/* Give any platform-specific hook first crack at the change. */
	if (hp->sc->sc_vendor_bus_clock) {
		error = (*hp->sc->sc_vendor_bus_clock)(hp->sc, freq);
		if (error != 0)
			goto out;
	}

	/*
	 * Stop SD clock before changing the frequency.
	 */
	if (ISSET(hp->sc->sc_flags, SDHC_FLAG_USDHC)) {
		HCLR4(hp, SDHC_VEND_SPEC,
		    SDHC_VEND_SPEC_CARD_CLK_SOFT_EN |
		    SDHC_VEND_SPEC_FRC_SDCLK_ON);
		if (freq == SDMMC_SDCLK_OFF) {
			goto out;
		}
	} else if (ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED)) {
		/* NOTE(review): raw ESDHC clock-control masks — verify
		 * against the ESDHC reference manual. */
		HCLR4(hp, SDHC_CLOCK_CTL, 0xfff8);
		if (freq == SDMMC_SDCLK_OFF) {
			HSET4(hp, SDHC_CLOCK_CTL, 0x80f0);
			goto out;
		}
	} else {
		HCLR2(hp, SDHC_CLOCK_CTL, SDHC_SDCLK_ENABLE);
		if (freq == SDMMC_SDCLK_OFF)
			goto out;
	}

	/* Select DDR (uSDHC) or the UHS timing mode for the target rate. */
	if (ISSET(hp->sc->sc_flags, SDHC_FLAG_USDHC)) {
		if (ddr)
			HSET4(hp, SDHC_MIX_CTRL, SDHC_USDHC_DDR_EN);
		else
			HCLR4(hp, SDHC_MIX_CTRL, SDHC_USDHC_DDR_EN);
	} else if (hp->specver >= SDHC_SPEC_VERS_300) {
		HCLR2(hp, SDHC_HOST_CTL2, SDHC_UHS_MODE_SELECT_MASK);
		if (freq > 100000) {
			HSET2(hp, SDHC_HOST_CTL2, SDHC_UHS_MODE_SELECT_SDR104);
		} else if (freq > 50000) {
			HSET2(hp, SDHC_HOST_CTL2, SDHC_UHS_MODE_SELECT_SDR50);
		} else if (freq > 25000) {
			if (ddr) {
				HSET2(hp, SDHC_HOST_CTL2,
				    SDHC_UHS_MODE_SELECT_DDR50);
			} else {
				HSET2(hp, SDHC_HOST_CTL2,
				    SDHC_UHS_MODE_SELECT_SDR25);
			}
		} else if (freq > 400) {
			HSET2(hp, SDHC_HOST_CTL2, SDHC_UHS_MODE_SELECT_SDR12);
		}
	}

	/*
	 * Slow down Ricoh 5U823 controller that isn't reliable
	 * at 100MHz bus clock.
	 */
	if (ISSET(hp->sc->sc_flags, SDHC_FLAG_SLOW_SDR50)) {
		if (freq == 100000)
			--freq;
	}

	/*
	 * Set the minimum base clock frequency divisor.
	 */
	if (!sdhc_clock_divisor(hp, freq, &div)) {
		/* Invalid base clock frequency or `freq' value. */
		aprint_error_dev(hp->sc->sc_dev,
		    "Invalid bus clock %d kHz\n", freq);
		error = EINVAL;
		goto out;
	}
	if (ISSET(hp->sc->sc_flags, SDHC_FLAG_USDHC)) {
		if (ddr) {
			/* in ddr mode, divisor >>= 1 */
			div = ((div >> 1) & (SDHC_SDCLK_DIV_MASK <<
			    SDHC_SDCLK_DIV_SHIFT)) |
			    (div & (SDHC_SDCLK_DVS_MASK <<
			    SDHC_SDCLK_DVS_SHIFT));
		}
		/* Wait for the SD clock to go stable before rewriting it. */
		for (timo = 1000; timo > 0; timo--) {
			if (ISSET(HREAD4(hp, SDHC_PRESENT_STATE), SDHC_SDSTB))
				break;
			sdmmc_delay(10);
		}
		HWRITE4(hp, SDHC_CLOCK_CTL,
		    div | (SDHC_TIMEOUT_MAX << 16) | 0x0f);
	} else if (ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED)) {
		HWRITE4(hp, SDHC_CLOCK_CTL,
		    div | (SDHC_TIMEOUT_MAX << 16));
	} else {
		/* Preserve the internal-clock bits, install the divisor. */
		reg = HREAD2(hp, SDHC_CLOCK_CTL);
		reg &= (SDHC_INTCLK_STABLE | SDHC_INTCLK_ENABLE);
		HWRITE2(hp, SDHC_CLOCK_CTL, reg | div);
	}

	/*
	 * Start internal clock. Wait 10ms for stabilization.
	 */
	if (ISSET(hp->sc->sc_flags, SDHC_FLAG_USDHC)) {
		HSET4(hp, SDHC_VEND_SPEC,
		    SDHC_VEND_SPEC_CARD_CLK_SOFT_EN |
		    SDHC_VEND_SPEC_FRC_SDCLK_ON);
	} else if (ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED)) {
		sdmmc_delay(10000);
		HSET4(hp, SDHC_CLOCK_CTL,
		    8 | SDHC_INTCLK_ENABLE | SDHC_INTCLK_STABLE);
	} else {
		HSET2(hp, SDHC_CLOCK_CTL, SDHC_INTCLK_ENABLE);
		/* Poll for SDHC_INTCLK_STABLE, ~10ms worst case. */
		for (timo = 1000; timo > 0; timo--) {
			if (ISSET(HREAD2(hp, SDHC_CLOCK_CTL),
			    SDHC_INTCLK_STABLE))
				break;
			sdmmc_delay(10);
		}
		if (timo == 0) {
			error = ETIMEDOUT;
			DPRINTF(1,("%s: timeout\n", __func__));
			goto out;
		}
	}

	if (ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED | SDHC_FLAG_USDHC)) {
		/* ESDHC/uSDHC: run the 80-clock initialization sequence. */
		HSET1(hp, SDHC_SOFTWARE_RESET, SDHC_INIT_ACTIVE);
		/*
		 * Sending 80 clocks at 400kHz takes 200us.
		 * So delay for that time + slop and then
		 * check a few times for completion.
		 */
		sdmmc_delay(210);
		for (timo = 10; timo > 0; timo--) {
			if (!ISSET(HREAD1(hp, SDHC_SOFTWARE_RESET),
			    SDHC_INIT_ACTIVE))
				break;
			sdmmc_delay(10);
		}
		DPRINTF(2,("%s: %u init spins\n", __func__, 10 - timo));

		/*
		 * Enable SD clock.
		 */
		if (ISSET(hp->sc->sc_flags, SDHC_FLAG_USDHC)) {
			HSET4(hp, SDHC_VEND_SPEC,
			    SDHC_VEND_SPEC_CARD_CLK_SOFT_EN |
			    SDHC_VEND_SPEC_FRC_SDCLK_ON);
		} else {
			HSET4(hp, SDHC_CLOCK_CTL, SDHC_SDCLK_ENABLE);
		}
	} else {
		/*
		 * Enable SD clock.
		 */
		HSET2(hp, SDHC_CLOCK_CTL, SDHC_SDCLK_ENABLE);

		/* Set/clear the high-speed bit based on the target rate. */
		if (freq > 25000 &&
		    !ISSET(hp->sc->sc_flags, SDHC_FLAG_NO_HS_BIT))
			HSET1(hp, SDHC_HOST_CTL, SDHC_HIGH_SPEED);
		else
			HCLR1(hp, SDHC_HOST_CTL, SDHC_HIGH_SPEED);
	}

out:
	mutex_exit(&hp->intr_lock);

	return error;
}
1237
1238 static int
1239 sdhc_bus_width(sdmmc_chipset_handle_t sch, int width)
1240 {
1241 struct sdhc_host *hp = (struct sdhc_host *)sch;
1242 int reg;
1243
1244 switch (width) {
1245 case 1:
1246 case 4:
1247 break;
1248
1249 case 8:
1250 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_8BIT_MODE))
1251 break;
1252 /* FALLTHROUGH */
1253 default:
1254 DPRINTF(0,("%s: unsupported bus width (%d)\n",
1255 HDEVNAME(hp), width));
1256 return 1;
1257 }
1258
1259 if (hp->sc->sc_vendor_bus_width) {
1260 const int error = hp->sc->sc_vendor_bus_width(hp->sc, width);
1261 if (error != 0)
1262 return error;
1263 }
1264
1265 mutex_enter(&hp->intr_lock);
1266
1267 reg = HREAD1(hp, SDHC_HOST_CTL);
1268 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED | SDHC_FLAG_USDHC)) {
1269 reg &= ~(SDHC_4BIT_MODE|SDHC_ESDHC_8BIT_MODE);
1270 if (width == 4)
1271 reg |= SDHC_4BIT_MODE;
1272 else if (width == 8)
1273 reg |= SDHC_ESDHC_8BIT_MODE;
1274 } else {
1275 reg &= ~SDHC_4BIT_MODE;
1276 if (hp->specver >= SDHC_SPEC_VERS_300) {
1277 reg &= ~SDHC_8BIT_MODE;
1278 }
1279 if (width == 4) {
1280 reg |= SDHC_4BIT_MODE;
1281 } else if (width == 8 && hp->specver >= SDHC_SPEC_VERS_300) {
1282 reg |= SDHC_8BIT_MODE;
1283 }
1284 }
1285 HWRITE1(hp, SDHC_HOST_CTL, reg);
1286
1287 mutex_exit(&hp->intr_lock);
1288
1289 return 0;
1290 }
1291
1292 static int
1293 sdhc_bus_rod(sdmmc_chipset_handle_t sch, int on)
1294 {
1295 struct sdhc_host *hp = (struct sdhc_host *)sch;
1296
1297 if (hp->sc->sc_vendor_rod)
1298 return (*hp->sc->sc_vendor_rod)(hp->sc, on);
1299
1300 return 0;
1301 }
1302
1303 static void
1304 sdhc_card_enable_intr(sdmmc_chipset_handle_t sch, int enable)
1305 {
1306 struct sdhc_host *hp = (struct sdhc_host *)sch;
1307
1308 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED | SDHC_FLAG_USDHC)) {
1309 mutex_enter(&hp->intr_lock);
1310 if (enable) {
1311 HSET2(hp, SDHC_NINTR_STATUS_EN, SDHC_CARD_INTERRUPT);
1312 HSET2(hp, SDHC_NINTR_SIGNAL_EN, SDHC_CARD_INTERRUPT);
1313 } else {
1314 HCLR2(hp, SDHC_NINTR_SIGNAL_EN, SDHC_CARD_INTERRUPT);
1315 HCLR2(hp, SDHC_NINTR_STATUS_EN, SDHC_CARD_INTERRUPT);
1316 }
1317 mutex_exit(&hp->intr_lock);
1318 }
1319 }
1320
1321 static void
1322 sdhc_card_intr_ack(sdmmc_chipset_handle_t sch)
1323 {
1324 struct sdhc_host *hp = (struct sdhc_host *)sch;
1325
1326 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED | SDHC_FLAG_USDHC)) {
1327 mutex_enter(&hp->intr_lock);
1328 HSET2(hp, SDHC_NINTR_STATUS_EN, SDHC_CARD_INTERRUPT);
1329 mutex_exit(&hp->intr_lock);
1330 }
1331 }
1332
1333 static int
1334 sdhc_signal_voltage(sdmmc_chipset_handle_t sch, int signal_voltage)
1335 {
1336 struct sdhc_host *hp = (struct sdhc_host *)sch;
1337
1338 mutex_enter(&hp->intr_lock);
1339 switch (signal_voltage) {
1340 case SDMMC_SIGNAL_VOLTAGE_180:
1341 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_USDHC))
1342 HSET2(hp, SDHC_HOST_CTL2, SDHC_1_8V_SIGNAL_EN);
1343 break;
1344 case SDMMC_SIGNAL_VOLTAGE_330:
1345 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_USDHC))
1346 HCLR2(hp, SDHC_HOST_CTL2, SDHC_1_8V_SIGNAL_EN);
1347 break;
1348 default:
1349 return EINVAL;
1350 }
1351 mutex_exit(&hp->intr_lock);
1352
1353 return 0;
1354 }
1355
1356 /*
1357 * Sampling clock tuning procedure (UHS)
1358 */
static int
sdhc_execute_tuning1(struct sdhc_host *hp, int timing)
{
	struct sdmmc_command cmd;
	uint8_t hostctl;
	int opcode, error, retry = 40;

	KASSERT(mutex_owned(&hp->intr_lock));

	/* Remember the timing so a periodic re-tune can repeat it. */
	hp->tuning_timing = timing;

	/* Pick the tuning command for this timing mode. */
	switch (timing) {
	case SDMMC_TIMING_MMC_HS200:
		opcode = MMC_SEND_TUNING_BLOCK_HS200;
		break;
	case SDMMC_TIMING_UHS_SDR50:
		/* SDR50 only needs tuning if the host advertises it. */
		if (!ISSET(hp->sc->sc_caps2, SDHC_TUNING_SDR50))
			return 0;
		/* FALLTHROUGH */
	case SDMMC_TIMING_UHS_SDR104:
		opcode = MMC_SEND_TUNING_BLOCK;
		break;
	default:
		return EINVAL;
	}

	hostctl = HREAD1(hp, SDHC_HOST_CTL);

	/* enable buffer read ready interrupt */
	HSET2(hp, SDHC_NINTR_SIGNAL_EN, SDHC_BUFFER_READ_READY);
	HSET2(hp, SDHC_NINTR_STATUS_EN, SDHC_BUFFER_READ_READY);

	/* disable DMA */
	HCLR1(hp, SDHC_HOST_CTL, SDHC_DMA_SELECT);

	/* reset tuning circuit */
	HCLR2(hp, SDHC_HOST_CTL2, SDHC_SAMPLING_CLOCK_SEL);

	/* start of tuning */
	HWRITE2(hp, SDHC_HOST_CTL2, SDHC_EXECUTE_TUNING);

	/*
	 * Issue the tuning command repeatedly until the controller
	 * clears SDHC_EXECUTE_TUNING or the retries are exhausted.
	 */
	do {
		memset(&cmd, 0, sizeof(cmd));
		cmd.c_opcode = opcode;
		cmd.c_arg = 0;
		cmd.c_flags = SCF_CMD_ADTC | SCF_CMD_READ | SCF_RSP_R1;
		/* Tuning block is 128 bytes on an 8-bit bus, else 64. */
		if (ISSET(hostctl, SDHC_8BIT_MODE)) {
			cmd.c_blklen = cmd.c_datalen = 128;
		} else {
			cmd.c_blklen = cmd.c_datalen = 64;
		}

		error = sdhc_start_command(hp, &cmd);
		if (error)
			break;

		if (!sdhc_wait_intr(hp, SDHC_BUFFER_READ_READY,
		    SDHC_TUNING_TIMEOUT, false)) {
			break;
		}

		delay(1000);
	} while (HREAD2(hp, SDHC_HOST_CTL2) & SDHC_EXECUTE_TUNING && --retry);

	/* disable buffer read ready interrupt */
	HCLR2(hp, SDHC_NINTR_SIGNAL_EN, SDHC_BUFFER_READ_READY);
	HCLR2(hp, SDHC_NINTR_STATUS_EN, SDHC_BUFFER_READ_READY);

	/* Controller still tuning: the procedure never converged. */
	if (HREAD2(hp, SDHC_HOST_CTL2) & SDHC_EXECUTE_TUNING) {
		HCLR2(hp, SDHC_HOST_CTL2,
		    SDHC_SAMPLING_CLOCK_SEL|SDHC_EXECUTE_TUNING);
		sdhc_soft_reset(hp, SDHC_RESET_DAT|SDHC_RESET_CMD);
		aprint_error_dev(hp->sc->sc_dev,
		    "tuning did not complete, using fixed sampling clock\n");
		return EIO; /* tuning did not complete */
	}

	/* Tuning ended but the tuned clock was not selected: failed. */
	if ((HREAD2(hp, SDHC_HOST_CTL2) & SDHC_SAMPLING_CLOCK_SEL) == 0) {
		HCLR2(hp, SDHC_HOST_CTL2,
		    SDHC_SAMPLING_CLOCK_SEL|SDHC_EXECUTE_TUNING);
		sdhc_soft_reset(hp, SDHC_RESET_DAT|SDHC_RESET_CMD);
		aprint_error_dev(hp->sc->sc_dev,
		    "tuning failed, using fixed sampling clock\n");
		return EIO; /* tuning failed */
	}

	/* Arm periodic re-tuning if the host requires it. */
	if (hp->tuning_timer_count) {
		callout_schedule(&hp->tuning_timer,
		    hz * hp->tuning_timer_count);
	}

	return 0; /* tuning completed */
}
1452
1453 static int
1454 sdhc_execute_tuning(sdmmc_chipset_handle_t sch, int timing)
1455 {
1456 struct sdhc_host *hp = (struct sdhc_host *)sch;
1457 int error;
1458
1459 mutex_enter(&hp->intr_lock);
1460 error = sdhc_execute_tuning1(hp, timing);
1461 mutex_exit(&hp->intr_lock);
1462 return error;
1463 }
1464
/*
 * Callout handler for periodic re-tuning: only flag that tuning is
 * due.  sdhc_exec_command() consumes the flag (with atomic cas) and
 * performs the actual tuning in a context where commands may be sent.
 */
static void
sdhc_tuning_timer(void *arg)
{
	struct sdhc_host *hp = arg;

	atomic_swap_uint(&hp->tuning_timer_pending, 1);
}
1472
1473 static int
1474 sdhc_wait_state(struct sdhc_host *hp, uint32_t mask, uint32_t value)
1475 {
1476 uint32_t state;
1477 int timeout;
1478
1479 for (timeout = 10000; timeout > 0; timeout--) {
1480 if (((state = HREAD4(hp, SDHC_PRESENT_STATE)) & mask) == value)
1481 return 0;
1482 sdmmc_delay(10);
1483 }
1484 aprint_error_dev(hp->sc->sc_dev, "timeout waiting for mask %#x value %#x (state=%#x)\n",
1485 mask, value, state);
1486 return ETIMEDOUT;
1487 }
1488
/*
 * Execute one command on behalf of sdmmc(4): start it, wait for the
 * command phase, read back the response, and run any data transfer.
 * The result is left in cmd->c_error and SCF_ITSDONE is always set
 * before returning.
 */
static void
sdhc_exec_command(sdmmc_chipset_handle_t sch, struct sdmmc_command *cmd)
{
	struct sdhc_host *hp = (struct sdhc_host *)sch;
	int error;
	bool probing;

	mutex_enter(&hp->intr_lock);

	/* Run a deferred re-tune first if the tuning timer fired. */
	if (atomic_cas_uint(&hp->tuning_timer_pending, 1, 0) == 1) {
		(void)sdhc_execute_tuning1(hp, hp->tuning_timing);
	}

	/*
	 * ESDHC/uSDHC: buffer-ready interrupts are only wanted for PIO
	 * transfers; mask them when DMA moves the data.
	 */
	if (cmd->c_data &&
	    ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED | SDHC_FLAG_USDHC)) {
		const uint16_t ready = SDHC_BUFFER_READ_READY | SDHC_BUFFER_WRITE_READY;
		if (ISSET(hp->flags, SHF_USE_DMA)) {
			HCLR2(hp, SDHC_NINTR_SIGNAL_EN, ready);
			HCLR2(hp, SDHC_NINTR_STATUS_EN, ready);
		} else {
			HSET2(hp, SDHC_NINTR_SIGNAL_EN, ready);
			HSET2(hp, SDHC_NINTR_STATUS_EN, ready);
		}
	}

	/*
	 * Quirk: hosts flagged NO_TIMEOUT get the command-timeout error
	 * masked for data commands and re-enabled for non-data commands.
	 */
	if (ISSET(hp->sc->sc_flags, SDHC_FLAG_NO_TIMEOUT)) {
		const uint16_t eintr = SDHC_CMD_TIMEOUT_ERROR;
		if (cmd->c_data != NULL) {
			HCLR2(hp, SDHC_EINTR_SIGNAL_EN, eintr);
			HCLR2(hp, SDHC_EINTR_STATUS_EN, eintr);
		} else {
			HSET2(hp, SDHC_EINTR_SIGNAL_EN, eintr);
			HSET2(hp, SDHC_EINTR_STATUS_EN, eintr);
		}
	}

	/*
	 * Start the MMC command, or mark `cmd' as failed and return.
	 */
	error = sdhc_start_command(hp, cmd);
	if (error) {
		cmd->c_error = error;
		goto out;
	}

	/*
	 * Wait until the command phase is done, or until the command
	 * is marked done for any other reason.
	 */
	probing = (cmd->c_flags & SCF_TOUT_OK) != 0;
	if (!sdhc_wait_intr(hp, SDHC_COMMAND_COMPLETE, SDHC_COMMAND_TIMEOUT, probing)) {
		DPRINTF(1,("%s: timeout for command\n", __func__));
		sdmmc_delay(50);
		cmd->c_error = ETIMEDOUT;
		goto out;
	}

	/*
	 * The host controller removes bits [0:7] from the response
	 * data (CRC) and we pass the data up unchanged to the bus
	 * driver (without padding).
	 */
	if (cmd->c_error == 0 && ISSET(cmd->c_flags, SCF_RSP_PRESENT)) {
		cmd->c_resp[0] = HREAD4(hp, SDHC_RESPONSE + 0);
		if (ISSET(cmd->c_flags, SCF_RSP_136)) {
			cmd->c_resp[1] = HREAD4(hp, SDHC_RESPONSE + 4);
			cmd->c_resp[2] = HREAD4(hp, SDHC_RESPONSE + 8);
			cmd->c_resp[3] = HREAD4(hp, SDHC_RESPONSE + 12);
			/*
			 * Quirky hosts leave the CRC byte in the 136-bit
			 * response; shift it out to the normal layout.
			 */
			if (ISSET(hp->sc->sc_flags, SDHC_FLAG_RSP136_CRC)) {
				cmd->c_resp[0] = (cmd->c_resp[0] >> 8) |
				    (cmd->c_resp[1] << 24);
				cmd->c_resp[1] = (cmd->c_resp[1] >> 8) |
				    (cmd->c_resp[2] << 24);
				cmd->c_resp[2] = (cmd->c_resp[2] >> 8) |
				    (cmd->c_resp[3] << 24);
				cmd->c_resp[3] = (cmd->c_resp[3] >> 8);
			}
		}
	}
	DPRINTF(1,("%s: resp = %08x\n", HDEVNAME(hp), cmd->c_resp[0]));

	/*
	 * If the command has data to transfer in any direction,
	 * execute the transfer now.
	 */
	if (cmd->c_error == 0 && cmd->c_data != NULL)
		sdhc_transfer_data(hp, cmd);
	else if (ISSET(cmd->c_flags, SCF_RSP_BSY)) {
		/* Busy-type response: wait for transfer-complete. */
		if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_NO_BUSY_INTR) &&
		    !sdhc_wait_intr(hp, SDHC_TRANSFER_COMPLETE, hz * 10, false)) {
			DPRINTF(1,("%s: sdhc_exec_command: RSP_BSY\n",
			    HDEVNAME(hp)));
			cmd->c_error = ETIMEDOUT;
			goto out;
		}
	}

out:
	if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED)
	    && !ISSET(hp->sc->sc_flags, SDHC_FLAG_NO_LED_ON)) {
		/* Turn off the LED. */
		HCLR1(hp, SDHC_HOST_CTL, SDHC_LED_ON);
	}
	SET(cmd->c_flags, SCF_ITSDONE);

	/* Quirk: hosts without auto-stop get a reset after CMD12. */
	if (ISSET(hp->sc->sc_flags, SDHC_FLAG_NO_AUTO_STOP) &&
	    cmd->c_opcode == MMC_STOP_TRANSMISSION)
		(void)sdhc_soft_reset(hp, SDHC_RESET_CMD|SDHC_RESET_DAT);

	mutex_exit(&hp->intr_lock);

	DPRINTF(1,("%s: cmd %d %s (flags=%08x error=%d)\n", HDEVNAME(hp),
	    cmd->c_opcode, (cmd->c_error == 0) ? "done" : "abort",
	    cmd->c_flags, cmd->c_error));
}
1604
/*
 * Program the controller for `cmd' and trigger it: compute block
 * size/count, transfer-mode and command register values, wait for
 * the inhibit bits to clear, set up DMA (ADMA2 descriptors or the
 * SDMA address), and finally write the command register.  Returns 0
 * or an errno; completion is observed via sdhc_wait_intr().
 */
static int
sdhc_start_command(struct sdhc_host *hp, struct sdmmc_command *cmd)
{
	struct sdhc_softc * const sc = hp->sc;
	uint16_t blksize = 0;
	uint16_t blkcount = 0;
	uint16_t mode;
	uint16_t command;
	uint32_t pmask;
	int error;

	KASSERT(mutex_owned(&hp->intr_lock));

	DPRINTF(1,("%s: start cmd %d arg=%08x data=%p dlen=%d flags=%08x, status=%#x\n",
	    HDEVNAME(hp), cmd->c_opcode, cmd->c_arg, cmd->c_data,
	    cmd->c_datalen, cmd->c_flags, HREAD4(hp, SDHC_NINTR_STATUS)));

	/*
	 * The maximum block length for commands should be the minimum
	 * of the host buffer size and the card buffer size. (1.7.2)
	 */

	/* Fragment the data into proper blocks. */
	if (cmd->c_datalen > 0) {
		blksize = MIN(cmd->c_datalen, cmd->c_blklen);
		blkcount = cmd->c_datalen / blksize;
		if (cmd->c_datalen % blksize > 0) {
			/* XXX: Split this command. (1.7.4) */
			aprint_error_dev(sc->sc_dev,
			    "data not a multiple of %u bytes\n", blksize);
			return EINVAL;
		}
	}

	/* Check limit imposed by 9-bit block count. (1.7.2) */
	if (blkcount > SDHC_BLOCK_COUNT_MAX) {
		aprint_error_dev(sc->sc_dev, "too much data\n");
		return EINVAL;
	}

	/* Prepare transfer mode register value. (2.2.5) */
	mode = SDHC_BLOCK_COUNT_ENABLE;
	if (ISSET(cmd->c_flags, SCF_CMD_READ))
		mode |= SDHC_READ_MODE;
	if (blkcount > 1) {
		mode |= SDHC_MULTI_BLOCK_MODE;
		/* XXX only for memory commands? */
		if (!ISSET(sc->sc_flags, SDHC_FLAG_NO_AUTO_STOP))
			mode |= SDHC_AUTO_CMD12_ENABLE;
	}
	if (cmd->c_dmamap != NULL && cmd->c_datalen > 0 &&
	    ISSET(hp->flags, SHF_MODE_DMAEN)) {
		mode |= SDHC_DMA_ENABLE;
	}

	/*
	 * Prepare command register value. (2.2.6)
	 */
	command = (cmd->c_opcode & SDHC_COMMAND_INDEX_MASK) << SDHC_COMMAND_INDEX_SHIFT;

	if (ISSET(cmd->c_flags, SCF_RSP_CRC))
		command |= SDHC_CRC_CHECK_ENABLE;
	if (ISSET(cmd->c_flags, SCF_RSP_IDX))
		command |= SDHC_INDEX_CHECK_ENABLE;
	if (cmd->c_datalen > 0)
		command |= SDHC_DATA_PRESENT_SELECT;

	/* Encode the expected response length. */
	if (!ISSET(cmd->c_flags, SCF_RSP_PRESENT))
		command |= SDHC_NO_RESPONSE;
	else if (ISSET(cmd->c_flags, SCF_RSP_136))
		command |= SDHC_RESP_LEN_136;
	else if (ISSET(cmd->c_flags, SCF_RSP_BSY))
		command |= SDHC_RESP_LEN_48_CHK_BUSY;
	else
		command |= SDHC_RESP_LEN_48;

	/* Wait until command and optionally data inhibit bits are clear. (1.5) */
	pmask = SDHC_CMD_INHIBIT_CMD;
	if (cmd->c_flags & (SCF_CMD_ADTC|SCF_RSP_BSY))
		pmask |= SDHC_CMD_INHIBIT_DAT;
	error = sdhc_wait_state(hp, pmask, 0);
	if (error) {
		(void) sdhc_soft_reset(hp, SDHC_RESET_DAT|SDHC_RESET_CMD);
		device_printf(sc->sc_dev, "command or data phase inhibited\n");
		return error;
	}

	DPRINTF(1,("%s: writing cmd: blksize=%d blkcnt=%d mode=%04x cmd=%04x\n",
	    HDEVNAME(hp), blksize, blkcount, mode, command));

	if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED | SDHC_FLAG_USDHC)) {
		blksize |= (MAX(0, PAGE_SHIFT - 12) & SDHC_DMA_BOUNDARY_MASK) <<
		    SDHC_DMA_BOUNDARY_SHIFT; /* PAGE_SIZE DMA boundary */
	}

	if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED)) {
		/* Alert the user not to remove the card. */
		HSET1(hp, SDHC_HOST_CTL, SDHC_LED_ON);
	}

	/* Set DMA start address. */
	if (ISSET(hp->flags, SHF_USE_ADMA2_MASK) && cmd->c_data != NULL) {
		/* Build one ADMA2 descriptor per dmamap segment. */
		for (int seg = 0; seg < cmd->c_dmamap->dm_nsegs; seg++) {
			bus_addr_t paddr =
			    cmd->c_dmamap->dm_segs[seg].ds_addr;
			/* A zero length field encodes 65536 bytes. */
			uint16_t len =
			    cmd->c_dmamap->dm_segs[seg].ds_len == 65536 ?
			    0 : cmd->c_dmamap->dm_segs[seg].ds_len;
			uint16_t attr =
			    SDHC_ADMA2_VALID | SDHC_ADMA2_ACT_TRANS;
			if (seg == cmd->c_dmamap->dm_nsegs - 1) {
				attr |= SDHC_ADMA2_END;
			}
			if (ISSET(hp->flags, SHF_USE_ADMA2_32)) {
				struct sdhc_adma2_descriptor32 *desc =
				    hp->adma2;
				desc[seg].attribute = htole16(attr);
				desc[seg].length = htole16(len);
				desc[seg].address = htole32(paddr);
			} else {
				struct sdhc_adma2_descriptor64 *desc =
				    hp->adma2;
				desc[seg].attribute = htole16(attr);
				desc[seg].length = htole16(len);
				desc[seg].address = htole32(paddr & 0xffffffff);
				desc[seg].address_hi = htole32(
				    (uint64_t)paddr >> 32);
			}
		}
		/* Terminate the list: attribute 0 clears VALID. */
		if (ISSET(hp->flags, SHF_USE_ADMA2_32)) {
			struct sdhc_adma2_descriptor32 *desc = hp->adma2;
			desc[cmd->c_dmamap->dm_nsegs].attribute = htole16(0);
		} else {
			struct sdhc_adma2_descriptor64 *desc = hp->adma2;
			desc[cmd->c_dmamap->dm_nsegs].attribute = htole16(0);
		}
		/* Flush the descriptor table before the controller reads it. */
		bus_dmamap_sync(sc->sc_dmat, hp->adma_map, 0, PAGE_SIZE,
		    BUS_DMASYNC_PREWRITE);
		/* Select ADMA2 in the host control register. */
		if (ISSET(hp->sc->sc_flags, SDHC_FLAG_USDHC)) {
			HCLR4(hp, SDHC_HOST_CTL, SDHC_USDHC_DMA_SELECT);
			HSET4(hp, SDHC_HOST_CTL, SDHC_USDHC_DMA_SELECT_ADMA2);
		} else {
			HCLR1(hp, SDHC_HOST_CTL, SDHC_DMA_SELECT);
			HSET1(hp, SDHC_HOST_CTL, SDHC_DMA_SELECT_ADMA2);
		}

		const bus_addr_t desc_addr = hp->adma_map->dm_segs[0].ds_addr;

		HWRITE4(hp, SDHC_ADMA_SYSTEM_ADDR, desc_addr & 0xffffffff);
		if (ISSET(hp->flags, SHF_USE_ADMA2_64)) {
			HWRITE4(hp, SDHC_ADMA_SYSTEM_ADDR + 4,
			    (uint64_t)desc_addr >> 32);
		}
	} else if (ISSET(mode, SDHC_DMA_ENABLE) &&
	    !ISSET(sc->sc_flags, SDHC_FLAG_EXTERNAL_DMA)) {
		/* SDMA: just program the first segment's address. */
		if (ISSET(hp->sc->sc_flags, SDHC_FLAG_USDHC)) {
			HCLR4(hp, SDHC_HOST_CTL, SDHC_USDHC_DMA_SELECT);
		}
		HWRITE4(hp, SDHC_DMA_ADDR, cmd->c_dmamap->dm_segs[0].ds_addr);
	}

	/*
	 * Start a CPU data transfer. Writing to the high order byte
	 * of the SDHC_COMMAND register triggers the SD command. (1.5)
	 */
	if (ISSET(hp->sc->sc_flags, SDHC_FLAG_32BIT_ACCESS)) {
		HWRITE4(hp, SDHC_BLOCK_SIZE, blksize | (blkcount << 16));
		HWRITE4(hp, SDHC_ARGUMENT, cmd->c_arg);
		if (ISSET(hp->sc->sc_flags, SDHC_FLAG_USDHC)) {
			/* mode bits is in MIX_CTRL register on uSDHC */
			HWRITE4(hp, SDHC_MIX_CTRL, mode |
			    (HREAD4(hp, SDHC_MIX_CTRL) &
			    ~(SDHC_MULTI_BLOCK_MODE |
			    SDHC_READ_MODE |
			    SDHC_AUTO_CMD12_ENABLE |
			    SDHC_BLOCK_COUNT_ENABLE |
			    SDHC_DMA_ENABLE)));
			HWRITE4(hp, SDHC_TRANSFER_MODE, command << 16);
		} else {
			HWRITE4(hp, SDHC_TRANSFER_MODE, mode | (command << 16));
		}
	} else {
		HWRITE2(hp, SDHC_BLOCK_SIZE, blksize);
		HWRITE2(hp, SDHC_BLOCK_COUNT, blkcount);
		HWRITE4(hp, SDHC_ARGUMENT, cmd->c_arg);
		HWRITE2(hp, SDHC_TRANSFER_MODE, mode);
		HWRITE2(hp, SDHC_COMMAND, command);
	}

	return 0;
}
1796
/*
 * Run the data phase of `cmd': dispatch to the vendor DMA hook, the
 * built-in DMA path, or PIO, then record the outcome in cmd->c_error
 * and mark the command done.
 */
static void
sdhc_transfer_data(struct sdhc_host *hp, struct sdmmc_command *cmd)
{
	struct sdhc_softc *sc = hp->sc;
	int error;

	KASSERT(mutex_owned(&hp->intr_lock));

	DPRINTF(1,("%s: data transfer: resp=%08x datalen=%u\n", HDEVNAME(hp),
	    MMC_R1(cmd->c_resp), cmd->c_datalen));

#ifdef SDHC_DEBUG
	/* XXX I forgot why I wanted to know when this happens :-( */
	if ((cmd->c_opcode == 52 || cmd->c_opcode == 53) &&
	    ISSET(MMC_R1(cmd->c_resp), 0xcb00)) {
		aprint_error_dev(hp->sc->sc_dev,
		    "CMD52/53 error response flags %#x\n",
		    MMC_R1(cmd->c_resp) & 0xff00);
	}
#endif

	/* A dmamap means DMA; no dmamap means PIO. */
	if (cmd->c_dmamap != NULL) {
		if (hp->sc->sc_vendor_transfer_data_dma != NULL) {
			/* Vendor hook moves the data; we wait for the
			 * transfer-complete interrupt afterwards. */
			error = hp->sc->sc_vendor_transfer_data_dma(sc, cmd);
			if (error == 0 && !sdhc_wait_intr(hp,
			    SDHC_TRANSFER_COMPLETE, SDHC_DMA_TIMEOUT, false)) {
				DPRINTF(1,("%s: timeout\n", __func__));
				error = ETIMEDOUT;
			}
		} else {
			error = sdhc_transfer_data_dma(hp, cmd);
		}
	} else
		error = sdhc_transfer_data_pio(hp, cmd);
	if (error)
		cmd->c_error = error;
	SET(cmd->c_flags, SCF_ITSDONE);

	DPRINTF(1,("%s: data transfer done (error=%d)\n",
	    HDEVNAME(hp), cmd->c_error));
}
1838
/*
 * Wait for a controller-driven DMA transfer to complete.  With SDMA
 * the controller raises SDHC_DMA_INTERRUPT at each DMA boundary and
 * pauses until the address register is rewritten; this loop services
 * those events while walking the dmamap's segment list.  ADMA2
 * transfers follow their descriptor table and only need the final
 * transfer-complete wait.
 */
static int
sdhc_transfer_data_dma(struct sdhc_host *hp, struct sdmmc_command *cmd)
{
	bus_dma_segment_t *dm_segs = cmd->c_dmamap->dm_segs;
	bus_addr_t posaddr;
	bus_addr_t segaddr;
	bus_size_t seglen;
	u_int seg = 0;
	int error = 0;
	int status;

	KASSERT(mutex_owned(&hp->intr_lock));
	KASSERT(HREAD2(hp, SDHC_NINTR_STATUS_EN) & SDHC_DMA_INTERRUPT);
	KASSERT(HREAD2(hp, SDHC_NINTR_SIGNAL_EN) & SDHC_DMA_INTERRUPT);
	KASSERT(HREAD2(hp, SDHC_NINTR_STATUS_EN) & SDHC_TRANSFER_COMPLETE);
	KASSERT(HREAD2(hp, SDHC_NINTR_SIGNAL_EN) & SDHC_TRANSFER_COMPLETE);

	for (;;) {
		status = sdhc_wait_intr(hp,
		    SDHC_DMA_INTERRUPT|SDHC_TRANSFER_COMPLETE,
		    SDHC_DMA_TIMEOUT, false);

		if (status & SDHC_TRANSFER_COMPLETE) {
			break;
		}
		/* status == 0 means sdhc_wait_intr() timed out. */
		if (!status) {
			DPRINTF(1,("%s: timeout\n", __func__));
			error = ETIMEDOUT;
			break;
		}

		/* ADMA2 needs no per-boundary servicing. */
		if (ISSET(hp->flags, SHF_USE_ADMA2_MASK)) {
			continue;
		}

		if ((status & SDHC_DMA_INTERRUPT) == 0) {
			continue;
		}

		/* DMA Interrupt (boundary crossing) */

		segaddr = dm_segs[seg].ds_addr;
		seglen = dm_segs[seg].ds_len;
		posaddr = HREAD4(hp, SDHC_DMA_ADDR);

		/* Already at the end of the final segment: nothing to do. */
		if ((seg == (cmd->c_dmamap->dm_nsegs-1)) && (posaddr == (segaddr + seglen))) {
			continue;
		}
		/* Resume inside the current segment, or advance to next. */
		if ((posaddr >= segaddr) && (posaddr < (segaddr + seglen)))
			HWRITE4(hp, SDHC_DMA_ADDR, posaddr);
		else if ((posaddr >= segaddr) && (posaddr == (segaddr + seglen)) && (seg + 1) < cmd->c_dmamap->dm_nsegs)
			HWRITE4(hp, SDHC_DMA_ADDR, dm_segs[++seg].ds_addr);
		KASSERT(seg < cmd->c_dmamap->dm_nsegs);
	}

	/* Descriptor table was PREWRITE-synced in sdhc_start_command(). */
	if (ISSET(hp->flags, SHF_USE_ADMA2_MASK)) {
		bus_dmamap_sync(hp->sc->sc_dmat, hp->adma_map, 0,
		    PAGE_SIZE, BUS_DMASYNC_POSTWRITE);
	}

	return error;
}
1901
/*
 * Move the command's data through the controller's buffer port one
 * block at a time, waiting for the buffer-ready condition between
 * blocks, then wait for the final transfer-complete interrupt.
 */
static int
sdhc_transfer_data_pio(struct sdhc_host *hp, struct sdmmc_command *cmd)
{
	uint8_t *data = cmd->c_data;
	void (*pio_func)(struct sdhc_host *, uint8_t *, u_int);
	u_int len, datalen;
	u_int imask;	/* buffer-ready interrupt bit */
	u_int pmask;	/* buffer-ready present-state bit */
	int error = 0;

	KASSERT(mutex_owned(&hp->intr_lock));

	/* Pick direction-specific bits and the copy routine. */
	if (ISSET(cmd->c_flags, SCF_CMD_READ)) {
		imask = SDHC_BUFFER_READ_READY;
		pmask = SDHC_BUFFER_READ_ENABLE;
		if (ISSET(hp->sc->sc_flags,
		    SDHC_FLAG_ENHANCED | SDHC_FLAG_USDHC)) {
			pio_func = esdhc_read_data_pio;
		} else {
			pio_func = sdhc_read_data_pio;
		}
	} else {
		imask = SDHC_BUFFER_WRITE_READY;
		pmask = SDHC_BUFFER_WRITE_ENABLE;
		if (ISSET(hp->sc->sc_flags,
		    SDHC_FLAG_ENHANCED | SDHC_FLAG_USDHC)) {
			pio_func = esdhc_write_data_pio;
		} else {
			pio_func = sdhc_write_data_pio;
		}
	}
	datalen = cmd->c_datalen;

	KASSERT(mutex_owned(&hp->intr_lock));
	KASSERT(HREAD2(hp, SDHC_NINTR_STATUS_EN) & imask);
	KASSERT(HREAD2(hp, SDHC_NINTR_STATUS_EN) & SDHC_TRANSFER_COMPLETE);
	KASSERT(HREAD2(hp, SDHC_NINTR_SIGNAL_EN) & SDHC_TRANSFER_COMPLETE);

	while (datalen > 0) {
		/* Buffer not ready yet: arm its interrupt and wait. */
		if (!ISSET(HREAD4(hp, SDHC_PRESENT_STATE), pmask)) {
			if (ISSET(hp->sc->sc_flags, SDHC_FLAG_32BIT_ACCESS)) {
				HSET4(hp, SDHC_NINTR_SIGNAL_EN, imask);
			} else {
				HSET2(hp, SDHC_NINTR_SIGNAL_EN, imask);
			}
			if (!sdhc_wait_intr(hp, imask, SDHC_BUFFER_TIMEOUT, false)) {
				DPRINTF(1,("%s: timeout\n", __func__));
				error = ETIMEDOUT;
				break;
			}

			/* Double-check via PRESENT_STATE before copying. */
			error = sdhc_wait_state(hp, pmask, pmask);
			if (error)
				break;
		}

		len = MIN(datalen, cmd->c_blklen);
		(*pio_func)(hp, data, len);
		DPRINTF(2,("%s: pio data transfer %u @ %p\n",
		    HDEVNAME(hp), len, data));

		data += len;
		datalen -= len;
	}

	if (error == 0 && !sdhc_wait_intr(hp, SDHC_TRANSFER_COMPLETE,
	    SDHC_TRANSFER_TIMEOUT, false)) {
		DPRINTF(1,("%s: timeout for transfer\n", __func__));
		error = ETIMEDOUT;
	}

	return error;
}
1975
1976 static void
1977 sdhc_read_data_pio(struct sdhc_host *hp, uint8_t *data, u_int datalen)
1978 {
1979
1980 if (((__uintptr_t)data & 3) == 0) {
1981 while (datalen > 3) {
1982 *(uint32_t *)data = le32toh(HREAD4(hp, SDHC_DATA));
1983 data += 4;
1984 datalen -= 4;
1985 }
1986 if (datalen > 1) {
1987 *(uint16_t *)data = le16toh(HREAD2(hp, SDHC_DATA));
1988 data += 2;
1989 datalen -= 2;
1990 }
1991 if (datalen > 0) {
1992 *data = HREAD1(hp, SDHC_DATA);
1993 data += 1;
1994 datalen -= 1;
1995 }
1996 } else if (((__uintptr_t)data & 1) == 0) {
1997 while (datalen > 1) {
1998 *(uint16_t *)data = le16toh(HREAD2(hp, SDHC_DATA));
1999 data += 2;
2000 datalen -= 2;
2001 }
2002 if (datalen > 0) {
2003 *data = HREAD1(hp, SDHC_DATA);
2004 data += 1;
2005 datalen -= 1;
2006 }
2007 } else {
2008 while (datalen > 0) {
2009 *data = HREAD1(hp, SDHC_DATA);
2010 data += 1;
2011 datalen -= 1;
2012 }
2013 }
2014 }
2015
2016 static void
2017 sdhc_write_data_pio(struct sdhc_host *hp, uint8_t *data, u_int datalen)
2018 {
2019
2020 if (((__uintptr_t)data & 3) == 0) {
2021 while (datalen > 3) {
2022 HWRITE4(hp, SDHC_DATA, htole32(*(uint32_t *)data));
2023 data += 4;
2024 datalen -= 4;
2025 }
2026 if (datalen > 1) {
2027 HWRITE2(hp, SDHC_DATA, htole16(*(uint16_t *)data));
2028 data += 2;
2029 datalen -= 2;
2030 }
2031 if (datalen > 0) {
2032 HWRITE1(hp, SDHC_DATA, *data);
2033 data += 1;
2034 datalen -= 1;
2035 }
2036 } else if (((__uintptr_t)data & 1) == 0) {
2037 while (datalen > 1) {
2038 HWRITE2(hp, SDHC_DATA, htole16(*(uint16_t *)data));
2039 data += 2;
2040 datalen -= 2;
2041 }
2042 if (datalen > 0) {
2043 HWRITE1(hp, SDHC_DATA, *data);
2044 data += 1;
2045 datalen -= 1;
2046 }
2047 } else {
2048 while (datalen > 0) {
2049 HWRITE1(hp, SDHC_DATA, *data);
2050 data += 1;
2051 datalen -= 1;
2052 }
2053 }
2054 }
2055
/*
 * PIO read for ESDHC/uSDHC hosts: the data port is always read 32
 * bits at a time, pacing by the controller's read watermark so the
 * FIFO can refill.  Stops early if TRANSFER_COMPLETE is seen.
 * NOTE(review): assumes `data' is 4-byte aligned for the word loop —
 * confirm against callers.
 */
static void
esdhc_read_data_pio(struct sdhc_host *hp, uint8_t *data, u_int datalen)
{
	uint16_t status = HREAD2(hp, SDHC_NINTR_STATUS);
	uint32_t v;

	const size_t watermark = (HREAD4(hp, SDHC_WATERMARK_LEVEL) >> SDHC_WATERMARK_READ_SHIFT) & SDHC_WATERMARK_READ_MASK;
	size_t count = 0;

	while (datalen > 3 && !ISSET(status, SDHC_TRANSFER_COMPLETE)) {
		if (count == 0) {
			/*
			 * If we've drained "watermark" words, we need to wait
			 * a little bit so the read FIFO can refill.
			 */
			sdmmc_delay(10);
			count = watermark;
		}
		v = HREAD4(hp, SDHC_DATA);
		v = le32toh(v);
		*(uint32_t *)data = v;
		data += 4;
		datalen -= 4;
		status = HREAD2(hp, SDHC_NINTR_STATUS);
		count--;
	}
	/* Tail of 1-3 bytes: read one word, unpack it LSB-first. */
	if (datalen > 0 && !ISSET(status, SDHC_TRANSFER_COMPLETE)) {
		if (count == 0) {
			sdmmc_delay(10);
		}
		v = HREAD4(hp, SDHC_DATA);
		v = le32toh(v);
		do {
			*data++ = v;
			v >>= 8;
		} while (--datalen > 0);
	}
}
2094
/*
 * PIO write for ESDHC/uSDHC hosts: the data port is always written
 * 32 bits at a time, pacing by the controller's write watermark so
 * the FIFO can drain.  Stops early if TRANSFER_COMPLETE is seen.
 * NOTE(review): the tail path loads a full 32-bit word even when
 * fewer than 4 bytes remain, reading up to 3 bytes past the end of
 * `data'; presumably callers supply word-padded buffers — confirm.
 */
static void
esdhc_write_data_pio(struct sdhc_host *hp, uint8_t *data, u_int datalen)
{
	uint16_t status = HREAD2(hp, SDHC_NINTR_STATUS);
	uint32_t v;

	const size_t watermark = (HREAD4(hp, SDHC_WATERMARK_LEVEL) >> SDHC_WATERMARK_WRITE_SHIFT) & SDHC_WATERMARK_WRITE_MASK;
	size_t count = watermark;

	while (datalen > 3 && !ISSET(status, SDHC_TRANSFER_COMPLETE)) {
		if (count == 0) {
			/* Pause so the write FIFO can drain. */
			sdmmc_delay(10);
			count = watermark;
		}
		v = *(uint32_t *)data;
		v = htole32(v);
		HWRITE4(hp, SDHC_DATA, v);
		data += 4;
		datalen -= 4;
		status = HREAD2(hp, SDHC_NINTR_STATUS);
		count--;
	}
	/* Tail of 1-3 bytes: write one final (over-read) word. */
	if (datalen > 0 && !ISSET(status, SDHC_TRANSFER_COMPLETE)) {
		if (count == 0) {
			sdmmc_delay(10);
		}
		v = *(uint32_t *)data;
		v = htole32(v);
		HWRITE4(hp, SDHC_DATA, v);
	}
}
2126
/* Prepare for another command. */
/*
 * Issue a software reset for the units in `mask' (CMD/DAT/ALL) and
 * wait for the controller to complete it.  Returns 0 or ETIMEDOUT.
 * Caller must hold intr_lock.
 */
static int
sdhc_soft_reset(struct sdhc_host *hp, int mask)
{
	int timo;

	KASSERT(mutex_owned(&hp->intr_lock));

	DPRINTF(1,("%s: software reset reg=%08x\n", HDEVNAME(hp), mask));

	/* Request the reset. */
	HWRITE1(hp, SDHC_SOFTWARE_RESET, mask);

	/*
	 * If necessary, wait for the controller to set the bits to
	 * acknowledge the reset.
	 */
	if (ISSET(hp->sc->sc_flags, SDHC_FLAG_WAIT_RESET) &&
	    ISSET(mask, (SDHC_RESET_DAT | SDHC_RESET_CMD))) {
		for (timo = 10000; timo > 0; timo--) {
			if (ISSET(HREAD1(hp, SDHC_SOFTWARE_RESET), mask))
				break;
			/* Short delay because I worry we may miss it... */
			sdmmc_delay(1);
		}
		if (timo == 0) {
			DPRINTF(1,("%s: timeout for reset on\n", __func__));
			return ETIMEDOUT;
		}
	}

	/*
	 * Wait for the controller to clear the bits to indicate that
	 * the reset has completed.
	 */
	for (timo = 10; timo > 0; timo--) {
		if (!ISSET(HREAD1(hp, SDHC_SOFTWARE_RESET), mask))
			break;
		sdmmc_delay(10000);
	}
	if (timo == 0) {
		DPRINTF(1,("%s: timeout reg=%08x\n", HDEVNAME(hp),
		    HREAD1(hp, SDHC_SOFTWARE_RESET)));
		return ETIMEDOUT;
	}

	/* ESDHC: re-enable DMA snooping after the reset. */
	if (ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED)) {
		HSET4(hp, SDHC_DMA_CTL, SDHC_DMA_SNOOP);
	}

	return 0;
}
2179
/*
 * Wait until one of the normal-interrupt bits in "mask" has been posted
 * by sdhc_intr(), or until "timo" ticks elapse without an interrupt.
 * Returns the matched status bits on success; returns 0 on timeout or
 * when an error interrupt fired (in which case the CMD/DAT lines are
 * soft-reset on non-ENHANCED controllers).  Must be called with the
 * host's intr_lock held.  "probing" suppresses the console message for
 * command timeouts, which are expected while scanning for cards.
 */
static int
sdhc_wait_intr(struct sdhc_host *hp, int mask, int timo, bool probing)
{
	int status, error, nointr;

	KASSERT(mutex_owned(&hp->intr_lock));

	/* Always wake up on errors so the caller can abort the command. */
	mask |= SDHC_ERROR_INTERRUPT;

	nointr = 0;
	status = hp->intr_status & mask;
	/*
	 * NOTE(review): "timo" is passed unchanged to every wait, so a
	 * wakeup that doesn't match "mask" restarts the full timeout
	 * rather than continuing with the remaining time.
	 */
	while (status == 0) {
		if (cv_timedwait(&hp->intr_cv, &hp->intr_lock, timo)
		    == EWOULDBLOCK) {
			nointr = 1;
			break;
		}
		status = hp->intr_status & mask;
	}
	error = hp->intr_error_status;

	DPRINTF(2,("%s: intr status %#x error %#x\n", HDEVNAME(hp), status,
	    error));

	/* Consume only the bits we observed; leave any others pending. */
	hp->intr_status &= ~status;
	hp->intr_error_status &= ~error;

	if (ISSET(status, SDHC_ERROR_INTERRUPT)) {
		/* Report each error bit on the console. */
		if (ISSET(error, SDHC_DMA_ERROR))
			device_printf(hp->sc->sc_dev,"dma error\n");
		if (ISSET(error, SDHC_ADMA_ERROR))
			device_printf(hp->sc->sc_dev,"adma error\n");
		if (ISSET(error, SDHC_AUTO_CMD12_ERROR))
			device_printf(hp->sc->sc_dev,"auto_cmd12 error\n");
		if (ISSET(error, SDHC_CURRENT_LIMIT_ERROR))
			device_printf(hp->sc->sc_dev,"current limit error\n");
		if (ISSET(error, SDHC_DATA_END_BIT_ERROR))
			device_printf(hp->sc->sc_dev,"data end bit error\n");
		if (ISSET(error, SDHC_DATA_CRC_ERROR))
			device_printf(hp->sc->sc_dev,"data crc error\n");
		if (ISSET(error, SDHC_DATA_TIMEOUT_ERROR))
			device_printf(hp->sc->sc_dev,"data timeout error\n");
		if (ISSET(error, SDHC_CMD_INDEX_ERROR))
			device_printf(hp->sc->sc_dev,"cmd index error\n");
		if (ISSET(error, SDHC_CMD_END_BIT_ERROR))
			device_printf(hp->sc->sc_dev,"cmd end bit error\n");
		if (ISSET(error, SDHC_CMD_CRC_ERROR))
			device_printf(hp->sc->sc_dev,"cmd crc error\n");
		if (ISSET(error, SDHC_CMD_TIMEOUT_ERROR)) {
			/* Expected while probing for cards; stay quiet. */
			if (!probing)
				device_printf(hp->sc->sc_dev,"cmd timeout error\n");
#ifdef SDHC_DEBUG
			else if (sdhcdebug > 0)
				device_printf(hp->sc->sc_dev,"cmd timeout (expected)\n");
#endif
		}
		/* Bits outside the standard mask are vendor-defined. */
		if ((error & ~SDHC_EINTR_STATUS_MASK) != 0)
			device_printf(hp->sc->sc_dev,"vendor error %#x\n",
			    (error & ~SDHC_EINTR_STATUS_MASK));
		if (error == 0)
			device_printf(hp->sc->sc_dev,"no error\n");

		/* Command timeout has higher priority than command complete. */
		if (ISSET(error, SDHC_CMD_TIMEOUT_ERROR))
			CLR(status, SDHC_COMMAND_COMPLETE);

		/* Transfer complete has higher priority than data timeout. */
		if (ISSET(status, SDHC_TRANSFER_COMPLETE))
			CLR(error, SDHC_DATA_TIMEOUT_ERROR);
	}

	/*
	 * On timeout or a real error, reset the command/data state
	 * machines (unless the ENHANCED controller handles this itself)
	 * and report failure by returning 0.
	 */
	if (nointr ||
	    (ISSET(status, SDHC_ERROR_INTERRUPT) && error)) {
		if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED))
			(void)sdhc_soft_reset(hp, SDHC_RESET_CMD|SDHC_RESET_DAT);
		hp->intr_error_status = 0;
		status = 0;
	}

	return status;
}
2261
2262 /*
2263 * Established by attachment driver at interrupt priority IPL_SDMMC.
2264 */
2265 int
2266 sdhc_intr(void *arg)
2267 {
2268 struct sdhc_softc *sc = (struct sdhc_softc *)arg;
2269 struct sdhc_host *hp;
2270 int done = 0;
2271 uint16_t status;
2272 uint16_t error;
2273
2274 /* We got an interrupt, but we don't know from which slot. */
2275 for (size_t host = 0; host < sc->sc_nhosts; host++) {
2276 hp = sc->sc_host[host];
2277 if (hp == NULL)
2278 continue;
2279
2280 mutex_enter(&hp->intr_lock);
2281
2282 if (ISSET(sc->sc_flags, SDHC_FLAG_32BIT_ACCESS)) {
2283 /* Find out which interrupts are pending. */
2284 uint32_t xstatus = HREAD4(hp, SDHC_NINTR_STATUS);
2285 status = xstatus;
2286 error = xstatus >> 16;
2287 if (ISSET(sc->sc_flags, SDHC_FLAG_USDHC) &&
2288 (xstatus & SDHC_TRANSFER_COMPLETE) &&
2289 !(xstatus & SDHC_DMA_INTERRUPT)) {
2290 /* read again due to uSDHC errata */
2291 status = xstatus = HREAD4(hp,
2292 SDHC_NINTR_STATUS);
2293 error = xstatus >> 16;
2294 }
2295 if (ISSET(sc->sc_flags,
2296 SDHC_FLAG_ENHANCED | SDHC_FLAG_USDHC)) {
2297 if ((error & SDHC_NINTR_STATUS_MASK) != 0)
2298 SET(status, SDHC_ERROR_INTERRUPT);
2299 }
2300 if (error)
2301 xstatus |= SDHC_ERROR_INTERRUPT;
2302 else if (!ISSET(status, SDHC_NINTR_STATUS_MASK))
2303 goto next_port; /* no interrupt for us */
2304 /* Acknowledge the interrupts we are about to handle. */
2305 HWRITE4(hp, SDHC_NINTR_STATUS, xstatus);
2306 } else {
2307 /* Find out which interrupts are pending. */
2308 error = 0;
2309 status = HREAD2(hp, SDHC_NINTR_STATUS);
2310 if (!ISSET(status, SDHC_NINTR_STATUS_MASK))
2311 goto next_port; /* no interrupt for us */
2312 /* Acknowledge the interrupts we are about to handle. */
2313 HWRITE2(hp, SDHC_NINTR_STATUS, status);
2314 if (ISSET(status, SDHC_ERROR_INTERRUPT)) {
2315 /* Acknowledge error interrupts. */
2316 error = HREAD2(hp, SDHC_EINTR_STATUS);
2317 HWRITE2(hp, SDHC_EINTR_STATUS, error);
2318 }
2319 }
2320
2321 DPRINTF(2,("%s: interrupt status=%x error=%x\n", HDEVNAME(hp),
2322 status, error));
2323
2324 /* Claim this interrupt. */
2325 done = 1;
2326
2327 if (ISSET(status, SDHC_ERROR_INTERRUPT) &&
2328 ISSET(error, SDHC_ADMA_ERROR)) {
2329 uint8_t adma_err = HREAD1(hp, SDHC_ADMA_ERROR_STATUS);
2330 printf("%s: ADMA error, status %02x\n", HDEVNAME(hp),
2331 adma_err);
2332 }
2333
2334 /*
2335 * Wake up the sdmmc event thread to scan for cards.
2336 */
2337 if (ISSET(status, SDHC_CARD_REMOVAL|SDHC_CARD_INSERTION)) {
2338 if (hp->sdmmc != NULL) {
2339 sdmmc_needs_discover(hp->sdmmc);
2340 }
2341 if (ISSET(sc->sc_flags,
2342 SDHC_FLAG_ENHANCED | SDHC_FLAG_USDHC)) {
2343 HCLR4(hp, SDHC_NINTR_STATUS_EN,
2344 status & (SDHC_CARD_REMOVAL|SDHC_CARD_INSERTION));
2345 HCLR4(hp, SDHC_NINTR_SIGNAL_EN,
2346 status & (SDHC_CARD_REMOVAL|SDHC_CARD_INSERTION));
2347 }
2348 }
2349
2350 /*
2351 * Schedule re-tuning process (UHS).
2352 */
2353 if (ISSET(status, SDHC_RETUNING_EVENT)) {
2354 atomic_swap_uint(&hp->tuning_timer_pending, 1);
2355 }
2356
2357 /*
2358 * Wake up the blocking process to service command
2359 * related interrupt(s).
2360 */
2361 if (ISSET(status, SDHC_COMMAND_COMPLETE|SDHC_ERROR_INTERRUPT|
2362 SDHC_BUFFER_READ_READY|SDHC_BUFFER_WRITE_READY|
2363 SDHC_TRANSFER_COMPLETE|SDHC_DMA_INTERRUPT)) {
2364 hp->intr_error_status |= error;
2365 hp->intr_status |= status;
2366 if (ISSET(sc->sc_flags,
2367 SDHC_FLAG_ENHANCED | SDHC_FLAG_USDHC)) {
2368 HCLR4(hp, SDHC_NINTR_SIGNAL_EN,
2369 status & (SDHC_BUFFER_READ_READY|SDHC_BUFFER_WRITE_READY));
2370 }
2371 cv_broadcast(&hp->intr_cv);
2372 }
2373
2374 /*
2375 * Service SD card interrupts.
2376 */
2377 if (!ISSET(sc->sc_flags, SDHC_FLAG_ENHANCED | SDHC_FLAG_USDHC)
2378 && ISSET(status, SDHC_CARD_INTERRUPT)) {
2379 DPRINTF(0,("%s: card interrupt\n", HDEVNAME(hp)));
2380 HCLR2(hp, SDHC_NINTR_STATUS_EN, SDHC_CARD_INTERRUPT);
2381 sdmmc_card_intr(hp->sdmmc);
2382 }
2383 next_port:
2384 mutex_exit(&hp->intr_lock);
2385 }
2386
2387 return done;
2388 }
2389
2390 kmutex_t *
2391 sdhc_host_lock(struct sdhc_host *hp)
2392 {
2393 return &hp->intr_lock;
2394 }
2395
#ifdef SDHC_DEBUG
/*
 * Debug helper: dump the interesting host controller registers to the
 * console.  POWER_CTL is skipped on ENHANCED controllers.
 */
void
sdhc_dump_regs(struct sdhc_host *hp)
{

	printf("0x%02x PRESENT_STATE:    %x\n", SDHC_PRESENT_STATE,
	    HREAD4(hp, SDHC_PRESENT_STATE));
	if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED))
		printf("0x%02x POWER_CTL:        %x\n", SDHC_POWER_CTL,
		    HREAD1(hp, SDHC_POWER_CTL));
	printf("0x%02x NINTR_STATUS:     %x\n", SDHC_NINTR_STATUS,
	    HREAD2(hp, SDHC_NINTR_STATUS));
	printf("0x%02x EINTR_STATUS:     %x\n", SDHC_EINTR_STATUS,
	    HREAD2(hp, SDHC_EINTR_STATUS));
	printf("0x%02x NINTR_STATUS_EN:  %x\n", SDHC_NINTR_STATUS_EN,
	    HREAD2(hp, SDHC_NINTR_STATUS_EN));
	printf("0x%02x EINTR_STATUS_EN:  %x\n", SDHC_EINTR_STATUS_EN,
	    HREAD2(hp, SDHC_EINTR_STATUS_EN));
	printf("0x%02x NINTR_SIGNAL_EN:  %x\n", SDHC_NINTR_SIGNAL_EN,
	    HREAD2(hp, SDHC_NINTR_SIGNAL_EN));
	printf("0x%02x EINTR_SIGNAL_EN:  %x\n", SDHC_EINTR_SIGNAL_EN,
	    HREAD2(hp, SDHC_EINTR_SIGNAL_EN));
	printf("0x%02x CAPABILITIES:     %x\n", SDHC_CAPABILITIES,
	    HREAD4(hp, SDHC_CAPABILITIES));
	printf("0x%02x MAX_CAPABILITIES: %x\n", SDHC_MAX_CAPABILITIES,
	    HREAD4(hp, SDHC_MAX_CAPABILITIES));
}
#endif
2424