sdhc.c revision 1.100 1 /* $NetBSD: sdhc.c,v 1.100 2017/04/22 21:49:41 jmcneill Exp $ */
2 /* $OpenBSD: sdhc.c,v 1.25 2009/01/13 19:44:20 grange Exp $ */
3
4 /*
5 * Copyright (c) 2006 Uwe Stuehler <uwe (at) openbsd.org>
6 *
7 * Permission to use, copy, modify, and distribute this software for any
8 * purpose with or without fee is hereby granted, provided that the above
9 * copyright notice and this permission notice appear in all copies.
10 *
11 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18 */
19
20 /*
21 * SD Host Controller driver based on the SD Host Controller Standard
22 * Simplified Specification Version 1.00 (www.sdcard.com).
23 */
24
25 #include <sys/cdefs.h>
26 __KERNEL_RCSID(0, "$NetBSD: sdhc.c,v 1.100 2017/04/22 21:49:41 jmcneill Exp $");
27
28 #ifdef _KERNEL_OPT
29 #include "opt_sdmmc.h"
30 #endif
31
32 #include <sys/param.h>
33 #include <sys/device.h>
34 #include <sys/kernel.h>
35 #include <sys/malloc.h>
36 #include <sys/systm.h>
37 #include <sys/mutex.h>
38 #include <sys/condvar.h>
39 #include <sys/atomic.h>
40
41 #include <dev/sdmmc/sdhcreg.h>
42 #include <dev/sdmmc/sdhcvar.h>
43 #include <dev/sdmmc/sdmmcchip.h>
44 #include <dev/sdmmc/sdmmcreg.h>
45 #include <dev/sdmmc/sdmmcvar.h>
46
#ifdef SDHC_DEBUG
/* Debug print: emitted when the message level (n) <= sdhcdebug. */
int sdhcdebug = 1;
#define DPRINTF(n,s)	do { if ((n) <= sdhcdebug) printf s; } while (0)
void	sdhc_dump_regs(struct sdhc_host *);
#else
#define DPRINTF(n,s)	do {} while (0)
#endif

/* Wait-channel timeouts, in clock ticks (hz ticks == one second). */
#define SDHC_COMMAND_TIMEOUT	hz
#define SDHC_BUFFER_TIMEOUT	hz
#define SDHC_TRANSFER_TIMEOUT	hz
#define SDHC_DMA_TIMEOUT	(hz*3)
#define SDHC_TUNING_TIMEOUT	hz
60
/* Per-slot host state; one instance per standard register set. */
struct sdhc_host {
	struct sdhc_softc *sc;		/* host controller device */

	bus_space_tag_t iot;		/* host register set tag */
	bus_space_handle_t ioh;		/* host register set handle */
	bus_size_t ios;			/* host register space size */
	bus_dma_tag_t dmat;		/* host DMA tag */

	device_t sdmmc;			/* generic SD/MMC device */

	u_int clkbase;			/* base clock frequency in KHz */
	int maxblklen;			/* maximum block length */
	uint32_t ocr;			/* OCR value from capabilities */

	uint8_t regs[14];		/* host controller state, saved
					 * across suspend/resume */

	uint16_t intr_status;		/* soft interrupt status */
	uint16_t intr_error_status;	/* soft error status */
	kmutex_t intr_lock;		/* serializes register access and
					 * protects the intr_* fields */
	kcondvar_t intr_cv;		/* waited on for interrupt delivery */

	callout_t tuning_timer;		/* periodic re-tuning trigger */
	int tuning_timing;		/* NOTE(review): presumably the timing
					 * mode to re-tune for — confirm in
					 * sdhc_execute_tuning */
	u_int tuning_timer_count;	/* re-tuning interval, seconds */
	u_int tuning_timer_pending;	/* NOTE(review): looks like a flag that
					 * re-tuning is due — confirm */

	int specver;			/* spec. version */

	uint32_t flags;			/* flags for this host */
#define SHF_USE_DMA		0x0001
#define SHF_USE_4BIT_MODE	0x0002
#define SHF_USE_8BIT_MODE	0x0004
#define SHF_MODE_DMAEN		0x0008 /* needs SDHC_DMA_ENABLE in mode */
#define SHF_USE_ADMA2_32	0x0010
#define SHF_USE_ADMA2_64	0x0020
#define SHF_USE_ADMA2_MASK	0x0030

	/* One page of ADMA2 descriptor memory, mapped for the device. */
	bus_dmamap_t adma_map;
	bus_dma_segment_t adma_segs[1];
	void *adma2;
};
102
103 #define HDEVNAME(hp) (device_xname((hp)->sc->sc_dev))
104
105 static uint8_t
106 hread1(struct sdhc_host *hp, bus_size_t reg)
107 {
108
109 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_32BIT_ACCESS))
110 return bus_space_read_1(hp->iot, hp->ioh, reg);
111 return bus_space_read_4(hp->iot, hp->ioh, reg & -4) >> (8 * (reg & 3));
112 }
113
114 static uint16_t
115 hread2(struct sdhc_host *hp, bus_size_t reg)
116 {
117
118 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_32BIT_ACCESS))
119 return bus_space_read_2(hp->iot, hp->ioh, reg);
120 return bus_space_read_4(hp->iot, hp->ioh, reg & -4) >> (8 * (reg & 2));
121 }
122
/*
 * Register read wrappers: 8/16-bit reads go through the helpers above
 * so 32-bit-only controllers still work; 32-bit reads are direct.
 */
#define HREAD1(hp, reg)		hread1(hp, reg)
#define HREAD2(hp, reg)		hread2(hp, reg)
#define HREAD4(hp, reg)							\
	(bus_space_read_4((hp)->iot, (hp)->ioh, (reg)))
128
129 static void
130 hwrite1(struct sdhc_host *hp, bus_size_t o, uint8_t val)
131 {
132
133 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_32BIT_ACCESS)) {
134 bus_space_write_1(hp->iot, hp->ioh, o, val);
135 } else {
136 const size_t shift = 8 * (o & 3);
137 o &= -4;
138 uint32_t tmp = bus_space_read_4(hp->iot, hp->ioh, o);
139 tmp = (val << shift) | (tmp & ~(0xff << shift));
140 bus_space_write_4(hp->iot, hp->ioh, o, tmp);
141 }
142 }
143
144 static void
145 hwrite2(struct sdhc_host *hp, bus_size_t o, uint16_t val)
146 {
147
148 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_32BIT_ACCESS)) {
149 bus_space_write_2(hp->iot, hp->ioh, o, val);
150 } else {
151 const size_t shift = 8 * (o & 2);
152 o &= -4;
153 uint32_t tmp = bus_space_read_4(hp->iot, hp->ioh, o);
154 tmp = (val << shift) | (tmp & ~(0xffff << shift));
155 bus_space_write_4(hp->iot, hp->ioh, o, tmp);
156 }
157 }
158
/* Register write wrappers, matching the HREAD* family above. */
#define HWRITE1(hp, reg, val)		hwrite1(hp, reg, val)
#define HWRITE2(hp, reg, val)		hwrite2(hp, reg, val)
#define HWRITE4(hp, reg, val)						\
	bus_space_write_4((hp)->iot, (hp)->ioh, (reg), (val))

/*
 * Read-modify-write helpers to set or clear bits in a register.
 * The bus access is skipped entirely when `bits' is zero.
 */
#define HCLR1(hp, reg, bits)						\
	do if (bits) HWRITE1((hp), (reg), HREAD1((hp), (reg)) & ~(bits)); while (0)
#define HCLR2(hp, reg, bits)						\
	do if (bits) HWRITE2((hp), (reg), HREAD2((hp), (reg)) & ~(bits)); while (0)
#define HCLR4(hp, reg, bits)						\
	do if (bits) HWRITE4((hp), (reg), HREAD4((hp), (reg)) & ~(bits)); while (0)
#define HSET1(hp, reg, bits)						\
	do if (bits) HWRITE1((hp), (reg), HREAD1((hp), (reg)) | (bits)); while (0)
#define HSET2(hp, reg, bits)						\
	do if (bits) HWRITE2((hp), (reg), HREAD2((hp), (reg)) | (bits)); while (0)
#define HSET4(hp, reg, bits)						\
	do if (bits) HWRITE4((hp), (reg), HREAD4((hp), (reg)) | (bits)); while (0)
176
177 static int sdhc_host_reset(sdmmc_chipset_handle_t);
178 static int sdhc_host_reset1(sdmmc_chipset_handle_t);
179 static uint32_t sdhc_host_ocr(sdmmc_chipset_handle_t);
180 static int sdhc_host_maxblklen(sdmmc_chipset_handle_t);
181 static int sdhc_card_detect(sdmmc_chipset_handle_t);
182 static int sdhc_write_protect(sdmmc_chipset_handle_t);
183 static int sdhc_bus_power(sdmmc_chipset_handle_t, uint32_t);
184 static int sdhc_bus_clock_ddr(sdmmc_chipset_handle_t, int, bool);
185 static int sdhc_bus_width(sdmmc_chipset_handle_t, int);
186 static int sdhc_bus_rod(sdmmc_chipset_handle_t, int);
187 static void sdhc_card_enable_intr(sdmmc_chipset_handle_t, int);
188 static void sdhc_card_intr_ack(sdmmc_chipset_handle_t);
189 static void sdhc_exec_command(sdmmc_chipset_handle_t,
190 struct sdmmc_command *);
191 static int sdhc_signal_voltage(sdmmc_chipset_handle_t, int);
192 static int sdhc_execute_tuning1(struct sdhc_host *, int);
193 static int sdhc_execute_tuning(sdmmc_chipset_handle_t, int);
194 static void sdhc_tuning_timer(void *);
195 static void sdhc_hw_reset(sdmmc_chipset_handle_t);
196 static int sdhc_start_command(struct sdhc_host *, struct sdmmc_command *);
197 static int sdhc_wait_state(struct sdhc_host *, uint32_t, uint32_t);
198 static int sdhc_soft_reset(struct sdhc_host *, int);
199 static int sdhc_wait_intr(struct sdhc_host *, int, int, bool);
200 static void sdhc_transfer_data(struct sdhc_host *, struct sdmmc_command *);
201 static int sdhc_transfer_data_dma(struct sdhc_host *, struct sdmmc_command *);
202 static int sdhc_transfer_data_pio(struct sdhc_host *, struct sdmmc_command *);
203 static void sdhc_read_data_pio(struct sdhc_host *, uint8_t *, u_int);
204 static void sdhc_write_data_pio(struct sdhc_host *, uint8_t *, u_int);
205 static void esdhc_read_data_pio(struct sdhc_host *, uint8_t *, u_int);
206 static void esdhc_write_data_pio(struct sdhc_host *, uint8_t *, u_int);
207
/*
 * Chipset method table handed to the generic sdmmc bus driver at
 * attach time (via sdmmcbus_attach_args).
 */
static struct sdmmc_chip_functions sdhc_functions = {
	/* host controller reset */
	.host_reset = sdhc_host_reset,

	/* host controller capabilities */
	.host_ocr = sdhc_host_ocr,
	.host_maxblklen = sdhc_host_maxblklen,

	/* card detection */
	.card_detect = sdhc_card_detect,

	/* write protect */
	.write_protect = sdhc_write_protect,

	/* bus power, clock frequency, width and ROD(OpenDrain/PushPull) */
	.bus_power = sdhc_bus_power,
	.bus_clock = NULL,	/* see sdhc_bus_clock_ddr */
	.bus_width = sdhc_bus_width,
	.bus_rod = sdhc_bus_rod,

	/* command execution */
	.exec_command = sdhc_exec_command,

	/* card interrupt */
	.card_enable_intr = sdhc_card_enable_intr,
	.card_intr_ack = sdhc_card_intr_ack,

	/* UHS functions */
	.signal_voltage = sdhc_signal_voltage,
	.bus_clock_ddr = sdhc_bus_clock_ddr,
	.execute_tuning = sdhc_execute_tuning,
	.hw_reset = sdhc_hw_reset,
};
241
242 static int
243 sdhc_cfprint(void *aux, const char *pnp)
244 {
245 const struct sdmmcbus_attach_args * const saa = aux;
246 const struct sdhc_host * const hp = saa->saa_sch;
247
248 if (pnp) {
249 aprint_normal("sdmmc at %s", pnp);
250 }
251 for (size_t host = 0; host < hp->sc->sc_nhosts; host++) {
252 if (hp->sc->sc_host[host] == hp) {
253 aprint_normal(" slot %zu", host);
254 }
255 }
256
257 return UNCONF;
258 }
259
260 /*
261 * Called by attachment driver. For each SD card slot there is one SD
262 * host controller standard register set. (1.3)
263 */
264 int
265 sdhc_host_found(struct sdhc_softc *sc, bus_space_tag_t iot,
266 bus_space_handle_t ioh, bus_size_t iosize)
267 {
268 struct sdmmcbus_attach_args saa;
269 struct sdhc_host *hp;
270 uint32_t caps, caps2;
271 uint16_t sdhcver;
272 int error;
273
274 /* Allocate one more host structure. */
275 hp = malloc(sizeof(struct sdhc_host), M_DEVBUF, M_WAITOK|M_ZERO);
276 if (hp == NULL) {
277 aprint_error_dev(sc->sc_dev,
278 "couldn't alloc memory (sdhc host)\n");
279 goto err1;
280 }
281 sc->sc_host[sc->sc_nhosts++] = hp;
282
283 /* Fill in the new host structure. */
284 hp->sc = sc;
285 hp->iot = iot;
286 hp->ioh = ioh;
287 hp->ios = iosize;
288 hp->dmat = sc->sc_dmat;
289
290 mutex_init(&hp->intr_lock, MUTEX_DEFAULT, IPL_SDMMC);
291 cv_init(&hp->intr_cv, "sdhcintr");
292 callout_init(&hp->tuning_timer, CALLOUT_MPSAFE);
293 callout_setfunc(&hp->tuning_timer, sdhc_tuning_timer, hp);
294
295 if (iosize <= SDHC_HOST_CTL_VERSION) {
296 aprint_normal_dev(sc->sc_dev, "SDHC NO-VERS");
297 hp->specver = -1;
298 } else {
299 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_USDHC)) {
300 sdhcver = SDHC_SPEC_VERS_300 << SDHC_SPEC_VERS_SHIFT;
301 } else if (ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED)) {
302 sdhcver = HREAD4(hp, SDHC_ESDHC_HOST_CTL_VERSION);
303 } else
304 sdhcver = HREAD2(hp, SDHC_HOST_CTL_VERSION);
305 aprint_normal_dev(sc->sc_dev, "SDHC ");
306 hp->specver = SDHC_SPEC_VERSION(sdhcver);
307 switch (SDHC_SPEC_VERSION(sdhcver)) {
308 case SDHC_SPEC_VERS_100:
309 aprint_normal("1.0");
310 break;
311
312 case SDHC_SPEC_VERS_200:
313 aprint_normal("2.0");
314 break;
315
316 case SDHC_SPEC_VERS_300:
317 aprint_normal("3.0");
318 break;
319
320 case SDHC_SPEC_VERS_400:
321 aprint_normal("4.0");
322 break;
323
324 default:
325 aprint_normal("unknown version(0x%x)",
326 SDHC_SPEC_VERSION(sdhcver));
327 break;
328 }
329 aprint_normal(", rev %u", SDHC_VENDOR_VERSION(sdhcver));
330 }
331
332 /*
333 * Reset the host controller and enable interrupts.
334 */
335 (void)sdhc_host_reset(hp);
336
337 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_USDHC)) {
338 /* init uSDHC registers */
339 HWRITE4(hp, SDHC_MMC_BOOT, 0);
340 HWRITE4(hp, SDHC_HOST_CTL, SDHC_USDHC_BURST_LEN_EN |
341 SDHC_USDHC_HOST_CTL_RESV23 | SDHC_USDHC_EMODE_LE);
342 HWRITE4(hp, SDHC_WATERMARK_LEVEL,
343 (0x10 << SDHC_WATERMARK_WR_BRST_SHIFT) |
344 (0x40 << SDHC_WATERMARK_WRITE_SHIFT) |
345 (0x10 << SDHC_WATERMARK_RD_BRST_SHIFT) |
346 (0x40 << SDHC_WATERMARK_READ_SHIFT));
347 HSET4(hp, SDHC_VEND_SPEC,
348 SDHC_VEND_SPEC_MBO |
349 SDHC_VEND_SPEC_CARD_CLK_SOFT_EN |
350 SDHC_VEND_SPEC_IPG_PERCLK_SOFT_EN |
351 SDHC_VEND_SPEC_HCLK_SOFT_EN |
352 SDHC_VEND_SPEC_IPG_CLK_SOFT_EN |
353 SDHC_VEND_SPEC_AC12_WR_CHKBUSY_EN |
354 SDHC_VEND_SPEC_FRC_SDCLK_ON);
355 }
356
357 /* Determine host capabilities. */
358 if (ISSET(sc->sc_flags, SDHC_FLAG_HOSTCAPS)) {
359 caps = sc->sc_caps;
360 caps2 = sc->sc_caps2;
361 } else if (ISSET(hp->sc->sc_flags, SDHC_FLAG_USDHC)) {
362 /* uSDHC capability register is little bit different */
363 caps = HREAD4(hp, SDHC_CAPABILITIES);
364 caps |= SDHC_8BIT_SUPP;
365 if (caps & SDHC_ADMA1_SUPP)
366 caps |= SDHC_ADMA2_SUPP;
367 sc->sc_caps = caps;
368 /* uSDHC has no SDHC_CAPABILITIES2 register */
369 caps2 = sc->sc_caps2 = SDHC_SDR50_SUPP | SDHC_DDR50_SUPP;
370 } else {
371 caps = sc->sc_caps = HREAD4(hp, SDHC_CAPABILITIES);
372 if (hp->specver >= SDHC_SPEC_VERS_300) {
373 caps2 = sc->sc_caps2 = HREAD4(hp, SDHC_CAPABILITIES2);
374 } else {
375 caps2 = sc->sc_caps2 = 0;
376 }
377 }
378
379 const u_int retuning_mode = (caps2 >> SDHC_RETUNING_MODES_SHIFT) &
380 SDHC_RETUNING_MODES_MASK;
381 if (retuning_mode == SDHC_RETUNING_MODE_1) {
382 hp->tuning_timer_count = (caps2 >> SDHC_TIMER_COUNT_SHIFT) &
383 SDHC_TIMER_COUNT_MASK;
384 if (hp->tuning_timer_count == 0xf)
385 hp->tuning_timer_count = 0;
386 if (hp->tuning_timer_count)
387 hp->tuning_timer_count =
388 1 << (hp->tuning_timer_count - 1);
389 }
390
391 /*
392 * Use DMA if the host system and the controller support it.
393 * Suports integrated or external DMA egine, with or without
394 * SDHC_DMA_ENABLE in the command.
395 */
396 if (ISSET(sc->sc_flags, SDHC_FLAG_FORCE_DMA) ||
397 (ISSET(sc->sc_flags, SDHC_FLAG_USE_DMA &&
398 ISSET(caps, SDHC_DMA_SUPPORT)))) {
399 SET(hp->flags, SHF_USE_DMA);
400
401 if (ISSET(sc->sc_flags, SDHC_FLAG_USE_ADMA2) &&
402 ISSET(caps, SDHC_ADMA2_SUPP)) {
403 SET(hp->flags, SHF_MODE_DMAEN);
404 /*
405 * 64-bit mode was present in the 2.00 spec, removed
406 * from 3.00, and re-added in 4.00 with a different
407 * descriptor layout. We only support 2.00 and 3.00
408 * descriptors for now.
409 */
410 if (hp->specver == SDHC_SPEC_VERS_200 &&
411 ISSET(caps, SDHC_64BIT_SYS_BUS)) {
412 SET(hp->flags, SHF_USE_ADMA2_64);
413 aprint_normal(", 64-bit ADMA2");
414 } else {
415 SET(hp->flags, SHF_USE_ADMA2_32);
416 aprint_normal(", 32-bit ADMA2");
417 }
418 } else {
419 if (!ISSET(sc->sc_flags, SDHC_FLAG_EXTERNAL_DMA) ||
420 ISSET(sc->sc_flags, SDHC_FLAG_EXTDMA_DMAEN))
421 SET(hp->flags, SHF_MODE_DMAEN);
422 if (sc->sc_vendor_transfer_data_dma) {
423 aprint_normal(", platform DMA");
424 } else {
425 aprint_normal(", SDMA");
426 }
427 }
428 } else {
429 aprint_normal(", PIO");
430 }
431
432 /*
433 * Determine the base clock frequency. (2.2.24)
434 */
435 if (hp->specver >= SDHC_SPEC_VERS_300) {
436 hp->clkbase = SDHC_BASE_V3_FREQ_KHZ(caps);
437 } else {
438 hp->clkbase = SDHC_BASE_FREQ_KHZ(caps);
439 }
440 if (hp->clkbase == 0 ||
441 ISSET(sc->sc_flags, SDHC_FLAG_NO_CLKBASE)) {
442 if (sc->sc_clkbase == 0) {
443 /* The attachment driver must tell us. */
444 aprint_error_dev(sc->sc_dev,
445 "unknown base clock frequency\n");
446 goto err;
447 }
448 hp->clkbase = sc->sc_clkbase;
449 }
450 if (hp->clkbase < 10000 || hp->clkbase > 10000 * 256) {
451 /* SDHC 1.0 supports only 10-63 MHz. */
452 aprint_error_dev(sc->sc_dev,
453 "base clock frequency out of range: %u MHz\n",
454 hp->clkbase / 1000);
455 goto err;
456 }
457 aprint_normal(", %u kHz", hp->clkbase);
458
459 /*
460 * XXX Set the data timeout counter value according to
461 * capabilities. (2.2.15)
462 */
463 HWRITE1(hp, SDHC_TIMEOUT_CTL, SDHC_TIMEOUT_MAX);
464 #if 1
465 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED))
466 HWRITE4(hp, SDHC_NINTR_STATUS, SDHC_CMD_TIMEOUT_ERROR << 16);
467 #endif
468
469 if (ISSET(caps, SDHC_EMBEDDED_SLOT))
470 aprint_normal(", embedded slot");
471
472 /*
473 * Determine SD bus voltage levels supported by the controller.
474 */
475 aprint_normal(",");
476 if (ISSET(caps, SDHC_HIGH_SPEED_SUPP)) {
477 SET(hp->ocr, MMC_OCR_HCS);
478 aprint_normal(" HS");
479 }
480 if (ISSET(caps2, SDHC_SDR50_SUPP)) {
481 SET(hp->ocr, MMC_OCR_S18A);
482 aprint_normal(" SDR50");
483 }
484 if (ISSET(caps2, SDHC_DDR50_SUPP)) {
485 SET(hp->ocr, MMC_OCR_S18A);
486 aprint_normal(" DDR50");
487 }
488 if (ISSET(caps2, SDHC_SDR104_SUPP)) {
489 SET(hp->ocr, MMC_OCR_S18A);
490 aprint_normal(" SDR104 HS200");
491 }
492 if (ISSET(caps, SDHC_VOLTAGE_SUPP_1_8V)) {
493 SET(hp->ocr, MMC_OCR_1_65V_1_95V);
494 aprint_normal(" 1.8V");
495 }
496 if (ISSET(caps, SDHC_VOLTAGE_SUPP_3_0V)) {
497 SET(hp->ocr, MMC_OCR_2_9V_3_0V | MMC_OCR_3_0V_3_1V);
498 aprint_normal(" 3.0V");
499 }
500 if (ISSET(caps, SDHC_VOLTAGE_SUPP_3_3V)) {
501 SET(hp->ocr, MMC_OCR_3_2V_3_3V | MMC_OCR_3_3V_3_4V);
502 aprint_normal(" 3.3V");
503 }
504 if (hp->specver >= SDHC_SPEC_VERS_300) {
505 aprint_normal(", re-tuning mode %d", retuning_mode + 1);
506 if (hp->tuning_timer_count)
507 aprint_normal(" (%us timer)", hp->tuning_timer_count);
508 }
509
510 /*
511 * Determine the maximum block length supported by the host
512 * controller. (2.2.24)
513 */
514 switch((caps >> SDHC_MAX_BLK_LEN_SHIFT) & SDHC_MAX_BLK_LEN_MASK) {
515 case SDHC_MAX_BLK_LEN_512:
516 hp->maxblklen = 512;
517 break;
518
519 case SDHC_MAX_BLK_LEN_1024:
520 hp->maxblklen = 1024;
521 break;
522
523 case SDHC_MAX_BLK_LEN_2048:
524 hp->maxblklen = 2048;
525 break;
526
527 case SDHC_MAX_BLK_LEN_4096:
528 hp->maxblklen = 4096;
529 break;
530
531 default:
532 aprint_error_dev(sc->sc_dev, "max block length unknown\n");
533 goto err;
534 }
535 aprint_normal(", %u byte blocks", hp->maxblklen);
536 aprint_normal("\n");
537
538 if (ISSET(hp->flags, SHF_USE_ADMA2_MASK)) {
539 int rseg;
540
541 /* Allocate ADMA2 descriptor memory */
542 error = bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE, PAGE_SIZE,
543 PAGE_SIZE, hp->adma_segs, 1, &rseg, BUS_DMA_WAITOK);
544 if (error) {
545 aprint_error_dev(sc->sc_dev,
546 "ADMA2 dmamem_alloc failed (%d)\n", error);
547 goto adma_done;
548 }
549 error = bus_dmamem_map(sc->sc_dmat, hp->adma_segs, rseg,
550 PAGE_SIZE, (void **)&hp->adma2, BUS_DMA_WAITOK);
551 if (error) {
552 aprint_error_dev(sc->sc_dev,
553 "ADMA2 dmamem_map failed (%d)\n", error);
554 goto adma_done;
555 }
556 error = bus_dmamap_create(sc->sc_dmat, PAGE_SIZE, 1, PAGE_SIZE,
557 0, BUS_DMA_WAITOK, &hp->adma_map);
558 if (error) {
559 aprint_error_dev(sc->sc_dev,
560 "ADMA2 dmamap_create failed (%d)\n", error);
561 goto adma_done;
562 }
563 error = bus_dmamap_load(sc->sc_dmat, hp->adma_map,
564 hp->adma2, PAGE_SIZE, NULL,
565 BUS_DMA_WAITOK|BUS_DMA_WRITE);
566 if (error) {
567 aprint_error_dev(sc->sc_dev,
568 "ADMA2 dmamap_load failed (%d)\n", error);
569 goto adma_done;
570 }
571
572 memset(hp->adma2, 0, PAGE_SIZE);
573
574 adma_done:
575 if (error)
576 CLR(hp->flags, SHF_USE_ADMA2_MASK);
577 }
578
579 /*
580 * Attach the generic SD/MMC bus driver. (The bus driver must
581 * not invoke any chipset functions before it is attached.)
582 */
583 memset(&saa, 0, sizeof(saa));
584 saa.saa_busname = "sdmmc";
585 saa.saa_sct = &sdhc_functions;
586 saa.saa_sch = hp;
587 saa.saa_dmat = hp->dmat;
588 saa.saa_clkmax = hp->clkbase;
589 if (ISSET(sc->sc_flags, SDHC_FLAG_HAVE_CGM))
590 saa.saa_clkmin = hp->clkbase / 256 / 2046;
591 else if (ISSET(sc->sc_flags, SDHC_FLAG_HAVE_DVS))
592 saa.saa_clkmin = hp->clkbase / 256 / 16;
593 else if (hp->sc->sc_clkmsk != 0)
594 saa.saa_clkmin = hp->clkbase / (hp->sc->sc_clkmsk >>
595 (ffs(hp->sc->sc_clkmsk) - 1));
596 else if (hp->specver >= SDHC_SPEC_VERS_300)
597 saa.saa_clkmin = hp->clkbase / 0x3ff;
598 else
599 saa.saa_clkmin = hp->clkbase / 256;
600 if (!ISSET(sc->sc_flags, SDHC_FLAG_NO_AUTO_STOP))
601 saa.saa_caps |= SMC_CAPS_AUTO_STOP;
602 saa.saa_caps |= SMC_CAPS_4BIT_MODE;
603 if (ISSET(sc->sc_flags, SDHC_FLAG_8BIT_MODE))
604 saa.saa_caps |= SMC_CAPS_8BIT_MODE;
605 if (ISSET(caps, SDHC_HIGH_SPEED_SUPP))
606 saa.saa_caps |= SMC_CAPS_SD_HIGHSPEED;
607 if (ISSET(caps2, SDHC_SDR104_SUPP))
608 saa.saa_caps |= SMC_CAPS_UHS_SDR104 |
609 SMC_CAPS_UHS_SDR50 |
610 SMC_CAPS_MMC_HS200;
611 if (ISSET(caps2, SDHC_SDR50_SUPP))
612 saa.saa_caps |= SMC_CAPS_UHS_SDR50;
613 if (ISSET(caps2, SDHC_DDR50_SUPP))
614 saa.saa_caps |= SMC_CAPS_UHS_DDR50;
615 if (ISSET(hp->flags, SHF_USE_DMA)) {
616 saa.saa_caps |= SMC_CAPS_DMA;
617 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED))
618 saa.saa_caps |= SMC_CAPS_MULTI_SEG_DMA;
619 }
620 if (ISSET(sc->sc_flags, SDHC_FLAG_SINGLE_ONLY))
621 saa.saa_caps |= SMC_CAPS_SINGLE_ONLY;
622 if (ISSET(sc->sc_flags, SDHC_FLAG_POLL_CARD_DET))
623 saa.saa_caps |= SMC_CAPS_POLL_CARD_DET;
624 hp->sdmmc = config_found(sc->sc_dev, &saa, sdhc_cfprint);
625
626 return 0;
627
628 err:
629 callout_destroy(&hp->tuning_timer);
630 cv_destroy(&hp->intr_cv);
631 mutex_destroy(&hp->intr_lock);
632 free(hp, M_DEVBUF);
633 sc->sc_host[--sc->sc_nhosts] = NULL;
634 err1:
635 return 1;
636 }
637
/*
 * Detach every host of this controller: detach the child sdmmc bus,
 * quiesce the hardware, and release locks, register mappings and ADMA2
 * DMA resources.  Returns the first config_detach() error (leaving the
 * remaining hosts attached), or 0 on success.
 */
int
sdhc_detach(struct sdhc_softc *sc, int flags)
{
	struct sdhc_host *hp;
	int rv = 0;

	for (size_t n = 0; n < sc->sc_nhosts; n++) {
		hp = sc->sc_host[n];
		if (hp == NULL)
			continue;
		/* The child bus must go away before we tear down state. */
		if (hp->sdmmc != NULL) {
			rv = config_detach(hp->sdmmc, flags);
			if (rv)
				break;
			hp->sdmmc = NULL;
		}
		/* disable interrupts */
		if ((flags & DETACH_FORCE) == 0) {
			mutex_enter(&hp->intr_lock);
			if (ISSET(hp->sc->sc_flags, SDHC_FLAG_32BIT_ACCESS)) {
				HWRITE4(hp, SDHC_NINTR_SIGNAL_EN, 0);
			} else {
				HWRITE2(hp, SDHC_NINTR_SIGNAL_EN, 0);
			}
			sdhc_soft_reset(hp, SDHC_RESET_ALL);
			mutex_exit(&hp->intr_lock);
		}
		callout_halt(&hp->tuning_timer, NULL);
		callout_destroy(&hp->tuning_timer);
		cv_destroy(&hp->intr_cv);
		mutex_destroy(&hp->intr_lock);
		/* ios == 0 means the register window was never mapped. */
		if (hp->ios > 0) {
			bus_space_unmap(hp->iot, hp->ioh, hp->ios);
			hp->ios = 0;
		}
		/* Undo the ADMA2 descriptor setup from sdhc_host_found(). */
		if (ISSET(hp->flags, SHF_USE_ADMA2_MASK)) {
			bus_dmamap_unload(sc->sc_dmat, hp->adma_map);
			bus_dmamap_destroy(sc->sc_dmat, hp->adma_map);
			bus_dmamem_unmap(sc->sc_dmat, hp->adma2, PAGE_SIZE);
			bus_dmamem_free(sc->sc_dmat, hp->adma_segs, 1);
		}
		free(hp, M_DEVBUF);
		sc->sc_host[n] = NULL;
	}

	return rv;
}
685
/*
 * pmf suspend hook: snapshot the first 14 bytes of each host's
 * register file into hp->regs so sdhc_resume() can restore them.
 */
bool
sdhc_suspend(device_t dev, const pmf_qual_t *qual)
{
	struct sdhc_softc *sc = device_private(dev);
	struct sdhc_host *hp;
	size_t i;

	/* XXX poll for command completion or suspend command
	 * in progress */

	/* Save the host controller state. */
	for (size_t n = 0; n < sc->sc_nhosts; n++) {
		hp = sc->sc_host[n];
		if (ISSET(sc->sc_flags, SDHC_FLAG_32BIT_ACCESS)) {
			/*
			 * Word-sized reads; the last word overlaps the end
			 * of the 14-byte save area, so only its low two
			 * bytes are kept.
			 */
			for (i = 0; i < sizeof hp->regs; i += 4) {
				uint32_t v = HREAD4(hp, i);
				hp->regs[i + 0] = (v >> 0);
				hp->regs[i + 1] = (v >> 8);
				if (i + 3 < sizeof hp->regs) {
					hp->regs[i + 2] = (v >> 16);
					hp->regs[i + 3] = (v >> 24);
				}
			}
		} else {
			for (i = 0; i < sizeof hp->regs; i++) {
				hp->regs[i] = HREAD1(hp, i);
			}
		}
	}
	return true;
}
717
/*
 * pmf resume hook: reset each host and write back the register
 * snapshot taken by sdhc_suspend().
 */
bool
sdhc_resume(device_t dev, const pmf_qual_t *qual)
{
	struct sdhc_softc *sc = device_private(dev);
	struct sdhc_host *hp;
	size_t i;

	/* Restore the host controller state. */
	for (size_t n = 0; n < sc->sc_nhosts; n++) {
		hp = sc->sc_host[n];
		(void)sdhc_host_reset(hp);
		if (ISSET(sc->sc_flags, SDHC_FLAG_32BIT_ACCESS)) {
			/*
			 * Word-sized writes, mirroring the save loop: the
			 * final word only has its low two bytes saved, so
			 * the upper half is written as zero.
			 */
			for (i = 0; i < sizeof hp->regs; i += 4) {
				if (i + 3 < sizeof hp->regs) {
					HWRITE4(hp, i,
					    (hp->regs[i + 0] << 0)
					    | (hp->regs[i + 1] << 8)
					    | (hp->regs[i + 2] << 16)
					    | (hp->regs[i + 3] << 24));
				} else {
					HWRITE4(hp, i,
					    (hp->regs[i + 0] << 0)
					    | (hp->regs[i + 1] << 8));
				}
			}
		} else {
			for (i = 0; i < sizeof hp->regs; i++) {
				HWRITE1(hp, i, hp->regs[i]);
			}
		}
	}
	return true;
}
751
752 bool
753 sdhc_shutdown(device_t dev, int flags)
754 {
755 struct sdhc_softc *sc = device_private(dev);
756 struct sdhc_host *hp;
757
758 /* XXX chip locks up if we don't disable it before reboot. */
759 for (size_t i = 0; i < sc->sc_nhosts; i++) {
760 hp = sc->sc_host[i];
761 (void)sdhc_host_reset(hp);
762 }
763 return true;
764 }
765
/*
 * Reset the host controller.  Called during initialization, when
 * cards are removed, upon resume, and during error recovery.
 *
 * Caller must hold hp->intr_lock.  Returns the sdhc_soft_reset()
 * error, or 0 on success.
 */
static int
sdhc_host_reset1(sdmmc_chipset_handle_t sch)
{
	struct sdhc_host *hp = (struct sdhc_host *)sch;
	uint32_t sdhcimask;
	int error;

	KASSERT(mutex_owned(&hp->intr_lock));

	/* Disable all interrupts. */
	if (ISSET(hp->sc->sc_flags, SDHC_FLAG_32BIT_ACCESS)) {
		HWRITE4(hp, SDHC_NINTR_SIGNAL_EN, 0);
	} else {
		HWRITE2(hp, SDHC_NINTR_SIGNAL_EN, 0);
	}

	/*
	 * Reset the entire host controller and wait up to 100ms for
	 * the controller to clear the reset bit.
	 */
	error = sdhc_soft_reset(hp, SDHC_RESET_ALL);
	if (error)
		goto out;

	/* Set data timeout counter value to max for now. */
	HWRITE1(hp, SDHC_TIMEOUT_CTL, SDHC_TIMEOUT_MAX);
#if 1
	if (ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED))
		HWRITE4(hp, SDHC_NINTR_STATUS, SDHC_CMD_TIMEOUT_ERROR << 16);
#endif

	/* Enable interrupts. */
	sdhcimask = SDHC_CARD_REMOVAL | SDHC_CARD_INSERTION |
	    SDHC_BUFFER_READ_READY | SDHC_BUFFER_WRITE_READY |
	    SDHC_DMA_INTERRUPT | SDHC_BLOCK_GAP_EVENT |
	    SDHC_TRANSFER_COMPLETE | SDHC_COMMAND_COMPLETE;
	if (ISSET(hp->sc->sc_flags, SDHC_FLAG_32BIT_ACCESS)) {
		/*
		 * 32-bit access: the error bits occupy the high half of
		 * the combined status/signal registers.  The XORs swap
		 * the status mask for the signal mask in the high half,
		 * and drop the buffer-ready bits from the signal enable
		 * (those are polled, not signalled).
		 */
		sdhcimask |= SDHC_EINTR_STATUS_MASK << 16;
		HWRITE4(hp, SDHC_NINTR_STATUS_EN, sdhcimask);
		sdhcimask ^=
		    (SDHC_EINTR_STATUS_MASK ^ SDHC_EINTR_SIGNAL_MASK) << 16;
		sdhcimask ^= SDHC_BUFFER_READ_READY ^ SDHC_BUFFER_WRITE_READY;
		HWRITE4(hp, SDHC_NINTR_SIGNAL_EN, sdhcimask);
	} else {
		HWRITE2(hp, SDHC_NINTR_STATUS_EN, sdhcimask);
		HWRITE2(hp, SDHC_EINTR_STATUS_EN, SDHC_EINTR_STATUS_MASK);
		/* Buffer-ready events are polled; don't signal them. */
		sdhcimask ^= SDHC_BUFFER_READ_READY ^ SDHC_BUFFER_WRITE_READY;
		HWRITE2(hp, SDHC_NINTR_SIGNAL_EN, sdhcimask);
		HWRITE2(hp, SDHC_EINTR_SIGNAL_EN, SDHC_EINTR_SIGNAL_MASK);
	}

out:
	return error;
}
824
825 static int
826 sdhc_host_reset(sdmmc_chipset_handle_t sch)
827 {
828 struct sdhc_host *hp = (struct sdhc_host *)sch;
829 int error;
830
831 mutex_enter(&hp->intr_lock);
832 error = sdhc_host_reset1(sch);
833 mutex_exit(&hp->intr_lock);
834
835 return error;
836 }
837
838 static uint32_t
839 sdhc_host_ocr(sdmmc_chipset_handle_t sch)
840 {
841 struct sdhc_host *hp = (struct sdhc_host *)sch;
842
843 return hp->ocr;
844 }
845
846 static int
847 sdhc_host_maxblklen(sdmmc_chipset_handle_t sch)
848 {
849 struct sdhc_host *hp = (struct sdhc_host *)sch;
850
851 return hp->maxblklen;
852 }
853
854 /*
855 * Return non-zero if the card is currently inserted.
856 */
857 static int
858 sdhc_card_detect(sdmmc_chipset_handle_t sch)
859 {
860 struct sdhc_host *hp = (struct sdhc_host *)sch;
861 int r;
862
863 if (hp->sc->sc_vendor_card_detect)
864 return (*hp->sc->sc_vendor_card_detect)(hp->sc);
865
866 r = ISSET(HREAD4(hp, SDHC_PRESENT_STATE), SDHC_CARD_INSERTED);
867
868 return r ? 1 : 0;
869 }
870
871 /*
872 * Return non-zero if the card is currently write-protected.
873 */
874 static int
875 sdhc_write_protect(sdmmc_chipset_handle_t sch)
876 {
877 struct sdhc_host *hp = (struct sdhc_host *)sch;
878 int r;
879
880 if (hp->sc->sc_vendor_write_protect)
881 return (*hp->sc->sc_vendor_write_protect)(hp->sc);
882
883 r = ISSET(HREAD4(hp, SDHC_PRESENT_STATE), SDHC_WRITE_PROTECT_SWITCH);
884
885 return r ? 0 : 1;
886 }
887
/*
 * Set or change SD bus voltage and enable or disable SD bus power.
 * Return zero on success, EINVAL for an unsupported voltage, or
 * ENXIO when the controller refuses to power the bus.
 */
static int
sdhc_bus_power(sdmmc_chipset_handle_t sch, uint32_t ocr)
{
	struct sdhc_host *hp = (struct sdhc_host *)sch;
	uint8_t vdd;
	int error = 0;
	/* Mask that clears the power-on bit and the voltage field. */
	const uint32_t pcmask =
	    ~(SDHC_BUS_POWER | (SDHC_VOLTAGE_MASK << SDHC_VOLTAGE_SHIFT));

	mutex_enter(&hp->intr_lock);

	/*
	 * Disable bus power before voltage change.
	 */
	if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_32BIT_ACCESS)
	    && !ISSET(hp->sc->sc_flags, SDHC_FLAG_NO_PWR0))
		HWRITE1(hp, SDHC_POWER_CTL, 0);

	/* If power is disabled, reset the host and return now. */
	if (ocr == 0) {
		(void)sdhc_host_reset1(hp);
		callout_halt(&hp->tuning_timer, &hp->intr_lock);
		goto out;
	}

	/*
	 * Select the lowest voltage according to capabilities.
	 */
	ocr &= hp->ocr;
	if (ISSET(ocr, MMC_OCR_1_65V_1_95V)) {
		vdd = SDHC_VOLTAGE_1_8V;
	} else if (ISSET(ocr, MMC_OCR_2_9V_3_0V|MMC_OCR_3_0V_3_1V)) {
		vdd = SDHC_VOLTAGE_3_0V;
	} else if (ISSET(ocr, MMC_OCR_3_2V_3_3V|MMC_OCR_3_3V_3_4V)) {
		vdd = SDHC_VOLTAGE_3_3V;
	} else {
		/* Unsupported voltage level requested. */
		error = EINVAL;
		goto out;
	}

	if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED)) {
		/*
		 * Enable bus power.  Wait at least 1 ms (or 74 clocks) plus
		 * voltage ramp until power rises.
		 */

		if (ISSET(hp->sc->sc_flags, SDHC_FLAG_SINGLE_POWER_WRITE)) {
			/* Some controllers need voltage+power in one write. */
			HWRITE1(hp, SDHC_POWER_CTL,
			    (vdd << SDHC_VOLTAGE_SHIFT) | SDHC_BUS_POWER);
		} else {
			/* Standard sequence: voltage first, then power on. */
			HWRITE1(hp, SDHC_POWER_CTL,
			    HREAD1(hp, SDHC_POWER_CTL) & pcmask);
			sdmmc_delay(1);
			HWRITE1(hp, SDHC_POWER_CTL,
			    (vdd << SDHC_VOLTAGE_SHIFT));
			sdmmc_delay(1);
			HSET1(hp, SDHC_POWER_CTL, SDHC_BUS_POWER);
			sdmmc_delay(10000);
		}

		/*
		 * The host system may not power the bus due to battery low,
		 * etc.  In that case, the host controller should clear the
		 * bus power bit.
		 */
		if (!ISSET(HREAD1(hp, SDHC_POWER_CTL), SDHC_BUS_POWER)) {
			error = ENXIO;
			goto out;
		}
	}

out:
	mutex_exit(&hp->intr_lock);

	return error;
}
969
970 /*
971 * Return the smallest possible base clock frequency divisor value
972 * for the CLOCK_CTL register to produce `freq' (KHz).
973 */
974 static bool
975 sdhc_clock_divisor(struct sdhc_host *hp, u_int freq, u_int *divp)
976 {
977 u_int div;
978
979 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_HAVE_CGM)) {
980 for (div = hp->clkbase / freq; div <= 0x3ff; div++) {
981 if ((hp->clkbase / div) <= freq) {
982 *divp = SDHC_SDCLK_CGM
983 | ((div & 0x300) << SDHC_SDCLK_XDIV_SHIFT)
984 | ((div & 0x0ff) << SDHC_SDCLK_DIV_SHIFT);
985 //freq = hp->clkbase / div;
986 return true;
987 }
988 }
989 /* No divisor found. */
990 return false;
991 }
992 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_HAVE_DVS)) {
993 u_int dvs = (hp->clkbase + freq - 1) / freq;
994 u_int roundup = dvs & 1;
995 for (dvs >>= 1, div = 1; div <= 256; div <<= 1, dvs >>= 1) {
996 if (dvs + roundup <= 16) {
997 dvs += roundup - 1;
998 *divp = (div << SDHC_SDCLK_DIV_SHIFT)
999 | (dvs << SDHC_SDCLK_DVS_SHIFT);
1000 DPRINTF(2,
1001 ("%s: divisor for freq %u is %u * %u\n",
1002 HDEVNAME(hp), freq, div * 2, dvs + 1));
1003 //freq = hp->clkbase / (div * 2) * (dvs + 1);
1004 return true;
1005 }
1006 /*
1007 * If we drop bits, we need to round up the divisor.
1008 */
1009 roundup |= dvs & 1;
1010 }
1011 /* No divisor found. */
1012 return false;
1013 }
1014 if (hp->sc->sc_clkmsk != 0) {
1015 div = howmany(hp->clkbase, freq);
1016 if (div > (hp->sc->sc_clkmsk >> (ffs(hp->sc->sc_clkmsk) - 1)))
1017 return false;
1018 *divp = div << (ffs(hp->sc->sc_clkmsk) - 1);
1019 //freq = hp->clkbase / div;
1020 return true;
1021 }
1022 if (hp->specver >= SDHC_SPEC_VERS_300) {
1023 div = howmany(hp->clkbase, freq);
1024 div = div > 1 ? howmany(div, 2) : 0;
1025 if (div > 0x3ff)
1026 return false;
1027 *divp = (((div >> 8) & SDHC_SDCLK_XDIV_MASK)
1028 << SDHC_SDCLK_XDIV_SHIFT) |
1029 (((div >> 0) & SDHC_SDCLK_DIV_MASK)
1030 << SDHC_SDCLK_DIV_SHIFT);
1031 //freq = hp->clkbase / (div ? div * 2 : 1);
1032 return true;
1033 } else {
1034 for (div = 1; div <= 256; div *= 2) {
1035 if ((hp->clkbase / div) <= freq) {
1036 *divp = (div / 2) << SDHC_SDCLK_DIV_SHIFT;
1037 //freq = hp->clkbase / div;
1038 return true;
1039 }
1040 }
1041 /* No divisor found. */
1042 return false;
1043 }
1044 /* No divisor found. */
1045 return false;
1046 }
1047
1048 /*
1049 * Set or change SDCLK frequency or disable the SD clock.
1050 * Return zero on success.
1051 */
1052 static int
1053 sdhc_bus_clock_ddr(sdmmc_chipset_handle_t sch, int freq, bool ddr)
1054 {
1055 struct sdhc_host *hp = (struct sdhc_host *)sch;
1056 u_int div;
1057 u_int timo;
1058 int16_t reg;
1059 int error = 0;
1060 bool present __diagused;
1061
1062 mutex_enter(&hp->intr_lock);
1063
1064 #ifdef DIAGNOSTIC
1065 present = ISSET(HREAD4(hp, SDHC_PRESENT_STATE), SDHC_CMD_INHIBIT_MASK);
1066
1067 /* Must not stop the clock if commands are in progress. */
1068 if (present && sdhc_card_detect(hp)) {
1069 aprint_normal_dev(hp->sc->sc_dev,
1070 "%s: command in progress\n", __func__);
1071 }
1072 #endif
1073
1074 if (hp->sc->sc_vendor_bus_clock) {
1075 error = (*hp->sc->sc_vendor_bus_clock)(hp->sc, freq);
1076 if (error != 0)
1077 goto out;
1078 }
1079
1080 /*
1081 * Stop SD clock before changing the frequency.
1082 */
1083 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_USDHC)) {
1084 HCLR4(hp, SDHC_VEND_SPEC,
1085 SDHC_VEND_SPEC_CARD_CLK_SOFT_EN |
1086 SDHC_VEND_SPEC_FRC_SDCLK_ON);
1087 if (freq == SDMMC_SDCLK_OFF) {
1088 goto out;
1089 }
1090 } else if (ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED)) {
1091 HCLR4(hp, SDHC_CLOCK_CTL, 0xfff8);
1092 if (freq == SDMMC_SDCLK_OFF) {
1093 HSET4(hp, SDHC_CLOCK_CTL, 0x80f0);
1094 goto out;
1095 }
1096 } else {
1097 HCLR2(hp, SDHC_CLOCK_CTL, SDHC_SDCLK_ENABLE);
1098 if (freq == SDMMC_SDCLK_OFF)
1099 goto out;
1100 }
1101
1102 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_USDHC)) {
1103 if (ddr)
1104 HSET4(hp, SDHC_MIX_CTRL, SDHC_USDHC_DDR_EN);
1105 else
1106 HCLR4(hp, SDHC_MIX_CTRL, SDHC_USDHC_DDR_EN);
1107 } else if (hp->specver >= SDHC_SPEC_VERS_300) {
1108 HCLR2(hp, SDHC_HOST_CTL2, SDHC_UHS_MODE_SELECT_MASK);
1109 if (freq > 100000) {
1110 HSET2(hp, SDHC_HOST_CTL2, SDHC_UHS_MODE_SELECT_SDR104);
1111 } else if (freq > 50000) {
1112 if (ddr) {
1113 HSET2(hp, SDHC_HOST_CTL2,
1114 SDHC_UHS_MODE_SELECT_DDR50);
1115 } else {
1116 HSET2(hp, SDHC_HOST_CTL2,
1117 SDHC_UHS_MODE_SELECT_SDR50);
1118 }
1119 } else if (freq > 25000) {
1120 if (ddr) {
1121 HSET2(hp, SDHC_HOST_CTL2,
1122 SDHC_UHS_MODE_SELECT_DDR50);
1123 } else {
1124 HSET2(hp, SDHC_HOST_CTL2,
1125 SDHC_UHS_MODE_SELECT_SDR25);
1126 }
1127 } else if (freq > 400) {
1128 HSET2(hp, SDHC_HOST_CTL2, SDHC_UHS_MODE_SELECT_SDR12);
1129 }
1130 }
1131
1132 /*
1133 * Slow down Ricoh 5U823 controller that isn't reliable
1134 * at 100MHz bus clock.
1135 */
1136 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_SLOW_SDR50)) {
1137 if (freq == 100000)
1138 --freq;
1139 }
1140
1141 /*
1142 * Set the minimum base clock frequency divisor.
1143 */
1144 if (!sdhc_clock_divisor(hp, freq, &div)) {
1145 /* Invalid base clock frequency or `freq' value. */
1146 aprint_error_dev(hp->sc->sc_dev,
1147 "Invalid bus clock %d kHz\n", freq);
1148 error = EINVAL;
1149 goto out;
1150 }
1151 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_USDHC)) {
1152 if (ddr) {
1153 /* in ddr mode, divisor >>= 1 */
1154 div = ((div >> 1) & (SDHC_SDCLK_DIV_MASK <<
1155 SDHC_SDCLK_DIV_SHIFT)) |
1156 (div & (SDHC_SDCLK_DVS_MASK <<
1157 SDHC_SDCLK_DVS_SHIFT));
1158 }
1159 for (timo = 1000; timo > 0; timo--) {
1160 if (ISSET(HREAD4(hp, SDHC_PRESENT_STATE), SDHC_SDSTB))
1161 break;
1162 sdmmc_delay(10);
1163 }
1164 HWRITE4(hp, SDHC_CLOCK_CTL,
1165 div | (SDHC_TIMEOUT_MAX << 16) | 0x0f);
1166 } else if (ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED)) {
1167 HWRITE4(hp, SDHC_CLOCK_CTL,
1168 div | (SDHC_TIMEOUT_MAX << 16));
1169 } else {
1170 reg = HREAD2(hp, SDHC_CLOCK_CTL);
1171 reg &= (SDHC_INTCLK_STABLE | SDHC_INTCLK_ENABLE);
1172 HWRITE2(hp, SDHC_CLOCK_CTL, reg | div);
1173 }
1174
1175 /*
1176 * Start internal clock. Wait 10ms for stabilization.
1177 */
1178 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_USDHC)) {
1179 HSET4(hp, SDHC_VEND_SPEC,
1180 SDHC_VEND_SPEC_CARD_CLK_SOFT_EN |
1181 SDHC_VEND_SPEC_FRC_SDCLK_ON);
1182 } else if (ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED)) {
1183 sdmmc_delay(10000);
1184 HSET4(hp, SDHC_CLOCK_CTL,
1185 8 | SDHC_INTCLK_ENABLE | SDHC_INTCLK_STABLE);
1186 } else {
1187 HSET2(hp, SDHC_CLOCK_CTL, SDHC_INTCLK_ENABLE);
1188 for (timo = 1000; timo > 0; timo--) {
1189 if (ISSET(HREAD2(hp, SDHC_CLOCK_CTL),
1190 SDHC_INTCLK_STABLE))
1191 break;
1192 sdmmc_delay(10);
1193 }
1194 if (timo == 0) {
1195 error = ETIMEDOUT;
1196 DPRINTF(1,("%s: timeout\n", __func__));
1197 goto out;
1198 }
1199 }
1200
1201 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED | SDHC_FLAG_USDHC)) {
1202 HSET1(hp, SDHC_SOFTWARE_RESET, SDHC_INIT_ACTIVE);
1203 /*
1204 * Sending 80 clocks at 400kHz takes 200us.
1205 * So delay for that time + slop and then
1206 * check a few times for completion.
1207 */
1208 sdmmc_delay(210);
1209 for (timo = 10; timo > 0; timo--) {
1210 if (!ISSET(HREAD1(hp, SDHC_SOFTWARE_RESET),
1211 SDHC_INIT_ACTIVE))
1212 break;
1213 sdmmc_delay(10);
1214 }
1215 DPRINTF(2,("%s: %u init spins\n", __func__, 10 - timo));
1216
1217 /*
1218 * Enable SD clock.
1219 */
1220 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_USDHC)) {
1221 HSET4(hp, SDHC_VEND_SPEC,
1222 SDHC_VEND_SPEC_CARD_CLK_SOFT_EN |
1223 SDHC_VEND_SPEC_FRC_SDCLK_ON);
1224 } else {
1225 HSET4(hp, SDHC_CLOCK_CTL, SDHC_SDCLK_ENABLE);
1226 }
1227 } else {
1228 /*
1229 * Enable SD clock.
1230 */
1231 HSET2(hp, SDHC_CLOCK_CTL, SDHC_SDCLK_ENABLE);
1232
1233 if (freq > 25000 &&
1234 !ISSET(hp->sc->sc_flags, SDHC_FLAG_NO_HS_BIT))
1235 HSET1(hp, SDHC_HOST_CTL, SDHC_HIGH_SPEED);
1236 else
1237 HCLR1(hp, SDHC_HOST_CTL, SDHC_HIGH_SPEED);
1238 }
1239
1240 out:
1241 mutex_exit(&hp->intr_lock);
1242
1243 return error;
1244 }
1245
1246 static int
1247 sdhc_bus_width(sdmmc_chipset_handle_t sch, int width)
1248 {
1249 struct sdhc_host *hp = (struct sdhc_host *)sch;
1250 int reg;
1251
1252 switch (width) {
1253 case 1:
1254 case 4:
1255 break;
1256
1257 case 8:
1258 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_8BIT_MODE))
1259 break;
1260 /* FALLTHROUGH */
1261 default:
1262 DPRINTF(0,("%s: unsupported bus width (%d)\n",
1263 HDEVNAME(hp), width));
1264 return 1;
1265 }
1266
1267 if (hp->sc->sc_vendor_bus_width) {
1268 const int error = hp->sc->sc_vendor_bus_width(hp->sc, width);
1269 if (error != 0)
1270 return error;
1271 }
1272
1273 mutex_enter(&hp->intr_lock);
1274
1275 reg = HREAD1(hp, SDHC_HOST_CTL);
1276 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED | SDHC_FLAG_USDHC)) {
1277 reg &= ~(SDHC_4BIT_MODE|SDHC_ESDHC_8BIT_MODE);
1278 if (width == 4)
1279 reg |= SDHC_4BIT_MODE;
1280 else if (width == 8)
1281 reg |= SDHC_ESDHC_8BIT_MODE;
1282 } else {
1283 reg &= ~SDHC_4BIT_MODE;
1284 if (hp->specver >= SDHC_SPEC_VERS_300) {
1285 reg &= ~SDHC_8BIT_MODE;
1286 }
1287 if (width == 4) {
1288 reg |= SDHC_4BIT_MODE;
1289 } else if (width == 8 && hp->specver >= SDHC_SPEC_VERS_300) {
1290 reg |= SDHC_8BIT_MODE;
1291 }
1292 }
1293 HWRITE1(hp, SDHC_HOST_CTL, reg);
1294
1295 mutex_exit(&hp->intr_lock);
1296
1297 return 0;
1298 }
1299
1300 static int
1301 sdhc_bus_rod(sdmmc_chipset_handle_t sch, int on)
1302 {
1303 struct sdhc_host *hp = (struct sdhc_host *)sch;
1304
1305 if (hp->sc->sc_vendor_rod)
1306 return (*hp->sc->sc_vendor_rod)(hp->sc, on);
1307
1308 return 0;
1309 }
1310
1311 static void
1312 sdhc_card_enable_intr(sdmmc_chipset_handle_t sch, int enable)
1313 {
1314 struct sdhc_host *hp = (struct sdhc_host *)sch;
1315
1316 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED | SDHC_FLAG_USDHC)) {
1317 mutex_enter(&hp->intr_lock);
1318 if (enable) {
1319 HSET2(hp, SDHC_NINTR_STATUS_EN, SDHC_CARD_INTERRUPT);
1320 HSET2(hp, SDHC_NINTR_SIGNAL_EN, SDHC_CARD_INTERRUPT);
1321 } else {
1322 HCLR2(hp, SDHC_NINTR_SIGNAL_EN, SDHC_CARD_INTERRUPT);
1323 HCLR2(hp, SDHC_NINTR_STATUS_EN, SDHC_CARD_INTERRUPT);
1324 }
1325 mutex_exit(&hp->intr_lock);
1326 }
1327 }
1328
1329 static void
1330 sdhc_card_intr_ack(sdmmc_chipset_handle_t sch)
1331 {
1332 struct sdhc_host *hp = (struct sdhc_host *)sch;
1333
1334 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED | SDHC_FLAG_USDHC)) {
1335 mutex_enter(&hp->intr_lock);
1336 HSET2(hp, SDHC_NINTR_STATUS_EN, SDHC_CARD_INTERRUPT);
1337 mutex_exit(&hp->intr_lock);
1338 }
1339 }
1340
/*
 * Switch the I/O signalling voltage between 3.3V and 1.8V.
 * Requires a spec 3.00+ host; returns EINVAL otherwise.
 *
 * Note the ordering differs by direction: going to 1.8V the vendor
 * hook runs before the 1.8V enable bit is set; going back to 3.3V
 * the bit is cleared before the vendor hook runs.
 */
static int
sdhc_signal_voltage(sdmmc_chipset_handle_t sch, int signal_voltage)
{
	struct sdhc_host *hp = (struct sdhc_host *)sch;
	int error = 0;

	if (hp->specver < SDHC_SPEC_VERS_300)
		return EINVAL;

	mutex_enter(&hp->intr_lock);
	switch (signal_voltage) {
	case SDMMC_SIGNAL_VOLTAGE_180:
		/* Vendor hook first; bail without touching HOST_CTL2. */
		if (hp->sc->sc_vendor_signal_voltage != NULL) {
			error = hp->sc->sc_vendor_signal_voltage(hp->sc,
			    signal_voltage);
			if (error != 0)
				break;
		}
		/* uSDHC manages signalling voltage elsewhere. */
		if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_USDHC))
			HSET2(hp, SDHC_HOST_CTL2, SDHC_1_8V_SIGNAL_EN);
		break;
	case SDMMC_SIGNAL_VOLTAGE_330:
		/* Clear the 1.8V bit before calling the vendor hook. */
		if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_USDHC))
			HCLR2(hp, SDHC_HOST_CTL2, SDHC_1_8V_SIGNAL_EN);
		if (hp->sc->sc_vendor_signal_voltage != NULL) {
			error = hp->sc->sc_vendor_signal_voltage(hp->sc,
			    signal_voltage);
			if (error != 0)
				break;
		}
		break;
	default:
		error = EINVAL;
		break;
	}
	mutex_exit(&hp->intr_lock);

	return error;
}
1380
1381 /*
1382 * Sampling clock tuning procedure (UHS)
1383 */
1384 static int
1385 sdhc_execute_tuning1(struct sdhc_host *hp, int timing)
1386 {
1387 struct sdmmc_command cmd;
1388 uint8_t hostctl;
1389 int opcode, error, retry = 40;
1390
1391 KASSERT(mutex_owned(&hp->intr_lock));
1392
1393 hp->tuning_timing = timing;
1394
1395 switch (timing) {
1396 case SDMMC_TIMING_MMC_HS200:
1397 opcode = MMC_SEND_TUNING_BLOCK_HS200;
1398 break;
1399 case SDMMC_TIMING_UHS_SDR50:
1400 if (!ISSET(hp->sc->sc_caps2, SDHC_TUNING_SDR50))
1401 return 0;
1402 /* FALLTHROUGH */
1403 case SDMMC_TIMING_UHS_SDR104:
1404 opcode = MMC_SEND_TUNING_BLOCK;
1405 break;
1406 default:
1407 return EINVAL;
1408 }
1409
1410 hostctl = HREAD1(hp, SDHC_HOST_CTL);
1411
1412 /* enable buffer read ready interrupt */
1413 HSET2(hp, SDHC_NINTR_SIGNAL_EN, SDHC_BUFFER_READ_READY);
1414 HSET2(hp, SDHC_NINTR_STATUS_EN, SDHC_BUFFER_READ_READY);
1415
1416 /* disable DMA */
1417 HCLR1(hp, SDHC_HOST_CTL, SDHC_DMA_SELECT);
1418
1419 /* reset tuning circuit */
1420 HCLR2(hp, SDHC_HOST_CTL2, SDHC_SAMPLING_CLOCK_SEL);
1421
1422 /* start of tuning */
1423 HWRITE2(hp, SDHC_HOST_CTL2, SDHC_EXECUTE_TUNING);
1424
1425 do {
1426 memset(&cmd, 0, sizeof(cmd));
1427 cmd.c_opcode = opcode;
1428 cmd.c_arg = 0;
1429 cmd.c_flags = SCF_CMD_ADTC | SCF_CMD_READ | SCF_RSP_R1;
1430 if (ISSET(hostctl, SDHC_8BIT_MODE)) {
1431 cmd.c_blklen = cmd.c_datalen = 128;
1432 } else {
1433 cmd.c_blklen = cmd.c_datalen = 64;
1434 }
1435
1436 error = sdhc_start_command(hp, &cmd);
1437 if (error)
1438 break;
1439
1440 if (!sdhc_wait_intr(hp, SDHC_BUFFER_READ_READY,
1441 SDHC_TUNING_TIMEOUT, false)) {
1442 break;
1443 }
1444
1445 delay(1000);
1446 } while (HREAD2(hp, SDHC_HOST_CTL2) & SDHC_EXECUTE_TUNING && --retry);
1447
1448 /* disable buffer read ready interrupt */
1449 HCLR2(hp, SDHC_NINTR_SIGNAL_EN, SDHC_BUFFER_READ_READY);
1450 HCLR2(hp, SDHC_NINTR_STATUS_EN, SDHC_BUFFER_READ_READY);
1451
1452 if (HREAD2(hp, SDHC_HOST_CTL2) & SDHC_EXECUTE_TUNING) {
1453 HCLR2(hp, SDHC_HOST_CTL2,
1454 SDHC_SAMPLING_CLOCK_SEL|SDHC_EXECUTE_TUNING);
1455 sdhc_soft_reset(hp, SDHC_RESET_DAT|SDHC_RESET_CMD);
1456 aprint_error_dev(hp->sc->sc_dev,
1457 "tuning did not complete, using fixed sampling clock\n");
1458 return EIO; /* tuning did not complete */
1459 }
1460
1461 if ((HREAD2(hp, SDHC_HOST_CTL2) & SDHC_SAMPLING_CLOCK_SEL) == 0) {
1462 HCLR2(hp, SDHC_HOST_CTL2,
1463 SDHC_SAMPLING_CLOCK_SEL|SDHC_EXECUTE_TUNING);
1464 sdhc_soft_reset(hp, SDHC_RESET_DAT|SDHC_RESET_CMD);
1465 aprint_error_dev(hp->sc->sc_dev,
1466 "tuning failed, using fixed sampling clock\n");
1467 return EIO; /* tuning failed */
1468 }
1469
1470 if (hp->tuning_timer_count) {
1471 callout_schedule(&hp->tuning_timer,
1472 hz * hp->tuning_timer_count);
1473 }
1474
1475 return 0; /* tuning completed */
1476 }
1477
1478 static int
1479 sdhc_execute_tuning(sdmmc_chipset_handle_t sch, int timing)
1480 {
1481 struct sdhc_host *hp = (struct sdhc_host *)sch;
1482 int error;
1483
1484 mutex_enter(&hp->intr_lock);
1485 error = sdhc_execute_tuning1(hp, timing);
1486 mutex_exit(&hp->intr_lock);
1487 return error;
1488 }
1489
/*
 * Re-tuning callout handler: atomically mark re-tuning as pending.
 * The flag is consumed in sdhc_exec_command(), which re-runs the
 * tuning procedure before the next command.
 */
static void
sdhc_tuning_timer(void *arg)
{
	struct sdhc_host *hp = arg;

	atomic_swap_uint(&hp->tuning_timer_pending, 1);
}
1497
1498 static void
1499 sdhc_hw_reset(sdmmc_chipset_handle_t sch)
1500 {
1501 struct sdhc_host *hp = (struct sdhc_host *)sch;
1502 struct sdhc_softc *sc = hp->sc;
1503
1504 if (sc->sc_vendor_hw_reset != NULL)
1505 sc->sc_vendor_hw_reset(sc, hp);
1506 }
1507
1508 static int
1509 sdhc_wait_state(struct sdhc_host *hp, uint32_t mask, uint32_t value)
1510 {
1511 uint32_t state;
1512 int timeout;
1513
1514 for (timeout = 10000; timeout > 0; timeout--) {
1515 if (((state = HREAD4(hp, SDHC_PRESENT_STATE)) & mask) == value)
1516 return 0;
1517 sdmmc_delay(10);
1518 }
1519 aprint_error_dev(hp->sc->sc_dev, "timeout waiting for mask %#x value %#x (state=%#x)\n",
1520 mask, value, state);
1521 return ETIMEDOUT;
1522 }
1523
/*
 * Execute an MMC/SD command synchronously: issue it, wait for the
 * command phase, collect the response, and run any data transfer.
 * The result is reported through cmd->c_error and SCF_ITSDONE.
 */
static void
sdhc_exec_command(sdmmc_chipset_handle_t sch, struct sdmmc_command *cmd)
{
	struct sdhc_host *hp = (struct sdhc_host *)sch;
	int error;
	bool probing;

	mutex_enter(&hp->intr_lock);

	/* Re-run the tuning procedure first if the re-tune timer fired. */
	if (atomic_cas_uint(&hp->tuning_timer_pending, 1, 0) == 1) {
		(void)sdhc_execute_tuning1(hp, hp->tuning_timing);
	}

	/*
	 * On eSDHC/uSDHC, buffer-ready interrupts are only wanted for
	 * PIO transfers; mask them when DMA will move the data.
	 */
	if (cmd->c_data &&
	    ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED | SDHC_FLAG_USDHC)) {
		const uint16_t ready = SDHC_BUFFER_READ_READY | SDHC_BUFFER_WRITE_READY;
		if (ISSET(hp->flags, SHF_USE_DMA)) {
			HCLR2(hp, SDHC_NINTR_SIGNAL_EN, ready);
			HCLR2(hp, SDHC_NINTR_STATUS_EN, ready);
		} else {
			HSET2(hp, SDHC_NINTR_SIGNAL_EN, ready);
			HSET2(hp, SDHC_NINTR_STATUS_EN, ready);
		}
	}

	/*
	 * NO_TIMEOUT quirk: suppress the command-timeout error
	 * interrupt for data commands, enable it otherwise.
	 */
	if (ISSET(hp->sc->sc_flags, SDHC_FLAG_NO_TIMEOUT)) {
		const uint16_t eintr = SDHC_CMD_TIMEOUT_ERROR;
		if (cmd->c_data != NULL) {
			HCLR2(hp, SDHC_EINTR_SIGNAL_EN, eintr);
			HCLR2(hp, SDHC_EINTR_STATUS_EN, eintr);
		} else {
			HSET2(hp, SDHC_EINTR_SIGNAL_EN, eintr);
			HSET2(hp, SDHC_EINTR_STATUS_EN, eintr);
		}
	}

	/*
	 * Start the MMC command, or mark `cmd' as failed and return.
	 */
	error = sdhc_start_command(hp, cmd);
	if (error) {
		cmd->c_error = error;
		goto out;
	}

	/*
	 * Wait until the command phase is done, or until the command
	 * is marked done for any other reason.
	 */
	probing = (cmd->c_flags & SCF_TOUT_OK) != 0;
	if (!sdhc_wait_intr(hp, SDHC_COMMAND_COMPLETE, SDHC_COMMAND_TIMEOUT, probing)) {
		DPRINTF(1,("%s: timeout for command\n", __func__));
		sdmmc_delay(50);
		cmd->c_error = ETIMEDOUT;
		goto out;
	}

	/*
	 * The host controller removes bits [0:7] from the response
	 * data (CRC) and we pass the data up unchanged to the bus
	 * driver (without padding).
	 */
	if (cmd->c_error == 0 && ISSET(cmd->c_flags, SCF_RSP_PRESENT)) {
		cmd->c_resp[0] = HREAD4(hp, SDHC_RESPONSE + 0);
		if (ISSET(cmd->c_flags, SCF_RSP_136)) {
			cmd->c_resp[1] = HREAD4(hp, SDHC_RESPONSE + 4);
			cmd->c_resp[2] = HREAD4(hp, SDHC_RESPONSE + 8);
			cmd->c_resp[3] = HREAD4(hp, SDHC_RESPONSE + 12);
			/*
			 * RSP136_CRC quirk: the controller kept the CRC
			 * byte, so shift the 136-bit response right by 8.
			 */
			if (ISSET(hp->sc->sc_flags, SDHC_FLAG_RSP136_CRC)) {
				cmd->c_resp[0] = (cmd->c_resp[0] >> 8) |
				    (cmd->c_resp[1] << 24);
				cmd->c_resp[1] = (cmd->c_resp[1] >> 8) |
				    (cmd->c_resp[2] << 24);
				cmd->c_resp[2] = (cmd->c_resp[2] >> 8) |
				    (cmd->c_resp[3] << 24);
				cmd->c_resp[3] = (cmd->c_resp[3] >> 8);
			}
		}
	}
	DPRINTF(1,("%s: resp = %08x\n", HDEVNAME(hp), cmd->c_resp[0]));

	/*
	 * If the command has data to transfer in any direction,
	 * execute the transfer now.
	 */
	if (cmd->c_error == 0 && cmd->c_data != NULL)
		sdhc_transfer_data(hp, cmd);
	else if (ISSET(cmd->c_flags, SCF_RSP_BSY)) {
		/* Busy-signalled response: wait for the dat line release. */
		if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_NO_BUSY_INTR) &&
		    !sdhc_wait_intr(hp, SDHC_TRANSFER_COMPLETE, hz * 10, false)) {
			DPRINTF(1,("%s: sdhc_exec_command: RSP_BSY\n",
			    HDEVNAME(hp)));
			cmd->c_error = ETIMEDOUT;
			goto out;
		}
	}

out:
	if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED)
	    && !ISSET(hp->sc->sc_flags, SDHC_FLAG_NO_LED_ON)) {
		/* Turn off the LED. */
		HCLR1(hp, SDHC_HOST_CTL, SDHC_LED_ON);
	}
	SET(cmd->c_flags, SCF_ITSDONE);

	/* NO_AUTO_STOP quirk: clean up after a manual STOP_TRANSMISSION. */
	if (ISSET(hp->sc->sc_flags, SDHC_FLAG_NO_AUTO_STOP) &&
	    cmd->c_opcode == MMC_STOP_TRANSMISSION)
		(void)sdhc_soft_reset(hp, SDHC_RESET_CMD|SDHC_RESET_DAT);

	mutex_exit(&hp->intr_lock);

	DPRINTF(1,("%s: cmd %d %s (flags=%08x error=%d)\n", HDEVNAME(hp),
	    cmd->c_opcode, (cmd->c_error == 0) ? "done" : "abort",
	    cmd->c_flags, cmd->c_error));
}
1639
/*
 * Program the controller registers for `cmd' and trigger it.
 * Validates block fragmentation, builds the transfer-mode and command
 * register values, sets up SDMA/ADMA2 descriptors as configured, and
 * finally writes the command register (which starts the command).
 * Returns 0 on success or an errno; caller holds hp->intr_lock.
 */
static int
sdhc_start_command(struct sdhc_host *hp, struct sdmmc_command *cmd)
{
	struct sdhc_softc * const sc = hp->sc;
	uint16_t blksize = 0;
	uint16_t blkcount = 0;
	uint16_t mode;
	uint16_t command;
	uint32_t pmask;
	int error;

	KASSERT(mutex_owned(&hp->intr_lock));

	DPRINTF(1,("%s: start cmd %d arg=%08x data=%p dlen=%d flags=%08x, status=%#x\n",
	    HDEVNAME(hp), cmd->c_opcode, cmd->c_arg, cmd->c_data,
	    cmd->c_datalen, cmd->c_flags, HREAD4(hp, SDHC_NINTR_STATUS)));

	/*
	 * The maximum block length for commands should be the minimum
	 * of the host buffer size and the card buffer size. (1.7.2)
	 */

	/* Fragment the data into proper blocks. */
	if (cmd->c_datalen > 0) {
		blksize = MIN(cmd->c_datalen, cmd->c_blklen);
		blkcount = cmd->c_datalen / blksize;
		if (cmd->c_datalen % blksize > 0) {
			/* XXX: Split this command. (1.7.4) */
			aprint_error_dev(sc->sc_dev,
			    "data not a multiple of %u bytes\n", blksize);
			return EINVAL;
		}
	}

	/* Check limit imposed by 9-bit block count. (1.7.2) */
	if (blkcount > SDHC_BLOCK_COUNT_MAX) {
		aprint_error_dev(sc->sc_dev, "too much data\n");
		return EINVAL;
	}

	/* Prepare transfer mode register value. (2.2.5) */
	mode = SDHC_BLOCK_COUNT_ENABLE;
	if (ISSET(cmd->c_flags, SCF_CMD_READ))
		mode |= SDHC_READ_MODE;
	if (blkcount > 1) {
		mode |= SDHC_MULTI_BLOCK_MODE;
		/* XXX only for memory commands? */
		if (!ISSET(sc->sc_flags, SDHC_FLAG_NO_AUTO_STOP))
			mode |= SDHC_AUTO_CMD12_ENABLE;
	}
	if (cmd->c_dmamap != NULL && cmd->c_datalen > 0 &&
	    ISSET(hp->flags, SHF_MODE_DMAEN)) {
		mode |= SDHC_DMA_ENABLE;
	}

	/*
	 * Prepare command register value. (2.2.6)
	 */
	command = (cmd->c_opcode & SDHC_COMMAND_INDEX_MASK) << SDHC_COMMAND_INDEX_SHIFT;

	if (ISSET(cmd->c_flags, SCF_RSP_CRC))
		command |= SDHC_CRC_CHECK_ENABLE;
	if (ISSET(cmd->c_flags, SCF_RSP_IDX))
		command |= SDHC_INDEX_CHECK_ENABLE;
	if (cmd->c_datalen > 0)
		command |= SDHC_DATA_PRESENT_SELECT;

	if (!ISSET(cmd->c_flags, SCF_RSP_PRESENT))
		command |= SDHC_NO_RESPONSE;
	else if (ISSET(cmd->c_flags, SCF_RSP_136))
		command |= SDHC_RESP_LEN_136;
	else if (ISSET(cmd->c_flags, SCF_RSP_BSY))
		command |= SDHC_RESP_LEN_48_CHK_BUSY;
	else
		command |= SDHC_RESP_LEN_48;

	/* Wait until command and optionally data inhibit bits are clear. (1.5) */
	pmask = SDHC_CMD_INHIBIT_CMD;
	if (cmd->c_flags & (SCF_CMD_ADTC|SCF_RSP_BSY))
		pmask |= SDHC_CMD_INHIBIT_DAT;
	error = sdhc_wait_state(hp, pmask, 0);
	if (error) {
		(void) sdhc_soft_reset(hp, SDHC_RESET_DAT|SDHC_RESET_CMD);
		device_printf(sc->sc_dev, "command or data phase inhibited\n");
		return error;
	}

	DPRINTF(1,("%s: writing cmd: blksize=%d blkcnt=%d mode=%04x cmd=%04x\n",
	    HDEVNAME(hp), blksize, blkcount, mode, command));

	if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED | SDHC_FLAG_USDHC)) {
		blksize |= (MAX(0, PAGE_SHIFT - 12) & SDHC_DMA_BOUNDARY_MASK) <<
		    SDHC_DMA_BOUNDARY_SHIFT;	/* PAGE_SIZE DMA boundary */
	}

	if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED)) {
		/* Alert the user not to remove the card. */
		HSET1(hp, SDHC_HOST_CTL, SDHC_LED_ON);
	}

	/* Set DMA start address. */
	if (ISSET(hp->flags, SHF_USE_ADMA2_MASK) && cmd->c_data != NULL) {
		/* Build one ADMA2 descriptor per DMA segment. */
		for (int seg = 0; seg < cmd->c_dmamap->dm_nsegs; seg++) {
			bus_addr_t paddr =
			    cmd->c_dmamap->dm_segs[seg].ds_addr;
			/* Length field: 0 encodes a full 65536 bytes. */
			uint16_t len =
			    cmd->c_dmamap->dm_segs[seg].ds_len == 65536 ?
			    0 : cmd->c_dmamap->dm_segs[seg].ds_len;
			uint16_t attr =
			    SDHC_ADMA2_VALID | SDHC_ADMA2_ACT_TRANS;
			if (seg == cmd->c_dmamap->dm_nsegs - 1) {
				attr |= SDHC_ADMA2_END;
			}
			/* 32-bit vs 64-bit descriptor layout. */
			if (ISSET(hp->flags, SHF_USE_ADMA2_32)) {
				struct sdhc_adma2_descriptor32 *desc =
				    hp->adma2;
				desc[seg].attribute = htole16(attr);
				desc[seg].length = htole16(len);
				desc[seg].address = htole32(paddr);
			} else {
				struct sdhc_adma2_descriptor64 *desc =
				    hp->adma2;
				desc[seg].attribute = htole16(attr);
				desc[seg].length = htole16(len);
				desc[seg].address = htole32(paddr & 0xffffffff);
				desc[seg].address_hi = htole32(
				    (uint64_t)paddr >> 32);
			}
		}
		/* Terminate the descriptor chain with an invalid entry. */
		if (ISSET(hp->flags, SHF_USE_ADMA2_32)) {
			struct sdhc_adma2_descriptor32 *desc = hp->adma2;
			desc[cmd->c_dmamap->dm_nsegs].attribute = htole16(0);
		} else {
			struct sdhc_adma2_descriptor64 *desc = hp->adma2;
			desc[cmd->c_dmamap->dm_nsegs].attribute = htole16(0);
		}
		/* Flush descriptors to memory before the controller reads them. */
		bus_dmamap_sync(sc->sc_dmat, hp->adma_map, 0, PAGE_SIZE,
		    BUS_DMASYNC_PREWRITE);
		if (ISSET(hp->sc->sc_flags, SDHC_FLAG_USDHC)) {
			HCLR4(hp, SDHC_HOST_CTL, SDHC_USDHC_DMA_SELECT);
			HSET4(hp, SDHC_HOST_CTL, SDHC_USDHC_DMA_SELECT_ADMA2);
		} else {
			HCLR1(hp, SDHC_HOST_CTL, SDHC_DMA_SELECT);
			HSET1(hp, SDHC_HOST_CTL, SDHC_DMA_SELECT_ADMA2);
		}

		const bus_addr_t desc_addr = hp->adma_map->dm_segs[0].ds_addr;

		HWRITE4(hp, SDHC_ADMA_SYSTEM_ADDR, desc_addr & 0xffffffff);
		if (ISSET(hp->flags, SHF_USE_ADMA2_64)) {
			HWRITE4(hp, SDHC_ADMA_SYSTEM_ADDR + 4,
			    (uint64_t)desc_addr >> 32);
		}
	} else if (ISSET(mode, SDHC_DMA_ENABLE) &&
	    !ISSET(sc->sc_flags, SDHC_FLAG_EXTERNAL_DMA)) {
		/* Plain SDMA: program the first segment's address. */
		if (ISSET(hp->sc->sc_flags, SDHC_FLAG_USDHC)) {
			HCLR4(hp, SDHC_HOST_CTL, SDHC_USDHC_DMA_SELECT);
		}
		HWRITE4(hp, SDHC_DMA_ADDR, cmd->c_dmamap->dm_segs[0].ds_addr);
	}

	/*
	 * Start a CPU data transfer. Writing to the high order byte
	 * of the SDHC_COMMAND register triggers the SD command. (1.5)
	 */
	if (ISSET(hp->sc->sc_flags, SDHC_FLAG_32BIT_ACCESS)) {
		HWRITE4(hp, SDHC_BLOCK_SIZE, blksize | (blkcount << 16));
		HWRITE4(hp, SDHC_ARGUMENT, cmd->c_arg);
		if (ISSET(hp->sc->sc_flags, SDHC_FLAG_USDHC)) {
			/* mode bits is in MIX_CTRL register on uSDHC */
			HWRITE4(hp, SDHC_MIX_CTRL, mode |
			    (HREAD4(hp, SDHC_MIX_CTRL) &
			    ~(SDHC_MULTI_BLOCK_MODE |
			    SDHC_READ_MODE |
			    SDHC_AUTO_CMD12_ENABLE |
			    SDHC_BLOCK_COUNT_ENABLE |
			    SDHC_DMA_ENABLE)));
			HWRITE4(hp, SDHC_TRANSFER_MODE, command << 16);
		} else {
			HWRITE4(hp, SDHC_TRANSFER_MODE, mode | (command << 16));
		}
	} else {
		HWRITE2(hp, SDHC_BLOCK_SIZE, blksize);
		HWRITE2(hp, SDHC_BLOCK_COUNT, blkcount);
		HWRITE4(hp, SDHC_ARGUMENT, cmd->c_arg);
		HWRITE2(hp, SDHC_TRANSFER_MODE, mode);
		HWRITE2(hp, SDHC_COMMAND, command);
	}

	return 0;
}
1831
/*
 * Run the data phase of `cmd', dispatching to the vendor DMA hook,
 * the generic DMA path, or PIO.  The result lands in cmd->c_error
 * and SCF_ITSDONE is set when the transfer finishes either way.
 */
static void
sdhc_transfer_data(struct sdhc_host *hp, struct sdmmc_command *cmd)
{
	struct sdhc_softc *sc = hp->sc;
	int error;

	KASSERT(mutex_owned(&hp->intr_lock));

	DPRINTF(1,("%s: data transfer: resp=%08x datalen=%u\n", HDEVNAME(hp),
	    MMC_R1(cmd->c_resp), cmd->c_datalen));

#ifdef SDHC_DEBUG
	/* XXX I forgot why I wanted to know when this happens :-( */
	if ((cmd->c_opcode == 52 || cmd->c_opcode == 53) &&
	    ISSET(MMC_R1(cmd->c_resp), 0xcb00)) {
		aprint_error_dev(hp->sc->sc_dev,
		    "CMD52/53 error response flags %#x\n",
		    MMC_R1(cmd->c_resp) & 0xff00);
	}
#endif

	/* A DMA map means a DMA transfer; otherwise fall back to PIO. */
	if (cmd->c_dmamap != NULL) {
		if (hp->sc->sc_vendor_transfer_data_dma != NULL) {
			/* Vendor moves the data; we wait for completion. */
			error = hp->sc->sc_vendor_transfer_data_dma(sc, cmd);
			if (error == 0 && !sdhc_wait_intr(hp,
			    SDHC_TRANSFER_COMPLETE, SDHC_DMA_TIMEOUT, false)) {
				DPRINTF(1,("%s: timeout\n", __func__));
				error = ETIMEDOUT;
			}
		} else {
			error = sdhc_transfer_data_dma(hp, cmd);
		}
	} else
		error = sdhc_transfer_data_pio(hp, cmd);
	if (error)
		cmd->c_error = error;
	SET(cmd->c_flags, SCF_ITSDONE);

	DPRINTF(1,("%s: data transfer done (error=%d)\n",
	    HDEVNAME(hp), cmd->c_error));
}
1873
/*
 * Wait out a DMA transfer started by sdhc_start_command().
 *
 * For SDMA the controller raises SDHC_DMA_INTERRUPT each time it
 * crosses the programmed boundary; we restart it at the next address
 * (advancing to the next dmamap segment when one is exhausted).
 * ADMA2 walks its descriptor chain by itself, so DMA interrupts are
 * ignored and we only wait for TRANSFER_COMPLETE.
 */
static int
sdhc_transfer_data_dma(struct sdhc_host *hp, struct sdmmc_command *cmd)
{
	bus_dma_segment_t *dm_segs = cmd->c_dmamap->dm_segs;
	bus_addr_t posaddr;
	bus_addr_t segaddr;
	bus_size_t seglen;
	u_int seg = 0;
	int error = 0;
	int status;

	KASSERT(mutex_owned(&hp->intr_lock));
	KASSERT(HREAD2(hp, SDHC_NINTR_STATUS_EN) & SDHC_DMA_INTERRUPT);
	KASSERT(HREAD2(hp, SDHC_NINTR_SIGNAL_EN) & SDHC_DMA_INTERRUPT);
	KASSERT(HREAD2(hp, SDHC_NINTR_STATUS_EN) & SDHC_TRANSFER_COMPLETE);
	KASSERT(HREAD2(hp, SDHC_NINTR_SIGNAL_EN) & SDHC_TRANSFER_COMPLETE);

	for (;;) {
		status = sdhc_wait_intr(hp,
		    SDHC_DMA_INTERRUPT|SDHC_TRANSFER_COMPLETE,
		    SDHC_DMA_TIMEOUT, false);

		if (status & SDHC_TRANSFER_COMPLETE) {
			break;
		}
		/* sdhc_wait_intr() returning 0 means timeout. */
		if (!status) {
			DPRINTF(1,("%s: timeout\n", __func__));
			error = ETIMEDOUT;
			break;
		}

		/* ADMA2 advances on its own; nothing for us to do. */
		if (ISSET(hp->flags, SHF_USE_ADMA2_MASK)) {
			continue;
		}

		if ((status & SDHC_DMA_INTERRUPT) == 0) {
			continue;
		}

		/* DMA Interrupt (boundary crossing) */

		segaddr = dm_segs[seg].ds_addr;
		seglen = dm_segs[seg].ds_len;
		posaddr = HREAD4(hp, SDHC_DMA_ADDR);

		/* Last segment fully consumed: just wait for completion. */
		if ((seg == (cmd->c_dmamap->dm_nsegs-1)) && (posaddr == (segaddr + seglen))) {
			continue;
		}
		/* Still inside the segment: resume at the current position. */
		if ((posaddr >= segaddr) && (posaddr < (segaddr + seglen)))
			HWRITE4(hp, SDHC_DMA_ADDR, posaddr);
		/* Segment exhausted: continue at the next segment's start. */
		else if ((posaddr >= segaddr) && (posaddr == (segaddr + seglen)) && (seg + 1) < cmd->c_dmamap->dm_nsegs)
			HWRITE4(hp, SDHC_DMA_ADDR, dm_segs[++seg].ds_addr);
		KASSERT(seg < cmd->c_dmamap->dm_nsegs);
	}

	/* Finished with the ADMA2 descriptor table; sync it back. */
	if (ISSET(hp->flags, SHF_USE_ADMA2_MASK)) {
		bus_dmamap_sync(hp->sc->sc_dmat, hp->adma_map, 0,
		    PAGE_SIZE, BUS_DMASYNC_POSTWRITE);
	}

	return error;
}
1936
/*
 * Move the data phase of `cmd' by programmed I/O, one block at a
 * time, waiting for the controller's buffer-ready indication before
 * each block and for TRANSFER_COMPLETE at the end.
 */
static int
sdhc_transfer_data_pio(struct sdhc_host *hp, struct sdmmc_command *cmd)
{
	uint8_t *data = cmd->c_data;
	void (*pio_func)(struct sdhc_host *, uint8_t *, u_int);
	u_int len, datalen;
	u_int imask;
	u_int pmask;
	int error = 0;

	KASSERT(mutex_owned(&hp->intr_lock));

	/* Pick the interrupt/state bits and copy routine per direction. */
	if (ISSET(cmd->c_flags, SCF_CMD_READ)) {
		imask = SDHC_BUFFER_READ_READY;
		pmask = SDHC_BUFFER_READ_ENABLE;
		if (ISSET(hp->sc->sc_flags,
		    SDHC_FLAG_ENHANCED | SDHC_FLAG_USDHC)) {
			pio_func = esdhc_read_data_pio;
		} else {
			pio_func = sdhc_read_data_pio;
		}
	} else {
		imask = SDHC_BUFFER_WRITE_READY;
		pmask = SDHC_BUFFER_WRITE_ENABLE;
		if (ISSET(hp->sc->sc_flags,
		    SDHC_FLAG_ENHANCED | SDHC_FLAG_USDHC)) {
			pio_func = esdhc_write_data_pio;
		} else {
			pio_func = sdhc_write_data_pio;
		}
	}
	datalen = cmd->c_datalen;

	KASSERT(mutex_owned(&hp->intr_lock));
	KASSERT(HREAD2(hp, SDHC_NINTR_STATUS_EN) & imask);
	KASSERT(HREAD2(hp, SDHC_NINTR_STATUS_EN) & SDHC_TRANSFER_COMPLETE);
	KASSERT(HREAD2(hp, SDHC_NINTR_SIGNAL_EN) & SDHC_TRANSFER_COMPLETE);

	while (datalen > 0) {
		/* Buffer not ready yet: unmask and wait for the interrupt. */
		if (!ISSET(HREAD4(hp, SDHC_PRESENT_STATE), pmask)) {
			if (ISSET(hp->sc->sc_flags, SDHC_FLAG_32BIT_ACCESS)) {
				HSET4(hp, SDHC_NINTR_SIGNAL_EN, imask);
			} else {
				HSET2(hp, SDHC_NINTR_SIGNAL_EN, imask);
			}
			if (!sdhc_wait_intr(hp, imask, SDHC_BUFFER_TIMEOUT, false)) {
				DPRINTF(1,("%s: timeout\n", __func__));
				error = ETIMEDOUT;
				break;
			}

			error = sdhc_wait_state(hp, pmask, pmask);
			if (error)
				break;
		}

		/* Copy one block (or the remaining tail). */
		len = MIN(datalen, cmd->c_blklen);
		(*pio_func)(hp, data, len);
		DPRINTF(2,("%s: pio data transfer %u @ %p\n",
		    HDEVNAME(hp), len, data));

		data += len;
		datalen -= len;
	}

	if (error == 0 && !sdhc_wait_intr(hp, SDHC_TRANSFER_COMPLETE,
	    SDHC_TRANSFER_TIMEOUT, false)) {
		DPRINTF(1,("%s: timeout for transfer\n", __func__));
		error = ETIMEDOUT;
	}

	return error;
}
2010
2011 static void
2012 sdhc_read_data_pio(struct sdhc_host *hp, uint8_t *data, u_int datalen)
2013 {
2014
2015 if (((__uintptr_t)data & 3) == 0) {
2016 while (datalen > 3) {
2017 *(uint32_t *)data = le32toh(HREAD4(hp, SDHC_DATA));
2018 data += 4;
2019 datalen -= 4;
2020 }
2021 if (datalen > 1) {
2022 *(uint16_t *)data = le16toh(HREAD2(hp, SDHC_DATA));
2023 data += 2;
2024 datalen -= 2;
2025 }
2026 if (datalen > 0) {
2027 *data = HREAD1(hp, SDHC_DATA);
2028 data += 1;
2029 datalen -= 1;
2030 }
2031 } else if (((__uintptr_t)data & 1) == 0) {
2032 while (datalen > 1) {
2033 *(uint16_t *)data = le16toh(HREAD2(hp, SDHC_DATA));
2034 data += 2;
2035 datalen -= 2;
2036 }
2037 if (datalen > 0) {
2038 *data = HREAD1(hp, SDHC_DATA);
2039 data += 1;
2040 datalen -= 1;
2041 }
2042 } else {
2043 while (datalen > 0) {
2044 *data = HREAD1(hp, SDHC_DATA);
2045 data += 1;
2046 datalen -= 1;
2047 }
2048 }
2049 }
2050
2051 static void
2052 sdhc_write_data_pio(struct sdhc_host *hp, uint8_t *data, u_int datalen)
2053 {
2054
2055 if (((__uintptr_t)data & 3) == 0) {
2056 while (datalen > 3) {
2057 HWRITE4(hp, SDHC_DATA, htole32(*(uint32_t *)data));
2058 data += 4;
2059 datalen -= 4;
2060 }
2061 if (datalen > 1) {
2062 HWRITE2(hp, SDHC_DATA, htole16(*(uint16_t *)data));
2063 data += 2;
2064 datalen -= 2;
2065 }
2066 if (datalen > 0) {
2067 HWRITE1(hp, SDHC_DATA, *data);
2068 data += 1;
2069 datalen -= 1;
2070 }
2071 } else if (((__uintptr_t)data & 1) == 0) {
2072 while (datalen > 1) {
2073 HWRITE2(hp, SDHC_DATA, htole16(*(uint16_t *)data));
2074 data += 2;
2075 datalen -= 2;
2076 }
2077 if (datalen > 0) {
2078 HWRITE1(hp, SDHC_DATA, *data);
2079 data += 1;
2080 datalen -= 1;
2081 }
2082 } else {
2083 while (datalen > 0) {
2084 HWRITE1(hp, SDHC_DATA, *data);
2085 data += 1;
2086 datalen -= 1;
2087 }
2088 }
2089 }
2090
/*
 * eSDHC/uSDHC PIO read: the data port is always 32 bits wide and the
 * FIFO is paced by the read watermark.  After draining `watermark'
 * words we pause briefly to let the FIFO refill.  Reading stops early
 * if the controller reports TRANSFER_COMPLETE.
 */
static void
esdhc_read_data_pio(struct sdhc_host *hp, uint8_t *data, u_int datalen)
{
	uint16_t status = HREAD2(hp, SDHC_NINTR_STATUS);
	uint32_t v;

	const size_t watermark = (HREAD4(hp, SDHC_WATERMARK_LEVEL) >> SDHC_WATERMARK_READ_SHIFT) & SDHC_WATERMARK_READ_MASK;
	size_t count = 0;	/* words left before the next refill pause */

	while (datalen > 3 && !ISSET(status, SDHC_TRANSFER_COMPLETE)) {
		if (count == 0) {
			/*
			 * If we've drained "watermark" words, we need to wait
			 * a little bit so the read FIFO can refill.
			 */
			sdmmc_delay(10);
			count = watermark;
		}
		v = HREAD4(hp, SDHC_DATA);
		v = le32toh(v);
		*(uint32_t *)data = v;
		data += 4;
		datalen -= 4;
		status = HREAD2(hp, SDHC_NINTR_STATUS);
		count--;
	}
	/* Tail of 1-3 bytes: one more word read, stored byte by byte. */
	if (datalen > 0 && !ISSET(status, SDHC_TRANSFER_COMPLETE)) {
		if (count == 0) {
			sdmmc_delay(10);
		}
		v = HREAD4(hp, SDHC_DATA);
		v = le32toh(v);
		do {
			*data++ = v;
			v >>= 8;
		} while (--datalen > 0);
	}
}
2129
/*
 * eSDHC/uSDHC PIO write: 32-bit data port paced by the write
 * watermark.  After filling `watermark' words we pause briefly so the
 * FIFO can drain.  Writing stops early on TRANSFER_COMPLETE.
 */
static void
esdhc_write_data_pio(struct sdhc_host *hp, uint8_t *data, u_int datalen)
{
	uint16_t status = HREAD2(hp, SDHC_NINTR_STATUS);
	uint32_t v;

	const size_t watermark = (HREAD4(hp, SDHC_WATERMARK_LEVEL) >> SDHC_WATERMARK_WRITE_SHIFT) & SDHC_WATERMARK_WRITE_MASK;
	size_t count = watermark;	/* words left before the drain pause */

	while (datalen > 3 && !ISSET(status, SDHC_TRANSFER_COMPLETE)) {
		if (count == 0) {
			/* FIFO full: wait for it to drain. */
			sdmmc_delay(10);
			count = watermark;
		}
		v = *(uint32_t *)data;
		v = htole32(v);
		HWRITE4(hp, SDHC_DATA, v);
		data += 4;
		datalen -= 4;
		status = HREAD2(hp, SDHC_NINTR_STATUS);
		count--;
	}
	/*
	 * Tail of 1-3 bytes: one final word write.  Note this reads
	 * a full 32-bit word from the buffer tail.
	 */
	if (datalen > 0 && !ISSET(status, SDHC_TRANSFER_COMPLETE)) {
		if (count == 0) {
			sdmmc_delay(10);
		}
		v = *(uint32_t *)data;
		v = htole32(v);
		HWRITE4(hp, SDHC_DATA, v);
	}
}
2161
/*
 * Prepare for another command: issue a software reset of the units
 * selected by "mask" (SDHC_RESET_* bits) and wait for it to finish.
 *
 * Returns 0 on success or ETIMEDOUT if the controller never
 * acknowledges or never completes the reset.  Must be called with
 * the host's intr_lock held.
 */
static int
sdhc_soft_reset(struct sdhc_host *hp, int mask)
{
	int timo;

	KASSERT(mutex_owned(&hp->intr_lock));

	DPRINTF(1,("%s: software reset reg=%08x\n", HDEVNAME(hp), mask));

	/* Request the reset. */
	HWRITE1(hp, SDHC_SOFTWARE_RESET, mask);

	/*
	 * If necessary, wait for the controller to set the bits to
	 * acknowledge the reset.
	 */
	if (ISSET(hp->sc->sc_flags, SDHC_FLAG_WAIT_RESET) &&
	    ISSET(mask, (SDHC_RESET_DAT | SDHC_RESET_CMD))) {
		/* Poll up to ~10ms in 1us steps for the ack. */
		for (timo = 10000; timo > 0; timo--) {
			if (ISSET(HREAD1(hp, SDHC_SOFTWARE_RESET), mask))
				break;
			/* Short delay because I worry we may miss it...  */
			sdmmc_delay(1);
		}
		if (timo == 0) {
			DPRINTF(1,("%s: timeout for reset on\n", __func__));
			return ETIMEDOUT;
		}
	}

	/*
	 * Wait for the controller to clear the bits to indicate that
	 * the reset has completed.  Poll up to ~100ms in 10ms steps.
	 */
	for (timo = 10; timo > 0; timo--) {
		if (!ISSET(HREAD1(hp, SDHC_SOFTWARE_RESET), mask))
			break;
		sdmmc_delay(10000);
	}
	if (timo == 0) {
		DPRINTF(1,("%s: timeout reg=%08x\n", HDEVNAME(hp),
		    HREAD1(hp, SDHC_SOFTWARE_RESET)));
		return ETIMEDOUT;
	}

	/* Enhanced (eSDHC) controllers: re-enable DMA cache snooping,
	 * which the reset just cleared. */
	if (ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED)) {
		HSET4(hp, SDHC_DMA_CTL, SDHC_DMA_SNOOP);
	}

	return 0;
}
2214
/*
 * Wait until any of the interrupt bits in "mask" (or an error
 * interrupt, which is always included) has been posted by sdhc_intr(),
 * or until "timo" ticks have elapsed.
 *
 * Returns the matched status bits on success, or 0 on timeout or when
 * an error interrupt fired (in which case the CMD/DAT lines are reset
 * on non-enhanced controllers and the saved error state is cleared).
 * "probing" suppresses the console message for command timeouts that
 * are expected while scanning for cards.
 *
 * Must be called with the host's intr_lock held; sleeps on intr_cv.
 */
static int
sdhc_wait_intr(struct sdhc_host *hp, int mask, int timo, bool probing)
{
	int status, error, nointr;

	KASSERT(mutex_owned(&hp->intr_lock));

	/* Always wake up on errors too. */
	mask |= SDHC_ERROR_INTERRUPT;

	nointr = 0;
	status = hp->intr_status & mask;
	while (status == 0) {
		if (cv_timedwait(&hp->intr_cv, &hp->intr_lock, timo)
		    == EWOULDBLOCK) {
			nointr = 1;
			break;
		}
		status = hp->intr_status & mask;
	}
	error = hp->intr_error_status;

	DPRINTF(2,("%s: intr status %#x error %#x\n", HDEVNAME(hp), status,
	    error));

	/* Consume only the bits we handled; leave the rest pending. */
	hp->intr_status &= ~status;
	hp->intr_error_status &= ~error;

	if (ISSET(status, SDHC_ERROR_INTERRUPT)) {
		/* Report each error condition recorded by the handler. */
		if (ISSET(error, SDHC_DMA_ERROR))
			device_printf(hp->sc->sc_dev,"dma error\n");
		if (ISSET(error, SDHC_ADMA_ERROR))
			device_printf(hp->sc->sc_dev,"adma error\n");
		if (ISSET(error, SDHC_AUTO_CMD12_ERROR))
			device_printf(hp->sc->sc_dev,"auto_cmd12 error\n");
		if (ISSET(error, SDHC_CURRENT_LIMIT_ERROR))
			device_printf(hp->sc->sc_dev,"current limit error\n");
		if (ISSET(error, SDHC_DATA_END_BIT_ERROR))
			device_printf(hp->sc->sc_dev,"data end bit error\n");
		if (ISSET(error, SDHC_DATA_CRC_ERROR))
			device_printf(hp->sc->sc_dev,"data crc error\n");
		if (ISSET(error, SDHC_DATA_TIMEOUT_ERROR))
			device_printf(hp->sc->sc_dev,"data timeout error\n");
		if (ISSET(error, SDHC_CMD_INDEX_ERROR))
			device_printf(hp->sc->sc_dev,"cmd index error\n");
		if (ISSET(error, SDHC_CMD_END_BIT_ERROR))
			device_printf(hp->sc->sc_dev,"cmd end bit error\n");
		if (ISSET(error, SDHC_CMD_CRC_ERROR))
			device_printf(hp->sc->sc_dev,"cmd crc error\n");
		if (ISSET(error, SDHC_CMD_TIMEOUT_ERROR)) {
			if (!probing)
				device_printf(hp->sc->sc_dev,"cmd timeout error\n");
#ifdef SDHC_DEBUG
			else if (sdhcdebug > 0)
				device_printf(hp->sc->sc_dev,"cmd timeout (expected)\n");
#endif
		}
		/* Bits outside the standard mask are vendor-specific. */
		if ((error & ~SDHC_EINTR_STATUS_MASK) != 0)
			device_printf(hp->sc->sc_dev,"vendor error %#x\n",
			    (error & ~SDHC_EINTR_STATUS_MASK));
		if (error == 0)
			device_printf(hp->sc->sc_dev,"no error\n");

		/* Command timeout has higher priority than command complete. */
		if (ISSET(error, SDHC_CMD_TIMEOUT_ERROR))
			CLR(status, SDHC_COMMAND_COMPLETE);

		/* Transfer complete has higher priority than data timeout. */
		if (ISSET(status, SDHC_TRANSFER_COMPLETE))
			CLR(error, SDHC_DATA_TIMEOUT_ERROR);
	}

	/* On timeout or real error, reset the lines and report failure. */
	if (nointr ||
	    (ISSET(status, SDHC_ERROR_INTERRUPT) && error)) {
		if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED))
			(void)sdhc_soft_reset(hp, SDHC_RESET_CMD|SDHC_RESET_DAT);
		hp->intr_error_status = 0;
		status = 0;
	}

	return status;
}
2296
2297 /*
2298 * Established by attachment driver at interrupt priority IPL_SDMMC.
2299 */
/*
 * Hardware interrupt handler, shared by all hosts (slots) of a
 * controller.  Reads and acknowledges the pending interrupt status of
 * every host, records command-related bits for sdhc_wait_intr() and
 * wakes its waiters, kicks off card-change discovery and re-tuning,
 * and dispatches SD card interrupts.
 *
 * Returns 1 if any host had a pending interrupt, 0 otherwise.
 */
int
sdhc_intr(void *arg)
{
	struct sdhc_softc *sc = (struct sdhc_softc *)arg;
	struct sdhc_host *hp;
	int done = 0;
	uint16_t status;
	uint16_t error;

	/* We got an interrupt, but we don't know from which slot. */
	for (size_t host = 0; host < sc->sc_nhosts; host++) {
		hp = sc->sc_host[host];
		if (hp == NULL)
			continue;

		mutex_enter(&hp->intr_lock);

		if (ISSET(sc->sc_flags, SDHC_FLAG_32BIT_ACCESS)) {
			/* Find out which interrupts are pending. */
			uint32_t xstatus = HREAD4(hp, SDHC_NINTR_STATUS);
			status = xstatus;
			/* Error bits live in the upper 16 bits here. */
			error = xstatus >> 16;
			if (ISSET(sc->sc_flags, SDHC_FLAG_USDHC) &&
			    (xstatus & SDHC_TRANSFER_COMPLETE) &&
			    !(xstatus & SDHC_DMA_INTERRUPT)) {
				/* read again due to uSDHC errata */
				status = xstatus = HREAD4(hp,
				    SDHC_NINTR_STATUS);
				error = xstatus >> 16;
			}
			if (ISSET(sc->sc_flags,
			    SDHC_FLAG_ENHANCED | SDHC_FLAG_USDHC)) {
				/* These parts don't latch the summary bit
				 * themselves; synthesize it. */
				if ((error & SDHC_NINTR_STATUS_MASK) != 0)
					SET(status, SDHC_ERROR_INTERRUPT);
			}
			if (error)
				xstatus |= SDHC_ERROR_INTERRUPT;
			else if (!ISSET(status, SDHC_NINTR_STATUS_MASK))
				goto next_port; /* no interrupt for us */
			/* Acknowledge the interrupts we are about to handle. */
			HWRITE4(hp, SDHC_NINTR_STATUS, xstatus);
		} else {
			/* Find out which interrupts are pending. */
			error = 0;
			status = HREAD2(hp, SDHC_NINTR_STATUS);
			if (!ISSET(status, SDHC_NINTR_STATUS_MASK))
				goto next_port; /* no interrupt for us */
			/* Acknowledge the interrupts we are about to handle. */
			HWRITE2(hp, SDHC_NINTR_STATUS, status);
			if (ISSET(status, SDHC_ERROR_INTERRUPT)) {
				/* Acknowledge error interrupts. */
				error = HREAD2(hp, SDHC_EINTR_STATUS);
				HWRITE2(hp, SDHC_EINTR_STATUS, error);
			}
		}

		DPRINTF(2,("%s: interrupt status=%x error=%x\n", HDEVNAME(hp),
		    status, error));

		/* Claim this interrupt. */
		done = 1;

		/* ADMA failures: dump the descriptor engine's status. */
		if (ISSET(status, SDHC_ERROR_INTERRUPT) &&
		    ISSET(error, SDHC_ADMA_ERROR)) {
			uint8_t adma_err = HREAD1(hp, SDHC_ADMA_ERROR_STATUS);
			printf("%s: ADMA error, status %02x\n", HDEVNAME(hp),
			    adma_err);
		}

		/*
		 * Wake up the sdmmc event thread to scan for cards.
		 */
		if (ISSET(status, SDHC_CARD_REMOVAL|SDHC_CARD_INSERTION)) {
			if (hp->sdmmc != NULL) {
				sdmmc_needs_discover(hp->sdmmc);
			}
			if (ISSET(sc->sc_flags,
			    SDHC_FLAG_ENHANCED | SDHC_FLAG_USDHC)) {
				/* Mask further card-change interrupts until
				 * the event thread has handled this one. */
				HCLR4(hp, SDHC_NINTR_STATUS_EN,
				    status & (SDHC_CARD_REMOVAL|SDHC_CARD_INSERTION));
				HCLR4(hp, SDHC_NINTR_SIGNAL_EN,
				    status & (SDHC_CARD_REMOVAL|SDHC_CARD_INSERTION));
			}
		}

		/*
		 * Schedule re-tuning process (UHS).
		 */
		if (ISSET(status, SDHC_RETUNING_EVENT)) {
			atomic_swap_uint(&hp->tuning_timer_pending, 1);
		}

		/*
		 * Wake up the blocking process to service command
		 * related interrupt(s).
		 */
		if (ISSET(status, SDHC_COMMAND_COMPLETE|SDHC_ERROR_INTERRUPT|
		    SDHC_BUFFER_READ_READY|SDHC_BUFFER_WRITE_READY|
		    SDHC_TRANSFER_COMPLETE|SDHC_DMA_INTERRUPT)) {
			/* Record state for sdhc_wait_intr() to consume. */
			hp->intr_error_status |= error;
			hp->intr_status |= status;
			if (ISSET(sc->sc_flags,
			    SDHC_FLAG_ENHANCED | SDHC_FLAG_USDHC)) {
				/* Stop buffer-ready from re-firing until the
				 * PIO path re-enables it. */
				HCLR4(hp, SDHC_NINTR_SIGNAL_EN,
				    status & (SDHC_BUFFER_READ_READY|SDHC_BUFFER_WRITE_READY));
			}
			cv_broadcast(&hp->intr_cv);
		}

		/*
		 * Service SD card interrupts.
		 */
		if (!ISSET(sc->sc_flags, SDHC_FLAG_ENHANCED | SDHC_FLAG_USDHC)
		    && ISSET(status, SDHC_CARD_INTERRUPT)) {
			DPRINTF(0,("%s: card interrupt\n", HDEVNAME(hp)));
			HCLR2(hp, SDHC_NINTR_STATUS_EN, SDHC_CARD_INTERRUPT);
			sdmmc_card_intr(hp->sdmmc);
		}
next_port:
		mutex_exit(&hp->intr_lock);
	}

	return done;
}
2424
2425 kmutex_t *
2426 sdhc_host_lock(struct sdhc_host *hp)
2427 {
2428 return &hp->intr_lock;
2429 }
2430
2431 uint8_t
2432 sdhc_host_read_1(struct sdhc_host *hp, int reg)
2433 {
2434 return HREAD1(hp, reg);
2435 }
2436
2437 uint16_t
2438 sdhc_host_read_2(struct sdhc_host *hp, int reg)
2439 {
2440 return HREAD2(hp, reg);
2441 }
2442
2443 uint32_t
2444 sdhc_host_read_4(struct sdhc_host *hp, int reg)
2445 {
2446 return HREAD4(hp, reg);
2447 }
2448
2449 void
2450 sdhc_host_write_1(struct sdhc_host *hp, int reg, uint8_t val)
2451 {
2452 HWRITE1(hp, reg, val);
2453 }
2454
2455 void
2456 sdhc_host_write_2(struct sdhc_host *hp, int reg, uint16_t val)
2457 {
2458 HWRITE2(hp, reg, val);
2459 }
2460
2461 void
2462 sdhc_host_write_4(struct sdhc_host *hp, int reg, uint32_t val)
2463 {
2464 HWRITE4(hp, reg, val);
2465 }
2466
2467 #ifdef SDHC_DEBUG
/*
 * Debug helper (SDHC_DEBUG only): print the interesting host
 * controller registers to the console.  Register widths follow the
 * SDHC register layout (1/2/4-byte accesses); POWER_CTL is skipped
 * on enhanced (eSDHC) controllers, which lack it.
 */
void
sdhc_dump_regs(struct sdhc_host *hp)
{

	printf("0x%02x PRESENT_STATE:    %x\n", SDHC_PRESENT_STATE,
	    HREAD4(hp, SDHC_PRESENT_STATE));
	if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED))
		printf("0x%02x POWER_CTL:        %x\n", SDHC_POWER_CTL,
		    HREAD1(hp, SDHC_POWER_CTL));
	printf("0x%02x NINTR_STATUS:     %x\n", SDHC_NINTR_STATUS,
	    HREAD2(hp, SDHC_NINTR_STATUS));
	printf("0x%02x EINTR_STATUS:     %x\n", SDHC_EINTR_STATUS,
	    HREAD2(hp, SDHC_EINTR_STATUS));
	printf("0x%02x NINTR_STATUS_EN:  %x\n", SDHC_NINTR_STATUS_EN,
	    HREAD2(hp, SDHC_NINTR_STATUS_EN));
	printf("0x%02x EINTR_STATUS_EN:  %x\n", SDHC_EINTR_STATUS_EN,
	    HREAD2(hp, SDHC_EINTR_STATUS_EN));
	printf("0x%02x NINTR_SIGNAL_EN:  %x\n", SDHC_NINTR_SIGNAL_EN,
	    HREAD2(hp, SDHC_NINTR_SIGNAL_EN));
	printf("0x%02x EINTR_SIGNAL_EN:  %x\n", SDHC_EINTR_SIGNAL_EN,
	    HREAD2(hp, SDHC_EINTR_SIGNAL_EN));
	printf("0x%02x CAPABILITIES:     %x\n", SDHC_CAPABILITIES,
	    HREAD4(hp, SDHC_CAPABILITIES));
	printf("0x%02x MAX_CAPABILITIES: %x\n", SDHC_MAX_CAPABILITIES,
	    HREAD4(hp, SDHC_MAX_CAPABILITIES));
}
2494 #endif
2495