     1 /* $NetBSD: sdhc.c,v 1.100.4.3 2021/12/03 19:31:19 martin Exp $ */
2 /* $OpenBSD: sdhc.c,v 1.25 2009/01/13 19:44:20 grange Exp $ */
3
4 /*
     5  * Copyright (c) 2006 Uwe Stuehler <uwe@openbsd.org>
6 *
7 * Permission to use, copy, modify, and distribute this software for any
8 * purpose with or without fee is hereby granted, provided that the above
9 * copyright notice and this permission notice appear in all copies.
10 *
11 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18 */
19
20 /*
21 * SD Host Controller driver based on the SD Host Controller Standard
    22  * Simplified Specification Version 1.00 (www.sdcard.org).
23 */
24
25 #include <sys/cdefs.h>
26 __KERNEL_RCSID(0, "$NetBSD: sdhc.c,v 1.100.4.3 2021/12/03 19:31:19 martin Exp $");
27
28 #ifdef _KERNEL_OPT
29 #include "opt_sdmmc.h"
30 #endif
31
32 #include <sys/param.h>
33 #include <sys/device.h>
34 #include <sys/kernel.h>
35 #include <sys/malloc.h>
36 #include <sys/systm.h>
37 #include <sys/mutex.h>
38 #include <sys/condvar.h>
39 #include <sys/atomic.h>
40
41 #include <dev/sdmmc/sdhcreg.h>
42 #include <dev/sdmmc/sdhcvar.h>
43 #include <dev/sdmmc/sdmmcchip.h>
44 #include <dev/sdmmc/sdmmcreg.h>
45 #include <dev/sdmmc/sdmmcvar.h>
46
47 #ifdef SDHC_DEBUG
48 int sdhcdebug = 1;
49 #define DPRINTF(n,s) do { if ((n) <= sdhcdebug) printf s; } while (0)
50 void sdhc_dump_regs(struct sdhc_host *);
51 #else
52 #define DPRINTF(n,s) do {} while (0)
53 #endif
54
55 #define SDHC_COMMAND_TIMEOUT hz
56 #define SDHC_BUFFER_TIMEOUT hz
57 #define SDHC_TRANSFER_TIMEOUT hz
58 #define SDHC_DMA_TIMEOUT (hz*3)
59 #define SDHC_TUNING_TIMEOUT hz
60
61 struct sdhc_host {
62 struct sdhc_softc *sc; /* host controller device */
63
64 bus_space_tag_t iot; /* host register set tag */
65 bus_space_handle_t ioh; /* host register set handle */
66 bus_size_t ios; /* host register space size */
67 bus_dma_tag_t dmat; /* host DMA tag */
68
69 device_t sdmmc; /* generic SD/MMC device */
70
71 u_int clkbase; /* base clock frequency in KHz */
72 int maxblklen; /* maximum block length */
73 uint32_t ocr; /* OCR value from capabilities */
74
75 uint8_t regs[14]; /* host controller state */
76
77 uint16_t intr_status; /* soft interrupt status */
78 uint16_t intr_error_status; /* soft error status */
79 kmutex_t intr_lock;
80 kcondvar_t intr_cv;
81
82 callout_t tuning_timer;
83 int tuning_timing;
84 u_int tuning_timer_count;
85 u_int tuning_timer_pending;
86
87 int specver; /* spec. version */
88
89 uint32_t flags; /* flags for this host */
90 #define SHF_USE_DMA 0x0001
91 #define SHF_USE_4BIT_MODE 0x0002
92 #define SHF_USE_8BIT_MODE 0x0004
93 #define SHF_MODE_DMAEN 0x0008 /* needs SDHC_DMA_ENABLE in mode */
94 #define SHF_USE_ADMA2_32 0x0010
95 #define SHF_USE_ADMA2_64 0x0020
96 #define SHF_USE_ADMA2_MASK 0x0030
97
98 bus_dmamap_t adma_map;
99 bus_dma_segment_t adma_segs[1];
100 void *adma2;
101 };
102
103 #define HDEVNAME(hp) (device_xname((hp)->sc->sc_dev))
104
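/*
 * Some controllers (SDHC_FLAG_32BIT_ACCESS) only tolerate 32-bit wide
 * register accesses.  For those, the helpers below emulate 8- and
 * 16-bit reads and writes by accessing the containing 32-bit word and
 * shifting/masking the requested sub-field.
 */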
105 static uint8_t
106 hread1(struct sdhc_host *hp, bus_size_t reg)
107 {
108
109 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_32BIT_ACCESS))
110 return bus_space_read_1(hp->iot, hp->ioh, reg);
111 return bus_space_read_4(hp->iot, hp->ioh, reg & -4) >> (8 * (reg & 3));
112 }
113
114 static uint16_t
115 hread2(struct sdhc_host *hp, bus_size_t reg)
116 {
117
118 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_32BIT_ACCESS))
119 return bus_space_read_2(hp->iot, hp->ioh, reg);
120 return bus_space_read_4(hp->iot, hp->ioh, reg & -4) >> (8 * (reg & 2));
121 }
122
123 #define HREAD1(hp, reg) hread1(hp, reg)
124 #define HREAD2(hp, reg) hread2(hp, reg)
125 #define HREAD4(hp, reg) \
126 (bus_space_read_4((hp)->iot, (hp)->ioh, (reg)))
127
128
129 static void
130 hwrite1(struct sdhc_host *hp, bus_size_t o, uint8_t val)
131 {
132
133 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_32BIT_ACCESS)) {
134 bus_space_write_1(hp->iot, hp->ioh, o, val);
135 } else {
136 const size_t shift = 8 * (o & 3);
137 o &= -4;
138 uint32_t tmp = bus_space_read_4(hp->iot, hp->ioh, o);
139 tmp = (val << shift) | (tmp & ~(0xffU << shift));
140 bus_space_write_4(hp->iot, hp->ioh, o, tmp);
141 }
142 }
143
144 static void
145 hwrite2(struct sdhc_host *hp, bus_size_t o, uint16_t val)
146 {
147
148 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_32BIT_ACCESS)) {
149 bus_space_write_2(hp->iot, hp->ioh, o, val);
150 } else {
151 const size_t shift = 8 * (o & 2);
152 o &= -4;
153 uint32_t tmp = bus_space_read_4(hp->iot, hp->ioh, o);
154 tmp = (val << shift) | (tmp & ~(0xffffU << shift));
155 bus_space_write_4(hp->iot, hp->ioh, o, tmp);
156 }
157 }
158
159 #define HWRITE1(hp, reg, val) hwrite1(hp, reg, val)
160 #define HWRITE2(hp, reg, val) hwrite2(hp, reg, val)
161 #define HWRITE4(hp, reg, val) \
162 bus_space_write_4((hp)->iot, (hp)->ioh, (reg), (val))
163
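/*
 * Read-modify-write helpers for setting and clearing register bits.
 * The "if (bits)" guard skips the bus access entirely when the mask
 * is zero.
 */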
164 #define HCLR1(hp, reg, bits) \
165 do if (bits) HWRITE1((hp), (reg), HREAD1((hp), (reg)) & ~(bits)); while (0)
166 #define HCLR2(hp, reg, bits) \
167 do if (bits) HWRITE2((hp), (reg), HREAD2((hp), (reg)) & ~(bits)); while (0)
168 #define HCLR4(hp, reg, bits) \
169 do if (bits) HWRITE4((hp), (reg), HREAD4((hp), (reg)) & ~(bits)); while (0)
170 #define HSET1(hp, reg, bits) \
171 do if (bits) HWRITE1((hp), (reg), HREAD1((hp), (reg)) | (bits)); while (0)
172 #define HSET2(hp, reg, bits) \
173 do if (bits) HWRITE2((hp), (reg), HREAD2((hp), (reg)) | (bits)); while (0)
174 #define HSET4(hp, reg, bits) \
175 do if (bits) HWRITE4((hp), (reg), HREAD4((hp), (reg)) | (bits)); while (0)
176
177 static int sdhc_host_reset(sdmmc_chipset_handle_t);
178 static int sdhc_host_reset1(sdmmc_chipset_handle_t);
179 static uint32_t sdhc_host_ocr(sdmmc_chipset_handle_t);
180 static int sdhc_host_maxblklen(sdmmc_chipset_handle_t);
181 static int sdhc_card_detect(sdmmc_chipset_handle_t);
182 static int sdhc_write_protect(sdmmc_chipset_handle_t);
183 static int sdhc_bus_power(sdmmc_chipset_handle_t, uint32_t);
184 static int sdhc_bus_clock_ddr(sdmmc_chipset_handle_t, int, bool);
185 static int sdhc_bus_width(sdmmc_chipset_handle_t, int);
186 static int sdhc_bus_rod(sdmmc_chipset_handle_t, int);
187 static void sdhc_card_enable_intr(sdmmc_chipset_handle_t, int);
188 static void sdhc_card_intr_ack(sdmmc_chipset_handle_t);
189 static void sdhc_exec_command(sdmmc_chipset_handle_t,
190 struct sdmmc_command *);
191 static int sdhc_signal_voltage(sdmmc_chipset_handle_t, int);
192 static int sdhc_execute_tuning1(struct sdhc_host *, int);
193 static int sdhc_execute_tuning(sdmmc_chipset_handle_t, int);
194 static void sdhc_tuning_timer(void *);
195 static void sdhc_hw_reset(sdmmc_chipset_handle_t);
196 static int sdhc_start_command(struct sdhc_host *, struct sdmmc_command *);
197 static int sdhc_wait_state(struct sdhc_host *, uint32_t, uint32_t);
198 static int sdhc_soft_reset(struct sdhc_host *, int);
199 static int sdhc_wait_intr(struct sdhc_host *, int, int, bool);
200 static void sdhc_transfer_data(struct sdhc_host *, struct sdmmc_command *);
201 static int sdhc_transfer_data_dma(struct sdhc_host *, struct sdmmc_command *);
202 static int sdhc_transfer_data_pio(struct sdhc_host *, struct sdmmc_command *);
203 static void sdhc_read_data_pio(struct sdhc_host *, uint8_t *, u_int);
204 static void sdhc_write_data_pio(struct sdhc_host *, uint8_t *, u_int);
205 static void esdhc_read_data_pio(struct sdhc_host *, uint8_t *, u_int);
206 static void esdhc_write_data_pio(struct sdhc_host *, uint8_t *, u_int);
207
208 static struct sdmmc_chip_functions sdhc_functions = {
209 /* host controller reset */
210 .host_reset = sdhc_host_reset,
211
212 /* host controller capabilities */
213 .host_ocr = sdhc_host_ocr,
214 .host_maxblklen = sdhc_host_maxblklen,
215
216 /* card detection */
217 .card_detect = sdhc_card_detect,
218
219 /* write protect */
220 .write_protect = sdhc_write_protect,
221
   222 	/* bus power, clock frequency, width and ROD (open-drain/push-pull) */
223 .bus_power = sdhc_bus_power,
224 .bus_clock = NULL, /* see sdhc_bus_clock_ddr */
225 .bus_width = sdhc_bus_width,
226 .bus_rod = sdhc_bus_rod,
227
228 /* command execution */
229 .exec_command = sdhc_exec_command,
230
231 /* card interrupt */
232 .card_enable_intr = sdhc_card_enable_intr,
233 .card_intr_ack = sdhc_card_intr_ack,
234
235 /* UHS functions */
236 .signal_voltage = sdhc_signal_voltage,
237 .bus_clock_ddr = sdhc_bus_clock_ddr,
238 .execute_tuning = sdhc_execute_tuning,
239 .hw_reset = sdhc_hw_reset,
240 };
241
242 static int
243 sdhc_cfprint(void *aux, const char *pnp)
244 {
245 const struct sdmmcbus_attach_args * const saa = aux;
246 const struct sdhc_host * const hp = saa->saa_sch;
247
248 if (pnp) {
249 aprint_normal("sdmmc at %s", pnp);
250 }
251 for (size_t host = 0; host < hp->sc->sc_nhosts; host++) {
252 if (hp->sc->sc_host[host] == hp) {
253 aprint_normal(" slot %zu", host);
254 }
255 }
256
257 return UNCONF;
258 }
259
260 /*
   261  * Called by the attachment driver. For each SD card slot there is one SD
262 * host controller standard register set. (1.3)
263 */
264 int
265 sdhc_host_found(struct sdhc_softc *sc, bus_space_tag_t iot,
266 bus_space_handle_t ioh, bus_size_t iosize)
267 {
268 struct sdmmcbus_attach_args saa;
269 struct sdhc_host *hp;
270 uint32_t caps, caps2;
271 uint16_t sdhcver;
272 int error;
273
274 /* Allocate one more host structure. */
275 hp = malloc(sizeof(struct sdhc_host), M_DEVBUF, M_WAITOK|M_ZERO);
276 if (hp == NULL) {
277 aprint_error_dev(sc->sc_dev,
278 "couldn't alloc memory (sdhc host)\n");
279 goto err1;
280 }
281 sc->sc_host[sc->sc_nhosts++] = hp;
282
283 /* Fill in the new host structure. */
284 hp->sc = sc;
285 hp->iot = iot;
286 hp->ioh = ioh;
287 hp->ios = iosize;
288 hp->dmat = sc->sc_dmat;
289
290 mutex_init(&hp->intr_lock, MUTEX_DEFAULT, IPL_SDMMC);
291 cv_init(&hp->intr_cv, "sdhcintr");
292 callout_init(&hp->tuning_timer, CALLOUT_MPSAFE);
293 callout_setfunc(&hp->tuning_timer, sdhc_tuning_timer, hp);
294
295 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_USDHC)) {
296 sdhcver = SDHC_SPEC_VERS_300 << SDHC_SPEC_VERS_SHIFT;
297 } else if (ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED)) {
298 sdhcver = HREAD4(hp, SDHC_ESDHC_HOST_CTL_VERSION);
299 } else if (iosize <= SDHC_HOST_CTL_VERSION) {
300 sdhcver = SDHC_SPEC_NOVERS << SDHC_SPEC_VERS_SHIFT;
301 } else {
302 sdhcver = HREAD2(hp, SDHC_HOST_CTL_VERSION);
303 }
304 aprint_normal_dev(sc->sc_dev, "SDHC ");
305 hp->specver = SDHC_SPEC_VERSION(sdhcver);
306 switch (SDHC_SPEC_VERSION(sdhcver)) {
307 case SDHC_SPEC_VERS_100:
308 aprint_normal("1.0");
309 break;
310 case SDHC_SPEC_VERS_200:
311 aprint_normal("2.0");
312 break;
313 case SDHC_SPEC_VERS_300:
314 aprint_normal("3.0");
315 break;
316 case SDHC_SPEC_VERS_400:
317 aprint_normal("4.0");
318 break;
319 case SDHC_SPEC_VERS_410:
320 aprint_normal("4.1");
321 break;
322 case SDHC_SPEC_VERS_420:
323 aprint_normal("4.2");
324 break;
325 case SDHC_SPEC_NOVERS:
326 hp->specver = -1;
327 aprint_normal("NO-VERS");
328 break;
329 default:
330 aprint_normal("unknown version(0x%x)",
331 SDHC_SPEC_VERSION(sdhcver));
332 break;
333 }
334 if (SDHC_SPEC_VERSION(sdhcver) != SDHC_SPEC_NOVERS)
335 aprint_normal(", rev %u", SDHC_VENDOR_VERSION(sdhcver));
336
337 /*
338 * Reset the host controller and enable interrupts.
339 */
340 (void)sdhc_host_reset(hp);
341
342 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_USDHC)) {
343 /* init uSDHC registers */
344 HWRITE4(hp, SDHC_MMC_BOOT, 0);
345 HWRITE4(hp, SDHC_HOST_CTL, SDHC_USDHC_BURST_LEN_EN |
346 SDHC_USDHC_HOST_CTL_RESV23 | SDHC_USDHC_EMODE_LE);
347 HWRITE4(hp, SDHC_WATERMARK_LEVEL,
348 (0x10 << SDHC_WATERMARK_WR_BRST_SHIFT) |
349 (0x40 << SDHC_WATERMARK_WRITE_SHIFT) |
350 (0x10 << SDHC_WATERMARK_RD_BRST_SHIFT) |
351 (0x40 << SDHC_WATERMARK_READ_SHIFT));
352 HSET4(hp, SDHC_VEND_SPEC,
353 SDHC_VEND_SPEC_MBO |
354 SDHC_VEND_SPEC_CARD_CLK_SOFT_EN |
355 SDHC_VEND_SPEC_IPG_PERCLK_SOFT_EN |
356 SDHC_VEND_SPEC_HCLK_SOFT_EN |
357 SDHC_VEND_SPEC_IPG_CLK_SOFT_EN |
358 SDHC_VEND_SPEC_AC12_WR_CHKBUSY_EN |
359 SDHC_VEND_SPEC_FRC_SDCLK_ON);
360 }
361
362 /* Determine host capabilities. */
363 if (ISSET(sc->sc_flags, SDHC_FLAG_HOSTCAPS)) {
364 caps = sc->sc_caps;
365 caps2 = sc->sc_caps2;
366 } else if (ISSET(hp->sc->sc_flags, SDHC_FLAG_USDHC)) {
   367 		/* the uSDHC capability register is a little bit different */
368 caps = HREAD4(hp, SDHC_CAPABILITIES);
369 caps |= SDHC_8BIT_SUPP;
370 if (caps & SDHC_ADMA1_SUPP)
371 caps |= SDHC_ADMA2_SUPP;
372 sc->sc_caps = caps;
373 /* uSDHC has no SDHC_CAPABILITIES2 register */
374 caps2 = sc->sc_caps2 = SDHC_SDR50_SUPP | SDHC_DDR50_SUPP;
375 } else {
376 caps = sc->sc_caps = HREAD4(hp, SDHC_CAPABILITIES);
377 if (hp->specver >= SDHC_SPEC_VERS_300) {
378 caps2 = sc->sc_caps2 = HREAD4(hp, SDHC_CAPABILITIES2);
379 } else {
380 caps2 = sc->sc_caps2 = 0;
381 }
382 }
383
384 const u_int retuning_mode = (caps2 >> SDHC_RETUNING_MODES_SHIFT) &
385 SDHC_RETUNING_MODES_MASK;
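	/*
	 * For re-tuning mode 1, the capabilities encode the re-tuning
	 * interval as 2^(n-1) seconds; 0 means no timer and 0xf means
	 * the interval must come from another source, which we also
	 * treat as "no timer" here.
	 */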
386 if (retuning_mode == SDHC_RETUNING_MODE_1) {
387 hp->tuning_timer_count = (caps2 >> SDHC_TIMER_COUNT_SHIFT) &
388 SDHC_TIMER_COUNT_MASK;
389 if (hp->tuning_timer_count == 0xf)
390 hp->tuning_timer_count = 0;
391 if (hp->tuning_timer_count)
392 hp->tuning_timer_count =
393 1 << (hp->tuning_timer_count - 1);
394 }
395
396 /*
397 * Use DMA if the host system and the controller support it.
   398 	 * Supports an integrated or external DMA engine, with or without
   399 	 * SDHC_DMA_ENABLE in the transfer mode register.
400 */
401 if (ISSET(sc->sc_flags, SDHC_FLAG_FORCE_DMA) ||
   402 	    (ISSET(sc->sc_flags, SDHC_FLAG_USE_DMA) &&
   403 	     ISSET(caps, SDHC_DMA_SUPPORT))) {
404 SET(hp->flags, SHF_USE_DMA);
405
406 if (ISSET(sc->sc_flags, SDHC_FLAG_USE_ADMA2) &&
407 ISSET(caps, SDHC_ADMA2_SUPP)) {
408 SET(hp->flags, SHF_MODE_DMAEN);
409 /*
410 * 64-bit mode was present in the 2.00 spec, removed
411 * from 3.00, and re-added in 4.00 with a different
412 * descriptor layout. We only support 2.00 and 3.00
413 * descriptors for now.
414 */
415 if (hp->specver == SDHC_SPEC_VERS_200 &&
416 ISSET(caps, SDHC_64BIT_SYS_BUS)) {
417 SET(hp->flags, SHF_USE_ADMA2_64);
418 aprint_normal(", 64-bit ADMA2");
419 } else {
420 SET(hp->flags, SHF_USE_ADMA2_32);
421 aprint_normal(", 32-bit ADMA2");
422 }
423 } else {
424 if (!ISSET(sc->sc_flags, SDHC_FLAG_EXTERNAL_DMA) ||
425 ISSET(sc->sc_flags, SDHC_FLAG_EXTDMA_DMAEN))
426 SET(hp->flags, SHF_MODE_DMAEN);
427 if (sc->sc_vendor_transfer_data_dma) {
428 aprint_normal(", platform DMA");
429 } else {
430 aprint_normal(", SDMA");
431 }
432 }
433 } else {
434 aprint_normal(", PIO");
435 }
436
437 /*
438 * Determine the base clock frequency. (2.2.24)
439 */
440 if (hp->specver >= SDHC_SPEC_VERS_300) {
441 hp->clkbase = SDHC_BASE_V3_FREQ_KHZ(caps);
442 } else {
443 hp->clkbase = SDHC_BASE_FREQ_KHZ(caps);
444 }
445 if (hp->clkbase == 0 ||
446 ISSET(sc->sc_flags, SDHC_FLAG_NO_CLKBASE)) {
447 if (sc->sc_clkbase == 0) {
448 /* The attachment driver must tell us. */
449 aprint_error_dev(sc->sc_dev,
450 "unknown base clock frequency\n");
451 goto err;
452 }
453 hp->clkbase = sc->sc_clkbase;
454 }
455 if (hp->clkbase < 10000 || hp->clkbase > 10000 * 256) {
456 /* SDHC 1.0 supports only 10-63 MHz. */
457 aprint_error_dev(sc->sc_dev,
458 "base clock frequency out of range: %u MHz\n",
459 hp->clkbase / 1000);
460 goto err;
461 }
462 aprint_normal(", %u kHz", hp->clkbase);
463
464 /*
465 * XXX Set the data timeout counter value according to
466 * capabilities. (2.2.15)
467 */
468 HWRITE1(hp, SDHC_TIMEOUT_CTL, SDHC_TIMEOUT_MAX);
469 #if 1
470 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED))
471 HWRITE4(hp, SDHC_NINTR_STATUS, SDHC_CMD_TIMEOUT_ERROR << 16);
472 #endif
473
474 if (ISSET(caps, SDHC_EMBEDDED_SLOT))
475 aprint_normal(", embedded slot");
476
477 /*
478 * Determine SD bus voltage levels supported by the controller.
479 */
480 aprint_normal(",");
481 if (ISSET(caps, SDHC_HIGH_SPEED_SUPP)) {
482 SET(hp->ocr, MMC_OCR_HCS);
483 aprint_normal(" HS");
484 }
485 if (ISSET(caps2, SDHC_SDR50_SUPP)) {
486 SET(hp->ocr, MMC_OCR_S18A);
487 aprint_normal(" SDR50");
488 }
489 if (ISSET(caps2, SDHC_DDR50_SUPP)) {
490 SET(hp->ocr, MMC_OCR_S18A);
491 aprint_normal(" DDR50");
492 }
493 if (ISSET(caps2, SDHC_SDR104_SUPP)) {
494 SET(hp->ocr, MMC_OCR_S18A);
495 aprint_normal(" SDR104 HS200");
496 }
497 if (ISSET(caps, SDHC_VOLTAGE_SUPP_1_8V)) {
498 SET(hp->ocr, MMC_OCR_1_65V_1_95V);
499 aprint_normal(" 1.8V");
500 }
501 if (ISSET(caps, SDHC_VOLTAGE_SUPP_3_0V)) {
502 SET(hp->ocr, MMC_OCR_2_9V_3_0V | MMC_OCR_3_0V_3_1V);
503 aprint_normal(" 3.0V");
504 }
505 if (ISSET(caps, SDHC_VOLTAGE_SUPP_3_3V)) {
506 SET(hp->ocr, MMC_OCR_3_2V_3_3V | MMC_OCR_3_3V_3_4V);
507 aprint_normal(" 3.3V");
508 }
509 if (hp->specver >= SDHC_SPEC_VERS_300) {
510 aprint_normal(", re-tuning mode %d", retuning_mode + 1);
511 if (hp->tuning_timer_count)
512 aprint_normal(" (%us timer)", hp->tuning_timer_count);
513 }
514
515 /*
516 * Determine the maximum block length supported by the host
517 * controller. (2.2.24)
518 */
519 switch((caps >> SDHC_MAX_BLK_LEN_SHIFT) & SDHC_MAX_BLK_LEN_MASK) {
520 case SDHC_MAX_BLK_LEN_512:
521 hp->maxblklen = 512;
522 break;
523
524 case SDHC_MAX_BLK_LEN_1024:
525 hp->maxblklen = 1024;
526 break;
527
528 case SDHC_MAX_BLK_LEN_2048:
529 hp->maxblklen = 2048;
530 break;
531
532 case SDHC_MAX_BLK_LEN_4096:
533 hp->maxblklen = 4096;
534 break;
535
536 default:
537 aprint_error_dev(sc->sc_dev, "max block length unknown\n");
538 goto err;
539 }
540 aprint_normal(", %u byte blocks", hp->maxblklen);
541 aprint_normal("\n");
542
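	/*
	 * For ADMA2, preallocate a single page of descriptor memory up
	 * front; sdhc_start_command() later fills it in with one
	 * descriptor per DMA segment of a transfer.
	 */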
543 if (ISSET(hp->flags, SHF_USE_ADMA2_MASK)) {
544 int rseg;
545
546 /* Allocate ADMA2 descriptor memory */
547 error = bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE, PAGE_SIZE,
548 PAGE_SIZE, hp->adma_segs, 1, &rseg, BUS_DMA_WAITOK);
549 if (error) {
550 aprint_error_dev(sc->sc_dev,
551 "ADMA2 dmamem_alloc failed (%d)\n", error);
552 goto adma_done;
553 }
554 error = bus_dmamem_map(sc->sc_dmat, hp->adma_segs, rseg,
555 PAGE_SIZE, (void **)&hp->adma2, BUS_DMA_WAITOK);
556 if (error) {
557 aprint_error_dev(sc->sc_dev,
558 "ADMA2 dmamem_map failed (%d)\n", error);
559 goto adma_done;
560 }
561 error = bus_dmamap_create(sc->sc_dmat, PAGE_SIZE, 1, PAGE_SIZE,
562 0, BUS_DMA_WAITOK, &hp->adma_map);
563 if (error) {
564 aprint_error_dev(sc->sc_dev,
565 "ADMA2 dmamap_create failed (%d)\n", error);
566 goto adma_done;
567 }
568 error = bus_dmamap_load(sc->sc_dmat, hp->adma_map,
569 hp->adma2, PAGE_SIZE, NULL,
570 BUS_DMA_WAITOK|BUS_DMA_WRITE);
571 if (error) {
572 aprint_error_dev(sc->sc_dev,
573 "ADMA2 dmamap_load failed (%d)\n", error);
574 goto adma_done;
575 }
576
577 memset(hp->adma2, 0, PAGE_SIZE);
578
579 adma_done:
580 if (error)
581 CLR(hp->flags, SHF_USE_ADMA2_MASK);
582 }
583
584 /*
585 * Attach the generic SD/MMC bus driver. (The bus driver must
586 * not invoke any chipset functions before it is attached.)
587 */
588 memset(&saa, 0, sizeof(saa));
589 saa.saa_busname = "sdmmc";
590 saa.saa_sct = &sdhc_functions;
591 saa.saa_sch = hp;
592 saa.saa_dmat = hp->dmat;
593 saa.saa_clkmax = hp->clkbase;
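	/*
	 * The minimum SD clock follows from the largest division ratio
	 * the controller's clock divider scheme supports; see
	 * sdhc_clock_divisor() for the corresponding divisor encodings.
	 */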
594 if (ISSET(sc->sc_flags, SDHC_FLAG_HAVE_CGM))
595 saa.saa_clkmin = hp->clkbase / 256 / 2046;
596 else if (ISSET(sc->sc_flags, SDHC_FLAG_HAVE_DVS))
597 saa.saa_clkmin = hp->clkbase / 256 / 16;
598 else if (hp->sc->sc_clkmsk != 0)
599 saa.saa_clkmin = hp->clkbase / (hp->sc->sc_clkmsk >>
600 (ffs(hp->sc->sc_clkmsk) - 1));
601 else if (hp->specver >= SDHC_SPEC_VERS_300)
602 saa.saa_clkmin = hp->clkbase / 0x3ff;
603 else
604 saa.saa_clkmin = hp->clkbase / 256;
605 if (!ISSET(sc->sc_flags, SDHC_FLAG_NO_AUTO_STOP))
606 saa.saa_caps |= SMC_CAPS_AUTO_STOP;
607 saa.saa_caps |= SMC_CAPS_4BIT_MODE;
608 if (ISSET(sc->sc_flags, SDHC_FLAG_8BIT_MODE))
609 saa.saa_caps |= SMC_CAPS_8BIT_MODE;
610 if (ISSET(caps, SDHC_HIGH_SPEED_SUPP))
611 saa.saa_caps |= SMC_CAPS_SD_HIGHSPEED;
612 if (ISSET(caps2, SDHC_SDR104_SUPP))
613 saa.saa_caps |= SMC_CAPS_UHS_SDR104 |
614 SMC_CAPS_UHS_SDR50 |
615 SMC_CAPS_MMC_HS200;
616 if (ISSET(caps2, SDHC_SDR50_SUPP))
617 saa.saa_caps |= SMC_CAPS_UHS_SDR50;
618 if (ISSET(caps2, SDHC_DDR50_SUPP))
619 saa.saa_caps |= SMC_CAPS_UHS_DDR50;
620 if (ISSET(hp->flags, SHF_USE_DMA)) {
621 saa.saa_caps |= SMC_CAPS_DMA;
622 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED))
623 saa.saa_caps |= SMC_CAPS_MULTI_SEG_DMA;
624 }
625 if (ISSET(sc->sc_flags, SDHC_FLAG_SINGLE_ONLY))
626 saa.saa_caps |= SMC_CAPS_SINGLE_ONLY;
627 if (ISSET(sc->sc_flags, SDHC_FLAG_POLL_CARD_DET))
628 saa.saa_caps |= SMC_CAPS_POLL_CARD_DET;
629 hp->sdmmc = config_found(sc->sc_dev, &saa, sdhc_cfprint);
630
631 return 0;
632
633 err:
634 callout_destroy(&hp->tuning_timer);
635 cv_destroy(&hp->intr_cv);
636 mutex_destroy(&hp->intr_lock);
637 free(hp, M_DEVBUF);
638 sc->sc_host[--sc->sc_nhosts] = NULL;
639 err1:
640 return 1;
641 }
642
643 int
644 sdhc_detach(struct sdhc_softc *sc, int flags)
645 {
646 struct sdhc_host *hp;
647 int rv = 0;
648
649 for (size_t n = 0; n < sc->sc_nhosts; n++) {
650 hp = sc->sc_host[n];
651 if (hp == NULL)
652 continue;
653 if (hp->sdmmc != NULL) {
654 rv = config_detach(hp->sdmmc, flags);
655 if (rv)
656 break;
657 hp->sdmmc = NULL;
658 }
659 /* disable interrupts */
660 if ((flags & DETACH_FORCE) == 0) {
661 mutex_enter(&hp->intr_lock);
662 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_32BIT_ACCESS)) {
663 HWRITE4(hp, SDHC_NINTR_SIGNAL_EN, 0);
664 } else {
665 HWRITE2(hp, SDHC_NINTR_SIGNAL_EN, 0);
666 }
667 sdhc_soft_reset(hp, SDHC_RESET_ALL);
668 mutex_exit(&hp->intr_lock);
669 }
670 callout_halt(&hp->tuning_timer, NULL);
671 callout_destroy(&hp->tuning_timer);
672 cv_destroy(&hp->intr_cv);
673 mutex_destroy(&hp->intr_lock);
674 if (hp->ios > 0) {
675 bus_space_unmap(hp->iot, hp->ioh, hp->ios);
676 hp->ios = 0;
677 }
678 if (ISSET(hp->flags, SHF_USE_ADMA2_MASK)) {
679 bus_dmamap_unload(sc->sc_dmat, hp->adma_map);
680 bus_dmamap_destroy(sc->sc_dmat, hp->adma_map);
681 bus_dmamem_unmap(sc->sc_dmat, hp->adma2, PAGE_SIZE);
682 bus_dmamem_free(sc->sc_dmat, hp->adma_segs, 1);
683 }
684 free(hp, M_DEVBUF);
685 sc->sc_host[n] = NULL;
686 }
687
688 return rv;
689 }
690
691 bool
692 sdhc_suspend(device_t dev, const pmf_qual_t *qual)
693 {
694 struct sdhc_softc *sc = device_private(dev);
695 struct sdhc_host *hp;
696 size_t i;
697
698 /* XXX poll for command completion or suspend command
699 * in progress */
700
701 /* Save the host controller state. */
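	/*
	 * Note that sizeof hp->regs is not a multiple of 4, so on
	 * 32-bit-only controllers the last word read below only
	 * contributes two bytes of saved state.
	 */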
702 for (size_t n = 0; n < sc->sc_nhosts; n++) {
703 hp = sc->sc_host[n];
704 if (ISSET(sc->sc_flags, SDHC_FLAG_32BIT_ACCESS)) {
705 for (i = 0; i < sizeof hp->regs; i += 4) {
706 uint32_t v = HREAD4(hp, i);
707 hp->regs[i + 0] = (v >> 0);
708 hp->regs[i + 1] = (v >> 8);
709 if (i + 3 < sizeof hp->regs) {
710 hp->regs[i + 2] = (v >> 16);
711 hp->regs[i + 3] = (v >> 24);
712 }
713 }
714 } else {
715 for (i = 0; i < sizeof hp->regs; i++) {
716 hp->regs[i] = HREAD1(hp, i);
717 }
718 }
719 }
720 return true;
721 }
722
723 bool
724 sdhc_resume(device_t dev, const pmf_qual_t *qual)
725 {
726 struct sdhc_softc *sc = device_private(dev);
727 struct sdhc_host *hp;
728 size_t i;
729
730 /* Restore the host controller state. */
731 for (size_t n = 0; n < sc->sc_nhosts; n++) {
732 hp = sc->sc_host[n];
733 (void)sdhc_host_reset(hp);
734 if (ISSET(sc->sc_flags, SDHC_FLAG_32BIT_ACCESS)) {
735 for (i = 0; i < sizeof hp->regs; i += 4) {
736 if (i + 3 < sizeof hp->regs) {
737 HWRITE4(hp, i,
738 (hp->regs[i + 0] << 0)
739 | (hp->regs[i + 1] << 8)
740 | (hp->regs[i + 2] << 16)
741 | (hp->regs[i + 3] << 24));
742 } else {
743 HWRITE4(hp, i,
744 (hp->regs[i + 0] << 0)
745 | (hp->regs[i + 1] << 8));
746 }
747 }
748 } else {
749 for (i = 0; i < sizeof hp->regs; i++) {
750 HWRITE1(hp, i, hp->regs[i]);
751 }
752 }
753 }
754 return true;
755 }
756
757 bool
758 sdhc_shutdown(device_t dev, int flags)
759 {
760 struct sdhc_softc *sc = device_private(dev);
761 struct sdhc_host *hp;
762
763 /* XXX chip locks up if we don't disable it before reboot. */
764 for (size_t i = 0; i < sc->sc_nhosts; i++) {
765 hp = sc->sc_host[i];
766 (void)sdhc_host_reset(hp);
767 }
768 return true;
769 }
770
771 /*
772 * Reset the host controller. Called during initialization, when
773 * cards are removed, upon resume, and during error recovery.
774 */
775 static int
776 sdhc_host_reset1(sdmmc_chipset_handle_t sch)
777 {
778 struct sdhc_host *hp = (struct sdhc_host *)sch;
779 uint32_t sdhcimask;
780 int error;
781
782 KASSERT(mutex_owned(&hp->intr_lock));
783
784 /* Disable all interrupts. */
785 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_32BIT_ACCESS)) {
786 HWRITE4(hp, SDHC_NINTR_SIGNAL_EN, 0);
787 } else {
788 HWRITE2(hp, SDHC_NINTR_SIGNAL_EN, 0);
789 }
790
791 /*
792 * Reset the entire host controller and wait up to 100ms for
793 * the controller to clear the reset bit.
794 */
795 error = sdhc_soft_reset(hp, SDHC_RESET_ALL);
796 if (error)
797 goto out;
798
799 /* Set data timeout counter value to max for now. */
800 HWRITE1(hp, SDHC_TIMEOUT_CTL, SDHC_TIMEOUT_MAX);
801 #if 1
802 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED))
803 HWRITE4(hp, SDHC_NINTR_STATUS, SDHC_CMD_TIMEOUT_ERROR << 16);
804 #endif
805
806 /* Enable interrupts. */
807 sdhcimask = SDHC_CARD_REMOVAL | SDHC_CARD_INSERTION |
808 SDHC_BUFFER_READ_READY | SDHC_BUFFER_WRITE_READY |
809 SDHC_DMA_INTERRUPT | SDHC_BLOCK_GAP_EVENT |
810 SDHC_TRANSFER_COMPLETE | SDHC_COMMAND_COMPLETE;
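	/*
	 * Enable status reporting for all of the above events.  The
	 * signal-enable mask is derived from the status-enable mask:
	 * error interrupts are signalled according to
	 * SDHC_EINTR_SIGNAL_MASK, and the buffer read/write ready bits
	 * are left out so that they do not raise interrupts by default.
	 */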
811 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_32BIT_ACCESS)) {
812 sdhcimask |= SDHC_EINTR_STATUS_MASK << 16;
813 HWRITE4(hp, SDHC_NINTR_STATUS_EN, sdhcimask);
814 sdhcimask ^=
815 (SDHC_EINTR_STATUS_MASK ^ SDHC_EINTR_SIGNAL_MASK) << 16;
816 sdhcimask ^= SDHC_BUFFER_READ_READY ^ SDHC_BUFFER_WRITE_READY;
817 HWRITE4(hp, SDHC_NINTR_SIGNAL_EN, sdhcimask);
818 } else {
819 HWRITE2(hp, SDHC_NINTR_STATUS_EN, sdhcimask);
820 HWRITE2(hp, SDHC_EINTR_STATUS_EN, SDHC_EINTR_STATUS_MASK);
821 sdhcimask ^= SDHC_BUFFER_READ_READY ^ SDHC_BUFFER_WRITE_READY;
822 HWRITE2(hp, SDHC_NINTR_SIGNAL_EN, sdhcimask);
823 HWRITE2(hp, SDHC_EINTR_SIGNAL_EN, SDHC_EINTR_SIGNAL_MASK);
824 }
825
826 out:
827 return error;
828 }
829
830 static int
831 sdhc_host_reset(sdmmc_chipset_handle_t sch)
832 {
833 struct sdhc_host *hp = (struct sdhc_host *)sch;
834 int error;
835
836 mutex_enter(&hp->intr_lock);
837 error = sdhc_host_reset1(sch);
838 mutex_exit(&hp->intr_lock);
839
840 return error;
841 }
842
843 static uint32_t
844 sdhc_host_ocr(sdmmc_chipset_handle_t sch)
845 {
846 struct sdhc_host *hp = (struct sdhc_host *)sch;
847
848 return hp->ocr;
849 }
850
851 static int
852 sdhc_host_maxblklen(sdmmc_chipset_handle_t sch)
853 {
854 struct sdhc_host *hp = (struct sdhc_host *)sch;
855
856 return hp->maxblklen;
857 }
858
859 /*
860 * Return non-zero if the card is currently inserted.
861 */
862 static int
863 sdhc_card_detect(sdmmc_chipset_handle_t sch)
864 {
865 struct sdhc_host *hp = (struct sdhc_host *)sch;
866 int r;
867
868 if (hp->sc->sc_vendor_card_detect)
869 return (*hp->sc->sc_vendor_card_detect)(hp->sc);
870
871 r = ISSET(HREAD4(hp, SDHC_PRESENT_STATE), SDHC_CARD_INSERTED);
872
873 return r ? 1 : 0;
874 }
875
876 /*
877 * Return non-zero if the card is currently write-protected.
878 */
879 static int
880 sdhc_write_protect(sdmmc_chipset_handle_t sch)
881 {
882 struct sdhc_host *hp = (struct sdhc_host *)sch;
883 int r;
884
885 if (hp->sc->sc_vendor_write_protect)
886 return (*hp->sc->sc_vendor_write_protect)(hp->sc);
887
888 r = ISSET(HREAD4(hp, SDHC_PRESENT_STATE), SDHC_WRITE_PROTECT_SWITCH);
889
890 return r ? 0 : 1;
891 }
892
893 /*
894 * Set or change SD bus voltage and enable or disable SD bus power.
895 * Return zero on success.
896 */
897 static int
898 sdhc_bus_power(sdmmc_chipset_handle_t sch, uint32_t ocr)
899 {
900 struct sdhc_host *hp = (struct sdhc_host *)sch;
901 uint8_t vdd;
902 int error = 0;
903 const uint32_t pcmask =
904 ~(SDHC_BUS_POWER | (SDHC_VOLTAGE_MASK << SDHC_VOLTAGE_SHIFT));
905
906 mutex_enter(&hp->intr_lock);
907
908 /*
909 * Disable bus power before voltage change.
910 */
911 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_32BIT_ACCESS)
912 && !ISSET(hp->sc->sc_flags, SDHC_FLAG_NO_PWR0))
913 HWRITE1(hp, SDHC_POWER_CTL, 0);
914
915 /* If power is disabled, reset the host and return now. */
916 if (ocr == 0) {
917 (void)sdhc_host_reset1(hp);
918 callout_halt(&hp->tuning_timer, &hp->intr_lock);
919 goto out;
920 }
921
922 /*
923 * Select the lowest voltage according to capabilities.
924 */
925 ocr &= hp->ocr;
926 if (ISSET(ocr, MMC_OCR_1_65V_1_95V)) {
927 vdd = SDHC_VOLTAGE_1_8V;
928 } else if (ISSET(ocr, MMC_OCR_2_9V_3_0V|MMC_OCR_3_0V_3_1V)) {
929 vdd = SDHC_VOLTAGE_3_0V;
930 } else if (ISSET(ocr, MMC_OCR_3_2V_3_3V|MMC_OCR_3_3V_3_4V)) {
931 vdd = SDHC_VOLTAGE_3_3V;
932 } else {
933 /* Unsupported voltage level requested. */
934 error = EINVAL;
935 goto out;
936 }
937
938 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED)) {
939 /*
940 * Enable bus power. Wait at least 1 ms (or 74 clocks) plus
941 * voltage ramp until power rises.
942 */
943
944 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_SINGLE_POWER_WRITE)) {
945 HWRITE1(hp, SDHC_POWER_CTL,
946 (vdd << SDHC_VOLTAGE_SHIFT) | SDHC_BUS_POWER);
947 } else {
948 HWRITE1(hp, SDHC_POWER_CTL,
949 HREAD1(hp, SDHC_POWER_CTL) & pcmask);
950 sdmmc_delay(1);
951 HWRITE1(hp, SDHC_POWER_CTL,
952 (vdd << SDHC_VOLTAGE_SHIFT));
953 sdmmc_delay(1);
954 HSET1(hp, SDHC_POWER_CTL, SDHC_BUS_POWER);
955 sdmmc_delay(10000);
956 }
957
958 /*
   959 		 * The host system may not power the bus, e.g. because the
   960 		 * battery is low.  In that case, the host controller should
   961 		 * clear the bus power bit.
962 */
963 if (!ISSET(HREAD1(hp, SDHC_POWER_CTL), SDHC_BUS_POWER)) {
964 error = ENXIO;
965 goto out;
966 }
967 }
968
969 out:
970 mutex_exit(&hp->intr_lock);
971
972 return error;
973 }
974
975 /*
976 * Return the smallest possible base clock frequency divisor value
977 * for the CLOCK_CTL register to produce `freq' (KHz).
978 */
979 static bool
980 sdhc_clock_divisor(struct sdhc_host *hp, u_int freq, u_int *divp)
981 {
982 u_int div;
983
984 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_HAVE_CGM)) {
985 for (div = hp->clkbase / freq; div <= 0x3ff; div++) {
986 if ((hp->clkbase / div) <= freq) {
987 *divp = SDHC_SDCLK_CGM
988 | ((div & 0x300) << SDHC_SDCLK_XDIV_SHIFT)
989 | ((div & 0x0ff) << SDHC_SDCLK_DIV_SHIFT);
990 //freq = hp->clkbase / div;
991 return true;
992 }
993 }
994 /* No divisor found. */
995 return false;
996 }
997 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_HAVE_DVS)) {
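		/*
		 * eSDHC-style dual divider: a power-of-two prescaler
		 * (effective factor 2 * div) combined with a 1..16
		 * divisor (dvs + 1).  The loop below picks the
		 * smallest combination whose product covers the
		 * requested division ratio.
		 */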
998 u_int dvs = (hp->clkbase + freq - 1) / freq;
999 u_int roundup = dvs & 1;
1000 for (dvs >>= 1, div = 1; div <= 256; div <<= 1, dvs >>= 1) {
1001 if (dvs + roundup <= 16) {
1002 dvs += roundup - 1;
1003 *divp = (div << SDHC_SDCLK_DIV_SHIFT)
1004 | (dvs << SDHC_SDCLK_DVS_SHIFT);
1005 DPRINTF(2,
1006 ("%s: divisor for freq %u is %u * %u\n",
1007 HDEVNAME(hp), freq, div * 2, dvs + 1));
1008 //freq = hp->clkbase / (div * 2) * (dvs + 1);
1009 return true;
1010 }
1011 /*
1012 * If we drop bits, we need to round up the divisor.
1013 */
1014 roundup |= dvs & 1;
1015 }
1016 /* No divisor found. */
1017 return false;
1018 }
1019 if (hp->sc->sc_clkmsk != 0) {
1020 div = howmany(hp->clkbase, freq);
1021 if (div > (hp->sc->sc_clkmsk >> (ffs(hp->sc->sc_clkmsk) - 1)))
1022 return false;
1023 *divp = div << (ffs(hp->sc->sc_clkmsk) - 1);
1024 //freq = hp->clkbase / div;
1025 return true;
1026 }
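	/*
	 * SDHC 3.0 uses a 10-bit divider N giving SDCLK = base / (2N),
	 * with N = 0 selecting the undivided base clock.  Round up so
	 * the resulting clock never exceeds the requested frequency;
	 * e.g. with a 48000 kHz base clock and freq = 400, N becomes
	 * 60 and SDCLK = 48000 / 120 = 400 kHz.
	 */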
1027 if (hp->specver >= SDHC_SPEC_VERS_300) {
1028 div = howmany(hp->clkbase, freq);
1029 div = div > 1 ? howmany(div, 2) : 0;
1030 if (div > 0x3ff)
1031 return false;
1032 *divp = (((div >> 8) & SDHC_SDCLK_XDIV_MASK)
1033 << SDHC_SDCLK_XDIV_SHIFT) |
1034 (((div >> 0) & SDHC_SDCLK_DIV_MASK)
1035 << SDHC_SDCLK_DIV_SHIFT);
1036 //freq = hp->clkbase / (div ? div * 2 : 1);
1037 return true;
1038 } else {
1039 for (div = 1; div <= 256; div *= 2) {
1040 if ((hp->clkbase / div) <= freq) {
1041 *divp = (div / 2) << SDHC_SDCLK_DIV_SHIFT;
1042 //freq = hp->clkbase / div;
1043 return true;
1044 }
1045 }
1046 /* No divisor found. */
1047 return false;
1048 }
1049 /* No divisor found. */
1050 return false;
1051 }
1052
1053 /*
1054 * Set or change SDCLK frequency or disable the SD clock.
1055 * Return zero on success.
1056 */
1057 static int
1058 sdhc_bus_clock_ddr(sdmmc_chipset_handle_t sch, int freq, bool ddr)
1059 {
1060 struct sdhc_host *hp = (struct sdhc_host *)sch;
1061 u_int div;
1062 u_int timo;
1063 int16_t reg;
1064 int error = 0;
1065 bool present __diagused;
1066
1067 mutex_enter(&hp->intr_lock);
1068
1069 #ifdef DIAGNOSTIC
1070 present = ISSET(HREAD4(hp, SDHC_PRESENT_STATE), SDHC_CMD_INHIBIT_MASK);
1071
1072 /* Must not stop the clock if commands are in progress. */
1073 if (present && sdhc_card_detect(hp)) {
1074 aprint_normal_dev(hp->sc->sc_dev,
1075 "%s: command in progress\n", __func__);
1076 }
1077 #endif
1078
1079 if (hp->sc->sc_vendor_bus_clock) {
1080 error = (*hp->sc->sc_vendor_bus_clock)(hp->sc, freq);
1081 if (error != 0)
1082 goto out;
1083 }
1084
1085 /*
1086 * Stop SD clock before changing the frequency.
1087 */
1088 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_USDHC)) {
1089 HCLR4(hp, SDHC_VEND_SPEC,
1090 SDHC_VEND_SPEC_CARD_CLK_SOFT_EN |
1091 SDHC_VEND_SPEC_FRC_SDCLK_ON);
1092 if (freq == SDMMC_SDCLK_OFF) {
1093 goto out;
1094 }
1095 } else if (ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED)) {
1096 HCLR4(hp, SDHC_CLOCK_CTL, 0xfff8);
1097 if (freq == SDMMC_SDCLK_OFF) {
1098 HSET4(hp, SDHC_CLOCK_CTL, 0x80f0);
1099 goto out;
1100 }
1101 } else {
1102 HCLR2(hp, SDHC_CLOCK_CTL, SDHC_SDCLK_ENABLE);
1103 if (freq == SDMMC_SDCLK_OFF)
1104 goto out;
1105 }
1106
1107 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_USDHC)) {
1108 if (ddr)
1109 HSET4(hp, SDHC_MIX_CTRL, SDHC_USDHC_DDR_EN);
1110 else
1111 HCLR4(hp, SDHC_MIX_CTRL, SDHC_USDHC_DDR_EN);
1112 } else if (hp->specver >= SDHC_SPEC_VERS_300) {
1113 HCLR2(hp, SDHC_HOST_CTL2, SDHC_UHS_MODE_SELECT_MASK);
1114 if (freq > 100000) {
1115 HSET2(hp, SDHC_HOST_CTL2, SDHC_UHS_MODE_SELECT_SDR104);
1116 } else if (freq > 50000) {
1117 if (ddr) {
1118 HSET2(hp, SDHC_HOST_CTL2,
1119 SDHC_UHS_MODE_SELECT_DDR50);
1120 } else {
1121 HSET2(hp, SDHC_HOST_CTL2,
1122 SDHC_UHS_MODE_SELECT_SDR50);
1123 }
1124 } else if (freq > 25000) {
1125 if (ddr) {
1126 HSET2(hp, SDHC_HOST_CTL2,
1127 SDHC_UHS_MODE_SELECT_DDR50);
1128 } else {
1129 HSET2(hp, SDHC_HOST_CTL2,
1130 SDHC_UHS_MODE_SELECT_SDR25);
1131 }
1132 } else if (freq > 400) {
1133 HSET2(hp, SDHC_HOST_CTL2, SDHC_UHS_MODE_SELECT_SDR12);
1134 }
1135 }
1136
1137 /*
  1138 	 * Slow down the Ricoh 5U823 controller, which isn't reliable
  1139 	 * at a 100 MHz bus clock.
1140 */
1141 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_SLOW_SDR50)) {
1142 if (freq == 100000)
1143 --freq;
1144 }
1145
1146 /*
1147 * Set the minimum base clock frequency divisor.
1148 */
1149 if (!sdhc_clock_divisor(hp, freq, &div)) {
1150 /* Invalid base clock frequency or `freq' value. */
1151 aprint_error_dev(hp->sc->sc_dev,
1152 "Invalid bus clock %d kHz\n", freq);
1153 error = EINVAL;
1154 goto out;
1155 }
1156 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_USDHC)) {
1157 if (ddr) {
1158 /* in ddr mode, divisor >>= 1 */
1159 div = ((div >> 1) & (SDHC_SDCLK_DIV_MASK <<
1160 SDHC_SDCLK_DIV_SHIFT)) |
1161 (div & (SDHC_SDCLK_DVS_MASK <<
1162 SDHC_SDCLK_DVS_SHIFT));
1163 }
1164 for (timo = 1000; timo > 0; timo--) {
1165 if (ISSET(HREAD4(hp, SDHC_PRESENT_STATE), SDHC_SDSTB))
1166 break;
1167 sdmmc_delay(10);
1168 }
1169 HWRITE4(hp, SDHC_CLOCK_CTL,
1170 div | (SDHC_TIMEOUT_MAX << 16) | 0x0f);
1171 } else if (ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED)) {
1172 HWRITE4(hp, SDHC_CLOCK_CTL,
1173 div | (SDHC_TIMEOUT_MAX << 16));
1174 } else {
1175 reg = HREAD2(hp, SDHC_CLOCK_CTL);
1176 reg &= (SDHC_INTCLK_STABLE | SDHC_INTCLK_ENABLE);
1177 HWRITE2(hp, SDHC_CLOCK_CTL, reg | div);
1178 }
1179
1180 /*
1181 * Start internal clock. Wait 10ms for stabilization.
1182 */
1183 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_USDHC)) {
1184 HSET4(hp, SDHC_VEND_SPEC,
1185 SDHC_VEND_SPEC_CARD_CLK_SOFT_EN |
1186 SDHC_VEND_SPEC_FRC_SDCLK_ON);
1187 } else if (ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED)) {
1188 sdmmc_delay(10000);
1189 HSET4(hp, SDHC_CLOCK_CTL,
1190 8 | SDHC_INTCLK_ENABLE | SDHC_INTCLK_STABLE);
1191 } else {
1192 HSET2(hp, SDHC_CLOCK_CTL, SDHC_INTCLK_ENABLE);
1193 for (timo = 1000; timo > 0; timo--) {
1194 if (ISSET(HREAD2(hp, SDHC_CLOCK_CTL),
1195 SDHC_INTCLK_STABLE))
1196 break;
1197 sdmmc_delay(10);
1198 }
1199 if (timo == 0) {
1200 error = ETIMEDOUT;
1201 DPRINTF(1,("%s: timeout\n", __func__));
1202 goto out;
1203 }
1204 }
1205
1206 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED | SDHC_FLAG_USDHC)) {
1207 HSET1(hp, SDHC_SOFTWARE_RESET, SDHC_INIT_ACTIVE);
1208 /*
1209 * Sending 80 clocks at 400kHz takes 200us.
1210 * So delay for that time + slop and then
1211 * check a few times for completion.
1212 */
1213 sdmmc_delay(210);
1214 for (timo = 10; timo > 0; timo--) {
1215 if (!ISSET(HREAD1(hp, SDHC_SOFTWARE_RESET),
1216 SDHC_INIT_ACTIVE))
1217 break;
1218 sdmmc_delay(10);
1219 }
1220 DPRINTF(2,("%s: %u init spins\n", __func__, 10 - timo));
1221
1222 /*
1223 * Enable SD clock.
1224 */
1225 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_USDHC)) {
1226 HSET4(hp, SDHC_VEND_SPEC,
1227 SDHC_VEND_SPEC_CARD_CLK_SOFT_EN |
1228 SDHC_VEND_SPEC_FRC_SDCLK_ON);
1229 } else {
1230 HSET4(hp, SDHC_CLOCK_CTL, SDHC_SDCLK_ENABLE);
1231 }
1232 } else {
1233 /*
1234 * Enable SD clock.
1235 */
1236 HSET2(hp, SDHC_CLOCK_CTL, SDHC_SDCLK_ENABLE);
1237
1238 if (freq > 25000 &&
1239 !ISSET(hp->sc->sc_flags, SDHC_FLAG_NO_HS_BIT))
1240 HSET1(hp, SDHC_HOST_CTL, SDHC_HIGH_SPEED);
1241 else
1242 HCLR1(hp, SDHC_HOST_CTL, SDHC_HIGH_SPEED);
1243 }
1244
1245 out:
1246 mutex_exit(&hp->intr_lock);
1247
1248 return error;
1249 }
1250
1251 static int
1252 sdhc_bus_width(sdmmc_chipset_handle_t sch, int width)
1253 {
1254 struct sdhc_host *hp = (struct sdhc_host *)sch;
1255 int reg;
1256
1257 switch (width) {
1258 case 1:
1259 case 4:
1260 break;
1261
1262 case 8:
1263 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_8BIT_MODE))
1264 break;
1265 /* FALLTHROUGH */
1266 default:
1267 DPRINTF(0,("%s: unsupported bus width (%d)\n",
1268 HDEVNAME(hp), width));
1269 return 1;
1270 }
1271
1272 if (hp->sc->sc_vendor_bus_width) {
1273 const int error = hp->sc->sc_vendor_bus_width(hp->sc, width);
1274 if (error != 0)
1275 return error;
1276 }
1277
1278 mutex_enter(&hp->intr_lock);
1279
1280 reg = HREAD1(hp, SDHC_HOST_CTL);
1281 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED | SDHC_FLAG_USDHC)) {
1282 reg &= ~(SDHC_4BIT_MODE|SDHC_ESDHC_8BIT_MODE);
1283 if (width == 4)
1284 reg |= SDHC_4BIT_MODE;
1285 else if (width == 8)
1286 reg |= SDHC_ESDHC_8BIT_MODE;
1287 } else {
1288 reg &= ~SDHC_4BIT_MODE;
1289 if (hp->specver >= SDHC_SPEC_VERS_300) {
1290 reg &= ~SDHC_8BIT_MODE;
1291 }
1292 if (width == 4) {
1293 reg |= SDHC_4BIT_MODE;
1294 } else if (width == 8 && hp->specver >= SDHC_SPEC_VERS_300) {
1295 reg |= SDHC_8BIT_MODE;
1296 }
1297 }
1298 HWRITE1(hp, SDHC_HOST_CTL, reg);
1299
1300 mutex_exit(&hp->intr_lock);
1301
1302 return 0;
1303 }
1304
1305 static int
1306 sdhc_bus_rod(sdmmc_chipset_handle_t sch, int on)
1307 {
1308 struct sdhc_host *hp = (struct sdhc_host *)sch;
1309
1310 if (hp->sc->sc_vendor_rod)
1311 return (*hp->sc->sc_vendor_rod)(hp->sc, on);
1312
1313 return 0;
1314 }
1315
1316 static void
1317 sdhc_card_enable_intr(sdmmc_chipset_handle_t sch, int enable)
1318 {
1319 struct sdhc_host *hp = (struct sdhc_host *)sch;
1320
1321 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED | SDHC_FLAG_USDHC)) {
1322 mutex_enter(&hp->intr_lock);
1323 if (enable) {
1324 HSET2(hp, SDHC_NINTR_STATUS_EN, SDHC_CARD_INTERRUPT);
1325 HSET2(hp, SDHC_NINTR_SIGNAL_EN, SDHC_CARD_INTERRUPT);
1326 } else {
1327 HCLR2(hp, SDHC_NINTR_SIGNAL_EN, SDHC_CARD_INTERRUPT);
1328 HCLR2(hp, SDHC_NINTR_STATUS_EN, SDHC_CARD_INTERRUPT);
1329 }
1330 mutex_exit(&hp->intr_lock);
1331 }
1332 }
1333
1334 static void
1335 sdhc_card_intr_ack(sdmmc_chipset_handle_t sch)
1336 {
1337 struct sdhc_host *hp = (struct sdhc_host *)sch;
1338
1339 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED | SDHC_FLAG_USDHC)) {
1340 mutex_enter(&hp->intr_lock);
1341 HSET2(hp, SDHC_NINTR_STATUS_EN, SDHC_CARD_INTERRUPT);
1342 mutex_exit(&hp->intr_lock);
1343 }
1344 }
1345
1346 static int
1347 sdhc_signal_voltage(sdmmc_chipset_handle_t sch, int signal_voltage)
1348 {
1349 struct sdhc_host *hp = (struct sdhc_host *)sch;
1350 int error = 0;
1351
1352 if (hp->specver < SDHC_SPEC_VERS_300)
1353 return EINVAL;
1354
1355 mutex_enter(&hp->intr_lock);
1356 switch (signal_voltage) {
1357 case SDMMC_SIGNAL_VOLTAGE_180:
1358 if (hp->sc->sc_vendor_signal_voltage != NULL) {
1359 error = hp->sc->sc_vendor_signal_voltage(hp->sc,
1360 signal_voltage);
1361 if (error != 0)
1362 break;
1363 }
1364 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_USDHC))
1365 HSET2(hp, SDHC_HOST_CTL2, SDHC_1_8V_SIGNAL_EN);
1366 break;
1367 case SDMMC_SIGNAL_VOLTAGE_330:
1368 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_USDHC))
1369 HCLR2(hp, SDHC_HOST_CTL2, SDHC_1_8V_SIGNAL_EN);
1370 if (hp->sc->sc_vendor_signal_voltage != NULL) {
1371 error = hp->sc->sc_vendor_signal_voltage(hp->sc,
1372 signal_voltage);
1373 if (error != 0)
1374 break;
1375 }
1376 break;
1377 default:
1378 error = EINVAL;
1379 break;
1380 }
1381 mutex_exit(&hp->intr_lock);
1382
1383 return error;
1384 }
1385
1386 /*
1387 * Sampling clock tuning procedure (UHS)
1388 */
1389 static int
1390 sdhc_execute_tuning1(struct sdhc_host *hp, int timing)
1391 {
1392 struct sdmmc_command cmd;
1393 uint8_t hostctl;
1394 int opcode, error, retry = 40;
1395
1396 KASSERT(mutex_owned(&hp->intr_lock));
1397
1398 hp->tuning_timing = timing;
1399
1400 switch (timing) {
1401 case SDMMC_TIMING_MMC_HS200:
1402 opcode = MMC_SEND_TUNING_BLOCK_HS200;
1403 break;
1404 case SDMMC_TIMING_UHS_SDR50:
1405 if (!ISSET(hp->sc->sc_caps2, SDHC_TUNING_SDR50))
1406 return 0;
1407 /* FALLTHROUGH */
1408 case SDMMC_TIMING_UHS_SDR104:
1409 opcode = MMC_SEND_TUNING_BLOCK;
1410 break;
1411 default:
1412 return EINVAL;
1413 }
1414
1415 hostctl = HREAD1(hp, SDHC_HOST_CTL);
1416
1417 /* enable buffer read ready interrupt */
1418 HSET2(hp, SDHC_NINTR_SIGNAL_EN, SDHC_BUFFER_READ_READY);
1419 HSET2(hp, SDHC_NINTR_STATUS_EN, SDHC_BUFFER_READ_READY);
1420
1421 /* disable DMA */
1422 HCLR1(hp, SDHC_HOST_CTL, SDHC_DMA_SELECT);
1423
1424 /* reset tuning circuit */
1425 HCLR2(hp, SDHC_HOST_CTL2, SDHC_SAMPLING_CLOCK_SEL);
1426
1427 /* start of tuning */
1428 HWRITE2(hp, SDHC_HOST_CTL2, SDHC_EXECUTE_TUNING);
1429
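	/*
	 * Repeatedly issue the tuning command (CMD19 for SD, CMD21 for
	 * eMMC HS200), up to 40 times.  The controller advances its
	 * sampling point for each tuning block and clears
	 * SDHC_EXECUTE_TUNING when the procedure finishes; on success
	 * it also sets SDHC_SAMPLING_CLOCK_SEL.
	 */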
1430 do {
1431 memset(&cmd, 0, sizeof(cmd));
1432 cmd.c_opcode = opcode;
1433 cmd.c_arg = 0;
1434 cmd.c_flags = SCF_CMD_ADTC | SCF_CMD_READ | SCF_RSP_R1;
1435 if (ISSET(hostctl, SDHC_8BIT_MODE)) {
1436 cmd.c_blklen = cmd.c_datalen = 128;
1437 } else {
1438 cmd.c_blklen = cmd.c_datalen = 64;
1439 }
1440
1441 error = sdhc_start_command(hp, &cmd);
1442 if (error)
1443 break;
1444
1445 if (!sdhc_wait_intr(hp, SDHC_BUFFER_READ_READY,
1446 SDHC_TUNING_TIMEOUT, false)) {
1447 break;
1448 }
1449
1450 delay(1000);
1451 } while (HREAD2(hp, SDHC_HOST_CTL2) & SDHC_EXECUTE_TUNING && --retry);
1452
1453 /* disable buffer read ready interrupt */
1454 HCLR2(hp, SDHC_NINTR_SIGNAL_EN, SDHC_BUFFER_READ_READY);
1455 HCLR2(hp, SDHC_NINTR_STATUS_EN, SDHC_BUFFER_READ_READY);
1456
1457 if (HREAD2(hp, SDHC_HOST_CTL2) & SDHC_EXECUTE_TUNING) {
1458 HCLR2(hp, SDHC_HOST_CTL2,
1459 SDHC_SAMPLING_CLOCK_SEL|SDHC_EXECUTE_TUNING);
1460 sdhc_soft_reset(hp, SDHC_RESET_DAT|SDHC_RESET_CMD);
1461 aprint_error_dev(hp->sc->sc_dev,
1462 "tuning did not complete, using fixed sampling clock\n");
1463 return EIO; /* tuning did not complete */
1464 }
1465
1466 if ((HREAD2(hp, SDHC_HOST_CTL2) & SDHC_SAMPLING_CLOCK_SEL) == 0) {
1467 HCLR2(hp, SDHC_HOST_CTL2,
1468 SDHC_SAMPLING_CLOCK_SEL|SDHC_EXECUTE_TUNING);
1469 sdhc_soft_reset(hp, SDHC_RESET_DAT|SDHC_RESET_CMD);
1470 aprint_error_dev(hp->sc->sc_dev,
1471 "tuning failed, using fixed sampling clock\n");
1472 return EIO; /* tuning failed */
1473 }
1474
1475 if (hp->tuning_timer_count) {
1476 callout_schedule(&hp->tuning_timer,
1477 hz * hp->tuning_timer_count);
1478 }
1479
1480 return 0; /* tuning completed */
1481 }
1482
1483 static int
1484 sdhc_execute_tuning(sdmmc_chipset_handle_t sch, int timing)
1485 {
1486 struct sdhc_host *hp = (struct sdhc_host *)sch;
1487 int error;
1488
1489 mutex_enter(&hp->intr_lock);
1490 error = sdhc_execute_tuning1(hp, timing);
1491 mutex_exit(&hp->intr_lock);
1492 return error;
1493 }
1494
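/*
 * Re-tuning timer callout.  Only latch a flag here; the actual
 * re-tuning pass runs from sdhc_exec_command() with the host lock
 * held, before the next command is issued.
 */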
1495 static void
1496 sdhc_tuning_timer(void *arg)
1497 {
1498 struct sdhc_host *hp = arg;
1499
1500 atomic_swap_uint(&hp->tuning_timer_pending, 1);
1501 }
1502
1503 static void
1504 sdhc_hw_reset(sdmmc_chipset_handle_t sch)
1505 {
1506 struct sdhc_host *hp = (struct sdhc_host *)sch;
1507 struct sdhc_softc *sc = hp->sc;
1508
1509 if (sc->sc_vendor_hw_reset != NULL)
1510 sc->sc_vendor_hw_reset(sc, hp);
1511 }
1512
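/*
 * Poll SDHC_PRESENT_STATE until (state & mask) == value, for up to
 * roughly 100ms, e.g. to wait for the CMD/DAT inhibit bits to clear
 * before issuing a command.
 */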
1513 static int
1514 sdhc_wait_state(struct sdhc_host *hp, uint32_t mask, uint32_t value)
1515 {
1516 uint32_t state;
1517 int timeout;
1518
1519 for (timeout = 10000; timeout > 0; timeout--) {
1520 if (((state = HREAD4(hp, SDHC_PRESENT_STATE)) & mask) == value)
1521 return 0;
1522 sdmmc_delay(10);
1523 }
1524 aprint_error_dev(hp->sc->sc_dev, "timeout waiting for mask %#x value %#x (state=%#x)\n",
1525 mask, value, state);
1526 return ETIMEDOUT;
1527 }
1528
1529 static void
1530 sdhc_exec_command(sdmmc_chipset_handle_t sch, struct sdmmc_command *cmd)
1531 {
1532 struct sdhc_host *hp = (struct sdhc_host *)sch;
1533 int error;
1534 bool probing;
1535
1536 mutex_enter(&hp->intr_lock);
1537
1538 if (atomic_cas_uint(&hp->tuning_timer_pending, 1, 0) == 1) {
1539 (void)sdhc_execute_tuning1(hp, hp->tuning_timing);
1540 }
1541
1542 if (cmd->c_data &&
1543 ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED | SDHC_FLAG_USDHC)) {
1544 const uint16_t ready = SDHC_BUFFER_READ_READY | SDHC_BUFFER_WRITE_READY;
1545 if (ISSET(hp->flags, SHF_USE_DMA)) {
1546 HCLR2(hp, SDHC_NINTR_SIGNAL_EN, ready);
1547 HCLR2(hp, SDHC_NINTR_STATUS_EN, ready);
1548 } else {
1549 HSET2(hp, SDHC_NINTR_SIGNAL_EN, ready);
1550 HSET2(hp, SDHC_NINTR_STATUS_EN, ready);
1551 }
1552 }
1553
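	/*
	 * On controllers with SDHC_FLAG_NO_TIMEOUT, mask the command
	 * timeout error interrupt while a data transfer is in flight
	 * and re-enable it for commands without data.
	 */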
1554 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_NO_TIMEOUT)) {
1555 const uint16_t eintr = SDHC_CMD_TIMEOUT_ERROR;
1556 if (cmd->c_data != NULL) {
1557 HCLR2(hp, SDHC_EINTR_SIGNAL_EN, eintr);
1558 HCLR2(hp, SDHC_EINTR_STATUS_EN, eintr);
1559 } else {
1560 HSET2(hp, SDHC_EINTR_SIGNAL_EN, eintr);
1561 HSET2(hp, SDHC_EINTR_STATUS_EN, eintr);
1562 }
1563 }
1564
1565 /*
1566 * Start the MMC command, or mark `cmd' as failed and return.
1567 */
1568 error = sdhc_start_command(hp, cmd);
1569 if (error) {
1570 cmd->c_error = error;
1571 goto out;
1572 }
1573
1574 /*
1575 * Wait until the command phase is done, or until the command
1576 * is marked done for any other reason.
1577 */
1578 probing = (cmd->c_flags & SCF_TOUT_OK) != 0;
1579 if (!sdhc_wait_intr(hp, SDHC_COMMAND_COMPLETE, SDHC_COMMAND_TIMEOUT, probing)) {
1580 DPRINTF(1,("%s: timeout for command\n", __func__));
1581 sdmmc_delay(50);
1582 cmd->c_error = ETIMEDOUT;
1583 goto out;
1584 }
1585
1586 /*
1587 * The host controller removes bits [0:7] from the response
1588 * data (CRC) and we pass the data up unchanged to the bus
1589 * driver (without padding).
1590 */
1591 if (cmd->c_error == 0 && ISSET(cmd->c_flags, SCF_RSP_PRESENT)) {
1592 cmd->c_resp[0] = HREAD4(hp, SDHC_RESPONSE + 0);
1593 if (ISSET(cmd->c_flags, SCF_RSP_136)) {
1594 cmd->c_resp[1] = HREAD4(hp, SDHC_RESPONSE + 4);
1595 cmd->c_resp[2] = HREAD4(hp, SDHC_RESPONSE + 8);
1596 cmd->c_resp[3] = HREAD4(hp, SDHC_RESPONSE + 12);
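			/*
			 * Controllers with SDHC_FLAG_RSP136_CRC leave
			 * the CRC7 byte in the 136-bit response
			 * registers, so shift the response right by
			 * 8 bits to get the layout the upper layers
			 * expect.
			 */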
1597 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_RSP136_CRC)) {
1598 cmd->c_resp[0] = (cmd->c_resp[0] >> 8) |
1599 (cmd->c_resp[1] << 24);
1600 cmd->c_resp[1] = (cmd->c_resp[1] >> 8) |
1601 (cmd->c_resp[2] << 24);
1602 cmd->c_resp[2] = (cmd->c_resp[2] >> 8) |
1603 (cmd->c_resp[3] << 24);
1604 cmd->c_resp[3] = (cmd->c_resp[3] >> 8);
1605 }
1606 }
1607 }
1608 DPRINTF(1,("%s: resp = %08x\n", HDEVNAME(hp), cmd->c_resp[0]));
1609
1610 /*
1611 * If the command has data to transfer in any direction,
1612 * execute the transfer now.
1613 */
1614 if (cmd->c_error == 0 && cmd->c_data != NULL)
1615 sdhc_transfer_data(hp, cmd);
1616 else if (ISSET(cmd->c_flags, SCF_RSP_BSY)) {
1617 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_NO_BUSY_INTR) &&
1618 !sdhc_wait_intr(hp, SDHC_TRANSFER_COMPLETE, hz * 10, false)) {
1619 DPRINTF(1,("%s: sdhc_exec_command: RSP_BSY\n",
1620 HDEVNAME(hp)));
1621 cmd->c_error = ETIMEDOUT;
1622 goto out;
1623 }
1624 }
1625
1626 out:
1627 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED)
1628 && !ISSET(hp->sc->sc_flags, SDHC_FLAG_NO_LED_ON)) {
1629 /* Turn off the LED. */
1630 HCLR1(hp, SDHC_HOST_CTL, SDHC_LED_ON);
1631 }
1632 SET(cmd->c_flags, SCF_ITSDONE);
1633
1634 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_NO_AUTO_STOP) &&
1635 cmd->c_opcode == MMC_STOP_TRANSMISSION)
1636 (void)sdhc_soft_reset(hp, SDHC_RESET_CMD|SDHC_RESET_DAT);
1637
1638 mutex_exit(&hp->intr_lock);
1639
1640 DPRINTF(1,("%s: cmd %d %s (flags=%08x error=%d)\n", HDEVNAME(hp),
1641 cmd->c_opcode, (cmd->c_error == 0) ? "done" : "abort",
1642 cmd->c_flags, cmd->c_error));
1643 }
1644
1645 static int
1646 sdhc_start_command(struct sdhc_host *hp, struct sdmmc_command *cmd)
1647 {
1648 struct sdhc_softc * const sc = hp->sc;
1649 uint16_t blksize = 0;
1650 uint16_t blkcount = 0;
1651 uint16_t mode;
1652 uint16_t command;
1653 uint32_t pmask;
1654 int error;
1655
1656 KASSERT(mutex_owned(&hp->intr_lock));
1657
1658 DPRINTF(1,("%s: start cmd %d arg=%08x data=%p dlen=%d flags=%08x, status=%#x\n",
1659 HDEVNAME(hp), cmd->c_opcode, cmd->c_arg, cmd->c_data,
1660 cmd->c_datalen, cmd->c_flags, HREAD4(hp, SDHC_NINTR_STATUS)));
1661
1662 /*
1663 * The maximum block length for commands should be the minimum
1664 * of the host buffer size and the card buffer size. (1.7.2)
1665 */
1666
1667 /* Fragment the data into proper blocks. */
1668 if (cmd->c_datalen > 0) {
1669 blksize = MIN(cmd->c_datalen, cmd->c_blklen);
1670 blkcount = cmd->c_datalen / blksize;
1671 if (cmd->c_datalen % blksize > 0) {
1672 /* XXX: Split this command. (1.7.4) */
1673 aprint_error_dev(sc->sc_dev,
1674 "data not a multiple of %u bytes\n", blksize);
1675 return EINVAL;
1676 }
1677 }
1678
1679 /* Check limit imposed by 9-bit block count. (1.7.2) */
1680 if (blkcount > SDHC_BLOCK_COUNT_MAX) {
1681 aprint_error_dev(sc->sc_dev, "too much data\n");
1682 return EINVAL;
1683 }
1684
1685 /* Prepare transfer mode register value. (2.2.5) */
1686 mode = SDHC_BLOCK_COUNT_ENABLE;
1687 if (ISSET(cmd->c_flags, SCF_CMD_READ))
1688 mode |= SDHC_READ_MODE;
1689 if (blkcount > 1) {
1690 mode |= SDHC_MULTI_BLOCK_MODE;
1691 /* XXX only for memory commands? */
1692 if (!ISSET(sc->sc_flags, SDHC_FLAG_NO_AUTO_STOP))
1693 mode |= SDHC_AUTO_CMD12_ENABLE;
1694 }
1695 if (cmd->c_dmamap != NULL && cmd->c_datalen > 0 &&
1696 ISSET(hp->flags, SHF_MODE_DMAEN)) {
1697 mode |= SDHC_DMA_ENABLE;
1698 }
1699
1700 /*
1701 * Prepare command register value. (2.2.6)
1702 */
1703 command = (cmd->c_opcode & SDHC_COMMAND_INDEX_MASK) << SDHC_COMMAND_INDEX_SHIFT;
1704
1705 if (ISSET(cmd->c_flags, SCF_RSP_CRC))
1706 command |= SDHC_CRC_CHECK_ENABLE;
1707 if (ISSET(cmd->c_flags, SCF_RSP_IDX))
1708 command |= SDHC_INDEX_CHECK_ENABLE;
1709 if (cmd->c_datalen > 0)
1710 command |= SDHC_DATA_PRESENT_SELECT;
1711
1712 if (!ISSET(cmd->c_flags, SCF_RSP_PRESENT))
1713 command |= SDHC_NO_RESPONSE;
1714 else if (ISSET(cmd->c_flags, SCF_RSP_136))
1715 command |= SDHC_RESP_LEN_136;
1716 else if (ISSET(cmd->c_flags, SCF_RSP_BSY))
1717 command |= SDHC_RESP_LEN_48_CHK_BUSY;
1718 else
1719 command |= SDHC_RESP_LEN_48;
1720
1721 /* Wait until command and optionally data inhibit bits are clear. (1.5) */
1722 pmask = SDHC_CMD_INHIBIT_CMD;
1723 if (cmd->c_flags & (SCF_CMD_ADTC|SCF_RSP_BSY))
1724 pmask |= SDHC_CMD_INHIBIT_DAT;
1725 error = sdhc_wait_state(hp, pmask, 0);
1726 if (error) {
1727 (void) sdhc_soft_reset(hp, SDHC_RESET_DAT|SDHC_RESET_CMD);
1728 device_printf(sc->sc_dev, "command or data phase inhibited\n");
1729 return error;
1730 }
1731
1732 DPRINTF(1,("%s: writing cmd: blksize=%d blkcnt=%d mode=%04x cmd=%04x\n",
1733 HDEVNAME(hp), blksize, blkcount, mode, command));
1734
1735 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED | SDHC_FLAG_USDHC)) {
1736 blksize |= (MAX(0, PAGE_SHIFT - 12) & SDHC_DMA_BOUNDARY_MASK) <<
1737 SDHC_DMA_BOUNDARY_SHIFT; /* PAGE_SIZE DMA boundary */
1738 }
1739
1740 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED)) {
1741 /* Alert the user not to remove the card. */
1742 HSET1(hp, SDHC_HOST_CTL, SDHC_LED_ON);
1743 }
1744
1745 /* Set DMA start address. */
1746 if (ISSET(hp->flags, SHF_USE_ADMA2_MASK) && cmd->c_data != NULL) {
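		/*
		 * Build the ADMA2 descriptor table: one TRAN
		 * descriptor per DMA segment (the 16-bit length field
		 * encodes 65536 as 0), with the last segment marked
		 * END and followed by an all-zero terminator entry.
		 */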
1747 for (int seg = 0; seg < cmd->c_dmamap->dm_nsegs; seg++) {
1748 bus_addr_t paddr =
1749 cmd->c_dmamap->dm_segs[seg].ds_addr;
1750 uint16_t len =
1751 cmd->c_dmamap->dm_segs[seg].ds_len == 65536 ?
1752 0 : cmd->c_dmamap->dm_segs[seg].ds_len;
1753 uint16_t attr =
1754 SDHC_ADMA2_VALID | SDHC_ADMA2_ACT_TRANS;
1755 if (seg == cmd->c_dmamap->dm_nsegs - 1) {
1756 attr |= SDHC_ADMA2_END;
1757 }
1758 if (ISSET(hp->flags, SHF_USE_ADMA2_32)) {
1759 struct sdhc_adma2_descriptor32 *desc =
1760 hp->adma2;
1761 desc[seg].attribute = htole16(attr);
1762 desc[seg].length = htole16(len);
1763 desc[seg].address = htole32(paddr);
1764 } else {
1765 struct sdhc_adma2_descriptor64 *desc =
1766 hp->adma2;
1767 desc[seg].attribute = htole16(attr);
1768 desc[seg].length = htole16(len);
1769 desc[seg].address = htole32(paddr & 0xffffffff);
1770 desc[seg].address_hi = htole32(
1771 (uint64_t)paddr >> 32);
1772 }
1773 }
1774 if (ISSET(hp->flags, SHF_USE_ADMA2_32)) {
1775 struct sdhc_adma2_descriptor32 *desc = hp->adma2;
1776 desc[cmd->c_dmamap->dm_nsegs].attribute = htole16(0);
1777 } else {
1778 struct sdhc_adma2_descriptor64 *desc = hp->adma2;
1779 desc[cmd->c_dmamap->dm_nsegs].attribute = htole16(0);
1780 }
1781 bus_dmamap_sync(sc->sc_dmat, hp->adma_map, 0, PAGE_SIZE,
1782 BUS_DMASYNC_PREWRITE);
1783 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_USDHC)) {
1784 HCLR4(hp, SDHC_HOST_CTL, SDHC_USDHC_DMA_SELECT);
1785 HSET4(hp, SDHC_HOST_CTL, SDHC_USDHC_DMA_SELECT_ADMA2);
1786 } else {
1787 HCLR1(hp, SDHC_HOST_CTL, SDHC_DMA_SELECT);
1788 HSET1(hp, SDHC_HOST_CTL, SDHC_DMA_SELECT_ADMA2);
1789 }
1790
1791 const bus_addr_t desc_addr = hp->adma_map->dm_segs[0].ds_addr;
1792
1793 HWRITE4(hp, SDHC_ADMA_SYSTEM_ADDR, desc_addr & 0xffffffff);
1794 if (ISSET(hp->flags, SHF_USE_ADMA2_64)) {
1795 HWRITE4(hp, SDHC_ADMA_SYSTEM_ADDR + 4,
1796 (uint64_t)desc_addr >> 32);
1797 }
1798 } else if (ISSET(mode, SDHC_DMA_ENABLE) &&
1799 !ISSET(sc->sc_flags, SDHC_FLAG_EXTERNAL_DMA)) {
1800 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_USDHC)) {
1801 HCLR4(hp, SDHC_HOST_CTL, SDHC_USDHC_DMA_SELECT);
1802 }
1803 HWRITE4(hp, SDHC_DMA_ADDR, cmd->c_dmamap->dm_segs[0].ds_addr);
1804 }
1805
1806 /*
1807 * Start a CPU data transfer. Writing to the high order byte
1808 * of the SDHC_COMMAND register triggers the SD command. (1.5)
1809 */
1810 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_32BIT_ACCESS)) {
1811 HWRITE4(hp, SDHC_BLOCK_SIZE, blksize | (blkcount << 16));
1812 HWRITE4(hp, SDHC_ARGUMENT, cmd->c_arg);
1813 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_USDHC)) {
  1814 			/* the mode bits are in the MIX_CTRL register on uSDHC */
1815 HWRITE4(hp, SDHC_MIX_CTRL, mode |
1816 (HREAD4(hp, SDHC_MIX_CTRL) &
1817 ~(SDHC_MULTI_BLOCK_MODE |
1818 SDHC_READ_MODE |
1819 SDHC_AUTO_CMD12_ENABLE |
1820 SDHC_BLOCK_COUNT_ENABLE |
1821 SDHC_DMA_ENABLE)));
1822 HWRITE4(hp, SDHC_TRANSFER_MODE, command << 16);
1823 } else {
1824 HWRITE4(hp, SDHC_TRANSFER_MODE, mode | (command << 16));
1825 }
1826 } else {
1827 HWRITE2(hp, SDHC_BLOCK_SIZE, blksize);
1828 HWRITE2(hp, SDHC_BLOCK_COUNT, blkcount);
1829 HWRITE4(hp, SDHC_ARGUMENT, cmd->c_arg);
1830 HWRITE2(hp, SDHC_TRANSFER_MODE, mode);
1831 HWRITE2(hp, SDHC_COMMAND, command);
1832 }
1833
1834 return 0;
1835 }
1836
1837 static void
1838 sdhc_transfer_data(struct sdhc_host *hp, struct sdmmc_command *cmd)
1839 {
1840 struct sdhc_softc *sc = hp->sc;
1841 int error;
1842
1843 KASSERT(mutex_owned(&hp->intr_lock));
1844
1845 DPRINTF(1,("%s: data transfer: resp=%08x datalen=%u\n", HDEVNAME(hp),
1846 MMC_R1(cmd->c_resp), cmd->c_datalen));
1847
1848 #ifdef SDHC_DEBUG
1849 /* XXX I forgot why I wanted to know when this happens :-( */
1850 if ((cmd->c_opcode == 52 || cmd->c_opcode == 53) &&
1851 ISSET(MMC_R1(cmd->c_resp), 0xcb00)) {
1852 aprint_error_dev(hp->sc->sc_dev,
1853 "CMD52/53 error response flags %#x\n",
1854 MMC_R1(cmd->c_resp) & 0xff00);
1855 }
1856 #endif
1857
1858 if (cmd->c_dmamap != NULL) {
1859 if (hp->sc->sc_vendor_transfer_data_dma != NULL) {
1860 error = hp->sc->sc_vendor_transfer_data_dma(sc, cmd);
1861 if (error == 0 && !sdhc_wait_intr(hp,
1862 SDHC_TRANSFER_COMPLETE, SDHC_DMA_TIMEOUT, false)) {
1863 DPRINTF(1,("%s: timeout\n", __func__));
1864 error = ETIMEDOUT;
1865 }
1866 } else {
1867 error = sdhc_transfer_data_dma(hp, cmd);
1868 }
1869 } else
1870 error = sdhc_transfer_data_pio(hp, cmd);
1871 if (error)
1872 cmd->c_error = error;
1873 SET(cmd->c_flags, SCF_ITSDONE);
1874
1875 DPRINTF(1,("%s: data transfer done (error=%d)\n",
1876 HDEVNAME(hp), cmd->c_error));
1877 }
1878
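/*
 * Wait for a DMA transfer to finish.  With SDMA the controller pauses
 * at each buffer boundary and raises SDHC_DMA_INTERRUPT; rewrite
 * SDHC_DMA_ADDR with the current position, or with the start of the
 * next dm_segs[] segment, until SDHC_TRANSFER_COMPLETE is seen.  ADMA2
 * transfers need no restarting, only a POSTWRITE sync of the
 * descriptor table once the transfer is over.
 */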
1879 static int
1880 sdhc_transfer_data_dma(struct sdhc_host *hp, struct sdmmc_command *cmd)
1881 {
1882 bus_dma_segment_t *dm_segs = cmd->c_dmamap->dm_segs;
1883 bus_addr_t posaddr;
1884 bus_addr_t segaddr;
1885 bus_size_t seglen;
1886 u_int seg = 0;
1887 int error = 0;
1888 int status;
1889
1890 KASSERT(mutex_owned(&hp->intr_lock));
1891 KASSERT(HREAD2(hp, SDHC_NINTR_STATUS_EN) & SDHC_DMA_INTERRUPT);
1892 KASSERT(HREAD2(hp, SDHC_NINTR_SIGNAL_EN) & SDHC_DMA_INTERRUPT);
1893 KASSERT(HREAD2(hp, SDHC_NINTR_STATUS_EN) & SDHC_TRANSFER_COMPLETE);
1894 KASSERT(HREAD2(hp, SDHC_NINTR_SIGNAL_EN) & SDHC_TRANSFER_COMPLETE);
1895
1896 for (;;) {
1897 status = sdhc_wait_intr(hp,
1898 SDHC_DMA_INTERRUPT|SDHC_TRANSFER_COMPLETE,
1899 SDHC_DMA_TIMEOUT, false);
1900
1901 if (status & SDHC_TRANSFER_COMPLETE) {
1902 break;
1903 }
1904 if (!status) {
1905 DPRINTF(1,("%s: timeout\n", __func__));
1906 error = ETIMEDOUT;
1907 break;
1908 }
1909
1910 if (ISSET(hp->flags, SHF_USE_ADMA2_MASK)) {
1911 continue;
1912 }
1913
1914 if ((status & SDHC_DMA_INTERRUPT) == 0) {
1915 continue;
1916 }
1917
1918 /* DMA Interrupt (boundary crossing) */
1919
1920 segaddr = dm_segs[seg].ds_addr;
1921 seglen = dm_segs[seg].ds_len;
1922 posaddr = HREAD4(hp, SDHC_DMA_ADDR);
1923
1924 if ((seg == (cmd->c_dmamap->dm_nsegs-1)) && (posaddr == (segaddr + seglen))) {
1925 continue;
1926 }
1927 if ((posaddr >= segaddr) && (posaddr < (segaddr + seglen)))
1928 HWRITE4(hp, SDHC_DMA_ADDR, posaddr);
1929 else if ((posaddr >= segaddr) && (posaddr == (segaddr + seglen)) && (seg + 1) < cmd->c_dmamap->dm_nsegs)
1930 HWRITE4(hp, SDHC_DMA_ADDR, dm_segs[++seg].ds_addr);
1931 KASSERT(seg < cmd->c_dmamap->dm_nsegs);
1932 }
1933
1934 if (ISSET(hp->flags, SHF_USE_ADMA2_MASK)) {
1935 bus_dmamap_sync(hp->sc->sc_dmat, hp->adma_map, 0,
1936 PAGE_SIZE, BUS_DMASYNC_POSTWRITE);
1937 }
1938
1939 return error;
1940 }
1941
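/*
 * Transfer the command's data through the data port by programmed I/O.
 * Before each block, wait for the buffer read/write ready interrupt and
 * the corresponding present-state bit, then copy one block with the
 * standard or eSDHC/uSDHC helper, and finally wait for
 * SDHC_TRANSFER_COMPLETE.
 */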
1942 static int
1943 sdhc_transfer_data_pio(struct sdhc_host *hp, struct sdmmc_command *cmd)
1944 {
1945 uint8_t *data = cmd->c_data;
1946 void (*pio_func)(struct sdhc_host *, uint8_t *, u_int);
1947 u_int len, datalen;
1948 u_int imask;
1949 u_int pmask;
1950 int error = 0;
1951
1952 KASSERT(mutex_owned(&hp->intr_lock));
1953
1954 if (ISSET(cmd->c_flags, SCF_CMD_READ)) {
1955 imask = SDHC_BUFFER_READ_READY;
1956 pmask = SDHC_BUFFER_READ_ENABLE;
1957 if (ISSET(hp->sc->sc_flags,
1958 SDHC_FLAG_ENHANCED | SDHC_FLAG_USDHC)) {
1959 pio_func = esdhc_read_data_pio;
1960 } else {
1961 pio_func = sdhc_read_data_pio;
1962 }
1963 } else {
1964 imask = SDHC_BUFFER_WRITE_READY;
1965 pmask = SDHC_BUFFER_WRITE_ENABLE;
1966 if (ISSET(hp->sc->sc_flags,
1967 SDHC_FLAG_ENHANCED | SDHC_FLAG_USDHC)) {
1968 pio_func = esdhc_write_data_pio;
1969 } else {
1970 pio_func = sdhc_write_data_pio;
1971 }
1972 }
1973 datalen = cmd->c_datalen;
1974
1975 KASSERT(mutex_owned(&hp->intr_lock));
1976 KASSERT(HREAD2(hp, SDHC_NINTR_STATUS_EN) & imask);
1977 KASSERT(HREAD2(hp, SDHC_NINTR_STATUS_EN) & SDHC_TRANSFER_COMPLETE);
1978 KASSERT(HREAD2(hp, SDHC_NINTR_SIGNAL_EN) & SDHC_TRANSFER_COMPLETE);
1979
1980 while (datalen > 0) {
1981 if (!ISSET(HREAD4(hp, SDHC_PRESENT_STATE), pmask)) {
1982 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_32BIT_ACCESS)) {
1983 HSET4(hp, SDHC_NINTR_SIGNAL_EN, imask);
1984 } else {
1985 HSET2(hp, SDHC_NINTR_SIGNAL_EN, imask);
1986 }
1987 if (!sdhc_wait_intr(hp, imask, SDHC_BUFFER_TIMEOUT, false)) {
1988 DPRINTF(1,("%s: timeout\n", __func__));
1989 error = ETIMEDOUT;
1990 break;
1991 }
1992
1993 error = sdhc_wait_state(hp, pmask, pmask);
1994 if (error)
1995 break;
1996 }
1997
1998 len = MIN(datalen, cmd->c_blklen);
1999 (*pio_func)(hp, data, len);
2000 DPRINTF(2,("%s: pio data transfer %u @ %p\n",
2001 HDEVNAME(hp), len, data));
2002
2003 data += len;
2004 datalen -= len;
2005 }
2006
2007 if (error == 0 && !sdhc_wait_intr(hp, SDHC_TRANSFER_COMPLETE,
2008 SDHC_TRANSFER_TIMEOUT, false)) {
2009 DPRINTF(1,("%s: timeout for transfer\n", __func__));
2010 error = ETIMEDOUT;
2011 }
2012
2013 return error;
2014 }
2015
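/*
 * PIO read for standard hosts: drain SDHC_DATA with the widest accesses
 * the buffer alignment allows (4-, 2- or 1-byte reads).
 */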
2016 static void
2017 sdhc_read_data_pio(struct sdhc_host *hp, uint8_t *data, u_int datalen)
2018 {
2019
2020 if (((__uintptr_t)data & 3) == 0) {
2021 while (datalen > 3) {
2022 *(uint32_t *)data = le32toh(HREAD4(hp, SDHC_DATA));
2023 data += 4;
2024 datalen -= 4;
2025 }
2026 if (datalen > 1) {
2027 *(uint16_t *)data = le16toh(HREAD2(hp, SDHC_DATA));
2028 data += 2;
2029 datalen -= 2;
2030 }
2031 if (datalen > 0) {
2032 *data = HREAD1(hp, SDHC_DATA);
2033 data += 1;
2034 datalen -= 1;
2035 }
2036 } else if (((__uintptr_t)data & 1) == 0) {
2037 while (datalen > 1) {
2038 *(uint16_t *)data = le16toh(HREAD2(hp, SDHC_DATA));
2039 data += 2;
2040 datalen -= 2;
2041 }
2042 if (datalen > 0) {
2043 *data = HREAD1(hp, SDHC_DATA);
2044 data += 1;
2045 datalen -= 1;
2046 }
2047 } else {
2048 while (datalen > 0) {
2049 *data = HREAD1(hp, SDHC_DATA);
2050 data += 1;
2051 datalen -= 1;
2052 }
2053 }
2054 }
2055
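/*
 * PIO write for standard hosts: fill SDHC_DATA with the widest accesses
 * the buffer alignment allows (4-, 2- or 1-byte writes).
 */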
2056 static void
2057 sdhc_write_data_pio(struct sdhc_host *hp, uint8_t *data, u_int datalen)
2058 {
2059
2060 if (((__uintptr_t)data & 3) == 0) {
2061 while (datalen > 3) {
2062 HWRITE4(hp, SDHC_DATA, htole32(*(uint32_t *)data));
2063 data += 4;
2064 datalen -= 4;
2065 }
2066 if (datalen > 1) {
2067 HWRITE2(hp, SDHC_DATA, htole16(*(uint16_t *)data));
2068 data += 2;
2069 datalen -= 2;
2070 }
2071 if (datalen > 0) {
2072 HWRITE1(hp, SDHC_DATA, *data);
2073 data += 1;
2074 datalen -= 1;
2075 }
2076 } else if (((__uintptr_t)data & 1) == 0) {
2077 while (datalen > 1) {
2078 HWRITE2(hp, SDHC_DATA, htole16(*(uint16_t *)data));
2079 data += 2;
2080 datalen -= 2;
2081 }
2082 if (datalen > 0) {
2083 HWRITE1(hp, SDHC_DATA, *data);
2084 data += 1;
2085 datalen -= 1;
2086 }
2087 } else {
2088 while (datalen > 0) {
2089 HWRITE1(hp, SDHC_DATA, *data);
2090 data += 1;
2091 datalen -= 1;
2092 }
2093 }
2094 }
2095
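/*
 * PIO read for enhanced (eSDHC/uSDHC) hosts: the data port is always
 * read 32 bits at a time, and after every "watermark" words a short
 * delay lets the read FIFO refill.  Stop early if the controller
 * reports SDHC_TRANSFER_COMPLETE.
 */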
2096 static void
2097 esdhc_read_data_pio(struct sdhc_host *hp, uint8_t *data, u_int datalen)
2098 {
2099 uint16_t status = HREAD2(hp, SDHC_NINTR_STATUS);
2100 uint32_t v;
2101
2102 const size_t watermark = (HREAD4(hp, SDHC_WATERMARK_LEVEL) >> SDHC_WATERMARK_READ_SHIFT) & SDHC_WATERMARK_READ_MASK;
2103 size_t count = 0;
2104
2105 while (datalen > 3 && !ISSET(status, SDHC_TRANSFER_COMPLETE)) {
2106 if (count == 0) {
2107 /*
2108 * If we've drained "watermark" words, we need to wait
2109 * a little bit so the read FIFO can refill.
2110 */
2111 sdmmc_delay(10);
2112 count = watermark;
2113 }
2114 v = HREAD4(hp, SDHC_DATA);
2115 v = le32toh(v);
2116 *(uint32_t *)data = v;
2117 data += 4;
2118 datalen -= 4;
2119 status = HREAD2(hp, SDHC_NINTR_STATUS);
2120 count--;
2121 }
2122 if (datalen > 0 && !ISSET(status, SDHC_TRANSFER_COMPLETE)) {
2123 if (count == 0) {
2124 sdmmc_delay(10);
2125 }
2126 v = HREAD4(hp, SDHC_DATA);
2127 v = le32toh(v);
2128 do {
2129 *data++ = v;
2130 v >>= 8;
2131 } while (--datalen > 0);
2132 }
2133 }
2134
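/*
 * PIO write for enhanced (eSDHC/uSDHC) hosts: mirror of the read path
 * above, pausing after every "watermark" words (presumably to let the
 * write FIFO drain) and stopping once SDHC_TRANSFER_COMPLETE is seen.
 */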
2135 static void
2136 esdhc_write_data_pio(struct sdhc_host *hp, uint8_t *data, u_int datalen)
2137 {
2138 uint16_t status = HREAD2(hp, SDHC_NINTR_STATUS);
2139 uint32_t v;
2140
2141 const size_t watermark = (HREAD4(hp, SDHC_WATERMARK_LEVEL) >> SDHC_WATERMARK_WRITE_SHIFT) & SDHC_WATERMARK_WRITE_MASK;
2142 size_t count = watermark;
2143
2144 while (datalen > 3 && !ISSET(status, SDHC_TRANSFER_COMPLETE)) {
2145 if (count == 0) {
2146 sdmmc_delay(10);
2147 count = watermark;
2148 }
2149 v = *(uint32_t *)data;
2150 v = htole32(v);
2151 HWRITE4(hp, SDHC_DATA, v);
2152 data += 4;
2153 datalen -= 4;
2154 status = HREAD2(hp, SDHC_NINTR_STATUS);
2155 count--;
2156 }
2157 if (datalen > 0 && !ISSET(status, SDHC_TRANSFER_COMPLETE)) {
2158 if (count == 0) {
2159 sdmmc_delay(10);
2160 }
2161 v = *(uint32_t *)data;
2162 v = htole32(v);
2163 HWRITE4(hp, SDHC_DATA, v);
2164 }
2165 }
2166
2167 /* Prepare for another command. */
2168 static int
2169 sdhc_soft_reset(struct sdhc_host *hp, int mask)
2170 {
2171 int timo;
2172
2173 KASSERT(mutex_owned(&hp->intr_lock));
2174
2175 DPRINTF(1,("%s: software reset reg=%08x\n", HDEVNAME(hp), mask));
2176
2177 /* Request the reset. */
2178 HWRITE1(hp, SDHC_SOFTWARE_RESET, mask);
2179
2180 /*
2181 * If necessary, wait for the controller to set the bits to
2182 * acknowledge the reset.
2183 */
2184 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_WAIT_RESET) &&
2185 ISSET(mask, (SDHC_RESET_DAT | SDHC_RESET_CMD))) {
2186 for (timo = 10000; timo > 0; timo--) {
2187 if (ISSET(HREAD1(hp, SDHC_SOFTWARE_RESET), mask))
2188 break;
2189 /* Short delay because I worry we may miss it... */
2190 sdmmc_delay(1);
2191 }
2192 if (timo == 0) {
2193 			DPRINTF(1,("%s: timeout waiting for reset ack\n", __func__));
2194 return ETIMEDOUT;
2195 }
2196 }
2197
2198 /*
2199 * Wait for the controller to clear the bits to indicate that
2200 * the reset has completed.
2201 */
2202 for (timo = 10; timo > 0; timo--) {
2203 if (!ISSET(HREAD1(hp, SDHC_SOFTWARE_RESET), mask))
2204 break;
2205 sdmmc_delay(10000);
2206 }
2207 if (timo == 0) {
2208 DPRINTF(1,("%s: timeout reg=%08x\n", HDEVNAME(hp),
2209 HREAD1(hp, SDHC_SOFTWARE_RESET)));
2210 return ETIMEDOUT;
2211 }
2212
2213 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED)) {
2214 HSET4(hp, SDHC_DMA_CTL, SDHC_DMA_SNOOP);
2215 }
2216
2217 return 0;
2218 }
2219
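/*
 * Sleep until sdhc_intr() posts one of the interrupts in "mask" (error
 * interrupts are always included) or "timo" ticks elapse.  The consumed
 * status and error bits are cleared from the soft copies.  On timeout
 * or error the CMD and DAT lines are reset (skipped for hosts with
 * SDHC_FLAG_ENHANCED) and 0 is returned; otherwise the matched status
 * bits are returned.  Called with the interrupt lock held.
 */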
2220 static int
2221 sdhc_wait_intr(struct sdhc_host *hp, int mask, int timo, bool probing)
2222 {
2223 int status, error, nointr;
2224
2225 KASSERT(mutex_owned(&hp->intr_lock));
2226
2227 mask |= SDHC_ERROR_INTERRUPT;
2228
2229 nointr = 0;
2230 status = hp->intr_status & mask;
2231 while (status == 0) {
2232 if (cv_timedwait(&hp->intr_cv, &hp->intr_lock, timo)
2233 == EWOULDBLOCK) {
2234 nointr = 1;
2235 break;
2236 }
2237 status = hp->intr_status & mask;
2238 }
2239 error = hp->intr_error_status;
2240
2241 DPRINTF(2,("%s: intr status %#x error %#x\n", HDEVNAME(hp), status,
2242 error));
2243
2244 hp->intr_status &= ~status;
2245 hp->intr_error_status &= ~error;
2246
2247 if (ISSET(status, SDHC_ERROR_INTERRUPT)) {
2248 if (ISSET(error, SDHC_DMA_ERROR))
2249 device_printf(hp->sc->sc_dev,"dma error\n");
2250 if (ISSET(error, SDHC_ADMA_ERROR))
2251 device_printf(hp->sc->sc_dev,"adma error\n");
2252 if (ISSET(error, SDHC_AUTO_CMD12_ERROR))
2253 device_printf(hp->sc->sc_dev,"auto_cmd12 error\n");
2254 if (ISSET(error, SDHC_CURRENT_LIMIT_ERROR))
2255 device_printf(hp->sc->sc_dev,"current limit error\n");
2256 if (ISSET(error, SDHC_DATA_END_BIT_ERROR))
2257 device_printf(hp->sc->sc_dev,"data end bit error\n");
2258 if (ISSET(error, SDHC_DATA_CRC_ERROR))
2259 device_printf(hp->sc->sc_dev,"data crc error\n");
2260 if (ISSET(error, SDHC_DATA_TIMEOUT_ERROR))
2261 device_printf(hp->sc->sc_dev,"data timeout error\n");
2262 if (ISSET(error, SDHC_CMD_INDEX_ERROR))
2263 device_printf(hp->sc->sc_dev,"cmd index error\n");
2264 if (ISSET(error, SDHC_CMD_END_BIT_ERROR))
2265 device_printf(hp->sc->sc_dev,"cmd end bit error\n");
2266 if (ISSET(error, SDHC_CMD_CRC_ERROR))
2267 device_printf(hp->sc->sc_dev,"cmd crc error\n");
2268 if (ISSET(error, SDHC_CMD_TIMEOUT_ERROR)) {
2269 if (!probing)
2270 device_printf(hp->sc->sc_dev,"cmd timeout error\n");
2271 #ifdef SDHC_DEBUG
2272 else if (sdhcdebug > 0)
2273 device_printf(hp->sc->sc_dev,"cmd timeout (expected)\n");
2274 #endif
2275 }
2276 if ((error & ~SDHC_EINTR_STATUS_MASK) != 0)
2277 device_printf(hp->sc->sc_dev,"vendor error %#x\n",
2278 (error & ~SDHC_EINTR_STATUS_MASK));
2279 if (error == 0)
2280 device_printf(hp->sc->sc_dev,"no error\n");
2281
2282 /* Command timeout has higher priority than command complete. */
2283 if (ISSET(error, SDHC_CMD_TIMEOUT_ERROR))
2284 CLR(status, SDHC_COMMAND_COMPLETE);
2285
2286 /* Transfer complete has higher priority than data timeout. */
2287 if (ISSET(status, SDHC_TRANSFER_COMPLETE))
2288 CLR(error, SDHC_DATA_TIMEOUT_ERROR);
2289 }
2290
2291 if (nointr ||
2292 (ISSET(status, SDHC_ERROR_INTERRUPT) && error)) {
2293 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED))
2294 (void)sdhc_soft_reset(hp, SDHC_RESET_CMD|SDHC_RESET_DAT);
2295 hp->intr_error_status = 0;
2296 status = 0;
2297 }
2298
2299 return status;
2300 }
2301
2302 /*
2303  * Established by the attachment driver at interrupt priority IPL_SDMMC.
2304 */
2305 int
2306 sdhc_intr(void *arg)
2307 {
2308 struct sdhc_softc *sc = (struct sdhc_softc *)arg;
2309 struct sdhc_host *hp;
2310 int done = 0;
2311 uint16_t status;
2312 uint16_t error;
2313
2314 /* We got an interrupt, but we don't know from which slot. */
2315 for (size_t host = 0; host < sc->sc_nhosts; host++) {
2316 hp = sc->sc_host[host];
2317 if (hp == NULL)
2318 continue;
2319
2320 mutex_enter(&hp->intr_lock);
2321
2322 if (ISSET(sc->sc_flags, SDHC_FLAG_32BIT_ACCESS)) {
2323 /* Find out which interrupts are pending. */
2324 uint32_t xstatus = HREAD4(hp, SDHC_NINTR_STATUS);
2325 status = xstatus;
2326 error = xstatus >> 16;
2327 if (ISSET(sc->sc_flags, SDHC_FLAG_USDHC) &&
2328 (xstatus & SDHC_TRANSFER_COMPLETE) &&
2329 !(xstatus & SDHC_DMA_INTERRUPT)) {
2330 /* read again due to uSDHC errata */
2331 status = xstatus = HREAD4(hp,
2332 SDHC_NINTR_STATUS);
2333 error = xstatus >> 16;
2334 }
2335 if (ISSET(sc->sc_flags,
2336 SDHC_FLAG_ENHANCED | SDHC_FLAG_USDHC)) {
2337 if ((error & SDHC_NINTR_STATUS_MASK) != 0)
2338 SET(status, SDHC_ERROR_INTERRUPT);
2339 }
2340 if (error)
2341 xstatus |= SDHC_ERROR_INTERRUPT;
2342 else if (!ISSET(status, SDHC_NINTR_STATUS_MASK))
2343 goto next_port; /* no interrupt for us */
2344 /* Acknowledge the interrupts we are about to handle. */
2345 HWRITE4(hp, SDHC_NINTR_STATUS, xstatus);
2346 } else {
2347 /* Find out which interrupts are pending. */
2348 error = 0;
2349 status = HREAD2(hp, SDHC_NINTR_STATUS);
2350 if (!ISSET(status, SDHC_NINTR_STATUS_MASK))
2351 goto next_port; /* no interrupt for us */
2352 /* Acknowledge the interrupts we are about to handle. */
2353 HWRITE2(hp, SDHC_NINTR_STATUS, status);
2354 if (ISSET(status, SDHC_ERROR_INTERRUPT)) {
2355 /* Acknowledge error interrupts. */
2356 error = HREAD2(hp, SDHC_EINTR_STATUS);
2357 HWRITE2(hp, SDHC_EINTR_STATUS, error);
2358 }
2359 }
2360
2361 DPRINTF(2,("%s: interrupt status=%x error=%x\n", HDEVNAME(hp),
2362 status, error));
2363
2364 /* Claim this interrupt. */
2365 done = 1;
2366
2367 if (ISSET(status, SDHC_ERROR_INTERRUPT) &&
2368 ISSET(error, SDHC_ADMA_ERROR)) {
2369 uint8_t adma_err = HREAD1(hp, SDHC_ADMA_ERROR_STATUS);
2370 printf("%s: ADMA error, status %02x\n", HDEVNAME(hp),
2371 adma_err);
2372 }
2373
2374 /*
2375 * Wake up the sdmmc event thread to scan for cards.
2376 */
2377 if (ISSET(status, SDHC_CARD_REMOVAL|SDHC_CARD_INSERTION)) {
2378 if (hp->sdmmc != NULL) {
2379 sdmmc_needs_discover(hp->sdmmc);
2380 }
2381 if (ISSET(sc->sc_flags,
2382 SDHC_FLAG_ENHANCED | SDHC_FLAG_USDHC)) {
2383 HCLR4(hp, SDHC_NINTR_STATUS_EN,
2384 status & (SDHC_CARD_REMOVAL|SDHC_CARD_INSERTION));
2385 HCLR4(hp, SDHC_NINTR_SIGNAL_EN,
2386 status & (SDHC_CARD_REMOVAL|SDHC_CARD_INSERTION));
2387 }
2388 }
2389
2390 /*
2391 * Schedule re-tuning process (UHS).
2392 */
2393 if (ISSET(status, SDHC_RETUNING_EVENT)) {
2394 atomic_swap_uint(&hp->tuning_timer_pending, 1);
2395 }
2396
2397 /*
2398 		 * Wake up the blocked process to service the
2399 		 * command-related interrupt(s).
2400 */
2401 if (ISSET(status, SDHC_COMMAND_COMPLETE|SDHC_ERROR_INTERRUPT|
2402 SDHC_BUFFER_READ_READY|SDHC_BUFFER_WRITE_READY|
2403 SDHC_TRANSFER_COMPLETE|SDHC_DMA_INTERRUPT)) {
2404 hp->intr_error_status |= error;
2405 hp->intr_status |= status;
2406 if (ISSET(sc->sc_flags,
2407 SDHC_FLAG_ENHANCED | SDHC_FLAG_USDHC)) {
2408 HCLR4(hp, SDHC_NINTR_SIGNAL_EN,
2409 status & (SDHC_BUFFER_READ_READY|SDHC_BUFFER_WRITE_READY));
2410 }
2411 cv_broadcast(&hp->intr_cv);
2412 }
2413
2414 /*
2415 * Service SD card interrupts.
2416 */
2417 if (!ISSET(sc->sc_flags, SDHC_FLAG_ENHANCED | SDHC_FLAG_USDHC)
2418 && ISSET(status, SDHC_CARD_INTERRUPT)) {
2419 DPRINTF(0,("%s: card interrupt\n", HDEVNAME(hp)));
2420 HCLR2(hp, SDHC_NINTR_STATUS_EN, SDHC_CARD_INTERRUPT);
2421 sdmmc_card_intr(hp->sdmmc);
2422 }
2423 next_port:
2424 mutex_exit(&hp->intr_lock);
2425 }
2426
2427 return done;
2428 }
2429
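/*
 * Exported helpers: the per-host interrupt lock and raw 1-, 2- and
 * 4-byte register access, for use by front-end and vendor-specific code.
 */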
2430 kmutex_t *
2431 sdhc_host_lock(struct sdhc_host *hp)
2432 {
2433 return &hp->intr_lock;
2434 }
2435
2436 uint8_t
2437 sdhc_host_read_1(struct sdhc_host *hp, int reg)
2438 {
2439 return HREAD1(hp, reg);
2440 }
2441
2442 uint16_t
2443 sdhc_host_read_2(struct sdhc_host *hp, int reg)
2444 {
2445 return HREAD2(hp, reg);
2446 }
2447
2448 uint32_t
2449 sdhc_host_read_4(struct sdhc_host *hp, int reg)
2450 {
2451 return HREAD4(hp, reg);
2452 }
2453
2454 void
2455 sdhc_host_write_1(struct sdhc_host *hp, int reg, uint8_t val)
2456 {
2457 HWRITE1(hp, reg, val);
2458 }
2459
2460 void
2461 sdhc_host_write_2(struct sdhc_host *hp, int reg, uint16_t val)
2462 {
2463 HWRITE2(hp, reg, val);
2464 }
2465
2466 void
2467 sdhc_host_write_4(struct sdhc_host *hp, int reg, uint32_t val)
2468 {
2469 HWRITE4(hp, reg, val);
2470 }
2471
2472 #ifdef SDHC_DEBUG
2473 void
2474 sdhc_dump_regs(struct sdhc_host *hp)
2475 {
2476
2477 printf("0x%02x PRESENT_STATE: %x\n", SDHC_PRESENT_STATE,
2478 HREAD4(hp, SDHC_PRESENT_STATE));
2479 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED))
2480 printf("0x%02x POWER_CTL: %x\n", SDHC_POWER_CTL,
2481 HREAD1(hp, SDHC_POWER_CTL));
2482 printf("0x%02x NINTR_STATUS: %x\n", SDHC_NINTR_STATUS,
2483 HREAD2(hp, SDHC_NINTR_STATUS));
2484 printf("0x%02x EINTR_STATUS: %x\n", SDHC_EINTR_STATUS,
2485 HREAD2(hp, SDHC_EINTR_STATUS));
2486 printf("0x%02x NINTR_STATUS_EN: %x\n", SDHC_NINTR_STATUS_EN,
2487 HREAD2(hp, SDHC_NINTR_STATUS_EN));
2488 printf("0x%02x EINTR_STATUS_EN: %x\n", SDHC_EINTR_STATUS_EN,
2489 HREAD2(hp, SDHC_EINTR_STATUS_EN));
2490 printf("0x%02x NINTR_SIGNAL_EN: %x\n", SDHC_NINTR_SIGNAL_EN,
2491 HREAD2(hp, SDHC_NINTR_SIGNAL_EN));
2492 printf("0x%02x EINTR_SIGNAL_EN: %x\n", SDHC_EINTR_SIGNAL_EN,
2493 HREAD2(hp, SDHC_EINTR_SIGNAL_EN));
2494 printf("0x%02x CAPABILITIES: %x\n", SDHC_CAPABILITIES,
2495 HREAD4(hp, SDHC_CAPABILITIES));
2496 printf("0x%02x MAX_CAPABILITIES: %x\n", SDHC_MAX_CAPABILITIES,
2497 HREAD4(hp, SDHC_MAX_CAPABILITIES));
2498 }
2499 #endif
2500