1 /* $NetBSD: sdhc.c,v 1.30.2.4 2017/12/03 11:37:32 jdolecek Exp $ */
2 /* $OpenBSD: sdhc.c,v 1.25 2009/01/13 19:44:20 grange Exp $ */
3
4 /*
5 * Copyright (c) 2006 Uwe Stuehler <uwe (at) openbsd.org>
6 *
7 * Permission to use, copy, modify, and distribute this software for any
8 * purpose with or without fee is hereby granted, provided that the above
9 * copyright notice and this permission notice appear in all copies.
10 *
11 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18 */
19
20 /*
21 * SD Host Controller driver based on the SD Host Controller Standard
22 * Simplified Specification Version 1.00 (www.sdcard.org).
23 */
24
25 #include <sys/cdefs.h>
26 __KERNEL_RCSID(0, "$NetBSD: sdhc.c,v 1.30.2.4 2017/12/03 11:37:32 jdolecek Exp $");
27
28 #ifdef _KERNEL_OPT
29 #include "opt_sdmmc.h"
30 #endif
31
32 #include <sys/param.h>
33 #include <sys/device.h>
34 #include <sys/kernel.h>
35 #include <sys/malloc.h>
36 #include <sys/systm.h>
37 #include <sys/mutex.h>
38 #include <sys/condvar.h>
39 #include <sys/atomic.h>
40
41 #include <dev/sdmmc/sdhcreg.h>
42 #include <dev/sdmmc/sdhcvar.h>
43 #include <dev/sdmmc/sdmmcchip.h>
44 #include <dev/sdmmc/sdmmcreg.h>
45 #include <dev/sdmmc/sdmmcvar.h>
46
47 #ifdef SDHC_DEBUG
48 int sdhcdebug = 1;
49 #define DPRINTF(n,s) do { if ((n) <= sdhcdebug) printf s; } while (0)
50 void sdhc_dump_regs(struct sdhc_host *);
51 #else
52 #define DPRINTF(n,s) do {} while (0)
53 #endif
54
55 #define SDHC_COMMAND_TIMEOUT hz
56 #define SDHC_BUFFER_TIMEOUT hz
57 #define SDHC_TRANSFER_TIMEOUT hz
58 #define SDHC_DMA_TIMEOUT (hz*3)
59 #define SDHC_TUNING_TIMEOUT hz
60
61 struct sdhc_host {
62 struct sdhc_softc *sc; /* host controller device */
63
64 bus_space_tag_t iot; /* host register set tag */
65 bus_space_handle_t ioh; /* host register set handle */
66 bus_size_t ios; /* host register space size */
67 bus_dma_tag_t dmat; /* host DMA tag */
68
69 device_t sdmmc; /* generic SD/MMC device */
70
71 u_int clkbase; /* base clock frequency in KHz */
72 int maxblklen; /* maximum block length */
73 uint32_t ocr; /* OCR value from capabilities */
74
75 uint8_t regs[14]; /* host controller state */
76
77 uint16_t intr_status; /* soft interrupt status */
78 uint16_t intr_error_status; /* soft error status */
79 kmutex_t intr_lock;
80 kcondvar_t intr_cv;
81
82 callout_t tuning_timer;
83 int tuning_timing;
84 u_int tuning_timer_count;
85 u_int tuning_timer_pending;
86
87 int specver; /* spec. version */
88
89 uint32_t flags; /* flags for this host */
90 #define SHF_USE_DMA 0x0001
91 #define SHF_USE_4BIT_MODE 0x0002
92 #define SHF_USE_8BIT_MODE 0x0004
93 #define SHF_MODE_DMAEN 0x0008 /* needs SDHC_DMA_ENABLE in mode */
94 #define SHF_USE_ADMA2_32 0x0010
95 #define SHF_USE_ADMA2_64 0x0020
96 #define SHF_USE_ADMA2_MASK 0x0030
97
98 bus_dmamap_t adma_map;
99 bus_dma_segment_t adma_segs[1];
100 void *adma2;
101 };
102
103 #define HDEVNAME(hp) (device_xname((hp)->sc->sc_dev))
104
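/*
 * Register access helpers.  Hosts flagged SDHC_FLAG_32BIT_ACCESS only
 * tolerate 32-bit MMIO, so the 8- and 16-bit register accesses below are
 * emulated with aligned 32-bit reads and read-modify-write cycles.
 */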
105 static uint8_t
106 hread1(struct sdhc_host *hp, bus_size_t reg)
107 {
108
109 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_32BIT_ACCESS))
110 return bus_space_read_1(hp->iot, hp->ioh, reg);
111 return bus_space_read_4(hp->iot, hp->ioh, reg & -4) >> (8 * (reg & 3));
112 }
113
114 static uint16_t
115 hread2(struct sdhc_host *hp, bus_size_t reg)
116 {
117
118 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_32BIT_ACCESS))
119 return bus_space_read_2(hp->iot, hp->ioh, reg);
120 return bus_space_read_4(hp->iot, hp->ioh, reg & -4) >> (8 * (reg & 2));
121 }
122
123 #define HREAD1(hp, reg) hread1(hp, reg)
124 #define HREAD2(hp, reg) hread2(hp, reg)
125 #define HREAD4(hp, reg) \
126 (bus_space_read_4((hp)->iot, (hp)->ioh, (reg)))
127
128
129 static void
130 hwrite1(struct sdhc_host *hp, bus_size_t o, uint8_t val)
131 {
132
133 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_32BIT_ACCESS)) {
134 bus_space_write_1(hp->iot, hp->ioh, o, val);
135 } else {
136 const size_t shift = 8 * (o & 3);
137 o &= -4;
138 uint32_t tmp = bus_space_read_4(hp->iot, hp->ioh, o);
139 tmp = (val << shift) | (tmp & ~(0xff << shift));
140 bus_space_write_4(hp->iot, hp->ioh, o, tmp);
141 }
142 }
143
144 static void
145 hwrite2(struct sdhc_host *hp, bus_size_t o, uint16_t val)
146 {
147
148 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_32BIT_ACCESS)) {
149 bus_space_write_2(hp->iot, hp->ioh, o, val);
150 } else {
151 const size_t shift = 8 * (o & 2);
152 o &= -4;
153 uint32_t tmp = bus_space_read_4(hp->iot, hp->ioh, o);
154 tmp = (val << shift) | (tmp & ~(0xffff << shift));
155 bus_space_write_4(hp->iot, hp->ioh, o, tmp);
156 }
157 }
158
159 #define HWRITE1(hp, reg, val) hwrite1(hp, reg, val)
160 #define HWRITE2(hp, reg, val) hwrite2(hp, reg, val)
161 #define HWRITE4(hp, reg, val) \
162 bus_space_write_4((hp)->iot, (hp)->ioh, (reg), (val))
163
164 #define HCLR1(hp, reg, bits) \
165 do if (bits) HWRITE1((hp), (reg), HREAD1((hp), (reg)) & ~(bits)); while (0)
166 #define HCLR2(hp, reg, bits) \
167 do if (bits) HWRITE2((hp), (reg), HREAD2((hp), (reg)) & ~(bits)); while (0)
168 #define HCLR4(hp, reg, bits) \
169 do if (bits) HWRITE4((hp), (reg), HREAD4((hp), (reg)) & ~(bits)); while (0)
170 #define HSET1(hp, reg, bits) \
171 do if (bits) HWRITE1((hp), (reg), HREAD1((hp), (reg)) | (bits)); while (0)
172 #define HSET2(hp, reg, bits) \
173 do if (bits) HWRITE2((hp), (reg), HREAD2((hp), (reg)) | (bits)); while (0)
174 #define HSET4(hp, reg, bits) \
175 do if (bits) HWRITE4((hp), (reg), HREAD4((hp), (reg)) | (bits)); while (0)
176
177 static int sdhc_host_reset(sdmmc_chipset_handle_t);
178 static int sdhc_host_reset1(sdmmc_chipset_handle_t);
179 static uint32_t sdhc_host_ocr(sdmmc_chipset_handle_t);
180 static int sdhc_host_maxblklen(sdmmc_chipset_handle_t);
181 static int sdhc_card_detect(sdmmc_chipset_handle_t);
182 static int sdhc_write_protect(sdmmc_chipset_handle_t);
183 static int sdhc_bus_power(sdmmc_chipset_handle_t, uint32_t);
184 static int sdhc_bus_clock_ddr(sdmmc_chipset_handle_t, int, bool);
185 static int sdhc_bus_width(sdmmc_chipset_handle_t, int);
186 static int sdhc_bus_rod(sdmmc_chipset_handle_t, int);
187 static void sdhc_card_enable_intr(sdmmc_chipset_handle_t, int);
188 static void sdhc_card_intr_ack(sdmmc_chipset_handle_t);
189 static void sdhc_exec_command(sdmmc_chipset_handle_t,
190 struct sdmmc_command *);
191 static int sdhc_signal_voltage(sdmmc_chipset_handle_t, int);
192 static int sdhc_execute_tuning1(struct sdhc_host *, int);
193 static int sdhc_execute_tuning(sdmmc_chipset_handle_t, int);
194 static void sdhc_tuning_timer(void *);
195 static void sdhc_hw_reset(sdmmc_chipset_handle_t);
196 static int sdhc_start_command(struct sdhc_host *, struct sdmmc_command *);
197 static int sdhc_wait_state(struct sdhc_host *, uint32_t, uint32_t);
198 static int sdhc_soft_reset(struct sdhc_host *, int);
199 static int sdhc_wait_intr(struct sdhc_host *, int, int, bool);
200 static void sdhc_transfer_data(struct sdhc_host *, struct sdmmc_command *);
201 static int sdhc_transfer_data_dma(struct sdhc_host *, struct sdmmc_command *);
202 static int sdhc_transfer_data_pio(struct sdhc_host *, struct sdmmc_command *);
203 static void sdhc_read_data_pio(struct sdhc_host *, uint8_t *, u_int);
204 static void sdhc_write_data_pio(struct sdhc_host *, uint8_t *, u_int);
205 static void esdhc_read_data_pio(struct sdhc_host *, uint8_t *, u_int);
206 static void esdhc_write_data_pio(struct sdhc_host *, uint8_t *, u_int);
207
208 static struct sdmmc_chip_functions sdhc_functions = {
209 /* host controller reset */
210 .host_reset = sdhc_host_reset,
211
212 /* host controller capabilities */
213 .host_ocr = sdhc_host_ocr,
214 .host_maxblklen = sdhc_host_maxblklen,
215
216 /* card detection */
217 .card_detect = sdhc_card_detect,
218
219 /* write protect */
220 .write_protect = sdhc_write_protect,
221
222 /* bus power, clock frequency, width and ROD (open-drain/push-pull) */
223 .bus_power = sdhc_bus_power,
224 .bus_clock = NULL, /* see sdhc_bus_clock_ddr */
225 .bus_width = sdhc_bus_width,
226 .bus_rod = sdhc_bus_rod,
227
228 /* command execution */
229 .exec_command = sdhc_exec_command,
230
231 /* card interrupt */
232 .card_enable_intr = sdhc_card_enable_intr,
233 .card_intr_ack = sdhc_card_intr_ack,
234
235 /* UHS functions */
236 .signal_voltage = sdhc_signal_voltage,
237 .bus_clock_ddr = sdhc_bus_clock_ddr,
238 .execute_tuning = sdhc_execute_tuning,
239 .hw_reset = sdhc_hw_reset,
240 };
241
242 static int
243 sdhc_cfprint(void *aux, const char *pnp)
244 {
245 const struct sdmmcbus_attach_args * const saa = aux;
246 const struct sdhc_host * const hp = saa->saa_sch;
247
248 if (pnp) {
249 aprint_normal("sdmmc at %s", pnp);
250 }
251 for (size_t host = 0; host < hp->sc->sc_nhosts; host++) {
252 if (hp->sc->sc_host[host] == hp) {
253 aprint_normal(" slot %zu", host);
254 }
255 }
256
257 return UNCONF;
258 }
259
260 /*
261 * Called by attachment driver. For each SD card slot there is one SD
262 * host controller standard register set. (1.3)
263 */
264 int
265 sdhc_host_found(struct sdhc_softc *sc, bus_space_tag_t iot,
266 bus_space_handle_t ioh, bus_size_t iosize)
267 {
268 struct sdmmcbus_attach_args saa;
269 struct sdhc_host *hp;
270 uint32_t caps, caps2;
271 uint16_t sdhcver;
272 int error;
273
274 /* Allocate one more host structure. */
275 hp = malloc(sizeof(struct sdhc_host), M_DEVBUF, M_WAITOK|M_ZERO);
276 if (hp == NULL) {
277 aprint_error_dev(sc->sc_dev,
278 "couldn't alloc memory (sdhc host)\n");
279 goto err1;
280 }
281 sc->sc_host[sc->sc_nhosts++] = hp;
282
283 /* Fill in the new host structure. */
284 hp->sc = sc;
285 hp->iot = iot;
286 hp->ioh = ioh;
287 hp->ios = iosize;
288 hp->dmat = sc->sc_dmat;
289
290 mutex_init(&hp->intr_lock, MUTEX_DEFAULT, IPL_SDMMC);
291 cv_init(&hp->intr_cv, "sdhcintr");
292 callout_init(&hp->tuning_timer, CALLOUT_MPSAFE);
293 callout_setfunc(&hp->tuning_timer, sdhc_tuning_timer, hp);
294
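/*
 * Work out which revision of the SDHC specification the controller
 * implements: uSDHC hosts are simply treated as 3.0, eSDHC hosts report
 * it through a vendor version register, and hosts whose register window
 * is too small to include SDHC_HOST_CTL_VERSION report no version at all.
 */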
295 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_USDHC)) {
296 sdhcver = SDHC_SPEC_VERS_300 << SDHC_SPEC_VERS_SHIFT;
297 } else if (ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED)) {
298 sdhcver = HREAD4(hp, SDHC_ESDHC_HOST_CTL_VERSION);
299 } else if (iosize <= SDHC_HOST_CTL_VERSION) {
300 sdhcver = SDHC_SPEC_NOVERS << SDHC_SPEC_VERS_SHIFT;
301 } else {
302 sdhcver = HREAD2(hp, SDHC_HOST_CTL_VERSION);
303 }
304 aprint_normal_dev(sc->sc_dev, "SDHC ");
305 hp->specver = SDHC_SPEC_VERSION(sdhcver);
306 switch (SDHC_SPEC_VERSION(sdhcver)) {
307 case SDHC_SPEC_VERS_100:
308 aprint_normal("1.0");
309 break;
310 case SDHC_SPEC_VERS_200:
311 aprint_normal("2.0");
312 break;
313 case SDHC_SPEC_VERS_300:
314 aprint_normal("3.0");
315 break;
316 case SDHC_SPEC_VERS_400:
317 aprint_normal("4.0");
318 break;
319 case SDHC_SPEC_NOVERS:
320 hp->specver = -1;
321 aprint_normal("NO-VERS");
322 break;
323 default:
324 aprint_normal("unknown version(0x%x)",
325 SDHC_SPEC_VERSION(sdhcver));
326 break;
327 }
328 if (SDHC_SPEC_VERSION(sdhcver) != SDHC_SPEC_NOVERS)
329 aprint_normal(", rev %u", SDHC_VENDOR_VERSION(sdhcver));
330
331 /*
332 * Reset the host controller and enable interrupts.
333 */
334 (void)sdhc_host_reset(hp);
335
336 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_USDHC)) {
337 /* init uSDHC registers */
338 HWRITE4(hp, SDHC_MMC_BOOT, 0);
339 HWRITE4(hp, SDHC_HOST_CTL, SDHC_USDHC_BURST_LEN_EN |
340 SDHC_USDHC_HOST_CTL_RESV23 | SDHC_USDHC_EMODE_LE);
341 HWRITE4(hp, SDHC_WATERMARK_LEVEL,
342 (0x10 << SDHC_WATERMARK_WR_BRST_SHIFT) |
343 (0x40 << SDHC_WATERMARK_WRITE_SHIFT) |
344 (0x10 << SDHC_WATERMARK_RD_BRST_SHIFT) |
345 (0x40 << SDHC_WATERMARK_READ_SHIFT));
346 HSET4(hp, SDHC_VEND_SPEC,
347 SDHC_VEND_SPEC_MBO |
348 SDHC_VEND_SPEC_CARD_CLK_SOFT_EN |
349 SDHC_VEND_SPEC_IPG_PERCLK_SOFT_EN |
350 SDHC_VEND_SPEC_HCLK_SOFT_EN |
351 SDHC_VEND_SPEC_IPG_CLK_SOFT_EN |
352 SDHC_VEND_SPEC_AC12_WR_CHKBUSY_EN |
353 SDHC_VEND_SPEC_FRC_SDCLK_ON);
354 }
355
356 /* Determine host capabilities. */
357 if (ISSET(sc->sc_flags, SDHC_FLAG_HOSTCAPS)) {
358 caps = sc->sc_caps;
359 caps2 = sc->sc_caps2;
360 } else if (ISSET(hp->sc->sc_flags, SDHC_FLAG_USDHC)) {
361 /* the uSDHC capability register differs a bit from the standard layout */
362 caps = HREAD4(hp, SDHC_CAPABILITIES);
363 caps |= SDHC_8BIT_SUPP;
364 if (caps & SDHC_ADMA1_SUPP)
365 caps |= SDHC_ADMA2_SUPP;
366 sc->sc_caps = caps;
367 /* uSDHC has no SDHC_CAPABILITIES2 register */
368 caps2 = sc->sc_caps2 = SDHC_SDR50_SUPP | SDHC_DDR50_SUPP;
369 } else {
370 caps = sc->sc_caps = HREAD4(hp, SDHC_CAPABILITIES);
371 if (hp->specver >= SDHC_SPEC_VERS_300) {
372 caps2 = sc->sc_caps2 = HREAD4(hp, SDHC_CAPABILITIES2);
373 } else {
374 caps2 = sc->sc_caps2 = 0;
375 }
376 }
377
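/*
 * Re-tuning mode 1: the capabilities encode the re-tuning timer as a
 * 4-bit exponent.  Decode it to 2^(n-1) seconds; 0 means no timer and
 * the reserved value 0xf is treated the same way.
 */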
378 const u_int retuning_mode = (caps2 >> SDHC_RETUNING_MODES_SHIFT) &
379 SDHC_RETUNING_MODES_MASK;
380 if (retuning_mode == SDHC_RETUNING_MODE_1) {
381 hp->tuning_timer_count = (caps2 >> SDHC_TIMER_COUNT_SHIFT) &
382 SDHC_TIMER_COUNT_MASK;
383 if (hp->tuning_timer_count == 0xf)
384 hp->tuning_timer_count = 0;
385 if (hp->tuning_timer_count)
386 hp->tuning_timer_count =
387 1 << (hp->tuning_timer_count - 1);
388 }
389
390 /*
391 * Use DMA if the host system and the controller support it.
392 * Supports an integrated or external DMA engine, with or without
393 * SDHC_DMA_ENABLE in the command.
394 */
395 if (ISSET(sc->sc_flags, SDHC_FLAG_FORCE_DMA) ||
396 (ISSET(sc->sc_flags, SDHC_FLAG_USE_DMA) &&
397 ISSET(caps, SDHC_DMA_SUPPORT))) {
398 SET(hp->flags, SHF_USE_DMA);
399
400 if (ISSET(sc->sc_flags, SDHC_FLAG_USE_ADMA2) &&
401 ISSET(caps, SDHC_ADMA2_SUPP)) {
402 SET(hp->flags, SHF_MODE_DMAEN);
403 /*
404 * 64-bit mode was present in the 2.00 spec, removed
405 * from 3.00, and re-added in 4.00 with a different
406 * descriptor layout. We only support 2.00 and 3.00
407 * descriptors for now.
408 */
409 if (hp->specver == SDHC_SPEC_VERS_200 &&
410 ISSET(caps, SDHC_64BIT_SYS_BUS)) {
411 SET(hp->flags, SHF_USE_ADMA2_64);
412 aprint_normal(", 64-bit ADMA2");
413 } else {
414 SET(hp->flags, SHF_USE_ADMA2_32);
415 aprint_normal(", 32-bit ADMA2");
416 }
417 } else {
418 if (!ISSET(sc->sc_flags, SDHC_FLAG_EXTERNAL_DMA) ||
419 ISSET(sc->sc_flags, SDHC_FLAG_EXTDMA_DMAEN))
420 SET(hp->flags, SHF_MODE_DMAEN);
421 if (sc->sc_vendor_transfer_data_dma) {
422 aprint_normal(", platform DMA");
423 } else {
424 aprint_normal(", SDMA");
425 }
426 }
427 } else {
428 aprint_normal(", PIO");
429 }
430
431 /*
432 * Determine the base clock frequency. (2.2.24)
433 */
434 if (hp->specver >= SDHC_SPEC_VERS_300) {
435 hp->clkbase = SDHC_BASE_V3_FREQ_KHZ(caps);
436 } else {
437 hp->clkbase = SDHC_BASE_FREQ_KHZ(caps);
438 }
439 if (hp->clkbase == 0 ||
440 ISSET(sc->sc_flags, SDHC_FLAG_NO_CLKBASE)) {
441 if (sc->sc_clkbase == 0) {
442 /* The attachment driver must tell us. */
443 aprint_error_dev(sc->sc_dev,
444 "unknown base clock frequency\n");
445 goto err;
446 }
447 hp->clkbase = sc->sc_clkbase;
448 }
449 if (hp->clkbase < 10000 || hp->clkbase > 10000 * 256) {
450 /* SDHC 1.0 supports only 10-63 MHz. */
451 aprint_error_dev(sc->sc_dev,
452 "base clock frequency out of range: %u MHz\n",
453 hp->clkbase / 1000);
454 goto err;
455 }
456 aprint_normal(", %u kHz", hp->clkbase);
457
458 /*
459 * XXX Set the data timeout counter value according to
460 * capabilities. (2.2.15)
461 */
462 HWRITE1(hp, SDHC_TIMEOUT_CTL, SDHC_TIMEOUT_MAX);
463 #if 1
464 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED))
465 HWRITE4(hp, SDHC_NINTR_STATUS, SDHC_CMD_TIMEOUT_ERROR << 16);
466 #endif
467
468 if (ISSET(caps, SDHC_EMBEDDED_SLOT))
469 aprint_normal(", embedded slot");
470
471 /*
472 * Determine SD bus voltage levels supported by the controller.
473 */
474 aprint_normal(",");
475 if (ISSET(caps, SDHC_HIGH_SPEED_SUPP)) {
476 SET(hp->ocr, MMC_OCR_HCS);
477 aprint_normal(" HS");
478 }
479 if (ISSET(caps2, SDHC_SDR50_SUPP)) {
480 SET(hp->ocr, MMC_OCR_S18A);
481 aprint_normal(" SDR50");
482 }
483 if (ISSET(caps2, SDHC_DDR50_SUPP)) {
484 SET(hp->ocr, MMC_OCR_S18A);
485 aprint_normal(" DDR50");
486 }
487 if (ISSET(caps2, SDHC_SDR104_SUPP)) {
488 SET(hp->ocr, MMC_OCR_S18A);
489 aprint_normal(" SDR104 HS200");
490 }
491 if (ISSET(caps, SDHC_VOLTAGE_SUPP_1_8V)) {
492 SET(hp->ocr, MMC_OCR_1_65V_1_95V);
493 aprint_normal(" 1.8V");
494 }
495 if (ISSET(caps, SDHC_VOLTAGE_SUPP_3_0V)) {
496 SET(hp->ocr, MMC_OCR_2_9V_3_0V | MMC_OCR_3_0V_3_1V);
497 aprint_normal(" 3.0V");
498 }
499 if (ISSET(caps, SDHC_VOLTAGE_SUPP_3_3V)) {
500 SET(hp->ocr, MMC_OCR_3_2V_3_3V | MMC_OCR_3_3V_3_4V);
501 aprint_normal(" 3.3V");
502 }
503 if (hp->specver >= SDHC_SPEC_VERS_300) {
504 aprint_normal(", re-tuning mode %d", retuning_mode + 1);
505 if (hp->tuning_timer_count)
506 aprint_normal(" (%us timer)", hp->tuning_timer_count);
507 }
508
509 /*
510 * Determine the maximum block length supported by the host
511 * controller. (2.2.24)
512 */
513 switch((caps >> SDHC_MAX_BLK_LEN_SHIFT) & SDHC_MAX_BLK_LEN_MASK) {
514 case SDHC_MAX_BLK_LEN_512:
515 hp->maxblklen = 512;
516 break;
517
518 case SDHC_MAX_BLK_LEN_1024:
519 hp->maxblklen = 1024;
520 break;
521
522 case SDHC_MAX_BLK_LEN_2048:
523 hp->maxblklen = 2048;
524 break;
525
526 case SDHC_MAX_BLK_LEN_4096:
527 hp->maxblklen = 4096;
528 break;
529
530 default:
531 aprint_error_dev(sc->sc_dev, "max block length unknown\n");
532 goto err;
533 }
534 aprint_normal(", %u byte blocks", hp->maxblklen);
535 aprint_normal("\n");
536
537 if (ISSET(hp->flags, SHF_USE_ADMA2_MASK)) {
538 int rseg;
539
540 /* Allocate ADMA2 descriptor memory */
541 error = bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE, PAGE_SIZE,
542 PAGE_SIZE, hp->adma_segs, 1, &rseg, BUS_DMA_WAITOK);
543 if (error) {
544 aprint_error_dev(sc->sc_dev,
545 "ADMA2 dmamem_alloc failed (%d)\n", error);
546 goto adma_done;
547 }
548 error = bus_dmamem_map(sc->sc_dmat, hp->adma_segs, rseg,
549 PAGE_SIZE, (void **)&hp->adma2, BUS_DMA_WAITOK);
550 if (error) {
551 aprint_error_dev(sc->sc_dev,
552 "ADMA2 dmamem_map failed (%d)\n", error);
553 goto adma_done;
554 }
555 error = bus_dmamap_create(sc->sc_dmat, PAGE_SIZE, 1, PAGE_SIZE,
556 0, BUS_DMA_WAITOK, &hp->adma_map);
557 if (error) {
558 aprint_error_dev(sc->sc_dev,
559 "ADMA2 dmamap_create failed (%d)\n", error);
560 goto adma_done;
561 }
562 error = bus_dmamap_load(sc->sc_dmat, hp->adma_map,
563 hp->adma2, PAGE_SIZE, NULL,
564 BUS_DMA_WAITOK|BUS_DMA_WRITE);
565 if (error) {
566 aprint_error_dev(sc->sc_dev,
567 "ADMA2 dmamap_load failed (%d)\n", error);
568 goto adma_done;
569 }
570
571 memset(hp->adma2, 0, PAGE_SIZE);
572
573 adma_done:
574 if (error)
575 CLR(hp->flags, SHF_USE_ADMA2_MASK);
576 }
577
578 /*
579 * Attach the generic SD/MMC bus driver. (The bus driver must
580 * not invoke any chipset functions before it is attached.)
581 */
582 memset(&saa, 0, sizeof(saa));
583 saa.saa_busname = "sdmmc";
584 saa.saa_sct = &sdhc_functions;
585 saa.saa_sch = hp;
586 saa.saa_dmat = hp->dmat;
587 saa.saa_clkmax = hp->clkbase;
588 if (ISSET(sc->sc_flags, SDHC_FLAG_HAVE_CGM))
589 saa.saa_clkmin = hp->clkbase / 256 / 2046;
590 else if (ISSET(sc->sc_flags, SDHC_FLAG_HAVE_DVS))
591 saa.saa_clkmin = hp->clkbase / 256 / 16;
592 else if (hp->sc->sc_clkmsk != 0)
593 saa.saa_clkmin = hp->clkbase / (hp->sc->sc_clkmsk >>
594 (ffs(hp->sc->sc_clkmsk) - 1));
595 else if (hp->specver >= SDHC_SPEC_VERS_300)
596 saa.saa_clkmin = hp->clkbase / 0x3ff;
597 else
598 saa.saa_clkmin = hp->clkbase / 256;
599 if (!ISSET(sc->sc_flags, SDHC_FLAG_NO_AUTO_STOP))
600 saa.saa_caps |= SMC_CAPS_AUTO_STOP;
601 saa.saa_caps |= SMC_CAPS_4BIT_MODE;
602 if (ISSET(sc->sc_flags, SDHC_FLAG_8BIT_MODE))
603 saa.saa_caps |= SMC_CAPS_8BIT_MODE;
604 if (ISSET(caps, SDHC_HIGH_SPEED_SUPP))
605 saa.saa_caps |= SMC_CAPS_SD_HIGHSPEED;
606 if (ISSET(caps2, SDHC_SDR104_SUPP))
607 saa.saa_caps |= SMC_CAPS_UHS_SDR104 |
608 SMC_CAPS_UHS_SDR50 |
609 SMC_CAPS_MMC_HS200;
610 if (ISSET(caps2, SDHC_SDR50_SUPP))
611 saa.saa_caps |= SMC_CAPS_UHS_SDR50;
612 if (ISSET(caps2, SDHC_DDR50_SUPP))
613 saa.saa_caps |= SMC_CAPS_UHS_DDR50;
614 if (ISSET(hp->flags, SHF_USE_DMA)) {
615 saa.saa_caps |= SMC_CAPS_DMA;
616 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED))
617 saa.saa_caps |= SMC_CAPS_MULTI_SEG_DMA;
618 }
619 if (ISSET(sc->sc_flags, SDHC_FLAG_SINGLE_ONLY))
620 saa.saa_caps |= SMC_CAPS_SINGLE_ONLY;
621 if (ISSET(sc->sc_flags, SDHC_FLAG_POLL_CARD_DET))
622 saa.saa_caps |= SMC_CAPS_POLL_CARD_DET;
623 hp->sdmmc = config_found(sc->sc_dev, &saa, sdhc_cfprint);
624
625 return 0;
626
627 err:
628 callout_destroy(&hp->tuning_timer);
629 cv_destroy(&hp->intr_cv);
630 mutex_destroy(&hp->intr_lock);
631 free(hp, M_DEVBUF);
632 sc->sc_host[--sc->sc_nhosts] = NULL;
633 err1:
634 return 1;
635 }
636
637 int
638 sdhc_detach(struct sdhc_softc *sc, int flags)
639 {
640 struct sdhc_host *hp;
641 int rv = 0;
642
643 for (size_t n = 0; n < sc->sc_nhosts; n++) {
644 hp = sc->sc_host[n];
645 if (hp == NULL)
646 continue;
647 if (hp->sdmmc != NULL) {
648 rv = config_detach(hp->sdmmc, flags);
649 if (rv)
650 break;
651 hp->sdmmc = NULL;
652 }
653 /* disable interrupts */
654 if ((flags & DETACH_FORCE) == 0) {
655 mutex_enter(&hp->intr_lock);
656 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_32BIT_ACCESS)) {
657 HWRITE4(hp, SDHC_NINTR_SIGNAL_EN, 0);
658 } else {
659 HWRITE2(hp, SDHC_NINTR_SIGNAL_EN, 0);
660 }
661 sdhc_soft_reset(hp, SDHC_RESET_ALL);
662 mutex_exit(&hp->intr_lock);
663 }
664 callout_halt(&hp->tuning_timer, NULL);
665 callout_destroy(&hp->tuning_timer);
666 cv_destroy(&hp->intr_cv);
667 mutex_destroy(&hp->intr_lock);
668 if (hp->ios > 0) {
669 bus_space_unmap(hp->iot, hp->ioh, hp->ios);
670 hp->ios = 0;
671 }
672 if (ISSET(hp->flags, SHF_USE_ADMA2_MASK)) {
673 bus_dmamap_unload(sc->sc_dmat, hp->adma_map);
674 bus_dmamap_destroy(sc->sc_dmat, hp->adma_map);
675 bus_dmamem_unmap(sc->sc_dmat, hp->adma2, PAGE_SIZE);
676 bus_dmamem_free(sc->sc_dmat, hp->adma_segs, 1);
677 }
678 free(hp, M_DEVBUF);
679 sc->sc_host[n] = NULL;
680 }
681
682 return rv;
683 }
684
685 bool
686 sdhc_suspend(device_t dev, const pmf_qual_t *qual)
687 {
688 struct sdhc_softc *sc = device_private(dev);
689 struct sdhc_host *hp;
690 size_t i;
691
692 /* XXX poll for command completion or suspend command
693 * in progress */
694
695 /* Save the host controller state. */
696 for (size_t n = 0; n < sc->sc_nhosts; n++) {
697 hp = sc->sc_host[n];
698 if (ISSET(sc->sc_flags, SDHC_FLAG_32BIT_ACCESS)) {
699 for (i = 0; i < sizeof hp->regs; i += 4) {
700 uint32_t v = HREAD4(hp, i);
701 hp->regs[i + 0] = (v >> 0);
702 hp->regs[i + 1] = (v >> 8);
703 if (i + 3 < sizeof hp->regs) {
704 hp->regs[i + 2] = (v >> 16);
705 hp->regs[i + 3] = (v >> 24);
706 }
707 }
708 } else {
709 for (i = 0; i < sizeof hp->regs; i++) {
710 hp->regs[i] = HREAD1(hp, i);
711 }
712 }
713 }
714 return true;
715 }
716
717 bool
718 sdhc_resume(device_t dev, const pmf_qual_t *qual)
719 {
720 struct sdhc_softc *sc = device_private(dev);
721 struct sdhc_host *hp;
722 size_t i;
723
724 /* Restore the host controller state. */
725 for (size_t n = 0; n < sc->sc_nhosts; n++) {
726 hp = sc->sc_host[n];
727 (void)sdhc_host_reset(hp);
728 if (ISSET(sc->sc_flags, SDHC_FLAG_32BIT_ACCESS)) {
729 for (i = 0; i < sizeof hp->regs; i += 4) {
730 if (i + 3 < sizeof hp->regs) {
731 HWRITE4(hp, i,
732 (hp->regs[i + 0] << 0)
733 | (hp->regs[i + 1] << 8)
734 | (hp->regs[i + 2] << 16)
735 | (hp->regs[i + 3] << 24));
736 } else {
737 HWRITE4(hp, i,
738 (hp->regs[i + 0] << 0)
739 | (hp->regs[i + 1] << 8));
740 }
741 }
742 } else {
743 for (i = 0; i < sizeof hp->regs; i++) {
744 HWRITE1(hp, i, hp->regs[i]);
745 }
746 }
747 }
748 return true;
749 }
750
751 bool
752 sdhc_shutdown(device_t dev, int flags)
753 {
754 struct sdhc_softc *sc = device_private(dev);
755 struct sdhc_host *hp;
756
757 /* XXX chip locks up if we don't disable it before reboot. */
758 for (size_t i = 0; i < sc->sc_nhosts; i++) {
759 hp = sc->sc_host[i];
760 (void)sdhc_host_reset(hp);
761 }
762 return true;
763 }
764
765 /*
766 * Reset the host controller. Called during initialization, when
767 * cards are removed, upon resume, and during error recovery.
768 */
769 static int
770 sdhc_host_reset1(sdmmc_chipset_handle_t sch)
771 {
772 struct sdhc_host *hp = (struct sdhc_host *)sch;
773 uint32_t sdhcimask;
774 int error;
775
776 KASSERT(mutex_owned(&hp->intr_lock));
777
778 /* Disable all interrupts. */
779 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_32BIT_ACCESS)) {
780 HWRITE4(hp, SDHC_NINTR_SIGNAL_EN, 0);
781 } else {
782 HWRITE2(hp, SDHC_NINTR_SIGNAL_EN, 0);
783 }
784
785 /*
786 * Reset the entire host controller and wait up to 100ms for
787 * the controller to clear the reset bit.
788 */
789 error = sdhc_soft_reset(hp, SDHC_RESET_ALL);
790 if (error)
791 goto out;
792
793 /* Set data timeout counter value to max for now. */
794 HWRITE1(hp, SDHC_TIMEOUT_CTL, SDHC_TIMEOUT_MAX);
795 #if 1
796 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED))
797 HWRITE4(hp, SDHC_NINTR_STATUS, SDHC_CMD_TIMEOUT_ERROR << 16);
798 #endif
799
800 /* Enable interrupts. */
801 sdhcimask = SDHC_CARD_REMOVAL | SDHC_CARD_INSERTION |
802 SDHC_BUFFER_READ_READY | SDHC_BUFFER_WRITE_READY |
803 SDHC_DMA_INTERRUPT | SDHC_BLOCK_GAP_EVENT |
804 SDHC_TRANSFER_COMPLETE | SDHC_COMMAND_COMPLETE;
805 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_32BIT_ACCESS)) {
806 sdhcimask |= SDHC_EINTR_STATUS_MASK << 16;
807 HWRITE4(hp, SDHC_NINTR_STATUS_EN, sdhcimask);
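/*
 * The signal-enable mask differs from the status-enable mask written
 * above: the XORs below swap the error status mask for the error signal
 * mask in the upper half and drop the buffer read/write ready bits,
 * which are only signalled on demand by the PIO path.
 */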
808 sdhcimask ^=
809 (SDHC_EINTR_STATUS_MASK ^ SDHC_EINTR_SIGNAL_MASK) << 16;
810 sdhcimask ^= SDHC_BUFFER_READ_READY ^ SDHC_BUFFER_WRITE_READY;
811 HWRITE4(hp, SDHC_NINTR_SIGNAL_EN, sdhcimask);
812 } else {
813 HWRITE2(hp, SDHC_NINTR_STATUS_EN, sdhcimask);
814 HWRITE2(hp, SDHC_EINTR_STATUS_EN, SDHC_EINTR_STATUS_MASK);
815 sdhcimask ^= SDHC_BUFFER_READ_READY ^ SDHC_BUFFER_WRITE_READY;
816 HWRITE2(hp, SDHC_NINTR_SIGNAL_EN, sdhcimask);
817 HWRITE2(hp, SDHC_EINTR_SIGNAL_EN, SDHC_EINTR_SIGNAL_MASK);
818 }
819
820 out:
821 return error;
822 }
823
824 static int
825 sdhc_host_reset(sdmmc_chipset_handle_t sch)
826 {
827 struct sdhc_host *hp = (struct sdhc_host *)sch;
828 int error;
829
830 mutex_enter(&hp->intr_lock);
831 error = sdhc_host_reset1(sch);
832 mutex_exit(&hp->intr_lock);
833
834 return error;
835 }
836
837 static uint32_t
838 sdhc_host_ocr(sdmmc_chipset_handle_t sch)
839 {
840 struct sdhc_host *hp = (struct sdhc_host *)sch;
841
842 return hp->ocr;
843 }
844
845 static int
846 sdhc_host_maxblklen(sdmmc_chipset_handle_t sch)
847 {
848 struct sdhc_host *hp = (struct sdhc_host *)sch;
849
850 return hp->maxblklen;
851 }
852
853 /*
854 * Return non-zero if the card is currently inserted.
855 */
856 static int
857 sdhc_card_detect(sdmmc_chipset_handle_t sch)
858 {
859 struct sdhc_host *hp = (struct sdhc_host *)sch;
860 int r;
861
862 if (hp->sc->sc_vendor_card_detect)
863 return (*hp->sc->sc_vendor_card_detect)(hp->sc);
864
865 r = ISSET(HREAD4(hp, SDHC_PRESENT_STATE), SDHC_CARD_INSERTED);
866
867 return r ? 1 : 0;
868 }
869
870 /*
871 * Return non-zero if the card is currently write-protected.
872 */
873 static int
874 sdhc_write_protect(sdmmc_chipset_handle_t sch)
875 {
876 struct sdhc_host *hp = (struct sdhc_host *)sch;
877 int r;
878
879 if (hp->sc->sc_vendor_write_protect)
880 return (*hp->sc->sc_vendor_write_protect)(hp->sc);
881
882 r = ISSET(HREAD4(hp, SDHC_PRESENT_STATE), SDHC_WRITE_PROTECT_SWITCH);
883
884 return r ? 0 : 1;
885 }
886
887 /*
888 * Set or change SD bus voltage and enable or disable SD bus power.
889 * Return zero on success.
890 */
891 static int
892 sdhc_bus_power(sdmmc_chipset_handle_t sch, uint32_t ocr)
893 {
894 struct sdhc_host *hp = (struct sdhc_host *)sch;
895 uint8_t vdd;
896 int error = 0;
897 const uint32_t pcmask =
898 ~(SDHC_BUS_POWER | (SDHC_VOLTAGE_MASK << SDHC_VOLTAGE_SHIFT));
899
900 mutex_enter(&hp->intr_lock);
901
902 /*
903 * Disable bus power before voltage change.
904 */
905 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_32BIT_ACCESS)
906 && !ISSET(hp->sc->sc_flags, SDHC_FLAG_NO_PWR0))
907 HWRITE1(hp, SDHC_POWER_CTL, 0);
908
909 /* If power is disabled, reset the host and return now. */
910 if (ocr == 0) {
911 (void)sdhc_host_reset1(hp);
912 callout_halt(&hp->tuning_timer, &hp->intr_lock);
913 goto out;
914 }
915
916 /*
917 * Select the lowest voltage according to capabilities.
918 */
919 ocr &= hp->ocr;
920 if (ISSET(ocr, MMC_OCR_1_65V_1_95V)) {
921 vdd = SDHC_VOLTAGE_1_8V;
922 } else if (ISSET(ocr, MMC_OCR_2_9V_3_0V|MMC_OCR_3_0V_3_1V)) {
923 vdd = SDHC_VOLTAGE_3_0V;
924 } else if (ISSET(ocr, MMC_OCR_3_2V_3_3V|MMC_OCR_3_3V_3_4V)) {
925 vdd = SDHC_VOLTAGE_3_3V;
926 } else {
927 /* Unsupported voltage level requested. */
928 error = EINVAL;
929 goto out;
930 }
931
932 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED)) {
933 /*
934 * Enable bus power. Wait at least 1 ms (or 74 clocks) plus
935 * voltage ramp until power rises.
936 */
937
938 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_SINGLE_POWER_WRITE)) {
939 HWRITE1(hp, SDHC_POWER_CTL,
940 (vdd << SDHC_VOLTAGE_SHIFT) | SDHC_BUS_POWER);
941 } else {
942 HWRITE1(hp, SDHC_POWER_CTL,
943 HREAD1(hp, SDHC_POWER_CTL) & pcmask);
944 sdmmc_delay(1);
945 HWRITE1(hp, SDHC_POWER_CTL,
946 (vdd << SDHC_VOLTAGE_SHIFT));
947 sdmmc_delay(1);
948 HSET1(hp, SDHC_POWER_CTL, SDHC_BUS_POWER);
949 sdmmc_delay(10000);
950 }
951
952 /*
953 * The host system may not power the bus due to battery low,
954 * etc. In that case, the host controller should clear the
955 * bus power bit.
956 */
957 if (!ISSET(HREAD1(hp, SDHC_POWER_CTL), SDHC_BUS_POWER)) {
958 error = ENXIO;
959 goto out;
960 }
961 }
962
963 out:
964 mutex_exit(&hp->intr_lock);
965
966 return error;
967 }
968
969 /*
970 * Return the smallest possible base clock frequency divisor value
971 * for the CLOCK_CTL register to produce `freq' (KHz).
972 */
973 static bool
974 sdhc_clock_divisor(struct sdhc_host *hp, u_int freq, u_int *divp)
975 {
976 u_int div;
977
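/*
 * Each controller family encodes its divisor differently: CGM hosts take
 * a 10-bit linear divisor, eSDHC/DVS hosts combine a power-of-two
 * prescaler with a small divisor, sc_clkmsk hosts use a plain divisor
 * field, SDHC 3.0 hosts take a 10-bit value N giving SDCLK = base/(2N)
 * (N = 0 selects the base clock), and older hosts only divide by powers
 * of two up to 256.  E.g. (illustrative numbers only) a 3.0 host with a
 * 100000 kHz base clock asked for 400 kHz computes
 * N = howmany(howmany(100000, 400), 2) = 125, i.e. 100000/(2*125).
 */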
978 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_HAVE_CGM)) {
979 for (div = hp->clkbase / freq; div <= 0x3ff; div++) {
980 if ((hp->clkbase / div) <= freq) {
981 *divp = SDHC_SDCLK_CGM
982 | ((div & 0x300) << SDHC_SDCLK_XDIV_SHIFT)
983 | ((div & 0x0ff) << SDHC_SDCLK_DIV_SHIFT);
984 //freq = hp->clkbase / div;
985 return true;
986 }
987 }
988 /* No divisor found. */
989 return false;
990 }
991 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_HAVE_DVS)) {
992 u_int dvs = (hp->clkbase + freq - 1) / freq;
993 u_int roundup = dvs & 1;
994 for (dvs >>= 1, div = 1; div <= 256; div <<= 1, dvs >>= 1) {
995 if (dvs + roundup <= 16) {
996 dvs += roundup - 1;
997 *divp = (div << SDHC_SDCLK_DIV_SHIFT)
998 | (dvs << SDHC_SDCLK_DVS_SHIFT);
999 DPRINTF(2,
1000 ("%s: divisor for freq %u is %u * %u\n",
1001 HDEVNAME(hp), freq, div * 2, dvs + 1));
1002 //freq = hp->clkbase / (div * 2) * (dvs + 1);
1003 return true;
1004 }
1005 /*
1006 * If we drop bits, we need to round up the divisor.
1007 */
1008 roundup |= dvs & 1;
1009 }
1010 /* No divisor found. */
1011 return false;
1012 }
1013 if (hp->sc->sc_clkmsk != 0) {
1014 div = howmany(hp->clkbase, freq);
1015 if (div > (hp->sc->sc_clkmsk >> (ffs(hp->sc->sc_clkmsk) - 1)))
1016 return false;
1017 *divp = div << (ffs(hp->sc->sc_clkmsk) - 1);
1018 //freq = hp->clkbase / div;
1019 return true;
1020 }
1021 if (hp->specver >= SDHC_SPEC_VERS_300) {
1022 div = howmany(hp->clkbase, freq);
1023 div = div > 1 ? howmany(div, 2) : 0;
1024 if (div > 0x3ff)
1025 return false;
1026 *divp = (((div >> 8) & SDHC_SDCLK_XDIV_MASK)
1027 << SDHC_SDCLK_XDIV_SHIFT) |
1028 (((div >> 0) & SDHC_SDCLK_DIV_MASK)
1029 << SDHC_SDCLK_DIV_SHIFT);
1030 //freq = hp->clkbase / (div ? div * 2 : 1);
1031 return true;
1032 } else {
1033 for (div = 1; div <= 256; div *= 2) {
1034 if ((hp->clkbase / div) <= freq) {
1035 *divp = (div / 2) << SDHC_SDCLK_DIV_SHIFT;
1036 //freq = hp->clkbase / div;
1037 return true;
1038 }
1039 }
1040 /* No divisor found. */
1041 return false;
1042 }
1043 /* No divisor found. */
1044 return false;
1045 }
1046
1047 /*
1048 * Set or change SDCLK frequency or disable the SD clock.
1049 * Return zero on success.
1050 */
1051 static int
1052 sdhc_bus_clock_ddr(sdmmc_chipset_handle_t sch, int freq, bool ddr)
1053 {
1054 struct sdhc_host *hp = (struct sdhc_host *)sch;
1055 u_int div;
1056 u_int timo;
1057 int16_t reg;
1058 int error = 0;
1059 bool present __diagused;
1060
1061 mutex_enter(&hp->intr_lock);
1062
1063 #ifdef DIAGNOSTIC
1064 present = ISSET(HREAD4(hp, SDHC_PRESENT_STATE), SDHC_CMD_INHIBIT_MASK);
1065
1066 /* Must not stop the clock if commands are in progress. */
1067 if (present && sdhc_card_detect(hp)) {
1068 aprint_normal_dev(hp->sc->sc_dev,
1069 "%s: command in progress\n", __func__);
1070 }
1071 #endif
1072
1073 if (hp->sc->sc_vendor_bus_clock) {
1074 error = (*hp->sc->sc_vendor_bus_clock)(hp->sc, freq);
1075 if (error != 0)
1076 goto out;
1077 }
1078
1079 /*
1080 * Stop SD clock before changing the frequency.
1081 */
1082 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_USDHC)) {
1083 HCLR4(hp, SDHC_VEND_SPEC,
1084 SDHC_VEND_SPEC_CARD_CLK_SOFT_EN |
1085 SDHC_VEND_SPEC_FRC_SDCLK_ON);
1086 if (freq == SDMMC_SDCLK_OFF) {
1087 goto out;
1088 }
1089 } else if (ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED)) {
1090 HCLR4(hp, SDHC_CLOCK_CTL, 0xfff8);
1091 if (freq == SDMMC_SDCLK_OFF) {
1092 HSET4(hp, SDHC_CLOCK_CTL, 0x80f0);
1093 goto out;
1094 }
1095 } else {
1096 HCLR2(hp, SDHC_CLOCK_CTL, SDHC_SDCLK_ENABLE);
1097 if (freq == SDMMC_SDCLK_OFF)
1098 goto out;
1099 }
1100
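/*
 * Select the timing mode for the new frequency: uSDHC hosts use a DDR
 * enable bit in MIX_CTRL, while SDHC 3.0 hosts program the UHS
 * mode-select field (SDR12/SDR25/SDR50/DDR50/SDR104) from the target
 * frequency and the ddr flag.
 */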
1101 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_USDHC)) {
1102 if (ddr)
1103 HSET4(hp, SDHC_MIX_CTRL, SDHC_USDHC_DDR_EN);
1104 else
1105 HCLR4(hp, SDHC_MIX_CTRL, SDHC_USDHC_DDR_EN);
1106 } else if (hp->specver >= SDHC_SPEC_VERS_300) {
1107 HCLR2(hp, SDHC_HOST_CTL2, SDHC_UHS_MODE_SELECT_MASK);
1108 if (freq > 100000) {
1109 HSET2(hp, SDHC_HOST_CTL2, SDHC_UHS_MODE_SELECT_SDR104);
1110 } else if (freq > 50000) {
1111 if (ddr) {
1112 HSET2(hp, SDHC_HOST_CTL2,
1113 SDHC_UHS_MODE_SELECT_DDR50);
1114 } else {
1115 HSET2(hp, SDHC_HOST_CTL2,
1116 SDHC_UHS_MODE_SELECT_SDR50);
1117 }
1118 } else if (freq > 25000) {
1119 if (ddr) {
1120 HSET2(hp, SDHC_HOST_CTL2,
1121 SDHC_UHS_MODE_SELECT_DDR50);
1122 } else {
1123 HSET2(hp, SDHC_HOST_CTL2,
1124 SDHC_UHS_MODE_SELECT_SDR25);
1125 }
1126 } else if (freq > 400) {
1127 HSET2(hp, SDHC_HOST_CTL2, SDHC_UHS_MODE_SELECT_SDR12);
1128 }
1129 }
1130
1131 /*
1132 * Slow down the Ricoh 5U823 controller, which isn't reliable
1133 * at a 100 MHz bus clock.
1134 */
1135 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_SLOW_SDR50)) {
1136 if (freq == 100000)
1137 --freq;
1138 }
1139
1140 /*
1141 * Set the minimum base clock frequency divisor.
1142 */
1143 if (!sdhc_clock_divisor(hp, freq, &div)) {
1144 /* Invalid base clock frequency or `freq' value. */
1145 aprint_error_dev(hp->sc->sc_dev,
1146 "Invalid bus clock %d kHz\n", freq);
1147 error = EINVAL;
1148 goto out;
1149 }
1150 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_USDHC)) {
1151 if (ddr) {
1152 /* in ddr mode, divisor >>= 1 */
1153 div = ((div >> 1) & (SDHC_SDCLK_DIV_MASK <<
1154 SDHC_SDCLK_DIV_SHIFT)) |
1155 (div & (SDHC_SDCLK_DVS_MASK <<
1156 SDHC_SDCLK_DVS_SHIFT));
1157 }
1158 for (timo = 1000; timo > 0; timo--) {
1159 if (ISSET(HREAD4(hp, SDHC_PRESENT_STATE), SDHC_SDSTB))
1160 break;
1161 sdmmc_delay(10);
1162 }
1163 HWRITE4(hp, SDHC_CLOCK_CTL,
1164 div | (SDHC_TIMEOUT_MAX << 16) | 0x0f);
1165 } else if (ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED)) {
1166 HWRITE4(hp, SDHC_CLOCK_CTL,
1167 div | (SDHC_TIMEOUT_MAX << 16));
1168 } else {
1169 reg = HREAD2(hp, SDHC_CLOCK_CTL);
1170 reg &= (SDHC_INTCLK_STABLE | SDHC_INTCLK_ENABLE);
1171 HWRITE2(hp, SDHC_CLOCK_CTL, reg | div);
1172 }
1173
1174 /*
1175 * Start internal clock. Wait 10ms for stabilization.
1176 */
1177 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_USDHC)) {
1178 HSET4(hp, SDHC_VEND_SPEC,
1179 SDHC_VEND_SPEC_CARD_CLK_SOFT_EN |
1180 SDHC_VEND_SPEC_FRC_SDCLK_ON);
1181 } else if (ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED)) {
1182 sdmmc_delay(10000);
1183 HSET4(hp, SDHC_CLOCK_CTL,
1184 8 | SDHC_INTCLK_ENABLE | SDHC_INTCLK_STABLE);
1185 } else {
1186 HSET2(hp, SDHC_CLOCK_CTL, SDHC_INTCLK_ENABLE);
1187 for (timo = 1000; timo > 0; timo--) {
1188 if (ISSET(HREAD2(hp, SDHC_CLOCK_CTL),
1189 SDHC_INTCLK_STABLE))
1190 break;
1191 sdmmc_delay(10);
1192 }
1193 if (timo == 0) {
1194 error = ETIMEDOUT;
1195 DPRINTF(1,("%s: timeout\n", __func__));
1196 goto out;
1197 }
1198 }
1199
1200 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED | SDHC_FLAG_USDHC)) {
1201 HSET1(hp, SDHC_SOFTWARE_RESET, SDHC_INIT_ACTIVE);
1202 /*
1203 * Sending 80 clocks at 400kHz takes 200us.
1204 * So delay for that time + slop and then
1205 * check a few times for completion.
1206 */
1207 sdmmc_delay(210);
1208 for (timo = 10; timo > 0; timo--) {
1209 if (!ISSET(HREAD1(hp, SDHC_SOFTWARE_RESET),
1210 SDHC_INIT_ACTIVE))
1211 break;
1212 sdmmc_delay(10);
1213 }
1214 DPRINTF(2,("%s: %u init spins\n", __func__, 10 - timo));
1215
1216 /*
1217 * Enable SD clock.
1218 */
1219 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_USDHC)) {
1220 HSET4(hp, SDHC_VEND_SPEC,
1221 SDHC_VEND_SPEC_CARD_CLK_SOFT_EN |
1222 SDHC_VEND_SPEC_FRC_SDCLK_ON);
1223 } else {
1224 HSET4(hp, SDHC_CLOCK_CTL, SDHC_SDCLK_ENABLE);
1225 }
1226 } else {
1227 /*
1228 * Enable SD clock.
1229 */
1230 HSET2(hp, SDHC_CLOCK_CTL, SDHC_SDCLK_ENABLE);
1231
1232 if (freq > 25000 &&
1233 !ISSET(hp->sc->sc_flags, SDHC_FLAG_NO_HS_BIT))
1234 HSET1(hp, SDHC_HOST_CTL, SDHC_HIGH_SPEED);
1235 else
1236 HCLR1(hp, SDHC_HOST_CTL, SDHC_HIGH_SPEED);
1237 }
1238
1239 out:
1240 mutex_exit(&hp->intr_lock);
1241
1242 return error;
1243 }
1244
1245 static int
1246 sdhc_bus_width(sdmmc_chipset_handle_t sch, int width)
1247 {
1248 struct sdhc_host *hp = (struct sdhc_host *)sch;
1249 int reg;
1250
1251 switch (width) {
1252 case 1:
1253 case 4:
1254 break;
1255
1256 case 8:
1257 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_8BIT_MODE))
1258 break;
1259 /* FALLTHROUGH */
1260 default:
1261 DPRINTF(0,("%s: unsupported bus width (%d)\n",
1262 HDEVNAME(hp), width));
1263 return 1;
1264 }
1265
1266 if (hp->sc->sc_vendor_bus_width) {
1267 const int error = hp->sc->sc_vendor_bus_width(hp->sc, width);
1268 if (error != 0)
1269 return error;
1270 }
1271
1272 mutex_enter(&hp->intr_lock);
1273
1274 reg = HREAD1(hp, SDHC_HOST_CTL);
1275 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED | SDHC_FLAG_USDHC)) {
1276 reg &= ~(SDHC_4BIT_MODE|SDHC_ESDHC_8BIT_MODE);
1277 if (width == 4)
1278 reg |= SDHC_4BIT_MODE;
1279 else if (width == 8)
1280 reg |= SDHC_ESDHC_8BIT_MODE;
1281 } else {
1282 reg &= ~SDHC_4BIT_MODE;
1283 if (hp->specver >= SDHC_SPEC_VERS_300) {
1284 reg &= ~SDHC_8BIT_MODE;
1285 }
1286 if (width == 4) {
1287 reg |= SDHC_4BIT_MODE;
1288 } else if (width == 8 && hp->specver >= SDHC_SPEC_VERS_300) {
1289 reg |= SDHC_8BIT_MODE;
1290 }
1291 }
1292 HWRITE1(hp, SDHC_HOST_CTL, reg);
1293
1294 mutex_exit(&hp->intr_lock);
1295
1296 return 0;
1297 }
1298
1299 static int
1300 sdhc_bus_rod(sdmmc_chipset_handle_t sch, int on)
1301 {
1302 struct sdhc_host *hp = (struct sdhc_host *)sch;
1303
1304 if (hp->sc->sc_vendor_rod)
1305 return (*hp->sc->sc_vendor_rod)(hp->sc, on);
1306
1307 return 0;
1308 }
1309
1310 static void
1311 sdhc_card_enable_intr(sdmmc_chipset_handle_t sch, int enable)
1312 {
1313 struct sdhc_host *hp = (struct sdhc_host *)sch;
1314
1315 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED | SDHC_FLAG_USDHC)) {
1316 mutex_enter(&hp->intr_lock);
1317 if (enable) {
1318 HSET2(hp, SDHC_NINTR_STATUS_EN, SDHC_CARD_INTERRUPT);
1319 HSET2(hp, SDHC_NINTR_SIGNAL_EN, SDHC_CARD_INTERRUPT);
1320 } else {
1321 HCLR2(hp, SDHC_NINTR_SIGNAL_EN, SDHC_CARD_INTERRUPT);
1322 HCLR2(hp, SDHC_NINTR_STATUS_EN, SDHC_CARD_INTERRUPT);
1323 }
1324 mutex_exit(&hp->intr_lock);
1325 }
1326 }
1327
1328 static void
1329 sdhc_card_intr_ack(sdmmc_chipset_handle_t sch)
1330 {
1331 struct sdhc_host *hp = (struct sdhc_host *)sch;
1332
1333 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED | SDHC_FLAG_USDHC)) {
1334 mutex_enter(&hp->intr_lock);
1335 HSET2(hp, SDHC_NINTR_STATUS_EN, SDHC_CARD_INTERRUPT);
1336 mutex_exit(&hp->intr_lock);
1337 }
1338 }
1339
1340 static int
1341 sdhc_signal_voltage(sdmmc_chipset_handle_t sch, int signal_voltage)
1342 {
1343 struct sdhc_host *hp = (struct sdhc_host *)sch;
1344 int error = 0;
1345
1346 if (hp->specver < SDHC_SPEC_VERS_300)
1347 return EINVAL;
1348
1349 mutex_enter(&hp->intr_lock);
1350 switch (signal_voltage) {
1351 case SDMMC_SIGNAL_VOLTAGE_180:
1352 if (hp->sc->sc_vendor_signal_voltage != NULL) {
1353 error = hp->sc->sc_vendor_signal_voltage(hp->sc,
1354 signal_voltage);
1355 if (error != 0)
1356 break;
1357 }
1358 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_USDHC))
1359 HSET2(hp, SDHC_HOST_CTL2, SDHC_1_8V_SIGNAL_EN);
1360 break;
1361 case SDMMC_SIGNAL_VOLTAGE_330:
1362 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_USDHC))
1363 HCLR2(hp, SDHC_HOST_CTL2, SDHC_1_8V_SIGNAL_EN);
1364 if (hp->sc->sc_vendor_signal_voltage != NULL) {
1365 error = hp->sc->sc_vendor_signal_voltage(hp->sc,
1366 signal_voltage);
1367 if (error != 0)
1368 break;
1369 }
1370 break;
1371 default:
1372 error = EINVAL;
1373 break;
1374 }
1375 mutex_exit(&hp->intr_lock);
1376
1377 return error;
1378 }
1379
1380 /*
1381 * Sampling clock tuning procedure (UHS)
1382 */
1383 static int
1384 sdhc_execute_tuning1(struct sdhc_host *hp, int timing)
1385 {
1386 struct sdmmc_command cmd;
1387 uint8_t hostctl;
1388 int opcode, error, retry = 40;
1389
1390 KASSERT(mutex_owned(&hp->intr_lock));
1391
1392 hp->tuning_timing = timing;
1393
1394 switch (timing) {
1395 case SDMMC_TIMING_MMC_HS200:
1396 opcode = MMC_SEND_TUNING_BLOCK_HS200;
1397 break;
1398 case SDMMC_TIMING_UHS_SDR50:
1399 if (!ISSET(hp->sc->sc_caps2, SDHC_TUNING_SDR50))
1400 return 0;
1401 /* FALLTHROUGH */
1402 case SDMMC_TIMING_UHS_SDR104:
1403 opcode = MMC_SEND_TUNING_BLOCK;
1404 break;
1405 default:
1406 return EINVAL;
1407 }
1408
1409 hostctl = HREAD1(hp, SDHC_HOST_CTL);
1410
1411 /* enable buffer read ready interrupt */
1412 HSET2(hp, SDHC_NINTR_SIGNAL_EN, SDHC_BUFFER_READ_READY);
1413 HSET2(hp, SDHC_NINTR_STATUS_EN, SDHC_BUFFER_READ_READY);
1414
1415 /* disable DMA */
1416 HCLR1(hp, SDHC_HOST_CTL, SDHC_DMA_SELECT);
1417
1418 /* reset tuning circuit */
1419 HCLR2(hp, SDHC_HOST_CTL2, SDHC_SAMPLING_CLOCK_SEL);
1420
1421 /* start of tuning */
1422 HWRITE2(hp, SDHC_HOST_CTL2, SDHC_EXECUTE_TUNING);
1423
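/*
 * Issue the tuning command repeatedly (at most 40 times) until the
 * controller clears SDHC_EXECUTE_TUNING.  The tuning block is 128 bytes
 * with an 8-bit bus and 64 bytes otherwise; the driver only waits for
 * the buffer-read-ready interrupt and never reads the data itself.
 */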
1424 do {
1425 memset(&cmd, 0, sizeof(cmd));
1426 cmd.c_opcode = opcode;
1427 cmd.c_arg = 0;
1428 cmd.c_flags = SCF_CMD_ADTC | SCF_CMD_READ | SCF_RSP_R1;
1429 if (ISSET(hostctl, SDHC_8BIT_MODE)) {
1430 cmd.c_blklen = cmd.c_datalen = 128;
1431 } else {
1432 cmd.c_blklen = cmd.c_datalen = 64;
1433 }
1434
1435 error = sdhc_start_command(hp, &cmd);
1436 if (error)
1437 break;
1438
1439 if (!sdhc_wait_intr(hp, SDHC_BUFFER_READ_READY,
1440 SDHC_TUNING_TIMEOUT, false)) {
1441 break;
1442 }
1443
1444 delay(1000);
1445 } while (HREAD2(hp, SDHC_HOST_CTL2) & SDHC_EXECUTE_TUNING && --retry);
1446
1447 /* disable buffer read ready interrupt */
1448 HCLR2(hp, SDHC_NINTR_SIGNAL_EN, SDHC_BUFFER_READ_READY);
1449 HCLR2(hp, SDHC_NINTR_STATUS_EN, SDHC_BUFFER_READ_READY);
1450
1451 if (HREAD2(hp, SDHC_HOST_CTL2) & SDHC_EXECUTE_TUNING) {
1452 HCLR2(hp, SDHC_HOST_CTL2,
1453 SDHC_SAMPLING_CLOCK_SEL|SDHC_EXECUTE_TUNING);
1454 sdhc_soft_reset(hp, SDHC_RESET_DAT|SDHC_RESET_CMD);
1455 aprint_error_dev(hp->sc->sc_dev,
1456 "tuning did not complete, using fixed sampling clock\n");
1457 return EIO; /* tuning did not complete */
1458 }
1459
1460 if ((HREAD2(hp, SDHC_HOST_CTL2) & SDHC_SAMPLING_CLOCK_SEL) == 0) {
1461 HCLR2(hp, SDHC_HOST_CTL2,
1462 SDHC_SAMPLING_CLOCK_SEL|SDHC_EXECUTE_TUNING);
1463 sdhc_soft_reset(hp, SDHC_RESET_DAT|SDHC_RESET_CMD);
1464 aprint_error_dev(hp->sc->sc_dev,
1465 "tuning failed, using fixed sampling clock\n");
1466 return EIO; /* tuning failed */
1467 }
1468
1469 if (hp->tuning_timer_count) {
1470 callout_schedule(&hp->tuning_timer,
1471 hz * hp->tuning_timer_count);
1472 }
1473
1474 return 0; /* tuning completed */
1475 }
1476
1477 static int
1478 sdhc_execute_tuning(sdmmc_chipset_handle_t sch, int timing)
1479 {
1480 struct sdhc_host *hp = (struct sdhc_host *)sch;
1481 int error;
1482
1483 mutex_enter(&hp->intr_lock);
1484 error = sdhc_execute_tuning1(hp, timing);
1485 mutex_exit(&hp->intr_lock);
1486 return error;
1487 }
1488
1489 static void
1490 sdhc_tuning_timer(void *arg)
1491 {
1492 struct sdhc_host *hp = arg;
1493
1494 atomic_swap_uint(&hp->tuning_timer_pending, 1);
1495 }
1496
1497 static void
1498 sdhc_hw_reset(sdmmc_chipset_handle_t sch)
1499 {
1500 struct sdhc_host *hp = (struct sdhc_host *)sch;
1501 struct sdhc_softc *sc = hp->sc;
1502
1503 if (sc->sc_vendor_hw_reset != NULL)
1504 sc->sc_vendor_hw_reset(sc, hp);
1505 }
1506
1507 static int
1508 sdhc_wait_state(struct sdhc_host *hp, uint32_t mask, uint32_t value)
1509 {
1510 uint32_t state;
1511 int timeout;
1512
1513 for (timeout = 10000; timeout > 0; timeout--) {
1514 if (((state = HREAD4(hp, SDHC_PRESENT_STATE)) & mask) == value)
1515 return 0;
1516 sdmmc_delay(10);
1517 }
1518 aprint_error_dev(hp->sc->sc_dev, "timeout waiting for mask %#x value %#x (state=%#x)\n",
1519 mask, value, state);
1520 return ETIMEDOUT;
1521 }
1522
1523 static void
1524 sdhc_exec_command(sdmmc_chipset_handle_t sch, struct sdmmc_command *cmd)
1525 {
1526 struct sdhc_host *hp = (struct sdhc_host *)sch;
1527 int error;
1528 bool probing;
1529
1530 mutex_enter(&hp->intr_lock);
1531
1532 if (atomic_cas_uint(&hp->tuning_timer_pending, 1, 0) == 1) {
1533 (void)sdhc_execute_tuning1(hp, hp->tuning_timing);
1534 }
1535
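/*
 * On enhanced/uSDHC hosts, gate the buffer-ready interrupts per command:
 * mask them when the transfer will be done by DMA and enable them when
 * it will be done by PIO.
 */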
1536 if (cmd->c_data &&
1537 ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED | SDHC_FLAG_USDHC)) {
1538 const uint16_t ready = SDHC_BUFFER_READ_READY | SDHC_BUFFER_WRITE_READY;
1539 if (ISSET(hp->flags, SHF_USE_DMA)) {
1540 HCLR2(hp, SDHC_NINTR_SIGNAL_EN, ready);
1541 HCLR2(hp, SDHC_NINTR_STATUS_EN, ready);
1542 } else {
1543 HSET2(hp, SDHC_NINTR_SIGNAL_EN, ready);
1544 HSET2(hp, SDHC_NINTR_STATUS_EN, ready);
1545 }
1546 }
1547
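/*
 * Hosts with the SDHC_FLAG_NO_TIMEOUT quirk have the command-timeout
 * error interrupt masked while a data command is outstanding and
 * re-enabled for commands without data.
 */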
1548 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_NO_TIMEOUT)) {
1549 const uint16_t eintr = SDHC_CMD_TIMEOUT_ERROR;
1550 if (cmd->c_data != NULL) {
1551 HCLR2(hp, SDHC_EINTR_SIGNAL_EN, eintr);
1552 HCLR2(hp, SDHC_EINTR_STATUS_EN, eintr);
1553 } else {
1554 HSET2(hp, SDHC_EINTR_SIGNAL_EN, eintr);
1555 HSET2(hp, SDHC_EINTR_STATUS_EN, eintr);
1556 }
1557 }
1558
1559 /*
1560 * Start the MMC command, or mark `cmd' as failed and return.
1561 */
1562 error = sdhc_start_command(hp, cmd);
1563 if (error) {
1564 cmd->c_error = error;
1565 goto out;
1566 }
1567
1568 /*
1569 * Wait until the command phase is done, or until the command
1570 * is marked done for any other reason.
1571 */
1572 probing = (cmd->c_flags & SCF_TOUT_OK) != 0;
1573 if (!sdhc_wait_intr(hp, SDHC_COMMAND_COMPLETE, SDHC_COMMAND_TIMEOUT, probing)) {
1574 DPRINTF(1,("%s: timeout for command\n", __func__));
1575 sdmmc_delay(50);
1576 cmd->c_error = ETIMEDOUT;
1577 goto out;
1578 }
1579
1580 /*
1581 * The host controller removes bits [0:7] from the response
1582 * data (CRC) and we pass the data up unchanged to the bus
1583 * driver (without padding).
1584 */
1585 if (cmd->c_error == 0 && ISSET(cmd->c_flags, SCF_RSP_PRESENT)) {
1586 cmd->c_resp[0] = HREAD4(hp, SDHC_RESPONSE + 0);
1587 if (ISSET(cmd->c_flags, SCF_RSP_136)) {
1588 cmd->c_resp[1] = HREAD4(hp, SDHC_RESPONSE + 4);
1589 cmd->c_resp[2] = HREAD4(hp, SDHC_RESPONSE + 8);
1590 cmd->c_resp[3] = HREAD4(hp, SDHC_RESPONSE + 12);
1591 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_RSP136_CRC)) {
1592 cmd->c_resp[0] = (cmd->c_resp[0] >> 8) |
1593 (cmd->c_resp[1] << 24);
1594 cmd->c_resp[1] = (cmd->c_resp[1] >> 8) |
1595 (cmd->c_resp[2] << 24);
1596 cmd->c_resp[2] = (cmd->c_resp[2] >> 8) |
1597 (cmd->c_resp[3] << 24);
1598 cmd->c_resp[3] = (cmd->c_resp[3] >> 8);
1599 }
1600 }
1601 }
1602 DPRINTF(1,("%s: resp = %08x\n", HDEVNAME(hp), cmd->c_resp[0]));
1603
1604 /*
1605 * If the command has data to transfer in any direction,
1606 * execute the transfer now.
1607 */
1608 if (cmd->c_error == 0 && cmd->c_data != NULL)
1609 sdhc_transfer_data(hp, cmd);
1610 else if (ISSET(cmd->c_flags, SCF_RSP_BSY)) {
1611 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_NO_BUSY_INTR) &&
1612 !sdhc_wait_intr(hp, SDHC_TRANSFER_COMPLETE, hz * 10, false)) {
1613 DPRINTF(1,("%s: sdhc_exec_command: RSP_BSY\n",
1614 HDEVNAME(hp)));
1615 cmd->c_error = ETIMEDOUT;
1616 goto out;
1617 }
1618 }
1619
1620 out:
1621 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED)
1622 && !ISSET(hp->sc->sc_flags, SDHC_FLAG_NO_LED_ON)) {
1623 /* Turn off the LED. */
1624 HCLR1(hp, SDHC_HOST_CTL, SDHC_LED_ON);
1625 }
1626 SET(cmd->c_flags, SCF_ITSDONE);
1627
1628 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_NO_AUTO_STOP) &&
1629 cmd->c_opcode == MMC_STOP_TRANSMISSION)
1630 (void)sdhc_soft_reset(hp, SDHC_RESET_CMD|SDHC_RESET_DAT);
1631
1632 mutex_exit(&hp->intr_lock);
1633
1634 DPRINTF(1,("%s: cmd %d %s (flags=%08x error=%d)\n", HDEVNAME(hp),
1635 cmd->c_opcode, (cmd->c_error == 0) ? "done" : "abort",
1636 cmd->c_flags, cmd->c_error));
1637 }
1638
1639 static int
1640 sdhc_start_command(struct sdhc_host *hp, struct sdmmc_command *cmd)
1641 {
1642 struct sdhc_softc * const sc = hp->sc;
1643 uint16_t blksize = 0;
1644 uint16_t blkcount = 0;
1645 uint16_t mode;
1646 uint16_t command;
1647 uint32_t pmask;
1648 int error;
1649
1650 KASSERT(mutex_owned(&hp->intr_lock));
1651
1652 DPRINTF(1,("%s: start cmd %d arg=%08x data=%p dlen=%d flags=%08x, status=%#x\n",
1653 HDEVNAME(hp), cmd->c_opcode, cmd->c_arg, cmd->c_data,
1654 cmd->c_datalen, cmd->c_flags, HREAD4(hp, SDHC_NINTR_STATUS)));
1655
1656 /*
1657 * The maximum block length for commands should be the minimum
1658 * of the host buffer size and the card buffer size. (1.7.2)
1659 */
1660
1661 /* Fragment the data into proper blocks. */
1662 if (cmd->c_datalen > 0) {
1663 blksize = MIN(cmd->c_datalen, cmd->c_blklen);
1664 blkcount = cmd->c_datalen / blksize;
1665 if (cmd->c_datalen % blksize > 0) {
1666 /* XXX: Split this command. (1.7.4) */
1667 aprint_error_dev(sc->sc_dev,
1668 "data not a multiple of %u bytes\n", blksize);
1669 return EINVAL;
1670 }
1671 }
1672
1673 /* Check limit imposed by 9-bit block count. (1.7.2) */
1674 if (blkcount > SDHC_BLOCK_COUNT_MAX) {
1675 aprint_error_dev(sc->sc_dev, "too much data\n");
1676 return EINVAL;
1677 }
1678
1679 /* Prepare transfer mode register value. (2.2.5) */
1680 mode = SDHC_BLOCK_COUNT_ENABLE;
1681 if (ISSET(cmd->c_flags, SCF_CMD_READ))
1682 mode |= SDHC_READ_MODE;
1683 if (blkcount > 1) {
1684 mode |= SDHC_MULTI_BLOCK_MODE;
1685 /* XXX only for memory commands? */
1686 if (!ISSET(sc->sc_flags, SDHC_FLAG_NO_AUTO_STOP))
1687 mode |= SDHC_AUTO_CMD12_ENABLE;
1688 }
1689 if (cmd->c_dmamap != NULL && cmd->c_datalen > 0 &&
1690 ISSET(hp->flags, SHF_MODE_DMAEN)) {
1691 mode |= SDHC_DMA_ENABLE;
1692 }
1693
1694 /*
1695 * Prepare command register value. (2.2.6)
1696 */
1697 command = (cmd->c_opcode & SDHC_COMMAND_INDEX_MASK) << SDHC_COMMAND_INDEX_SHIFT;
1698
1699 if (ISSET(cmd->c_flags, SCF_RSP_CRC))
1700 command |= SDHC_CRC_CHECK_ENABLE;
1701 if (ISSET(cmd->c_flags, SCF_RSP_IDX))
1702 command |= SDHC_INDEX_CHECK_ENABLE;
1703 if (cmd->c_datalen > 0)
1704 command |= SDHC_DATA_PRESENT_SELECT;
1705
1706 if (!ISSET(cmd->c_flags, SCF_RSP_PRESENT))
1707 command |= SDHC_NO_RESPONSE;
1708 else if (ISSET(cmd->c_flags, SCF_RSP_136))
1709 command |= SDHC_RESP_LEN_136;
1710 else if (ISSET(cmd->c_flags, SCF_RSP_BSY))
1711 command |= SDHC_RESP_LEN_48_CHK_BUSY;
1712 else
1713 command |= SDHC_RESP_LEN_48;
1714
1715 /* Wait until command and optionally data inhibit bits are clear. (1.5) */
1716 pmask = SDHC_CMD_INHIBIT_CMD;
1717 if (cmd->c_flags & (SCF_CMD_ADTC|SCF_RSP_BSY))
1718 pmask |= SDHC_CMD_INHIBIT_DAT;
1719 error = sdhc_wait_state(hp, pmask, 0);
1720 if (error) {
1721 (void) sdhc_soft_reset(hp, SDHC_RESET_DAT|SDHC_RESET_CMD);
1722 device_printf(sc->sc_dev, "command or data phase inhibited\n");
1723 return error;
1724 }
1725
1726 DPRINTF(1,("%s: writing cmd: blksize=%d blkcnt=%d mode=%04x cmd=%04x\n",
1727 HDEVNAME(hp), blksize, blkcount, mode, command));
1728
1729 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED | SDHC_FLAG_USDHC)) {
1730 blksize |= (MAX(0, PAGE_SHIFT - 12) & SDHC_DMA_BOUNDARY_MASK) <<
1731 SDHC_DMA_BOUNDARY_SHIFT; /* PAGE_SIZE DMA boundary */
1732 }
1733
1734 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED)) {
1735 /* Alert the user not to remove the card. */
1736 HSET1(hp, SDHC_HOST_CTL, SDHC_LED_ON);
1737 }
1738
1739 /* Set DMA start address. */
1740 if (ISSET(hp->flags, SHF_USE_ADMA2_MASK) && cmd->c_data != NULL) {
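/*
 * Build the ADMA2 descriptor table: one descriptor per DMA segment
 * carrying a VALID/TRANS attribute, the segment length (0 encodes
 * 64 KiB) and address, with END set on the last segment and a
 * zero-attribute terminator following it.
 */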
1741 for (int seg = 0; seg < cmd->c_dmamap->dm_nsegs; seg++) {
1742 bus_addr_t paddr =
1743 cmd->c_dmamap->dm_segs[seg].ds_addr;
1744 uint16_t len =
1745 cmd->c_dmamap->dm_segs[seg].ds_len == 65536 ?
1746 0 : cmd->c_dmamap->dm_segs[seg].ds_len;
1747 uint16_t attr =
1748 SDHC_ADMA2_VALID | SDHC_ADMA2_ACT_TRANS;
1749 if (seg == cmd->c_dmamap->dm_nsegs - 1) {
1750 attr |= SDHC_ADMA2_END;
1751 }
1752 if (ISSET(hp->flags, SHF_USE_ADMA2_32)) {
1753 struct sdhc_adma2_descriptor32 *desc =
1754 hp->adma2;
1755 desc[seg].attribute = htole16(attr);
1756 desc[seg].length = htole16(len);
1757 desc[seg].address = htole32(paddr);
1758 } else {
1759 struct sdhc_adma2_descriptor64 *desc =
1760 hp->adma2;
1761 desc[seg].attribute = htole16(attr);
1762 desc[seg].length = htole16(len);
1763 desc[seg].address = htole32(paddr & 0xffffffff);
1764 desc[seg].address_hi = htole32(
1765 (uint64_t)paddr >> 32);
1766 }
1767 }
1768 if (ISSET(hp->flags, SHF_USE_ADMA2_32)) {
1769 struct sdhc_adma2_descriptor32 *desc = hp->adma2;
1770 desc[cmd->c_dmamap->dm_nsegs].attribute = htole16(0);
1771 } else {
1772 struct sdhc_adma2_descriptor64 *desc = hp->adma2;
1773 desc[cmd->c_dmamap->dm_nsegs].attribute = htole16(0);
1774 }
1775 bus_dmamap_sync(sc->sc_dmat, hp->adma_map, 0, PAGE_SIZE,
1776 BUS_DMASYNC_PREWRITE);
1777 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_USDHC)) {
1778 HCLR4(hp, SDHC_HOST_CTL, SDHC_USDHC_DMA_SELECT);
1779 HSET4(hp, SDHC_HOST_CTL, SDHC_USDHC_DMA_SELECT_ADMA2);
1780 } else {
1781 HCLR1(hp, SDHC_HOST_CTL, SDHC_DMA_SELECT);
1782 HSET1(hp, SDHC_HOST_CTL, SDHC_DMA_SELECT_ADMA2);
1783 }
1784
1785 const bus_addr_t desc_addr = hp->adma_map->dm_segs[0].ds_addr;
1786
1787 HWRITE4(hp, SDHC_ADMA_SYSTEM_ADDR, desc_addr & 0xffffffff);
1788 if (ISSET(hp->flags, SHF_USE_ADMA2_64)) {
1789 HWRITE4(hp, SDHC_ADMA_SYSTEM_ADDR + 4,
1790 (uint64_t)desc_addr >> 32);
1791 }
1792 } else if (ISSET(mode, SDHC_DMA_ENABLE) &&
1793 !ISSET(sc->sc_flags, SDHC_FLAG_EXTERNAL_DMA)) {
1794 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_USDHC)) {
1795 HCLR4(hp, SDHC_HOST_CTL, SDHC_USDHC_DMA_SELECT);
1796 }
1797 HWRITE4(hp, SDHC_DMA_ADDR, cmd->c_dmamap->dm_segs[0].ds_addr);
1798 }
1799
1800 /*
1801 * Start a CPU data transfer. Writing to the high order byte
1802 * of the SDHC_COMMAND register triggers the SD command. (1.5)
1803 */
1804 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_32BIT_ACCESS)) {
1805 HWRITE4(hp, SDHC_BLOCK_SIZE, blksize | (blkcount << 16));
1806 HWRITE4(hp, SDHC_ARGUMENT, cmd->c_arg);
1807 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_USDHC)) {
1808 /* the mode bits live in the MIX_CTRL register on uSDHC */
1809 HWRITE4(hp, SDHC_MIX_CTRL, mode |
1810 (HREAD4(hp, SDHC_MIX_CTRL) &
1811 ~(SDHC_MULTI_BLOCK_MODE |
1812 SDHC_READ_MODE |
1813 SDHC_AUTO_CMD12_ENABLE |
1814 SDHC_BLOCK_COUNT_ENABLE |
1815 SDHC_DMA_ENABLE)));
1816 HWRITE4(hp, SDHC_TRANSFER_MODE, command << 16);
1817 } else {
1818 HWRITE4(hp, SDHC_TRANSFER_MODE, mode | (command << 16));
1819 }
1820 } else {
1821 HWRITE2(hp, SDHC_BLOCK_SIZE, blksize);
1822 HWRITE2(hp, SDHC_BLOCK_COUNT, blkcount);
1823 HWRITE4(hp, SDHC_ARGUMENT, cmd->c_arg);
1824 HWRITE2(hp, SDHC_TRANSFER_MODE, mode);
1825 HWRITE2(hp, SDHC_COMMAND, command);
1826 }
1827
1828 return 0;
1829 }
1830
1831 static void
1832 sdhc_transfer_data(struct sdhc_host *hp, struct sdmmc_command *cmd)
1833 {
1834 struct sdhc_softc *sc = hp->sc;
1835 int error;
1836
1837 KASSERT(mutex_owned(&hp->intr_lock));
1838
1839 DPRINTF(1,("%s: data transfer: resp=%08x datalen=%u\n", HDEVNAME(hp),
1840 MMC_R1(cmd->c_resp), cmd->c_datalen));
1841
1842 #ifdef SDHC_DEBUG
1843 /* XXX I forgot why I wanted to know when this happens :-( */
1844 if ((cmd->c_opcode == 52 || cmd->c_opcode == 53) &&
1845 ISSET(MMC_R1(cmd->c_resp), 0xcb00)) {
1846 aprint_error_dev(hp->sc->sc_dev,
1847 "CMD52/53 error response flags %#x\n",
1848 MMC_R1(cmd->c_resp) & 0xff00);
1849 }
1850 #endif
1851
1852 if (cmd->c_dmamap != NULL) {
1853 if (hp->sc->sc_vendor_transfer_data_dma != NULL) {
1854 error = hp->sc->sc_vendor_transfer_data_dma(sc, cmd);
1855 if (error == 0 && !sdhc_wait_intr(hp,
1856 SDHC_TRANSFER_COMPLETE, SDHC_DMA_TIMEOUT, false)) {
1857 DPRINTF(1,("%s: timeout\n", __func__));
1858 error = ETIMEDOUT;
1859 }
1860 } else {
1861 error = sdhc_transfer_data_dma(hp, cmd);
1862 }
1863 } else
1864 error = sdhc_transfer_data_pio(hp, cmd);
1865 if (error)
1866 cmd->c_error = error;
1867 SET(cmd->c_flags, SCF_ITSDONE);
1868
1869 DPRINTF(1,("%s: data transfer done (error=%d)\n",
1870 HDEVNAME(hp), cmd->c_error));
1871 }
1872
1873 static int
1874 sdhc_transfer_data_dma(struct sdhc_host *hp, struct sdmmc_command *cmd)
1875 {
1876 bus_dma_segment_t *dm_segs = cmd->c_dmamap->dm_segs;
1877 bus_addr_t posaddr;
1878 bus_addr_t segaddr;
1879 bus_size_t seglen;
1880 u_int seg = 0;
1881 int error = 0;
1882 int status;
1883
1884 KASSERT(mutex_owned(&hp->intr_lock));
1885 KASSERT(HREAD2(hp, SDHC_NINTR_STATUS_EN) & SDHC_DMA_INTERRUPT);
1886 KASSERT(HREAD2(hp, SDHC_NINTR_SIGNAL_EN) & SDHC_DMA_INTERRUPT);
1887 KASSERT(HREAD2(hp, SDHC_NINTR_STATUS_EN) & SDHC_TRANSFER_COMPLETE);
1888 KASSERT(HREAD2(hp, SDHC_NINTR_SIGNAL_EN) & SDHC_TRANSFER_COMPLETE);
1889
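	/*
	 * Wait for the transfer to complete.  With SDMA the controller
	 * pauses at every DMA boundary and raises SDHC_DMA_INTERRUPT;
	 * the loop restarts it until SDHC_TRANSFER_COMPLETE is seen.
	 */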
1890 for (;;) {
1891 status = sdhc_wait_intr(hp,
1892 SDHC_DMA_INTERRUPT|SDHC_TRANSFER_COMPLETE,
1893 SDHC_DMA_TIMEOUT, false);
1894
1895 if (status & SDHC_TRANSFER_COMPLETE) {
1896 break;
1897 }
1898 if (!status) {
1899 DPRINTF(1,("%s: timeout\n", __func__));
1900 error = ETIMEDOUT;
1901 break;
1902 }
1903
1904 if (ISSET(hp->flags, SHF_USE_ADMA2_MASK)) {
1905 continue;
1906 }
1907
1908 if ((status & SDHC_DMA_INTERRUPT) == 0) {
1909 continue;
1910 }
1911
1912 		/*
		 * DMA interrupt: the controller has stopped at an SDMA
		 * boundary.  Restart it, either within the current
		 * segment or at the start of the next one.
		 */
1913
1914 segaddr = dm_segs[seg].ds_addr;
1915 seglen = dm_segs[seg].ds_len;
1916 posaddr = HREAD4(hp, SDHC_DMA_ADDR);
1917
1918 		if ((seg == (cmd->c_dmamap->dm_nsegs - 1)) &&
		    (posaddr == (segaddr + seglen))) {
1919 continue;
1920 }
1921 if ((posaddr >= segaddr) && (posaddr < (segaddr + seglen)))
1922 HWRITE4(hp, SDHC_DMA_ADDR, posaddr);
1923 		else if ((posaddr >= segaddr) && (posaddr == (segaddr + seglen)) &&
		    (seg + 1) < cmd->c_dmamap->dm_nsegs)
1924 HWRITE4(hp, SDHC_DMA_ADDR, dm_segs[++seg].ds_addr);
1925 KASSERT(seg < cmd->c_dmamap->dm_nsegs);
1926 }
1927
1928 if (ISSET(hp->flags, SHF_USE_ADMA2_MASK)) {
1929 bus_dmamap_sync(hp->sc->sc_dmat, hp->adma_map, 0,
1930 PAGE_SIZE, BUS_DMASYNC_POSTWRITE);
1931 }
1932
1933 return error;
1934 }
1935
1936 static int
1937 sdhc_transfer_data_pio(struct sdhc_host *hp, struct sdmmc_command *cmd)
1938 {
1939 uint8_t *data = cmd->c_data;
1940 void (*pio_func)(struct sdhc_host *, uint8_t *, u_int);
1941 u_int len, datalen;
1942 u_int imask;
1943 u_int pmask;
1944 int error = 0;
1945
1946 KASSERT(mutex_owned(&hp->intr_lock));
1947
1948 if (ISSET(cmd->c_flags, SCF_CMD_READ)) {
1949 imask = SDHC_BUFFER_READ_READY;
1950 pmask = SDHC_BUFFER_READ_ENABLE;
1951 if (ISSET(hp->sc->sc_flags,
1952 SDHC_FLAG_ENHANCED | SDHC_FLAG_USDHC)) {
1953 pio_func = esdhc_read_data_pio;
1954 } else {
1955 pio_func = sdhc_read_data_pio;
1956 }
1957 } else {
1958 imask = SDHC_BUFFER_WRITE_READY;
1959 pmask = SDHC_BUFFER_WRITE_ENABLE;
1960 if (ISSET(hp->sc->sc_flags,
1961 SDHC_FLAG_ENHANCED | SDHC_FLAG_USDHC)) {
1962 pio_func = esdhc_write_data_pio;
1963 } else {
1964 pio_func = sdhc_write_data_pio;
1965 }
1966 }
1967 datalen = cmd->c_datalen;
1968
1969 KASSERT(mutex_owned(&hp->intr_lock));
1970 KASSERT(HREAD2(hp, SDHC_NINTR_STATUS_EN) & imask);
1971 KASSERT(HREAD2(hp, SDHC_NINTR_STATUS_EN) & SDHC_TRANSFER_COMPLETE);
1972 KASSERT(HREAD2(hp, SDHC_NINTR_SIGNAL_EN) & SDHC_TRANSFER_COMPLETE);
1973
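	/*
	 * Move up to one block per iteration, waiting for the controller
	 * to report that its internal buffer may be accessed.
	 */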
1974 while (datalen > 0) {
1975 if (!ISSET(HREAD4(hp, SDHC_PRESENT_STATE), pmask)) {
1976 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_32BIT_ACCESS)) {
1977 HSET4(hp, SDHC_NINTR_SIGNAL_EN, imask);
1978 } else {
1979 HSET2(hp, SDHC_NINTR_SIGNAL_EN, imask);
1980 }
1981 if (!sdhc_wait_intr(hp, imask, SDHC_BUFFER_TIMEOUT, false)) {
1982 DPRINTF(1,("%s: timeout\n", __func__));
1983 error = ETIMEDOUT;
1984 break;
1985 }
1986
1987 error = sdhc_wait_state(hp, pmask, pmask);
1988 if (error)
1989 break;
1990 }
1991
1992 len = MIN(datalen, cmd->c_blklen);
1993 (*pio_func)(hp, data, len);
1994 DPRINTF(2,("%s: pio data transfer %u @ %p\n",
1995 HDEVNAME(hp), len, data));
1996
1997 data += len;
1998 datalen -= len;
1999 }
2000
2001 if (error == 0 && !sdhc_wait_intr(hp, SDHC_TRANSFER_COMPLETE,
2002 SDHC_TRANSFER_TIMEOUT, false)) {
2003 DPRINTF(1,("%s: timeout for transfer\n", __func__));
2004 error = ETIMEDOUT;
2005 }
2006
2007 return error;
2008 }
2009
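/*
 * Drain the controller's data port, using the widest accesses the
 * buffer alignment allows.
 */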
2010 static void
2011 sdhc_read_data_pio(struct sdhc_host *hp, uint8_t *data, u_int datalen)
2012 {
2013
2014 if (((__uintptr_t)data & 3) == 0) {
2015 while (datalen > 3) {
2016 *(uint32_t *)data = le32toh(HREAD4(hp, SDHC_DATA));
2017 data += 4;
2018 datalen -= 4;
2019 }
2020 if (datalen > 1) {
2021 *(uint16_t *)data = le16toh(HREAD2(hp, SDHC_DATA));
2022 data += 2;
2023 datalen -= 2;
2024 }
2025 if (datalen > 0) {
2026 *data = HREAD1(hp, SDHC_DATA);
2027 data += 1;
2028 datalen -= 1;
2029 }
2030 } else if (((__uintptr_t)data & 1) == 0) {
2031 while (datalen > 1) {
2032 *(uint16_t *)data = le16toh(HREAD2(hp, SDHC_DATA));
2033 data += 2;
2034 datalen -= 2;
2035 }
2036 if (datalen > 0) {
2037 *data = HREAD1(hp, SDHC_DATA);
2038 data += 1;
2039 datalen -= 1;
2040 }
2041 } else {
2042 while (datalen > 0) {
2043 *data = HREAD1(hp, SDHC_DATA);
2044 data += 1;
2045 datalen -= 1;
2046 }
2047 }
2048 }
2049
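/*
 * Fill the controller's data port, using the widest accesses the
 * buffer alignment allows.
 */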
2050 static void
2051 sdhc_write_data_pio(struct sdhc_host *hp, uint8_t *data, u_int datalen)
2052 {
2053
2054 if (((__uintptr_t)data & 3) == 0) {
2055 while (datalen > 3) {
2056 HWRITE4(hp, SDHC_DATA, htole32(*(uint32_t *)data));
2057 data += 4;
2058 datalen -= 4;
2059 }
2060 if (datalen > 1) {
2061 HWRITE2(hp, SDHC_DATA, htole16(*(uint16_t *)data));
2062 data += 2;
2063 datalen -= 2;
2064 }
2065 if (datalen > 0) {
2066 HWRITE1(hp, SDHC_DATA, *data);
2067 data += 1;
2068 datalen -= 1;
2069 }
2070 } else if (((__uintptr_t)data & 1) == 0) {
2071 while (datalen > 1) {
2072 HWRITE2(hp, SDHC_DATA, htole16(*(uint16_t *)data));
2073 data += 2;
2074 datalen -= 2;
2075 }
2076 if (datalen > 0) {
2077 HWRITE1(hp, SDHC_DATA, *data);
2078 data += 1;
2079 datalen -= 1;
2080 }
2081 } else {
2082 while (datalen > 0) {
2083 HWRITE1(hp, SDHC_DATA, *data);
2084 data += 1;
2085 datalen -= 1;
2086 }
2087 }
2088 }
2089
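/*
 * Enhanced SDHC (eSDHC/uSDHC) PIO read: data always moves through the
 * 32-bit data port, with bursts paced by the read watermark so the
 * FIFO has time to refill.
 */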
2090 static void
2091 esdhc_read_data_pio(struct sdhc_host *hp, uint8_t *data, u_int datalen)
2092 {
2093 uint16_t status = HREAD2(hp, SDHC_NINTR_STATUS);
2094 uint32_t v;
2095
2096 	const size_t watermark =
	    (HREAD4(hp, SDHC_WATERMARK_LEVEL) >> SDHC_WATERMARK_READ_SHIFT) &
	    SDHC_WATERMARK_READ_MASK;
2097 size_t count = 0;
2098
2099 while (datalen > 3 && !ISSET(status, SDHC_TRANSFER_COMPLETE)) {
2100 if (count == 0) {
2101 /*
2102 * If we've drained "watermark" words, we need to wait
2103 * a little bit so the read FIFO can refill.
2104 */
2105 sdmmc_delay(10);
2106 count = watermark;
2107 }
2108 v = HREAD4(hp, SDHC_DATA);
2109 v = le32toh(v);
2110 *(uint32_t *)data = v;
2111 data += 4;
2112 datalen -= 4;
2113 status = HREAD2(hp, SDHC_NINTR_STATUS);
2114 count--;
2115 }
2116 if (datalen > 0 && !ISSET(status, SDHC_TRANSFER_COMPLETE)) {
2117 if (count == 0) {
2118 sdmmc_delay(10);
2119 }
2120 v = HREAD4(hp, SDHC_DATA);
2121 v = le32toh(v);
2122 do {
2123 *data++ = v;
2124 v >>= 8;
2125 } while (--datalen > 0);
2126 }
2127 }
2128
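/*
 * Enhanced SDHC (eSDHC/uSDHC) PIO write: as above, but paced by the
 * write watermark so the FIFO has room for each burst.
 */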
2129 static void
2130 esdhc_write_data_pio(struct sdhc_host *hp, uint8_t *data, u_int datalen)
2131 {
2132 uint16_t status = HREAD2(hp, SDHC_NINTR_STATUS);
2133 uint32_t v;
2134
2135 	const size_t watermark =
	    (HREAD4(hp, SDHC_WATERMARK_LEVEL) >> SDHC_WATERMARK_WRITE_SHIFT) &
	    SDHC_WATERMARK_WRITE_MASK;
2136 size_t count = watermark;
2137
2138 while (datalen > 3 && !ISSET(status, SDHC_TRANSFER_COMPLETE)) {
2139 if (count == 0) {
2140 sdmmc_delay(10);
2141 count = watermark;
2142 }
2143 v = *(uint32_t *)data;
2144 v = htole32(v);
2145 HWRITE4(hp, SDHC_DATA, v);
2146 data += 4;
2147 datalen -= 4;
2148 status = HREAD2(hp, SDHC_NINTR_STATUS);
2149 count--;
2150 }
2151 if (datalen > 0 && !ISSET(status, SDHC_TRANSFER_COMPLETE)) {
2152 if (count == 0) {
2153 sdmmc_delay(10);
2154 }
2155 v = *(uint32_t *)data;
2156 v = htole32(v);
2157 HWRITE4(hp, SDHC_DATA, v);
2158 }
2159 }
2160
2161 /* Prepare for another command. */
2162 static int
2163 sdhc_soft_reset(struct sdhc_host *hp, int mask)
2164 {
2165 int timo;
2166
2167 KASSERT(mutex_owned(&hp->intr_lock));
2168
2169 DPRINTF(1,("%s: software reset reg=%08x\n", HDEVNAME(hp), mask));
2170
2171 /* Request the reset. */
2172 HWRITE1(hp, SDHC_SOFTWARE_RESET, mask);
2173
2174 /*
2175 * If necessary, wait for the controller to set the bits to
2176 * acknowledge the reset.
2177 */
2178 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_WAIT_RESET) &&
2179 ISSET(mask, (SDHC_RESET_DAT | SDHC_RESET_CMD))) {
2180 for (timo = 10000; timo > 0; timo--) {
2181 if (ISSET(HREAD1(hp, SDHC_SOFTWARE_RESET), mask))
2182 break;
2183 			/* Short delay; the acknowledge window may be brief. */
2184 sdmmc_delay(1);
2185 }
2186 if (timo == 0) {
2187 			DPRINTF(1,("%s: timeout waiting for reset ack\n", __func__));
2188 return ETIMEDOUT;
2189 }
2190 }
2191
2192 /*
2193 * Wait for the controller to clear the bits to indicate that
2194 * the reset has completed.
2195 */
2196 for (timo = 10; timo > 0; timo--) {
2197 if (!ISSET(HREAD1(hp, SDHC_SOFTWARE_RESET), mask))
2198 break;
2199 sdmmc_delay(10000);
2200 }
2201 if (timo == 0) {
2202 DPRINTF(1,("%s: timeout reg=%08x\n", HDEVNAME(hp),
2203 HREAD1(hp, SDHC_SOFTWARE_RESET)));
2204 return ETIMEDOUT;
2205 }
2206
2207 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED)) {
2208 HSET4(hp, SDHC_DMA_CTL, SDHC_DMA_SNOOP);
2209 }
2210
2211 return 0;
2212 }
2213
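/*
 * Wait for any of the interrupts in `mask' (or an error interrupt)
 * posted by sdhc_intr().  Return the matched status bits, or 0 on
 * timeout or error.
 */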
2214 static int
2215 sdhc_wait_intr(struct sdhc_host *hp, int mask, int timo, bool probing)
2216 {
2217 int status, error, nointr;
2218
2219 KASSERT(mutex_owned(&hp->intr_lock));
2220
2221 mask |= SDHC_ERROR_INTERRUPT;
2222
2223 nointr = 0;
2224 status = hp->intr_status & mask;
2225 while (status == 0) {
2226 if (cv_timedwait(&hp->intr_cv, &hp->intr_lock, timo)
2227 == EWOULDBLOCK) {
2228 nointr = 1;
2229 break;
2230 }
2231 status = hp->intr_status & mask;
2232 }
2233 error = hp->intr_error_status;
2234
2235 DPRINTF(2,("%s: intr status %#x error %#x\n", HDEVNAME(hp), status,
2236 error));
2237
2238 hp->intr_status &= ~status;
2239 hp->intr_error_status &= ~error;
2240
2241 if (ISSET(status, SDHC_ERROR_INTERRUPT)) {
2242 if (ISSET(error, SDHC_DMA_ERROR))
2243 device_printf(hp->sc->sc_dev,"dma error\n");
2244 if (ISSET(error, SDHC_ADMA_ERROR))
2245 device_printf(hp->sc->sc_dev,"adma error\n");
2246 if (ISSET(error, SDHC_AUTO_CMD12_ERROR))
2247 device_printf(hp->sc->sc_dev,"auto_cmd12 error\n");
2248 if (ISSET(error, SDHC_CURRENT_LIMIT_ERROR))
2249 device_printf(hp->sc->sc_dev,"current limit error\n");
2250 if (ISSET(error, SDHC_DATA_END_BIT_ERROR))
2251 device_printf(hp->sc->sc_dev,"data end bit error\n");
2252 if (ISSET(error, SDHC_DATA_CRC_ERROR))
2253 device_printf(hp->sc->sc_dev,"data crc error\n");
2254 if (ISSET(error, SDHC_DATA_TIMEOUT_ERROR))
2255 device_printf(hp->sc->sc_dev,"data timeout error\n");
2256 if (ISSET(error, SDHC_CMD_INDEX_ERROR))
2257 device_printf(hp->sc->sc_dev,"cmd index error\n");
2258 if (ISSET(error, SDHC_CMD_END_BIT_ERROR))
2259 device_printf(hp->sc->sc_dev,"cmd end bit error\n");
2260 if (ISSET(error, SDHC_CMD_CRC_ERROR))
2261 device_printf(hp->sc->sc_dev,"cmd crc error\n");
2262 if (ISSET(error, SDHC_CMD_TIMEOUT_ERROR)) {
2263 if (!probing)
2264 device_printf(hp->sc->sc_dev,"cmd timeout error\n");
2265 #ifdef SDHC_DEBUG
2266 else if (sdhcdebug > 0)
2267 device_printf(hp->sc->sc_dev,"cmd timeout (expected)\n");
2268 #endif
2269 }
2270 if ((error & ~SDHC_EINTR_STATUS_MASK) != 0)
2271 device_printf(hp->sc->sc_dev,"vendor error %#x\n",
2272 (error & ~SDHC_EINTR_STATUS_MASK));
2273 if (error == 0)
2274 device_printf(hp->sc->sc_dev,"no error\n");
2275
2276 /* Command timeout has higher priority than command complete. */
2277 if (ISSET(error, SDHC_CMD_TIMEOUT_ERROR))
2278 CLR(status, SDHC_COMMAND_COMPLETE);
2279
2280 /* Transfer complete has higher priority than data timeout. */
2281 if (ISSET(status, SDHC_TRANSFER_COMPLETE))
2282 CLR(error, SDHC_DATA_TIMEOUT_ERROR);
2283 }
2284
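	/* On timeout or error, reset the command and data circuits. */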
2285 if (nointr ||
2286 (ISSET(status, SDHC_ERROR_INTERRUPT) && error)) {
2287 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED))
2288 (void)sdhc_soft_reset(hp, SDHC_RESET_CMD|SDHC_RESET_DAT);
2289 hp->intr_error_status = 0;
2290 status = 0;
2291 }
2292
2293 return status;
2294 }
2295
2296 /*
2297  * Established by the attachment driver at interrupt priority IPL_SDMMC.
2298 */
2299 int
2300 sdhc_intr(void *arg)
2301 {
2302 struct sdhc_softc *sc = (struct sdhc_softc *)arg;
2303 struct sdhc_host *hp;
2304 int done = 0;
2305 uint16_t status;
2306 uint16_t error;
2307
2308 /* We got an interrupt, but we don't know from which slot. */
2309 for (size_t host = 0; host < sc->sc_nhosts; host++) {
2310 hp = sc->sc_host[host];
2311 if (hp == NULL)
2312 continue;
2313
2314 mutex_enter(&hp->intr_lock);
2315
2316 if (ISSET(sc->sc_flags, SDHC_FLAG_32BIT_ACCESS)) {
2317 /* Find out which interrupts are pending. */
2318 uint32_t xstatus = HREAD4(hp, SDHC_NINTR_STATUS);
2319 status = xstatus;
2320 error = xstatus >> 16;
2321 if (ISSET(sc->sc_flags, SDHC_FLAG_USDHC) &&
2322 (xstatus & SDHC_TRANSFER_COMPLETE) &&
2323 !(xstatus & SDHC_DMA_INTERRUPT)) {
2324 /* read again due to uSDHC errata */
2325 status = xstatus = HREAD4(hp,
2326 SDHC_NINTR_STATUS);
2327 error = xstatus >> 16;
2328 }
2329 if (ISSET(sc->sc_flags,
2330 SDHC_FLAG_ENHANCED | SDHC_FLAG_USDHC)) {
2331 if ((error & SDHC_NINTR_STATUS_MASK) != 0)
2332 SET(status, SDHC_ERROR_INTERRUPT);
2333 }
2334 if (error)
2335 xstatus |= SDHC_ERROR_INTERRUPT;
2336 else if (!ISSET(status, SDHC_NINTR_STATUS_MASK))
2337 goto next_port; /* no interrupt for us */
2338 /* Acknowledge the interrupts we are about to handle. */
2339 HWRITE4(hp, SDHC_NINTR_STATUS, xstatus);
2340 } else {
2341 /* Find out which interrupts are pending. */
2342 error = 0;
2343 status = HREAD2(hp, SDHC_NINTR_STATUS);
2344 if (!ISSET(status, SDHC_NINTR_STATUS_MASK))
2345 goto next_port; /* no interrupt for us */
2346 /* Acknowledge the interrupts we are about to handle. */
2347 HWRITE2(hp, SDHC_NINTR_STATUS, status);
2348 if (ISSET(status, SDHC_ERROR_INTERRUPT)) {
2349 /* Acknowledge error interrupts. */
2350 error = HREAD2(hp, SDHC_EINTR_STATUS);
2351 HWRITE2(hp, SDHC_EINTR_STATUS, error);
2352 }
2353 }
2354
2355 DPRINTF(2,("%s: interrupt status=%x error=%x\n", HDEVNAME(hp),
2356 status, error));
2357
2358 /* Claim this interrupt. */
2359 done = 1;
2360
2361 if (ISSET(status, SDHC_ERROR_INTERRUPT) &&
2362 ISSET(error, SDHC_ADMA_ERROR)) {
2363 uint8_t adma_err = HREAD1(hp, SDHC_ADMA_ERROR_STATUS);
2364 printf("%s: ADMA error, status %02x\n", HDEVNAME(hp),
2365 adma_err);
2366 }
2367
2368 /*
2369 * Wake up the sdmmc event thread to scan for cards.
2370 */
2371 if (ISSET(status, SDHC_CARD_REMOVAL|SDHC_CARD_INSERTION)) {
2372 if (hp->sdmmc != NULL) {
2373 sdmmc_needs_discover(hp->sdmmc);
2374 }
2375 if (ISSET(sc->sc_flags,
2376 SDHC_FLAG_ENHANCED | SDHC_FLAG_USDHC)) {
2377 HCLR4(hp, SDHC_NINTR_STATUS_EN,
2378 status & (SDHC_CARD_REMOVAL|SDHC_CARD_INSERTION));
2379 HCLR4(hp, SDHC_NINTR_SIGNAL_EN,
2380 status & (SDHC_CARD_REMOVAL|SDHC_CARD_INSERTION));
2381 }
2382 }
2383
2384 /*
2385 * Schedule re-tuning process (UHS).
2386 */
2387 if (ISSET(status, SDHC_RETUNING_EVENT)) {
2388 atomic_swap_uint(&hp->tuning_timer_pending, 1);
2389 }
2390
2391 /*
2392 * Wake up the blocking process to service command
2393 * related interrupt(s).
2394 */
2395 if (ISSET(status, SDHC_COMMAND_COMPLETE|SDHC_ERROR_INTERRUPT|
2396 SDHC_BUFFER_READ_READY|SDHC_BUFFER_WRITE_READY|
2397 SDHC_TRANSFER_COMPLETE|SDHC_DMA_INTERRUPT)) {
2398 hp->intr_error_status |= error;
2399 hp->intr_status |= status;
2400 if (ISSET(sc->sc_flags,
2401 SDHC_FLAG_ENHANCED | SDHC_FLAG_USDHC)) {
2402 HCLR4(hp, SDHC_NINTR_SIGNAL_EN,
2403 status & (SDHC_BUFFER_READ_READY|SDHC_BUFFER_WRITE_READY));
2404 }
2405 cv_broadcast(&hp->intr_cv);
2406 }
2407
2408 /*
2409 * Service SD card interrupts.
2410 */
2411 if (!ISSET(sc->sc_flags, SDHC_FLAG_ENHANCED | SDHC_FLAG_USDHC)
2412 && ISSET(status, SDHC_CARD_INTERRUPT)) {
2413 DPRINTF(0,("%s: card interrupt\n", HDEVNAME(hp)));
2414 HCLR2(hp, SDHC_NINTR_STATUS_EN, SDHC_CARD_INTERRUPT);
2415 sdmmc_card_intr(hp->sdmmc);
2416 }
2417 next_port:
2418 mutex_exit(&hp->intr_lock);
2419 }
2420
2421 return done;
2422 }
2423
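/*
 * Exported accessors for the per-host lock and raw host registers.
 */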
2424 kmutex_t *
2425 sdhc_host_lock(struct sdhc_host *hp)
2426 {
2427 return &hp->intr_lock;
2428 }
2429
2430 uint8_t
2431 sdhc_host_read_1(struct sdhc_host *hp, int reg)
2432 {
2433 return HREAD1(hp, reg);
2434 }
2435
2436 uint16_t
2437 sdhc_host_read_2(struct sdhc_host *hp, int reg)
2438 {
2439 return HREAD2(hp, reg);
2440 }
2441
2442 uint32_t
2443 sdhc_host_read_4(struct sdhc_host *hp, int reg)
2444 {
2445 return HREAD4(hp, reg);
2446 }
2447
2448 void
2449 sdhc_host_write_1(struct sdhc_host *hp, int reg, uint8_t val)
2450 {
2451 HWRITE1(hp, reg, val);
2452 }
2453
2454 void
2455 sdhc_host_write_2(struct sdhc_host *hp, int reg, uint16_t val)
2456 {
2457 HWRITE2(hp, reg, val);
2458 }
2459
2460 void
2461 sdhc_host_write_4(struct sdhc_host *hp, int reg, uint32_t val)
2462 {
2463 HWRITE4(hp, reg, val);
2464 }
2465
2466 #ifdef SDHC_DEBUG
2467 void
2468 sdhc_dump_regs(struct sdhc_host *hp)
2469 {
2470
2471 printf("0x%02x PRESENT_STATE: %x\n", SDHC_PRESENT_STATE,
2472 HREAD4(hp, SDHC_PRESENT_STATE));
2473 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED))
2474 printf("0x%02x POWER_CTL: %x\n", SDHC_POWER_CTL,
2475 HREAD1(hp, SDHC_POWER_CTL));
2476 printf("0x%02x NINTR_STATUS: %x\n", SDHC_NINTR_STATUS,
2477 HREAD2(hp, SDHC_NINTR_STATUS));
2478 printf("0x%02x EINTR_STATUS: %x\n", SDHC_EINTR_STATUS,
2479 HREAD2(hp, SDHC_EINTR_STATUS));
2480 printf("0x%02x NINTR_STATUS_EN: %x\n", SDHC_NINTR_STATUS_EN,
2481 HREAD2(hp, SDHC_NINTR_STATUS_EN));
2482 printf("0x%02x EINTR_STATUS_EN: %x\n", SDHC_EINTR_STATUS_EN,
2483 HREAD2(hp, SDHC_EINTR_STATUS_EN));
2484 printf("0x%02x NINTR_SIGNAL_EN: %x\n", SDHC_NINTR_SIGNAL_EN,
2485 HREAD2(hp, SDHC_NINTR_SIGNAL_EN));
2486 printf("0x%02x EINTR_SIGNAL_EN: %x\n", SDHC_EINTR_SIGNAL_EN,
2487 HREAD2(hp, SDHC_EINTR_SIGNAL_EN));
2488 printf("0x%02x CAPABILITIES: %x\n", SDHC_CAPABILITIES,
2489 HREAD4(hp, SDHC_CAPABILITIES));
2490 printf("0x%02x MAX_CAPABILITIES: %x\n", SDHC_MAX_CAPABILITIES,
2491 HREAD4(hp, SDHC_MAX_CAPABILITIES));
2492 }
2493 #endif
2494