     1 /*	$NetBSD: sdhc.c,v 1.105 2019/10/28 06:00:14 mlelstv Exp $	*/
2 /* $OpenBSD: sdhc.c,v 1.25 2009/01/13 19:44:20 grange Exp $ */
3
4 /*
5 * Copyright (c) 2006 Uwe Stuehler <uwe (at) openbsd.org>
6 *
7 * Permission to use, copy, modify, and distribute this software for any
8 * purpose with or without fee is hereby granted, provided that the above
9 * copyright notice and this permission notice appear in all copies.
10 *
11 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18 */
19
20 /*
21 * SD Host Controller driver based on the SD Host Controller Standard
    22  * Simplified Specification Version 1.00 (www.sdcard.org).
23 */
24
25 #include <sys/cdefs.h>
26 __KERNEL_RCSID(0, "$NetBSD: sdhc.c,v 1.105 2019/10/28 06:00:14 mlelstv Exp $");
27
28 #ifdef _KERNEL_OPT
29 #include "opt_sdmmc.h"
30 #endif
31
32 #include <sys/param.h>
33 #include <sys/device.h>
34 #include <sys/kernel.h>
35 #include <sys/malloc.h>
36 #include <sys/systm.h>
37 #include <sys/mutex.h>
38 #include <sys/condvar.h>
39 #include <sys/atomic.h>
40
41 #include <dev/sdmmc/sdhcreg.h>
42 #include <dev/sdmmc/sdhcvar.h>
43 #include <dev/sdmmc/sdmmcchip.h>
44 #include <dev/sdmmc/sdmmcreg.h>
45 #include <dev/sdmmc/sdmmcvar.h>
46
47 #ifdef SDHC_DEBUG
48 int sdhcdebug = 1;
49 #define DPRINTF(n,s) do { if ((n) <= sdhcdebug) printf s; } while (0)
50 void sdhc_dump_regs(struct sdhc_host *);
51 #else
52 #define DPRINTF(n,s) do {} while (0)
53 #endif
54
55 #define SDHC_COMMAND_TIMEOUT hz
56 #define SDHC_BUFFER_TIMEOUT hz
57 #define SDHC_TRANSFER_TIMEOUT hz
58 #define SDHC_DMA_TIMEOUT (hz*3)
59 #define SDHC_TUNING_TIMEOUT hz
60
61 struct sdhc_host {
62 struct sdhc_softc *sc; /* host controller device */
63
64 bus_space_tag_t iot; /* host register set tag */
65 bus_space_handle_t ioh; /* host register set handle */
66 bus_size_t ios; /* host register space size */
67 bus_dma_tag_t dmat; /* host DMA tag */
68
69 device_t sdmmc; /* generic SD/MMC device */
70
71 u_int clkbase; /* base clock frequency in KHz */
72 int maxblklen; /* maximum block length */
73 uint32_t ocr; /* OCR value from capabilities */
74
75 uint8_t regs[14]; /* host controller state */
76
77 uint16_t intr_status; /* soft interrupt status */
78 uint16_t intr_error_status; /* soft error status */
79 kmutex_t intr_lock;
80 kcondvar_t intr_cv;
81
82 callout_t tuning_timer;
83 int tuning_timing;
84 u_int tuning_timer_count;
85 u_int tuning_timer_pending;
86
87 int specver; /* spec. version */
88
89 uint32_t flags; /* flags for this host */
90 #define SHF_USE_DMA 0x0001
91 #define SHF_USE_4BIT_MODE 0x0002
92 #define SHF_USE_8BIT_MODE 0x0004
93 #define SHF_MODE_DMAEN 0x0008 /* needs SDHC_DMA_ENABLE in mode */
94 #define SHF_USE_ADMA2_32 0x0010
95 #define SHF_USE_ADMA2_64 0x0020
96 #define SHF_USE_ADMA2_MASK 0x0030
97
98 bus_dmamap_t adma_map;
99 bus_dma_segment_t adma_segs[1];
100 void *adma2;
101
102 uint8_t vdd; /* last vdd setting */
103 };
104
105 #define HDEVNAME(hp) (device_xname((hp)->sc->sc_dev))
106
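/*
 * Some controllers (SDHC_FLAG_32BIT_ACCESS) only tolerate 32-bit wide
 * register accesses.  The helpers below emulate 1- and 2-byte reads and
 * writes by operating on the containing 32-bit word and shifting/masking
 * out the requested sub-field.
 */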
107 static uint8_t
108 hread1(struct sdhc_host *hp, bus_size_t reg)
109 {
110
111 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_32BIT_ACCESS))
112 return bus_space_read_1(hp->iot, hp->ioh, reg);
113 return bus_space_read_4(hp->iot, hp->ioh, reg & -4) >> (8 * (reg & 3));
114 }
115
116 static uint16_t
117 hread2(struct sdhc_host *hp, bus_size_t reg)
118 {
119
120 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_32BIT_ACCESS))
121 return bus_space_read_2(hp->iot, hp->ioh, reg);
122 return bus_space_read_4(hp->iot, hp->ioh, reg & -4) >> (8 * (reg & 2));
123 }
124
125 #define HREAD1(hp, reg) hread1(hp, reg)
126 #define HREAD2(hp, reg) hread2(hp, reg)
127 #define HREAD4(hp, reg) \
128 (bus_space_read_4((hp)->iot, (hp)->ioh, (reg)))
129
130
131 static void
132 hwrite1(struct sdhc_host *hp, bus_size_t o, uint8_t val)
133 {
134
135 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_32BIT_ACCESS)) {
136 bus_space_write_1(hp->iot, hp->ioh, o, val);
137 } else {
138 const size_t shift = 8 * (o & 3);
139 o &= -4;
140 uint32_t tmp = bus_space_read_4(hp->iot, hp->ioh, o);
141 tmp = (val << shift) | (tmp & ~(0xff << shift));
142 bus_space_write_4(hp->iot, hp->ioh, o, tmp);
143 }
144 }
145
146 static void
147 hwrite2(struct sdhc_host *hp, bus_size_t o, uint16_t val)
148 {
149
150 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_32BIT_ACCESS)) {
151 bus_space_write_2(hp->iot, hp->ioh, o, val);
152 } else {
153 const size_t shift = 8 * (o & 2);
154 o &= -4;
155 uint32_t tmp = bus_space_read_4(hp->iot, hp->ioh, o);
156 tmp = (val << shift) | (tmp & ~(0xffff << shift));
157 bus_space_write_4(hp->iot, hp->ioh, o, tmp);
158 }
159 }
160
161 static void
162 hwrite4(struct sdhc_host *hp, bus_size_t o, uint32_t val)
163 {
164
165 bus_space_write_4(hp->iot, hp->ioh, o, val);
166 }
167
168 #define HWRITE1(hp, reg, val) hwrite1(hp, reg, val)
169 #define HWRITE2(hp, reg, val) hwrite2(hp, reg, val)
170 #define HWRITE4(hp, reg, val) hwrite4(hp, reg, val)
171
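/*
 * Register bit set/clear helpers.  Each performs a read-modify-write of the
 * named register and skips the bus access entirely when no bits are given.
 */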
172 #define HCLR1(hp, reg, bits) \
173 do if (bits) HWRITE1((hp), (reg), HREAD1((hp), (reg)) & ~(bits)); while (0)
174 #define HCLR2(hp, reg, bits) \
175 do if (bits) HWRITE2((hp), (reg), HREAD2((hp), (reg)) & ~(bits)); while (0)
176 #define HCLR4(hp, reg, bits) \
177 do if (bits) HWRITE4((hp), (reg), HREAD4((hp), (reg)) & ~(bits)); while (0)
178 #define HSET1(hp, reg, bits) \
179 do if (bits) HWRITE1((hp), (reg), HREAD1((hp), (reg)) | (bits)); while (0)
180 #define HSET2(hp, reg, bits) \
181 do if (bits) HWRITE2((hp), (reg), HREAD2((hp), (reg)) | (bits)); while (0)
182 #define HSET4(hp, reg, bits) \
183 do if (bits) HWRITE4((hp), (reg), HREAD4((hp), (reg)) | (bits)); while (0)
184
185 static int sdhc_host_reset(sdmmc_chipset_handle_t);
186 static int sdhc_host_reset1(sdmmc_chipset_handle_t);
187 static uint32_t sdhc_host_ocr(sdmmc_chipset_handle_t);
188 static int sdhc_host_maxblklen(sdmmc_chipset_handle_t);
189 static int sdhc_card_detect(sdmmc_chipset_handle_t);
190 static int sdhc_write_protect(sdmmc_chipset_handle_t);
191 static int sdhc_bus_power(sdmmc_chipset_handle_t, uint32_t);
192 static int sdhc_bus_clock_ddr(sdmmc_chipset_handle_t, int, bool);
193 static int sdhc_bus_width(sdmmc_chipset_handle_t, int);
194 static int sdhc_bus_rod(sdmmc_chipset_handle_t, int);
195 static void sdhc_card_enable_intr(sdmmc_chipset_handle_t, int);
196 static void sdhc_card_intr_ack(sdmmc_chipset_handle_t);
197 static void sdhc_exec_command(sdmmc_chipset_handle_t,
198 struct sdmmc_command *);
199 static int sdhc_signal_voltage(sdmmc_chipset_handle_t, int);
200 static int sdhc_execute_tuning1(struct sdhc_host *, int);
201 static int sdhc_execute_tuning(sdmmc_chipset_handle_t, int);
202 static void sdhc_tuning_timer(void *);
203 static void sdhc_hw_reset(sdmmc_chipset_handle_t);
204 static int sdhc_start_command(struct sdhc_host *, struct sdmmc_command *);
205 static int sdhc_wait_state(struct sdhc_host *, uint32_t, uint32_t);
206 static int sdhc_soft_reset(struct sdhc_host *, int);
207 static int sdhc_wait_intr(struct sdhc_host *, int, int, bool);
208 static void sdhc_transfer_data(struct sdhc_host *, struct sdmmc_command *);
209 static int sdhc_transfer_data_dma(struct sdhc_host *, struct sdmmc_command *);
210 static int sdhc_transfer_data_pio(struct sdhc_host *, struct sdmmc_command *);
211 static void sdhc_read_data_pio(struct sdhc_host *, uint8_t *, u_int);
212 static void sdhc_write_data_pio(struct sdhc_host *, uint8_t *, u_int);
213 static void esdhc_read_data_pio(struct sdhc_host *, uint8_t *, u_int);
214 static void esdhc_write_data_pio(struct sdhc_host *, uint8_t *, u_int);
215
216 static struct sdmmc_chip_functions sdhc_functions = {
217 /* host controller reset */
218 .host_reset = sdhc_host_reset,
219
220 /* host controller capabilities */
221 .host_ocr = sdhc_host_ocr,
222 .host_maxblklen = sdhc_host_maxblklen,
223
224 /* card detection */
225 .card_detect = sdhc_card_detect,
226
227 /* write protect */
228 .write_protect = sdhc_write_protect,
229
230 /* bus power, clock frequency, width and ROD(OpenDrain/PushPull) */
231 .bus_power = sdhc_bus_power,
232 .bus_clock = NULL, /* see sdhc_bus_clock_ddr */
233 .bus_width = sdhc_bus_width,
234 .bus_rod = sdhc_bus_rod,
235
236 /* command execution */
237 .exec_command = sdhc_exec_command,
238
239 /* card interrupt */
240 .card_enable_intr = sdhc_card_enable_intr,
241 .card_intr_ack = sdhc_card_intr_ack,
242
243 /* UHS functions */
244 .signal_voltage = sdhc_signal_voltage,
245 .bus_clock_ddr = sdhc_bus_clock_ddr,
246 .execute_tuning = sdhc_execute_tuning,
247 .hw_reset = sdhc_hw_reset,
248 };
249
250 static int
251 sdhc_cfprint(void *aux, const char *pnp)
252 {
253 const struct sdmmcbus_attach_args * const saa = aux;
254 const struct sdhc_host * const hp = saa->saa_sch;
255
256 if (pnp) {
257 aprint_normal("sdmmc at %s", pnp);
258 }
259 for (size_t host = 0; host < hp->sc->sc_nhosts; host++) {
260 if (hp->sc->sc_host[host] == hp) {
261 aprint_normal(" slot %zu", host);
262 }
263 }
264
265 return UNCONF;
266 }
267
268 /*
269 * Called by attachment driver. For each SD card slot there is one SD
270 * host controller standard register set. (1.3)
271 */
272 int
273 sdhc_host_found(struct sdhc_softc *sc, bus_space_tag_t iot,
274 bus_space_handle_t ioh, bus_size_t iosize)
275 {
276 struct sdmmcbus_attach_args saa;
277 struct sdhc_host *hp;
278 uint32_t caps, caps2;
279 uint16_t sdhcver;
280 int error;
281
282 /* Allocate one more host structure. */
283 hp = malloc(sizeof(struct sdhc_host), M_DEVBUF, M_WAITOK|M_ZERO);
284 if (hp == NULL) {
285 aprint_error_dev(sc->sc_dev,
286 "couldn't alloc memory (sdhc host)\n");
287 goto err1;
288 }
289 sc->sc_host[sc->sc_nhosts++] = hp;
290
291 /* Fill in the new host structure. */
292 hp->sc = sc;
293 hp->iot = iot;
294 hp->ioh = ioh;
295 hp->ios = iosize;
296 hp->dmat = sc->sc_dmat;
297
298 mutex_init(&hp->intr_lock, MUTEX_DEFAULT, IPL_SDMMC);
299 cv_init(&hp->intr_cv, "sdhcintr");
300 callout_init(&hp->tuning_timer, CALLOUT_MPSAFE);
301 callout_setfunc(&hp->tuning_timer, sdhc_tuning_timer, hp);
302
303 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_USDHC)) {
304 sdhcver = SDHC_SPEC_VERS_300 << SDHC_SPEC_VERS_SHIFT;
305 } else if (ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED)) {
306 sdhcver = HREAD4(hp, SDHC_ESDHC_HOST_CTL_VERSION);
307 } else if (iosize <= SDHC_HOST_CTL_VERSION) {
308 sdhcver = SDHC_SPEC_NOVERS << SDHC_SPEC_VERS_SHIFT;
309 } else {
310 sdhcver = HREAD2(hp, SDHC_HOST_CTL_VERSION);
311 }
312 aprint_normal_dev(sc->sc_dev, "SDHC ");
313 hp->specver = SDHC_SPEC_VERSION(sdhcver);
314 switch (SDHC_SPEC_VERSION(sdhcver)) {
315 case SDHC_SPEC_VERS_100:
316 aprint_normal("1.0");
317 break;
318 case SDHC_SPEC_VERS_200:
319 aprint_normal("2.0");
320 break;
321 case SDHC_SPEC_VERS_300:
322 aprint_normal("3.0");
323 break;
324 case SDHC_SPEC_VERS_400:
325 aprint_normal("4.0");
326 break;
327 case SDHC_SPEC_NOVERS:
328 hp->specver = -1;
329 aprint_normal("NO-VERS");
330 break;
331 default:
332 aprint_normal("unknown version(0x%x)",
333 SDHC_SPEC_VERSION(sdhcver));
334 break;
335 }
336 if (SDHC_SPEC_VERSION(sdhcver) != SDHC_SPEC_NOVERS)
337 aprint_normal(", rev %u", SDHC_VENDOR_VERSION(sdhcver));
338
339 /*
340 * Reset the host controller and enable interrupts.
341 */
342 (void)sdhc_host_reset(hp);
343
344 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_USDHC)) {
345 /* init uSDHC registers */
346 HWRITE4(hp, SDHC_MMC_BOOT, 0);
347 HWRITE4(hp, SDHC_HOST_CTL, SDHC_USDHC_BURST_LEN_EN |
348 SDHC_USDHC_HOST_CTL_RESV23 | SDHC_USDHC_EMODE_LE);
349 HWRITE4(hp, SDHC_WATERMARK_LEVEL,
350 (0x10 << SDHC_WATERMARK_WR_BRST_SHIFT) |
351 (0x40 << SDHC_WATERMARK_WRITE_SHIFT) |
352 (0x10 << SDHC_WATERMARK_RD_BRST_SHIFT) |
353 (0x40 << SDHC_WATERMARK_READ_SHIFT));
354 HSET4(hp, SDHC_VEND_SPEC,
355 SDHC_VEND_SPEC_MBO |
356 SDHC_VEND_SPEC_CARD_CLK_SOFT_EN |
357 SDHC_VEND_SPEC_IPG_PERCLK_SOFT_EN |
358 SDHC_VEND_SPEC_HCLK_SOFT_EN |
359 SDHC_VEND_SPEC_IPG_CLK_SOFT_EN |
360 SDHC_VEND_SPEC_AC12_WR_CHKBUSY_EN |
361 SDHC_VEND_SPEC_FRC_SDCLK_ON);
362 }
363
364 /* Determine host capabilities. */
365 if (ISSET(sc->sc_flags, SDHC_FLAG_HOSTCAPS)) {
366 caps = sc->sc_caps;
367 caps2 = sc->sc_caps2;
368 } else if (ISSET(hp->sc->sc_flags, SDHC_FLAG_USDHC)) {
   369 		/* the uSDHC capability register is a little different */
370 caps = HREAD4(hp, SDHC_CAPABILITIES);
371 caps |= SDHC_8BIT_SUPP;
372 if (caps & SDHC_ADMA1_SUPP)
373 caps |= SDHC_ADMA2_SUPP;
374 sc->sc_caps = caps;
375 /* uSDHC has no SDHC_CAPABILITIES2 register */
376 caps2 = sc->sc_caps2 = SDHC_SDR50_SUPP | SDHC_DDR50_SUPP;
377 } else {
378 caps = sc->sc_caps = HREAD4(hp, SDHC_CAPABILITIES);
379 if (hp->specver >= SDHC_SPEC_VERS_300) {
380 caps2 = sc->sc_caps2 = HREAD4(hp, SDHC_CAPABILITIES2);
381 } else {
382 caps2 = sc->sc_caps2 = 0;
383 }
384 }
385
386 const u_int retuning_mode = (caps2 >> SDHC_RETUNING_MODES_SHIFT) &
387 SDHC_RETUNING_MODES_MASK;
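	/*
	 * With re-tuning mode 1 the timer count field advertises a re-tuning
	 * period of 2^(n-1) seconds; the value 0xf means the period is
	 * supplied by other means, so treat it as "no timer".
	 */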
388 if (retuning_mode == SDHC_RETUNING_MODE_1) {
389 hp->tuning_timer_count = (caps2 >> SDHC_TIMER_COUNT_SHIFT) &
390 SDHC_TIMER_COUNT_MASK;
391 if (hp->tuning_timer_count == 0xf)
392 hp->tuning_timer_count = 0;
393 if (hp->tuning_timer_count)
394 hp->tuning_timer_count =
395 1 << (hp->tuning_timer_count - 1);
396 }
397
398 /*
399 * Use DMA if the host system and the controller support it.
   400 	 * Supports an integrated or external DMA engine, with or without
401 * SDHC_DMA_ENABLE in the command.
402 */
   403 	if (ISSET(sc->sc_flags, SDHC_FLAG_FORCE_DMA) ||
   404 	    (ISSET(sc->sc_flags, SDHC_FLAG_USE_DMA) &&
   405 	     ISSET(caps, SDHC_DMA_SUPPORT))) {
406 SET(hp->flags, SHF_USE_DMA);
407
408 if (ISSET(sc->sc_flags, SDHC_FLAG_USE_ADMA2) &&
409 ISSET(caps, SDHC_ADMA2_SUPP)) {
410 SET(hp->flags, SHF_MODE_DMAEN);
411 /*
412 * 64-bit mode was present in the 2.00 spec, removed
413 * from 3.00, and re-added in 4.00 with a different
414 * descriptor layout. We only support 2.00 and 3.00
415 * descriptors for now.
416 */
417 if (hp->specver == SDHC_SPEC_VERS_200 &&
418 ISSET(caps, SDHC_64BIT_SYS_BUS)) {
419 SET(hp->flags, SHF_USE_ADMA2_64);
420 aprint_normal(", 64-bit ADMA2");
421 } else {
422 SET(hp->flags, SHF_USE_ADMA2_32);
423 aprint_normal(", 32-bit ADMA2");
424 }
425 } else {
426 if (!ISSET(sc->sc_flags, SDHC_FLAG_EXTERNAL_DMA) ||
427 ISSET(sc->sc_flags, SDHC_FLAG_EXTDMA_DMAEN))
428 SET(hp->flags, SHF_MODE_DMAEN);
429 if (sc->sc_vendor_transfer_data_dma) {
430 aprint_normal(", platform DMA");
431 } else {
432 aprint_normal(", SDMA");
433 }
434 }
435 } else {
436 aprint_normal(", PIO");
437 }
438
439 /*
440 * Determine the base clock frequency. (2.2.24)
441 */
442 if (hp->specver >= SDHC_SPEC_VERS_300) {
443 hp->clkbase = SDHC_BASE_V3_FREQ_KHZ(caps);
444 } else {
445 hp->clkbase = SDHC_BASE_FREQ_KHZ(caps);
446 }
447 if (hp->clkbase == 0 ||
448 ISSET(sc->sc_flags, SDHC_FLAG_NO_CLKBASE)) {
449 if (sc->sc_clkbase == 0) {
450 /* The attachment driver must tell us. */
451 aprint_error_dev(sc->sc_dev,
452 "unknown base clock frequency\n");
453 goto err;
454 }
455 hp->clkbase = sc->sc_clkbase;
456 }
457 if (hp->clkbase < 10000 || hp->clkbase > 10000 * 256) {
458 /* SDHC 1.0 supports only 10-63 MHz. */
459 aprint_error_dev(sc->sc_dev,
460 "base clock frequency out of range: %u MHz\n",
461 hp->clkbase / 1000);
462 goto err;
463 }
464 aprint_normal(", %u kHz", hp->clkbase);
465
466 /*
467 * XXX Set the data timeout counter value according to
468 * capabilities. (2.2.15)
469 */
470 HWRITE1(hp, SDHC_TIMEOUT_CTL, SDHC_TIMEOUT_MAX);
471 #if 1
472 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED))
473 HWRITE4(hp, SDHC_NINTR_STATUS, SDHC_CMD_TIMEOUT_ERROR << 16);
474 #endif
475
476 if (ISSET(caps, SDHC_EMBEDDED_SLOT))
477 aprint_normal(", embedded slot");
478
479 /*
480 * Determine SD bus voltage levels supported by the controller.
481 */
482 aprint_normal(",");
483 if (ISSET(caps, SDHC_HIGH_SPEED_SUPP)) {
484 SET(hp->ocr, MMC_OCR_HCS);
485 aprint_normal(" HS");
486 }
487 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_NO_1_8_V)) {
488 if (ISSET(caps2, SDHC_SDR50_SUPP)) {
489 SET(hp->ocr, MMC_OCR_S18A);
490 aprint_normal(" SDR50");
491 }
492 if (ISSET(caps2, SDHC_DDR50_SUPP)) {
493 SET(hp->ocr, MMC_OCR_S18A);
494 aprint_normal(" DDR50");
495 }
496 if (ISSET(caps2, SDHC_SDR104_SUPP)) {
497 SET(hp->ocr, MMC_OCR_S18A);
498 aprint_normal(" SDR104 HS200");
499 }
500 if (ISSET(caps, SDHC_VOLTAGE_SUPP_1_8V)) {
501 SET(hp->ocr, MMC_OCR_1_65V_1_95V);
502 aprint_normal(" 1.8V");
503 }
504 }
505 if (ISSET(caps, SDHC_VOLTAGE_SUPP_3_0V)) {
506 SET(hp->ocr, MMC_OCR_2_9V_3_0V | MMC_OCR_3_0V_3_1V);
507 aprint_normal(" 3.0V");
508 }
509 if (ISSET(caps, SDHC_VOLTAGE_SUPP_3_3V)) {
510 SET(hp->ocr, MMC_OCR_3_2V_3_3V | MMC_OCR_3_3V_3_4V);
511 aprint_normal(" 3.3V");
512 }
513 if (hp->specver >= SDHC_SPEC_VERS_300) {
514 aprint_normal(", re-tuning mode %d", retuning_mode + 1);
515 if (hp->tuning_timer_count)
516 aprint_normal(" (%us timer)", hp->tuning_timer_count);
517 }
518
519 /*
520 * Determine the maximum block length supported by the host
521 * controller. (2.2.24)
522 */
523 switch((caps >> SDHC_MAX_BLK_LEN_SHIFT) & SDHC_MAX_BLK_LEN_MASK) {
524 case SDHC_MAX_BLK_LEN_512:
525 hp->maxblklen = 512;
526 break;
527
528 case SDHC_MAX_BLK_LEN_1024:
529 hp->maxblklen = 1024;
530 break;
531
532 case SDHC_MAX_BLK_LEN_2048:
533 hp->maxblklen = 2048;
534 break;
535
536 case SDHC_MAX_BLK_LEN_4096:
537 hp->maxblklen = 4096;
538 break;
539
540 default:
541 aprint_error_dev(sc->sc_dev, "max block length unknown\n");
542 goto err;
543 }
544 aprint_normal(", %u byte blocks", hp->maxblklen);
545 aprint_normal("\n");
546
547 if (ISSET(hp->flags, SHF_USE_ADMA2_MASK)) {
548 int rseg;
549
550 /* Allocate ADMA2 descriptor memory */
551 error = bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE, PAGE_SIZE,
552 PAGE_SIZE, hp->adma_segs, 1, &rseg, BUS_DMA_WAITOK);
553 if (error) {
554 aprint_error_dev(sc->sc_dev,
555 "ADMA2 dmamem_alloc failed (%d)\n", error);
556 goto adma_done;
557 }
558 error = bus_dmamem_map(sc->sc_dmat, hp->adma_segs, rseg,
559 PAGE_SIZE, (void **)&hp->adma2, BUS_DMA_WAITOK);
560 if (error) {
561 aprint_error_dev(sc->sc_dev,
562 "ADMA2 dmamem_map failed (%d)\n", error);
563 goto adma_done;
564 }
565 error = bus_dmamap_create(sc->sc_dmat, PAGE_SIZE, 1, PAGE_SIZE,
566 0, BUS_DMA_WAITOK, &hp->adma_map);
567 if (error) {
568 aprint_error_dev(sc->sc_dev,
569 "ADMA2 dmamap_create failed (%d)\n", error);
570 goto adma_done;
571 }
572 error = bus_dmamap_load(sc->sc_dmat, hp->adma_map,
573 hp->adma2, PAGE_SIZE, NULL,
574 BUS_DMA_WAITOK|BUS_DMA_WRITE);
575 if (error) {
576 aprint_error_dev(sc->sc_dev,
577 "ADMA2 dmamap_load failed (%d)\n", error);
578 goto adma_done;
579 }
580
581 memset(hp->adma2, 0, PAGE_SIZE);
582
583 adma_done:
584 if (error)
585 CLR(hp->flags, SHF_USE_ADMA2_MASK);
586 }
587
588 /*
589 * Attach the generic SD/MMC bus driver. (The bus driver must
590 * not invoke any chipset functions before it is attached.)
591 */
592 memset(&saa, 0, sizeof(saa));
593 saa.saa_busname = "sdmmc";
594 saa.saa_sct = &sdhc_functions;
595 saa.saa_sch = hp;
596 saa.saa_dmat = hp->dmat;
597 saa.saa_clkmax = hp->clkbase;
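	/*
	 * The minimum SDCLK frequency follows from the largest divisor the
	 * controller's divider scheme can express: the CGM or DVS schemes on
	 * enhanced controllers, a vendor-specific divisor mask, the 10-bit
	 * SDHC 3.0 divider, or the 8-bit 1.0/2.0 divider.
	 */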
598 if (ISSET(sc->sc_flags, SDHC_FLAG_HAVE_CGM))
599 saa.saa_clkmin = hp->clkbase / 256 / 2046;
600 else if (ISSET(sc->sc_flags, SDHC_FLAG_HAVE_DVS))
601 saa.saa_clkmin = hp->clkbase / 256 / 16;
602 else if (hp->sc->sc_clkmsk != 0)
603 saa.saa_clkmin = hp->clkbase / (hp->sc->sc_clkmsk >>
604 (ffs(hp->sc->sc_clkmsk) - 1));
605 else if (hp->specver >= SDHC_SPEC_VERS_300)
606 saa.saa_clkmin = hp->clkbase / 0x3ff;
607 else
608 saa.saa_clkmin = hp->clkbase / 256;
609 if (!ISSET(sc->sc_flags, SDHC_FLAG_NO_AUTO_STOP))
610 saa.saa_caps |= SMC_CAPS_AUTO_STOP;
611 saa.saa_caps |= SMC_CAPS_4BIT_MODE;
612 if (ISSET(sc->sc_flags, SDHC_FLAG_8BIT_MODE))
613 saa.saa_caps |= SMC_CAPS_8BIT_MODE;
614 if (ISSET(caps, SDHC_HIGH_SPEED_SUPP))
615 saa.saa_caps |= SMC_CAPS_SD_HIGHSPEED;
616 if (ISSET(caps2, SDHC_SDR104_SUPP))
617 saa.saa_caps |= SMC_CAPS_UHS_SDR104 |
618 SMC_CAPS_UHS_SDR50 |
619 SMC_CAPS_MMC_HS200;
620 if (ISSET(caps2, SDHC_SDR50_SUPP))
621 saa.saa_caps |= SMC_CAPS_UHS_SDR50;
622 if (ISSET(caps2, SDHC_DDR50_SUPP))
623 saa.saa_caps |= SMC_CAPS_UHS_DDR50;
624 if (ISSET(hp->flags, SHF_USE_DMA)) {
625 saa.saa_caps |= SMC_CAPS_DMA;
626 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED))
627 saa.saa_caps |= SMC_CAPS_MULTI_SEG_DMA;
628 }
629 if (ISSET(sc->sc_flags, SDHC_FLAG_SINGLE_ONLY))
630 saa.saa_caps |= SMC_CAPS_SINGLE_ONLY;
631 if (ISSET(sc->sc_flags, SDHC_FLAG_POLL_CARD_DET))
632 saa.saa_caps |= SMC_CAPS_POLL_CARD_DET;
633
634 if (ISSET(sc->sc_flags, SDHC_FLAG_BROKEN_ADMA2_ZEROLEN))
635 saa.saa_max_seg = 65535;
636
637 hp->sdmmc = config_found(sc->sc_dev, &saa, sdhc_cfprint);
638
639 return 0;
640
641 err:
642 callout_destroy(&hp->tuning_timer);
643 cv_destroy(&hp->intr_cv);
644 mutex_destroy(&hp->intr_lock);
645 free(hp, M_DEVBUF);
646 sc->sc_host[--sc->sc_nhosts] = NULL;
647 err1:
648 return 1;
649 }
650
651 int
652 sdhc_detach(struct sdhc_softc *sc, int flags)
653 {
654 struct sdhc_host *hp;
655 int rv = 0;
656
657 for (size_t n = 0; n < sc->sc_nhosts; n++) {
658 hp = sc->sc_host[n];
659 if (hp == NULL)
660 continue;
661 if (hp->sdmmc != NULL) {
662 rv = config_detach(hp->sdmmc, flags);
663 if (rv)
664 break;
665 hp->sdmmc = NULL;
666 }
667 /* disable interrupts */
668 if ((flags & DETACH_FORCE) == 0) {
669 mutex_enter(&hp->intr_lock);
670 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_32BIT_ACCESS)) {
671 HWRITE4(hp, SDHC_NINTR_SIGNAL_EN, 0);
672 } else {
673 HWRITE2(hp, SDHC_NINTR_SIGNAL_EN, 0);
674 }
675 sdhc_soft_reset(hp, SDHC_RESET_ALL);
676 mutex_exit(&hp->intr_lock);
677 }
678 callout_halt(&hp->tuning_timer, NULL);
679 callout_destroy(&hp->tuning_timer);
680 cv_destroy(&hp->intr_cv);
681 mutex_destroy(&hp->intr_lock);
682 if (hp->ios > 0) {
683 bus_space_unmap(hp->iot, hp->ioh, hp->ios);
684 hp->ios = 0;
685 }
686 if (ISSET(hp->flags, SHF_USE_ADMA2_MASK)) {
687 bus_dmamap_unload(sc->sc_dmat, hp->adma_map);
688 bus_dmamap_destroy(sc->sc_dmat, hp->adma_map);
689 bus_dmamem_unmap(sc->sc_dmat, hp->adma2, PAGE_SIZE);
690 bus_dmamem_free(sc->sc_dmat, hp->adma_segs, 1);
691 }
692 free(hp, M_DEVBUF);
693 sc->sc_host[n] = NULL;
694 }
695
696 return rv;
697 }
698
699 bool
700 sdhc_suspend(device_t dev, const pmf_qual_t *qual)
701 {
702 struct sdhc_softc *sc = device_private(dev);
703 struct sdhc_host *hp;
704 size_t i;
705
706 /* XXX poll for command completion or suspend command
707 * in progress */
708
709 /* Save the host controller state. */
710 for (size_t n = 0; n < sc->sc_nhosts; n++) {
711 hp = sc->sc_host[n];
712 if (ISSET(sc->sc_flags, SDHC_FLAG_32BIT_ACCESS)) {
713 for (i = 0; i < sizeof hp->regs; i += 4) {
714 uint32_t v = HREAD4(hp, i);
715 hp->regs[i + 0] = (v >> 0);
716 hp->regs[i + 1] = (v >> 8);
717 if (i + 3 < sizeof hp->regs) {
718 hp->regs[i + 2] = (v >> 16);
719 hp->regs[i + 3] = (v >> 24);
720 }
721 }
722 } else {
723 for (i = 0; i < sizeof hp->regs; i++) {
724 hp->regs[i] = HREAD1(hp, i);
725 }
726 }
727 }
728 return true;
729 }
730
731 bool
732 sdhc_resume(device_t dev, const pmf_qual_t *qual)
733 {
734 struct sdhc_softc *sc = device_private(dev);
735 struct sdhc_host *hp;
736 size_t i;
737
738 /* Restore the host controller state. */
739 for (size_t n = 0; n < sc->sc_nhosts; n++) {
740 hp = sc->sc_host[n];
741 (void)sdhc_host_reset(hp);
742 if (ISSET(sc->sc_flags, SDHC_FLAG_32BIT_ACCESS)) {
743 for (i = 0; i < sizeof hp->regs; i += 4) {
744 if (i + 3 < sizeof hp->regs) {
745 HWRITE4(hp, i,
746 (hp->regs[i + 0] << 0)
747 | (hp->regs[i + 1] << 8)
748 | (hp->regs[i + 2] << 16)
749 | (hp->regs[i + 3] << 24));
750 } else {
751 HWRITE4(hp, i,
752 (hp->regs[i + 0] << 0)
753 | (hp->regs[i + 1] << 8));
754 }
755 }
756 } else {
757 for (i = 0; i < sizeof hp->regs; i++) {
758 HWRITE1(hp, i, hp->regs[i]);
759 }
760 }
761 }
762 return true;
763 }
764
765 bool
766 sdhc_shutdown(device_t dev, int flags)
767 {
768 struct sdhc_softc *sc = device_private(dev);
769 struct sdhc_host *hp;
770
771 /* XXX chip locks up if we don't disable it before reboot. */
772 for (size_t i = 0; i < sc->sc_nhosts; i++) {
773 hp = sc->sc_host[i];
774 (void)sdhc_host_reset(hp);
775 }
776 return true;
777 }
778
779 /*
780 * Reset the host controller. Called during initialization, when
781 * cards are removed, upon resume, and during error recovery.
782 */
783 static int
784 sdhc_host_reset1(sdmmc_chipset_handle_t sch)
785 {
786 struct sdhc_host *hp = (struct sdhc_host *)sch;
787 uint32_t sdhcimask;
788 int error;
789
790 KASSERT(mutex_owned(&hp->intr_lock));
791
792 /* Disable all interrupts. */
793 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_32BIT_ACCESS)) {
794 HWRITE4(hp, SDHC_NINTR_SIGNAL_EN, 0);
795 } else {
796 HWRITE2(hp, SDHC_NINTR_SIGNAL_EN, 0);
797 }
798
799 /* Let sdhc_bus_power restore power */
800 hp->vdd = 0;
801
802 /*
803 * Reset the entire host controller and wait up to 100ms for
804 * the controller to clear the reset bit.
805 */
806 error = sdhc_soft_reset(hp, SDHC_RESET_ALL);
807 if (error)
808 goto out;
809
810 /* Set data timeout counter value to max for now. */
811 HWRITE1(hp, SDHC_TIMEOUT_CTL, SDHC_TIMEOUT_MAX);
812 #if 1
813 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED))
814 HWRITE4(hp, SDHC_NINTR_STATUS, SDHC_CMD_TIMEOUT_ERROR << 16);
815 #endif
816
817 /* Enable interrupts. */
818 sdhcimask = SDHC_CARD_REMOVAL | SDHC_CARD_INSERTION |
819 SDHC_BUFFER_READ_READY | SDHC_BUFFER_WRITE_READY |
820 SDHC_DMA_INTERRUPT | SDHC_BLOCK_GAP_EVENT |
821 SDHC_TRANSFER_COMPLETE | SDHC_COMMAND_COMPLETE;
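	/*
	 * Enable status reporting for the events above plus all error
	 * conditions, but leave the buffer read/write ready bits out of the
	 * signal enable; the PIO and tuning paths turn those interrupts on
	 * only while they need them.
	 */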
822 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_32BIT_ACCESS)) {
823 sdhcimask |= SDHC_EINTR_STATUS_MASK << 16;
824 HWRITE4(hp, SDHC_NINTR_STATUS_EN, sdhcimask);
825 sdhcimask ^=
826 (SDHC_EINTR_STATUS_MASK ^ SDHC_EINTR_SIGNAL_MASK) << 16;
827 sdhcimask ^= SDHC_BUFFER_READ_READY ^ SDHC_BUFFER_WRITE_READY;
828 HWRITE4(hp, SDHC_NINTR_SIGNAL_EN, sdhcimask);
829 } else {
830 HWRITE2(hp, SDHC_NINTR_STATUS_EN, sdhcimask);
831 HWRITE2(hp, SDHC_EINTR_STATUS_EN, SDHC_EINTR_STATUS_MASK);
832 sdhcimask ^= SDHC_BUFFER_READ_READY ^ SDHC_BUFFER_WRITE_READY;
833 HWRITE2(hp, SDHC_NINTR_SIGNAL_EN, sdhcimask);
834 HWRITE2(hp, SDHC_EINTR_SIGNAL_EN, SDHC_EINTR_SIGNAL_MASK);
835 }
836
837 out:
838 return error;
839 }
840
841 static int
842 sdhc_host_reset(sdmmc_chipset_handle_t sch)
843 {
844 struct sdhc_host *hp = (struct sdhc_host *)sch;
845 int error;
846
847 mutex_enter(&hp->intr_lock);
848 error = sdhc_host_reset1(sch);
849 mutex_exit(&hp->intr_lock);
850
851 return error;
852 }
853
854 static uint32_t
855 sdhc_host_ocr(sdmmc_chipset_handle_t sch)
856 {
857 struct sdhc_host *hp = (struct sdhc_host *)sch;
858
859 return hp->ocr;
860 }
861
862 static int
863 sdhc_host_maxblklen(sdmmc_chipset_handle_t sch)
864 {
865 struct sdhc_host *hp = (struct sdhc_host *)sch;
866
867 return hp->maxblklen;
868 }
869
870 /*
871 * Return non-zero if the card is currently inserted.
872 */
873 static int
874 sdhc_card_detect(sdmmc_chipset_handle_t sch)
875 {
876 struct sdhc_host *hp = (struct sdhc_host *)sch;
877 int r;
878
879 if (hp->sc->sc_vendor_card_detect)
880 return (*hp->sc->sc_vendor_card_detect)(hp->sc);
881
882 r = ISSET(HREAD4(hp, SDHC_PRESENT_STATE), SDHC_CARD_INSERTED);
883
884 return r ? 1 : 0;
885 }
886
887 /*
888 * Return non-zero if the card is currently write-protected.
889 */
890 static int
891 sdhc_write_protect(sdmmc_chipset_handle_t sch)
892 {
893 struct sdhc_host *hp = (struct sdhc_host *)sch;
894 int r;
895
896 if (hp->sc->sc_vendor_write_protect)
897 return (*hp->sc->sc_vendor_write_protect)(hp->sc);
898
899 r = ISSET(HREAD4(hp, SDHC_PRESENT_STATE), SDHC_WRITE_PROTECT_SWITCH);
900
901 return r ? 0 : 1;
902 }
903
904 /*
905 * Set or change SD bus voltage and enable or disable SD bus power.
906 * Return zero on success.
907 */
908 static int
909 sdhc_bus_power(sdmmc_chipset_handle_t sch, uint32_t ocr)
910 {
911 struct sdhc_host *hp = (struct sdhc_host *)sch;
912 uint8_t vdd;
913 int error = 0;
914 const uint32_t pcmask =
915 ~(SDHC_BUS_POWER | (SDHC_VOLTAGE_MASK << SDHC_VOLTAGE_SHIFT));
916 uint32_t reg;
917
918 mutex_enter(&hp->intr_lock);
919
920 /*
921 * Disable bus power before voltage change.
922 */
923 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_32BIT_ACCESS)
924 && !ISSET(hp->sc->sc_flags, SDHC_FLAG_NO_PWR0)) {
925 hp->vdd = 0;
926 HWRITE1(hp, SDHC_POWER_CTL, 0);
927 }
928
929 /* If power is disabled, reset the host and return now. */
930 if (ocr == 0) {
931 (void)sdhc_host_reset1(hp);
932 callout_halt(&hp->tuning_timer, &hp->intr_lock);
933 goto out;
934 }
935
936 /*
937 * Select the lowest voltage according to capabilities.
938 */
939 ocr &= hp->ocr;
940 if (ISSET(ocr, MMC_OCR_1_65V_1_95V)) {
941 vdd = SDHC_VOLTAGE_1_8V;
942 } else if (ISSET(ocr, MMC_OCR_2_9V_3_0V|MMC_OCR_3_0V_3_1V)) {
943 vdd = SDHC_VOLTAGE_3_0V;
944 } else if (ISSET(ocr, MMC_OCR_3_2V_3_3V|MMC_OCR_3_3V_3_4V)) {
945 vdd = SDHC_VOLTAGE_3_3V;
946 } else {
947 /* Unsupported voltage level requested. */
948 error = EINVAL;
949 goto out;
950 }
951
952 /*
   953 	 * Did the voltage change?
954 */
955 if (vdd == hp->vdd)
956 goto out;
957
958 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED)) {
959 /*
960 * Enable bus power. Wait at least 1 ms (or 74 clocks) plus
961 * voltage ramp until power rises.
962 */
963
964 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_SINGLE_POWER_WRITE)) {
965 HWRITE1(hp, SDHC_POWER_CTL,
966 (vdd << SDHC_VOLTAGE_SHIFT) | SDHC_BUS_POWER);
967 } else {
968 reg = HREAD1(hp, SDHC_POWER_CTL) & pcmask;
969 HWRITE1(hp, SDHC_POWER_CTL, reg);
970 sdmmc_delay(1);
971 reg |= (vdd << SDHC_VOLTAGE_SHIFT);
972 HWRITE1(hp, SDHC_POWER_CTL, reg);
973 sdmmc_delay(1);
974 reg |= SDHC_BUS_POWER;
975 HWRITE1(hp, SDHC_POWER_CTL, reg);
976 sdmmc_delay(10000);
977 }
978
979 /*
980 * The host system may not power the bus due to battery low,
981 * etc. In that case, the host controller should clear the
982 * bus power bit.
983 */
984 if (!ISSET(HREAD1(hp, SDHC_POWER_CTL), SDHC_BUS_POWER)) {
985 error = ENXIO;
986 goto out;
987 }
988 }
989
990 /* power successfully changed */
991 hp->vdd = vdd;
992
993 out:
994 mutex_exit(&hp->intr_lock);
995
996 return error;
997 }
998
999 /*
1000 * Return the smallest possible base clock frequency divisor value
1001 * for the CLOCK_CTL register to produce `freq' (KHz).
1002 */
1003 static bool
1004 sdhc_clock_divisor(struct sdhc_host *hp, u_int freq, u_int *divp)
1005 {
1006 u_int div;
1007
1008 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_HAVE_CGM)) {
1009 for (div = hp->clkbase / freq; div <= 0x3ff; div++) {
1010 if ((hp->clkbase / div) <= freq) {
1011 *divp = SDHC_SDCLK_CGM
1012 | ((div & 0x300) << SDHC_SDCLK_XDIV_SHIFT)
1013 | ((div & 0x0ff) << SDHC_SDCLK_DIV_SHIFT);
1014 //freq = hp->clkbase / div;
1015 return true;
1016 }
1017 }
1018 /* No divisor found. */
1019 return false;
1020 }
1021 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_HAVE_DVS)) {
1022 u_int dvs = (hp->clkbase + freq - 1) / freq;
1023 u_int roundup = dvs & 1;
1024 for (dvs >>= 1, div = 1; div <= 256; div <<= 1, dvs >>= 1) {
1025 if (dvs + roundup <= 16) {
1026 dvs += roundup - 1;
1027 *divp = (div << SDHC_SDCLK_DIV_SHIFT)
1028 | (dvs << SDHC_SDCLK_DVS_SHIFT);
1029 DPRINTF(2,
1030 ("%s: divisor for freq %u is %u * %u\n",
1031 HDEVNAME(hp), freq, div * 2, dvs + 1));
1032 //freq = hp->clkbase / (div * 2) * (dvs + 1);
1033 return true;
1034 }
1035 /*
1036 * If we drop bits, we need to round up the divisor.
1037 */
1038 roundup |= dvs & 1;
1039 }
1040 /* No divisor found. */
1041 return false;
1042 }
1043 if (hp->sc->sc_clkmsk != 0) {
1044 div = howmany(hp->clkbase, freq);
1045 if (div > (hp->sc->sc_clkmsk >> (ffs(hp->sc->sc_clkmsk) - 1)))
1046 return false;
1047 *divp = div << (ffs(hp->sc->sc_clkmsk) - 1);
1048 //freq = hp->clkbase / div;
1049 return true;
1050 }
1051 if (hp->specver >= SDHC_SPEC_VERS_300) {
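		/*
		 * SDHC 3.0 uses a 10-bit divisor N giving SDCLK = base/(2*N),
		 * with N = 0 selecting the undivided base clock.
		 */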
1052 div = howmany(hp->clkbase, freq);
1053 div = div > 1 ? howmany(div, 2) : 0;
1054 if (div > 0x3ff)
1055 return false;
1056 *divp = (((div >> 8) & SDHC_SDCLK_XDIV_MASK)
1057 << SDHC_SDCLK_XDIV_SHIFT) |
1058 (((div >> 0) & SDHC_SDCLK_DIV_MASK)
1059 << SDHC_SDCLK_DIV_SHIFT);
1060 //freq = hp->clkbase / (div ? div * 2 : 1);
1061 return true;
1062 } else {
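		/*
		 * Pre-3.0 hosts only support power-of-two divisors from 1 to
		 * 256, encoded as div/2 in the 8-bit divisor field.
		 */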
1063 for (div = 1; div <= 256; div *= 2) {
1064 if ((hp->clkbase / div) <= freq) {
1065 *divp = (div / 2) << SDHC_SDCLK_DIV_SHIFT;
1066 //freq = hp->clkbase / div;
1067 return true;
1068 }
1069 }
1070 /* No divisor found. */
1071 return false;
1072 }
1073 /* No divisor found. */
1074 return false;
1075 }
1076
1077 /*
1078 * Set or change SDCLK frequency or disable the SD clock.
1079 * Return zero on success.
1080 */
1081 static int
1082 sdhc_bus_clock_ddr(sdmmc_chipset_handle_t sch, int freq, bool ddr)
1083 {
1084 struct sdhc_host *hp = (struct sdhc_host *)sch;
1085 u_int div;
1086 u_int timo;
1087 int16_t reg;
1088 int error = 0;
1089 bool present __diagused;
1090
1091 mutex_enter(&hp->intr_lock);
1092
1093 #ifdef DIAGNOSTIC
1094 present = ISSET(HREAD4(hp, SDHC_PRESENT_STATE), SDHC_CMD_INHIBIT_MASK);
1095
1096 /* Must not stop the clock if commands are in progress. */
1097 if (present && sdhc_card_detect(hp)) {
1098 aprint_normal_dev(hp->sc->sc_dev,
1099 "%s: command in progress\n", __func__);
1100 }
1101 #endif
1102
1103 if (hp->sc->sc_vendor_bus_clock) {
1104 error = (*hp->sc->sc_vendor_bus_clock)(hp->sc, freq);
1105 if (error != 0)
1106 goto out;
1107 }
1108
1109 /*
1110 * Stop SD clock before changing the frequency.
1111 */
1112 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_USDHC)) {
1113 HCLR4(hp, SDHC_VEND_SPEC,
1114 SDHC_VEND_SPEC_CARD_CLK_SOFT_EN |
1115 SDHC_VEND_SPEC_FRC_SDCLK_ON);
1116 if (freq == SDMMC_SDCLK_OFF) {
1117 goto out;
1118 }
1119 } else if (ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED)) {
1120 HCLR4(hp, SDHC_CLOCK_CTL, 0xfff8);
1121 if (freq == SDMMC_SDCLK_OFF) {
1122 HSET4(hp, SDHC_CLOCK_CTL, 0x80f0);
1123 goto out;
1124 }
1125 } else {
1126 HCLR2(hp, SDHC_CLOCK_CTL, SDHC_SDCLK_ENABLE);
1127 if (freq == SDMMC_SDCLK_OFF)
1128 goto out;
1129 }
1130
1131 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_USDHC)) {
1132 if (ddr)
1133 HSET4(hp, SDHC_MIX_CTRL, SDHC_USDHC_DDR_EN);
1134 else
1135 HCLR4(hp, SDHC_MIX_CTRL, SDHC_USDHC_DDR_EN);
1136 } else if (hp->specver >= SDHC_SPEC_VERS_300) {
1137 HCLR2(hp, SDHC_HOST_CTL2, SDHC_UHS_MODE_SELECT_MASK);
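		/*
		 * Pick the UHS timing that matches the requested frequency
		 * (in kHz): SDR104 above 100 MHz, SDR50/DDR50 above 50 MHz,
		 * SDR25/DDR50 above 25 MHz and SDR12 above 400 kHz.
		 */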
1138 if (freq > 100000) {
1139 HSET2(hp, SDHC_HOST_CTL2, SDHC_UHS_MODE_SELECT_SDR104);
1140 } else if (freq > 50000) {
1141 if (ddr) {
1142 HSET2(hp, SDHC_HOST_CTL2,
1143 SDHC_UHS_MODE_SELECT_DDR50);
1144 } else {
1145 HSET2(hp, SDHC_HOST_CTL2,
1146 SDHC_UHS_MODE_SELECT_SDR50);
1147 }
1148 } else if (freq > 25000) {
1149 if (ddr) {
1150 HSET2(hp, SDHC_HOST_CTL2,
1151 SDHC_UHS_MODE_SELECT_DDR50);
1152 } else {
1153 HSET2(hp, SDHC_HOST_CTL2,
1154 SDHC_UHS_MODE_SELECT_SDR25);
1155 }
1156 } else if (freq > 400) {
1157 HSET2(hp, SDHC_HOST_CTL2, SDHC_UHS_MODE_SELECT_SDR12);
1158 }
1159 }
1160
1161 /*
  1162 	 * Slow down the Ricoh 5U823 controller, which isn't reliable
  1163 	 * at a 100 MHz bus clock.
1164 */
1165 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_SLOW_SDR50)) {
1166 if (freq == 100000)
1167 --freq;
1168 }
1169
1170 /*
1171 * Set the minimum base clock frequency divisor.
1172 */
1173 if (!sdhc_clock_divisor(hp, freq, &div)) {
1174 /* Invalid base clock frequency or `freq' value. */
1175 aprint_error_dev(hp->sc->sc_dev,
1176 "Invalid bus clock %d kHz\n", freq);
1177 error = EINVAL;
1178 goto out;
1179 }
1180 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_USDHC)) {
1181 if (ddr) {
1182 /* in ddr mode, divisor >>= 1 */
1183 div = ((div >> 1) & (SDHC_SDCLK_DIV_MASK <<
1184 SDHC_SDCLK_DIV_SHIFT)) |
1185 (div & (SDHC_SDCLK_DVS_MASK <<
1186 SDHC_SDCLK_DVS_SHIFT));
1187 }
1188 for (timo = 1000; timo > 0; timo--) {
1189 if (ISSET(HREAD4(hp, SDHC_PRESENT_STATE), SDHC_SDSTB))
1190 break;
1191 sdmmc_delay(10);
1192 }
1193 HWRITE4(hp, SDHC_CLOCK_CTL,
1194 div | (SDHC_TIMEOUT_MAX << 16) | 0x0f);
1195 } else if (ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED)) {
1196 HWRITE4(hp, SDHC_CLOCK_CTL,
1197 div | (SDHC_TIMEOUT_MAX << 16));
1198 } else {
1199 reg = HREAD2(hp, SDHC_CLOCK_CTL);
1200 reg &= (SDHC_INTCLK_STABLE | SDHC_INTCLK_ENABLE);
1201 HWRITE2(hp, SDHC_CLOCK_CTL, reg | div);
1202 }
1203
1204 /*
1205 * Start internal clock. Wait 10ms for stabilization.
1206 */
1207 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_USDHC)) {
1208 HSET4(hp, SDHC_VEND_SPEC,
1209 SDHC_VEND_SPEC_CARD_CLK_SOFT_EN |
1210 SDHC_VEND_SPEC_FRC_SDCLK_ON);
1211 } else if (ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED)) {
1212 sdmmc_delay(10000);
1213 HSET4(hp, SDHC_CLOCK_CTL,
1214 8 | SDHC_INTCLK_ENABLE | SDHC_INTCLK_STABLE);
1215 } else {
1216 HSET2(hp, SDHC_CLOCK_CTL, SDHC_INTCLK_ENABLE);
1217 for (timo = 1000; timo > 0; timo--) {
1218 if (ISSET(HREAD2(hp, SDHC_CLOCK_CTL),
1219 SDHC_INTCLK_STABLE))
1220 break;
1221 sdmmc_delay(10);
1222 }
1223 if (timo == 0) {
1224 error = ETIMEDOUT;
1225 DPRINTF(1,("%s: timeout\n", __func__));
1226 goto out;
1227 }
1228 }
1229
1230 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED | SDHC_FLAG_USDHC)) {
1231 HSET1(hp, SDHC_SOFTWARE_RESET, SDHC_INIT_ACTIVE);
1232 /*
1233 * Sending 80 clocks at 400kHz takes 200us.
1234 * So delay for that time + slop and then
1235 * check a few times for completion.
1236 */
1237 sdmmc_delay(210);
1238 for (timo = 10; timo > 0; timo--) {
1239 if (!ISSET(HREAD1(hp, SDHC_SOFTWARE_RESET),
1240 SDHC_INIT_ACTIVE))
1241 break;
1242 sdmmc_delay(10);
1243 }
1244 DPRINTF(2,("%s: %u init spins\n", __func__, 10 - timo));
1245
1246 /*
1247 * Enable SD clock.
1248 */
1249 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_USDHC)) {
1250 HSET4(hp, SDHC_VEND_SPEC,
1251 SDHC_VEND_SPEC_CARD_CLK_SOFT_EN |
1252 SDHC_VEND_SPEC_FRC_SDCLK_ON);
1253 } else {
1254 HSET4(hp, SDHC_CLOCK_CTL, SDHC_SDCLK_ENABLE);
1255 }
1256 } else {
1257 /*
1258 * Enable SD clock.
1259 */
1260 HSET2(hp, SDHC_CLOCK_CTL, SDHC_SDCLK_ENABLE);
1261
1262 if (freq > 25000 &&
1263 !ISSET(hp->sc->sc_flags, SDHC_FLAG_NO_HS_BIT))
1264 HSET1(hp, SDHC_HOST_CTL, SDHC_HIGH_SPEED);
1265 else
1266 HCLR1(hp, SDHC_HOST_CTL, SDHC_HIGH_SPEED);
1267 }
1268
1269 if (hp->sc->sc_vendor_bus_clock_post) {
1270 error = (*hp->sc->sc_vendor_bus_clock_post)(hp->sc, freq);
1271 if (error != 0)
1272 goto out;
1273 }
1274
1275 out:
1276 mutex_exit(&hp->intr_lock);
1277
1278 return error;
1279 }
1280
1281 static int
1282 sdhc_bus_width(sdmmc_chipset_handle_t sch, int width)
1283 {
1284 struct sdhc_host *hp = (struct sdhc_host *)sch;
1285 int reg;
1286
1287 switch (width) {
1288 case 1:
1289 case 4:
1290 break;
1291
1292 case 8:
1293 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_8BIT_MODE))
1294 break;
1295 /* FALLTHROUGH */
1296 default:
1297 DPRINTF(0,("%s: unsupported bus width (%d)\n",
1298 HDEVNAME(hp), width));
1299 return 1;
1300 }
1301
1302 if (hp->sc->sc_vendor_bus_width) {
1303 const int error = hp->sc->sc_vendor_bus_width(hp->sc, width);
1304 if (error != 0)
1305 return error;
1306 }
1307
1308 mutex_enter(&hp->intr_lock);
1309
1310 reg = HREAD1(hp, SDHC_HOST_CTL);
1311 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED | SDHC_FLAG_USDHC)) {
1312 reg &= ~(SDHC_4BIT_MODE|SDHC_ESDHC_8BIT_MODE);
1313 if (width == 4)
1314 reg |= SDHC_4BIT_MODE;
1315 else if (width == 8)
1316 reg |= SDHC_ESDHC_8BIT_MODE;
1317 } else {
1318 reg &= ~SDHC_4BIT_MODE;
1319 if (hp->specver >= SDHC_SPEC_VERS_300) {
1320 reg &= ~SDHC_8BIT_MODE;
1321 }
1322 if (width == 4) {
1323 reg |= SDHC_4BIT_MODE;
1324 } else if (width == 8 && hp->specver >= SDHC_SPEC_VERS_300) {
1325 reg |= SDHC_8BIT_MODE;
1326 }
1327 }
1328 HWRITE1(hp, SDHC_HOST_CTL, reg);
1329
1330 mutex_exit(&hp->intr_lock);
1331
1332 return 0;
1333 }
1334
1335 static int
1336 sdhc_bus_rod(sdmmc_chipset_handle_t sch, int on)
1337 {
1338 struct sdhc_host *hp = (struct sdhc_host *)sch;
1339
1340 if (hp->sc->sc_vendor_rod)
1341 return (*hp->sc->sc_vendor_rod)(hp->sc, on);
1342
1343 return 0;
1344 }
1345
1346 static void
1347 sdhc_card_enable_intr(sdmmc_chipset_handle_t sch, int enable)
1348 {
1349 struct sdhc_host *hp = (struct sdhc_host *)sch;
1350
1351 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED | SDHC_FLAG_USDHC)) {
1352 mutex_enter(&hp->intr_lock);
1353 if (enable) {
1354 HSET2(hp, SDHC_NINTR_STATUS_EN, SDHC_CARD_INTERRUPT);
1355 HSET2(hp, SDHC_NINTR_SIGNAL_EN, SDHC_CARD_INTERRUPT);
1356 } else {
1357 HCLR2(hp, SDHC_NINTR_SIGNAL_EN, SDHC_CARD_INTERRUPT);
1358 HCLR2(hp, SDHC_NINTR_STATUS_EN, SDHC_CARD_INTERRUPT);
1359 }
1360 mutex_exit(&hp->intr_lock);
1361 }
1362 }
1363
1364 static void
1365 sdhc_card_intr_ack(sdmmc_chipset_handle_t sch)
1366 {
1367 struct sdhc_host *hp = (struct sdhc_host *)sch;
1368
1369 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED | SDHC_FLAG_USDHC)) {
1370 mutex_enter(&hp->intr_lock);
1371 HSET2(hp, SDHC_NINTR_STATUS_EN, SDHC_CARD_INTERRUPT);
1372 mutex_exit(&hp->intr_lock);
1373 }
1374 }
1375
1376 static int
1377 sdhc_signal_voltage(sdmmc_chipset_handle_t sch, int signal_voltage)
1378 {
1379 struct sdhc_host *hp = (struct sdhc_host *)sch;
1380 int error = 0;
1381
1382 if (hp->specver < SDHC_SPEC_VERS_300)
1383 return EINVAL;
1384
1385 mutex_enter(&hp->intr_lock);
1386 switch (signal_voltage) {
1387 case SDMMC_SIGNAL_VOLTAGE_180:
1388 if (hp->sc->sc_vendor_signal_voltage != NULL) {
1389 error = hp->sc->sc_vendor_signal_voltage(hp->sc,
1390 signal_voltage);
1391 if (error != 0)
1392 break;
1393 }
1394 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_USDHC))
1395 HSET2(hp, SDHC_HOST_CTL2, SDHC_1_8V_SIGNAL_EN);
1396 break;
1397 case SDMMC_SIGNAL_VOLTAGE_330:
1398 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_USDHC))
1399 HCLR2(hp, SDHC_HOST_CTL2, SDHC_1_8V_SIGNAL_EN);
1400 if (hp->sc->sc_vendor_signal_voltage != NULL) {
1401 error = hp->sc->sc_vendor_signal_voltage(hp->sc,
1402 signal_voltage);
1403 if (error != 0)
1404 break;
1405 }
1406 break;
1407 default:
1408 error = EINVAL;
1409 break;
1410 }
1411 mutex_exit(&hp->intr_lock);
1412
1413 return error;
1414 }
1415
1416 /*
1417 * Sampling clock tuning procedure (UHS)
1418 */
1419 static int
1420 sdhc_execute_tuning1(struct sdhc_host *hp, int timing)
1421 {
1422 struct sdmmc_command cmd;
1423 uint8_t hostctl;
1424 int opcode, error, retry = 40;
1425
1426 KASSERT(mutex_owned(&hp->intr_lock));
1427
1428 hp->tuning_timing = timing;
1429
1430 switch (timing) {
1431 case SDMMC_TIMING_MMC_HS200:
1432 opcode = MMC_SEND_TUNING_BLOCK_HS200;
1433 break;
1434 case SDMMC_TIMING_UHS_SDR50:
1435 if (!ISSET(hp->sc->sc_caps2, SDHC_TUNING_SDR50))
1436 return 0;
1437 /* FALLTHROUGH */
1438 case SDMMC_TIMING_UHS_SDR104:
1439 opcode = MMC_SEND_TUNING_BLOCK;
1440 break;
1441 default:
1442 return EINVAL;
1443 }
1444
1445 hostctl = HREAD1(hp, SDHC_HOST_CTL);
1446
1447 /* enable buffer read ready interrupt */
1448 HSET2(hp, SDHC_NINTR_SIGNAL_EN, SDHC_BUFFER_READ_READY);
1449 HSET2(hp, SDHC_NINTR_STATUS_EN, SDHC_BUFFER_READ_READY);
1450
1451 /* disable DMA */
1452 HCLR1(hp, SDHC_HOST_CTL, SDHC_DMA_SELECT);
1453
1454 /* reset tuning circuit */
1455 HCLR2(hp, SDHC_HOST_CTL2, SDHC_SAMPLING_CLOCK_SEL);
1456
1457 /* start of tuning */
1458 HWRITE2(hp, SDHC_HOST_CTL2, SDHC_EXECUTE_TUNING);
1459
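	/*
	 * Send tuning blocks until the controller clears EXECUTE_TUNING or
	 * the retry budget is exhausted; each pass issues the tuning command
	 * and waits for the buffer read ready interrupt.
	 */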
1460 do {
1461 memset(&cmd, 0, sizeof(cmd));
1462 cmd.c_opcode = opcode;
1463 cmd.c_arg = 0;
1464 cmd.c_flags = SCF_CMD_ADTC | SCF_CMD_READ | SCF_RSP_R1;
1465 if (ISSET(hostctl, SDHC_8BIT_MODE)) {
1466 cmd.c_blklen = cmd.c_datalen = 128;
1467 } else {
1468 cmd.c_blklen = cmd.c_datalen = 64;
1469 }
1470
1471 error = sdhc_start_command(hp, &cmd);
1472 if (error)
1473 break;
1474
1475 if (!sdhc_wait_intr(hp, SDHC_BUFFER_READ_READY,
1476 SDHC_TUNING_TIMEOUT, false)) {
1477 break;
1478 }
1479
1480 delay(1000);
1481 } while (HREAD2(hp, SDHC_HOST_CTL2) & SDHC_EXECUTE_TUNING && --retry);
1482
1483 /* disable buffer read ready interrupt */
1484 HCLR2(hp, SDHC_NINTR_SIGNAL_EN, SDHC_BUFFER_READ_READY);
1485 HCLR2(hp, SDHC_NINTR_STATUS_EN, SDHC_BUFFER_READ_READY);
1486
1487 if (HREAD2(hp, SDHC_HOST_CTL2) & SDHC_EXECUTE_TUNING) {
1488 HCLR2(hp, SDHC_HOST_CTL2,
1489 SDHC_SAMPLING_CLOCK_SEL|SDHC_EXECUTE_TUNING);
1490 sdhc_soft_reset(hp, SDHC_RESET_DAT|SDHC_RESET_CMD);
1491 aprint_error_dev(hp->sc->sc_dev,
1492 "tuning did not complete, using fixed sampling clock\n");
1493 return 0; /* tuning did not complete */
1494 }
1495
1496 if ((HREAD2(hp, SDHC_HOST_CTL2) & SDHC_SAMPLING_CLOCK_SEL) == 0) {
1497 HCLR2(hp, SDHC_HOST_CTL2,
1498 SDHC_SAMPLING_CLOCK_SEL|SDHC_EXECUTE_TUNING);
1499 sdhc_soft_reset(hp, SDHC_RESET_DAT|SDHC_RESET_CMD);
1500 aprint_error_dev(hp->sc->sc_dev,
1501 "tuning failed, using fixed sampling clock\n");
1502 return 0; /* tuning failed */
1503 }
1504
1505 if (hp->tuning_timer_count) {
1506 callout_schedule(&hp->tuning_timer,
1507 hz * hp->tuning_timer_count);
1508 }
1509
1510 return 0; /* tuning completed */
1511 }
1512
1513 static int
1514 sdhc_execute_tuning(sdmmc_chipset_handle_t sch, int timing)
1515 {
1516 struct sdhc_host *hp = (struct sdhc_host *)sch;
1517 int error;
1518
1519 mutex_enter(&hp->intr_lock);
1520 error = sdhc_execute_tuning1(hp, timing);
1521 mutex_exit(&hp->intr_lock);
1522 return error;
1523 }
1524
1525 static void
1526 sdhc_tuning_timer(void *arg)
1527 {
1528 struct sdhc_host *hp = arg;
1529
1530 atomic_swap_uint(&hp->tuning_timer_pending, 1);
1531 }
1532
1533 static void
1534 sdhc_hw_reset(sdmmc_chipset_handle_t sch)
1535 {
1536 struct sdhc_host *hp = (struct sdhc_host *)sch;
1537 struct sdhc_softc *sc = hp->sc;
1538
1539 if (sc->sc_vendor_hw_reset != NULL)
1540 sc->sc_vendor_hw_reset(sc, hp);
1541 }
1542
1543 static int
1544 sdhc_wait_state(struct sdhc_host *hp, uint32_t mask, uint32_t value)
1545 {
1546 uint32_t state;
1547 int timeout;
1548
1549 for (timeout = 100000; timeout > 0; timeout--) {
1550 if (((state = HREAD4(hp, SDHC_PRESENT_STATE)) & mask) == value)
1551 return 0;
1552 sdmmc_delay(10);
1553 }
1554 aprint_error_dev(hp->sc->sc_dev, "timeout waiting for mask %#x value %#x (state=%#x)\n",
1555 mask, value, state);
1556 return ETIMEDOUT;
1557 }
1558
1559 static void
1560 sdhc_exec_command(sdmmc_chipset_handle_t sch, struct sdmmc_command *cmd)
1561 {
1562 struct sdhc_host *hp = (struct sdhc_host *)sch;
1563 int error;
1564 bool probing;
1565
1566 mutex_enter(&hp->intr_lock);
1567
1568 if (atomic_cas_uint(&hp->tuning_timer_pending, 1, 0) == 1) {
1569 (void)sdhc_execute_tuning1(hp, hp->tuning_timing);
1570 }
1571
1572 if (cmd->c_data &&
1573 ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED | SDHC_FLAG_USDHC)) {
1574 const uint16_t ready = SDHC_BUFFER_READ_READY | SDHC_BUFFER_WRITE_READY;
1575 if (ISSET(hp->flags, SHF_USE_DMA)) {
1576 HCLR2(hp, SDHC_NINTR_SIGNAL_EN, ready);
1577 HCLR2(hp, SDHC_NINTR_STATUS_EN, ready);
1578 } else {
1579 HSET2(hp, SDHC_NINTR_SIGNAL_EN, ready);
1580 HSET2(hp, SDHC_NINTR_STATUS_EN, ready);
1581 }
1582 }
1583
1584 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_NO_TIMEOUT)) {
1585 const uint16_t eintr = SDHC_CMD_TIMEOUT_ERROR;
1586 if (cmd->c_data != NULL) {
1587 HCLR2(hp, SDHC_EINTR_SIGNAL_EN, eintr);
1588 HCLR2(hp, SDHC_EINTR_STATUS_EN, eintr);
1589 } else {
1590 HSET2(hp, SDHC_EINTR_SIGNAL_EN, eintr);
1591 HSET2(hp, SDHC_EINTR_STATUS_EN, eintr);
1592 }
1593 }
1594
1595 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_STOP_WITH_TC)) {
1596 if (cmd->c_opcode == MMC_STOP_TRANSMISSION)
1597 SET(cmd->c_flags, SCF_RSP_BSY);
1598 }
1599
1600 /*
1601 * Start the MMC command, or mark `cmd' as failed and return.
1602 */
1603 error = sdhc_start_command(hp, cmd);
1604 if (error) {
1605 cmd->c_error = error;
1606 goto out;
1607 }
1608
1609 /*
1610 * Wait until the command phase is done, or until the command
1611 * is marked done for any other reason.
1612 */
1613 probing = (cmd->c_flags & SCF_TOUT_OK) != 0;
1614 if (!sdhc_wait_intr(hp, SDHC_COMMAND_COMPLETE, SDHC_COMMAND_TIMEOUT*3, probing)) {
1615 DPRINTF(1,("%s: timeout for command\n", __func__));
1616 sdmmc_delay(50);
1617 cmd->c_error = ETIMEDOUT;
1618 goto out;
1619 }
1620
1621 /*
1622 * The host controller removes bits [0:7] from the response
1623 * data (CRC) and we pass the data up unchanged to the bus
1624 * driver (without padding).
1625 */
1626 if (cmd->c_error == 0 && ISSET(cmd->c_flags, SCF_RSP_PRESENT)) {
1627 cmd->c_resp[0] = HREAD4(hp, SDHC_RESPONSE + 0);
1628 if (ISSET(cmd->c_flags, SCF_RSP_136)) {
1629 cmd->c_resp[1] = HREAD4(hp, SDHC_RESPONSE + 4);
1630 cmd->c_resp[2] = HREAD4(hp, SDHC_RESPONSE + 8);
1631 cmd->c_resp[3] = HREAD4(hp, SDHC_RESPONSE + 12);
1632 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_RSP136_CRC)) {
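				/*
				 * This controller keeps the CRC byte in the
				 * 136-bit response, so shift the whole
				 * response right by 8 bits to match the
				 * standard layout.
				 */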
1633 cmd->c_resp[0] = (cmd->c_resp[0] >> 8) |
1634 (cmd->c_resp[1] << 24);
1635 cmd->c_resp[1] = (cmd->c_resp[1] >> 8) |
1636 (cmd->c_resp[2] << 24);
1637 cmd->c_resp[2] = (cmd->c_resp[2] >> 8) |
1638 (cmd->c_resp[3] << 24);
1639 cmd->c_resp[3] = (cmd->c_resp[3] >> 8);
1640 }
1641 }
1642 }
1643 DPRINTF(1,("%s: resp = %08x\n", HDEVNAME(hp), cmd->c_resp[0]));
1644
1645 /*
1646 * If the command has data to transfer in any direction,
1647 * execute the transfer now.
1648 */
1649 if (cmd->c_error == 0 && cmd->c_data != NULL)
1650 sdhc_transfer_data(hp, cmd);
1651 else if (ISSET(cmd->c_flags, SCF_RSP_BSY)) {
1652 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_NO_BUSY_INTR) &&
1653 !sdhc_wait_intr(hp, SDHC_TRANSFER_COMPLETE, hz * 10, false)) {
1654 DPRINTF(1,("%s: sdhc_exec_command: RSP_BSY\n",
1655 HDEVNAME(hp)));
1656 cmd->c_error = ETIMEDOUT;
1657 goto out;
1658 }
1659 }
1660
1661 out:
1662 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED)
1663 && !ISSET(hp->sc->sc_flags, SDHC_FLAG_NO_LED_ON)) {
1664 /* Turn off the LED. */
1665 HCLR1(hp, SDHC_HOST_CTL, SDHC_LED_ON);
1666 }
1667 SET(cmd->c_flags, SCF_ITSDONE);
1668
1669 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_NO_AUTO_STOP) &&
1670 cmd->c_opcode == MMC_STOP_TRANSMISSION)
1671 (void)sdhc_soft_reset(hp, SDHC_RESET_CMD|SDHC_RESET_DAT);
1672
1673 mutex_exit(&hp->intr_lock);
1674
1675 DPRINTF(1,("%s: cmd %d %s (flags=%08x error=%d)\n", HDEVNAME(hp),
1676 cmd->c_opcode, (cmd->c_error == 0) ? "done" : "abort",
1677 cmd->c_flags, cmd->c_error));
1678 }
1679
1680 static int
1681 sdhc_start_command(struct sdhc_host *hp, struct sdmmc_command *cmd)
1682 {
1683 struct sdhc_softc * const sc = hp->sc;
1684 uint16_t blksize = 0;
1685 uint16_t blkcount = 0;
1686 uint16_t mode;
1687 uint16_t command;
1688 uint32_t pmask;
1689 int error;
1690
1691 KASSERT(mutex_owned(&hp->intr_lock));
1692
1693 DPRINTF(1,("%s: start cmd %d arg=%08x data=%p dlen=%d flags=%08x, status=%#x\n",
1694 HDEVNAME(hp), cmd->c_opcode, cmd->c_arg, cmd->c_data,
1695 cmd->c_datalen, cmd->c_flags, HREAD4(hp, SDHC_NINTR_STATUS)));
1696
1697 /*
1698 * The maximum block length for commands should be the minimum
1699 * of the host buffer size and the card buffer size. (1.7.2)
1700 */
1701
1702 /* Fragment the data into proper blocks. */
1703 if (cmd->c_datalen > 0) {
1704 blksize = MIN(cmd->c_datalen, cmd->c_blklen);
1705 blkcount = cmd->c_datalen / blksize;
1706 if (cmd->c_datalen % blksize > 0) {
1707 /* XXX: Split this command. (1.7.4) */
1708 aprint_error_dev(sc->sc_dev,
1709 "data not a multiple of %u bytes\n", blksize);
1710 return EINVAL;
1711 }
1712 }
1713
1714 /* Check limit imposed by 9-bit block count. (1.7.2) */
1715 if (blkcount > SDHC_BLOCK_COUNT_MAX) {
1716 aprint_error_dev(sc->sc_dev, "too much data\n");
1717 return EINVAL;
1718 }
1719
1720 /* Prepare transfer mode register value. (2.2.5) */
1721 mode = SDHC_BLOCK_COUNT_ENABLE;
1722 if (ISSET(cmd->c_flags, SCF_CMD_READ))
1723 mode |= SDHC_READ_MODE;
1724 if (blkcount > 1) {
1725 mode |= SDHC_MULTI_BLOCK_MODE;
1726 /* XXX only for memory commands? */
1727 if (!ISSET(sc->sc_flags, SDHC_FLAG_NO_AUTO_STOP))
1728 mode |= SDHC_AUTO_CMD12_ENABLE;
1729 }
1730 if (cmd->c_dmamap != NULL && cmd->c_datalen > 0 &&
1731 ISSET(hp->flags, SHF_MODE_DMAEN)) {
1732 mode |= SDHC_DMA_ENABLE;
1733 }
1734
1735 /*
1736 * Prepare command register value. (2.2.6)
1737 */
1738 command = (cmd->c_opcode & SDHC_COMMAND_INDEX_MASK) << SDHC_COMMAND_INDEX_SHIFT;
1739
1740 if (ISSET(cmd->c_flags, SCF_RSP_CRC))
1741 command |= SDHC_CRC_CHECK_ENABLE;
1742 if (ISSET(cmd->c_flags, SCF_RSP_IDX))
1743 command |= SDHC_INDEX_CHECK_ENABLE;
1744 if (cmd->c_datalen > 0)
1745 command |= SDHC_DATA_PRESENT_SELECT;
1746
1747 if (!ISSET(cmd->c_flags, SCF_RSP_PRESENT))
1748 command |= SDHC_NO_RESPONSE;
1749 else if (ISSET(cmd->c_flags, SCF_RSP_136))
1750 command |= SDHC_RESP_LEN_136;
1751 else if (ISSET(cmd->c_flags, SCF_RSP_BSY))
1752 command |= SDHC_RESP_LEN_48_CHK_BUSY;
1753 else
1754 command |= SDHC_RESP_LEN_48;
1755
1756 /* Wait until command and optionally data inhibit bits are clear. (1.5) */
1757 pmask = SDHC_CMD_INHIBIT_CMD;
1758 if (cmd->c_flags & (SCF_CMD_ADTC|SCF_RSP_BSY))
1759 pmask |= SDHC_CMD_INHIBIT_DAT;
1760 error = sdhc_wait_state(hp, pmask, 0);
1761 if (error) {
1762 (void) sdhc_soft_reset(hp, SDHC_RESET_DAT|SDHC_RESET_CMD);
1763 device_printf(sc->sc_dev, "command or data phase inhibited\n");
1764 return error;
1765 }
1766
1767 DPRINTF(1,("%s: writing cmd: blksize=%d blkcnt=%d mode=%04x cmd=%04x\n",
1768 HDEVNAME(hp), blksize, blkcount, mode, command));
1769
1770 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED | SDHC_FLAG_USDHC)) {
1771 blksize |= (MAX(0, PAGE_SHIFT - 12) & SDHC_DMA_BOUNDARY_MASK) <<
1772 SDHC_DMA_BOUNDARY_SHIFT; /* PAGE_SIZE DMA boundary */
1773 }
1774
1775 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED)) {
1776 /* Alert the user not to remove the card. */
1777 HSET1(hp, SDHC_HOST_CTL, SDHC_LED_ON);
1778 }
1779
1780 /* Set DMA start address. */
1781 if (ISSET(hp->flags, SHF_USE_ADMA2_MASK) && cmd->c_data != NULL) {
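		/*
		 * Build one ADMA2 transfer descriptor per DMA segment.  The
		 * 16-bit length field encodes a 65536-byte segment as 0, and
		 * the last descriptor in the chain carries the END attribute.
		 */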
1782 for (int seg = 0; seg < cmd->c_dmamap->dm_nsegs; seg++) {
1783 bus_addr_t paddr =
1784 cmd->c_dmamap->dm_segs[seg].ds_addr;
1785 uint16_t len =
1786 cmd->c_dmamap->dm_segs[seg].ds_len == 65536 ?
1787 0 : cmd->c_dmamap->dm_segs[seg].ds_len;
1788 uint16_t attr =
1789 SDHC_ADMA2_VALID | SDHC_ADMA2_ACT_TRANS;
1790 if (seg == cmd->c_dmamap->dm_nsegs - 1) {
1791 attr |= SDHC_ADMA2_END;
1792 }
1793 if (ISSET(hp->flags, SHF_USE_ADMA2_32)) {
1794 struct sdhc_adma2_descriptor32 *desc =
1795 hp->adma2;
1796 desc[seg].attribute = htole16(attr);
1797 desc[seg].length = htole16(len);
1798 desc[seg].address = htole32(paddr);
1799 } else {
1800 struct sdhc_adma2_descriptor64 *desc =
1801 hp->adma2;
1802 desc[seg].attribute = htole16(attr);
1803 desc[seg].length = htole16(len);
1804 desc[seg].address = htole32(paddr & 0xffffffff);
1805 desc[seg].address_hi = htole32(
1806 (uint64_t)paddr >> 32);
1807 }
1808 }
1809 if (ISSET(hp->flags, SHF_USE_ADMA2_32)) {
1810 struct sdhc_adma2_descriptor32 *desc = hp->adma2;
1811 desc[cmd->c_dmamap->dm_nsegs].attribute = htole16(0);
1812 } else {
1813 struct sdhc_adma2_descriptor64 *desc = hp->adma2;
1814 desc[cmd->c_dmamap->dm_nsegs].attribute = htole16(0);
1815 }
1816 bus_dmamap_sync(sc->sc_dmat, hp->adma_map, 0, PAGE_SIZE,
1817 BUS_DMASYNC_PREWRITE);
1818 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_USDHC)) {
1819 HCLR4(hp, SDHC_HOST_CTL, SDHC_USDHC_DMA_SELECT);
1820 HSET4(hp, SDHC_HOST_CTL, SDHC_USDHC_DMA_SELECT_ADMA2);
1821 } else {
1822 HCLR1(hp, SDHC_HOST_CTL, SDHC_DMA_SELECT);
1823 HSET1(hp, SDHC_HOST_CTL, SDHC_DMA_SELECT_ADMA2);
1824 }
1825
1826 const bus_addr_t desc_addr = hp->adma_map->dm_segs[0].ds_addr;
1827
1828 HWRITE4(hp, SDHC_ADMA_SYSTEM_ADDR, desc_addr & 0xffffffff);
1829 if (ISSET(hp->flags, SHF_USE_ADMA2_64)) {
1830 HWRITE4(hp, SDHC_ADMA_SYSTEM_ADDR + 4,
1831 (uint64_t)desc_addr >> 32);
1832 }
1833 } else if (ISSET(mode, SDHC_DMA_ENABLE) &&
1834 !ISSET(sc->sc_flags, SDHC_FLAG_EXTERNAL_DMA)) {
1835 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_USDHC)) {
1836 HCLR4(hp, SDHC_HOST_CTL, SDHC_USDHC_DMA_SELECT);
1837 }
1838 HWRITE4(hp, SDHC_DMA_ADDR, cmd->c_dmamap->dm_segs[0].ds_addr);
1839 }
1840
1841 /*
1842 * Start a CPU data transfer. Writing to the high order byte
1843 * of the SDHC_COMMAND register triggers the SD command. (1.5)
1844 */
1845 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_32BIT_ACCESS)) {
1846 HWRITE4(hp, SDHC_BLOCK_SIZE, blksize | (blkcount << 16));
1847 HWRITE4(hp, SDHC_ARGUMENT, cmd->c_arg);
1848 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_USDHC)) {
  1849 			/* the mode bits live in the MIX_CTRL register on uSDHC */
1850 HWRITE4(hp, SDHC_MIX_CTRL, mode |
1851 (HREAD4(hp, SDHC_MIX_CTRL) & ~SDHC_TRANSFER_MODE_MASK));
1852 if (cmd->c_opcode == MMC_STOP_TRANSMISSION)
1853 command |= SDHC_COMMAND_TYPE_ABORT;
1854 HWRITE4(hp, SDHC_TRANSFER_MODE, command << 16);
1855 } else {
1856 HWRITE4(hp, SDHC_TRANSFER_MODE, mode | (command << 16));
1857 }
1858 } else {
1859 HWRITE2(hp, SDHC_BLOCK_SIZE, blksize);
1860 HWRITE2(hp, SDHC_BLOCK_COUNT, blkcount);
1861 HWRITE4(hp, SDHC_ARGUMENT, cmd->c_arg);
1862 HWRITE2(hp, SDHC_TRANSFER_MODE, mode);
1863 HWRITE2(hp, SDHC_COMMAND, command);
1864 }
1865
1866 return 0;
1867 }
1868
1869 static void
1870 sdhc_transfer_data(struct sdhc_host *hp, struct sdmmc_command *cmd)
1871 {
1872 struct sdhc_softc *sc = hp->sc;
1873 int error;
1874
1875 KASSERT(mutex_owned(&hp->intr_lock));
1876
1877 DPRINTF(1,("%s: data transfer: resp=%08x datalen=%u\n", HDEVNAME(hp),
1878 MMC_R1(cmd->c_resp), cmd->c_datalen));
1879
1880 #ifdef SDHC_DEBUG
1881 /* XXX I forgot why I wanted to know when this happens :-( */
1882 if ((cmd->c_opcode == 52 || cmd->c_opcode == 53) &&
1883 ISSET(MMC_R1(cmd->c_resp), 0xcb00)) {
1884 aprint_error_dev(hp->sc->sc_dev,
1885 "CMD52/53 error response flags %#x\n",
1886 MMC_R1(cmd->c_resp) & 0xff00);
1887 }
1888 #endif
1889
1890 if (cmd->c_dmamap != NULL) {
1891 if (hp->sc->sc_vendor_transfer_data_dma != NULL) {
1892 error = hp->sc->sc_vendor_transfer_data_dma(sc, cmd);
1893 if (error == 0 && !sdhc_wait_intr(hp,
1894 SDHC_TRANSFER_COMPLETE, SDHC_DMA_TIMEOUT, false)) {
1895 DPRINTF(1,("%s: timeout\n", __func__));
1896 error = ETIMEDOUT;
1897 }
1898 } else {
1899 error = sdhc_transfer_data_dma(hp, cmd);
1900 }
1901 } else
1902 error = sdhc_transfer_data_pio(hp, cmd);
1903 if (error)
1904 cmd->c_error = error;
1905 SET(cmd->c_flags, SCF_ITSDONE);
1906
1907 DPRINTF(1,("%s: data transfer done (error=%d)\n",
1908 HDEVNAME(hp), cmd->c_error));
1909 }
1910
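/*
 * Wait for a DMA transfer to complete.  With ADMA2 only TRANSFER_COMPLETE
 * matters; with SDMA each boundary crossing raises a DMA interrupt and the
 * transfer is resumed by rewriting SDHC_DMA_ADDR.
 */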
1911 static int
1912 sdhc_transfer_data_dma(struct sdhc_host *hp, struct sdmmc_command *cmd)
1913 {
1914 bus_dma_segment_t *dm_segs = cmd->c_dmamap->dm_segs;
1915 bus_addr_t posaddr;
1916 bus_addr_t segaddr;
1917 bus_size_t seglen;
1918 u_int seg = 0;
1919 int error = 0;
1920 int status;
1921
1922 KASSERT(mutex_owned(&hp->intr_lock));
1923 KASSERT(HREAD2(hp, SDHC_NINTR_STATUS_EN) & SDHC_DMA_INTERRUPT);
1924 KASSERT(HREAD2(hp, SDHC_NINTR_SIGNAL_EN) & SDHC_DMA_INTERRUPT);
1925 KASSERT(HREAD2(hp, SDHC_NINTR_STATUS_EN) & SDHC_TRANSFER_COMPLETE);
1926 KASSERT(HREAD2(hp, SDHC_NINTR_SIGNAL_EN) & SDHC_TRANSFER_COMPLETE);
1927
1928 for (;;) {
1929 status = sdhc_wait_intr(hp,
1930 SDHC_DMA_INTERRUPT|SDHC_TRANSFER_COMPLETE,
1931 SDHC_DMA_TIMEOUT, false);
1932
1933 if (status & SDHC_TRANSFER_COMPLETE) {
1934 break;
1935 }
1936 if (!status) {
1937 DPRINTF(1,("%s: timeout\n", __func__));
1938 error = ETIMEDOUT;
1939 break;
1940 }
1941
1942 if (ISSET(hp->flags, SHF_USE_ADMA2_MASK)) {
1943 continue;
1944 }
1945
1946 if ((status & SDHC_DMA_INTERRUPT) == 0) {
1947 continue;
1948 }
1949
1950 /* DMA Interrupt (boundary crossing) */
1951
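		/*
		 * The SDMA engine has paused at a boundary.  Read back the
		 * current address and either resume within the current
		 * segment or advance to the next one once this segment has
		 * been fully transferred.
		 */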
1952 segaddr = dm_segs[seg].ds_addr;
1953 seglen = dm_segs[seg].ds_len;
1954 posaddr = HREAD4(hp, SDHC_DMA_ADDR);
1955
1956 		if ((seg == (cmd->c_dmamap->dm_nsegs - 1)) &&
		    (posaddr == (segaddr + seglen))) {
1957 continue;
1958 }
1959 if ((posaddr >= segaddr) && (posaddr < (segaddr + seglen)))
1960 HWRITE4(hp, SDHC_DMA_ADDR, posaddr);
1961 		else if ((posaddr >= segaddr) &&
		    (posaddr == (segaddr + seglen)) &&
		    (seg + 1) < cmd->c_dmamap->dm_nsegs)
1962 HWRITE4(hp, SDHC_DMA_ADDR, dm_segs[++seg].ds_addr);
1963 KASSERT(seg < cmd->c_dmamap->dm_nsegs);
1964 }
1965
1966 if (ISSET(hp->flags, SHF_USE_ADMA2_MASK)) {
1967 bus_dmamap_sync(hp->sc->sc_dmat, hp->adma_map, 0,
1968 PAGE_SIZE, BUS_DMASYNC_POSTWRITE);
1969 }
1970
1971 return error;
1972 }
1973
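/*
 * Move data with programmed I/O: wait for the host to signal that its
 * buffer is ready, then copy one block at a time through the data port,
 * using the eSDHC/uSDHC access routines where the controller requires it.
 */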
1974 static int
1975 sdhc_transfer_data_pio(struct sdhc_host *hp, struct sdmmc_command *cmd)
1976 {
1977 uint8_t *data = cmd->c_data;
1978 void (*pio_func)(struct sdhc_host *, uint8_t *, u_int);
1979 u_int len, datalen;
1980 u_int imask;
1981 u_int pmask;
1982 int error = 0;
1983
1984 KASSERT(mutex_owned(&hp->intr_lock));
1985
1986 if (ISSET(cmd->c_flags, SCF_CMD_READ)) {
1987 imask = SDHC_BUFFER_READ_READY;
1988 pmask = SDHC_BUFFER_READ_ENABLE;
1989 if (ISSET(hp->sc->sc_flags,
1990 SDHC_FLAG_ENHANCED | SDHC_FLAG_USDHC)) {
1991 pio_func = esdhc_read_data_pio;
1992 } else {
1993 pio_func = sdhc_read_data_pio;
1994 }
1995 } else {
1996 imask = SDHC_BUFFER_WRITE_READY;
1997 pmask = SDHC_BUFFER_WRITE_ENABLE;
1998 if (ISSET(hp->sc->sc_flags,
1999 SDHC_FLAG_ENHANCED | SDHC_FLAG_USDHC)) {
2000 pio_func = esdhc_write_data_pio;
2001 } else {
2002 pio_func = sdhc_write_data_pio;
2003 }
2004 }
2005 datalen = cmd->c_datalen;
2006
2007 KASSERT(mutex_owned(&hp->intr_lock));
2008 KASSERT(HREAD2(hp, SDHC_NINTR_STATUS_EN) & imask);
2009 KASSERT(HREAD2(hp, SDHC_NINTR_STATUS_EN) & SDHC_TRANSFER_COMPLETE);
2010 KASSERT(HREAD2(hp, SDHC_NINTR_SIGNAL_EN) & SDHC_TRANSFER_COMPLETE);
2011
2012 while (datalen > 0) {
2013 if (!ISSET(HREAD4(hp, SDHC_PRESENT_STATE), pmask)) {
2014 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_32BIT_ACCESS)) {
2015 HSET4(hp, SDHC_NINTR_SIGNAL_EN, imask);
2016 } else {
2017 HSET2(hp, SDHC_NINTR_SIGNAL_EN, imask);
2018 }
2019 if (!sdhc_wait_intr(hp, imask, SDHC_BUFFER_TIMEOUT, false)) {
2020 DPRINTF(1,("%s: timeout\n", __func__));
2021 error = ETIMEDOUT;
2022 break;
2023 }
2024
2025 error = sdhc_wait_state(hp, pmask, pmask);
2026 if (error)
2027 break;
2028 }
2029
2030 len = MIN(datalen, cmd->c_blklen);
2031 (*pio_func)(hp, data, len);
2032 DPRINTF(2,("%s: pio data transfer %u @ %p\n",
2033 HDEVNAME(hp), len, data));
2034
2035 data += len;
2036 datalen -= len;
2037 }
2038
2039 if (error == 0 && !sdhc_wait_intr(hp, SDHC_TRANSFER_COMPLETE,
2040 SDHC_TRANSFER_TIMEOUT, false)) {
2041 DPRINTF(1,("%s: timeout for transfer\n", __func__));
2042 error = ETIMEDOUT;
2043 }
2044
2045 return error;
2046 }
2047
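/*
 * Standard PIO helpers: drain or fill the data port with the widest
 * accesses the buffer alignment allows (32-, 16- or 8-bit).
 */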
2048 static void
2049 sdhc_read_data_pio(struct sdhc_host *hp, uint8_t *data, u_int datalen)
2050 {
2051
2052 if (((__uintptr_t)data & 3) == 0) {
2053 while (datalen > 3) {
2054 *(uint32_t *)data = le32toh(HREAD4(hp, SDHC_DATA));
2055 data += 4;
2056 datalen -= 4;
2057 }
2058 if (datalen > 1) {
2059 *(uint16_t *)data = le16toh(HREAD2(hp, SDHC_DATA));
2060 data += 2;
2061 datalen -= 2;
2062 }
2063 if (datalen > 0) {
2064 *data = HREAD1(hp, SDHC_DATA);
2065 data += 1;
2066 datalen -= 1;
2067 }
2068 } else if (((__uintptr_t)data & 1) == 0) {
2069 while (datalen > 1) {
2070 *(uint16_t *)data = le16toh(HREAD2(hp, SDHC_DATA));
2071 data += 2;
2072 datalen -= 2;
2073 }
2074 if (datalen > 0) {
2075 *data = HREAD1(hp, SDHC_DATA);
2076 data += 1;
2077 datalen -= 1;
2078 }
2079 } else {
2080 while (datalen > 0) {
2081 *data = HREAD1(hp, SDHC_DATA);
2082 data += 1;
2083 datalen -= 1;
2084 }
2085 }
2086 }
2087
2088 static void
2089 sdhc_write_data_pio(struct sdhc_host *hp, uint8_t *data, u_int datalen)
2090 {
2091
2092 if (((__uintptr_t)data & 3) == 0) {
2093 while (datalen > 3) {
2094 HWRITE4(hp, SDHC_DATA, htole32(*(uint32_t *)data));
2095 data += 4;
2096 datalen -= 4;
2097 }
2098 if (datalen > 1) {
2099 HWRITE2(hp, SDHC_DATA, htole16(*(uint16_t *)data));
2100 data += 2;
2101 datalen -= 2;
2102 }
2103 if (datalen > 0) {
2104 HWRITE1(hp, SDHC_DATA, *data);
2105 data += 1;
2106 datalen -= 1;
2107 }
2108 } else if (((__uintptr_t)data & 1) == 0) {
2109 while (datalen > 1) {
2110 HWRITE2(hp, SDHC_DATA, htole16(*(uint16_t *)data));
2111 data += 2;
2112 datalen -= 2;
2113 }
2114 if (datalen > 0) {
2115 HWRITE1(hp, SDHC_DATA, *data);
2116 data += 1;
2117 datalen -= 1;
2118 }
2119 } else {
2120 while (datalen > 0) {
2121 HWRITE1(hp, SDHC_DATA, *data);
2122 data += 1;
2123 datalen -= 1;
2124 }
2125 }
2126 }
2127
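/*
 * eSDHC/uSDHC PIO helpers: these controllers present a single 32-bit data
 * port paced by the watermark level, so after moving a watermark's worth
 * of words we pause briefly to let the FIFO drain or refill.
 */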
2128 static void
2129 esdhc_read_data_pio(struct sdhc_host *hp, uint8_t *data, u_int datalen)
2130 {
2131 uint16_t status = HREAD2(hp, SDHC_NINTR_STATUS);
2132 uint32_t v;
2133
2134 	const size_t watermark =
	    (HREAD4(hp, SDHC_WATERMARK_LEVEL) >> SDHC_WATERMARK_READ_SHIFT) &
	    SDHC_WATERMARK_READ_MASK;
2135 size_t count = 0;
2136
2137 while (datalen > 3 && !ISSET(status, SDHC_TRANSFER_COMPLETE)) {
2138 if (count == 0) {
2139 /*
2140 * If we've drained "watermark" words, we need to wait
2141 * a little bit so the read FIFO can refill.
2142 */
2143 sdmmc_delay(10);
2144 count = watermark;
2145 }
2146 v = HREAD4(hp, SDHC_DATA);
2147 v = le32toh(v);
2148 *(uint32_t *)data = v;
2149 data += 4;
2150 datalen -= 4;
2151 status = HREAD2(hp, SDHC_NINTR_STATUS);
2152 count--;
2153 }
2154 if (datalen > 0 && !ISSET(status, SDHC_TRANSFER_COMPLETE)) {
2155 if (count == 0) {
2156 sdmmc_delay(10);
2157 }
2158 v = HREAD4(hp, SDHC_DATA);
2159 v = le32toh(v);
2160 do {
2161 *data++ = v;
2162 v >>= 8;
2163 } while (--datalen > 0);
2164 }
2165 }
2166
2167 static void
2168 esdhc_write_data_pio(struct sdhc_host *hp, uint8_t *data, u_int datalen)
2169 {
2170 uint16_t status = HREAD2(hp, SDHC_NINTR_STATUS);
2171 uint32_t v;
2172
2173 	const size_t watermark =
	    (HREAD4(hp, SDHC_WATERMARK_LEVEL) >> SDHC_WATERMARK_WRITE_SHIFT) &
	    SDHC_WATERMARK_WRITE_MASK;
2174 size_t count = watermark;
2175
2176 while (datalen > 3 && !ISSET(status, SDHC_TRANSFER_COMPLETE)) {
2177 if (count == 0) {
2178 sdmmc_delay(10);
2179 count = watermark;
2180 }
2181 v = *(uint32_t *)data;
2182 v = htole32(v);
2183 HWRITE4(hp, SDHC_DATA, v);
2184 data += 4;
2185 datalen -= 4;
2186 status = HREAD2(hp, SDHC_NINTR_STATUS);
2187 count--;
2188 }
2189 if (datalen > 0 && !ISSET(status, SDHC_TRANSFER_COMPLETE)) {
2190 if (count == 0) {
2191 sdmmc_delay(10);
2192 }
2193 v = *(uint32_t *)data;
2194 v = htole32(v);
2195 HWRITE4(hp, SDHC_DATA, v);
2196 }
2197 }
2198
2199 /* Prepare for another command. */
2200 static int
2201 sdhc_soft_reset(struct sdhc_host *hp, int mask)
2202 {
2203 int timo;
2204
2205 KASSERT(mutex_owned(&hp->intr_lock));
2206
2207 DPRINTF(1,("%s: software reset reg=%08x\n", HDEVNAME(hp), mask));
2208
2209 /* Request the reset. */
2210 HWRITE1(hp, SDHC_SOFTWARE_RESET, mask);
2211
2212 /*
2213 * If necessary, wait for the controller to set the bits to
2214 * acknowledge the reset.
2215 */
2216 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_WAIT_RESET) &&
2217 ISSET(mask, (SDHC_RESET_DAT | SDHC_RESET_CMD))) {
2218 for (timo = 10000; timo > 0; timo--) {
2219 if (ISSET(HREAD1(hp, SDHC_SOFTWARE_RESET), mask))
2220 break;
2221 			/* Short delay so we don't miss the acknowledgement. */
2222 sdmmc_delay(1);
2223 }
2224 if (timo == 0) {
2225 			DPRINTF(1,("%s: timeout waiting for reset to assert\n", __func__));
2226 return ETIMEDOUT;
2227 }
2228 }
2229
2230 /*
2231 * Wait for the controller to clear the bits to indicate that
2232 * the reset has completed.
2233 */
2234 for (timo = 10; timo > 0; timo--) {
2235 if (!ISSET(HREAD1(hp, SDHC_SOFTWARE_RESET), mask))
2236 break;
2237 sdmmc_delay(10000);
2238 }
2239 if (timo == 0) {
2240 DPRINTF(1,("%s: timeout reg=%08x\n", HDEVNAME(hp),
2241 HREAD1(hp, SDHC_SOFTWARE_RESET)));
2242 return ETIMEDOUT;
2243 }
2244
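	/* eSDHC: make sure DMA snooping is still enabled after the reset. */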
2245 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED)) {
2246 HSET4(hp, SDHC_DMA_CTL, SDHC_DMA_SNOOP);
2247 }
2248
2249 return 0;
2250 }
2251
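/*
 * Sleep until one of the interrupt bits in "mask" (or an error) has been
 * posted by sdhc_intr(), or until "timo" ticks have elapsed.  Returns the
 * satisfied status bits, or 0 on timeout or error.
 */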
2252 static int
2253 sdhc_wait_intr(struct sdhc_host *hp, int mask, int timo, bool probing)
2254 {
2255 int status, error, nointr;
2256
2257 KASSERT(mutex_owned(&hp->intr_lock));
2258
2259 mask |= SDHC_ERROR_INTERRUPT;
2260
2261 nointr = 0;
2262 status = hp->intr_status & mask;
2263 while (status == 0) {
2264 if (cv_timedwait(&hp->intr_cv, &hp->intr_lock, timo)
2265 == EWOULDBLOCK) {
2266 nointr = 1;
2267 break;
2268 }
2269 status = hp->intr_status & mask;
2270 }
2271 error = hp->intr_error_status;
2272
2273 DPRINTF(2,("%s: intr status %#x error %#x\n", HDEVNAME(hp), status,
2274 error));
2275
2276 hp->intr_status &= ~status;
2277 hp->intr_error_status &= ~error;
2278
2279 if (ISSET(status, SDHC_ERROR_INTERRUPT)) {
2280 if (ISSET(error, SDHC_DMA_ERROR))
2281 device_printf(hp->sc->sc_dev,"dma error\n");
2282 if (ISSET(error, SDHC_ADMA_ERROR))
2283 device_printf(hp->sc->sc_dev,"adma error\n");
2284 if (ISSET(error, SDHC_AUTO_CMD12_ERROR))
2285 device_printf(hp->sc->sc_dev,"auto_cmd12 error\n");
2286 if (ISSET(error, SDHC_CURRENT_LIMIT_ERROR))
2287 device_printf(hp->sc->sc_dev,"current limit error\n");
2288 if (ISSET(error, SDHC_DATA_END_BIT_ERROR))
2289 device_printf(hp->sc->sc_dev,"data end bit error\n");
2290 if (ISSET(error, SDHC_DATA_CRC_ERROR))
2291 device_printf(hp->sc->sc_dev,"data crc error\n");
2292 if (ISSET(error, SDHC_DATA_TIMEOUT_ERROR))
2293 device_printf(hp->sc->sc_dev,"data timeout error\n");
2294 if (ISSET(error, SDHC_CMD_INDEX_ERROR))
2295 device_printf(hp->sc->sc_dev,"cmd index error\n");
2296 if (ISSET(error, SDHC_CMD_END_BIT_ERROR))
2297 device_printf(hp->sc->sc_dev,"cmd end bit error\n");
2298 if (ISSET(error, SDHC_CMD_CRC_ERROR))
2299 device_printf(hp->sc->sc_dev,"cmd crc error\n");
2300 if (ISSET(error, SDHC_CMD_TIMEOUT_ERROR)) {
2301 if (!probing)
2302 device_printf(hp->sc->sc_dev,"cmd timeout error\n");
2303 #ifdef SDHC_DEBUG
2304 else if (sdhcdebug > 0)
2305 device_printf(hp->sc->sc_dev,"cmd timeout (expected)\n");
2306 #endif
2307 }
2308 if ((error & ~SDHC_EINTR_STATUS_MASK) != 0)
2309 device_printf(hp->sc->sc_dev,"vendor error %#x\n",
2310 (error & ~SDHC_EINTR_STATUS_MASK));
2311 if (error == 0)
2312 device_printf(hp->sc->sc_dev,"no error\n");
2313
2314 /* Command timeout has higher priority than command complete. */
2315 if (ISSET(error, SDHC_CMD_TIMEOUT_ERROR))
2316 CLR(status, SDHC_COMMAND_COMPLETE);
2317
2318 /* Transfer complete has higher priority than data timeout. */
2319 if (ISSET(status, SDHC_TRANSFER_COMPLETE))
2320 CLR(error, SDHC_DATA_TIMEOUT_ERROR);
2321 }
2322
2323 if (nointr ||
2324 (ISSET(status, SDHC_ERROR_INTERRUPT) && error)) {
2325 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED))
2326 (void)sdhc_soft_reset(hp, SDHC_RESET_CMD|SDHC_RESET_DAT);
2327 hp->intr_error_status = 0;
2328 status = 0;
2329 }
2330
2331 return status;
2332 }
2333
2334 /*
2335 * Established by attachment driver at interrupt priority IPL_SDMMC.
2336 */
2337 int
2338 sdhc_intr(void *arg)
2339 {
2340 struct sdhc_softc *sc = (struct sdhc_softc *)arg;
2341 struct sdhc_host *hp;
2342 int done = 0;
2343 uint16_t status;
2344 uint16_t error;
2345
2346 /* We got an interrupt, but we don't know from which slot. */
2347 for (size_t host = 0; host < sc->sc_nhosts; host++) {
2348 hp = sc->sc_host[host];
2349 if (hp == NULL)
2350 continue;
2351
2352 mutex_enter(&hp->intr_lock);
2353
2354 if (ISSET(sc->sc_flags, SDHC_FLAG_32BIT_ACCESS)) {
2355 /* Find out which interrupts are pending. */
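			/*
			 * 32-bit hosts report the error status in the upper
			 * half of the same status word.
			 */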
2356 uint32_t xstatus = HREAD4(hp, SDHC_NINTR_STATUS);
2357 status = xstatus;
2358 error = xstatus >> 16;
2359 if (ISSET(sc->sc_flags, SDHC_FLAG_USDHC) &&
2360 (xstatus & SDHC_TRANSFER_COMPLETE) &&
2361 !(xstatus & SDHC_DMA_INTERRUPT)) {
2362 /* read again due to uSDHC errata */
2363 status = xstatus = HREAD4(hp,
2364 SDHC_NINTR_STATUS);
2365 error = xstatus >> 16;
2366 }
2367 if (ISSET(sc->sc_flags,
2368 SDHC_FLAG_ENHANCED | SDHC_FLAG_USDHC)) {
2369 if ((error & SDHC_NINTR_STATUS_MASK) != 0)
2370 SET(status, SDHC_ERROR_INTERRUPT);
2371 }
2372 if (error)
2373 xstatus |= SDHC_ERROR_INTERRUPT;
2374 else if (!ISSET(status, SDHC_NINTR_STATUS_MASK))
2375 goto next_port; /* no interrupt for us */
2376 /* Acknowledge the interrupts we are about to handle. */
2377 HWRITE4(hp, SDHC_NINTR_STATUS, xstatus);
2378 } else {
2379 /* Find out which interrupts are pending. */
2380 error = 0;
2381 status = HREAD2(hp, SDHC_NINTR_STATUS);
2382 if (!ISSET(status, SDHC_NINTR_STATUS_MASK))
2383 goto next_port; /* no interrupt for us */
2384 /* Acknowledge the interrupts we are about to handle. */
2385 HWRITE2(hp, SDHC_NINTR_STATUS, status);
2386 if (ISSET(status, SDHC_ERROR_INTERRUPT)) {
2387 /* Acknowledge error interrupts. */
2388 error = HREAD2(hp, SDHC_EINTR_STATUS);
2389 HWRITE2(hp, SDHC_EINTR_STATUS, error);
2390 }
2391 }
2392
2393 DPRINTF(2,("%s: interrupt status=%x error=%x\n", HDEVNAME(hp),
2394 status, error));
2395
2396 /* Claim this interrupt. */
2397 done = 1;
2398
2399 if (ISSET(status, SDHC_ERROR_INTERRUPT) &&
2400 ISSET(error, SDHC_ADMA_ERROR)) {
2401 uint8_t adma_err = HREAD1(hp, SDHC_ADMA_ERROR_STATUS);
2402 printf("%s: ADMA error, status %02x\n", HDEVNAME(hp),
2403 adma_err);
2404 }
2405
2406 /*
2407 * Wake up the sdmmc event thread to scan for cards.
2408 */
2409 if (ISSET(status, SDHC_CARD_REMOVAL|SDHC_CARD_INSERTION)) {
2410 if (hp->sdmmc != NULL) {
2411 sdmmc_needs_discover(hp->sdmmc);
2412 }
2413 if (ISSET(sc->sc_flags,
2414 SDHC_FLAG_ENHANCED | SDHC_FLAG_USDHC)) {
2415 HCLR4(hp, SDHC_NINTR_STATUS_EN,
2416 status & (SDHC_CARD_REMOVAL|SDHC_CARD_INSERTION));
2417 HCLR4(hp, SDHC_NINTR_SIGNAL_EN,
2418 status & (SDHC_CARD_REMOVAL|SDHC_CARD_INSERTION));
2419 }
2420 }
2421
2422 /*
2423 * Schedule re-tuning process (UHS).
2424 */
2425 if (ISSET(status, SDHC_RETUNING_EVENT)) {
2426 atomic_swap_uint(&hp->tuning_timer_pending, 1);
2427 }
2428
2429 /*
2430 * Wake up the blocking process to service command
2431 * related interrupt(s).
2432 */
2433 if (ISSET(status, SDHC_COMMAND_COMPLETE|SDHC_ERROR_INTERRUPT|
2434 SDHC_BUFFER_READ_READY|SDHC_BUFFER_WRITE_READY|
2435 SDHC_TRANSFER_COMPLETE|SDHC_DMA_INTERRUPT)) {
2436 hp->intr_error_status |= error;
2437 hp->intr_status |= status;
2438 if (ISSET(sc->sc_flags,
2439 SDHC_FLAG_ENHANCED | SDHC_FLAG_USDHC)) {
2440 HCLR4(hp, SDHC_NINTR_SIGNAL_EN,
2441 status & (SDHC_BUFFER_READ_READY|SDHC_BUFFER_WRITE_READY));
2442 }
2443 cv_broadcast(&hp->intr_cv);
2444 }
2445
2446 /*
2447 * Service SD card interrupts.
2448 */
2449 if (!ISSET(sc->sc_flags, SDHC_FLAG_ENHANCED | SDHC_FLAG_USDHC)
2450 && ISSET(status, SDHC_CARD_INTERRUPT)) {
2451 DPRINTF(0,("%s: card interrupt\n", HDEVNAME(hp)));
2452 HCLR2(hp, SDHC_NINTR_STATUS_EN, SDHC_CARD_INTERRUPT);
2453 sdmmc_card_intr(hp->sdmmc);
2454 }
2455 next_port:
2456 mutex_exit(&hp->intr_lock);
2457 }
2458
2459 return done;
2460 }
2461
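/*
 * Accessors for code outside this file: expose the host's interrupt lock
 * and raw 8/16/32-bit register access.
 */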
2462 kmutex_t *
2463 sdhc_host_lock(struct sdhc_host *hp)
2464 {
2465 return &hp->intr_lock;
2466 }
2467
2468 uint8_t
2469 sdhc_host_read_1(struct sdhc_host *hp, int reg)
2470 {
2471 return HREAD1(hp, reg);
2472 }
2473
2474 uint16_t
2475 sdhc_host_read_2(struct sdhc_host *hp, int reg)
2476 {
2477 return HREAD2(hp, reg);
2478 }
2479
2480 uint32_t
2481 sdhc_host_read_4(struct sdhc_host *hp, int reg)
2482 {
2483 return HREAD4(hp, reg);
2484 }
2485
2486 void
2487 sdhc_host_write_1(struct sdhc_host *hp, int reg, uint8_t val)
2488 {
2489 HWRITE1(hp, reg, val);
2490 }
2491
2492 void
2493 sdhc_host_write_2(struct sdhc_host *hp, int reg, uint16_t val)
2494 {
2495 HWRITE2(hp, reg, val);
2496 }
2497
2498 void
2499 sdhc_host_write_4(struct sdhc_host *hp, int reg, uint32_t val)
2500 {
2501 HWRITE4(hp, reg, val);
2502 }
2503
2504 #ifdef SDHC_DEBUG
2505 void
2506 sdhc_dump_regs(struct sdhc_host *hp)
2507 {
2508
2509 printf("0x%02x PRESENT_STATE: %x\n", SDHC_PRESENT_STATE,
2510 HREAD4(hp, SDHC_PRESENT_STATE));
2511 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED))
2512 printf("0x%02x POWER_CTL: %x\n", SDHC_POWER_CTL,
2513 HREAD1(hp, SDHC_POWER_CTL));
2514 printf("0x%02x NINTR_STATUS: %x\n", SDHC_NINTR_STATUS,
2515 HREAD2(hp, SDHC_NINTR_STATUS));
2516 printf("0x%02x EINTR_STATUS: %x\n", SDHC_EINTR_STATUS,
2517 HREAD2(hp, SDHC_EINTR_STATUS));
2518 printf("0x%02x NINTR_STATUS_EN: %x\n", SDHC_NINTR_STATUS_EN,
2519 HREAD2(hp, SDHC_NINTR_STATUS_EN));
2520 printf("0x%02x EINTR_STATUS_EN: %x\n", SDHC_EINTR_STATUS_EN,
2521 HREAD2(hp, SDHC_EINTR_STATUS_EN));
2522 printf("0x%02x NINTR_SIGNAL_EN: %x\n", SDHC_NINTR_SIGNAL_EN,
2523 HREAD2(hp, SDHC_NINTR_SIGNAL_EN));
2524 printf("0x%02x EINTR_SIGNAL_EN: %x\n", SDHC_EINTR_SIGNAL_EN,
2525 HREAD2(hp, SDHC_EINTR_SIGNAL_EN));
2526 printf("0x%02x CAPABILITIES: %x\n", SDHC_CAPABILITIES,
2527 HREAD4(hp, SDHC_CAPABILITIES));
2528 printf("0x%02x MAX_CAPABILITIES: %x\n", SDHC_MAX_CAPABILITIES,
2529 HREAD4(hp, SDHC_MAX_CAPABILITIES));
2530 }
2531 #endif
2532