1 /*	$NetBSD: sdhc.c,v 1.86 2015/09/09 08:06:47 mlelstv Exp $	*/
2 /* $OpenBSD: sdhc.c,v 1.25 2009/01/13 19:44:20 grange Exp $ */
3
4 /*
5 * Copyright (c) 2006 Uwe Stuehler <uwe (at) openbsd.org>
6 *
7 * Permission to use, copy, modify, and distribute this software for any
8 * purpose with or without fee is hereby granted, provided that the above
9 * copyright notice and this permission notice appear in all copies.
10 *
11 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18 */
19
20 /*
21 * SD Host Controller driver based on the SD Host Controller Standard
22 * Simplified Specification Version 1.00 (www.sdcard.com).
23 */
24
25 #include <sys/cdefs.h>
26 __KERNEL_RCSID(0, "$NetBSD: sdhc.c,v 1.86 2015/09/09 08:06:47 mlelstv Exp $");
27
28 #ifdef _KERNEL_OPT
29 #include "opt_sdmmc.h"
30 #endif
31
32 #include <sys/param.h>
33 #include <sys/device.h>
34 #include <sys/kernel.h>
35 #include <sys/malloc.h>
36 #include <sys/systm.h>
37 #include <sys/mutex.h>
38 #include <sys/condvar.h>
39 #include <sys/atomic.h>
40
41 #include <dev/sdmmc/sdhcreg.h>
42 #include <dev/sdmmc/sdhcvar.h>
43 #include <dev/sdmmc/sdmmcchip.h>
44 #include <dev/sdmmc/sdmmcreg.h>
45 #include <dev/sdmmc/sdmmcvar.h>
46
47 #ifdef SDHC_DEBUG
48 int sdhcdebug = 1;
49 #define DPRINTF(n,s) do { if ((n) <= sdhcdebug) printf s; } while (0)
50 void sdhc_dump_regs(struct sdhc_host *);
51 #else
52 #define DPRINTF(n,s) do {} while (0)
53 #endif
54
55 #define SDHC_COMMAND_TIMEOUT hz
56 #define SDHC_BUFFER_TIMEOUT hz
57 #define SDHC_TRANSFER_TIMEOUT hz
58 #define SDHC_DMA_TIMEOUT (hz*3)
59 #define SDHC_TUNING_TIMEOUT hz
60
61 struct sdhc_host {
62 struct sdhc_softc *sc; /* host controller device */
63
64 bus_space_tag_t iot; /* host register set tag */
65 bus_space_handle_t ioh; /* host register set handle */
66 bus_size_t ios; /* host register space size */
67 bus_dma_tag_t dmat; /* host DMA tag */
68
69 device_t sdmmc; /* generic SD/MMC device */
70
71 	u_int clkbase;			/* base clock frequency in kHz */
72 int maxblklen; /* maximum block length */
73 uint32_t ocr; /* OCR value from capabilities */
74
75 uint8_t regs[14]; /* host controller state */
76
77 uint16_t intr_status; /* soft interrupt status */
78 uint16_t intr_error_status; /* soft error status */
79 kmutex_t intr_lock;
80 kcondvar_t intr_cv;
81
82 callout_t tuning_timer;
83 int tuning_timing;
84 u_int tuning_timer_count;
85 u_int tuning_timer_pending;
86
87 int specver; /* spec. version */
88
89 uint32_t flags; /* flags for this host */
90 #define SHF_USE_DMA 0x0001
91 #define SHF_USE_4BIT_MODE 0x0002
92 #define SHF_USE_8BIT_MODE 0x0004
93 #define SHF_MODE_DMAEN 0x0008 /* needs SDHC_DMA_ENABLE in mode */
94 #define SHF_USE_ADMA2_32 0x0010
95 #define SHF_USE_ADMA2_64 0x0020
96 #define SHF_USE_ADMA2_MASK 0x0030
97
98 bus_dmamap_t adma_map;
99 bus_dma_segment_t adma_segs[1];
100 void *adma2;
101 };
102
103 #define HDEVNAME(hp) (device_xname((hp)->sc->sc_dev))
104
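/*
 * Register access helpers.  Controllers flagged SDHC_FLAG_32BIT_ACCESS
 * only tolerate 32-bit MMIO, so 8- and 16-bit registers are emulated:
 * reads fetch the enclosing aligned word (reg & -4) and shift the byte
 * or half-word of interest into place, and writes do a read-modify-write
 * of that word.  For example, a 1-byte read at offset 0x2b becomes a
 * 4-byte read at 0x28 shifted right by 24 bits.
 */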
105 static uint8_t
106 hread1(struct sdhc_host *hp, bus_size_t reg)
107 {
108
109 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_32BIT_ACCESS))
110 return bus_space_read_1(hp->iot, hp->ioh, reg);
111 return bus_space_read_4(hp->iot, hp->ioh, reg & -4) >> (8 * (reg & 3));
112 }
113
114 static uint16_t
115 hread2(struct sdhc_host *hp, bus_size_t reg)
116 {
117
118 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_32BIT_ACCESS))
119 return bus_space_read_2(hp->iot, hp->ioh, reg);
120 return bus_space_read_4(hp->iot, hp->ioh, reg & -4) >> (8 * (reg & 2));
121 }
122
123 #define HREAD1(hp, reg) hread1(hp, reg)
124 #define HREAD2(hp, reg) hread2(hp, reg)
125 #define HREAD4(hp, reg) \
126 (bus_space_read_4((hp)->iot, (hp)->ioh, (reg)))
127
128
129 static void
130 hwrite1(struct sdhc_host *hp, bus_size_t o, uint8_t val)
131 {
132
133 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_32BIT_ACCESS)) {
134 bus_space_write_1(hp->iot, hp->ioh, o, val);
135 } else {
136 const size_t shift = 8 * (o & 3);
137 o &= -4;
138 uint32_t tmp = bus_space_read_4(hp->iot, hp->ioh, o);
139 tmp = (val << shift) | (tmp & ~(0xff << shift));
140 bus_space_write_4(hp->iot, hp->ioh, o, tmp);
141 }
142 }
143
144 static void
145 hwrite2(struct sdhc_host *hp, bus_size_t o, uint16_t val)
146 {
147
148 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_32BIT_ACCESS)) {
149 bus_space_write_2(hp->iot, hp->ioh, o, val);
150 } else {
151 const size_t shift = 8 * (o & 2);
152 o &= -4;
153 uint32_t tmp = bus_space_read_4(hp->iot, hp->ioh, o);
154 tmp = (val << shift) | (tmp & ~(0xffff << shift));
155 bus_space_write_4(hp->iot, hp->ioh, o, tmp);
156 }
157 }
158
159 #define HWRITE1(hp, reg, val) hwrite1(hp, reg, val)
160 #define HWRITE2(hp, reg, val) hwrite2(hp, reg, val)
161 #define HWRITE4(hp, reg, val) \
162 bus_space_write_4((hp)->iot, (hp)->ioh, (reg), (val))
163
164 #define HCLR1(hp, reg, bits) \
165 do if (bits) HWRITE1((hp), (reg), HREAD1((hp), (reg)) & ~(bits)); while (0)
166 #define HCLR2(hp, reg, bits) \
167 do if (bits) HWRITE2((hp), (reg), HREAD2((hp), (reg)) & ~(bits)); while (0)
168 #define HCLR4(hp, reg, bits) \
169 do if (bits) HWRITE4((hp), (reg), HREAD4((hp), (reg)) & ~(bits)); while (0)
170 #define HSET1(hp, reg, bits) \
171 do if (bits) HWRITE1((hp), (reg), HREAD1((hp), (reg)) | (bits)); while (0)
172 #define HSET2(hp, reg, bits) \
173 do if (bits) HWRITE2((hp), (reg), HREAD2((hp), (reg)) | (bits)); while (0)
174 #define HSET4(hp, reg, bits) \
175 do if (bits) HWRITE4((hp), (reg), HREAD4((hp), (reg)) | (bits)); while (0)
176
177 static int sdhc_host_reset(sdmmc_chipset_handle_t);
178 static int sdhc_host_reset1(sdmmc_chipset_handle_t);
179 static uint32_t sdhc_host_ocr(sdmmc_chipset_handle_t);
180 static int sdhc_host_maxblklen(sdmmc_chipset_handle_t);
181 static int sdhc_card_detect(sdmmc_chipset_handle_t);
182 static int sdhc_write_protect(sdmmc_chipset_handle_t);
183 static int sdhc_bus_power(sdmmc_chipset_handle_t, uint32_t);
184 static int sdhc_bus_clock_ddr(sdmmc_chipset_handle_t, int, bool);
185 static int sdhc_bus_width(sdmmc_chipset_handle_t, int);
186 static int sdhc_bus_rod(sdmmc_chipset_handle_t, int);
187 static void sdhc_card_enable_intr(sdmmc_chipset_handle_t, int);
188 static void sdhc_card_intr_ack(sdmmc_chipset_handle_t);
189 static void sdhc_exec_command(sdmmc_chipset_handle_t,
190 struct sdmmc_command *);
191 static int sdhc_signal_voltage(sdmmc_chipset_handle_t, int);
192 static int sdhc_execute_tuning1(struct sdhc_host *, int);
193 static int sdhc_execute_tuning(sdmmc_chipset_handle_t, int);
194 static void sdhc_tuning_timer(void *);
195 static int sdhc_start_command(struct sdhc_host *, struct sdmmc_command *);
196 static int sdhc_wait_state(struct sdhc_host *, uint32_t, uint32_t);
197 static int sdhc_soft_reset(struct sdhc_host *, int);
198 static int sdhc_wait_intr(struct sdhc_host *, int, int);
199 static void sdhc_transfer_data(struct sdhc_host *, struct sdmmc_command *);
200 static int sdhc_transfer_data_dma(struct sdhc_host *, struct sdmmc_command *);
201 static int sdhc_transfer_data_pio(struct sdhc_host *, struct sdmmc_command *);
202 static void sdhc_read_data_pio(struct sdhc_host *, uint8_t *, u_int);
203 static void sdhc_write_data_pio(struct sdhc_host *, uint8_t *, u_int);
204 static void esdhc_read_data_pio(struct sdhc_host *, uint8_t *, u_int);
205 static void esdhc_write_data_pio(struct sdhc_host *, uint8_t *, u_int);
206
207 static struct sdmmc_chip_functions sdhc_functions = {
208 /* host controller reset */
209 .host_reset = sdhc_host_reset,
210
211 /* host controller capabilities */
212 .host_ocr = sdhc_host_ocr,
213 .host_maxblklen = sdhc_host_maxblklen,
214
215 /* card detection */
216 .card_detect = sdhc_card_detect,
217
218 /* write protect */
219 .write_protect = sdhc_write_protect,
220
221 /* bus power, clock frequency, width and ROD(OpenDrain/PushPull) */
222 .bus_power = sdhc_bus_power,
223 .bus_clock = NULL, /* see sdhc_bus_clock_ddr */
224 .bus_width = sdhc_bus_width,
225 .bus_rod = sdhc_bus_rod,
226
227 /* command execution */
228 .exec_command = sdhc_exec_command,
229
230 /* card interrupt */
231 .card_enable_intr = sdhc_card_enable_intr,
232 .card_intr_ack = sdhc_card_intr_ack,
233
234 /* UHS functions */
235 .signal_voltage = sdhc_signal_voltage,
236 .bus_clock_ddr = sdhc_bus_clock_ddr,
237 .execute_tuning = sdhc_execute_tuning,
238 };
239
240 static int
241 sdhc_cfprint(void *aux, const char *pnp)
242 {
243 const struct sdmmcbus_attach_args * const saa = aux;
244 const struct sdhc_host * const hp = saa->saa_sch;
245
246 if (pnp) {
247 aprint_normal("sdmmc at %s", pnp);
248 }
249 for (size_t host = 0; host < hp->sc->sc_nhosts; host++) {
250 if (hp->sc->sc_host[host] == hp) {
251 aprint_normal(" slot %zu", host);
252 }
253 }
254
255 return UNCONF;
256 }
257
258 /*
259 * Called by attachment driver. For each SD card slot there is one SD
260 * host controller standard register set. (1.3)
261 */
262 int
263 sdhc_host_found(struct sdhc_softc *sc, bus_space_tag_t iot,
264 bus_space_handle_t ioh, bus_size_t iosize)
265 {
266 struct sdmmcbus_attach_args saa;
267 struct sdhc_host *hp;
268 uint32_t caps, caps2;
269 uint16_t sdhcver;
270 int error;
271
272 /* Allocate one more host structure. */
273 hp = malloc(sizeof(struct sdhc_host), M_DEVBUF, M_WAITOK|M_ZERO);
274 if (hp == NULL) {
275 aprint_error_dev(sc->sc_dev,
276 "couldn't alloc memory (sdhc host)\n");
277 goto err1;
278 }
279 sc->sc_host[sc->sc_nhosts++] = hp;
280
281 /* Fill in the new host structure. */
282 hp->sc = sc;
283 hp->iot = iot;
284 hp->ioh = ioh;
285 hp->ios = iosize;
286 hp->dmat = sc->sc_dmat;
287
288 mutex_init(&hp->intr_lock, MUTEX_DEFAULT, IPL_SDMMC);
289 cv_init(&hp->intr_cv, "sdhcintr");
290 callout_init(&hp->tuning_timer, CALLOUT_MPSAFE);
291 callout_setfunc(&hp->tuning_timer, sdhc_tuning_timer, hp);
292
293 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED)) {
294 sdhcver = HREAD4(hp, SDHC_ESDHC_HOST_CTL_VERSION);
295 } else {
296 sdhcver = HREAD2(hp, SDHC_HOST_CTL_VERSION);
297 }
298 aprint_normal_dev(sc->sc_dev, "SDHC ");
299 hp->specver = SDHC_SPEC_VERSION(sdhcver);
300 switch (SDHC_SPEC_VERSION(sdhcver)) {
301 case SDHC_SPEC_VERS_100:
302 aprint_normal("1.0");
303 break;
304
305 case SDHC_SPEC_VERS_200:
306 aprint_normal("2.0");
307 break;
308
309 case SDHC_SPEC_VERS_300:
310 aprint_normal("3.0");
311 break;
312
313 case SDHC_SPEC_VERS_400:
314 aprint_normal("4.0");
315 break;
316
317 default:
318 aprint_normal("unknown version(0x%x)",
319 SDHC_SPEC_VERSION(sdhcver));
320 break;
321 }
322 aprint_normal(", rev %u", SDHC_VENDOR_VERSION(sdhcver));
323
324 /*
325 * Reset the host controller and enable interrupts.
326 */
327 (void)sdhc_host_reset(hp);
328
329 /* Determine host capabilities. */
330 if (ISSET(sc->sc_flags, SDHC_FLAG_HOSTCAPS)) {
331 caps = sc->sc_caps;
332 caps2 = sc->sc_caps2;
333 } else {
334 caps = sc->sc_caps = HREAD4(hp, SDHC_CAPABILITIES);
335 if (hp->specver >= SDHC_SPEC_VERS_300) {
336 caps2 = sc->sc_caps2 = HREAD4(hp, SDHC_CAPABILITIES2);
337 } else {
338 caps2 = sc->sc_caps2 = 0;
339 }
340 }
341
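	/*
	 * Decode the supported re-tuning mode; for Re-Tuning Mode 1 the
	 * timer field gives the period as 2^(n-1) seconds, with 0 and the
	 * reserved value 0xf both treated as "no timer".
	 */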
342 const u_int retuning_mode = (caps2 >> SDHC_RETUNING_MODES_SHIFT) &
343 SDHC_RETUNING_MODES_MASK;
344 if (retuning_mode == SDHC_RETUNING_MODE_1) {
345 hp->tuning_timer_count = (caps2 >> SDHC_TIMER_COUNT_SHIFT) &
346 SDHC_TIMER_COUNT_MASK;
347 if (hp->tuning_timer_count == 0xf)
348 hp->tuning_timer_count = 0;
349 if (hp->tuning_timer_count)
350 hp->tuning_timer_count =
351 1 << (hp->tuning_timer_count - 1);
352 }
353
354 /*
355 * Use DMA if the host system and the controller support it.
356 	 * Supports an integrated or external DMA engine, with or without
357 * SDHC_DMA_ENABLE in the command.
358 */
359 if (ISSET(sc->sc_flags, SDHC_FLAG_FORCE_DMA) ||
360 	    (ISSET(sc->sc_flags, SDHC_FLAG_USE_DMA) &&
361 	    ISSET(caps, SDHC_DMA_SUPPORT))) {
362 SET(hp->flags, SHF_USE_DMA);
363
364 if (ISSET(sc->sc_flags, SDHC_FLAG_USE_ADMA2) &&
365 ISSET(caps, SDHC_ADMA2_SUPP)) {
366 SET(hp->flags, SHF_MODE_DMAEN);
367 /*
368 * 64-bit mode was present in the 2.00 spec, removed
369 * from 3.00, and re-added in 4.00 with a different
370 * descriptor layout. We only support 2.00 and 3.00
371 * descriptors for now.
372 */
373 if (hp->specver == SDHC_SPEC_VERS_200 &&
374 ISSET(caps, SDHC_64BIT_SYS_BUS)) {
375 SET(hp->flags, SHF_USE_ADMA2_64);
376 aprint_normal(", 64-bit ADMA2");
377 } else {
378 SET(hp->flags, SHF_USE_ADMA2_32);
379 aprint_normal(", 32-bit ADMA2");
380 }
381 } else {
382 if (!ISSET(sc->sc_flags, SDHC_FLAG_EXTERNAL_DMA) ||
383 ISSET(sc->sc_flags, SDHC_FLAG_EXTDMA_DMAEN))
384 SET(hp->flags, SHF_MODE_DMAEN);
385 if (sc->sc_vendor_transfer_data_dma) {
386 aprint_normal(", platform DMA");
387 } else {
388 aprint_normal(", SDMA");
389 }
390 }
391 } else {
392 aprint_normal(", PIO");
393 }
394
395 /*
396 * Determine the base clock frequency. (2.2.24)
397 */
398 if (hp->specver >= SDHC_SPEC_VERS_300) {
399 hp->clkbase = SDHC_BASE_V3_FREQ_KHZ(caps);
400 } else {
401 hp->clkbase = SDHC_BASE_FREQ_KHZ(caps);
402 }
403 if (hp->clkbase == 0 ||
404 ISSET(sc->sc_flags, SDHC_FLAG_NO_CLKBASE)) {
405 if (sc->sc_clkbase == 0) {
406 /* The attachment driver must tell us. */
407 aprint_error_dev(sc->sc_dev,
408 "unknown base clock frequency\n");
409 goto err;
410 }
411 hp->clkbase = sc->sc_clkbase;
412 }
413 if (hp->clkbase < 10000 || hp->clkbase > 10000 * 256) {
414 /* SDHC 1.0 supports only 10-63 MHz. */
415 aprint_error_dev(sc->sc_dev,
416 "base clock frequency out of range: %u MHz\n",
417 hp->clkbase / 1000);
418 goto err;
419 }
420 aprint_normal(", %u kHz", hp->clkbase);
421
422 /*
423 * XXX Set the data timeout counter value according to
424 * capabilities. (2.2.15)
425 */
426 HWRITE1(hp, SDHC_TIMEOUT_CTL, SDHC_TIMEOUT_MAX);
427 #if 1
428 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED))
429 HWRITE4(hp, SDHC_NINTR_STATUS, SDHC_CMD_TIMEOUT_ERROR << 16);
430 #endif
431
432 if (ISSET(caps, SDHC_EMBEDDED_SLOT))
433 aprint_normal(", embedded slot");
434
435 /*
436 * Determine SD bus voltage levels supported by the controller.
437 */
438 aprint_normal(",");
439 if (ISSET(caps, SDHC_HIGH_SPEED_SUPP)) {
440 SET(hp->ocr, MMC_OCR_HCS);
441 aprint_normal(" HS");
442 }
443 if (ISSET(caps2, SDHC_SDR50_SUPP)) {
444 SET(hp->ocr, MMC_OCR_S18A);
445 aprint_normal(" SDR50");
446 }
447 if (ISSET(caps2, SDHC_DDR50_SUPP)) {
448 SET(hp->ocr, MMC_OCR_S18A);
449 aprint_normal(" DDR50");
450 }
451 if (ISSET(caps2, SDHC_SDR104_SUPP)) {
452 SET(hp->ocr, MMC_OCR_S18A);
453 aprint_normal(" SDR104 HS200");
454 }
455 if (ISSET(caps, SDHC_VOLTAGE_SUPP_1_8V)) {
456 SET(hp->ocr, MMC_OCR_1_7V_1_8V | MMC_OCR_1_8V_1_9V);
457 aprint_normal(" 1.8V");
458 }
459 if (ISSET(caps, SDHC_VOLTAGE_SUPP_3_0V)) {
460 SET(hp->ocr, MMC_OCR_2_9V_3_0V | MMC_OCR_3_0V_3_1V);
461 aprint_normal(" 3.0V");
462 }
463 if (ISSET(caps, SDHC_VOLTAGE_SUPP_3_3V)) {
464 SET(hp->ocr, MMC_OCR_3_2V_3_3V | MMC_OCR_3_3V_3_4V);
465 aprint_normal(" 3.3V");
466 }
467 if (hp->specver >= SDHC_SPEC_VERS_300) {
468 aprint_normal(", re-tuning mode %d", retuning_mode + 1);
469 if (hp->tuning_timer_count)
470 aprint_normal(" (%us timer)", hp->tuning_timer_count);
471 }
472
473 /*
474 * Determine the maximum block length supported by the host
475 * controller. (2.2.24)
476 */
477 switch((caps >> SDHC_MAX_BLK_LEN_SHIFT) & SDHC_MAX_BLK_LEN_MASK) {
478 case SDHC_MAX_BLK_LEN_512:
479 hp->maxblklen = 512;
480 break;
481
482 case SDHC_MAX_BLK_LEN_1024:
483 hp->maxblklen = 1024;
484 break;
485
486 case SDHC_MAX_BLK_LEN_2048:
487 hp->maxblklen = 2048;
488 break;
489
490 case SDHC_MAX_BLK_LEN_4096:
491 hp->maxblklen = 4096;
492 break;
493
494 default:
495 aprint_error_dev(sc->sc_dev, "max block length unknown\n");
496 goto err;
497 }
498 aprint_normal(", %u byte blocks", hp->maxblklen);
499 aprint_normal("\n");
500
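	/*
	 * Preallocate one page for the ADMA2 descriptor table.  The table
	 * is filled per command in sdhc_start_command() and terminated by
	 * an entry whose attribute word is zero; if any allocation step
	 * fails, the ADMA2 flags are cleared and the host falls back to
	 * SDMA or PIO.
	 */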
501 if (ISSET(hp->flags, SHF_USE_ADMA2_MASK)) {
502 int rseg;
503
504 /* Allocate ADMA2 descriptor memory */
505 error = bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE, PAGE_SIZE,
506 PAGE_SIZE, hp->adma_segs, 1, &rseg, BUS_DMA_WAITOK);
507 if (error) {
508 aprint_error_dev(sc->sc_dev,
509 "ADMA2 dmamem_alloc failed (%d)\n", error);
510 goto adma_done;
511 }
512 error = bus_dmamem_map(sc->sc_dmat, hp->adma_segs, rseg,
513 PAGE_SIZE, (void **)&hp->adma2, BUS_DMA_WAITOK);
514 if (error) {
515 aprint_error_dev(sc->sc_dev,
516 "ADMA2 dmamem_map failed (%d)\n", error);
517 goto adma_done;
518 }
519 error = bus_dmamap_create(sc->sc_dmat, PAGE_SIZE, 1, PAGE_SIZE,
520 0, BUS_DMA_WAITOK, &hp->adma_map);
521 if (error) {
522 aprint_error_dev(sc->sc_dev,
523 "ADMA2 dmamap_create failed (%d)\n", error);
524 goto adma_done;
525 }
526 error = bus_dmamap_load(sc->sc_dmat, hp->adma_map,
527 hp->adma2, PAGE_SIZE, NULL,
528 BUS_DMA_WAITOK|BUS_DMA_WRITE);
529 if (error) {
530 aprint_error_dev(sc->sc_dev,
531 "ADMA2 dmamap_load failed (%d)\n", error);
532 goto adma_done;
533 }
534
535 memset(hp->adma2, 0, PAGE_SIZE);
536
537 adma_done:
538 if (error)
539 CLR(hp->flags, SHF_USE_ADMA2_MASK);
540 }
541
542 /*
543 * Attach the generic SD/MMC bus driver. (The bus driver must
544 * not invoke any chipset functions before it is attached.)
545 */
546 memset(&saa, 0, sizeof(saa));
547 saa.saa_busname = "sdmmc";
548 saa.saa_sct = &sdhc_functions;
549 saa.saa_sch = hp;
550 saa.saa_dmat = hp->dmat;
551 saa.saa_clkmax = hp->clkbase;
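	/*
	 * Advertise the lowest SDCLK each divider flavour can reach: the
	 * CGM or DVS prescalers on enhanced controllers, a vendor-specific
	 * clock mask, the 10-bit divisor of spec version 3.0, or the
	 * classic 8-bit power-of-two divider (base/256).
	 */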
552 if (ISSET(sc->sc_flags, SDHC_FLAG_HAVE_CGM))
553 saa.saa_clkmin = hp->clkbase / 256 / 2046;
554 else if (ISSET(sc->sc_flags, SDHC_FLAG_HAVE_DVS))
555 saa.saa_clkmin = hp->clkbase / 256 / 16;
556 else if (hp->sc->sc_clkmsk != 0)
557 saa.saa_clkmin = hp->clkbase / (hp->sc->sc_clkmsk >>
558 (ffs(hp->sc->sc_clkmsk) - 1));
559 else if (hp->specver >= SDHC_SPEC_VERS_300)
560 saa.saa_clkmin = hp->clkbase / 0x3ff;
561 else
562 saa.saa_clkmin = hp->clkbase / 256;
563 saa.saa_caps = SMC_CAPS_4BIT_MODE|SMC_CAPS_AUTO_STOP;
564 if (ISSET(sc->sc_flags, SDHC_FLAG_8BIT_MODE))
565 saa.saa_caps |= SMC_CAPS_8BIT_MODE;
566 if (ISSET(caps, SDHC_HIGH_SPEED_SUPP))
567 saa.saa_caps |= SMC_CAPS_SD_HIGHSPEED;
568 if (ISSET(caps2, SDHC_SDR104_SUPP))
569 saa.saa_caps |= SMC_CAPS_UHS_SDR104 |
570 SMC_CAPS_UHS_SDR50 |
571 SMC_CAPS_MMC_HS200;
572 if (ISSET(caps2, SDHC_SDR50_SUPP))
573 saa.saa_caps |= SMC_CAPS_UHS_SDR50;
574 if (ISSET(caps2, SDHC_DDR50_SUPP))
575 saa.saa_caps |= SMC_CAPS_UHS_DDR50;
576 if (ISSET(hp->flags, SHF_USE_DMA)) {
577 saa.saa_caps |= SMC_CAPS_DMA;
578 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED))
579 saa.saa_caps |= SMC_CAPS_MULTI_SEG_DMA;
580 }
581 if (ISSET(sc->sc_flags, SDHC_FLAG_SINGLE_ONLY))
582 saa.saa_caps |= SMC_CAPS_SINGLE_ONLY;
583 if (ISSET(sc->sc_flags, SDHC_FLAG_POLL_CARD_DET))
584 saa.saa_caps |= SMC_CAPS_POLL_CARD_DET;
585 hp->sdmmc = config_found(sc->sc_dev, &saa, sdhc_cfprint);
586
587 return 0;
588
589 err:
590 callout_destroy(&hp->tuning_timer);
591 cv_destroy(&hp->intr_cv);
592 mutex_destroy(&hp->intr_lock);
593 free(hp, M_DEVBUF);
594 sc->sc_host[--sc->sc_nhosts] = NULL;
595 err1:
596 return 1;
597 }
598
599 int
600 sdhc_detach(struct sdhc_softc *sc, int flags)
601 {
602 struct sdhc_host *hp;
603 int rv = 0;
604
605 for (size_t n = 0; n < sc->sc_nhosts; n++) {
606 hp = sc->sc_host[n];
607 if (hp == NULL)
608 continue;
609 if (hp->sdmmc != NULL) {
610 rv = config_detach(hp->sdmmc, flags);
611 if (rv)
612 break;
613 hp->sdmmc = NULL;
614 }
615 /* disable interrupts */
616 if ((flags & DETACH_FORCE) == 0) {
617 mutex_enter(&hp->intr_lock);
618 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_32BIT_ACCESS)) {
619 HWRITE4(hp, SDHC_NINTR_SIGNAL_EN, 0);
620 } else {
621 HWRITE2(hp, SDHC_NINTR_SIGNAL_EN, 0);
622 }
623 sdhc_soft_reset(hp, SDHC_RESET_ALL);
624 mutex_exit(&hp->intr_lock);
625 }
626 callout_halt(&hp->tuning_timer, NULL);
627 callout_destroy(&hp->tuning_timer);
628 cv_destroy(&hp->intr_cv);
629 mutex_destroy(&hp->intr_lock);
630 if (hp->ios > 0) {
631 bus_space_unmap(hp->iot, hp->ioh, hp->ios);
632 hp->ios = 0;
633 }
634 if (ISSET(hp->flags, SHF_USE_ADMA2_MASK)) {
635 bus_dmamap_unload(sc->sc_dmat, hp->adma_map);
636 bus_dmamap_destroy(sc->sc_dmat, hp->adma_map);
637 bus_dmamem_unmap(sc->sc_dmat, hp->adma2, PAGE_SIZE);
638 bus_dmamem_free(sc->sc_dmat, hp->adma_segs, 1);
639 }
640 free(hp, M_DEVBUF);
641 sc->sc_host[n] = NULL;
642 }
643
644 return rv;
645 }
646
647 bool
648 sdhc_suspend(device_t dev, const pmf_qual_t *qual)
649 {
650 struct sdhc_softc *sc = device_private(dev);
651 struct sdhc_host *hp;
652 size_t i;
653
654 /* XXX poll for command completion or suspend command
655 * in progress */
656
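	/*
	 * Only the first sizeof(hp->regs) bytes of register space are
	 * saved; 32-bit-only hosts read whole words, keeping just the
	 * in-range bytes of the final partial word.
	 */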
657 /* Save the host controller state. */
658 for (size_t n = 0; n < sc->sc_nhosts; n++) {
659 hp = sc->sc_host[n];
660 if (ISSET(sc->sc_flags, SDHC_FLAG_32BIT_ACCESS)) {
661 for (i = 0; i < sizeof hp->regs; i += 4) {
662 uint32_t v = HREAD4(hp, i);
663 hp->regs[i + 0] = (v >> 0);
664 hp->regs[i + 1] = (v >> 8);
665 if (i + 3 < sizeof hp->regs) {
666 hp->regs[i + 2] = (v >> 16);
667 hp->regs[i + 3] = (v >> 24);
668 }
669 }
670 } else {
671 for (i = 0; i < sizeof hp->regs; i++) {
672 hp->regs[i] = HREAD1(hp, i);
673 }
674 }
675 }
676 return true;
677 }
678
679 bool
680 sdhc_resume(device_t dev, const pmf_qual_t *qual)
681 {
682 struct sdhc_softc *sc = device_private(dev);
683 struct sdhc_host *hp;
684 size_t i;
685
686 /* Restore the host controller state. */
687 for (size_t n = 0; n < sc->sc_nhosts; n++) {
688 hp = sc->sc_host[n];
689 (void)sdhc_host_reset(hp);
690 if (ISSET(sc->sc_flags, SDHC_FLAG_32BIT_ACCESS)) {
691 for (i = 0; i < sizeof hp->regs; i += 4) {
692 if (i + 3 < sizeof hp->regs) {
693 HWRITE4(hp, i,
694 (hp->regs[i + 0] << 0)
695 | (hp->regs[i + 1] << 8)
696 | (hp->regs[i + 2] << 16)
697 | (hp->regs[i + 3] << 24));
698 } else {
699 HWRITE4(hp, i,
700 (hp->regs[i + 0] << 0)
701 | (hp->regs[i + 1] << 8));
702 }
703 }
704 } else {
705 for (i = 0; i < sizeof hp->regs; i++) {
706 HWRITE1(hp, i, hp->regs[i]);
707 }
708 }
709 }
710 return true;
711 }
712
713 bool
714 sdhc_shutdown(device_t dev, int flags)
715 {
716 struct sdhc_softc *sc = device_private(dev);
717 struct sdhc_host *hp;
718
719 /* XXX chip locks up if we don't disable it before reboot. */
720 for (size_t i = 0; i < sc->sc_nhosts; i++) {
721 hp = sc->sc_host[i];
722 (void)sdhc_host_reset(hp);
723 }
724 return true;
725 }
726
727 /*
728 * Reset the host controller. Called during initialization, when
729 * cards are removed, upon resume, and during error recovery.
730 */
731 static int
732 sdhc_host_reset1(sdmmc_chipset_handle_t sch)
733 {
734 struct sdhc_host *hp = (struct sdhc_host *)sch;
735 uint32_t sdhcimask;
736 int error;
737
738 KASSERT(mutex_owned(&hp->intr_lock));
739
740 /* Disable all interrupts. */
741 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_32BIT_ACCESS)) {
742 HWRITE4(hp, SDHC_NINTR_SIGNAL_EN, 0);
743 } else {
744 HWRITE2(hp, SDHC_NINTR_SIGNAL_EN, 0);
745 }
746
747 /*
748 * Reset the entire host controller and wait up to 100ms for
749 * the controller to clear the reset bit.
750 */
751 error = sdhc_soft_reset(hp, SDHC_RESET_ALL);
752 if (error)
753 goto out;
754
755 /* Set data timeout counter value to max for now. */
756 HWRITE1(hp, SDHC_TIMEOUT_CTL, SDHC_TIMEOUT_MAX);
757 #if 1
758 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED))
759 HWRITE4(hp, SDHC_NINTR_STATUS, SDHC_CMD_TIMEOUT_ERROR << 16);
760 #endif
761
762 /* Enable interrupts. */
763 sdhcimask = SDHC_CARD_REMOVAL | SDHC_CARD_INSERTION |
764 SDHC_BUFFER_READ_READY | SDHC_BUFFER_WRITE_READY |
765 SDHC_DMA_INTERRUPT | SDHC_BLOCK_GAP_EVENT |
766 SDHC_TRANSFER_COMPLETE | SDHC_COMMAND_COMPLETE;
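	/*
	 * The buffer-ready bits are enabled as status sources but removed
	 * from the signal enables below; PIO transfers poll them and only
	 * enable the signal while actually waiting.  On 32-bit-only hosts
	 * the error-interrupt masks occupy the upper half of the combined
	 * registers, hence the shifted XORs.
	 */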
767 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_32BIT_ACCESS)) {
768 sdhcimask |= SDHC_EINTR_STATUS_MASK << 16;
769 HWRITE4(hp, SDHC_NINTR_STATUS_EN, sdhcimask);
770 sdhcimask ^=
771 (SDHC_EINTR_STATUS_MASK ^ SDHC_EINTR_SIGNAL_MASK) << 16;
772 sdhcimask ^= SDHC_BUFFER_READ_READY ^ SDHC_BUFFER_WRITE_READY;
773 HWRITE4(hp, SDHC_NINTR_SIGNAL_EN, sdhcimask);
774 } else {
775 HWRITE2(hp, SDHC_NINTR_STATUS_EN, sdhcimask);
776 HWRITE2(hp, SDHC_EINTR_STATUS_EN, SDHC_EINTR_STATUS_MASK);
777 sdhcimask ^= SDHC_BUFFER_READ_READY ^ SDHC_BUFFER_WRITE_READY;
778 HWRITE2(hp, SDHC_NINTR_SIGNAL_EN, sdhcimask);
779 HWRITE2(hp, SDHC_EINTR_SIGNAL_EN, SDHC_EINTR_SIGNAL_MASK);
780 }
781
782 out:
783 return error;
784 }
785
786 static int
787 sdhc_host_reset(sdmmc_chipset_handle_t sch)
788 {
789 struct sdhc_host *hp = (struct sdhc_host *)sch;
790 int error;
791
792 mutex_enter(&hp->intr_lock);
793 error = sdhc_host_reset1(sch);
794 mutex_exit(&hp->intr_lock);
795
796 return error;
797 }
798
799 static uint32_t
800 sdhc_host_ocr(sdmmc_chipset_handle_t sch)
801 {
802 struct sdhc_host *hp = (struct sdhc_host *)sch;
803
804 return hp->ocr;
805 }
806
807 static int
808 sdhc_host_maxblklen(sdmmc_chipset_handle_t sch)
809 {
810 struct sdhc_host *hp = (struct sdhc_host *)sch;
811
812 return hp->maxblklen;
813 }
814
815 /*
816 * Return non-zero if the card is currently inserted.
817 */
818 static int
819 sdhc_card_detect(sdmmc_chipset_handle_t sch)
820 {
821 struct sdhc_host *hp = (struct sdhc_host *)sch;
822 int r;
823
824 if (hp->sc->sc_vendor_card_detect)
825 return (*hp->sc->sc_vendor_card_detect)(hp->sc);
826
827 r = ISSET(HREAD4(hp, SDHC_PRESENT_STATE), SDHC_CARD_INSERTED);
828
829 return r ? 1 : 0;
830 }
831
832 /*
833 * Return non-zero if the card is currently write-protected.
834 */
835 static int
836 sdhc_write_protect(sdmmc_chipset_handle_t sch)
837 {
838 struct sdhc_host *hp = (struct sdhc_host *)sch;
839 int r;
840
841 if (hp->sc->sc_vendor_write_protect)
842 return (*hp->sc->sc_vendor_write_protect)(hp->sc);
843
844 r = ISSET(HREAD4(hp, SDHC_PRESENT_STATE), SDHC_WRITE_PROTECT_SWITCH);
845
846 return r ? 0 : 1;
847 }
848
849 /*
850 * Set or change SD bus voltage and enable or disable SD bus power.
851 * Return zero on success.
852 */
853 static int
854 sdhc_bus_power(sdmmc_chipset_handle_t sch, uint32_t ocr)
855 {
856 struct sdhc_host *hp = (struct sdhc_host *)sch;
857 uint8_t vdd;
858 int error = 0;
859 const uint32_t pcmask =
860 ~(SDHC_BUS_POWER | (SDHC_VOLTAGE_MASK << SDHC_VOLTAGE_SHIFT));
861
862 mutex_enter(&hp->intr_lock);
863
864 /*
865 * Disable bus power before voltage change.
866 */
867 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_32BIT_ACCESS)
868 && !ISSET(hp->sc->sc_flags, SDHC_FLAG_NO_PWR0))
869 HWRITE1(hp, SDHC_POWER_CTL, 0);
870
871 /* If power is disabled, reset the host and return now. */
872 if (ocr == 0) {
873 (void)sdhc_host_reset1(hp);
874 callout_halt(&hp->tuning_timer, &hp->intr_lock);
875 goto out;
876 }
877
878 /*
879 * Select the lowest voltage according to capabilities.
880 */
881 ocr &= hp->ocr;
882 if (ISSET(ocr, MMC_OCR_1_7V_1_8V|MMC_OCR_1_8V_1_9V)) {
883 vdd = SDHC_VOLTAGE_1_8V;
884 } else if (ISSET(ocr, MMC_OCR_2_9V_3_0V|MMC_OCR_3_0V_3_1V)) {
885 vdd = SDHC_VOLTAGE_3_0V;
886 } else if (ISSET(ocr, MMC_OCR_3_2V_3_3V|MMC_OCR_3_3V_3_4V)) {
887 vdd = SDHC_VOLTAGE_3_3V;
888 } else {
889 /* Unsupported voltage level requested. */
890 error = EINVAL;
891 goto out;
892 }
893
894 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED)) {
895 /*
896 * Enable bus power. Wait at least 1 ms (or 74 clocks) plus
897 * voltage ramp until power rises.
898 */
899
900 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_SINGLE_POWER_WRITE)) {
901 HWRITE1(hp, SDHC_POWER_CTL,
902 (vdd << SDHC_VOLTAGE_SHIFT) | SDHC_BUS_POWER);
903 } else {
904 HWRITE1(hp, SDHC_POWER_CTL,
905 HREAD1(hp, SDHC_POWER_CTL) & pcmask);
906 sdmmc_delay(1);
907 HWRITE1(hp, SDHC_POWER_CTL,
908 (vdd << SDHC_VOLTAGE_SHIFT));
909 sdmmc_delay(1);
910 HSET1(hp, SDHC_POWER_CTL, SDHC_BUS_POWER);
911 sdmmc_delay(10000);
912 }
913
914 /*
915 	 * The host system may not power the bus (e.g. due to a low
916 	 * battery). In that case, the host controller should clear the
917 * bus power bit.
918 */
919 if (!ISSET(HREAD1(hp, SDHC_POWER_CTL), SDHC_BUS_POWER)) {
920 error = ENXIO;
921 goto out;
922 }
923 }
924
925 out:
926 mutex_exit(&hp->intr_lock);
927
928 return error;
929 }
930
931 /*
932 * Return the smallest possible base clock frequency divisor value
933  * for the CLOCK_CTL register to produce `freq' (kHz).
934 */
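/*
 * As an illustration of the spec 3.0 path below, SDCLK = base / (2 * div)
 * with a 10-bit divisor: a 200000 kHz base clock and a 400 kHz target give
 * div = howmany(howmany(200000, 400), 2) = 250, i.e. 200000 / 500 = 400 kHz
 * exactly.
 */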
935 static bool
936 sdhc_clock_divisor(struct sdhc_host *hp, u_int freq, u_int *divp)
937 {
938 u_int div;
939
940 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_HAVE_CGM)) {
941 for (div = hp->clkbase / freq; div <= 0x3ff; div++) {
942 if ((hp->clkbase / div) <= freq) {
943 *divp = SDHC_SDCLK_CGM
944 | ((div & 0x300) << SDHC_SDCLK_XDIV_SHIFT)
945 | ((div & 0x0ff) << SDHC_SDCLK_DIV_SHIFT);
946 //freq = hp->clkbase / div;
947 return true;
948 }
949 }
950 /* No divisor found. */
951 return false;
952 }
953 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_HAVE_DVS)) {
954 u_int dvs = (hp->clkbase + freq - 1) / freq;
955 u_int roundup = dvs & 1;
956 for (dvs >>= 1, div = 1; div <= 256; div <<= 1, dvs >>= 1) {
957 if (dvs + roundup <= 16) {
958 dvs += roundup - 1;
959 *divp = (div << SDHC_SDCLK_DIV_SHIFT)
960 | (dvs << SDHC_SDCLK_DVS_SHIFT);
961 DPRINTF(2,
962 ("%s: divisor for freq %u is %u * %u\n",
963 HDEVNAME(hp), freq, div * 2, dvs + 1));
964 //freq = hp->clkbase / (div * 2) * (dvs + 1);
965 return true;
966 }
967 /*
968 * If we drop bits, we need to round up the divisor.
969 */
970 roundup |= dvs & 1;
971 }
972 /* No divisor found. */
973 return false;
974 }
975 if (hp->sc->sc_clkmsk != 0) {
976 div = howmany(hp->clkbase, freq);
977 if (div > (hp->sc->sc_clkmsk >> (ffs(hp->sc->sc_clkmsk) - 1)))
978 return false;
979 *divp = div << (ffs(hp->sc->sc_clkmsk) - 1);
980 //freq = hp->clkbase / div;
981 return true;
982 }
983 if (hp->specver >= SDHC_SPEC_VERS_300) {
984 div = howmany(hp->clkbase, freq);
985 div = div > 1 ? howmany(div, 2) : 0;
986 if (div > 0x3ff)
987 return false;
988 *divp = (((div >> 8) & SDHC_SDCLK_XDIV_MASK)
989 << SDHC_SDCLK_XDIV_SHIFT) |
990 (((div >> 0) & SDHC_SDCLK_DIV_MASK)
991 << SDHC_SDCLK_DIV_SHIFT);
992 //freq = hp->clkbase / (div ? div * 2 : 1);
993 return true;
994 } else {
995 for (div = 1; div <= 256; div *= 2) {
996 if ((hp->clkbase / div) <= freq) {
997 *divp = (div / 2) << SDHC_SDCLK_DIV_SHIFT;
998 //freq = hp->clkbase / div;
999 return true;
1000 }
1001 }
1002 /* No divisor found. */
1003 return false;
1004 }
1005 /* No divisor found. */
1006 return false;
1007 }
1008
1009 /*
1010 * Set or change SDCLK frequency or disable the SD clock.
1011 * Return zero on success.
1012 */
1013 static int
1014 sdhc_bus_clock_ddr(sdmmc_chipset_handle_t sch, int freq, bool ddr)
1015 {
1016 struct sdhc_host *hp = (struct sdhc_host *)sch;
1017 u_int div;
1018 u_int timo;
1019 	uint16_t reg;
1020 int error = 0;
1021 bool present __diagused;
1022
1023 mutex_enter(&hp->intr_lock);
1024
1025 #ifdef DIAGNOSTIC
1026 present = ISSET(HREAD4(hp, SDHC_PRESENT_STATE), SDHC_CMD_INHIBIT_MASK);
1027
1028 /* Must not stop the clock if commands are in progress. */
1029 if (present && sdhc_card_detect(hp)) {
1030 aprint_normal_dev(hp->sc->sc_dev,
1031 "%s: command in progress\n", __func__);
1032 }
1033 #endif
1034
1035 if (hp->sc->sc_vendor_bus_clock) {
1036 error = (*hp->sc->sc_vendor_bus_clock)(hp->sc, freq);
1037 if (error != 0)
1038 goto out;
1039 }
1040
1041 /*
1042 * Stop SD clock before changing the frequency.
1043 */
1044 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED)) {
1045 HCLR4(hp, SDHC_CLOCK_CTL, 0xfff8);
1046 if (freq == SDMMC_SDCLK_OFF) {
1047 HSET4(hp, SDHC_CLOCK_CTL, 0x80f0);
1048 goto out;
1049 }
1050 } else {
1051 HCLR2(hp, SDHC_CLOCK_CTL, SDHC_SDCLK_ENABLE);
1052 if (freq == SDMMC_SDCLK_OFF)
1053 goto out;
1054 }
1055
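	/*
	 * Pick the UHS-I timing from the target frequency: above 100 MHz
	 * select SDR104, above 50 MHz SDR50, above 25 MHz DDR50 or SDR25
	 * depending on `ddr', and anything above 400 kHz SDR12.
	 */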
1056 if (hp->specver >= SDHC_SPEC_VERS_300) {
1057 HCLR2(hp, SDHC_HOST_CTL2, SDHC_UHS_MODE_SELECT_MASK);
1058 if (freq > 100000) {
1059 HSET2(hp, SDHC_HOST_CTL2, SDHC_UHS_MODE_SELECT_SDR104);
1060 } else if (freq > 50000) {
1061 HSET2(hp, SDHC_HOST_CTL2, SDHC_UHS_MODE_SELECT_SDR50);
1062 } else if (freq > 25000) {
1063 if (ddr) {
1064 HSET2(hp, SDHC_HOST_CTL2,
1065 SDHC_UHS_MODE_SELECT_DDR50);
1066 } else {
1067 HSET2(hp, SDHC_HOST_CTL2,
1068 SDHC_UHS_MODE_SELECT_SDR25);
1069 }
1070 } else if (freq > 400) {
1071 HSET2(hp, SDHC_HOST_CTL2, SDHC_UHS_MODE_SELECT_SDR12);
1072 }
1073 }
1074
1075 /*
1076 	 * Slow down the Ricoh 5U823 controller, which isn't reliable
1077 	 * at a 100 MHz bus clock.
1078 */
1079 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_SLOW_SDR50)) {
1080 if (freq == 100000)
1081 --freq;
1082 }
1083
1084 /*
1085 * Set the minimum base clock frequency divisor.
1086 */
1087 if (!sdhc_clock_divisor(hp, freq, &div)) {
1088 /* Invalid base clock frequency or `freq' value. */
1089 aprint_error_dev(hp->sc->sc_dev,
1090 "Invalid bus clock %d kHz\n", freq);
1091 error = EINVAL;
1092 goto out;
1093 }
1094 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED)) {
1095 HWRITE4(hp, SDHC_CLOCK_CTL,
1096 div | (SDHC_TIMEOUT_MAX << 16));
1097 } else {
1098 reg = HREAD2(hp, SDHC_CLOCK_CTL);
1099 reg &= (SDHC_INTCLK_STABLE | SDHC_INTCLK_ENABLE);
1100 HWRITE2(hp, SDHC_CLOCK_CTL, reg | div);
1101 }
1102
1103 /*
1104 * Start internal clock. Wait 10ms for stabilization.
1105 */
1106 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED)) {
1107 sdmmc_delay(10000);
1108 HSET4(hp, SDHC_CLOCK_CTL,
1109 8 | SDHC_INTCLK_ENABLE | SDHC_INTCLK_STABLE);
1110 } else {
1111 HSET2(hp, SDHC_CLOCK_CTL, SDHC_INTCLK_ENABLE);
1112 for (timo = 1000; timo > 0; timo--) {
1113 if (ISSET(HREAD2(hp, SDHC_CLOCK_CTL),
1114 SDHC_INTCLK_STABLE))
1115 break;
1116 sdmmc_delay(10);
1117 }
1118 if (timo == 0) {
1119 error = ETIMEDOUT;
1120 DPRINTF(1,("%s: timeout\n", __func__));
1121 goto out;
1122 }
1123 }
1124
1125 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED)) {
1126 HSET1(hp, SDHC_SOFTWARE_RESET, SDHC_INIT_ACTIVE);
1127 /*
1128 * Sending 80 clocks at 400kHz takes 200us.
1129 * So delay for that time + slop and then
1130 * check a few times for completion.
1131 */
1132 sdmmc_delay(210);
1133 for (timo = 10; timo > 0; timo--) {
1134 if (!ISSET(HREAD1(hp, SDHC_SOFTWARE_RESET),
1135 SDHC_INIT_ACTIVE))
1136 break;
1137 sdmmc_delay(10);
1138 }
1139 DPRINTF(2,("%s: %u init spins\n", __func__, 10 - timo));
1140
1141 /*
1142 * Enable SD clock.
1143 */
1144 HSET4(hp, SDHC_CLOCK_CTL, SDHC_SDCLK_ENABLE);
1145 } else {
1146 /*
1147 * Enable SD clock.
1148 */
1149 HSET2(hp, SDHC_CLOCK_CTL, SDHC_SDCLK_ENABLE);
1150
1151 if (freq > 25000 &&
1152 !ISSET(hp->sc->sc_flags, SDHC_FLAG_NO_HS_BIT))
1153 HSET1(hp, SDHC_HOST_CTL, SDHC_HIGH_SPEED);
1154 else
1155 HCLR1(hp, SDHC_HOST_CTL, SDHC_HIGH_SPEED);
1156 }
1157
1158 out:
1159 mutex_exit(&hp->intr_lock);
1160
1161 return error;
1162 }
1163
1164 static int
1165 sdhc_bus_width(sdmmc_chipset_handle_t sch, int width)
1166 {
1167 struct sdhc_host *hp = (struct sdhc_host *)sch;
1168 int reg;
1169
1170 switch (width) {
1171 case 1:
1172 case 4:
1173 break;
1174
1175 case 8:
1176 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_8BIT_MODE))
1177 break;
1178 /* FALLTHROUGH */
1179 default:
1180 DPRINTF(0,("%s: unsupported bus width (%d)\n",
1181 HDEVNAME(hp), width));
1182 return 1;
1183 }
1184
1185 mutex_enter(&hp->intr_lock);
1186
1187 reg = HREAD1(hp, SDHC_HOST_CTL);
1188 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED)) {
1189 reg &= ~(SDHC_4BIT_MODE|SDHC_ESDHC_8BIT_MODE);
1190 if (width == 4)
1191 reg |= SDHC_4BIT_MODE;
1192 else if (width == 8)
1193 reg |= SDHC_ESDHC_8BIT_MODE;
1194 } else {
1195 reg &= ~SDHC_4BIT_MODE;
1196 if (hp->specver >= SDHC_SPEC_VERS_300) {
1197 reg &= ~SDHC_8BIT_MODE;
1198 }
1199 if (width == 4) {
1200 reg |= SDHC_4BIT_MODE;
1201 } else if (width == 8 && hp->specver >= SDHC_SPEC_VERS_300) {
1202 reg |= SDHC_8BIT_MODE;
1203 }
1204 }
1205 HWRITE1(hp, SDHC_HOST_CTL, reg);
1206
1207 mutex_exit(&hp->intr_lock);
1208
1209 return 0;
1210 }
1211
1212 static int
1213 sdhc_bus_rod(sdmmc_chipset_handle_t sch, int on)
1214 {
1215 struct sdhc_host *hp = (struct sdhc_host *)sch;
1216
1217 if (hp->sc->sc_vendor_rod)
1218 return (*hp->sc->sc_vendor_rod)(hp->sc, on);
1219
1220 return 0;
1221 }
1222
1223 static void
1224 sdhc_card_enable_intr(sdmmc_chipset_handle_t sch, int enable)
1225 {
1226 struct sdhc_host *hp = (struct sdhc_host *)sch;
1227
1228 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED)) {
1229 mutex_enter(&hp->intr_lock);
1230 if (enable) {
1231 HSET2(hp, SDHC_NINTR_STATUS_EN, SDHC_CARD_INTERRUPT);
1232 HSET2(hp, SDHC_NINTR_SIGNAL_EN, SDHC_CARD_INTERRUPT);
1233 } else {
1234 HCLR2(hp, SDHC_NINTR_SIGNAL_EN, SDHC_CARD_INTERRUPT);
1235 HCLR2(hp, SDHC_NINTR_STATUS_EN, SDHC_CARD_INTERRUPT);
1236 }
1237 mutex_exit(&hp->intr_lock);
1238 }
1239 }
1240
1241 static void
1242 sdhc_card_intr_ack(sdmmc_chipset_handle_t sch)
1243 {
1244 struct sdhc_host *hp = (struct sdhc_host *)sch;
1245
1246 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED)) {
1247 mutex_enter(&hp->intr_lock);
1248 HSET2(hp, SDHC_NINTR_STATUS_EN, SDHC_CARD_INTERRUPT);
1249 mutex_exit(&hp->intr_lock);
1250 }
1251 }
1252
1253 static int
1254 sdhc_signal_voltage(sdmmc_chipset_handle_t sch, int signal_voltage)
1255 {
1256 struct sdhc_host *hp = (struct sdhc_host *)sch;
1257
1258 mutex_enter(&hp->intr_lock);
1259 switch (signal_voltage) {
1260 case SDMMC_SIGNAL_VOLTAGE_180:
1261 HSET2(hp, SDHC_HOST_CTL2, SDHC_1_8V_SIGNAL_EN);
1262 break;
1263 case SDMMC_SIGNAL_VOLTAGE_330:
1264 HCLR2(hp, SDHC_HOST_CTL2, SDHC_1_8V_SIGNAL_EN);
1265 break;
1266 	default:
1267 		mutex_exit(&hp->intr_lock);
		return EINVAL;
1268 }
1269 mutex_exit(&hp->intr_lock);
1270
1271 return 0;
1272 }
1273
1274 /*
1275 * Sampling clock tuning procedure (UHS)
1276 */
1277 static int
1278 sdhc_execute_tuning1(struct sdhc_host *hp, int timing)
1279 {
1280 struct sdmmc_command cmd;
1281 uint8_t hostctl;
1282 int opcode, error, retry = 40;
1283
1284 KASSERT(mutex_owned(&hp->intr_lock));
1285
1286 hp->tuning_timing = timing;
1287
1288 switch (timing) {
1289 case SDMMC_TIMING_MMC_HS200:
1290 opcode = MMC_SEND_TUNING_BLOCK_HS200;
1291 break;
1292 case SDMMC_TIMING_UHS_SDR50:
1293 if (!ISSET(hp->sc->sc_caps2, SDHC_TUNING_SDR50))
1294 return 0;
1295 /* FALLTHROUGH */
1296 case SDMMC_TIMING_UHS_SDR104:
1297 opcode = MMC_SEND_TUNING_BLOCK;
1298 break;
1299 default:
1300 return EINVAL;
1301 }
1302
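	/*
	 * Standard tuning sequence: set SDHC_EXECUTE_TUNING and repeatedly
	 * read the tuning block (64 bytes, or 128 bytes in 8-bit mode)
	 * until the controller clears the bit or the retry budget runs
	 * out; success is indicated by SDHC_SAMPLING_CLOCK_SEL remaining
	 * set afterwards.
	 */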
1303 hostctl = HREAD1(hp, SDHC_HOST_CTL);
1304
1305 /* enable buffer read ready interrupt */
1306 HSET2(hp, SDHC_NINTR_SIGNAL_EN, SDHC_BUFFER_READ_READY);
1307 HSET2(hp, SDHC_NINTR_STATUS_EN, SDHC_BUFFER_READ_READY);
1308
1309 /* disable DMA */
1310 HCLR1(hp, SDHC_HOST_CTL, SDHC_DMA_SELECT);
1311
1312 /* reset tuning circuit */
1313 HCLR2(hp, SDHC_HOST_CTL2, SDHC_SAMPLING_CLOCK_SEL);
1314
1315 /* start of tuning */
1316 HWRITE2(hp, SDHC_HOST_CTL2, SDHC_EXECUTE_TUNING);
1317
1318 do {
1319 memset(&cmd, 0, sizeof(cmd));
1320 cmd.c_opcode = opcode;
1321 cmd.c_arg = 0;
1322 cmd.c_flags = SCF_CMD_ADTC | SCF_CMD_READ | SCF_RSP_R1;
1323 if (ISSET(hostctl, SDHC_8BIT_MODE)) {
1324 cmd.c_blklen = cmd.c_datalen = 128;
1325 } else {
1326 cmd.c_blklen = cmd.c_datalen = 64;
1327 }
1328
1329 error = sdhc_start_command(hp, &cmd);
1330 if (error)
1331 break;
1332
1333 if (!sdhc_wait_intr(hp, SDHC_BUFFER_READ_READY,
1334 SDHC_TUNING_TIMEOUT)) {
1335 break;
1336 }
1337
1338 delay(1000);
1339 } while (HREAD2(hp, SDHC_HOST_CTL2) & SDHC_EXECUTE_TUNING && --retry);
1340
1341 /* disable buffer read ready interrupt */
1342 HCLR2(hp, SDHC_NINTR_SIGNAL_EN, SDHC_BUFFER_READ_READY);
1343 HCLR2(hp, SDHC_NINTR_STATUS_EN, SDHC_BUFFER_READ_READY);
1344
1345 if (HREAD2(hp, SDHC_HOST_CTL2) & SDHC_EXECUTE_TUNING) {
1346 HCLR2(hp, SDHC_HOST_CTL2,
1347 SDHC_SAMPLING_CLOCK_SEL|SDHC_EXECUTE_TUNING);
1348 sdhc_soft_reset(hp, SDHC_RESET_DAT|SDHC_RESET_CMD);
1349 aprint_error_dev(hp->sc->sc_dev,
1350 "tuning did not complete, using fixed sampling clock\n");
1351 return EIO; /* tuning did not complete */
1352 }
1353
1354 if ((HREAD2(hp, SDHC_HOST_CTL2) & SDHC_SAMPLING_CLOCK_SEL) == 0) {
1355 HCLR2(hp, SDHC_HOST_CTL2,
1356 SDHC_SAMPLING_CLOCK_SEL|SDHC_EXECUTE_TUNING);
1357 sdhc_soft_reset(hp, SDHC_RESET_DAT|SDHC_RESET_CMD);
1358 aprint_error_dev(hp->sc->sc_dev,
1359 "tuning failed, using fixed sampling clock\n");
1360 return EIO; /* tuning failed */
1361 }
1362
1363 if (hp->tuning_timer_count) {
1364 callout_schedule(&hp->tuning_timer,
1365 hz * hp->tuning_timer_count);
1366 }
1367
1368 return 0; /* tuning completed */
1369 }
1370
1371 static int
1372 sdhc_execute_tuning(sdmmc_chipset_handle_t sch, int timing)
1373 {
1374 struct sdhc_host *hp = (struct sdhc_host *)sch;
1375 int error;
1376
1377 mutex_enter(&hp->intr_lock);
1378 error = sdhc_execute_tuning1(hp, timing);
1379 mutex_exit(&hp->intr_lock);
1380 return error;
1381 }
1382
1383 static void
1384 sdhc_tuning_timer(void *arg)
1385 {
1386 struct sdhc_host *hp = arg;
1387
1388 atomic_swap_uint(&hp->tuning_timer_pending, 1);
1389 }
1390
1391 static int
1392 sdhc_wait_state(struct sdhc_host *hp, uint32_t mask, uint32_t value)
1393 {
1394 uint32_t state;
1395 int timeout;
1396
1397 for (timeout = 10000; timeout > 0; timeout--) {
1398 if (((state = HREAD4(hp, SDHC_PRESENT_STATE)) & mask) == value)
1399 return 0;
1400 sdmmc_delay(10);
1401 }
1402 aprint_error_dev(hp->sc->sc_dev, "timeout waiting for mask %#x value %#x (state=%#x)\n",
1403 mask, value, state);
1404 return ETIMEDOUT;
1405 }
1406
1407 static void
1408 sdhc_exec_command(sdmmc_chipset_handle_t sch, struct sdmmc_command *cmd)
1409 {
1410 struct sdhc_host *hp = (struct sdhc_host *)sch;
1411 int error;
1412
1413 mutex_enter(&hp->intr_lock);
1414
1415 if (atomic_cas_uint(&hp->tuning_timer_pending, 1, 0) == 1) {
1416 (void)sdhc_execute_tuning1(hp, hp->tuning_timing);
1417 }
1418
1419 if (cmd->c_data && ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED)) {
1420 const uint16_t ready = SDHC_BUFFER_READ_READY | SDHC_BUFFER_WRITE_READY;
1421 if (ISSET(hp->flags, SHF_USE_DMA)) {
1422 HCLR2(hp, SDHC_NINTR_SIGNAL_EN, ready);
1423 HCLR2(hp, SDHC_NINTR_STATUS_EN, ready);
1424 } else {
1425 HSET2(hp, SDHC_NINTR_SIGNAL_EN, ready);
1426 HSET2(hp, SDHC_NINTR_STATUS_EN, ready);
1427 }
1428 }
1429
1430 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_NO_TIMEOUT)) {
1431 const uint16_t eintr = SDHC_CMD_TIMEOUT_ERROR;
1432 if (cmd->c_data != NULL) {
1433 HCLR2(hp, SDHC_EINTR_SIGNAL_EN, eintr);
1434 HCLR2(hp, SDHC_EINTR_STATUS_EN, eintr);
1435 } else {
1436 HSET2(hp, SDHC_EINTR_SIGNAL_EN, eintr);
1437 HSET2(hp, SDHC_EINTR_STATUS_EN, eintr);
1438 }
1439 }
1440
1441 /*
1442 * Start the MMC command, or mark `cmd' as failed and return.
1443 */
1444 error = sdhc_start_command(hp, cmd);
1445 if (error) {
1446 cmd->c_error = error;
1447 goto out;
1448 }
1449
1450 /*
1451 * Wait until the command phase is done, or until the command
1452 * is marked done for any other reason.
1453 */
1454 if (!sdhc_wait_intr(hp, SDHC_COMMAND_COMPLETE, SDHC_COMMAND_TIMEOUT)) {
1455 DPRINTF(1,("%s: timeout for command\n", __func__));
1456 cmd->c_error = ETIMEDOUT;
1457 goto out;
1458 }
1459
1460 /*
1461 * The host controller removes bits [0:7] from the response
1462 * data (CRC) and we pass the data up unchanged to the bus
1463 * driver (without padding).
1464 */
1465 if (cmd->c_error == 0 && ISSET(cmd->c_flags, SCF_RSP_PRESENT)) {
1466 cmd->c_resp[0] = HREAD4(hp, SDHC_RESPONSE + 0);
1467 if (ISSET(cmd->c_flags, SCF_RSP_136)) {
1468 cmd->c_resp[1] = HREAD4(hp, SDHC_RESPONSE + 4);
1469 cmd->c_resp[2] = HREAD4(hp, SDHC_RESPONSE + 8);
1470 cmd->c_resp[3] = HREAD4(hp, SDHC_RESPONSE + 12);
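			/*
			 * Controllers flagged SDHC_FLAG_RSP136_CRC leave the
			 * CRC byte in the 136-bit response, so shift the
			 * words down 8 bits to match the layout above.
			 */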
1471 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_RSP136_CRC)) {
1472 cmd->c_resp[0] = (cmd->c_resp[0] >> 8) |
1473 (cmd->c_resp[1] << 24);
1474 cmd->c_resp[1] = (cmd->c_resp[1] >> 8) |
1475 (cmd->c_resp[2] << 24);
1476 cmd->c_resp[2] = (cmd->c_resp[2] >> 8) |
1477 (cmd->c_resp[3] << 24);
1478 cmd->c_resp[3] = (cmd->c_resp[3] >> 8);
1479 }
1480 }
1481 }
1482 DPRINTF(1,("%s: resp = %08x\n", HDEVNAME(hp), cmd->c_resp[0]));
1483
1484 /*
1485 * If the command has data to transfer in any direction,
1486 * execute the transfer now.
1487 */
1488 if (cmd->c_error == 0 && cmd->c_data != NULL)
1489 sdhc_transfer_data(hp, cmd);
1490 else if (ISSET(cmd->c_flags, SCF_RSP_BSY)) {
1491 if (!sdhc_wait_intr(hp, SDHC_TRANSFER_COMPLETE, hz * 10)) {
1492 DPRINTF(1,("%s: sdhc_exec_command: RSP_BSY\n",
1493 HDEVNAME(hp)));
1494 cmd->c_error = ETIMEDOUT;
1495 goto out;
1496 }
1497 }
1498
1499 out:
1500 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED)
1501 && !ISSET(hp->sc->sc_flags, SDHC_FLAG_NO_LED_ON)) {
1502 /* Turn off the LED. */
1503 HCLR1(hp, SDHC_HOST_CTL, SDHC_LED_ON);
1504 }
1505 SET(cmd->c_flags, SCF_ITSDONE);
1506
1507 mutex_exit(&hp->intr_lock);
1508
1509 DPRINTF(1,("%s: cmd %d %s (flags=%08x error=%d)\n", HDEVNAME(hp),
1510 cmd->c_opcode, (cmd->c_error == 0) ? "done" : "abort",
1511 cmd->c_flags, cmd->c_error));
1512 }
1513
1514 static int
1515 sdhc_start_command(struct sdhc_host *hp, struct sdmmc_command *cmd)
1516 {
1517 struct sdhc_softc * const sc = hp->sc;
1518 uint16_t blksize = 0;
1519 uint16_t blkcount = 0;
1520 uint16_t mode;
1521 uint16_t command;
1522 uint32_t pmask;
1523 int error;
1524
1525 KASSERT(mutex_owned(&hp->intr_lock));
1526
1527 DPRINTF(1,("%s: start cmd %d arg=%08x data=%p dlen=%d flags=%08x, status=%#x\n",
1528 HDEVNAME(hp), cmd->c_opcode, cmd->c_arg, cmd->c_data,
1529 cmd->c_datalen, cmd->c_flags, HREAD4(hp, SDHC_NINTR_STATUS)));
1530
1531 /*
1532 * The maximum block length for commands should be the minimum
1533 * of the host buffer size and the card buffer size. (1.7.2)
1534 */
1535
1536 /* Fragment the data into proper blocks. */
1537 if (cmd->c_datalen > 0) {
1538 blksize = MIN(cmd->c_datalen, cmd->c_blklen);
1539 blkcount = cmd->c_datalen / blksize;
1540 if (cmd->c_datalen % blksize > 0) {
1541 /* XXX: Split this command. (1.7.4) */
1542 aprint_error_dev(sc->sc_dev,
1543 "data not a multiple of %u bytes\n", blksize);
1544 return EINVAL;
1545 }
1546 }
1547
1548 /* Check limit imposed by 9-bit block count. (1.7.2) */
1549 if (blkcount > SDHC_BLOCK_COUNT_MAX) {
1550 aprint_error_dev(sc->sc_dev, "too much data\n");
1551 return EINVAL;
1552 }
1553
1554 /* Prepare transfer mode register value. (2.2.5) */
1555 mode = SDHC_BLOCK_COUNT_ENABLE;
1556 if (ISSET(cmd->c_flags, SCF_CMD_READ))
1557 mode |= SDHC_READ_MODE;
1558 if (blkcount > 1) {
1559 mode |= SDHC_MULTI_BLOCK_MODE;
1560 /* XXX only for memory commands? */
1561 mode |= SDHC_AUTO_CMD12_ENABLE;
1562 }
1563 if (cmd->c_dmamap != NULL && cmd->c_datalen > 0 &&
1564 ISSET(hp->flags, SHF_MODE_DMAEN)) {
1565 mode |= SDHC_DMA_ENABLE;
1566 }
1567
1568 /*
1569 * Prepare command register value. (2.2.6)
1570 */
1571 command = (cmd->c_opcode & SDHC_COMMAND_INDEX_MASK) << SDHC_COMMAND_INDEX_SHIFT;
1572
1573 if (ISSET(cmd->c_flags, SCF_RSP_CRC))
1574 command |= SDHC_CRC_CHECK_ENABLE;
1575 if (ISSET(cmd->c_flags, SCF_RSP_IDX))
1576 command |= SDHC_INDEX_CHECK_ENABLE;
1577 if (cmd->c_datalen > 0)
1578 command |= SDHC_DATA_PRESENT_SELECT;
1579
1580 if (!ISSET(cmd->c_flags, SCF_RSP_PRESENT))
1581 command |= SDHC_NO_RESPONSE;
1582 else if (ISSET(cmd->c_flags, SCF_RSP_136))
1583 command |= SDHC_RESP_LEN_136;
1584 else if (ISSET(cmd->c_flags, SCF_RSP_BSY))
1585 command |= SDHC_RESP_LEN_48_CHK_BUSY;
1586 else
1587 command |= SDHC_RESP_LEN_48;
1588
1589 /* Wait until command and optionally data inhibit bits are clear. (1.5) */
1590 pmask = SDHC_CMD_INHIBIT_CMD;
1591 if (cmd->c_flags & SCF_CMD_ADTC)
1592 pmask |= SDHC_CMD_INHIBIT_DAT;
1593 error = sdhc_wait_state(hp, pmask, 0);
1594 if (error) {
1595 (void) sdhc_soft_reset(hp, SDHC_RESET_DAT|SDHC_RESET_CMD);
1596 device_printf(sc->sc_dev, "command or data phase inhibited\n");
1597 return error;
1598 }
1599
1600 DPRINTF(1,("%s: writing cmd: blksize=%d blkcnt=%d mode=%04x cmd=%04x\n",
1601 HDEVNAME(hp), blksize, blkcount, mode, command));
1602
1603 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED)) {
1604 blksize |= (MAX(0, PAGE_SHIFT - 12) & SDHC_DMA_BOUNDARY_MASK) <<
1605 SDHC_DMA_BOUNDARY_SHIFT; /* PAGE_SIZE DMA boundary */
1606 }
1607
1608 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED)) {
1609 /* Alert the user not to remove the card. */
1610 HSET1(hp, SDHC_HOST_CTL, SDHC_LED_ON);
1611 }
1612
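	/*
	 * Program the DMA engine.  For ADMA2, one descriptor is built per
	 * DMA segment (a length field of 0 encodes a full 64 KiB segment)
	 * and the last entry carries the END attribute; the table lives in
	 * the page mapped by adma_map.  For plain SDMA only the first
	 * segment's address is written; boundary interrupts advance it in
	 * sdhc_transfer_data_dma().
	 */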
1613 /* Set DMA start address. */
1614 if (ISSET(hp->flags, SHF_USE_ADMA2_MASK) && cmd->c_data != NULL) {
1615 for (int seg = 0; seg < cmd->c_dmamap->dm_nsegs; seg++) {
1616 bus_addr_t paddr =
1617 cmd->c_dmamap->dm_segs[seg].ds_addr;
1618 uint16_t len =
1619 cmd->c_dmamap->dm_segs[seg].ds_len == 65536 ?
1620 0 : cmd->c_dmamap->dm_segs[seg].ds_len;
1621 uint16_t attr =
1622 SDHC_ADMA2_VALID | SDHC_ADMA2_ACT_TRANS;
1623 if (seg == cmd->c_dmamap->dm_nsegs - 1) {
1624 attr |= SDHC_ADMA2_END;
1625 }
1626 if (ISSET(hp->flags, SHF_USE_ADMA2_32)) {
1627 struct sdhc_adma2_descriptor32 *desc =
1628 hp->adma2;
1629 desc[seg].attribute = htole16(attr);
1630 desc[seg].length = htole16(len);
1631 desc[seg].address = htole32(paddr);
1632 } else {
1633 struct sdhc_adma2_descriptor64 *desc =
1634 hp->adma2;
1635 desc[seg].attribute = htole16(attr);
1636 desc[seg].length = htole16(len);
1637 desc[seg].address = htole32(paddr & 0xffffffff);
1638 desc[seg].address_hi = htole32(
1639 (uint64_t)paddr >> 32);
1640 }
1641 }
1642 if (ISSET(hp->flags, SHF_USE_ADMA2_32)) {
1643 struct sdhc_adma2_descriptor32 *desc = hp->adma2;
1644 desc[cmd->c_dmamap->dm_nsegs].attribute = htole16(0);
1645 } else {
1646 struct sdhc_adma2_descriptor64 *desc = hp->adma2;
1647 desc[cmd->c_dmamap->dm_nsegs].attribute = htole16(0);
1648 }
1649 bus_dmamap_sync(sc->sc_dmat, hp->adma_map, 0, PAGE_SIZE,
1650 BUS_DMASYNC_PREWRITE);
1651 HCLR1(hp, SDHC_HOST_CTL, SDHC_DMA_SELECT);
1652 HSET1(hp, SDHC_HOST_CTL, SDHC_DMA_SELECT_ADMA2);
1653
1654 const bus_addr_t desc_addr = hp->adma_map->dm_segs[0].ds_addr;
1655
1656 HWRITE4(hp, SDHC_ADMA_SYSTEM_ADDR, desc_addr & 0xffffffff);
1657 if (ISSET(hp->flags, SHF_USE_ADMA2_64)) {
1658 HWRITE4(hp, SDHC_ADMA_SYSTEM_ADDR + 4,
1659 (uint64_t)desc_addr >> 32);
1660 }
1661 } else if (ISSET(mode, SDHC_DMA_ENABLE) &&
1662 !ISSET(sc->sc_flags, SDHC_FLAG_EXTERNAL_DMA)) {
1663 HWRITE4(hp, SDHC_DMA_ADDR, cmd->c_dmamap->dm_segs[0].ds_addr);
1664 }
1665
1666 /*
1667 * Start a CPU data transfer. Writing to the high order byte
1668 * of the SDHC_COMMAND register triggers the SD command. (1.5)
1669 */
1670 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_32BIT_ACCESS)) {
1671 HWRITE4(hp, SDHC_BLOCK_SIZE, blksize | (blkcount << 16));
1672 HWRITE4(hp, SDHC_ARGUMENT, cmd->c_arg);
1673 HWRITE4(hp, SDHC_TRANSFER_MODE, mode | (command << 16));
1674 } else {
1675 HWRITE2(hp, SDHC_BLOCK_SIZE, blksize);
1676 HWRITE2(hp, SDHC_BLOCK_COUNT, blkcount);
1677 HWRITE4(hp, SDHC_ARGUMENT, cmd->c_arg);
1678 HWRITE2(hp, SDHC_TRANSFER_MODE, mode);
1679 HWRITE2(hp, SDHC_COMMAND, command);
1680 }
1681
1682 return 0;
1683 }
1684
1685 static void
1686 sdhc_transfer_data(struct sdhc_host *hp, struct sdmmc_command *cmd)
1687 {
1688 struct sdhc_softc *sc = hp->sc;
1689 int error;
1690
1691 KASSERT(mutex_owned(&hp->intr_lock));
1692
1693 DPRINTF(1,("%s: data transfer: resp=%08x datalen=%u\n", HDEVNAME(hp),
1694 MMC_R1(cmd->c_resp), cmd->c_datalen));
1695
1696 #ifdef SDHC_DEBUG
1697 /* XXX I forgot why I wanted to know when this happens :-( */
1698 if ((cmd->c_opcode == 52 || cmd->c_opcode == 53) &&
1699 ISSET(MMC_R1(cmd->c_resp), 0xcb00)) {
1700 aprint_error_dev(hp->sc->sc_dev,
1701 "CMD52/53 error response flags %#x\n",
1702 MMC_R1(cmd->c_resp) & 0xff00);
1703 }
1704 #endif
1705
1706 if (cmd->c_dmamap != NULL) {
1707 if (hp->sc->sc_vendor_transfer_data_dma != NULL) {
1708 error = hp->sc->sc_vendor_transfer_data_dma(sc, cmd);
1709 if (error == 0 && !sdhc_wait_intr(hp,
1710 SDHC_TRANSFER_COMPLETE, SDHC_DMA_TIMEOUT)) {
1711 DPRINTF(1,("%s: timeout\n", __func__));
1712 error = ETIMEDOUT;
1713 }
1714 } else {
1715 error = sdhc_transfer_data_dma(hp, cmd);
1716 }
1717 } else
1718 error = sdhc_transfer_data_pio(hp, cmd);
1719 if (error)
1720 cmd->c_error = error;
1721 SET(cmd->c_flags, SCF_ITSDONE);
1722
1723 DPRINTF(1,("%s: data transfer done (error=%d)\n",
1724 HDEVNAME(hp), cmd->c_error));
1725 }
1726
1727 static int
1728 sdhc_transfer_data_dma(struct sdhc_host *hp, struct sdmmc_command *cmd)
1729 {
1730 bus_dma_segment_t *dm_segs = cmd->c_dmamap->dm_segs;
1731 bus_addr_t posaddr;
1732 bus_addr_t segaddr;
1733 bus_size_t seglen;
1734 u_int seg = 0;
1735 int error = 0;
1736 int status;
1737
1738 KASSERT(mutex_owned(&hp->intr_lock));
1739 KASSERT(HREAD2(hp, SDHC_NINTR_STATUS_EN) & SDHC_DMA_INTERRUPT);
1740 KASSERT(HREAD2(hp, SDHC_NINTR_SIGNAL_EN) & SDHC_DMA_INTERRUPT);
1741 KASSERT(HREAD2(hp, SDHC_NINTR_STATUS_EN) & SDHC_TRANSFER_COMPLETE);
1742 KASSERT(HREAD2(hp, SDHC_NINTR_SIGNAL_EN) & SDHC_TRANSFER_COMPLETE);
1743
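	/*
	 * With SDMA the controller raises SDHC_DMA_INTERRUPT whenever the
	 * transfer crosses the programmed boundary and pauses until the
	 * address register is rewritten; the loop below restarts it within
	 * the current segment or advances to the next one.  ADMA2 transfers
	 * need no such help, so their DMA interrupts are ignored.
	 */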
1744 for (;;) {
1745 status = sdhc_wait_intr(hp,
1746 SDHC_DMA_INTERRUPT|SDHC_TRANSFER_COMPLETE,
1747 SDHC_DMA_TIMEOUT);
1748
1749 if (status & SDHC_TRANSFER_COMPLETE) {
1750 break;
1751 }
1752 if (!status) {
1753 DPRINTF(1,("%s: timeout\n", __func__));
1754 error = ETIMEDOUT;
1755 break;
1756 }
1757
1758 if (ISSET(hp->flags, SHF_USE_ADMA2_MASK)) {
1759 continue;
1760 }
1761
1762 if ((status & SDHC_DMA_INTERRUPT) == 0) {
1763 continue;
1764 }
1765
1766 /* DMA Interrupt (boundary crossing) */
1767
1768 segaddr = dm_segs[seg].ds_addr;
1769 seglen = dm_segs[seg].ds_len;
1770 posaddr = HREAD4(hp, SDHC_DMA_ADDR);
1771
1772 if ((seg == (cmd->c_dmamap->dm_nsegs-1)) && (posaddr == (segaddr + seglen))) {
1773 continue;
1774 }
1775 if ((posaddr >= segaddr) && (posaddr < (segaddr + seglen)))
1776 HWRITE4(hp, SDHC_DMA_ADDR, posaddr);
1777 else if ((posaddr >= segaddr) && (posaddr == (segaddr + seglen)) && (seg + 1) < cmd->c_dmamap->dm_nsegs)
1778 HWRITE4(hp, SDHC_DMA_ADDR, dm_segs[++seg].ds_addr);
1779 KASSERT(seg < cmd->c_dmamap->dm_nsegs);
1780 }
1781
1782 if (ISSET(hp->flags, SHF_USE_ADMA2_MASK)) {
1783 bus_dmamap_sync(hp->sc->sc_dmat, hp->adma_map, 0,
1784 PAGE_SIZE, BUS_DMASYNC_POSTWRITE);
1785 }
1786
1787 return error;
1788 }
1789
1790 static int
1791 sdhc_transfer_data_pio(struct sdhc_host *hp, struct sdmmc_command *cmd)
1792 {
1793 uint8_t *data = cmd->c_data;
1794 void (*pio_func)(struct sdhc_host *, uint8_t *, u_int);
1795 u_int len, datalen;
1796 u_int imask;
1797 u_int pmask;
1798 int error = 0;
1799
1800 KASSERT(mutex_owned(&hp->intr_lock));
1801
1802 if (ISSET(cmd->c_flags, SCF_CMD_READ)) {
1803 imask = SDHC_BUFFER_READ_READY;
1804 pmask = SDHC_BUFFER_READ_ENABLE;
1805 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED)) {
1806 pio_func = esdhc_read_data_pio;
1807 } else {
1808 pio_func = sdhc_read_data_pio;
1809 }
1810 } else {
1811 imask = SDHC_BUFFER_WRITE_READY;
1812 pmask = SDHC_BUFFER_WRITE_ENABLE;
1813 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED)) {
1814 pio_func = esdhc_write_data_pio;
1815 } else {
1816 pio_func = sdhc_write_data_pio;
1817 }
1818 }
1819 datalen = cmd->c_datalen;
1820
1821 KASSERT(mutex_owned(&hp->intr_lock));
1822 KASSERT(HREAD2(hp, SDHC_NINTR_STATUS_EN) & imask);
1823 KASSERT(HREAD2(hp, SDHC_NINTR_STATUS_EN) & SDHC_TRANSFER_COMPLETE);
1824 KASSERT(HREAD2(hp, SDHC_NINTR_SIGNAL_EN) & SDHC_TRANSFER_COMPLETE);
1825
1826 while (datalen > 0) {
1827 if (!ISSET(HREAD4(hp, SDHC_PRESENT_STATE), imask)) {
1828 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_32BIT_ACCESS)) {
1829 HSET4(hp, SDHC_NINTR_SIGNAL_EN, imask);
1830 } else {
1831 HSET2(hp, SDHC_NINTR_SIGNAL_EN, imask);
1832 }
1833 if (!sdhc_wait_intr(hp, imask, SDHC_BUFFER_TIMEOUT)) {
1834 DPRINTF(1,("%s: timeout\n", __func__));
1835 error = ETIMEDOUT;
1836 break;
1837 }
1838
1839 error = sdhc_wait_state(hp, pmask, pmask);
1840 if (error)
1841 break;
1842 }
1843
1844 len = MIN(datalen, cmd->c_blklen);
1845 (*pio_func)(hp, data, len);
1846 DPRINTF(2,("%s: pio data transfer %u @ %p\n",
1847 HDEVNAME(hp), len, data));
1848
1849 data += len;
1850 datalen -= len;
1851 }
1852
1853 if (error == 0 && !sdhc_wait_intr(hp, SDHC_TRANSFER_COMPLETE,
1854 SDHC_TRANSFER_TIMEOUT)) {
1855 DPRINTF(1,("%s: timeout for transfer\n", __func__));
1856 error = ETIMEDOUT;
1857 }
1858
1859 return error;
1860 }
1861
1862 static void
1863 sdhc_read_data_pio(struct sdhc_host *hp, uint8_t *data, u_int datalen)
1864 {
1865
1866 if (((__uintptr_t)data & 3) == 0) {
1867 while (datalen > 3) {
1868 *(uint32_t *)data = le32toh(HREAD4(hp, SDHC_DATA));
1869 data += 4;
1870 datalen -= 4;
1871 }
1872 if (datalen > 1) {
1873 *(uint16_t *)data = le16toh(HREAD2(hp, SDHC_DATA));
1874 data += 2;
1875 datalen -= 2;
1876 }
1877 if (datalen > 0) {
1878 *data = HREAD1(hp, SDHC_DATA);
1879 data += 1;
1880 datalen -= 1;
1881 }
1882 } else if (((__uintptr_t)data & 1) == 0) {
1883 while (datalen > 1) {
1884 *(uint16_t *)data = le16toh(HREAD2(hp, SDHC_DATA));
1885 data += 2;
1886 datalen -= 2;
1887 }
1888 if (datalen > 0) {
1889 *data = HREAD1(hp, SDHC_DATA);
1890 data += 1;
1891 datalen -= 1;
1892 }
1893 } else {
1894 while (datalen > 0) {
1895 *data = HREAD1(hp, SDHC_DATA);
1896 data += 1;
1897 datalen -= 1;
1898 }
1899 }
1900 }
1901
1902 static void
1903 sdhc_write_data_pio(struct sdhc_host *hp, uint8_t *data, u_int datalen)
1904 {
1905
1906 if (((__uintptr_t)data & 3) == 0) {
1907 while (datalen > 3) {
1908 HWRITE4(hp, SDHC_DATA, htole32(*(uint32_t *)data));
1909 data += 4;
1910 datalen -= 4;
1911 }
1912 if (datalen > 1) {
1913 HWRITE2(hp, SDHC_DATA, htole16(*(uint16_t *)data));
1914 data += 2;
1915 datalen -= 2;
1916 }
1917 if (datalen > 0) {
1918 HWRITE1(hp, SDHC_DATA, *data);
1919 data += 1;
1920 datalen -= 1;
1921 }
1922 } else if (((__uintptr_t)data & 1) == 0) {
1923 while (datalen > 1) {
1924 HWRITE2(hp, SDHC_DATA, htole16(*(uint16_t *)data));
1925 data += 2;
1926 datalen -= 2;
1927 }
1928 if (datalen > 0) {
1929 HWRITE1(hp, SDHC_DATA, *data);
1930 data += 1;
1931 datalen -= 1;
1932 }
1933 } else {
1934 while (datalen > 0) {
1935 HWRITE1(hp, SDHC_DATA, *data);
1936 data += 1;
1937 datalen -= 1;
1938 }
1939 }
1940 }
1941
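/*
 * PIO read for "enhanced" (eSDHC-style) controllers.  The controller
 * exposes a read watermark; after draining that many 32-bit words we
 * pause briefly so the FIFO can refill, and we stop as soon as the
 * transfer is reported complete.  A trailing partial word is unpacked
 * byte by byte from one final 32-bit read.
 */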
1942 static void
1943 esdhc_read_data_pio(struct sdhc_host *hp, uint8_t *data, u_int datalen)
1944 {
1945 uint16_t status = HREAD2(hp, SDHC_NINTR_STATUS);
1946 uint32_t v;
1947
1948 const size_t watermark = (HREAD4(hp, SDHC_WATERMARK_LEVEL) >> SDHC_WATERMARK_READ_SHIFT) & SDHC_WATERMARK_READ_MASK;
1949 size_t count = 0;
1950
1951 while (datalen > 3 && !ISSET(status, SDHC_TRANSFER_COMPLETE)) {
1952 if (count == 0) {
1953 /*
1954 * If we've drained "watermark" words, we need to wait
1955 * a little bit so the read FIFO can refill.
1956 */
1957 sdmmc_delay(10);
1958 count = watermark;
1959 }
1960 v = HREAD4(hp, SDHC_DATA);
1961 v = le32toh(v);
1962 *(uint32_t *)data = v;
1963 data += 4;
1964 datalen -= 4;
1965 status = HREAD2(hp, SDHC_NINTR_STATUS);
1966 count--;
1967 }
1968 if (datalen > 0 && !ISSET(status, SDHC_TRANSFER_COMPLETE)) {
1969 if (count == 0) {
1970 sdmmc_delay(10);
1971 }
1972 v = HREAD4(hp, SDHC_DATA);
1973 v = le32toh(v);
1974 do {
1975 *data++ = v;
1976 v >>= 8;
1977 } while (--datalen > 0);
1978 }
1979 }
1980
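/*
 * PIO write counterpart for enhanced controllers, paced by the write
 * watermark.  Note that a trailing partial word is pushed with a full
 * 32-bit access, so the bytes just past the end of the data buffer
 * are read and sent as padding.
 */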
1981 static void
1982 esdhc_write_data_pio(struct sdhc_host *hp, uint8_t *data, u_int datalen)
1983 {
1984 uint16_t status = HREAD2(hp, SDHC_NINTR_STATUS);
1985 uint32_t v;
1986
1987 const size_t watermark = (HREAD4(hp, SDHC_WATERMARK_LEVEL) >> SDHC_WATERMARK_WRITE_SHIFT) & SDHC_WATERMARK_WRITE_MASK;
1988 size_t count = watermark;
1989
1990 while (datalen > 3 && !ISSET(status, SDHC_TRANSFER_COMPLETE)) {
1991 if (count == 0) {
1992 sdmmc_delay(10);
1993 count = watermark;
1994 }
1995 v = *(uint32_t *)data;
1996 v = htole32(v);
1997 HWRITE4(hp, SDHC_DATA, v);
1998 data += 4;
1999 datalen -= 4;
2000 status = HREAD2(hp, SDHC_NINTR_STATUS);
2001 count--;
2002 }
2003 if (datalen > 0 && !ISSET(status, SDHC_TRANSFER_COMPLETE)) {
2004 if (count == 0) {
2005 sdmmc_delay(10);
2006 }
2007 v = *(uint32_t *)data;
2008 v = htole32(v);
2009 HWRITE4(hp, SDHC_DATA, v);
2010 }
2011 }
2012
2013 /* Prepare for another command. */
2014 static int
2015 sdhc_soft_reset(struct sdhc_host *hp, int mask)
2016 {
2017 int timo;
2018
2019 KASSERT(mutex_owned(&hp->intr_lock));
2020
2021 DPRINTF(1,("%s: software reset reg=%08x\n", HDEVNAME(hp), mask));
2022
2023 /* Request the reset. */
2024 HWRITE1(hp, SDHC_SOFTWARE_RESET, mask);
2025
2026 /*
2027 * If necessary, wait for the controller to set the bits to
2028 * acknowledge the reset.
2029 */
2030 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_WAIT_RESET) &&
2031 ISSET(mask, (SDHC_RESET_DAT | SDHC_RESET_CMD))) {
2032 for (timo = 10000; timo > 0; timo--) {
2033 if (ISSET(HREAD1(hp, SDHC_SOFTWARE_RESET), mask))
2034 break;
2035 			/* Short delay so that the acknowledge bits are not missed. */
2036 sdmmc_delay(1);
2037 }
2038 		if (timo == 0) {
2039 			DPRINTF(1,("%s: timeout for reset on\n", __func__));
2040 			return ETIMEDOUT;
		}
2041 }
2042
2043 /*
2044 * Wait for the controller to clear the bits to indicate that
2045 * the reset has completed.
2046 */
2047 for (timo = 10; timo > 0; timo--) {
2048 if (!ISSET(HREAD1(hp, SDHC_SOFTWARE_RESET), mask))
2049 break;
2050 sdmmc_delay(10000);
2051 }
2052 if (timo == 0) {
2053 DPRINTF(1,("%s: timeout reg=%08x\n", HDEVNAME(hp),
2054 HREAD1(hp, SDHC_SOFTWARE_RESET)));
2055 return ETIMEDOUT;
2056 }
2057
2058 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED)) {
2059 HSET4(hp, SDHC_DMA_CTL, SDHC_DMA_SNOOP);
2060 }
2061
2062 return 0;
2063 }
2064
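/*
 * Sleep, with intr_lock held, until sdhc_intr() posts one of the
 * interrupt bits in "mask" (an error interrupt is always included in
 * the mask), or until "timo" ticks elapse.  The matched status bits
 * are consumed and returned.  On timeout, or when an error was
 * flagged, the CMD and DAT lines are reset (except on enhanced
 * controllers), the saved error status is discarded and 0 is
 * returned.
 */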
2065 static int
2066 sdhc_wait_intr(struct sdhc_host *hp, int mask, int timo)
2067 {
2068 int status, error, nointr;
2069
2070 KASSERT(mutex_owned(&hp->intr_lock));
2071
2072 mask |= SDHC_ERROR_INTERRUPT;
2073
2074 nointr = 0;
2075 status = hp->intr_status & mask;
2076 while (status == 0) {
2077 if (cv_timedwait(&hp->intr_cv, &hp->intr_lock, timo)
2078 == EWOULDBLOCK) {
2079 nointr = 1;
2080 break;
2081 }
2082 status = hp->intr_status & mask;
2083 }
2084 error = hp->intr_error_status;
2085
2086 DPRINTF(2,("%s: intr status %#x error %#x\n", HDEVNAME(hp), status,
2087 error));
2088
2089 hp->intr_status &= ~status;
2090 hp->intr_error_status &= ~error;
2091
2092 if (ISSET(status, SDHC_ERROR_INTERRUPT)) {
2093 if (ISSET(error, SDHC_DMA_ERROR))
2094 device_printf(hp->sc->sc_dev,"dma error\n");
2095 if (ISSET(error, SDHC_ADMA_ERROR))
2096 device_printf(hp->sc->sc_dev,"adma error\n");
2097 if (ISSET(error, SDHC_AUTO_CMD12_ERROR))
2098 device_printf(hp->sc->sc_dev,"auto_cmd12 error\n");
2099 if (ISSET(error, SDHC_CURRENT_LIMIT_ERROR))
2100 device_printf(hp->sc->sc_dev,"current limit error\n");
2101 if (ISSET(error, SDHC_DATA_END_BIT_ERROR))
2102 device_printf(hp->sc->sc_dev,"data end bit error\n");
2103 if (ISSET(error, SDHC_DATA_CRC_ERROR))
2104 device_printf(hp->sc->sc_dev,"data crc error\n");
2105 if (ISSET(error, SDHC_DATA_TIMEOUT_ERROR))
2106 device_printf(hp->sc->sc_dev,"data timeout error\n");
2107 if (ISSET(error, SDHC_CMD_INDEX_ERROR))
2108 device_printf(hp->sc->sc_dev,"cmd index error\n");
2109 if (ISSET(error, SDHC_CMD_END_BIT_ERROR))
2110 device_printf(hp->sc->sc_dev,"cmd end bit error\n");
2111 if (ISSET(error, SDHC_CMD_CRC_ERROR))
2112 device_printf(hp->sc->sc_dev,"cmd crc error\n");
2113 if (ISSET(error, SDHC_CMD_TIMEOUT_ERROR))
2114 device_printf(hp->sc->sc_dev,"cmd timeout error\n");
2115 if ((error & ~SDHC_EINTR_STATUS_MASK) != 0)
2116 device_printf(hp->sc->sc_dev,"vendor error %#x\n",
2117 (error & ~SDHC_EINTR_STATUS_MASK));
2118 if (error == 0)
2119 device_printf(hp->sc->sc_dev,"no error\n");
2120
2121 /* Command timeout has higher priority than command complete. */
2122 if (ISSET(error, SDHC_CMD_TIMEOUT_ERROR))
2123 CLR(status, SDHC_COMMAND_COMPLETE);
2124
2125 /* Transfer complete has higher priority than data timeout. */
2126 if (ISSET(status, SDHC_TRANSFER_COMPLETE))
2127 CLR(error, SDHC_DATA_TIMEOUT_ERROR);
2128 }
2129
2130 if (nointr ||
2131 (ISSET(status, SDHC_ERROR_INTERRUPT) && error)) {
2132 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED))
2133 (void)sdhc_soft_reset(hp, SDHC_RESET_CMD|SDHC_RESET_DAT);
2134 hp->intr_error_status = 0;
2135 status = 0;
2136 }
2137
2138 return status;
2139 }
2140
2141 /*
2142 * Established by attachment driver at interrupt priority IPL_SDMMC.
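 * The handler polls every host on the controller, acknowledges the
 * pending normal and error interrupt status, wakes any thread blocked
 * in sdhc_wait_intr(), flags a pending re-tuning, asks the sdmmc layer
 * to rescan on card insertion/removal and dispatches card interrupts.
 *
 * A bus front-end would typically hook it up roughly as follows
 * (sketch only; the real code, and the sc_ih/pc/ih names, belong to
 * the front-end, e.g. the PCI attachment):
 *
 *	sc->sc_ih = pci_intr_establish(pc, ih, IPL_SDMMC, sdhc_intr, sc);
 *	if (sc->sc_ih == NULL)
 *		aprint_error_dev(self, "couldn't establish interrupt\n");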
2143 */
2144 int
2145 sdhc_intr(void *arg)
2146 {
2147 struct sdhc_softc *sc = (struct sdhc_softc *)arg;
2148 struct sdhc_host *hp;
2149 int done = 0;
2150 uint16_t status;
2151 uint16_t error;
2152
2153 /* We got an interrupt, but we don't know from which slot. */
2154 for (size_t host = 0; host < sc->sc_nhosts; host++) {
2155 hp = sc->sc_host[host];
2156 if (hp == NULL)
2157 continue;
2158
2159 mutex_enter(&hp->intr_lock);
2160
2161 if (ISSET(sc->sc_flags, SDHC_FLAG_32BIT_ACCESS)) {
2162 /* Find out which interrupts are pending. */
2163 uint32_t xstatus = HREAD4(hp, SDHC_NINTR_STATUS);
2164 status = xstatus;
2165 error = xstatus >> 16;
2166 if (error)
2167 xstatus |= SDHC_ERROR_INTERRUPT;
2168 else if (!ISSET(status, SDHC_NINTR_STATUS_MASK))
2169 goto next_port; /* no interrupt for us */
2170 /* Acknowledge the interrupts we are about to handle. */
2171 HWRITE4(hp, SDHC_NINTR_STATUS, xstatus);
2172 } else {
2173 /* Find out which interrupts are pending. */
2174 error = 0;
2175 status = HREAD2(hp, SDHC_NINTR_STATUS);
2176 if (!ISSET(status, SDHC_NINTR_STATUS_MASK))
2177 goto next_port; /* no interrupt for us */
2178 /* Acknowledge the interrupts we are about to handle. */
2179 HWRITE2(hp, SDHC_NINTR_STATUS, status);
2180 if (ISSET(status, SDHC_ERROR_INTERRUPT)) {
2181 /* Acknowledge error interrupts. */
2182 error = HREAD2(hp, SDHC_EINTR_STATUS);
2183 HWRITE2(hp, SDHC_EINTR_STATUS, error);
2184 }
2185 }
2186
2187 DPRINTF(2,("%s: interrupt status=%x error=%x\n", HDEVNAME(hp),
2188 status, error));
2189
2190 /* Claim this interrupt. */
2191 done = 1;
2192
2193 if (ISSET(status, SDHC_ERROR_INTERRUPT) &&
2194 ISSET(error, SDHC_ADMA_ERROR)) {
2195 uint8_t adma_err = HREAD1(hp, SDHC_ADMA_ERROR_STATUS);
2196 printf("%s: ADMA error, status %02x\n", HDEVNAME(hp),
2197 adma_err);
2198 }
2199
2200 /*
2201 * Wake up the sdmmc event thread to scan for cards.
2202 */
2203 if (ISSET(status, SDHC_CARD_REMOVAL|SDHC_CARD_INSERTION)) {
2204 if (hp->sdmmc != NULL) {
2205 sdmmc_needs_discover(hp->sdmmc);
2206 }
2207 if (ISSET(sc->sc_flags, SDHC_FLAG_ENHANCED)) {
2208 HCLR4(hp, SDHC_NINTR_STATUS_EN,
2209 status & (SDHC_CARD_REMOVAL|SDHC_CARD_INSERTION));
2210 HCLR4(hp, SDHC_NINTR_SIGNAL_EN,
2211 status & (SDHC_CARD_REMOVAL|SDHC_CARD_INSERTION));
2212 }
2213 }
2214
2215 /*
2216 * Schedule re-tuning process (UHS).
2217 */
2218 if (ISSET(status, SDHC_RETUNING_EVENT)) {
2219 atomic_swap_uint(&hp->tuning_timer_pending, 1);
2220 }
2221
2222 /*
2223 * Wake up the blocking process to service command
2224 * related interrupt(s).
2225 */
2226 if (ISSET(status, SDHC_COMMAND_COMPLETE|SDHC_ERROR_INTERRUPT|
2227 SDHC_BUFFER_READ_READY|SDHC_BUFFER_WRITE_READY|
2228 SDHC_TRANSFER_COMPLETE|SDHC_DMA_INTERRUPT)) {
2229 hp->intr_error_status |= error;
2230 hp->intr_status |= status;
2231 if (ISSET(sc->sc_flags, SDHC_FLAG_ENHANCED)) {
2232 HCLR4(hp, SDHC_NINTR_SIGNAL_EN,
2233 status & (SDHC_BUFFER_READ_READY|SDHC_BUFFER_WRITE_READY));
2234 }
2235 cv_broadcast(&hp->intr_cv);
2236 }
2237
2238 /*
2239 * Service SD card interrupts.
2240 */
2241 if (!ISSET(sc->sc_flags, SDHC_FLAG_ENHANCED)
2242 && ISSET(status, SDHC_CARD_INTERRUPT)) {
2243 DPRINTF(0,("%s: card interrupt\n", HDEVNAME(hp)));
2244 HCLR2(hp, SDHC_NINTR_STATUS_EN, SDHC_CARD_INTERRUPT);
2245 sdmmc_card_intr(hp->sdmmc);
2246 }
2247 next_port:
2248 mutex_exit(&hp->intr_lock);
2249 }
2250
2251 return done;
2252 }
2253
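/*
 * Accessor for the per-host interrupt lock, so that code outside this
 * file can serialize against sdhc_intr() and sdhc_wait_intr().
 */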
2254 kmutex_t *
2255 sdhc_host_lock(struct sdhc_host *hp)
2256 {
2257 return &hp->intr_lock;
2258 }
2259
2260 #ifdef SDHC_DEBUG
2261 void
2262 sdhc_dump_regs(struct sdhc_host *hp)
2263 {
2264
2265 printf("0x%02x PRESENT_STATE: %x\n", SDHC_PRESENT_STATE,
2266 HREAD4(hp, SDHC_PRESENT_STATE));
2267 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED))
2268 printf("0x%02x POWER_CTL: %x\n", SDHC_POWER_CTL,
2269 HREAD1(hp, SDHC_POWER_CTL));
2270 printf("0x%02x NINTR_STATUS: %x\n", SDHC_NINTR_STATUS,
2271 HREAD2(hp, SDHC_NINTR_STATUS));
2272 printf("0x%02x EINTR_STATUS: %x\n", SDHC_EINTR_STATUS,
2273 HREAD2(hp, SDHC_EINTR_STATUS));
2274 printf("0x%02x NINTR_STATUS_EN: %x\n", SDHC_NINTR_STATUS_EN,
2275 HREAD2(hp, SDHC_NINTR_STATUS_EN));
2276 printf("0x%02x EINTR_STATUS_EN: %x\n", SDHC_EINTR_STATUS_EN,
2277 HREAD2(hp, SDHC_EINTR_STATUS_EN));
2278 printf("0x%02x NINTR_SIGNAL_EN: %x\n", SDHC_NINTR_SIGNAL_EN,
2279 HREAD2(hp, SDHC_NINTR_SIGNAL_EN));
2280 printf("0x%02x EINTR_SIGNAL_EN: %x\n", SDHC_EINTR_SIGNAL_EN,
2281 HREAD2(hp, SDHC_EINTR_SIGNAL_EN));
2282 printf("0x%02x CAPABILITIES: %x\n", SDHC_CAPABILITIES,
2283 HREAD4(hp, SDHC_CAPABILITIES));
2284 printf("0x%02x MAX_CAPABILITIES: %x\n", SDHC_MAX_CAPABILITIES,
2285 HREAD4(hp, SDHC_MAX_CAPABILITIES));
2286 }
2287 #endif
2288