/* $NetBSD: sunxi_mmc.c,v 1.20 2018/02/19 20:26:51 jmcneill Exp $ */

/*-
 * Copyright (c) 2014-2017 Jared McNeill <jmcneill (at) invisible.ca>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "opt_sunximmc.h"

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sunxi_mmc.c,v 1.20 2018/02/19 20:26:51 jmcneill Exp $");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/device.h>
#include <sys/intr.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/gpio.h>

#include <dev/sdmmc/sdmmcvar.h>
#include <dev/sdmmc/sdmmcchip.h>
#include <dev/sdmmc/sdmmc_ioreg.h>

#include <dev/fdt/fdtvar.h>

#include <arm/sunxi/sunxi_mmc.h>

#ifdef SUNXI_MMC_DEBUG
static int sunxi_mmc_debug = SUNXI_MMC_DEBUG;
#define DPRINTF(dev, fmt, ...) \
do { \
	if (sunxi_mmc_debug & __BIT(device_unit(dev))) \
		device_printf((dev), fmt, ##__VA_ARGS__); \
} while (0)
#else
#define DPRINTF(dev, fmt, ...) ((void)0)
#endif

enum sunxi_mmc_timing {
	SUNXI_MMC_TIMING_400K,
	SUNXI_MMC_TIMING_25M,
	SUNXI_MMC_TIMING_50M,
	SUNXI_MMC_TIMING_50M_DDR,
	SUNXI_MMC_TIMING_50M_DDR_8BIT,
};

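/*
 * Per-timing-mode output and sample clock phase delays, applied through
 * the "output" and "sample" phase clocks on SoCs that provide them.
 */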
struct sunxi_mmc_delay {
	u_int output_phase;
	u_int sample_phase;
};

static const struct sunxi_mmc_delay sun7i_mmc_delays[] = {
	[SUNXI_MMC_TIMING_400K]		= { 180, 180 },
	[SUNXI_MMC_TIMING_25M]		= { 180, 75 },
	[SUNXI_MMC_TIMING_50M]		= { 90, 120 },
	[SUNXI_MMC_TIMING_50M_DDR]	= { 60, 120 },
	[SUNXI_MMC_TIMING_50M_DDR_8BIT]	= { 90, 180 },
};

static const struct sunxi_mmc_delay sun9i_mmc_delays[] = {
	[SUNXI_MMC_TIMING_400K]		= { 180, 180 },
	[SUNXI_MMC_TIMING_25M]		= { 180, 75 },
	[SUNXI_MMC_TIMING_50M]		= { 150, 120 },
	[SUNXI_MMC_TIMING_50M_DDR]	= { 54, 36 },
	[SUNXI_MMC_TIMING_50M_DDR_8BIT]	= { 72, 72 },
};

#define SUNXI_MMC_NDESC 16

struct sunxi_mmc_softc;

static int sunxi_mmc_match(device_t, cfdata_t, void *);
static void sunxi_mmc_attach(device_t, device_t, void *);
static void sunxi_mmc_attach_i(device_t);

static int sunxi_mmc_intr(void *);
static int sunxi_mmc_dmabounce_setup(struct sunxi_mmc_softc *);
static int sunxi_mmc_idma_setup(struct sunxi_mmc_softc *);

static int sunxi_mmc_host_reset(sdmmc_chipset_handle_t);
static uint32_t sunxi_mmc_host_ocr(sdmmc_chipset_handle_t);
static int sunxi_mmc_host_maxblklen(sdmmc_chipset_handle_t);
static int sunxi_mmc_card_detect(sdmmc_chipset_handle_t);
static int sunxi_mmc_write_protect(sdmmc_chipset_handle_t);
static int sunxi_mmc_bus_power(sdmmc_chipset_handle_t, uint32_t);
static int sunxi_mmc_bus_clock(sdmmc_chipset_handle_t, int, bool);
static int sunxi_mmc_bus_width(sdmmc_chipset_handle_t, int);
static int sunxi_mmc_bus_rod(sdmmc_chipset_handle_t, int);
static int sunxi_mmc_signal_voltage(sdmmc_chipset_handle_t, int);
static void sunxi_mmc_exec_command(sdmmc_chipset_handle_t,
    struct sdmmc_command *);
static void sunxi_mmc_card_enable_intr(sdmmc_chipset_handle_t, int);
static void sunxi_mmc_card_intr_ack(sdmmc_chipset_handle_t);

static struct sdmmc_chip_functions sunxi_mmc_chip_functions = {
	.host_reset = sunxi_mmc_host_reset,
	.host_ocr = sunxi_mmc_host_ocr,
	.host_maxblklen = sunxi_mmc_host_maxblklen,
	.card_detect = sunxi_mmc_card_detect,
	.write_protect = sunxi_mmc_write_protect,
	.bus_power = sunxi_mmc_bus_power,
	.bus_clock_ddr = sunxi_mmc_bus_clock,
	.bus_width = sunxi_mmc_bus_width,
	.bus_rod = sunxi_mmc_bus_rod,
	.signal_voltage = sunxi_mmc_signal_voltage,
	.exec_command = sunxi_mmc_exec_command,
	.card_enable_intr = sunxi_mmc_card_enable_intr,
	.card_intr_ack = sunxi_mmc_card_intr_ack,
};

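/*
 * Per-SoC configuration: maximum transfer length per IDMA descriptor,
 * DMA FIFO trigger level, optional clock phase delay table, and feature
 * flags (calibration register, new timing mode, DATA0 masking).
 */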
struct sunxi_mmc_config {
	u_int idma_xferlen;
	u_int flags;
#define SUNXI_MMC_FLAG_CALIB_REG	0x01
#define SUNXI_MMC_FLAG_NEW_TIMINGS	0x02
#define SUNXI_MMC_FLAG_MASK_DATA0	0x04
	const struct sunxi_mmc_delay *delays;
	uint32_t dma_ftrglevel;
};

struct sunxi_mmc_softc {
	device_t sc_dev;
	bus_space_tag_t sc_bst;
	bus_space_handle_t sc_bsh;
	bus_dma_tag_t sc_dmat;
	int sc_phandle;

	void *sc_ih;
	kmutex_t sc_intr_lock;
	kcondvar_t sc_intr_cv;
	kcondvar_t sc_idst_cv;

	int sc_mmc_width;
	int sc_mmc_present;

	device_t sc_sdmmc_dev;

	struct sunxi_mmc_config *sc_config;

	bus_dma_segment_t sc_idma_segs[1];
	int sc_idma_nsegs;
	bus_size_t sc_idma_size;
	bus_dmamap_t sc_idma_map;
	int sc_idma_ndesc;
	void *sc_idma_desc;

	bus_dmamap_t sc_dmabounce_map;
	void *sc_dmabounce_buf;
	size_t sc_dmabounce_buflen;

	uint32_t sc_intr_rint;
	uint32_t sc_idma_idst;

	struct clk *sc_clk_ahb;
	struct clk *sc_clk_mmc;
	struct clk *sc_clk_output;
	struct clk *sc_clk_sample;

	struct fdtbus_reset *sc_rst_ahb;

	struct fdtbus_gpio_pin *sc_gpio_cd;
	int sc_gpio_cd_inverted;
	struct fdtbus_gpio_pin *sc_gpio_wp;
	int sc_gpio_wp_inverted;

	struct fdtbus_regulator *sc_reg_vqmmc;

	struct fdtbus_mmc_pwrseq *sc_pwrseq;

	bool sc_non_removable;
	bool sc_broken_cd;
};

CFATTACH_DECL_NEW(sunxi_mmc, sizeof(struct sunxi_mmc_softc),
    sunxi_mmc_match, sunxi_mmc_attach, NULL, NULL);

#define MMC_WRITE(sc, reg, val) \
	bus_space_write_4((sc)->sc_bst, (sc)->sc_bsh, (reg), (val))
#define MMC_READ(sc, reg) \
	bus_space_read_4((sc)->sc_bst, (sc)->sc_bsh, (reg))

static const struct sunxi_mmc_config sun4i_a10_mmc_config = {
	.idma_xferlen = 0x2000,
	.dma_ftrglevel = 0x20070008,
	.delays = NULL,
	.flags = 0,
};

static const struct sunxi_mmc_config sun5i_a13_mmc_config = {
	.idma_xferlen = 0x10000,
	.dma_ftrglevel = 0x20070008,
	.delays = NULL,
	.flags = 0,
};

static const struct sunxi_mmc_config sun7i_a20_mmc_config = {
	.idma_xferlen = 0x2000,
	.dma_ftrglevel = 0x20070008,
	.delays = sun7i_mmc_delays,
	.flags = 0,
};

static const struct sunxi_mmc_config sun8i_a83t_emmc_config = {
	.idma_xferlen = 0x10000,
	.dma_ftrglevel = 0x20070008,
	.delays = NULL,
	.flags = SUNXI_MMC_FLAG_NEW_TIMINGS,
};

static const struct sunxi_mmc_config sun9i_a80_mmc_config = {
	.idma_xferlen = 0x10000,
	.dma_ftrglevel = 0x200f0010,
	.delays = sun9i_mmc_delays,
	.flags = 0,
};

static const struct sunxi_mmc_config sun50i_a64_mmc_config = {
	.idma_xferlen = 0x10000,
	.dma_ftrglevel = 0x20070008,
	.delays = NULL,
	.flags = SUNXI_MMC_FLAG_CALIB_REG |
		 SUNXI_MMC_FLAG_NEW_TIMINGS |
		 SUNXI_MMC_FLAG_MASK_DATA0,
};

static const struct sunxi_mmc_config sun50i_a64_emmc_config = {
	.idma_xferlen = 0x2000,
	.dma_ftrglevel = 0x20070008,
	.delays = NULL,
	.flags = SUNXI_MMC_FLAG_CALIB_REG,
};

static const struct sunxi_mmc_config sun50i_h6_mmc_config = {
	.idma_xferlen = 0x10000,
	.dma_ftrglevel = 0x20070008,
	.delays = NULL,
	.flags = SUNXI_MMC_FLAG_CALIB_REG |
		 SUNXI_MMC_FLAG_NEW_TIMINGS |
		 SUNXI_MMC_FLAG_MASK_DATA0,
};

static const struct sunxi_mmc_config sun50i_h6_emmc_config = {
	.idma_xferlen = 0x2000,
	.dma_ftrglevel = 0x20070008,
	.delays = NULL,
	.flags = SUNXI_MMC_FLAG_CALIB_REG,
};

static const struct of_compat_data compat_data[] = {
	{ "allwinner,sun4i-a10-mmc",	(uintptr_t)&sun4i_a10_mmc_config },
	{ "allwinner,sun5i-a13-mmc",	(uintptr_t)&sun5i_a13_mmc_config },
	{ "allwinner,sun7i-a20-mmc",	(uintptr_t)&sun7i_a20_mmc_config },
	{ "allwinner,sun8i-a83t-emmc",	(uintptr_t)&sun8i_a83t_emmc_config },
	{ "allwinner,sun9i-a80-mmc",	(uintptr_t)&sun9i_a80_mmc_config },
	{ "allwinner,sun50i-a64-mmc",	(uintptr_t)&sun50i_a64_mmc_config },
	{ "allwinner,sun50i-a64-emmc",	(uintptr_t)&sun50i_a64_emmc_config },
	{ "allwinner,sun50i-h6-mmc",	(uintptr_t)&sun50i_h6_mmc_config },
	{ "allwinner,sun50i-h6-emmc",	(uintptr_t)&sun50i_h6_emmc_config },
	{ NULL }
};

static int
sunxi_mmc_match(device_t parent, cfdata_t cf, void *aux)
{
	struct fdt_attach_args * const faa = aux;

	return of_match_compat_data(faa->faa_phandle, compat_data);
}

static void
sunxi_mmc_attach(device_t parent, device_t self, void *aux)
{
	struct sunxi_mmc_softc * const sc = device_private(self);
	struct fdt_attach_args * const faa = aux;
	const int phandle = faa->faa_phandle;
	char intrstr[128];
	bus_addr_t addr;
	bus_size_t size;

	if (fdtbus_get_reg(phandle, 0, &addr, &size) != 0) {
		aprint_error(": couldn't get registers\n");
		return;
	}

	sc->sc_clk_ahb = fdtbus_clock_get(phandle, "ahb");
	sc->sc_clk_mmc = fdtbus_clock_get(phandle, "mmc");
	sc->sc_clk_output = fdtbus_clock_get(phandle, "output");
	sc->sc_clk_sample = fdtbus_clock_get(phandle, "sample");

#if notyet
	if (sc->sc_clk_ahb == NULL || sc->sc_clk_mmc == NULL ||
	    sc->sc_clk_output == NULL || sc->sc_clk_sample == NULL) {
#else
	if (sc->sc_clk_ahb == NULL || sc->sc_clk_mmc == NULL) {
#endif
		aprint_error(": couldn't get clocks\n");
		return;
	}

	sc->sc_rst_ahb = fdtbus_reset_get(phandle, "ahb");

	sc->sc_reg_vqmmc = fdtbus_regulator_acquire(phandle, "vqmmc-supply");

	sc->sc_pwrseq = fdtbus_mmc_pwrseq_get(phandle);

	if (clk_enable(sc->sc_clk_ahb) != 0 ||
	    clk_enable(sc->sc_clk_mmc) != 0) {
		aprint_error(": couldn't enable clocks\n");
		return;
	}

	if (sc->sc_rst_ahb != NULL) {
		if (fdtbus_reset_deassert(sc->sc_rst_ahb) != 0) {
			aprint_error(": couldn't de-assert resets\n");
			return;
		}
	}

	sc->sc_dev = self;
	sc->sc_phandle = phandle;
	sc->sc_config = (void *)of_search_compatible(phandle, compat_data)->data;
	sc->sc_bst = faa->faa_bst;
	sc->sc_dmat = faa->faa_dmat;
	mutex_init(&sc->sc_intr_lock, MUTEX_DEFAULT, IPL_BIO);
	cv_init(&sc->sc_intr_cv, "awinmmcirq");
	cv_init(&sc->sc_idst_cv, "awinmmcdma");

	if (bus_space_map(sc->sc_bst, addr, size, 0, &sc->sc_bsh) != 0) {
		aprint_error(": couldn't map registers\n");
		return;
	}

	aprint_naive("\n");
	aprint_normal(": SD/MMC controller\n");

	sc->sc_gpio_cd = fdtbus_gpio_acquire(phandle, "cd-gpios",
	    GPIO_PIN_INPUT);
	sc->sc_gpio_wp = fdtbus_gpio_acquire(phandle, "wp-gpios",
	    GPIO_PIN_INPUT);

	sc->sc_gpio_cd_inverted = of_hasprop(phandle, "cd-inverted") ? 0 : 1;
	sc->sc_gpio_wp_inverted = of_hasprop(phandle, "wp-inverted") ? 0 : 1;

	sc->sc_non_removable = of_hasprop(phandle, "non-removable");
	sc->sc_broken_cd = of_hasprop(phandle, "broken-cd");

	if (sunxi_mmc_dmabounce_setup(sc) != 0 ||
	    sunxi_mmc_idma_setup(sc) != 0) {
		aprint_error_dev(self, "failed to setup DMA\n");
		return;
	}

	if (!fdtbus_intr_str(phandle, 0, intrstr, sizeof(intrstr))) {
		aprint_error_dev(self, "failed to decode interrupt\n");
		return;
	}

	sc->sc_ih = fdtbus_intr_establish(phandle, 0, IPL_BIO, FDT_INTR_MPSAFE,
	    sunxi_mmc_intr, sc);
	if (sc->sc_ih == NULL) {
		aprint_error_dev(self, "failed to establish interrupt on %s\n",
		    intrstr);
		return;
	}
	aprint_normal_dev(self, "interrupting on %s\n", intrstr);

	config_interrupts(self, sunxi_mmc_attach_i);
}

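/*
 * Allocate, map, and load a single-segment bounce buffer sized to the
 * maximum block length. It is used for commands that arrive without a
 * dmamap of their own (see sunxi_mmc_dma_prepare).
 */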
static int
sunxi_mmc_dmabounce_setup(struct sunxi_mmc_softc *sc)
{
	bus_dma_segment_t ds[1];
	int error, rseg;

	sc->sc_dmabounce_buflen = sunxi_mmc_host_maxblklen(sc);
	error = bus_dmamem_alloc(sc->sc_dmat, sc->sc_dmabounce_buflen, 0,
	    sc->sc_dmabounce_buflen, ds, 1, &rseg, BUS_DMA_WAITOK);
	if (error)
		return error;
	error = bus_dmamem_map(sc->sc_dmat, ds, 1, sc->sc_dmabounce_buflen,
	    &sc->sc_dmabounce_buf, BUS_DMA_WAITOK);
	if (error)
		goto free;
	error = bus_dmamap_create(sc->sc_dmat, sc->sc_dmabounce_buflen, 1,
	    sc->sc_dmabounce_buflen, 0, BUS_DMA_WAITOK, &sc->sc_dmabounce_map);
	if (error)
		goto unmap;
	error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmabounce_map,
	    sc->sc_dmabounce_buf, sc->sc_dmabounce_buflen, NULL,
	    BUS_DMA_WAITOK);
	if (error)
		goto destroy;
	return 0;

destroy:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmabounce_map);
unmap:
	bus_dmamem_unmap(sc->sc_dmat, sc->sc_dmabounce_buf,
	    sc->sc_dmabounce_buflen);
free:
	bus_dmamem_free(sc->sc_dmat, ds, rseg);
	return error;
}

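/*
 * Allocate DMA-safe memory for the internal DMA controller's descriptor
 * chain and load it into a dmamap so the controller can fetch the
 * descriptors by physical address.
 */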
static int
sunxi_mmc_idma_setup(struct sunxi_mmc_softc *sc)
{
	int error;

	sc->sc_idma_ndesc = SUNXI_MMC_NDESC;
	sc->sc_idma_size = sizeof(struct sunxi_mmc_idma_descriptor) *
	    sc->sc_idma_ndesc;
	error = bus_dmamem_alloc(sc->sc_dmat, sc->sc_idma_size, 0,
	    sc->sc_idma_size, sc->sc_idma_segs, 1,
	    &sc->sc_idma_nsegs, BUS_DMA_WAITOK);
	if (error)
		return error;
	error = bus_dmamem_map(sc->sc_dmat, sc->sc_idma_segs,
	    sc->sc_idma_nsegs, sc->sc_idma_size,
	    &sc->sc_idma_desc, BUS_DMA_WAITOK);
	if (error)
		goto free;
	error = bus_dmamap_create(sc->sc_dmat, sc->sc_idma_size, 1,
	    sc->sc_idma_size, 0, BUS_DMA_WAITOK, &sc->sc_idma_map);
	if (error)
		goto unmap;
	error = bus_dmamap_load(sc->sc_dmat, sc->sc_idma_map,
	    sc->sc_idma_desc, sc->sc_idma_size, NULL, BUS_DMA_WAITOK);
	if (error)
		goto destroy;
	return 0;

destroy:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_idma_map);
unmap:
	bus_dmamem_unmap(sc->sc_dmat, sc->sc_idma_desc, sc->sc_idma_size);
free:
	bus_dmamem_free(sc->sc_dmat, sc->sc_idma_segs, sc->sc_idma_nsegs);
	return error;
}

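/*
 * Program the module clock for the requested card frequency (doubled in
 * DDR mode) and, when the SoC config provides a delay table, the output
 * and sample clock phases for the selected timing mode.
 */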
static int
sunxi_mmc_set_clock(struct sunxi_mmc_softc *sc, u_int freq, bool ddr)
{
	const struct sunxi_mmc_delay *delays;
	int error, timing;

	if (freq <= 400) {
		timing = SUNXI_MMC_TIMING_400K;
	} else if (freq <= 25000) {
		timing = SUNXI_MMC_TIMING_25M;
	} else if (freq <= 52000) {
		if (ddr) {
			timing = sc->sc_mmc_width == 8 ?
			    SUNXI_MMC_TIMING_50M_DDR_8BIT :
			    SUNXI_MMC_TIMING_50M_DDR;
		} else {
			timing = SUNXI_MMC_TIMING_50M;
		}
	} else
		return EINVAL;

	error = clk_set_rate(sc->sc_clk_mmc, (freq * 1000) << ddr);
	if (error != 0)
		return error;

	if (sc->sc_config->delays == NULL)
		return 0;

	delays = &sc->sc_config->delays[timing];

	if (sc->sc_clk_sample) {
		error = clk_set_rate(sc->sc_clk_sample, delays->sample_phase);
		if (error != 0)
			return error;
	}
	if (sc->sc_clk_output) {
		error = clk_set_rate(sc->sc_clk_output, delays->output_phase);
		if (error != 0)
			return error;
	}

	return 0;
}

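/*
 * Deferred attach, run once interrupts are available: reset the host,
 * apply the power sequence, and attach the sdmmc bus with capabilities
 * derived from the "bus-width" property.
 */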
static void
sunxi_mmc_attach_i(device_t self)
{
	struct sunxi_mmc_softc *sc = device_private(self);
	struct sdmmcbus_attach_args saa;
	uint32_t width;

	if (sc->sc_pwrseq)
		fdtbus_mmc_pwrseq_pre_power_on(sc->sc_pwrseq);

	sunxi_mmc_host_reset(sc);
	sunxi_mmc_bus_width(sc, 1);
	sunxi_mmc_set_clock(sc, 400, false);

	if (sc->sc_pwrseq)
		fdtbus_mmc_pwrseq_post_power_on(sc->sc_pwrseq);

	if (of_getprop_uint32(sc->sc_phandle, "bus-width", &width) != 0)
		width = 4;

	memset(&saa, 0, sizeof(saa));
	saa.saa_busname = "sdmmc";
	saa.saa_sct = &sunxi_mmc_chip_functions;
	saa.saa_sch = sc;
	saa.saa_dmat = sc->sc_dmat;
	saa.saa_clkmin = 400;
	saa.saa_clkmax = 52000;
	saa.saa_caps = SMC_CAPS_DMA |
		       SMC_CAPS_MULTI_SEG_DMA |
		       SMC_CAPS_AUTO_STOP |
		       SMC_CAPS_SD_HIGHSPEED |
		       SMC_CAPS_MMC_HIGHSPEED |
		       SMC_CAPS_MMC_DDR52 |
		       SMC_CAPS_POLLING;
	if (width == 4)
		saa.saa_caps |= SMC_CAPS_4BIT_MODE;
	if (width == 8)
		saa.saa_caps |= SMC_CAPS_8BIT_MODE;

	if (sc->sc_gpio_cd)
		saa.saa_caps |= SMC_CAPS_POLL_CARD_DET;

	sc->sc_sdmmc_dev = config_found(self, &saa, NULL);
}

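/*
 * Interrupt handler: latch and acknowledge the raw interrupt (RINT) and
 * internal DMA (IDST) status, wake any waiters, and forward SDIO card
 * interrupts to the sdmmc layer.
 */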
static int
sunxi_mmc_intr(void *priv)
{
	struct sunxi_mmc_softc *sc = priv;
	uint32_t idst, rint;

	mutex_enter(&sc->sc_intr_lock);
	idst = MMC_READ(sc, SUNXI_MMC_IDST);
	rint = MMC_READ(sc, SUNXI_MMC_RINT);
	if (!idst && !rint) {
		mutex_exit(&sc->sc_intr_lock);
		return 0;
	}
	MMC_WRITE(sc, SUNXI_MMC_IDST, idst);
	MMC_WRITE(sc, SUNXI_MMC_RINT, rint);

	DPRINTF(sc->sc_dev, "mmc intr idst=%08X rint=%08X\n",
	    idst, rint);

	if (idst != 0) {
		sc->sc_idma_idst |= idst;
		cv_broadcast(&sc->sc_idst_cv);
	}

	if ((rint & ~SUNXI_MMC_INT_SDIO_INT) != 0) {
		sc->sc_intr_rint |= (rint & ~SUNXI_MMC_INT_SDIO_INT);
		cv_broadcast(&sc->sc_intr_cv);
	}

	if ((rint & SUNXI_MMC_INT_SDIO_INT) != 0) {
		sdmmc_card_intr(sc->sc_sdmmc_dev);
	}

	mutex_exit(&sc->sc_intr_lock);

	return 1;
}

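/*
 * Wait for any of the RINT bits in "mask" to be posted, either by
 * polling the register directly or by sleeping on the interrupt
 * condition variable. Called with the interrupt lock held.
 */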
static int
sunxi_mmc_wait_rint(struct sunxi_mmc_softc *sc, uint32_t mask,
    int timeout, bool poll)
{
	int retry;
	int error;

	KASSERT(mutex_owned(&sc->sc_intr_lock));

	if (sc->sc_intr_rint & mask)
		return 0;

	if (poll)
		retry = timeout / hz * 1000;
	else
		retry = timeout / hz;

	while (retry > 0) {
		if (poll) {
			sc->sc_intr_rint |= MMC_READ(sc, SUNXI_MMC_RINT);
		} else {
			error = cv_timedwait(&sc->sc_intr_cv,
			    &sc->sc_intr_lock, hz);
			if (error && error != EWOULDBLOCK)
				return error;
		}
		if (sc->sc_intr_rint & mask)
			return 0;
		if (poll)
			delay(1000);
		--retry;
	}

	return ETIMEDOUT;
}

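/*
 * Soft-reset the controller, then set the data timeout and unmask the
 * interrupts the driver cares about.
 */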
static int
sunxi_mmc_host_reset(sdmmc_chipset_handle_t sch)
{
	struct sunxi_mmc_softc *sc = sch;
	int retry = 1000;

	DPRINTF(sc->sc_dev, "host reset\n");

	MMC_WRITE(sc, SUNXI_MMC_GCTRL,
	    MMC_READ(sc, SUNXI_MMC_GCTRL) | SUNXI_MMC_GCTRL_RESET);
	while (--retry > 0) {
		if (!(MMC_READ(sc, SUNXI_MMC_GCTRL) & SUNXI_MMC_GCTRL_RESET))
			break;
		delay(100);
	}

	MMC_WRITE(sc, SUNXI_MMC_TIMEOUT, 0xffffffff);

	MMC_WRITE(sc, SUNXI_MMC_IMASK,
	    SUNXI_MMC_INT_CMD_DONE | SUNXI_MMC_INT_ERROR |
	    SUNXI_MMC_INT_DATA_OVER | SUNXI_MMC_INT_AUTO_CMD_DONE);

	MMC_WRITE(sc, SUNXI_MMC_GCTRL,
	    MMC_READ(sc, SUNXI_MMC_GCTRL) | SUNXI_MMC_GCTRL_INTEN);

	return 0;
}

static uint32_t
sunxi_mmc_host_ocr(sdmmc_chipset_handle_t sch)
{
	return MMC_OCR_3_2V_3_3V | MMC_OCR_3_3V_3_4V | MMC_OCR_HCS;
}

static int
sunxi_mmc_host_maxblklen(sdmmc_chipset_handle_t sch)
{
	return 8192;
}

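/*
 * Card detect: non-removable or broken-cd devices are always present;
 * otherwise debounce the card-detect GPIO, or fall back to the
 * controller's CARD_PRESENT status bit.
 */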
static int
sunxi_mmc_card_detect(sdmmc_chipset_handle_t sch)
{
	struct sunxi_mmc_softc *sc = sch;

	if (sc->sc_non_removable || sc->sc_broken_cd) {
		/*
		 * Non-removable or broken card detect flag set in
		 * DT, assume always present
		 */
		return 1;
	} else if (sc->sc_gpio_cd != NULL) {
		/* Use card detect GPIO */
		int v = 0, i;
		for (i = 0; i < 5; i++) {
			v += (fdtbus_gpio_read(sc->sc_gpio_cd) ^
			    sc->sc_gpio_cd_inverted);
			delay(1000);
		}
		if (v == 5)
			sc->sc_mmc_present = 0;
		else if (v == 0)
			sc->sc_mmc_present = 1;
		return sc->sc_mmc_present;
	} else {
		/* Use CARD_PRESENT field of SD_STATUS register */
		const uint32_t present = MMC_READ(sc, SUNXI_MMC_STATUS) &
		    SUNXI_MMC_STATUS_CARD_PRESENT;
		return present != 0;
	}
}

static int
sunxi_mmc_write_protect(sdmmc_chipset_handle_t sch)
{
	struct sunxi_mmc_softc *sc = sch;

	if (sc->sc_gpio_wp == NULL) {
		return 0;	/* no write protect pin, assume rw */
	} else {
		return fdtbus_gpio_read(sc->sc_gpio_wp) ^
		    sc->sc_gpio_wp_inverted;
	}
}

static int
sunxi_mmc_bus_power(sdmmc_chipset_handle_t sch, uint32_t ocr)
{
	return 0;
}

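/*
 * Issue a clock-update command (no data, no response) so the controller
 * latches the new CLKCR settings.
 */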
static int
sunxi_mmc_update_clock(struct sunxi_mmc_softc *sc)
{
	uint32_t cmd;
	int retry;

	DPRINTF(sc->sc_dev, "update clock\n");

	cmd = SUNXI_MMC_CMD_START |
	      SUNXI_MMC_CMD_UPCLK_ONLY |
	      SUNXI_MMC_CMD_WAIT_PRE_OVER;
	MMC_WRITE(sc, SUNXI_MMC_CMD, cmd);
	retry = 0xfffff;
	while (--retry > 0) {
		if (!(MMC_READ(sc, SUNXI_MMC_CMD) & SUNXI_MMC_CMD_START))
			break;
		delay(10);
	}

	if (retry == 0) {
		aprint_error_dev(sc->sc_dev, "timeout updating clock\n");
		DPRINTF(sc->sc_dev, "GCTRL: 0x%08x\n",
		    MMC_READ(sc, SUNXI_MMC_GCTRL));
		DPRINTF(sc->sc_dev, "CLKCR: 0x%08x\n",
		    MMC_READ(sc, SUNXI_MMC_CLKCR));
		DPRINTF(sc->sc_dev, "TIMEOUT: 0x%08x\n",
		    MMC_READ(sc, SUNXI_MMC_TIMEOUT));
		DPRINTF(sc->sc_dev, "WIDTH: 0x%08x\n",
		    MMC_READ(sc, SUNXI_MMC_WIDTH));
		DPRINTF(sc->sc_dev, "CMD: 0x%08x\n",
		    MMC_READ(sc, SUNXI_MMC_CMD));
		DPRINTF(sc->sc_dev, "MINT: 0x%08x\n",
		    MMC_READ(sc, SUNXI_MMC_MINT));
		DPRINTF(sc->sc_dev, "RINT: 0x%08x\n",
		    MMC_READ(sc, SUNXI_MMC_RINT));
		DPRINTF(sc->sc_dev, "STATUS: 0x%08x\n",
		    MMC_READ(sc, SUNXI_MMC_STATUS));
		return ETIMEDOUT;
	}

	return 0;
}

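/*
 * Change the card clock: gate the clock off, program the divider,
 * timing mode, and DDR settings, set the module clock rate, then gate
 * the clock back on, issuing a clock-update command at each step.
 */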
static int
sunxi_mmc_bus_clock(sdmmc_chipset_handle_t sch, int freq, bool ddr)
{
	struct sunxi_mmc_softc *sc = sch;
	uint32_t clkcr, gctrl, ntsr;
	const u_int flags = sc->sc_config->flags;

	clkcr = MMC_READ(sc, SUNXI_MMC_CLKCR);
	if (clkcr & SUNXI_MMC_CLKCR_CARDCLKON) {
		clkcr &= ~SUNXI_MMC_CLKCR_CARDCLKON;
		if (flags & SUNXI_MMC_FLAG_MASK_DATA0)
			clkcr |= SUNXI_MMC_CLKCR_MASK_DATA0;
		MMC_WRITE(sc, SUNXI_MMC_CLKCR, clkcr);
		if (sunxi_mmc_update_clock(sc) != 0)
			return 1;
		if (flags & SUNXI_MMC_FLAG_MASK_DATA0) {
			clkcr = MMC_READ(sc, SUNXI_MMC_CLKCR);
			clkcr &= ~SUNXI_MMC_CLKCR_MASK_DATA0;
			MMC_WRITE(sc, SUNXI_MMC_CLKCR, clkcr);
		}
	}

	if (freq) {

		clkcr &= ~SUNXI_MMC_CLKCR_DIV;
		clkcr |= __SHIFTIN(ddr, SUNXI_MMC_CLKCR_DIV);
		MMC_WRITE(sc, SUNXI_MMC_CLKCR, clkcr);

		if (flags & SUNXI_MMC_FLAG_NEW_TIMINGS) {
			ntsr = MMC_READ(sc, SUNXI_MMC_NTSR);
			ntsr |= SUNXI_MMC_NTSR_MODE_SELECT;
			MMC_WRITE(sc, SUNXI_MMC_NTSR, ntsr);
		}

		if (flags & SUNXI_MMC_FLAG_CALIB_REG)
			MMC_WRITE(sc, SUNXI_MMC_SAMP_DL, SUNXI_MMC_SAMP_DL_SW_EN);

		if (sunxi_mmc_update_clock(sc) != 0)
			return 1;

		gctrl = MMC_READ(sc, SUNXI_MMC_GCTRL);
		if (ddr)
			gctrl |= SUNXI_MMC_GCTRL_DDR_MODE;
		else
			gctrl &= ~SUNXI_MMC_GCTRL_DDR_MODE;
		MMC_WRITE(sc, SUNXI_MMC_GCTRL, gctrl);

		if (sunxi_mmc_set_clock(sc, freq, ddr) != 0)
			return 1;

		clkcr |= SUNXI_MMC_CLKCR_CARDCLKON;
		if (flags & SUNXI_MMC_FLAG_MASK_DATA0)
			clkcr |= SUNXI_MMC_CLKCR_MASK_DATA0;
		MMC_WRITE(sc, SUNXI_MMC_CLKCR, clkcr);
		if (sunxi_mmc_update_clock(sc) != 0)
			return 1;
		if (flags & SUNXI_MMC_FLAG_MASK_DATA0) {
			clkcr = MMC_READ(sc, SUNXI_MMC_CLKCR);
			clkcr &= ~SUNXI_MMC_CLKCR_MASK_DATA0;
			MMC_WRITE(sc, SUNXI_MMC_CLKCR, clkcr);
		}
	}

	return 0;
}

static int
sunxi_mmc_bus_width(sdmmc_chipset_handle_t sch, int width)
{
	struct sunxi_mmc_softc *sc = sch;

	DPRINTF(sc->sc_dev, "width = %d\n", width);

	switch (width) {
	case 1:
		MMC_WRITE(sc, SUNXI_MMC_WIDTH, SUNXI_MMC_WIDTH_1);
		break;
	case 4:
		MMC_WRITE(sc, SUNXI_MMC_WIDTH, SUNXI_MMC_WIDTH_4);
		break;
	case 8:
		MMC_WRITE(sc, SUNXI_MMC_WIDTH, SUNXI_MMC_WIDTH_8);
		break;
	default:
		return 1;
	}

	sc->sc_mmc_width = width;

	return 0;
}

static int
sunxi_mmc_bus_rod(sdmmc_chipset_handle_t sch, int on)
{
	return -1;
}

static int
sunxi_mmc_signal_voltage(sdmmc_chipset_handle_t sch, int signal_voltage)
{
	struct sunxi_mmc_softc *sc = sch;
	u_int uvol;
	int error;

	if (sc->sc_reg_vqmmc == NULL)
		return 0;

	switch (signal_voltage) {
	case SDMMC_SIGNAL_VOLTAGE_330:
		uvol = 3300000;
		break;
	case SDMMC_SIGNAL_VOLTAGE_180:
		uvol = 1800000;
		break;
	default:
		return EINVAL;
	}

	error = fdtbus_regulator_set_voltage(sc->sc_reg_vqmmc, uvol, uvol);
	if (error != 0)
		return error;

	return fdtbus_regulator_enable(sc->sc_reg_vqmmc);
}

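/*
 * Build the IDMA descriptor chain for a transfer, bouncing through the
 * pre-allocated buffer when the command has no dmamap, then reset and
 * arm the internal DMA engine.
 */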
static int
sunxi_mmc_dma_prepare(struct sunxi_mmc_softc *sc, struct sdmmc_command *cmd)
{
	struct sunxi_mmc_idma_descriptor *dma = sc->sc_idma_desc;
	bus_addr_t desc_paddr = sc->sc_idma_map->dm_segs[0].ds_addr;
	bus_dmamap_t map;
	bus_size_t off;
	int desc, resid, seg;
	uint32_t val;

	/*
	 * If the command includes a dma map use it, otherwise we need to
	 * bounce. This can happen for SDIO IO_RW_EXTENDED (CMD53) commands.
	 */
	if (cmd->c_dmamap) {
		map = cmd->c_dmamap;
	} else {
		if (cmd->c_datalen > sc->sc_dmabounce_buflen)
			return E2BIG;
		map = sc->sc_dmabounce_map;

		if (ISSET(cmd->c_flags, SCF_CMD_READ)) {
			memset(sc->sc_dmabounce_buf, 0, cmd->c_datalen);
			bus_dmamap_sync(sc->sc_dmat, sc->sc_dmabounce_map,
			    0, cmd->c_datalen, BUS_DMASYNC_PREREAD);
		} else {
			memcpy(sc->sc_dmabounce_buf, cmd->c_data,
			    cmd->c_datalen);
			bus_dmamap_sync(sc->sc_dmat, sc->sc_dmabounce_map,
			    0, cmd->c_datalen, BUS_DMASYNC_PREWRITE);
		}
	}

	desc = 0;
	for (seg = 0; seg < map->dm_nsegs; seg++) {
		bus_addr_t paddr = map->dm_segs[seg].ds_addr;
		bus_size_t len = map->dm_segs[seg].ds_len;
		resid = min(len, cmd->c_resid);
		off = 0;
		while (resid > 0) {
			if (desc == sc->sc_idma_ndesc)
				break;
			len = min(sc->sc_config->idma_xferlen, resid);
			dma[desc].dma_buf_size = htole32(len);
			dma[desc].dma_buf_addr = htole32(paddr + off);
			dma[desc].dma_config = htole32(SUNXI_MMC_IDMA_CONFIG_CH |
			    SUNXI_MMC_IDMA_CONFIG_OWN);
			cmd->c_resid -= len;
			resid -= len;
			off += len;
			if (desc == 0) {
				dma[desc].dma_config |= htole32(SUNXI_MMC_IDMA_CONFIG_FD);
			}
			if (cmd->c_resid == 0) {
				dma[desc].dma_config |= htole32(SUNXI_MMC_IDMA_CONFIG_LD);
				dma[desc].dma_config |= htole32(SUNXI_MMC_IDMA_CONFIG_ER);
				dma[desc].dma_next = 0;
			} else {
				dma[desc].dma_config |=
				    htole32(SUNXI_MMC_IDMA_CONFIG_DIC);
				dma[desc].dma_next = htole32(
				    desc_paddr + ((desc+1) *
				    sizeof(struct sunxi_mmc_idma_descriptor)));
			}
			++desc;
		}
	}
	if (desc == sc->sc_idma_ndesc) {
		aprint_error_dev(sc->sc_dev,
		    "not enough descriptors for %d byte transfer!\n",
		    cmd->c_datalen);
		return EIO;
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_idma_map, 0,
	    sc->sc_idma_size, BUS_DMASYNC_PREWRITE);

	sc->sc_idma_idst = 0;

	val = MMC_READ(sc, SUNXI_MMC_GCTRL);
	val |= SUNXI_MMC_GCTRL_DMAEN;
	val |= SUNXI_MMC_GCTRL_INTEN;
	MMC_WRITE(sc, SUNXI_MMC_GCTRL, val);
	val |= SUNXI_MMC_GCTRL_DMARESET;
	MMC_WRITE(sc, SUNXI_MMC_GCTRL, val);
	MMC_WRITE(sc, SUNXI_MMC_DMAC, SUNXI_MMC_DMAC_SOFTRESET);
	MMC_WRITE(sc, SUNXI_MMC_DMAC,
	    SUNXI_MMC_DMAC_IDMA_ON|SUNXI_MMC_DMAC_FIX_BURST);
	val = MMC_READ(sc, SUNXI_MMC_IDIE);
	val &= ~(SUNXI_MMC_IDST_RECEIVE_INT|SUNXI_MMC_IDST_TRANSMIT_INT);
	if (ISSET(cmd->c_flags, SCF_CMD_READ))
		val |= SUNXI_MMC_IDST_RECEIVE_INT;
	else
		val |= SUNXI_MMC_IDST_TRANSMIT_INT;
	MMC_WRITE(sc, SUNXI_MMC_IDIE, val);
	MMC_WRITE(sc, SUNXI_MMC_DLBA, desc_paddr);
	MMC_WRITE(sc, SUNXI_MMC_FTRGLEVEL, sc->sc_config->dma_ftrglevel);

	return 0;
}

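/*
 * Post-DMA cleanup: sync the descriptor chain and, for bounced reads,
 * copy the data back out of the bounce buffer.
 */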
static void
sunxi_mmc_dma_complete(struct sunxi_mmc_softc *sc, struct sdmmc_command *cmd)
{
	bus_dmamap_sync(sc->sc_dmat, sc->sc_idma_map, 0,
	    sc->sc_idma_size, BUS_DMASYNC_POSTWRITE);

	if (cmd->c_dmamap == NULL) {
		if (ISSET(cmd->c_flags, SCF_CMD_READ)) {
			bus_dmamap_sync(sc->sc_dmat, sc->sc_dmabounce_map,
			    0, cmd->c_datalen, BUS_DMASYNC_POSTREAD);
			memcpy(cmd->c_data, sc->sc_dmabounce_buf,
			    cmd->c_datalen);
		} else {
			bus_dmamap_sync(sc->sc_dmat, sc->sc_dmabounce_map,
			    0, cmd->c_datalen, BUS_DMASYNC_POSTWRITE);
		}
	}
}

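/*
 * Execute an SD/MMC command: set up DMA if there is data, write the
 * command and argument registers, wait for command and data completion
 * interrupts, then collect the response. On error the FIFO and DMA are
 * reset and the clock is updated.
 */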
static void
sunxi_mmc_exec_command(sdmmc_chipset_handle_t sch, struct sdmmc_command *cmd)
{
	struct sunxi_mmc_softc *sc = sch;
	uint32_t cmdval = SUNXI_MMC_CMD_START;
	const bool poll = (cmd->c_flags & SCF_POLL) != 0;
	int retry;

	DPRINTF(sc->sc_dev,
	    "opcode %d flags 0x%x data %p datalen %d blklen %d poll %d\n",
	    cmd->c_opcode, cmd->c_flags, cmd->c_data, cmd->c_datalen,
	    cmd->c_blklen, poll);

	mutex_enter(&sc->sc_intr_lock);

	if (cmd->c_opcode == 0)
		cmdval |= SUNXI_MMC_CMD_SEND_INIT_SEQ;
	if (cmd->c_flags & SCF_RSP_PRESENT)
		cmdval |= SUNXI_MMC_CMD_RSP_EXP;
	if (cmd->c_flags & SCF_RSP_136)
		cmdval |= SUNXI_MMC_CMD_LONG_RSP;
	if (cmd->c_flags & SCF_RSP_CRC)
		cmdval |= SUNXI_MMC_CMD_CHECK_RSP_CRC;

	if (cmd->c_datalen > 0) {
		unsigned int nblks;

		cmdval |= SUNXI_MMC_CMD_DATA_EXP | SUNXI_MMC_CMD_WAIT_PRE_OVER;
		if (!ISSET(cmd->c_flags, SCF_CMD_READ)) {
			cmdval |= SUNXI_MMC_CMD_WRITE;
		}

		nblks = cmd->c_datalen / cmd->c_blklen;
		if (nblks == 0 || (cmd->c_datalen % cmd->c_blklen) != 0)
			++nblks;

		if (nblks > 1) {
			cmdval |= SUNXI_MMC_CMD_SEND_AUTO_STOP;
		}

		MMC_WRITE(sc, SUNXI_MMC_BLKSZ, cmd->c_blklen);
		MMC_WRITE(sc, SUNXI_MMC_BYTECNT, nblks * cmd->c_blklen);
	}

	sc->sc_intr_rint = 0;

	MMC_WRITE(sc, SUNXI_MMC_A12A,
	    (cmdval & SUNXI_MMC_CMD_SEND_AUTO_STOP) ? 0 : 0xffff);

	MMC_WRITE(sc, SUNXI_MMC_ARG, cmd->c_arg);

	DPRINTF(sc->sc_dev, "cmdval = %08x\n", cmdval);

	if (cmd->c_datalen == 0) {
		MMC_WRITE(sc, SUNXI_MMC_CMD, cmdval | cmd->c_opcode);
	} else {
		cmd->c_resid = cmd->c_datalen;
		cmd->c_error = sunxi_mmc_dma_prepare(sc, cmd);
		MMC_WRITE(sc, SUNXI_MMC_CMD, cmdval | cmd->c_opcode);
		if (cmd->c_error == 0) {
			const uint32_t idst_mask =
			    SUNXI_MMC_IDST_ERROR | SUNXI_MMC_IDST_COMPLETE;
			retry = 10;
			while ((sc->sc_idma_idst & idst_mask) == 0) {
				if (retry-- == 0) {
					cmd->c_error = ETIMEDOUT;
					break;
				}
				cv_timedwait(&sc->sc_idst_cv,
				    &sc->sc_intr_lock, hz);
			}
		}
		sunxi_mmc_dma_complete(sc, cmd);
		if (sc->sc_idma_idst & SUNXI_MMC_IDST_ERROR) {
			cmd->c_error = EIO;
		} else if (!(sc->sc_idma_idst & SUNXI_MMC_IDST_COMPLETE)) {
			cmd->c_error = ETIMEDOUT;
		}
		if (cmd->c_error) {
			DPRINTF(sc->sc_dev,
			    "xfer failed, error %d\n", cmd->c_error);
			goto done;
		}
	}

	cmd->c_error = sunxi_mmc_wait_rint(sc,
	    SUNXI_MMC_INT_ERROR|SUNXI_MMC_INT_CMD_DONE, hz * 10, poll);
	if (cmd->c_error == 0 && (sc->sc_intr_rint & SUNXI_MMC_INT_ERROR)) {
		if (sc->sc_intr_rint & SUNXI_MMC_INT_RESP_TIMEOUT) {
			cmd->c_error = ETIMEDOUT;
		} else {
			cmd->c_error = EIO;
		}
	}
	if (cmd->c_error) {
		DPRINTF(sc->sc_dev,
		    "cmd failed, error %d\n", cmd->c_error);
		goto done;
	}

	if (cmd->c_datalen > 0) {
		cmd->c_error = sunxi_mmc_wait_rint(sc,
		    SUNXI_MMC_INT_ERROR|
		    SUNXI_MMC_INT_AUTO_CMD_DONE|
		    SUNXI_MMC_INT_DATA_OVER,
		    hz*10, poll);
		if (cmd->c_error == 0 &&
		    (sc->sc_intr_rint & SUNXI_MMC_INT_ERROR)) {
			cmd->c_error = ETIMEDOUT;
		}
		if (cmd->c_error) {
			DPRINTF(sc->sc_dev,
			    "data timeout, rint = %08x\n",
			    sc->sc_intr_rint);
			cmd->c_error = ETIMEDOUT;
			goto done;
		}
	}

	if (cmd->c_flags & SCF_RSP_PRESENT) {
		if (cmd->c_flags & SCF_RSP_136) {
			cmd->c_resp[0] = MMC_READ(sc, SUNXI_MMC_RESP0);
			cmd->c_resp[1] = MMC_READ(sc, SUNXI_MMC_RESP1);
			cmd->c_resp[2] = MMC_READ(sc, SUNXI_MMC_RESP2);
			cmd->c_resp[3] = MMC_READ(sc, SUNXI_MMC_RESP3);
			if (cmd->c_flags & SCF_RSP_CRC) {
				cmd->c_resp[0] = (cmd->c_resp[0] >> 8) |
				    (cmd->c_resp[1] << 24);
				cmd->c_resp[1] = (cmd->c_resp[1] >> 8) |
				    (cmd->c_resp[2] << 24);
				cmd->c_resp[2] = (cmd->c_resp[2] >> 8) |
				    (cmd->c_resp[3] << 24);
				cmd->c_resp[3] = (cmd->c_resp[3] >> 8);
			}
		} else {
			cmd->c_resp[0] = MMC_READ(sc, SUNXI_MMC_RESP0);
		}
	}

done:
	cmd->c_flags |= SCF_ITSDONE;
	mutex_exit(&sc->sc_intr_lock);

	if (cmd->c_error) {
		DPRINTF(sc->sc_dev, "i/o error %d\n", cmd->c_error);
		MMC_WRITE(sc, SUNXI_MMC_GCTRL,
		    MMC_READ(sc, SUNXI_MMC_GCTRL) |
		    SUNXI_MMC_GCTRL_DMARESET | SUNXI_MMC_GCTRL_FIFORESET);
		for (retry = 0; retry < 1000; retry++) {
			if (!(MMC_READ(sc, SUNXI_MMC_GCTRL) & SUNXI_MMC_GCTRL_RESET))
				break;
			delay(10);
		}
		sunxi_mmc_update_clock(sc);
	}

	MMC_WRITE(sc, SUNXI_MMC_GCTRL,
	    MMC_READ(sc, SUNXI_MMC_GCTRL) | SUNXI_MMC_GCTRL_FIFORESET);
}

static void
sunxi_mmc_card_enable_intr(sdmmc_chipset_handle_t sch, int enable)
{
	struct sunxi_mmc_softc *sc = sch;
	uint32_t imask;

	imask = MMC_READ(sc, SUNXI_MMC_IMASK);
	if (enable)
		imask |= SUNXI_MMC_INT_SDIO_INT;
	else
		imask &= ~SUNXI_MMC_INT_SDIO_INT;
	MMC_WRITE(sc, SUNXI_MMC_IMASK, imask);
}

static void
sunxi_mmc_card_intr_ack(sdmmc_chipset_handle_t sch)
{
	struct sunxi_mmc_softc *sc = sch;

	MMC_WRITE(sc, SUNXI_MMC_RINT, SUNXI_MMC_INT_SDIO_INT);
}