/* $NetBSD: sunxi_mmc.c,v 1.15 2017/10/23 13:28:19 jmcneill Exp $ */

/*-
 * Copyright (c) 2014-2017 Jared McNeill <jmcneill@invisible.ca>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "opt_sunximmc.h"

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sunxi_mmc.c,v 1.15 2017/10/23 13:28:19 jmcneill Exp $");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/device.h>
#include <sys/intr.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/gpio.h>

#include <dev/sdmmc/sdmmcvar.h>
#include <dev/sdmmc/sdmmcchip.h>
#include <dev/sdmmc/sdmmc_ioreg.h>

#include <dev/fdt/fdtvar.h>

#include <arm/sunxi/sunxi_mmc.h>

#ifdef SUNXI_MMC_DEBUG
static int sunxi_mmc_debug = SUNXI_MMC_DEBUG;
#define DPRINTF(dev, fmt, ...) \
	do { \
		if (sunxi_mmc_debug & __BIT(device_unit(dev))) \
			device_printf((dev), fmt, ##__VA_ARGS__); \
	} while (0)
#else
#define DPRINTF(dev, fmt, ...) ((void)0)
#endif

enum sunxi_mmc_timing {
	SUNXI_MMC_TIMING_400K,
	SUNXI_MMC_TIMING_25M,
	SUNXI_MMC_TIMING_50M,
	SUNXI_MMC_TIMING_50M_DDR,
	SUNXI_MMC_TIMING_50M_DDR_8BIT,
};

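/*
 * Per-timing mode output/sample clock delays.  These values are handed to
 * clk_set_rate() on the "output" and "sample" clocks whenever the bus
 * clock changes; they appear to be phase delays (in degrees) rather than
 * rates.
 */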
struct sunxi_mmc_delay {
	u_int output_phase;
	u_int sample_phase;
};

static const struct sunxi_mmc_delay sun7i_mmc_delays[] = {
	[SUNXI_MMC_TIMING_400K] = { 180, 180 },
	[SUNXI_MMC_TIMING_25M] = { 180, 75 },
	[SUNXI_MMC_TIMING_50M] = { 90, 120 },
	[SUNXI_MMC_TIMING_50M_DDR] = { 60, 120 },
	[SUNXI_MMC_TIMING_50M_DDR_8BIT] = { 90, 180 },
};

static const struct sunxi_mmc_delay sun9i_mmc_delays[] = {
	[SUNXI_MMC_TIMING_400K] = { 180, 180 },
	[SUNXI_MMC_TIMING_25M] = { 180, 75 },
	[SUNXI_MMC_TIMING_50M] = { 150, 120 },
	[SUNXI_MMC_TIMING_50M_DDR] = { 54, 36 },
	[SUNXI_MMC_TIMING_50M_DDR_8BIT] = { 72, 72 },
};

#define SUNXI_MMC_NDESC	16

struct sunxi_mmc_softc;

static int	sunxi_mmc_match(device_t, cfdata_t, void *);
static void	sunxi_mmc_attach(device_t, device_t, void *);
static void	sunxi_mmc_attach_i(device_t);

static int	sunxi_mmc_intr(void *);
static int	sunxi_mmc_dmabounce_setup(struct sunxi_mmc_softc *);
static int	sunxi_mmc_idma_setup(struct sunxi_mmc_softc *);

static int	sunxi_mmc_host_reset(sdmmc_chipset_handle_t);
static uint32_t	sunxi_mmc_host_ocr(sdmmc_chipset_handle_t);
static int	sunxi_mmc_host_maxblklen(sdmmc_chipset_handle_t);
static int	sunxi_mmc_card_detect(sdmmc_chipset_handle_t);
static int	sunxi_mmc_write_protect(sdmmc_chipset_handle_t);
static int	sunxi_mmc_bus_power(sdmmc_chipset_handle_t, uint32_t);
static int	sunxi_mmc_bus_clock(sdmmc_chipset_handle_t, int, bool);
static int	sunxi_mmc_bus_width(sdmmc_chipset_handle_t, int);
static int	sunxi_mmc_bus_rod(sdmmc_chipset_handle_t, int);
static int	sunxi_mmc_signal_voltage(sdmmc_chipset_handle_t, int);
static void	sunxi_mmc_exec_command(sdmmc_chipset_handle_t,
		    struct sdmmc_command *);
static void	sunxi_mmc_card_enable_intr(sdmmc_chipset_handle_t, int);
static void	sunxi_mmc_card_intr_ack(sdmmc_chipset_handle_t);

static struct sdmmc_chip_functions sunxi_mmc_chip_functions = {
	.host_reset = sunxi_mmc_host_reset,
	.host_ocr = sunxi_mmc_host_ocr,
	.host_maxblklen = sunxi_mmc_host_maxblklen,
	.card_detect = sunxi_mmc_card_detect,
	.write_protect = sunxi_mmc_write_protect,
	.bus_power = sunxi_mmc_bus_power,
	.bus_clock_ddr = sunxi_mmc_bus_clock,
	.bus_width = sunxi_mmc_bus_width,
	.bus_rod = sunxi_mmc_bus_rod,
	.signal_voltage = sunxi_mmc_signal_voltage,
	.exec_command = sunxi_mmc_exec_command,
	.card_enable_intr = sunxi_mmc_card_enable_intr,
	.card_intr_ack = sunxi_mmc_card_intr_ack,
};

struct sunxi_mmc_config {
	u_int idma_xferlen;
	u_int flags;
#define SUNXI_MMC_FLAG_CALIB_REG	0x01
#define SUNXI_MMC_FLAG_NEW_TIMINGS	0x02
#define SUNXI_MMC_FLAG_MASK_DATA0	0x04
	const struct sunxi_mmc_delay *delays;
	uint32_t dma_ftrglevel;
};

struct sunxi_mmc_softc {
	device_t sc_dev;
	bus_space_tag_t sc_bst;
	bus_space_handle_t sc_bsh;
	bus_dma_tag_t sc_dmat;
	int sc_phandle;

	void *sc_ih;
	kmutex_t sc_intr_lock;
	kcondvar_t sc_intr_cv;
	kcondvar_t sc_idst_cv;

	int sc_mmc_width;
	int sc_mmc_present;

	device_t sc_sdmmc_dev;

	struct sunxi_mmc_config *sc_config;

	bus_dma_segment_t sc_idma_segs[1];
	int sc_idma_nsegs;
	bus_size_t sc_idma_size;
	bus_dmamap_t sc_idma_map;
	int sc_idma_ndesc;
	void *sc_idma_desc;

	bus_dmamap_t sc_dmabounce_map;
	void *sc_dmabounce_buf;
	size_t sc_dmabounce_buflen;

	uint32_t sc_intr_rint;
	uint32_t sc_idma_idst;

	struct clk *sc_clk_ahb;
	struct clk *sc_clk_mmc;
	struct clk *sc_clk_output;
	struct clk *sc_clk_sample;

	struct fdtbus_reset *sc_rst_ahb;

	struct fdtbus_gpio_pin *sc_gpio_cd;
	int sc_gpio_cd_inverted;
	struct fdtbus_gpio_pin *sc_gpio_wp;
	int sc_gpio_wp_inverted;

	struct fdtbus_regulator *sc_reg_vqmmc;

	struct fdtbus_mmc_pwrseq *sc_pwrseq;
};

CFATTACH_DECL_NEW(sunxi_mmc, sizeof(struct sunxi_mmc_softc),
	sunxi_mmc_match, sunxi_mmc_attach, NULL, NULL);

#define MMC_WRITE(sc, reg, val) \
	bus_space_write_4((sc)->sc_bst, (sc)->sc_bsh, (reg), (val))
#define MMC_READ(sc, reg) \
	bus_space_read_4((sc)->sc_bst, (sc)->sc_bsh, (reg))

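/*
 * Per-SoC configuration: maximum transfer length per IDMA descriptor,
 * DMA FIFO trigger level, an optional clock phase delay table, and
 * feature flags for newer controller revisions.
 */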
static const struct sunxi_mmc_config sun4i_a10_mmc_config = {
	.idma_xferlen = 0x2000,
	.dma_ftrglevel = 0x20070008,
	.delays = NULL,
	.flags = 0,
};

static const struct sunxi_mmc_config sun5i_a13_mmc_config = {
	.idma_xferlen = 0x10000,
	.dma_ftrglevel = 0x20070008,
	.delays = NULL,
	.flags = 0,
};

static const struct sunxi_mmc_config sun7i_a20_mmc_config = {
	.idma_xferlen = 0x2000,
	.dma_ftrglevel = 0x20070008,
	.delays = sun7i_mmc_delays,
	.flags = 0,
};

static const struct sunxi_mmc_config sun9i_a80_mmc_config = {
	.idma_xferlen = 0x10000,
	.dma_ftrglevel = 0x200f0010,
	.delays = sun9i_mmc_delays,
	.flags = 0,
};

static const struct sunxi_mmc_config sun50i_a64_mmc_config = {
	.idma_xferlen = 0x10000,
	.dma_ftrglevel = 0x20070008,
	.delays = NULL,
	.flags = SUNXI_MMC_FLAG_CALIB_REG |
		 SUNXI_MMC_FLAG_NEW_TIMINGS |
		 SUNXI_MMC_FLAG_MASK_DATA0,
};

static const struct of_compat_data compat_data[] = {
	{ "allwinner,sun4i-a10-mmc", (uintptr_t)&sun4i_a10_mmc_config },
	{ "allwinner,sun5i-a13-mmc", (uintptr_t)&sun5i_a13_mmc_config },
	{ "allwinner,sun7i-a20-mmc", (uintptr_t)&sun7i_a20_mmc_config },
	{ "allwinner,sun9i-a80-mmc", (uintptr_t)&sun9i_a80_mmc_config },
	{ "allwinner,sun50i-a64-mmc", (uintptr_t)&sun50i_a64_mmc_config },
	{ NULL }
};

static int
sunxi_mmc_match(device_t parent, cfdata_t cf, void *aux)
{
	struct fdt_attach_args * const faa = aux;

	return of_match_compat_data(faa->faa_phandle, compat_data);
}

static void
sunxi_mmc_attach(device_t parent, device_t self, void *aux)
{
	struct sunxi_mmc_softc * const sc = device_private(self);
	struct fdt_attach_args * const faa = aux;
	const int phandle = faa->faa_phandle;
	char intrstr[128];
	bus_addr_t addr;
	bus_size_t size;

	if (fdtbus_get_reg(phandle, 0, &addr, &size) != 0) {
		aprint_error(": couldn't get registers\n");
		return;
	}

	sc->sc_clk_ahb = fdtbus_clock_get(phandle, "ahb");
	sc->sc_clk_mmc = fdtbus_clock_get(phandle, "mmc");
	sc->sc_clk_output = fdtbus_clock_get(phandle, "output");
	sc->sc_clk_sample = fdtbus_clock_get(phandle, "sample");

#if notyet
	if (sc->sc_clk_ahb == NULL || sc->sc_clk_mmc == NULL ||
	    sc->sc_clk_output == NULL || sc->sc_clk_sample == NULL) {
#else
	if (sc->sc_clk_ahb == NULL || sc->sc_clk_mmc == NULL) {
#endif
		aprint_error(": couldn't get clocks\n");
		return;
	}

	sc->sc_rst_ahb = fdtbus_reset_get(phandle, "ahb");

	sc->sc_reg_vqmmc = fdtbus_regulator_acquire(phandle, "vqmmc-supply");

	sc->sc_pwrseq = fdtbus_mmc_pwrseq_get(phandle);

	if (clk_enable(sc->sc_clk_ahb) != 0 ||
	    clk_enable(sc->sc_clk_mmc) != 0) {
		aprint_error(": couldn't enable clocks\n");
		return;
	}

	if (sc->sc_rst_ahb != NULL) {
		if (fdtbus_reset_deassert(sc->sc_rst_ahb) != 0) {
			aprint_error(": couldn't de-assert resets\n");
			return;
		}
	}

	sc->sc_dev = self;
	sc->sc_phandle = phandle;
	sc->sc_config = (void *)of_search_compatible(phandle, compat_data)->data;
	sc->sc_bst = faa->faa_bst;
	sc->sc_dmat = faa->faa_dmat;
	mutex_init(&sc->sc_intr_lock, MUTEX_DEFAULT, IPL_BIO);
	cv_init(&sc->sc_intr_cv, "awinmmcirq");
	cv_init(&sc->sc_idst_cv, "awinmmcdma");

	if (bus_space_map(sc->sc_bst, addr, size, 0, &sc->sc_bsh) != 0) {
		aprint_error(": couldn't map registers\n");
		return;
	}

	aprint_naive("\n");
	aprint_normal(": SD/MMC controller\n");

	sc->sc_gpio_cd = fdtbus_gpio_acquire(phandle, "cd-gpios",
	    GPIO_PIN_INPUT);
	sc->sc_gpio_wp = fdtbus_gpio_acquire(phandle, "wp-gpios",
	    GPIO_PIN_INPUT);

	sc->sc_gpio_cd_inverted = of_hasprop(phandle, "cd-inverted") ? 0 : 1;
	sc->sc_gpio_wp_inverted = of_hasprop(phandle, "wp-inverted") ? 0 : 1;

	if (sunxi_mmc_dmabounce_setup(sc) != 0 ||
	    sunxi_mmc_idma_setup(sc) != 0) {
		aprint_error_dev(self, "failed to setup DMA\n");
		return;
	}

	if (!fdtbus_intr_str(phandle, 0, intrstr, sizeof(intrstr))) {
		aprint_error_dev(self, "failed to decode interrupt\n");
		return;
	}

	sc->sc_ih = fdtbus_intr_establish(phandle, 0, IPL_BIO, FDT_INTR_MPSAFE,
	    sunxi_mmc_intr, sc);
	if (sc->sc_ih == NULL) {
		aprint_error_dev(self, "failed to establish interrupt on %s\n",
		    intrstr);
		return;
	}
	aprint_normal_dev(self, "interrupting on %s\n", intrstr);

	config_interrupts(self, sunxi_mmc_attach_i);
}

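/*
 * Allocate a single-segment bounce buffer, sized to the maximum block
 * length, for commands that arrive without a DMA map (e.g. SDIO
 * IO_RW_EXTENDED).
 */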
static int
sunxi_mmc_dmabounce_setup(struct sunxi_mmc_softc *sc)
{
	bus_dma_segment_t ds[1];
	int error, rseg;

	sc->sc_dmabounce_buflen = sunxi_mmc_host_maxblklen(sc);
	error = bus_dmamem_alloc(sc->sc_dmat, sc->sc_dmabounce_buflen, 0,
	    sc->sc_dmabounce_buflen, ds, 1, &rseg, BUS_DMA_WAITOK);
	if (error)
		return error;
	error = bus_dmamem_map(sc->sc_dmat, ds, 1, sc->sc_dmabounce_buflen,
	    &sc->sc_dmabounce_buf, BUS_DMA_WAITOK);
	if (error)
		goto free;
	error = bus_dmamap_create(sc->sc_dmat, sc->sc_dmabounce_buflen, 1,
	    sc->sc_dmabounce_buflen, 0, BUS_DMA_WAITOK, &sc->sc_dmabounce_map);
	if (error)
		goto unmap;
	error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmabounce_map,
	    sc->sc_dmabounce_buf, sc->sc_dmabounce_buflen, NULL,
	    BUS_DMA_WAITOK);
	if (error)
		goto destroy;
	return 0;

destroy:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmabounce_map);
unmap:
	bus_dmamem_unmap(sc->sc_dmat, sc->sc_dmabounce_buf,
	    sc->sc_dmabounce_buflen);
free:
	bus_dmamem_free(sc->sc_dmat, ds, rseg);
	return error;
}

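/*
 * Allocate, map and load the internal DMA (IDMA) descriptor ring used
 * to describe data transfers to the controller.
 */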
static int
sunxi_mmc_idma_setup(struct sunxi_mmc_softc *sc)
{
	int error;

	sc->sc_idma_ndesc = SUNXI_MMC_NDESC;
	sc->sc_idma_size = sizeof(struct sunxi_mmc_idma_descriptor) *
	    sc->sc_idma_ndesc;
	error = bus_dmamem_alloc(sc->sc_dmat, sc->sc_idma_size, 0,
	    sc->sc_idma_size, sc->sc_idma_segs, 1,
	    &sc->sc_idma_nsegs, BUS_DMA_WAITOK);
	if (error)
		return error;
	error = bus_dmamem_map(sc->sc_dmat, sc->sc_idma_segs,
	    sc->sc_idma_nsegs, sc->sc_idma_size,
	    &sc->sc_idma_desc, BUS_DMA_WAITOK);
	if (error)
		goto free;
	error = bus_dmamap_create(sc->sc_dmat, sc->sc_idma_size, 1,
	    sc->sc_idma_size, 0, BUS_DMA_WAITOK, &sc->sc_idma_map);
	if (error)
		goto unmap;
	error = bus_dmamap_load(sc->sc_dmat, sc->sc_idma_map,
	    sc->sc_idma_desc, sc->sc_idma_size, NULL, BUS_DMA_WAITOK);
	if (error)
		goto destroy;
	return 0;

destroy:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_idma_map);
unmap:
	bus_dmamem_unmap(sc->sc_dmat, sc->sc_idma_desc, sc->sc_idma_size);
free:
	bus_dmamem_free(sc->sc_dmat, sc->sc_idma_segs, sc->sc_idma_nsegs);
	return error;
}

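/*
 * Select a timing mode for the requested frequency (in kHz), program the
 * mmc module clock, and apply the per-SoC output/sample delays if the
 * configuration provides them.
 */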
static int
sunxi_mmc_set_clock(struct sunxi_mmc_softc *sc, u_int freq, bool ddr)
{
	const struct sunxi_mmc_delay *delays;
	int error, timing;

	if (freq <= 400) {
		timing = SUNXI_MMC_TIMING_400K;
	} else if (freq <= 25000) {
		timing = SUNXI_MMC_TIMING_25M;
	} else if (freq <= 52000) {
		if (ddr) {
			timing = sc->sc_mmc_width == 8 ?
			    SUNXI_MMC_TIMING_50M_DDR_8BIT :
			    SUNXI_MMC_TIMING_50M_DDR;
		} else {
			timing = SUNXI_MMC_TIMING_50M;
		}
	} else
		return EINVAL;

	error = clk_set_rate(sc->sc_clk_mmc, (freq * 1000) << ddr);
	if (error != 0)
		return error;

	if (sc->sc_config->delays == NULL)
		return 0;

	delays = &sc->sc_config->delays[timing];

	if (sc->sc_clk_sample) {
		error = clk_set_rate(sc->sc_clk_sample, delays->sample_phase);
		if (error != 0)
			return error;
	}
	if (sc->sc_clk_output) {
		error = clk_set_rate(sc->sc_clk_output, delays->output_phase);
		if (error != 0)
			return error;
	}

	return 0;
}

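/*
 * Deferred attach, run once interrupts are available: power on the card
 * via the optional pwrseq, reset the host, start at 1-bit/400 kHz, and
 * attach the sdmmc bus with the capabilities advertised below.
 */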
static void
sunxi_mmc_attach_i(device_t self)
{
	struct sunxi_mmc_softc *sc = device_private(self);
	struct sdmmcbus_attach_args saa;
	uint32_t width;

	if (sc->sc_pwrseq)
		fdtbus_mmc_pwrseq_pre_power_on(sc->sc_pwrseq);

	sunxi_mmc_host_reset(sc);
	sunxi_mmc_bus_width(sc, 1);
	sunxi_mmc_set_clock(sc, 400, false);

	if (sc->sc_pwrseq)
		fdtbus_mmc_pwrseq_post_power_on(sc->sc_pwrseq);

	if (of_getprop_uint32(sc->sc_phandle, "bus-width", &width) != 0)
		width = 4;

	memset(&saa, 0, sizeof(saa));
	saa.saa_busname = "sdmmc";
	saa.saa_sct = &sunxi_mmc_chip_functions;
	saa.saa_sch = sc;
	saa.saa_dmat = sc->sc_dmat;
	saa.saa_clkmin = 400;
	saa.saa_clkmax = 52000;
	saa.saa_caps = SMC_CAPS_DMA |
		       SMC_CAPS_MULTI_SEG_DMA |
		       SMC_CAPS_AUTO_STOP |
		       SMC_CAPS_SD_HIGHSPEED |
		       SMC_CAPS_MMC_HIGHSPEED |
		       SMC_CAPS_MMC_DDR52 |
		       SMC_CAPS_POLLING;
	if (width == 4)
		saa.saa_caps |= SMC_CAPS_4BIT_MODE;
	if (width == 8)
		saa.saa_caps |= SMC_CAPS_8BIT_MODE;

	if (sc->sc_gpio_cd)
		saa.saa_caps |= SMC_CAPS_POLL_CARD_DET;

	sc->sc_sdmmc_dev = config_found(self, &saa, NULL);
}

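/*
 * Interrupt handler: latch and acknowledge the IDMA (IDST) and raw
 * interrupt (RINT) status, wake any waiters, and dispatch SDIO card
 * interrupts to the sdmmc layer.
 */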
static int
sunxi_mmc_intr(void *priv)
{
	struct sunxi_mmc_softc *sc = priv;
	uint32_t idst, rint;

	mutex_enter(&sc->sc_intr_lock);
	idst = MMC_READ(sc, SUNXI_MMC_IDST);
	rint = MMC_READ(sc, SUNXI_MMC_RINT);
	if (!idst && !rint) {
		mutex_exit(&sc->sc_intr_lock);
		return 0;
	}
	MMC_WRITE(sc, SUNXI_MMC_IDST, idst);
	MMC_WRITE(sc, SUNXI_MMC_RINT, rint);

	DPRINTF(sc->sc_dev, "mmc intr idst=%08X rint=%08X\n",
	    idst, rint);

	if (idst != 0) {
		sc->sc_idma_idst |= idst;
		cv_broadcast(&sc->sc_idst_cv);
	}

	if ((rint & ~SUNXI_MMC_INT_SDIO_INT) != 0) {
		sc->sc_intr_rint |= (rint & ~SUNXI_MMC_INT_SDIO_INT);
		cv_broadcast(&sc->sc_intr_cv);
	}

	if ((rint & SUNXI_MMC_INT_SDIO_INT) != 0) {
		sdmmc_card_intr(sc->sc_sdmmc_dev);
	}

	mutex_exit(&sc->sc_intr_lock);

	return 1;
}

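/*
 * Wait for any of the RINT bits in 'mask' to assert, either by polling
 * the register (with 1 ms delays) or by sleeping on the interrupt
 * condition variable.  The timeout is given in ticks.
 */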
static int
sunxi_mmc_wait_rint(struct sunxi_mmc_softc *sc, uint32_t mask,
    int timeout, bool poll)
{
	int retry;
	int error;

	KASSERT(mutex_owned(&sc->sc_intr_lock));

	if (sc->sc_intr_rint & mask)
		return 0;

	if (poll)
		retry = timeout / hz * 1000;
	else
		retry = timeout / hz;

	while (retry > 0) {
		if (poll) {
			sc->sc_intr_rint |= MMC_READ(sc, SUNXI_MMC_RINT);
		} else {
			error = cv_timedwait(&sc->sc_intr_cv,
			    &sc->sc_intr_lock, hz);
			if (error && error != EWOULDBLOCK)
				return error;
		}
		if (sc->sc_intr_rint & mask)
			return 0;
		if (poll)
			delay(1000);
		--retry;
	}

	return ETIMEDOUT;
}

static int
sunxi_mmc_host_reset(sdmmc_chipset_handle_t sch)
{
	struct sunxi_mmc_softc *sc = sch;
	int retry = 1000;

	DPRINTF(sc->sc_dev, "host reset\n");

	MMC_WRITE(sc, SUNXI_MMC_GCTRL,
	    MMC_READ(sc, SUNXI_MMC_GCTRL) | SUNXI_MMC_GCTRL_RESET);
	while (--retry > 0) {
		if (!(MMC_READ(sc, SUNXI_MMC_GCTRL) & SUNXI_MMC_GCTRL_RESET))
			break;
		delay(100);
	}

	MMC_WRITE(sc, SUNXI_MMC_TIMEOUT, 0xffffffff);

	MMC_WRITE(sc, SUNXI_MMC_IMASK,
	    SUNXI_MMC_INT_CMD_DONE | SUNXI_MMC_INT_ERROR |
	    SUNXI_MMC_INT_DATA_OVER | SUNXI_MMC_INT_AUTO_CMD_DONE);

	MMC_WRITE(sc, SUNXI_MMC_GCTRL,
	    MMC_READ(sc, SUNXI_MMC_GCTRL) | SUNXI_MMC_GCTRL_INTEN);

	return 0;
}

static uint32_t
sunxi_mmc_host_ocr(sdmmc_chipset_handle_t sch)
{
	return MMC_OCR_3_2V_3_3V | MMC_OCR_3_3V_3_4V | MMC_OCR_HCS;
}

static int
sunxi_mmc_host_maxblklen(sdmmc_chipset_handle_t sch)
{
	return 8192;
}

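/*
 * Card detect: when a cd-gpios pin is present, sample it five times at
 * 1 ms intervals and only change the cached state when all samples agree.
 */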
static int
sunxi_mmc_card_detect(sdmmc_chipset_handle_t sch)
{
	struct sunxi_mmc_softc *sc = sch;

	if (sc->sc_gpio_cd == NULL) {
		return 1;	/* no card detect pin, assume present */
	} else {
		int v = 0, i;
		for (i = 0; i < 5; i++) {
			v += (fdtbus_gpio_read(sc->sc_gpio_cd) ^
			    sc->sc_gpio_cd_inverted);
			delay(1000);
		}
		if (v == 5)
			sc->sc_mmc_present = 0;
		else if (v == 0)
			sc->sc_mmc_present = 1;
		return sc->sc_mmc_present;
	}
}

static int
sunxi_mmc_write_protect(sdmmc_chipset_handle_t sch)
{
	struct sunxi_mmc_softc *sc = sch;

	if (sc->sc_gpio_wp == NULL) {
		return 0;	/* no write protect pin, assume rw */
	} else {
		return fdtbus_gpio_read(sc->sc_gpio_wp) ^
		    sc->sc_gpio_wp_inverted;
	}
}

static int
sunxi_mmc_bus_power(sdmmc_chipset_handle_t sch, uint32_t ocr)
{
	return 0;
}

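/*
 * Issue an "update clock" command so the controller latches new CLKCR
 * settings, and busy-wait for it to complete.
 */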
static int
sunxi_mmc_update_clock(struct sunxi_mmc_softc *sc)
{
	uint32_t cmd;
	int retry;

	DPRINTF(sc->sc_dev, "update clock\n");

	cmd = SUNXI_MMC_CMD_START |
	    SUNXI_MMC_CMD_UPCLK_ONLY |
	    SUNXI_MMC_CMD_WAIT_PRE_OVER;
	MMC_WRITE(sc, SUNXI_MMC_CMD, cmd);
	retry = 0xfffff;
	while (--retry > 0) {
		if (!(MMC_READ(sc, SUNXI_MMC_CMD) & SUNXI_MMC_CMD_START))
			break;
		delay(10);
	}

	if (retry == 0) {
		aprint_error_dev(sc->sc_dev, "timeout updating clock\n");
		DPRINTF(sc->sc_dev, "GCTRL: 0x%08x\n",
		    MMC_READ(sc, SUNXI_MMC_GCTRL));
		DPRINTF(sc->sc_dev, "CLKCR: 0x%08x\n",
		    MMC_READ(sc, SUNXI_MMC_CLKCR));
		DPRINTF(sc->sc_dev, "TIMEOUT: 0x%08x\n",
		    MMC_READ(sc, SUNXI_MMC_TIMEOUT));
		DPRINTF(sc->sc_dev, "WIDTH: 0x%08x\n",
		    MMC_READ(sc, SUNXI_MMC_WIDTH));
		DPRINTF(sc->sc_dev, "CMD: 0x%08x\n",
		    MMC_READ(sc, SUNXI_MMC_CMD));
		DPRINTF(sc->sc_dev, "MINT: 0x%08x\n",
		    MMC_READ(sc, SUNXI_MMC_MINT));
		DPRINTF(sc->sc_dev, "RINT: 0x%08x\n",
		    MMC_READ(sc, SUNXI_MMC_RINT));
		DPRINTF(sc->sc_dev, "STATUS: 0x%08x\n",
		    MMC_READ(sc, SUNXI_MMC_STATUS));
		return ETIMEDOUT;
	}

	return 0;
}

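/*
 * Change the card clock: gate the clock, program the divider and timing
 * mode, then re-enable it, issuing an update-clock command at each step.
 * On controllers with the MASK_DATA0 flag, DAT0 is masked while the
 * clock is being reprogrammed.
 */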
static int
sunxi_mmc_bus_clock(sdmmc_chipset_handle_t sch, int freq, bool ddr)
{
	struct sunxi_mmc_softc *sc = sch;
	uint32_t clkcr, gctrl, ntsr;
	const u_int flags = sc->sc_config->flags;

	clkcr = MMC_READ(sc, SUNXI_MMC_CLKCR);
	if (clkcr & SUNXI_MMC_CLKCR_CARDCLKON) {
		clkcr &= ~SUNXI_MMC_CLKCR_CARDCLKON;
		if (flags & SUNXI_MMC_FLAG_MASK_DATA0)
			clkcr |= SUNXI_MMC_CLKCR_MASK_DATA0;
		MMC_WRITE(sc, SUNXI_MMC_CLKCR, clkcr);
		if (sunxi_mmc_update_clock(sc) != 0)
			return 1;
		if (flags & SUNXI_MMC_FLAG_MASK_DATA0) {
			clkcr = MMC_READ(sc, SUNXI_MMC_CLKCR);
			clkcr &= ~SUNXI_MMC_CLKCR_MASK_DATA0;
			MMC_WRITE(sc, SUNXI_MMC_CLKCR, clkcr);
		}
	}

	if (freq) {

		clkcr &= ~SUNXI_MMC_CLKCR_DIV;
		clkcr |= __SHIFTIN(ddr, SUNXI_MMC_CLKCR_DIV);
		MMC_WRITE(sc, SUNXI_MMC_CLKCR, clkcr);

		if (flags & SUNXI_MMC_FLAG_NEW_TIMINGS) {
			ntsr = MMC_READ(sc, SUNXI_MMC_NTSR);
			ntsr |= SUNXI_MMC_NTSR_MODE_SELECT;
			MMC_WRITE(sc, SUNXI_MMC_NTSR, ntsr);
		}

		if (flags & SUNXI_MMC_FLAG_CALIB_REG)
			MMC_WRITE(sc, SUNXI_MMC_SAMP_DL, SUNXI_MMC_SAMP_DL_SW_EN);

		if (sunxi_mmc_update_clock(sc) != 0)
			return 1;

		gctrl = MMC_READ(sc, SUNXI_MMC_GCTRL);
		if (ddr)
			gctrl |= SUNXI_MMC_GCTRL_DDR_MODE;
		else
			gctrl &= ~SUNXI_MMC_GCTRL_DDR_MODE;
		MMC_WRITE(sc, SUNXI_MMC_GCTRL, gctrl);

		if (sunxi_mmc_set_clock(sc, freq, ddr) != 0)
			return 1;

		clkcr |= SUNXI_MMC_CLKCR_CARDCLKON;
		if (flags & SUNXI_MMC_FLAG_MASK_DATA0)
			clkcr |= SUNXI_MMC_CLKCR_MASK_DATA0;
		MMC_WRITE(sc, SUNXI_MMC_CLKCR, clkcr);
		if (sunxi_mmc_update_clock(sc) != 0)
			return 1;
		if (flags & SUNXI_MMC_FLAG_MASK_DATA0) {
			clkcr = MMC_READ(sc, SUNXI_MMC_CLKCR);
			clkcr &= ~SUNXI_MMC_CLKCR_MASK_DATA0;
			MMC_WRITE(sc, SUNXI_MMC_CLKCR, clkcr);
		}
	}

	return 0;
}

static int
sunxi_mmc_bus_width(sdmmc_chipset_handle_t sch, int width)
{
	struct sunxi_mmc_softc *sc = sch;

	DPRINTF(sc->sc_dev, "width = %d\n", width);

	switch (width) {
	case 1:
		MMC_WRITE(sc, SUNXI_MMC_WIDTH, SUNXI_MMC_WIDTH_1);
		break;
	case 4:
		MMC_WRITE(sc, SUNXI_MMC_WIDTH, SUNXI_MMC_WIDTH_4);
		break;
	case 8:
		MMC_WRITE(sc, SUNXI_MMC_WIDTH, SUNXI_MMC_WIDTH_8);
		break;
	default:
		return 1;
	}

	sc->sc_mmc_width = width;

	return 0;
}

static int
sunxi_mmc_bus_rod(sdmmc_chipset_handle_t sch, int on)
{
	return -1;
}

static int
sunxi_mmc_signal_voltage(sdmmc_chipset_handle_t sch, int signal_voltage)
{
	struct sunxi_mmc_softc *sc = sch;
	u_int uvol;
	int error;

	if (sc->sc_reg_vqmmc == NULL)
		return 0;

	switch (signal_voltage) {
	case SDMMC_SIGNAL_VOLTAGE_330:
		uvol = 3300000;
		break;
	case SDMMC_SIGNAL_VOLTAGE_180:
		uvol = 1800000;
		break;
	default:
		return EINVAL;
	}

	error = fdtbus_regulator_set_voltage(sc->sc_reg_vqmmc, uvol, uvol);
	if (error != 0)
		return error;

	return fdtbus_regulator_enable(sc->sc_reg_vqmmc);
}

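/*
 * Build the IDMA descriptor chain for a data command, splitting each DMA
 * segment into idma_xferlen-sized pieces, then reset and configure the
 * IDMA engine.  Commands without a DMA map are staged through the bounce
 * buffer.
 */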
static int
sunxi_mmc_dma_prepare(struct sunxi_mmc_softc *sc, struct sdmmc_command *cmd)
{
	struct sunxi_mmc_idma_descriptor *dma = sc->sc_idma_desc;
	bus_addr_t desc_paddr = sc->sc_idma_map->dm_segs[0].ds_addr;
	bus_dmamap_t map;
	bus_size_t off;
	int desc, resid, seg;
	uint32_t val;

	/*
	 * If the command includes a dma map use it, otherwise we need to
	 * bounce. This can happen for SDIO IO_RW_EXTENDED (CMD53) commands.
	 */
	if (cmd->c_dmamap) {
		map = cmd->c_dmamap;
	} else {
		if (cmd->c_datalen > sc->sc_dmabounce_buflen)
			return E2BIG;
		map = sc->sc_dmabounce_map;

		if (ISSET(cmd->c_flags, SCF_CMD_READ)) {
			memset(sc->sc_dmabounce_buf, 0, cmd->c_datalen);
			bus_dmamap_sync(sc->sc_dmat, sc->sc_dmabounce_map,
			    0, cmd->c_datalen, BUS_DMASYNC_PREREAD);
		} else {
			memcpy(sc->sc_dmabounce_buf, cmd->c_data,
			    cmd->c_datalen);
			bus_dmamap_sync(sc->sc_dmat, sc->sc_dmabounce_map,
			    0, cmd->c_datalen, BUS_DMASYNC_PREWRITE);
		}
	}

	desc = 0;
	for (seg = 0; seg < map->dm_nsegs; seg++) {
		bus_addr_t paddr = map->dm_segs[seg].ds_addr;
		bus_size_t len = map->dm_segs[seg].ds_len;
		resid = min(len, cmd->c_resid);
		off = 0;
		while (resid > 0) {
			if (desc == sc->sc_idma_ndesc)
				break;
			len = min(sc->sc_config->idma_xferlen, resid);
			dma[desc].dma_buf_size = htole32(len);
			dma[desc].dma_buf_addr = htole32(paddr + off);
			dma[desc].dma_config = htole32(SUNXI_MMC_IDMA_CONFIG_CH |
			    SUNXI_MMC_IDMA_CONFIG_OWN);
			cmd->c_resid -= len;
			resid -= len;
			off += len;
			if (desc == 0) {
				dma[desc].dma_config |= htole32(SUNXI_MMC_IDMA_CONFIG_FD);
			}
			if (cmd->c_resid == 0) {
				dma[desc].dma_config |= htole32(SUNXI_MMC_IDMA_CONFIG_LD);
				dma[desc].dma_config |= htole32(SUNXI_MMC_IDMA_CONFIG_ER);
				dma[desc].dma_next = 0;
			} else {
				dma[desc].dma_config |=
				    htole32(SUNXI_MMC_IDMA_CONFIG_DIC);
				dma[desc].dma_next = htole32(
				    desc_paddr + ((desc+1) *
				    sizeof(struct sunxi_mmc_idma_descriptor)));
			}
			++desc;
		}
	}
	if (desc == sc->sc_idma_ndesc) {
		aprint_error_dev(sc->sc_dev,
		    "not enough descriptors for %d byte transfer!\n",
		    cmd->c_datalen);
		return EIO;
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_idma_map, 0,
	    sc->sc_idma_size, BUS_DMASYNC_PREWRITE);

	sc->sc_idma_idst = 0;

	val = MMC_READ(sc, SUNXI_MMC_GCTRL);
	val |= SUNXI_MMC_GCTRL_DMAEN;
	val |= SUNXI_MMC_GCTRL_INTEN;
	MMC_WRITE(sc, SUNXI_MMC_GCTRL, val);
	val |= SUNXI_MMC_GCTRL_DMARESET;
	MMC_WRITE(sc, SUNXI_MMC_GCTRL, val);
	MMC_WRITE(sc, SUNXI_MMC_DMAC, SUNXI_MMC_DMAC_SOFTRESET);
	MMC_WRITE(sc, SUNXI_MMC_DMAC,
	    SUNXI_MMC_DMAC_IDMA_ON|SUNXI_MMC_DMAC_FIX_BURST);
	val = MMC_READ(sc, SUNXI_MMC_IDIE);
	val &= ~(SUNXI_MMC_IDST_RECEIVE_INT|SUNXI_MMC_IDST_TRANSMIT_INT);
	if (ISSET(cmd->c_flags, SCF_CMD_READ))
		val |= SUNXI_MMC_IDST_RECEIVE_INT;
	else
		val |= SUNXI_MMC_IDST_TRANSMIT_INT;
	MMC_WRITE(sc, SUNXI_MMC_IDIE, val);
	MMC_WRITE(sc, SUNXI_MMC_DLBA, desc_paddr);
	MMC_WRITE(sc, SUNXI_MMC_FTRGLEVEL, sc->sc_config->dma_ftrglevel);

	return 0;
}

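/*
 * Post-transfer DMA cleanup: sync the descriptor ring and, for bounced
 * commands, copy read data back to the caller's buffer.
 */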
static void
sunxi_mmc_dma_complete(struct sunxi_mmc_softc *sc, struct sdmmc_command *cmd)
{
	bus_dmamap_sync(sc->sc_dmat, sc->sc_idma_map, 0,
	    sc->sc_idma_size, BUS_DMASYNC_POSTWRITE);

	if (cmd->c_dmamap == NULL) {
		if (ISSET(cmd->c_flags, SCF_CMD_READ)) {
			bus_dmamap_sync(sc->sc_dmat, sc->sc_dmabounce_map,
			    0, cmd->c_datalen, BUS_DMASYNC_POSTREAD);
			memcpy(cmd->c_data, sc->sc_dmabounce_buf,
			    cmd->c_datalen);
		} else {
			bus_dmamap_sync(sc->sc_dmat, sc->sc_dmabounce_map,
			    0, cmd->c_datalen, BUS_DMASYNC_POSTWRITE);
		}
	}
}

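/*
 * Execute an SD/MMC command: program the command, argument and block
 * registers, kick off DMA for data transfers, then wait for command and
 * data completion interrupts and collect the response.  On error the
 * DMA engine and FIFO are reset and the clock is re-synchronised.
 */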
static void
sunxi_mmc_exec_command(sdmmc_chipset_handle_t sch, struct sdmmc_command *cmd)
{
	struct sunxi_mmc_softc *sc = sch;
	uint32_t cmdval = SUNXI_MMC_CMD_START;
	const bool poll = (cmd->c_flags & SCF_POLL) != 0;
	int retry;

	DPRINTF(sc->sc_dev,
	    "opcode %d flags 0x%x data %p datalen %d blklen %d poll %d\n",
	    cmd->c_opcode, cmd->c_flags, cmd->c_data, cmd->c_datalen,
	    cmd->c_blklen, poll);

	mutex_enter(&sc->sc_intr_lock);

	if (cmd->c_opcode == 0)
		cmdval |= SUNXI_MMC_CMD_SEND_INIT_SEQ;
	if (cmd->c_flags & SCF_RSP_PRESENT)
		cmdval |= SUNXI_MMC_CMD_RSP_EXP;
	if (cmd->c_flags & SCF_RSP_136)
		cmdval |= SUNXI_MMC_CMD_LONG_RSP;
	if (cmd->c_flags & SCF_RSP_CRC)
		cmdval |= SUNXI_MMC_CMD_CHECK_RSP_CRC;

	if (cmd->c_datalen > 0) {
		unsigned int nblks;

		cmdval |= SUNXI_MMC_CMD_DATA_EXP | SUNXI_MMC_CMD_WAIT_PRE_OVER;
		if (!ISSET(cmd->c_flags, SCF_CMD_READ)) {
			cmdval |= SUNXI_MMC_CMD_WRITE;
		}

		nblks = cmd->c_datalen / cmd->c_blklen;
		if (nblks == 0 || (cmd->c_datalen % cmd->c_blklen) != 0)
			++nblks;

		if (nblks > 1) {
			cmdval |= SUNXI_MMC_CMD_SEND_AUTO_STOP;
		}

		MMC_WRITE(sc, SUNXI_MMC_BLKSZ, cmd->c_blklen);
		MMC_WRITE(sc, SUNXI_MMC_BYTECNT, nblks * cmd->c_blklen);
	}

	sc->sc_intr_rint = 0;

	MMC_WRITE(sc, SUNXI_MMC_A12A,
	    (cmdval & SUNXI_MMC_CMD_SEND_AUTO_STOP) ? 0 : 0xffff);

	MMC_WRITE(sc, SUNXI_MMC_ARG, cmd->c_arg);

	DPRINTF(sc->sc_dev, "cmdval = %08x\n", cmdval);

	if (cmd->c_datalen == 0) {
		MMC_WRITE(sc, SUNXI_MMC_CMD, cmdval | cmd->c_opcode);
	} else {
		cmd->c_resid = cmd->c_datalen;
		cmd->c_error = sunxi_mmc_dma_prepare(sc, cmd);
		MMC_WRITE(sc, SUNXI_MMC_CMD, cmdval | cmd->c_opcode);
		if (cmd->c_error == 0) {
			const uint32_t idst_mask =
			    SUNXI_MMC_IDST_ERROR | SUNXI_MMC_IDST_COMPLETE;
			retry = 10;
			while ((sc->sc_idma_idst & idst_mask) == 0) {
				if (retry-- == 0) {
					cmd->c_error = ETIMEDOUT;
					break;
				}
				cv_timedwait(&sc->sc_idst_cv,
				    &sc->sc_intr_lock, hz);
			}
		}
		sunxi_mmc_dma_complete(sc, cmd);
		if (sc->sc_idma_idst & SUNXI_MMC_IDST_ERROR) {
			cmd->c_error = EIO;
		} else if (!(sc->sc_idma_idst & SUNXI_MMC_IDST_COMPLETE)) {
			cmd->c_error = ETIMEDOUT;
		}
		if (cmd->c_error) {
			DPRINTF(sc->sc_dev,
			    "xfer failed, error %d\n", cmd->c_error);
			goto done;
		}
	}

	cmd->c_error = sunxi_mmc_wait_rint(sc,
	    SUNXI_MMC_INT_ERROR|SUNXI_MMC_INT_CMD_DONE, hz * 10, poll);
	if (cmd->c_error == 0 && (sc->sc_intr_rint & SUNXI_MMC_INT_ERROR)) {
		if (sc->sc_intr_rint & SUNXI_MMC_INT_RESP_TIMEOUT) {
			cmd->c_error = ETIMEDOUT;
		} else {
			cmd->c_error = EIO;
		}
	}
	if (cmd->c_error) {
		DPRINTF(sc->sc_dev,
		    "cmd failed, error %d\n", cmd->c_error);
		goto done;
	}

	if (cmd->c_datalen > 0) {
		cmd->c_error = sunxi_mmc_wait_rint(sc,
		    SUNXI_MMC_INT_ERROR|
		    SUNXI_MMC_INT_AUTO_CMD_DONE|
		    SUNXI_MMC_INT_DATA_OVER,
		    hz*10, poll);
		if (cmd->c_error == 0 &&
		    (sc->sc_intr_rint & SUNXI_MMC_INT_ERROR)) {
			cmd->c_error = ETIMEDOUT;
		}
		if (cmd->c_error) {
			DPRINTF(sc->sc_dev,
			    "data timeout, rint = %08x\n",
			    sc->sc_intr_rint);
			cmd->c_error = ETIMEDOUT;
			goto done;
		}
	}

	if (cmd->c_flags & SCF_RSP_PRESENT) {
		if (cmd->c_flags & SCF_RSP_136) {
			cmd->c_resp[0] = MMC_READ(sc, SUNXI_MMC_RESP0);
			cmd->c_resp[1] = MMC_READ(sc, SUNXI_MMC_RESP1);
			cmd->c_resp[2] = MMC_READ(sc, SUNXI_MMC_RESP2);
			cmd->c_resp[3] = MMC_READ(sc, SUNXI_MMC_RESP3);
			if (cmd->c_flags & SCF_RSP_CRC) {
				cmd->c_resp[0] = (cmd->c_resp[0] >> 8) |
				    (cmd->c_resp[1] << 24);
				cmd->c_resp[1] = (cmd->c_resp[1] >> 8) |
				    (cmd->c_resp[2] << 24);
				cmd->c_resp[2] = (cmd->c_resp[2] >> 8) |
				    (cmd->c_resp[3] << 24);
				cmd->c_resp[3] = (cmd->c_resp[3] >> 8);
			}
		} else {
			cmd->c_resp[0] = MMC_READ(sc, SUNXI_MMC_RESP0);
		}
	}

done:
	cmd->c_flags |= SCF_ITSDONE;
	mutex_exit(&sc->sc_intr_lock);

	if (cmd->c_error) {
		DPRINTF(sc->sc_dev, "i/o error %d\n", cmd->c_error);
		MMC_WRITE(sc, SUNXI_MMC_GCTRL,
		    MMC_READ(sc, SUNXI_MMC_GCTRL) |
		    SUNXI_MMC_GCTRL_DMARESET | SUNXI_MMC_GCTRL_FIFORESET);
		for (retry = 0; retry < 1000; retry++) {
			if (!(MMC_READ(sc, SUNXI_MMC_GCTRL) & SUNXI_MMC_GCTRL_RESET))
				break;
			delay(10);
		}
		sunxi_mmc_update_clock(sc);
	}

	MMC_WRITE(sc, SUNXI_MMC_GCTRL,
	    MMC_READ(sc, SUNXI_MMC_GCTRL) | SUNXI_MMC_GCTRL_FIFORESET);
}

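/*
 * Enable or disable delivery of the SDIO card interrupt via IMASK.
 */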
static void
sunxi_mmc_card_enable_intr(sdmmc_chipset_handle_t sch, int enable)
{
	struct sunxi_mmc_softc *sc = sch;
	uint32_t imask;

	imask = MMC_READ(sc, SUNXI_MMC_IMASK);
	if (enable)
		imask |= SUNXI_MMC_INT_SDIO_INT;
	else
		imask &= ~SUNXI_MMC_INT_SDIO_INT;
	MMC_WRITE(sc, SUNXI_MMC_IMASK, imask);
}

static void
sunxi_mmc_card_intr_ack(sdmmc_chipset_handle_t sch)
{
	struct sunxi_mmc_softc *sc = sch;

	MMC_WRITE(sc, SUNXI_MMC_RINT, SUNXI_MMC_INT_SDIO_INT);
}