/* $NetBSD: sunxi_mmc.c,v 1.24 2018/05/21 22:04:27 jmcneill Exp $ */

/*-
 * Copyright (c) 2014-2017 Jared McNeill <jmcneill (at) invisible.ca>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "opt_sunximmc.h"

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sunxi_mmc.c,v 1.24 2018/05/21 22:04:27 jmcneill Exp $");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/device.h>
#include <sys/intr.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/gpio.h>

#include <dev/sdmmc/sdmmcvar.h>
#include <dev/sdmmc/sdmmcchip.h>
#include <dev/sdmmc/sdmmc_ioreg.h>

#include <dev/fdt/fdtvar.h>

#include <arm/sunxi/sunxi_mmc.h>

#ifdef SUNXI_MMC_DEBUG
static int sunxi_mmc_debug = SUNXI_MMC_DEBUG;
#define	DPRINTF(dev, fmt, ...)						\
do {									\
	if (sunxi_mmc_debug & __BIT(device_unit(dev)))			\
		device_printf((dev), fmt, ##__VA_ARGS__);		\
} while (0)
#else
#define	DPRINTF(dev, fmt, ...)	((void)0)
#endif

enum sunxi_mmc_timing {
	SUNXI_MMC_TIMING_400K,
	SUNXI_MMC_TIMING_25M,
	SUNXI_MMC_TIMING_50M,
	SUNXI_MMC_TIMING_50M_DDR,
	SUNXI_MMC_TIMING_50M_DDR_8BIT,
};

struct sunxi_mmc_delay {
	u_int output_phase;
	u_int sample_phase;
};

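/*
 * Per-timing clock phase delays for the "output" and "sample" clocks,
 * expressed in degrees and applied via clk_set_rate() in
 * sunxi_mmc_set_clock() on SoCs that provide those clocks.
 */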
static const struct sunxi_mmc_delay sun7i_mmc_delays[] = {
	[SUNXI_MMC_TIMING_400K]		= { 180, 180 },
	[SUNXI_MMC_TIMING_25M]		= { 180,  75 },
	[SUNXI_MMC_TIMING_50M]		= {  90, 120 },
	[SUNXI_MMC_TIMING_50M_DDR]	= {  60, 120 },
	[SUNXI_MMC_TIMING_50M_DDR_8BIT]	= {  90, 180 },
};

static const struct sunxi_mmc_delay sun9i_mmc_delays[] = {
	[SUNXI_MMC_TIMING_400K]		= { 180, 180 },
	[SUNXI_MMC_TIMING_25M]		= { 180,  75 },
	[SUNXI_MMC_TIMING_50M]		= { 150, 120 },
	[SUNXI_MMC_TIMING_50M_DDR]	= {  54,  36 },
	[SUNXI_MMC_TIMING_50M_DDR_8BIT]	= {  72,  72 },
};

#define SUNXI_MMC_NDESC		64

struct sunxi_mmc_softc;

static int	sunxi_mmc_match(device_t, cfdata_t, void *);
static void	sunxi_mmc_attach(device_t, device_t, void *);
static void	sunxi_mmc_attach_i(device_t);

static int	sunxi_mmc_intr(void *);
static int	sunxi_mmc_dmabounce_setup(struct sunxi_mmc_softc *);
static int	sunxi_mmc_idma_setup(struct sunxi_mmc_softc *);

static int	sunxi_mmc_host_reset(sdmmc_chipset_handle_t);
static uint32_t	sunxi_mmc_host_ocr(sdmmc_chipset_handle_t);
static int	sunxi_mmc_host_maxblklen(sdmmc_chipset_handle_t);
static int	sunxi_mmc_card_detect(sdmmc_chipset_handle_t);
static int	sunxi_mmc_write_protect(sdmmc_chipset_handle_t);
static int	sunxi_mmc_bus_power(sdmmc_chipset_handle_t, uint32_t);
static int	sunxi_mmc_bus_clock(sdmmc_chipset_handle_t, int, bool);
static int	sunxi_mmc_bus_width(sdmmc_chipset_handle_t, int);
static int	sunxi_mmc_bus_rod(sdmmc_chipset_handle_t, int);
static int	sunxi_mmc_signal_voltage(sdmmc_chipset_handle_t, int);
static int	sunxi_mmc_execute_tuning(sdmmc_chipset_handle_t, int);
static void	sunxi_mmc_exec_command(sdmmc_chipset_handle_t,
		    struct sdmmc_command *);
static void	sunxi_mmc_card_enable_intr(sdmmc_chipset_handle_t, int);
static void	sunxi_mmc_card_intr_ack(sdmmc_chipset_handle_t);

static struct sdmmc_chip_functions sunxi_mmc_chip_functions = {
	.host_reset = sunxi_mmc_host_reset,
	.host_ocr = sunxi_mmc_host_ocr,
	.host_maxblklen = sunxi_mmc_host_maxblklen,
	.card_detect = sunxi_mmc_card_detect,
	.write_protect = sunxi_mmc_write_protect,
	.bus_power = sunxi_mmc_bus_power,
	.bus_clock_ddr = sunxi_mmc_bus_clock,
	.bus_width = sunxi_mmc_bus_width,
	.bus_rod = sunxi_mmc_bus_rod,
	.signal_voltage = sunxi_mmc_signal_voltage,
	.execute_tuning = sunxi_mmc_execute_tuning,
	.exec_command = sunxi_mmc_exec_command,
	.card_enable_intr = sunxi_mmc_card_enable_intr,
	.card_intr_ack = sunxi_mmc_card_intr_ack,
};

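/*
 * Per-SoC configuration: maximum byte count covered by a single IDMA
 * descriptor, controller feature flags, an optional phase delay table,
 * and the DMA FIFO threshold value written to SUNXI_MMC_FTRGLEVEL.
 */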
struct sunxi_mmc_config {
	u_int idma_xferlen;
	u_int flags;
#define	SUNXI_MMC_FLAG_CALIB_REG	0x01
#define	SUNXI_MMC_FLAG_NEW_TIMINGS	0x02
#define	SUNXI_MMC_FLAG_MASK_DATA0	0x04
#define	SUNXI_MMC_FLAG_HS200		0x08
	const struct sunxi_mmc_delay *delays;
	uint32_t dma_ftrglevel;
};

struct sunxi_mmc_softc {
	device_t sc_dev;
	bus_space_tag_t sc_bst;
	bus_space_handle_t sc_bsh;
	bus_dma_tag_t sc_dmat;
	int sc_phandle;

	void *sc_ih;
	kmutex_t sc_intr_lock;
	kcondvar_t sc_intr_cv;
	kcondvar_t sc_idst_cv;

	int sc_mmc_width;
	int sc_mmc_present;

	u_int sc_max_frequency;

	device_t sc_sdmmc_dev;

	struct sunxi_mmc_config *sc_config;

	bus_dma_segment_t sc_idma_segs[1];
	int sc_idma_nsegs;
	bus_size_t sc_idma_size;
	bus_dmamap_t sc_idma_map;
	int sc_idma_ndesc;
	void *sc_idma_desc;

	bus_dmamap_t sc_dmabounce_map;
	void *sc_dmabounce_buf;
	size_t sc_dmabounce_buflen;

	uint32_t sc_intr_rint;
	uint32_t sc_idma_idst;

	struct clk *sc_clk_ahb;
	struct clk *sc_clk_mmc;
	struct clk *sc_clk_output;
	struct clk *sc_clk_sample;

	struct fdtbus_reset *sc_rst_ahb;

	struct fdtbus_gpio_pin *sc_gpio_cd;
	int sc_gpio_cd_inverted;
	struct fdtbus_gpio_pin *sc_gpio_wp;
	int sc_gpio_wp_inverted;

	struct fdtbus_regulator *sc_reg_vqmmc;

	struct fdtbus_mmc_pwrseq *sc_pwrseq;

	bool sc_non_removable;
	bool sc_broken_cd;
};

CFATTACH_DECL_NEW(sunxi_mmc, sizeof(struct sunxi_mmc_softc),
	sunxi_mmc_match, sunxi_mmc_attach, NULL, NULL);

#define MMC_WRITE(sc, reg, val)	\
	bus_space_write_4((sc)->sc_bst, (sc)->sc_bsh, (reg), (val))
#define MMC_READ(sc, reg) \
	bus_space_read_4((sc)->sc_bst, (sc)->sc_bsh, (reg))

static const struct sunxi_mmc_config sun4i_a10_mmc_config = {
	.idma_xferlen = 0x2000,
	.dma_ftrglevel = 0x20070008,
	.delays = NULL,
	.flags = 0,
};

static const struct sunxi_mmc_config sun5i_a13_mmc_config = {
	.idma_xferlen = 0x10000,
	.dma_ftrglevel = 0x20070008,
	.delays = NULL,
	.flags = 0,
};

static const struct sunxi_mmc_config sun7i_a20_mmc_config = {
	.idma_xferlen = 0x2000,
	.dma_ftrglevel = 0x20070008,
	.delays = sun7i_mmc_delays,
	.flags = 0,
};

static const struct sunxi_mmc_config sun8i_a83t_emmc_config = {
	.idma_xferlen = 0x10000,
	.dma_ftrglevel = 0x20070008,
	.delays = NULL,
	.flags = SUNXI_MMC_FLAG_NEW_TIMINGS,
};

static const struct sunxi_mmc_config sun9i_a80_mmc_config = {
	.idma_xferlen = 0x10000,
	.dma_ftrglevel = 0x200f0010,
	.delays = sun9i_mmc_delays,
	.flags = 0,
};

static const struct sunxi_mmc_config sun50i_a64_mmc_config = {
	.idma_xferlen = 0x10000,
	.dma_ftrglevel = 0x20070008,
	.delays = NULL,
	.flags = SUNXI_MMC_FLAG_CALIB_REG |
		 SUNXI_MMC_FLAG_NEW_TIMINGS |
		 SUNXI_MMC_FLAG_MASK_DATA0,
};

static const struct sunxi_mmc_config sun50i_a64_emmc_config = {
	.idma_xferlen = 0x2000,
	.dma_ftrglevel = 0x20070008,
	.delays = NULL,
	.flags = SUNXI_MMC_FLAG_CALIB_REG |
		 SUNXI_MMC_FLAG_HS200,
};

static const struct sunxi_mmc_config sun50i_h6_mmc_config = {
	.idma_xferlen = 0x10000,
	.dma_ftrglevel = 0x20070008,
	.delays = NULL,
	.flags = SUNXI_MMC_FLAG_CALIB_REG |
		 SUNXI_MMC_FLAG_NEW_TIMINGS |
		 SUNXI_MMC_FLAG_MASK_DATA0,
};

static const struct sunxi_mmc_config sun50i_h6_emmc_config = {
	.idma_xferlen = 0x2000,
	.dma_ftrglevel = 0x20070008,
	.delays = NULL,
	.flags = SUNXI_MMC_FLAG_CALIB_REG,
};

static const struct of_compat_data compat_data[] = {
	{ "allwinner,sun4i-a10-mmc",	(uintptr_t)&sun4i_a10_mmc_config },
	{ "allwinner,sun5i-a13-mmc",	(uintptr_t)&sun5i_a13_mmc_config },
	{ "allwinner,sun7i-a20-mmc",	(uintptr_t)&sun7i_a20_mmc_config },
	{ "allwinner,sun8i-a83t-emmc",	(uintptr_t)&sun8i_a83t_emmc_config },
	{ "allwinner,sun9i-a80-mmc",	(uintptr_t)&sun9i_a80_mmc_config },
	{ "allwinner,sun50i-a64-mmc",	(uintptr_t)&sun50i_a64_mmc_config },
	{ "allwinner,sun50i-a64-emmc",	(uintptr_t)&sun50i_a64_emmc_config },
	{ "allwinner,sun50i-h6-mmc",	(uintptr_t)&sun50i_h6_mmc_config },
	{ "allwinner,sun50i-h6-emmc",	(uintptr_t)&sun50i_h6_emmc_config },
	{ NULL }
};

static int
sunxi_mmc_match(device_t parent, cfdata_t cf, void *aux)
{
	struct fdt_attach_args * const faa = aux;

	return of_match_compat_data(faa->faa_phandle, compat_data);
}

static void
sunxi_mmc_attach(device_t parent, device_t self, void *aux)
{
	struct sunxi_mmc_softc * const sc = device_private(self);
	struct fdt_attach_args * const faa = aux;
	const int phandle = faa->faa_phandle;
	char intrstr[128];
	bus_addr_t addr;
	bus_size_t size;

	if (fdtbus_get_reg(phandle, 0, &addr, &size) != 0) {
		aprint_error(": couldn't get registers\n");
		return;
	}

	sc->sc_clk_ahb = fdtbus_clock_get(phandle, "ahb");
	sc->sc_clk_mmc = fdtbus_clock_get(phandle, "mmc");
	sc->sc_clk_output = fdtbus_clock_get(phandle, "output");
	sc->sc_clk_sample = fdtbus_clock_get(phandle, "sample");

#if notyet
	if (sc->sc_clk_ahb == NULL || sc->sc_clk_mmc == NULL ||
	    sc->sc_clk_output == NULL || sc->sc_clk_sample == NULL) {
#else
	if (sc->sc_clk_ahb == NULL || sc->sc_clk_mmc == NULL) {
#endif
		aprint_error(": couldn't get clocks\n");
		return;
	}

	sc->sc_rst_ahb = fdtbus_reset_get(phandle, "ahb");

	sc->sc_reg_vqmmc = fdtbus_regulator_acquire(phandle, "vqmmc-supply");

	sc->sc_pwrseq = fdtbus_mmc_pwrseq_get(phandle);

	if (clk_enable(sc->sc_clk_ahb) != 0 ||
	    clk_enable(sc->sc_clk_mmc) != 0) {
		aprint_error(": couldn't enable clocks\n");
		return;
	}

	if (sc->sc_rst_ahb != NULL) {
		if (fdtbus_reset_deassert(sc->sc_rst_ahb) != 0) {
			aprint_error(": couldn't de-assert resets\n");
			return;
		}
	}

	sc->sc_dev = self;
	sc->sc_phandle = phandle;
	sc->sc_config = (void *)of_search_compatible(phandle, compat_data)->data;
	sc->sc_bst = faa->faa_bst;
	sc->sc_dmat = faa->faa_dmat;
	mutex_init(&sc->sc_intr_lock, MUTEX_DEFAULT, IPL_BIO);
	cv_init(&sc->sc_intr_cv, "awinmmcirq");
	cv_init(&sc->sc_idst_cv, "awinmmcdma");

	if (bus_space_map(sc->sc_bst, addr, size, 0, &sc->sc_bsh) != 0) {
		aprint_error(": couldn't map registers\n");
		return;
	}

	aprint_naive("\n");
	aprint_normal(": SD/MMC controller\n");

	sc->sc_gpio_cd = fdtbus_gpio_acquire(phandle, "cd-gpios",
	    GPIO_PIN_INPUT);
	sc->sc_gpio_wp = fdtbus_gpio_acquire(phandle, "wp-gpios",
	    GPIO_PIN_INPUT);

	sc->sc_gpio_cd_inverted = of_hasprop(phandle, "cd-inverted") ? 0 : 1;
	sc->sc_gpio_wp_inverted = of_hasprop(phandle, "wp-inverted") ? 0 : 1;

	sc->sc_non_removable = of_hasprop(phandle, "non-removable");
	sc->sc_broken_cd = of_hasprop(phandle, "broken-cd");

	if (of_getprop_uint32(phandle, "max-frequency", &sc->sc_max_frequency))
		sc->sc_max_frequency = 52000000;

	if (sunxi_mmc_dmabounce_setup(sc) != 0 ||
	    sunxi_mmc_idma_setup(sc) != 0) {
		aprint_error_dev(self, "failed to setup DMA\n");
		return;
	}

	if (!fdtbus_intr_str(phandle, 0, intrstr, sizeof(intrstr))) {
		aprint_error_dev(self, "failed to decode interrupt\n");
		return;
	}

	sc->sc_ih = fdtbus_intr_establish(phandle, 0, IPL_BIO, FDT_INTR_MPSAFE,
	    sunxi_mmc_intr, sc);
	if (sc->sc_ih == NULL) {
		aprint_error_dev(self, "failed to establish interrupt on %s\n",
		    intrstr);
		return;
	}
	aprint_normal_dev(self, "interrupting on %s\n", intrstr);

	config_interrupts(self, sunxi_mmc_attach_i);
}

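/*
 * Allocate a single bounce buffer of host_maxblklen bytes, used for data
 * commands that arrive without a DMA map (see sunxi_mmc_dma_prepare()).
 */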
static int
sunxi_mmc_dmabounce_setup(struct sunxi_mmc_softc *sc)
{
	bus_dma_segment_t ds[1];
	int error, rseg;

	sc->sc_dmabounce_buflen = sunxi_mmc_host_maxblklen(sc);
	error = bus_dmamem_alloc(sc->sc_dmat, sc->sc_dmabounce_buflen, 0,
	    sc->sc_dmabounce_buflen, ds, 1, &rseg, BUS_DMA_WAITOK);
	if (error)
		return error;
	error = bus_dmamem_map(sc->sc_dmat, ds, 1, sc->sc_dmabounce_buflen,
	    &sc->sc_dmabounce_buf, BUS_DMA_WAITOK);
	if (error)
		goto free;
	error = bus_dmamap_create(sc->sc_dmat, sc->sc_dmabounce_buflen, 1,
	    sc->sc_dmabounce_buflen, 0, BUS_DMA_WAITOK, &sc->sc_dmabounce_map);
	if (error)
		goto unmap;
	error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmabounce_map,
	    sc->sc_dmabounce_buf, sc->sc_dmabounce_buflen, NULL,
	    BUS_DMA_WAITOK);
	if (error)
		goto destroy;
	return 0;

destroy:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmabounce_map);
unmap:
	bus_dmamem_unmap(sc->sc_dmat, sc->sc_dmabounce_buf,
	    sc->sc_dmabounce_buflen);
free:
	bus_dmamem_free(sc->sc_dmat, ds, rseg);
	return error;
}

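/*
 * Allocate, map, and load the chain of SUNXI_MMC_NDESC internal DMA
 * (IDMA) descriptors in DMA-safe memory.
 */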
static int
sunxi_mmc_idma_setup(struct sunxi_mmc_softc *sc)
{
	int error;

	sc->sc_idma_ndesc = SUNXI_MMC_NDESC;
	sc->sc_idma_size = sizeof(struct sunxi_mmc_idma_descriptor) *
	    sc->sc_idma_ndesc;
	error = bus_dmamem_alloc(sc->sc_dmat, sc->sc_idma_size, 0,
	    sc->sc_idma_size, sc->sc_idma_segs, 1,
	    &sc->sc_idma_nsegs, BUS_DMA_WAITOK);
	if (error)
		return error;
	error = bus_dmamem_map(sc->sc_dmat, sc->sc_idma_segs,
	    sc->sc_idma_nsegs, sc->sc_idma_size,
	    &sc->sc_idma_desc, BUS_DMA_WAITOK);
	if (error)
		goto free;
	error = bus_dmamap_create(sc->sc_dmat, sc->sc_idma_size, 1,
	    sc->sc_idma_size, 0, BUS_DMA_WAITOK, &sc->sc_idma_map);
	if (error)
		goto unmap;
	error = bus_dmamap_load(sc->sc_dmat, sc->sc_idma_map,
	    sc->sc_idma_desc, sc->sc_idma_size, NULL, BUS_DMA_WAITOK);
	if (error)
		goto destroy;
	return 0;

destroy:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_idma_map);
unmap:
	bus_dmamem_unmap(sc->sc_dmat, sc->sc_idma_desc, sc->sc_idma_size);
free:
	bus_dmamem_free(sc->sc_dmat, sc->sc_idma_segs, sc->sc_idma_nsegs);
	return error;
}

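/*
 * Set the card clock. "freq" is in kHz; for DDR timings the module clock
 * is run at twice the card clock. When the SoC config supplies a delay
 * table, the matching output/sample phase delays are also programmed.
 */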
static int
sunxi_mmc_set_clock(struct sunxi_mmc_softc *sc, u_int freq, bool ddr)
{
	const struct sunxi_mmc_delay *delays;
	int error, timing = SUNXI_MMC_TIMING_400K;

	if (sc->sc_config->delays) {
		if (freq <= 400) {
			timing = SUNXI_MMC_TIMING_400K;
		} else if (freq <= 25000) {
			timing = SUNXI_MMC_TIMING_25M;
		} else if (freq <= 52000) {
			if (ddr) {
				timing = sc->sc_mmc_width == 8 ?
				    SUNXI_MMC_TIMING_50M_DDR_8BIT :
				    SUNXI_MMC_TIMING_50M_DDR;
			} else {
				timing = SUNXI_MMC_TIMING_50M;
			}
		} else
			return EINVAL;
	}
	if (sc->sc_max_frequency) {
		if (freq * 1000 > sc->sc_max_frequency)
			return EINVAL;
	}

	error = clk_set_rate(sc->sc_clk_mmc, (freq * 1000) << ddr);
	if (error != 0)
		return error;

	if (sc->sc_config->delays == NULL)
		return 0;

	delays = &sc->sc_config->delays[timing];

	if (sc->sc_clk_sample) {
		error = clk_set_rate(sc->sc_clk_sample, delays->sample_phase);
		if (error != 0)
			return error;
	}
	if (sc->sc_clk_output) {
		error = clk_set_rate(sc->sc_clk_output, delays->output_phase);
		if (error != 0)
			return error;
	}

	return 0;
}

static void
sunxi_mmc_attach_i(device_t self)
{
	struct sunxi_mmc_softc *sc = device_private(self);
	const u_int flags = sc->sc_config->flags;
	struct sdmmcbus_attach_args saa;
	uint32_t width;

	if (sc->sc_pwrseq)
		fdtbus_mmc_pwrseq_pre_power_on(sc->sc_pwrseq);

	sunxi_mmc_host_reset(sc);
	sunxi_mmc_bus_width(sc, 1);
	sunxi_mmc_set_clock(sc, 400, false);

	if (sc->sc_pwrseq)
		fdtbus_mmc_pwrseq_post_power_on(sc->sc_pwrseq);

	if (of_getprop_uint32(sc->sc_phandle, "bus-width", &width) != 0)
		width = 4;

	memset(&saa, 0, sizeof(saa));
	saa.saa_busname = "sdmmc";
	saa.saa_sct = &sunxi_mmc_chip_functions;
	saa.saa_sch = sc;
	saa.saa_dmat = sc->sc_dmat;
	saa.saa_clkmin = 400;
	saa.saa_clkmax = sc->sc_max_frequency / 1000;
	saa.saa_caps = SMC_CAPS_DMA |
		       SMC_CAPS_MULTI_SEG_DMA |
		       SMC_CAPS_AUTO_STOP |
		       SMC_CAPS_SD_HIGHSPEED |
		       SMC_CAPS_MMC_HIGHSPEED |
		       SMC_CAPS_MMC_DDR52 |
		       SMC_CAPS_POLLING;
	if (flags & SUNXI_MMC_FLAG_HS200)
		saa.saa_caps |= SMC_CAPS_MMC_HS200;
	if (width == 4)
		saa.saa_caps |= SMC_CAPS_4BIT_MODE;
	if (width == 8)
		saa.saa_caps |= SMC_CAPS_8BIT_MODE;

	if (sc->sc_gpio_cd)
		saa.saa_caps |= SMC_CAPS_POLL_CARD_DET;

	sc->sc_sdmmc_dev = config_found(self, &saa, NULL);
}

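/*
 * Interrupt handler. IDST carries internal DMA status and wakes waiters
 * on sc_idst_cv; RINT carries command/data status and wakes waiters on
 * sc_intr_cv; SDIO card interrupts are forwarded to the sdmmc layer.
 */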
static int
sunxi_mmc_intr(void *priv)
{
	struct sunxi_mmc_softc *sc = priv;
	uint32_t idst, rint, imask;

	mutex_enter(&sc->sc_intr_lock);
	idst = MMC_READ(sc, SUNXI_MMC_IDST);
	rint = MMC_READ(sc, SUNXI_MMC_RINT);
	if (!idst && !rint) {
		mutex_exit(&sc->sc_intr_lock);
		return 0;
	}
	MMC_WRITE(sc, SUNXI_MMC_IDST, idst);
	MMC_WRITE(sc, SUNXI_MMC_RINT, rint & ~SUNXI_MMC_INT_SDIO_INT);

	DPRINTF(sc->sc_dev, "mmc intr idst=%08X rint=%08X\n",
	    idst, rint);

	if (idst != 0) {
		MMC_WRITE(sc, SUNXI_MMC_IDIE, 0);
		sc->sc_idma_idst |= idst;
		cv_broadcast(&sc->sc_idst_cv);
	}

	if ((rint & ~SUNXI_MMC_INT_SDIO_INT) != 0) {
		imask = MMC_READ(sc, SUNXI_MMC_IMASK);
		MMC_WRITE(sc, SUNXI_MMC_IMASK, imask & ~SUNXI_MMC_INT_SDIO_INT);
		sc->sc_intr_rint |= (rint & ~SUNXI_MMC_INT_SDIO_INT);
		cv_broadcast(&sc->sc_intr_cv);
	}

	if ((rint & SUNXI_MMC_INT_SDIO_INT) != 0) {
		sdmmc_card_intr(sc->sc_sdmmc_dev);
	}

	mutex_exit(&sc->sc_intr_lock);

	return 1;
}

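/*
 * Wait for any of the RINT bits in "mask" to assert, either by polling
 * the register directly (poll == true) or by sleeping on sc_intr_cv
 * until the interrupt handler posts the bits into sc_intr_rint.
 */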
static int
sunxi_mmc_wait_rint(struct sunxi_mmc_softc *sc, uint32_t mask,
    int timeout, bool poll)
{
	int retry;
	int error;

	KASSERT(mutex_owned(&sc->sc_intr_lock));

	if (sc->sc_intr_rint & mask)
		return 0;

	if (poll)
		retry = timeout / hz * 1000;
	else
		retry = timeout / hz;

	while (retry > 0) {
		if (poll) {
			sc->sc_intr_rint |= MMC_READ(sc, SUNXI_MMC_RINT);
		} else {
			error = cv_timedwait(&sc->sc_intr_cv,
			    &sc->sc_intr_lock, hz);
			if (error && error != EWOULDBLOCK)
				return error;
		}
		if (sc->sc_intr_rint & mask)
			return 0;
		if (poll)
			delay(1000);
		--retry;
	}

	return ETIMEDOUT;
}

static int
sunxi_mmc_host_reset(sdmmc_chipset_handle_t sch)
{
	struct sunxi_mmc_softc *sc = sch;
	uint32_t gctrl;
	int retry = 1000;

	DPRINTF(sc->sc_dev, "host reset\n");

	gctrl = MMC_READ(sc, SUNXI_MMC_GCTRL);
	gctrl |= SUNXI_MMC_GCTRL_RESET;
	MMC_WRITE(sc, SUNXI_MMC_GCTRL, gctrl);
	while (--retry > 0) {
		if (!(MMC_READ(sc, SUNXI_MMC_GCTRL) & SUNXI_MMC_GCTRL_RESET))
			break;
		delay(100);
	}

	MMC_WRITE(sc, SUNXI_MMC_TIMEOUT, 0xffffffff);

	MMC_WRITE(sc, SUNXI_MMC_IMASK, 0);

	MMC_WRITE(sc, SUNXI_MMC_RINT, 0xffffffff);

	gctrl = MMC_READ(sc, SUNXI_MMC_GCTRL);
	gctrl |= SUNXI_MMC_GCTRL_INTEN;
	gctrl &= ~SUNXI_MMC_GCTRL_WAIT_MEM_ACCESS_DONE;
	gctrl &= ~SUNXI_MMC_GCTRL_ACCESS_BY_AHB;
	MMC_WRITE(sc, SUNXI_MMC_GCTRL, gctrl);

	return 0;
}

static uint32_t
sunxi_mmc_host_ocr(sdmmc_chipset_handle_t sch)
{
	return MMC_OCR_3_2V_3_3V | MMC_OCR_3_3V_3_4V | MMC_OCR_HCS;
}

static int
sunxi_mmc_host_maxblklen(sdmmc_chipset_handle_t sch)
{
	return 8192;
}

static int
sunxi_mmc_card_detect(sdmmc_chipset_handle_t sch)
{
	struct sunxi_mmc_softc *sc = sch;

	if (sc->sc_non_removable || sc->sc_broken_cd) {
		/*
		 * Non-removable or broken card detect flag set in
		 * DT, assume always present
		 */
		return 1;
	} else if (sc->sc_gpio_cd != NULL) {
		/* Use card detect GPIO */
		int v = 0, i;
		for (i = 0; i < 5; i++) {
			v += (fdtbus_gpio_read(sc->sc_gpio_cd) ^
			    sc->sc_gpio_cd_inverted);
			delay(1000);
		}
		if (v == 5)
			sc->sc_mmc_present = 0;
		else if (v == 0)
			sc->sc_mmc_present = 1;
		return sc->sc_mmc_present;
	} else {
		/* Use CARD_PRESENT field of SD_STATUS register */
		const uint32_t present = MMC_READ(sc, SUNXI_MMC_STATUS) &
		    SUNXI_MMC_STATUS_CARD_PRESENT;
		return present != 0;
	}
}

static int
sunxi_mmc_write_protect(sdmmc_chipset_handle_t sch)
{
	struct sunxi_mmc_softc *sc = sch;

	if (sc->sc_gpio_wp == NULL) {
		return 0;	/* no write protect pin, assume rw */
	} else {
		return fdtbus_gpio_read(sc->sc_gpio_wp) ^
		    sc->sc_gpio_wp_inverted;
	}
}

static int
sunxi_mmc_bus_power(sdmmc_chipset_handle_t sch, uint32_t ocr)
{
	return 0;
}

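/*
 * Issue an "update clock" command (CMD_START | UPCLK_ONLY) so the
 * controller latches new clock register settings into the card clock,
 * then wait for the command to complete.
 */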
static int
sunxi_mmc_update_clock(struct sunxi_mmc_softc *sc)
{
	uint32_t cmd;
	int retry;

	DPRINTF(sc->sc_dev, "update clock\n");

	cmd = SUNXI_MMC_CMD_START |
	      SUNXI_MMC_CMD_UPCLK_ONLY |
	      SUNXI_MMC_CMD_WAIT_PRE_OVER;
	MMC_WRITE(sc, SUNXI_MMC_CMD, cmd);
	retry = 0xfffff;
	while (--retry > 0) {
		if (!(MMC_READ(sc, SUNXI_MMC_CMD) & SUNXI_MMC_CMD_START))
			break;
		delay(10);
	}

	if (retry == 0) {
		aprint_error_dev(sc->sc_dev, "timeout updating clock\n");
		DPRINTF(sc->sc_dev, "GCTRL: 0x%08x\n",
		    MMC_READ(sc, SUNXI_MMC_GCTRL));
		DPRINTF(sc->sc_dev, "CLKCR: 0x%08x\n",
		    MMC_READ(sc, SUNXI_MMC_CLKCR));
		DPRINTF(sc->sc_dev, "TIMEOUT: 0x%08x\n",
		    MMC_READ(sc, SUNXI_MMC_TIMEOUT));
		DPRINTF(sc->sc_dev, "WIDTH: 0x%08x\n",
		    MMC_READ(sc, SUNXI_MMC_WIDTH));
		DPRINTF(sc->sc_dev, "CMD: 0x%08x\n",
		    MMC_READ(sc, SUNXI_MMC_CMD));
		DPRINTF(sc->sc_dev, "MINT: 0x%08x\n",
		    MMC_READ(sc, SUNXI_MMC_MINT));
		DPRINTF(sc->sc_dev, "RINT: 0x%08x\n",
		    MMC_READ(sc, SUNXI_MMC_RINT));
		DPRINTF(sc->sc_dev, "STATUS: 0x%08x\n",
		    MMC_READ(sc, SUNXI_MMC_STATUS));
		return ETIMEDOUT;
	}

	return 0;
}

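/*
 * Change the card clock: gate the card clock off, program the divider,
 * timing mode, and module clock rate, then gate it back on, issuing an
 * update-clock command after each change. On controllers with the
 * MASK_DATA0 flag, DATA0 is masked while the clock is being switched.
 */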
static int
sunxi_mmc_bus_clock(sdmmc_chipset_handle_t sch, int freq, bool ddr)
{
	struct sunxi_mmc_softc *sc = sch;
	uint32_t clkcr, gctrl, ntsr;
	const u_int flags = sc->sc_config->flags;

	clkcr = MMC_READ(sc, SUNXI_MMC_CLKCR);
	if (clkcr & SUNXI_MMC_CLKCR_CARDCLKON) {
		clkcr &= ~SUNXI_MMC_CLKCR_CARDCLKON;
		if (flags & SUNXI_MMC_FLAG_MASK_DATA0)
			clkcr |= SUNXI_MMC_CLKCR_MASK_DATA0;
		MMC_WRITE(sc, SUNXI_MMC_CLKCR, clkcr);
		if (sunxi_mmc_update_clock(sc) != 0)
			return 1;
		if (flags & SUNXI_MMC_FLAG_MASK_DATA0) {
			clkcr = MMC_READ(sc, SUNXI_MMC_CLKCR);
			clkcr &= ~SUNXI_MMC_CLKCR_MASK_DATA0;
			MMC_WRITE(sc, SUNXI_MMC_CLKCR, clkcr);
		}
	}

	if (freq) {

		clkcr &= ~SUNXI_MMC_CLKCR_DIV;
		clkcr |= __SHIFTIN(ddr, SUNXI_MMC_CLKCR_DIV);
		MMC_WRITE(sc, SUNXI_MMC_CLKCR, clkcr);

		if (flags & SUNXI_MMC_FLAG_NEW_TIMINGS) {
			ntsr = MMC_READ(sc, SUNXI_MMC_NTSR);
			ntsr |= SUNXI_MMC_NTSR_MODE_SELECT;
			MMC_WRITE(sc, SUNXI_MMC_NTSR, ntsr);
		}

		if (flags & SUNXI_MMC_FLAG_CALIB_REG)
			MMC_WRITE(sc, SUNXI_MMC_SAMP_DL, SUNXI_MMC_SAMP_DL_SW_EN);

		if (sunxi_mmc_update_clock(sc) != 0)
			return 1;

		gctrl = MMC_READ(sc, SUNXI_MMC_GCTRL);
		if (ddr)
			gctrl |= SUNXI_MMC_GCTRL_DDR_MODE;
		else
			gctrl &= ~SUNXI_MMC_GCTRL_DDR_MODE;
		MMC_WRITE(sc, SUNXI_MMC_GCTRL, gctrl);

		if (sunxi_mmc_set_clock(sc, freq, ddr) != 0)
			return 1;

		clkcr |= SUNXI_MMC_CLKCR_CARDCLKON;
		if (flags & SUNXI_MMC_FLAG_MASK_DATA0)
			clkcr |= SUNXI_MMC_CLKCR_MASK_DATA0;
		MMC_WRITE(sc, SUNXI_MMC_CLKCR, clkcr);
		if (sunxi_mmc_update_clock(sc) != 0)
			return 1;
		if (flags & SUNXI_MMC_FLAG_MASK_DATA0) {
			clkcr = MMC_READ(sc, SUNXI_MMC_CLKCR);
			clkcr &= ~SUNXI_MMC_CLKCR_MASK_DATA0;
			MMC_WRITE(sc, SUNXI_MMC_CLKCR, clkcr);
		}
	}

	return 0;
}

static int
sunxi_mmc_bus_width(sdmmc_chipset_handle_t sch, int width)
{
	struct sunxi_mmc_softc *sc = sch;

	DPRINTF(sc->sc_dev, "width = %d\n", width);

	switch (width) {
	case 1:
		MMC_WRITE(sc, SUNXI_MMC_WIDTH, SUNXI_MMC_WIDTH_1);
		break;
	case 4:
		MMC_WRITE(sc, SUNXI_MMC_WIDTH, SUNXI_MMC_WIDTH_4);
		break;
	case 8:
		MMC_WRITE(sc, SUNXI_MMC_WIDTH, SUNXI_MMC_WIDTH_8);
		break;
	default:
		return 1;
	}

	sc->sc_mmc_width = width;

	return 0;
}

static int
sunxi_mmc_bus_rod(sdmmc_chipset_handle_t sch, int on)
{
	return -1;
}

static int
sunxi_mmc_signal_voltage(sdmmc_chipset_handle_t sch, int signal_voltage)
{
	struct sunxi_mmc_softc *sc = sch;
	u_int uvol;
	int error;

	if (sc->sc_reg_vqmmc == NULL)
		return 0;

	switch (signal_voltage) {
	case SDMMC_SIGNAL_VOLTAGE_330:
		uvol = 3300000;
		break;
	case SDMMC_SIGNAL_VOLTAGE_180:
		uvol = 1800000;
		break;
	default:
		return EINVAL;
	}

	error = fdtbus_regulator_set_voltage(sc->sc_reg_vqmmc, uvol, uvol);
	if (error != 0)
		return error;

	return fdtbus_regulator_enable(sc->sc_reg_vqmmc);
}

static int
sunxi_mmc_execute_tuning(sdmmc_chipset_handle_t sch, int timing)
{
	switch (timing) {
	case SDMMC_TIMING_MMC_HS200:
		break;
	default:
		return EINVAL;
	}

	return 0;
}

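/*
 * Build the IDMA descriptor chain for a data command, splitting each DMA
 * segment into idma_xferlen-sized pieces, then reset and start the
 * internal DMA engine.
 */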
static int
sunxi_mmc_dma_prepare(struct sunxi_mmc_softc *sc, struct sdmmc_command *cmd)
{
	struct sunxi_mmc_idma_descriptor *dma = sc->sc_idma_desc;
	bus_addr_t desc_paddr = sc->sc_idma_map->dm_segs[0].ds_addr;
	bus_dmamap_t map;
	bus_size_t off;
	int desc, resid, seg;
	uint32_t val;

	/*
	 * If the command includes a dma map use it, otherwise we need to
	 * bounce. This can happen for SDIO IO_RW_EXTENDED (CMD53) commands.
	 */
	if (cmd->c_dmamap) {
		map = cmd->c_dmamap;
	} else {
		if (cmd->c_datalen > sc->sc_dmabounce_buflen)
			return E2BIG;
		map = sc->sc_dmabounce_map;

		if (ISSET(cmd->c_flags, SCF_CMD_READ)) {
			memset(sc->sc_dmabounce_buf, 0, cmd->c_datalen);
			bus_dmamap_sync(sc->sc_dmat, sc->sc_dmabounce_map,
			    0, cmd->c_datalen, BUS_DMASYNC_PREREAD);
		} else {
			memcpy(sc->sc_dmabounce_buf, cmd->c_data,
			    cmd->c_datalen);
			bus_dmamap_sync(sc->sc_dmat, sc->sc_dmabounce_map,
			    0, cmd->c_datalen, BUS_DMASYNC_PREWRITE);
		}
	}

	desc = 0;
	for (seg = 0; seg < map->dm_nsegs; seg++) {
		bus_addr_t paddr = map->dm_segs[seg].ds_addr;
		bus_size_t len = map->dm_segs[seg].ds_len;
		resid = min(len, cmd->c_resid);
		off = 0;
		while (resid > 0) {
			if (desc == sc->sc_idma_ndesc)
				break;
			len = min(sc->sc_config->idma_xferlen, resid);
			dma[desc].dma_buf_size = htole32(len);
			dma[desc].dma_buf_addr = htole32(paddr + off);
			dma[desc].dma_config = htole32(SUNXI_MMC_IDMA_CONFIG_CH |
			    SUNXI_MMC_IDMA_CONFIG_OWN);
			cmd->c_resid -= len;
			resid -= len;
			off += len;
			if (desc == 0) {
				dma[desc].dma_config |= htole32(SUNXI_MMC_IDMA_CONFIG_FD);
			}
			if (cmd->c_resid == 0) {
				dma[desc].dma_config |= htole32(SUNXI_MMC_IDMA_CONFIG_LD);
				dma[desc].dma_config |= htole32(SUNXI_MMC_IDMA_CONFIG_ER);
				dma[desc].dma_next = 0;
			} else {
				dma[desc].dma_config |=
				    htole32(SUNXI_MMC_IDMA_CONFIG_DIC);
				dma[desc].dma_next = htole32(
				    desc_paddr + ((desc+1) *
				    sizeof(struct sunxi_mmc_idma_descriptor)));
			}
			++desc;
		}
	}
	if (desc == sc->sc_idma_ndesc) {
		aprint_error_dev(sc->sc_dev,
		    "not enough descriptors for %d byte transfer! "
		    "there are %u segments with a max xfer length of %u\n",
		    cmd->c_datalen, map->dm_nsegs, sc->sc_config->idma_xferlen);
		return EIO;
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_idma_map, 0,
	    sc->sc_idma_size, BUS_DMASYNC_PREWRITE);

	sc->sc_idma_idst = 0;

	MMC_WRITE(sc, SUNXI_MMC_DLBA, desc_paddr);
	MMC_WRITE(sc, SUNXI_MMC_FTRGLEVEL, sc->sc_config->dma_ftrglevel);

	val = MMC_READ(sc, SUNXI_MMC_GCTRL);
	val |= SUNXI_MMC_GCTRL_DMAEN;
	MMC_WRITE(sc, SUNXI_MMC_GCTRL, val);
	val |= SUNXI_MMC_GCTRL_DMARESET;
	MMC_WRITE(sc, SUNXI_MMC_GCTRL, val);

	MMC_WRITE(sc, SUNXI_MMC_DMAC, SUNXI_MMC_DMAC_SOFTRESET);
	if (ISSET(cmd->c_flags, SCF_CMD_READ))
		val = SUNXI_MMC_IDST_RECEIVE_INT;
	else
		val = 0;
	MMC_WRITE(sc, SUNXI_MMC_IDIE, val);
	MMC_WRITE(sc, SUNXI_MMC_DMAC,
	    SUNXI_MMC_DMAC_IDMA_ON|SUNXI_MMC_DMAC_FIX_BURST);

	return 0;
}

static void
sunxi_mmc_dma_complete(struct sunxi_mmc_softc *sc, struct sdmmc_command *cmd)
{
	MMC_WRITE(sc, SUNXI_MMC_DMAC, 0);

	bus_dmamap_sync(sc->sc_dmat, sc->sc_idma_map, 0,
	    sc->sc_idma_size, BUS_DMASYNC_POSTWRITE);

	if (cmd->c_dmamap == NULL) {
		if (ISSET(cmd->c_flags, SCF_CMD_READ)) {
			bus_dmamap_sync(sc->sc_dmat, sc->sc_dmabounce_map,
			    0, cmd->c_datalen, BUS_DMASYNC_POSTREAD);
			memcpy(cmd->c_data, sc->sc_dmabounce_buf,
			    cmd->c_datalen);
		} else {
			bus_dmamap_sync(sc->sc_dmat, sc->sc_dmabounce_map,
			    0, cmd->c_datalen, BUS_DMASYNC_POSTWRITE);
		}
	}
}

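/*
 * Execute an SD/MMC command: program the command, argument, and interrupt
 * mask, set up DMA for data transfers, wait for command (and, for data
 * commands, transfer) completion, then collect the response.
 */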
static void
sunxi_mmc_exec_command(sdmmc_chipset_handle_t sch, struct sdmmc_command *cmd)
{
	struct sunxi_mmc_softc *sc = sch;
	uint32_t cmdval = SUNXI_MMC_CMD_START;
	uint32_t imask, oimask;
	const bool poll = (cmd->c_flags & SCF_POLL) != 0;
	int retry;

	DPRINTF(sc->sc_dev,
	    "opcode %d flags 0x%x data %p datalen %d blklen %d poll %d\n",
	    cmd->c_opcode, cmd->c_flags, cmd->c_data, cmd->c_datalen,
	    cmd->c_blklen, poll);

	mutex_enter(&sc->sc_intr_lock);

	if (cmd->c_opcode == 0)
		cmdval |= SUNXI_MMC_CMD_SEND_INIT_SEQ;
	if (cmd->c_flags & SCF_RSP_PRESENT)
		cmdval |= SUNXI_MMC_CMD_RSP_EXP;
	if (cmd->c_flags & SCF_RSP_136)
		cmdval |= SUNXI_MMC_CMD_LONG_RSP;
	if (cmd->c_flags & SCF_RSP_CRC)
		cmdval |= SUNXI_MMC_CMD_CHECK_RSP_CRC;

	imask = oimask = MMC_READ(sc, SUNXI_MMC_IMASK);
	imask |= SUNXI_MMC_INT_ERROR;

	if (cmd->c_datalen > 0) {
		unsigned int nblks;

		cmdval |= SUNXI_MMC_CMD_DATA_EXP | SUNXI_MMC_CMD_WAIT_PRE_OVER;
		if (!ISSET(cmd->c_flags, SCF_CMD_READ)) {
			cmdval |= SUNXI_MMC_CMD_WRITE;
		}

		nblks = cmd->c_datalen / cmd->c_blklen;
		if (nblks == 0 || (cmd->c_datalen % cmd->c_blklen) != 0)
			++nblks;

		if (nblks > 1) {
			cmdval |= SUNXI_MMC_CMD_SEND_AUTO_STOP;
			imask |= SUNXI_MMC_INT_AUTO_CMD_DONE;
		} else {
			imask |= SUNXI_MMC_INT_DATA_OVER;
		}

		MMC_WRITE(sc, SUNXI_MMC_BLKSZ, cmd->c_blklen);
		MMC_WRITE(sc, SUNXI_MMC_BYTECNT, nblks * cmd->c_blklen);
	} else {
		imask |= SUNXI_MMC_INT_CMD_DONE;
	}

	MMC_WRITE(sc, SUNXI_MMC_IMASK, imask);
	MMC_WRITE(sc, SUNXI_MMC_RINT, 0xffff);

	sc->sc_intr_rint = 0;

	MMC_WRITE(sc, SUNXI_MMC_A12A,
	    (cmdval & SUNXI_MMC_CMD_SEND_AUTO_STOP) ? 0 : 0xffff);

	MMC_WRITE(sc, SUNXI_MMC_ARG, cmd->c_arg);

	DPRINTF(sc->sc_dev, "cmdval = %08x\n", cmdval);

	if (cmd->c_datalen == 0) {
		MMC_WRITE(sc, SUNXI_MMC_CMD, cmdval | cmd->c_opcode);
	} else {
		cmd->c_resid = cmd->c_datalen;
		cmd->c_error = sunxi_mmc_dma_prepare(sc, cmd);
		MMC_WRITE(sc, SUNXI_MMC_CMD, cmdval | cmd->c_opcode);
		if (cmd->c_error == 0 && ISSET(cmd->c_flags, SCF_CMD_READ)) {
			const uint32_t idst_mask = SUNXI_MMC_IDST_RECEIVE_INT;

			retry = 10;
			while ((sc->sc_idma_idst & idst_mask) == 0) {
				if (retry-- == 0) {
					cmd->c_error = ETIMEDOUT;
					break;
				}
				cv_timedwait(&sc->sc_idst_cv,
				    &sc->sc_intr_lock, hz);
			}
		}
	}

	cmd->c_error = sunxi_mmc_wait_rint(sc,
	    SUNXI_MMC_INT_ERROR|SUNXI_MMC_INT_CMD_DONE, hz * 10, poll);
	if (cmd->c_error == 0 && (sc->sc_intr_rint & SUNXI_MMC_INT_ERROR)) {
		if (sc->sc_intr_rint & SUNXI_MMC_INT_RESP_TIMEOUT) {
			cmd->c_error = ETIMEDOUT;
		} else {
			cmd->c_error = EIO;
		}
	}
	if (cmd->c_error) {
		DPRINTF(sc->sc_dev,
		    "cmd failed, error %d\n", cmd->c_error);
		goto done;
	}

	if (cmd->c_datalen > 0) {
		sunxi_mmc_dma_complete(sc, cmd);

		cmd->c_error = sunxi_mmc_wait_rint(sc,
		    SUNXI_MMC_INT_ERROR|
		    SUNXI_MMC_INT_AUTO_CMD_DONE|
		    SUNXI_MMC_INT_DATA_OVER,
		    hz*10, poll);
		if (cmd->c_error == 0 &&
		    (sc->sc_intr_rint & SUNXI_MMC_INT_ERROR)) {
			cmd->c_error = ETIMEDOUT;
		}
		if (cmd->c_error) {
			DPRINTF(sc->sc_dev,
			    "data timeout, rint = %08x\n",
			    sc->sc_intr_rint);
			cmd->c_error = ETIMEDOUT;
			goto done;
		}
	}

	if (cmd->c_flags & SCF_RSP_PRESENT) {
		if (cmd->c_flags & SCF_RSP_136) {
			cmd->c_resp[0] = MMC_READ(sc, SUNXI_MMC_RESP0);
			cmd->c_resp[1] = MMC_READ(sc, SUNXI_MMC_RESP1);
			cmd->c_resp[2] = MMC_READ(sc, SUNXI_MMC_RESP2);
			cmd->c_resp[3] = MMC_READ(sc, SUNXI_MMC_RESP3);
			if (cmd->c_flags & SCF_RSP_CRC) {
				cmd->c_resp[0] = (cmd->c_resp[0] >> 8) |
				    (cmd->c_resp[1] << 24);
				cmd->c_resp[1] = (cmd->c_resp[1] >> 8) |
				    (cmd->c_resp[2] << 24);
				cmd->c_resp[2] = (cmd->c_resp[2] >> 8) |
				    (cmd->c_resp[3] << 24);
				cmd->c_resp[3] = (cmd->c_resp[3] >> 8);
			}
		} else {
			cmd->c_resp[0] = MMC_READ(sc, SUNXI_MMC_RESP0);
		}
	}

done:
	cmd->c_flags |= SCF_ITSDONE;
	MMC_WRITE(sc, SUNXI_MMC_IMASK, oimask);
	MMC_WRITE(sc, SUNXI_MMC_RINT, 0xffff);
	MMC_WRITE(sc, SUNXI_MMC_IDST, 0x337);
	mutex_exit(&sc->sc_intr_lock);

	if (cmd->c_error) {
		DPRINTF(sc->sc_dev, "i/o error %d\n", cmd->c_error);
		MMC_WRITE(sc, SUNXI_MMC_GCTRL,
		    MMC_READ(sc, SUNXI_MMC_GCTRL) |
		    SUNXI_MMC_GCTRL_DMARESET | SUNXI_MMC_GCTRL_FIFORESET);
		for (retry = 0; retry < 1000; retry++) {
			if (!(MMC_READ(sc, SUNXI_MMC_GCTRL) & SUNXI_MMC_GCTRL_RESET))
				break;
			delay(10);
		}
		sunxi_mmc_update_clock(sc);
	}

	MMC_WRITE(sc, SUNXI_MMC_GCTRL,
	    MMC_READ(sc, SUNXI_MMC_GCTRL) | SUNXI_MMC_GCTRL_FIFORESET);
}

static void
sunxi_mmc_card_enable_intr(sdmmc_chipset_handle_t sch, int enable)
{
	struct sunxi_mmc_softc *sc = sch;
	uint32_t imask;

	imask = MMC_READ(sc, SUNXI_MMC_IMASK);
	if (enable)
		imask |= SUNXI_MMC_INT_SDIO_INT;
	else
		imask &= ~SUNXI_MMC_INT_SDIO_INT;
	MMC_WRITE(sc, SUNXI_MMC_IMASK, imask);
}

static void
sunxi_mmc_card_intr_ack(sdmmc_chipset_handle_t sch)
{
	struct sunxi_mmc_softc *sc = sch;

	MMC_WRITE(sc, SUNXI_MMC_RINT, SUNXI_MMC_INT_SDIO_INT);
}