/* $NetBSD: sunxi_mmc.c,v 1.39 2019/10/03 15:10:32 jmcneill Exp $ */
2
3 /*-
4 * Copyright (c) 2014-2017 Jared McNeill <jmcneill (at) invisible.ca>
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
21 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
22 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
23 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
24 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE.
27 */
28
29 #include "opt_sunximmc.h"
30
31 #include <sys/cdefs.h>
32 __KERNEL_RCSID(0, "$NetBSD: sunxi_mmc.c,v 1.39 2019/10/03 15:10:32 jmcneill Exp $");
33
34 #include <sys/param.h>
35 #include <sys/bus.h>
36 #include <sys/device.h>
37 #include <sys/intr.h>
38 #include <sys/systm.h>
39 #include <sys/kernel.h>
40 #include <sys/gpio.h>
41
42 #include <dev/sdmmc/sdmmcvar.h>
43 #include <dev/sdmmc/sdmmcchip.h>
44 #include <dev/sdmmc/sdmmc_ioreg.h>
45
46 #include <dev/fdt/fdtvar.h>
47
48 #include <arm/sunxi/sunxi_mmc.h>
49
#ifdef SUNXI_MMC_DEBUG
/*
 * Per-unit debug output: bit N of sunxi_mmc_debug enables DPRINTF for
 * device unit N.  The initial value comes from options SUNXI_MMC_DEBUG.
 */
static int sunxi_mmc_debug = SUNXI_MMC_DEBUG;
#define DPRINTF(dev, fmt, ...)						\
	do {								\
		if (sunxi_mmc_debug & __BIT(device_unit(dev)))		\
			device_printf((dev), fmt, ##__VA_ARGS__);	\
	} while (0)
#else
#define DPRINTF(dev, fmt, ...)	((void)0)
#endif
60
/*
 * Bus timing modes, used to index the per-SoC clock phase delay tables.
 */
enum sunxi_mmc_timing {
	SUNXI_MMC_TIMING_400K,		/* identification (<= 400 kHz) */
	SUNXI_MMC_TIMING_25M,		/* default speed (<= 25 MHz) */
	SUNXI_MMC_TIMING_50M,		/* high speed (<= 52 MHz) */
	SUNXI_MMC_TIMING_50M_DDR,	/* DDR, 1/4-bit bus */
	SUNXI_MMC_TIMING_50M_DDR_8BIT,	/* DDR, 8-bit bus */
};
68
/*
 * Clock phase delays programmed into the "output" and "sample" clocks
 * for a given timing mode (values passed to clk_set_rate()).
 */
struct sunxi_mmc_delay {
	u_int output_phase;	/* output clock phase */
	u_int sample_phase;	/* sample clock phase */
};
73
/* Phase delays for sun7i (A20) class controllers, indexed by timing mode. */
static const struct sunxi_mmc_delay sun7i_mmc_delays[] = {
	[SUNXI_MMC_TIMING_400K]		= { 180, 180 },
	[SUNXI_MMC_TIMING_25M]		= { 180,  75 },
	[SUNXI_MMC_TIMING_50M]		= {  90, 120 },
	[SUNXI_MMC_TIMING_50M_DDR]	= {  60, 120 },
	[SUNXI_MMC_TIMING_50M_DDR_8BIT]	= {  90, 180 },
};

/* Phase delays for sun9i (A80) class controllers, indexed by timing mode. */
static const struct sunxi_mmc_delay sun9i_mmc_delays[] = {
	[SUNXI_MMC_TIMING_400K]		= { 180, 180 },
	[SUNXI_MMC_TIMING_25M]		= { 180,  75 },
	[SUNXI_MMC_TIMING_50M]		= { 150, 120 },
	[SUNXI_MMC_TIMING_50M_DDR]	= {  54,  36 },
	[SUNXI_MMC_TIMING_50M_DDR_8BIT]	= {  72,  72 },
};
89
/* Number of IDMA descriptors allocated per controller instance. */
#define SUNXI_MMC_NDESC		64

struct sunxi_mmc_softc;

static int	sunxi_mmc_match(device_t, cfdata_t, void *);
static void	sunxi_mmc_attach(device_t, device_t, void *);
static void	sunxi_mmc_attach_i(device_t);

static int	sunxi_mmc_intr(void *);
static int	sunxi_mmc_dmabounce_setup(struct sunxi_mmc_softc *);
static int	sunxi_mmc_idma_setup(struct sunxi_mmc_softc *);
static void	sunxi_mmc_dma_complete(struct sunxi_mmc_softc *,
		    struct sdmmc_command *);

/* sdmmc(4) chipset methods, see sunxi_mmc_chip_functions below. */
static int	sunxi_mmc_host_reset(sdmmc_chipset_handle_t);
static uint32_t	sunxi_mmc_host_ocr(sdmmc_chipset_handle_t);
static int	sunxi_mmc_host_maxblklen(sdmmc_chipset_handle_t);
static int	sunxi_mmc_card_detect(sdmmc_chipset_handle_t);
static int	sunxi_mmc_write_protect(sdmmc_chipset_handle_t);
static int	sunxi_mmc_bus_power(sdmmc_chipset_handle_t, uint32_t);
static int	sunxi_mmc_bus_clock(sdmmc_chipset_handle_t, int, bool);
static int	sunxi_mmc_bus_width(sdmmc_chipset_handle_t, int);
static int	sunxi_mmc_bus_rod(sdmmc_chipset_handle_t, int);
static int	sunxi_mmc_signal_voltage(sdmmc_chipset_handle_t, int);
static int	sunxi_mmc_execute_tuning(sdmmc_chipset_handle_t, int);
static void	sunxi_mmc_exec_command(sdmmc_chipset_handle_t,
		    struct sdmmc_command *);
static void	sunxi_mmc_card_enable_intr(sdmmc_chipset_handle_t, int);
static void	sunxi_mmc_card_intr_ack(sdmmc_chipset_handle_t);
118
/*
 * sdmmc(4) chipset method table.  Note .bus_clock_ddr is used (rather
 * than .bus_clock) so the stack can request DDR modes.
 */
static struct sdmmc_chip_functions sunxi_mmc_chip_functions = {
	.host_reset = sunxi_mmc_host_reset,
	.host_ocr = sunxi_mmc_host_ocr,
	.host_maxblklen = sunxi_mmc_host_maxblklen,
	.card_detect = sunxi_mmc_card_detect,
	.write_protect = sunxi_mmc_write_protect,
	.bus_power = sunxi_mmc_bus_power,
	.bus_clock_ddr = sunxi_mmc_bus_clock,
	.bus_width = sunxi_mmc_bus_width,
	.bus_rod = sunxi_mmc_bus_rod,
	.signal_voltage = sunxi_mmc_signal_voltage,
	.execute_tuning = sunxi_mmc_execute_tuning,
	.exec_command = sunxi_mmc_exec_command,
	.card_enable_intr = sunxi_mmc_card_enable_intr,
	.card_intr_ack = sunxi_mmc_card_intr_ack,
};
135
/*
 * Per-SoC controller parameters, selected by DT compatible string.
 */
struct sunxi_mmc_config {
	u_int idma_xferlen;		/* max bytes per IDMA descriptor */
	u_int flags;			/* SUNXI_MMC_FLAG_* */
#define SUNXI_MMC_FLAG_CALIB_REG	0x01	/* has sample delay calibration reg */
#define SUNXI_MMC_FLAG_NEW_TIMINGS	0x02	/* supports "new timing" mode */
#define SUNXI_MMC_FLAG_MASK_DATA0	0x04	/* mask DATA0 while gating clock */
#define SUNXI_MMC_FLAG_HS200		0x08	/* supports eMMC HS200 */
	const struct sunxi_mmc_delay *delays;	/* phase delay table, or NULL */
	uint32_t dma_ftrglevel;		/* FIFO trigger level reg value */
};
146
/*
 * Per-instance driver state.
 */
struct sunxi_mmc_softc {
	device_t sc_dev;
	bus_space_tag_t sc_bst;
	bus_space_handle_t sc_bsh;
	bus_dma_tag_t sc_dmat;
	int sc_phandle;			/* device tree node */

	void *sc_ih;			/* interrupt handle */
	kmutex_t sc_intr_lock;		/* protects command/intr state */
	kcondvar_t sc_intr_cv;		/* command completion wakeup */

	int sc_mmc_width;		/* current bus width (1/4/8) */
	int sc_mmc_present;		/* debounced card-detect state */

	u_int sc_max_frequency;		/* max bus clock, Hz (DT or 52MHz) */

	device_t sc_sdmmc_dev;		/* attached sdmmc(4) child */

	struct sunxi_mmc_config *sc_config;	/* per-SoC parameters */

	/* IDMA descriptor ring */
	bus_dma_segment_t sc_idma_segs[1];
	int sc_idma_nsegs;
	bus_size_t sc_idma_size;
	bus_dmamap_t sc_idma_map;
	int sc_idma_ndesc;
	void *sc_idma_desc;

	/* bounce buffer for commands submitted without a dma map */
	bus_dmamap_t sc_dmabounce_map;
	void *sc_dmabounce_buf;
	size_t sc_dmabounce_buflen;

	struct clk *sc_clk_ahb;		/* bus interface clock */
	struct clk *sc_clk_mmc;		/* module clock */
	struct clk *sc_clk_output;	/* output phase clock (optional) */
	struct clk *sc_clk_sample;	/* sample phase clock (optional) */

	struct fdtbus_reset *sc_rst_ahb;

	struct fdtbus_gpio_pin *sc_gpio_cd;	/* card detect GPIO */
	int sc_gpio_cd_inverted;		/* 1 = active low (default) */
	struct fdtbus_gpio_pin *sc_gpio_wp;	/* write protect GPIO */
	int sc_gpio_wp_inverted;		/* 1 = active low (default) */

	struct fdtbus_regulator *sc_reg_vmmc;	/* card power supply */
	struct fdtbus_regulator *sc_reg_vqmmc;	/* I/O voltage supply */

	struct fdtbus_mmc_pwrseq *sc_pwrseq;	/* power sequence provider */

	bool sc_non_removable;		/* DT "non-removable" */
	bool sc_broken_cd;		/* DT "broken-cd" */

	/* current command state, protected by sc_intr_lock */
	uint32_t sc_intr_card;
	struct sdmmc_command *sc_curcmd;
	bool sc_wait_dma;		/* waiting for IDMA receive intr */
	bool sc_wait_cmd;		/* waiting for command done intr */
	bool sc_wait_data;		/* waiting for data/auto-stop intr */
};
204
CFATTACH_DECL_NEW(sunxi_mmc, sizeof(struct sunxi_mmc_softc),
	sunxi_mmc_match, sunxi_mmc_attach, NULL, NULL);

/* 32-bit register access helpers. */
#define MMC_WRITE(sc, reg, val)	\
	bus_space_write_4((sc)->sc_bst, (sc)->sc_bsh, (reg), (val))
#define MMC_READ(sc, reg)	\
	bus_space_read_4((sc)->sc_bst, (sc)->sc_bsh, (reg))
212
/* Per-SoC configurations, one per supported compatible string. */

static const struct sunxi_mmc_config sun4i_a10_mmc_config = {
	.idma_xferlen = 0x2000,
	.dma_ftrglevel = 0x20070008,
	.delays = NULL,
	.flags = 0,
};

static const struct sunxi_mmc_config sun5i_a13_mmc_config = {
	.idma_xferlen = 0x10000,
	.dma_ftrglevel = 0x20070008,
	.delays = NULL,
	.flags = 0,
};

static const struct sunxi_mmc_config sun7i_a20_mmc_config = {
	.idma_xferlen = 0x2000,
	.dma_ftrglevel = 0x20070008,
	.delays = sun7i_mmc_delays,
	.flags = 0,
};

static const struct sunxi_mmc_config sun8i_a83t_emmc_config = {
	.idma_xferlen = 0x10000,
	.dma_ftrglevel = 0x20070008,
	.delays = NULL,
	.flags = SUNXI_MMC_FLAG_NEW_TIMINGS,
};

static const struct sunxi_mmc_config sun9i_a80_mmc_config = {
	.idma_xferlen = 0x10000,
	.dma_ftrglevel = 0x200f0010,
	.delays = sun9i_mmc_delays,
	.flags = 0,
};

static const struct sunxi_mmc_config sun50i_a64_mmc_config = {
	.idma_xferlen = 0x10000,
	.dma_ftrglevel = 0x20070008,
	.delays = NULL,
	.flags = SUNXI_MMC_FLAG_CALIB_REG |
		 SUNXI_MMC_FLAG_NEW_TIMINGS |
		 SUNXI_MMC_FLAG_MASK_DATA0,
};

static const struct sunxi_mmc_config sun50i_a64_emmc_config = {
	.idma_xferlen = 0x2000,
	.dma_ftrglevel = 0x20070008,
	.delays = NULL,
	.flags = SUNXI_MMC_FLAG_CALIB_REG |
		 SUNXI_MMC_FLAG_NEW_TIMINGS |
		 SUNXI_MMC_FLAG_HS200,
};

static const struct sunxi_mmc_config sun50i_h6_mmc_config = {
	.idma_xferlen = 0x10000,
	.dma_ftrglevel = 0x20070008,
	.delays = NULL,
	.flags = SUNXI_MMC_FLAG_CALIB_REG |
		 SUNXI_MMC_FLAG_NEW_TIMINGS |
		 SUNXI_MMC_FLAG_MASK_DATA0,
};

static const struct sunxi_mmc_config sun50i_h6_emmc_config = {
	.idma_xferlen = 0x2000,
	.dma_ftrglevel = 0x20070008,
	.delays = NULL,
	.flags = SUNXI_MMC_FLAG_CALIB_REG,
};

/* Map of DT compatible strings to per-SoC configuration. */
static const struct of_compat_data compat_data[] = {
	{ "allwinner,sun4i-a10-mmc",	(uintptr_t)&sun4i_a10_mmc_config },
	{ "allwinner,sun5i-a13-mmc",	(uintptr_t)&sun5i_a13_mmc_config },
	{ "allwinner,sun7i-a20-mmc",	(uintptr_t)&sun7i_a20_mmc_config },
	{ "allwinner,sun8i-a83t-emmc",	(uintptr_t)&sun8i_a83t_emmc_config },
	{ "allwinner,sun9i-a80-mmc",	(uintptr_t)&sun9i_a80_mmc_config },
	{ "allwinner,sun50i-a64-mmc",	(uintptr_t)&sun50i_a64_mmc_config },
	{ "allwinner,sun50i-a64-emmc",	(uintptr_t)&sun50i_a64_emmc_config },
	{ "allwinner,sun50i-h6-mmc",	(uintptr_t)&sun50i_h6_mmc_config },
	{ "allwinner,sun50i-h6-emmc",	(uintptr_t)&sun50i_h6_emmc_config },
	{ NULL }
};
294
295 static int
296 sunxi_mmc_match(device_t parent, cfdata_t cf, void *aux)
297 {
298 struct fdt_attach_args * const faa = aux;
299
300 return of_match_compat_data(faa->faa_phandle, compat_data);
301 }
302
/*
 * Autoconf attach: map registers, enable clocks/resets/regulators,
 * allocate DMA resources, and hook the interrupt.  The sdmmc child is
 * attached later from sunxi_mmc_attach_i() once interrupts work.
 */
static void
sunxi_mmc_attach(device_t parent, device_t self, void *aux)
{
	struct sunxi_mmc_softc * const sc = device_private(self);
	struct fdt_attach_args * const faa = aux;
	const int phandle = faa->faa_phandle;
	char intrstr[128];
	bus_addr_t addr;
	bus_size_t size;

	if (fdtbus_get_reg(phandle, 0, &addr, &size) != 0) {
		aprint_error(": couldn't get registers\n");
		return;
	}

	sc->sc_clk_ahb = fdtbus_clock_get(phandle, "ahb");
	sc->sc_clk_mmc = fdtbus_clock_get(phandle, "mmc");
	sc->sc_clk_output = fdtbus_clock_get(phandle, "output");
	sc->sc_clk_sample = fdtbus_clock_get(phandle, "sample");

	/* The "output"/"sample" phase clocks are treated as optional. */
#if notyet
	if (sc->sc_clk_ahb == NULL || sc->sc_clk_mmc == NULL ||
	    sc->sc_clk_output == NULL || sc->sc_clk_sample == NULL) {
#else
	if (sc->sc_clk_ahb == NULL || sc->sc_clk_mmc == NULL) {
#endif
		aprint_error(": couldn't get clocks\n");
		return;
	}

	sc->sc_rst_ahb = fdtbus_reset_get(phandle, "ahb");

	sc->sc_pwrseq = fdtbus_mmc_pwrseq_get(phandle);

	if (clk_enable(sc->sc_clk_ahb) != 0 ||
	    clk_enable(sc->sc_clk_mmc) != 0) {
		aprint_error(": couldn't enable clocks\n");
		return;
	}

	if (sc->sc_rst_ahb != NULL) {
		if (fdtbus_reset_deassert(sc->sc_rst_ahb) != 0) {
			aprint_error(": couldn't de-assert resets\n");
			return;
		}
	}

	sc->sc_dev = self;
	sc->sc_phandle = phandle;
	/* Select per-SoC parameters based on the compatible string. */
	sc->sc_config = (void *)of_search_compatible(phandle, compat_data)->data;
	sc->sc_bst = faa->faa_bst;
	sc->sc_dmat = faa->faa_dmat;
	mutex_init(&sc->sc_intr_lock, MUTEX_DEFAULT, IPL_BIO);
	cv_init(&sc->sc_intr_cv, "sunximmcirq");

	if (bus_space_map(sc->sc_bst, addr, size, 0, &sc->sc_bsh) != 0) {
		aprint_error(": couldn't map registers\n");
		return;
	}

	sc->sc_reg_vmmc = fdtbus_regulator_acquire(phandle, "vmmc-supply");
	if (sc->sc_reg_vmmc != NULL && fdtbus_regulator_enable(sc->sc_reg_vmmc)) {
		aprint_error(": couldn't enable vmmc-supply\n");
		return;
	}

	aprint_naive("\n");
	aprint_normal(": SD/MMC controller\n");

	sc->sc_reg_vqmmc = fdtbus_regulator_acquire(phandle, "vqmmc-supply");

	sc->sc_gpio_cd = fdtbus_gpio_acquire(phandle, "cd-gpios",
	    GPIO_PIN_INPUT);
	sc->sc_gpio_wp = fdtbus_gpio_acquire(phandle, "wp-gpios",
	    GPIO_PIN_INPUT);

	/* CD/WP GPIOs are active-low unless the DT says otherwise. */
	sc->sc_gpio_cd_inverted = of_hasprop(phandle, "cd-inverted") ? 0 : 1;
	sc->sc_gpio_wp_inverted = of_hasprop(phandle, "wp-inverted") ? 0 : 1;

	sc->sc_non_removable = of_hasprop(phandle, "non-removable");
	sc->sc_broken_cd = of_hasprop(phandle, "broken-cd");

	/* Without a DT limit, cap the bus clock at 52 MHz. */
	if (of_getprop_uint32(phandle, "max-frequency", &sc->sc_max_frequency))
		sc->sc_max_frequency = 52000000;

	if (sunxi_mmc_dmabounce_setup(sc) != 0 ||
	    sunxi_mmc_idma_setup(sc) != 0) {
		aprint_error_dev(self, "failed to setup DMA\n");
		return;
	}

	if (!fdtbus_intr_str(phandle, 0, intrstr, sizeof(intrstr))) {
		aprint_error_dev(self, "failed to decode interrupt\n");
		return;
	}

	sc->sc_ih = fdtbus_intr_establish(phandle, 0, IPL_BIO, FDT_INTR_MPSAFE,
	    sunxi_mmc_intr, sc);
	if (sc->sc_ih == NULL) {
		aprint_error_dev(self, "failed to establish interrupt on %s\n",
		    intrstr);
		return;
	}
	aprint_normal_dev(self, "interrupting on %s\n", intrstr);

	/* Defer child attach until interrupts are available. */
	config_interrupts(self, sunxi_mmc_attach_i);
}
410
/*
 * Allocate a single-segment bounce buffer (one max-sized block) used
 * for commands that arrive without a dma map, e.g. SDIO CMD53.
 * Returns 0 or a bus_dma error; on failure all partial allocations
 * are unwound via the goto cleanup chain.
 */
static int
sunxi_mmc_dmabounce_setup(struct sunxi_mmc_softc *sc)
{
	bus_dma_segment_t ds[1];
	int error, rseg;

	sc->sc_dmabounce_buflen = sunxi_mmc_host_maxblklen(sc);
	error = bus_dmamem_alloc(sc->sc_dmat, sc->sc_dmabounce_buflen, 0,
	    sc->sc_dmabounce_buflen, ds, 1, &rseg, BUS_DMA_WAITOK);
	if (error)
		return error;
	error = bus_dmamem_map(sc->sc_dmat, ds, 1, sc->sc_dmabounce_buflen,
	    &sc->sc_dmabounce_buf, BUS_DMA_WAITOK);
	if (error)
		goto free;
	error = bus_dmamap_create(sc->sc_dmat, sc->sc_dmabounce_buflen, 1,
	    sc->sc_dmabounce_buflen, 0, BUS_DMA_WAITOK, &sc->sc_dmabounce_map);
	if (error)
		goto unmap;
	error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmabounce_map,
	    sc->sc_dmabounce_buf, sc->sc_dmabounce_buflen, NULL,
	    BUS_DMA_WAITOK);
	if (error)
		goto destroy;
	return 0;

destroy:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmabounce_map);
unmap:
	bus_dmamem_unmap(sc->sc_dmat, sc->sc_dmabounce_buf,
	    sc->sc_dmabounce_buflen);
free:
	bus_dmamem_free(sc->sc_dmat, ds, rseg);
	return error;
}
446
/*
 * Allocate and map the IDMA descriptor ring (SUNXI_MMC_NDESC entries
 * in one DMA-contiguous segment).  Returns 0 or a bus_dma error; on
 * failure all partial allocations are unwound via the goto chain.
 */
static int
sunxi_mmc_idma_setup(struct sunxi_mmc_softc *sc)
{
	int error;

	sc->sc_idma_ndesc = SUNXI_MMC_NDESC;
	sc->sc_idma_size = sizeof(struct sunxi_mmc_idma_descriptor) *
	    sc->sc_idma_ndesc;
	error = bus_dmamem_alloc(sc->sc_dmat, sc->sc_idma_size, 0,
	    sc->sc_idma_size, sc->sc_idma_segs, 1,
	    &sc->sc_idma_nsegs, BUS_DMA_WAITOK);
	if (error)
		return error;
	error = bus_dmamem_map(sc->sc_dmat, sc->sc_idma_segs,
	    sc->sc_idma_nsegs, sc->sc_idma_size,
	    &sc->sc_idma_desc, BUS_DMA_WAITOK);
	if (error)
		goto free;
	error = bus_dmamap_create(sc->sc_dmat, sc->sc_idma_size, 1,
	    sc->sc_idma_size, 0, BUS_DMA_WAITOK, &sc->sc_idma_map);
	if (error)
		goto unmap;
	error = bus_dmamap_load(sc->sc_dmat, sc->sc_idma_map,
	    sc->sc_idma_desc, sc->sc_idma_size, NULL, BUS_DMA_WAITOK);
	if (error)
		goto destroy;
	return 0;

destroy:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_idma_map);
unmap:
	bus_dmamem_unmap(sc->sc_dmat, sc->sc_idma_desc, sc->sc_idma_size);
free:
	bus_dmamem_free(sc->sc_dmat, sc->sc_idma_segs, sc->sc_idma_nsegs);
	return error;
}
483
/*
 * Program the module clock for the requested card frequency (in kHz)
 * and, on SoCs with a phase delay table, the output/sample phase
 * clocks for the selected timing mode.  "dbl" doubles the module
 * clock (needed for some DDR modes).  Returns 0 or an errno.
 */
static int
sunxi_mmc_set_clock(struct sunxi_mmc_softc *sc, u_int freq, bool ddr, bool dbl)
{
	const struct sunxi_mmc_delay *delays;
	int error, timing = SUNXI_MMC_TIMING_400K;

	if (sc->sc_config->delays) {
		/* Pick the timing mode matching the requested frequency. */
		if (freq <= 400) {
			timing = SUNXI_MMC_TIMING_400K;
		} else if (freq <= 25000) {
			timing = SUNXI_MMC_TIMING_25M;
		} else if (freq <= 52000) {
			if (ddr) {
				timing = sc->sc_mmc_width == 8 ?
				    SUNXI_MMC_TIMING_50M_DDR_8BIT :
				    SUNXI_MMC_TIMING_50M_DDR;
			} else {
				timing = SUNXI_MMC_TIMING_50M;
			}
		} else
			return EINVAL;
	}
	/* Respect the DT "max-frequency" limit (sc_max_frequency is Hz). */
	if (sc->sc_max_frequency) {
		if (freq * 1000 > sc->sc_max_frequency)
			return EINVAL;
	}

	error = clk_set_rate(sc->sc_clk_mmc, (freq * 1000) << dbl);
	if (error != 0)
		return error;

	if (sc->sc_config->delays == NULL)
		return 0;

	delays = &sc->sc_config->delays[timing];

	/* Phase delays are programmed through the clock framework. */
	if (sc->sc_clk_sample) {
		error = clk_set_rate(sc->sc_clk_sample, delays->sample_phase);
		if (error != 0)
			return error;
	}
	if (sc->sc_clk_output) {
		error = clk_set_rate(sc->sc_clk_output, delays->output_phase);
		if (error != 0)
			return error;
	}

	return 0;
}
533
534 static void
535 sunxi_mmc_hw_reset(struct sunxi_mmc_softc *sc)
536 {
537 MMC_WRITE(sc, SUNXI_MMC_HWRST, 0);
538 delay(1000);
539 MMC_WRITE(sc, SUNXI_MMC_HWRST, 1);
540 delay(1000);
541 }
542
/*
 * Deferred attach, run once interrupts are enabled: power up the card,
 * reset the host, set initial bus parameters, and attach the sdmmc(4)
 * child with capabilities derived from the DT and per-SoC config.
 */
static void
sunxi_mmc_attach_i(device_t self)
{
	struct sunxi_mmc_softc *sc = device_private(self);
	const u_int flags = sc->sc_config->flags;
	struct sdmmcbus_attach_args saa;
	uint32_t width;

	if (sc->sc_pwrseq)
		fdtbus_mmc_pwrseq_pre_power_on(sc->sc_pwrseq);

	if (of_hasprop(sc->sc_phandle, "cap-mmc-hw-reset"))
		sunxi_mmc_hw_reset(sc);

	/* Start out at 1-bit / 400 kHz identification settings. */
	sunxi_mmc_host_reset(sc);
	sunxi_mmc_bus_width(sc, 1);
	sunxi_mmc_set_clock(sc, 400, false, false);

	if (sc->sc_pwrseq)
		fdtbus_mmc_pwrseq_post_power_on(sc->sc_pwrseq);

	if (of_getprop_uint32(sc->sc_phandle, "bus-width", &width) != 0)
		width = 4;

	memset(&saa, 0, sizeof(saa));
	saa.saa_busname = "sdmmc";
	saa.saa_sct = &sunxi_mmc_chip_functions;
	saa.saa_sch = sc;
	saa.saa_dmat = sc->sc_dmat;
	saa.saa_clkmin = 400;
	saa.saa_clkmax = sc->sc_max_frequency / 1000;	/* Hz -> kHz */
	saa.saa_caps = SMC_CAPS_DMA |
		       SMC_CAPS_MULTI_SEG_DMA |
		       SMC_CAPS_AUTO_STOP |
		       SMC_CAPS_SD_HIGHSPEED |
		       SMC_CAPS_MMC_HIGHSPEED |
		       SMC_CAPS_POLLING;

	/* DDR52 needs either a phase delay table or new timing mode. */
	if (sc->sc_config->delays || (flags & SUNXI_MMC_FLAG_NEW_TIMINGS))
		saa.saa_caps |= SMC_CAPS_MMC_DDR52;

	if (flags & SUNXI_MMC_FLAG_HS200)
		saa.saa_caps |= SMC_CAPS_MMC_HS200;

	if (width == 4)
		saa.saa_caps |= SMC_CAPS_4BIT_MODE;
	if (width == 8)
		saa.saa_caps |= SMC_CAPS_8BIT_MODE;

	/* GPIO card detect has no interrupt; the stack must poll it. */
	if (sc->sc_gpio_cd)
		saa.saa_caps |= SMC_CAPS_POLL_CARD_DET;

	sc->sc_sdmmc_dev = config_found(self, &saa, NULL);
}
597
/*
 * Interrupt handler.  Acks pending IDMA (IDST) and controller (MINT)
 * status, clears the sc_wait_* flags as the corresponding events
 * arrive, and wakes up the command submitter once everything it was
 * waiting for has completed (or failed).  Returns 1 if the interrupt
 * was ours, 0 otherwise.
 */
static int
sunxi_mmc_intr(void *priv)
{
	struct sunxi_mmc_softc *sc = priv;
	struct sdmmc_command *cmd;
	uint32_t idst, mint, imask;

	mutex_enter(&sc->sc_intr_lock);
	idst = MMC_READ(sc, SUNXI_MMC_IDST);
	mint = MMC_READ(sc, SUNXI_MMC_MINT);
	if (!idst && !mint) {
		/* Not our interrupt. */
		mutex_exit(&sc->sc_intr_lock);
		return 0;
	}
	/* Ack everything we saw up front. */
	MMC_WRITE(sc, SUNXI_MMC_IDST, idst);
	MMC_WRITE(sc, SUNXI_MMC_RINT, mint);

	cmd = sc->sc_curcmd;

	DPRINTF(sc->sc_dev, "mmc intr idst=%08X mint=%08X\n",
	    idst, mint);

	/* Handle SDIO card interrupt */
	if ((mint & SUNXI_MMC_INT_SDIO_INT) != 0) {
		/* Mask it until the stack acks via card_intr_ack. */
		imask = MMC_READ(sc, SUNXI_MMC_IMASK);
		MMC_WRITE(sc, SUNXI_MMC_IMASK, imask & ~SUNXI_MMC_INT_SDIO_INT);
		sdmmc_card_intr(sc->sc_sdmmc_dev);
	}

	/* Error interrupts take priority over command and transfer interrupts */
	if (cmd != NULL && (mint & SUNXI_MMC_INT_ERROR) != 0) {
		imask = MMC_READ(sc, SUNXI_MMC_IMASK);
		MMC_WRITE(sc, SUNXI_MMC_IMASK, imask & ~SUNXI_MMC_INT_ERROR);
		if ((mint & SUNXI_MMC_INT_RESP_TIMEOUT) != 0) {
			cmd->c_error = ETIMEDOUT;
			/* Wait for command to complete */
			sc->sc_wait_data = sc->sc_wait_dma = false;
			/*
			 * Timeouts are expected while probing SDIO, so
			 * stay quiet for those opcodes.
			 */
			if (cmd->c_opcode != SD_IO_SEND_OP_COND &&
			    cmd->c_opcode != SD_IO_RW_DIRECT &&
			    !ISSET(cmd->c_flags, SCF_TOUT_OK))
				device_printf(sc->sc_dev, "host controller timeout, mint=0x%08x\n", mint);
		} else {
			device_printf(sc->sc_dev, "host controller error, mint=0x%08x\n", mint);
			cmd->c_error = EIO;
			SET(cmd->c_flags, SCF_ITSDONE);
			goto done;
		}
	}

	/* IDMA receive complete (read transfers only). */
	if (cmd != NULL && (idst & SUNXI_MMC_IDST_RECEIVE_INT) != 0) {
		MMC_WRITE(sc, SUNXI_MMC_IDIE, 0);
		if (sc->sc_wait_dma == false)
			device_printf(sc->sc_dev, "unexpected DMA receive interrupt\n");
		sc->sc_wait_dma = false;
	}

	/* Command phase complete. */
	if (cmd != NULL && (mint & SUNXI_MMC_INT_CMD_DONE) != 0) {
		imask = MMC_READ(sc, SUNXI_MMC_IMASK);
		MMC_WRITE(sc, SUNXI_MMC_IMASK, imask & ~SUNXI_MMC_INT_CMD_DONE);
		if (sc->sc_wait_cmd == false)
			device_printf(sc->sc_dev, "unexpected command complete interrupt\n");
		sc->sc_wait_cmd = false;
	}

	/* Data phase (or auto-stop) complete. */
	const uint32_t dmadone_mask = SUNXI_MMC_INT_AUTO_CMD_DONE|SUNXI_MMC_INT_DATA_OVER;
	if (cmd != NULL && (mint & dmadone_mask) != 0) {
		imask = MMC_READ(sc, SUNXI_MMC_IMASK);
		MMC_WRITE(sc, SUNXI_MMC_IMASK, imask & ~dmadone_mask);
		if (sc->sc_wait_data == false)
			device_printf(sc->sc_dev, "unexpected data complete interrupt\n");
		sc->sc_wait_data = false;
	}

	/* All phases finished: mark the command done. */
	if (cmd != NULL &&
	    sc->sc_wait_dma == false &&
	    sc->sc_wait_cmd == false &&
	    sc->sc_wait_data == false) {
		SET(cmd->c_flags, SCF_ITSDONE);
	}

done:
	if (cmd != NULL && ISSET(cmd->c_flags, SCF_ITSDONE)) {
		cv_broadcast(&sc->sc_intr_cv);
	}

	mutex_exit(&sc->sc_intr_lock);

	return 1;
}
687
688 static int
689 sunxi_mmc_host_reset(sdmmc_chipset_handle_t sch)
690 {
691 struct sunxi_mmc_softc *sc = sch;
692 uint32_t gctrl;
693 int retry = 1000;
694
695 DPRINTF(sc->sc_dev, "host reset\n");
696
697 gctrl = MMC_READ(sc, SUNXI_MMC_GCTRL);
698 gctrl |= SUNXI_MMC_GCTRL_RESET;
699 MMC_WRITE(sc, SUNXI_MMC_GCTRL, gctrl);
700 while (--retry > 0) {
701 if (!(MMC_READ(sc, SUNXI_MMC_GCTRL) & SUNXI_MMC_GCTRL_RESET))
702 break;
703 delay(100);
704 }
705
706 MMC_WRITE(sc, SUNXI_MMC_TIMEOUT, 0xffffffff);
707
708 MMC_WRITE(sc, SUNXI_MMC_IMASK, 0);
709
710 MMC_WRITE(sc, SUNXI_MMC_RINT, 0xffffffff);
711
712 gctrl = MMC_READ(sc, SUNXI_MMC_GCTRL);
713 gctrl |= SUNXI_MMC_GCTRL_INTEN;
714 gctrl &= ~SUNXI_MMC_GCTRL_WAIT_MEM_ACCESS_DONE;
715 gctrl &= ~SUNXI_MMC_GCTRL_ACCESS_BY_AHB;
716 MMC_WRITE(sc, SUNXI_MMC_GCTRL, gctrl);
717
718 return 0;
719 }
720
721 static uint32_t
722 sunxi_mmc_host_ocr(sdmmc_chipset_handle_t sch)
723 {
724 return MMC_OCR_3_2V_3_3V | MMC_OCR_3_3V_3_4V | MMC_OCR_HCS;
725 }
726
727 static int
728 sunxi_mmc_host_maxblklen(sdmmc_chipset_handle_t sch)
729 {
730 return 8192;
731 }
732
733 static int
734 sunxi_mmc_card_detect(sdmmc_chipset_handle_t sch)
735 {
736 struct sunxi_mmc_softc *sc = sch;
737
738 if (sc->sc_non_removable || sc->sc_broken_cd) {
739 /*
740 * Non-removable or broken card detect flag set in
741 * DT, assume always present
742 */
743 return 1;
744 } else if (sc->sc_gpio_cd != NULL) {
745 /* Use card detect GPIO */
746 int v = 0, i;
747 for (i = 0; i < 5; i++) {
748 v += (fdtbus_gpio_read(sc->sc_gpio_cd) ^
749 sc->sc_gpio_cd_inverted);
750 delay(1000);
751 }
752 if (v == 5)
753 sc->sc_mmc_present = 0;
754 else if (v == 0)
755 sc->sc_mmc_present = 1;
756 return sc->sc_mmc_present;
757 } else {
758 /* Use CARD_PRESENT field of SD_STATUS register */
759 const uint32_t present = MMC_READ(sc, SUNXI_MMC_STATUS) &
760 SUNXI_MMC_STATUS_CARD_PRESENT;
761 return present != 0;
762 }
763 }
764
765 static int
766 sunxi_mmc_write_protect(sdmmc_chipset_handle_t sch)
767 {
768 struct sunxi_mmc_softc *sc = sch;
769
770 if (sc->sc_gpio_wp == NULL) {
771 return 0; /* no write protect pin, assume rw */
772 } else {
773 return fdtbus_gpio_read(sc->sc_gpio_wp) ^
774 sc->sc_gpio_wp_inverted;
775 }
776 }
777
778 static int
779 sunxi_mmc_bus_power(sdmmc_chipset_handle_t sch, uint32_t ocr)
780 {
781 return 0;
782 }
783
/*
 * Latch new clock settings into the card interface by issuing an
 * update-clock command and waiting for the controller to consume it.
 * Returns 0 on success or ETIMEDOUT (with a register dump under
 * SUNXI_MMC_DEBUG) if the command never completes.
 */
static int
sunxi_mmc_update_clock(struct sunxi_mmc_softc *sc)
{
	uint32_t cmd;
	int retry;

	DPRINTF(sc->sc_dev, "update clock\n");

	cmd = SUNXI_MMC_CMD_START |
	      SUNXI_MMC_CMD_UPCLK_ONLY |
	      SUNXI_MMC_CMD_WAIT_PRE_OVER;
	MMC_WRITE(sc, SUNXI_MMC_CMD, cmd);
	/* The START bit self-clears when the update has been taken. */
	retry = 100000;
	while (--retry > 0) {
		if (!(MMC_READ(sc, SUNXI_MMC_CMD) & SUNXI_MMC_CMD_START))
			break;
		delay(10);
	}

	if (retry == 0) {
		aprint_error_dev(sc->sc_dev, "timeout updating clock\n");
		DPRINTF(sc->sc_dev, "GCTRL: 0x%08x\n",
		    MMC_READ(sc, SUNXI_MMC_GCTRL));
		DPRINTF(sc->sc_dev, "CLKCR: 0x%08x\n",
		    MMC_READ(sc, SUNXI_MMC_CLKCR));
		DPRINTF(sc->sc_dev, "TIMEOUT: 0x%08x\n",
		    MMC_READ(sc, SUNXI_MMC_TIMEOUT));
		DPRINTF(sc->sc_dev, "WIDTH: 0x%08x\n",
		    MMC_READ(sc, SUNXI_MMC_WIDTH));
		DPRINTF(sc->sc_dev, "CMD: 0x%08x\n",
		    MMC_READ(sc, SUNXI_MMC_CMD));
		DPRINTF(sc->sc_dev, "MINT: 0x%08x\n",
		    MMC_READ(sc, SUNXI_MMC_MINT));
		DPRINTF(sc->sc_dev, "RINT: 0x%08x\n",
		    MMC_READ(sc, SUNXI_MMC_RINT));
		DPRINTF(sc->sc_dev, "STATUS: 0x%08x\n",
		    MMC_READ(sc, SUNXI_MMC_STATUS));
		return ETIMEDOUT;
	}

	return 0;
}
826
827 static int
828 sunxi_mmc_bus_clock(sdmmc_chipset_handle_t sch, int freq, bool ddr)
829 {
830 struct sunxi_mmc_softc *sc = sch;
831 uint32_t clkcr, gctrl, ntsr;
832 const u_int flags = sc->sc_config->flags;
833 bool dbl = 0;
834
835 clkcr = MMC_READ(sc, SUNXI_MMC_CLKCR);
836 if (clkcr & SUNXI_MMC_CLKCR_CARDCLKON) {
837 clkcr &= ~SUNXI_MMC_CLKCR_CARDCLKON;
838 if (flags & SUNXI_MMC_CLKCR_MASK_DATA0)
839 clkcr |= SUNXI_MMC_CLKCR_MASK_DATA0;
840 MMC_WRITE(sc, SUNXI_MMC_CLKCR, clkcr);
841 if (sunxi_mmc_update_clock(sc) != 0)
842 return 1;
843 if (flags & SUNXI_MMC_CLKCR_MASK_DATA0) {
844 clkcr = MMC_READ(sc, SUNXI_MMC_CLKCR);
845 clkcr &= ~SUNXI_MMC_CLKCR_MASK_DATA0;
846 MMC_WRITE(sc, SUNXI_MMC_CLKCR, clkcr);
847 }
848 }
849
850 if (freq) {
851 /* For 8bits ddr in old timing modes, and all ddr in new
852 * timing modes, the module clock has to be 2x the card clock.
853 */
854 if (ddr && ((flags & SUNXI_MMC_FLAG_NEW_TIMINGS) ||
855 sc->sc_mmc_width == 8))
856 dbl = 1;
857
858 clkcr &= ~SUNXI_MMC_CLKCR_DIV;
859 clkcr |= __SHIFTIN(dbl, SUNXI_MMC_CLKCR_DIV);
860 MMC_WRITE(sc, SUNXI_MMC_CLKCR, clkcr);
861
862 if (flags & SUNXI_MMC_FLAG_NEW_TIMINGS) {
863 ntsr = MMC_READ(sc, SUNXI_MMC_NTSR);
864 ntsr |= SUNXI_MMC_NTSR_MODE_SELECT;
865 MMC_WRITE(sc, SUNXI_MMC_NTSR, ntsr);
866 }
867
868 if (flags & SUNXI_MMC_FLAG_CALIB_REG)
869 MMC_WRITE(sc, SUNXI_MMC_SAMP_DL, SUNXI_MMC_SAMP_DL_SW_EN);
870
871 if (sunxi_mmc_update_clock(sc) != 0)
872 return 1;
873
874 gctrl = MMC_READ(sc, SUNXI_MMC_GCTRL);
875 if (ddr)
876 gctrl |= SUNXI_MMC_GCTRL_DDR_MODE;
877 else
878 gctrl &= ~SUNXI_MMC_GCTRL_DDR_MODE;
879 MMC_WRITE(sc, SUNXI_MMC_GCTRL, gctrl);
880
881 if (sunxi_mmc_set_clock(sc, freq, ddr, dbl) != 0)
882 return 1;
883
884 clkcr |= SUNXI_MMC_CLKCR_CARDCLKON;
885 if (flags & SUNXI_MMC_CLKCR_MASK_DATA0)
886 clkcr |= SUNXI_MMC_CLKCR_MASK_DATA0;
887 MMC_WRITE(sc, SUNXI_MMC_CLKCR, clkcr);
888 if (sunxi_mmc_update_clock(sc) != 0)
889 return 1;
890 if (flags & SUNXI_MMC_CLKCR_MASK_DATA0) {
891 clkcr = MMC_READ(sc, SUNXI_MMC_CLKCR);
892 clkcr &= ~SUNXI_MMC_CLKCR_MASK_DATA0;
893 MMC_WRITE(sc, SUNXI_MMC_CLKCR, clkcr);
894 }
895 }
896
897 return 0;
898 }
899
900 static int
901 sunxi_mmc_bus_width(sdmmc_chipset_handle_t sch, int width)
902 {
903 struct sunxi_mmc_softc *sc = sch;
904
905 DPRINTF(sc->sc_dev, "width = %d\n", width);
906
907 switch (width) {
908 case 1:
909 MMC_WRITE(sc, SUNXI_MMC_WIDTH, SUNXI_MMC_WIDTH_1);
910 break;
911 case 4:
912 MMC_WRITE(sc, SUNXI_MMC_WIDTH, SUNXI_MMC_WIDTH_4);
913 break;
914 case 8:
915 MMC_WRITE(sc, SUNXI_MMC_WIDTH, SUNXI_MMC_WIDTH_8);
916 break;
917 default:
918 return 1;
919 }
920
921 sc->sc_mmc_width = width;
922
923 return 0;
924 }
925
926 static int
927 sunxi_mmc_bus_rod(sdmmc_chipset_handle_t sch, int on)
928 {
929 return -1;
930 }
931
932 static int
933 sunxi_mmc_signal_voltage(sdmmc_chipset_handle_t sch, int signal_voltage)
934 {
935 struct sunxi_mmc_softc *sc = sch;
936 u_int uvol;
937 int error;
938
939 if (sc->sc_reg_vqmmc == NULL)
940 return 0;
941
942 switch (signal_voltage) {
943 case SDMMC_SIGNAL_VOLTAGE_330:
944 uvol = 3300000;
945 break;
946 case SDMMC_SIGNAL_VOLTAGE_180:
947 uvol = 1800000;
948 break;
949 default:
950 return EINVAL;
951 }
952
953 error = fdtbus_regulator_supports_voltage(sc->sc_reg_vqmmc, uvol, uvol);
954 if (error != 0)
955 return 0;
956
957 error = fdtbus_regulator_set_voltage(sc->sc_reg_vqmmc, uvol, uvol);
958 if (error != 0)
959 return error;
960
961 return fdtbus_regulator_enable(sc->sc_reg_vqmmc);
962 }
963
964 static int
965 sunxi_mmc_execute_tuning(sdmmc_chipset_handle_t sch, int timing)
966 {
967 switch (timing) {
968 case SDMMC_TIMING_MMC_HS200:
969 break;
970 default:
971 return EINVAL;
972 }
973
974 return 0;
975 }
976
/*
 * Build the IDMA descriptor chain for a data command and arm the
 * internal DMA engine.  Each dmamap segment is split into chunks of at
 * most idma_xferlen bytes; the last descriptor gets LD/ER set, all
 * others chain via dma_next and set DIC to suppress per-descriptor
 * interrupts.  Returns 0, E2BIG if a mapless command exceeds the
 * bounce buffer, or EIO if the ring is too small for the transfer.
 */
static int
sunxi_mmc_dma_prepare(struct sunxi_mmc_softc *sc, struct sdmmc_command *cmd)
{
	struct sunxi_mmc_idma_descriptor *dma = sc->sc_idma_desc;
	bus_addr_t desc_paddr = sc->sc_idma_map->dm_segs[0].ds_addr;
	bus_dmamap_t map;
	bus_size_t off;
	int desc, resid, seg;
	uint32_t val;

	/*
	 * If the command includes a dma map use it, otherwise we need to
	 * bounce. This can happen for SDIO IO_RW_EXTENDED (CMD53) commands.
	 */
	if (cmd->c_dmamap) {
		map = cmd->c_dmamap;
	} else {
		if (cmd->c_datalen > sc->sc_dmabounce_buflen)
			return E2BIG;
		map = sc->sc_dmabounce_map;

		if (ISSET(cmd->c_flags, SCF_CMD_READ)) {
			memset(sc->sc_dmabounce_buf, 0, cmd->c_datalen);
			bus_dmamap_sync(sc->sc_dmat, sc->sc_dmabounce_map,
			    0, cmd->c_datalen, BUS_DMASYNC_PREREAD);
		} else {
			memcpy(sc->sc_dmabounce_buf, cmd->c_data,
			    cmd->c_datalen);
			bus_dmamap_sync(sc->sc_dmat, sc->sc_dmabounce_map,
			    0, cmd->c_datalen, BUS_DMASYNC_PREWRITE);
		}
	}

	/* Fill descriptors, chunking each segment to idma_xferlen. */
	desc = 0;
	for (seg = 0; seg < map->dm_nsegs; seg++) {
		bus_addr_t paddr = map->dm_segs[seg].ds_addr;
		bus_size_t len = map->dm_segs[seg].ds_len;
		resid = uimin(len, cmd->c_resid);
		off = 0;
		while (resid > 0) {
			if (desc == sc->sc_idma_ndesc)
				break;
			len = uimin(sc->sc_config->idma_xferlen, resid);
			dma[desc].dma_buf_size = htole32(len);
			dma[desc].dma_buf_addr = htole32(paddr + off);
			dma[desc].dma_config = htole32(SUNXI_MMC_IDMA_CONFIG_CH |
					       SUNXI_MMC_IDMA_CONFIG_OWN);
			cmd->c_resid -= len;
			resid -= len;
			off += len;
			if (desc == 0) {
				/* First descriptor of the chain. */
				dma[desc].dma_config |= htole32(SUNXI_MMC_IDMA_CONFIG_FD);
			}
			if (cmd->c_resid == 0) {
				/* Last descriptor: end of ring, no next. */
				dma[desc].dma_config |= htole32(SUNXI_MMC_IDMA_CONFIG_LD);
				dma[desc].dma_config |= htole32(SUNXI_MMC_IDMA_CONFIG_ER);
				dma[desc].dma_next = 0;
			} else {
				dma[desc].dma_config |=
				    htole32(SUNXI_MMC_IDMA_CONFIG_DIC);
				dma[desc].dma_next = htole32(
				    desc_paddr + ((desc+1) *
				    sizeof(struct sunxi_mmc_idma_descriptor)));
			}
			++desc;
		}
	}
	if (desc == sc->sc_idma_ndesc) {
		aprint_error_dev(sc->sc_dev,
		    "not enough descriptors for %d byte transfer! "
		    "there are %u segments with a max xfer length of %u\n",
		    cmd->c_datalen, map->dm_nsegs, sc->sc_config->idma_xferlen);
		return EIO;
	}

	/* Flush the descriptor ring before handing it to the device. */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_idma_map, 0,
	    sc->sc_idma_size, BUS_DMASYNC_PREWRITE);

	MMC_WRITE(sc, SUNXI_MMC_DLBA, desc_paddr);
	MMC_WRITE(sc, SUNXI_MMC_FTRGLEVEL, sc->sc_config->dma_ftrglevel);

	/* Enable and reset the internal DMA controller. */
	val = MMC_READ(sc, SUNXI_MMC_GCTRL);
	val |= SUNXI_MMC_GCTRL_DMAEN;
	MMC_WRITE(sc, SUNXI_MMC_GCTRL, val);
	val |= SUNXI_MMC_GCTRL_DMARESET;
	MMC_WRITE(sc, SUNXI_MMC_GCTRL, val);

	MMC_WRITE(sc, SUNXI_MMC_DMAC, SUNXI_MMC_DMAC_SOFTRESET);
	/* Reads additionally wait for the IDMA receive interrupt. */
	if (ISSET(cmd->c_flags, SCF_CMD_READ))
		val = SUNXI_MMC_IDST_RECEIVE_INT;
	else
		val = 0;
	MMC_WRITE(sc, SUNXI_MMC_IDIE, val);
	MMC_WRITE(sc, SUNXI_MMC_DMAC,
	    SUNXI_MMC_DMAC_IDMA_ON|SUNXI_MMC_DMAC_FIX_BURST);

	return 0;
}
1075
1076 static void
1077 sunxi_mmc_dma_complete(struct sunxi_mmc_softc *sc, struct sdmmc_command *cmd)
1078 {
1079 MMC_WRITE(sc, SUNXI_MMC_DMAC, 0);
1080 MMC_WRITE(sc, SUNXI_MMC_IDIE, 0);
1081
1082 bus_dmamap_sync(sc->sc_dmat, sc->sc_idma_map, 0,
1083 sc->sc_idma_size, BUS_DMASYNC_POSTWRITE);
1084
1085 if (cmd->c_dmamap == NULL) {
1086 if (ISSET(cmd->c_flags, SCF_CMD_READ)) {
1087 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmabounce_map,
1088 0, cmd->c_datalen, BUS_DMASYNC_POSTREAD);
1089 memcpy(cmd->c_data, sc->sc_dmabounce_buf,
1090 cmd->c_datalen);
1091 } else {
1092 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmabounce_map,
1093 0, cmd->c_datalen, BUS_DMASYNC_POSTWRITE);
1094 }
1095 }
1096 }
1097
/*
 * sdmmc(9) chip hook: submit one command (optionally with data) to the
 * controller and block until it completes or times out.
 *
 * Builds the CMD register value from cmd->c_flags, programs the block
 * size/byte count and interrupt mask, sets up IDMA for data transfers,
 * then sleeps on sc_intr_cv until the interrupt handler marks the
 * command SCF_ITSDONE.  On completion the response registers are read
 * back; on error the FIFO/DMA are reset and the card clock is
 * reprogrammed.  Completion status and any error are reported through
 * cmd->c_error / cmd->c_flags (SCF_ITSDONE), not a return value.
 */
static void
sunxi_mmc_exec_command(sdmmc_chipset_handle_t sch, struct sdmmc_command *cmd)
{
	struct sunxi_mmc_softc *sc = sch;
	uint32_t cmdval = SUNXI_MMC_CMD_START;
	uint32_t imask;
	int retry, error;

	DPRINTF(sc->sc_dev,
	    "opcode %d flags 0x%x data %p datalen %d blklen %d\n",
	    cmd->c_opcode, cmd->c_flags, cmd->c_data, cmd->c_datalen,
	    cmd->c_blklen);

	mutex_enter(&sc->sc_intr_lock);
	/* The controller handles only one command at a time. */
	if (sc->sc_curcmd != NULL) {
		device_printf(sc->sc_dev,
		    "WARNING: driver submitted a command while the controller was busy\n");
		cmd->c_error = EBUSY;
		SET(cmd->c_flags, SCF_ITSDONE);
		mutex_exit(&sc->sc_intr_lock);
		return;
	}
	sc->sc_curcmd = cmd;

	/* CMD0 (GO_IDLE_STATE) also triggers the 80-clock init sequence. */
	if (cmd->c_opcode == 0)
		cmdval |= SUNXI_MMC_CMD_SEND_INIT_SEQ;
	if (cmd->c_flags & SCF_RSP_PRESENT)
		cmdval |= SUNXI_MMC_CMD_RSP_EXP;
	if (cmd->c_flags & SCF_RSP_136)
		cmdval |= SUNXI_MMC_CMD_LONG_RSP;
	if (cmd->c_flags & SCF_RSP_CRC)
		cmdval |= SUNXI_MMC_CMD_CHECK_RSP_CRC;

	/* Always wake on errors and command completion. */
	imask = SUNXI_MMC_INT_ERROR | SUNXI_MMC_INT_CMD_DONE;

	if (cmd->c_datalen > 0) {
		unsigned int nblks;

		cmdval |= SUNXI_MMC_CMD_DATA_EXP | SUNXI_MMC_CMD_WAIT_PRE_OVER;
		if (!ISSET(cmd->c_flags, SCF_CMD_READ)) {
			cmdval |= SUNXI_MMC_CMD_WRITE;
		}

		/* Round a partial or zero-length block up to one block. */
		nblks = cmd->c_datalen / cmd->c_blklen;
		if (nblks == 0 || (cmd->c_datalen % cmd->c_blklen) != 0)
			++nblks;

		if (nblks > 1) {
			/*
			 * Multi-block: have the controller send CMD12
			 * itself and wait for the auto-stop interrupt.
			 */
			cmdval |= SUNXI_MMC_CMD_SEND_AUTO_STOP;
			imask |= SUNXI_MMC_INT_AUTO_CMD_DONE;
		} else {
			imask |= SUNXI_MMC_INT_DATA_OVER;
		}

		MMC_WRITE(sc, SUNXI_MMC_BLKSZ, cmd->c_blklen);
		MMC_WRITE(sc, SUNXI_MMC_BYTECNT, nblks * cmd->c_blklen);
	}

	/* Keep any card (SDIO) interrupt enabled alongside our mask. */
	MMC_WRITE(sc, SUNXI_MMC_IMASK, imask | sc->sc_intr_card);
	/* Clear all pending raw interrupt status bits. */
	MMC_WRITE(sc, SUNXI_MMC_RINT, 0x7fff);

	MMC_WRITE(sc, SUNXI_MMC_A12A,
	    (cmdval & SUNXI_MMC_CMD_SEND_AUTO_STOP) ? 0 : 0xffff);

	MMC_WRITE(sc, SUNXI_MMC_ARG, cmd->c_arg);

	cmd->c_resid = cmd->c_datalen;
	if (cmd->c_resid > 0) {
		/* Build the IDMA descriptor chain and start the engine. */
		cmd->c_error = sunxi_mmc_dma_prepare(sc, cmd);
		if (cmd->c_error != 0) {
			SET(cmd->c_flags, SCF_ITSDONE);
			goto done;
		}
		/*
		 * For reads we also wait for the IDMA receive interrupt;
		 * writes complete on the data-over/auto-stop interrupt.
		 */
		sc->sc_wait_dma = ISSET(cmd->c_flags, SCF_CMD_READ);
		sc->sc_wait_data = true;
	} else {
		sc->sc_wait_dma = false;
		sc->sc_wait_data = false;
	}
	sc->sc_wait_cmd = true;

	DPRINTF(sc->sc_dev, "cmdval = %08x\n", cmdval);

	/* Fire the command; the interrupt handler does the rest. */
	MMC_WRITE(sc, SUNXI_MMC_CMD, cmdval | cmd->c_opcode);

	/* Wait up to 15 seconds (1 second slop) for completion. */
	struct bintime timeout = { .sec = 15, .frac = 0 };
	const struct bintime epsilon = { .sec = 1, .frac = 0 };
	while (!ISSET(cmd->c_flags, SCF_ITSDONE)) {
		error = cv_timedwaitbt(&sc->sc_intr_cv,
		    &sc->sc_intr_lock, &timeout, &epsilon);
		if (error != 0) {
			cmd->c_error = error;
			SET(cmd->c_flags, SCF_ITSDONE);
			goto done;
		}
	}

	if (cmd->c_error == 0 && cmd->c_datalen > 0)
		sunxi_mmc_dma_complete(sc, cmd);

	if (cmd->c_flags & SCF_RSP_PRESENT) {
		if (cmd->c_flags & SCF_RSP_136) {
			cmd->c_resp[0] = MMC_READ(sc, SUNXI_MMC_RESP0);
			cmd->c_resp[1] = MMC_READ(sc, SUNXI_MMC_RESP1);
			cmd->c_resp[2] = MMC_READ(sc, SUNXI_MMC_RESP2);
			cmd->c_resp[3] = MMC_READ(sc, SUNXI_MMC_RESP3);
			if (cmd->c_flags & SCF_RSP_CRC) {
				/*
				 * Shift the 128-bit response right by 8
				 * bits to drop the CRC7/end bit, as the
				 * sdmmc midlayer expects.
				 */
				cmd->c_resp[0] = (cmd->c_resp[0] >> 8) |
				    (cmd->c_resp[1] << 24);
				cmd->c_resp[1] = (cmd->c_resp[1] >> 8) |
				    (cmd->c_resp[2] << 24);
				cmd->c_resp[2] = (cmd->c_resp[2] >> 8) |
				    (cmd->c_resp[3] << 24);
				cmd->c_resp[3] = (cmd->c_resp[3] >> 8);
			}
		} else {
			cmd->c_resp[0] = MMC_READ(sc, SUNXI_MMC_RESP0);
		}
	}

done:
	KASSERT(ISSET(cmd->c_flags, SCF_ITSDONE));
	/* Restore the mask to card interrupts only and ack everything. */
	MMC_WRITE(sc, SUNXI_MMC_IMASK, sc->sc_intr_card);
	MMC_WRITE(sc, SUNXI_MMC_RINT, 0x7fff);
	MMC_WRITE(sc, SUNXI_MMC_IDST, 0x337);
	sc->sc_curcmd = NULL;
	mutex_exit(&sc->sc_intr_lock);

	if (cmd->c_error) {
		/* Recover the controller: reset DMA/FIFO, wait for the
		 * reset bit to clear, then reprogram the card clock. */
		DPRINTF(sc->sc_dev, "i/o error %d\n", cmd->c_error);
		MMC_WRITE(sc, SUNXI_MMC_GCTRL,
		    MMC_READ(sc, SUNXI_MMC_GCTRL) |
		    SUNXI_MMC_GCTRL_DMARESET | SUNXI_MMC_GCTRL_FIFORESET);
		for (retry = 0; retry < 1000; retry++) {
			if (!(MMC_READ(sc, SUNXI_MMC_GCTRL) & SUNXI_MMC_GCTRL_RESET))
				break;
			delay(10);
		}
		sunxi_mmc_update_clock(sc);
	}

	/* Always leave the FIFO reset for the next command. */
	MMC_WRITE(sc, SUNXI_MMC_GCTRL,
	    MMC_READ(sc, SUNXI_MMC_GCTRL) | SUNXI_MMC_GCTRL_FIFORESET);
}
1242
1243 static void
1244 sunxi_mmc_card_enable_intr(sdmmc_chipset_handle_t sch, int enable)
1245 {
1246 struct sunxi_mmc_softc *sc = sch;
1247 uint32_t imask;
1248
1249 mutex_enter(&sc->sc_intr_lock);
1250 imask = MMC_READ(sc, SUNXI_MMC_IMASK);
1251 if (enable)
1252 imask |= SUNXI_MMC_INT_SDIO_INT;
1253 else
1254 imask &= ~SUNXI_MMC_INT_SDIO_INT;
1255 sc->sc_intr_card = imask & SUNXI_MMC_INT_SDIO_INT;
1256 MMC_WRITE(sc, SUNXI_MMC_IMASK, imask);
1257 mutex_exit(&sc->sc_intr_lock);
1258 }
1259
1260 static void
1261 sunxi_mmc_card_intr_ack(sdmmc_chipset_handle_t sch)
1262 {
1263 struct sunxi_mmc_softc *sc = sch;
1264 uint32_t imask;
1265
1266 mutex_enter(&sc->sc_intr_lock);
1267 imask = MMC_READ(sc, SUNXI_MMC_IMASK);
1268 MMC_WRITE(sc, SUNXI_MMC_IMASK, imask | sc->sc_intr_card);
1269 mutex_exit(&sc->sc_intr_lock);
1270 }
1271