/* $NetBSD: sunxi_mmc.c,v 1.20.2.6 2018/11/26 01:52:20 pgoyette Exp $ */

/*-
 * Copyright (c) 2014-2017 Jared McNeill <jmcneill@invisible.ca>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "opt_sunximmc.h"

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sunxi_mmc.c,v 1.20.2.6 2018/11/26 01:52:20 pgoyette Exp $");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/device.h>
#include <sys/intr.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/gpio.h>

#include <dev/sdmmc/sdmmcvar.h>
#include <dev/sdmmc/sdmmcchip.h>
#include <dev/sdmmc/sdmmc_ioreg.h>

#include <dev/fdt/fdtvar.h>

#include <arm/sunxi/sunxi_mmc.h>

#ifdef SUNXI_MMC_DEBUG
static int sunxi_mmc_debug = SUNXI_MMC_DEBUG;
#define DPRINTF(dev, fmt, ...)					\
do {								\
	if (sunxi_mmc_debug & __BIT(device_unit(dev)))		\
		device_printf((dev), fmt, ##__VA_ARGS__);	\
} while (0)
#else
#define DPRINTF(dev, fmt, ...)	((void)0)
#endif

enum sunxi_mmc_timing {
	SUNXI_MMC_TIMING_400K,
	SUNXI_MMC_TIMING_25M,
	SUNXI_MMC_TIMING_50M,
	SUNXI_MMC_TIMING_50M_DDR,
	SUNXI_MMC_TIMING_50M_DDR_8BIT,
};

struct sunxi_mmc_delay {
	u_int	output_phase;
	u_int	sample_phase;
};

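/*
 * Per-timing clock delays. output_phase and sample_phase are handed to
 * clk_set_rate() on the "output" and "sample" phase clocks by
 * sunxi_mmc_set_clock(); the values are presumably interpreted as degrees
 * of phase shift by the SoC's MMC phase clock driver.
 */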
static const struct sunxi_mmc_delay sun7i_mmc_delays[] = {
	[SUNXI_MMC_TIMING_400K] = { 180, 180 },
	[SUNXI_MMC_TIMING_25M] = { 180, 75 },
	[SUNXI_MMC_TIMING_50M] = { 90, 120 },
	[SUNXI_MMC_TIMING_50M_DDR] = { 60, 120 },
	[SUNXI_MMC_TIMING_50M_DDR_8BIT] = { 90, 180 },
};

static const struct sunxi_mmc_delay sun9i_mmc_delays[] = {
	[SUNXI_MMC_TIMING_400K] = { 180, 180 },
	[SUNXI_MMC_TIMING_25M] = { 180, 75 },
	[SUNXI_MMC_TIMING_50M] = { 150, 120 },
	[SUNXI_MMC_TIMING_50M_DDR] = { 54, 36 },
	[SUNXI_MMC_TIMING_50M_DDR_8BIT] = { 72, 72 },
};

#define SUNXI_MMC_NDESC	64

struct sunxi_mmc_softc;

static int	sunxi_mmc_match(device_t, cfdata_t, void *);
static void	sunxi_mmc_attach(device_t, device_t, void *);
static void	sunxi_mmc_attach_i(device_t);

static int	sunxi_mmc_intr(void *);
static int	sunxi_mmc_dmabounce_setup(struct sunxi_mmc_softc *);
static int	sunxi_mmc_idma_setup(struct sunxi_mmc_softc *);

static int	sunxi_mmc_host_reset(sdmmc_chipset_handle_t);
static uint32_t	sunxi_mmc_host_ocr(sdmmc_chipset_handle_t);
static int	sunxi_mmc_host_maxblklen(sdmmc_chipset_handle_t);
static int	sunxi_mmc_card_detect(sdmmc_chipset_handle_t);
static int	sunxi_mmc_write_protect(sdmmc_chipset_handle_t);
static int	sunxi_mmc_bus_power(sdmmc_chipset_handle_t, uint32_t);
static int	sunxi_mmc_bus_clock(sdmmc_chipset_handle_t, int, bool);
static int	sunxi_mmc_bus_width(sdmmc_chipset_handle_t, int);
static int	sunxi_mmc_bus_rod(sdmmc_chipset_handle_t, int);
static int	sunxi_mmc_signal_voltage(sdmmc_chipset_handle_t, int);
static int	sunxi_mmc_execute_tuning(sdmmc_chipset_handle_t, int);
static void	sunxi_mmc_exec_command(sdmmc_chipset_handle_t,
		    struct sdmmc_command *);
static void	sunxi_mmc_card_enable_intr(sdmmc_chipset_handle_t, int);
static void	sunxi_mmc_card_intr_ack(sdmmc_chipset_handle_t);

static struct sdmmc_chip_functions sunxi_mmc_chip_functions = {
	.host_reset = sunxi_mmc_host_reset,
	.host_ocr = sunxi_mmc_host_ocr,
	.host_maxblklen = sunxi_mmc_host_maxblklen,
	.card_detect = sunxi_mmc_card_detect,
	.write_protect = sunxi_mmc_write_protect,
	.bus_power = sunxi_mmc_bus_power,
	.bus_clock_ddr = sunxi_mmc_bus_clock,
	.bus_width = sunxi_mmc_bus_width,
	.bus_rod = sunxi_mmc_bus_rod,
	.signal_voltage = sunxi_mmc_signal_voltage,
	.execute_tuning = sunxi_mmc_execute_tuning,
	.exec_command = sunxi_mmc_exec_command,
	.card_enable_intr = sunxi_mmc_card_enable_intr,
	.card_intr_ack = sunxi_mmc_card_intr_ack,
};

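/*
 * Per-SoC controller parameters. idma_xferlen is the largest number of
 * bytes a single IDMA descriptor may carry, dma_ftrglevel is written
 * verbatim to the FIFO threshold register, and delays (when non-NULL)
 * provides the per-timing clock phases above.
 */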
struct sunxi_mmc_config {
	u_int idma_xferlen;
	u_int flags;
#define SUNXI_MMC_FLAG_CALIB_REG	0x01
#define SUNXI_MMC_FLAG_NEW_TIMINGS	0x02
#define SUNXI_MMC_FLAG_MASK_DATA0	0x04
#define SUNXI_MMC_FLAG_HS200		0x08
	const struct sunxi_mmc_delay *delays;
	uint32_t dma_ftrglevel;
};

struct sunxi_mmc_softc {
	device_t sc_dev;
	bus_space_tag_t sc_bst;
	bus_space_handle_t sc_bsh;
	bus_dma_tag_t sc_dmat;
	int sc_phandle;

	void *sc_ih;
	kmutex_t sc_intr_lock;
	kcondvar_t sc_intr_cv;
	kcondvar_t sc_idst_cv;

	int sc_mmc_width;
	int sc_mmc_present;

	u_int sc_max_frequency;

	device_t sc_sdmmc_dev;

	struct sunxi_mmc_config *sc_config;

	bus_dma_segment_t sc_idma_segs[1];
	int sc_idma_nsegs;
	bus_size_t sc_idma_size;
	bus_dmamap_t sc_idma_map;
	int sc_idma_ndesc;
	void *sc_idma_desc;

	bus_dmamap_t sc_dmabounce_map;
	void *sc_dmabounce_buf;
	size_t sc_dmabounce_buflen;

	uint32_t sc_intr_rint;
	uint32_t sc_idma_idst;

	struct clk *sc_clk_ahb;
	struct clk *sc_clk_mmc;
	struct clk *sc_clk_output;
	struct clk *sc_clk_sample;

	struct fdtbus_reset *sc_rst_ahb;

	struct fdtbus_gpio_pin *sc_gpio_cd;
	int sc_gpio_cd_inverted;
	struct fdtbus_gpio_pin *sc_gpio_wp;
	int sc_gpio_wp_inverted;

	struct fdtbus_regulator *sc_reg_vqmmc;

	struct fdtbus_mmc_pwrseq *sc_pwrseq;

	bool sc_non_removable;
	bool sc_broken_cd;
};

CFATTACH_DECL_NEW(sunxi_mmc, sizeof(struct sunxi_mmc_softc),
	sunxi_mmc_match, sunxi_mmc_attach, NULL, NULL);

#define MMC_WRITE(sc, reg, val) \
	bus_space_write_4((sc)->sc_bst, (sc)->sc_bsh, (reg), (val))
#define MMC_READ(sc, reg) \
	bus_space_read_4((sc)->sc_bst, (sc)->sc_bsh, (reg))

static const struct sunxi_mmc_config sun4i_a10_mmc_config = {
	.idma_xferlen = 0x2000,
	.dma_ftrglevel = 0x20070008,
	.delays = NULL,
	.flags = 0,
};

static const struct sunxi_mmc_config sun5i_a13_mmc_config = {
	.idma_xferlen = 0x10000,
	.dma_ftrglevel = 0x20070008,
	.delays = NULL,
	.flags = 0,
};

static const struct sunxi_mmc_config sun7i_a20_mmc_config = {
	.idma_xferlen = 0x2000,
	.dma_ftrglevel = 0x20070008,
	.delays = sun7i_mmc_delays,
	.flags = 0,
};

static const struct sunxi_mmc_config sun8i_a83t_emmc_config = {
	.idma_xferlen = 0x10000,
	.dma_ftrglevel = 0x20070008,
	.delays = NULL,
	.flags = SUNXI_MMC_FLAG_NEW_TIMINGS,
};

static const struct sunxi_mmc_config sun9i_a80_mmc_config = {
	.idma_xferlen = 0x10000,
	.dma_ftrglevel = 0x200f0010,
	.delays = sun9i_mmc_delays,
	.flags = 0,
};

static const struct sunxi_mmc_config sun50i_a64_mmc_config = {
	.idma_xferlen = 0x10000,
	.dma_ftrglevel = 0x20070008,
	.delays = NULL,
	.flags = SUNXI_MMC_FLAG_CALIB_REG |
		 SUNXI_MMC_FLAG_NEW_TIMINGS |
		 SUNXI_MMC_FLAG_MASK_DATA0,
};

static const struct sunxi_mmc_config sun50i_a64_emmc_config = {
	.idma_xferlen = 0x2000,
	.dma_ftrglevel = 0x20070008,
	.delays = NULL,
	.flags = SUNXI_MMC_FLAG_CALIB_REG |
		 SUNXI_MMC_FLAG_NEW_TIMINGS |
		 SUNXI_MMC_FLAG_HS200,
};

static const struct sunxi_mmc_config sun50i_h6_mmc_config = {
	.idma_xferlen = 0x10000,
	.dma_ftrglevel = 0x20070008,
	.delays = NULL,
	.flags = SUNXI_MMC_FLAG_CALIB_REG |
		 SUNXI_MMC_FLAG_NEW_TIMINGS |
		 SUNXI_MMC_FLAG_MASK_DATA0,
};

static const struct sunxi_mmc_config sun50i_h6_emmc_config = {
	.idma_xferlen = 0x2000,
	.dma_ftrglevel = 0x20070008,
	.delays = NULL,
	.flags = SUNXI_MMC_FLAG_CALIB_REG,
};

static const struct of_compat_data compat_data[] = {
	{ "allwinner,sun4i-a10-mmc",	(uintptr_t)&sun4i_a10_mmc_config },
	{ "allwinner,sun5i-a13-mmc",	(uintptr_t)&sun5i_a13_mmc_config },
	{ "allwinner,sun7i-a20-mmc",	(uintptr_t)&sun7i_a20_mmc_config },
	{ "allwinner,sun8i-a83t-emmc",	(uintptr_t)&sun8i_a83t_emmc_config },
	{ "allwinner,sun9i-a80-mmc",	(uintptr_t)&sun9i_a80_mmc_config },
	{ "allwinner,sun50i-a64-mmc",	(uintptr_t)&sun50i_a64_mmc_config },
	{ "allwinner,sun50i-a64-emmc",	(uintptr_t)&sun50i_a64_emmc_config },
	{ "allwinner,sun50i-h6-mmc",	(uintptr_t)&sun50i_h6_mmc_config },
	{ "allwinner,sun50i-h6-emmc",	(uintptr_t)&sun50i_h6_emmc_config },
	{ NULL }
};

static int
sunxi_mmc_match(device_t parent, cfdata_t cf, void *aux)
{
	struct fdt_attach_args * const faa = aux;

	return of_match_compat_data(faa->faa_phandle, compat_data);
}

static void
sunxi_mmc_attach(device_t parent, device_t self, void *aux)
{
	struct sunxi_mmc_softc * const sc = device_private(self);
	struct fdt_attach_args * const faa = aux;
	const int phandle = faa->faa_phandle;
	char intrstr[128];
	bus_addr_t addr;
	bus_size_t size;

	if (fdtbus_get_reg(phandle, 0, &addr, &size) != 0) {
		aprint_error(": couldn't get registers\n");
		return;
	}

	sc->sc_clk_ahb = fdtbus_clock_get(phandle, "ahb");
	sc->sc_clk_mmc = fdtbus_clock_get(phandle, "mmc");
	sc->sc_clk_output = fdtbus_clock_get(phandle, "output");
	sc->sc_clk_sample = fdtbus_clock_get(phandle, "sample");

#if notyet
	if (sc->sc_clk_ahb == NULL || sc->sc_clk_mmc == NULL ||
	    sc->sc_clk_output == NULL || sc->sc_clk_sample == NULL) {
#else
	if (sc->sc_clk_ahb == NULL || sc->sc_clk_mmc == NULL) {
#endif
		aprint_error(": couldn't get clocks\n");
		return;
	}

	sc->sc_rst_ahb = fdtbus_reset_get(phandle, "ahb");

	sc->sc_reg_vqmmc = fdtbus_regulator_acquire(phandle, "vqmmc-supply");

	sc->sc_pwrseq = fdtbus_mmc_pwrseq_get(phandle);

	if (clk_enable(sc->sc_clk_ahb) != 0 ||
	    clk_enable(sc->sc_clk_mmc) != 0) {
		aprint_error(": couldn't enable clocks\n");
		return;
	}

	if (sc->sc_rst_ahb != NULL) {
		if (fdtbus_reset_deassert(sc->sc_rst_ahb) != 0) {
			aprint_error(": couldn't de-assert resets\n");
			return;
		}
	}

	sc->sc_dev = self;
	sc->sc_phandle = phandle;
	sc->sc_config = (void *)of_search_compatible(phandle, compat_data)->data;
	sc->sc_bst = faa->faa_bst;
	sc->sc_dmat = faa->faa_dmat;
	mutex_init(&sc->sc_intr_lock, MUTEX_DEFAULT, IPL_BIO);
	cv_init(&sc->sc_intr_cv, "awinmmcirq");
	cv_init(&sc->sc_idst_cv, "awinmmcdma");

	if (bus_space_map(sc->sc_bst, addr, size, 0, &sc->sc_bsh) != 0) {
		aprint_error(": couldn't map registers\n");
		return;
	}

	aprint_naive("\n");
	aprint_normal(": SD/MMC controller\n");

	sc->sc_gpio_cd = fdtbus_gpio_acquire(phandle, "cd-gpios",
	    GPIO_PIN_INPUT);
	sc->sc_gpio_wp = fdtbus_gpio_acquire(phandle, "wp-gpios",
	    GPIO_PIN_INPUT);

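	/*
	 * The "cd-inverted" and "wp-inverted" properties flip the sense in
	 * which the card-detect and write-protect GPIOs are interpreted;
	 * without them the raw GPIO value is inverted before use.
	 */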
	sc->sc_gpio_cd_inverted = of_hasprop(phandle, "cd-inverted") ? 0 : 1;
	sc->sc_gpio_wp_inverted = of_hasprop(phandle, "wp-inverted") ? 0 : 1;

	sc->sc_non_removable = of_hasprop(phandle, "non-removable");
	sc->sc_broken_cd = of_hasprop(phandle, "broken-cd");

	if (of_getprop_uint32(phandle, "max-frequency", &sc->sc_max_frequency))
		sc->sc_max_frequency = 52000000;

	if (sunxi_mmc_dmabounce_setup(sc) != 0 ||
	    sunxi_mmc_idma_setup(sc) != 0) {
		aprint_error_dev(self, "failed to setup DMA\n");
		return;
	}

	if (!fdtbus_intr_str(phandle, 0, intrstr, sizeof(intrstr))) {
		aprint_error_dev(self, "failed to decode interrupt\n");
		return;
	}

	sc->sc_ih = fdtbus_intr_establish(phandle, 0, IPL_BIO, FDT_INTR_MPSAFE,
	    sunxi_mmc_intr, sc);
	if (sc->sc_ih == NULL) {
		aprint_error_dev(self, "failed to establish interrupt on %s\n",
		    intrstr);
		return;
	}
	aprint_normal_dev(self, "interrupting on %s\n", intrstr);

	config_interrupts(self, sunxi_mmc_attach_i);
}

static int
sunxi_mmc_dmabounce_setup(struct sunxi_mmc_softc *sc)
{
	bus_dma_segment_t ds[1];
	int error, rseg;

	sc->sc_dmabounce_buflen = sunxi_mmc_host_maxblklen(sc);
	error = bus_dmamem_alloc(sc->sc_dmat, sc->sc_dmabounce_buflen, 0,
	    sc->sc_dmabounce_buflen, ds, 1, &rseg, BUS_DMA_WAITOK);
	if (error)
		return error;
	error = bus_dmamem_map(sc->sc_dmat, ds, 1, sc->sc_dmabounce_buflen,
	    &sc->sc_dmabounce_buf, BUS_DMA_WAITOK);
	if (error)
		goto free;
	error = bus_dmamap_create(sc->sc_dmat, sc->sc_dmabounce_buflen, 1,
	    sc->sc_dmabounce_buflen, 0, BUS_DMA_WAITOK, &sc->sc_dmabounce_map);
	if (error)
		goto unmap;
	error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmabounce_map,
	    sc->sc_dmabounce_buf, sc->sc_dmabounce_buflen, NULL,
	    BUS_DMA_WAITOK);
	if (error)
		goto destroy;
	return 0;

destroy:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmabounce_map);
unmap:
	bus_dmamem_unmap(sc->sc_dmat, sc->sc_dmabounce_buf,
	    sc->sc_dmabounce_buflen);
free:
	bus_dmamem_free(sc->sc_dmat, ds, rseg);
	return error;
}

static int
sunxi_mmc_idma_setup(struct sunxi_mmc_softc *sc)
{
	int error;

	sc->sc_idma_ndesc = SUNXI_MMC_NDESC;
	sc->sc_idma_size = sizeof(struct sunxi_mmc_idma_descriptor) *
	    sc->sc_idma_ndesc;
	error = bus_dmamem_alloc(sc->sc_dmat, sc->sc_idma_size, 0,
	    sc->sc_idma_size, sc->sc_idma_segs, 1,
	    &sc->sc_idma_nsegs, BUS_DMA_WAITOK);
	if (error)
		return error;
	error = bus_dmamem_map(sc->sc_dmat, sc->sc_idma_segs,
	    sc->sc_idma_nsegs, sc->sc_idma_size,
	    &sc->sc_idma_desc, BUS_DMA_WAITOK);
	if (error)
		goto free;
	error = bus_dmamap_create(sc->sc_dmat, sc->sc_idma_size, 1,
	    sc->sc_idma_size, 0, BUS_DMA_WAITOK, &sc->sc_idma_map);
	if (error)
		goto unmap;
	error = bus_dmamap_load(sc->sc_dmat, sc->sc_idma_map,
	    sc->sc_idma_desc, sc->sc_idma_size, NULL, BUS_DMA_WAITOK);
	if (error)
		goto destroy;
	return 0;

destroy:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_idma_map);
unmap:
	bus_dmamem_unmap(sc->sc_dmat, sc->sc_idma_desc, sc->sc_idma_size);
free:
	bus_dmamem_free(sc->sc_dmat, sc->sc_idma_segs, sc->sc_idma_nsegs);
	return error;
}

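/*
 * Program the module clock for the requested card clock. freq is in kHz
 * (as passed down from the sdmmc layer); in DDR mode the module clock is
 * doubled. If the SoC has per-timing delays, the output and sample phase
 * clocks are also set for the selected timing.
 */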
static int
sunxi_mmc_set_clock(struct sunxi_mmc_softc *sc, u_int freq, bool ddr)
{
	const struct sunxi_mmc_delay *delays;
	int error, timing = SUNXI_MMC_TIMING_400K;

	if (sc->sc_config->delays) {
		if (freq <= 400) {
			timing = SUNXI_MMC_TIMING_400K;
		} else if (freq <= 25000) {
			timing = SUNXI_MMC_TIMING_25M;
		} else if (freq <= 52000) {
			if (ddr) {
				timing = sc->sc_mmc_width == 8 ?
				    SUNXI_MMC_TIMING_50M_DDR_8BIT :
				    SUNXI_MMC_TIMING_50M_DDR;
			} else {
				timing = SUNXI_MMC_TIMING_50M;
			}
		} else
			return EINVAL;
	}
	if (sc->sc_max_frequency) {
		if (freq * 1000 > sc->sc_max_frequency)
			return EINVAL;
	}

	error = clk_set_rate(sc->sc_clk_mmc, (freq * 1000) << ddr);
	if (error != 0)
		return error;

	if (sc->sc_config->delays == NULL)
		return 0;

	delays = &sc->sc_config->delays[timing];

	if (sc->sc_clk_sample) {
		error = clk_set_rate(sc->sc_clk_sample, delays->sample_phase);
		if (error != 0)
			return error;
	}
	if (sc->sc_clk_output) {
		error = clk_set_rate(sc->sc_clk_output, delays->output_phase);
		if (error != 0)
			return error;
	}

	return 0;
}

static void
sunxi_mmc_attach_i(device_t self)
{
	struct sunxi_mmc_softc *sc = device_private(self);
	const u_int flags = sc->sc_config->flags;
	struct sdmmcbus_attach_args saa;
	uint32_t width;

	if (sc->sc_pwrseq)
		fdtbus_mmc_pwrseq_pre_power_on(sc->sc_pwrseq);

	sunxi_mmc_host_reset(sc);
	sunxi_mmc_bus_width(sc, 1);
	sunxi_mmc_set_clock(sc, 400, false);

	if (sc->sc_pwrseq)
		fdtbus_mmc_pwrseq_post_power_on(sc->sc_pwrseq);

	if (of_getprop_uint32(sc->sc_phandle, "bus-width", &width) != 0)
		width = 4;

	memset(&saa, 0, sizeof(saa));
	saa.saa_busname = "sdmmc";
	saa.saa_sct = &sunxi_mmc_chip_functions;
	saa.saa_sch = sc;
	saa.saa_dmat = sc->sc_dmat;
	saa.saa_clkmin = 400;
	saa.saa_clkmax = sc->sc_max_frequency / 1000;
	saa.saa_caps = SMC_CAPS_DMA |
		       SMC_CAPS_MULTI_SEG_DMA |
		       SMC_CAPS_AUTO_STOP |
		       SMC_CAPS_SD_HIGHSPEED |
		       SMC_CAPS_MMC_HIGHSPEED |
		       SMC_CAPS_POLLING;

	if (sc->sc_config->delays || (flags & SUNXI_MMC_FLAG_NEW_TIMINGS))
		saa.saa_caps |= SMC_CAPS_MMC_DDR52;

	if (flags & SUNXI_MMC_FLAG_HS200)
		saa.saa_caps |= SMC_CAPS_MMC_HS200;

	if (width == 4)
		saa.saa_caps |= SMC_CAPS_4BIT_MODE;
	if (width == 8)
		saa.saa_caps |= SMC_CAPS_8BIT_MODE;

	if (sc->sc_gpio_cd)
		saa.saa_caps |= SMC_CAPS_POLL_CARD_DET;

	sc->sc_sdmmc_dev = config_found(self, &saa, NULL);
}

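/*
 * Interrupt handler. IDST carries internal DMA engine status and wakes
 * sunxi_mmc_exec_command() through sc_idst_cv; RINT carries command/data
 * interrupt status and wakes sunxi_mmc_wait_rint() through sc_intr_cv.
 * SDIO card interrupts are passed straight up to the sdmmc layer.
 */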
static int
sunxi_mmc_intr(void *priv)
{
	struct sunxi_mmc_softc *sc = priv;
	uint32_t idst, rint, imask;

	mutex_enter(&sc->sc_intr_lock);
	idst = MMC_READ(sc, SUNXI_MMC_IDST);
	rint = MMC_READ(sc, SUNXI_MMC_RINT);
	if (!idst && !rint) {
		mutex_exit(&sc->sc_intr_lock);
		return 0;
	}
	MMC_WRITE(sc, SUNXI_MMC_IDST, idst);
	MMC_WRITE(sc, SUNXI_MMC_RINT, rint & ~SUNXI_MMC_INT_SDIO_INT);

	DPRINTF(sc->sc_dev, "mmc intr idst=%08X rint=%08X\n",
	    idst, rint);

	if (idst != 0) {
		MMC_WRITE(sc, SUNXI_MMC_IDIE, 0);
		sc->sc_idma_idst |= idst;
		cv_broadcast(&sc->sc_idst_cv);
	}

	if ((rint & ~SUNXI_MMC_INT_SDIO_INT) != 0) {
		imask = MMC_READ(sc, SUNXI_MMC_IMASK);
		MMC_WRITE(sc, SUNXI_MMC_IMASK, imask & ~SUNXI_MMC_INT_SDIO_INT);
		sc->sc_intr_rint |= (rint & ~SUNXI_MMC_INT_SDIO_INT);
		cv_broadcast(&sc->sc_intr_cv);
	}

	if ((rint & SUNXI_MMC_INT_SDIO_INT) != 0) {
		sdmmc_card_intr(sc->sc_sdmmc_dev);
	}

	mutex_exit(&sc->sc_intr_lock);

	return 1;
}

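/*
 * Wait for any of the interrupt bits in "mask" to be accumulated in
 * sc_intr_rint. In polling mode RINT is read directly with a 1ms delay
 * between reads; otherwise we sleep on sc_intr_cv, which the interrupt
 * handler broadcasts. The timeout is given in ticks.
 */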
static int
sunxi_mmc_wait_rint(struct sunxi_mmc_softc *sc, uint32_t mask,
    int timeout, bool poll)
{
	int retry;
	int error;

	KASSERT(mutex_owned(&sc->sc_intr_lock));

	if (sc->sc_intr_rint & mask)
		return 0;

	if (poll)
		retry = timeout / hz * 1000;
	else
		retry = timeout / hz;

	while (retry > 0) {
		if (poll) {
			sc->sc_intr_rint |= MMC_READ(sc, SUNXI_MMC_RINT);
		} else {
			error = cv_timedwait(&sc->sc_intr_cv,
			    &sc->sc_intr_lock, hz);
			if (error && error != EWOULDBLOCK)
				return error;
		}
		if (sc->sc_intr_rint & mask)
			return 0;
		if (poll)
			delay(1000);
		--retry;
	}

	return ETIMEDOUT;
}

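/*
 * Soft-reset the controller: wait for the reset bit to clear, set the
 * data/response timeout to the maximum, mask and acknowledge all pending
 * interrupts, and re-enable the global interrupt output.
 */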
static int
sunxi_mmc_host_reset(sdmmc_chipset_handle_t sch)
{
	struct sunxi_mmc_softc *sc = sch;
	uint32_t gctrl;
	int retry = 1000;

	DPRINTF(sc->sc_dev, "host reset\n");

	gctrl = MMC_READ(sc, SUNXI_MMC_GCTRL);
	gctrl |= SUNXI_MMC_GCTRL_RESET;
	MMC_WRITE(sc, SUNXI_MMC_GCTRL, gctrl);
	while (--retry > 0) {
		if (!(MMC_READ(sc, SUNXI_MMC_GCTRL) & SUNXI_MMC_GCTRL_RESET))
			break;
		delay(100);
	}

	MMC_WRITE(sc, SUNXI_MMC_TIMEOUT, 0xffffffff);

	MMC_WRITE(sc, SUNXI_MMC_IMASK, 0);

	MMC_WRITE(sc, SUNXI_MMC_RINT, 0xffffffff);

	gctrl = MMC_READ(sc, SUNXI_MMC_GCTRL);
	gctrl |= SUNXI_MMC_GCTRL_INTEN;
	gctrl &= ~SUNXI_MMC_GCTRL_WAIT_MEM_ACCESS_DONE;
	gctrl &= ~SUNXI_MMC_GCTRL_ACCESS_BY_AHB;
	MMC_WRITE(sc, SUNXI_MMC_GCTRL, gctrl);

	return 0;
}

static uint32_t
sunxi_mmc_host_ocr(sdmmc_chipset_handle_t sch)
{
	return MMC_OCR_3_2V_3_3V | MMC_OCR_3_3V_3_4V | MMC_OCR_HCS;
}

static int
sunxi_mmc_host_maxblklen(sdmmc_chipset_handle_t sch)
{
	return 8192;
}

static int
sunxi_mmc_card_detect(sdmmc_chipset_handle_t sch)
{
	struct sunxi_mmc_softc *sc = sch;

	if (sc->sc_non_removable || sc->sc_broken_cd) {
		/*
		 * Non-removable or broken card detect flag set in
		 * DT, assume always present
		 */
		return 1;
	} else if (sc->sc_gpio_cd != NULL) {
		/* Use card detect GPIO */
		int v = 0, i;
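		/*
		 * Debounce: sample the pin five times, 1ms apart, and only
		 * change the cached state when all samples agree.
		 */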
		for (i = 0; i < 5; i++) {
			v += (fdtbus_gpio_read(sc->sc_gpio_cd) ^
			    sc->sc_gpio_cd_inverted);
			delay(1000);
		}
		if (v == 5)
			sc->sc_mmc_present = 0;
		else if (v == 0)
			sc->sc_mmc_present = 1;
		return sc->sc_mmc_present;
	} else {
		/* Use CARD_PRESENT field of SD_STATUS register */
		const uint32_t present = MMC_READ(sc, SUNXI_MMC_STATUS) &
		    SUNXI_MMC_STATUS_CARD_PRESENT;
		return present != 0;
	}
}

static int
sunxi_mmc_write_protect(sdmmc_chipset_handle_t sch)
{
	struct sunxi_mmc_softc *sc = sch;

	if (sc->sc_gpio_wp == NULL) {
		return 0;	/* no write protect pin, assume rw */
	} else {
		return fdtbus_gpio_read(sc->sc_gpio_wp) ^
		    sc->sc_gpio_wp_inverted;
	}
}

static int
sunxi_mmc_bus_power(sdmmc_chipset_handle_t sch, uint32_t ocr)
{
	return 0;
}

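/*
 * Ask the controller to latch the new clock/CLKCR settings by issuing a
 * command with the "update clock only" bit set, then poll until the
 * controller clears the start bit.
 */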
static int
sunxi_mmc_update_clock(struct sunxi_mmc_softc *sc)
{
	uint32_t cmd;
	int retry;

	DPRINTF(sc->sc_dev, "update clock\n");

	cmd = SUNXI_MMC_CMD_START |
	      SUNXI_MMC_CMD_UPCLK_ONLY |
	      SUNXI_MMC_CMD_WAIT_PRE_OVER;
	MMC_WRITE(sc, SUNXI_MMC_CMD, cmd);
	retry = 0xfffff;
	while (--retry > 0) {
		if (!(MMC_READ(sc, SUNXI_MMC_CMD) & SUNXI_MMC_CMD_START))
			break;
		delay(10);
	}

	if (retry == 0) {
		aprint_error_dev(sc->sc_dev, "timeout updating clock\n");
		DPRINTF(sc->sc_dev, "GCTRL: 0x%08x\n",
		    MMC_READ(sc, SUNXI_MMC_GCTRL));
		DPRINTF(sc->sc_dev, "CLKCR: 0x%08x\n",
		    MMC_READ(sc, SUNXI_MMC_CLKCR));
		DPRINTF(sc->sc_dev, "TIMEOUT: 0x%08x\n",
		    MMC_READ(sc, SUNXI_MMC_TIMEOUT));
		DPRINTF(sc->sc_dev, "WIDTH: 0x%08x\n",
		    MMC_READ(sc, SUNXI_MMC_WIDTH));
		DPRINTF(sc->sc_dev, "CMD: 0x%08x\n",
		    MMC_READ(sc, SUNXI_MMC_CMD));
		DPRINTF(sc->sc_dev, "MINT: 0x%08x\n",
		    MMC_READ(sc, SUNXI_MMC_MINT));
		DPRINTF(sc->sc_dev, "RINT: 0x%08x\n",
		    MMC_READ(sc, SUNXI_MMC_RINT));
		DPRINTF(sc->sc_dev, "STATUS: 0x%08x\n",
		    MMC_READ(sc, SUNXI_MMC_STATUS));
		return ETIMEDOUT;
	}

	return 0;
}

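/*
 * Change the card clock. The card clock is first gated, the divider and
 * timing mode are programmed, the module clock is set via
 * sunxi_mmc_set_clock(), and the card clock is re-enabled; every CLKCR
 * change is committed with sunxi_mmc_update_clock().
 */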
static int
sunxi_mmc_bus_clock(sdmmc_chipset_handle_t sch, int freq, bool ddr)
{
	struct sunxi_mmc_softc *sc = sch;
	uint32_t clkcr, gctrl, ntsr;
	const u_int flags = sc->sc_config->flags;

	clkcr = MMC_READ(sc, SUNXI_MMC_CLKCR);
	if (clkcr & SUNXI_MMC_CLKCR_CARDCLKON) {
		clkcr &= ~SUNXI_MMC_CLKCR_CARDCLKON;
		/* Controllers with the MASK_DATA0 quirk need DATA0 masked
		 * while the card clock is reprogrammed; test the config
		 * flag, not the CLKCR register bit. */
		if (flags & SUNXI_MMC_FLAG_MASK_DATA0)
			clkcr |= SUNXI_MMC_CLKCR_MASK_DATA0;
		MMC_WRITE(sc, SUNXI_MMC_CLKCR, clkcr);
		if (sunxi_mmc_update_clock(sc) != 0)
			return 1;
		if (flags & SUNXI_MMC_FLAG_MASK_DATA0) {
			clkcr = MMC_READ(sc, SUNXI_MMC_CLKCR);
			clkcr &= ~SUNXI_MMC_CLKCR_MASK_DATA0;
			MMC_WRITE(sc, SUNXI_MMC_CLKCR, clkcr);
		}
	}

	if (freq) {

		clkcr &= ~SUNXI_MMC_CLKCR_DIV;
		clkcr |= __SHIFTIN(ddr, SUNXI_MMC_CLKCR_DIV);
		MMC_WRITE(sc, SUNXI_MMC_CLKCR, clkcr);

		if (flags & SUNXI_MMC_FLAG_NEW_TIMINGS) {
			ntsr = MMC_READ(sc, SUNXI_MMC_NTSR);
			ntsr |= SUNXI_MMC_NTSR_MODE_SELECT;
			MMC_WRITE(sc, SUNXI_MMC_NTSR, ntsr);
		}

		if (flags & SUNXI_MMC_FLAG_CALIB_REG)
			MMC_WRITE(sc, SUNXI_MMC_SAMP_DL, SUNXI_MMC_SAMP_DL_SW_EN);

		if (sunxi_mmc_update_clock(sc) != 0)
			return 1;

		gctrl = MMC_READ(sc, SUNXI_MMC_GCTRL);
		if (ddr)
			gctrl |= SUNXI_MMC_GCTRL_DDR_MODE;
		else
			gctrl &= ~SUNXI_MMC_GCTRL_DDR_MODE;
		MMC_WRITE(sc, SUNXI_MMC_GCTRL, gctrl);

		if (sunxi_mmc_set_clock(sc, freq, ddr) != 0)
			return 1;

		clkcr |= SUNXI_MMC_CLKCR_CARDCLKON;
		if (flags & SUNXI_MMC_FLAG_MASK_DATA0)
			clkcr |= SUNXI_MMC_CLKCR_MASK_DATA0;
		MMC_WRITE(sc, SUNXI_MMC_CLKCR, clkcr);
		if (sunxi_mmc_update_clock(sc) != 0)
			return 1;
		if (flags & SUNXI_MMC_FLAG_MASK_DATA0) {
			clkcr = MMC_READ(sc, SUNXI_MMC_CLKCR);
			clkcr &= ~SUNXI_MMC_CLKCR_MASK_DATA0;
			MMC_WRITE(sc, SUNXI_MMC_CLKCR, clkcr);
		}
	}

	return 0;
}

static int
sunxi_mmc_bus_width(sdmmc_chipset_handle_t sch, int width)
{
	struct sunxi_mmc_softc *sc = sch;

	DPRINTF(sc->sc_dev, "width = %d\n", width);

	switch (width) {
	case 1:
		MMC_WRITE(sc, SUNXI_MMC_WIDTH, SUNXI_MMC_WIDTH_1);
		break;
	case 4:
		MMC_WRITE(sc, SUNXI_MMC_WIDTH, SUNXI_MMC_WIDTH_4);
		break;
	case 8:
		MMC_WRITE(sc, SUNXI_MMC_WIDTH, SUNXI_MMC_WIDTH_8);
		break;
	default:
		return 1;
	}

	sc->sc_mmc_width = width;

	return 0;
}

static int
sunxi_mmc_bus_rod(sdmmc_chipset_handle_t sch, int on)
{
	return -1;
}

static int
sunxi_mmc_signal_voltage(sdmmc_chipset_handle_t sch, int signal_voltage)
{
	struct sunxi_mmc_softc *sc = sch;
	u_int uvol;
	int error;

	if (sc->sc_reg_vqmmc == NULL)
		return 0;

	switch (signal_voltage) {
	case SDMMC_SIGNAL_VOLTAGE_330:
		uvol = 3300000;
		break;
	case SDMMC_SIGNAL_VOLTAGE_180:
		uvol = 1800000;
		break;
	default:
		return EINVAL;
	}

	error = fdtbus_regulator_set_voltage(sc->sc_reg_vqmmc, uvol, uvol);
	if (error != 0)
		return error;

	return fdtbus_regulator_enable(sc->sc_reg_vqmmc);
}

static int
sunxi_mmc_execute_tuning(sdmmc_chipset_handle_t sch, int timing)
{
	switch (timing) {
	case SDMMC_TIMING_MMC_HS200:
		break;
	default:
		return EINVAL;
	}

	return 0;
}

static int
sunxi_mmc_dma_prepare(struct sunxi_mmc_softc *sc, struct sdmmc_command *cmd)
{
	struct sunxi_mmc_idma_descriptor *dma = sc->sc_idma_desc;
	bus_addr_t desc_paddr = sc->sc_idma_map->dm_segs[0].ds_addr;
	bus_dmamap_t map;
	bus_size_t off;
	int desc, resid, seg;
	uint32_t val;

	/*
	 * If the command includes a dma map use it, otherwise we need to
	 * bounce. This can happen for SDIO IO_RW_EXTENDED (CMD53) commands.
	 */
	if (cmd->c_dmamap) {
		map = cmd->c_dmamap;
	} else {
		if (cmd->c_datalen > sc->sc_dmabounce_buflen)
			return E2BIG;
		map = sc->sc_dmabounce_map;

		if (ISSET(cmd->c_flags, SCF_CMD_READ)) {
			memset(sc->sc_dmabounce_buf, 0, cmd->c_datalen);
			bus_dmamap_sync(sc->sc_dmat, sc->sc_dmabounce_map,
			    0, cmd->c_datalen, BUS_DMASYNC_PREREAD);
		} else {
			memcpy(sc->sc_dmabounce_buf, cmd->c_data,
			    cmd->c_datalen);
			bus_dmamap_sync(sc->sc_dmat, sc->sc_dmabounce_map,
			    0, cmd->c_datalen, BUS_DMASYNC_PREWRITE);
		}
	}

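	/*
	 * Build the IDMA descriptor chain: each descriptor covers at most
	 * idma_xferlen bytes, the first is tagged FD, the last LD/ER, and
	 * intermediate ones set DIC to suppress per-descriptor interrupts.
	 * Descriptors are linked by physical address through dma_next.
	 */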
	desc = 0;
	for (seg = 0; seg < map->dm_nsegs; seg++) {
		bus_addr_t paddr = map->dm_segs[seg].ds_addr;
		bus_size_t len = map->dm_segs[seg].ds_len;
		resid = uimin(len, cmd->c_resid);
		off = 0;
		while (resid > 0) {
			if (desc == sc->sc_idma_ndesc)
				break;
			len = uimin(sc->sc_config->idma_xferlen, resid);
			dma[desc].dma_buf_size = htole32(len);
			dma[desc].dma_buf_addr = htole32(paddr + off);
			dma[desc].dma_config = htole32(SUNXI_MMC_IDMA_CONFIG_CH |
			    SUNXI_MMC_IDMA_CONFIG_OWN);
			cmd->c_resid -= len;
			resid -= len;
			off += len;
			if (desc == 0) {
				dma[desc].dma_config |= htole32(SUNXI_MMC_IDMA_CONFIG_FD);
			}
			if (cmd->c_resid == 0) {
				dma[desc].dma_config |= htole32(SUNXI_MMC_IDMA_CONFIG_LD);
				dma[desc].dma_config |= htole32(SUNXI_MMC_IDMA_CONFIG_ER);
				dma[desc].dma_next = 0;
			} else {
				dma[desc].dma_config |=
				    htole32(SUNXI_MMC_IDMA_CONFIG_DIC);
				dma[desc].dma_next = htole32(
				    desc_paddr + ((desc+1) *
				    sizeof(struct sunxi_mmc_idma_descriptor)));
			}
			++desc;
		}
	}
	if (desc == sc->sc_idma_ndesc) {
		aprint_error_dev(sc->sc_dev,
		    "not enough descriptors for %d byte transfer! "
		    "there are %u segments with a max xfer length of %u\n",
		    cmd->c_datalen, map->dm_nsegs, sc->sc_config->idma_xferlen);
		return EIO;
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_idma_map, 0,
	    sc->sc_idma_size, BUS_DMASYNC_PREWRITE);

	sc->sc_idma_idst = 0;

	MMC_WRITE(sc, SUNXI_MMC_DLBA, desc_paddr);
	MMC_WRITE(sc, SUNXI_MMC_FTRGLEVEL, sc->sc_config->dma_ftrglevel);

	val = MMC_READ(sc, SUNXI_MMC_GCTRL);
	val |= SUNXI_MMC_GCTRL_DMAEN;
	MMC_WRITE(sc, SUNXI_MMC_GCTRL, val);
	val |= SUNXI_MMC_GCTRL_DMARESET;
	MMC_WRITE(sc, SUNXI_MMC_GCTRL, val);

	MMC_WRITE(sc, SUNXI_MMC_DMAC, SUNXI_MMC_DMAC_SOFTRESET);
	if (ISSET(cmd->c_flags, SCF_CMD_READ))
		val = SUNXI_MMC_IDST_RECEIVE_INT;
	else
		val = 0;
	MMC_WRITE(sc, SUNXI_MMC_IDIE, val);
	MMC_WRITE(sc, SUNXI_MMC_DMAC,
	    SUNXI_MMC_DMAC_IDMA_ON|SUNXI_MMC_DMAC_FIX_BURST);

	return 0;
}

static void
sunxi_mmc_dma_complete(struct sunxi_mmc_softc *sc, struct sdmmc_command *cmd)
{
	MMC_WRITE(sc, SUNXI_MMC_DMAC, 0);

	bus_dmamap_sync(sc->sc_dmat, sc->sc_idma_map, 0,
	    sc->sc_idma_size, BUS_DMASYNC_POSTWRITE);

	if (cmd->c_dmamap == NULL) {
		if (ISSET(cmd->c_flags, SCF_CMD_READ)) {
			bus_dmamap_sync(sc->sc_dmat, sc->sc_dmabounce_map,
			    0, cmd->c_datalen, BUS_DMASYNC_POSTREAD);
			memcpy(cmd->c_data, sc->sc_dmabounce_buf,
			    cmd->c_datalen);
		} else {
			bus_dmamap_sync(sc->sc_dmat, sc->sc_dmabounce_map,
			    0, cmd->c_datalen, BUS_DMASYNC_POSTWRITE);
		}
	}
}

static void
sunxi_mmc_exec_command(sdmmc_chipset_handle_t sch, struct sdmmc_command *cmd)
{
	struct sunxi_mmc_softc *sc = sch;
	uint32_t cmdval = SUNXI_MMC_CMD_START;
	uint32_t imask, oimask;
	const bool poll = (cmd->c_flags & SCF_POLL) != 0;
	int retry;

	DPRINTF(sc->sc_dev,
	    "opcode %d flags 0x%x data %p datalen %d blklen %d poll %d\n",
	    cmd->c_opcode, cmd->c_flags, cmd->c_data, cmd->c_datalen,
	    cmd->c_blklen, poll);

	mutex_enter(&sc->sc_intr_lock);

	if (cmd->c_opcode == 0)
		cmdval |= SUNXI_MMC_CMD_SEND_INIT_SEQ;
	if (cmd->c_flags & SCF_RSP_PRESENT)
		cmdval |= SUNXI_MMC_CMD_RSP_EXP;
	if (cmd->c_flags & SCF_RSP_136)
		cmdval |= SUNXI_MMC_CMD_LONG_RSP;
	if (cmd->c_flags & SCF_RSP_CRC)
		cmdval |= SUNXI_MMC_CMD_CHECK_RSP_CRC;

	imask = oimask = MMC_READ(sc, SUNXI_MMC_IMASK);
	imask |= SUNXI_MMC_INT_ERROR;

	if (cmd->c_datalen > 0) {
		unsigned int nblks;

		cmdval |= SUNXI_MMC_CMD_DATA_EXP | SUNXI_MMC_CMD_WAIT_PRE_OVER;
		if (!ISSET(cmd->c_flags, SCF_CMD_READ)) {
			cmdval |= SUNXI_MMC_CMD_WRITE;
		}

		nblks = cmd->c_datalen / cmd->c_blklen;
		if (nblks == 0 || (cmd->c_datalen % cmd->c_blklen) != 0)
			++nblks;

		if (nblks > 1) {
			cmdval |= SUNXI_MMC_CMD_SEND_AUTO_STOP;
			imask |= SUNXI_MMC_INT_AUTO_CMD_DONE;
		} else {
			imask |= SUNXI_MMC_INT_DATA_OVER;
		}

		MMC_WRITE(sc, SUNXI_MMC_BLKSZ, cmd->c_blklen);
		MMC_WRITE(sc, SUNXI_MMC_BYTECNT, nblks * cmd->c_blklen);
	} else {
		imask |= SUNXI_MMC_INT_CMD_DONE;
	}

	MMC_WRITE(sc, SUNXI_MMC_IMASK, imask);
	MMC_WRITE(sc, SUNXI_MMC_RINT, 0xffff);

	sc->sc_intr_rint = 0;

	MMC_WRITE(sc, SUNXI_MMC_A12A,
	    (cmdval & SUNXI_MMC_CMD_SEND_AUTO_STOP) ? 0 : 0xffff);

	MMC_WRITE(sc, SUNXI_MMC_ARG, cmd->c_arg);

	DPRINTF(sc->sc_dev, "cmdval = %08x\n", cmdval);

	if (cmd->c_datalen == 0) {
		MMC_WRITE(sc, SUNXI_MMC_CMD, cmdval | cmd->c_opcode);
	} else {
		cmd->c_resid = cmd->c_datalen;
		cmd->c_error = sunxi_mmc_dma_prepare(sc, cmd);
		MMC_WRITE(sc, SUNXI_MMC_CMD, cmdval | cmd->c_opcode);
		if (cmd->c_error == 0 && ISSET(cmd->c_flags, SCF_CMD_READ)) {
			const uint32_t idst_mask = SUNXI_MMC_IDST_RECEIVE_INT;

			retry = 10;
			while ((sc->sc_idma_idst & idst_mask) == 0) {
				if (retry-- == 0) {
					cmd->c_error = ETIMEDOUT;
					break;
				}
				cv_timedwait(&sc->sc_idst_cv,
				    &sc->sc_intr_lock, hz);
			}
		}
	}

	cmd->c_error = sunxi_mmc_wait_rint(sc,
	    SUNXI_MMC_INT_ERROR|SUNXI_MMC_INT_CMD_DONE, hz * 10, poll);
	if (cmd->c_error == 0 && (sc->sc_intr_rint & SUNXI_MMC_INT_ERROR)) {
		if (sc->sc_intr_rint & SUNXI_MMC_INT_RESP_TIMEOUT) {
			cmd->c_error = ETIMEDOUT;
		} else {
			cmd->c_error = EIO;
		}
	}
	if (cmd->c_error) {
		DPRINTF(sc->sc_dev,
		    "cmd failed, error %d\n", cmd->c_error);
		goto done;
	}

	if (cmd->c_datalen > 0) {
		sunxi_mmc_dma_complete(sc, cmd);

		cmd->c_error = sunxi_mmc_wait_rint(sc,
		    SUNXI_MMC_INT_ERROR|
		    SUNXI_MMC_INT_AUTO_CMD_DONE|
		    SUNXI_MMC_INT_DATA_OVER,
		    hz*10, poll);
		if (cmd->c_error == 0 &&
		    (sc->sc_intr_rint & SUNXI_MMC_INT_ERROR)) {
			cmd->c_error = ETIMEDOUT;
		}
		if (cmd->c_error) {
			DPRINTF(sc->sc_dev,
			    "data timeout, rint = %08x\n",
			    sc->sc_intr_rint);
			cmd->c_error = ETIMEDOUT;
			goto done;
		}
	}

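	/*
	 * Copy out the response. For long (136-bit) responses the words are
	 * shifted down by one byte, which drops the low-order byte
	 * (presumably the CRC) and yields the layout the sdmmc layer expects.
	 */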
	if (cmd->c_flags & SCF_RSP_PRESENT) {
		if (cmd->c_flags & SCF_RSP_136) {
			cmd->c_resp[0] = MMC_READ(sc, SUNXI_MMC_RESP0);
			cmd->c_resp[1] = MMC_READ(sc, SUNXI_MMC_RESP1);
			cmd->c_resp[2] = MMC_READ(sc, SUNXI_MMC_RESP2);
			cmd->c_resp[3] = MMC_READ(sc, SUNXI_MMC_RESP3);
			if (cmd->c_flags & SCF_RSP_CRC) {
				cmd->c_resp[0] = (cmd->c_resp[0] >> 8) |
				    (cmd->c_resp[1] << 24);
				cmd->c_resp[1] = (cmd->c_resp[1] >> 8) |
				    (cmd->c_resp[2] << 24);
				cmd->c_resp[2] = (cmd->c_resp[2] >> 8) |
				    (cmd->c_resp[3] << 24);
				cmd->c_resp[3] = (cmd->c_resp[3] >> 8);
			}
		} else {
			cmd->c_resp[0] = MMC_READ(sc, SUNXI_MMC_RESP0);
		}
	}

done:
	cmd->c_flags |= SCF_ITSDONE;
	MMC_WRITE(sc, SUNXI_MMC_IMASK, oimask);
	MMC_WRITE(sc, SUNXI_MMC_RINT, 0xffff);
	MMC_WRITE(sc, SUNXI_MMC_IDST, 0x337);
	mutex_exit(&sc->sc_intr_lock);

	if (cmd->c_error) {
		DPRINTF(sc->sc_dev, "i/o error %d\n", cmd->c_error);
		MMC_WRITE(sc, SUNXI_MMC_GCTRL,
		    MMC_READ(sc, SUNXI_MMC_GCTRL) |
		    SUNXI_MMC_GCTRL_DMARESET | SUNXI_MMC_GCTRL_FIFORESET);
		for (retry = 0; retry < 1000; retry++) {
			if (!(MMC_READ(sc, SUNXI_MMC_GCTRL) & SUNXI_MMC_GCTRL_RESET))
				break;
			delay(10);
		}
		sunxi_mmc_update_clock(sc);
	}

	MMC_WRITE(sc, SUNXI_MMC_GCTRL,
	    MMC_READ(sc, SUNXI_MMC_GCTRL) | SUNXI_MMC_GCTRL_FIFORESET);
}

static void
sunxi_mmc_card_enable_intr(sdmmc_chipset_handle_t sch, int enable)
{
	struct sunxi_mmc_softc *sc = sch;
	uint32_t imask;

	imask = MMC_READ(sc, SUNXI_MMC_IMASK);
	if (enable)
		imask |= SUNXI_MMC_INT_SDIO_INT;
	else
		imask &= ~SUNXI_MMC_INT_SDIO_INT;
	MMC_WRITE(sc, SUNXI_MMC_IMASK, imask);
}

static void
sunxi_mmc_card_intr_ack(sdmmc_chipset_handle_t sch)
{
	struct sunxi_mmc_softc *sc = sch;

	MMC_WRITE(sc, SUNXI_MMC_RINT, SUNXI_MMC_INT_SDIO_INT);
}