/* $NetBSD: sunxi_mmc.c,v 1.33 2019/05/27 23:27:01 jmcneill Exp $ */
2
3 /*-
4 * Copyright (c) 2014-2017 Jared McNeill <jmcneill (at) invisible.ca>
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
21 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
22 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
23 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
24 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE.
27 */
28
29 #include "opt_sunximmc.h"
30
31 #include <sys/cdefs.h>
32 __KERNEL_RCSID(0, "$NetBSD: sunxi_mmc.c,v 1.33 2019/05/27 23:27:01 jmcneill Exp $");
33
34 #include <sys/param.h>
35 #include <sys/bus.h>
36 #include <sys/device.h>
37 #include <sys/intr.h>
38 #include <sys/systm.h>
39 #include <sys/kernel.h>
40 #include <sys/gpio.h>
41
42 #include <dev/sdmmc/sdmmcvar.h>
43 #include <dev/sdmmc/sdmmcchip.h>
44 #include <dev/sdmmc/sdmmc_ioreg.h>
45
46 #include <dev/fdt/fdtvar.h>
47
48 #include <arm/sunxi/sunxi_mmc.h>
49
#ifdef SUNXI_MMC_DEBUG
/*
 * Per-device debug output: SUNXI_MMC_DEBUG is a bitmask of device unit
 * numbers (unit N -> __BIT(N)) whose DPRINTF output is enabled.
 */
static int sunxi_mmc_debug = SUNXI_MMC_DEBUG;
#define DPRINTF(dev, fmt, ...) \
	do { \
		if (sunxi_mmc_debug & __BIT(device_unit(dev))) \
			device_printf((dev), fmt, ##__VA_ARGS__); \
	} while (0)
#else
/* Debugging disabled: expands to a statement-safe no-op. */
#define DPRINTF(dev, fmt, ...) ((void)0)
#endif
60
/*
 * Bus timing modes; used as indices into the per-SoC delay tables below.
 */
enum sunxi_mmc_timing {
	SUNXI_MMC_TIMING_400K,		/* identification, <= 400 kHz */
	SUNXI_MMC_TIMING_25M,		/* default speed, <= 25 MHz */
	SUNXI_MMC_TIMING_50M,		/* high speed, <= 52 MHz */
	SUNXI_MMC_TIMING_50M_DDR,	/* DDR52, 1/4-bit bus */
	SUNXI_MMC_TIMING_50M_DDR_8BIT,	/* DDR52, 8-bit bus */
};

/*
 * Output/sample delay values per timing mode, applied via clk_set_rate()
 * on the "output" and "sample" phase clocks (presumably phase degrees —
 * TODO confirm against the clock driver).
 */
struct sunxi_mmc_delay {
	u_int output_phase;
	u_int sample_phase;
};

/* Delays for sun7i-class controllers. */
static const struct sunxi_mmc_delay sun7i_mmc_delays[] = {
	[SUNXI_MMC_TIMING_400K]		= { 180, 180 },
	[SUNXI_MMC_TIMING_25M]		= { 180,  75 },
	[SUNXI_MMC_TIMING_50M]		= {  90, 120 },
	[SUNXI_MMC_TIMING_50M_DDR]	= {  60, 120 },
	[SUNXI_MMC_TIMING_50M_DDR_8BIT]	= {  90, 180 },
};

/* Delays for sun9i-class controllers. */
static const struct sunxi_mmc_delay sun9i_mmc_delays[] = {
	[SUNXI_MMC_TIMING_400K]		= { 180, 180 },
	[SUNXI_MMC_TIMING_25M]		= { 180,  75 },
	[SUNXI_MMC_TIMING_50M]		= { 150, 120 },
	[SUNXI_MMC_TIMING_50M_DDR]	= {  54,  36 },
	[SUNXI_MMC_TIMING_50M_DDR_8BIT]	= {  72,  72 },
};
89
/* Number of internal DMA (idma) descriptors; bounds a single transfer. */
#define SUNXI_MMC_NDESC		64

struct sunxi_mmc_softc;

/* autoconf glue */
static int	sunxi_mmc_match(device_t, cfdata_t, void *);
static void	sunxi_mmc_attach(device_t, device_t, void *);
static void	sunxi_mmc_attach_i(device_t);

/* interrupt handler and DMA resource setup */
static int	sunxi_mmc_intr(void *);
static int	sunxi_mmc_dmabounce_setup(struct sunxi_mmc_softc *);
static int	sunxi_mmc_idma_setup(struct sunxi_mmc_softc *);

/* sdmmc(4) chipset methods */
static int	sunxi_mmc_host_reset(sdmmc_chipset_handle_t);
static uint32_t	sunxi_mmc_host_ocr(sdmmc_chipset_handle_t);
static int	sunxi_mmc_host_maxblklen(sdmmc_chipset_handle_t);
static int	sunxi_mmc_card_detect(sdmmc_chipset_handle_t);
static int	sunxi_mmc_write_protect(sdmmc_chipset_handle_t);
static int	sunxi_mmc_bus_power(sdmmc_chipset_handle_t, uint32_t);
static int	sunxi_mmc_bus_clock(sdmmc_chipset_handle_t, int, bool);
static int	sunxi_mmc_bus_width(sdmmc_chipset_handle_t, int);
static int	sunxi_mmc_bus_rod(sdmmc_chipset_handle_t, int);
static int	sunxi_mmc_signal_voltage(sdmmc_chipset_handle_t, int);
static int	sunxi_mmc_execute_tuning(sdmmc_chipset_handle_t, int);
static void	sunxi_mmc_exec_command(sdmmc_chipset_handle_t,
		    struct sdmmc_command *);
static void	sunxi_mmc_card_enable_intr(sdmmc_chipset_handle_t, int);
static void	sunxi_mmc_card_intr_ack(sdmmc_chipset_handle_t);

/* Method table handed to the sdmmc(4) bus layer at attach time. */
static struct sdmmc_chip_functions sunxi_mmc_chip_functions = {
	.host_reset = sunxi_mmc_host_reset,
	.host_ocr = sunxi_mmc_host_ocr,
	.host_maxblklen = sunxi_mmc_host_maxblklen,
	.card_detect = sunxi_mmc_card_detect,
	.write_protect = sunxi_mmc_write_protect,
	.bus_power = sunxi_mmc_bus_power,
	.bus_clock_ddr = sunxi_mmc_bus_clock,
	.bus_width = sunxi_mmc_bus_width,
	.bus_rod = sunxi_mmc_bus_rod,
	.signal_voltage = sunxi_mmc_signal_voltage,
	.execute_tuning = sunxi_mmc_execute_tuning,
	.exec_command = sunxi_mmc_exec_command,
	.card_enable_intr = sunxi_mmc_card_enable_intr,
	.card_intr_ack = sunxi_mmc_card_intr_ack,
};
134
/*
 * Per-SoC controller configuration, selected through compat_data.
 */
struct sunxi_mmc_config {
	u_int idma_xferlen;	/* max bytes per idma descriptor */
	u_int flags;		/* SUNXI_MMC_FLAG_* below */
#define SUNXI_MMC_FLAG_CALIB_REG	0x01	/* has SAMP_DL calibration reg */
#define SUNXI_MMC_FLAG_NEW_TIMINGS	0x02	/* select NTSR new timing mode */
#define SUNXI_MMC_FLAG_MASK_DATA0	0x04	/* mask DATA0 while gating clock */
#define SUNXI_MMC_FLAG_HS200		0x08	/* eMMC HS200 capable */
	const struct sunxi_mmc_delay *delays;	/* phase delay table, or NULL */
	uint32_t dma_ftrglevel;	/* value for the FIFO threshold register */
};
145
/*
 * Per-instance driver state.
 */
struct sunxi_mmc_softc {
	device_t sc_dev;
	bus_space_tag_t sc_bst;
	bus_space_handle_t sc_bsh;
	bus_dma_tag_t sc_dmat;
	int sc_phandle;			/* FDT node of this controller */

	/* interrupt plumbing */
	void *sc_ih;
	kmutex_t sc_intr_lock;
	kcondvar_t sc_intr_cv;		/* waiters on RINT status bits */
	kcondvar_t sc_idst_cv;		/* waiters on idma (IDST) status */

	int sc_mmc_width;		/* current bus width (1/4/8) */
	int sc_mmc_present;		/* cached, debounced CD state */

	u_int sc_max_frequency;		/* cap from "max-frequency", in Hz */

	device_t sc_sdmmc_dev;		/* child sdmmc(4) device */

	struct sunxi_mmc_config *sc_config;	/* per-SoC parameters */

	/* idma descriptor chain memory */
	bus_dma_segment_t sc_idma_segs[1];
	int sc_idma_nsegs;
	bus_size_t sc_idma_size;
	bus_dmamap_t sc_idma_map;
	int sc_idma_ndesc;
	void *sc_idma_desc;

	/* bounce buffer for commands without a pre-loaded dmamap */
	bus_dmamap_t sc_dmabounce_map;
	void *sc_dmabounce_buf;
	size_t sc_dmabounce_buflen;

	/* status bits latched by the interrupt handler */
	uint32_t sc_intr_rint;
	uint32_t sc_idma_idst;

	/* clocks: ahb/mmc required; output/sample phase clocks optional */
	struct clk *sc_clk_ahb;
	struct clk *sc_clk_mmc;
	struct clk *sc_clk_output;
	struct clk *sc_clk_sample;

	struct fdtbus_reset *sc_rst_ahb;

	/* optional card-detect / write-protect GPIOs and their polarity */
	struct fdtbus_gpio_pin *sc_gpio_cd;
	int sc_gpio_cd_inverted;
	struct fdtbus_gpio_pin *sc_gpio_wp;
	int sc_gpio_wp_inverted;

	struct fdtbus_regulator *sc_reg_vmmc;	/* card supply */
	struct fdtbus_regulator *sc_reg_vqmmc;	/* I/O line supply */

	struct fdtbus_mmc_pwrseq *sc_pwrseq;

	bool sc_non_removable;		/* DT "non-removable" */
	bool sc_broken_cd;		/* DT "broken-cd" */
};
201
CFATTACH_DECL_NEW(sunxi_mmc, sizeof(struct sunxi_mmc_softc),
	sunxi_mmc_match, sunxi_mmc_attach, NULL, NULL);

/* 32-bit register access helpers. */
#define MMC_WRITE(sc, reg, val) \
	bus_space_write_4((sc)->sc_bst, (sc)->sc_bsh, (reg), (val))
#define MMC_READ(sc, reg) \
	bus_space_read_4((sc)->sc_bst, (sc)->sc_bsh, (reg))
209
/*
 * Per-SoC configurations.  idma_xferlen and dma_ftrglevel differ between
 * controller generations; delays/flags select optional features.
 */
static const struct sunxi_mmc_config sun4i_a10_mmc_config = {
	.idma_xferlen = 0x2000,
	.dma_ftrglevel = 0x20070008,
	.delays = NULL,
	.flags = 0,
};

static const struct sunxi_mmc_config sun5i_a13_mmc_config = {
	.idma_xferlen = 0x10000,
	.dma_ftrglevel = 0x20070008,
	.delays = NULL,
	.flags = 0,
};

static const struct sunxi_mmc_config sun7i_a20_mmc_config = {
	.idma_xferlen = 0x2000,
	.dma_ftrglevel = 0x20070008,
	.delays = sun7i_mmc_delays,
	.flags = 0,
};

static const struct sunxi_mmc_config sun8i_a83t_emmc_config = {
	.idma_xferlen = 0x10000,
	.dma_ftrglevel = 0x20070008,
	.delays = NULL,
	.flags = SUNXI_MMC_FLAG_NEW_TIMINGS,
};

static const struct sunxi_mmc_config sun9i_a80_mmc_config = {
	.idma_xferlen = 0x10000,
	.dma_ftrglevel = 0x200f0010,
	.delays = sun9i_mmc_delays,
	.flags = 0,
};

static const struct sunxi_mmc_config sun50i_a64_mmc_config = {
	.idma_xferlen = 0x10000,
	.dma_ftrglevel = 0x20070008,
	.delays = NULL,
	.flags = SUNXI_MMC_FLAG_CALIB_REG |
		 SUNXI_MMC_FLAG_NEW_TIMINGS |
		 SUNXI_MMC_FLAG_MASK_DATA0,
};

static const struct sunxi_mmc_config sun50i_a64_emmc_config = {
	.idma_xferlen = 0x2000,
	.dma_ftrglevel = 0x20070008,
	.delays = NULL,
	.flags = SUNXI_MMC_FLAG_CALIB_REG |
		 SUNXI_MMC_FLAG_NEW_TIMINGS |
		 SUNXI_MMC_FLAG_HS200,
};

static const struct sunxi_mmc_config sun50i_h6_mmc_config = {
	.idma_xferlen = 0x10000,
	.dma_ftrglevel = 0x20070008,
	.delays = NULL,
	.flags = SUNXI_MMC_FLAG_CALIB_REG |
		 SUNXI_MMC_FLAG_NEW_TIMINGS |
		 SUNXI_MMC_FLAG_MASK_DATA0,
};

static const struct sunxi_mmc_config sun50i_h6_emmc_config = {
	.idma_xferlen = 0x2000,
	.dma_ftrglevel = 0x20070008,
	.delays = NULL,
	.flags = SUNXI_MMC_FLAG_CALIB_REG,
};

/* DT compatible string -> per-SoC configuration. */
static const struct of_compat_data compat_data[] = {
	{ "allwinner,sun4i-a10-mmc",	(uintptr_t)&sun4i_a10_mmc_config },
	{ "allwinner,sun5i-a13-mmc",	(uintptr_t)&sun5i_a13_mmc_config },
	{ "allwinner,sun7i-a20-mmc",	(uintptr_t)&sun7i_a20_mmc_config },
	{ "allwinner,sun8i-a83t-emmc",	(uintptr_t)&sun8i_a83t_emmc_config },
	{ "allwinner,sun9i-a80-mmc",	(uintptr_t)&sun9i_a80_mmc_config },
	{ "allwinner,sun50i-a64-mmc",	(uintptr_t)&sun50i_a64_mmc_config },
	{ "allwinner,sun50i-a64-emmc",	(uintptr_t)&sun50i_a64_emmc_config },
	{ "allwinner,sun50i-h6-mmc",	(uintptr_t)&sun50i_h6_mmc_config },
	{ "allwinner,sun50i-h6-emmc",	(uintptr_t)&sun50i_h6_emmc_config },
	{ NULL }
};
291
292 static int
293 sunxi_mmc_match(device_t parent, cfdata_t cf, void *aux)
294 {
295 struct fdt_attach_args * const faa = aux;
296
297 return of_match_compat_data(faa->faa_phandle, compat_data);
298 }
299
/*
 * Autoconf attach: map registers, enable clocks/resets, acquire optional
 * regulators and GPIOs, set up DMA, and hook the interrupt.  The child
 * sdmmc bus is attached later from sunxi_mmc_attach_i once interrupts
 * are available.
 */
static void
sunxi_mmc_attach(device_t parent, device_t self, void *aux)
{
	struct sunxi_mmc_softc * const sc = device_private(self);
	struct fdt_attach_args * const faa = aux;
	const int phandle = faa->faa_phandle;
	char intrstr[128];
	bus_addr_t addr;
	bus_size_t size;

	if (fdtbus_get_reg(phandle, 0, &addr, &size) != 0) {
		aprint_error(": couldn't get registers\n");
		return;
	}

	sc->sc_clk_ahb = fdtbus_clock_get(phandle, "ahb");
	sc->sc_clk_mmc = fdtbus_clock_get(phandle, "mmc");
	sc->sc_clk_output = fdtbus_clock_get(phandle, "output");
	sc->sc_clk_sample = fdtbus_clock_get(phandle, "sample");

	/*
	 * Only the bus (ahb) and module (mmc) clocks are mandatory for
	 * now; the output/sample phase clocks remain optional.
	 */
#if notyet
	if (sc->sc_clk_ahb == NULL || sc->sc_clk_mmc == NULL ||
	    sc->sc_clk_output == NULL || sc->sc_clk_sample == NULL) {
#else
	if (sc->sc_clk_ahb == NULL || sc->sc_clk_mmc == NULL) {
#endif
		aprint_error(": couldn't get clocks\n");
		return;
	}

	sc->sc_rst_ahb = fdtbus_reset_get(phandle, "ahb");

	sc->sc_pwrseq = fdtbus_mmc_pwrseq_get(phandle);

	if (clk_enable(sc->sc_clk_ahb) != 0 ||
	    clk_enable(sc->sc_clk_mmc) != 0) {
		aprint_error(": couldn't enable clocks\n");
		return;
	}

	/* Release the controller from reset, if a reset line exists. */
	if (sc->sc_rst_ahb != NULL) {
		if (fdtbus_reset_deassert(sc->sc_rst_ahb) != 0) {
			aprint_error(": couldn't de-assert resets\n");
			return;
		}
	}

	sc->sc_dev = self;
	sc->sc_phandle = phandle;
	sc->sc_config = (void *)of_search_compatible(phandle, compat_data)->data;
	sc->sc_bst = faa->faa_bst;
	sc->sc_dmat = faa->faa_dmat;
	mutex_init(&sc->sc_intr_lock, MUTEX_DEFAULT, IPL_BIO);
	cv_init(&sc->sc_intr_cv, "awinmmcirq");
	cv_init(&sc->sc_idst_cv, "awinmmcdma");

	if (bus_space_map(sc->sc_bst, addr, size, 0, &sc->sc_bsh) != 0) {
		aprint_error(": couldn't map registers\n");
		return;
	}

	sc->sc_reg_vmmc = fdtbus_regulator_acquire(phandle, "vmmc-supply");
	if (sc->sc_reg_vmmc != NULL && fdtbus_regulator_enable(sc->sc_reg_vmmc)) {
		aprint_error(": couldn't enable vmmc-supply\n");
		return;
	}

	aprint_naive("\n");
	aprint_normal(": SD/MMC controller\n");

	sc->sc_reg_vqmmc = fdtbus_regulator_acquire(phandle, "vqmmc-supply");

	sc->sc_gpio_cd = fdtbus_gpio_acquire(phandle, "cd-gpios",
	    GPIO_PIN_INPUT);
	sc->sc_gpio_wp = fdtbus_gpio_acquire(phandle, "wp-gpios",
	    GPIO_PIN_INPUT);

	/* CD/WP pins are treated as active-low unless DT says otherwise. */
	sc->sc_gpio_cd_inverted = of_hasprop(phandle, "cd-inverted") ? 0 : 1;
	sc->sc_gpio_wp_inverted = of_hasprop(phandle, "wp-inverted") ? 0 : 1;

	sc->sc_non_removable = of_hasprop(phandle, "non-removable");
	sc->sc_broken_cd = of_hasprop(phandle, "broken-cd");

	/* Default cap is 52 MHz when DT gives no "max-frequency". */
	if (of_getprop_uint32(phandle, "max-frequency", &sc->sc_max_frequency))
		sc->sc_max_frequency = 52000000;

	if (sunxi_mmc_dmabounce_setup(sc) != 0 ||
	    sunxi_mmc_idma_setup(sc) != 0) {
		aprint_error_dev(self, "failed to setup DMA\n");
		return;
	}

	if (!fdtbus_intr_str(phandle, 0, intrstr, sizeof(intrstr))) {
		aprint_error_dev(self, "failed to decode interrupt\n");
		return;
	}

	sc->sc_ih = fdtbus_intr_establish(phandle, 0, IPL_BIO, FDT_INTR_MPSAFE,
	    sunxi_mmc_intr, sc);
	if (sc->sc_ih == NULL) {
		aprint_error_dev(self, "failed to establish interrupt on %s\n",
		    intrstr);
		return;
	}
	aprint_normal_dev(self, "interrupting on %s\n", intrstr);

	/* Defer child bus attachment until interrupts are running. */
	config_interrupts(self, sunxi_mmc_attach_i);
}
408
/*
 * Allocate, map, and load a single-segment bounce buffer used for
 * commands that arrive without a pre-loaded dmamap (e.g. SDIO CMD53).
 * Returns 0 or a bus_dma error; partial resources are unwound via the
 * goto chain.
 */
static int
sunxi_mmc_dmabounce_setup(struct sunxi_mmc_softc *sc)
{
	bus_dma_segment_t ds[1];
	int error, rseg;

	/* Buffer is sized to the largest supported block length. */
	sc->sc_dmabounce_buflen = sunxi_mmc_host_maxblklen(sc);
	error = bus_dmamem_alloc(sc->sc_dmat, sc->sc_dmabounce_buflen, 0,
	    sc->sc_dmabounce_buflen, ds, 1, &rseg, BUS_DMA_WAITOK);
	if (error)
		return error;
	error = bus_dmamem_map(sc->sc_dmat, ds, 1, sc->sc_dmabounce_buflen,
	    &sc->sc_dmabounce_buf, BUS_DMA_WAITOK);
	if (error)
		goto free;
	error = bus_dmamap_create(sc->sc_dmat, sc->sc_dmabounce_buflen, 1,
	    sc->sc_dmabounce_buflen, 0, BUS_DMA_WAITOK, &sc->sc_dmabounce_map);
	if (error)
		goto unmap;
	error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmabounce_map,
	    sc->sc_dmabounce_buf, sc->sc_dmabounce_buflen, NULL,
	    BUS_DMA_WAITOK);
	if (error)
		goto destroy;
	return 0;

destroy:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmabounce_map);
unmap:
	bus_dmamem_unmap(sc->sc_dmat, sc->sc_dmabounce_buf,
	    sc->sc_dmabounce_buflen);
free:
	bus_dmamem_free(sc->sc_dmat, ds, rseg);
	return error;
}
444
/*
 * Allocate, map, and load the internal DMA descriptor chain
 * (SUNXI_MMC_NDESC descriptors in one physically contiguous segment).
 * Returns 0 or a bus_dma error; partial resources are unwound via the
 * goto chain.
 */
static int
sunxi_mmc_idma_setup(struct sunxi_mmc_softc *sc)
{
	int error;

	sc->sc_idma_ndesc = SUNXI_MMC_NDESC;
	sc->sc_idma_size = sizeof(struct sunxi_mmc_idma_descriptor) *
	    sc->sc_idma_ndesc;
	error = bus_dmamem_alloc(sc->sc_dmat, sc->sc_idma_size, 0,
	    sc->sc_idma_size, sc->sc_idma_segs, 1,
	    &sc->sc_idma_nsegs, BUS_DMA_WAITOK);
	if (error)
		return error;
	error = bus_dmamem_map(sc->sc_dmat, sc->sc_idma_segs,
	    sc->sc_idma_nsegs, sc->sc_idma_size,
	    &sc->sc_idma_desc, BUS_DMA_WAITOK);
	if (error)
		goto free;
	error = bus_dmamap_create(sc->sc_dmat, sc->sc_idma_size, 1,
	    sc->sc_idma_size, 0, BUS_DMA_WAITOK, &sc->sc_idma_map);
	if (error)
		goto unmap;
	error = bus_dmamap_load(sc->sc_dmat, sc->sc_idma_map,
	    sc->sc_idma_desc, sc->sc_idma_size, NULL, BUS_DMA_WAITOK);
	if (error)
		goto destroy;
	return 0;

destroy:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_idma_map);
unmap:
	bus_dmamem_unmap(sc->sc_dmat, sc->sc_idma_desc, sc->sc_idma_size);
free:
	bus_dmamem_free(sc->sc_dmat, sc->sc_idma_segs, sc->sc_idma_nsegs);
	return error;
}
481
/*
 * Program the module clock for "freq" (in kHz) and, when the SoC has a
 * delay table, the output/sample phase clocks for the matching timing
 * mode.  For DDR the module clock runs at twice the requested bus rate.
 * Returns 0, EINVAL for out-of-range frequencies, or a clk error.
 */
static int
sunxi_mmc_set_clock(struct sunxi_mmc_softc *sc, u_int freq, bool ddr)
{
	const struct sunxi_mmc_delay *delays;
	int error, timing = SUNXI_MMC_TIMING_400K;

	if (sc->sc_config->delays) {
		/* Map the requested rate to a timing-mode table index. */
		if (freq <= 400) {
			timing = SUNXI_MMC_TIMING_400K;
		} else if (freq <= 25000) {
			timing = SUNXI_MMC_TIMING_25M;
		} else if (freq <= 52000) {
			if (ddr) {
				timing = sc->sc_mmc_width == 8 ?
				    SUNXI_MMC_TIMING_50M_DDR_8BIT :
				    SUNXI_MMC_TIMING_50M_DDR;
			} else {
				timing = SUNXI_MMC_TIMING_50M;
			}
		} else
			return EINVAL;
	}
	/* Enforce the DT "max-frequency" cap (sc_max_frequency is Hz). */
	if (sc->sc_max_frequency) {
		if (freq * 1000 > sc->sc_max_frequency)
			return EINVAL;
	}

	/* DDR needs the module clock at 2x the card clock. */
	error = clk_set_rate(sc->sc_clk_mmc, (freq * 1000) << ddr);
	if (error != 0)
		return error;

	/* SoCs without a delay table skip phase-clock programming. */
	if (sc->sc_config->delays == NULL)
		return 0;

	delays = &sc->sc_config->delays[timing];

	if (sc->sc_clk_sample) {
		error = clk_set_rate(sc->sc_clk_sample, delays->sample_phase);
		if (error != 0)
			return error;
	}
	if (sc->sc_clk_output) {
		error = clk_set_rate(sc->sc_clk_output, delays->output_phase);
		if (error != 0)
			return error;
	}

	return 0;
}
531
532 static void
533 sunxi_mmc_hw_reset(struct sunxi_mmc_softc *sc)
534 {
535 MMC_WRITE(sc, SUNXI_MMC_HWRST, 0);
536 delay(1000);
537 MMC_WRITE(sc, SUNXI_MMC_HWRST, 1);
538 delay(1000);
539 }
540
/*
 * Deferred attach, run once interrupts are enabled: execute the power
 * sequence, reset and initialize the controller at identification speed,
 * build the capability set from DT properties and per-SoC flags, and
 * attach the child sdmmc(4) bus.
 */
static void
sunxi_mmc_attach_i(device_t self)
{
	struct sunxi_mmc_softc *sc = device_private(self);
	const u_int flags = sc->sc_config->flags;
	struct sdmmcbus_attach_args saa;
	uint32_t width;

	if (sc->sc_pwrseq)
		fdtbus_mmc_pwrseq_pre_power_on(sc->sc_pwrseq);

	if (of_hasprop(sc->sc_phandle, "cap-mmc-hw-reset"))
		sunxi_mmc_hw_reset(sc);

	/* Start at 1-bit, 400 kHz for card identification. */
	sunxi_mmc_host_reset(sc);
	sunxi_mmc_bus_width(sc, 1);
	sunxi_mmc_set_clock(sc, 400, false);

	if (sc->sc_pwrseq)
		fdtbus_mmc_pwrseq_post_power_on(sc->sc_pwrseq);

	/* DT "bus-width"; default to 4 bits when absent. */
	if (of_getprop_uint32(sc->sc_phandle, "bus-width", &width) != 0)
		width = 4;

	memset(&saa, 0, sizeof(saa));
	saa.saa_busname = "sdmmc";
	saa.saa_sct = &sunxi_mmc_chip_functions;
	saa.saa_sch = sc;
	saa.saa_dmat = sc->sc_dmat;
	saa.saa_clkmin = 400;
	saa.saa_clkmax = sc->sc_max_frequency / 1000;	/* Hz -> kHz */
	saa.saa_caps = SMC_CAPS_DMA |
		       SMC_CAPS_MULTI_SEG_DMA |
		       SMC_CAPS_AUTO_STOP |
		       SMC_CAPS_SD_HIGHSPEED |
		       SMC_CAPS_MMC_HIGHSPEED |
		       SMC_CAPS_POLLING;

	/* DDR52 requires either phase-delay support or new timing mode. */
	if (sc->sc_config->delays || (flags & SUNXI_MMC_FLAG_NEW_TIMINGS))
		saa.saa_caps |= SMC_CAPS_MMC_DDR52;

	if (flags & SUNXI_MMC_FLAG_HS200)
		saa.saa_caps |= SMC_CAPS_MMC_HS200;

	if (width == 4)
		saa.saa_caps |= SMC_CAPS_4BIT_MODE;
	if (width == 8)
		saa.saa_caps |= SMC_CAPS_8BIT_MODE;

	/* With a CD GPIO there is no CD interrupt; ask sdmmc to poll. */
	if (sc->sc_gpio_cd)
		saa.saa_caps |= SMC_CAPS_POLL_CARD_DET;

	sc->sc_sdmmc_dev = config_found(self, &saa, NULL);
}
595
/*
 * Interrupt handler.  Latches DMA (IDST) and card (RINT) status into the
 * softc and wakes waiters; SDIO card interrupts are forwarded to the
 * sdmmc layer.  Returns 0 when the interrupt was not ours.
 */
static int
sunxi_mmc_intr(void *priv)
{
	struct sunxi_mmc_softc *sc = priv;
	uint32_t idst, rint, imask;

	mutex_enter(&sc->sc_intr_lock);
	idst = MMC_READ(sc, SUNXI_MMC_IDST);
	rint = MMC_READ(sc, SUNXI_MMC_RINT);
	if (!idst && !rint) {
		mutex_exit(&sc->sc_intr_lock);
		return 0;
	}
	/*
	 * Ack what we saw.  The SDIO interrupt bit is left pending in
	 * RINT here; it is acked separately (see card_intr_ack).
	 */
	MMC_WRITE(sc, SUNXI_MMC_IDST, idst);
	MMC_WRITE(sc, SUNXI_MMC_RINT, rint & ~SUNXI_MMC_INT_SDIO_INT);

	DPRINTF(sc->sc_dev, "mmc intr idst=%08X rint=%08X\n",
	    idst, rint);

	if (idst != 0) {
		/* DMA event: disable further idma interrupts, wake waiter. */
		MMC_WRITE(sc, SUNXI_MMC_IDIE, 0);
		sc->sc_idma_idst |= idst;
		cv_broadcast(&sc->sc_idst_cv);
	}

	if ((rint & ~SUNXI_MMC_INT_SDIO_INT) != 0) {
		/* Command/data event: mask all but SDIO, wake waiter. */
		imask = MMC_READ(sc, SUNXI_MMC_IMASK);
		MMC_WRITE(sc, SUNXI_MMC_IMASK, imask & ~SUNXI_MMC_INT_SDIO_INT);
		sc->sc_intr_rint |= (rint & ~SUNXI_MMC_INT_SDIO_INT);
		cv_broadcast(&sc->sc_intr_cv);
	}

	if ((rint & SUNXI_MMC_INT_SDIO_INT) != 0) {
		sdmmc_card_intr(sc->sc_sdmmc_dev);
	}

	mutex_exit(&sc->sc_intr_lock);

	return 1;
}
636
/*
 * Wait until one of the RINT bits in "mask" has been latched into
 * sc_intr_rint.  In polling mode RINT is read directly in ~1 ms steps
 * (timeout/hz*1000 iterations); otherwise we sleep on sc_intr_cv in
 * 1-second slices (timeout/hz iterations).  Returns 0, ETIMEDOUT, or a
 * cv_timedwait error.  Must be called with sc_intr_lock held.
 */
static int
sunxi_mmc_wait_rint(struct sunxi_mmc_softc *sc, uint32_t mask,
    int timeout, bool poll)
{
	int retry;
	int error;

	KASSERT(mutex_owned(&sc->sc_intr_lock));

	/* Already latched by a previous interrupt/poll? */
	if (sc->sc_intr_rint & mask)
		return 0;

	if (poll)
		retry = timeout / hz * 1000;
	else
		retry = timeout / hz;

	while (retry > 0) {
		if (poll) {
			/* Interrupts may be masked; sample RINT directly. */
			sc->sc_intr_rint |= MMC_READ(sc, SUNXI_MMC_RINT);
		} else {
			error = cv_timedwait(&sc->sc_intr_cv,
			    &sc->sc_intr_lock, hz);
			if (error && error != EWOULDBLOCK)
				return error;
		}
		if (sc->sc_intr_rint & mask)
			return 0;
		if (poll)
			delay(1000);
		--retry;
	}

	return ETIMEDOUT;
}
672
/*
 * sdmmc host_reset method: soft-reset the controller, set the data
 * timeout to maximum, clear and mask all interrupts, and re-enable the
 * global interrupt with DMA-friendly access settings.  Always returns 0;
 * a reset that fails to self-clear is silently tolerated after ~100 ms.
 */
static int
sunxi_mmc_host_reset(sdmmc_chipset_handle_t sch)
{
	struct sunxi_mmc_softc *sc = sch;
	uint32_t gctrl;
	int retry = 1000;

	DPRINTF(sc->sc_dev, "host reset\n");

	gctrl = MMC_READ(sc, SUNXI_MMC_GCTRL);
	gctrl |= SUNXI_MMC_GCTRL_RESET;
	MMC_WRITE(sc, SUNXI_MMC_GCTRL, gctrl);
	/* Wait for the self-clearing RESET bit (up to 1000 * 100 us). */
	while (--retry > 0) {
		if (!(MMC_READ(sc, SUNXI_MMC_GCTRL) & SUNXI_MMC_GCTRL_RESET))
			break;
		delay(100);
	}

	MMC_WRITE(sc, SUNXI_MMC_TIMEOUT, 0xffffffff);

	/* Mask everything; exec_command enables what it needs. */
	MMC_WRITE(sc, SUNXI_MMC_IMASK, 0);

	/* Clear any stale raw interrupt status. */
	MMC_WRITE(sc, SUNXI_MMC_RINT, 0xffffffff);

	gctrl = MMC_READ(sc, SUNXI_MMC_GCTRL);
	gctrl |= SUNXI_MMC_GCTRL_INTEN;
	gctrl &= ~SUNXI_MMC_GCTRL_WAIT_MEM_ACCESS_DONE;
	gctrl &= ~SUNXI_MMC_GCTRL_ACCESS_BY_AHB;
	MMC_WRITE(sc, SUNXI_MMC_GCTRL, gctrl);

	return 0;
}
705
706 static uint32_t
707 sunxi_mmc_host_ocr(sdmmc_chipset_handle_t sch)
708 {
709 return MMC_OCR_3_2V_3_3V | MMC_OCR_3_3V_3_4V | MMC_OCR_HCS;
710 }
711
712 static int
713 sunxi_mmc_host_maxblklen(sdmmc_chipset_handle_t sch)
714 {
715 return 8192;
716 }
717
/*
 * sdmmc card_detect method.  Precedence: DT says always present ->
 * debounced CD GPIO -> controller's own CARD_PRESENT status bit.
 */
static int
sunxi_mmc_card_detect(sdmmc_chipset_handle_t sch)
{
	struct sunxi_mmc_softc *sc = sch;

	if (sc->sc_non_removable || sc->sc_broken_cd) {
		/*
		 * Non-removable or broken card detect flag set in
		 * DT, assume always present
		 */
		return 1;
	} else if (sc->sc_gpio_cd != NULL) {
		/* Use card detect GPIO */
		/*
		 * Debounce: sample 5 times, 1 ms apart.  Only a unanimous
		 * result flips the cached state (5 -> absent, 0 -> present);
		 * anything in between keeps the previous value.
		 */
		int v = 0, i;
		for (i = 0; i < 5; i++) {
			v += (fdtbus_gpio_read(sc->sc_gpio_cd) ^
			    sc->sc_gpio_cd_inverted);
			delay(1000);
		}
		if (v == 5)
			sc->sc_mmc_present = 0;
		else if (v == 0)
			sc->sc_mmc_present = 1;
		return sc->sc_mmc_present;
	} else {
		/* Use CARD_PRESENT field of SD_STATUS register */
		const uint32_t present = MMC_READ(sc, SUNXI_MMC_STATUS) &
		    SUNXI_MMC_STATUS_CARD_PRESENT;
		return present != 0;
	}
}
749
750 static int
751 sunxi_mmc_write_protect(sdmmc_chipset_handle_t sch)
752 {
753 struct sunxi_mmc_softc *sc = sch;
754
755 if (sc->sc_gpio_wp == NULL) {
756 return 0; /* no write protect pin, assume rw */
757 } else {
758 return fdtbus_gpio_read(sc->sc_gpio_wp) ^
759 sc->sc_gpio_wp_inverted;
760 }
761 }
762
763 static int
764 sunxi_mmc_bus_power(sdmmc_chipset_handle_t sch, uint32_t ocr)
765 {
766 return 0;
767 }
768
/*
 * Latch new clock settings into the card by issuing an internal
 * "update clock" command (no card command is sent) and busy-waiting for
 * the START bit to self-clear.  Returns 0 or ETIMEDOUT (~1 s budget).
 */
static int
sunxi_mmc_update_clock(struct sunxi_mmc_softc *sc)
{
	uint32_t cmd;
	int retry;

	DPRINTF(sc->sc_dev, "update clock\n");

	cmd = SUNXI_MMC_CMD_START |
	      SUNXI_MMC_CMD_UPCLK_ONLY |
	      SUNXI_MMC_CMD_WAIT_PRE_OVER;
	MMC_WRITE(sc, SUNXI_MMC_CMD, cmd);
	retry = 100000;
	while (--retry > 0) {
		if (!(MMC_READ(sc, SUNXI_MMC_CMD) & SUNXI_MMC_CMD_START))
			break;
		delay(10);
	}

	if (retry == 0) {
		/* Dump controller state to aid debugging the hang. */
		aprint_error_dev(sc->sc_dev, "timeout updating clock\n");
		DPRINTF(sc->sc_dev, "GCTRL: 0x%08x\n",
		    MMC_READ(sc, SUNXI_MMC_GCTRL));
		DPRINTF(sc->sc_dev, "CLKCR: 0x%08x\n",
		    MMC_READ(sc, SUNXI_MMC_CLKCR));
		DPRINTF(sc->sc_dev, "TIMEOUT: 0x%08x\n",
		    MMC_READ(sc, SUNXI_MMC_TIMEOUT));
		DPRINTF(sc->sc_dev, "WIDTH: 0x%08x\n",
		    MMC_READ(sc, SUNXI_MMC_WIDTH));
		DPRINTF(sc->sc_dev, "CMD: 0x%08x\n",
		    MMC_READ(sc, SUNXI_MMC_CMD));
		DPRINTF(sc->sc_dev, "MINT: 0x%08x\n",
		    MMC_READ(sc, SUNXI_MMC_MINT));
		DPRINTF(sc->sc_dev, "RINT: 0x%08x\n",
		    MMC_READ(sc, SUNXI_MMC_RINT));
		DPRINTF(sc->sc_dev, "STATUS: 0x%08x\n",
		    MMC_READ(sc, SUNXI_MMC_STATUS));
		return ETIMEDOUT;
	}

	return 0;
}
811
812 static int
813 sunxi_mmc_bus_clock(sdmmc_chipset_handle_t sch, int freq, bool ddr)
814 {
815 struct sunxi_mmc_softc *sc = sch;
816 uint32_t clkcr, gctrl, ntsr;
817 const u_int flags = sc->sc_config->flags;
818
819 clkcr = MMC_READ(sc, SUNXI_MMC_CLKCR);
820 if (clkcr & SUNXI_MMC_CLKCR_CARDCLKON) {
821 clkcr &= ~SUNXI_MMC_CLKCR_CARDCLKON;
822 if (flags & SUNXI_MMC_CLKCR_MASK_DATA0)
823 clkcr |= SUNXI_MMC_CLKCR_MASK_DATA0;
824 MMC_WRITE(sc, SUNXI_MMC_CLKCR, clkcr);
825 if (sunxi_mmc_update_clock(sc) != 0)
826 return 1;
827 if (flags & SUNXI_MMC_CLKCR_MASK_DATA0) {
828 clkcr = MMC_READ(sc, SUNXI_MMC_CLKCR);
829 clkcr &= ~SUNXI_MMC_CLKCR_MASK_DATA0;
830 MMC_WRITE(sc, SUNXI_MMC_CLKCR, clkcr);
831 }
832 }
833
834 if (freq) {
835
836 clkcr &= ~SUNXI_MMC_CLKCR_DIV;
837 clkcr |= __SHIFTIN(ddr, SUNXI_MMC_CLKCR_DIV);
838 MMC_WRITE(sc, SUNXI_MMC_CLKCR, clkcr);
839
840 if (flags & SUNXI_MMC_FLAG_NEW_TIMINGS) {
841 ntsr = MMC_READ(sc, SUNXI_MMC_NTSR);
842 ntsr |= SUNXI_MMC_NTSR_MODE_SELECT;
843 MMC_WRITE(sc, SUNXI_MMC_NTSR, ntsr);
844 }
845
846 if (flags & SUNXI_MMC_FLAG_CALIB_REG)
847 MMC_WRITE(sc, SUNXI_MMC_SAMP_DL, SUNXI_MMC_SAMP_DL_SW_EN);
848
849 if (sunxi_mmc_update_clock(sc) != 0)
850 return 1;
851
852 gctrl = MMC_READ(sc, SUNXI_MMC_GCTRL);
853 if (ddr)
854 gctrl |= SUNXI_MMC_GCTRL_DDR_MODE;
855 else
856 gctrl &= ~SUNXI_MMC_GCTRL_DDR_MODE;
857 MMC_WRITE(sc, SUNXI_MMC_GCTRL, gctrl);
858
859 if (sunxi_mmc_set_clock(sc, freq, ddr) != 0)
860 return 1;
861
862 clkcr |= SUNXI_MMC_CLKCR_CARDCLKON;
863 if (flags & SUNXI_MMC_CLKCR_MASK_DATA0)
864 clkcr |= SUNXI_MMC_CLKCR_MASK_DATA0;
865 MMC_WRITE(sc, SUNXI_MMC_CLKCR, clkcr);
866 if (sunxi_mmc_update_clock(sc) != 0)
867 return 1;
868 if (flags & SUNXI_MMC_CLKCR_MASK_DATA0) {
869 clkcr = MMC_READ(sc, SUNXI_MMC_CLKCR);
870 clkcr &= ~SUNXI_MMC_CLKCR_MASK_DATA0;
871 MMC_WRITE(sc, SUNXI_MMC_CLKCR, clkcr);
872 }
873 }
874
875 return 0;
876 }
877
878 static int
879 sunxi_mmc_bus_width(sdmmc_chipset_handle_t sch, int width)
880 {
881 struct sunxi_mmc_softc *sc = sch;
882
883 DPRINTF(sc->sc_dev, "width = %d\n", width);
884
885 switch (width) {
886 case 1:
887 MMC_WRITE(sc, SUNXI_MMC_WIDTH, SUNXI_MMC_WIDTH_1);
888 break;
889 case 4:
890 MMC_WRITE(sc, SUNXI_MMC_WIDTH, SUNXI_MMC_WIDTH_4);
891 break;
892 case 8:
893 MMC_WRITE(sc, SUNXI_MMC_WIDTH, SUNXI_MMC_WIDTH_8);
894 break;
895 default:
896 return 1;
897 }
898
899 sc->sc_mmc_width = width;
900
901 return 0;
902 }
903
904 static int
905 sunxi_mmc_bus_rod(sdmmc_chipset_handle_t sch, int on)
906 {
907 return -1;
908 }
909
910 static int
911 sunxi_mmc_signal_voltage(sdmmc_chipset_handle_t sch, int signal_voltage)
912 {
913 struct sunxi_mmc_softc *sc = sch;
914 u_int uvol;
915 int error;
916
917 if (sc->sc_reg_vqmmc == NULL)
918 return 0;
919
920 switch (signal_voltage) {
921 case SDMMC_SIGNAL_VOLTAGE_330:
922 uvol = 3300000;
923 break;
924 case SDMMC_SIGNAL_VOLTAGE_180:
925 uvol = 1800000;
926 break;
927 default:
928 return EINVAL;
929 }
930
931 error = fdtbus_regulator_supports_voltage(sc->sc_reg_vqmmc, uvol, uvol);
932 if (error != 0)
933 return 0;
934
935 error = fdtbus_regulator_set_voltage(sc->sc_reg_vqmmc, uvol, uvol);
936 if (error != 0)
937 return error;
938
939 return fdtbus_regulator_enable(sc->sc_reg_vqmmc);
940 }
941
942 static int
943 sunxi_mmc_execute_tuning(sdmmc_chipset_handle_t sch, int timing)
944 {
945 switch (timing) {
946 case SDMMC_TIMING_MMC_HS200:
947 break;
948 default:
949 return EINVAL;
950 }
951
952 return 0;
953 }
954
/*
 * Build the idma descriptor chain for "cmd" and start the internal DMA
 * engine.  Each dmamap segment is split into chunks of at most
 * idma_xferlen bytes; the first descriptor gets FD, the last LD|ER, and
 * intermediate ones DIC (suppress per-descriptor interrupts).  Returns
 * 0, E2BIG when a bounced transfer exceeds the bounce buffer, or EIO
 * when the chain runs out of descriptors.
 */
static int
sunxi_mmc_dma_prepare(struct sunxi_mmc_softc *sc, struct sdmmc_command *cmd)
{
	struct sunxi_mmc_idma_descriptor *dma = sc->sc_idma_desc;
	bus_addr_t desc_paddr = sc->sc_idma_map->dm_segs[0].ds_addr;
	bus_dmamap_t map;
	bus_size_t off;
	int desc, resid, seg;
	uint32_t val;

	/*
	 * If the command includes a dma map use it, otherwise we need to
	 * bounce. This can happen for SDIO IO_RW_EXTENDED (CMD53) commands.
	 */
	if (cmd->c_dmamap) {
		map = cmd->c_dmamap;
	} else {
		if (cmd->c_datalen > sc->sc_dmabounce_buflen)
			return E2BIG;
		map = sc->sc_dmabounce_map;

		if (ISSET(cmd->c_flags, SCF_CMD_READ)) {
			/* Read: clear the bounce buffer, sync for device. */
			memset(sc->sc_dmabounce_buf, 0, cmd->c_datalen);
			bus_dmamap_sync(sc->sc_dmat, sc->sc_dmabounce_map,
			    0, cmd->c_datalen, BUS_DMASYNC_PREREAD);
		} else {
			/* Write: copy payload into the bounce buffer. */
			memcpy(sc->sc_dmabounce_buf, cmd->c_data,
			    cmd->c_datalen);
			bus_dmamap_sync(sc->sc_dmat, sc->sc_dmabounce_map,
			    0, cmd->c_datalen, BUS_DMASYNC_PREWRITE);
		}
	}

	/* Split each segment into idma_xferlen-sized descriptors. */
	desc = 0;
	for (seg = 0; seg < map->dm_nsegs; seg++) {
		bus_addr_t paddr = map->dm_segs[seg].ds_addr;
		bus_size_t len = map->dm_segs[seg].ds_len;
		resid = uimin(len, cmd->c_resid);
		off = 0;
		while (resid > 0) {
			if (desc == sc->sc_idma_ndesc)
				break;
			len = uimin(sc->sc_config->idma_xferlen, resid);
			dma[desc].dma_buf_size = htole32(len);
			dma[desc].dma_buf_addr = htole32(paddr + off);
			dma[desc].dma_config = htole32(SUNXI_MMC_IDMA_CONFIG_CH |
					       SUNXI_MMC_IDMA_CONFIG_OWN);
			cmd->c_resid -= len;
			resid -= len;
			off += len;
			if (desc == 0) {
				/* First descriptor of the chain. */
				dma[desc].dma_config |= htole32(SUNXI_MMC_IDMA_CONFIG_FD);
			}
			if (cmd->c_resid == 0) {
				/* Last descriptor: end of chain/ring. */
				dma[desc].dma_config |= htole32(SUNXI_MMC_IDMA_CONFIG_LD);
				dma[desc].dma_config |= htole32(SUNXI_MMC_IDMA_CONFIG_ER);
				dma[desc].dma_next = 0;
			} else {
				dma[desc].dma_config |=
				    htole32(SUNXI_MMC_IDMA_CONFIG_DIC);
				dma[desc].dma_next = htole32(
				    desc_paddr + ((desc+1) *
				    sizeof(struct sunxi_mmc_idma_descriptor)));
			}
			++desc;
		}
	}
	if (desc == sc->sc_idma_ndesc) {
		aprint_error_dev(sc->sc_dev,
		    "not enough descriptors for %d byte transfer! "
		    "there are %u segments with a max xfer length of %u\n",
		    cmd->c_datalen, map->dm_nsegs, sc->sc_config->idma_xferlen);
		return EIO;
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_idma_map, 0,
	    sc->sc_idma_size, BUS_DMASYNC_PREWRITE);

	sc->sc_idma_idst = 0;

	/* Point the controller at the chain and set FIFO thresholds. */
	MMC_WRITE(sc, SUNXI_MMC_DLBA, desc_paddr);
	MMC_WRITE(sc, SUNXI_MMC_FTRGLEVEL, sc->sc_config->dma_ftrglevel);

	/* Enable then reset the DMA engine. */
	val = MMC_READ(sc, SUNXI_MMC_GCTRL);
	val |= SUNXI_MMC_GCTRL_DMAEN;
	MMC_WRITE(sc, SUNXI_MMC_GCTRL, val);
	val |= SUNXI_MMC_GCTRL_DMARESET;
	MMC_WRITE(sc, SUNXI_MMC_GCTRL, val);

	MMC_WRITE(sc, SUNXI_MMC_DMAC, SUNXI_MMC_DMAC_SOFTRESET);
	/* Only reads wait on the receive-complete idma interrupt. */
	if (ISSET(cmd->c_flags, SCF_CMD_READ))
		val = SUNXI_MMC_IDST_RECEIVE_INT;
	else
		val = 0;
	MMC_WRITE(sc, SUNXI_MMC_IDIE, val);
	MMC_WRITE(sc, SUNXI_MMC_DMAC,
	    SUNXI_MMC_DMAC_IDMA_ON|SUNXI_MMC_DMAC_FIX_BURST);

	return 0;
}
1055
/*
 * Finish a DMA transfer: stop the internal DMA engine, sync the
 * descriptor chain, and for bounced commands sync the bounce buffer and
 * copy read data back to the caller's buffer.
 */
static void
sunxi_mmc_dma_complete(struct sunxi_mmc_softc *sc, struct sdmmc_command *cmd)
{
	MMC_WRITE(sc, SUNXI_MMC_DMAC, 0);

	bus_dmamap_sync(sc->sc_dmat, sc->sc_idma_map, 0,
	    sc->sc_idma_size, BUS_DMASYNC_POSTWRITE);

	/* c_dmamap == NULL means the bounce buffer was used. */
	if (cmd->c_dmamap == NULL) {
		if (ISSET(cmd->c_flags, SCF_CMD_READ)) {
			bus_dmamap_sync(sc->sc_dmat, sc->sc_dmabounce_map,
			    0, cmd->c_datalen, BUS_DMASYNC_POSTREAD);
			memcpy(cmd->c_data, sc->sc_dmabounce_buf,
			    cmd->c_datalen);
		} else {
			bus_dmamap_sync(sc->sc_dmat, sc->sc_dmabounce_map,
			    0, cmd->c_datalen, BUS_DMASYNC_POSTWRITE);
		}
	}
}
1076
/*
 * Execute a single SD/MMC command, optionally with an IDMA data phase.
 *
 * Completion status is returned in cmd->c_error and SCF_ITSDONE is set
 * on the command before returning.  When SCF_POLL is set, completion is
 * polled for instead of sleeping for the interrupt (see
 * sunxi_mmc_wait_rint).
 */
static void
sunxi_mmc_exec_command(sdmmc_chipset_handle_t sch, struct sdmmc_command *cmd)
{
	struct sunxi_mmc_softc *sc = sch;
	uint32_t cmdval = SUNXI_MMC_CMD_START;
	uint32_t imask, oimask;
	const bool poll = (cmd->c_flags & SCF_POLL) != 0;
	int retry;

	DPRINTF(sc->sc_dev,
	    "opcode %d flags 0x%x data %p datalen %d blklen %d poll %d\n",
	    cmd->c_opcode, cmd->c_flags, cmd->c_data, cmd->c_datalen,
	    cmd->c_blklen, poll);

	mutex_enter(&sc->sc_intr_lock);

	/* Translate sdmmc response flags into CMD register bits. */
	if (cmd->c_opcode == 0)
		cmdval |= SUNXI_MMC_CMD_SEND_INIT_SEQ;
	if (cmd->c_flags & SCF_RSP_PRESENT)
		cmdval |= SUNXI_MMC_CMD_RSP_EXP;
	if (cmd->c_flags & SCF_RSP_136)
		cmdval |= SUNXI_MMC_CMD_LONG_RSP;
	if (cmd->c_flags & SCF_RSP_CRC)
		cmdval |= SUNXI_MMC_CMD_CHECK_RSP_CRC;

	/*
	 * Build this command's interrupt mask on top of whatever is
	 * already enabled; the original mask (oimask) is restored at
	 * "done" below.
	 */
	imask = oimask = MMC_READ(sc, SUNXI_MMC_IMASK);
	imask |= SUNXI_MMC_INT_ERROR;

	if (cmd->c_datalen > 0) {
		unsigned int nblks;

		cmdval |= SUNXI_MMC_CMD_DATA_EXP | SUNXI_MMC_CMD_WAIT_PRE_OVER;
		if (!ISSET(cmd->c_flags, SCF_CMD_READ)) {
			cmdval |= SUNXI_MMC_CMD_WRITE;
		}

		/* Round a partial trailing block up to a full block. */
		nblks = cmd->c_datalen / cmd->c_blklen;
		if (nblks == 0 || (cmd->c_datalen % cmd->c_blklen) != 0)
			++nblks;

		/*
		 * Multi-block transfers use the controller's automatic
		 * stop command and complete with AUTO_CMD_DONE;
		 * single-block transfers complete with DATA_OVER.
		 */
		if (nblks > 1) {
			cmdval |= SUNXI_MMC_CMD_SEND_AUTO_STOP;
			imask |= SUNXI_MMC_INT_AUTO_CMD_DONE;
		} else {
			imask |= SUNXI_MMC_INT_DATA_OVER;
		}

		MMC_WRITE(sc, SUNXI_MMC_BLKSZ, cmd->c_blklen);
		MMC_WRITE(sc, SUNXI_MMC_BYTECNT, nblks * cmd->c_blklen);
	} else {
		imask |= SUNXI_MMC_INT_CMD_DONE;
	}

	/* Arm the interrupt mask and clear any stale raw status bits. */
	MMC_WRITE(sc, SUNXI_MMC_IMASK, imask);
	MMC_WRITE(sc, SUNXI_MMC_RINT, 0xffff);

	sc->sc_intr_rint = 0;

	/* A12A: 0 when the auto stop command is in use, 0xffff otherwise. */
	MMC_WRITE(sc, SUNXI_MMC_A12A,
	    (cmdval & SUNXI_MMC_CMD_SEND_AUTO_STOP) ? 0 : 0xffff);

	MMC_WRITE(sc, SUNXI_MMC_ARG, cmd->c_arg);

	DPRINTF(sc->sc_dev, "cmdval = %08x\n", cmdval);

	if (cmd->c_datalen == 0) {
		/* No data phase: just fire the command. */
		MMC_WRITE(sc, SUNXI_MMC_CMD, cmdval | cmd->c_opcode);
	} else {
		cmd->c_resid = cmd->c_datalen;
		/*
		 * NOTE(review): the command is started even if
		 * sunxi_mmc_dma_prepare() fails, and cmd->c_error is
		 * unconditionally overwritten by the wait_rint call
		 * below, so a prepare failure can be masked -- verify
		 * intended behavior.
		 */
		cmd->c_error = sunxi_mmc_dma_prepare(sc, cmd);
		MMC_WRITE(sc, SUNXI_MMC_CMD, cmdval | cmd->c_opcode);
		if (cmd->c_error == 0 && ISSET(cmd->c_flags, SCF_CMD_READ)) {
			const uint32_t idst_mask = SUNXI_MMC_IDST_RECEIVE_INT;

			/*
			 * Wait for the IDMA receive interrupt posted in
			 * sc_idma_idst; give up after 10 one-second
			 * cv_timedwait rounds.
			 */
			retry = 10;
			while ((sc->sc_idma_idst & idst_mask) == 0) {
				if (retry-- == 0) {
					cmd->c_error = ETIMEDOUT;
					break;
				}
				cv_timedwait(&sc->sc_idst_cv,
				    &sc->sc_intr_lock, hz);
			}
		}
	}

	/* Wait for command completion or a controller error. */
	cmd->c_error = sunxi_mmc_wait_rint(sc,
	    SUNXI_MMC_INT_ERROR|SUNXI_MMC_INT_CMD_DONE, hz * 3, poll);
	if (cmd->c_error == 0 && (sc->sc_intr_rint & SUNXI_MMC_INT_ERROR)) {
		if (sc->sc_intr_rint & SUNXI_MMC_INT_RESP_TIMEOUT) {
			cmd->c_error = ETIMEDOUT;
		} else {
			cmd->c_error = EIO;
		}
	}
	if (cmd->c_error) {
		DPRINTF(sc->sc_dev,
		    "cmd failed, error %d\n", cmd->c_error);
		goto done;
	}

	if (cmd->c_datalen > 0) {
		sunxi_mmc_dma_complete(sc, cmd);

		/* Wait for the data phase to finish as well. */
		cmd->c_error = sunxi_mmc_wait_rint(sc,
		    SUNXI_MMC_INT_ERROR|
		    SUNXI_MMC_INT_AUTO_CMD_DONE|
		    SUNXI_MMC_INT_DATA_OVER,
		    hz*3, poll);
		if (cmd->c_error == 0 &&
		    (sc->sc_intr_rint & SUNXI_MMC_INT_ERROR)) {
			cmd->c_error = ETIMEDOUT;
		}
		if (cmd->c_error) {
			DPRINTF(sc->sc_dev,
			    "data timeout, rint = %08x\n",
			    sc->sc_intr_rint);
			/* Any data-phase failure is reported as a timeout. */
			cmd->c_error = ETIMEDOUT;
			goto done;
		}
	}

	if (cmd->c_flags & SCF_RSP_PRESENT) {
		if (cmd->c_flags & SCF_RSP_136) {
			cmd->c_resp[0] = MMC_READ(sc, SUNXI_MMC_RESP0);
			cmd->c_resp[1] = MMC_READ(sc, SUNXI_MMC_RESP1);
			cmd->c_resp[2] = MMC_READ(sc, SUNXI_MMC_RESP2);
			cmd->c_resp[3] = MMC_READ(sc, SUNXI_MMC_RESP3);
			/*
			 * For CRC-checked long responses, shift the
			 * whole 128-bit value right by 8 bits across
			 * the four response words.
			 */
			if (cmd->c_flags & SCF_RSP_CRC) {
				cmd->c_resp[0] = (cmd->c_resp[0] >> 8) |
				    (cmd->c_resp[1] << 24);
				cmd->c_resp[1] = (cmd->c_resp[1] >> 8) |
				    (cmd->c_resp[2] << 24);
				cmd->c_resp[2] = (cmd->c_resp[2] >> 8) |
				    (cmd->c_resp[3] << 24);
				cmd->c_resp[3] = (cmd->c_resp[3] >> 8);
			}
		} else {
			cmd->c_resp[0] = MMC_READ(sc, SUNXI_MMC_RESP0);
		}
	}

done:
	cmd->c_flags |= SCF_ITSDONE;
	/* Restore the caller's interrupt mask and ack all status bits. */
	MMC_WRITE(sc, SUNXI_MMC_IMASK, oimask);
	MMC_WRITE(sc, SUNXI_MMC_RINT, 0xffff);
	MMC_WRITE(sc, SUNXI_MMC_IDST, 0x337);
	mutex_exit(&sc->sc_intr_lock);

	if (cmd->c_error) {
		/*
		 * On error, reset the DMA engine and FIFO, wait (up to
		 * 10ms) for the reset bits to self-clear, and
		 * re-program the card clock.
		 */
		DPRINTF(sc->sc_dev, "i/o error %d\n", cmd->c_error);
		MMC_WRITE(sc, SUNXI_MMC_GCTRL,
		    MMC_READ(sc, SUNXI_MMC_GCTRL) |
		    SUNXI_MMC_GCTRL_DMARESET | SUNXI_MMC_GCTRL_FIFORESET);
		for (retry = 0; retry < 1000; retry++) {
			if (!(MMC_READ(sc, SUNXI_MMC_GCTRL) & SUNXI_MMC_GCTRL_RESET))
				break;
			delay(10);
		}
		sunxi_mmc_update_clock(sc);
	}

	MMC_WRITE(sc, SUNXI_MMC_GCTRL,
	    MMC_READ(sc, SUNXI_MMC_GCTRL) | SUNXI_MMC_GCTRL_FIFORESET);
}
1242
1243 static void
1244 sunxi_mmc_card_enable_intr(sdmmc_chipset_handle_t sch, int enable)
1245 {
1246 struct sunxi_mmc_softc *sc = sch;
1247 uint32_t imask;
1248
1249 imask = MMC_READ(sc, SUNXI_MMC_IMASK);
1250 if (enable)
1251 imask |= SUNXI_MMC_INT_SDIO_INT;
1252 else
1253 imask &= ~SUNXI_MMC_INT_SDIO_INT;
1254 MMC_WRITE(sc, SUNXI_MMC_IMASK, imask);
1255 }
1256
1257 static void
1258 sunxi_mmc_card_intr_ack(sdmmc_chipset_handle_t sch)
1259 {
1260 struct sunxi_mmc_softc *sc = sch;
1261
1262 MMC_WRITE(sc, SUNXI_MMC_RINT, SUNXI_MMC_INT_SDIO_INT);
1263 }
1264