sdmmc_mem.c revision 1.67 1 /* $NetBSD: sdmmc_mem.c,v 1.67 2019/05/28 00:25:27 jmcneill Exp $ */
2 /* $OpenBSD: sdmmc_mem.c,v 1.10 2009/01/09 10:55:22 jsg Exp $ */
3
4 /*
5 * Copyright (c) 2006 Uwe Stuehler <uwe (at) openbsd.org>
6 *
7 * Permission to use, copy, modify, and distribute this software for any
8 * purpose with or without fee is hereby granted, provided that the above
9 * copyright notice and this permission notice appear in all copies.
10 *
11 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18 */
19
20 /*-
21 * Copyright (C) 2007, 2008, 2009, 2010 NONAKA Kimihiro <nonaka (at) netbsd.org>
22 * All rights reserved.
23 *
24 * Redistribution and use in source and binary forms, with or without
25 * modification, are permitted provided that the following conditions
26 * are met:
27 * 1. Redistributions of source code must retain the above copyright
28 * notice, this list of conditions and the following disclaimer.
29 * 2. Redistributions in binary form must reproduce the above copyright
30 * notice, this list of conditions and the following disclaimer in the
31 * documentation and/or other materials provided with the distribution.
32 *
33 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
34 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
35 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
36 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
37 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
38 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
39 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
40 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
41 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
42 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
43 */
44
45 /* Routines for SD/MMC memory cards. */
46
47 #include <sys/cdefs.h>
48 __KERNEL_RCSID(0, "$NetBSD: sdmmc_mem.c,v 1.67 2019/05/28 00:25:27 jmcneill Exp $");
49
50 #ifdef _KERNEL_OPT
51 #include "opt_sdmmc.h"
52 #endif
53
54 #include <sys/param.h>
55 #include <sys/kernel.h>
56 #include <sys/malloc.h>
57 #include <sys/systm.h>
58 #include <sys/device.h>
59 #include <sys/bitops.h>
60 #include <sys/evcnt.h>
61
62 #include <dev/sdmmc/sdmmcchip.h>
63 #include <dev/sdmmc/sdmmcreg.h>
64 #include <dev/sdmmc/sdmmcvar.h>
65
66 #ifdef SDMMC_DEBUG
67 #define DPRINTF(s) do { printf s; } while (/*CONSTCOND*/0)
68 #else
69 #define DPRINTF(s) do {} while (/*CONSTCOND*/0)
70 #endif
71
72 typedef struct { uint32_t _bits[512/32]; } __packed __aligned(4) sdmmc_bitfield512_t;
73
74 static int sdmmc_mem_sd_init(struct sdmmc_softc *, struct sdmmc_function *);
75 static int sdmmc_mem_mmc_init(struct sdmmc_softc *, struct sdmmc_function *);
76 static int sdmmc_mem_send_cid(struct sdmmc_softc *, sdmmc_response *);
77 static int sdmmc_mem_send_csd(struct sdmmc_softc *, struct sdmmc_function *,
78 sdmmc_response *);
79 static int sdmmc_mem_send_scr(struct sdmmc_softc *, struct sdmmc_function *,
80 uint32_t *scr);
81 static int sdmmc_mem_decode_scr(struct sdmmc_softc *, struct sdmmc_function *);
82 static int sdmmc_mem_send_ssr(struct sdmmc_softc *, struct sdmmc_function *,
83 sdmmc_bitfield512_t *);
84 static int sdmmc_mem_decode_ssr(struct sdmmc_softc *, struct sdmmc_function *,
85 sdmmc_bitfield512_t *);
86 static int sdmmc_mem_send_cxd_data(struct sdmmc_softc *, int, void *, size_t);
87 static int sdmmc_set_bus_width(struct sdmmc_function *, int);
88 static int sdmmc_mem_sd_switch(struct sdmmc_function *, int, int, int, sdmmc_bitfield512_t *);
89 static int sdmmc_mem_mmc_switch(struct sdmmc_function *, uint8_t, uint8_t,
90 uint8_t, bool);
91 static int sdmmc_mem_signal_voltage(struct sdmmc_softc *, int);
92 static int sdmmc_mem_spi_read_ocr(struct sdmmc_softc *, uint32_t, uint32_t *);
93 static int sdmmc_mem_single_read_block(struct sdmmc_function *, uint32_t,
94 u_char *, size_t);
95 static int sdmmc_mem_single_write_block(struct sdmmc_function *, uint32_t,
96 u_char *, size_t);
97 static int sdmmc_mem_single_segment_dma_read_block(struct sdmmc_function *,
98 uint32_t, u_char *, size_t);
99 static int sdmmc_mem_single_segment_dma_write_block(struct sdmmc_function *,
100 uint32_t, u_char *, size_t);
101 static int sdmmc_mem_read_block_subr(struct sdmmc_function *, bus_dmamap_t,
102 uint32_t, u_char *, size_t);
103 static int sdmmc_mem_write_block_subr(struct sdmmc_function *, bus_dmamap_t,
104 uint32_t, u_char *, size_t);
105
/*
 * SD CMD6 (SWITCH_FUNC) group 1 bus speed modes, indexed by the
 * SD_ACCESS_MODE_* code.  "v" is the host capability flag (SMC_CAPS_*)
 * required to select the mode (0 = always available) and "freq" the
 * transfer rate used to update sf->csd.tran_speed after a switch.
 */
static const struct {
	const char *name;
	int v;
	int freq;
} switch_group0_functions[] = {
	/* Default/SDR12 */
	{ "Default/SDR12", 0, 25000 },

	/* High-Speed/SDR25 */
	{ "High-Speed/SDR25", SMC_CAPS_SD_HIGHSPEED, 50000 },

	/* SDR50 */
	{ "SDR50", SMC_CAPS_UHS_SDR50, 100000 },

	/* SDR104 */
	{ "SDR104", SMC_CAPS_UHS_SDR104, 208000 },

	/* DDR50 */
	{ "DDR50", SMC_CAPS_UHS_DDR50, 50000 },
};
126
127 /*
128 * Initialize SD/MMC memory cards and memory in SDIO "combo" cards.
129 */
130 int
131 sdmmc_mem_enable(struct sdmmc_softc *sc)
132 {
133 uint32_t host_ocr;
134 uint32_t card_ocr;
135 uint32_t new_ocr;
136 uint32_t ocr = 0;
137 int error;
138
139 SDMMC_LOCK(sc);
140
141 /* Set host mode to SD "combo" card or SD memory-only. */
142 CLR(sc->sc_flags, SMF_UHS_MODE);
143 SET(sc->sc_flags, SMF_SD_MODE|SMF_MEM_MODE);
144
145 if (ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE))
146 sdmmc_spi_chip_initialize(sc->sc_spi_sct, sc->sc_sch);
147
148 /* Reset memory (*must* do that before CMD55 or CMD1). */
149 sdmmc_go_idle_state(sc);
150
151 if (ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
152 /* Check SD Ver.2 */
153 error = sdmmc_mem_send_if_cond(sc, 0x1aa, &card_ocr);
154 if (error == 0 && card_ocr == 0x1aa)
155 SET(ocr, MMC_OCR_HCS);
156 }
157
158 /*
159 * Read the SD/MMC memory OCR value by issuing CMD55 followed
160 * by ACMD41 to read the OCR value from memory-only SD cards.
161 * MMC cards will not respond to CMD55 or ACMD41 and this is
162 * how we distinguish them from SD cards.
163 */
164 mmc_mode:
165 error = sdmmc_mem_send_op_cond(sc,
166 ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE) ? ocr : 0, &card_ocr);
167 if (error) {
168 if (ISSET(sc->sc_flags, SMF_SD_MODE) &&
169 !ISSET(sc->sc_flags, SMF_IO_MODE)) {
170 /* Not a SD card, switch to MMC mode. */
171 DPRINTF(("%s: switch to MMC mode\n", SDMMCDEVNAME(sc)));
172 CLR(sc->sc_flags, SMF_SD_MODE);
173 goto mmc_mode;
174 }
175 if (!ISSET(sc->sc_flags, SMF_SD_MODE)) {
176 DPRINTF(("%s: couldn't read memory OCR\n",
177 SDMMCDEVNAME(sc)));
178 goto out;
179 } else {
180 /* Not a "combo" card. */
181 CLR(sc->sc_flags, SMF_MEM_MODE);
182 error = 0;
183 goto out;
184 }
185 }
186 if (ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
187 /* get card OCR */
188 error = sdmmc_mem_spi_read_ocr(sc, ocr, &card_ocr);
189 if (error) {
190 DPRINTF(("%s: couldn't read SPI memory OCR\n",
191 SDMMCDEVNAME(sc)));
192 goto out;
193 }
194 }
195
196 /* Set the lowest voltage supported by the card and host. */
197 host_ocr = sdmmc_chip_host_ocr(sc->sc_sct, sc->sc_sch);
198 error = sdmmc_set_bus_power(sc, host_ocr, card_ocr);
199 if (error) {
200 DPRINTF(("%s: couldn't supply voltage requested by card\n",
201 SDMMCDEVNAME(sc)));
202 goto out;
203 }
204
205 DPRINTF(("%s: host_ocr 0x%08x\n", SDMMCDEVNAME(sc), host_ocr));
206 DPRINTF(("%s: card_ocr 0x%08x\n", SDMMCDEVNAME(sc), card_ocr));
207
208 host_ocr &= card_ocr; /* only allow the common voltages */
209 if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
210 if (ISSET(sc->sc_flags, SMF_SD_MODE)) {
211 /* Tell the card(s) to enter the idle state (again). */
212 sdmmc_go_idle_state(sc);
213 /* Check SD Ver.2 */
214 error = sdmmc_mem_send_if_cond(sc, 0x1aa, &card_ocr);
215 if (error == 0 && card_ocr == 0x1aa)
216 SET(ocr, MMC_OCR_HCS);
217
218 if (sdmmc_chip_host_ocr(sc->sc_sct, sc->sc_sch) & MMC_OCR_S18A)
219 SET(ocr, MMC_OCR_S18A);
220 } else {
221 SET(ocr, MMC_OCR_ACCESS_MODE_SECTOR);
222 }
223 }
224 host_ocr |= ocr;
225
226 /* Send the new OCR value until all cards are ready. */
227 error = sdmmc_mem_send_op_cond(sc, host_ocr, &new_ocr);
228 if (error) {
229 DPRINTF(("%s: couldn't send memory OCR\n", SDMMCDEVNAME(sc)));
230 goto out;
231 }
232
233 if (ISSET(sc->sc_flags, SMF_SD_MODE) && ISSET(new_ocr, MMC_OCR_S18A)) {
234 /*
235 * Card and host support low voltage mode, begin switch
236 * sequence.
237 */
238 struct sdmmc_command cmd;
239 memset(&cmd, 0, sizeof(cmd));
240 cmd.c_arg = 0;
241 cmd.c_flags = SCF_CMD_AC | SCF_RSP_R1;
242 cmd.c_opcode = SD_VOLTAGE_SWITCH;
243 DPRINTF(("%s: switching card to 1.8V\n", SDMMCDEVNAME(sc)));
244 error = sdmmc_mmc_command(sc, &cmd);
245 if (error) {
246 DPRINTF(("%s: voltage switch command failed\n",
247 SDMMCDEVNAME(sc)));
248 goto out;
249 }
250
251 error = sdmmc_mem_signal_voltage(sc, SDMMC_SIGNAL_VOLTAGE_180);
252 if (error)
253 goto out;
254
255 SET(sc->sc_flags, SMF_UHS_MODE);
256 }
257
258 out:
259 SDMMC_UNLOCK(sc);
260
261 if (error)
262 printf("%s: %s failed with error %d\n", SDMMCDEVNAME(sc),
263 __func__, error);
264
265 return error;
266 }
267
/*
 * Host side of the signaling voltage switch sequence: stop the card
 * clock, change the host signal voltage, then restart the clock at
 * the SDR12 rate (25000).  The delays bracket the switch as required
 * by the card; exact values are inherited from the original sequence.
 * Returns 0 on success or the error from the chip layer.
 */
static int
sdmmc_mem_signal_voltage(struct sdmmc_softc *sc, int signal_voltage)
{
	int error;

	/*
	 * Stop the clock
	 */
	error = sdmmc_chip_bus_clock(sc->sc_sct, sc->sc_sch,
	    SDMMC_SDCLK_OFF, false);
	if (error)
		goto out;

	delay(1000);

	/*
	 * Card switch command was successful, update host controller
	 * signal voltage setting.
	 */
	DPRINTF(("%s: switching host to %s\n", SDMMCDEVNAME(sc),
	    signal_voltage == SDMMC_SIGNAL_VOLTAGE_180 ? "1.8V" : "3.3V"));
	error = sdmmc_chip_signal_voltage(sc->sc_sct,
	    sc->sc_sch, signal_voltage);
	if (error)
		goto out;

	delay(5000);

	/*
	 * Switch to SDR12 timing
	 */
	error = sdmmc_chip_bus_clock(sc->sc_sct, sc->sc_sch, 25000,
	    false);
	if (error)
		goto out;

	delay(1000);

out:
	return error;
}
309
310 /*
311 * Read the CSD and CID from all cards and assign each card a unique
312 * relative card address (RCA). CMD2 is ignored by SDIO-only cards.
313 */
314 void
315 sdmmc_mem_scan(struct sdmmc_softc *sc)
316 {
317 sdmmc_response resp;
318 struct sdmmc_function *sf;
319 uint16_t next_rca;
320 int error;
321 int retry;
322
323 SDMMC_LOCK(sc);
324
325 /*
326 * CMD2 is a broadcast command understood by SD cards and MMC
327 * cards. All cards begin to respond to the command, but back
328 * off if another card drives the CMD line to a different level.
329 * Only one card will get its entire response through. That
330 * card remains silent once it has been assigned a RCA.
331 */
332 for (retry = 0; retry < 100; retry++) {
333 error = sdmmc_mem_send_cid(sc, &resp);
334 if (error) {
335 if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE) &&
336 error == ETIMEDOUT) {
337 /* No more cards there. */
338 break;
339 }
340 DPRINTF(("%s: couldn't read CID\n", SDMMCDEVNAME(sc)));
341 break;
342 }
343
344 /* In MMC mode, find the next available RCA. */
345 next_rca = 1;
346 if (!ISSET(sc->sc_flags, SMF_SD_MODE)) {
347 SIMPLEQ_FOREACH(sf, &sc->sf_head, sf_list)
348 next_rca++;
349 }
350
351 /* Allocate a sdmmc_function structure. */
352 sf = sdmmc_function_alloc(sc);
353 sf->rca = next_rca;
354
355 /*
356 * Remember the CID returned in the CMD2 response for
357 * later decoding.
358 */
359 memcpy(sf->raw_cid, resp, sizeof(sf->raw_cid));
360
361 /*
362 * Silence the card by assigning it a unique RCA, or
363 * querying it for its RCA in the case of SD.
364 */
365 if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
366 if (sdmmc_set_relative_addr(sc, sf) != 0) {
367 aprint_error_dev(sc->sc_dev,
368 "couldn't set mem RCA\n");
369 sdmmc_function_free(sf);
370 break;
371 }
372 }
373
374 /*
375 * If this is a memory-only card, the card responding
376 * first becomes an alias for SDIO function 0.
377 */
378 if (sc->sc_fn0 == NULL)
379 sc->sc_fn0 = sf;
380
381 SIMPLEQ_INSERT_TAIL(&sc->sf_head, sf, sf_list);
382
383 /* only one function in SPI mode */
384 if (ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE))
385 break;
386 }
387
388 if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE))
389 /* Go to Data Transfer Mode, if possible. */
390 sdmmc_chip_bus_rod(sc->sc_sct, sc->sc_sch, 0);
391
392 /*
393 * All cards are either inactive or awaiting further commands.
394 * Read the CSDs and decode the raw CID for each card.
395 */
396 SIMPLEQ_FOREACH(sf, &sc->sf_head, sf_list) {
397 error = sdmmc_mem_send_csd(sc, sf, &resp);
398 if (error) {
399 SET(sf->flags, SFF_ERROR);
400 continue;
401 }
402
403 if (sdmmc_decode_csd(sc, resp, sf) != 0 ||
404 sdmmc_decode_cid(sc, sf->raw_cid, sf) != 0) {
405 SET(sf->flags, SFF_ERROR);
406 continue;
407 }
408
409 #ifdef SDMMC_DEBUG
410 printf("%s: CID: ", SDMMCDEVNAME(sc));
411 sdmmc_print_cid(&sf->cid);
412 #endif
413 }
414
415 SDMMC_UNLOCK(sc);
416 }
417
418 int
419 sdmmc_decode_csd(struct sdmmc_softc *sc, sdmmc_response resp,
420 struct sdmmc_function *sf)
421 {
422 /* TRAN_SPEED(2:0): transfer rate exponent */
423 static const int speed_exponent[8] = {
424 100 * 1, /* 100 Kbits/s */
425 1 * 1000, /* 1 Mbits/s */
426 10 * 1000, /* 10 Mbits/s */
427 100 * 1000, /* 100 Mbits/s */
428 0,
429 0,
430 0,
431 0,
432 };
433 /* TRAN_SPEED(6:3): time mantissa */
434 static const int speed_mantissa[16] = {
435 0, 10, 12, 13, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60, 70, 80,
436 };
437 struct sdmmc_csd *csd = &sf->csd;
438 int e, m;
439
440 if (ISSET(sc->sc_flags, SMF_SD_MODE)) {
441 /*
442 * CSD version 1.0 corresponds to SD system
443 * specification version 1.0 - 1.10. (SanDisk, 3.5.3)
444 */
445 csd->csdver = SD_CSD_CSDVER(resp);
446 switch (csd->csdver) {
447 case SD_CSD_CSDVER_2_0:
448 DPRINTF(("%s: SD Ver.2.0\n", SDMMCDEVNAME(sc)));
449 SET(sf->flags, SFF_SDHC);
450 csd->capacity = SD_CSD_V2_CAPACITY(resp);
451 csd->read_bl_len = SD_CSD_V2_BL_LEN;
452 break;
453
454 case SD_CSD_CSDVER_1_0:
455 DPRINTF(("%s: SD Ver.1.0\n", SDMMCDEVNAME(sc)));
456 csd->capacity = SD_CSD_CAPACITY(resp);
457 csd->read_bl_len = SD_CSD_READ_BL_LEN(resp);
458 break;
459
460 default:
461 aprint_error_dev(sc->sc_dev,
462 "unknown SD CSD structure version 0x%x\n",
463 csd->csdver);
464 return 1;
465 }
466
467 csd->mmcver = SD_CSD_MMCVER(resp);
468 csd->write_bl_len = SD_CSD_WRITE_BL_LEN(resp);
469 csd->r2w_factor = SD_CSD_R2W_FACTOR(resp);
470 e = SD_CSD_SPEED_EXP(resp);
471 m = SD_CSD_SPEED_MANT(resp);
472 csd->tran_speed = speed_exponent[e] * speed_mantissa[m] / 10;
473 csd->ccc = SD_CSD_CCC(resp);
474 } else {
475 csd->csdver = MMC_CSD_CSDVER(resp);
476 if (csd->csdver == MMC_CSD_CSDVER_1_0) {
477 aprint_error_dev(sc->sc_dev,
478 "unknown MMC CSD structure version 0x%x\n",
479 csd->csdver);
480 return 1;
481 }
482
483 csd->mmcver = MMC_CSD_MMCVER(resp);
484 csd->capacity = MMC_CSD_CAPACITY(resp);
485 csd->read_bl_len = MMC_CSD_READ_BL_LEN(resp);
486 csd->write_bl_len = MMC_CSD_WRITE_BL_LEN(resp);
487 csd->r2w_factor = MMC_CSD_R2W_FACTOR(resp);
488 e = MMC_CSD_TRAN_SPEED_EXP(resp);
489 m = MMC_CSD_TRAN_SPEED_MANT(resp);
490 csd->tran_speed = speed_exponent[e] * speed_mantissa[m] / 10;
491 }
492 if ((1 << csd->read_bl_len) > SDMMC_SECTOR_SIZE)
493 csd->capacity *= (1 << csd->read_bl_len) / SDMMC_SECTOR_SIZE;
494
495 #ifdef SDMMC_DUMP_CSD
496 sdmmc_print_csd(resp, csd);
497 #endif
498
499 return 0;
500 }
501
502 int
503 sdmmc_decode_cid(struct sdmmc_softc *sc, sdmmc_response resp,
504 struct sdmmc_function *sf)
505 {
506 struct sdmmc_cid *cid = &sf->cid;
507
508 if (ISSET(sc->sc_flags, SMF_SD_MODE)) {
509 cid->mid = SD_CID_MID(resp);
510 cid->oid = SD_CID_OID(resp);
511 SD_CID_PNM_CPY(resp, cid->pnm);
512 cid->rev = SD_CID_REV(resp);
513 cid->psn = SD_CID_PSN(resp);
514 cid->mdt = SD_CID_MDT(resp);
515 } else {
516 switch(sf->csd.mmcver) {
517 case MMC_CSD_MMCVER_1_0:
518 case MMC_CSD_MMCVER_1_4:
519 cid->mid = MMC_CID_MID_V1(resp);
520 MMC_CID_PNM_V1_CPY(resp, cid->pnm);
521 cid->rev = MMC_CID_REV_V1(resp);
522 cid->psn = MMC_CID_PSN_V1(resp);
523 cid->mdt = MMC_CID_MDT_V1(resp);
524 break;
525 case MMC_CSD_MMCVER_2_0:
526 case MMC_CSD_MMCVER_3_1:
527 case MMC_CSD_MMCVER_4_0:
528 cid->mid = MMC_CID_MID_V2(resp);
529 cid->oid = MMC_CID_OID_V2(resp);
530 MMC_CID_PNM_V2_CPY(resp, cid->pnm);
531 cid->psn = MMC_CID_PSN_V2(resp);
532 break;
533 default:
534 aprint_error_dev(sc->sc_dev, "unknown MMC version %d\n",
535 sf->csd.mmcver);
536 return 1;
537 }
538 }
539 return 0;
540 }
541
/*
 * Print the decoded CID fields (manufacturer id, OEM id, product name,
 * revision, serial number, manufacturing date) on a single line.
 */
void
sdmmc_print_cid(struct sdmmc_cid *cid)
{

	printf("mid=0x%02x oid=0x%04x pnm=\"%s\" rev=0x%02x psn=0x%08x"
	    " mdt=%03x\n", cid->mid, cid->oid, cid->pnm, cid->rev, cid->psn,
	    cid->mdt);
}
550
#ifdef SDMMC_DUMP_CSD
/*
 * Debug helper: dump every decoded CSD field.  Compiled in only when
 * SDMMC_DUMP_CSD is defined (called from sdmmc_decode_csd).
 */
void
sdmmc_print_csd(sdmmc_response resp, struct sdmmc_csd *csd)
{

	printf("csdver = %d\n", csd->csdver);
	printf("mmcver = %d\n", csd->mmcver);
	printf("capacity = 0x%08x\n", csd->capacity);
	printf("read_bl_len = %d\n", csd->read_bl_len);
	printf("write_bl_len = %d\n", csd->write_bl_len);
	printf("r2w_factor = %d\n", csd->r2w_factor);
	printf("tran_speed = %d\n", csd->tran_speed);
	printf("ccc = 0x%x\n", csd->ccc);
}
#endif
566
567 /*
568 * Initialize a SD/MMC memory card.
569 */
570 int
571 sdmmc_mem_init(struct sdmmc_softc *sc, struct sdmmc_function *sf)
572 {
573 int error = 0;
574
575 SDMMC_LOCK(sc);
576
577 if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
578 error = sdmmc_select_card(sc, sf);
579 if (error)
580 goto out;
581 }
582
583 error = sdmmc_mem_set_blocklen(sc, sf, SDMMC_SECTOR_SIZE);
584 if (error)
585 goto out;
586
587 if (ISSET(sc->sc_flags, SMF_SD_MODE))
588 error = sdmmc_mem_sd_init(sc, sf);
589 else
590 error = sdmmc_mem_mmc_init(sc, sf);
591
592 if (error != 0)
593 SET(sf->flags, SFF_ERROR);
594
595 out:
596 SDMMC_UNLOCK(sc);
597
598 return error;
599 }
600
601 /*
602 * Get or set the card's memory OCR value (SD or MMC).
603 */
604 int
605 sdmmc_mem_send_op_cond(struct sdmmc_softc *sc, uint32_t ocr, uint32_t *ocrp)
606 {
607 struct sdmmc_command cmd;
608 int error;
609 int retry;
610
611 /* Don't lock */
612
613 DPRINTF(("%s: sdmmc_mem_send_op_cond: ocr=%#x\n",
614 SDMMCDEVNAME(sc), ocr));
615
616 /*
617 * If we change the OCR value, retry the command until the OCR
618 * we receive in response has the "CARD BUSY" bit set, meaning
619 * that all cards are ready for identification.
620 */
621 for (retry = 0; retry < 100; retry++) {
622 memset(&cmd, 0, sizeof(cmd));
623 cmd.c_arg = !ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE) ?
624 ocr : (ocr & MMC_OCR_HCS);
625 cmd.c_flags = SCF_CMD_BCR | SCF_RSP_R3 | SCF_RSP_SPI_R1
626 | SCF_TOUT_OK;
627
628 if (ISSET(sc->sc_flags, SMF_SD_MODE)) {
629 cmd.c_opcode = SD_APP_OP_COND;
630 error = sdmmc_app_command(sc, NULL, &cmd);
631 } else {
632 cmd.c_opcode = MMC_SEND_OP_COND;
633 error = sdmmc_mmc_command(sc, &cmd);
634 }
635 if (error)
636 break;
637
638 if (ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
639 if (!ISSET(MMC_SPI_R1(cmd.c_resp), R1_SPI_IDLE))
640 break;
641 } else {
642 if (ISSET(MMC_R3(cmd.c_resp), MMC_OCR_MEM_READY) ||
643 ocr == 0)
644 break;
645 }
646
647 error = ETIMEDOUT;
648 sdmmc_delay(10000);
649 }
650 if (ocrp != NULL) {
651 if (error == 0 &&
652 !ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
653 *ocrp = MMC_R3(cmd.c_resp);
654 } else {
655 *ocrp = ocr;
656 }
657 }
658 DPRINTF(("%s: sdmmc_mem_send_op_cond: error=%d, ocr=%#x\n",
659 SDMMCDEVNAME(sc), error, MMC_R3(cmd.c_resp)));
660 return error;
661 }
662
663 int
664 sdmmc_mem_send_if_cond(struct sdmmc_softc *sc, uint32_t ocr, uint32_t *ocrp)
665 {
666 struct sdmmc_command cmd;
667 int error;
668
669 /* Don't lock */
670
671 memset(&cmd, 0, sizeof(cmd));
672 cmd.c_arg = ocr;
673 cmd.c_flags = SCF_CMD_BCR | SCF_RSP_R7 | SCF_RSP_SPI_R7;
674 cmd.c_opcode = SD_SEND_IF_COND;
675
676 error = sdmmc_mmc_command(sc, &cmd);
677 if (error == 0 && ocrp != NULL) {
678 if (ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
679 *ocrp = MMC_SPI_R7(cmd.c_resp);
680 } else {
681 *ocrp = MMC_R7(cmd.c_resp);
682 }
683 DPRINTF(("%s: sdmmc_mem_send_if_cond: error=%d, ocr=%#x\n",
684 SDMMCDEVNAME(sc), error, *ocrp));
685 }
686 return error;
687 }
688
689 /*
690 * Set the read block length appropriately for this card, according to
691 * the card CSD register value.
692 */
693 int
694 sdmmc_mem_set_blocklen(struct sdmmc_softc *sc, struct sdmmc_function *sf,
695 int block_len)
696 {
697 struct sdmmc_command cmd;
698 int error;
699
700 /* Don't lock */
701
702 memset(&cmd, 0, sizeof(cmd));
703 cmd.c_opcode = MMC_SET_BLOCKLEN;
704 cmd.c_arg = block_len;
705 cmd.c_flags = SCF_CMD_AC | SCF_RSP_R1 | SCF_RSP_SPI_R1;
706
707 error = sdmmc_mmc_command(sc, &cmd);
708
709 DPRINTF(("%s: sdmmc_mem_set_blocklen: read_bl_len=%d sector_size=%d\n",
710 SDMMCDEVNAME(sc), 1 << sf->csd.read_bl_len, block_len));
711
712 return error;
713 }
714
715 /* make 512-bit BE quantity __bitfield()-compatible */
716 static void
717 sdmmc_be512_to_bitfield512(sdmmc_bitfield512_t *buf) {
718 size_t i;
719 uint32_t tmp0, tmp1;
720 const size_t bitswords = __arraycount(buf->_bits);
721 for (i = 0; i < bitswords/2; i++) {
722 tmp0 = buf->_bits[i];
723 tmp1 = buf->_bits[bitswords - 1 - i];
724 buf->_bits[i] = be32toh(tmp1);
725 buf->_bits[bitswords - 1 - i] = be32toh(tmp0);
726 }
727 }
728
729 static int
730 sdmmc_mem_select_transfer_mode(struct sdmmc_softc *sc, int support_func)
731 {
732 if (ISSET(sc->sc_flags, SMF_UHS_MODE)) {
733 if (ISSET(sc->sc_caps, SMC_CAPS_UHS_SDR104) &&
734 ISSET(support_func, 1 << SD_ACCESS_MODE_SDR104)) {
735 return SD_ACCESS_MODE_SDR104;
736 }
737 if (ISSET(sc->sc_caps, SMC_CAPS_UHS_DDR50) &&
738 ISSET(support_func, 1 << SD_ACCESS_MODE_DDR50)) {
739 return SD_ACCESS_MODE_DDR50;
740 }
741 if (ISSET(sc->sc_caps, SMC_CAPS_UHS_SDR50) &&
742 ISSET(support_func, 1 << SD_ACCESS_MODE_SDR50)) {
743 return SD_ACCESS_MODE_SDR50;
744 }
745 }
746 if (ISSET(sc->sc_caps, SMC_CAPS_SD_HIGHSPEED) &&
747 ISSET(support_func, 1 << SD_ACCESS_MODE_SDR25)) {
748 return SD_ACCESS_MODE_SDR25;
749 }
750 return SD_ACCESS_MODE_SDR12;
751 }
752
753 static int
754 sdmmc_mem_execute_tuning(struct sdmmc_softc *sc, struct sdmmc_function *sf)
755 {
756 int timing = -1;
757
758 if (ISSET(sc->sc_flags, SMF_SD_MODE)) {
759 if (!ISSET(sc->sc_flags, SMF_UHS_MODE))
760 return 0;
761
762 switch (sf->csd.tran_speed) {
763 case 100000:
764 timing = SDMMC_TIMING_UHS_SDR50;
765 break;
766 case 208000:
767 timing = SDMMC_TIMING_UHS_SDR104;
768 break;
769 default:
770 return 0;
771 }
772 } else {
773 switch (sf->csd.tran_speed) {
774 case 200000:
775 timing = SDMMC_TIMING_MMC_HS200;
776 break;
777 default:
778 return 0;
779 }
780 }
781
782 DPRINTF(("%s: execute tuning for timing %d\n", SDMMCDEVNAME(sc),
783 timing));
784
785 return sdmmc_chip_execute_tuning(sc->sc_sct, sc->sc_sch, timing);
786 }
787
/*
 * SD-specific card bring-up: set an initial bus clock, read and decode
 * the SCR, widen the bus to 4 bits when possible, use CMD6 to query
 * and select the fastest mutually supported group 1 speed mode, then
 * read the SD status (SSR) and run tuning for UHS modes.
 */
static int
sdmmc_mem_sd_init(struct sdmmc_softc *sc, struct sdmmc_function *sf)
{
	int support_func, best_func, bus_clock, error, i;
	sdmmc_bitfield512_t status;
	bool ddr = false;

	/* change bus clock */
	bus_clock = uimin(sc->sc_busclk, sf->csd.tran_speed);
	error = sdmmc_chip_bus_clock(sc->sc_sct, sc->sc_sch, bus_clock, false);
	if (error) {
		aprint_error_dev(sc->sc_dev, "can't change bus clock\n");
		return error;
	}

	error = sdmmc_mem_send_scr(sc, sf, sf->raw_scr);
	if (error) {
		aprint_error_dev(sc->sc_dev, "SD_SEND_SCR send failed.\n");
		return error;
	}
	error = sdmmc_mem_decode_scr(sc, sf);
	if (error)
		return error;

	/* Widen the bus if both host and card (per SCR) support 4-bit. */
	if (ISSET(sc->sc_caps, SMC_CAPS_4BIT_MODE) &&
	    ISSET(sf->scr.bus_width, SCR_SD_BUS_WIDTHS_4BIT)) {
		DPRINTF(("%s: change bus width\n", SDMMCDEVNAME(sc)));
		error = sdmmc_set_bus_width(sf, 4);
		if (error) {
			aprint_error_dev(sc->sc_dev,
			    "can't change bus width (%d bit)\n", 4);
			return error;
		}
		sf->width = 4;
	}

	best_func = 0;
	if (sf->scr.sd_spec >= SCR_SD_SPEC_VER_1_10 &&
	    ISSET(sf->csd.ccc, SD_CSD_CCC_SWITCH)) {
		/* CMD6 mode 0: query which group 1 functions are supported. */
		DPRINTF(("%s: switch func mode 0\n", SDMMCDEVNAME(sc)));
		error = sdmmc_mem_sd_switch(sf, 0, 1, 0, &status);
		if (error) {
			aprint_error_dev(sc->sc_dev,
			    "switch func mode 0 failed\n");
			return error;
		}

		support_func = SFUNC_STATUS_GROUP(&status, 1);

		/*
		 * 0x1c covers the UHS modes (SDR50/SDR104/DDR50 per the
		 * switch_group0_functions table indices).
		 */
		if (!ISSET(sc->sc_flags, SMF_UHS_MODE) && support_func & 0x1c) {
			/* XXX UHS-I card started in 1.8V mode, switch now */
			error = sdmmc_mem_signal_voltage(sc,
			    SDMMC_SIGNAL_VOLTAGE_180);
			if (error) {
				aprint_error_dev(sc->sc_dev,
				    "failed to recover UHS card\n");
				return error;
			}
			SET(sc->sc_flags, SMF_UHS_MODE);
		}

		for (i = 0; i < __arraycount(switch_group0_functions); i++) {
			if (!(support_func & (1 << i)))
				continue;
			DPRINTF(("%s: card supports mode %s\n",
			    SDMMCDEVNAME(sc),
			    switch_group0_functions[i].name));
		}

		best_func = sdmmc_mem_select_transfer_mode(sc, support_func);

		DPRINTF(("%s: using mode %s\n", SDMMCDEVNAME(sc),
		    switch_group0_functions[best_func].name));

		if (best_func != 0) {
			/* CMD6 mode 1: actually switch to the chosen mode. */
			DPRINTF(("%s: switch func mode 1(func=%d)\n",
			    SDMMCDEVNAME(sc), best_func));
			error =
			    sdmmc_mem_sd_switch(sf, 1, 1, best_func, &status);
			if (error) {
				aprint_error_dev(sc->sc_dev,
				    "switch func mode 1 failed:"
				    " group 1 function %d(0x%2x)\n",
				    best_func, support_func);
				return error;
			}
			sf->csd.tran_speed =
			    switch_group0_functions[best_func].freq;

			if (best_func == SD_ACCESS_MODE_DDR50)
				ddr = true;

			/* Wait 400KHz x 8 clock (2.5us * 8 + slop) */
			delay(25);
		}
	}

	/* update bus clock */
	if (sc->sc_busclk > sf->csd.tran_speed)
		sc->sc_busclk = sf->csd.tran_speed;
	if (sc->sc_busclk == bus_clock && sc->sc_busddr == ddr)
		return 0;

	/* change bus clock */
	error = sdmmc_chip_bus_clock(sc->sc_sct, sc->sc_sch, sc->sc_busclk,
	    ddr);
	if (error) {
		aprint_error_dev(sc->sc_dev, "can't change bus clock\n");
		return error;
	}

	sc->sc_transfer_mode = switch_group0_functions[best_func].name;
	sc->sc_busddr = ddr;

	/* get card status */
	error = sdmmc_mem_send_ssr(sc, sf, &status);
	if (error) {
		aprint_error_dev(sc->sc_dev, "can't get SD status: %d\n",
		    error);
		return error;
	}
	sdmmc_mem_decode_ssr(sc, sf, &status);

	/* execute tuning (UHS) */
	error = sdmmc_mem_execute_tuning(sc, sf);
	if (error) {
		aprint_error_dev(sc->sc_dev, "can't execute SD tuning\n");
		return error;
	}

	return 0;
}
920
/*
 * MMC/eMMC-specific card bring-up.  For MMC v4.0+ cards: read the
 * EXT_CSD, pick the fastest supported timing (HS200 > DDR52 > 52MHz
 * > 26MHz), widen the bus, switch the HS timing, optionally enter
 * DDR mode, derive the sector count, run HS200 tuning, and enable
 * the on-card cache when present.  Older cards only get their bus
 * clock adjusted.
 */
static int
sdmmc_mem_mmc_init(struct sdmmc_softc *sc, struct sdmmc_function *sf)
{
	int width, value, hs_timing, bus_clock, error;
	uint8_t ext_csd[512];
	uint32_t sectors = 0;
	bool ddr = false;

	sc->sc_transfer_mode = NULL;

	/* change bus clock */
	bus_clock = uimin(sc->sc_busclk, sf->csd.tran_speed);
	error = sdmmc_chip_bus_clock(sc->sc_sct, sc->sc_sch, bus_clock, false);
	if (error) {
		aprint_error_dev(sc->sc_dev, "can't change bus clock\n");
		return error;
	}

	if (sf->csd.mmcver >= MMC_CSD_MMCVER_4_0) {
		/* EXT_CSD is only defined for MMC v4.0 and later. */
		error = sdmmc_mem_send_cxd_data(sc,
		    MMC_SEND_EXT_CSD, ext_csd, sizeof(ext_csd));
		if (error) {
			aprint_error_dev(sc->sc_dev,
			    "can't read EXT_CSD (error=%d)\n", error);
			return error;
		}
		if ((sf->csd.csdver == MMC_CSD_CSDVER_EXT_CSD) &&
		    (ext_csd[EXT_CSD_STRUCTURE] > EXT_CSD_STRUCTURE_VER_1_2)) {
			aprint_error_dev(sc->sc_dev,
			    "unrecognised future version (%d)\n",
			    ext_csd[EXT_CSD_STRUCTURE]);
			return ENOTSUP;
		}
		sf->ext_csd.rev = ext_csd[EXT_CSD_REV];

		/* Choose the best timing both card and host support. */
		if (ISSET(sc->sc_caps, SMC_CAPS_MMC_HS200) &&
		    ext_csd[EXT_CSD_CARD_TYPE] & EXT_CSD_CARD_TYPE_F_HS200_1_8V) {
			sf->csd.tran_speed = 200000;	/* 200MHz SDR */
			hs_timing = EXT_CSD_HS_TIMING_HS200;
		} else if (ISSET(sc->sc_caps, SMC_CAPS_MMC_DDR52) &&
		    ext_csd[EXT_CSD_CARD_TYPE] & EXT_CSD_CARD_TYPE_F_DDR52_1_8V) {
			sf->csd.tran_speed = 52000;	/* 52MHz */
			hs_timing = EXT_CSD_HS_TIMING_HIGHSPEED;
			ddr = true;
		} else if (ext_csd[EXT_CSD_CARD_TYPE] & EXT_CSD_CARD_TYPE_F_52M) {
			sf->csd.tran_speed = 52000;	/* 52MHz */
			hs_timing = EXT_CSD_HS_TIMING_HIGHSPEED;
		} else if (ext_csd[EXT_CSD_CARD_TYPE] & EXT_CSD_CARD_TYPE_F_26M) {
			sf->csd.tran_speed = 26000;	/* 26MHz */
			hs_timing = EXT_CSD_HS_TIMING_LEGACY;
		} else {
			aprint_error_dev(sc->sc_dev,
			    "unknown CARD_TYPE: 0x%x\n",
			    ext_csd[EXT_CSD_CARD_TYPE]);
			return ENOTSUP;
		}

		/* Widest bus the host supports. */
		if (ISSET(sc->sc_caps, SMC_CAPS_8BIT_MODE)) {
			width = 8;
			value = EXT_CSD_BUS_WIDTH_8;
		} else if (ISSET(sc->sc_caps, SMC_CAPS_4BIT_MODE)) {
			width = 4;
			value = EXT_CSD_BUS_WIDTH_4;
		} else {
			width = 1;
			value = EXT_CSD_BUS_WIDTH_1;
		}

		if (width != 1) {
			error = sdmmc_mem_mmc_switch(sf, EXT_CSD_CMD_SET_NORMAL,
			    EXT_CSD_BUS_WIDTH, value, false);
			if (error == 0)
				error = sdmmc_chip_bus_width(sc->sc_sct,
				    sc->sc_sch, width);
			else {
				DPRINTF(("%s: can't change bus width"
				    " (%d bit)\n", SDMMCDEVNAME(sc), width));
				return error;
			}

			/* XXXX: need bus test? (using by CMD14 & CMD19) */
			delay(10000);
		}
		sf->width = width;

		/* Fall back to legacy timing if the host lacks HS support. */
		if (hs_timing == EXT_CSD_HS_TIMING_HIGHSPEED &&
		    !ISSET(sc->sc_caps, SMC_CAPS_MMC_HIGHSPEED)) {
			hs_timing = EXT_CSD_HS_TIMING_LEGACY;
		}
		if (hs_timing != EXT_CSD_HS_TIMING_LEGACY) {
			error = sdmmc_mem_mmc_switch(sf, EXT_CSD_CMD_SET_NORMAL,
			    EXT_CSD_HS_TIMING, hs_timing, false);
			if (error) {
				aprint_error_dev(sc->sc_dev,
				    "can't change high speed %d, error %d\n",
				    hs_timing, error);
				return error;
			}
		}

		if (sc->sc_busclk > sf->csd.tran_speed)
			sc->sc_busclk = sf->csd.tran_speed;
		if (sc->sc_busclk != bus_clock) {
			error = sdmmc_chip_bus_clock(sc->sc_sct, sc->sc_sch,
			    sc->sc_busclk, false);
			if (error) {
				aprint_error_dev(sc->sc_dev,
				    "can't change bus clock\n");
				return error;
			}
		}

		/* Re-read EXT_CSD to verify the timing switch took effect. */
		if (hs_timing != EXT_CSD_HS_TIMING_LEGACY) {
			error = sdmmc_mem_send_cxd_data(sc,
			    MMC_SEND_EXT_CSD, ext_csd, sizeof(ext_csd));
			if (error) {
				aprint_error_dev(sc->sc_dev,
				    "can't re-read EXT_CSD\n");
				return error;
			}
			if (ext_csd[EXT_CSD_HS_TIMING] != hs_timing) {
				aprint_error_dev(sc->sc_dev,
				    "HS_TIMING set failed\n");
				return EINVAL;
			}
		}

		/*
		 * HS_TIMING must be set to 0x1 before setting BUS_WIDTH
		 * for dual data rate operation
		 */
		if (ddr &&
		    hs_timing == EXT_CSD_HS_TIMING_HIGHSPEED &&
		    width > 1) {
			error = sdmmc_mem_mmc_switch(sf,
			    EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BUS_WIDTH,
			    (width == 8) ? EXT_CSD_BUS_WIDTH_8_DDR :
			    EXT_CSD_BUS_WIDTH_4_DDR, false);
			if (error) {
				DPRINTF(("%s: can't switch to DDR"
				    " (%d bit)\n", SDMMCDEVNAME(sc), width));
				return error;
			}

			delay(10000);

			error = sdmmc_mem_signal_voltage(sc,
			    SDMMC_SIGNAL_VOLTAGE_180);
			if (error) {
				aprint_error_dev(sc->sc_dev,
				    "can't switch signaling voltage\n");
				return error;
			}

			error = sdmmc_chip_bus_clock(sc->sc_sct, sc->sc_sch,
			    sc->sc_busclk, ddr);
			if (error) {
				aprint_error_dev(sc->sc_dev,
				    "can't change bus clock\n");
				return error;
			}

			delay(10000);

			sc->sc_transfer_mode = "DDR52";
			sc->sc_busddr = ddr;
		}

		/* SEC_COUNT is a 32-bit little-endian field in EXT_CSD. */
		sectors = ext_csd[EXT_CSD_SEC_COUNT + 0] << 0 |
		    ext_csd[EXT_CSD_SEC_COUNT + 1] << 8 |
		    ext_csd[EXT_CSD_SEC_COUNT + 2] << 16 |
		    ext_csd[EXT_CSD_SEC_COUNT + 3] << 24;
		/* Cards over 2GB use sector addressing via SEC_COUNT. */
		if (sectors > (2u * 1024 * 1024 * 1024) / 512) {
			SET(sf->flags, SFF_SDHC);
			sf->csd.capacity = sectors;
		}

		if (hs_timing == EXT_CSD_HS_TIMING_HS200) {
			sc->sc_transfer_mode = "HS200";

			/* execute tuning (HS200) */
			error = sdmmc_mem_execute_tuning(sc, sf);
			if (error) {
				aprint_error_dev(sc->sc_dev,
				    "can't execute MMC tuning\n");
				return error;
			}
		}

		if (sf->ext_csd.rev >= 5) {
			sf->ext_csd.rst_n_function =
			    ext_csd[EXT_CSD_RST_N_FUNCTION];
		}

		if (sf->ext_csd.rev >= 6) {
			sf->ext_csd.cache_size =
			    le32dec(&ext_csd[EXT_CSD_CACHE_SIZE]) * 1024;
		}
		if (sf->ext_csd.cache_size > 0) {
			/* eMMC cache present, enable it */
			error = sdmmc_mem_mmc_switch(sf,
			    EXT_CSD_CMD_SET_NORMAL, EXT_CSD_CACHE_CTRL,
			    EXT_CSD_CACHE_CTRL_CACHE_EN, false);
			if (error) {
				aprint_error_dev(sc->sc_dev,
				    "can't enable cache: %d\n", error);
			} else {
				SET(sf->flags, SFF_CACHE_ENABLED);
			}
		}
	} else {
		/* Pre-v4.0 MMC: no EXT_CSD, just adjust the clock. */
		if (sc->sc_busclk > sf->csd.tran_speed)
			sc->sc_busclk = sf->csd.tran_speed;
		if (sc->sc_busclk != bus_clock) {
			error = sdmmc_chip_bus_clock(sc->sc_sct, sc->sc_sch,
			    sc->sc_busclk, false);
			if (error) {
				aprint_error_dev(sc->sc_dev,
				    "can't change bus clock\n");
				return error;
			}
		}
	}

	return 0;
}
1147
1148 static int
1149 sdmmc_mem_send_cid(struct sdmmc_softc *sc, sdmmc_response *resp)
1150 {
1151 struct sdmmc_command cmd;
1152 int error;
1153
1154 if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
1155 memset(&cmd, 0, sizeof cmd);
1156 cmd.c_opcode = MMC_ALL_SEND_CID;
1157 cmd.c_flags = SCF_CMD_BCR | SCF_RSP_R2 | SCF_TOUT_OK;
1158
1159 error = sdmmc_mmc_command(sc, &cmd);
1160 } else {
1161 error = sdmmc_mem_send_cxd_data(sc, MMC_SEND_CID, &cmd.c_resp,
1162 sizeof(cmd.c_resp));
1163 }
1164
1165 #ifdef SDMMC_DEBUG
1166 if (error == 0)
1167 sdmmc_dump_data("CID", cmd.c_resp, sizeof(cmd.c_resp));
1168 #endif
1169 if (error == 0 && resp != NULL)
1170 memcpy(resp, &cmd.c_resp, sizeof(*resp));
1171 return error;
1172 }
1173
1174 static int
1175 sdmmc_mem_send_csd(struct sdmmc_softc *sc, struct sdmmc_function *sf,
1176 sdmmc_response *resp)
1177 {
1178 struct sdmmc_command cmd;
1179 int error;
1180
1181 if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
1182 memset(&cmd, 0, sizeof cmd);
1183 cmd.c_opcode = MMC_SEND_CSD;
1184 cmd.c_arg = MMC_ARG_RCA(sf->rca);
1185 cmd.c_flags = SCF_CMD_AC | SCF_RSP_R2;
1186
1187 error = sdmmc_mmc_command(sc, &cmd);
1188 } else {
1189 error = sdmmc_mem_send_cxd_data(sc, MMC_SEND_CSD, &cmd.c_resp,
1190 sizeof(cmd.c_resp));
1191 }
1192
1193 #ifdef SDMMC_DEBUG
1194 if (error == 0)
1195 sdmmc_dump_data("CSD", cmd.c_resp, sizeof(cmd.c_resp));
1196 #endif
1197 if (error == 0 && resp != NULL)
1198 memcpy(resp, &cmd.c_resp, sizeof(*resp));
1199 return error;
1200 }
1201
1202 static int
1203 sdmmc_mem_send_scr(struct sdmmc_softc *sc, struct sdmmc_function *sf,
1204 uint32_t *scr)
1205 {
1206 struct sdmmc_command cmd;
1207 bus_dma_segment_t ds[1];
1208 void *ptr = NULL;
1209 int datalen = 8;
1210 int rseg;
1211 int error = 0;
1212
1213 /* Don't lock */
1214
1215 if (ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
1216 error = bus_dmamem_alloc(sc->sc_dmat, datalen, PAGE_SIZE, 0,
1217 ds, 1, &rseg, BUS_DMA_NOWAIT);
1218 if (error)
1219 goto out;
1220 error = bus_dmamem_map(sc->sc_dmat, ds, 1, datalen, &ptr,
1221 BUS_DMA_NOWAIT);
1222 if (error)
1223 goto dmamem_free;
1224 error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmap, ptr, datalen,
1225 NULL, BUS_DMA_NOWAIT|BUS_DMA_STREAMING|BUS_DMA_READ);
1226 if (error)
1227 goto dmamem_unmap;
1228
1229 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap, 0, datalen,
1230 BUS_DMASYNC_PREREAD);
1231 } else {
1232 ptr = malloc(datalen, M_DEVBUF, M_NOWAIT | M_ZERO);
1233 if (ptr == NULL)
1234 goto out;
1235 }
1236
1237 memset(&cmd, 0, sizeof(cmd));
1238 cmd.c_data = ptr;
1239 cmd.c_datalen = datalen;
1240 cmd.c_blklen = datalen;
1241 cmd.c_arg = 0;
1242 cmd.c_flags = SCF_CMD_ADTC | SCF_CMD_READ | SCF_RSP_R1 | SCF_RSP_SPI_R1;
1243 cmd.c_opcode = SD_APP_SEND_SCR;
1244 if (ISSET(sc->sc_caps, SMC_CAPS_DMA))
1245 cmd.c_dmamap = sc->sc_dmap;
1246
1247 error = sdmmc_app_command(sc, sf, &cmd);
1248 if (error == 0) {
1249 if (ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
1250 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap, 0, datalen,
1251 BUS_DMASYNC_POSTREAD);
1252 }
1253 memcpy(scr, ptr, datalen);
1254 }
1255
1256 out:
1257 if (ptr != NULL) {
1258 if (ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
1259 bus_dmamap_unload(sc->sc_dmat, sc->sc_dmap);
1260 dmamem_unmap:
1261 bus_dmamem_unmap(sc->sc_dmat, ptr, datalen);
1262 dmamem_free:
1263 bus_dmamem_free(sc->sc_dmat, ds, rseg);
1264 } else {
1265 free(ptr, M_DEVBUF);
1266 }
1267 }
1268 DPRINTF(("%s: sdmem_mem_send_scr: error = %d\n", SDMMCDEVNAME(sc),
1269 error));
1270
1271 #ifdef SDMMC_DEBUG
1272 if (error == 0)
1273 sdmmc_dump_data("SCR", scr, datalen);
1274 #endif
1275 return error;
1276 }
1277
/*
 * Decode the raw SCR previously read into sf->raw_scr: convert the two
 * big-endian words into a right-aligned 64-bit value spread over
 * resp[1]:resp[0], then extract the structure version, SD spec version
 * and supported bus widths into sf->scr.
 *
 * Returns 0 on success, EINVAL for an unknown SCR structure version.
 */
static int
sdmmc_mem_decode_scr(struct sdmmc_softc *sc, struct sdmmc_function *sf)
{
	sdmmc_response resp;
	int ver;

	memset(resp, 0, sizeof(resp));
	/*
	 * Change the raw-scr received from the DMA stream to resp.
	 * The SCR is 64 bits in two big-endian words; shift the whole
	 * quantity right by 8 so the SCR_* field macros (which index a
	 * response word array) line up.  Order matters: resp[0] borrows
	 * the low byte of resp[1] before resp[1] is shifted down.
	 */
	resp[0] = be32toh(sf->raw_scr[1]) >> 8;		// LSW
	resp[1] = be32toh(sf->raw_scr[0]);		// MSW
	resp[0] |= (resp[1] & 0xff) << 24;
	resp[1] >>= 8;

	ver = SCR_STRUCTURE(resp);
	sf->scr.sd_spec = SCR_SD_SPEC(resp);
	sf->scr.bus_width = SCR_SD_BUS_WIDTHS(resp);

	DPRINTF(("%s: sdmmc_mem_decode_scr: %08x%08x ver=%d, spec=%d, bus width=%d\n",
	    SDMMCDEVNAME(sc), resp[1], resp[0],
	    ver, sf->scr.sd_spec, sf->scr.bus_width));

	/* Only SCR structure versions 0 and 1 are understood. */
	if (ver != 0 && ver != 1) {
		DPRINTF(("%s: unknown structure version: %d\n",
		    SDMMCDEVNAME(sc), ver));
		return EINVAL;
	}
	return 0;
}
1308
1309 static int
1310 sdmmc_mem_send_ssr(struct sdmmc_softc *sc, struct sdmmc_function *sf,
1311 sdmmc_bitfield512_t *ssr)
1312 {
1313 struct sdmmc_command cmd;
1314 bus_dma_segment_t ds[1];
1315 void *ptr = NULL;
1316 int datalen = 64;
1317 int rseg;
1318 int error = 0;
1319
1320 /* Don't lock */
1321
1322 if (ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
1323 error = bus_dmamem_alloc(sc->sc_dmat, datalen, PAGE_SIZE, 0,
1324 ds, 1, &rseg, BUS_DMA_NOWAIT);
1325 if (error)
1326 goto out;
1327 error = bus_dmamem_map(sc->sc_dmat, ds, 1, datalen, &ptr,
1328 BUS_DMA_NOWAIT);
1329 if (error)
1330 goto dmamem_free;
1331 error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmap, ptr, datalen,
1332 NULL, BUS_DMA_NOWAIT|BUS_DMA_STREAMING|BUS_DMA_READ);
1333 if (error)
1334 goto dmamem_unmap;
1335
1336 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap, 0, datalen,
1337 BUS_DMASYNC_PREREAD);
1338 } else {
1339 ptr = malloc(datalen, M_DEVBUF, M_NOWAIT | M_ZERO);
1340 if (ptr == NULL)
1341 goto out;
1342 }
1343
1344 memset(&cmd, 0, sizeof(cmd));
1345 cmd.c_data = ptr;
1346 cmd.c_datalen = datalen;
1347 cmd.c_blklen = datalen;
1348 cmd.c_arg = 0;
1349 cmd.c_flags = SCF_CMD_ADTC | SCF_CMD_READ | SCF_RSP_R1 | SCF_RSP_SPI_R1;
1350 cmd.c_opcode = SD_APP_SD_STATUS;
1351 if (ISSET(sc->sc_caps, SMC_CAPS_DMA))
1352 cmd.c_dmamap = sc->sc_dmap;
1353
1354 error = sdmmc_app_command(sc, sf, &cmd);
1355 if (error == 0) {
1356 if (ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
1357 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap, 0, datalen,
1358 BUS_DMASYNC_POSTREAD);
1359 }
1360 memcpy(ssr, ptr, datalen);
1361 }
1362
1363 out:
1364 if (ptr != NULL) {
1365 if (ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
1366 bus_dmamap_unload(sc->sc_dmat, sc->sc_dmap);
1367 dmamem_unmap:
1368 bus_dmamem_unmap(sc->sc_dmat, ptr, datalen);
1369 dmamem_free:
1370 bus_dmamem_free(sc->sc_dmat, ds, rseg);
1371 } else {
1372 free(ptr, M_DEVBUF);
1373 }
1374 }
1375 DPRINTF(("%s: sdmem_mem_send_ssr: error = %d\n", SDMMCDEVNAME(sc),
1376 error));
1377
1378 if (error == 0)
1379 sdmmc_be512_to_bitfield512(ssr);
1380
1381 #ifdef SDMMC_DEBUG
1382 if (error == 0)
1383 sdmmc_dump_data("SSR", ssr, datalen);
1384 #endif
1385 return error;
1386 }
1387
1388 static int
1389 sdmmc_mem_decode_ssr(struct sdmmc_softc *sc, struct sdmmc_function *sf,
1390 sdmmc_bitfield512_t *ssr_bitfield)
1391 {
1392 uint32_t *ssr = (uint32_t *)ssr_bitfield;
1393 int speed_class_val, bus_width_val;
1394
1395 const int bus_width = SSR_DAT_BUS_WIDTH(ssr);
1396 const int speed_class = SSR_SPEED_CLASS(ssr);
1397 const int uhs_speed_grade = SSR_UHS_SPEED_GRADE(ssr);
1398 const int video_speed_class = SSR_VIDEO_SPEED_CLASS(ssr);
1399 const int app_perf_class = SSR_APP_PERF_CLASS(ssr);
1400
1401 switch (speed_class) {
1402 case SSR_SPEED_CLASS_0: speed_class_val = 0; break;
1403 case SSR_SPEED_CLASS_2: speed_class_val = 2; break;
1404 case SSR_SPEED_CLASS_4: speed_class_val = 4; break;
1405 case SSR_SPEED_CLASS_6: speed_class_val = 6; break;
1406 case SSR_SPEED_CLASS_10: speed_class_val = 10; break;
1407 default: speed_class_val = -1; break;
1408 }
1409
1410 switch (bus_width) {
1411 case SSR_DAT_BUS_WIDTH_1: bus_width_val = 1; break;
1412 case SSR_DAT_BUS_WIDTH_4: bus_width_val = 4; break;
1413 default: bus_width_val = -1;
1414 }
1415
1416 /*
1417 * Log card status
1418 */
1419 device_printf(sc->sc_dev, "SD card status:");
1420 if (bus_width_val != -1)
1421 printf(" %d-bit", bus_width_val);
1422 else
1423 printf(" unknown bus width");
1424 if (speed_class_val != -1)
1425 printf(", C%d", speed_class_val);
1426 if (uhs_speed_grade)
1427 printf(", U%d", uhs_speed_grade);
1428 if (video_speed_class)
1429 printf(", V%d", video_speed_class);
1430 if (app_perf_class)
1431 printf(", A%d", app_perf_class);
1432 printf("\n");
1433
1434 return 0;
1435 }
1436
1437 static int
1438 sdmmc_mem_send_cxd_data(struct sdmmc_softc *sc, int opcode, void *data,
1439 size_t datalen)
1440 {
1441 struct sdmmc_command cmd;
1442 bus_dma_segment_t ds[1];
1443 void *ptr = NULL;
1444 int rseg;
1445 int error = 0;
1446
1447 if (ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
1448 error = bus_dmamem_alloc(sc->sc_dmat, datalen, PAGE_SIZE, 0, ds,
1449 1, &rseg, BUS_DMA_NOWAIT);
1450 if (error)
1451 goto out;
1452 error = bus_dmamem_map(sc->sc_dmat, ds, 1, datalen, &ptr,
1453 BUS_DMA_NOWAIT);
1454 if (error)
1455 goto dmamem_free;
1456 error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmap, ptr, datalen,
1457 NULL, BUS_DMA_NOWAIT|BUS_DMA_STREAMING|BUS_DMA_READ);
1458 if (error)
1459 goto dmamem_unmap;
1460
1461 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap, 0, datalen,
1462 BUS_DMASYNC_PREREAD);
1463 } else {
1464 ptr = malloc(datalen, M_DEVBUF, M_NOWAIT | M_ZERO);
1465 if (ptr == NULL)
1466 goto out;
1467 }
1468
1469 memset(&cmd, 0, sizeof(cmd));
1470 cmd.c_data = ptr;
1471 cmd.c_datalen = datalen;
1472 cmd.c_blklen = datalen;
1473 cmd.c_opcode = opcode;
1474 cmd.c_arg = 0;
1475 cmd.c_flags = SCF_CMD_ADTC | SCF_CMD_READ | SCF_RSP_SPI_R1;
1476 if (opcode == MMC_SEND_EXT_CSD)
1477 SET(cmd.c_flags, SCF_RSP_R1);
1478 else
1479 SET(cmd.c_flags, SCF_RSP_R2);
1480 if (ISSET(sc->sc_caps, SMC_CAPS_DMA))
1481 cmd.c_dmamap = sc->sc_dmap;
1482
1483 error = sdmmc_mmc_command(sc, &cmd);
1484 if (error == 0) {
1485 if (ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
1486 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap, 0, datalen,
1487 BUS_DMASYNC_POSTREAD);
1488 }
1489 memcpy(data, ptr, datalen);
1490 #ifdef SDMMC_DEBUG
1491 sdmmc_dump_data("CXD", data, datalen);
1492 #endif
1493 }
1494
1495 out:
1496 if (ptr != NULL) {
1497 if (ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
1498 bus_dmamap_unload(sc->sc_dmat, sc->sc_dmap);
1499 dmamem_unmap:
1500 bus_dmamem_unmap(sc->sc_dmat, ptr, datalen);
1501 dmamem_free:
1502 bus_dmamem_free(sc->sc_dmat, ds, rseg);
1503 } else {
1504 free(ptr, M_DEVBUF);
1505 }
1506 }
1507 return error;
1508 }
1509
1510 static int
1511 sdmmc_set_bus_width(struct sdmmc_function *sf, int width)
1512 {
1513 struct sdmmc_softc *sc = sf->sc;
1514 struct sdmmc_command cmd;
1515 int error;
1516
1517 if (ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE))
1518 return ENODEV;
1519
1520 memset(&cmd, 0, sizeof(cmd));
1521 cmd.c_opcode = SD_APP_SET_BUS_WIDTH;
1522 cmd.c_flags = SCF_RSP_R1 | SCF_CMD_AC;
1523
1524 switch (width) {
1525 case 1:
1526 cmd.c_arg = SD_ARG_BUS_WIDTH_1;
1527 break;
1528
1529 case 4:
1530 cmd.c_arg = SD_ARG_BUS_WIDTH_4;
1531 break;
1532
1533 default:
1534 return EINVAL;
1535 }
1536
1537 error = sdmmc_app_command(sc, sf, &cmd);
1538 if (error == 0)
1539 error = sdmmc_chip_bus_width(sc->sc_sct, sc->sc_sch, width);
1540 return error;
1541 }
1542
1543 static int
1544 sdmmc_mem_sd_switch(struct sdmmc_function *sf, int mode, int group,
1545 int function, sdmmc_bitfield512_t *status)
1546 {
1547 struct sdmmc_softc *sc = sf->sc;
1548 struct sdmmc_command cmd;
1549 bus_dma_segment_t ds[1];
1550 void *ptr = NULL;
1551 int gsft, rseg, error = 0;
1552 const int statlen = 64;
1553
1554 if (sf->scr.sd_spec >= SCR_SD_SPEC_VER_1_10 &&
1555 !ISSET(sf->csd.ccc, SD_CSD_CCC_SWITCH))
1556 return EINVAL;
1557
1558 if (group <= 0 || group > 6 ||
1559 function < 0 || function > 15)
1560 return EINVAL;
1561
1562 gsft = (group - 1) << 2;
1563
1564 if (ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
1565 error = bus_dmamem_alloc(sc->sc_dmat, statlen, PAGE_SIZE, 0, ds,
1566 1, &rseg, BUS_DMA_NOWAIT);
1567 if (error)
1568 goto out;
1569 error = bus_dmamem_map(sc->sc_dmat, ds, 1, statlen, &ptr,
1570 BUS_DMA_NOWAIT);
1571 if (error)
1572 goto dmamem_free;
1573 error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmap, ptr, statlen,
1574 NULL, BUS_DMA_NOWAIT|BUS_DMA_STREAMING|BUS_DMA_READ);
1575 if (error)
1576 goto dmamem_unmap;
1577
1578 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap, 0, statlen,
1579 BUS_DMASYNC_PREREAD);
1580 } else {
1581 ptr = malloc(statlen, M_DEVBUF, M_NOWAIT | M_ZERO);
1582 if (ptr == NULL)
1583 goto out;
1584 }
1585
1586 memset(&cmd, 0, sizeof(cmd));
1587 cmd.c_data = ptr;
1588 cmd.c_datalen = statlen;
1589 cmd.c_blklen = statlen;
1590 cmd.c_opcode = SD_SEND_SWITCH_FUNC;
1591 cmd.c_arg =
1592 (!!mode << 31) | (function << gsft) | (0x00ffffff & ~(0xf << gsft));
1593 cmd.c_flags = SCF_CMD_ADTC | SCF_CMD_READ | SCF_RSP_R1 | SCF_RSP_SPI_R1;
1594 if (ISSET(sc->sc_caps, SMC_CAPS_DMA))
1595 cmd.c_dmamap = sc->sc_dmap;
1596
1597 error = sdmmc_mmc_command(sc, &cmd);
1598 if (error == 0) {
1599 if (ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
1600 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap, 0, statlen,
1601 BUS_DMASYNC_POSTREAD);
1602 }
1603 memcpy(status, ptr, statlen);
1604 }
1605
1606 out:
1607 if (ptr != NULL) {
1608 if (ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
1609 bus_dmamap_unload(sc->sc_dmat, sc->sc_dmap);
1610 dmamem_unmap:
1611 bus_dmamem_unmap(sc->sc_dmat, ptr, statlen);
1612 dmamem_free:
1613 bus_dmamem_free(sc->sc_dmat, ds, rseg);
1614 } else {
1615 free(ptr, M_DEVBUF);
1616 }
1617 }
1618
1619 if (error == 0)
1620 sdmmc_be512_to_bitfield512(status);
1621
1622 return error;
1623 }
1624
/*
 * Issue MMC CMD6 (SWITCH) to write 'value' into EXT_CSD byte 'index'
 * for command set 'set'.  When 'poll' is set, the commands are issued
 * with SCF_POLL (no interrupts).  For cache flushes and HS200+ timing
 * changes, additionally poll CMD13 until the card reports ready,
 * checking for a SWITCH_ERROR in the status.  Returns 0 on success or
 * an errno value (EINVAL on switch error).
 */
static int
sdmmc_mem_mmc_switch(struct sdmmc_function *sf, uint8_t set, uint8_t index,
    uint8_t value, bool poll)
{
	struct sdmmc_softc *sc = sf->sc;
	struct sdmmc_command cmd;
	int error;

	memset(&cmd, 0, sizeof(cmd));
	cmd.c_opcode = MMC_SWITCH;
	cmd.c_arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
	    (index << 16) | (value << 8) | set;
	cmd.c_flags = SCF_RSP_SPI_R1B | SCF_RSP_R1B | SCF_CMD_AC;

	if (poll)
		cmd.c_flags |= SCF_POLL;

	error = sdmmc_mmc_command(sc, &cmd);
	if (error)
		return error;

	/*
	 * These switches leave the card busy beyond the R1b busy signal;
	 * poll SEND_STATUS until READY_FOR_DATA is set again.
	 * HS_TIMING values >= 2 select HS200/HS400 modes.
	 */
	if (index == EXT_CSD_FLUSH_CACHE || (index == EXT_CSD_HS_TIMING && value >= 2)) {
		do {
			memset(&cmd, 0, sizeof(cmd));
			cmd.c_opcode = MMC_SEND_STATUS;
			if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE))
				cmd.c_arg = MMC_ARG_RCA(sf->rca);
			cmd.c_flags = SCF_CMD_AC | SCF_RSP_R1 | SCF_RSP_SPI_R2;
			if (poll)
				cmd.c_flags |= SCF_POLL;
			error = sdmmc_mmc_command(sc, &cmd);
			if (error)
				break;
			/* The card rejected the switch outright. */
			if (ISSET(MMC_R1(cmd.c_resp), MMC_R1_SWITCH_ERROR)) {
				aprint_error_dev(sc->sc_dev, "switch error\n");
				return EINVAL;
			}
			/* XXX time out */
		} while (!ISSET(MMC_R1(cmd.c_resp), MMC_R1_READY_FOR_DATA));

		if (error) {
			aprint_error_dev(sc->sc_dev,
			    "error waiting for data ready after switch command: %d\n",
			    error);
			return error;
		}
	}

	return 0;
}
1675
1676 /*
1677 * SPI mode function
1678 */
1679 static int
1680 sdmmc_mem_spi_read_ocr(struct sdmmc_softc *sc, uint32_t hcs, uint32_t *card_ocr)
1681 {
1682 struct sdmmc_command cmd;
1683 int error;
1684
1685 memset(&cmd, 0, sizeof(cmd));
1686 cmd.c_opcode = MMC_READ_OCR;
1687 cmd.c_arg = hcs ? MMC_OCR_HCS : 0;
1688 cmd.c_flags = SCF_RSP_SPI_R3;
1689
1690 error = sdmmc_mmc_command(sc, &cmd);
1691 if (error == 0 && card_ocr != NULL)
1692 *card_ocr = cmd.c_resp[1];
1693 DPRINTF(("%s: sdmmc_mem_spi_read_ocr: error=%d, ocr=%#x\n",
1694 SDMMCDEVNAME(sc), error, cmd.c_resp[1]));
1695 return error;
1696 }
1697
1698 /*
1699 * read/write function
1700 */
1701 /* read */
1702 static int
1703 sdmmc_mem_single_read_block(struct sdmmc_function *sf, uint32_t blkno,
1704 u_char *data, size_t datalen)
1705 {
1706 struct sdmmc_softc *sc = sf->sc;
1707 int error = 0;
1708 int i;
1709
1710 KASSERT((datalen % SDMMC_SECTOR_SIZE) == 0);
1711 KASSERT(!ISSET(sc->sc_caps, SMC_CAPS_DMA));
1712
1713 for (i = 0; i < datalen / SDMMC_SECTOR_SIZE; i++) {
1714 error = sdmmc_mem_read_block_subr(sf, sc->sc_dmap, blkno + i,
1715 data + i * SDMMC_SECTOR_SIZE, SDMMC_SECTOR_SIZE);
1716 if (error)
1717 break;
1718 }
1719 return error;
1720 }
1721
/*
 * Simulate multi-segment dma transfer.
 *
 * For controllers that can only DMA a single segment: either issue one
 * transfer per segment of the already-loaded sc_dmap (re-loading each
 * piece into the single-segment map sf->sseg_dmap), or — if any segment
 * is not sector-aligned in length — do the whole read through the
 * pre-allocated bounce buffer sf->bbuf and copy out afterwards.
 */
static int
sdmmc_mem_single_segment_dma_read_block(struct sdmmc_function *sf,
    uint32_t blkno, u_char *data, size_t datalen)
{
	struct sdmmc_softc *sc = sf->sc;
	bool use_bbuf = false;
	int error = 0;
	int i;

	/* Per-segment transfers require sector-multiple segment lengths. */
	for (i = 0; i < sc->sc_dmap->dm_nsegs; i++) {
		size_t len = sc->sc_dmap->dm_segs[i].ds_len;
		if ((len % SDMMC_SECTOR_SIZE) != 0) {
			use_bbuf = true;
			break;
		}
	}
	if (use_bbuf) {
		bus_dmamap_sync(sc->sc_dmat, sf->bbuf_dmap, 0, datalen,
		    BUS_DMASYNC_PREREAD);

		/*
		 * NOTE(review): 'data' is passed as the command buffer but
		 * the DMA map is bbuf_dmap; presumably the DMA path only
		 * honours the map — confirm against sdmmc_mem_read_block_subr.
		 */
		error = sdmmc_mem_read_block_subr(sf, sf->bbuf_dmap,
		    blkno, data, datalen);
		if (error) {
			bus_dmamap_unload(sc->sc_dmat, sf->bbuf_dmap);
			return error;
		}

		bus_dmamap_sync(sc->sc_dmat, sf->bbuf_dmap, 0, datalen,
		    BUS_DMASYNC_POSTREAD);

		/* Copy from bounce buffer */
		memcpy(data, sf->bbuf, datalen);

		return 0;
	}

	/* One DMA transfer per segment of the caller's map. */
	for (i = 0; i < sc->sc_dmap->dm_nsegs; i++) {
		size_t len = sc->sc_dmap->dm_segs[i].ds_len;

		error = bus_dmamap_load(sc->sc_dmat, sf->sseg_dmap,
		    data, len, NULL, BUS_DMA_NOWAIT|BUS_DMA_READ);
		if (error)
			return error;

		bus_dmamap_sync(sc->sc_dmat, sf->sseg_dmap, 0, len,
		    BUS_DMASYNC_PREREAD);

		error = sdmmc_mem_read_block_subr(sf, sf->sseg_dmap,
		    blkno, data, len);
		if (error) {
			bus_dmamap_unload(sc->sc_dmat, sf->sseg_dmap);
			return error;
		}

		bus_dmamap_sync(sc->sc_dmat, sf->sseg_dmap, 0, len,
		    BUS_DMASYNC_POSTREAD);

		bus_dmamap_unload(sc->sc_dmat, sf->sseg_dmap);

		/* Advance block number and buffer past this segment. */
		blkno += len / SDMMC_SECTOR_SIZE;
		data += len;
	}
	return 0;
}
1789
/*
 * Low-level block read: select the card (native mode), issue CMD17
 * (single) or CMD18 (multiple) for 'datalen' bytes at 'blkno', send
 * CMD12 (STOP_TRANSMISSION) after multi-block reads when the
 * controller lacks auto-stop, and finally poll CMD13 until the card
 * is ready for data again.  'dmap' is attached to the command only
 * when the controller does DMA.  Returns 0 on success or an errno.
 */
static int
sdmmc_mem_read_block_subr(struct sdmmc_function *sf, bus_dmamap_t dmap,
    uint32_t blkno, u_char *data, size_t datalen)
{
	struct sdmmc_softc *sc = sf->sc;
	struct sdmmc_command cmd;
	int error;

	if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
		error = sdmmc_select_card(sc, sf);
		if (error)
			goto out;
	}

	memset(&cmd, 0, sizeof(cmd));
	cmd.c_data = data;
	cmd.c_datalen = datalen;
	cmd.c_blklen = SDMMC_SECTOR_SIZE;
	cmd.c_opcode = (cmd.c_datalen / cmd.c_blklen) > 1 ?
	    MMC_READ_BLOCK_MULTIPLE : MMC_READ_BLOCK_SINGLE;
	cmd.c_arg = blkno;
	/* Standard-capacity cards are byte-addressed, not block-addressed. */
	if (!ISSET(sf->flags, SFF_SDHC))
		cmd.c_arg <<= SDMMC_SECTOR_SIZE_SB;
	cmd.c_flags = SCF_CMD_ADTC | SCF_CMD_READ | SCF_RSP_R1 | SCF_RSP_SPI_R1;
	if (ISSET(sf->flags, SFF_SDHC))
		cmd.c_flags |= SCF_XFER_SDHC;
	if (ISSET(sc->sc_caps, SMC_CAPS_DMA))
		cmd.c_dmamap = dmap;

	sc->sc_ev_xfer.ev_count++;

	error = sdmmc_mmc_command(sc, &cmd);
	if (error) {
		sc->sc_ev_xfer_error.ev_count++;
		goto out;
	}

	/* Bucket transfer sizes (512B..64KiB powers of two) for statistics. */
	const u_int counter = __builtin_ctz(cmd.c_datalen);
	if (counter >= 9 && counter <= 16) {
		sc->sc_ev_xfer_aligned[counter - 9].ev_count++;
	} else {
		sc->sc_ev_xfer_unaligned.ev_count++;
	}

	/* Multi-block reads need an explicit CMD12 unless auto-stop exists. */
	if (!ISSET(sc->sc_caps, SMC_CAPS_AUTO_STOP)) {
		if (cmd.c_opcode == MMC_READ_BLOCK_MULTIPLE) {
			memset(&cmd, 0, sizeof cmd);
			cmd.c_opcode = MMC_STOP_TRANSMISSION;
			cmd.c_arg = MMC_ARG_RCA(sf->rca);
			cmd.c_flags = SCF_CMD_AC | SCF_RSP_R1B | SCF_RSP_SPI_R1B;
			error = sdmmc_mmc_command(sc, &cmd);
			if (error)
				goto out;
		}
	}

	/* Wait until the card is ready for the next transfer. */
	if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
		do {
			memset(&cmd, 0, sizeof(cmd));
			cmd.c_opcode = MMC_SEND_STATUS;
			if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE))
				cmd.c_arg = MMC_ARG_RCA(sf->rca);
			cmd.c_flags = SCF_CMD_AC | SCF_RSP_R1 | SCF_RSP_SPI_R2;
			error = sdmmc_mmc_command(sc, &cmd);
			if (error)
				break;
			/* XXX time out */
		} while (!ISSET(MMC_R1(cmd.c_resp), MMC_R1_READY_FOR_DATA));
	}

out:
	return error;
}
1863
/*
 * Public entry point for reading 'datalen' bytes at sector 'blkno'
 * into 'data'.  Dispatches to the single-block path for controllers
 * without multi-block support, the PIO path for controllers without
 * DMA, or loads the buffer into sc_dmap for a DMA transfer (falling
 * back to the single-segment simulation when the map has multiple
 * segments and the controller cannot scatter-gather).
 * Serialized by the softc mutex.  Returns 0 on success or an errno.
 */
int
sdmmc_mem_read_block(struct sdmmc_function *sf, uint32_t blkno, u_char *data,
    size_t datalen)
{
	struct sdmmc_softc *sc = sf->sc;
	int error;

	SDMMC_LOCK(sc);
	mutex_enter(&sc->sc_mtx);

	if (ISSET(sc->sc_caps, SMC_CAPS_SINGLE_ONLY)) {
		error = sdmmc_mem_single_read_block(sf, blkno, data, datalen);
		goto out;
	}

	if (!ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
		error = sdmmc_mem_read_block_subr(sf, sc->sc_dmap, blkno, data,
		    datalen);
		goto out;
	}

	/* DMA transfer */
	error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmap, data, datalen, NULL,
	    BUS_DMA_NOWAIT|BUS_DMA_READ);
	if (error)
		goto out;

#ifdef SDMMC_DEBUG
	printf("data=%p, datalen=%zu\n", data, datalen);
	for (int i = 0; i < sc->sc_dmap->dm_nsegs; i++) {
		printf("seg#%d: addr=%#lx, size=%#lx\n", i,
		    (u_long)sc->sc_dmap->dm_segs[i].ds_addr,
		    (u_long)sc->sc_dmap->dm_segs[i].ds_len);
	}
#endif

	/* Controller can't scatter-gather: simulate per-segment transfers. */
	if (sc->sc_dmap->dm_nsegs > 1
	    && !ISSET(sc->sc_caps, SMC_CAPS_MULTI_SEG_DMA)) {
		error = sdmmc_mem_single_segment_dma_read_block(sf, blkno,
		    data, datalen);
		goto unload;
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap, 0, datalen,
	    BUS_DMASYNC_PREREAD);

	error = sdmmc_mem_read_block_subr(sf, sc->sc_dmap, blkno, data,
	    datalen);
	if (error)
		goto unload;

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap, 0, datalen,
	    BUS_DMASYNC_POSTREAD);
unload:
	bus_dmamap_unload(sc->sc_dmat, sc->sc_dmap);

out:
	mutex_exit(&sc->sc_mtx);
	SDMMC_UNLOCK(sc);

	return error;
}
1926
1927 /* write */
1928 static int
1929 sdmmc_mem_single_write_block(struct sdmmc_function *sf, uint32_t blkno,
1930 u_char *data, size_t datalen)
1931 {
1932 struct sdmmc_softc *sc = sf->sc;
1933 int error = 0;
1934 int i;
1935
1936 KASSERT((datalen % SDMMC_SECTOR_SIZE) == 0);
1937 KASSERT(!ISSET(sc->sc_caps, SMC_CAPS_DMA));
1938
1939 for (i = 0; i < datalen / SDMMC_SECTOR_SIZE; i++) {
1940 error = sdmmc_mem_write_block_subr(sf, sc->sc_dmap, blkno + i,
1941 data + i * SDMMC_SECTOR_SIZE, SDMMC_SECTOR_SIZE);
1942 if (error)
1943 break;
1944 }
1945 return error;
1946 }
1947
/*
 * Simulate multi-segment dma transfer.
 *
 * Write-side counterpart of the single-segment read simulation: either
 * issue one transfer per segment of the already-loaded sc_dmap
 * (re-loading each piece into sf->sseg_dmap), or — if any segment is
 * not sector-aligned in length — copy the data into the pre-allocated
 * bounce buffer sf->bbuf and write it in one transfer.
 */
static int
sdmmc_mem_single_segment_dma_write_block(struct sdmmc_function *sf,
    uint32_t blkno, u_char *data, size_t datalen)
{
	struct sdmmc_softc *sc = sf->sc;
	bool use_bbuf = false;
	int error = 0;
	int i;

	/* Per-segment transfers require sector-multiple segment lengths. */
	for (i = 0; i < sc->sc_dmap->dm_nsegs; i++) {
		size_t len = sc->sc_dmap->dm_segs[i].ds_len;
		if ((len % SDMMC_SECTOR_SIZE) != 0) {
			use_bbuf = true;
			break;
		}
	}
	if (use_bbuf) {
		/* Copy to bounce buffer */
		memcpy(sf->bbuf, data, datalen);

		bus_dmamap_sync(sc->sc_dmat, sf->bbuf_dmap, 0, datalen,
		    BUS_DMASYNC_PREWRITE);

		/*
		 * NOTE(review): 'data' is passed as the command buffer but
		 * the DMA map is bbuf_dmap; presumably the DMA path only
		 * honours the map — confirm against sdmmc_mem_write_block_subr.
		 */
		error = sdmmc_mem_write_block_subr(sf, sf->bbuf_dmap,
		    blkno, data, datalen);
		if (error) {
			bus_dmamap_unload(sc->sc_dmat, sf->bbuf_dmap);
			return error;
		}

		bus_dmamap_sync(sc->sc_dmat, sf->bbuf_dmap, 0, datalen,
		    BUS_DMASYNC_POSTWRITE);

		return 0;
	}

	/* One DMA transfer per segment of the caller's map. */
	for (i = 0; i < sc->sc_dmap->dm_nsegs; i++) {
		size_t len = sc->sc_dmap->dm_segs[i].ds_len;

		error = bus_dmamap_load(sc->sc_dmat, sf->sseg_dmap,
		    data, len, NULL, BUS_DMA_NOWAIT|BUS_DMA_WRITE);
		if (error)
			return error;

		bus_dmamap_sync(sc->sc_dmat, sf->sseg_dmap, 0, len,
		    BUS_DMASYNC_PREWRITE);

		error = sdmmc_mem_write_block_subr(sf, sf->sseg_dmap,
		    blkno, data, len);
		if (error) {
			bus_dmamap_unload(sc->sc_dmat, sf->sseg_dmap);
			return error;
		}

		bus_dmamap_sync(sc->sc_dmat, sf->sseg_dmap, 0, len,
		    BUS_DMASYNC_POSTWRITE);

		bus_dmamap_unload(sc->sc_dmat, sf->sseg_dmap);

		/* Advance block number and buffer past this segment. */
		blkno += len / SDMMC_SECTOR_SIZE;
		data += len;
	}

	return error;
}
2016
/*
 * Low-level block write: select the card (native mode), pre-erase hint
 * via ACMD23 for SD multi-block writes, issue CMD24 (single) or CMD25
 * (multiple) for 'datalen' bytes at 'blkno', send CMD12
 * (STOP_TRANSMISSION) after multi-block writes when the controller
 * lacks auto-stop, and finally poll CMD13 until the card is ready
 * again.  'dmap' is attached to the command only when the controller
 * does DMA.  Returns 0 on success or an errno value.
 */
static int
sdmmc_mem_write_block_subr(struct sdmmc_function *sf, bus_dmamap_t dmap,
    uint32_t blkno, u_char *data, size_t datalen)
{
	struct sdmmc_softc *sc = sf->sc;
	struct sdmmc_command cmd;
	int error;

	if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
		error = sdmmc_select_card(sc, sf);
		if (error)
			goto out;
	}

	const int nblk = howmany(datalen, SDMMC_SECTOR_SIZE);
	if (ISSET(sc->sc_flags, SMF_SD_MODE) && nblk > 1) {
		/* Set the number of write blocks to be pre-erased */
		memset(&cmd, 0, sizeof(cmd));
		cmd.c_opcode = SD_APP_SET_WR_BLK_ERASE_COUNT;
		cmd.c_flags = SCF_RSP_R1 | SCF_RSP_SPI_R1 | SCF_CMD_AC;
		cmd.c_arg = nblk;
		error = sdmmc_app_command(sc, sf, &cmd);
		if (error)
			goto out;
	}

	memset(&cmd, 0, sizeof(cmd));
	cmd.c_data = data;
	cmd.c_datalen = datalen;
	cmd.c_blklen = SDMMC_SECTOR_SIZE;
	cmd.c_opcode = (cmd.c_datalen / cmd.c_blklen) > 1 ?
	    MMC_WRITE_BLOCK_MULTIPLE : MMC_WRITE_BLOCK_SINGLE;
	cmd.c_arg = blkno;
	/* Standard-capacity cards are byte-addressed, not block-addressed. */
	if (!ISSET(sf->flags, SFF_SDHC))
		cmd.c_arg <<= SDMMC_SECTOR_SIZE_SB;
	cmd.c_flags = SCF_CMD_ADTC | SCF_RSP_R1;
	if (ISSET(sf->flags, SFF_SDHC))
		cmd.c_flags |= SCF_XFER_SDHC;
	if (ISSET(sc->sc_caps, SMC_CAPS_DMA))
		cmd.c_dmamap = dmap;

	sc->sc_ev_xfer.ev_count++;

	error = sdmmc_mmc_command(sc, &cmd);
	if (error) {
		sc->sc_ev_xfer_error.ev_count++;
		goto out;
	}

	/* Bucket transfer sizes (512B..64KiB powers of two) for statistics. */
	const u_int counter = __builtin_ctz(cmd.c_datalen);
	if (counter >= 9 && counter <= 16) {
		sc->sc_ev_xfer_aligned[counter - 9].ev_count++;
	} else {
		sc->sc_ev_xfer_unaligned.ev_count++;
	}

	/* Multi-block writes need an explicit CMD12 unless auto-stop exists. */
	if (!ISSET(sc->sc_caps, SMC_CAPS_AUTO_STOP)) {
		if (cmd.c_opcode == MMC_WRITE_BLOCK_MULTIPLE) {
			memset(&cmd, 0, sizeof(cmd));
			cmd.c_opcode = MMC_STOP_TRANSMISSION;
			cmd.c_flags = SCF_CMD_AC | SCF_RSP_R1B | SCF_RSP_SPI_R1B;
			error = sdmmc_mmc_command(sc, &cmd);
			if (error)
				goto out;
		}
	}

	/* Wait until the card has finished programming. */
	if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
		do {
			memset(&cmd, 0, sizeof(cmd));
			cmd.c_opcode = MMC_SEND_STATUS;
			if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE))
				cmd.c_arg = MMC_ARG_RCA(sf->rca);
			cmd.c_flags = SCF_CMD_AC | SCF_RSP_R1 | SCF_RSP_SPI_R2;
			error = sdmmc_mmc_command(sc, &cmd);
			if (error)
				break;
			/* XXX time out */
		} while (!ISSET(MMC_R1(cmd.c_resp), MMC_R1_READY_FOR_DATA));
	}

out:
	return error;
}
2101
/*
 * Public entry point for writing 'datalen' bytes at sector 'blkno'
 * from 'data'.  Fails with EIO when the card is write-protected.
 * Dispatches to the single-block path for controllers without
 * multi-block support, the PIO path for controllers without DMA, or
 * loads the buffer into sc_dmap for a DMA transfer (falling back to
 * the single-segment simulation when the map has multiple segments
 * and the controller cannot scatter-gather).  Serialized by the
 * softc mutex.  Returns 0 on success or an errno value.
 */
int
sdmmc_mem_write_block(struct sdmmc_function *sf, uint32_t blkno, u_char *data,
    size_t datalen)
{
	struct sdmmc_softc *sc = sf->sc;
	int error;

	SDMMC_LOCK(sc);
	mutex_enter(&sc->sc_mtx);

	if (sdmmc_chip_write_protect(sc->sc_sct, sc->sc_sch)) {
		aprint_normal_dev(sc->sc_dev, "write-protected\n");
		error = EIO;
		goto out;
	}

	if (ISSET(sc->sc_caps, SMC_CAPS_SINGLE_ONLY)) {
		error = sdmmc_mem_single_write_block(sf, blkno, data, datalen);
		goto out;
	}

	if (!ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
		error = sdmmc_mem_write_block_subr(sf, sc->sc_dmap, blkno, data,
		    datalen);
		goto out;
	}

	/* DMA transfer */
	error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmap, data, datalen, NULL,
	    BUS_DMA_NOWAIT|BUS_DMA_WRITE);
	if (error)
		goto out;

#ifdef SDMMC_DEBUG
	aprint_normal_dev(sc->sc_dev, "%s: data=%p, datalen=%zu\n",
	    __func__, data, datalen);
	for (int i = 0; i < sc->sc_dmap->dm_nsegs; i++) {
		aprint_normal_dev(sc->sc_dev,
		    "%s: seg#%d: addr=%#lx, size=%#lx\n", __func__, i,
		    (u_long)sc->sc_dmap->dm_segs[i].ds_addr,
		    (u_long)sc->sc_dmap->dm_segs[i].ds_len);
	}
#endif

	/* Controller can't scatter-gather: simulate per-segment transfers. */
	if (sc->sc_dmap->dm_nsegs > 1
	    && !ISSET(sc->sc_caps, SMC_CAPS_MULTI_SEG_DMA)) {
		error = sdmmc_mem_single_segment_dma_write_block(sf, blkno,
		    data, datalen);
		goto unload;
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap, 0, datalen,
	    BUS_DMASYNC_PREWRITE);

	error = sdmmc_mem_write_block_subr(sf, sc->sc_dmap, blkno, data,
	    datalen);
	if (error)
		goto unload;

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap, 0, datalen,
	    BUS_DMASYNC_POSTWRITE);
unload:
	bus_dmamap_unload(sc->sc_dmat, sc->sc_dmap);

out:
	mutex_exit(&sc->sc_mtx);
	SDMMC_UNLOCK(sc);

	return error;
}
2172
2173 int
2174 sdmmc_mem_discard(struct sdmmc_function *sf, uint32_t sblkno, uint32_t eblkno)
2175 {
2176 struct sdmmc_softc *sc = sf->sc;
2177 struct sdmmc_command cmd;
2178 int error;
2179
2180 if (ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE))
2181 return ENODEV; /* XXX not tested */
2182
2183 if (eblkno < sblkno)
2184 return EINVAL;
2185
2186 SDMMC_LOCK(sc);
2187 mutex_enter(&sc->sc_mtx);
2188
2189 /* Set the address of the first write block to be erased */
2190 memset(&cmd, 0, sizeof(cmd));
2191 cmd.c_opcode = ISSET(sc->sc_flags, SMF_SD_MODE) ?
2192 SD_ERASE_WR_BLK_START : MMC_TAG_ERASE_GROUP_START;
2193 cmd.c_arg = sblkno;
2194 if (!ISSET(sf->flags, SFF_SDHC))
2195 cmd.c_arg <<= SDMMC_SECTOR_SIZE_SB;
2196 cmd.c_flags = SCF_CMD_AC | SCF_RSP_R1;
2197 error = sdmmc_mmc_command(sc, &cmd);
2198 if (error)
2199 goto out;
2200
2201 /* Set the address of the last write block to be erased */
2202 memset(&cmd, 0, sizeof(cmd));
2203 cmd.c_opcode = ISSET(sc->sc_flags, SMF_SD_MODE) ?
2204 SD_ERASE_WR_BLK_END : MMC_TAG_ERASE_GROUP_END;
2205 cmd.c_arg = eblkno;
2206 if (!ISSET(sf->flags, SFF_SDHC))
2207 cmd.c_arg <<= SDMMC_SECTOR_SIZE_SB;
2208 cmd.c_flags = SCF_CMD_AC | SCF_RSP_R1;
2209 error = sdmmc_mmc_command(sc, &cmd);
2210 if (error)
2211 goto out;
2212
2213 /* Start the erase operation */
2214 memset(&cmd, 0, sizeof(cmd));
2215 cmd.c_opcode = MMC_ERASE;
2216 cmd.c_flags = SCF_CMD_AC | SCF_RSP_R1B;
2217 error = sdmmc_mmc_command(sc, &cmd);
2218 if (error)
2219 goto out;
2220
2221 out:
2222 mutex_exit(&sc->sc_mtx);
2223 SDMMC_UNLOCK(sc);
2224
2225 #ifdef SDMMC_DEBUG
2226 device_printf(sc->sc_dev, "discard blk %u-%u error %d\n",
2227 sblkno, eblkno, error);
2228 #endif
2229
2230 return error;
2231 }
2232
2233 int
2234 sdmmc_mem_flush_cache(struct sdmmc_function *sf, bool poll)
2235 {
2236 struct sdmmc_softc *sc = sf->sc;
2237 int error;
2238
2239 if (!ISSET(sf->flags, SFF_CACHE_ENABLED))
2240 return 0;
2241
2242 SDMMC_LOCK(sc);
2243 mutex_enter(&sc->sc_mtx);
2244
2245 error = sdmmc_mem_mmc_switch(sf,
2246 EXT_CSD_CMD_SET_NORMAL, EXT_CSD_FLUSH_CACHE,
2247 EXT_CSD_FLUSH_CACHE_FLUSH, poll);
2248
2249 mutex_exit(&sc->sc_mtx);
2250 SDMMC_UNLOCK(sc);
2251
2252 #ifdef SDMMC_DEBUG
2253 device_printf(sc->sc_dev, "mmc flush cache error %d\n", error);
2254 #endif
2255
2256 return error;
2257 }
2258