sdmmc_mem.c revision 1.73 1 /* $NetBSD: sdmmc_mem.c,v 1.73 2021/06/13 09:50:02 mlelstv Exp $ */
2 /* $OpenBSD: sdmmc_mem.c,v 1.10 2009/01/09 10:55:22 jsg Exp $ */
3
4 /*
5 * Copyright (c) 2006 Uwe Stuehler <uwe (at) openbsd.org>
6 *
7 * Permission to use, copy, modify, and distribute this software for any
8 * purpose with or without fee is hereby granted, provided that the above
9 * copyright notice and this permission notice appear in all copies.
10 *
11 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18 */
19
20 /*-
21 * Copyright (C) 2007, 2008, 2009, 2010 NONAKA Kimihiro <nonaka (at) netbsd.org>
22 * All rights reserved.
23 *
24 * Redistribution and use in source and binary forms, with or without
25 * modification, are permitted provided that the following conditions
26 * are met:
27 * 1. Redistributions of source code must retain the above copyright
28 * notice, this list of conditions and the following disclaimer.
29 * 2. Redistributions in binary form must reproduce the above copyright
30 * notice, this list of conditions and the following disclaimer in the
31 * documentation and/or other materials provided with the distribution.
32 *
33 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
34 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
35 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
36 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
37 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
38 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
39 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
40 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
41 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
42 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
43 */
44
45 /* Routines for SD/MMC memory cards. */
46
47 #include <sys/cdefs.h>
48 __KERNEL_RCSID(0, "$NetBSD: sdmmc_mem.c,v 1.73 2021/06/13 09:50:02 mlelstv Exp $");
49
50 #ifdef _KERNEL_OPT
51 #include "opt_sdmmc.h"
52 #endif
53
54 #include <sys/param.h>
55 #include <sys/kernel.h>
56 #include <sys/malloc.h>
57 #include <sys/systm.h>
58 #include <sys/device.h>
59 #include <sys/bitops.h>
60 #include <sys/evcnt.h>
61
62 #include <dev/sdmmc/sdmmcchip.h>
63 #include <dev/sdmmc/sdmmcreg.h>
64 #include <dev/sdmmc/sdmmcvar.h>
65
66 #ifdef SDMMC_DEBUG
67 #define DPRINTF(s) do { printf s; } while (/*CONSTCOND*/0)
68 #else
69 #define DPRINTF(s) do {} while (/*CONSTCOND*/0)
70 #endif
71
/*
 * 512-bit quantity as returned by the card (big-endian on the wire),
 * stored as sixteen 32-bit words; converted for __bitfield() access by
 * sdmmc_be512_to_bitfield512() below.
 */
typedef struct { uint32_t _bits[512/32]; } __packed __aligned(4) sdmmc_bitfield512_t;
73
74 static int sdmmc_mem_sd_init(struct sdmmc_softc *, struct sdmmc_function *);
75 static int sdmmc_mem_mmc_init(struct sdmmc_softc *, struct sdmmc_function *);
76 static int sdmmc_mem_send_cid(struct sdmmc_softc *, sdmmc_response *);
77 static int sdmmc_mem_send_csd(struct sdmmc_softc *, struct sdmmc_function *,
78 sdmmc_response *);
79 static int sdmmc_mem_send_scr(struct sdmmc_softc *, struct sdmmc_function *,
80 uint32_t *scr);
81 static int sdmmc_mem_decode_scr(struct sdmmc_softc *, struct sdmmc_function *);
82 static int sdmmc_mem_send_ssr(struct sdmmc_softc *, struct sdmmc_function *,
83 sdmmc_bitfield512_t *);
84 static int sdmmc_mem_decode_ssr(struct sdmmc_softc *, struct sdmmc_function *,
85 sdmmc_bitfield512_t *);
86 static int sdmmc_mem_send_cxd_data(struct sdmmc_softc *, int, void *, size_t);
87 static int sdmmc_set_bus_width(struct sdmmc_function *, int);
88 static int sdmmc_mem_sd_switch(struct sdmmc_function *, int, int, int, sdmmc_bitfield512_t *);
89 static int sdmmc_mem_mmc_switch(struct sdmmc_function *, uint8_t, uint8_t,
90 uint8_t, bool);
91 static int sdmmc_mem_signal_voltage(struct sdmmc_softc *, int);
92 static int sdmmc_mem_spi_read_ocr(struct sdmmc_softc *, uint32_t, uint32_t *);
93 static int sdmmc_mem_single_read_block(struct sdmmc_function *, uint32_t,
94 u_char *, size_t);
95 static int sdmmc_mem_single_write_block(struct sdmmc_function *, uint32_t,
96 u_char *, size_t);
97 static int sdmmc_mem_single_segment_dma_read_block(struct sdmmc_function *,
98 uint32_t, u_char *, size_t);
99 static int sdmmc_mem_single_segment_dma_write_block(struct sdmmc_function *,
100 uint32_t, u_char *, size_t);
101 static int sdmmc_mem_read_block_subr(struct sdmmc_function *, bus_dmamap_t,
102 uint32_t, u_char *, size_t);
103 static int sdmmc_mem_write_block_subr(struct sdmmc_function *, bus_dmamap_t,
104 uint32_t, u_char *, size_t);
105
/*
 * SD CMD6 (SWITCH_FUNC) function group 1 "access mode" table, indexed
 * by the SD_ACCESS_MODE_* function number.  'v' is the host capability
 * bit required to use the mode, 'freq' the mode's maximum clock in kHz.
 */
static const struct {
	const char *name;
	int v;
	int freq;
} switch_group0_functions[] = {
	/* Default/SDR12 */
	{ "Default/SDR12",		0,			25000 },

	/* High-Speed/SDR25 */
	{ "High-Speed/SDR25",		SMC_CAPS_SD_HIGHSPEED,	50000 },

	/* SDR50 */
	{ "SDR50",			SMC_CAPS_UHS_SDR50,	100000 },

	/* SDR104 */
	{ "SDR104",			SMC_CAPS_UHS_SDR104,	208000 },

	/* DDR50 */
	{ "DDR50",			SMC_CAPS_UHS_DDR50,	50000 },
};
126
/* Maximum bus clock in kHz for each eMMC HS_TIMING mode. */
static const int sdmmc_mmc_timings[] = {
	[EXT_CSD_HS_TIMING_LEGACY]	= 26000,
	[EXT_CSD_HS_TIMING_HIGHSPEED]	= 52000,
	[EXT_CSD_HS_TIMING_HS200]	= 200000
};
132
133 /*
134 * Initialize SD/MMC memory cards and memory in SDIO "combo" cards.
135 */
/*
 * Bring SD/MMC memory out of idle and negotiate an operating voltage.
 *
 * Distinguishes SD from MMC (by whether ACMD41 is answered), handles
 * SPI-attached hosts, and — when both card and host advertise S18A —
 * performs the CMD11 switch to 1.8V UHS signaling.
 *
 * Returns 0 on success or an errno-style error; on success sc_flags
 * reflects the detected mode (SMF_SD_MODE/SMF_MEM_MODE/SMF_UHS_MODE).
 */
int
sdmmc_mem_enable(struct sdmmc_softc *sc)
{
	uint32_t host_ocr;
	uint32_t card_ocr;
	uint32_t new_ocr;
	uint32_t ocr = 0;
	int error;

	SDMMC_LOCK(sc);

	/* Set host mode to SD "combo" card or SD memory-only. */
	CLR(sc->sc_flags, SMF_UHS_MODE);
	SET(sc->sc_flags, SMF_SD_MODE|SMF_MEM_MODE);

	if (ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE))
		sdmmc_spi_chip_initialize(sc->sc_spi_sct, sc->sc_sch);

	/* Reset memory (*must* do that before CMD55 or CMD1). */
	sdmmc_go_idle_state(sc);

	if (ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
		/*
		 * Check SD Ver.2: CMD8 with pattern 0x1aa (2.7-3.6V,
		 * check pattern 0xaa).  An echo means the card is v2.0+
		 * and may be high capacity, so advertise HCS in ACMD41.
		 */
		error = sdmmc_mem_send_if_cond(sc, 0x1aa, &card_ocr);
		if (error == 0 && card_ocr == 0x1aa)
			SET(ocr, MMC_OCR_HCS);
	}

	/*
	 * Read the SD/MMC memory OCR value by issuing CMD55 followed
	 * by ACMD41 to read the OCR value from memory-only SD cards.
	 * MMC cards will not respond to CMD55 or ACMD41 and this is
	 * how we distinguish them from SD cards.
	 */
mmc_mode:
	error = sdmmc_mem_send_op_cond(sc,
	    ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE) ? ocr : 0, &card_ocr);
	if (error) {
		if (ISSET(sc->sc_flags, SMF_SD_MODE) &&
		    !ISSET(sc->sc_flags, SMF_IO_MODE)) {
			/* Not a SD card, switch to MMC mode. */
			DPRINTF(("%s: switch to MMC mode\n", SDMMCDEVNAME(sc)));
			CLR(sc->sc_flags, SMF_SD_MODE);
			goto mmc_mode;
		}
		if (!ISSET(sc->sc_flags, SMF_SD_MODE)) {
			DPRINTF(("%s: couldn't read memory OCR\n",
			    SDMMCDEVNAME(sc)));
			goto out;
		} else {
			/* Not a "combo" card. */
			CLR(sc->sc_flags, SMF_MEM_MODE);
			error = 0;
			goto out;
		}
	}
	if (ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
		/* get card OCR (CMD58 in SPI mode) */
		error = sdmmc_mem_spi_read_ocr(sc, ocr, &card_ocr);
		if (error) {
			DPRINTF(("%s: couldn't read SPI memory OCR\n",
			    SDMMCDEVNAME(sc)));
			goto out;
		}
	}

	/* Set the lowest voltage supported by the card and host. */
	host_ocr = sdmmc_chip_host_ocr(sc->sc_sct, sc->sc_sch);
	error = sdmmc_set_bus_power(sc, host_ocr, card_ocr);
	if (error) {
		DPRINTF(("%s: couldn't supply voltage requested by card\n",
		    SDMMCDEVNAME(sc)));
		goto out;
	}

	DPRINTF(("%s: host_ocr 0x%08x\n", SDMMCDEVNAME(sc), host_ocr));
	DPRINTF(("%s: card_ocr 0x%08x\n", SDMMCDEVNAME(sc), card_ocr));

	host_ocr &= card_ocr;	/* only allow the common voltages */
	if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
		if (ISSET(sc->sc_flags, SMF_SD_MODE)) {
			/* Tell the card(s) to enter the idle state (again). */
			sdmmc_go_idle_state(sc);
			/* Check SD Ver.2 (see SPI-mode comment above) */
			error = sdmmc_mem_send_if_cond(sc, 0x1aa, &card_ocr);
			if (error == 0 && card_ocr == 0x1aa)
				SET(ocr, MMC_OCR_HCS);

			/* Request 1.8V signaling if the host supports it. */
			if (sdmmc_chip_host_ocr(sc->sc_sct, sc->sc_sch) & MMC_OCR_S18A)
				SET(ocr, MMC_OCR_S18A);
		} else {
			/* MMC: ask for sector-addressed (high cap.) mode. */
			SET(ocr, MMC_OCR_ACCESS_MODE_SECTOR);
		}
	}
	host_ocr |= ocr;

	/* Send the new OCR value until all cards are ready. */
	error = sdmmc_mem_send_op_cond(sc, host_ocr, &new_ocr);
	if (error) {
		DPRINTF(("%s: couldn't send memory OCR\n", SDMMCDEVNAME(sc)));
		goto out;
	}

	if (ISSET(sc->sc_flags, SMF_SD_MODE) && ISSET(new_ocr, MMC_OCR_S18A)) {
		/*
		 * Card and host support low voltage mode, begin switch
		 * sequence.
		 */
		struct sdmmc_command cmd;
		memset(&cmd, 0, sizeof(cmd));
		cmd.c_arg = 0;
		cmd.c_flags = SCF_CMD_AC | SCF_RSP_R1;
		cmd.c_opcode = SD_VOLTAGE_SWITCH;	/* CMD11 */
		DPRINTF(("%s: switching card to 1.8V\n", SDMMCDEVNAME(sc)));
		error = sdmmc_mmc_command(sc, &cmd);
		if (error) {
			DPRINTF(("%s: voltage switch command failed\n",
			    SDMMCDEVNAME(sc)));
			goto out;
		}

		error = sdmmc_mem_signal_voltage(sc, SDMMC_SIGNAL_VOLTAGE_180);
		if (error) {
			DPRINTF(("%s: voltage change on host failed\n",
			    SDMMCDEVNAME(sc)));
			goto out;
		}

		SET(sc->sc_flags, SMF_UHS_MODE);
	}

out:
	SDMMC_UNLOCK(sc);

	return error;
}
272
273 static int
274 sdmmc_mem_signal_voltage(struct sdmmc_softc *sc, int signal_voltage)
275 {
276 int error;
277
278 /*
279 * Stop the clock
280 */
281 error = sdmmc_chip_bus_clock(sc->sc_sct, sc->sc_sch,
282 SDMMC_SDCLK_OFF, false);
283 if (error)
284 goto out;
285
286 delay(1000);
287
288 /*
289 * Card switch command was successful, update host controller
290 * signal voltage setting.
291 */
292 DPRINTF(("%s: switching host to %s\n", SDMMCDEVNAME(sc),
293 signal_voltage == SDMMC_SIGNAL_VOLTAGE_180 ? "1.8V" : "3.3V"));
294 error = sdmmc_chip_signal_voltage(sc->sc_sct,
295 sc->sc_sch, signal_voltage);
296 if (error)
297 goto out;
298
299 delay(5000);
300
301 /*
302 * Switch to SDR12 timing
303 */
304 error = sdmmc_chip_bus_clock(sc->sc_sct, sc->sc_sch, 25000,
305 false);
306 if (error)
307 goto out;
308
309 delay(1000);
310
311 out:
312 return error;
313 }
314
315 /*
316 * Read the CSD and CID from all cards and assign each card a unique
317 * relative card address (RCA). CMD2 is ignored by SDIO-only cards.
318 */
/*
 * Read the CSD and CID from all cards and assign each card a unique
 * relative card address (RCA). CMD2 is ignored by SDIO-only cards.
 */
void
sdmmc_mem_scan(struct sdmmc_softc *sc)
{
	sdmmc_response resp;
	struct sdmmc_function *sf;
	uint16_t next_rca;
	int error;
	int retry;

	SDMMC_LOCK(sc);

	/*
	 * CMD2 is a broadcast command understood by SD cards and MMC
	 * cards.  All cards begin to respond to the command, but back
	 * off if another card drives the CMD line to a different level.
	 * Only one card will get its entire response through.  That
	 * card remains silent once it has been assigned a RCA.
	 */
	for (retry = 0; retry < 100; retry++) {
		error = sdmmc_mem_send_cid(sc, &resp);
		if (error) {
			if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE) &&
			    error == ETIMEDOUT) {
				/* No more cards there. */
				break;
			}
			DPRINTF(("%s: couldn't read CID\n", SDMMCDEVNAME(sc)));
			break;
		}

		/*
		 * In MMC mode, find the next available RCA: the host
		 * assigns the address, one higher than the count of
		 * cards already enumerated.  In SD mode the card
		 * proposes its own RCA, so 1 is just a placeholder.
		 */
		next_rca = 1;
		if (!ISSET(sc->sc_flags, SMF_SD_MODE)) {
			SIMPLEQ_FOREACH(sf, &sc->sf_head, sf_list)
				next_rca++;
		}

		/* Allocate a sdmmc_function structure. */
		sf = sdmmc_function_alloc(sc);
		sf->rca = next_rca;

		/*
		 * Remember the CID returned in the CMD2 response for
		 * later decoding.
		 */
		memcpy(sf->raw_cid, resp, sizeof(sf->raw_cid));

		/*
		 * Silence the card by assigning it a unique RCA, or
		 * querying it for its RCA in the case of SD.
		 */
		if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
			if (sdmmc_set_relative_addr(sc, sf) != 0) {
				aprint_error_dev(sc->sc_dev,
				    "couldn't set mem RCA\n");
				sdmmc_function_free(sf);
				break;
			}
		}

		/*
		 * If this is a memory-only card, the card responding
		 * first becomes an alias for SDIO function 0.
		 */
		if (sc->sc_fn0 == NULL)
			sc->sc_fn0 = sf;

		SIMPLEQ_INSERT_TAIL(&sc->sf_head, sf, sf_list);

		/* only one function in SPI mode */
		if (ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE))
			break;
	}

	if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE))
		/* Go to Data Transfer Mode, if possible. */
		sdmmc_chip_bus_rod(sc->sc_sct, sc->sc_sch, 0);

	/*
	 * All cards are either inactive or awaiting further commands.
	 * Read the CSDs and decode the raw CID for each card.
	 */
	SIMPLEQ_FOREACH(sf, &sc->sf_head, sf_list) {
		error = sdmmc_mem_send_csd(sc, sf, &resp);
		if (error) {
			SET(sf->flags, SFF_ERROR);
			continue;
		}

		if (sdmmc_decode_csd(sc, resp, sf) != 0 ||
		    sdmmc_decode_cid(sc, sf->raw_cid, sf) != 0) {
			SET(sf->flags, SFF_ERROR);
			continue;
		}

#ifdef SDMMC_DEBUG
		printf("%s: CID: ", SDMMCDEVNAME(sc));
		sdmmc_print_cid(&sf->cid);
#endif
	}

	SDMMC_UNLOCK(sc);
}
422
/*
 * Decode the raw CSD response into sf->csd, handling both the SD
 * (v1.0 byte-addressed / v2.0 block-addressed) and MMC layouts.
 * Capacity is normalized to 512-byte (SDMMC_SECTOR_SIZE) sectors.
 * Returns 0 on success, 1 on an unrecognized CSD structure version.
 */
int
sdmmc_decode_csd(struct sdmmc_softc *sc, sdmmc_response resp,
    struct sdmmc_function *sf)
{
	/* TRAN_SPEED(2:0): transfer rate exponent */
	static const int speed_exponent[8] = {
		100 *    1,	/* 100 Kbits/s */
		  1 * 1000,	/*   1 Mbits/s */
		 10 * 1000,	/*  10 Mbits/s */
		100 * 1000,	/* 100 Mbits/s */
			 0,
			 0,
			 0,
			 0,
	};
	/* TRAN_SPEED(6:3): time mantissa (value x10 to stay integral) */
	static const int speed_mantissa[16] = {
		0, 10, 12, 13, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60, 70, 80,
	};
	struct sdmmc_csd *csd = &sf->csd;
	int e, m;

	if (ISSET(sc->sc_flags, SMF_SD_MODE)) {
		/*
		 * CSD version 1.0 corresponds to SD system
		 * specification version 1.0 - 1.10. (SanDisk, 3.5.3)
		 */
		csd->csdver = SD_CSD_CSDVER(resp);
		switch (csd->csdver) {
		case SD_CSD_CSDVER_2_0:
			/* SDHC/SDXC: fixed block length, direct capacity. */
			DPRINTF(("%s: SD Ver.2.0\n", SDMMCDEVNAME(sc)));
			SET(sf->flags, SFF_SDHC);
			csd->capacity = SD_CSD_V2_CAPACITY(resp);
			csd->read_bl_len = SD_CSD_V2_BL_LEN;
			break;

		case SD_CSD_CSDVER_1_0:
			DPRINTF(("%s: SD Ver.1.0\n", SDMMCDEVNAME(sc)));
			csd->capacity = SD_CSD_CAPACITY(resp);
			csd->read_bl_len = SD_CSD_READ_BL_LEN(resp);
			break;

		default:
			aprint_error_dev(sc->sc_dev,
			    "unknown SD CSD structure version 0x%x\n",
			    csd->csdver);
			return 1;
		}

		csd->mmcver = SD_CSD_MMCVER(resp);
		csd->write_bl_len = SD_CSD_WRITE_BL_LEN(resp);
		csd->r2w_factor = SD_CSD_R2W_FACTOR(resp);
		e = SD_CSD_SPEED_EXP(resp);
		m = SD_CSD_SPEED_MANT(resp);
		/* mantissa table is x10, hence the /10 */
		csd->tran_speed = speed_exponent[e] * speed_mantissa[m] / 10;
		csd->ccc = SD_CSD_CCC(resp);
	} else {
		csd->csdver = MMC_CSD_CSDVER(resp);
		if (csd->csdver == MMC_CSD_CSDVER_1_0) {
			aprint_error_dev(sc->sc_dev,
			    "unknown MMC CSD structure version 0x%x\n",
			    csd->csdver);
			return 1;
		}

		csd->mmcver = MMC_CSD_MMCVER(resp);
		csd->capacity = MMC_CSD_CAPACITY(resp);
		csd->read_bl_len = MMC_CSD_READ_BL_LEN(resp);
		csd->write_bl_len = MMC_CSD_WRITE_BL_LEN(resp);
		csd->r2w_factor = MMC_CSD_R2W_FACTOR(resp);
		e = MMC_CSD_TRAN_SPEED_EXP(resp);
		m = MMC_CSD_TRAN_SPEED_MANT(resp);
		csd->tran_speed = speed_exponent[e] * speed_mantissa[m] / 10;
	}
	/* Scale capacity so it is always in SDMMC_SECTOR_SIZE units. */
	if ((1 << csd->read_bl_len) > SDMMC_SECTOR_SIZE)
		csd->capacity *= (1 << csd->read_bl_len) / SDMMC_SECTOR_SIZE;

#ifdef SDMMC_DUMP_CSD
	sdmmc_print_csd(resp, csd);
#endif

	return 0;
}
506
/*
 * Decode the raw CID response into sf->cid.  The MMC CID layout
 * depends on the spec version recorded in the CSD, so sdmmc_decode_csd()
 * must have run first.  Returns 0 on success, 1 for an unknown MMC
 * version.  Note fields not present in a given layout (e.g. oid for
 * MMC v1.x, rev for MMC v2+) are left untouched.
 */
int
sdmmc_decode_cid(struct sdmmc_softc *sc, sdmmc_response resp,
    struct sdmmc_function *sf)
{
	struct sdmmc_cid *cid = &sf->cid;

	if (ISSET(sc->sc_flags, SMF_SD_MODE)) {
		cid->mid = SD_CID_MID(resp);
		cid->oid = SD_CID_OID(resp);
		SD_CID_PNM_CPY(resp, cid->pnm);
		cid->rev = SD_CID_REV(resp);
		cid->psn = SD_CID_PSN(resp);
		cid->mdt = SD_CID_MDT(resp);
	} else {
		switch(sf->csd.mmcver) {
		case MMC_CSD_MMCVER_1_0:
		case MMC_CSD_MMCVER_1_4:
			cid->mid = MMC_CID_MID_V1(resp);
			MMC_CID_PNM_V1_CPY(resp, cid->pnm);
			cid->rev = MMC_CID_REV_V1(resp);
			cid->psn = MMC_CID_PSN_V1(resp);
			cid->mdt = MMC_CID_MDT_V1(resp);
			break;
		case MMC_CSD_MMCVER_2_0:
		case MMC_CSD_MMCVER_3_1:
		case MMC_CSD_MMCVER_4_0:
			cid->mid = MMC_CID_MID_V2(resp);
			cid->oid = MMC_CID_OID_V2(resp);
			MMC_CID_PNM_V2_CPY(resp, cid->pnm);
			cid->psn = MMC_CID_PSN_V2(resp);
			break;
		default:
			aprint_error_dev(sc->sc_dev, "unknown MMC version %d\n",
			    sf->csd.mmcver);
			return 1;
		}
	}
	return 0;
}
546
/* Print a decoded CID (manufacturer/OEM ids, product name, revision,
 * serial number and manufacturing date) on the console. */
void
sdmmc_print_cid(struct sdmmc_cid *cid)
{

	printf("mid=0x%02x oid=0x%04x pnm=\"%s\" rev=0x%02x psn=0x%08x"
	    " mdt=%03x\n", cid->mid, cid->oid, cid->pnm, cid->rev, cid->psn,
	    cid->mdt);
}
555
#ifdef SDMMC_DUMP_CSD
/* Debug dump of a decoded CSD (only built with SDMMC_DUMP_CSD). */
void
sdmmc_print_csd(sdmmc_response resp, struct sdmmc_csd *csd)
{

	printf("csdver = %d\n", csd->csdver);
	printf("mmcver = %d\n", csd->mmcver);
	printf("capacity = 0x%08x\n", csd->capacity);
	printf("read_bl_len = %d\n", csd->read_bl_len);
	printf("write_bl_len = %d\n", csd->write_bl_len);
	printf("r2w_factor = %d\n", csd->r2w_factor);
	printf("tran_speed = %d\n", csd->tran_speed);
	printf("ccc = 0x%x\n", csd->ccc);
}
#endif
571
572 /*
573 * Initialize a SD/MMC memory card.
574 */
575 int
576 sdmmc_mem_init(struct sdmmc_softc *sc, struct sdmmc_function *sf)
577 {
578 int error = 0;
579
580 SDMMC_LOCK(sc);
581
582 if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
583 error = sdmmc_select_card(sc, sf);
584 if (error)
585 goto out;
586 }
587
588 error = sdmmc_mem_set_blocklen(sc, sf, SDMMC_SECTOR_SIZE);
589 if (error)
590 goto out;
591
592 if (ISSET(sc->sc_flags, SMF_SD_MODE))
593 error = sdmmc_mem_sd_init(sc, sf);
594 else
595 error = sdmmc_mem_mmc_init(sc, sf);
596
597 if (error != 0)
598 SET(sf->flags, SFF_ERROR);
599
600 out:
601 SDMMC_UNLOCK(sc);
602
603 return error;
604 }
605
606 /*
607 * Get or set the card's memory OCR value (SD or MMC).
608 */
/*
 * Get or set the card's memory OCR value (SD or MMC).
 *
 * With ocr == 0 this is a plain OCR inquiry; otherwise the command is
 * retried (up to 100 times, 10ms apart) until the card reports
 * MMC_OCR_MEM_READY (native mode) or leaves the idle state (SPI mode).
 * On success *ocrp receives the card's OCR (native) or echoes the
 * requested ocr (SPI/failure).  Returns 0 or ETIMEDOUT/errno.
 */
int
sdmmc_mem_send_op_cond(struct sdmmc_softc *sc, uint32_t ocr, uint32_t *ocrp)
{
	struct sdmmc_command cmd;
	int error;
	int retry;

	/* Don't lock */

	DPRINTF(("%s: sdmmc_mem_send_op_cond: ocr=%#x\n",
	    SDMMCDEVNAME(sc), ocr));

	/*
	 * If we change the OCR value, retry the command until the OCR
	 * we receive in response has the "CARD BUSY" bit set, meaning
	 * that all cards are ready for identification.
	 */
	for (retry = 0; retry < 100; retry++) {
		memset(&cmd, 0, sizeof(cmd));
		/* In SPI mode only the HCS bit is passed as argument. */
		cmd.c_arg = !ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE) ?
		    ocr : (ocr & MMC_OCR_HCS);
		cmd.c_flags = SCF_CMD_BCR | SCF_RSP_R3 | SCF_RSP_SPI_R1
		    | SCF_TOUT_OK;

		if (ISSET(sc->sc_flags, SMF_SD_MODE)) {
			/* SD: ACMD41 (CMD55 handled by sdmmc_app_command) */
			cmd.c_opcode = SD_APP_OP_COND;
			error = sdmmc_app_command(sc, NULL, &cmd);
		} else {
			/* MMC: CMD1 */
			cmd.c_opcode = MMC_SEND_OP_COND;
			error = sdmmc_mmc_command(sc, &cmd);
		}
		if (error)
			break;

		if (ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
			/* SPI: done once the card left the idle state. */
			if (!ISSET(MMC_SPI_R1(cmd.c_resp), R1_SPI_IDLE))
				break;
		} else {
			/* Native: done when ready, or if only inquiring. */
			if (ISSET(MMC_R3(cmd.c_resp), MMC_OCR_MEM_READY) ||
			    ocr == 0)
				break;
		}

		error = ETIMEDOUT;
		sdmmc_pause(10000, NULL);
	}
	if (ocrp != NULL) {
		if (error == 0 &&
		    !ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
			*ocrp = MMC_R3(cmd.c_resp);
		} else {
			*ocrp = ocr;
		}
	}
	DPRINTF(("%s: sdmmc_mem_send_op_cond: error=%d, ocr=%#x\n",
	    SDMMCDEVNAME(sc), error, MMC_R3(cmd.c_resp)));
	return error;
}
667
668 int
669 sdmmc_mem_send_if_cond(struct sdmmc_softc *sc, uint32_t ocr, uint32_t *ocrp)
670 {
671 struct sdmmc_command cmd;
672 int error;
673
674 /* Don't lock */
675
676 memset(&cmd, 0, sizeof(cmd));
677 cmd.c_arg = ocr;
678 cmd.c_flags = SCF_CMD_BCR | SCF_RSP_R7 | SCF_RSP_SPI_R7 | SCF_TOUT_OK;
679 cmd.c_opcode = SD_SEND_IF_COND;
680
681 error = sdmmc_mmc_command(sc, &cmd);
682 if (error == 0 && ocrp != NULL) {
683 if (ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
684 *ocrp = MMC_SPI_R7(cmd.c_resp);
685 } else {
686 *ocrp = MMC_R7(cmd.c_resp);
687 }
688 DPRINTF(("%s: sdmmc_mem_send_if_cond: error=%d, ocr=%#x\n",
689 SDMMCDEVNAME(sc), error, *ocrp));
690 }
691 return error;
692 }
693
694 /*
695 * Set the read block length appropriately for this card, according to
696 * the card CSD register value.
697 */
698 int
699 sdmmc_mem_set_blocklen(struct sdmmc_softc *sc, struct sdmmc_function *sf,
700 int block_len)
701 {
702 struct sdmmc_command cmd;
703 int error;
704
705 /* Don't lock */
706
707 memset(&cmd, 0, sizeof(cmd));
708 cmd.c_opcode = MMC_SET_BLOCKLEN;
709 cmd.c_arg = block_len;
710 cmd.c_flags = SCF_CMD_AC | SCF_RSP_R1 | SCF_RSP_SPI_R1;
711
712 error = sdmmc_mmc_command(sc, &cmd);
713
714 DPRINTF(("%s: sdmmc_mem_set_blocklen: read_bl_len=%d sector_size=%d\n",
715 SDMMCDEVNAME(sc), 1 << sf->csd.read_bl_len, block_len));
716
717 return error;
718 }
719
720 /* make 512-bit BE quantity __bitfield()-compatible */
721 static void
722 sdmmc_be512_to_bitfield512(sdmmc_bitfield512_t *buf) {
723 size_t i;
724 uint32_t tmp0, tmp1;
725 const size_t bitswords = __arraycount(buf->_bits);
726 for (i = 0; i < bitswords/2; i++) {
727 tmp0 = buf->_bits[i];
728 tmp1 = buf->_bits[bitswords - 1 - i];
729 buf->_bits[i] = be32toh(tmp1);
730 buf->_bits[bitswords - 1 - i] = be32toh(tmp0);
731 }
732 }
733
734 static int
735 sdmmc_mem_select_transfer_mode(struct sdmmc_softc *sc, int support_func)
736 {
737 if (ISSET(sc->sc_flags, SMF_UHS_MODE)) {
738 if (ISSET(sc->sc_caps, SMC_CAPS_UHS_SDR104) &&
739 ISSET(support_func, 1 << SD_ACCESS_MODE_SDR104)) {
740 return SD_ACCESS_MODE_SDR104;
741 }
742 if (ISSET(sc->sc_caps, SMC_CAPS_UHS_DDR50) &&
743 ISSET(support_func, 1 << SD_ACCESS_MODE_DDR50)) {
744 return SD_ACCESS_MODE_DDR50;
745 }
746 if (ISSET(sc->sc_caps, SMC_CAPS_UHS_SDR50) &&
747 ISSET(support_func, 1 << SD_ACCESS_MODE_SDR50)) {
748 return SD_ACCESS_MODE_SDR50;
749 }
750 }
751 if (ISSET(sc->sc_caps, SMC_CAPS_SD_HIGHSPEED) &&
752 ISSET(support_func, 1 << SD_ACCESS_MODE_SDR25)) {
753 return SD_ACCESS_MODE_SDR25;
754 }
755 return SD_ACCESS_MODE_SDR12;
756 }
757
758 static int
759 sdmmc_mem_execute_tuning(struct sdmmc_softc *sc, struct sdmmc_function *sf)
760 {
761 int timing = -1;
762
763 if (ISSET(sc->sc_flags, SMF_SD_MODE)) {
764 if (!ISSET(sc->sc_flags, SMF_UHS_MODE))
765 return 0;
766
767 switch (sf->csd.tran_speed) {
768 case 100000:
769 timing = SDMMC_TIMING_UHS_SDR50;
770 break;
771 case 208000:
772 timing = SDMMC_TIMING_UHS_SDR104;
773 break;
774 default:
775 return 0;
776 }
777 } else {
778 switch (sf->csd.tran_speed) {
779 case 200000:
780 timing = SDMMC_TIMING_MMC_HS200;
781 break;
782 default:
783 return 0;
784 }
785 }
786
787 DPRINTF(("%s: execute tuning for timing %d\n", SDMMCDEVNAME(sc),
788 timing));
789
790 return sdmmc_chip_execute_tuning(sc->sc_sct, sc->sc_sch, timing);
791 }
792
/*
 * SD-specific card setup: raise the bus clock, read and decode the
 * SCR, switch to 4-bit bus width if possible, negotiate the fastest
 * mutually supported access mode via CMD6 (including UHS modes with
 * the 1.8V recovery path), then read the SD status and run tuning
 * where required.  Returns 0 on success or errno.
 */
static int
sdmmc_mem_sd_init(struct sdmmc_softc *sc, struct sdmmc_function *sf)
{
	int support_func, best_func, bus_clock, error, i;
	sdmmc_bitfield512_t status;
	bool ddr = false;

	/* change bus clock */
	bus_clock = uimin(sc->sc_busclk, sf->csd.tran_speed);
	error = sdmmc_chip_bus_clock(sc->sc_sct, sc->sc_sch, bus_clock, false);
	if (error) {
		aprint_error_dev(sc->sc_dev, "can't change bus clock\n");
		return error;
	}

	error = sdmmc_mem_send_scr(sc, sf, sf->raw_scr);
	if (error) {
		aprint_error_dev(sc->sc_dev, "SD_SEND_SCR send failed.\n");
		return error;
	}
	error = sdmmc_mem_decode_scr(sc, sf);
	if (error)
		return error;

	if (ISSET(sc->sc_caps, SMC_CAPS_4BIT_MODE) &&
	    ISSET(sf->scr.bus_width, SCR_SD_BUS_WIDTHS_4BIT)) {
		DPRINTF(("%s: change bus width\n", SDMMCDEVNAME(sc)));
		error = sdmmc_set_bus_width(sf, 4);
		if (error) {
			aprint_error_dev(sc->sc_dev,
			    "can't change bus width (%d bit)\n", 4);
			return error;
		}
		sf->width = 4;
	}

	best_func = 0;
	/* CMD6 requires SD spec 1.10+ and the SWITCH command class. */
	if (sf->scr.sd_spec >= SCR_SD_SPEC_VER_1_10 &&
	    ISSET(sf->csd.ccc, SD_CSD_CCC_SWITCH)) {
		DPRINTF(("%s: switch func mode 0\n", SDMMCDEVNAME(sc)));
		/* Mode 0 = inquiry: ask which functions are supported. */
		error = sdmmc_mem_sd_switch(sf, 0, 1, 0, &status);
		if (error) {
			if (error == ENOTSUP) {
				/* Not supported by controller */
				goto skipswitchfuncs;
			} else {
				aprint_error_dev(sc->sc_dev,
				    "switch func mode 0 failed\n");
				return error;
			}
		}

		support_func = SFUNC_STATUS_GROUP(&status, 1);

		/* 0x1c = SDR50|SDR104|DDR50 support bits */
		if (!ISSET(sc->sc_flags, SMF_UHS_MODE) && support_func & 0x1c) {
			/* XXX UHS-I card started in 1.8V mode, switch now */
			error = sdmmc_mem_signal_voltage(sc,
			    SDMMC_SIGNAL_VOLTAGE_180);
			if (error) {
				aprint_error_dev(sc->sc_dev,
				    "failed to recover UHS card\n");
				return error;
			}
			SET(sc->sc_flags, SMF_UHS_MODE);
		}

		for (i = 0; i < __arraycount(switch_group0_functions); i++) {
			if (!(support_func & (1 << i)))
				continue;
			DPRINTF(("%s: card supports mode %s\n",
			    SDMMCDEVNAME(sc),
			    switch_group0_functions[i].name));
		}

		best_func = sdmmc_mem_select_transfer_mode(sc, support_func);

		DPRINTF(("%s: using mode %s\n", SDMMCDEVNAME(sc),
		    switch_group0_functions[best_func].name));

		if (best_func != 0) {
			DPRINTF(("%s: switch func mode 1(func=%d)\n",
			    SDMMCDEVNAME(sc), best_func));
			/* Mode 1 = set: actually switch the function. */
			error =
			    sdmmc_mem_sd_switch(sf, 1, 1, best_func, &status);
			if (error) {
				aprint_error_dev(sc->sc_dev,
				    "switch func mode 1 failed:"
				    " group 1 function %d(0x%2x)\n",
				    best_func, support_func);
				return error;
			}
			sf->csd.tran_speed =
			    switch_group0_functions[best_func].freq;

			if (best_func == SD_ACCESS_MODE_DDR50)
				ddr = true;

			/* Wait 400KHz x 8 clock (2.5us * 8 + slop) */
			delay(25);
		}
	}
skipswitchfuncs:

	/* update bus clock */
	if (sc->sc_busclk > sf->csd.tran_speed)
		sc->sc_busclk = sf->csd.tran_speed;
	/* Nothing left to do if neither clock nor DDR mode changed. */
	if (sc->sc_busclk == bus_clock && sc->sc_busddr == ddr)
		return 0;

	/* change bus clock */
	error = sdmmc_chip_bus_clock(sc->sc_sct, sc->sc_sch, sc->sc_busclk,
	    ddr);
	if (error) {
		aprint_error_dev(sc->sc_dev, "can't change bus clock\n");
		return error;
	}

	sc->sc_transfer_mode = switch_group0_functions[best_func].name;
	sc->sc_busddr = ddr;

	/* get card status */
	error = sdmmc_mem_send_ssr(sc, sf, &status);
	if (error) {
		aprint_error_dev(sc->sc_dev, "can't get SD status: %d\n",
		    error);
		return error;
	}
	sdmmc_mem_decode_ssr(sc, sf, &status);

	/* execute tuning (UHS) */
	error = sdmmc_mem_execute_tuning(sc, sf);
	if (error) {
		aprint_error_dev(sc->sc_dev, "can't execute SD tuning\n");
		return error;
	}

	return 0;
}
931
932 static int
933 sdmmc_mem_mmc_init(struct sdmmc_softc *sc, struct sdmmc_function *sf)
934 {
935 int width, value, hs_timing, bus_clock, error;
936 uint8_t ext_csd[512];
937 uint32_t sectors = 0;
938 bool ddr = false;
939
940 sc->sc_transfer_mode = NULL;
941
942 /* change bus clock */
943 bus_clock = uimin(sc->sc_busclk, sf->csd.tran_speed);
944 error = sdmmc_chip_bus_clock(sc->sc_sct, sc->sc_sch, bus_clock, false);
945 if (error) {
946 aprint_error_dev(sc->sc_dev, "can't change bus clock\n");
947 return error;
948 }
949
950 if (sf->csd.mmcver >= MMC_CSD_MMCVER_4_0) {
951 error = sdmmc_mem_send_cxd_data(sc,
952 MMC_SEND_EXT_CSD, ext_csd, sizeof(ext_csd));
953 if (error) {
954 aprint_error_dev(sc->sc_dev,
955 "can't read EXT_CSD (error=%d)\n", error);
956 return error;
957 }
958 if ((sf->csd.csdver == MMC_CSD_CSDVER_EXT_CSD) &&
959 (ext_csd[EXT_CSD_STRUCTURE] > EXT_CSD_STRUCTURE_VER_1_2)) {
960 aprint_error_dev(sc->sc_dev,
961 "unrecognised future version (%d)\n",
962 ext_csd[EXT_CSD_STRUCTURE]);
963 return ENOTSUP;
964 }
965 sf->ext_csd.rev = ext_csd[EXT_CSD_REV];
966
967 if (ISSET(sc->sc_caps, SMC_CAPS_MMC_HS200) &&
968 ext_csd[EXT_CSD_CARD_TYPE] & EXT_CSD_CARD_TYPE_F_HS200_1_8V) {
969 hs_timing = EXT_CSD_HS_TIMING_HS200;
970 } else if (ISSET(sc->sc_caps, SMC_CAPS_MMC_DDR52) &&
971 ext_csd[EXT_CSD_CARD_TYPE] & EXT_CSD_CARD_TYPE_F_DDR52_1_8V) {
972 hs_timing = EXT_CSD_HS_TIMING_HIGHSPEED;
973 ddr = true;
974 } else if (ext_csd[EXT_CSD_CARD_TYPE] & EXT_CSD_CARD_TYPE_F_52M) {
975 hs_timing = EXT_CSD_HS_TIMING_HIGHSPEED;
976 } else if (ext_csd[EXT_CSD_CARD_TYPE] & EXT_CSD_CARD_TYPE_F_26M) {
977 hs_timing = EXT_CSD_HS_TIMING_LEGACY;
978 } else {
979 aprint_error_dev(sc->sc_dev,
980 "unknown CARD_TYPE: 0x%x\n",
981 ext_csd[EXT_CSD_CARD_TYPE]);
982 return ENOTSUP;
983 }
984
985 if (ISSET(sc->sc_caps, SMC_CAPS_8BIT_MODE)) {
986 width = 8;
987 value = EXT_CSD_BUS_WIDTH_8;
988 } else if (ISSET(sc->sc_caps, SMC_CAPS_4BIT_MODE)) {
989 width = 4;
990 value = EXT_CSD_BUS_WIDTH_4;
991 } else {
992 width = 1;
993 value = EXT_CSD_BUS_WIDTH_1;
994 }
995
996 if (width != 1) {
997 error = sdmmc_mem_mmc_switch(sf, EXT_CSD_CMD_SET_NORMAL,
998 EXT_CSD_BUS_WIDTH, value, false);
999 if (error == 0)
1000 error = sdmmc_chip_bus_width(sc->sc_sct,
1001 sc->sc_sch, width);
1002 else {
1003 DPRINTF(("%s: can't change bus width"
1004 " (%d bit)\n", SDMMCDEVNAME(sc), width));
1005 return error;
1006 }
1007
1008 /* XXXX: need bus test? (using by CMD14 & CMD19) */
1009 delay(10000);
1010 }
1011 sf->width = width;
1012
1013 if (hs_timing == EXT_CSD_HS_TIMING_HIGHSPEED &&
1014 !ISSET(sc->sc_caps, SMC_CAPS_MMC_HIGHSPEED)) {
1015 hs_timing = EXT_CSD_HS_TIMING_LEGACY;
1016 }
1017
1018 const int target_timing = hs_timing;
1019 if (hs_timing != EXT_CSD_HS_TIMING_LEGACY) {
1020 while (hs_timing >= EXT_CSD_HS_TIMING_LEGACY) {
1021 error = sdmmc_mem_mmc_switch(sf, EXT_CSD_CMD_SET_NORMAL,
1022 EXT_CSD_HS_TIMING, hs_timing, false);
1023 if (error == 0 || hs_timing == EXT_CSD_HS_TIMING_LEGACY)
1024 break;
1025 hs_timing--;
1026 }
1027 }
1028 if (hs_timing != target_timing) {
1029 aprint_debug_dev(sc->sc_dev,
1030 "card failed to switch to timing mode %d, using %d\n",
1031 target_timing, hs_timing);
1032 }
1033
1034 KASSERT(hs_timing < __arraycount(sdmmc_mmc_timings));
1035 sf->csd.tran_speed = sdmmc_mmc_timings[hs_timing];
1036
1037 if (sc->sc_busclk > sf->csd.tran_speed)
1038 sc->sc_busclk = sf->csd.tran_speed;
1039 if (sc->sc_busclk != bus_clock) {
1040 error = sdmmc_chip_bus_clock(sc->sc_sct, sc->sc_sch,
1041 sc->sc_busclk, false);
1042 if (error) {
1043 aprint_error_dev(sc->sc_dev,
1044 "can't change bus clock\n");
1045 return error;
1046 }
1047 }
1048
1049 if (hs_timing != EXT_CSD_HS_TIMING_LEGACY) {
1050 error = sdmmc_mem_send_cxd_data(sc,
1051 MMC_SEND_EXT_CSD, ext_csd, sizeof(ext_csd));
1052 if (error) {
1053 aprint_error_dev(sc->sc_dev,
1054 "can't re-read EXT_CSD\n");
1055 return error;
1056 }
1057 if (ext_csd[EXT_CSD_HS_TIMING] != hs_timing) {
1058 aprint_error_dev(sc->sc_dev,
1059 "HS_TIMING set failed\n");
1060 return EINVAL;
1061 }
1062 }
1063
1064 /*
1065 * HS_TIMING must be set to 0x1 before setting BUS_WIDTH
1066 * for dual data rate operation
1067 */
1068 if (ddr &&
1069 hs_timing == EXT_CSD_HS_TIMING_HIGHSPEED &&
1070 width > 1) {
1071 error = sdmmc_mem_mmc_switch(sf,
1072 EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BUS_WIDTH,
1073 (width == 8) ? EXT_CSD_BUS_WIDTH_8_DDR :
1074 EXT_CSD_BUS_WIDTH_4_DDR, false);
1075 if (error) {
1076 DPRINTF(("%s: can't switch to DDR"
1077 " (%d bit)\n", SDMMCDEVNAME(sc), width));
1078 return error;
1079 }
1080
1081 delay(10000);
1082
1083 error = sdmmc_mem_signal_voltage(sc,
1084 SDMMC_SIGNAL_VOLTAGE_180);
1085 if (error) {
1086 aprint_error_dev(sc->sc_dev,
1087 "can't switch signaling voltage\n");
1088 return error;
1089 }
1090
1091 error = sdmmc_chip_bus_clock(sc->sc_sct, sc->sc_sch,
1092 sc->sc_busclk, ddr);
1093 if (error) {
1094 aprint_error_dev(sc->sc_dev,
1095 "can't change bus clock\n");
1096 return error;
1097 }
1098
1099 delay(10000);
1100
1101 sc->sc_transfer_mode = "DDR52";
1102 sc->sc_busddr = ddr;
1103 }
1104
1105 sectors = ext_csd[EXT_CSD_SEC_COUNT + 0] << 0 |
1106 ext_csd[EXT_CSD_SEC_COUNT + 1] << 8 |
1107 ext_csd[EXT_CSD_SEC_COUNT + 2] << 16 |
1108 ext_csd[EXT_CSD_SEC_COUNT + 3] << 24;
1109 if (sectors > (2u * 1024 * 1024 * 1024) / 512) {
1110 SET(sf->flags, SFF_SDHC);
1111 sf->csd.capacity = sectors;
1112 }
1113
1114 if (hs_timing == EXT_CSD_HS_TIMING_HS200) {
1115 sc->sc_transfer_mode = "HS200";
1116
1117 /* execute tuning (HS200) */
1118 error = sdmmc_mem_execute_tuning(sc, sf);
1119 if (error) {
1120 aprint_error_dev(sc->sc_dev,
1121 "can't execute MMC tuning\n");
1122 return error;
1123 }
1124 }
1125
1126 if (sf->ext_csd.rev >= 5) {
1127 sf->ext_csd.rst_n_function =
1128 ext_csd[EXT_CSD_RST_N_FUNCTION];
1129 }
1130
1131 if (sf->ext_csd.rev >= 6) {
1132 sf->ext_csd.cache_size =
1133 le32dec(&ext_csd[EXT_CSD_CACHE_SIZE]) * 1024;
1134 }
1135 if (sf->ext_csd.cache_size > 0) {
1136 /* eMMC cache present, enable it */
1137 error = sdmmc_mem_mmc_switch(sf,
1138 EXT_CSD_CMD_SET_NORMAL, EXT_CSD_CACHE_CTRL,
1139 EXT_CSD_CACHE_CTRL_CACHE_EN, false);
1140 if (error) {
1141 aprint_error_dev(sc->sc_dev,
1142 "can't enable cache: %d\n", error);
1143 } else {
1144 SET(sf->flags, SFF_CACHE_ENABLED);
1145 }
1146 }
1147 } else {
1148 if (sc->sc_busclk > sf->csd.tran_speed)
1149 sc->sc_busclk = sf->csd.tran_speed;
1150 if (sc->sc_busclk != bus_clock) {
1151 error = sdmmc_chip_bus_clock(sc->sc_sct, sc->sc_sch,
1152 sc->sc_busclk, false);
1153 if (error) {
1154 aprint_error_dev(sc->sc_dev,
1155 "can't change bus clock\n");
1156 return error;
1157 }
1158 }
1159 }
1160
1161 return 0;
1162 }
1163
1164 static int
1165 sdmmc_mem_send_cid(struct sdmmc_softc *sc, sdmmc_response *resp)
1166 {
1167 struct sdmmc_command cmd;
1168 int error;
1169
1170 if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
1171 memset(&cmd, 0, sizeof cmd);
1172 cmd.c_opcode = MMC_ALL_SEND_CID;
1173 cmd.c_flags = SCF_CMD_BCR | SCF_RSP_R2 | SCF_TOUT_OK;
1174
1175 error = sdmmc_mmc_command(sc, &cmd);
1176 } else {
1177 error = sdmmc_mem_send_cxd_data(sc, MMC_SEND_CID, &cmd.c_resp,
1178 sizeof(cmd.c_resp));
1179 }
1180
1181 #ifdef SDMMC_DEBUG
1182 if (error == 0)
1183 sdmmc_dump_data("CID", cmd.c_resp, sizeof(cmd.c_resp));
1184 #endif
1185 if (error == 0 && resp != NULL)
1186 memcpy(resp, &cmd.c_resp, sizeof(*resp));
1187 return error;
1188 }
1189
1190 static int
1191 sdmmc_mem_send_csd(struct sdmmc_softc *sc, struct sdmmc_function *sf,
1192 sdmmc_response *resp)
1193 {
1194 struct sdmmc_command cmd;
1195 int error;
1196
1197 if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
1198 memset(&cmd, 0, sizeof cmd);
1199 cmd.c_opcode = MMC_SEND_CSD;
1200 cmd.c_arg = MMC_ARG_RCA(sf->rca);
1201 cmd.c_flags = SCF_CMD_AC | SCF_RSP_R2;
1202
1203 error = sdmmc_mmc_command(sc, &cmd);
1204 } else {
1205 error = sdmmc_mem_send_cxd_data(sc, MMC_SEND_CSD, &cmd.c_resp,
1206 sizeof(cmd.c_resp));
1207 }
1208
1209 #ifdef SDMMC_DEBUG
1210 if (error == 0)
1211 sdmmc_dump_data("CSD", cmd.c_resp, sizeof(cmd.c_resp));
1212 #endif
1213 if (error == 0 && resp != NULL)
1214 memcpy(resp, &cmd.c_resp, sizeof(*resp));
1215 return error;
1216 }
1217
1218 static int
1219 sdmmc_mem_send_scr(struct sdmmc_softc *sc, struct sdmmc_function *sf,
1220 uint32_t *scr)
1221 {
1222 struct sdmmc_command cmd;
1223 bus_dma_segment_t ds[1];
1224 void *ptr = NULL;
1225 int datalen = 8;
1226 int rseg;
1227 int error = 0;
1228
1229 /* Don't lock */
1230
1231 if (ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
1232 error = bus_dmamem_alloc(sc->sc_dmat, datalen, PAGE_SIZE, 0,
1233 ds, 1, &rseg, BUS_DMA_NOWAIT);
1234 if (error)
1235 goto out;
1236 error = bus_dmamem_map(sc->sc_dmat, ds, 1, datalen, &ptr,
1237 BUS_DMA_NOWAIT);
1238 if (error)
1239 goto dmamem_free;
1240 error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmap, ptr, datalen,
1241 NULL, BUS_DMA_NOWAIT|BUS_DMA_STREAMING|BUS_DMA_READ);
1242 if (error)
1243 goto dmamem_unmap;
1244
1245 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap, 0, datalen,
1246 BUS_DMASYNC_PREREAD);
1247 } else {
1248 ptr = malloc(datalen, M_DEVBUF, M_NOWAIT | M_ZERO);
1249 if (ptr == NULL)
1250 goto out;
1251 }
1252
1253 memset(&cmd, 0, sizeof(cmd));
1254 cmd.c_data = ptr;
1255 cmd.c_datalen = datalen;
1256 cmd.c_blklen = datalen;
1257 cmd.c_arg = 0;
1258 cmd.c_flags = SCF_CMD_ADTC | SCF_CMD_READ | SCF_RSP_R1 | SCF_RSP_SPI_R1;
1259 cmd.c_opcode = SD_APP_SEND_SCR;
1260 if (ISSET(sc->sc_caps, SMC_CAPS_DMA))
1261 cmd.c_dmamap = sc->sc_dmap;
1262
1263 error = sdmmc_app_command(sc, sf, &cmd);
1264 if (error == 0) {
1265 if (ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
1266 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap, 0, datalen,
1267 BUS_DMASYNC_POSTREAD);
1268 }
1269 memcpy(scr, ptr, datalen);
1270 }
1271
1272 out:
1273 if (ptr != NULL) {
1274 if (ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
1275 bus_dmamap_unload(sc->sc_dmat, sc->sc_dmap);
1276 dmamem_unmap:
1277 bus_dmamem_unmap(sc->sc_dmat, ptr, datalen);
1278 dmamem_free:
1279 bus_dmamem_free(sc->sc_dmat, ds, rseg);
1280 } else {
1281 free(ptr, M_DEVBUF);
1282 }
1283 }
1284 DPRINTF(("%s: sdmem_mem_send_scr: error = %d\n", SDMMCDEVNAME(sc),
1285 error));
1286
1287 #ifdef SDMMC_DEBUG
1288 if (error == 0)
1289 sdmmc_dump_data("SCR", scr, datalen);
1290 #endif
1291 return error;
1292 }
1293
/*
 * Decode the raw SCR previously read into sf->raw_scr and record the
 * SD spec version and supported bus widths in sf->scr.  Returns EINVAL
 * for an unrecognized SCR structure version, 0 otherwise.
 */
static int
sdmmc_mem_decode_scr(struct sdmmc_softc *sc, struct sdmmc_function *sf)
{
	sdmmc_response resp;
	int ver;

	memset(resp, 0, sizeof(resp));
	/*
	 * Change the raw-scr received from the DMA stream to resp.
	 */
	/*
	 * The 64-bit SCR arrived big-endian in raw_scr[0..1]; repack it
	 * into the response-word layout the SCR_* accessors expect by
	 * shifting the whole quantity right by 8 bits across both words.
	 */
	resp[0] = be32toh(sf->raw_scr[1]) >> 8;		// LSW
	resp[1] = be32toh(sf->raw_scr[0]);		// MSW
	resp[0] |= (resp[1] & 0xff) << 24;	/* carry low byte of MSW down */
	resp[1] >>= 8;

	ver = SCR_STRUCTURE(resp);
	sf->scr.sd_spec = SCR_SD_SPEC(resp);
	sf->scr.bus_width = SCR_SD_BUS_WIDTHS(resp);

	DPRINTF(("%s: sdmmc_mem_decode_scr: %08x%08x ver=%d, spec=%d, bus width=%d\n",
	    SDMMCDEVNAME(sc), resp[1], resp[0],
	    ver, sf->scr.sd_spec, sf->scr.bus_width));

	/* Only SCR structure versions 0 and 1 are understood. */
	if (ver != 0 && ver != 1) {
		DPRINTF(("%s: unknown structure version: %d\n",
		    SDMMCDEVNAME(sc), ver));
		return EINVAL;
	}
	return 0;
}
1324
1325 static int
1326 sdmmc_mem_send_ssr(struct sdmmc_softc *sc, struct sdmmc_function *sf,
1327 sdmmc_bitfield512_t *ssr)
1328 {
1329 struct sdmmc_command cmd;
1330 bus_dma_segment_t ds[1];
1331 void *ptr = NULL;
1332 int datalen = 64;
1333 int rseg;
1334 int error = 0;
1335
1336 /* Don't lock */
1337
1338 if (ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
1339 error = bus_dmamem_alloc(sc->sc_dmat, datalen, PAGE_SIZE, 0,
1340 ds, 1, &rseg, BUS_DMA_NOWAIT);
1341 if (error)
1342 goto out;
1343 error = bus_dmamem_map(sc->sc_dmat, ds, 1, datalen, &ptr,
1344 BUS_DMA_NOWAIT);
1345 if (error)
1346 goto dmamem_free;
1347 error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmap, ptr, datalen,
1348 NULL, BUS_DMA_NOWAIT|BUS_DMA_STREAMING|BUS_DMA_READ);
1349 if (error)
1350 goto dmamem_unmap;
1351
1352 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap, 0, datalen,
1353 BUS_DMASYNC_PREREAD);
1354 } else {
1355 ptr = malloc(datalen, M_DEVBUF, M_NOWAIT | M_ZERO);
1356 if (ptr == NULL)
1357 goto out;
1358 }
1359
1360 memset(&cmd, 0, sizeof(cmd));
1361 cmd.c_data = ptr;
1362 cmd.c_datalen = datalen;
1363 cmd.c_blklen = datalen;
1364 cmd.c_arg = 0;
1365 cmd.c_flags = SCF_CMD_ADTC | SCF_CMD_READ | SCF_RSP_R1 | SCF_RSP_SPI_R1;
1366 cmd.c_opcode = SD_APP_SD_STATUS;
1367 if (ISSET(sc->sc_caps, SMC_CAPS_DMA))
1368 cmd.c_dmamap = sc->sc_dmap;
1369
1370 error = sdmmc_app_command(sc, sf, &cmd);
1371 if (error == 0) {
1372 if (ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
1373 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap, 0, datalen,
1374 BUS_DMASYNC_POSTREAD);
1375 }
1376 memcpy(ssr, ptr, datalen);
1377 }
1378
1379 out:
1380 if (ptr != NULL) {
1381 if (ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
1382 bus_dmamap_unload(sc->sc_dmat, sc->sc_dmap);
1383 dmamem_unmap:
1384 bus_dmamem_unmap(sc->sc_dmat, ptr, datalen);
1385 dmamem_free:
1386 bus_dmamem_free(sc->sc_dmat, ds, rseg);
1387 } else {
1388 free(ptr, M_DEVBUF);
1389 }
1390 }
1391 DPRINTF(("%s: sdmem_mem_send_ssr: error = %d\n", SDMMCDEVNAME(sc),
1392 error));
1393
1394 if (error == 0)
1395 sdmmc_be512_to_bitfield512(ssr);
1396
1397 #ifdef SDMMC_DEBUG
1398 if (error == 0)
1399 sdmmc_dump_data("SSR", ssr, datalen);
1400 #endif
1401 return error;
1402 }
1403
1404 static int
1405 sdmmc_mem_decode_ssr(struct sdmmc_softc *sc, struct sdmmc_function *sf,
1406 sdmmc_bitfield512_t *ssr_bitfield)
1407 {
1408 uint32_t *ssr = (uint32_t *)ssr_bitfield;
1409 int speed_class_val, bus_width_val;
1410
1411 const int bus_width = SSR_DAT_BUS_WIDTH(ssr);
1412 const int speed_class = SSR_SPEED_CLASS(ssr);
1413 const int uhs_speed_grade = SSR_UHS_SPEED_GRADE(ssr);
1414 const int video_speed_class = SSR_VIDEO_SPEED_CLASS(ssr);
1415 const int app_perf_class = SSR_APP_PERF_CLASS(ssr);
1416
1417 switch (speed_class) {
1418 case SSR_SPEED_CLASS_0: speed_class_val = 0; break;
1419 case SSR_SPEED_CLASS_2: speed_class_val = 2; break;
1420 case SSR_SPEED_CLASS_4: speed_class_val = 4; break;
1421 case SSR_SPEED_CLASS_6: speed_class_val = 6; break;
1422 case SSR_SPEED_CLASS_10: speed_class_val = 10; break;
1423 default: speed_class_val = -1; break;
1424 }
1425
1426 switch (bus_width) {
1427 case SSR_DAT_BUS_WIDTH_1: bus_width_val = 1; break;
1428 case SSR_DAT_BUS_WIDTH_4: bus_width_val = 4; break;
1429 default: bus_width_val = -1;
1430 }
1431
1432 /*
1433 * Log card status
1434 */
1435 device_printf(sc->sc_dev, "SD card status:");
1436 if (bus_width_val != -1)
1437 printf(" %d-bit", bus_width_val);
1438 else
1439 printf(" unknown bus width");
1440 if (speed_class_val != -1)
1441 printf(", C%d", speed_class_val);
1442 if (uhs_speed_grade)
1443 printf(", U%d", uhs_speed_grade);
1444 if (video_speed_class)
1445 printf(", V%d", video_speed_class);
1446 if (app_perf_class)
1447 printf(", A%d", app_perf_class);
1448 printf("\n");
1449
1450 return 0;
1451 }
1452
1453 static int
1454 sdmmc_mem_send_cxd_data(struct sdmmc_softc *sc, int opcode, void *data,
1455 size_t datalen)
1456 {
1457 struct sdmmc_command cmd;
1458 bus_dma_segment_t ds[1];
1459 void *ptr = NULL;
1460 int rseg;
1461 int error = 0;
1462
1463 if (ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
1464 error = bus_dmamem_alloc(sc->sc_dmat, datalen, PAGE_SIZE, 0, ds,
1465 1, &rseg, BUS_DMA_NOWAIT);
1466 if (error)
1467 goto out;
1468 error = bus_dmamem_map(sc->sc_dmat, ds, 1, datalen, &ptr,
1469 BUS_DMA_NOWAIT);
1470 if (error)
1471 goto dmamem_free;
1472 error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmap, ptr, datalen,
1473 NULL, BUS_DMA_NOWAIT|BUS_DMA_STREAMING|BUS_DMA_READ);
1474 if (error)
1475 goto dmamem_unmap;
1476
1477 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap, 0, datalen,
1478 BUS_DMASYNC_PREREAD);
1479 } else {
1480 ptr = malloc(datalen, M_DEVBUF, M_NOWAIT | M_ZERO);
1481 if (ptr == NULL)
1482 goto out;
1483 }
1484
1485 memset(&cmd, 0, sizeof(cmd));
1486 cmd.c_data = ptr;
1487 cmd.c_datalen = datalen;
1488 cmd.c_blklen = datalen;
1489 cmd.c_opcode = opcode;
1490 cmd.c_arg = 0;
1491 cmd.c_flags = SCF_CMD_ADTC | SCF_CMD_READ | SCF_RSP_SPI_R1;
1492 if (opcode == MMC_SEND_EXT_CSD)
1493 SET(cmd.c_flags, SCF_RSP_R1);
1494 else
1495 SET(cmd.c_flags, SCF_RSP_R2);
1496 if (ISSET(sc->sc_caps, SMC_CAPS_DMA))
1497 cmd.c_dmamap = sc->sc_dmap;
1498
1499 error = sdmmc_mmc_command(sc, &cmd);
1500 if (error == 0) {
1501 if (ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
1502 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap, 0, datalen,
1503 BUS_DMASYNC_POSTREAD);
1504 }
1505 memcpy(data, ptr, datalen);
1506 #ifdef SDMMC_DEBUG
1507 sdmmc_dump_data("CXD", data, datalen);
1508 #endif
1509 }
1510
1511 out:
1512 if (ptr != NULL) {
1513 if (ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
1514 bus_dmamap_unload(sc->sc_dmat, sc->sc_dmap);
1515 dmamem_unmap:
1516 bus_dmamem_unmap(sc->sc_dmat, ptr, datalen);
1517 dmamem_free:
1518 bus_dmamem_free(sc->sc_dmat, ds, rseg);
1519 } else {
1520 free(ptr, M_DEVBUF);
1521 }
1522 }
1523 return error;
1524 }
1525
1526 static int
1527 sdmmc_set_bus_width(struct sdmmc_function *sf, int width)
1528 {
1529 struct sdmmc_softc *sc = sf->sc;
1530 struct sdmmc_command cmd;
1531 int error;
1532
1533 if (ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE))
1534 return ENODEV;
1535
1536 memset(&cmd, 0, sizeof(cmd));
1537 cmd.c_opcode = SD_APP_SET_BUS_WIDTH;
1538 cmd.c_flags = SCF_RSP_R1 | SCF_CMD_AC;
1539
1540 switch (width) {
1541 case 1:
1542 cmd.c_arg = SD_ARG_BUS_WIDTH_1;
1543 break;
1544
1545 case 4:
1546 cmd.c_arg = SD_ARG_BUS_WIDTH_4;
1547 break;
1548
1549 default:
1550 return EINVAL;
1551 }
1552
1553 error = sdmmc_app_command(sc, sf, &cmd);
1554 if (error == 0)
1555 error = sdmmc_chip_bus_width(sc->sc_sct, sc->sc_sch, width);
1556 return error;
1557 }
1558
1559 static int
1560 sdmmc_mem_sd_switch(struct sdmmc_function *sf, int mode, int group,
1561 int function, sdmmc_bitfield512_t *status)
1562 {
1563 struct sdmmc_softc *sc = sf->sc;
1564 struct sdmmc_command cmd;
1565 bus_dma_segment_t ds[1];
1566 void *ptr = NULL;
1567 int gsft, rseg, error = 0;
1568 const int statlen = 64;
1569
1570 if (sf->scr.sd_spec >= SCR_SD_SPEC_VER_1_10 &&
1571 !ISSET(sf->csd.ccc, SD_CSD_CCC_SWITCH))
1572 return EINVAL;
1573
1574 if (group <= 0 || group > 6 ||
1575 function < 0 || function > 15)
1576 return EINVAL;
1577
1578 gsft = (group - 1) << 2;
1579
1580 if (ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
1581 error = bus_dmamem_alloc(sc->sc_dmat, statlen, PAGE_SIZE, 0, ds,
1582 1, &rseg, BUS_DMA_NOWAIT);
1583 if (error)
1584 goto out;
1585 error = bus_dmamem_map(sc->sc_dmat, ds, 1, statlen, &ptr,
1586 BUS_DMA_NOWAIT);
1587 if (error)
1588 goto dmamem_free;
1589 error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmap, ptr, statlen,
1590 NULL, BUS_DMA_NOWAIT|BUS_DMA_STREAMING|BUS_DMA_READ);
1591 if (error)
1592 goto dmamem_unmap;
1593
1594 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap, 0, statlen,
1595 BUS_DMASYNC_PREREAD);
1596 } else {
1597 ptr = malloc(statlen, M_DEVBUF, M_NOWAIT | M_ZERO);
1598 if (ptr == NULL)
1599 goto out;
1600 }
1601
1602 memset(&cmd, 0, sizeof(cmd));
1603 cmd.c_data = ptr;
1604 cmd.c_datalen = statlen;
1605 cmd.c_blklen = statlen;
1606 cmd.c_opcode = SD_SEND_SWITCH_FUNC;
1607 cmd.c_arg =
1608 (!!mode << 31) | (function << gsft) | (0x00ffffff & ~(0xf << gsft));
1609 cmd.c_flags = SCF_CMD_ADTC | SCF_CMD_READ | SCF_RSP_R1 | SCF_RSP_SPI_R1;
1610 if (ISSET(sc->sc_caps, SMC_CAPS_DMA))
1611 cmd.c_dmamap = sc->sc_dmap;
1612
1613 error = sdmmc_mmc_command(sc, &cmd);
1614 if (error == 0) {
1615 if (ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
1616 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap, 0, statlen,
1617 BUS_DMASYNC_POSTREAD);
1618 }
1619 memcpy(status, ptr, statlen);
1620 }
1621
1622 out:
1623 if (ptr != NULL) {
1624 if (ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
1625 bus_dmamap_unload(sc->sc_dmat, sc->sc_dmap);
1626 dmamem_unmap:
1627 bus_dmamem_unmap(sc->sc_dmat, ptr, statlen);
1628 dmamem_free:
1629 bus_dmamem_free(sc->sc_dmat, ds, rseg);
1630 } else {
1631 free(ptr, M_DEVBUF);
1632 }
1633 }
1634
1635 if (error == 0)
1636 sdmmc_be512_to_bitfield512(status);
1637
1638 return error;
1639 }
1640
/*
 * Issue an eMMC SWITCH command (CMD6) that writes one byte of the
 * EXT_CSD register, then — for cache flushes and switches into
 * HS200-or-faster timing modes — poll CMD13 (SEND_STATUS) until the
 * card reports READY_FOR_DATA, checking for SWITCH_ERROR on the way.
 * With poll set, commands are issued in polling mode (no interrupts).
 */
static int
sdmmc_mem_mmc_switch(struct sdmmc_function *sf, uint8_t set, uint8_t index,
    uint8_t value, bool poll)
{
	struct sdmmc_softc *sc = sf->sc;
	struct sdmmc_command cmd;
	int error;

	memset(&cmd, 0, sizeof(cmd));
	cmd.c_opcode = MMC_SWITCH;
	/* Access mode "write byte": set/index/value packed into the arg. */
	cmd.c_arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
	    (index << 16) | (value << 8) | set;
	cmd.c_flags = SCF_RSP_SPI_R1B | SCF_RSP_R1B | SCF_CMD_AC;

	if (poll)
		cmd.c_flags |= SCF_POLL;

	error = sdmmc_mmc_command(sc, &cmd);
	if (error)
		return error;

	/* HS_TIMING values >= 2 select HS200/HS400-class modes. */
	if (index == EXT_CSD_FLUSH_CACHE || (index == EXT_CSD_HS_TIMING && value >= 2)) {
		do {
			memset(&cmd, 0, sizeof(cmd));
			cmd.c_opcode = MMC_SEND_STATUS;
			if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE))
				cmd.c_arg = MMC_ARG_RCA(sf->rca);
			cmd.c_flags = SCF_CMD_AC | SCF_RSP_R1 | SCF_RSP_SPI_R2;
			if (poll)
				cmd.c_flags |= SCF_POLL;
			error = sdmmc_mmc_command(sc, &cmd);
			if (error)
				break;
			if (ISSET(MMC_R1(cmd.c_resp), MMC_R1_SWITCH_ERROR)) {
				aprint_error_dev(sc->sc_dev, "switch error\n");
				return EINVAL;
			}
			/* XXX time out */
		} while (!ISSET(MMC_R1(cmd.c_resp), MMC_R1_READY_FOR_DATA));

		if (error) {
			aprint_error_dev(sc->sc_dev,
			    "error waiting for data ready after switch command: %d\n",
			    error);
			return error;
		}
	}

	return 0;
}
1691
1692 /*
1693 * SPI mode function
1694 */
1695 static int
1696 sdmmc_mem_spi_read_ocr(struct sdmmc_softc *sc, uint32_t hcs, uint32_t *card_ocr)
1697 {
1698 struct sdmmc_command cmd;
1699 int error;
1700
1701 memset(&cmd, 0, sizeof(cmd));
1702 cmd.c_opcode = MMC_READ_OCR;
1703 cmd.c_arg = hcs ? MMC_OCR_HCS : 0;
1704 cmd.c_flags = SCF_RSP_SPI_R3;
1705
1706 error = sdmmc_mmc_command(sc, &cmd);
1707 if (error == 0 && card_ocr != NULL)
1708 *card_ocr = cmd.c_resp[1];
1709 DPRINTF(("%s: sdmmc_mem_spi_read_ocr: error=%d, ocr=%#x\n",
1710 SDMMCDEVNAME(sc), error, cmd.c_resp[1]));
1711 return error;
1712 }
1713
1714 /*
1715 * read/write function
1716 */
1717 /* read */
1718 static int
1719 sdmmc_mem_single_read_block(struct sdmmc_function *sf, uint32_t blkno,
1720 u_char *data, size_t datalen)
1721 {
1722 struct sdmmc_softc *sc = sf->sc;
1723 int error = 0;
1724 int i;
1725
1726 KASSERT((datalen % SDMMC_SECTOR_SIZE) == 0);
1727 KASSERT(!ISSET(sc->sc_caps, SMC_CAPS_DMA));
1728
1729 for (i = 0; i < datalen / SDMMC_SECTOR_SIZE; i++) {
1730 error = sdmmc_mem_read_block_subr(sf, sc->sc_dmap, blkno + i,
1731 data + i * SDMMC_SECTOR_SIZE, SDMMC_SECTOR_SIZE);
1732 if (error)
1733 break;
1734 }
1735 return error;
1736 }
1737
1738 /*
1739 * Simulate multi-segment dma transfer.
1740 */
/*
 * Simulate multi-segment dma transfer.
 */
/*
 * Perform a read on a controller that supports DMA but only a single
 * segment per transfer, when sc_dmap (already loaded by the caller)
 * resolved to multiple segments.  If any segment is not a multiple of
 * the sector size, fall back to one transfer through the pre-allocated
 * bounce buffer (sf->bbuf / sf->bbuf_dmap); otherwise replay the
 * transfer segment by segment through sf->sseg_dmap.
 */
static int
sdmmc_mem_single_segment_dma_read_block(struct sdmmc_function *sf,
    uint32_t blkno, u_char *data, size_t datalen)
{
	struct sdmmc_softc *sc = sf->sc;
	bool use_bbuf = false;
	int error = 0;
	int i;

	/* Any partially-sized segment forces the bounce-buffer path. */
	for (i = 0; i < sc->sc_dmap->dm_nsegs; i++) {
		size_t len = sc->sc_dmap->dm_segs[i].ds_len;
		if ((len % SDMMC_SECTOR_SIZE) != 0) {
			use_bbuf = true;
			break;
		}
	}
	if (use_bbuf) {
		bus_dmamap_sync(sc->sc_dmat, sf->bbuf_dmap, 0, datalen,
		    BUS_DMASYNC_PREREAD);

		/*
		 * NOTE(review): the DMA target is sf->bbuf via bbuf_dmap;
		 * 'data' is passed as the CPU-visible buffer pointer —
		 * presumably unused by DMA-capable controllers, which use
		 * c_dmamap instead.  Confirm against the controller drivers.
		 */
		error = sdmmc_mem_read_block_subr(sf, sf->bbuf_dmap,
		    blkno, data, datalen);
		if (error) {
			bus_dmamap_unload(sc->sc_dmat, sf->bbuf_dmap);
			return error;
		}

		bus_dmamap_sync(sc->sc_dmat, sf->bbuf_dmap, 0, datalen,
		    BUS_DMASYNC_POSTREAD);

		/* Copy from bounce buffer */
		memcpy(data, sf->bbuf, datalen);

		return 0;
	}

	/* Sector-aligned segments: issue one transfer per segment. */
	for (i = 0; i < sc->sc_dmap->dm_nsegs; i++) {
		size_t len = sc->sc_dmap->dm_segs[i].ds_len;

		error = bus_dmamap_load(sc->sc_dmat, sf->sseg_dmap,
		    data, len, NULL, BUS_DMA_NOWAIT|BUS_DMA_READ);
		if (error)
			return error;

		bus_dmamap_sync(sc->sc_dmat, sf->sseg_dmap, 0, len,
		    BUS_DMASYNC_PREREAD);

		error = sdmmc_mem_read_block_subr(sf, sf->sseg_dmap,
		    blkno, data, len);
		if (error) {
			bus_dmamap_unload(sc->sc_dmat, sf->sseg_dmap);
			return error;
		}

		bus_dmamap_sync(sc->sc_dmat, sf->sseg_dmap, 0, len,
		    BUS_DMASYNC_POSTREAD);

		bus_dmamap_unload(sc->sc_dmat, sf->sseg_dmap);

		/* Advance to the blocks covered by the next segment. */
		blkno += len / SDMMC_SECTOR_SIZE;
		data += len;
	}
	return 0;
}
1805
/*
 * Issue the actual read transfer: select the card (native mode),
 * send READ_SINGLE/READ_MULTIPLE for datalen bytes starting at blkno,
 * send STOP_TRANSMISSION if the controller does not do so itself,
 * and finally wait for the card to report READY_FOR_DATA.
 * dmap is attached to the command only when the controller does DMA.
 */
static int
sdmmc_mem_read_block_subr(struct sdmmc_function *sf, bus_dmamap_t dmap,
    uint32_t blkno, u_char *data, size_t datalen)
{
	struct sdmmc_softc *sc = sf->sc;
	struct sdmmc_command cmd;
	int error;

	if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
		error = sdmmc_select_card(sc, sf);
		if (error)
			goto out;
	}

	memset(&cmd, 0, sizeof(cmd));
	cmd.c_data = data;
	cmd.c_datalen = datalen;
	cmd.c_blklen = SDMMC_SECTOR_SIZE;
	cmd.c_opcode = (cmd.c_datalen / cmd.c_blklen) > 1 ?
	    MMC_READ_BLOCK_MULTIPLE : MMC_READ_BLOCK_SINGLE;
	cmd.c_arg = blkno;
	/* Byte addressing for standard-capacity cards. */
	if (!ISSET(sf->flags, SFF_SDHC))
		cmd.c_arg <<= SDMMC_SECTOR_SIZE_SB;
	cmd.c_flags = SCF_CMD_ADTC | SCF_CMD_READ | SCF_RSP_R1 | SCF_RSP_SPI_R1;
	if (ISSET(sf->flags, SFF_SDHC))
		cmd.c_flags |= SCF_XFER_SDHC;
	if (ISSET(sc->sc_caps, SMC_CAPS_DMA))
		cmd.c_dmamap = dmap;

	sc->sc_ev_xfer.ev_count++;

	error = sdmmc_mmc_command(sc, &cmd);
	if (error) {
		sc->sc_ev_xfer_error.ev_count++;
		goto out;
	}

	/* Bucket the transfer size (512B..64KiB powers of two) for stats. */
	const u_int counter = __builtin_ctz(cmd.c_datalen);
	if (counter >= 9 && counter <= 16) {
		sc->sc_ev_xfer_aligned[counter - 9].ev_count++;
	} else {
		sc->sc_ev_xfer_unaligned.ev_count++;
	}

	if (!ISSET(sc->sc_caps, SMC_CAPS_AUTO_STOP)) {
		if (cmd.c_opcode == MMC_READ_BLOCK_MULTIPLE) {
			memset(&cmd, 0, sizeof cmd);
			cmd.c_opcode = MMC_STOP_TRANSMISSION;
			cmd.c_arg = MMC_ARG_RCA(sf->rca);
			cmd.c_flags = SCF_CMD_AC | SCF_RSP_R1B | SCF_RSP_SPI_R1B;
			error = sdmmc_mmc_command(sc, &cmd);
			if (error)
				goto out;
		}
	}

	/* Poll CMD13 until the card is ready again (native mode only). */
	if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
		do {
			memset(&cmd, 0, sizeof(cmd));
			cmd.c_opcode = MMC_SEND_STATUS;
			if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE))
				cmd.c_arg = MMC_ARG_RCA(sf->rca);
			cmd.c_flags = SCF_CMD_AC | SCF_RSP_R1 | SCF_RSP_SPI_R2;
			error = sdmmc_mmc_command(sc, &cmd);
			if (error)
				break;
			/* XXX time out */
		} while (!ISSET(MMC_R1(cmd.c_resp), MMC_R1_READY_FOR_DATA));
	}

out:
	return error;
}
1879
/*
 * Public entry point for reading datalen bytes starting at block blkno
 * into data.  Dispatches, under the softc lock, to:
 *   - per-sector reads for single-block-only controllers,
 *   - a direct PIO read for non-DMA controllers,
 *   - a DMA read (with single-segment emulation if the loaded map has
 *     multiple segments but the controller can't scatter/gather).
 */
int
sdmmc_mem_read_block(struct sdmmc_function *sf, uint32_t blkno, u_char *data,
    size_t datalen)
{
	struct sdmmc_softc *sc = sf->sc;
	int error;

	SDMMC_LOCK(sc);
	mutex_enter(&sc->sc_mtx);

	if (ISSET(sc->sc_caps, SMC_CAPS_SINGLE_ONLY)) {
		error = sdmmc_mem_single_read_block(sf, blkno, data, datalen);
		goto out;
	}

	if (!ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
		error = sdmmc_mem_read_block_subr(sf, sc->sc_dmap, blkno, data,
		    datalen);
		goto out;
	}

	/* DMA transfer */
	error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmap, data, datalen, NULL,
	    BUS_DMA_NOWAIT|BUS_DMA_READ);
	if (error)
		goto out;

#ifdef SDMMC_DEBUG
	printf("data=%p, datalen=%zu\n", data, datalen);
	for (int i = 0; i < sc->sc_dmap->dm_nsegs; i++) {
		printf("seg#%d: addr=%#lx, size=%#lx\n", i,
		    (u_long)sc->sc_dmap->dm_segs[i].ds_addr,
		    (u_long)sc->sc_dmap->dm_segs[i].ds_len);
	}
#endif

	/* Controller can't scatter/gather: emulate one segment at a time. */
	if (sc->sc_dmap->dm_nsegs > 1
	    && !ISSET(sc->sc_caps, SMC_CAPS_MULTI_SEG_DMA)) {
		error = sdmmc_mem_single_segment_dma_read_block(sf, blkno,
		    data, datalen);
		goto unload;
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap, 0, datalen,
	    BUS_DMASYNC_PREREAD);

	error = sdmmc_mem_read_block_subr(sf, sc->sc_dmap, blkno, data,
	    datalen);
	if (error)
		goto unload;

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap, 0, datalen,
	    BUS_DMASYNC_POSTREAD);
unload:
	bus_dmamap_unload(sc->sc_dmat, sc->sc_dmap);

out:
	mutex_exit(&sc->sc_mtx);
	SDMMC_UNLOCK(sc);

	return error;
}
1942
1943 /* write */
1944 static int
1945 sdmmc_mem_single_write_block(struct sdmmc_function *sf, uint32_t blkno,
1946 u_char *data, size_t datalen)
1947 {
1948 struct sdmmc_softc *sc = sf->sc;
1949 int error = 0;
1950 int i;
1951
1952 KASSERT((datalen % SDMMC_SECTOR_SIZE) == 0);
1953 KASSERT(!ISSET(sc->sc_caps, SMC_CAPS_DMA));
1954
1955 for (i = 0; i < datalen / SDMMC_SECTOR_SIZE; i++) {
1956 error = sdmmc_mem_write_block_subr(sf, sc->sc_dmap, blkno + i,
1957 data + i * SDMMC_SECTOR_SIZE, SDMMC_SECTOR_SIZE);
1958 if (error)
1959 break;
1960 }
1961 return error;
1962 }
1963
1964 /*
1965 * Simulate multi-segment dma transfer.
1966 */
/*
 * Simulate multi-segment dma transfer.
 */
/*
 * Write counterpart of sdmmc_mem_single_segment_dma_read_block():
 * used when the controller supports DMA but only one segment per
 * transfer and sc_dmap (already loaded by the caller) has several
 * segments.  Non-sector-multiple segments go through the bounce
 * buffer in one transfer; otherwise each segment is written via
 * sf->sseg_dmap in turn.
 */
static int
sdmmc_mem_single_segment_dma_write_block(struct sdmmc_function *sf,
    uint32_t blkno, u_char *data, size_t datalen)
{
	struct sdmmc_softc *sc = sf->sc;
	bool use_bbuf = false;
	int error = 0;
	int i;

	/* Any partially-sized segment forces the bounce-buffer path. */
	for (i = 0; i < sc->sc_dmap->dm_nsegs; i++) {
		size_t len = sc->sc_dmap->dm_segs[i].ds_len;
		if ((len % SDMMC_SECTOR_SIZE) != 0) {
			use_bbuf = true;
			break;
		}
	}
	if (use_bbuf) {
		/* Copy to bounce buffer */
		memcpy(sf->bbuf, data, datalen);

		bus_dmamap_sync(sc->sc_dmat, sf->bbuf_dmap, 0, datalen,
		    BUS_DMASYNC_PREWRITE);

		/*
		 * NOTE(review): DMA source is sf->bbuf via bbuf_dmap;
		 * 'data' is passed as the CPU-visible pointer — presumably
		 * ignored by DMA-capable controllers, which use c_dmamap.
		 * Confirm against the controller drivers.
		 */
		error = sdmmc_mem_write_block_subr(sf, sf->bbuf_dmap,
		    blkno, data, datalen);
		if (error) {
			bus_dmamap_unload(sc->sc_dmat, sf->bbuf_dmap);
			return error;
		}

		bus_dmamap_sync(sc->sc_dmat, sf->bbuf_dmap, 0, datalen,
		    BUS_DMASYNC_POSTWRITE);

		return 0;
	}

	/* Sector-aligned segments: issue one transfer per segment. */
	for (i = 0; i < sc->sc_dmap->dm_nsegs; i++) {
		size_t len = sc->sc_dmap->dm_segs[i].ds_len;

		error = bus_dmamap_load(sc->sc_dmat, sf->sseg_dmap,
		    data, len, NULL, BUS_DMA_NOWAIT|BUS_DMA_WRITE);
		if (error)
			return error;

		bus_dmamap_sync(sc->sc_dmat, sf->sseg_dmap, 0, len,
		    BUS_DMASYNC_PREWRITE);

		error = sdmmc_mem_write_block_subr(sf, sf->sseg_dmap,
		    blkno, data, len);
		if (error) {
			bus_dmamap_unload(sc->sc_dmat, sf->sseg_dmap);
			return error;
		}

		bus_dmamap_sync(sc->sc_dmat, sf->sseg_dmap, 0, len,
		    BUS_DMASYNC_POSTWRITE);

		bus_dmamap_unload(sc->sc_dmat, sf->sseg_dmap);

		/* Advance to the blocks covered by the next segment. */
		blkno += len / SDMMC_SECTOR_SIZE;
		data += len;
	}

	return error;
}
2032
/*
 * Issue the actual write transfer: select the card (native mode),
 * pre-announce the erase count with ACMD23 for multi-block SD writes,
 * send WRITE_SINGLE/WRITE_MULTIPLE, send STOP_TRANSMISSION if the
 * controller does not do so itself, then wait for READY_FOR_DATA.
 * dmap is attached to the command only when the controller does DMA.
 */
static int
sdmmc_mem_write_block_subr(struct sdmmc_function *sf, bus_dmamap_t dmap,
    uint32_t blkno, u_char *data, size_t datalen)
{
	struct sdmmc_softc *sc = sf->sc;
	struct sdmmc_command cmd;
	int error;

	if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
		error = sdmmc_select_card(sc, sf);
		if (error)
			goto out;
	}

	const int nblk = howmany(datalen, SDMMC_SECTOR_SIZE);
	if (ISSET(sc->sc_flags, SMF_SD_MODE) && nblk > 1) {
		/* Set the number of write blocks to be pre-erased */
		memset(&cmd, 0, sizeof(cmd));
		cmd.c_opcode = SD_APP_SET_WR_BLK_ERASE_COUNT;
		cmd.c_flags = SCF_RSP_R1 | SCF_RSP_SPI_R1 | SCF_CMD_AC;
		cmd.c_arg = nblk;
		error = sdmmc_app_command(sc, sf, &cmd);
		if (error)
			goto out;
	}

	memset(&cmd, 0, sizeof(cmd));
	cmd.c_data = data;
	cmd.c_datalen = datalen;
	cmd.c_blklen = SDMMC_SECTOR_SIZE;
	cmd.c_opcode = (cmd.c_datalen / cmd.c_blklen) > 1 ?
	    MMC_WRITE_BLOCK_MULTIPLE : MMC_WRITE_BLOCK_SINGLE;
	cmd.c_arg = blkno;
	/* Byte addressing for standard-capacity cards. */
	if (!ISSET(sf->flags, SFF_SDHC))
		cmd.c_arg <<= SDMMC_SECTOR_SIZE_SB;
	cmd.c_flags = SCF_CMD_ADTC | SCF_RSP_R1;
	if (ISSET(sf->flags, SFF_SDHC))
		cmd.c_flags |= SCF_XFER_SDHC;
	if (ISSET(sc->sc_caps, SMC_CAPS_DMA))
		cmd.c_dmamap = dmap;

	sc->sc_ev_xfer.ev_count++;

	error = sdmmc_mmc_command(sc, &cmd);
	if (error) {
		sc->sc_ev_xfer_error.ev_count++;
		goto out;
	}

	/* Bucket the transfer size (512B..64KiB powers of two) for stats. */
	const u_int counter = __builtin_ctz(cmd.c_datalen);
	if (counter >= 9 && counter <= 16) {
		sc->sc_ev_xfer_aligned[counter - 9].ev_count++;
	} else {
		sc->sc_ev_xfer_unaligned.ev_count++;
	}

	if (!ISSET(sc->sc_caps, SMC_CAPS_AUTO_STOP)) {
		if (cmd.c_opcode == MMC_WRITE_BLOCK_MULTIPLE) {
			memset(&cmd, 0, sizeof(cmd));
			cmd.c_opcode = MMC_STOP_TRANSMISSION;
			cmd.c_flags = SCF_CMD_AC | SCF_RSP_R1B | SCF_RSP_SPI_R1B;
			error = sdmmc_mmc_command(sc, &cmd);
			if (error)
				goto out;
		}
	}

	/* Poll CMD13 until the card finishes programming (native mode). */
	if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
		do {
			memset(&cmd, 0, sizeof(cmd));
			cmd.c_opcode = MMC_SEND_STATUS;
			if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE))
				cmd.c_arg = MMC_ARG_RCA(sf->rca);
			cmd.c_flags = SCF_CMD_AC | SCF_RSP_R1 | SCF_RSP_SPI_R2;
			error = sdmmc_mmc_command(sc, &cmd);
			if (error)
				break;
			/* XXX time out */
		} while (!ISSET(MMC_R1(cmd.c_resp), MMC_R1_READY_FOR_DATA));
	}

out:
	return error;
}
2117
/*
 * Public entry point for writing datalen bytes starting at block
 * blkno.  Fails early with EIO if the slot's write-protect switch is
 * set; otherwise dispatches, under the softc lock, to per-sector
 * writes, direct PIO, or DMA (with single-segment emulation when the
 * controller can't scatter/gather) — mirroring sdmmc_mem_read_block().
 */
int
sdmmc_mem_write_block(struct sdmmc_function *sf, uint32_t blkno, u_char *data,
    size_t datalen)
{
	struct sdmmc_softc *sc = sf->sc;
	int error;

	SDMMC_LOCK(sc);
	mutex_enter(&sc->sc_mtx);

	if (sdmmc_chip_write_protect(sc->sc_sct, sc->sc_sch)) {
		aprint_normal_dev(sc->sc_dev, "write-protected\n");
		error = EIO;
		goto out;
	}

	if (ISSET(sc->sc_caps, SMC_CAPS_SINGLE_ONLY)) {
		error = sdmmc_mem_single_write_block(sf, blkno, data, datalen);
		goto out;
	}

	if (!ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
		error = sdmmc_mem_write_block_subr(sf, sc->sc_dmap, blkno, data,
		    datalen);
		goto out;
	}

	/* DMA transfer */
	error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmap, data, datalen, NULL,
	    BUS_DMA_NOWAIT|BUS_DMA_WRITE);
	if (error)
		goto out;

#ifdef SDMMC_DEBUG
	aprint_normal_dev(sc->sc_dev, "%s: data=%p, datalen=%zu\n",
	    __func__, data, datalen);
	for (int i = 0; i < sc->sc_dmap->dm_nsegs; i++) {
		aprint_normal_dev(sc->sc_dev,
		    "%s: seg#%d: addr=%#lx, size=%#lx\n", __func__, i,
		    (u_long)sc->sc_dmap->dm_segs[i].ds_addr,
		    (u_long)sc->sc_dmap->dm_segs[i].ds_len);
	}
#endif

	/* Controller can't scatter/gather: emulate one segment at a time. */
	if (sc->sc_dmap->dm_nsegs > 1
	    && !ISSET(sc->sc_caps, SMC_CAPS_MULTI_SEG_DMA)) {
		error = sdmmc_mem_single_segment_dma_write_block(sf, blkno,
		    data, datalen);
		goto unload;
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap, 0, datalen,
	    BUS_DMASYNC_PREWRITE);

	error = sdmmc_mem_write_block_subr(sf, sc->sc_dmap, blkno, data,
	    datalen);
	if (error)
		goto unload;

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap, 0, datalen,
	    BUS_DMASYNC_POSTWRITE);
unload:
	bus_dmamap_unload(sc->sc_dmat, sc->sc_dmap);

out:
	mutex_exit(&sc->sc_mtx);
	SDMMC_UNLOCK(sc);

	return error;
}
2188
2189 int
2190 sdmmc_mem_discard(struct sdmmc_function *sf, uint32_t sblkno, uint32_t eblkno)
2191 {
2192 struct sdmmc_softc *sc = sf->sc;
2193 struct sdmmc_command cmd;
2194 int error;
2195
2196 if (ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE))
2197 return ENODEV; /* XXX not tested */
2198
2199 if (eblkno < sblkno)
2200 return EINVAL;
2201
2202 SDMMC_LOCK(sc);
2203 mutex_enter(&sc->sc_mtx);
2204
2205 /* Set the address of the first write block to be erased */
2206 memset(&cmd, 0, sizeof(cmd));
2207 cmd.c_opcode = ISSET(sc->sc_flags, SMF_SD_MODE) ?
2208 SD_ERASE_WR_BLK_START : MMC_TAG_ERASE_GROUP_START;
2209 cmd.c_arg = sblkno;
2210 if (!ISSET(sf->flags, SFF_SDHC))
2211 cmd.c_arg <<= SDMMC_SECTOR_SIZE_SB;
2212 cmd.c_flags = SCF_CMD_AC | SCF_RSP_R1;
2213 error = sdmmc_mmc_command(sc, &cmd);
2214 if (error)
2215 goto out;
2216
2217 /* Set the address of the last write block to be erased */
2218 memset(&cmd, 0, sizeof(cmd));
2219 cmd.c_opcode = ISSET(sc->sc_flags, SMF_SD_MODE) ?
2220 SD_ERASE_WR_BLK_END : MMC_TAG_ERASE_GROUP_END;
2221 cmd.c_arg = eblkno;
2222 if (!ISSET(sf->flags, SFF_SDHC))
2223 cmd.c_arg <<= SDMMC_SECTOR_SIZE_SB;
2224 cmd.c_flags = SCF_CMD_AC | SCF_RSP_R1;
2225 error = sdmmc_mmc_command(sc, &cmd);
2226 if (error)
2227 goto out;
2228
2229 /* Start the erase operation */
2230 memset(&cmd, 0, sizeof(cmd));
2231 cmd.c_opcode = MMC_ERASE;
2232 cmd.c_flags = SCF_CMD_AC | SCF_RSP_R1B;
2233 error = sdmmc_mmc_command(sc, &cmd);
2234 if (error)
2235 goto out;
2236
2237 out:
2238 mutex_exit(&sc->sc_mtx);
2239 SDMMC_UNLOCK(sc);
2240
2241 #ifdef SDMMC_DEBUG
2242 device_printf(sc->sc_dev, "discard blk %u-%u error %d\n",
2243 sblkno, eblkno, error);
2244 #endif
2245
2246 return error;
2247 }
2248
2249 int
2250 sdmmc_mem_flush_cache(struct sdmmc_function *sf, bool poll)
2251 {
2252 struct sdmmc_softc *sc = sf->sc;
2253 int error;
2254
2255 if (!ISSET(sf->flags, SFF_CACHE_ENABLED))
2256 return 0;
2257
2258 SDMMC_LOCK(sc);
2259 mutex_enter(&sc->sc_mtx);
2260
2261 error = sdmmc_mem_mmc_switch(sf,
2262 EXT_CSD_CMD_SET_NORMAL, EXT_CSD_FLUSH_CACHE,
2263 EXT_CSD_FLUSH_CACHE_FLUSH, poll);
2264
2265 mutex_exit(&sc->sc_mtx);
2266 SDMMC_UNLOCK(sc);
2267
2268 #ifdef SDMMC_DEBUG
2269 device_printf(sc->sc_dev, "mmc flush cache error %d\n", error);
2270 #endif
2271
2272 return error;
2273 }
2274