sdmmc_mem.c revision 1.77 1 /* $NetBSD: sdmmc_mem.c,v 1.77 2024/10/24 10:50:31 skrll Exp $ */
2 /* $OpenBSD: sdmmc_mem.c,v 1.10 2009/01/09 10:55:22 jsg Exp $ */
3
4 /*
5 * Copyright (c) 2006 Uwe Stuehler <uwe (at) openbsd.org>
6 *
7 * Permission to use, copy, modify, and distribute this software for any
8 * purpose with or without fee is hereby granted, provided that the above
9 * copyright notice and this permission notice appear in all copies.
10 *
11 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18 */
19
20 /*-
21 * Copyright (C) 2007, 2008, 2009, 2010 NONAKA Kimihiro <nonaka (at) netbsd.org>
22 * All rights reserved.
23 *
24 * Redistribution and use in source and binary forms, with or without
25 * modification, are permitted provided that the following conditions
26 * are met:
27 * 1. Redistributions of source code must retain the above copyright
28 * notice, this list of conditions and the following disclaimer.
29 * 2. Redistributions in binary form must reproduce the above copyright
30 * notice, this list of conditions and the following disclaimer in the
31 * documentation and/or other materials provided with the distribution.
32 *
33 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
34 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
35 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
36 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
37 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
38 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
39 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
40 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
41 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
42 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
43 */
44
45 /* Routines for SD/MMC memory cards. */
46
47 #include <sys/cdefs.h>
48 __KERNEL_RCSID(0, "$NetBSD: sdmmc_mem.c,v 1.77 2024/10/24 10:50:31 skrll Exp $");
49
50 #ifdef _KERNEL_OPT
51 #include "opt_sdmmc.h"
52 #endif
53
54 #include <sys/param.h>
55 #include <sys/kernel.h>
56 #include <sys/malloc.h>
57 #include <sys/systm.h>
58 #include <sys/device.h>
59 #include <sys/bitops.h>
60 #include <sys/evcnt.h>
61
62 #include <dev/sdmmc/sdmmcchip.h>
63 #include <dev/sdmmc/sdmmcreg.h>
64 #include <dev/sdmmc/sdmmcvar.h>
65
66 #ifdef SDMMC_DEBUG
67 #define DPRINTF(s) do { printf s; } while (/*CONSTCOND*/0)
68 #else
69 #define DPRINTF(s) do {} while (/*CONSTCOND*/0)
70 #endif
71
/*
 * 512-bit big register image (e.g. SD switch-function status or SD
 * status), held as sixteen 32-bit words so it can be picked apart
 * with __bitfield() after sdmmc_be512_to_bitfield512() conversion.
 */
typedef struct { uint32_t _bits[512/32]; } __packed __aligned(4) sdmmc_bitfield512_t;
73
74 static int sdmmc_mem_sd_init(struct sdmmc_softc *, struct sdmmc_function *);
75 static int sdmmc_mem_mmc_init(struct sdmmc_softc *, struct sdmmc_function *);
76 static int sdmmc_mem_send_cid(struct sdmmc_softc *, sdmmc_response *);
77 static int sdmmc_mem_send_csd(struct sdmmc_softc *, struct sdmmc_function *,
78 sdmmc_response *);
79 static int sdmmc_mem_send_scr(struct sdmmc_softc *, struct sdmmc_function *,
80 uint32_t *scr);
81 static int sdmmc_mem_decode_scr(struct sdmmc_softc *, struct sdmmc_function *);
82 static int sdmmc_mem_send_ssr(struct sdmmc_softc *, struct sdmmc_function *,
83 sdmmc_bitfield512_t *);
84 static int sdmmc_mem_decode_ssr(struct sdmmc_softc *, struct sdmmc_function *,
85 sdmmc_bitfield512_t *);
86 static int sdmmc_mem_decode_general_info(struct sdmmc_softc *,
87 struct sdmmc_function * ,const uint8_t *);
88 static int sdmmc_mem_pef_enable_cache(struct sdmmc_softc *,
89 struct sdmmc_function *);
90 static int sdmmc_mem_send_cxd_data(struct sdmmc_softc *, int, void *, size_t);
91 static int sdmmc_mem_read_extr_single(struct sdmmc_softc *, struct sdmmc_function *,
92 uint8_t, uint8_t, uint32_t, uint16_t, void *);
93 static int sdmmc_mem_write_extr_single(struct sdmmc_softc *, struct sdmmc_function *,
94 uint8_t, uint8_t, uint32_t, uint8_t, bool);
95 static int sdmmc_set_bus_width(struct sdmmc_function *, int);
96 static int sdmmc_mem_sd_switch(struct sdmmc_function *, int, int, int, sdmmc_bitfield512_t *);
97 static int sdmmc_mem_mmc_switch(struct sdmmc_function *, uint8_t, uint8_t,
98 uint8_t, bool);
99 static int sdmmc_mem_signal_voltage(struct sdmmc_softc *, int);
100 static int sdmmc_mem_spi_read_ocr(struct sdmmc_softc *, uint32_t, uint32_t *);
101 static int sdmmc_mem_single_read_block(struct sdmmc_function *, uint32_t,
102 u_char *, size_t);
103 static int sdmmc_mem_single_write_block(struct sdmmc_function *, uint32_t,
104 u_char *, size_t);
105 static int sdmmc_mem_single_segment_dma_read_block(struct sdmmc_function *,
106 uint32_t, u_char *, size_t);
107 static int sdmmc_mem_single_segment_dma_write_block(struct sdmmc_function *,
108 uint32_t, u_char *, size_t);
109 static int sdmmc_mem_read_block_subr(struct sdmmc_function *, bus_dmamap_t,
110 uint32_t, u_char *, size_t);
111 static int sdmmc_mem_write_block_subr(struct sdmmc_function *, bus_dmamap_t,
112 uint32_t, u_char *, size_t);
113
/*
 * CMD6 function group 1 (access mode) table, indexed by the
 * SD_ACCESS_MODE_* function number returned by
 * sdmmc_mem_select_transfer_mode().  "v" is the host SMC_CAPS_*
 * capability bit associated with the mode; "freq" is the resulting
 * maximum transfer clock in KHz (stored into sf->csd.tran_speed).
 */
static const struct {
	const char *name;	/* mode name, reported via sc_transfer_mode */
	int v;			/* required host capability (SMC_CAPS_*) */
	int freq;		/* bus clock in KHz */
} switch_group0_functions[] = {
	/* Default/SDR12 */
	{ "Default/SDR12",		0,			 25000 },

	/* High-Speed/SDR25 */
	{ "High-Speed/SDR25",		SMC_CAPS_SD_HIGHSPEED,	 50000 },

	/* SDR50 */
	{ "SDR50",			SMC_CAPS_UHS_SDR50,	100000 },

	/* SDR104 */
	{ "SDR104",			SMC_CAPS_UHS_SDR104,	208000 },

	/* DDR50 */
	{ "DDR50",			SMC_CAPS_UHS_DDR50,	 50000 },
};
134
/*
 * Maximum bus clock in KHz for each MMC EXT_CSD_HS_TIMING_* mode,
 * indexed by the HS_TIMING value programmed into the card.
 */
static const int sdmmc_mmc_timings[] = {
	[EXT_CSD_HS_TIMING_LEGACY]	= 26000,
	[EXT_CSD_HS_TIMING_HIGHSPEED]	= 52000,
	[EXT_CSD_HS_TIMING_HS200]	= 200000
};
140
141 /*
142 * Initialize SD/MMC memory cards and memory in SDIO "combo" cards.
143 */
/*
 * Enable the memory portion of the card stack.
 *
 * Distinguishes SD from MMC by whether ACMD41 is answered, negotiates
 * a common operating voltage between host and card(s), and, when both
 * sides advertise MMC_OCR_S18A, performs the CMD11 switch to 1.8V
 * signalling (UHS).  Returns 0 on success or an errno value; on a
 * memory-less "combo" card SMF_MEM_MODE is cleared and 0 is returned.
 */
int
sdmmc_mem_enable(struct sdmmc_softc *sc)
{
	uint32_t host_ocr;
	uint32_t card_ocr;
	uint32_t new_ocr;
	uint32_t ocr = 0;
	int error;

	SDMMC_LOCK(sc);

	/* Set host mode to SD "combo" card or SD memory-only. */
	CLR(sc->sc_flags, SMF_UHS_MODE);
	SET(sc->sc_flags, SMF_SD_MODE|SMF_MEM_MODE);

	if (ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE))
		sdmmc_spi_chip_initialize(sc->sc_spi_sct, sc->sc_sch);

	/* Reset memory (*must* do that before CMD55 or CMD1). */
	sdmmc_go_idle_state(sc);

	if (ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
		/*
		 * Check SD Ver.2: CMD8 argument 0x1aa = 2.7-3.6V range
		 * plus check pattern 0xaa; an echoing card is Ver.2 and
		 * may be high-capacity, so request HCS.
		 */
		error = sdmmc_mem_send_if_cond(sc, 0x1aa, &card_ocr);
		if (error == 0 && card_ocr == 0x1aa)
			SET(ocr, MMC_OCR_HCS);
	}

	/*
	 * Read the SD/MMC memory OCR value by issuing CMD55 followed
	 * by ACMD41 to read the OCR value from memory-only SD cards.
	 * MMC cards will not respond to CMD55 or ACMD41 and this is
	 * how we distinguish them from SD cards.
	 */
mmc_mode:
	error = sdmmc_mem_send_op_cond(sc,
	    ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE) ? ocr : 0, &card_ocr);
	if (error) {
		if (ISSET(sc->sc_flags, SMF_SD_MODE) &&
		    !ISSET(sc->sc_flags, SMF_IO_MODE)) {
			/* Not a SD card, switch to MMC mode. */
			DPRINTF(("%s: switch to MMC mode\n", SDMMCDEVNAME(sc)));
			CLR(sc->sc_flags, SMF_SD_MODE);
			goto mmc_mode;
		}
		if (!ISSET(sc->sc_flags, SMF_SD_MODE)) {
			DPRINTF(("%s: couldn't read memory OCR\n",
			    SDMMCDEVNAME(sc)));
			goto out;
		} else {
			/* Not a "combo" card. */
			CLR(sc->sc_flags, SMF_MEM_MODE);
			error = 0;
			goto out;
		}
	}
	if (ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
		/* get card OCR */
		error = sdmmc_mem_spi_read_ocr(sc, ocr, &card_ocr);
		if (error) {
			DPRINTF(("%s: couldn't read SPI memory OCR\n",
			    SDMMCDEVNAME(sc)));
			goto out;
		}
	}

	/* Set the lowest voltage supported by the card and host. */
	host_ocr = sdmmc_chip_host_ocr(sc->sc_sct, sc->sc_sch);
	error = sdmmc_set_bus_power(sc, host_ocr, card_ocr);
	if (error) {
		DPRINTF(("%s: couldn't supply voltage requested by card\n",
		    SDMMCDEVNAME(sc)));
		goto out;
	}

	DPRINTF(("%s: host_ocr 0x%08x\n", SDMMCDEVNAME(sc), host_ocr));
	DPRINTF(("%s: card_ocr 0x%08x\n", SDMMCDEVNAME(sc), card_ocr));

	host_ocr &= card_ocr; /* only allow the common voltages */
	if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
		if (ISSET(sc->sc_flags, SMF_SD_MODE)) {
			/* Tell the card(s) to enter the idle state (again). */
			sdmmc_go_idle_state(sc);
			/* Check SD Ver.2 */
			error = sdmmc_mem_send_if_cond(sc, 0x1aa, &card_ocr);
			if (error == 0 && card_ocr == 0x1aa)
				SET(ocr, MMC_OCR_HCS);

			/* Request 1.8V switch if the host can do it. */
			if (sdmmc_chip_host_ocr(sc->sc_sct, sc->sc_sch) & MMC_OCR_S18A)
				SET(ocr, MMC_OCR_S18A);
		} else {
			/* MMC: ask for sector-addressed (high capacity) mode. */
			SET(ocr, MMC_OCR_ACCESS_MODE_SECTOR);
		}
	}
	host_ocr |= ocr;

	/* Send the new OCR value until all cards are ready. */
	error = sdmmc_mem_send_op_cond(sc, host_ocr, &new_ocr);
	if (error) {
		DPRINTF(("%s: couldn't send memory OCR\n", SDMMCDEVNAME(sc)));
		goto out;
	}

	if (ISSET(sc->sc_flags, SMF_SD_MODE) && ISSET(new_ocr, MMC_OCR_S18A)) {
		/*
		 * Card and host support low voltage mode, begin switch
		 * sequence.
		 */
		struct sdmmc_command cmd;
		memset(&cmd, 0, sizeof(cmd));
		cmd.c_arg = 0;
		cmd.c_flags = SCF_CMD_AC | SCF_RSP_R1;
		cmd.c_opcode = SD_VOLTAGE_SWITCH;	/* CMD11 */
		DPRINTF(("%s: switching card to 1.8V\n", SDMMCDEVNAME(sc)));
		error = sdmmc_mmc_command(sc, &cmd);
		if (error) {
			DPRINTF(("%s: voltage switch command failed\n",
			    SDMMCDEVNAME(sc)));
			goto out;
		}

		/* Now flip the host side to 1.8V signalling as well. */
		error = sdmmc_mem_signal_voltage(sc, SDMMC_SIGNAL_VOLTAGE_180);
		if (error) {
			DPRINTF(("%s: voltage change on host failed\n",
			    SDMMCDEVNAME(sc)));
			goto out;
		}

		SET(sc->sc_flags, SMF_UHS_MODE);
	}

out:
	SDMMC_UNLOCK(sc);

	return error;
}
280
/*
 * Change the host controller's signalling voltage following the
 * sequence required by the SD spec: stop the card clock, switch the
 * regulator, then restart the clock at SDR12 (25 MHz) speed.  The
 * delay() calls give the card/regulator time to settle between steps.
 * Returns 0 on success or the error from the failing chip call.
 */
static int
sdmmc_mem_signal_voltage(struct sdmmc_softc *sc, int signal_voltage)
{
	int error;

	/*
	 * Stop the clock
	 */
	error = sdmmc_chip_bus_clock(sc->sc_sct, sc->sc_sch,
	    SDMMC_SDCLK_OFF, false);
	if (error)
		goto out;

	delay(1000);

	/*
	 * Card switch command was successful, update host controller
	 * signal voltage setting.
	 */
	DPRINTF(("%s: switching host to %s\n", SDMMCDEVNAME(sc),
	    signal_voltage == SDMMC_SIGNAL_VOLTAGE_180 ? "1.8V" : "3.3V"));
	error = sdmmc_chip_signal_voltage(sc->sc_sct,
	    sc->sc_sch, signal_voltage);
	if (error)
		goto out;

	delay(5000);

	/*
	 * Switch to SDR12 timing
	 */
	error = sdmmc_chip_bus_clock(sc->sc_sct, sc->sc_sch, 25000,
	    false);
	if (error)
		goto out;

	delay(1000);

out:
	return error;
}
322
323 /*
324 * Read the CSD and CID from all cards and assign each card a unique
325 * relative card address (RCA). CMD2 is ignored by SDIO-only cards.
326 */
/*
 * Read the CSD and CID from all cards and assign each card a unique
 * relative card address (RCA). CMD2 is ignored by SDIO-only cards.
 *
 * Each discovered card gets a struct sdmmc_function appended to
 * sc->sf_head; cards whose CSD/CID cannot be read or decoded are
 * flagged SFF_ERROR rather than removed.
 */
void
sdmmc_mem_scan(struct sdmmc_softc *sc)
{
	sdmmc_response resp;
	struct sdmmc_function *sf;
	uint16_t next_rca;
	int error;
	int retry;

	SDMMC_LOCK(sc);

	/*
	 * CMD2 is a broadcast command understood by SD cards and MMC
	 * cards.  All cards begin to respond to the command, but back
	 * off if another card drives the CMD line to a different level.
	 * Only one card will get its entire response through.  That
	 * card remains silent once it has been assigned a RCA.
	 */
	for (retry = 0; retry < 100; retry++) {
		error = sdmmc_mem_send_cid(sc, &resp);
		if (error) {
			if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE) &&
			    error == ETIMEDOUT) {
				/* No more cards there. */
				break;
			}
			DPRINTF(("%s: couldn't read CID\n", SDMMCDEVNAME(sc)));
			break;
		}

		/* In MMC mode, find the next available RCA. */
		next_rca = 1;
		if (!ISSET(sc->sc_flags, SMF_SD_MODE)) {
			SIMPLEQ_FOREACH(sf, &sc->sf_head, sf_list)
				next_rca++;
		}

		/* Allocate a sdmmc_function structure. */
		sf = sdmmc_function_alloc(sc);
		sf->rca = next_rca;

		/*
		 * Remember the CID returned in the CMD2 response for
		 * later decoding.
		 */
		memcpy(sf->raw_cid, resp, sizeof(sf->raw_cid));

		/*
		 * Silence the card by assigning it a unique RCA, or
		 * querying it for its RCA in the case of SD.
		 */
		if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
			if (sdmmc_set_relative_addr(sc, sf) != 0) {
				aprint_error_dev(sc->sc_dev,
				    "couldn't set mem RCA\n");
				sdmmc_function_free(sf);
				break;
			}
		}

		/*
		 * If this is a memory-only card, the card responding
		 * first becomes an alias for SDIO function 0.
		 */
		if (sc->sc_fn0 == NULL)
			sc->sc_fn0 = sf;

		SIMPLEQ_INSERT_TAIL(&sc->sf_head, sf, sf_list);

		/* only one function in SPI mode */
		if (ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE))
			break;
	}

	if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE))
		/* Go to Data Transfer Mode, if possible. */
		sdmmc_chip_bus_rod(sc->sc_sct, sc->sc_sch, 0);

	/*
	 * All cards are either inactive or awaiting further commands.
	 * Read the CSDs and decode the raw CID for each card.
	 */
	SIMPLEQ_FOREACH(sf, &sc->sf_head, sf_list) {
		error = sdmmc_mem_send_csd(sc, sf, &resp);
		if (error) {
			SET(sf->flags, SFF_ERROR);
			continue;
		}

		if (sdmmc_decode_csd(sc, resp, sf) != 0 ||
		    sdmmc_decode_cid(sc, sf->raw_cid, sf) != 0) {
			SET(sf->flags, SFF_ERROR);
			continue;
		}

#ifdef SDMMC_DEBUG
		printf("%s: CID: ", SDMMCDEVNAME(sc));
		sdmmc_print_cid(&sf->cid);
#endif
	}

	SDMMC_UNLOCK(sc);
}
430
/*
 * Decode the raw CSD register into sf->csd: capacity, block lengths,
 * maximum transfer speed and (for SD) the command-class bitmap.
 * Returns 0 on success, 1 on an unrecognized CSD structure version.
 */
int
sdmmc_decode_csd(struct sdmmc_softc *sc, sdmmc_response resp,
    struct sdmmc_function *sf)
{
	/* TRAN_SPEED(2:0): transfer rate exponent */
	static const int speed_exponent[8] = {
		100 *    1,	/* 100 Kbits/s */
		  1 * 1000,	/*   1 Mbits/s */
		 10 * 1000,	/*  10 Mbits/s */
		100 * 1000,	/* 100 Mbits/s */
			 0,
			 0,
			 0,
			 0,
	};
	/* TRAN_SPEED(6:3): time mantissa, in tenths */
	static const int speed_mantissa[16] = {
		0, 10, 12, 13, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60, 70, 80,
	};
	struct sdmmc_csd *csd = &sf->csd;
	int e, m;

	if (ISSET(sc->sc_flags, SMF_SD_MODE)) {
		/*
		 * CSD version 1.0 corresponds to SD system
		 * specification version 1.0 - 1.10. (SanDisk, 3.5.3)
		 */
		csd->csdver = SD_CSD_CSDVER(resp);
		switch (csd->csdver) {
		case SD_CSD_CSDVER_2_0:
			/* SDHC/SDXC: fixed block length, direct capacity. */
			DPRINTF(("%s: SD Ver.2.0\n", SDMMCDEVNAME(sc)));
			SET(sf->flags, SFF_SDHC);
			csd->capacity = SD_CSD_V2_CAPACITY(resp);
			csd->read_bl_len = SD_CSD_V2_BL_LEN;
			break;

		case SD_CSD_CSDVER_1_0:
			DPRINTF(("%s: SD Ver.1.0\n", SDMMCDEVNAME(sc)));
			csd->capacity = SD_CSD_CAPACITY(resp);
			csd->read_bl_len = SD_CSD_READ_BL_LEN(resp);
			break;

		default:
			aprint_error_dev(sc->sc_dev,
			    "unknown SD CSD structure version 0x%x\n",
			    csd->csdver);
			return 1;
		}

		csd->mmcver = SD_CSD_MMCVER(resp);
		csd->write_bl_len = SD_CSD_WRITE_BL_LEN(resp);
		csd->r2w_factor = SD_CSD_R2W_FACTOR(resp);
		e = SD_CSD_SPEED_EXP(resp);
		m = SD_CSD_SPEED_MANT(resp);
		/* mantissa is in tenths, hence the /10 */
		csd->tran_speed = speed_exponent[e] * speed_mantissa[m] / 10;
		csd->ccc = SD_CSD_CCC(resp);
	} else {
		csd->csdver = MMC_CSD_CSDVER(resp);
		if (csd->csdver == MMC_CSD_CSDVER_1_0) {
			aprint_error_dev(sc->sc_dev,
			    "unknown MMC CSD structure version 0x%x\n",
			    csd->csdver);
			return 1;
		}

		csd->mmcver = MMC_CSD_MMCVER(resp);
		csd->capacity = MMC_CSD_CAPACITY(resp);
		csd->read_bl_len = MMC_CSD_READ_BL_LEN(resp);
		csd->write_bl_len = MMC_CSD_WRITE_BL_LEN(resp);
		csd->r2w_factor = MMC_CSD_R2W_FACTOR(resp);
		e = MMC_CSD_TRAN_SPEED_EXP(resp);
		m = MMC_CSD_TRAN_SPEED_MANT(resp);
		csd->tran_speed = speed_exponent[e] * speed_mantissa[m] / 10;
	}
	/*
	 * Normalize capacity to 512-byte sectors when the card's native
	 * read block is larger than SDMMC_SECTOR_SIZE.
	 */
	if ((1 << csd->read_bl_len) > SDMMC_SECTOR_SIZE)
		csd->capacity *= (1 << csd->read_bl_len) / SDMMC_SECTOR_SIZE;

#ifdef SDMMC_DUMP_CSD
	sdmmc_print_csd(resp, csd);
#endif

	return 0;
}
514
/*
 * Decode the raw CID register into sf->cid, using the layout selected
 * by the bus mode and (for MMC) the MMC version already decoded from
 * the CSD.  Returns 0 on success, 1 on an unrecognized MMC version.
 */
int
sdmmc_decode_cid(struct sdmmc_softc *sc, sdmmc_response resp,
    struct sdmmc_function *sf)
{
	struct sdmmc_cid *cid = &sf->cid;

	if (ISSET(sc->sc_flags, SMF_SD_MODE)) {
		cid->mid = SD_CID_MID(resp);
		cid->oid = SD_CID_OID(resp);
		SD_CID_PNM_CPY(resp, cid->pnm);
		cid->rev = SD_CID_REV(resp);
		cid->psn = SD_CID_PSN(resp);
		cid->mdt = SD_CID_MDT(resp);
	} else {
		switch(sf->csd.mmcver) {
		case MMC_CSD_MMCVER_1_0:
		case MMC_CSD_MMCVER_1_4:
			/* V1 layout has no OID field. */
			cid->mid = MMC_CID_MID_V1(resp);
			MMC_CID_PNM_V1_CPY(resp, cid->pnm);
			cid->rev = MMC_CID_REV_V1(resp);
			cid->psn = MMC_CID_PSN_V1(resp);
			cid->mdt = MMC_CID_MDT_V1(resp);
			break;
		case MMC_CSD_MMCVER_2_0:
		case MMC_CSD_MMCVER_3_1:
		case MMC_CSD_MMCVER_4_0:
			/* NOTE: rev and mdt are not decoded for V2+ here. */
			cid->mid = MMC_CID_MID_V2(resp);
			cid->oid = MMC_CID_OID_V2(resp);
			MMC_CID_PNM_V2_CPY(resp, cid->pnm);
			cid->psn = MMC_CID_PSN_V2(resp);
			break;
		default:
			aprint_error_dev(sc->sc_dev, "unknown MMC version %d\n",
			    sf->csd.mmcver);
			return 1;
		}
	}
	return 0;
}
554
555 void
556 sdmmc_print_cid(struct sdmmc_cid *cid)
557 {
558
559 printf("mid=0x%02x oid=0x%04x pnm=\"%s\" rev=0x%02x psn=0x%08x"
560 " mdt=%03x\n", cid->mid, cid->oid, cid->pnm, cid->rev, cid->psn,
561 cid->mdt);
562 }
563
564 #ifdef SDMMC_DUMP_CSD
/*
 * Dump the decoded CSD fields (debug aid, SDMMC_DUMP_CSD only).
 * Note: the raw "resp" argument is currently unused; all values
 * are taken from the already-decoded csd structure.
 */
void
sdmmc_print_csd(sdmmc_response resp, struct sdmmc_csd *csd)
{

	printf("csdver = %d\n", csd->csdver);
	printf("mmcver = %d\n", csd->mmcver);
	printf("capacity = 0x%08x\n", csd->capacity);
	printf("read_bl_len = %d\n", csd->read_bl_len);
	printf("write_bl_len = %d\n", csd->write_bl_len);
	printf("r2w_factor = %d\n", csd->r2w_factor);
	printf("tran_speed = %d\n", csd->tran_speed);
	printf("ccc = 0x%x\n", csd->ccc);
}
578 #endif
579
580 /*
581 * Initialize a SD/MMC memory card.
582 */
/*
 * Initialize a SD/MMC memory card: select it (non-SPI), set the block
 * length to SDMMC_SECTOR_SIZE, then run the SD- or MMC-specific init.
 * On any failure the function is flagged SFF_ERROR and the error is
 * returned.
 */
int
sdmmc_mem_init(struct sdmmc_softc *sc, struct sdmmc_function *sf)
{
	int error = 0;

	SDMMC_LOCK(sc);

	if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
		error = sdmmc_select_card(sc, sf);
		if (error)
			goto out;
	}

	error = sdmmc_mem_set_blocklen(sc, sf, SDMMC_SECTOR_SIZE);
	if (error)
		goto out;

	/* Bus-specific bring-up: speed modes, bus width, etc. */
	if (ISSET(sc->sc_flags, SMF_SD_MODE))
		error = sdmmc_mem_sd_init(sc, sf);
	else
		error = sdmmc_mem_mmc_init(sc, sf);

	if (error != 0)
		SET(sf->flags, SFF_ERROR);

out:
	SDMMC_UNLOCK(sc);

	return error;
}
613
614 /*
615 * Get or set the card's memory OCR value (SD or MMC).
616 */
/*
 * Get or set the card's memory OCR value (SD ACMD41 or MMC CMD1).
 *
 * Polls up to 100 times (10ms pause between tries) until the card
 * reports ready: MMC_OCR_MEM_READY in the R3 response, or the idle
 * bit cleared in SPI mode.  With ocr == 0 (native mode) this is a
 * pure OCR query and returns after the first response.  The resulting
 * OCR is stored through *ocrp if non-NULL.
 */
int
sdmmc_mem_send_op_cond(struct sdmmc_softc *sc, uint32_t ocr, uint32_t *ocrp)
{
	struct sdmmc_command cmd;
	int error;
	int retry;

	/* Don't lock */

	DPRINTF(("%s: sdmmc_mem_send_op_cond: ocr=%#x\n",
	    SDMMCDEVNAME(sc), ocr));

	/*
	 * If we change the OCR value, retry the command until the OCR
	 * we receive in response has the "CARD BUSY" bit set, meaning
	 * that all cards are ready for identification.
	 */
	for (retry = 0; retry < 100; retry++) {
		memset(&cmd, 0, sizeof(cmd));
		/* SPI mode passes only the HCS bit in the argument. */
		cmd.c_arg = !ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE) ?
		    ocr : (ocr & MMC_OCR_HCS);
		cmd.c_flags = SCF_CMD_BCR | SCF_RSP_R3 | SCF_RSP_SPI_R1
		    | SCF_TOUT_OK;

		if (ISSET(sc->sc_flags, SMF_SD_MODE)) {
			cmd.c_opcode = SD_APP_OP_COND;
			error = sdmmc_app_command(sc, NULL, &cmd);
		} else {
			cmd.c_opcode = MMC_SEND_OP_COND;
			error = sdmmc_mmc_command(sc, &cmd);
		}
		if (error)
			break;

		if (ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
			/* SPI: done once the card leaves the idle state. */
			if (!ISSET(MMC_SPI_R1(cmd.c_resp), R1_SPI_IDLE))
				break;
		} else {
			if (ISSET(MMC_R3(cmd.c_resp), MMC_OCR_MEM_READY) ||
			    ocr == 0)
				break;
		}

		error = ETIMEDOUT;
		sdmmc_pause(10000, NULL);
	}
	if (ocrp != NULL) {
		if (error == 0 &&
		    !ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
			*ocrp = MMC_R3(cmd.c_resp);
		} else {
			/* SPI or failure: echo the requested OCR back. */
			*ocrp = ocr;
		}
	}
	DPRINTF(("%s: sdmmc_mem_send_op_cond: error=%d, ocr=%#x\n",
	    SDMMCDEVNAME(sc), error, MMC_R3(cmd.c_resp)));
	return error;
}
675
/*
 * Issue CMD8 (SEND_IF_COND) with the given voltage/check-pattern
 * argument.  A SD Ver.2+ card echoes the argument back; the echoed
 * value is stored through *ocrp on success.  Older cards simply time
 * out (SCF_TOUT_OK makes that a soft failure).
 */
int
sdmmc_mem_send_if_cond(struct sdmmc_softc *sc, uint32_t ocr, uint32_t *ocrp)
{
	struct sdmmc_command cmd;
	int error;

	/* Don't lock */

	memset(&cmd, 0, sizeof(cmd));
	cmd.c_arg = ocr;
	cmd.c_flags = SCF_CMD_BCR | SCF_RSP_R7 | SCF_RSP_SPI_R7 | SCF_TOUT_OK;
	cmd.c_opcode = SD_SEND_IF_COND;

	error = sdmmc_mmc_command(sc, &cmd);
	if (error == 0 && ocrp != NULL) {
		if (ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
			*ocrp = MMC_SPI_R7(cmd.c_resp);
		} else {
			*ocrp = MMC_R7(cmd.c_resp);
		}
		DPRINTF(("%s: sdmmc_mem_send_if_cond: error=%d, ocr=%#x\n",
		    SDMMCDEVNAME(sc), error, *ocrp));
	}
	return error;
}
701
702 /*
703 * Set the read block length appropriately for this card, according to
704 * the card CSD register value.
705 */
/*
 * Set the read block length appropriately for this card, according to
 * the card CSD register value.
 *
 * Sends CMD16 (SET_BLOCKLEN) with the requested length in bytes and
 * returns the command's error status.
 */
int
sdmmc_mem_set_blocklen(struct sdmmc_softc *sc, struct sdmmc_function *sf,
    int block_len)
{
	struct sdmmc_command cmd;
	int error;

	/* Don't lock */

	memset(&cmd, 0, sizeof(cmd));
	cmd.c_opcode = MMC_SET_BLOCKLEN;
	cmd.c_arg = block_len;
	cmd.c_flags = SCF_CMD_AC | SCF_RSP_R1 | SCF_RSP_SPI_R1;

	error = sdmmc_mmc_command(sc, &cmd);

	DPRINTF(("%s: sdmmc_mem_set_blocklen: read_bl_len=%d sector_size=%d\n",
	    SDMMCDEVNAME(sc), 1 << sf->csd.read_bl_len, block_len));

	return error;
}
727
728 /* make 512-bit BE quantity __bitfield()-compatible */
729 static void
730 sdmmc_be512_to_bitfield512(sdmmc_bitfield512_t *buf) {
731 size_t i;
732 uint32_t tmp0, tmp1;
733 const size_t bitswords = __arraycount(buf->_bits);
734 for (i = 0; i < bitswords/2; i++) {
735 tmp0 = buf->_bits[i];
736 tmp1 = buf->_bits[bitswords - 1 - i];
737 buf->_bits[i] = be32toh(tmp1);
738 buf->_bits[bitswords - 1 - i] = be32toh(tmp0);
739 }
740 }
741
742 static int
743 sdmmc_mem_select_transfer_mode(struct sdmmc_softc *sc, int support_func)
744 {
745 if (ISSET(sc->sc_flags, SMF_UHS_MODE)) {
746 if (ISSET(sc->sc_caps, SMC_CAPS_UHS_SDR104) &&
747 ISSET(support_func, 1 << SD_ACCESS_MODE_SDR104)) {
748 return SD_ACCESS_MODE_SDR104;
749 }
750 if (ISSET(sc->sc_caps, SMC_CAPS_UHS_DDR50) &&
751 ISSET(support_func, 1 << SD_ACCESS_MODE_DDR50)) {
752 return SD_ACCESS_MODE_DDR50;
753 }
754 if (ISSET(sc->sc_caps, SMC_CAPS_UHS_SDR50) &&
755 ISSET(support_func, 1 << SD_ACCESS_MODE_SDR50)) {
756 return SD_ACCESS_MODE_SDR50;
757 }
758 }
759 if (ISSET(sc->sc_caps, SMC_CAPS_SD_HIGHSPEED) &&
760 ISSET(support_func, 1 << SD_ACCESS_MODE_SDR25)) {
761 return SD_ACCESS_MODE_SDR25;
762 }
763 return SD_ACCESS_MODE_SDR12;
764 }
765
/*
 * Run the host controller's tuning procedure when the selected speed
 * mode requires it: SDR50/SDR104 for SD cards in UHS mode, HS200 for
 * MMC.  The mode is inferred from sf->csd.tran_speed (KHz).  Returns
 * 0 immediately for modes that need no tuning.
 */
static int
sdmmc_mem_execute_tuning(struct sdmmc_softc *sc, struct sdmmc_function *sf)
{
	int timing = -1;

	if (ISSET(sc->sc_flags, SMF_SD_MODE)) {
		if (!ISSET(sc->sc_flags, SMF_UHS_MODE))
			return 0;

		switch (sf->csd.tran_speed) {
		case 100000:
			timing = SDMMC_TIMING_UHS_SDR50;
			break;
		case 208000:
			timing = SDMMC_TIMING_UHS_SDR104;
			break;
		default:
			return 0;
		}
	} else {
		switch (sf->csd.tran_speed) {
		case 200000:
			timing = SDMMC_TIMING_MMC_HS200;
			break;
		default:
			return 0;
		}
	}

	DPRINTF(("%s: execute tuning for timing %d\n", SDMMCDEVNAME(sc),
	    timing));

	return sdmmc_chip_execute_tuning(sc->sc_sct, sc->sc_sch, timing);
}
800
801 static int
802 sdmmc_mem_sd_init(struct sdmmc_softc *sc, struct sdmmc_function *sf)
803 {
804 int support_func, best_func, bus_clock, error, i;
805 sdmmc_bitfield512_t status;
806 bool ddr = false;
807
808 /* change bus clock */
809 bus_clock = uimin(sc->sc_busclk, sf->csd.tran_speed);
810 error = sdmmc_chip_bus_clock(sc->sc_sct, sc->sc_sch, bus_clock, false);
811 if (error) {
812 aprint_error_dev(sc->sc_dev, "can't change bus clock\n");
813 return error;
814 }
815
816 error = sdmmc_mem_send_scr(sc, sf, sf->raw_scr);
817 if (error) {
818 aprint_error_dev(sc->sc_dev, "SD_SEND_SCR send failed.\n");
819 return error;
820 }
821 error = sdmmc_mem_decode_scr(sc, sf);
822 if (error)
823 return error;
824
825 if (ISSET(sc->sc_caps, SMC_CAPS_4BIT_MODE) &&
826 ISSET(sf->scr.bus_width, SCR_SD_BUS_WIDTHS_4BIT)) {
827 DPRINTF(("%s: change bus width\n", SDMMCDEVNAME(sc)));
828 error = sdmmc_set_bus_width(sf, 4);
829 if (error) {
830 aprint_error_dev(sc->sc_dev,
831 "can't change bus width (%d bit)\n", 4);
832 return error;
833 }
834 sf->width = 4;
835 }
836
837 best_func = 0;
838 if (sf->scr.sd_spec >= SCR_SD_SPEC_VER_1_10 &&
839 ISSET(sf->csd.ccc, SD_CSD_CCC_SWITCH)) {
840 DPRINTF(("%s: switch func mode 0\n", SDMMCDEVNAME(sc)));
841 error = sdmmc_mem_sd_switch(sf, 0, 1, 0, &status);
842 if (error) {
843 if (error == ENOTSUP) {
844 /* Not supported by controller */
845 goto skipswitchfuncs;
846 } else {
847 aprint_error_dev(sc->sc_dev,
848 "switch func mode 0 failed\n");
849 return error;
850 }
851 }
852
853 support_func = SFUNC_STATUS_GROUP(&status, 1);
854
855 if (!ISSET(sc->sc_flags, SMF_UHS_MODE) && support_func & 0x1c) {
856 /* XXX UHS-I card started in 1.8V mode, switch now */
857 error = sdmmc_mem_signal_voltage(sc,
858 SDMMC_SIGNAL_VOLTAGE_180);
859 if (error) {
860 aprint_error_dev(sc->sc_dev,
861 "failed to recover UHS card\n");
862 return error;
863 }
864 SET(sc->sc_flags, SMF_UHS_MODE);
865 }
866
867 for (i = 0; i < __arraycount(switch_group0_functions); i++) {
868 if (!(support_func & (1 << i)))
869 continue;
870 DPRINTF(("%s: card supports mode %s\n",
871 SDMMCDEVNAME(sc),
872 switch_group0_functions[i].name));
873 }
874
875 best_func = sdmmc_mem_select_transfer_mode(sc, support_func);
876
877 DPRINTF(("%s: using mode %s\n", SDMMCDEVNAME(sc),
878 switch_group0_functions[best_func].name));
879
880 if (best_func != 0) {
881 DPRINTF(("%s: switch func mode 1(func=%d)\n",
882 SDMMCDEVNAME(sc), best_func));
883 error =
884 sdmmc_mem_sd_switch(sf, 1, 1, best_func, &status);
885 if (error) {
886 aprint_error_dev(sc->sc_dev,
887 "switch func mode 1 failed:"
888 " group 1 function %d(0x%2x)\n",
889 best_func, support_func);
890 return error;
891 }
892 sf->csd.tran_speed =
893 switch_group0_functions[best_func].freq;
894
895 if (best_func == SD_ACCESS_MODE_DDR50)
896 ddr = true;
897
898 /* Wait 400KHz x 8 clock (2.5us * 8 + slop) */
899 delay(25);
900 }
901 }
902 skipswitchfuncs:
903
904 /* update bus clock */
905 if (sc->sc_busclk > sf->csd.tran_speed)
906 sc->sc_busclk = sf->csd.tran_speed;
907 if (sc->sc_busclk != bus_clock || sc->sc_busddr != ddr) {
908 /* change bus clock */
909 error = sdmmc_chip_bus_clock(sc->sc_sct, sc->sc_sch, sc->sc_busclk,
910 ddr);
911 if (error) {
912 aprint_error_dev(sc->sc_dev, "can't change bus clock\n");
913 return error;
914 }
915
916 sc->sc_transfer_mode = switch_group0_functions[best_func].name;
917 sc->sc_busddr = ddr;
918 }
919
920 /* get card status */
921 error = sdmmc_mem_send_ssr(sc, sf, &status);
922 if (error) {
923 aprint_error_dev(sc->sc_dev, "can't get SD status: %d\n",
924 error);
925 return error;
926 }
927 sdmmc_mem_decode_ssr(sc, sf, &status);
928
929 /* execute tuning (UHS) */
930 error = sdmmc_mem_execute_tuning(sc, sf);
931 if (error) {
932 aprint_error_dev(sc->sc_dev, "can't execute SD tuning\n");
933 return error;
934 }
935
936 /* detect extended functions */
937 if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE) && sf->scr.support_cmd48) {
938 uint8_t ginfo[512];
939 error = sdmmc_mem_read_extr_single(sc, sf, SD_EXTR_MIO_MEM, 0, 0,
940 sizeof(ginfo), ginfo);
941 if (error == 0) {
942 sdmmc_mem_decode_general_info(sc, sf, ginfo);
943 }
944 }
945
946 /* enable card cache if supported */
947 if (sf->ssr.cache && sf->ext_sd.pef.valid) {
948 error = sdmmc_mem_pef_enable_cache(sc, sf);
949 if (error != 0) {
950 aprint_error_dev(sc->sc_dev,
951 "can't enable cache: %d", error);
952 } else {
953 SET(sf->flags, SFF_CACHE_ENABLED);
954 }
955 }
956
957 return 0;
958 }
959
960 static int
961 sdmmc_mem_mmc_init(struct sdmmc_softc *sc, struct sdmmc_function *sf)
962 {
963 int width, value, hs_timing, bus_clock, error;
964 uint8_t ext_csd[512];
965 uint32_t sectors = 0;
966 bool ddr = false;
967
968 sc->sc_transfer_mode = NULL;
969
970 /* change bus clock */
971 bus_clock = uimin(sc->sc_busclk, sf->csd.tran_speed);
972 error = sdmmc_chip_bus_clock(sc->sc_sct, sc->sc_sch, bus_clock, false);
973 if (error) {
974 aprint_error_dev(sc->sc_dev, "can't change bus clock\n");
975 return error;
976 }
977
978 if (sf->csd.mmcver >= MMC_CSD_MMCVER_4_0) {
979 error = sdmmc_mem_send_cxd_data(sc,
980 MMC_SEND_EXT_CSD, ext_csd, sizeof(ext_csd));
981 if (error) {
982 aprint_error_dev(sc->sc_dev,
983 "can't read EXT_CSD (error=%d)\n", error);
984 return error;
985 }
986 if ((sf->csd.csdver == MMC_CSD_CSDVER_EXT_CSD) &&
987 (ext_csd[EXT_CSD_STRUCTURE] > EXT_CSD_STRUCTURE_VER_1_2)) {
988 aprint_error_dev(sc->sc_dev,
989 "unrecognised future version (%d)\n",
990 ext_csd[EXT_CSD_STRUCTURE]);
991 return ENOTSUP;
992 }
993 sf->ext_csd.rev = ext_csd[EXT_CSD_REV];
994
995 if (ISSET(sc->sc_caps, SMC_CAPS_MMC_HS200) &&
996 ext_csd[EXT_CSD_CARD_TYPE] & EXT_CSD_CARD_TYPE_F_HS200_1_8V) {
997 hs_timing = EXT_CSD_HS_TIMING_HS200;
998 } else if (ISSET(sc->sc_caps, SMC_CAPS_MMC_DDR52) &&
999 ext_csd[EXT_CSD_CARD_TYPE] & EXT_CSD_CARD_TYPE_F_DDR52_1_8V) {
1000 hs_timing = EXT_CSD_HS_TIMING_HIGHSPEED;
1001 ddr = true;
1002 } else if (ext_csd[EXT_CSD_CARD_TYPE] & EXT_CSD_CARD_TYPE_F_52M) {
1003 hs_timing = EXT_CSD_HS_TIMING_HIGHSPEED;
1004 } else if (ext_csd[EXT_CSD_CARD_TYPE] & EXT_CSD_CARD_TYPE_F_26M) {
1005 hs_timing = EXT_CSD_HS_TIMING_LEGACY;
1006 } else {
1007 aprint_error_dev(sc->sc_dev,
1008 "unknown CARD_TYPE: 0x%x\n",
1009 ext_csd[EXT_CSD_CARD_TYPE]);
1010 return ENOTSUP;
1011 }
1012
1013 if (ISSET(sc->sc_caps, SMC_CAPS_8BIT_MODE)) {
1014 width = 8;
1015 value = EXT_CSD_BUS_WIDTH_8;
1016 } else if (ISSET(sc->sc_caps, SMC_CAPS_4BIT_MODE)) {
1017 width = 4;
1018 value = EXT_CSD_BUS_WIDTH_4;
1019 } else {
1020 width = 1;
1021 value = EXT_CSD_BUS_WIDTH_1;
1022 }
1023
1024 if (width != 1) {
1025 error = sdmmc_mem_mmc_switch(sf, EXT_CSD_CMD_SET_NORMAL,
1026 EXT_CSD_BUS_WIDTH, value, false);
1027 if (error == 0)
1028 error = sdmmc_chip_bus_width(sc->sc_sct,
1029 sc->sc_sch, width);
1030 else {
1031 DPRINTF(("%s: can't change bus width"
1032 " (%d bit)\n", SDMMCDEVNAME(sc), width));
1033 return error;
1034 }
1035
1036 /* XXXX: need bus test? (using by CMD14 & CMD19) */
1037 delay(10000);
1038 }
1039 sf->width = width;
1040
1041 if (hs_timing == EXT_CSD_HS_TIMING_HIGHSPEED &&
1042 !ISSET(sc->sc_caps, SMC_CAPS_MMC_HIGHSPEED)) {
1043 hs_timing = EXT_CSD_HS_TIMING_LEGACY;
1044 }
1045
1046 const int target_timing = hs_timing;
1047 if (hs_timing != EXT_CSD_HS_TIMING_LEGACY) {
1048 while (hs_timing >= EXT_CSD_HS_TIMING_LEGACY) {
1049 error = sdmmc_mem_mmc_switch(sf, EXT_CSD_CMD_SET_NORMAL,
1050 EXT_CSD_HS_TIMING, hs_timing, false);
1051 if (error == 0 || hs_timing == EXT_CSD_HS_TIMING_LEGACY)
1052 break;
1053 hs_timing--;
1054 }
1055 }
1056 if (hs_timing != target_timing) {
1057 aprint_debug_dev(sc->sc_dev,
1058 "card failed to switch to timing mode %d, using %d\n",
1059 target_timing, hs_timing);
1060 }
1061
1062 KASSERT(hs_timing < __arraycount(sdmmc_mmc_timings));
1063 sf->csd.tran_speed = sdmmc_mmc_timings[hs_timing];
1064
1065 if (sc->sc_busclk > sf->csd.tran_speed)
1066 sc->sc_busclk = sf->csd.tran_speed;
1067 if (sc->sc_busclk != bus_clock) {
1068 error = sdmmc_chip_bus_clock(sc->sc_sct, sc->sc_sch,
1069 sc->sc_busclk, false);
1070 if (error) {
1071 aprint_error_dev(sc->sc_dev,
1072 "can't change bus clock\n");
1073 return error;
1074 }
1075 }
1076
1077 if (hs_timing != EXT_CSD_HS_TIMING_LEGACY) {
1078 error = sdmmc_mem_send_cxd_data(sc,
1079 MMC_SEND_EXT_CSD, ext_csd, sizeof(ext_csd));
1080 if (error) {
1081 aprint_error_dev(sc->sc_dev,
1082 "can't re-read EXT_CSD\n");
1083 return error;
1084 }
1085 if (ext_csd[EXT_CSD_HS_TIMING] != hs_timing) {
1086 aprint_error_dev(sc->sc_dev,
1087 "HS_TIMING set failed\n");
1088 return EINVAL;
1089 }
1090 }
1091
1092 /*
1093 * HS_TIMING must be set to 0x1 before setting BUS_WIDTH
1094 * for dual data rate operation
1095 */
1096 if (ddr &&
1097 hs_timing == EXT_CSD_HS_TIMING_HIGHSPEED &&
1098 width > 1) {
1099 error = sdmmc_mem_mmc_switch(sf,
1100 EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BUS_WIDTH,
1101 (width == 8) ? EXT_CSD_BUS_WIDTH_8_DDR :
1102 EXT_CSD_BUS_WIDTH_4_DDR, false);
1103 if (error) {
1104 DPRINTF(("%s: can't switch to DDR"
1105 " (%d bit)\n", SDMMCDEVNAME(sc), width));
1106 return error;
1107 }
1108
1109 delay(10000);
1110
1111 error = sdmmc_mem_signal_voltage(sc,
1112 SDMMC_SIGNAL_VOLTAGE_180);
1113 if (error) {
1114 aprint_error_dev(sc->sc_dev,
1115 "can't switch signaling voltage\n");
1116 return error;
1117 }
1118
1119 error = sdmmc_chip_bus_clock(sc->sc_sct, sc->sc_sch,
1120 sc->sc_busclk, ddr);
1121 if (error) {
1122 aprint_error_dev(sc->sc_dev,
1123 "can't change bus clock\n");
1124 return error;
1125 }
1126
1127 delay(10000);
1128
1129 sc->sc_transfer_mode = "DDR52";
1130 sc->sc_busddr = ddr;
1131 }
1132
1133 sectors = ext_csd[EXT_CSD_SEC_COUNT + 0] << 0 |
1134 ext_csd[EXT_CSD_SEC_COUNT + 1] << 8 |
1135 ext_csd[EXT_CSD_SEC_COUNT + 2] << 16 |
1136 ext_csd[EXT_CSD_SEC_COUNT + 3] << 24;
1137 if (sectors > (2u * 1024 * 1024 * 1024) / 512) {
1138 SET(sf->flags, SFF_SDHC);
1139 sf->csd.capacity = sectors;
1140 }
1141
1142 if (hs_timing == EXT_CSD_HS_TIMING_HS200) {
1143 sc->sc_transfer_mode = "HS200";
1144
1145 /* execute tuning (HS200) */
1146 error = sdmmc_mem_execute_tuning(sc, sf);
1147 if (error) {
1148 aprint_error_dev(sc->sc_dev,
1149 "can't execute MMC tuning\n");
1150 return error;
1151 }
1152 }
1153
1154 if (sf->ext_csd.rev >= 5) {
1155 sf->ext_csd.rst_n_function =
1156 ext_csd[EXT_CSD_RST_N_FUNCTION];
1157 }
1158
1159 if (sf->ext_csd.rev >= 6) {
1160 sf->ext_csd.cache_size =
1161 le32dec(&ext_csd[EXT_CSD_CACHE_SIZE]) * 1024;
1162 }
1163 if (sf->ext_csd.cache_size > 0) {
1164 /* eMMC cache present, enable it */
1165 error = sdmmc_mem_mmc_switch(sf,
1166 EXT_CSD_CMD_SET_NORMAL, EXT_CSD_CACHE_CTRL,
1167 EXT_CSD_CACHE_CTRL_CACHE_EN, false);
1168 if (error) {
1169 aprint_error_dev(sc->sc_dev,
1170 "can't enable cache: %d\n", error);
1171 } else {
1172 SET(sf->flags, SFF_CACHE_ENABLED);
1173 }
1174 }
1175 } else {
1176 if (sc->sc_busclk > sf->csd.tran_speed)
1177 sc->sc_busclk = sf->csd.tran_speed;
1178 if (sc->sc_busclk != bus_clock) {
1179 error = sdmmc_chip_bus_clock(sc->sc_sct, sc->sc_sch,
1180 sc->sc_busclk, false);
1181 if (error) {
1182 aprint_error_dev(sc->sc_dev,
1183 "can't change bus clock\n");
1184 return error;
1185 }
1186 }
1187 }
1188
1189 return 0;
1190 }
1191
1192 static int
1193 sdmmc_mem_send_cid(struct sdmmc_softc *sc, sdmmc_response *resp)
1194 {
1195 struct sdmmc_command cmd;
1196 int error;
1197
1198 if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
1199 memset(&cmd, 0, sizeof cmd);
1200 cmd.c_opcode = MMC_ALL_SEND_CID;
1201 cmd.c_flags = SCF_CMD_BCR | SCF_RSP_R2 | SCF_TOUT_OK;
1202
1203 error = sdmmc_mmc_command(sc, &cmd);
1204 } else {
1205 error = sdmmc_mem_send_cxd_data(sc, MMC_SEND_CID, &cmd.c_resp,
1206 sizeof(cmd.c_resp));
1207 }
1208
1209 #ifdef SDMMC_DEBUG
1210 if (error == 0)
1211 sdmmc_dump_data("CID", cmd.c_resp, sizeof(cmd.c_resp));
1212 #endif
1213 if (error == 0 && resp != NULL)
1214 memcpy(resp, &cmd.c_resp, sizeof(*resp));
1215 return error;
1216 }
1217
1218 static int
1219 sdmmc_mem_send_csd(struct sdmmc_softc *sc, struct sdmmc_function *sf,
1220 sdmmc_response *resp)
1221 {
1222 struct sdmmc_command cmd;
1223 int error;
1224
1225 if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
1226 memset(&cmd, 0, sizeof cmd);
1227 cmd.c_opcode = MMC_SEND_CSD;
1228 cmd.c_arg = MMC_ARG_RCA(sf->rca);
1229 cmd.c_flags = SCF_CMD_AC | SCF_RSP_R2;
1230
1231 error = sdmmc_mmc_command(sc, &cmd);
1232 } else {
1233 error = sdmmc_mem_send_cxd_data(sc, MMC_SEND_CSD, &cmd.c_resp,
1234 sizeof(cmd.c_resp));
1235 }
1236
1237 #ifdef SDMMC_DEBUG
1238 if (error == 0)
1239 sdmmc_dump_data("CSD", cmd.c_resp, sizeof(cmd.c_resp));
1240 #endif
1241 if (error == 0 && resp != NULL)
1242 memcpy(resp, &cmd.c_resp, sizeof(*resp));
1243 return error;
1244 }
1245
1246 static int
1247 sdmmc_mem_send_scr(struct sdmmc_softc *sc, struct sdmmc_function *sf,
1248 uint32_t *scr)
1249 {
1250 struct sdmmc_command cmd;
1251 bus_dma_segment_t ds[1];
1252 void *ptr = NULL;
1253 int datalen = 8;
1254 int rseg;
1255 int error = 0;
1256
1257 /* Don't lock */
1258
1259 if (ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
1260 error = bus_dmamem_alloc(sc->sc_dmat, datalen, PAGE_SIZE, 0,
1261 ds, 1, &rseg, BUS_DMA_NOWAIT);
1262 if (error)
1263 goto out;
1264 error = bus_dmamem_map(sc->sc_dmat, ds, 1, datalen, &ptr,
1265 BUS_DMA_NOWAIT);
1266 if (error)
1267 goto dmamem_free;
1268 error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmap, ptr, datalen,
1269 NULL, BUS_DMA_NOWAIT|BUS_DMA_STREAMING|BUS_DMA_READ);
1270 if (error)
1271 goto dmamem_unmap;
1272
1273 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap, 0, datalen,
1274 BUS_DMASYNC_PREREAD);
1275 } else {
1276 ptr = malloc(datalen, M_DEVBUF, M_NOWAIT | M_ZERO);
1277 if (ptr == NULL)
1278 goto out;
1279 }
1280
1281 memset(&cmd, 0, sizeof(cmd));
1282 cmd.c_data = ptr;
1283 cmd.c_datalen = datalen;
1284 cmd.c_blklen = datalen;
1285 cmd.c_arg = 0;
1286 cmd.c_flags = SCF_CMD_ADTC | SCF_CMD_READ | SCF_RSP_R1 | SCF_RSP_SPI_R1;
1287 cmd.c_opcode = SD_APP_SEND_SCR;
1288 if (ISSET(sc->sc_caps, SMC_CAPS_DMA))
1289 cmd.c_dmamap = sc->sc_dmap;
1290
1291 error = sdmmc_app_command(sc, sf, &cmd);
1292 if (error == 0) {
1293 if (ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
1294 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap, 0, datalen,
1295 BUS_DMASYNC_POSTREAD);
1296 }
1297 memcpy(scr, ptr, datalen);
1298 }
1299
1300 out:
1301 if (ptr != NULL) {
1302 if (ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
1303 bus_dmamap_unload(sc->sc_dmat, sc->sc_dmap);
1304 dmamem_unmap:
1305 bus_dmamem_unmap(sc->sc_dmat, ptr, datalen);
1306 dmamem_free:
1307 bus_dmamem_free(sc->sc_dmat, ds, rseg);
1308 } else {
1309 free(ptr, M_DEVBUF);
1310 }
1311 }
1312 DPRINTF(("%s: sdmem_mem_send_scr: error = %d\n", SDMMCDEVNAME(sc),
1313 error));
1314
1315 #ifdef SDMMC_DEBUG
1316 if (error == 0)
1317 sdmmc_dump_data("SCR", scr, datalen);
1318 #endif
1319 return error;
1320 }
1321
1322 static int
1323 sdmmc_mem_decode_scr(struct sdmmc_softc *sc, struct sdmmc_function *sf)
1324 {
1325 sdmmc_response resp;
1326 int ver;
1327
1328 memset(resp, 0, sizeof(resp));
1329 /*
1330 * Change the raw-scr received from the DMA stream to resp.
1331 */
1332 resp[0] = be32toh(sf->raw_scr[1]) >> 8; // LSW
1333 resp[1] = be32toh(sf->raw_scr[0]); // MSW
1334 resp[0] |= (resp[1] & 0xff) << 24;
1335 resp[1] >>= 8;
1336
1337 ver = SCR_STRUCTURE(resp);
1338 sf->scr.sd_spec = SCR_SD_SPEC(resp);
1339 if (sf->scr.sd_spec == 2) {
1340 sf->scr.sd_spec3 = SCR_SD_SPEC3(resp);
1341 if (sf->scr.sd_spec3) {
1342 sf->scr.sd_spec4 = SCR_SD_SPEC4(resp);
1343 }
1344 }
1345 sf->scr.bus_width = SCR_SD_BUS_WIDTHS(resp);
1346 if (sf->scr.sd_spec4) {
1347 sf->scr.support_cmd48 = SCR_CMD_SUPPORT_CMD48(resp);
1348 }
1349
1350 DPRINTF(("%s: sdmmc_mem_decode_scr: %08x%08x ver=%d, spec=%d,%d,%d, bus width=%d\n",
1351 SDMMCDEVNAME(sc), resp[1], resp[0],
1352 ver, sf->scr.sd_spec, sf->scr.sd_spec3, sf->scr.sd_spec4, sf->scr.bus_width));
1353
1354 if (ver != 0 && ver != 1) {
1355 DPRINTF(("%s: unknown structure version: %d\n",
1356 SDMMCDEVNAME(sc), ver));
1357 return EINVAL;
1358 }
1359 return 0;
1360 }
1361
1362 static int
1363 sdmmc_mem_send_ssr(struct sdmmc_softc *sc, struct sdmmc_function *sf,
1364 sdmmc_bitfield512_t *ssr)
1365 {
1366 struct sdmmc_command cmd;
1367 bus_dma_segment_t ds[1];
1368 void *ptr = NULL;
1369 int datalen = 64;
1370 int rseg;
1371 int error = 0;
1372
1373 /* Don't lock */
1374
1375 if (ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
1376 error = bus_dmamem_alloc(sc->sc_dmat, datalen, PAGE_SIZE, 0,
1377 ds, 1, &rseg, BUS_DMA_NOWAIT);
1378 if (error)
1379 goto out;
1380 error = bus_dmamem_map(sc->sc_dmat, ds, 1, datalen, &ptr,
1381 BUS_DMA_NOWAIT);
1382 if (error)
1383 goto dmamem_free;
1384 error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmap, ptr, datalen,
1385 NULL, BUS_DMA_NOWAIT|BUS_DMA_STREAMING|BUS_DMA_READ);
1386 if (error)
1387 goto dmamem_unmap;
1388
1389 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap, 0, datalen,
1390 BUS_DMASYNC_PREREAD);
1391 } else {
1392 ptr = malloc(datalen, M_DEVBUF, M_NOWAIT | M_ZERO);
1393 if (ptr == NULL)
1394 goto out;
1395 }
1396
1397 memset(&cmd, 0, sizeof(cmd));
1398 cmd.c_data = ptr;
1399 cmd.c_datalen = datalen;
1400 cmd.c_blklen = datalen;
1401 cmd.c_arg = 0;
1402 cmd.c_flags = SCF_CMD_ADTC | SCF_CMD_READ | SCF_RSP_R1 | SCF_RSP_SPI_R1;
1403 cmd.c_opcode = SD_APP_SD_STATUS;
1404 if (ISSET(sc->sc_caps, SMC_CAPS_DMA))
1405 cmd.c_dmamap = sc->sc_dmap;
1406
1407 error = sdmmc_app_command(sc, sf, &cmd);
1408 if (error == 0) {
1409 if (ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
1410 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap, 0, datalen,
1411 BUS_DMASYNC_POSTREAD);
1412 }
1413 memcpy(ssr, ptr, datalen);
1414 }
1415
1416 out:
1417 if (ptr != NULL) {
1418 if (ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
1419 bus_dmamap_unload(sc->sc_dmat, sc->sc_dmap);
1420 dmamem_unmap:
1421 bus_dmamem_unmap(sc->sc_dmat, ptr, datalen);
1422 dmamem_free:
1423 bus_dmamem_free(sc->sc_dmat, ds, rseg);
1424 } else {
1425 free(ptr, M_DEVBUF);
1426 }
1427 }
1428 DPRINTF(("%s: sdmem_mem_send_ssr: error = %d\n", SDMMCDEVNAME(sc),
1429 error));
1430
1431 if (error == 0)
1432 sdmmc_be512_to_bitfield512(ssr);
1433
1434 #ifdef SDMMC_DEBUG
1435 if (error == 0)
1436 sdmmc_dump_data("SSR", ssr, datalen);
1437 #endif
1438 return error;
1439 }
1440
/*
 * Decode the SD Status Register (already converted to host order) and
 * log a one-line summary of the card's advertised capabilities.  Also
 * records in sf->ssr.cache whether the card advertises the cache
 * performance-enhancement feature.  Always returns 0.
 */
static int
sdmmc_mem_decode_ssr(struct sdmmc_softc *sc, struct sdmmc_function *sf,
    sdmmc_bitfield512_t *ssr_bitfield)
{
	uint32_t *ssr = (uint32_t *)ssr_bitfield;
	int speed_class_val, bus_width_val;

	const int bus_width = SSR_DAT_BUS_WIDTH(ssr);
	const int speed_class = SSR_SPEED_CLASS(ssr);
	const int uhs_speed_grade = SSR_UHS_SPEED_GRADE(ssr);
	const int video_speed_class = SSR_VIDEO_SPEED_CLASS(ssr);
	const int app_perf_class = SSR_APP_PERF_CLASS(ssr);
	const uint64_t perf_enhance = SSR_PERFORMANCE_ENHANCE(ssr);

	/* Map the encoded speed class to its printable "C<n>" number. */
	switch (speed_class) {
	case SSR_SPEED_CLASS_0: speed_class_val = 0; break;
	case SSR_SPEED_CLASS_2: speed_class_val = 2; break;
	case SSR_SPEED_CLASS_4: speed_class_val = 4; break;
	case SSR_SPEED_CLASS_6: speed_class_val = 6; break;
	case SSR_SPEED_CLASS_10: speed_class_val = 10; break;
	default: speed_class_val = -1; break;
	}

	/* -1 marks an encoding we do not recognize. */
	switch (bus_width) {
	case SSR_DAT_BUS_WIDTH_1: bus_width_val = 1; break;
	case SSR_DAT_BUS_WIDTH_4: bus_width_val = 4; break;
	default: bus_width_val = -1;
	}

	if (ISSET(perf_enhance, SSR_PERFORMANCE_ENHANCE_CACHE)) {
		sf->ssr.cache = true;
	}

	/*
	 * Log card status
	 */
	device_printf(sc->sc_dev, "SD card status:");
	if (bus_width_val != -1)
		printf(" %d-bit", bus_width_val);
	else
		printf(" unknown bus width");
	if (speed_class_val != -1)
		printf(", C%d", speed_class_val);
	if (uhs_speed_grade)
		printf(", U%d", uhs_speed_grade);
	if (video_speed_class)
		printf(", V%d", video_speed_class);
	if (app_perf_class)
		printf(", A%d", app_perf_class);
	if (ISSET(perf_enhance, SSR_PERFORMANCE_ENHANCE_CACHE))
		printf(", Cache");
	/* Report which side(s) may perform self-maintenance, if any. */
	if (ISSET(perf_enhance, SSR_PERFORMANCE_ENHANCE_HOST_MAINT|
		  SSR_PERFORMANCE_ENHANCE_CARD_MAINT)) {
		printf(", %s self-maintenance",
		    perf_enhance == SSR_PERFORMANCE_ENHANCE_HOST_MAINT ? "Host" :
		    perf_enhance == SSR_PERFORMANCE_ENHANCE_CARD_MAINT ? "Card" :
		    "Host/Card");
	}
	printf("\n");

	return 0;
}
1503
/*
 * Parse the General Information block read from the SD extension
 * register space.  Walks the linked list of extension entries and, when
 * a Performance Enhancement Function (PEF) entry is found, records its
 * function number and start address in sf->ext_sd.pef.  Always returns 0.
 */
static int
sdmmc_mem_decode_general_info(struct sdmmc_softc *sc, struct sdmmc_function *sf,
    const uint8_t *ginfo)
{
	uint16_t len = SD_GENERAL_INFO_HDR_LENGTH(ginfo);
	unsigned num_ext = SD_GENERAL_INFO_HDR_NUM_EXT(ginfo);
	unsigned index = SD_GENERAL_INFO_EXT_FIRST;
	unsigned ext;

	DPRINTF(("%s: sdmmc_mem_decode_general_info: rev=%u, len=%u, num_ext=%u\n",
	    SDMMCDEVNAME(sc), SD_GENERAL_INFO_HDR_REVISION(ginfo),
	    len, num_ext));

	/*
	 * General Information Length can span more than one page, but for
	 * now just parse the first one.
	 */
	len = uimin(SDMMC_SECTOR_SIZE, len);

	/* index == 0 is the list terminator; also stop at 'len' bytes. */
	for (ext = 0; ext < num_ext && index < len && index != 0; ext++) {
		uint16_t sfc = SD_EXTENSION_INFO_SFC(ginfo, index);
		unsigned num_reg = SD_EXTENSION_INFO_NUM_REG(ginfo, index);
		uint32_t reg;

		/* An entry with no registers carries nothing to decode. */
		if (num_reg == 0) {
			goto next_ext;
		}
		reg = SD_EXTENSION_INFO_REG(ginfo, index, 0);

		DPRINTF(("%s: sdmmc_mem_decode_general_info: sfc=0x%04x, reg=0x%08x\n",
		    SDMMCDEVNAME(sc), sfc, reg));

		/* Only the PEF standard function code is handled so far. */
		switch (sfc) {
		case SD_SFC_PEF:
			sf->ext_sd.pef.valid = true;
			sf->ext_sd.pef.fno =
			    SD_EXTENSION_INFO_REG_FNO(reg);
			sf->ext_sd.pef.start_addr =
			    SD_EXTENSION_INFO_REG_START_ADDR(reg);
			break;
		}

next_ext:
		index = SD_EXTENSION_INFO_NEXT(ginfo, index);
	}

	return 0;
}
1552
1553 static int
1554 sdmmc_mem_pef_enable_cache(struct sdmmc_softc *sc,
1555 struct sdmmc_function *sf)
1556 {
1557 uint8_t data[512];
1558 int error;
1559
1560 error = sdmmc_mem_read_extr_single(sc, sf, SD_EXTR_MIO_MEM,
1561 sf->ext_sd.pef.fno, sf->ext_sd.pef.start_addr,
1562 sizeof(data), data);
1563 if (error != 0) {
1564 return error;
1565 }
1566
1567 if (SD_PEF_CACHE_ENABLE(data)) {
1568 /* Cache is already enabled. */
1569 return 0;
1570 }
1571
1572 error = sdmmc_mem_write_extr_single(sc, sf, SD_EXTR_MIO_MEM,
1573 sf->ext_sd.pef.fno,
1574 sf->ext_sd.pef.start_addr + SD_PEF_CACHE_ENABLE_OFFSET, 1,
1575 false);
1576 if (error != 0) {
1577 device_printf(sc->sc_dev,
1578 "setting cache enable failed: %d\n", error);
1579 return error;
1580 }
1581
1582 device_printf(sc->sc_dev, "cache enabled\n");
1583
1584 return 0;
1585 }
1586
1587 static int
1588 sdmmc_mem_send_cxd_data(struct sdmmc_softc *sc, int opcode, void *data,
1589 size_t datalen)
1590 {
1591 struct sdmmc_command cmd;
1592 bus_dma_segment_t ds[1];
1593 void *ptr = NULL;
1594 int rseg;
1595 int error = 0;
1596
1597 if (ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
1598 error = bus_dmamem_alloc(sc->sc_dmat, datalen, PAGE_SIZE, 0, ds,
1599 1, &rseg, BUS_DMA_NOWAIT);
1600 if (error)
1601 goto out;
1602 error = bus_dmamem_map(sc->sc_dmat, ds, 1, datalen, &ptr,
1603 BUS_DMA_NOWAIT);
1604 if (error)
1605 goto dmamem_free;
1606 error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmap, ptr, datalen,
1607 NULL, BUS_DMA_NOWAIT|BUS_DMA_STREAMING|BUS_DMA_READ);
1608 if (error)
1609 goto dmamem_unmap;
1610
1611 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap, 0, datalen,
1612 BUS_DMASYNC_PREREAD);
1613 } else {
1614 ptr = malloc(datalen, M_DEVBUF, M_NOWAIT | M_ZERO);
1615 if (ptr == NULL)
1616 goto out;
1617 }
1618
1619 memset(&cmd, 0, sizeof(cmd));
1620 cmd.c_data = ptr;
1621 cmd.c_datalen = datalen;
1622 cmd.c_blklen = datalen;
1623 cmd.c_opcode = opcode;
1624 cmd.c_arg = 0;
1625 cmd.c_flags = SCF_CMD_ADTC | SCF_CMD_READ | SCF_RSP_SPI_R1;
1626 if (opcode == MMC_SEND_EXT_CSD)
1627 SET(cmd.c_flags, SCF_RSP_R1);
1628 else
1629 SET(cmd.c_flags, SCF_RSP_R2);
1630 if (ISSET(sc->sc_caps, SMC_CAPS_DMA))
1631 cmd.c_dmamap = sc->sc_dmap;
1632
1633 error = sdmmc_mmc_command(sc, &cmd);
1634 if (error == 0) {
1635 if (ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
1636 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap, 0, datalen,
1637 BUS_DMASYNC_POSTREAD);
1638 }
1639 memcpy(data, ptr, datalen);
1640 #ifdef SDMMC_DEBUG
1641 sdmmc_dump_data("CXD", data, datalen);
1642 #endif
1643 }
1644
1645 out:
1646 if (ptr != NULL) {
1647 if (ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
1648 bus_dmamap_unload(sc->sc_dmat, sc->sc_dmap);
1649 dmamem_unmap:
1650 bus_dmamem_unmap(sc->sc_dmat, ptr, datalen);
1651 dmamem_free:
1652 bus_dmamem_free(sc->sc_dmat, ds, rseg);
1653 } else {
1654 free(ptr, M_DEVBUF);
1655 }
1656 }
1657 return error;
1658 }
1659
/*
 * Read up to one sector from the SD extension register space with CMD48
 * (READ_EXTR_SINGLE).  'mio' selects memory vs. I/O space, 'fno' the
 * function number, 'addr' the byte address; 'datalen' bytes are copied
 * into 'data'.  Returns 0 or an errno value.
 */
static int
sdmmc_mem_read_extr_single(struct sdmmc_softc *sc, struct sdmmc_function *sf,
    uint8_t mio, uint8_t fno, uint32_t addr, uint16_t datalen, void *data)
{
	struct sdmmc_command cmd;
	bus_dma_segment_t ds[1];
	void *ptr = NULL;
	int rseg;
	int error = 0;

	if (ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
		/* Allocate, map and load a DMA-safe transfer buffer. */
		error = bus_dmamem_alloc(sc->sc_dmat, datalen, PAGE_SIZE, 0, ds,
		    1, &rseg, BUS_DMA_NOWAIT);
		if (error)
			goto out;
		error = bus_dmamem_map(sc->sc_dmat, ds, 1, datalen, &ptr,
		    BUS_DMA_NOWAIT);
		if (error)
			goto dmamem_free;
		error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmap, ptr, datalen,
		    NULL, BUS_DMA_NOWAIT|BUS_DMA_STREAMING|BUS_DMA_READ);
		if (error)
			goto dmamem_unmap;

		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap, 0, datalen,
		    BUS_DMASYNC_PREREAD);
	} else {
		/* PIO: the controller can transfer straight into 'data'. */
		ptr = data;
	}

	memset(&cmd, 0, sizeof(cmd));
	cmd.c_data = ptr;
	cmd.c_datalen = datalen;
	cmd.c_blklen = SDMMC_SECTOR_SIZE;
	cmd.c_opcode = SD_READ_EXTR_SINGLE;
	/* LEN field is encoded as (length - 1). */
	cmd.c_arg = __SHIFTIN((uint32_t)mio, SD_EXTR_MIO) |
		    __SHIFTIN((uint32_t)fno, SD_EXTR_FNO) |
		    __SHIFTIN(addr, SD_EXTR_ADDR) |
		    __SHIFTIN(datalen - 1, SD_EXTR_LEN);
	cmd.c_flags = SCF_CMD_ADTC | SCF_CMD_READ | SCF_RSP_R1;
	if (ISSET(sc->sc_caps, SMC_CAPS_DMA))
		cmd.c_dmamap = sc->sc_dmap;

	error = sdmmc_mmc_command(sc, &cmd);
	if (error == 0) {
		/* In PIO mode the data already landed in 'data'. */
		if (ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
			bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap, 0, datalen,
			    BUS_DMASYNC_POSTREAD);
			memcpy(data, ptr, datalen);
		}
#ifdef SDMMC_DEBUG
		sdmmc_dump_data("EXT", data, datalen);
#endif
	}

out:
	if (ptr != NULL) {
		/* PIO 'ptr' aliases caller memory: nothing to release. */
		if (ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
			bus_dmamap_unload(sc->sc_dmat, sc->sc_dmap);
dmamem_unmap:
			bus_dmamem_unmap(sc->sc_dmat, ptr, datalen);
dmamem_free:
			bus_dmamem_free(sc->sc_dmat, ds, rseg);
		}
	}
	return error;
}
1727
1728 static int
1729 sdmmc_mem_write_extr_single(struct sdmmc_softc *sc, struct sdmmc_function *sf,
1730 uint8_t mio, uint8_t fno, uint32_t addr, uint8_t value, bool poll)
1731 {
1732 struct sdmmc_command cmd;
1733 bus_dma_segment_t ds[1];
1734 uint8_t buf[512];
1735 uint16_t buflen = sizeof(buf);
1736 void *ptr = NULL;
1737 int rseg;
1738 int error = 0;
1739
1740 if (ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
1741 error = bus_dmamem_alloc(sc->sc_dmat, buflen, PAGE_SIZE, 0, ds,
1742 1, &rseg, BUS_DMA_NOWAIT);
1743 if (error)
1744 goto out;
1745 error = bus_dmamem_map(sc->sc_dmat, ds, 1, buflen, &ptr,
1746 BUS_DMA_NOWAIT);
1747 if (error)
1748 goto dmamem_free;
1749 error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmap, ptr, buflen,
1750 NULL, BUS_DMA_NOWAIT|BUS_DMA_STREAMING|BUS_DMA_WRITE);
1751 if (error)
1752 goto dmamem_unmap;
1753
1754 memset(ptr, 0, buflen);
1755 *(uint8_t *)ptr = value;
1756
1757 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap, 0, buflen,
1758 BUS_DMASYNC_PREWRITE);
1759 } else {
1760 buf[0] = value;
1761 ptr = buf;
1762 }
1763
1764 memset(&cmd, 0, sizeof(cmd));
1765 cmd.c_data = ptr;
1766 cmd.c_datalen = buflen;
1767 cmd.c_blklen = SDMMC_SECTOR_SIZE;
1768 cmd.c_opcode = SD_WRITE_EXTR_SINGLE;
1769 cmd.c_arg = __SHIFTIN((uint32_t)mio, SD_EXTR_MIO) |
1770 __SHIFTIN((uint32_t)fno, SD_EXTR_FNO) |
1771 __SHIFTIN(addr, SD_EXTR_ADDR) |
1772 __SHIFTIN(0, SD_EXTR_LEN);
1773 cmd.c_flags = SCF_CMD_ADTC | SCF_RSP_R1;
1774 if (ISSET(sc->sc_caps, SMC_CAPS_DMA))
1775 cmd.c_dmamap = sc->sc_dmap;
1776
1777 error = sdmmc_mmc_command(sc, &cmd);
1778 if (error == 0) {
1779 if (ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
1780 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap, 0, buflen,
1781 BUS_DMASYNC_POSTWRITE);
1782 }
1783 }
1784
1785 out:
1786 if (ptr != NULL) {
1787 if (ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
1788 bus_dmamap_unload(sc->sc_dmat, sc->sc_dmap);
1789 dmamem_unmap:
1790 bus_dmamem_unmap(sc->sc_dmat, ptr, buflen);
1791 dmamem_free:
1792 bus_dmamem_free(sc->sc_dmat, ds, rseg);
1793 }
1794 }
1795
1796 if (!error) {
1797 do {
1798 memset(&cmd, 0, sizeof(cmd));
1799 cmd.c_opcode = MMC_SEND_STATUS;
1800 cmd.c_arg = MMC_ARG_RCA(sf->rca);
1801 cmd.c_flags = SCF_CMD_AC | SCF_RSP_R1 | SCF_RSP_SPI_R2 |
1802 SCF_TOUT_OK;
1803 if (poll) {
1804 cmd.c_flags |= SCF_POLL;
1805 }
1806 error = sdmmc_mmc_command(sc, &cmd);
1807 if (error)
1808 break;
1809 /* XXX time out */
1810 } while (!ISSET(MMC_R1(cmd.c_resp), MMC_R1_READY_FOR_DATA));
1811
1812 if (error) {
1813 aprint_error_dev(sc->sc_dev,
1814 "error waiting for data ready after ext write : %d\n",
1815 error);
1816 }
1817 }
1818
1819 return error;
1820 }
1821
1822 static int
1823 sdmmc_set_bus_width(struct sdmmc_function *sf, int width)
1824 {
1825 struct sdmmc_softc *sc = sf->sc;
1826 struct sdmmc_command cmd;
1827 int error;
1828
1829 if (ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE))
1830 return ENODEV;
1831
1832 memset(&cmd, 0, sizeof(cmd));
1833 cmd.c_opcode = SD_APP_SET_BUS_WIDTH;
1834 cmd.c_flags = SCF_RSP_R1 | SCF_CMD_AC;
1835
1836 switch (width) {
1837 case 1:
1838 cmd.c_arg = SD_ARG_BUS_WIDTH_1;
1839 break;
1840
1841 case 4:
1842 cmd.c_arg = SD_ARG_BUS_WIDTH_4;
1843 break;
1844
1845 default:
1846 return EINVAL;
1847 }
1848
1849 error = sdmmc_app_command(sc, sf, &cmd);
1850 if (error == 0)
1851 error = sdmmc_chip_bus_width(sc->sc_sct, sc->sc_sch, width);
1852 return error;
1853 }
1854
1855 static int
1856 sdmmc_mem_sd_switch(struct sdmmc_function *sf, int mode, int group,
1857 int function, sdmmc_bitfield512_t *status)
1858 {
1859 struct sdmmc_softc *sc = sf->sc;
1860 struct sdmmc_command cmd;
1861 bus_dma_segment_t ds[1];
1862 void *ptr = NULL;
1863 int gsft, rseg, error = 0;
1864 const int statlen = 64;
1865
1866 if (sf->scr.sd_spec >= SCR_SD_SPEC_VER_1_10 &&
1867 !ISSET(sf->csd.ccc, SD_CSD_CCC_SWITCH))
1868 return EINVAL;
1869
1870 if (group <= 0 || group > 6 ||
1871 function < 0 || function > 15)
1872 return EINVAL;
1873
1874 gsft = (group - 1) << 2;
1875
1876 if (ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
1877 error = bus_dmamem_alloc(sc->sc_dmat, statlen, PAGE_SIZE, 0, ds,
1878 1, &rseg, BUS_DMA_NOWAIT);
1879 if (error)
1880 goto out;
1881 error = bus_dmamem_map(sc->sc_dmat, ds, 1, statlen, &ptr,
1882 BUS_DMA_NOWAIT);
1883 if (error)
1884 goto dmamem_free;
1885 error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmap, ptr, statlen,
1886 NULL, BUS_DMA_NOWAIT|BUS_DMA_STREAMING|BUS_DMA_READ);
1887 if (error)
1888 goto dmamem_unmap;
1889
1890 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap, 0, statlen,
1891 BUS_DMASYNC_PREREAD);
1892 } else {
1893 ptr = malloc(statlen, M_DEVBUF, M_NOWAIT | M_ZERO);
1894 if (ptr == NULL)
1895 goto out;
1896 }
1897
1898 memset(&cmd, 0, sizeof(cmd));
1899 cmd.c_data = ptr;
1900 cmd.c_datalen = statlen;
1901 cmd.c_blklen = statlen;
1902 cmd.c_opcode = SD_SEND_SWITCH_FUNC;
1903 cmd.c_arg = ((uint32_t)!!mode << 31) |
1904 (function << gsft) | (0x00ffffff & ~(0xf << gsft));
1905 cmd.c_flags = SCF_CMD_ADTC | SCF_CMD_READ | SCF_RSP_R1 | SCF_RSP_SPI_R1;
1906 if (ISSET(sc->sc_caps, SMC_CAPS_DMA))
1907 cmd.c_dmamap = sc->sc_dmap;
1908
1909 error = sdmmc_mmc_command(sc, &cmd);
1910 if (error == 0) {
1911 if (ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
1912 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap, 0, statlen,
1913 BUS_DMASYNC_POSTREAD);
1914 }
1915 memcpy(status, ptr, statlen);
1916 }
1917
1918 out:
1919 if (ptr != NULL) {
1920 if (ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
1921 bus_dmamap_unload(sc->sc_dmat, sc->sc_dmap);
1922 dmamem_unmap:
1923 bus_dmamem_unmap(sc->sc_dmat, ptr, statlen);
1924 dmamem_free:
1925 bus_dmamem_free(sc->sc_dmat, ds, rseg);
1926 } else {
1927 free(ptr, M_DEVBUF);
1928 }
1929 }
1930
1931 if (error == 0)
1932 sdmmc_be512_to_bitfield512(status);
1933
1934 return error;
1935 }
1936
/*
 * Issue an MMC CMD6 (SWITCH) to write one EXT_CSD byte: 'index' selects
 * the EXT_CSD field, 'value' the new contents, 'set' the command set.
 * For cache flushes and HS200+ timing changes, poll CMD13 until the
 * card reports ready and verify no SWITCH_ERROR was latched.  'poll'
 * selects polled (non-interrupt) command submission.  Returns 0 or an
 * errno value.
 */
static int
sdmmc_mem_mmc_switch(struct sdmmc_function *sf, uint8_t set, uint8_t index,
    uint8_t value, bool poll)
{
	struct sdmmc_softc *sc = sf->sc;
	struct sdmmc_command cmd;
	int error;

	memset(&cmd, 0, sizeof(cmd));
	cmd.c_opcode = MMC_SWITCH;
	cmd.c_arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
	    (index << 16) | (value << 8) | set;
	cmd.c_flags = SCF_RSP_SPI_R1B | SCF_RSP_R1B | SCF_CMD_AC;

	if (poll)
		cmd.c_flags |= SCF_POLL;

	error = sdmmc_mmc_command(sc, &cmd);
	if (error)
		return error;

	/* HS_TIMING >= 2 means HS200 or faster modes. */
	if (index == EXT_CSD_FLUSH_CACHE || (index == EXT_CSD_HS_TIMING && value >= 2)) {
		do {
			memset(&cmd, 0, sizeof(cmd));
			cmd.c_opcode = MMC_SEND_STATUS;
			if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE))
				cmd.c_arg = MMC_ARG_RCA(sf->rca);
			cmd.c_flags = SCF_CMD_AC | SCF_RSP_R1 | SCF_RSP_SPI_R2;
			if (poll)
				cmd.c_flags |= SCF_POLL;
			error = sdmmc_mmc_command(sc, &cmd);
			if (error)
				break;
			/* The card rejected the switch: fail hard. */
			if (ISSET(MMC_R1(cmd.c_resp), MMC_R1_SWITCH_ERROR)) {
				aprint_error_dev(sc->sc_dev, "switch error\n");
				return EINVAL;
			}
			/* XXX time out */
		} while (!ISSET(MMC_R1(cmd.c_resp), MMC_R1_READY_FOR_DATA));

		if (error) {
			aprint_error_dev(sc->sc_dev,
			    "error waiting for data ready after switch command: %d\n",
			    error);
			return error;
		}
	}

	return 0;
}
1987
1988 /*
1989 * SPI mode function
1990 */
1991 static int
1992 sdmmc_mem_spi_read_ocr(struct sdmmc_softc *sc, uint32_t hcs, uint32_t *card_ocr)
1993 {
1994 struct sdmmc_command cmd;
1995 int error;
1996
1997 memset(&cmd, 0, sizeof(cmd));
1998 cmd.c_opcode = MMC_READ_OCR;
1999 cmd.c_arg = hcs ? MMC_OCR_HCS : 0;
2000 cmd.c_flags = SCF_RSP_SPI_R3;
2001
2002 error = sdmmc_mmc_command(sc, &cmd);
2003 if (error == 0 && card_ocr != NULL)
2004 *card_ocr = cmd.c_resp[1];
2005 DPRINTF(("%s: sdmmc_mem_spi_read_ocr: error=%d, ocr=%#x\n",
2006 SDMMCDEVNAME(sc), error, cmd.c_resp[1]));
2007 return error;
2008 }
2009
2010 /*
2011 * read/write function
2012 */
2013 /* read */
2014 static int
2015 sdmmc_mem_single_read_block(struct sdmmc_function *sf, uint32_t blkno,
2016 u_char *data, size_t datalen)
2017 {
2018 struct sdmmc_softc *sc = sf->sc;
2019 int error = 0;
2020 int i;
2021
2022 KASSERT((datalen % SDMMC_SECTOR_SIZE) == 0);
2023 KASSERT(!ISSET(sc->sc_caps, SMC_CAPS_DMA));
2024
2025 for (i = 0; i < datalen / SDMMC_SECTOR_SIZE; i++) {
2026 error = sdmmc_mem_read_block_subr(sf, sc->sc_dmap, blkno + i,
2027 data + i * SDMMC_SECTOR_SIZE, SDMMC_SECTOR_SIZE);
2028 if (error)
2029 break;
2030 }
2031 return error;
2032 }
2033
2034 /*
2035 * Simulate multi-segment dma transfer.
2036 */
2037 static int
2038 sdmmc_mem_single_segment_dma_read_block(struct sdmmc_function *sf,
2039 uint32_t blkno, u_char *data, size_t datalen)
2040 {
2041 struct sdmmc_softc *sc = sf->sc;
2042 bool use_bbuf = false;
2043 int error = 0;
2044 int i;
2045
2046 for (i = 0; i < sc->sc_dmap->dm_nsegs; i++) {
2047 size_t len = sc->sc_dmap->dm_segs[i].ds_len;
2048 if ((len % SDMMC_SECTOR_SIZE) != 0) {
2049 use_bbuf = true;
2050 break;
2051 }
2052 }
2053 if (use_bbuf) {
2054 bus_dmamap_sync(sc->sc_dmat, sf->bbuf_dmap, 0, datalen,
2055 BUS_DMASYNC_PREREAD);
2056
2057 error = sdmmc_mem_read_block_subr(sf, sf->bbuf_dmap,
2058 blkno, data, datalen);
2059 if (error) {
2060 bus_dmamap_unload(sc->sc_dmat, sf->bbuf_dmap);
2061 return error;
2062 }
2063
2064 bus_dmamap_sync(sc->sc_dmat, sf->bbuf_dmap, 0, datalen,
2065 BUS_DMASYNC_POSTREAD);
2066
2067 /* Copy from bounce buffer */
2068 memcpy(data, sf->bbuf, datalen);
2069
2070 return 0;
2071 }
2072
2073 for (i = 0; i < sc->sc_dmap->dm_nsegs; i++) {
2074 size_t len = sc->sc_dmap->dm_segs[i].ds_len;
2075
2076 error = bus_dmamap_load(sc->sc_dmat, sf->sseg_dmap,
2077 data, len, NULL, BUS_DMA_NOWAIT|BUS_DMA_READ);
2078 if (error)
2079 return error;
2080
2081 bus_dmamap_sync(sc->sc_dmat, sf->sseg_dmap, 0, len,
2082 BUS_DMASYNC_PREREAD);
2083
2084 error = sdmmc_mem_read_block_subr(sf, sf->sseg_dmap,
2085 blkno, data, len);
2086 if (error) {
2087 bus_dmamap_unload(sc->sc_dmat, sf->sseg_dmap);
2088 return error;
2089 }
2090
2091 bus_dmamap_sync(sc->sc_dmat, sf->sseg_dmap, 0, len,
2092 BUS_DMASYNC_POSTREAD);
2093
2094 bus_dmamap_unload(sc->sc_dmat, sf->sseg_dmap);
2095
2096 blkno += len / SDMMC_SECTOR_SIZE;
2097 data += len;
2098 }
2099 return 0;
2100 }
2101
/*
 * Issue one read command (CMD17 or CMD18) for 'datalen' bytes starting
 * at sector 'blkno', transferring via 'dmap' when the host uses DMA.
 * Follows a multi-block read with CMD12 (stop) on hosts without
 * auto-stop, then polls CMD13 until the card is ready again (non-SPI).
 * Returns 0 or an errno value.
 */
static int
sdmmc_mem_read_block_subr(struct sdmmc_function *sf, bus_dmamap_t dmap,
    uint32_t blkno, u_char *data, size_t datalen)
{
	struct sdmmc_softc *sc = sf->sc;
	struct sdmmc_command cmd;
	int error;

	if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
		error = sdmmc_select_card(sc, sf);
		if (error)
			goto out;
	}

	memset(&cmd, 0, sizeof(cmd));
	cmd.c_data = data;
	cmd.c_datalen = datalen;
	cmd.c_blklen = SDMMC_SECTOR_SIZE;
	/* Multi-block read when more than one sector was requested. */
	cmd.c_opcode = (cmd.c_datalen / cmd.c_blklen) > 1 ?
	    MMC_READ_BLOCK_MULTIPLE : MMC_READ_BLOCK_SINGLE;
	cmd.c_arg = blkno;
	/* Standard-capacity cards are byte-addressed. */
	if (!ISSET(sf->flags, SFF_SDHC))
		cmd.c_arg <<= SDMMC_SECTOR_SIZE_SB;
	cmd.c_flags = SCF_CMD_ADTC | SCF_CMD_READ | SCF_RSP_R1 | SCF_RSP_SPI_R1;
	if (ISSET(sf->flags, SFF_SDHC))
		cmd.c_flags |= SCF_XFER_SDHC;
	if (ISSET(sc->sc_caps, SMC_CAPS_DMA))
		cmd.c_dmamap = dmap;

	sc->sc_ev_xfer.ev_count++;

	error = sdmmc_mmc_command(sc, &cmd);
	if (error) {
		sc->sc_ev_xfer_error.ev_count++;
		goto out;
	}

	/* Bucket power-of-two transfer sizes (2^9..2^16 bytes) for evcnt. */
	const u_int counter = __builtin_ctz(cmd.c_datalen);
	if (counter >= 9 && counter <= 16) {
		sc->sc_ev_xfer_aligned[counter - 9].ev_count++;
	} else {
		sc->sc_ev_xfer_unaligned.ev_count++;
	}

	if (!ISSET(sc->sc_caps, SMC_CAPS_AUTO_STOP)) {
		if (cmd.c_opcode == MMC_READ_BLOCK_MULTIPLE) {
			memset(&cmd, 0, sizeof cmd);
			cmd.c_opcode = MMC_STOP_TRANSMISSION;
			cmd.c_arg = MMC_ARG_RCA(sf->rca);
			cmd.c_flags = SCF_CMD_AC | SCF_RSP_R1B | SCF_RSP_SPI_R1B;
			error = sdmmc_mmc_command(sc, &cmd);
			if (error)
				goto out;
		}
	}

	if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
		/* Wait for the card to leave its busy state. */
		do {
			memset(&cmd, 0, sizeof(cmd));
			cmd.c_opcode = MMC_SEND_STATUS;
			if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE))
				cmd.c_arg = MMC_ARG_RCA(sf->rca);
			cmd.c_flags = SCF_CMD_AC | SCF_RSP_R1 | SCF_RSP_SPI_R2;
			error = sdmmc_mmc_command(sc, &cmd);
			if (error)
				break;
			/* XXX time out */
		} while (!ISSET(MMC_R1(cmd.c_resp), MMC_R1_READY_FOR_DATA));
	}

out:
	return error;
}
2175
/*
 * Public entry point: read 'datalen' bytes starting at sector 'blkno'
 * into 'data'.  Takes the controller lock and dispatches to the
 * single-block, PIO, bounce/segmented-DMA or direct-DMA path depending
 * on host capabilities.  Returns 0 or an errno value.
 */
int
sdmmc_mem_read_block(struct sdmmc_function *sf, uint32_t blkno, u_char *data,
    size_t datalen)
{
	struct sdmmc_softc *sc = sf->sc;
	int error;

	SDMMC_LOCK(sc);
	mutex_enter(&sc->sc_mtx);

	/* Host can only transfer one block per command. */
	if (ISSET(sc->sc_caps, SMC_CAPS_SINGLE_ONLY)) {
		error = sdmmc_mem_single_read_block(sf, blkno, data, datalen);
		goto out;
	}

	/* PIO host: no DMA map involved. */
	if (!ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
		error = sdmmc_mem_read_block_subr(sf, sc->sc_dmap, blkno, data,
		    datalen);
		goto out;
	}

	/* DMA transfer */
	error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmap, data, datalen, NULL,
	    BUS_DMA_NOWAIT|BUS_DMA_READ);
	if (error)
		goto out;

#ifdef SDMMC_DEBUG
	printf("data=%p, datalen=%zu\n", data, datalen);
	for (int i = 0; i < sc->sc_dmap->dm_nsegs; i++) {
		printf("seg#%d: addr=%#lx, size=%#lx\n", i,
		    (u_long)sc->sc_dmap->dm_segs[i].ds_addr,
		    (u_long)sc->sc_dmap->dm_segs[i].ds_len);
	}
#endif

	/* Hosts limited to one DMA segment get the simulated path. */
	if (sc->sc_dmap->dm_nsegs > 1
	    && !ISSET(sc->sc_caps, SMC_CAPS_MULTI_SEG_DMA)) {
		error = sdmmc_mem_single_segment_dma_read_block(sf, blkno,
		    data, datalen);
		goto unload;
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap, 0, datalen,
	    BUS_DMASYNC_PREREAD);

	error = sdmmc_mem_read_block_subr(sf, sc->sc_dmap, blkno, data,
	    datalen);
	if (error)
		goto unload;

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap, 0, datalen,
	    BUS_DMASYNC_POSTREAD);
unload:
	bus_dmamap_unload(sc->sc_dmat, sc->sc_dmap);

out:
	mutex_exit(&sc->sc_mtx);
	SDMMC_UNLOCK(sc);

	return error;
}
2238
2239 /* write */
2240 static int
2241 sdmmc_mem_single_write_block(struct sdmmc_function *sf, uint32_t blkno,
2242 u_char *data, size_t datalen)
2243 {
2244 struct sdmmc_softc *sc = sf->sc;
2245 int error = 0;
2246 int i;
2247
2248 KASSERT((datalen % SDMMC_SECTOR_SIZE) == 0);
2249 KASSERT(!ISSET(sc->sc_caps, SMC_CAPS_DMA));
2250
2251 for (i = 0; i < datalen / SDMMC_SECTOR_SIZE; i++) {
2252 error = sdmmc_mem_write_block_subr(sf, sc->sc_dmap, blkno + i,
2253 data + i * SDMMC_SECTOR_SIZE, SDMMC_SECTOR_SIZE);
2254 if (error)
2255 break;
2256 }
2257 return error;
2258 }
2259
2260 /*
2261 * Simulate multi-segment dma transfer.
2262 */
2263 static int
2264 sdmmc_mem_single_segment_dma_write_block(struct sdmmc_function *sf,
2265 uint32_t blkno, u_char *data, size_t datalen)
2266 {
2267 struct sdmmc_softc *sc = sf->sc;
2268 bool use_bbuf = false;
2269 int error = 0;
2270 int i;
2271
2272 for (i = 0; i < sc->sc_dmap->dm_nsegs; i++) {
2273 size_t len = sc->sc_dmap->dm_segs[i].ds_len;
2274 if ((len % SDMMC_SECTOR_SIZE) != 0) {
2275 use_bbuf = true;
2276 break;
2277 }
2278 }
2279 if (use_bbuf) {
2280 /* Copy to bounce buffer */
2281 memcpy(sf->bbuf, data, datalen);
2282
2283 bus_dmamap_sync(sc->sc_dmat, sf->bbuf_dmap, 0, datalen,
2284 BUS_DMASYNC_PREWRITE);
2285
2286 error = sdmmc_mem_write_block_subr(sf, sf->bbuf_dmap,
2287 blkno, data, datalen);
2288 if (error) {
2289 bus_dmamap_unload(sc->sc_dmat, sf->bbuf_dmap);
2290 return error;
2291 }
2292
2293 bus_dmamap_sync(sc->sc_dmat, sf->bbuf_dmap, 0, datalen,
2294 BUS_DMASYNC_POSTWRITE);
2295
2296 return 0;
2297 }
2298
2299 for (i = 0; i < sc->sc_dmap->dm_nsegs; i++) {
2300 size_t len = sc->sc_dmap->dm_segs[i].ds_len;
2301
2302 error = bus_dmamap_load(sc->sc_dmat, sf->sseg_dmap,
2303 data, len, NULL, BUS_DMA_NOWAIT|BUS_DMA_WRITE);
2304 if (error)
2305 return error;
2306
2307 bus_dmamap_sync(sc->sc_dmat, sf->sseg_dmap, 0, len,
2308 BUS_DMASYNC_PREWRITE);
2309
2310 error = sdmmc_mem_write_block_subr(sf, sf->sseg_dmap,
2311 blkno, data, len);
2312 if (error) {
2313 bus_dmamap_unload(sc->sc_dmat, sf->sseg_dmap);
2314 return error;
2315 }
2316
2317 bus_dmamap_sync(sc->sc_dmat, sf->sseg_dmap, 0, len,
2318 BUS_DMASYNC_POSTWRITE);
2319
2320 bus_dmamap_unload(sc->sc_dmat, sf->sseg_dmap);
2321
2322 blkno += len / SDMMC_SECTOR_SIZE;
2323 data += len;
2324 }
2325
2326 return error;
2327 }
2328
/*
 * Issue a single- or multi-block write of `datalen' bytes at sector
 * `blkno', using `dmap' when the host does DMA.  Caller holds sc_mtx
 * and has already loaded/synced the dmamap as required.
 */
static int
sdmmc_mem_write_block_subr(struct sdmmc_function *sf, bus_dmamap_t dmap,
    uint32_t blkno, u_char *data, size_t datalen)
{
	struct sdmmc_softc *sc = sf->sc;
	struct sdmmc_command cmd;
	int error;

	/* In SD/MMC bus mode the card must be selected first. */
	if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
		error = sdmmc_select_card(sc, sf);
		if (error)
			goto out;
	}

	const int nblk = howmany(datalen, SDMMC_SECTOR_SIZE);
	if (ISSET(sc->sc_flags, SMF_SD_MODE) && nblk > 1) {
		/* Set the number of write blocks to be pre-erased */
		memset(&cmd, 0, sizeof(cmd));
		cmd.c_opcode = SD_APP_SET_WR_BLK_ERASE_COUNT;
		cmd.c_flags = SCF_RSP_R1 | SCF_RSP_SPI_R1 | SCF_CMD_AC;
		cmd.c_arg = nblk;
		error = sdmmc_app_command(sc, sf, &cmd);
		if (error)
			goto out;
	}

	memset(&cmd, 0, sizeof(cmd));
	cmd.c_data = data;
	cmd.c_datalen = datalen;
	cmd.c_blklen = SDMMC_SECTOR_SIZE;
	cmd.c_opcode = (cmd.c_datalen / cmd.c_blklen) > 1 ?
	    MMC_WRITE_BLOCK_MULTIPLE : MMC_WRITE_BLOCK_SINGLE;
	/* Non-SDHC cards are byte-addressed: scale sector to byte offset. */
	cmd.c_arg = blkno;
	if (!ISSET(sf->flags, SFF_SDHC))
		cmd.c_arg <<= SDMMC_SECTOR_SIZE_SB;
	cmd.c_flags = SCF_CMD_ADTC | SCF_RSP_R1;
	if (ISSET(sf->flags, SFF_SDHC))
		cmd.c_flags |= SCF_XFER_SDHC;
	if (ISSET(sc->sc_caps, SMC_CAPS_DMA))
		cmd.c_dmamap = dmap;

	sc->sc_ev_xfer.ev_count++;

	error = sdmmc_mmc_command(sc, &cmd);
	if (error) {
		sc->sc_ev_xfer_error.ev_count++;
		goto out;
	}

	/* Bucket power-of-two sizes 2^9..2^16 (512B..64KB) for evcnt. */
	const u_int counter = __builtin_ctz(cmd.c_datalen);
	if (counter >= 9 && counter <= 16) {
		sc->sc_ev_xfer_aligned[counter - 9].ev_count++;
	} else {
		sc->sc_ev_xfer_unaligned.ev_count++;
	}

	/* Without host auto-stop, a multi-block write needs CMD12. */
	if (!ISSET(sc->sc_caps, SMC_CAPS_AUTO_STOP)) {
		if (cmd.c_opcode == MMC_WRITE_BLOCK_MULTIPLE) {
			memset(&cmd, 0, sizeof(cmd));
			cmd.c_opcode = MMC_STOP_TRANSMISSION;
			cmd.c_flags = SCF_CMD_AC | SCF_RSP_R1B | SCF_RSP_SPI_R1B;
			error = sdmmc_mmc_command(sc, &cmd);
			if (error)
				goto out;
		}
	}

	/* Poll SEND_STATUS until the card reports READY_FOR_DATA. */
	if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
		do {
			memset(&cmd, 0, sizeof(cmd));
			cmd.c_opcode = MMC_SEND_STATUS;
			if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE))
				cmd.c_arg = MMC_ARG_RCA(sf->rca);
			cmd.c_flags = SCF_CMD_AC | SCF_RSP_R1 | SCF_RSP_SPI_R2;
			error = sdmmc_mmc_command(sc, &cmd);
			if (error)
				break;
			/* XXX time out */
		} while (!ISSET(MMC_R1(cmd.c_resp), MMC_R1_READY_FOR_DATA));
	}

out:
	return error;
}
2413
/*
 * Write `datalen' bytes from `data' starting at sector `blkno',
 * choosing the single-block, PIO, or DMA strategy based on the host
 * controller's capabilities.  Serialized against other card traffic
 * by sc_mtx.
 */
int
sdmmc_mem_write_block(struct sdmmc_function *sf, uint32_t blkno, u_char *data,
    size_t datalen)
{
	struct sdmmc_softc *sc = sf->sc;
	int error;

	SDMMC_LOCK(sc);
	mutex_enter(&sc->sc_mtx);

	/* Refuse to write an SD card whose write-protect switch is set. */
	if (ISSET(sc->sc_flags, SMF_SD_MODE) &&
	    sdmmc_chip_write_protect(sc->sc_sct, sc->sc_sch)) {
		aprint_normal_dev(sc->sc_dev, "write-protected\n");
		error = EIO;
		goto out;
	}

	/* Host can only do one block per command: iterate per sector. */
	if (ISSET(sc->sc_caps, SMC_CAPS_SINGLE_ONLY)) {
		error = sdmmc_mem_single_write_block(sf, blkno, data, datalen);
		goto out;
	}

	/* PIO path: no dmamap load/sync required. */
	if (!ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
		error = sdmmc_mem_write_block_subr(sf, sc->sc_dmap, blkno, data,
		    datalen);
		goto out;
	}

	/* DMA transfer */
	error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmap, data, datalen, NULL,
	    BUS_DMA_NOWAIT|BUS_DMA_WRITE);
	if (error)
		goto out;

#ifdef SDMMC_DEBUG
	aprint_normal_dev(sc->sc_dev, "%s: data=%p, datalen=%zu\n",
	    __func__, data, datalen);
	for (int i = 0; i < sc->sc_dmap->dm_nsegs; i++) {
		aprint_normal_dev(sc->sc_dev,
		    "%s: seg#%d: addr=%#lx, size=%#lx\n", __func__, i,
		    (u_long)sc->sc_dmap->dm_segs[i].ds_addr,
		    (u_long)sc->sc_dmap->dm_segs[i].ds_len);
	}
#endif

	/*
	 * Buffer mapped to multiple segments but the host can only take
	 * one segment per command: fall back to the bounce/sub-segment
	 * helper, which manages its own maps and syncs.
	 */
	if (sc->sc_dmap->dm_nsegs > 1
	    && !ISSET(sc->sc_caps, SMC_CAPS_MULTI_SEG_DMA)) {
		error = sdmmc_mem_single_segment_dma_write_block(sf, blkno,
		    data, datalen);
		goto unload;
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap, 0, datalen,
	    BUS_DMASYNC_PREWRITE);

	error = sdmmc_mem_write_block_subr(sf, sc->sc_dmap, blkno, data,
	    datalen);
	if (error)
		goto unload;

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap, 0, datalen,
	    BUS_DMASYNC_POSTWRITE);
unload:
	bus_dmamap_unload(sc->sc_dmat, sc->sc_dmap);

out:
	mutex_exit(&sc->sc_mtx);
	SDMMC_UNLOCK(sc);

	return error;
}
2485
2486 int
2487 sdmmc_mem_discard(struct sdmmc_function *sf, uint32_t sblkno, uint32_t eblkno)
2488 {
2489 struct sdmmc_softc *sc = sf->sc;
2490 struct sdmmc_command cmd;
2491 int error;
2492
2493 if (ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE))
2494 return ENODEV; /* XXX not tested */
2495
2496 if (eblkno < sblkno)
2497 return EINVAL;
2498
2499 SDMMC_LOCK(sc);
2500 mutex_enter(&sc->sc_mtx);
2501
2502 /* Set the address of the first write block to be erased */
2503 memset(&cmd, 0, sizeof(cmd));
2504 cmd.c_opcode = ISSET(sc->sc_flags, SMF_SD_MODE) ?
2505 SD_ERASE_WR_BLK_START : MMC_TAG_ERASE_GROUP_START;
2506 cmd.c_arg = sblkno;
2507 if (!ISSET(sf->flags, SFF_SDHC))
2508 cmd.c_arg <<= SDMMC_SECTOR_SIZE_SB;
2509 cmd.c_flags = SCF_CMD_AC | SCF_RSP_R1;
2510 error = sdmmc_mmc_command(sc, &cmd);
2511 if (error)
2512 goto out;
2513
2514 /* Set the address of the last write block to be erased */
2515 memset(&cmd, 0, sizeof(cmd));
2516 cmd.c_opcode = ISSET(sc->sc_flags, SMF_SD_MODE) ?
2517 SD_ERASE_WR_BLK_END : MMC_TAG_ERASE_GROUP_END;
2518 cmd.c_arg = eblkno;
2519 if (!ISSET(sf->flags, SFF_SDHC))
2520 cmd.c_arg <<= SDMMC_SECTOR_SIZE_SB;
2521 cmd.c_flags = SCF_CMD_AC | SCF_RSP_R1;
2522 error = sdmmc_mmc_command(sc, &cmd);
2523 if (error)
2524 goto out;
2525
2526 /* Start the erase operation */
2527 memset(&cmd, 0, sizeof(cmd));
2528 cmd.c_opcode = MMC_ERASE;
2529 cmd.c_flags = SCF_CMD_AC | SCF_RSP_R1B;
2530 error = sdmmc_mmc_command(sc, &cmd);
2531 if (error)
2532 goto out;
2533
2534 out:
2535 mutex_exit(&sc->sc_mtx);
2536 SDMMC_UNLOCK(sc);
2537
2538 #ifdef SDMMC_DEBUG
2539 device_printf(sc->sc_dev, "discard blk %u-%u error %d\n",
2540 sblkno, eblkno, error);
2541 #endif
2542
2543 return error;
2544 }
2545
2546 int
2547 sdmmc_mem_flush_cache(struct sdmmc_function *sf, bool poll)
2548 {
2549 struct sdmmc_softc *sc = sf->sc;
2550 int error;
2551
2552 if (!ISSET(sf->flags, SFF_CACHE_ENABLED))
2553 return 0;
2554
2555 SDMMC_LOCK(sc);
2556 mutex_enter(&sc->sc_mtx);
2557
2558 if (ISSET(sc->sc_flags, SMF_SD_MODE)) {
2559 KASSERT(sf->ext_sd.pef.valid);
2560 error = sdmmc_mem_write_extr_single(sc, sf, SD_EXTR_MIO_MEM,
2561 sf->ext_sd.pef.fno,
2562 sf->ext_sd.pef.start_addr + SD_PEF_CACHE_FLUSH_OFFSET, 1,
2563 poll);
2564 if (error == 0) {
2565 uint8_t data[512];
2566
2567 error = sdmmc_mem_read_extr_single(sc, sf, SD_EXTR_MIO_MEM,
2568 sf->ext_sd.pef.fno, sf->ext_sd.pef.start_addr,
2569 sizeof(data), data);
2570 if (error == 0 && SD_PEF_CACHE_FLUSH(data) != 0) {
2571 device_printf(sc->sc_dev, "cache flush failed\n");
2572 }
2573 }
2574 } else {
2575 error = sdmmc_mem_mmc_switch(sf,
2576 EXT_CSD_CMD_SET_NORMAL, EXT_CSD_FLUSH_CACHE,
2577 EXT_CSD_FLUSH_CACHE_FLUSH, poll);
2578 }
2579
2580 mutex_exit(&sc->sc_mtx);
2581 SDMMC_UNLOCK(sc);
2582
2583 #ifdef SDMMC_DEBUG
2584 device_printf(sc->sc_dev, "flush cache error %d\n", error);
2585 #endif
2586
2587 return error;
2588 }
2589