/*	$NetBSD: sdmmc_mem.c,v 1.54 2017/02/17 10:50:43 nonaka Exp $	*/
2 /* $OpenBSD: sdmmc_mem.c,v 1.10 2009/01/09 10:55:22 jsg Exp $ */
3
4 /*
5 * Copyright (c) 2006 Uwe Stuehler <uwe (at) openbsd.org>
6 *
7 * Permission to use, copy, modify, and distribute this software for any
8 * purpose with or without fee is hereby granted, provided that the above
9 * copyright notice and this permission notice appear in all copies.
10 *
11 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18 */
19
20 /*-
21 * Copyright (C) 2007, 2008, 2009, 2010 NONAKA Kimihiro <nonaka (at) netbsd.org>
22 * All rights reserved.
23 *
24 * Redistribution and use in source and binary forms, with or without
25 * modification, are permitted provided that the following conditions
26 * are met:
27 * 1. Redistributions of source code must retain the above copyright
28 * notice, this list of conditions and the following disclaimer.
29 * 2. Redistributions in binary form must reproduce the above copyright
30 * notice, this list of conditions and the following disclaimer in the
31 * documentation and/or other materials provided with the distribution.
32 *
33 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
34 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
35 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
36 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
37 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
38 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
39 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
40 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
41 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
42 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
43 */
44
45 /* Routines for SD/MMC memory cards. */
46
47 #include <sys/cdefs.h>
48 __KERNEL_RCSID(0, "$NetBSD: sdmmc_mem.c,v 1.54 2017/02/17 10:50:43 nonaka Exp $");
49
50 #ifdef _KERNEL_OPT
51 #include "opt_sdmmc.h"
52 #endif
53
54 #include <sys/param.h>
55 #include <sys/kernel.h>
56 #include <sys/malloc.h>
57 #include <sys/systm.h>
58 #include <sys/device.h>
59 #include <sys/bitops.h>
60 #include <sys/evcnt.h>
61
62 #include <dev/sdmmc/sdmmcchip.h>
63 #include <dev/sdmmc/sdmmcreg.h>
64 #include <dev/sdmmc/sdmmcvar.h>
65
66 #ifdef SDMMC_DEBUG
67 #define DPRINTF(s) do { printf s; } while (/*CONSTCOND*/0)
68 #else
69 #define DPRINTF(s) do {} while (/*CONSTCOND*/0)
70 #endif
71
72 typedef struct { uint32_t _bits[512/32]; } __packed __aligned(4) sdmmc_bitfield512_t;
73
74 static int sdmmc_mem_sd_init(struct sdmmc_softc *, struct sdmmc_function *);
75 static int sdmmc_mem_mmc_init(struct sdmmc_softc *, struct sdmmc_function *);
76 static int sdmmc_mem_send_cid(struct sdmmc_softc *, sdmmc_response *);
77 static int sdmmc_mem_send_csd(struct sdmmc_softc *, struct sdmmc_function *,
78 sdmmc_response *);
79 static int sdmmc_mem_send_scr(struct sdmmc_softc *, struct sdmmc_function *,
80 uint32_t *scr);
81 static int sdmmc_mem_decode_scr(struct sdmmc_softc *, struct sdmmc_function *);
82 static int sdmmc_mem_send_cxd_data(struct sdmmc_softc *, int, void *, size_t);
83 static int sdmmc_set_bus_width(struct sdmmc_function *, int);
84 static int sdmmc_mem_sd_switch(struct sdmmc_function *, int, int, int, sdmmc_bitfield512_t *);
85 static int sdmmc_mem_mmc_switch(struct sdmmc_function *, uint8_t, uint8_t,
86 uint8_t);
87 static int sdmmc_mem_signal_voltage(struct sdmmc_softc *, int);
88 static int sdmmc_mem_spi_read_ocr(struct sdmmc_softc *, uint32_t, uint32_t *);
89 static int sdmmc_mem_single_read_block(struct sdmmc_function *, uint32_t,
90 u_char *, size_t);
91 static int sdmmc_mem_single_write_block(struct sdmmc_function *, uint32_t,
92 u_char *, size_t);
93 static int sdmmc_mem_single_segment_dma_read_block(struct sdmmc_function *,
94 uint32_t, u_char *, size_t);
95 static int sdmmc_mem_single_segment_dma_write_block(struct sdmmc_function *,
96 uint32_t, u_char *, size_t);
97 static int sdmmc_mem_read_block_subr(struct sdmmc_function *, bus_dmamap_t,
98 uint32_t, u_char *, size_t);
99 static int sdmmc_mem_write_block_subr(struct sdmmc_function *, bus_dmamap_t,
100 uint32_t, u_char *, size_t);
101
/*
 * CMD6 (SWITCH_FUNC) group 1 "access mode" functions, indexed by the
 * SD_ACCESS_MODE_* function number.  "v" is the host capability bit
 * required to use the mode (0 = always usable); "freq" is the maximum
 * bus clock for that mode in kHz.
 */
static const struct {
	const char *name;
	int v;
	int freq;
} switch_group0_functions[] = {
	/* Default/SDR12 */
	{ "Default/SDR12",	0,			25000 },

	/* High-Speed/SDR25 */
	{ "High-Speed/SDR25",	SMC_CAPS_SD_HIGHSPEED,	50000 },

	/* SDR50 */
	{ "SDR50",		SMC_CAPS_UHS_SDR50,	100000 },

	/* SDR104 */
	{ "SDR104",		SMC_CAPS_UHS_SDR104,	208000 },

	/* DDR50 */
	{ "DDR50",		SMC_CAPS_UHS_DDR50,	50000 },
};
122
/*
 * Initialize SD/MMC memory cards and memory in SDIO "combo" cards:
 * distinguish SD from MMC, negotiate the operating voltage via the
 * OCR, and (SD only) start the switch to 1.8V UHS signalling when
 * both card and host advertise support.
 *
 * Returns 0 on success; on failure the error is also logged to the
 * console.
 */
int
sdmmc_mem_enable(struct sdmmc_softc *sc)
{
	uint32_t host_ocr;	/* voltage window reported by the host */
	uint32_t card_ocr;	/* voltage window reported by the card */
	uint32_t new_ocr;	/* OCR from the final op-cond exchange */
	uint32_t ocr = 0;	/* extra OCR bits we request (HCS/S18A/...) */
	int error;

	SDMMC_LOCK(sc);

	/* Set host mode to SD "combo" card or SD memory-only. */
	CLR(sc->sc_flags, SMF_UHS_MODE);
	SET(sc->sc_flags, SMF_SD_MODE|SMF_MEM_MODE);

	if (ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE))
		sdmmc_spi_chip_initialize(sc->sc_spi_sct, sc->sc_sch);

	/* Reset memory (*must* do that before CMD55 or CMD1). */
	sdmmc_go_idle_state(sc);

	if (ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
		/*
		 * Check SD Ver.2: CMD8 with 0x1aa (2.7-3.6V window plus
		 * 0xaa check pattern); a matching echo means a Ver.2
		 * card, so request high capacity (HCS) support.
		 */
		error = sdmmc_mem_send_if_cond(sc, 0x1aa, &card_ocr);
		if (error == 0 && card_ocr == 0x1aa)
			SET(ocr, MMC_OCR_HCS);
	}

	/*
	 * Read the SD/MMC memory OCR value by issuing CMD55 followed
	 * by ACMD41 to read the OCR value from memory-only SD cards.
	 * MMC cards will not respond to CMD55 or ACMD41 and this is
	 * how we distinguish them from SD cards.
	 */
mmc_mode:
	error = sdmmc_mem_send_op_cond(sc,
	    ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE) ? ocr : 0, &card_ocr);
	if (error) {
		if (ISSET(sc->sc_flags, SMF_SD_MODE) &&
		    !ISSET(sc->sc_flags, SMF_IO_MODE)) {
			/* Not a SD card, switch to MMC mode. */
			DPRINTF(("%s: switch to MMC mode\n", SDMMCDEVNAME(sc)));
			CLR(sc->sc_flags, SMF_SD_MODE);
			goto mmc_mode;
		}
		if (!ISSET(sc->sc_flags, SMF_SD_MODE)) {
			DPRINTF(("%s: couldn't read memory OCR\n",
			    SDMMCDEVNAME(sc)));
			goto out;
		} else {
			/* Not a "combo" card: I/O only, no memory. */
			CLR(sc->sc_flags, SMF_MEM_MODE);
			error = 0;
			goto out;
		}
	}
	if (ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
		/* get card OCR (CMD58 equivalent in SPI mode) */
		error = sdmmc_mem_spi_read_ocr(sc, ocr, &card_ocr);
		if (error) {
			DPRINTF(("%s: couldn't read SPI memory OCR\n",
			    SDMMCDEVNAME(sc)));
			goto out;
		}
	}

	/* Set the lowest voltage supported by the card and host. */
	host_ocr = sdmmc_chip_host_ocr(sc->sc_sct, sc->sc_sch);
	error = sdmmc_set_bus_power(sc, host_ocr, card_ocr);
	if (error) {
		DPRINTF(("%s: couldn't supply voltage requested by card\n",
		    SDMMCDEVNAME(sc)));
		goto out;
	}

	DPRINTF(("%s: host_ocr 0x%08x\n", SDMMCDEVNAME(sc), host_ocr));
	DPRINTF(("%s: card_ocr 0x%08x\n", SDMMCDEVNAME(sc), card_ocr));

	host_ocr &= card_ocr;	/* only allow the common voltages */
	if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
		if (ISSET(sc->sc_flags, SMF_SD_MODE)) {
			/* Tell the card(s) to enter the idle state (again). */
			sdmmc_go_idle_state(sc);
			/* Check SD Ver.2 (see the SPI-mode probe above). */
			error = sdmmc_mem_send_if_cond(sc, 0x1aa, &card_ocr);
			if (error == 0 && card_ocr == 0x1aa)
				SET(ocr, MMC_OCR_HCS);

			/* Ask for 1.8V signalling if the host can do it. */
			if (sdmmc_chip_host_ocr(sc->sc_sct, sc->sc_sch) & MMC_OCR_S18A)
				SET(ocr, MMC_OCR_S18A);
		} else {
			SET(ocr, MMC_OCR_ACCESS_MODE_SECTOR);
		}
	}
	host_ocr |= ocr;

	/* Send the new OCR value until all cards are ready. */
	error = sdmmc_mem_send_op_cond(sc, host_ocr, &new_ocr);
	if (error) {
		DPRINTF(("%s: couldn't send memory OCR\n", SDMMCDEVNAME(sc)));
		goto out;
	}

	if (ISSET(sc->sc_flags, SMF_SD_MODE) && ISSET(new_ocr, MMC_OCR_S18A)) {
		/*
		 * Card and host support low voltage mode, begin switch
		 * sequence: CMD11 (SD_VOLTAGE_SWITCH), then the host-side
		 * voltage change in sdmmc_mem_signal_voltage().
		 */
		struct sdmmc_command cmd;
		memset(&cmd, 0, sizeof(cmd));
		cmd.c_arg = 0;
		cmd.c_flags = SCF_CMD_AC | SCF_RSP_R1;
		cmd.c_opcode = SD_VOLTAGE_SWITCH;
		DPRINTF(("%s: switching card to 1.8V\n", SDMMCDEVNAME(sc)));
		error = sdmmc_mmc_command(sc, &cmd);
		if (error) {
			DPRINTF(("%s: voltage switch command failed\n",
			    SDMMCDEVNAME(sc)));
			goto out;
		}

		error = sdmmc_mem_signal_voltage(sc, SDMMC_SIGNAL_VOLTAGE_180);
		if (error)
			goto out;

		SET(sc->sc_flags, SMF_UHS_MODE);
	}

out:
	SDMMC_UNLOCK(sc);

	if (error)
		printf("%s: %s failed with error %d\n", SDMMCDEVNAME(sc),
		    __func__, error);

	return error;
}
263
264 static int
265 sdmmc_mem_signal_voltage(struct sdmmc_softc *sc, int signal_voltage)
266 {
267 int error;
268
269 /*
270 * Stop the clock
271 */
272 error = sdmmc_chip_bus_clock(sc->sc_sct, sc->sc_sch,
273 SDMMC_SDCLK_OFF, false);
274 if (error)
275 goto out;
276
277 delay(1000);
278
279 /*
280 * Card switch command was successful, update host controller
281 * signal voltage setting.
282 */
283 DPRINTF(("%s: switching host to %s\n", SDMMCDEVNAME(sc),
284 signal_voltage == SDMMC_SIGNAL_VOLTAGE_180 ? "1.8V" : "3.3V"));
285 error = sdmmc_chip_signal_voltage(sc->sc_sct,
286 sc->sc_sch, signal_voltage);
287 if (error)
288 goto out;
289
290 delay(5000);
291
292 /*
293 * Switch to SDR12 timing
294 */
295 error = sdmmc_chip_bus_clock(sc->sc_sct, sc->sc_sch, 25000,
296 false);
297 if (error)
298 goto out;
299
300 delay(1000);
301
302 out:
303 return error;
304 }
305
/*
 * Read the CSD and CID from all cards and assign each card a unique
 * relative card address (RCA). CMD2 is ignored by SDIO-only cards.
 * Cards whose CSD/CID cannot be read or decoded stay on the list but
 * are marked SFF_ERROR.
 */
void
sdmmc_mem_scan(struct sdmmc_softc *sc)
{
	sdmmc_response resp;
	struct sdmmc_function *sf;
	uint16_t next_rca;
	int error;
	int retry;

	SDMMC_LOCK(sc);

	/*
	 * CMD2 is a broadcast command understood by SD cards and MMC
	 * cards.  All cards begin to respond to the command, but back
	 * off if another card drives the CMD line to a different level.
	 * Only one card will get its entire response through.  That
	 * card remains silent once it has been assigned a RCA.
	 */
	for (retry = 0; retry < 100; retry++) {
		error = sdmmc_mem_send_cid(sc, &resp);
		if (error) {
			if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE) &&
			    error == ETIMEDOUT) {
				/* No more cards there. */
				break;
			}
			DPRINTF(("%s: couldn't read CID\n", SDMMCDEVNAME(sc)));
			break;
		}

		/* In MMC mode, find the next available RCA. */
		next_rca = 1;
		if (!ISSET(sc->sc_flags, SMF_SD_MODE)) {
			/* one RCA per card already on the list, plus one */
			SIMPLEQ_FOREACH(sf, &sc->sf_head, sf_list)
				next_rca++;
		}

		/* Allocate a sdmmc_function structure. */
		sf = sdmmc_function_alloc(sc);
		sf->rca = next_rca;

		/*
		 * Remember the CID returned in the CMD2 response for
		 * later decoding.
		 */
		memcpy(sf->raw_cid, resp, sizeof(sf->raw_cid));

		/*
		 * Silence the card by assigning it a unique RCA, or
		 * querying it for its RCA in the case of SD.
		 */
		if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
			if (sdmmc_set_relative_addr(sc, sf) != 0) {
				aprint_error_dev(sc->sc_dev,
				    "couldn't set mem RCA\n");
				sdmmc_function_free(sf);
				break;
			}
		}

		/*
		 * If this is a memory-only card, the card responding
		 * first becomes an alias for SDIO function 0.
		 */
		if (sc->sc_fn0 == NULL)
			sc->sc_fn0 = sf;

		SIMPLEQ_INSERT_TAIL(&sc->sf_head, sf, sf_list);

		/* only one function in SPI mode */
		if (ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE))
			break;
	}

	if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE))
		/* Go to Data Transfer Mode, if possible. */
		sdmmc_chip_bus_rod(sc->sc_sct, sc->sc_sch, 0);

	/*
	 * All cards are either inactive or awaiting further commands.
	 * Read the CSDs and decode the raw CID for each card.
	 */
	SIMPLEQ_FOREACH(sf, &sc->sf_head, sf_list) {
		error = sdmmc_mem_send_csd(sc, sf, &resp);
		if (error) {
			SET(sf->flags, SFF_ERROR);
			continue;
		}

		if (sdmmc_decode_csd(sc, resp, sf) != 0 ||
		    sdmmc_decode_cid(sc, sf->raw_cid, sf) != 0) {
			SET(sf->flags, SFF_ERROR);
			continue;
		}

#ifdef SDMMC_DEBUG
		printf("%s: CID: ", SDMMCDEVNAME(sc));
		sdmmc_print_cid(&sf->cid);
#endif
	}

	SDMMC_UNLOCK(sc);
}
413
/*
 * Decode the raw CSD register (in "resp") into sf->csd, handling the
 * SD (v1.0 / v2.0) and MMC layouts separately.  Returns 0 on success,
 * 1 if the CSD structure version is not recognized.
 */
int
sdmmc_decode_csd(struct sdmmc_softc *sc, sdmmc_response resp,
    struct sdmmc_function *sf)
{
	/* TRAN_SPEED(2:0): transfer rate exponent */
	static const int speed_exponent[8] = {
		100 * 1,	/* 100 Kbits/s */
		1 * 1000,	/* 1 Mbits/s */
		10 * 1000,	/* 10 Mbits/s */
		100 * 1000,	/* 100 Mbits/s */
		0,
		0,
		0,
		0,
	};
	/* TRAN_SPEED(6:3): time mantissa, scaled by 10 */
	static const int speed_mantissa[16] = {
		0, 10, 12, 13, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60, 70, 80,
	};
	struct sdmmc_csd *csd = &sf->csd;
	int e, m;

	if (ISSET(sc->sc_flags, SMF_SD_MODE)) {
		/*
		 * CSD version 1.0 corresponds to SD system
		 * specification version 1.0 - 1.10. (SanDisk, 3.5.3)
		 */
		csd->csdver = SD_CSD_CSDVER(resp);
		switch (csd->csdver) {
		case SD_CSD_CSDVER_2_0:
			/* SDHC/SDXC: capacity field with fixed block size */
			DPRINTF(("%s: SD Ver.2.0\n", SDMMCDEVNAME(sc)));
			SET(sf->flags, SFF_SDHC);
			csd->capacity = SD_CSD_V2_CAPACITY(resp);
			csd->read_bl_len = SD_CSD_V2_BL_LEN;
			break;

		case SD_CSD_CSDVER_1_0:
			DPRINTF(("%s: SD Ver.1.0\n", SDMMCDEVNAME(sc)));
			csd->capacity = SD_CSD_CAPACITY(resp);
			csd->read_bl_len = SD_CSD_READ_BL_LEN(resp);
			break;

		default:
			aprint_error_dev(sc->sc_dev,
			    "unknown SD CSD structure version 0x%x\n",
			    csd->csdver);
			return 1;
		}

		csd->mmcver = SD_CSD_MMCVER(resp);
		csd->write_bl_len = SD_CSD_WRITE_BL_LEN(resp);
		csd->r2w_factor = SD_CSD_R2W_FACTOR(resp);
		e = SD_CSD_SPEED_EXP(resp);
		m = SD_CSD_SPEED_MANT(resp);
		/* mantissa table is scaled by 10; divide it back out */
		csd->tran_speed = speed_exponent[e] * speed_mantissa[m] / 10;
		csd->ccc = SD_CSD_CCC(resp);
	} else {
		csd->csdver = MMC_CSD_CSDVER(resp);
		if (csd->csdver == MMC_CSD_CSDVER_1_0) {
			aprint_error_dev(sc->sc_dev,
			    "unknown MMC CSD structure version 0x%x\n",
			    csd->csdver);
			return 1;
		}

		csd->mmcver = MMC_CSD_MMCVER(resp);
		csd->capacity = MMC_CSD_CAPACITY(resp);
		csd->read_bl_len = MMC_CSD_READ_BL_LEN(resp);
		csd->write_bl_len = MMC_CSD_WRITE_BL_LEN(resp);
		csd->r2w_factor = MMC_CSD_R2W_FACTOR(resp);
		e = MMC_CSD_TRAN_SPEED_EXP(resp);
		m = MMC_CSD_TRAN_SPEED_MANT(resp);
		csd->tran_speed = speed_exponent[e] * speed_mantissa[m] / 10;
	}
	/* Normalize capacity to SDMMC_SECTOR_SIZE (512-byte) units. */
	if ((1 << csd->read_bl_len) > SDMMC_SECTOR_SIZE)
		csd->capacity *= (1 << csd->read_bl_len) / SDMMC_SECTOR_SIZE;

#ifdef SDMMC_DUMP_CSD
	sdmmc_print_csd(resp, csd);
#endif

	return 0;
}
497
/*
 * Decode the raw CID register (in "resp") into sf->cid.  The MMC
 * layout depends on the MMC version already decoded into sf->csd, so
 * sdmmc_decode_csd() must have run first.  Returns 0 on success, 1
 * for an unknown MMC version.
 */
int
sdmmc_decode_cid(struct sdmmc_softc *sc, sdmmc_response resp,
    struct sdmmc_function *sf)
{
	struct sdmmc_cid *cid = &sf->cid;

	if (ISSET(sc->sc_flags, SMF_SD_MODE)) {
		cid->mid = SD_CID_MID(resp);
		cid->oid = SD_CID_OID(resp);
		SD_CID_PNM_CPY(resp, cid->pnm);
		cid->rev = SD_CID_REV(resp);
		cid->psn = SD_CID_PSN(resp);
		cid->mdt = SD_CID_MDT(resp);
	} else {
		switch(sf->csd.mmcver) {
		case MMC_CSD_MMCVER_1_0:
		case MMC_CSD_MMCVER_1_4:
			/* V1 layout has no OID field decoded here */
			cid->mid = MMC_CID_MID_V1(resp);
			MMC_CID_PNM_V1_CPY(resp, cid->pnm);
			cid->rev = MMC_CID_REV_V1(resp);
			cid->psn = MMC_CID_PSN_V1(resp);
			cid->mdt = MMC_CID_MDT_V1(resp);
			break;
		case MMC_CSD_MMCVER_2_0:
		case MMC_CSD_MMCVER_3_1:
		case MMC_CSD_MMCVER_4_0:
			/* NOTE(review): rev/mdt are left unset for V2+ CIDs */
			cid->mid = MMC_CID_MID_V2(resp);
			cid->oid = MMC_CID_OID_V2(resp);
			MMC_CID_PNM_V2_CPY(resp, cid->pnm);
			cid->psn = MMC_CID_PSN_V2(resp);
			break;
		default:
			aprint_error_dev(sc->sc_dev, "unknown MMC version %d\n",
			    sf->csd.mmcver);
			return 1;
		}
	}
	return 0;
}
537
/*
 * Print the decoded CID fields on a single console line (used by the
 * SDMMC_DEBUG path in sdmmc_mem_scan()).
 */
void
sdmmc_print_cid(struct sdmmc_cid *cid)
{

	printf("mid=0x%02x oid=0x%04x pnm=\"%s\" rev=0x%02x psn=0x%08x"
	    " mdt=%03x\n", cid->mid, cid->oid, cid->pnm, cid->rev, cid->psn,
	    cid->mdt);
}
546
#ifdef SDMMC_DUMP_CSD
/*
 * Debug helper: dump each decoded CSD field on its own line.  "resp"
 * is accepted alongside the decoded structure but not used here.
 */
void
sdmmc_print_csd(sdmmc_response resp, struct sdmmc_csd *csd)
{

	printf("csdver = %d\n", csd->csdver);
	printf("mmcver = %d\n", csd->mmcver);
	printf("capacity = 0x%08x\n", csd->capacity);
	printf("read_bl_len = %d\n", csd->read_bl_len);
	printf("write_bl_len = %d\n", csd->write_bl_len);
	printf("r2w_factor = %d\n", csd->r2w_factor);
	printf("tran_speed = %d\n", csd->tran_speed);
	printf("ccc = 0x%x\n", csd->ccc);
}
#endif
562
563 /*
564 * Initialize a SD/MMC memory card.
565 */
566 int
567 sdmmc_mem_init(struct sdmmc_softc *sc, struct sdmmc_function *sf)
568 {
569 int error = 0;
570
571 SDMMC_LOCK(sc);
572
573 if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
574 error = sdmmc_select_card(sc, sf);
575 if (error)
576 goto out;
577 }
578
579 error = sdmmc_mem_set_blocklen(sc, sf, SDMMC_SECTOR_SIZE);
580 if (error)
581 goto out;
582
583 if (ISSET(sc->sc_flags, SMF_SD_MODE))
584 error = sdmmc_mem_sd_init(sc, sf);
585 else
586 error = sdmmc_mem_mmc_init(sc, sf);
587
588 out:
589 SDMMC_UNLOCK(sc);
590
591 return error;
592 }
593
/*
 * Get or set the card's memory OCR value (SD or MMC) via ACMD41 (SD)
 * or CMD1 (MMC).  With a non-zero "ocr" the command is retried until
 * the card reports ready; with ocr == 0 it is a plain OCR query.  On
 * success the response OCR is stored through "ocrp" (non-SPI only).
 */
int
sdmmc_mem_send_op_cond(struct sdmmc_softc *sc, uint32_t ocr, uint32_t *ocrp)
{
	struct sdmmc_command cmd;
	int error;
	int retry;

	/* Don't lock */

	DPRINTF(("%s: sdmmc_mem_send_op_cond: ocr=%#x\n",
	    SDMMCDEVNAME(sc), ocr));

	/*
	 * If we change the OCR value, retry the command until the OCR
	 * we receive in response has the "CARD BUSY" bit set, meaning
	 * that all cards are ready for identification.
	 */
	for (retry = 0; retry < 100; retry++) {
		memset(&cmd, 0, sizeof(cmd));
		/* In SPI mode only the HCS bit may be passed down. */
		cmd.c_arg = !ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE) ?
		    ocr : (ocr & MMC_OCR_HCS);
		/* SCF_TOUT_OK: a timeout here is an expected probe result. */
		cmd.c_flags = SCF_CMD_BCR | SCF_RSP_R3 | SCF_RSP_SPI_R1
		    | SCF_TOUT_OK;

		if (ISSET(sc->sc_flags, SMF_SD_MODE)) {
			cmd.c_opcode = SD_APP_OP_COND;
			error = sdmmc_app_command(sc, NULL, &cmd);
		} else {
			cmd.c_opcode = MMC_SEND_OP_COND;
			error = sdmmc_mmc_command(sc, &cmd);
		}
		if (error)
			break;

		/* Ready when SPI leaves idle, or OCR busy bit is set. */
		if (ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
			if (!ISSET(MMC_SPI_R1(cmd.c_resp), R1_SPI_IDLE))
				break;
		} else {
			if (ISSET(MMC_R3(cmd.c_resp), MMC_OCR_MEM_READY) ||
			    ocr == 0)
				break;
		}

		error = ETIMEDOUT;
		sdmmc_delay(10000);
	}
	if (error == 0 &&
	    ocrp != NULL &&
	    !ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE))
		*ocrp = MMC_R3(cmd.c_resp);
	DPRINTF(("%s: sdmmc_mem_send_op_cond: error=%d, ocr=%#x\n",
	    SDMMCDEVNAME(sc), error, MMC_R3(cmd.c_resp)));
	return error;
}
651
652 int
653 sdmmc_mem_send_if_cond(struct sdmmc_softc *sc, uint32_t ocr, uint32_t *ocrp)
654 {
655 struct sdmmc_command cmd;
656 int error;
657
658 /* Don't lock */
659
660 memset(&cmd, 0, sizeof(cmd));
661 cmd.c_arg = ocr;
662 cmd.c_flags = SCF_CMD_BCR | SCF_RSP_R7 | SCF_RSP_SPI_R7;
663 cmd.c_opcode = SD_SEND_IF_COND;
664
665 error = sdmmc_mmc_command(sc, &cmd);
666 if (error == 0 && ocrp != NULL) {
667 if (ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
668 *ocrp = MMC_SPI_R7(cmd.c_resp);
669 } else {
670 *ocrp = MMC_R7(cmd.c_resp);
671 }
672 DPRINTF(("%s: sdmmc_mem_send_if_cond: error=%d, ocr=%#x\n",
673 SDMMCDEVNAME(sc), error, *ocrp));
674 }
675 return error;
676 }
677
678 /*
679 * Set the read block length appropriately for this card, according to
680 * the card CSD register value.
681 */
682 int
683 sdmmc_mem_set_blocklen(struct sdmmc_softc *sc, struct sdmmc_function *sf,
684 int block_len)
685 {
686 struct sdmmc_command cmd;
687 int error;
688
689 /* Don't lock */
690
691 memset(&cmd, 0, sizeof(cmd));
692 cmd.c_opcode = MMC_SET_BLOCKLEN;
693 cmd.c_arg = block_len;
694 cmd.c_flags = SCF_CMD_AC | SCF_RSP_R1 | SCF_RSP_SPI_R1;
695
696 error = sdmmc_mmc_command(sc, &cmd);
697
698 DPRINTF(("%s: sdmmc_mem_set_blocklen: read_bl_len=%d sector_size=%d\n",
699 SDMMCDEVNAME(sc), 1 << sf->csd.read_bl_len, block_len));
700
701 return error;
702 }
703
704 /* make 512-bit BE quantity __bitfield()-compatible */
705 static void
706 sdmmc_be512_to_bitfield512(sdmmc_bitfield512_t *buf) {
707 size_t i;
708 uint32_t tmp0, tmp1;
709 const size_t bitswords = __arraycount(buf->_bits);
710 for (i = 0; i < bitswords/2; i++) {
711 tmp0 = buf->_bits[i];
712 tmp1 = buf->_bits[bitswords - 1 - i];
713 buf->_bits[i] = be32toh(tmp1);
714 buf->_bits[bitswords - 1 - i] = be32toh(tmp0);
715 }
716 }
717
718 static int
719 sdmmc_mem_select_transfer_mode(struct sdmmc_softc *sc, int support_func)
720 {
721 if (ISSET(sc->sc_flags, SMF_UHS_MODE)) {
722 if (ISSET(sc->sc_caps, SMC_CAPS_UHS_SDR104) &&
723 ISSET(support_func, 1 << SD_ACCESS_MODE_SDR104)) {
724 return SD_ACCESS_MODE_SDR104;
725 }
726 if (ISSET(sc->sc_caps, SMC_CAPS_UHS_DDR50) &&
727 ISSET(support_func, 1 << SD_ACCESS_MODE_DDR50)) {
728 return SD_ACCESS_MODE_DDR50;
729 }
730 if (ISSET(sc->sc_caps, SMC_CAPS_UHS_SDR50) &&
731 ISSET(support_func, 1 << SD_ACCESS_MODE_SDR50)) {
732 return SD_ACCESS_MODE_SDR50;
733 }
734 }
735 if (ISSET(sc->sc_caps, SMC_CAPS_SD_HIGHSPEED) &&
736 ISSET(support_func, 1 << SD_ACCESS_MODE_SDR25)) {
737 return SD_ACCESS_MODE_SDR25;
738 }
739 return SD_ACCESS_MODE_SDR12;
740 }
741
742 static int
743 sdmmc_mem_execute_tuning(struct sdmmc_softc *sc, struct sdmmc_function *sf)
744 {
745 int timing = -1;
746
747 if (!ISSET(sc->sc_flags, SMF_UHS_MODE))
748 return 0;
749
750 if (ISSET(sc->sc_flags, SMF_SD_MODE)) {
751 if (!ISSET(sc->sc_flags, SMF_UHS_MODE))
752 return 0;
753
754 switch (sf->csd.tran_speed) {
755 case 100000:
756 timing = SDMMC_TIMING_UHS_SDR50;
757 break;
758 case 208000:
759 timing = SDMMC_TIMING_UHS_SDR104;
760 break;
761 default:
762 return 0;
763 }
764 } else {
765 switch (sf->csd.tran_speed) {
766 case 200000:
767 timing = SDMMC_TIMING_MMC_HS200;
768 break;
769 default:
770 return 0;
771 }
772 }
773
774 DPRINTF(("%s: execute tuning for timing %d\n", SDMMCDEVNAME(sc),
775 timing));
776
777 return sdmmc_chip_execute_tuning(sc->sc_sct, sc->sc_sch, timing);
778 }
779
/*
 * SD-specific card setup: set the bus clock, read and decode the SCR,
 * widen the bus to 4 bits when possible, use CMD6 (SWITCH_FUNC) to
 * pick the fastest supported access mode, and finally run UHS tuning
 * when required.  Returns 0 on success.
 */
static int
sdmmc_mem_sd_init(struct sdmmc_softc *sc, struct sdmmc_function *sf)
{
	int support_func, best_func, bus_clock, error, i;
	sdmmc_bitfield512_t status;	/* Switch Function Status */
	bool ddr = false;

	/* change bus clock */
	bus_clock = min(sc->sc_busclk, sf->csd.tran_speed);
	error = sdmmc_chip_bus_clock(sc->sc_sct, sc->sc_sch, bus_clock, false);
	if (error) {
		aprint_error_dev(sc->sc_dev, "can't change bus clock\n");
		return error;
	}

	error = sdmmc_mem_send_scr(sc, sf, sf->raw_scr);
	if (error) {
		aprint_error_dev(sc->sc_dev, "SD_SEND_SCR send failed.\n");
		return error;
	}
	error = sdmmc_mem_decode_scr(sc, sf);
	if (error)
		return error;

	/* Widen the data bus when both host and card can do 4-bit. */
	if (ISSET(sc->sc_caps, SMC_CAPS_4BIT_MODE) &&
	    ISSET(sf->scr.bus_width, SCR_SD_BUS_WIDTHS_4BIT)) {
		DPRINTF(("%s: change bus width\n", SDMMCDEVNAME(sc)));
		error = sdmmc_set_bus_width(sf, 4);
		if (error) {
			aprint_error_dev(sc->sc_dev,
			    "can't change bus width (%d bit)\n", 4);
			return error;
		}
		sf->width = 4;
	}

	best_func = 0;
	/* CMD6 requires spec >= 1.10 and the SWITCH command class. */
	if (sf->scr.sd_spec >= SCR_SD_SPEC_VER_1_10 &&
	    ISSET(sf->csd.ccc, SD_CSD_CCC_SWITCH)) {
		/* Mode 0 = inquiry: ask which functions are supported. */
		DPRINTF(("%s: switch func mode 0\n", SDMMCDEVNAME(sc)));
		error = sdmmc_mem_sd_switch(sf, 0, 1, 0, &status);
		if (error) {
			aprint_error_dev(sc->sc_dev,
			    "switch func mode 0 failed\n");
			return error;
		}

		support_func = SFUNC_STATUS_GROUP(&status, 1);

		/* 0x1c = the SDR50/SDR104/DDR50 (UHS) function bits. */
		if (!ISSET(sc->sc_flags, SMF_UHS_MODE) && support_func & 0x1c) {
			/* XXX UHS-I card started in 1.8V mode, switch now */
			error = sdmmc_mem_signal_voltage(sc,
			    SDMMC_SIGNAL_VOLTAGE_180);
			if (error) {
				aprint_error_dev(sc->sc_dev,
				    "failed to recover UHS card\n");
				return error;
			}
			SET(sc->sc_flags, SMF_UHS_MODE);
		}

		for (i = 0; i < __arraycount(switch_group0_functions); i++) {
			if (!(support_func & (1 << i)))
				continue;
			DPRINTF(("%s: card supports mode %s\n",
			    SDMMCDEVNAME(sc),
			    switch_group0_functions[i].name));
		}

		best_func = sdmmc_mem_select_transfer_mode(sc, support_func);

		DPRINTF(("%s: using mode %s\n", SDMMCDEVNAME(sc),
		    switch_group0_functions[best_func].name));

		if (best_func != 0) {
			/* Mode 1 = set: actually switch the function. */
			DPRINTF(("%s: switch func mode 1(func=%d)\n",
			    SDMMCDEVNAME(sc), best_func));
			error =
			    sdmmc_mem_sd_switch(sf, 1, 1, best_func, &status);
			if (error) {
				aprint_error_dev(sc->sc_dev,
				    "switch func mode 1 failed:"
				    " group 1 function %d(0x%2x)\n",
				    best_func, support_func);
				return error;
			}
			sf->csd.tran_speed =
			    switch_group0_functions[best_func].freq;

			if (best_func == SD_ACCESS_MODE_DDR50)
				ddr = true;

			/* Wait 400KHz x 8 clock (2.5us * 8 + slop) */
			delay(25);
		}
	}

	/* update bus clock */
	if (sc->sc_busclk > sf->csd.tran_speed)
		sc->sc_busclk = sf->csd.tran_speed;
	if (sc->sc_busclk == bus_clock && sc->sc_busddr == ddr)
		return 0;

	/* change bus clock */
	error = sdmmc_chip_bus_clock(sc->sc_sct, sc->sc_sch, sc->sc_busclk,
	    ddr);
	if (error) {
		aprint_error_dev(sc->sc_dev, "can't change bus clock\n");
		return error;
	}

	sc->sc_transfer_mode = switch_group0_functions[best_func].name;
	sc->sc_busddr = ddr;

	/* execute tuning (UHS) */
	error = sdmmc_mem_execute_tuning(sc, sf);
	if (error) {
		aprint_error_dev(sc->sc_dev, "can't execute SD tuning\n");
		return error;
	}

	return 0;
}
903
/*
 * MMC-specific card setup: set the bus clock and, for MMC 4.0+ cards,
 * read the EXT_CSD to select the fastest supported timing (HS200 >
 * DDR52 > 52MHz > 26MHz), widen the bus, verify the HS_TIMING switch,
 * handle the DDR52 sequence, read the sector count, and run HS200
 * tuning.  Pre-4.0 cards only get the clock update.  Returns 0 on
 * success.
 */
static int
sdmmc_mem_mmc_init(struct sdmmc_softc *sc, struct sdmmc_function *sf)
{
	int width, value, hs_timing, bus_clock, error;
	uint8_t ext_csd[512];
	uint32_t sectors = 0;
	bool ddr = false;

	sc->sc_transfer_mode = NULL;

	/* change bus clock */
	bus_clock = min(sc->sc_busclk, sf->csd.tran_speed);
	error = sdmmc_chip_bus_clock(sc->sc_sct, sc->sc_sch, bus_clock, false);
	if (error) {
		aprint_error_dev(sc->sc_dev, "can't change bus clock\n");
		return error;
	}

	if (sf->csd.mmcver >= MMC_CSD_MMCVER_4_0) {
		error = sdmmc_mem_send_cxd_data(sc,
		    MMC_SEND_EXT_CSD, ext_csd, sizeof(ext_csd));
		if (error) {
			aprint_error_dev(sc->sc_dev,
			    "can't read EXT_CSD (error=%d)\n", error);
			return error;
		}
		if ((sf->csd.csdver == MMC_CSD_CSDVER_EXT_CSD) &&
		    (ext_csd[EXT_CSD_STRUCTURE] > EXT_CSD_STRUCTURE_VER_1_2)) {
			aprint_error_dev(sc->sc_dev,
			    "unrecognised future version (%d)\n",
			    ext_csd[EXT_CSD_STRUCTURE]);
			return ENOTSUP;
		}

		/* Pick the fastest CARD_TYPE the host also supports. */
		if (ISSET(sc->sc_caps, SMC_CAPS_MMC_HS200) &&
		    ext_csd[EXT_CSD_CARD_TYPE] & EXT_CSD_CARD_TYPE_F_HS200_1_8V) {
			sf->csd.tran_speed = 200000;	/* 200MHz SDR */
			hs_timing = EXT_CSD_HS_TIMING_HS200;
		} else if (ISSET(sc->sc_caps, SMC_CAPS_MMC_DDR52) &&
		    ext_csd[EXT_CSD_CARD_TYPE] & EXT_CSD_CARD_TYPE_F_DDR52_1_8V) {
			sf->csd.tran_speed = 52000;	/* 52MHz */
			hs_timing = EXT_CSD_HS_TIMING_HIGHSPEED;
			ddr = true;
		} else if (ext_csd[EXT_CSD_CARD_TYPE] & EXT_CSD_CARD_TYPE_F_52M) {
			sf->csd.tran_speed = 52000;	/* 52MHz */
			hs_timing = EXT_CSD_HS_TIMING_HIGHSPEED;
		} else if (ext_csd[EXT_CSD_CARD_TYPE] & EXT_CSD_CARD_TYPE_F_26M) {
			sf->csd.tran_speed = 26000;	/* 26MHz */
			hs_timing = EXT_CSD_HS_TIMING_LEGACY;
		} else {
			aprint_error_dev(sc->sc_dev,
			    "unknown CARD_TYPE: 0x%x\n",
			    ext_csd[EXT_CSD_CARD_TYPE]);
			return ENOTSUP;
		}

		/* Widest bus the host controller supports. */
		if (ISSET(sc->sc_caps, SMC_CAPS_8BIT_MODE)) {
			width = 8;
			value = EXT_CSD_BUS_WIDTH_8;
		} else if (ISSET(sc->sc_caps, SMC_CAPS_4BIT_MODE)) {
			width = 4;
			value = EXT_CSD_BUS_WIDTH_4;
		} else {
			width = 1;
			value = EXT_CSD_BUS_WIDTH_1;
		}

		if (width != 1) {
			/*
			 * Switch the card, then the host controller.
			 * NOTE(review): an error from
			 * sdmmc_chip_bus_width() is stored in "error"
			 * but not returned here — confirm intended.
			 */
			error = sdmmc_mem_mmc_switch(sf, EXT_CSD_CMD_SET_NORMAL,
			    EXT_CSD_BUS_WIDTH, value);
			if (error == 0)
				error = sdmmc_chip_bus_width(sc->sc_sct,
				    sc->sc_sch, width);
			else {
				DPRINTF(("%s: can't change bus width"
				    " (%d bit)\n", SDMMCDEVNAME(sc), width));
				return error;
			}

			/* XXXX: need bus test? (using by CMD14 & CMD19) */
			delay(10000);
		}
		sf->width = width;

		/* Fall back to legacy timing if the host can't do HS. */
		if (hs_timing == EXT_CSD_HS_TIMING_HIGHSPEED &&
		    !ISSET(sc->sc_caps, SMC_CAPS_MMC_HIGHSPEED)) {
			hs_timing = EXT_CSD_HS_TIMING_LEGACY;
		}
		if (hs_timing != EXT_CSD_HS_TIMING_LEGACY) {
			error = sdmmc_mem_mmc_switch(sf, EXT_CSD_CMD_SET_NORMAL,
			    EXT_CSD_HS_TIMING, hs_timing);
			if (error) {
				aprint_error_dev(sc->sc_dev,
				    "can't change high speed %d, error %d\n",
				    hs_timing, error);
				return error;
			}
		}

		if (sc->sc_busclk > sf->csd.tran_speed)
			sc->sc_busclk = sf->csd.tran_speed;
		if (sc->sc_busclk != bus_clock) {
			error = sdmmc_chip_bus_clock(sc->sc_sct, sc->sc_sch,
			    sc->sc_busclk, false);
			if (error) {
				aprint_error_dev(sc->sc_dev,
				    "can't change bus clock\n");
				return error;
			}
		}

		/* Re-read EXT_CSD to verify the HS_TIMING switch took. */
		if (hs_timing != EXT_CSD_HS_TIMING_LEGACY) {
			error = sdmmc_mem_send_cxd_data(sc,
			    MMC_SEND_EXT_CSD, ext_csd, sizeof(ext_csd));
			if (error) {
				aprint_error_dev(sc->sc_dev,
				    "can't re-read EXT_CSD\n");
				return error;
			}
			if (ext_csd[EXT_CSD_HS_TIMING] != hs_timing) {
				aprint_error_dev(sc->sc_dev,
				    "HS_TIMING set failed\n");
				return EINVAL;
			}
		}

		/*
		 * HS_TIMING must be set to 0x1 before setting BUS_WIDTH
		 * for dual data rate operation
		 */
		if (ddr &&
		    hs_timing == EXT_CSD_HS_TIMING_HIGHSPEED &&
		    width > 1) {
			error = sdmmc_mem_mmc_switch(sf,
			    EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BUS_WIDTH,
			    (width == 8) ? EXT_CSD_BUS_WIDTH_8_DDR :
			    EXT_CSD_BUS_WIDTH_4_DDR);
			if (error) {
				DPRINTF(("%s: can't switch to DDR"
				    " (%d bit)\n", SDMMCDEVNAME(sc), width));
				return error;
			}

			delay(10000);

			/* DDR52 runs with 1.8V signalling. */
			error = sdmmc_mem_signal_voltage(sc,
			    SDMMC_SIGNAL_VOLTAGE_180);
			if (error) {
				aprint_error_dev(sc->sc_dev,
				    "can't switch signaling voltage\n");
				return error;
			}

			error = sdmmc_chip_bus_clock(sc->sc_sct, sc->sc_sch,
			    sc->sc_busclk, ddr);
			if (error) {
				aprint_error_dev(sc->sc_dev,
				    "can't change bus clock\n");
				return error;
			}

			delay(10000);

			sc->sc_transfer_mode = "DDR52";
			sc->sc_busddr = ddr;
		}

		/* SEC_COUNT: device size in 512-byte sectors (LE). */
		sectors = ext_csd[EXT_CSD_SEC_COUNT + 0] << 0 |
		    ext_csd[EXT_CSD_SEC_COUNT + 1] << 8 |
		    ext_csd[EXT_CSD_SEC_COUNT + 2] << 16 |
		    ext_csd[EXT_CSD_SEC_COUNT + 3] << 24;
		/* Cards over 2GB use sector addressing (SFF_SDHC). */
		if (sectors > (2u * 1024 * 1024 * 1024) / 512) {
			SET(sf->flags, SFF_SDHC);
			sf->csd.capacity = sectors;
		}

		if (hs_timing == EXT_CSD_HS_TIMING_HS200) {
			sc->sc_transfer_mode = "HS200";

			/* execute tuning (HS200) */
			error = sdmmc_mem_execute_tuning(sc, sf);
			if (error) {
				aprint_error_dev(sc->sc_dev,
				    "can't execute MMC tuning\n");
				return error;
			}
		}
	} else {
		/* Pre-4.0 MMC: no EXT_CSD, just settle the bus clock. */
		if (sc->sc_busclk > sf->csd.tran_speed)
			sc->sc_busclk = sf->csd.tran_speed;
		if (sc->sc_busclk != bus_clock) {
			error = sdmmc_chip_bus_clock(sc->sc_sct, sc->sc_sch,
			    sc->sc_busclk, false);
			if (error) {
				aprint_error_dev(sc->sc_dev,
				    "can't change bus clock\n");
				return error;
			}
		}
	}

	return 0;
}
1107
1108 static int
1109 sdmmc_mem_send_cid(struct sdmmc_softc *sc, sdmmc_response *resp)
1110 {
1111 struct sdmmc_command cmd;
1112 int error;
1113
1114 if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
1115 memset(&cmd, 0, sizeof cmd);
1116 cmd.c_opcode = MMC_ALL_SEND_CID;
1117 cmd.c_flags = SCF_CMD_BCR | SCF_RSP_R2 | SCF_TOUT_OK;
1118
1119 error = sdmmc_mmc_command(sc, &cmd);
1120 } else {
1121 error = sdmmc_mem_send_cxd_data(sc, MMC_SEND_CID, &cmd.c_resp,
1122 sizeof(cmd.c_resp));
1123 }
1124
1125 #ifdef SDMMC_DEBUG
1126 if (error == 0)
1127 sdmmc_dump_data("CID", cmd.c_resp, sizeof(cmd.c_resp));
1128 #endif
1129 if (error == 0 && resp != NULL)
1130 memcpy(resp, &cmd.c_resp, sizeof(*resp));
1131 return error;
1132 }
1133
1134 static int
1135 sdmmc_mem_send_csd(struct sdmmc_softc *sc, struct sdmmc_function *sf,
1136 sdmmc_response *resp)
1137 {
1138 struct sdmmc_command cmd;
1139 int error;
1140
1141 if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
1142 memset(&cmd, 0, sizeof cmd);
1143 cmd.c_opcode = MMC_SEND_CSD;
1144 cmd.c_arg = MMC_ARG_RCA(sf->rca);
1145 cmd.c_flags = SCF_CMD_AC | SCF_RSP_R2;
1146
1147 error = sdmmc_mmc_command(sc, &cmd);
1148 } else {
1149 error = sdmmc_mem_send_cxd_data(sc, MMC_SEND_CSD, &cmd.c_resp,
1150 sizeof(cmd.c_resp));
1151 }
1152
1153 #ifdef SDMMC_DEBUG
1154 if (error == 0)
1155 sdmmc_dump_data("CSD", cmd.c_resp, sizeof(cmd.c_resp));
1156 #endif
1157 if (error == 0 && resp != NULL)
1158 memcpy(resp, &cmd.c_resp, sizeof(*resp));
1159 return error;
1160 }
1161
1162 static int
1163 sdmmc_mem_send_scr(struct sdmmc_softc *sc, struct sdmmc_function *sf,
1164 uint32_t *scr)
1165 {
1166 struct sdmmc_command cmd;
1167 bus_dma_segment_t ds[1];
1168 void *ptr = NULL;
1169 int datalen = 8;
1170 int rseg;
1171 int error = 0;
1172
1173 /* Don't lock */
1174
1175 if (ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
1176 error = bus_dmamem_alloc(sc->sc_dmat, datalen, PAGE_SIZE, 0,
1177 ds, 1, &rseg, BUS_DMA_NOWAIT);
1178 if (error)
1179 goto out;
1180 error = bus_dmamem_map(sc->sc_dmat, ds, 1, datalen, &ptr,
1181 BUS_DMA_NOWAIT);
1182 if (error)
1183 goto dmamem_free;
1184 error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmap, ptr, datalen,
1185 NULL, BUS_DMA_NOWAIT|BUS_DMA_STREAMING|BUS_DMA_READ);
1186 if (error)
1187 goto dmamem_unmap;
1188
1189 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap, 0, datalen,
1190 BUS_DMASYNC_PREREAD);
1191 } else {
1192 ptr = malloc(datalen, M_DEVBUF, M_NOWAIT | M_ZERO);
1193 if (ptr == NULL)
1194 goto out;
1195 }
1196
1197 memset(&cmd, 0, sizeof(cmd));
1198 cmd.c_data = ptr;
1199 cmd.c_datalen = datalen;
1200 cmd.c_blklen = datalen;
1201 cmd.c_arg = 0;
1202 cmd.c_flags = SCF_CMD_ADTC | SCF_CMD_READ | SCF_RSP_R1 | SCF_RSP_SPI_R1;
1203 cmd.c_opcode = SD_APP_SEND_SCR;
1204 if (ISSET(sc->sc_caps, SMC_CAPS_DMA))
1205 cmd.c_dmamap = sc->sc_dmap;
1206
1207 error = sdmmc_app_command(sc, sf, &cmd);
1208 if (error == 0) {
1209 if (ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
1210 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap, 0, datalen,
1211 BUS_DMASYNC_POSTREAD);
1212 }
1213 memcpy(scr, ptr, datalen);
1214 }
1215
1216 out:
1217 if (ptr != NULL) {
1218 if (ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
1219 bus_dmamap_unload(sc->sc_dmat, sc->sc_dmap);
1220 dmamem_unmap:
1221 bus_dmamem_unmap(sc->sc_dmat, ptr, datalen);
1222 dmamem_free:
1223 bus_dmamem_free(sc->sc_dmat, ds, rseg);
1224 } else {
1225 free(ptr, M_DEVBUF);
1226 }
1227 }
1228 DPRINTF(("%s: sdmem_mem_send_scr: error = %d\n", SDMMCDEVNAME(sc),
1229 error));
1230
1231 #ifdef SDMMC_DEBUG
1232 if (error == 0)
1233 sdmmc_dump_data("SCR", scr, datalen);
1234 #endif
1235 return error;
1236 }
1237
1238 static int
1239 sdmmc_mem_decode_scr(struct sdmmc_softc *sc, struct sdmmc_function *sf)
1240 {
1241 sdmmc_response resp;
1242 int ver;
1243
1244 memset(resp, 0, sizeof(resp));
1245 /*
1246 * Change the raw-scr received from the DMA stream to resp.
1247 */
1248 resp[0] = be32toh(sf->raw_scr[1]) >> 8; // LSW
1249 resp[1] = be32toh(sf->raw_scr[0]); // MSW
1250 resp[0] |= (resp[1] & 0xff) << 24;
1251 resp[1] >>= 8;
1252
1253 ver = SCR_STRUCTURE(resp);
1254 sf->scr.sd_spec = SCR_SD_SPEC(resp);
1255 sf->scr.bus_width = SCR_SD_BUS_WIDTHS(resp);
1256
1257 DPRINTF(("%s: sdmmc_mem_decode_scr: %08x%08x ver=%d, spec=%d, bus width=%d\n",
1258 SDMMCDEVNAME(sc), resp[1], resp[0],
1259 ver, sf->scr.sd_spec, sf->scr.bus_width));
1260
1261 if (ver != 0 && ver != 1) {
1262 DPRINTF(("%s: unknown structure version: %d\n",
1263 SDMMCDEVNAME(sc), ver));
1264 return EINVAL;
1265 }
1266 return 0;
1267 }
1268
1269 static int
1270 sdmmc_mem_send_cxd_data(struct sdmmc_softc *sc, int opcode, void *data,
1271 size_t datalen)
1272 {
1273 struct sdmmc_command cmd;
1274 bus_dma_segment_t ds[1];
1275 void *ptr = NULL;
1276 int rseg;
1277 int error = 0;
1278
1279 if (ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
1280 error = bus_dmamem_alloc(sc->sc_dmat, datalen, PAGE_SIZE, 0, ds,
1281 1, &rseg, BUS_DMA_NOWAIT);
1282 if (error)
1283 goto out;
1284 error = bus_dmamem_map(sc->sc_dmat, ds, 1, datalen, &ptr,
1285 BUS_DMA_NOWAIT);
1286 if (error)
1287 goto dmamem_free;
1288 error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmap, ptr, datalen,
1289 NULL, BUS_DMA_NOWAIT|BUS_DMA_STREAMING|BUS_DMA_READ);
1290 if (error)
1291 goto dmamem_unmap;
1292
1293 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap, 0, datalen,
1294 BUS_DMASYNC_PREREAD);
1295 } else {
1296 ptr = malloc(datalen, M_DEVBUF, M_NOWAIT | M_ZERO);
1297 if (ptr == NULL)
1298 goto out;
1299 }
1300
1301 memset(&cmd, 0, sizeof(cmd));
1302 cmd.c_data = ptr;
1303 cmd.c_datalen = datalen;
1304 cmd.c_blklen = datalen;
1305 cmd.c_opcode = opcode;
1306 cmd.c_arg = 0;
1307 cmd.c_flags = SCF_CMD_ADTC | SCF_CMD_READ | SCF_RSP_SPI_R1;
1308 if (opcode == MMC_SEND_EXT_CSD)
1309 SET(cmd.c_flags, SCF_RSP_R1);
1310 else
1311 SET(cmd.c_flags, SCF_RSP_R2);
1312 if (ISSET(sc->sc_caps, SMC_CAPS_DMA))
1313 cmd.c_dmamap = sc->sc_dmap;
1314
1315 error = sdmmc_mmc_command(sc, &cmd);
1316 if (error == 0) {
1317 if (ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
1318 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap, 0, datalen,
1319 BUS_DMASYNC_POSTREAD);
1320 }
1321 memcpy(data, ptr, datalen);
1322 #ifdef SDMMC_DEBUG
1323 sdmmc_dump_data("CXD", data, datalen);
1324 #endif
1325 }
1326
1327 out:
1328 if (ptr != NULL) {
1329 if (ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
1330 bus_dmamap_unload(sc->sc_dmat, sc->sc_dmap);
1331 dmamem_unmap:
1332 bus_dmamem_unmap(sc->sc_dmat, ptr, datalen);
1333 dmamem_free:
1334 bus_dmamem_free(sc->sc_dmat, ds, rseg);
1335 } else {
1336 free(ptr, M_DEVBUF);
1337 }
1338 }
1339 return error;
1340 }
1341
1342 static int
1343 sdmmc_set_bus_width(struct sdmmc_function *sf, int width)
1344 {
1345 struct sdmmc_softc *sc = sf->sc;
1346 struct sdmmc_command cmd;
1347 int error;
1348
1349 if (ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE))
1350 return ENODEV;
1351
1352 memset(&cmd, 0, sizeof(cmd));
1353 cmd.c_opcode = SD_APP_SET_BUS_WIDTH;
1354 cmd.c_flags = SCF_RSP_R1 | SCF_CMD_AC;
1355
1356 switch (width) {
1357 case 1:
1358 cmd.c_arg = SD_ARG_BUS_WIDTH_1;
1359 break;
1360
1361 case 4:
1362 cmd.c_arg = SD_ARG_BUS_WIDTH_4;
1363 break;
1364
1365 default:
1366 return EINVAL;
1367 }
1368
1369 error = sdmmc_app_command(sc, sf, &cmd);
1370 if (error == 0)
1371 error = sdmmc_chip_bus_width(sc->sc_sct, sc->sc_sch, width);
1372 return error;
1373 }
1374
1375 static int
1376 sdmmc_mem_sd_switch(struct sdmmc_function *sf, int mode, int group,
1377 int function, sdmmc_bitfield512_t *status)
1378 {
1379 struct sdmmc_softc *sc = sf->sc;
1380 struct sdmmc_command cmd;
1381 bus_dma_segment_t ds[1];
1382 void *ptr = NULL;
1383 int gsft, rseg, error = 0;
1384 const int statlen = 64;
1385
1386 if (sf->scr.sd_spec >= SCR_SD_SPEC_VER_1_10 &&
1387 !ISSET(sf->csd.ccc, SD_CSD_CCC_SWITCH))
1388 return EINVAL;
1389
1390 if (group <= 0 || group > 6 ||
1391 function < 0 || function > 15)
1392 return EINVAL;
1393
1394 gsft = (group - 1) << 2;
1395
1396 if (ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
1397 error = bus_dmamem_alloc(sc->sc_dmat, statlen, PAGE_SIZE, 0, ds,
1398 1, &rseg, BUS_DMA_NOWAIT);
1399 if (error)
1400 goto out;
1401 error = bus_dmamem_map(sc->sc_dmat, ds, 1, statlen, &ptr,
1402 BUS_DMA_NOWAIT);
1403 if (error)
1404 goto dmamem_free;
1405 error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmap, ptr, statlen,
1406 NULL, BUS_DMA_NOWAIT|BUS_DMA_STREAMING|BUS_DMA_READ);
1407 if (error)
1408 goto dmamem_unmap;
1409
1410 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap, 0, statlen,
1411 BUS_DMASYNC_PREREAD);
1412 } else {
1413 ptr = malloc(statlen, M_DEVBUF, M_NOWAIT | M_ZERO);
1414 if (ptr == NULL)
1415 goto out;
1416 }
1417
1418 memset(&cmd, 0, sizeof(cmd));
1419 cmd.c_data = ptr;
1420 cmd.c_datalen = statlen;
1421 cmd.c_blklen = statlen;
1422 cmd.c_opcode = SD_SEND_SWITCH_FUNC;
1423 cmd.c_arg =
1424 (!!mode << 31) | (function << gsft) | (0x00ffffff & ~(0xf << gsft));
1425 cmd.c_flags = SCF_CMD_ADTC | SCF_CMD_READ | SCF_RSP_R1 | SCF_RSP_SPI_R1;
1426 if (ISSET(sc->sc_caps, SMC_CAPS_DMA))
1427 cmd.c_dmamap = sc->sc_dmap;
1428
1429 error = sdmmc_mmc_command(sc, &cmd);
1430 if (error == 0) {
1431 if (ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
1432 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap, 0, statlen,
1433 BUS_DMASYNC_POSTREAD);
1434 }
1435 memcpy(status, ptr, statlen);
1436 }
1437
1438 out:
1439 if (ptr != NULL) {
1440 if (ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
1441 bus_dmamap_unload(sc->sc_dmat, sc->sc_dmap);
1442 dmamem_unmap:
1443 bus_dmamem_unmap(sc->sc_dmat, ptr, statlen);
1444 dmamem_free:
1445 bus_dmamem_free(sc->sc_dmat, ds, rseg);
1446 } else {
1447 free(ptr, M_DEVBUF);
1448 }
1449 }
1450
1451 if (error == 0)
1452 sdmmc_be512_to_bitfield512(status);
1453
1454 return error;
1455 }
1456
/*
 * Write one byte of the MMC EXT_CSD register via CMD6 (SWITCH).
 * For HS_TIMING values of 2 or more (HS200 and, presumably, faster
 * timings -- the legacy/high-speed values are 0 and 1), the card
 * status is polled with CMD13 afterwards until the card reports
 * READY_FOR_DATA; a reported SWITCH_ERROR yields EINVAL.
 */
static int
sdmmc_mem_mmc_switch(struct sdmmc_function *sf, uint8_t set, uint8_t index,
    uint8_t value)
{
	struct sdmmc_softc *sc = sf->sc;
	struct sdmmc_command cmd;
	int error;

	memset(&cmd, 0, sizeof(cmd));
	cmd.c_opcode = MMC_SWITCH;
	/* arg: [31:24] access mode, [23:16] index, [15:8] value, [7:0] set */
	cmd.c_arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
	    (index << 16) | (value << 8) | set;
	cmd.c_flags = SCF_RSP_SPI_R1B | SCF_RSP_R1B | SCF_CMD_AC;

	error = sdmmc_mmc_command(sc, &cmd);
	if (error)
		return error;

	if (index == EXT_CSD_HS_TIMING && value >= 2) {
		do {
			memset(&cmd, 0, sizeof(cmd));
			cmd.c_opcode = MMC_SEND_STATUS;
			/* SPI mode does not use relative card addresses. */
			if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE))
				cmd.c_arg = MMC_ARG_RCA(sf->rca);
			cmd.c_flags = SCF_CMD_AC | SCF_RSP_R1 | SCF_RSP_SPI_R2;
			error = sdmmc_mmc_command(sc, &cmd);
			if (error)
				break;
			if (ISSET(MMC_R1(cmd.c_resp), MMC_R1_SWITCH_ERROR)) {
				aprint_error_dev(sc->sc_dev, "switch error\n");
				return EINVAL;
			}
			/* XXX time out */
		} while (!ISSET(MMC_R1(cmd.c_resp), MMC_R1_READY_FOR_DATA));

		if (error) {
			aprint_error_dev(sc->sc_dev,
			    "error waiting for high speed switch: %d\n",
			    error);
			return error;
		}
	}

	return 0;
}
1502
1503 /*
1504 * SPI mode function
1505 */
1506 static int
1507 sdmmc_mem_spi_read_ocr(struct sdmmc_softc *sc, uint32_t hcs, uint32_t *card_ocr)
1508 {
1509 struct sdmmc_command cmd;
1510 int error;
1511
1512 memset(&cmd, 0, sizeof(cmd));
1513 cmd.c_opcode = MMC_READ_OCR;
1514 cmd.c_arg = hcs ? MMC_OCR_HCS : 0;
1515 cmd.c_flags = SCF_RSP_SPI_R3;
1516
1517 error = sdmmc_mmc_command(sc, &cmd);
1518 if (error == 0 && card_ocr != NULL)
1519 *card_ocr = cmd.c_resp[1];
1520 DPRINTF(("%s: sdmmc_mem_spi_read_ocr: error=%d, ocr=%#x\n",
1521 SDMMCDEVNAME(sc), error, cmd.c_resp[1]));
1522 return error;
1523 }
1524
1525 /*
1526 * read/write function
1527 */
1528 /* read */
1529 static int
1530 sdmmc_mem_single_read_block(struct sdmmc_function *sf, uint32_t blkno,
1531 u_char *data, size_t datalen)
1532 {
1533 struct sdmmc_softc *sc = sf->sc;
1534 int error = 0;
1535 int i;
1536
1537 KASSERT((datalen % SDMMC_SECTOR_SIZE) == 0);
1538 KASSERT(!ISSET(sc->sc_caps, SMC_CAPS_DMA));
1539
1540 for (i = 0; i < datalen / SDMMC_SECTOR_SIZE; i++) {
1541 error = sdmmc_mem_read_block_subr(sf, sc->sc_dmap, blkno + i,
1542 data + i * SDMMC_SECTOR_SIZE, SDMMC_SECTOR_SIZE);
1543 if (error)
1544 break;
1545 }
1546 return error;
1547 }
1548
/*
 * Simulate multi-segment dma transfer.
 *
 * For controllers without SMC_CAPS_MULTI_SEG_DMA: either replay the
 * already-loaded sc_dmap one segment at a time through sseg_dmap, or,
 * if any segment length is not a whole number of sectors, fall back to
 * a single transfer through the pre-allocated bounce buffer sf->bbuf.
 */
static int
sdmmc_mem_single_segment_dma_read_block(struct sdmmc_function *sf,
    uint32_t blkno, u_char *data, size_t datalen)
{
	struct sdmmc_softc *sc = sf->sc;
	bool use_bbuf = false;
	int error = 0;
	int i;

	/*
	 * The per-segment path issues one block read per segment, which
	 * only works if every segment is sector-aligned in length.
	 */
	for (i = 0; i < sc->sc_dmap->dm_nsegs; i++) {
		size_t len = sc->sc_dmap->dm_segs[i].ds_len;
		if ((len % SDMMC_SECTOR_SIZE) != 0) {
			use_bbuf = true;
			break;
		}
	}
	if (use_bbuf) {
		bus_dmamap_sync(sc->sc_dmat, sf->bbuf_dmap, 0, datalen,
		    BUS_DMASYNC_PREREAD);

		/*
		 * NOTE(review): c_data is "data" but the dma map used is
		 * bbuf_dmap (pointing at sf->bbuf); presumably DMA-capable
		 * drivers honor the dmamap and ignore c_data -- confirm.
		 */
		error = sdmmc_mem_read_block_subr(sf, sf->bbuf_dmap,
		    blkno, data, datalen);
		if (error) {
			bus_dmamap_unload(sc->sc_dmat, sf->bbuf_dmap);
			return error;
		}

		bus_dmamap_sync(sc->sc_dmat, sf->bbuf_dmap, 0, datalen,
		    BUS_DMASYNC_POSTREAD);

		/* Copy from bounce buffer */
		memcpy(data, sf->bbuf, datalen);

		return 0;
	}

	/* Per-segment replay: one sector-aligned read per dma segment. */
	for (i = 0; i < sc->sc_dmap->dm_nsegs; i++) {
		size_t len = sc->sc_dmap->dm_segs[i].ds_len;

		error = bus_dmamap_load(sc->sc_dmat, sf->sseg_dmap,
		    data, len, NULL, BUS_DMA_NOWAIT|BUS_DMA_READ);
		if (error)
			return error;

		bus_dmamap_sync(sc->sc_dmat, sf->sseg_dmap, 0, len,
		    BUS_DMASYNC_PREREAD);

		error = sdmmc_mem_read_block_subr(sf, sf->sseg_dmap,
		    blkno, data, len);
		if (error) {
			bus_dmamap_unload(sc->sc_dmat, sf->sseg_dmap);
			return error;
		}

		bus_dmamap_sync(sc->sc_dmat, sf->sseg_dmap, 0, len,
		    BUS_DMASYNC_POSTREAD);

		bus_dmamap_unload(sc->sc_dmat, sf->sseg_dmap);

		/* Advance to the sectors covered by the next segment. */
		blkno += len / SDMMC_SECTOR_SIZE;
		data += len;
	}
	return 0;
}
1616
/*
 * Low-level block read: select the card (native mode), issue CMD17 or
 * CMD18 depending on the transfer length, send STOP_TRANSMISSION if
 * the controller does not do so automatically, and finally poll CMD13
 * until the card is ready for data again.
 */
static int
sdmmc_mem_read_block_subr(struct sdmmc_function *sf, bus_dmamap_t dmap,
    uint32_t blkno, u_char *data, size_t datalen)
{
	struct sdmmc_softc *sc = sf->sc;
	struct sdmmc_command cmd;
	int error;

	if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
		error = sdmmc_select_card(sc, sf);
		if (error)
			goto out;
	}

	memset(&cmd, 0, sizeof(cmd));
	cmd.c_data = data;
	cmd.c_datalen = datalen;
	cmd.c_blklen = SDMMC_SECTOR_SIZE;
	/* More than one sector => multiple-block read (CMD18). */
	cmd.c_opcode = (cmd.c_datalen / cmd.c_blklen) > 1 ?
	    MMC_READ_BLOCK_MULTIPLE : MMC_READ_BLOCK_SINGLE;
	cmd.c_arg = blkno;
	/* Standard-capacity cards address by byte, not by sector. */
	if (!ISSET(sf->flags, SFF_SDHC))
		cmd.c_arg <<= SDMMC_SECTOR_SIZE_SB;
	cmd.c_flags = SCF_CMD_ADTC | SCF_CMD_READ | SCF_RSP_R1 | SCF_RSP_SPI_R1;
	if (ISSET(sc->sc_caps, SMC_CAPS_DMA))
		cmd.c_dmamap = dmap;

	sc->sc_ev_xfer.ev_count++;

	error = sdmmc_mmc_command(sc, &cmd);
	if (error) {
		sc->sc_ev_xfer_error.ev_count++;
		goto out;
	}

	/* Account the transfer size by power-of-two bucket (512B..64KB). */
	const u_int counter = __builtin_ctz(cmd.c_datalen);
	if (counter >= 9 && counter <= 16) {
		sc->sc_ev_xfer_aligned[counter - 9].ev_count++;
	} else {
		sc->sc_ev_xfer_unaligned.ev_count++;
	}

	if (!ISSET(sc->sc_caps, SMC_CAPS_AUTO_STOP)) {
		if (cmd.c_opcode == MMC_READ_BLOCK_MULTIPLE) {
			memset(&cmd, 0, sizeof cmd);
			cmd.c_opcode = MMC_STOP_TRANSMISSION;
			cmd.c_arg = MMC_ARG_RCA(sf->rca);
			cmd.c_flags = SCF_CMD_AC | SCF_RSP_R1B | SCF_RSP_SPI_R1B;
			error = sdmmc_mmc_command(sc, &cmd);
			if (error)
				goto out;
		}
	}

	/* Wait for the card to leave the data/programming state. */
	if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
		do {
			memset(&cmd, 0, sizeof(cmd));
			cmd.c_opcode = MMC_SEND_STATUS;
			if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE))
				cmd.c_arg = MMC_ARG_RCA(sf->rca);
			cmd.c_flags = SCF_CMD_AC | SCF_RSP_R1 | SCF_RSP_SPI_R2;
			error = sdmmc_mmc_command(sc, &cmd);
			if (error)
				break;
			/* XXX time out */
		} while (!ISSET(MMC_R1(cmd.c_resp), MMC_R1_READY_FOR_DATA));
	}

out:
	return error;
}
1688
/*
 * Public entry point for reading "datalen" bytes starting at sector
 * "blkno".  Dispatches to the single-block, PIO, single-segment-DMA,
 * or full DMA path depending on controller capabilities.
 */
int
sdmmc_mem_read_block(struct sdmmc_function *sf, uint32_t blkno, u_char *data,
    size_t datalen)
{
	struct sdmmc_softc *sc = sf->sc;
	int error;

	SDMMC_LOCK(sc);
	mutex_enter(&sc->sc_mtx);

	/* Controller can only move one block per command. */
	if (ISSET(sc->sc_caps, SMC_CAPS_SINGLE_ONLY)) {
		error = sdmmc_mem_single_read_block(sf, blkno, data, datalen);
		goto out;
	}

	/* PIO path: no dma map involved. */
	if (!ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
		error = sdmmc_mem_read_block_subr(sf, sc->sc_dmap, blkno, data,
		    datalen);
		goto out;
	}

	/* DMA transfer */
	error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmap, data, datalen, NULL,
	    BUS_DMA_NOWAIT|BUS_DMA_READ);
	if (error)
		goto out;

#ifdef SDMMC_DEBUG
	printf("data=%p, datalen=%zu\n", data, datalen);
	for (int i = 0; i < sc->sc_dmap->dm_nsegs; i++) {
		printf("seg#%d: addr=%#lx, size=%#lx\n", i,
		    (u_long)sc->sc_dmap->dm_segs[i].ds_addr,
		    (u_long)sc->sc_dmap->dm_segs[i].ds_len);
	}
#endif

	/* Multi-segment map on a single-segment controller: simulate. */
	if (sc->sc_dmap->dm_nsegs > 1
	    && !ISSET(sc->sc_caps, SMC_CAPS_MULTI_SEG_DMA)) {
		error = sdmmc_mem_single_segment_dma_read_block(sf, blkno,
		    data, datalen);
		goto unload;
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap, 0, datalen,
	    BUS_DMASYNC_PREREAD);

	error = sdmmc_mem_read_block_subr(sf, sc->sc_dmap, blkno, data,
	    datalen);
	if (error)
		goto unload;

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap, 0, datalen,
	    BUS_DMASYNC_POSTREAD);
unload:
	bus_dmamap_unload(sc->sc_dmat, sc->sc_dmap);

out:
	mutex_exit(&sc->sc_mtx);
	SDMMC_UNLOCK(sc);

	return error;
}
1751
1752 /* write */
1753 static int
1754 sdmmc_mem_single_write_block(struct sdmmc_function *sf, uint32_t blkno,
1755 u_char *data, size_t datalen)
1756 {
1757 struct sdmmc_softc *sc = sf->sc;
1758 int error = 0;
1759 int i;
1760
1761 KASSERT((datalen % SDMMC_SECTOR_SIZE) == 0);
1762 KASSERT(!ISSET(sc->sc_caps, SMC_CAPS_DMA));
1763
1764 for (i = 0; i < datalen / SDMMC_SECTOR_SIZE; i++) {
1765 error = sdmmc_mem_write_block_subr(sf, sc->sc_dmap, blkno + i,
1766 data + i * SDMMC_SECTOR_SIZE, SDMMC_SECTOR_SIZE);
1767 if (error)
1768 break;
1769 }
1770 return error;
1771 }
1772
/*
 * Simulate multi-segment dma transfer.
 *
 * Write-direction counterpart of the single-segment read helper: either
 * replay the already-loaded sc_dmap one segment at a time through
 * sseg_dmap, or, if any segment length is not a whole number of
 * sectors, stage the data through the bounce buffer sf->bbuf.
 */
static int
sdmmc_mem_single_segment_dma_write_block(struct sdmmc_function *sf,
    uint32_t blkno, u_char *data, size_t datalen)
{
	struct sdmmc_softc *sc = sf->sc;
	bool use_bbuf = false;
	int error = 0;
	int i;

	/*
	 * The per-segment path issues one block write per segment, which
	 * only works if every segment is sector-aligned in length.
	 */
	for (i = 0; i < sc->sc_dmap->dm_nsegs; i++) {
		size_t len = sc->sc_dmap->dm_segs[i].ds_len;
		if ((len % SDMMC_SECTOR_SIZE) != 0) {
			use_bbuf = true;
			break;
		}
	}
	if (use_bbuf) {
		/* Copy to bounce buffer */
		memcpy(sf->bbuf, data, datalen);

		bus_dmamap_sync(sc->sc_dmat, sf->bbuf_dmap, 0, datalen,
		    BUS_DMASYNC_PREWRITE);

		/*
		 * NOTE(review): c_data is "data" but the dma map used is
		 * bbuf_dmap (pointing at sf->bbuf); presumably DMA-capable
		 * drivers honor the dmamap and ignore c_data -- confirm.
		 * The bytes are identical either way after the memcpy.
		 */
		error = sdmmc_mem_write_block_subr(sf, sf->bbuf_dmap,
		    blkno, data, datalen);
		if (error) {
			bus_dmamap_unload(sc->sc_dmat, sf->bbuf_dmap);
			return error;
		}

		bus_dmamap_sync(sc->sc_dmat, sf->bbuf_dmap, 0, datalen,
		    BUS_DMASYNC_POSTWRITE);

		return 0;
	}

	/* Per-segment replay: one sector-aligned write per dma segment. */
	for (i = 0; i < sc->sc_dmap->dm_nsegs; i++) {
		size_t len = sc->sc_dmap->dm_segs[i].ds_len;

		error = bus_dmamap_load(sc->sc_dmat, sf->sseg_dmap,
		    data, len, NULL, BUS_DMA_NOWAIT|BUS_DMA_WRITE);
		if (error)
			return error;

		bus_dmamap_sync(sc->sc_dmat, sf->sseg_dmap, 0, len,
		    BUS_DMASYNC_PREWRITE);

		error = sdmmc_mem_write_block_subr(sf, sf->sseg_dmap,
		    blkno, data, len);
		if (error) {
			bus_dmamap_unload(sc->sc_dmat, sf->sseg_dmap);
			return error;
		}

		bus_dmamap_sync(sc->sc_dmat, sf->sseg_dmap, 0, len,
		    BUS_DMASYNC_POSTWRITE);

		bus_dmamap_unload(sc->sc_dmat, sf->sseg_dmap);

		/* Advance to the sectors covered by the next segment. */
		blkno += len / SDMMC_SECTOR_SIZE;
		data += len;
	}

	return error;
}
1841
/*
 * Low-level block write: select the card (native mode), issue CMD24 or
 * CMD25 depending on the transfer length, send STOP_TRANSMISSION if
 * the controller does not do so automatically, and finally poll CMD13
 * until the card finishes programming.
 */
static int
sdmmc_mem_write_block_subr(struct sdmmc_function *sf, bus_dmamap_t dmap,
    uint32_t blkno, u_char *data, size_t datalen)
{
	struct sdmmc_softc *sc = sf->sc;
	struct sdmmc_command cmd;
	int error;

	if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
		error = sdmmc_select_card(sc, sf);
		if (error)
			goto out;
	}

	memset(&cmd, 0, sizeof(cmd));
	cmd.c_data = data;
	cmd.c_datalen = datalen;
	cmd.c_blklen = SDMMC_SECTOR_SIZE;
	/* More than one sector => multiple-block write (CMD25). */
	cmd.c_opcode = (cmd.c_datalen / cmd.c_blklen) > 1 ?
	    MMC_WRITE_BLOCK_MULTIPLE : MMC_WRITE_BLOCK_SINGLE;
	cmd.c_arg = blkno;
	/* Standard-capacity cards address by byte, not by sector. */
	if (!ISSET(sf->flags, SFF_SDHC))
		cmd.c_arg <<= SDMMC_SECTOR_SIZE_SB;
	cmd.c_flags = SCF_CMD_ADTC | SCF_RSP_R1;
	if (ISSET(sc->sc_caps, SMC_CAPS_DMA))
		cmd.c_dmamap = dmap;

	sc->sc_ev_xfer.ev_count++;

	error = sdmmc_mmc_command(sc, &cmd);
	if (error) {
		sc->sc_ev_xfer_error.ev_count++;
		goto out;
	}

	/* Account the transfer size by power-of-two bucket (512B..64KB). */
	const u_int counter = __builtin_ctz(cmd.c_datalen);
	if (counter >= 9 && counter <= 16) {
		sc->sc_ev_xfer_aligned[counter - 9].ev_count++;
	} else {
		sc->sc_ev_xfer_unaligned.ev_count++;
	}

	if (!ISSET(sc->sc_caps, SMC_CAPS_AUTO_STOP)) {
		if (cmd.c_opcode == MMC_WRITE_BLOCK_MULTIPLE) {
			memset(&cmd, 0, sizeof(cmd));
			cmd.c_opcode = MMC_STOP_TRANSMISSION;
			cmd.c_flags = SCF_CMD_AC | SCF_RSP_R1B | SCF_RSP_SPI_R1B;
			error = sdmmc_mmc_command(sc, &cmd);
			if (error)
				goto out;
		}
	}

	/* Wait for the card to finish programming the written data. */
	if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
		do {
			memset(&cmd, 0, sizeof(cmd));
			cmd.c_opcode = MMC_SEND_STATUS;
			if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE))
				cmd.c_arg = MMC_ARG_RCA(sf->rca);
			cmd.c_flags = SCF_CMD_AC | SCF_RSP_R1 | SCF_RSP_SPI_R2;
			error = sdmmc_mmc_command(sc, &cmd);
			if (error)
				break;
			/* XXX time out */
		} while (!ISSET(MMC_R1(cmd.c_resp), MMC_R1_READY_FOR_DATA));
	}

out:
	return error;
}
1912
/*
 * Public entry point for writing "datalen" bytes starting at sector
 * "blkno".  Rejects write-protected media, then dispatches to the
 * single-block, PIO, single-segment-DMA, or full DMA path depending
 * on controller capabilities.
 */
int
sdmmc_mem_write_block(struct sdmmc_function *sf, uint32_t blkno, u_char *data,
    size_t datalen)
{
	struct sdmmc_softc *sc = sf->sc;
	int error;

	SDMMC_LOCK(sc);
	mutex_enter(&sc->sc_mtx);

	if (sdmmc_chip_write_protect(sc->sc_sct, sc->sc_sch)) {
		aprint_normal_dev(sc->sc_dev, "write-protected\n");
		error = EIO;
		goto out;
	}

	/* Controller can only move one block per command. */
	if (ISSET(sc->sc_caps, SMC_CAPS_SINGLE_ONLY)) {
		error = sdmmc_mem_single_write_block(sf, blkno, data, datalen);
		goto out;
	}

	/* PIO path: no dma map involved. */
	if (!ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
		error = sdmmc_mem_write_block_subr(sf, sc->sc_dmap, blkno, data,
		    datalen);
		goto out;
	}

	/* DMA transfer */
	error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmap, data, datalen, NULL,
	    BUS_DMA_NOWAIT|BUS_DMA_WRITE);
	if (error)
		goto out;

#ifdef SDMMC_DEBUG
	aprint_normal_dev(sc->sc_dev, "%s: data=%p, datalen=%zu\n",
	    __func__, data, datalen);
	for (int i = 0; i < sc->sc_dmap->dm_nsegs; i++) {
		aprint_normal_dev(sc->sc_dev,
		    "%s: seg#%d: addr=%#lx, size=%#lx\n", __func__, i,
		    (u_long)sc->sc_dmap->dm_segs[i].ds_addr,
		    (u_long)sc->sc_dmap->dm_segs[i].ds_len);
	}
#endif

	/* Multi-segment map on a single-segment controller: simulate. */
	if (sc->sc_dmap->dm_nsegs > 1
	    && !ISSET(sc->sc_caps, SMC_CAPS_MULTI_SEG_DMA)) {
		error = sdmmc_mem_single_segment_dma_write_block(sf, blkno,
		    data, datalen);
		goto unload;
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap, 0, datalen,
	    BUS_DMASYNC_PREWRITE);

	error = sdmmc_mem_write_block_subr(sf, sc->sc_dmap, blkno, data,
	    datalen);
	if (error)
		goto unload;

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap, 0, datalen,
	    BUS_DMASYNC_POSTWRITE);
unload:
	bus_dmamap_unload(sc->sc_dmat, sc->sc_dmap);

out:
	mutex_exit(&sc->sc_mtx);
	SDMMC_UNLOCK(sc);

	return error;
}
1983