sdmmc_mem.c revision 1.55 1 /* $NetBSD: sdmmc_mem.c,v 1.55 2017/02/17 10:51:48 nonaka Exp $ */
2 /* $OpenBSD: sdmmc_mem.c,v 1.10 2009/01/09 10:55:22 jsg Exp $ */
3
4 /*
5 * Copyright (c) 2006 Uwe Stuehler <uwe (at) openbsd.org>
6 *
7 * Permission to use, copy, modify, and distribute this software for any
8 * purpose with or without fee is hereby granted, provided that the above
9 * copyright notice and this permission notice appear in all copies.
10 *
11 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18 */
19
20 /*-
21 * Copyright (C) 2007, 2008, 2009, 2010 NONAKA Kimihiro <nonaka (at) netbsd.org>
22 * All rights reserved.
23 *
24 * Redistribution and use in source and binary forms, with or without
25 * modification, are permitted provided that the following conditions
26 * are met:
27 * 1. Redistributions of source code must retain the above copyright
28 * notice, this list of conditions and the following disclaimer.
29 * 2. Redistributions in binary form must reproduce the above copyright
30 * notice, this list of conditions and the following disclaimer in the
31 * documentation and/or other materials provided with the distribution.
32 *
33 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
34 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
35 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
36 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
37 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
38 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
39 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
40 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
41 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
42 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
43 */
44
45 /* Routines for SD/MMC memory cards. */
46
47 #include <sys/cdefs.h>
48 __KERNEL_RCSID(0, "$NetBSD: sdmmc_mem.c,v 1.55 2017/02/17 10:51:48 nonaka Exp $");
49
50 #ifdef _KERNEL_OPT
51 #include "opt_sdmmc.h"
52 #endif
53
54 #include <sys/param.h>
55 #include <sys/kernel.h>
56 #include <sys/malloc.h>
57 #include <sys/systm.h>
58 #include <sys/device.h>
59 #include <sys/bitops.h>
60 #include <sys/evcnt.h>
61
62 #include <dev/sdmmc/sdmmcchip.h>
63 #include <dev/sdmmc/sdmmcreg.h>
64 #include <dev/sdmmc/sdmmcvar.h>
65
66 #ifdef SDMMC_DEBUG
67 #define DPRINTF(s) do { printf s; } while (/*CONSTCOND*/0)
68 #else
69 #define DPRINTF(s) do {} while (/*CONSTCOND*/0)
70 #endif
71
/*
 * 512-bit big-endian quantity (e.g. the CMD6 Switch Function Status),
 * stored as sixteen 32-bit words.  It must be converted with
 * sdmmc_be512_to_bitfield512() before SFUNC_STATUS_GROUP() can be
 * applied to it.
 */
typedef struct { uint32_t _bits[512/32]; } __packed __aligned(4) sdmmc_bitfield512_t;

/* File-local helpers; definitions appear below in this file. */
static int sdmmc_mem_sd_init(struct sdmmc_softc *, struct sdmmc_function *);
static int sdmmc_mem_mmc_init(struct sdmmc_softc *, struct sdmmc_function *);
static int sdmmc_mem_send_cid(struct sdmmc_softc *, sdmmc_response *);
static int sdmmc_mem_send_csd(struct sdmmc_softc *, struct sdmmc_function *,
    sdmmc_response *);
static int sdmmc_mem_send_scr(struct sdmmc_softc *, struct sdmmc_function *,
    uint32_t *scr);
static int sdmmc_mem_decode_scr(struct sdmmc_softc *, struct sdmmc_function *);
static int sdmmc_mem_send_cxd_data(struct sdmmc_softc *, int, void *, size_t);
static int sdmmc_set_bus_width(struct sdmmc_function *, int);
static int sdmmc_mem_sd_switch(struct sdmmc_function *, int, int, int, sdmmc_bitfield512_t *);
static int sdmmc_mem_mmc_switch(struct sdmmc_function *, uint8_t, uint8_t,
    uint8_t);
static int sdmmc_mem_signal_voltage(struct sdmmc_softc *, int);
static int sdmmc_mem_spi_read_ocr(struct sdmmc_softc *, uint32_t, uint32_t *);
static int sdmmc_mem_single_read_block(struct sdmmc_function *, uint32_t,
    u_char *, size_t);
static int sdmmc_mem_single_write_block(struct sdmmc_function *, uint32_t,
    u_char *, size_t);
static int sdmmc_mem_single_segment_dma_read_block(struct sdmmc_function *,
    uint32_t, u_char *, size_t);
static int sdmmc_mem_single_segment_dma_write_block(struct sdmmc_function *,
    uint32_t, u_char *, size_t);
static int sdmmc_mem_read_block_subr(struct sdmmc_function *, bus_dmamap_t,
    uint32_t, u_char *, size_t);
static int sdmmc_mem_write_block_subr(struct sdmmc_function *, bus_dmamap_t,
    uint32_t, u_char *, size_t);
101
/*
 * SD function group 1 (access mode) table, indexed by the CMD6
 * function number (SD_ACCESS_MODE_*): 'v' is the host capability bit
 * required to use the mode (0 = always usable), 'freq' the nominal
 * maximum bus clock in kHz.
 */
static const struct {
	const char *name;
	int v;		/* required SMC_CAPS_* host capability bit */
	int freq;	/* max bus clock, kHz */
} switch_group0_functions[] = {
	/* Default/SDR12 */
	{ "Default/SDR12",	0,			25000 },

	/* High-Speed/SDR25 */
	{ "High-Speed/SDR25",	SMC_CAPS_SD_HIGHSPEED,	50000 },

	/* SDR50 */
	{ "SDR50",		SMC_CAPS_UHS_SDR50,	100000 },

	/* SDR104 */
	{ "SDR104",		SMC_CAPS_UHS_SDR104,	208000 },

	/* DDR50 */
	{ "DDR50",		SMC_CAPS_UHS_DDR50,	50000 },
};
122
123 /*
124 * Initialize SD/MMC memory cards and memory in SDIO "combo" cards.
125 */
int
sdmmc_mem_enable(struct sdmmc_softc *sc)
{
	uint32_t host_ocr;
	uint32_t card_ocr;
	uint32_t new_ocr;
	uint32_t ocr = 0;
	int error;

	SDMMC_LOCK(sc);

	/* Set host mode to SD "combo" card or SD memory-only. */
	CLR(sc->sc_flags, SMF_UHS_MODE);
	SET(sc->sc_flags, SMF_SD_MODE|SMF_MEM_MODE);

	if (ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE))
		sdmmc_spi_chip_initialize(sc->sc_spi_sct, sc->sc_sch);

	/* Reset memory (*must* do that before CMD55 or CMD1). */
	sdmmc_go_idle_state(sc);

	if (ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
		/*
		 * Check SD Ver.2: CMD8 argument 0x1aa encodes the
		 * supply voltage range plus the 0xaa check pattern.
		 * A card that echoes it back speaks the SD 2.0
		 * protocol, so we may advertise host capacity
		 * support (HCS) in ACMD41 below.
		 */
		error = sdmmc_mem_send_if_cond(sc, 0x1aa, &card_ocr);
		if (error == 0 && card_ocr == 0x1aa)
			SET(ocr, MMC_OCR_HCS);
	}

	/*
	 * Read the SD/MMC memory OCR value by issuing CMD55 followed
	 * by ACMD41 to read the OCR value from memory-only SD cards.
	 * MMC cards will not respond to CMD55 or ACMD41 and this is
	 * how we distinguish them from SD cards.
	 */
mmc_mode:
	error = sdmmc_mem_send_op_cond(sc,
	    ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE) ? ocr : 0, &card_ocr);
	if (error) {
		if (ISSET(sc->sc_flags, SMF_SD_MODE) &&
		    !ISSET(sc->sc_flags, SMF_IO_MODE)) {
			/* Not a SD card, switch to MMC mode. */
			DPRINTF(("%s: switch to MMC mode\n", SDMMCDEVNAME(sc)));
			CLR(sc->sc_flags, SMF_SD_MODE);
			goto mmc_mode;
		}
		if (!ISSET(sc->sc_flags, SMF_SD_MODE)) {
			/* Neither SD nor MMC responded: no memory here. */
			DPRINTF(("%s: couldn't read memory OCR\n",
			    SDMMCDEVNAME(sc)));
			goto out;
		} else {
			/* Not a "combo" card: SDIO only, no memory. */
			CLR(sc->sc_flags, SMF_MEM_MODE);
			error = 0;
			goto out;
		}
	}
	if (ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
		/* get card OCR (SPI mode has no R3 response to ACMD41) */
		error = sdmmc_mem_spi_read_ocr(sc, ocr, &card_ocr);
		if (error) {
			DPRINTF(("%s: couldn't read SPI memory OCR\n",
			    SDMMCDEVNAME(sc)));
			goto out;
		}
	}

	/* Set the lowest voltage supported by the card and host. */
	host_ocr = sdmmc_chip_host_ocr(sc->sc_sct, sc->sc_sch);
	error = sdmmc_set_bus_power(sc, host_ocr, card_ocr);
	if (error) {
		DPRINTF(("%s: couldn't supply voltage requested by card\n",
		    SDMMCDEVNAME(sc)));
		goto out;
	}

	DPRINTF(("%s: host_ocr 0x%08x\n", SDMMCDEVNAME(sc), host_ocr));
	DPRINTF(("%s: card_ocr 0x%08x\n", SDMMCDEVNAME(sc), card_ocr));

	host_ocr &= card_ocr;	/* only allow the common voltages */
	if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
		if (ISSET(sc->sc_flags, SMF_SD_MODE)) {
			/* Tell the card(s) to enter the idle state (again). */
			sdmmc_go_idle_state(sc);
			/* Check SD Ver.2 (0x1aa: see comment above) */
			error = sdmmc_mem_send_if_cond(sc, 0x1aa, &card_ocr);
			if (error == 0 && card_ocr == 0x1aa)
				SET(ocr, MMC_OCR_HCS);

			/* Offer 1.8V (UHS-I) signaling if the host can. */
			if (sdmmc_chip_host_ocr(sc->sc_sct, sc->sc_sch) & MMC_OCR_S18A)
				SET(ocr, MMC_OCR_S18A);
		} else {
			/* MMC: request sector-based addressing. */
			SET(ocr, MMC_OCR_ACCESS_MODE_SECTOR);
		}
	}
	host_ocr |= ocr;

	/* Send the new OCR value until all cards are ready. */
	error = sdmmc_mem_send_op_cond(sc, host_ocr, &new_ocr);
	if (error) {
		DPRINTF(("%s: couldn't send memory OCR\n", SDMMCDEVNAME(sc)));
		goto out;
	}

	if (ISSET(sc->sc_flags, SMF_SD_MODE) && ISSET(new_ocr, MMC_OCR_S18A)) {
		/*
		 * Card and host support low voltage mode, begin switch
		 * sequence.
		 */
		struct sdmmc_command cmd;
		memset(&cmd, 0, sizeof(cmd));
		cmd.c_arg = 0;
		cmd.c_flags = SCF_CMD_AC | SCF_RSP_R1;
		cmd.c_opcode = SD_VOLTAGE_SWITCH;
		DPRINTF(("%s: switching card to 1.8V\n", SDMMCDEVNAME(sc)));
		error = sdmmc_mmc_command(sc, &cmd);
		if (error) {
			DPRINTF(("%s: voltage switch command failed\n",
			    SDMMCDEVNAME(sc)));
			goto out;
		}

		/* Card accepted CMD11: switch host signaling to 1.8V too. */
		error = sdmmc_mem_signal_voltage(sc, SDMMC_SIGNAL_VOLTAGE_180);
		if (error)
			goto out;

		SET(sc->sc_flags, SMF_UHS_MODE);
	}

out:
	SDMMC_UNLOCK(sc);

	if (error)
		printf("%s: %s failed with error %d\n", SDMMCDEVNAME(sc),
		    __func__, error);

	return error;
}
263
/*
 * Change the host controller's signaling voltage as part of the SD
 * voltage-switch sequence: gate SDCLK off, reprogram the regulator,
 * then restart the clock at the SDR12 (25MHz) rate.  The delay()s
 * bracket the switch to give the card time to settle at each step.
 * Returns 0 on success or the first chip-layer error.
 */
static int
sdmmc_mem_signal_voltage(struct sdmmc_softc *sc, int signal_voltage)
{
	int error;

	/*
	 * Stop the clock
	 */
	error = sdmmc_chip_bus_clock(sc->sc_sct, sc->sc_sch,
	    SDMMC_SDCLK_OFF, false);
	if (error)
		goto out;

	delay(1000);

	/*
	 * Card switch command was successful, update host controller
	 * signal voltage setting.
	 */
	DPRINTF(("%s: switching host to %s\n", SDMMCDEVNAME(sc),
	    signal_voltage == SDMMC_SIGNAL_VOLTAGE_180 ? "1.8V" : "3.3V"));
	error = sdmmc_chip_signal_voltage(sc->sc_sct,
	    sc->sc_sch, signal_voltage);
	if (error)
		goto out;

	delay(5000);

	/*
	 * Switch to SDR12 timing
	 */
	error = sdmmc_chip_bus_clock(sc->sc_sct, sc->sc_sch, 25000,
	    false);
	if (error)
		goto out;

	delay(1000);

out:
	return error;
}
305
306 /*
307 * Read the CSD and CID from all cards and assign each card a unique
308 * relative card address (RCA). CMD2 is ignored by SDIO-only cards.
309 */
void
sdmmc_mem_scan(struct sdmmc_softc *sc)
{
	sdmmc_response resp;
	struct sdmmc_function *sf;
	uint16_t next_rca;
	int error;
	int retry;

	SDMMC_LOCK(sc);

	/*
	 * CMD2 is a broadcast command understood by SD cards and MMC
	 * cards.  All cards begin to respond to the command, but back
	 * off if another card drives the CMD line to a different level.
	 * Only one card will get its entire response through.  That
	 * card remains silent once it has been assigned a RCA.
	 */
	for (retry = 0; retry < 100; retry++) {
		error = sdmmc_mem_send_cid(sc, &resp);
		if (error) {
			if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE) &&
			    error == ETIMEDOUT) {
				/* No more cards there. */
				break;
			}
			DPRINTF(("%s: couldn't read CID\n", SDMMCDEVNAME(sc)));
			break;
		}

		/* In MMC mode, find the next available RCA. */
		next_rca = 1;
		if (!ISSET(sc->sc_flags, SMF_SD_MODE)) {
			SIMPLEQ_FOREACH(sf, &sc->sf_head, sf_list)
				next_rca++;
		}

		/*
		 * Allocate a sdmmc_function structure.
		 * NOTE(review): the result is used without a NULL
		 * check; presumably sdmmc_function_alloc() cannot
		 * fail (sleeping allocation) -- confirm against its
		 * definition.
		 */
		sf = sdmmc_function_alloc(sc);
		sf->rca = next_rca;

		/*
		 * Remember the CID returned in the CMD2 response for
		 * later decoding.
		 */
		memcpy(sf->raw_cid, resp, sizeof(sf->raw_cid));

		/*
		 * Silence the card by assigning it a unique RCA, or
		 * querying it for its RCA in the case of SD.
		 */
		if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
			if (sdmmc_set_relative_addr(sc, sf) != 0) {
				aprint_error_dev(sc->sc_dev,
				    "couldn't set mem RCA\n");
				sdmmc_function_free(sf);
				break;
			}
		}

		/*
		 * If this is a memory-only card, the card responding
		 * first becomes an alias for SDIO function 0.
		 */
		if (sc->sc_fn0 == NULL)
			sc->sc_fn0 = sf;

		SIMPLEQ_INSERT_TAIL(&sc->sf_head, sf, sf_list);

		/* only one function in SPI mode */
		if (ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE))
			break;
	}

	if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE))
		/* Go to Data Transfer Mode, if possible. */
		sdmmc_chip_bus_rod(sc->sc_sct, sc->sc_sch, 0);

	/*
	 * All cards are either inactive or awaiting further commands.
	 * Read the CSDs and decode the raw CID for each card.
	 */
	SIMPLEQ_FOREACH(sf, &sc->sf_head, sf_list) {
		error = sdmmc_mem_send_csd(sc, sf, &resp);
		if (error) {
			/* Flag the card bad but keep scanning others. */
			SET(sf->flags, SFF_ERROR);
			continue;
		}

		if (sdmmc_decode_csd(sc, resp, sf) != 0 ||
		    sdmmc_decode_cid(sc, sf->raw_cid, sf) != 0) {
			SET(sf->flags, SFF_ERROR);
			continue;
		}

#ifdef SDMMC_DEBUG
		printf("%s: CID: ", SDMMCDEVNAME(sc));
		sdmmc_print_cid(&sf->cid);
#endif
	}

	SDMMC_UNLOCK(sc);
}
413
/*
 * Decode the raw 128-bit CSD register in 'resp' into sf->csd.
 * Returns 0 on success, 1 if the CSD structure version is not
 * supported (an error has already been printed).
 */
int
sdmmc_decode_csd(struct sdmmc_softc *sc, sdmmc_response resp,
    struct sdmmc_function *sf)
{
	/* TRAN_SPEED(2:0): transfer rate exponent */
	static const int speed_exponent[8] = {
		100 * 1,	/* 100 Kbits/s */
		1 * 1000,	/* 1 Mbits/s */
		10 * 1000,	/* 10 Mbits/s */
		100 * 1000,	/* 100 Mbits/s */
		0,
		0,
		0,
		0,
	};
	/* TRAN_SPEED(6:3): time mantissa */
	static const int speed_mantissa[16] = {
		0, 10, 12, 13, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60, 70, 80,
	};
	struct sdmmc_csd *csd = &sf->csd;
	int e, m;

	if (ISSET(sc->sc_flags, SMF_SD_MODE)) {
		/*
		 * CSD version 1.0 corresponds to SD system
		 * specification version 1.0 - 1.10. (SanDisk, 3.5.3)
		 */
		csd->csdver = SD_CSD_CSDVER(resp);
		switch (csd->csdver) {
		case SD_CSD_CSDVER_2_0:
			/* SDHC/SDXC: dedicated capacity field, fixed
			 * block length. */
			DPRINTF(("%s: SD Ver.2.0\n", SDMMCDEVNAME(sc)));
			SET(sf->flags, SFF_SDHC);
			csd->capacity = SD_CSD_V2_CAPACITY(resp);
			csd->read_bl_len = SD_CSD_V2_BL_LEN;
			break;

		case SD_CSD_CSDVER_1_0:
			DPRINTF(("%s: SD Ver.1.0\n", SDMMCDEVNAME(sc)));
			csd->capacity = SD_CSD_CAPACITY(resp);
			csd->read_bl_len = SD_CSD_READ_BL_LEN(resp);
			break;

		default:
			aprint_error_dev(sc->sc_dev,
			    "unknown SD CSD structure version 0x%x\n",
			    csd->csdver);
			return 1;
		}

		csd->mmcver = SD_CSD_MMCVER(resp);
		csd->write_bl_len = SD_CSD_WRITE_BL_LEN(resp);
		csd->r2w_factor = SD_CSD_R2W_FACTOR(resp);
		/* tran_speed (kbit/s) = mantissa/10 x 10^exponent. */
		e = SD_CSD_SPEED_EXP(resp);
		m = SD_CSD_SPEED_MANT(resp);
		csd->tran_speed = speed_exponent[e] * speed_mantissa[m] / 10;
		csd->ccc = SD_CSD_CCC(resp);
	} else {
		csd->csdver = MMC_CSD_CSDVER(resp);
		if (csd->csdver == MMC_CSD_CSDVER_1_0) {
			/* MMC CSD structure version 1.0 is rejected. */
			aprint_error_dev(sc->sc_dev,
			    "unknown MMC CSD structure version 0x%x\n",
			    csd->csdver);
			return 1;
		}

		csd->mmcver = MMC_CSD_MMCVER(resp);
		csd->capacity = MMC_CSD_CAPACITY(resp);
		csd->read_bl_len = MMC_CSD_READ_BL_LEN(resp);
		csd->write_bl_len = MMC_CSD_WRITE_BL_LEN(resp);
		csd->r2w_factor = MMC_CSD_R2W_FACTOR(resp);
		e = MMC_CSD_TRAN_SPEED_EXP(resp);
		m = MMC_CSD_TRAN_SPEED_MANT(resp);
		csd->tran_speed = speed_exponent[e] * speed_mantissa[m] / 10;
	}
	/* Normalize capacity to SDMMC_SECTOR_SIZE (512-byte) units. */
	if ((1 << csd->read_bl_len) > SDMMC_SECTOR_SIZE)
		csd->capacity *= (1 << csd->read_bl_len) / SDMMC_SECTOR_SIZE;

#ifdef SDMMC_DUMP_CSD
	sdmmc_print_csd(resp, csd);
#endif

	return 0;
}
497
/*
 * Decode the raw CID register in 'resp' into sf->cid.  The layout
 * differs between SD and the various MMC generations; the MMC
 * version was established earlier by sdmmc_decode_csd().
 * Returns 0 on success, 1 for an unknown MMC version.
 */
int
sdmmc_decode_cid(struct sdmmc_softc *sc, sdmmc_response resp,
    struct sdmmc_function *sf)
{
	struct sdmmc_cid *cid = &sf->cid;

	if (ISSET(sc->sc_flags, SMF_SD_MODE)) {
		cid->mid = SD_CID_MID(resp);
		cid->oid = SD_CID_OID(resp);
		SD_CID_PNM_CPY(resp, cid->pnm);
		cid->rev = SD_CID_REV(resp);
		cid->psn = SD_CID_PSN(resp);
		cid->mdt = SD_CID_MDT(resp);
	} else {
		switch(sf->csd.mmcver) {
		case MMC_CSD_MMCVER_1_0:
		case MMC_CSD_MMCVER_1_4:
			/* V1 layout: note no OID field is assigned here. */
			cid->mid = MMC_CID_MID_V1(resp);
			MMC_CID_PNM_V1_CPY(resp, cid->pnm);
			cid->rev = MMC_CID_REV_V1(resp);
			cid->psn = MMC_CID_PSN_V1(resp);
			cid->mdt = MMC_CID_MDT_V1(resp);
			break;
		case MMC_CSD_MMCVER_2_0:
		case MMC_CSD_MMCVER_3_1:
		case MMC_CSD_MMCVER_4_0:
			/* V2 layout: note no rev/mdt assigned here. */
			cid->mid = MMC_CID_MID_V2(resp);
			cid->oid = MMC_CID_OID_V2(resp);
			MMC_CID_PNM_V2_CPY(resp, cid->pnm);
			cid->psn = MMC_CID_PSN_V2(resp);
			break;
		default:
			aprint_error_dev(sc->sc_dev, "unknown MMC version %d\n",
			    sf->csd.mmcver);
			return 1;
		}
	}
	return 0;
}
537
/* Print a decoded CID on one line (used by the SDMMC_DEBUG scan path). */
void
sdmmc_print_cid(struct sdmmc_cid *cid)
{

	printf("mid=0x%02x oid=0x%04x pnm=\"%s\" rev=0x%02x psn=0x%08x"
	    " mdt=%03x\n", cid->mid, cid->oid, cid->pnm, cid->rev, cid->psn,
	    cid->mdt);
}
546
547 #ifdef SDMMC_DUMP_CSD
/* Dump the decoded CSD fields, one per line (SDMMC_DUMP_CSD only). */
void
sdmmc_print_csd(sdmmc_response resp, struct sdmmc_csd *csd)
{

	printf("csdver = %d\n", csd->csdver);
	printf("mmcver = %d\n", csd->mmcver);
	printf("capacity = 0x%08x\n", csd->capacity);
	printf("read_bl_len = %d\n", csd->read_bl_len);
	printf("write_bl_len = %d\n", csd->write_bl_len);
	printf("r2w_factor = %d\n", csd->r2w_factor);
	printf("tran_speed = %d\n", csd->tran_speed);
	printf("ccc = 0x%x\n", csd->ccc);
}
561 #endif
562
563 /*
564 * Initialize a SD/MMC memory card.
565 */
int
sdmmc_mem_init(struct sdmmc_softc *sc, struct sdmmc_function *sf)
{
	int error = 0;

	/* Select the card, set the block length, then do the
	 * SD- or MMC-specific bus configuration. */
	SDMMC_LOCK(sc);

	if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
		/* SPI has no card-select phase. */
		error = sdmmc_select_card(sc, sf);
		if (error)
			goto out;
	}

	/* Use 512-byte sectors regardless of the CSD default. */
	error = sdmmc_mem_set_blocklen(sc, sf, SDMMC_SECTOR_SIZE);
	if (error)
		goto out;

	if (ISSET(sc->sc_flags, SMF_SD_MODE))
		error = sdmmc_mem_sd_init(sc, sf);
	else
		error = sdmmc_mem_mmc_init(sc, sf);

out:
	SDMMC_UNLOCK(sc);

	return error;
}
593
594 /*
595 * Get or set the card's memory OCR value (SD or MMC).
596 */
int
sdmmc_mem_send_op_cond(struct sdmmc_softc *sc, uint32_t ocr, uint32_t *ocrp)
{
	struct sdmmc_command cmd;
	int error;
	int retry;

	/* Don't lock */

	DPRINTF(("%s: sdmmc_mem_send_op_cond: ocr=%#x\n",
	    SDMMCDEVNAME(sc), ocr));

	/*
	 * If we change the OCR value, retry the command until the OCR
	 * we receive in response has the "CARD BUSY" bit set, meaning
	 * that all cards are ready for identification.
	 */
	for (retry = 0; retry < 100; retry++) {
		memset(&cmd, 0, sizeof(cmd));
		/* In SPI mode only the HCS bit of the argument is used. */
		cmd.c_arg = !ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE) ?
		    ocr : (ocr & MMC_OCR_HCS);
		/* SCF_TOUT_OK: a timeout here is an expected probe result. */
		cmd.c_flags = SCF_CMD_BCR | SCF_RSP_R3 | SCF_RSP_SPI_R1
		    | SCF_TOUT_OK;

		if (ISSET(sc->sc_flags, SMF_SD_MODE)) {
			/* SD: ACMD41 (sdmmc_app_command sends the
			 * CMD55 prefix). */
			cmd.c_opcode = SD_APP_OP_COND;
			error = sdmmc_app_command(sc, NULL, &cmd);
		} else {
			/* MMC: CMD1. */
			cmd.c_opcode = MMC_SEND_OP_COND;
			error = sdmmc_mmc_command(sc, &cmd);
		}
		if (error)
			break;

		if (ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
			/* SPI: ready once the in-idle-state bit clears. */
			if (!ISSET(MMC_SPI_R1(cmd.c_resp), R1_SPI_IDLE))
				break;
		} else {
			/*
			 * Ready when the busy bit is set, or immediately
			 * if this was only an OCR query (ocr == 0).
			 */
			if (ISSET(MMC_R3(cmd.c_resp), MMC_OCR_MEM_READY) ||
			    ocr == 0)
				break;
		}

		error = ETIMEDOUT;
		sdmmc_delay(10000);
	}
	/* Return the card OCR; SPI mode has no R3 to report. */
	if (error == 0 &&
	    ocrp != NULL &&
	    !ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE))
		*ocrp = MMC_R3(cmd.c_resp);
	DPRINTF(("%s: sdmmc_mem_send_op_cond: error=%d, ocr=%#x\n",
	    SDMMCDEVNAME(sc), error, MMC_R3(cmd.c_resp)));
	return error;
}
651
/*
 * SD SEND_IF_COND (CMD8): probe for SD spec 2.0 support.  'ocr' is
 * the CMD8 argument (voltage range plus check pattern, normally
 * 0x1aa); a 2.0-capable card echoes it back in R7, which is stored
 * via 'ocrp' for the caller to compare.  Older cards time out.
 */
int
sdmmc_mem_send_if_cond(struct sdmmc_softc *sc, uint32_t ocr, uint32_t *ocrp)
{
	struct sdmmc_command cmd;
	int error;

	/* Don't lock */

	memset(&cmd, 0, sizeof(cmd));
	cmd.c_arg = ocr;
	cmd.c_flags = SCF_CMD_BCR | SCF_RSP_R7 | SCF_RSP_SPI_R7;
	cmd.c_opcode = SD_SEND_IF_COND;

	error = sdmmc_mmc_command(sc, &cmd);
	if (error == 0 && ocrp != NULL) {
		if (ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
			*ocrp = MMC_SPI_R7(cmd.c_resp);
		} else {
			*ocrp = MMC_R7(cmd.c_resp);
		}
		DPRINTF(("%s: sdmmc_mem_send_if_cond: error=%d, ocr=%#x\n",
		    SDMMCDEVNAME(sc), error, *ocrp));
	}
	return error;
}
677
678 /*
679 * Set the read block length appropriately for this card, according to
680 * the card CSD register value.
681 */
682 int
683 sdmmc_mem_set_blocklen(struct sdmmc_softc *sc, struct sdmmc_function *sf,
684 int block_len)
685 {
686 struct sdmmc_command cmd;
687 int error;
688
689 /* Don't lock */
690
691 memset(&cmd, 0, sizeof(cmd));
692 cmd.c_opcode = MMC_SET_BLOCKLEN;
693 cmd.c_arg = block_len;
694 cmd.c_flags = SCF_CMD_AC | SCF_RSP_R1 | SCF_RSP_SPI_R1;
695
696 error = sdmmc_mmc_command(sc, &cmd);
697
698 DPRINTF(("%s: sdmmc_mem_set_blocklen: read_bl_len=%d sector_size=%d\n",
699 SDMMCDEVNAME(sc), 1 << sf->csd.read_bl_len, block_len));
700
701 return error;
702 }
703
704 /* make 512-bit BE quantity __bitfield()-compatible */
705 static void
706 sdmmc_be512_to_bitfield512(sdmmc_bitfield512_t *buf) {
707 size_t i;
708 uint32_t tmp0, tmp1;
709 const size_t bitswords = __arraycount(buf->_bits);
710 for (i = 0; i < bitswords/2; i++) {
711 tmp0 = buf->_bits[i];
712 tmp1 = buf->_bits[bitswords - 1 - i];
713 buf->_bits[i] = be32toh(tmp1);
714 buf->_bits[bitswords - 1 - i] = be32toh(tmp0);
715 }
716 }
717
718 static int
719 sdmmc_mem_select_transfer_mode(struct sdmmc_softc *sc, int support_func)
720 {
721 if (ISSET(sc->sc_flags, SMF_UHS_MODE)) {
722 if (ISSET(sc->sc_caps, SMC_CAPS_UHS_SDR104) &&
723 ISSET(support_func, 1 << SD_ACCESS_MODE_SDR104)) {
724 return SD_ACCESS_MODE_SDR104;
725 }
726 if (ISSET(sc->sc_caps, SMC_CAPS_UHS_DDR50) &&
727 ISSET(support_func, 1 << SD_ACCESS_MODE_DDR50)) {
728 return SD_ACCESS_MODE_DDR50;
729 }
730 if (ISSET(sc->sc_caps, SMC_CAPS_UHS_SDR50) &&
731 ISSET(support_func, 1 << SD_ACCESS_MODE_SDR50)) {
732 return SD_ACCESS_MODE_SDR50;
733 }
734 }
735 if (ISSET(sc->sc_caps, SMC_CAPS_SD_HIGHSPEED) &&
736 ISSET(support_func, 1 << SD_ACCESS_MODE_SDR25)) {
737 return SD_ACCESS_MODE_SDR25;
738 }
739 return SD_ACCESS_MODE_SDR12;
740 }
741
742 static int
743 sdmmc_mem_execute_tuning(struct sdmmc_softc *sc, struct sdmmc_function *sf)
744 {
745 int timing = -1;
746
747 if (!ISSET(sc->sc_flags, SMF_UHS_MODE))
748 return 0;
749
750 if (ISSET(sc->sc_flags, SMF_SD_MODE)) {
751 if (!ISSET(sc->sc_flags, SMF_UHS_MODE))
752 return 0;
753
754 switch (sf->csd.tran_speed) {
755 case 100000:
756 timing = SDMMC_TIMING_UHS_SDR50;
757 break;
758 case 208000:
759 timing = SDMMC_TIMING_UHS_SDR104;
760 break;
761 default:
762 return 0;
763 }
764 } else {
765 switch (sf->csd.tran_speed) {
766 case 200000:
767 timing = SDMMC_TIMING_MMC_HS200;
768 break;
769 default:
770 return 0;
771 }
772 }
773
774 DPRINTF(("%s: execute tuning for timing %d\n", SDMMCDEVNAME(sc),
775 timing));
776
777 return sdmmc_chip_execute_tuning(sc->sc_sct, sc->sc_sch, timing);
778 }
779
/*
 * SD-specific bus setup: raise the clock, fetch and decode the SCR,
 * widen the bus to 4 bits if possible, then use CMD6 (SWITCH_FUNC)
 * to select the fastest group-1 access mode shared by host and card,
 * and finally run tuning for UHS modes.
 */
static int
sdmmc_mem_sd_init(struct sdmmc_softc *sc, struct sdmmc_function *sf)
{
	int support_func, best_func, bus_clock, error, i;
	sdmmc_bitfield512_t status;	/* Switch Function Status */
	bool ddr = false;

	/* change bus clock */
	bus_clock = min(sc->sc_busclk, sf->csd.tran_speed);
	error = sdmmc_chip_bus_clock(sc->sc_sct, sc->sc_sch, bus_clock, false);
	if (error) {
		aprint_error_dev(sc->sc_dev, "can't change bus clock\n");
		return error;
	}

	error = sdmmc_mem_send_scr(sc, sf, sf->raw_scr);
	if (error) {
		aprint_error_dev(sc->sc_dev, "SD_SEND_SCR send failed.\n");
		return error;
	}
	error = sdmmc_mem_decode_scr(sc, sf);
	if (error)
		return error;

	/* Use a 4-bit bus if both host and card support it. */
	if (ISSET(sc->sc_caps, SMC_CAPS_4BIT_MODE) &&
	    ISSET(sf->scr.bus_width, SCR_SD_BUS_WIDTHS_4BIT)) {
		DPRINTF(("%s: change bus width\n", SDMMCDEVNAME(sc)));
		error = sdmmc_set_bus_width(sf, 4);
		if (error) {
			aprint_error_dev(sc->sc_dev,
			    "can't change bus width (%d bit)\n", 4);
			return error;
		}
		sf->width = 4;
	}

	best_func = 0;
	/* CMD6 requires SD spec >= 1.10 and command class 10 (switch). */
	if (sf->scr.sd_spec >= SCR_SD_SPEC_VER_1_10 &&
	    ISSET(sf->csd.ccc, SD_CSD_CCC_SWITCH)) {
		DPRINTF(("%s: switch func mode 0\n", SDMMCDEVNAME(sc)));
		/* Mode 0 = query: learn the supported group-1 functions. */
		error = sdmmc_mem_sd_switch(sf, 0, 1, 0, &status);
		if (error) {
			aprint_error_dev(sc->sc_dev,
			    "switch func mode 0 failed\n");
			return error;
		}

		support_func = SFUNC_STATUS_GROUP(&status, 1);

		/*
		 * 0x1c = SDR50|SDR104|DDR50 bits: a card advertising
		 * these while we are not in UHS mode was presumably
		 * left in 1.8V signaling by an earlier init, so switch
		 * the host signaling now to match.
		 */
		if (!ISSET(sc->sc_flags, SMF_UHS_MODE) && support_func & 0x1c) {
			/* XXX UHS-I card started in 1.8V mode, switch now */
			error = sdmmc_mem_signal_voltage(sc,
			    SDMMC_SIGNAL_VOLTAGE_180);
			if (error) {
				aprint_error_dev(sc->sc_dev,
				    "failed to recover UHS card\n");
				return error;
			}
			SET(sc->sc_flags, SMF_UHS_MODE);
		}

		for (i = 0; i < __arraycount(switch_group0_functions); i++) {
			if (!(support_func & (1 << i)))
				continue;
			DPRINTF(("%s: card supports mode %s\n",
			    SDMMCDEVNAME(sc),
			    switch_group0_functions[i].name));
		}

		best_func = sdmmc_mem_select_transfer_mode(sc, support_func);

		DPRINTF(("%s: using mode %s\n", SDMMCDEVNAME(sc),
		    switch_group0_functions[best_func].name));

		if (best_func != 0) {
			DPRINTF(("%s: switch func mode 1(func=%d)\n",
			    SDMMCDEVNAME(sc), best_func));
			/* Mode 1 = set: actually switch the access mode. */
			error =
			    sdmmc_mem_sd_switch(sf, 1, 1, best_func, &status);
			if (error) {
				aprint_error_dev(sc->sc_dev,
				    "switch func mode 1 failed:"
				    " group 1 function %d(0x%2x)\n",
				    best_func, support_func);
				return error;
			}
			sf->csd.tran_speed =
			    switch_group0_functions[best_func].freq;

			if (best_func == SD_ACCESS_MODE_DDR50)
				ddr = true;

			/* Wait 400KHz x 8 clock (2.5us * 8 + slop) */
			delay(25);
		}
	}

	/* update bus clock */
	if (sc->sc_busclk > sf->csd.tran_speed)
		sc->sc_busclk = sf->csd.tran_speed;
	/* Nothing changed: the initial clock/DDR setting is already right. */
	if (sc->sc_busclk == bus_clock && sc->sc_busddr == ddr)
		return 0;

	/* change bus clock */
	error = sdmmc_chip_bus_clock(sc->sc_sct, sc->sc_sch, sc->sc_busclk,
	    ddr);
	if (error) {
		aprint_error_dev(sc->sc_dev, "can't change bus clock\n");
		return error;
	}

	sc->sc_transfer_mode = switch_group0_functions[best_func].name;
	sc->sc_busddr = ddr;

	/* execute tuning (UHS) */
	error = sdmmc_mem_execute_tuning(sc, sf);
	if (error) {
		aprint_error_dev(sc->sc_dev, "can't execute SD tuning\n");
		return error;
	}

	return 0;
}
903
/*
 * MMC/eMMC-specific bus setup: for v4.0+ devices read the EXT_CSD,
 * pick the fastest timing host and card share (HS200 > DDR52 > 52MHz
 * high-speed > 26MHz legacy), widen the bus, verify HS_TIMING took
 * effect, and take the real capacity from SEC_COUNT for >2GB parts.
 * Pre-v4.0 devices only get their bus clock adjusted.
 */
static int
sdmmc_mem_mmc_init(struct sdmmc_softc *sc, struct sdmmc_function *sf)
{
	int width, value, hs_timing, bus_clock, error;
	uint8_t ext_csd[512];
	uint32_t sectors = 0;
	bool ddr = false;

	sc->sc_transfer_mode = NULL;

	/* change bus clock */
	bus_clock = min(sc->sc_busclk, sf->csd.tran_speed);
	error = sdmmc_chip_bus_clock(sc->sc_sct, sc->sc_sch, bus_clock, false);
	if (error) {
		aprint_error_dev(sc->sc_dev, "can't change bus clock\n");
		return error;
	}

	if (sf->csd.mmcver >= MMC_CSD_MMCVER_4_0) {
		/* EXT_CSD exists only from MMC v4.0 on. */
		error = sdmmc_mem_send_cxd_data(sc,
		    MMC_SEND_EXT_CSD, ext_csd, sizeof(ext_csd));
		if (error) {
			aprint_error_dev(sc->sc_dev,
			    "can't read EXT_CSD (error=%d)\n", error);
			return error;
		}
		if ((sf->csd.csdver == MMC_CSD_CSDVER_EXT_CSD) &&
		    (ext_csd[EXT_CSD_STRUCTURE] > EXT_CSD_STRUCTURE_VER_1_2)) {
			aprint_error_dev(sc->sc_dev,
			    "unrecognised future version (%d)\n",
			    ext_csd[EXT_CSD_STRUCTURE]);
			return ENOTSUP;
		}
		sf->ext_csd.rev = ext_csd[EXT_CSD_REV];

		/* Pick the best timing from CARD_TYPE vs. host caps. */
		if (ISSET(sc->sc_caps, SMC_CAPS_MMC_HS200) &&
		    ext_csd[EXT_CSD_CARD_TYPE] & EXT_CSD_CARD_TYPE_F_HS200_1_8V) {
			sf->csd.tran_speed = 200000;	/* 200MHz SDR */
			hs_timing = EXT_CSD_HS_TIMING_HS200;
		} else if (ISSET(sc->sc_caps, SMC_CAPS_MMC_DDR52) &&
		    ext_csd[EXT_CSD_CARD_TYPE] & EXT_CSD_CARD_TYPE_F_DDR52_1_8V) {
			sf->csd.tran_speed = 52000;	/* 52MHz */
			hs_timing = EXT_CSD_HS_TIMING_HIGHSPEED;
			ddr = true;
		} else if (ext_csd[EXT_CSD_CARD_TYPE] & EXT_CSD_CARD_TYPE_F_52M) {
			sf->csd.tran_speed = 52000;	/* 52MHz */
			hs_timing = EXT_CSD_HS_TIMING_HIGHSPEED;
		} else if (ext_csd[EXT_CSD_CARD_TYPE] & EXT_CSD_CARD_TYPE_F_26M) {
			sf->csd.tran_speed = 26000;	/* 26MHz */
			hs_timing = EXT_CSD_HS_TIMING_LEGACY;
		} else {
			aprint_error_dev(sc->sc_dev,
			    "unknown CARD_TYPE: 0x%x\n",
			    ext_csd[EXT_CSD_CARD_TYPE]);
			return ENOTSUP;
		}

		/* Widest bus the host supports (card v4.0+ can do 8-bit). */
		if (ISSET(sc->sc_caps, SMC_CAPS_8BIT_MODE)) {
			width = 8;
			value = EXT_CSD_BUS_WIDTH_8;
		} else if (ISSET(sc->sc_caps, SMC_CAPS_4BIT_MODE)) {
			width = 4;
			value = EXT_CSD_BUS_WIDTH_4;
		} else {
			width = 1;
			value = EXT_CSD_BUS_WIDTH_1;
		}

		if (width != 1) {
			/* Switch card first, then host, to the new width. */
			error = sdmmc_mem_mmc_switch(sf, EXT_CSD_CMD_SET_NORMAL,
			    EXT_CSD_BUS_WIDTH, value);
			if (error == 0)
				error = sdmmc_chip_bus_width(sc->sc_sct,
				    sc->sc_sch, width);
			else {
				DPRINTF(("%s: can't change bus width"
				    " (%d bit)\n", SDMMCDEVNAME(sc), width));
				return error;
			}

			/* XXXX: need bus test? (using by CMD14 & CMD19) */
			delay(10000);
		}
		sf->width = width;

		/* Fall back to legacy if the host can't do high speed. */
		if (hs_timing == EXT_CSD_HS_TIMING_HIGHSPEED &&
		    !ISSET(sc->sc_caps, SMC_CAPS_MMC_HIGHSPEED)) {
			hs_timing = EXT_CSD_HS_TIMING_LEGACY;
		}
		if (hs_timing != EXT_CSD_HS_TIMING_LEGACY) {
			error = sdmmc_mem_mmc_switch(sf, EXT_CSD_CMD_SET_NORMAL,
			    EXT_CSD_HS_TIMING, hs_timing);
			if (error) {
				aprint_error_dev(sc->sc_dev,
				    "can't change high speed %d, error %d\n",
				    hs_timing, error);
				return error;
			}
		}

		if (sc->sc_busclk > sf->csd.tran_speed)
			sc->sc_busclk = sf->csd.tran_speed;
		if (sc->sc_busclk != bus_clock) {
			error = sdmmc_chip_bus_clock(sc->sc_sct, sc->sc_sch,
			    sc->sc_busclk, false);
			if (error) {
				aprint_error_dev(sc->sc_dev,
				    "can't change bus clock\n");
				return error;
			}
		}

		if (hs_timing != EXT_CSD_HS_TIMING_LEGACY) {
			/* Re-read EXT_CSD to verify the switch took effect. */
			error = sdmmc_mem_send_cxd_data(sc,
			    MMC_SEND_EXT_CSD, ext_csd, sizeof(ext_csd));
			if (error) {
				aprint_error_dev(sc->sc_dev,
				    "can't re-read EXT_CSD\n");
				return error;
			}
			if (ext_csd[EXT_CSD_HS_TIMING] != hs_timing) {
				aprint_error_dev(sc->sc_dev,
				    "HS_TIMING set failed\n");
				return EINVAL;
			}
		}

		/*
		 * HS_TIMING must be set to 0x1 before setting BUS_WIDTH
		 * for dual data rate operation
		 */
		if (ddr &&
		    hs_timing == EXT_CSD_HS_TIMING_HIGHSPEED &&
		    width > 1) {
			error = sdmmc_mem_mmc_switch(sf,
			    EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BUS_WIDTH,
			    (width == 8) ? EXT_CSD_BUS_WIDTH_8_DDR :
			    EXT_CSD_BUS_WIDTH_4_DDR);
			if (error) {
				DPRINTF(("%s: can't switch to DDR"
				    " (%d bit)\n", SDMMCDEVNAME(sc), width));
				return error;
			}

			delay(10000);

			/* DDR52 runs on 1.8V signaling. */
			error = sdmmc_mem_signal_voltage(sc,
			    SDMMC_SIGNAL_VOLTAGE_180);
			if (error) {
				aprint_error_dev(sc->sc_dev,
				    "can't switch signaling voltage\n");
				return error;
			}

			error = sdmmc_chip_bus_clock(sc->sc_sct, sc->sc_sch,
			    sc->sc_busclk, ddr);
			if (error) {
				aprint_error_dev(sc->sc_dev,
				    "can't change bus clock\n");
				return error;
			}

			delay(10000);

			sc->sc_transfer_mode = "DDR52";
			sc->sc_busddr = ddr;
		}

		/* SEC_COUNT is a 32-bit little-endian byte quartet. */
		sectors = ext_csd[EXT_CSD_SEC_COUNT + 0] << 0 |
		    ext_csd[EXT_CSD_SEC_COUNT + 1] << 8  |
		    ext_csd[EXT_CSD_SEC_COUNT + 2] << 16 |
		    ext_csd[EXT_CSD_SEC_COUNT + 3] << 24;
		/* For >2GB devices the CSD capacity is invalid; use
		 * SEC_COUNT and treat the card as sector-addressed. */
		if (sectors > (2u * 1024 * 1024 * 1024) / 512) {
			SET(sf->flags, SFF_SDHC);
			sf->csd.capacity = sectors;
		}

		if (hs_timing == EXT_CSD_HS_TIMING_HS200) {
			sc->sc_transfer_mode = "HS200";

			/* execute tuning (HS200) */
			error = sdmmc_mem_execute_tuning(sc, sf);
			if (error) {
				aprint_error_dev(sc->sc_dev,
				    "can't execute MMC tuning\n");
				return error;
			}
		}

		/* RST_N_FUNCTION appears in EXT_CSD revision 5 (v4.4+). */
		if (sf->ext_csd.rev >= 5) {
			sf->ext_csd.rst_n_function =
			    ext_csd[EXT_CSD_RST_N_FUNCTION];
		}
	} else {
		/* Pre-v4.0 MMC: no EXT_CSD; just settle the bus clock. */
		if (sc->sc_busclk > sf->csd.tran_speed)
			sc->sc_busclk = sf->csd.tran_speed;
		if (sc->sc_busclk != bus_clock) {
			error = sdmmc_chip_bus_clock(sc->sc_sct, sc->sc_sch,
			    sc->sc_busclk, false);
			if (error) {
				aprint_error_dev(sc->sc_dev,
				    "can't change bus clock\n");
				return error;
			}
		}
	}

	return 0;
}
1113
1114 static int
1115 sdmmc_mem_send_cid(struct sdmmc_softc *sc, sdmmc_response *resp)
1116 {
1117 struct sdmmc_command cmd;
1118 int error;
1119
1120 if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
1121 memset(&cmd, 0, sizeof cmd);
1122 cmd.c_opcode = MMC_ALL_SEND_CID;
1123 cmd.c_flags = SCF_CMD_BCR | SCF_RSP_R2 | SCF_TOUT_OK;
1124
1125 error = sdmmc_mmc_command(sc, &cmd);
1126 } else {
1127 error = sdmmc_mem_send_cxd_data(sc, MMC_SEND_CID, &cmd.c_resp,
1128 sizeof(cmd.c_resp));
1129 }
1130
1131 #ifdef SDMMC_DEBUG
1132 if (error == 0)
1133 sdmmc_dump_data("CID", cmd.c_resp, sizeof(cmd.c_resp));
1134 #endif
1135 if (error == 0 && resp != NULL)
1136 memcpy(resp, &cmd.c_resp, sizeof(*resp));
1137 return error;
1138 }
1139
1140 static int
1141 sdmmc_mem_send_csd(struct sdmmc_softc *sc, struct sdmmc_function *sf,
1142 sdmmc_response *resp)
1143 {
1144 struct sdmmc_command cmd;
1145 int error;
1146
1147 if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
1148 memset(&cmd, 0, sizeof cmd);
1149 cmd.c_opcode = MMC_SEND_CSD;
1150 cmd.c_arg = MMC_ARG_RCA(sf->rca);
1151 cmd.c_flags = SCF_CMD_AC | SCF_RSP_R2;
1152
1153 error = sdmmc_mmc_command(sc, &cmd);
1154 } else {
1155 error = sdmmc_mem_send_cxd_data(sc, MMC_SEND_CSD, &cmd.c_resp,
1156 sizeof(cmd.c_resp));
1157 }
1158
1159 #ifdef SDMMC_DEBUG
1160 if (error == 0)
1161 sdmmc_dump_data("CSD", cmd.c_resp, sizeof(cmd.c_resp));
1162 #endif
1163 if (error == 0 && resp != NULL)
1164 memcpy(resp, &cmd.c_resp, sizeof(*resp));
1165 return error;
1166 }
1167
1168 static int
1169 sdmmc_mem_send_scr(struct sdmmc_softc *sc, struct sdmmc_function *sf,
1170 uint32_t *scr)
1171 {
1172 struct sdmmc_command cmd;
1173 bus_dma_segment_t ds[1];
1174 void *ptr = NULL;
1175 int datalen = 8;
1176 int rseg;
1177 int error = 0;
1178
1179 /* Don't lock */
1180
1181 if (ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
1182 error = bus_dmamem_alloc(sc->sc_dmat, datalen, PAGE_SIZE, 0,
1183 ds, 1, &rseg, BUS_DMA_NOWAIT);
1184 if (error)
1185 goto out;
1186 error = bus_dmamem_map(sc->sc_dmat, ds, 1, datalen, &ptr,
1187 BUS_DMA_NOWAIT);
1188 if (error)
1189 goto dmamem_free;
1190 error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmap, ptr, datalen,
1191 NULL, BUS_DMA_NOWAIT|BUS_DMA_STREAMING|BUS_DMA_READ);
1192 if (error)
1193 goto dmamem_unmap;
1194
1195 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap, 0, datalen,
1196 BUS_DMASYNC_PREREAD);
1197 } else {
1198 ptr = malloc(datalen, M_DEVBUF, M_NOWAIT | M_ZERO);
1199 if (ptr == NULL)
1200 goto out;
1201 }
1202
1203 memset(&cmd, 0, sizeof(cmd));
1204 cmd.c_data = ptr;
1205 cmd.c_datalen = datalen;
1206 cmd.c_blklen = datalen;
1207 cmd.c_arg = 0;
1208 cmd.c_flags = SCF_CMD_ADTC | SCF_CMD_READ | SCF_RSP_R1 | SCF_RSP_SPI_R1;
1209 cmd.c_opcode = SD_APP_SEND_SCR;
1210 if (ISSET(sc->sc_caps, SMC_CAPS_DMA))
1211 cmd.c_dmamap = sc->sc_dmap;
1212
1213 error = sdmmc_app_command(sc, sf, &cmd);
1214 if (error == 0) {
1215 if (ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
1216 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap, 0, datalen,
1217 BUS_DMASYNC_POSTREAD);
1218 }
1219 memcpy(scr, ptr, datalen);
1220 }
1221
1222 out:
1223 if (ptr != NULL) {
1224 if (ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
1225 bus_dmamap_unload(sc->sc_dmat, sc->sc_dmap);
1226 dmamem_unmap:
1227 bus_dmamem_unmap(sc->sc_dmat, ptr, datalen);
1228 dmamem_free:
1229 bus_dmamem_free(sc->sc_dmat, ds, rseg);
1230 } else {
1231 free(ptr, M_DEVBUF);
1232 }
1233 }
1234 DPRINTF(("%s: sdmem_mem_send_scr: error = %d\n", SDMMCDEVNAME(sc),
1235 error));
1236
1237 #ifdef SDMMC_DEBUG
1238 if (error == 0)
1239 sdmmc_dump_data("SCR", scr, datalen);
1240 #endif
1241 return error;
1242 }
1243
1244 static int
1245 sdmmc_mem_decode_scr(struct sdmmc_softc *sc, struct sdmmc_function *sf)
1246 {
1247 sdmmc_response resp;
1248 int ver;
1249
1250 memset(resp, 0, sizeof(resp));
1251 /*
1252 * Change the raw-scr received from the DMA stream to resp.
1253 */
1254 resp[0] = be32toh(sf->raw_scr[1]) >> 8; // LSW
1255 resp[1] = be32toh(sf->raw_scr[0]); // MSW
1256 resp[0] |= (resp[1] & 0xff) << 24;
1257 resp[1] >>= 8;
1258
1259 ver = SCR_STRUCTURE(resp);
1260 sf->scr.sd_spec = SCR_SD_SPEC(resp);
1261 sf->scr.bus_width = SCR_SD_BUS_WIDTHS(resp);
1262
1263 DPRINTF(("%s: sdmmc_mem_decode_scr: %08x%08x ver=%d, spec=%d, bus width=%d\n",
1264 SDMMCDEVNAME(sc), resp[1], resp[0],
1265 ver, sf->scr.sd_spec, sf->scr.bus_width));
1266
1267 if (ver != 0 && ver != 1) {
1268 DPRINTF(("%s: unknown structure version: %d\n",
1269 SDMMCDEVNAME(sc), ver));
1270 return EINVAL;
1271 }
1272 return 0;
1273 }
1274
1275 static int
1276 sdmmc_mem_send_cxd_data(struct sdmmc_softc *sc, int opcode, void *data,
1277 size_t datalen)
1278 {
1279 struct sdmmc_command cmd;
1280 bus_dma_segment_t ds[1];
1281 void *ptr = NULL;
1282 int rseg;
1283 int error = 0;
1284
1285 if (ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
1286 error = bus_dmamem_alloc(sc->sc_dmat, datalen, PAGE_SIZE, 0, ds,
1287 1, &rseg, BUS_DMA_NOWAIT);
1288 if (error)
1289 goto out;
1290 error = bus_dmamem_map(sc->sc_dmat, ds, 1, datalen, &ptr,
1291 BUS_DMA_NOWAIT);
1292 if (error)
1293 goto dmamem_free;
1294 error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmap, ptr, datalen,
1295 NULL, BUS_DMA_NOWAIT|BUS_DMA_STREAMING|BUS_DMA_READ);
1296 if (error)
1297 goto dmamem_unmap;
1298
1299 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap, 0, datalen,
1300 BUS_DMASYNC_PREREAD);
1301 } else {
1302 ptr = malloc(datalen, M_DEVBUF, M_NOWAIT | M_ZERO);
1303 if (ptr == NULL)
1304 goto out;
1305 }
1306
1307 memset(&cmd, 0, sizeof(cmd));
1308 cmd.c_data = ptr;
1309 cmd.c_datalen = datalen;
1310 cmd.c_blklen = datalen;
1311 cmd.c_opcode = opcode;
1312 cmd.c_arg = 0;
1313 cmd.c_flags = SCF_CMD_ADTC | SCF_CMD_READ | SCF_RSP_SPI_R1;
1314 if (opcode == MMC_SEND_EXT_CSD)
1315 SET(cmd.c_flags, SCF_RSP_R1);
1316 else
1317 SET(cmd.c_flags, SCF_RSP_R2);
1318 if (ISSET(sc->sc_caps, SMC_CAPS_DMA))
1319 cmd.c_dmamap = sc->sc_dmap;
1320
1321 error = sdmmc_mmc_command(sc, &cmd);
1322 if (error == 0) {
1323 if (ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
1324 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap, 0, datalen,
1325 BUS_DMASYNC_POSTREAD);
1326 }
1327 memcpy(data, ptr, datalen);
1328 #ifdef SDMMC_DEBUG
1329 sdmmc_dump_data("CXD", data, datalen);
1330 #endif
1331 }
1332
1333 out:
1334 if (ptr != NULL) {
1335 if (ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
1336 bus_dmamap_unload(sc->sc_dmat, sc->sc_dmap);
1337 dmamem_unmap:
1338 bus_dmamem_unmap(sc->sc_dmat, ptr, datalen);
1339 dmamem_free:
1340 bus_dmamem_free(sc->sc_dmat, ds, rseg);
1341 } else {
1342 free(ptr, M_DEVBUF);
1343 }
1344 }
1345 return error;
1346 }
1347
1348 static int
1349 sdmmc_set_bus_width(struct sdmmc_function *sf, int width)
1350 {
1351 struct sdmmc_softc *sc = sf->sc;
1352 struct sdmmc_command cmd;
1353 int error;
1354
1355 if (ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE))
1356 return ENODEV;
1357
1358 memset(&cmd, 0, sizeof(cmd));
1359 cmd.c_opcode = SD_APP_SET_BUS_WIDTH;
1360 cmd.c_flags = SCF_RSP_R1 | SCF_CMD_AC;
1361
1362 switch (width) {
1363 case 1:
1364 cmd.c_arg = SD_ARG_BUS_WIDTH_1;
1365 break;
1366
1367 case 4:
1368 cmd.c_arg = SD_ARG_BUS_WIDTH_4;
1369 break;
1370
1371 default:
1372 return EINVAL;
1373 }
1374
1375 error = sdmmc_app_command(sc, sf, &cmd);
1376 if (error == 0)
1377 error = sdmmc_chip_bus_width(sc->sc_sct, sc->sc_sch, width);
1378 return error;
1379 }
1380
1381 static int
1382 sdmmc_mem_sd_switch(struct sdmmc_function *sf, int mode, int group,
1383 int function, sdmmc_bitfield512_t *status)
1384 {
1385 struct sdmmc_softc *sc = sf->sc;
1386 struct sdmmc_command cmd;
1387 bus_dma_segment_t ds[1];
1388 void *ptr = NULL;
1389 int gsft, rseg, error = 0;
1390 const int statlen = 64;
1391
1392 if (sf->scr.sd_spec >= SCR_SD_SPEC_VER_1_10 &&
1393 !ISSET(sf->csd.ccc, SD_CSD_CCC_SWITCH))
1394 return EINVAL;
1395
1396 if (group <= 0 || group > 6 ||
1397 function < 0 || function > 15)
1398 return EINVAL;
1399
1400 gsft = (group - 1) << 2;
1401
1402 if (ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
1403 error = bus_dmamem_alloc(sc->sc_dmat, statlen, PAGE_SIZE, 0, ds,
1404 1, &rseg, BUS_DMA_NOWAIT);
1405 if (error)
1406 goto out;
1407 error = bus_dmamem_map(sc->sc_dmat, ds, 1, statlen, &ptr,
1408 BUS_DMA_NOWAIT);
1409 if (error)
1410 goto dmamem_free;
1411 error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmap, ptr, statlen,
1412 NULL, BUS_DMA_NOWAIT|BUS_DMA_STREAMING|BUS_DMA_READ);
1413 if (error)
1414 goto dmamem_unmap;
1415
1416 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap, 0, statlen,
1417 BUS_DMASYNC_PREREAD);
1418 } else {
1419 ptr = malloc(statlen, M_DEVBUF, M_NOWAIT | M_ZERO);
1420 if (ptr == NULL)
1421 goto out;
1422 }
1423
1424 memset(&cmd, 0, sizeof(cmd));
1425 cmd.c_data = ptr;
1426 cmd.c_datalen = statlen;
1427 cmd.c_blklen = statlen;
1428 cmd.c_opcode = SD_SEND_SWITCH_FUNC;
1429 cmd.c_arg =
1430 (!!mode << 31) | (function << gsft) | (0x00ffffff & ~(0xf << gsft));
1431 cmd.c_flags = SCF_CMD_ADTC | SCF_CMD_READ | SCF_RSP_R1 | SCF_RSP_SPI_R1;
1432 if (ISSET(sc->sc_caps, SMC_CAPS_DMA))
1433 cmd.c_dmamap = sc->sc_dmap;
1434
1435 error = sdmmc_mmc_command(sc, &cmd);
1436 if (error == 0) {
1437 if (ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
1438 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap, 0, statlen,
1439 BUS_DMASYNC_POSTREAD);
1440 }
1441 memcpy(status, ptr, statlen);
1442 }
1443
1444 out:
1445 if (ptr != NULL) {
1446 if (ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
1447 bus_dmamap_unload(sc->sc_dmat, sc->sc_dmap);
1448 dmamem_unmap:
1449 bus_dmamem_unmap(sc->sc_dmat, ptr, statlen);
1450 dmamem_free:
1451 bus_dmamem_free(sc->sc_dmat, ds, rseg);
1452 } else {
1453 free(ptr, M_DEVBUF);
1454 }
1455 }
1456
1457 if (error == 0)
1458 sdmmc_be512_to_bitfield512(status);
1459
1460 return error;
1461 }
1462
1463 static int
1464 sdmmc_mem_mmc_switch(struct sdmmc_function *sf, uint8_t set, uint8_t index,
1465 uint8_t value)
1466 {
1467 struct sdmmc_softc *sc = sf->sc;
1468 struct sdmmc_command cmd;
1469 int error;
1470
1471 memset(&cmd, 0, sizeof(cmd));
1472 cmd.c_opcode = MMC_SWITCH;
1473 cmd.c_arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
1474 (index << 16) | (value << 8) | set;
1475 cmd.c_flags = SCF_RSP_SPI_R1B | SCF_RSP_R1B | SCF_CMD_AC;
1476
1477 error = sdmmc_mmc_command(sc, &cmd);
1478 if (error)
1479 return error;
1480
1481 if (index == EXT_CSD_HS_TIMING && value >= 2) {
1482 do {
1483 memset(&cmd, 0, sizeof(cmd));
1484 cmd.c_opcode = MMC_SEND_STATUS;
1485 if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE))
1486 cmd.c_arg = MMC_ARG_RCA(sf->rca);
1487 cmd.c_flags = SCF_CMD_AC | SCF_RSP_R1 | SCF_RSP_SPI_R2;
1488 error = sdmmc_mmc_command(sc, &cmd);
1489 if (error)
1490 break;
1491 if (ISSET(MMC_R1(cmd.c_resp), MMC_R1_SWITCH_ERROR)) {
1492 aprint_error_dev(sc->sc_dev, "switch error\n");
1493 return EINVAL;
1494 }
1495 /* XXX time out */
1496 } while (!ISSET(MMC_R1(cmd.c_resp), MMC_R1_READY_FOR_DATA));
1497
1498 if (error) {
1499 aprint_error_dev(sc->sc_dev,
1500 "error waiting for high speed switch: %d\n",
1501 error);
1502 return error;
1503 }
1504 }
1505
1506 return 0;
1507 }
1508
1509 /*
1510 * SPI mode function
1511 */
1512 static int
1513 sdmmc_mem_spi_read_ocr(struct sdmmc_softc *sc, uint32_t hcs, uint32_t *card_ocr)
1514 {
1515 struct sdmmc_command cmd;
1516 int error;
1517
1518 memset(&cmd, 0, sizeof(cmd));
1519 cmd.c_opcode = MMC_READ_OCR;
1520 cmd.c_arg = hcs ? MMC_OCR_HCS : 0;
1521 cmd.c_flags = SCF_RSP_SPI_R3;
1522
1523 error = sdmmc_mmc_command(sc, &cmd);
1524 if (error == 0 && card_ocr != NULL)
1525 *card_ocr = cmd.c_resp[1];
1526 DPRINTF(("%s: sdmmc_mem_spi_read_ocr: error=%d, ocr=%#x\n",
1527 SDMMCDEVNAME(sc), error, cmd.c_resp[1]));
1528 return error;
1529 }
1530
1531 /*
1532 * read/write function
1533 */
1534 /* read */
1535 static int
1536 sdmmc_mem_single_read_block(struct sdmmc_function *sf, uint32_t blkno,
1537 u_char *data, size_t datalen)
1538 {
1539 struct sdmmc_softc *sc = sf->sc;
1540 int error = 0;
1541 int i;
1542
1543 KASSERT((datalen % SDMMC_SECTOR_SIZE) == 0);
1544 KASSERT(!ISSET(sc->sc_caps, SMC_CAPS_DMA));
1545
1546 for (i = 0; i < datalen / SDMMC_SECTOR_SIZE; i++) {
1547 error = sdmmc_mem_read_block_subr(sf, sc->sc_dmap, blkno + i,
1548 data + i * SDMMC_SECTOR_SIZE, SDMMC_SECTOR_SIZE);
1549 if (error)
1550 break;
1551 }
1552 return error;
1553 }
1554
1555 /*
1556 * Simulate multi-segment dma transfer.
1557 */
static int
sdmmc_mem_single_segment_dma_read_block(struct sdmmc_function *sf,
    uint32_t blkno, u_char *data, size_t datalen)
{
	struct sdmmc_softc *sc = sf->sc;
	bool use_bbuf = false;
	int error = 0;
	int i;

	/*
	 * The transfer can only be split per DMA segment if every
	 * segment is a whole number of sectors; otherwise fall back to
	 * the pre-allocated bounce buffer (sf->bbuf).
	 */
	for (i = 0; i < sc->sc_dmap->dm_nsegs; i++) {
		size_t len = sc->sc_dmap->dm_segs[i].ds_len;
		if ((len % SDMMC_SECTOR_SIZE) != 0) {
			use_bbuf = true;
			break;
		}
	}
	if (use_bbuf) {
		bus_dmamap_sync(sc->sc_dmat, sf->bbuf_dmap, 0, datalen,
		    BUS_DMASYNC_PREREAD);

		/*
		 * NOTE(review): `data' is passed here but the transfer
		 * is driven through sf->bbuf_dmap, so on a DMA-capable
		 * controller the card data lands in the bounce buffer;
		 * presumably bbuf_dmap stays loaded over sf->bbuf from
		 * setup — confirm against the allocation code.
		 */
		error = sdmmc_mem_read_block_subr(sf, sf->bbuf_dmap,
		    blkno, data, datalen);
		if (error) {
			bus_dmamap_unload(sc->sc_dmat, sf->bbuf_dmap);
			return error;
		}

		bus_dmamap_sync(sc->sc_dmat, sf->bbuf_dmap, 0, datalen,
		    BUS_DMASYNC_POSTREAD);

		/* Copy from bounce buffer */
		memcpy(data, sf->bbuf, datalen);

		return 0;
	}

	/*
	 * All segments are sector-aligned: issue one transfer per
	 * segment, loading each segment into the single-segment map
	 * sf->sseg_dmap in turn.
	 */
	for (i = 0; i < sc->sc_dmap->dm_nsegs; i++) {
		size_t len = sc->sc_dmap->dm_segs[i].ds_len;

		error = bus_dmamap_load(sc->sc_dmat, sf->sseg_dmap,
		    data, len, NULL, BUS_DMA_NOWAIT|BUS_DMA_READ);
		if (error)
			return error;

		bus_dmamap_sync(sc->sc_dmat, sf->sseg_dmap, 0, len,
		    BUS_DMASYNC_PREREAD);

		error = sdmmc_mem_read_block_subr(sf, sf->sseg_dmap,
		    blkno, data, len);
		if (error) {
			bus_dmamap_unload(sc->sc_dmat, sf->sseg_dmap);
			return error;
		}

		bus_dmamap_sync(sc->sc_dmat, sf->sseg_dmap, 0, len,
		    BUS_DMASYNC_POSTREAD);

		bus_dmamap_unload(sc->sc_dmat, sf->sseg_dmap);

		/* Advance to the next segment's sectors. */
		blkno += len / SDMMC_SECTOR_SIZE;
		data += len;
	}
	return 0;
}
1622
/*
 * Issue a single read transfer (CMD17/CMD18) of `datalen' bytes
 * starting at sector `blkno', into `data' (PIO) or via `dmap' on
 * DMA-capable controllers.  Called with sc->sc_mtx held (see
 * sdmmc_mem_read_block).
 */
static int
sdmmc_mem_read_block_subr(struct sdmmc_function *sf, bus_dmamap_t dmap,
    uint32_t blkno, u_char *data, size_t datalen)
{
	struct sdmmc_softc *sc = sf->sc;
	struct sdmmc_command cmd;
	int error;

	/* In native bus mode the card must be selected first. */
	if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
		error = sdmmc_select_card(sc, sf);
		if (error)
			goto out;
	}

	memset(&cmd, 0, sizeof(cmd));
	cmd.c_data = data;
	cmd.c_datalen = datalen;
	cmd.c_blklen = SDMMC_SECTOR_SIZE;
	/* More than one sector: use the multiple-block read command. */
	cmd.c_opcode = (cmd.c_datalen / cmd.c_blklen) > 1 ?
	    MMC_READ_BLOCK_MULTIPLE : MMC_READ_BLOCK_SINGLE;
	cmd.c_arg = blkno;
	/* Standard-capacity cards are byte-addressed, not sector-addressed. */
	if (!ISSET(sf->flags, SFF_SDHC))
		cmd.c_arg <<= SDMMC_SECTOR_SIZE_SB;
	cmd.c_flags = SCF_CMD_ADTC | SCF_CMD_READ | SCF_RSP_R1 | SCF_RSP_SPI_R1;
	if (ISSET(sc->sc_caps, SMC_CAPS_DMA))
		cmd.c_dmamap = dmap;

	sc->sc_ev_xfer.ev_count++;

	error = sdmmc_mmc_command(sc, &cmd);
	if (error) {
		sc->sc_ev_xfer_error.ev_count++;
		goto out;
	}

	/* Bucket power-of-two transfer sizes (2^9..2^16 bytes) for stats. */
	const u_int counter = __builtin_ctz(cmd.c_datalen);
	if (counter >= 9 && counter <= 16) {
		sc->sc_ev_xfer_aligned[counter - 9].ev_count++;
	} else {
		sc->sc_ev_xfer_unaligned.ev_count++;
	}

	/*
	 * Without controller auto-stop, a multiple-block read must be
	 * terminated explicitly with CMD12.
	 */
	if (!ISSET(sc->sc_caps, SMC_CAPS_AUTO_STOP)) {
		if (cmd.c_opcode == MMC_READ_BLOCK_MULTIPLE) {
			memset(&cmd, 0, sizeof cmd);
			cmd.c_opcode = MMC_STOP_TRANSMISSION;
			cmd.c_arg = MMC_ARG_RCA(sf->rca);
			cmd.c_flags = SCF_CMD_AC | SCF_RSP_R1B | SCF_RSP_SPI_R1B;
			error = sdmmc_mmc_command(sc, &cmd);
			if (error)
				goto out;
		}
	}

	/* Poll CMD13 until the card reports READY_FOR_DATA again. */
	if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
		do {
			memset(&cmd, 0, sizeof(cmd));
			cmd.c_opcode = MMC_SEND_STATUS;
			if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE))
				cmd.c_arg = MMC_ARG_RCA(sf->rca);
			cmd.c_flags = SCF_CMD_AC | SCF_RSP_R1 | SCF_RSP_SPI_R2;
			error = sdmmc_mmc_command(sc, &cmd);
			if (error)
				break;
			/* XXX time out */
		} while (!ISSET(MMC_R1(cmd.c_resp), MMC_R1_READY_FOR_DATA));
	}

out:
	return error;
}
1694
/*
 * Public entry point: read `datalen' bytes starting at sector `blkno'
 * into `data'.  Serializes on sc->sc_mtx and dispatches to one of
 * three strategies: per-sector PIO (SMC_CAPS_SINGLE_ONLY), plain PIO,
 * or DMA — with a single-segment fallback when the map has multiple
 * segments but the controller lacks multi-segment DMA.
 */
int
sdmmc_mem_read_block(struct sdmmc_function *sf, uint32_t blkno, u_char *data,
    size_t datalen)
{
	struct sdmmc_softc *sc = sf->sc;
	int error;

	SDMMC_LOCK(sc);
	mutex_enter(&sc->sc_mtx);

	/* Controller can only do one sector per command. */
	if (ISSET(sc->sc_caps, SMC_CAPS_SINGLE_ONLY)) {
		error = sdmmc_mem_single_read_block(sf, blkno, data, datalen);
		goto out;
	}

	/* No DMA: transfer directly in PIO mode. */
	if (!ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
		error = sdmmc_mem_read_block_subr(sf, sc->sc_dmap, blkno, data,
		    datalen);
		goto out;
	}

	/* DMA transfer */
	error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmap, data, datalen, NULL,
	    BUS_DMA_NOWAIT|BUS_DMA_READ);
	if (error)
		goto out;

#ifdef SDMMC_DEBUG
	printf("data=%p, datalen=%zu\n", data, datalen);
	for (int i = 0; i < sc->sc_dmap->dm_nsegs; i++) {
		printf("seg#%d: addr=%#lx, size=%#lx\n", i,
		    (u_long)sc->sc_dmap->dm_segs[i].ds_addr,
		    (u_long)sc->sc_dmap->dm_segs[i].ds_len);
	}
#endif

	/*
	 * Multiple segments but the controller only handles one per
	 * transfer: simulate multi-segment DMA segment by segment.
	 */
	if (sc->sc_dmap->dm_nsegs > 1
	    && !ISSET(sc->sc_caps, SMC_CAPS_MULTI_SEG_DMA)) {
		error = sdmmc_mem_single_segment_dma_read_block(sf, blkno,
		    data, datalen);
		goto unload;
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap, 0, datalen,
	    BUS_DMASYNC_PREREAD);

	error = sdmmc_mem_read_block_subr(sf, sc->sc_dmap, blkno, data,
	    datalen);
	if (error)
		goto unload;

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap, 0, datalen,
	    BUS_DMASYNC_POSTREAD);
unload:
	bus_dmamap_unload(sc->sc_dmat, sc->sc_dmap);

out:
	mutex_exit(&sc->sc_mtx);
	SDMMC_UNLOCK(sc);

	return error;
}
1757
1758 /* write */
1759 static int
1760 sdmmc_mem_single_write_block(struct sdmmc_function *sf, uint32_t blkno,
1761 u_char *data, size_t datalen)
1762 {
1763 struct sdmmc_softc *sc = sf->sc;
1764 int error = 0;
1765 int i;
1766
1767 KASSERT((datalen % SDMMC_SECTOR_SIZE) == 0);
1768 KASSERT(!ISSET(sc->sc_caps, SMC_CAPS_DMA));
1769
1770 for (i = 0; i < datalen / SDMMC_SECTOR_SIZE; i++) {
1771 error = sdmmc_mem_write_block_subr(sf, sc->sc_dmap, blkno + i,
1772 data + i * SDMMC_SECTOR_SIZE, SDMMC_SECTOR_SIZE);
1773 if (error)
1774 break;
1775 }
1776 return error;
1777 }
1778
1779 /*
1780 * Simulate multi-segment dma transfer.
1781 */
static int
sdmmc_mem_single_segment_dma_write_block(struct sdmmc_function *sf,
    uint32_t blkno, u_char *data, size_t datalen)
{
	struct sdmmc_softc *sc = sf->sc;
	bool use_bbuf = false;
	int error = 0;
	int i;

	/*
	 * The transfer can only be split per DMA segment if every
	 * segment is a whole number of sectors; otherwise fall back to
	 * the pre-allocated bounce buffer (sf->bbuf).
	 */
	for (i = 0; i < sc->sc_dmap->dm_nsegs; i++) {
		size_t len = sc->sc_dmap->dm_segs[i].ds_len;
		if ((len % SDMMC_SECTOR_SIZE) != 0) {
			use_bbuf = true;
			break;
		}
	}
	if (use_bbuf) {
		/* Copy to bounce buffer */
		memcpy(sf->bbuf, data, datalen);

		bus_dmamap_sync(sc->sc_dmat, sf->bbuf_dmap, 0, datalen,
		    BUS_DMASYNC_PREWRITE);

		/*
		 * NOTE(review): `data' is passed here but the transfer
		 * is driven through sf->bbuf_dmap, so on a DMA-capable
		 * controller the card receives the bounce-buffer
		 * contents copied above; presumably bbuf_dmap stays
		 * loaded over sf->bbuf from setup — confirm against
		 * the allocation code.
		 */
		error = sdmmc_mem_write_block_subr(sf, sf->bbuf_dmap,
		    blkno, data, datalen);
		if (error) {
			bus_dmamap_unload(sc->sc_dmat, sf->bbuf_dmap);
			return error;
		}

		bus_dmamap_sync(sc->sc_dmat, sf->bbuf_dmap, 0, datalen,
		    BUS_DMASYNC_POSTWRITE);

		return 0;
	}

	/*
	 * All segments are sector-aligned: issue one transfer per
	 * segment, loading each segment into the single-segment map
	 * sf->sseg_dmap in turn.
	 */
	for (i = 0; i < sc->sc_dmap->dm_nsegs; i++) {
		size_t len = sc->sc_dmap->dm_segs[i].ds_len;

		error = bus_dmamap_load(sc->sc_dmat, sf->sseg_dmap,
		    data, len, NULL, BUS_DMA_NOWAIT|BUS_DMA_WRITE);
		if (error)
			return error;

		bus_dmamap_sync(sc->sc_dmat, sf->sseg_dmap, 0, len,
		    BUS_DMASYNC_PREWRITE);

		error = sdmmc_mem_write_block_subr(sf, sf->sseg_dmap,
		    blkno, data, len);
		if (error) {
			bus_dmamap_unload(sc->sc_dmat, sf->sseg_dmap);
			return error;
		}

		bus_dmamap_sync(sc->sc_dmat, sf->sseg_dmap, 0, len,
		    BUS_DMASYNC_POSTWRITE);

		bus_dmamap_unload(sc->sc_dmat, sf->sseg_dmap);

		/* Advance to the next segment's sectors. */
		blkno += len / SDMMC_SECTOR_SIZE;
		data += len;
	}

	return error;
}
1847
/*
 * Issue a single write transfer (CMD24/CMD25) of `datalen' bytes
 * starting at sector `blkno', from `data' (PIO) or via `dmap' on
 * DMA-capable controllers.  Called with sc->sc_mtx held (see
 * sdmmc_mem_write_block).
 */
static int
sdmmc_mem_write_block_subr(struct sdmmc_function *sf, bus_dmamap_t dmap,
    uint32_t blkno, u_char *data, size_t datalen)
{
	struct sdmmc_softc *sc = sf->sc;
	struct sdmmc_command cmd;
	int error;

	/* In native bus mode the card must be selected first. */
	if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
		error = sdmmc_select_card(sc, sf);
		if (error)
			goto out;
	}

	memset(&cmd, 0, sizeof(cmd));
	cmd.c_data = data;
	cmd.c_datalen = datalen;
	cmd.c_blklen = SDMMC_SECTOR_SIZE;
	/* More than one sector: use the multiple-block write command. */
	cmd.c_opcode = (cmd.c_datalen / cmd.c_blklen) > 1 ?
	    MMC_WRITE_BLOCK_MULTIPLE : MMC_WRITE_BLOCK_SINGLE;
	cmd.c_arg = blkno;
	/* Standard-capacity cards are byte-addressed, not sector-addressed. */
	if (!ISSET(sf->flags, SFF_SDHC))
		cmd.c_arg <<= SDMMC_SECTOR_SIZE_SB;
	/* No SCF_CMD_READ: this is a host-to-card transfer. */
	cmd.c_flags = SCF_CMD_ADTC | SCF_RSP_R1;
	if (ISSET(sc->sc_caps, SMC_CAPS_DMA))
		cmd.c_dmamap = dmap;

	sc->sc_ev_xfer.ev_count++;

	error = sdmmc_mmc_command(sc, &cmd);
	if (error) {
		sc->sc_ev_xfer_error.ev_count++;
		goto out;
	}

	/* Bucket power-of-two transfer sizes (2^9..2^16 bytes) for stats. */
	const u_int counter = __builtin_ctz(cmd.c_datalen);
	if (counter >= 9 && counter <= 16) {
		sc->sc_ev_xfer_aligned[counter - 9].ev_count++;
	} else {
		sc->sc_ev_xfer_unaligned.ev_count++;
	}

	/*
	 * Without controller auto-stop, a multiple-block write must be
	 * terminated explicitly with CMD12.
	 * NOTE(review): unlike the read path, no RCA argument is set
	 * here — verify whether CMD12 needs it on this bus.
	 */
	if (!ISSET(sc->sc_caps, SMC_CAPS_AUTO_STOP)) {
		if (cmd.c_opcode == MMC_WRITE_BLOCK_MULTIPLE) {
			memset(&cmd, 0, sizeof(cmd));
			cmd.c_opcode = MMC_STOP_TRANSMISSION;
			cmd.c_flags = SCF_CMD_AC | SCF_RSP_R1B | SCF_RSP_SPI_R1B;
			error = sdmmc_mmc_command(sc, &cmd);
			if (error)
				goto out;
		}
	}

	/* Poll CMD13 until the card reports READY_FOR_DATA again. */
	if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
		do {
			memset(&cmd, 0, sizeof(cmd));
			cmd.c_opcode = MMC_SEND_STATUS;
			if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE))
				cmd.c_arg = MMC_ARG_RCA(sf->rca);
			cmd.c_flags = SCF_CMD_AC | SCF_RSP_R1 | SCF_RSP_SPI_R2;
			error = sdmmc_mmc_command(sc, &cmd);
			if (error)
				break;
			/* XXX time out */
		} while (!ISSET(MMC_R1(cmd.c_resp), MMC_R1_READY_FOR_DATA));
	}

out:
	return error;
}
1918
/*
 * Public entry point: write `datalen' bytes from `data' starting at
 * sector `blkno'.  Serializes on sc->sc_mtx, rejects write-protected
 * media with EIO, and dispatches to one of three strategies:
 * per-sector PIO (SMC_CAPS_SINGLE_ONLY), plain PIO, or DMA — with a
 * single-segment fallback when the map has multiple segments but the
 * controller lacks multi-segment DMA.
 */
int
sdmmc_mem_write_block(struct sdmmc_function *sf, uint32_t blkno, u_char *data,
    size_t datalen)
{
	struct sdmmc_softc *sc = sf->sc;
	int error;

	SDMMC_LOCK(sc);
	mutex_enter(&sc->sc_mtx);

	if (sdmmc_chip_write_protect(sc->sc_sct, sc->sc_sch)) {
		aprint_normal_dev(sc->sc_dev, "write-protected\n");
		error = EIO;
		goto out;
	}

	/* Controller can only do one sector per command. */
	if (ISSET(sc->sc_caps, SMC_CAPS_SINGLE_ONLY)) {
		error = sdmmc_mem_single_write_block(sf, blkno, data, datalen);
		goto out;
	}

	/* No DMA: transfer directly in PIO mode. */
	if (!ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
		error = sdmmc_mem_write_block_subr(sf, sc->sc_dmap, blkno, data,
		    datalen);
		goto out;
	}

	/* DMA transfer */
	error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmap, data, datalen, NULL,
	    BUS_DMA_NOWAIT|BUS_DMA_WRITE);
	if (error)
		goto out;

#ifdef SDMMC_DEBUG
	aprint_normal_dev(sc->sc_dev, "%s: data=%p, datalen=%zu\n",
	    __func__, data, datalen);
	for (int i = 0; i < sc->sc_dmap->dm_nsegs; i++) {
		aprint_normal_dev(sc->sc_dev,
		    "%s: seg#%d: addr=%#lx, size=%#lx\n", __func__, i,
		    (u_long)sc->sc_dmap->dm_segs[i].ds_addr,
		    (u_long)sc->sc_dmap->dm_segs[i].ds_len);
	}
#endif

	/*
	 * Multiple segments but the controller only handles one per
	 * transfer: simulate multi-segment DMA segment by segment.
	 */
	if (sc->sc_dmap->dm_nsegs > 1
	    && !ISSET(sc->sc_caps, SMC_CAPS_MULTI_SEG_DMA)) {
		error = sdmmc_mem_single_segment_dma_write_block(sf, blkno,
		    data, datalen);
		goto unload;
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap, 0, datalen,
	    BUS_DMASYNC_PREWRITE);

	error = sdmmc_mem_write_block_subr(sf, sc->sc_dmap, blkno, data,
	    datalen);
	if (error)
		goto unload;

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap, 0, datalen,
	    BUS_DMASYNC_POSTWRITE);
unload:
	bus_dmamap_unload(sc->sc_dmat, sc->sc_dmap);

out:
	mutex_exit(&sc->sc_mtx);
	SDMMC_UNLOCK(sc);

	return error;
}
1989