    1 /*	$NetBSD: sdmmc_mem.c,v 1.50 2015/12/22 09:56:06 mlelstv Exp $	*/
2 /* $OpenBSD: sdmmc_mem.c,v 1.10 2009/01/09 10:55:22 jsg Exp $ */
3
4 /*
5 * Copyright (c) 2006 Uwe Stuehler <uwe (at) openbsd.org>
6 *
7 * Permission to use, copy, modify, and distribute this software for any
8 * purpose with or without fee is hereby granted, provided that the above
9 * copyright notice and this permission notice appear in all copies.
10 *
11 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18 */
19
20 /*-
21 * Copyright (C) 2007, 2008, 2009, 2010 NONAKA Kimihiro <nonaka (at) netbsd.org>
22 * All rights reserved.
23 *
24 * Redistribution and use in source and binary forms, with or without
25 * modification, are permitted provided that the following conditions
26 * are met:
27 * 1. Redistributions of source code must retain the above copyright
28 * notice, this list of conditions and the following disclaimer.
29 * 2. Redistributions in binary form must reproduce the above copyright
30 * notice, this list of conditions and the following disclaimer in the
31 * documentation and/or other materials provided with the distribution.
32 *
33 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
34 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
35 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
36 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
37 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
38 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
39 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
40 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
41 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
42 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
43 */
44
45 /* Routines for SD/MMC memory cards. */
46
47 #include <sys/cdefs.h>
48 __KERNEL_RCSID(0, "$NetBSD: sdmmc_mem.c,v 1.50 2015/12/22 09:56:06 mlelstv Exp $");
49
50 #ifdef _KERNEL_OPT
51 #include "opt_sdmmc.h"
52 #endif
53
54 #include <sys/param.h>
55 #include <sys/kernel.h>
56 #include <sys/malloc.h>
57 #include <sys/systm.h>
58 #include <sys/device.h>
59 #include <sys/bitops.h>
60 #include <sys/evcnt.h>
61
62 #include <dev/sdmmc/sdmmcchip.h>
63 #include <dev/sdmmc/sdmmcreg.h>
64 #include <dev/sdmmc/sdmmcvar.h>
65
66 #ifdef SDMMC_DEBUG
67 #define DPRINTF(s) do { printf s; } while (/*CONSTCOND*/0)
68 #else
69 #define DPRINTF(s) do {} while (/*CONSTCOND*/0)
70 #endif
71
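/*
 * Holds the 512-bit (64-byte) switch function status returned by CMD6,
 * stored as sixteen 32-bit words.
 */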
72 typedef struct { uint32_t _bits[512/32]; } __packed __aligned(4) sdmmc_bitfield512_t;
73
74 static int sdmmc_mem_sd_init(struct sdmmc_softc *, struct sdmmc_function *);
75 static int sdmmc_mem_mmc_init(struct sdmmc_softc *, struct sdmmc_function *);
76 static int sdmmc_mem_send_cid(struct sdmmc_softc *, sdmmc_response *);
77 static int sdmmc_mem_send_csd(struct sdmmc_softc *, struct sdmmc_function *,
78 sdmmc_response *);
79 static int sdmmc_mem_send_scr(struct sdmmc_softc *, struct sdmmc_function *,
80 uint32_t *scr);
81 static int sdmmc_mem_decode_scr(struct sdmmc_softc *, struct sdmmc_function *);
82 static int sdmmc_mem_send_cxd_data(struct sdmmc_softc *, int, void *, size_t);
83 static int sdmmc_set_bus_width(struct sdmmc_function *, int);
84 static int sdmmc_mem_sd_switch(struct sdmmc_function *, int, int, int, sdmmc_bitfield512_t *);
85 static int sdmmc_mem_mmc_switch(struct sdmmc_function *, uint8_t, uint8_t,
86 uint8_t);
87 static int sdmmc_mem_signal_voltage(struct sdmmc_softc *, int);
88 static int sdmmc_mem_spi_read_ocr(struct sdmmc_softc *, uint32_t, uint32_t *);
89 static int sdmmc_mem_single_read_block(struct sdmmc_function *, uint32_t,
90 u_char *, size_t);
91 static int sdmmc_mem_single_write_block(struct sdmmc_function *, uint32_t,
92 u_char *, size_t);
93 static int sdmmc_mem_single_segment_dma_read_block(struct sdmmc_function *,
94 uint32_t, u_char *, size_t);
95 static int sdmmc_mem_single_segment_dma_write_block(struct sdmmc_function *,
96 uint32_t, u_char *, size_t);
97 static int sdmmc_mem_read_block_subr(struct sdmmc_function *, bus_dmamap_t,
98 uint32_t, u_char *, size_t);
99 static int sdmmc_mem_write_block_subr(struct sdmmc_function *, bus_dmamap_t,
100 uint32_t, u_char *, size_t);
101
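/*
 * Bus speed ("access mode") functions selectable with CMD6, indexed by
 * SD access mode number; each entry gives the host capability it needs
 * and its maximum clock rate in kHz.
 */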
102 static const struct {
103 const char *name;
104 int v;
105 int freq;
106 } switch_group0_functions[] = {
107 /* Default/SDR12 */
108 { "Default/SDR12", 0, 25000 },
109
110 /* High-Speed/SDR25 */
111 { "High-Speed/SDR25", SMC_CAPS_SD_HIGHSPEED, 50000 },
112
113 /* SDR50 */
114 { "SDR50", SMC_CAPS_UHS_SDR50, 100000 },
115
116 /* SDR104 */
117 { "SDR104", SMC_CAPS_UHS_SDR104, 208000 },
118
119 /* DDR50 */
120 { "DDR50", SMC_CAPS_UHS_DDR50, 50000 },
121 };
122
123 /*
124 * Initialize SD/MMC memory cards and memory in SDIO "combo" cards.
125 */
126 int
127 sdmmc_mem_enable(struct sdmmc_softc *sc)
128 {
129 uint32_t host_ocr;
130 uint32_t card_ocr;
131 uint32_t new_ocr;
132 uint32_t ocr = 0;
133 int error;
134
135 SDMMC_LOCK(sc);
136
137 /* Set host mode to SD "combo" card or SD memory-only. */
138 CLR(sc->sc_flags, SMF_UHS_MODE);
139 SET(sc->sc_flags, SMF_SD_MODE|SMF_MEM_MODE);
140
141 if (ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE))
142 sdmmc_spi_chip_initialize(sc->sc_spi_sct, sc->sc_sch);
143
144 /* Reset memory (*must* do that before CMD55 or CMD1). */
145 sdmmc_go_idle_state(sc);
146
147 if (ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
148 /* Check SD Ver.2 */
149 error = sdmmc_mem_send_if_cond(sc, 0x1aa, &card_ocr);
150 if (error == 0 && card_ocr == 0x1aa)
151 SET(ocr, MMC_OCR_HCS);
152 }
153
154 /*
 155 	 * Read the SD/MMC memory OCR value by issuing CMD55 followed
 156 	 * by ACMD41 to memory-only SD cards.  MMC cards do not respond
 157 	 * to CMD55 or ACMD41, which is how we distinguish them from SD
 158 	 * cards.
159 */
160 mmc_mode:
161 error = sdmmc_mem_send_op_cond(sc,
162 ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE) ? ocr : 0, &card_ocr);
163 if (error) {
164 if (ISSET(sc->sc_flags, SMF_SD_MODE) &&
165 !ISSET(sc->sc_flags, SMF_IO_MODE)) {
166 /* Not a SD card, switch to MMC mode. */
167 DPRINTF(("%s: switch to MMC mode\n", SDMMCDEVNAME(sc)));
168 CLR(sc->sc_flags, SMF_SD_MODE);
169 goto mmc_mode;
170 }
171 if (!ISSET(sc->sc_flags, SMF_SD_MODE)) {
172 DPRINTF(("%s: couldn't read memory OCR\n",
173 SDMMCDEVNAME(sc)));
174 goto out;
175 } else {
176 /* Not a "combo" card. */
177 CLR(sc->sc_flags, SMF_MEM_MODE);
178 error = 0;
179 goto out;
180 }
181 }
182 if (ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
183 /* get card OCR */
184 error = sdmmc_mem_spi_read_ocr(sc, ocr, &card_ocr);
185 if (error) {
186 DPRINTF(("%s: couldn't read SPI memory OCR\n",
187 SDMMCDEVNAME(sc)));
188 goto out;
189 }
190 }
191
192 /* Set the lowest voltage supported by the card and host. */
193 host_ocr = sdmmc_chip_host_ocr(sc->sc_sct, sc->sc_sch);
194 error = sdmmc_set_bus_power(sc, host_ocr, card_ocr);
195 if (error) {
196 DPRINTF(("%s: couldn't supply voltage requested by card\n",
197 SDMMCDEVNAME(sc)));
198 goto out;
199 }
200
201 /* Tell the card(s) to enter the idle state (again). */
202 sdmmc_go_idle_state(sc);
203
204 DPRINTF(("%s: host_ocr 0x%08x\n", SDMMCDEVNAME(sc), host_ocr));
205 DPRINTF(("%s: card_ocr 0x%08x\n", SDMMCDEVNAME(sc), card_ocr));
206
207 host_ocr &= card_ocr; /* only allow the common voltages */
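	/*
	 * Ask SD Ver.2 cards for high capacity (HCS) and, if the host
	 * supports it, 1.8V signalling (S18A); ask MMC cards for sector
	 * addressing.
	 */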
208 if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
209 if (ISSET(sc->sc_flags, SMF_SD_MODE)) {
210 /* Check SD Ver.2 */
211 error = sdmmc_mem_send_if_cond(sc, 0x1aa, &card_ocr);
212 if (error == 0 && card_ocr == 0x1aa)
213 SET(ocr, MMC_OCR_HCS);
214
215 if (sdmmc_chip_host_ocr(sc->sc_sct, sc->sc_sch) & MMC_OCR_S18A)
216 SET(ocr, MMC_OCR_S18A);
217 } else {
218 SET(ocr, MMC_OCR_ACCESS_MODE_SECTOR);
219 }
220 }
221 host_ocr |= ocr;
222
223 /* Send the new OCR value until all cards are ready. */
224 error = sdmmc_mem_send_op_cond(sc, host_ocr, &new_ocr);
225 if (error) {
226 DPRINTF(("%s: couldn't send memory OCR\n", SDMMCDEVNAME(sc)));
227 goto out;
228 }
229
230 if (ISSET(sc->sc_flags, SMF_SD_MODE) && ISSET(new_ocr, MMC_OCR_S18A)) {
231 /*
232 * Card and host support low voltage mode, begin switch
233 * sequence.
234 */
235 struct sdmmc_command cmd;
236 memset(&cmd, 0, sizeof(cmd));
237 cmd.c_arg = 0;
238 cmd.c_flags = SCF_CMD_AC | SCF_RSP_R1;
239 cmd.c_opcode = SD_VOLTAGE_SWITCH;
240 DPRINTF(("%s: switching card to 1.8V\n", SDMMCDEVNAME(sc)));
241 error = sdmmc_mmc_command(sc, &cmd);
242 if (error) {
243 DPRINTF(("%s: voltage switch command failed\n",
244 SDMMCDEVNAME(sc)));
245 goto out;
246 }
247
248 error = sdmmc_mem_signal_voltage(sc, SDMMC_SIGNAL_VOLTAGE_180);
249 if (error)
250 goto out;
251
252 SET(sc->sc_flags, SMF_UHS_MODE);
253 }
254
255 out:
256 SDMMC_UNLOCK(sc);
257
258 if (error)
259 printf("%s: %s failed with error %d\n", SDMMCDEVNAME(sc),
260 __func__, error);
261
262 return error;
263 }
264
265 static int
266 sdmmc_mem_signal_voltage(struct sdmmc_softc *sc, int signal_voltage)
267 {
268 int error;
269
270 /*
271 * Stop the clock
272 */
273 error = sdmmc_chip_bus_clock(sc->sc_sct, sc->sc_sch,
274 SDMMC_SDCLK_OFF, false);
275 if (error)
276 goto out;
277
278 delay(1000);
279
280 /*
 281 	 * The card's voltage switch command was successful; update the
 282 	 * host controller's signal voltage setting to match.
283 */
284 DPRINTF(("%s: switching host to %s\n", SDMMCDEVNAME(sc),
285 signal_voltage == SDMMC_SIGNAL_VOLTAGE_180 ? "1.8V" : "3.3V"));
286 error = sdmmc_chip_signal_voltage(sc->sc_sct,
287 sc->sc_sch, signal_voltage);
288 if (error)
289 goto out;
290
291 delay(5000);
292
293 /*
294 * Switch to SDR12 timing
295 */
296 error = sdmmc_chip_bus_clock(sc->sc_sct, sc->sc_sch, 25000,
297 false);
298 if (error)
299 goto out;
300
301 delay(1000);
302
303 out:
304 return error;
305 }
306
307 /*
308 * Read the CSD and CID from all cards and assign each card a unique
309 * relative card address (RCA). CMD2 is ignored by SDIO-only cards.
310 */
311 void
312 sdmmc_mem_scan(struct sdmmc_softc *sc)
313 {
314 sdmmc_response resp;
315 struct sdmmc_function *sf;
316 uint16_t next_rca;
317 int error;
318 int retry;
319
320 SDMMC_LOCK(sc);
321
322 /*
323 * CMD2 is a broadcast command understood by SD cards and MMC
324 * cards. All cards begin to respond to the command, but back
325 * off if another card drives the CMD line to a different level.
326 * Only one card will get its entire response through. That
327 * card remains silent once it has been assigned a RCA.
328 */
329 for (retry = 0; retry < 100; retry++) {
330 error = sdmmc_mem_send_cid(sc, &resp);
331 if (error) {
332 if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE) &&
333 error == ETIMEDOUT) {
334 /* No more cards there. */
335 break;
336 }
337 DPRINTF(("%s: couldn't read CID\n", SDMMCDEVNAME(sc)));
338 break;
339 }
340
341 /* In MMC mode, find the next available RCA. */
342 next_rca = 1;
343 if (!ISSET(sc->sc_flags, SMF_SD_MODE)) {
344 SIMPLEQ_FOREACH(sf, &sc->sf_head, sf_list)
345 next_rca++;
346 }
347
348 /* Allocate a sdmmc_function structure. */
349 sf = sdmmc_function_alloc(sc);
350 sf->rca = next_rca;
351
352 /*
353 * Remember the CID returned in the CMD2 response for
354 * later decoding.
355 */
356 memcpy(sf->raw_cid, resp, sizeof(sf->raw_cid));
357
358 /*
359 * Silence the card by assigning it a unique RCA, or
360 * querying it for its RCA in the case of SD.
361 */
362 if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
363 if (sdmmc_set_relative_addr(sc, sf) != 0) {
364 aprint_error_dev(sc->sc_dev,
365 "couldn't set mem RCA\n");
366 sdmmc_function_free(sf);
367 break;
368 }
369 }
370
371 /*
372 * If this is a memory-only card, the card responding
373 * first becomes an alias for SDIO function 0.
374 */
375 if (sc->sc_fn0 == NULL)
376 sc->sc_fn0 = sf;
377
378 SIMPLEQ_INSERT_TAIL(&sc->sf_head, sf, sf_list);
379
380 /* only one function in SPI mode */
381 if (ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE))
382 break;
383 }
384
385 if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE))
386 /* Go to Data Transfer Mode, if possible. */
387 sdmmc_chip_bus_rod(sc->sc_sct, sc->sc_sch, 0);
388
389 /*
390 * All cards are either inactive or awaiting further commands.
391 * Read the CSDs and decode the raw CID for each card.
392 */
393 SIMPLEQ_FOREACH(sf, &sc->sf_head, sf_list) {
394 error = sdmmc_mem_send_csd(sc, sf, &resp);
395 if (error) {
396 SET(sf->flags, SFF_ERROR);
397 continue;
398 }
399
400 if (sdmmc_decode_csd(sc, resp, sf) != 0 ||
401 sdmmc_decode_cid(sc, sf->raw_cid, sf) != 0) {
402 SET(sf->flags, SFF_ERROR);
403 continue;
404 }
405
406 #ifdef SDMMC_DEBUG
407 printf("%s: CID: ", SDMMCDEVNAME(sc));
408 sdmmc_print_cid(&sf->cid);
409 #endif
410 }
411
412 SDMMC_UNLOCK(sc);
413 }
414
415 int
416 sdmmc_decode_csd(struct sdmmc_softc *sc, sdmmc_response resp,
417 struct sdmmc_function *sf)
418 {
419 /* TRAN_SPEED(2:0): transfer rate exponent */
420 static const int speed_exponent[8] = {
421 100 * 1, /* 100 Kbits/s */
422 1 * 1000, /* 1 Mbits/s */
423 10 * 1000, /* 10 Mbits/s */
424 100 * 1000, /* 100 Mbits/s */
425 0,
426 0,
427 0,
428 0,
429 };
430 /* TRAN_SPEED(6:3): time mantissa */
431 static const int speed_mantissa[16] = {
432 0, 10, 12, 13, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60, 70, 80,
433 };
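	/* e.g. the common TRAN_SPEED value 0x32: exponent 2 (10 Mbit/s)
	 * times mantissa 2.5 gives 25000 kbit/s. */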
434 struct sdmmc_csd *csd = &sf->csd;
435 int e, m;
436
437 if (ISSET(sc->sc_flags, SMF_SD_MODE)) {
438 /*
439 * CSD version 1.0 corresponds to SD system
440 * specification version 1.0 - 1.10. (SanDisk, 3.5.3)
441 */
442 csd->csdver = SD_CSD_CSDVER(resp);
443 switch (csd->csdver) {
444 case SD_CSD_CSDVER_2_0:
445 DPRINTF(("%s: SD Ver.2.0\n", SDMMCDEVNAME(sc)));
446 SET(sf->flags, SFF_SDHC);
447 csd->capacity = SD_CSD_V2_CAPACITY(resp);
448 csd->read_bl_len = SD_CSD_V2_BL_LEN;
449 break;
450
451 case SD_CSD_CSDVER_1_0:
452 DPRINTF(("%s: SD Ver.1.0\n", SDMMCDEVNAME(sc)));
453 csd->capacity = SD_CSD_CAPACITY(resp);
454 csd->read_bl_len = SD_CSD_READ_BL_LEN(resp);
455 break;
456
457 default:
458 aprint_error_dev(sc->sc_dev,
459 "unknown SD CSD structure version 0x%x\n",
460 csd->csdver);
461 return 1;
462 }
463
464 csd->mmcver = SD_CSD_MMCVER(resp);
465 csd->write_bl_len = SD_CSD_WRITE_BL_LEN(resp);
466 csd->r2w_factor = SD_CSD_R2W_FACTOR(resp);
467 e = SD_CSD_SPEED_EXP(resp);
468 m = SD_CSD_SPEED_MANT(resp);
469 csd->tran_speed = speed_exponent[e] * speed_mantissa[m] / 10;
470 csd->ccc = SD_CSD_CCC(resp);
471 } else {
472 csd->csdver = MMC_CSD_CSDVER(resp);
473 if (csd->csdver == MMC_CSD_CSDVER_1_0) {
474 aprint_error_dev(sc->sc_dev,
475 "unknown MMC CSD structure version 0x%x\n",
476 csd->csdver);
477 return 1;
478 }
479
480 csd->mmcver = MMC_CSD_MMCVER(resp);
481 csd->capacity = MMC_CSD_CAPACITY(resp);
482 csd->read_bl_len = MMC_CSD_READ_BL_LEN(resp);
483 csd->write_bl_len = MMC_CSD_WRITE_BL_LEN(resp);
484 csd->r2w_factor = MMC_CSD_R2W_FACTOR(resp);
485 e = MMC_CSD_TRAN_SPEED_EXP(resp);
486 m = MMC_CSD_TRAN_SPEED_MANT(resp);
487 csd->tran_speed = speed_exponent[e] * speed_mantissa[m] / 10;
488 }
489 if ((1 << csd->read_bl_len) > SDMMC_SECTOR_SIZE)
490 csd->capacity *= (1 << csd->read_bl_len) / SDMMC_SECTOR_SIZE;
491
492 #ifdef SDMMC_DUMP_CSD
493 sdmmc_print_csd(resp, csd);
494 #endif
495
496 return 0;
497 }
498
499 int
500 sdmmc_decode_cid(struct sdmmc_softc *sc, sdmmc_response resp,
501 struct sdmmc_function *sf)
502 {
503 struct sdmmc_cid *cid = &sf->cid;
504
505 if (ISSET(sc->sc_flags, SMF_SD_MODE)) {
506 cid->mid = SD_CID_MID(resp);
507 cid->oid = SD_CID_OID(resp);
508 SD_CID_PNM_CPY(resp, cid->pnm);
509 cid->rev = SD_CID_REV(resp);
510 cid->psn = SD_CID_PSN(resp);
511 cid->mdt = SD_CID_MDT(resp);
512 } else {
513 switch(sf->csd.mmcver) {
514 case MMC_CSD_MMCVER_1_0:
515 case MMC_CSD_MMCVER_1_4:
516 cid->mid = MMC_CID_MID_V1(resp);
517 MMC_CID_PNM_V1_CPY(resp, cid->pnm);
518 cid->rev = MMC_CID_REV_V1(resp);
519 cid->psn = MMC_CID_PSN_V1(resp);
520 cid->mdt = MMC_CID_MDT_V1(resp);
521 break;
522 case MMC_CSD_MMCVER_2_0:
523 case MMC_CSD_MMCVER_3_1:
524 case MMC_CSD_MMCVER_4_0:
525 cid->mid = MMC_CID_MID_V2(resp);
526 cid->oid = MMC_CID_OID_V2(resp);
527 MMC_CID_PNM_V2_CPY(resp, cid->pnm);
528 cid->psn = MMC_CID_PSN_V2(resp);
529 break;
530 default:
531 aprint_error_dev(sc->sc_dev, "unknown MMC version %d\n",
532 sf->csd.mmcver);
533 return 1;
534 }
535 }
536 return 0;
537 }
538
539 void
540 sdmmc_print_cid(struct sdmmc_cid *cid)
541 {
542
543 printf("mid=0x%02x oid=0x%04x pnm=\"%s\" rev=0x%02x psn=0x%08x"
544 " mdt=%03x\n", cid->mid, cid->oid, cid->pnm, cid->rev, cid->psn,
545 cid->mdt);
546 }
547
548 #ifdef SDMMC_DUMP_CSD
549 void
550 sdmmc_print_csd(sdmmc_response resp, struct sdmmc_csd *csd)
551 {
552
553 printf("csdver = %d\n", csd->csdver);
554 printf("mmcver = %d\n", csd->mmcver);
555 printf("capacity = 0x%08x\n", csd->capacity);
556 printf("read_bl_len = %d\n", csd->read_bl_len);
557 printf("write_bl_len = %d\n", csd->write_bl_len);
558 printf("r2w_factor = %d\n", csd->r2w_factor);
559 printf("tran_speed = %d\n", csd->tran_speed);
560 printf("ccc = 0x%x\n", csd->ccc);
561 }
562 #endif
563
564 /*
565 * Initialize a SD/MMC memory card.
566 */
567 int
568 sdmmc_mem_init(struct sdmmc_softc *sc, struct sdmmc_function *sf)
569 {
570 int error = 0;
571
572 SDMMC_LOCK(sc);
573
574 if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
575 error = sdmmc_select_card(sc, sf);
576 if (error)
577 goto out;
578 }
579
580 error = sdmmc_mem_set_blocklen(sc, sf, SDMMC_SECTOR_SIZE);
581 if (error)
582 goto out;
583
584 if (ISSET(sc->sc_flags, SMF_SD_MODE))
585 error = sdmmc_mem_sd_init(sc, sf);
586 else
587 error = sdmmc_mem_mmc_init(sc, sf);
588
589 out:
590 SDMMC_UNLOCK(sc);
591
592 return error;
593 }
594
595 /*
 596  * Negotiate the memory OCR value with the card (SD ACMD41 or MMC CMD1).
597 */
598 int
599 sdmmc_mem_send_op_cond(struct sdmmc_softc *sc, uint32_t ocr, uint32_t *ocrp)
600 {
601 struct sdmmc_command cmd;
602 int error;
603 int retry;
604
605 /* Don't lock */
606
607 DPRINTF(("%s: sdmmc_mem_send_op_cond: ocr=%#x\n",
608 SDMMCDEVNAME(sc), ocr));
609
610 /*
 611 	 * When a non-zero OCR is sent, retry the command until the OCR
 612 	 * we receive in response has the power-up status ("card busy")
 613 	 * bit set, meaning that all cards are ready for identification.
614 */
615 for (retry = 0; retry < 100; retry++) {
616 memset(&cmd, 0, sizeof(cmd));
617 cmd.c_arg = !ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE) ?
618 ocr : (ocr & MMC_OCR_HCS);
619 cmd.c_flags = SCF_CMD_BCR | SCF_RSP_R3 | SCF_RSP_SPI_R1
620 | SCF_TOUT_OK;
621
622 if (ISSET(sc->sc_flags, SMF_SD_MODE)) {
623 cmd.c_opcode = SD_APP_OP_COND;
624 error = sdmmc_app_command(sc, NULL, &cmd);
625 } else {
626 cmd.c_opcode = MMC_SEND_OP_COND;
627 error = sdmmc_mmc_command(sc, &cmd);
628 }
629 if (error)
630 break;
631
632 if (ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
633 if (!ISSET(MMC_SPI_R1(cmd.c_resp), R1_SPI_IDLE))
634 break;
635 } else {
636 if (ISSET(MMC_R3(cmd.c_resp), MMC_OCR_MEM_READY) ||
637 ocr == 0)
638 break;
639 }
640
641 error = ETIMEDOUT;
642 sdmmc_delay(10000);
643 }
644 if (error == 0 &&
645 ocrp != NULL &&
646 !ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE))
647 *ocrp = MMC_R3(cmd.c_resp);
648 DPRINTF(("%s: sdmmc_mem_send_op_cond: error=%d, ocr=%#x\n",
649 SDMMCDEVNAME(sc), error, MMC_R3(cmd.c_resp)));
650 return error;
651 }
652
653 int
654 sdmmc_mem_send_if_cond(struct sdmmc_softc *sc, uint32_t ocr, uint32_t *ocrp)
655 {
656 struct sdmmc_command cmd;
657 int error;
658
659 /* Don't lock */
660
661 memset(&cmd, 0, sizeof(cmd));
662 cmd.c_arg = ocr;
663 cmd.c_flags = SCF_CMD_BCR | SCF_RSP_R7 | SCF_RSP_SPI_R7;
664 cmd.c_opcode = SD_SEND_IF_COND;
665
666 error = sdmmc_mmc_command(sc, &cmd);
667 if (error == 0 && ocrp != NULL) {
668 if (ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
669 *ocrp = MMC_SPI_R7(cmd.c_resp);
670 } else {
671 *ocrp = MMC_R7(cmd.c_resp);
672 }
673 DPRINTF(("%s: sdmmc_mem_send_if_cond: error=%d, ocr=%#x\n",
674 SDMMCDEVNAME(sc), error, *ocrp));
675 }
676 return error;
677 }
678
679 /*
680 * Set the read block length appropriately for this card, according to
681 * the card CSD register value.
682 */
683 int
684 sdmmc_mem_set_blocklen(struct sdmmc_softc *sc, struct sdmmc_function *sf,
685 int block_len)
686 {
687 struct sdmmc_command cmd;
688 int error;
689
690 /* Don't lock */
691
692 memset(&cmd, 0, sizeof(cmd));
693 cmd.c_opcode = MMC_SET_BLOCKLEN;
694 cmd.c_arg = block_len;
695 cmd.c_flags = SCF_CMD_AC | SCF_RSP_R1 | SCF_RSP_SPI_R1;
696
697 error = sdmmc_mmc_command(sc, &cmd);
698
699 DPRINTF(("%s: sdmmc_mem_set_blocklen: read_bl_len=%d sector_size=%d\n",
700 SDMMCDEVNAME(sc), 1 << sf->csd.read_bl_len, block_len));
701
702 return error;
703 }
704
705 /* make 512-bit BE quantity __bitfield()-compatible */
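/*
 * The card sends the switch status most-significant byte first; reverse
 * the word order and byte-swap each word so bit offsets count from the
 * least-significant end.
 */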
706 static void
707 sdmmc_be512_to_bitfield512(sdmmc_bitfield512_t *buf) {
708 size_t i;
709 uint32_t tmp0, tmp1;
710 const size_t bitswords = __arraycount(buf->_bits);
711 for (i = 0; i < bitswords/2; i++) {
712 tmp0 = buf->_bits[i];
713 tmp1 = buf->_bits[bitswords - 1 - i];
714 buf->_bits[i] = be32toh(tmp1);
715 buf->_bits[bitswords - 1 - i] = be32toh(tmp0);
716 }
717 }
718
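/*
 * Pick the fastest access mode supported by both host and card: SDR104,
 * DDR50 or SDR50 when in UHS mode, otherwise High-Speed/SDR25, falling
 * back to Default/SDR12.
 */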
719 static int
720 sdmmc_mem_select_transfer_mode(struct sdmmc_softc *sc, int support_func)
721 {
722 if (ISSET(sc->sc_flags, SMF_UHS_MODE)) {
723 if (ISSET(sc->sc_caps, SMC_CAPS_UHS_SDR104) &&
724 ISSET(support_func, 1 << SD_ACCESS_MODE_SDR104)) {
725 return SD_ACCESS_MODE_SDR104;
726 }
727 if (ISSET(sc->sc_caps, SMC_CAPS_UHS_DDR50) &&
728 ISSET(support_func, 1 << SD_ACCESS_MODE_DDR50)) {
729 return SD_ACCESS_MODE_DDR50;
730 }
731 if (ISSET(sc->sc_caps, SMC_CAPS_UHS_SDR50) &&
732 ISSET(support_func, 1 << SD_ACCESS_MODE_SDR50)) {
733 return SD_ACCESS_MODE_SDR50;
734 }
735 }
736 if (ISSET(sc->sc_caps, SMC_CAPS_SD_HIGHSPEED) &&
737 ISSET(support_func, 1 << SD_ACCESS_MODE_SDR25)) {
738 return SD_ACCESS_MODE_SDR25;
739 }
740 return SD_ACCESS_MODE_SDR12;
741 }
742
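/*
 * Run the host controller's tuning procedure when the negotiated timing
 * requires it: SDR50/SDR104 for UHS SD cards, HS200 for eMMC.  Other
 * timings need no tuning.
 */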
743 static int
744 sdmmc_mem_execute_tuning(struct sdmmc_softc *sc, struct sdmmc_function *sf)
745 {
746 int timing = -1;
747
748 if (!ISSET(sc->sc_flags, SMF_UHS_MODE))
749 return 0;
750
751 if (ISSET(sc->sc_flags, SMF_SD_MODE)) {
752 if (!ISSET(sc->sc_flags, SMF_UHS_MODE))
753 return 0;
754
755 switch (sf->csd.tran_speed) {
756 case 100000:
757 timing = SDMMC_TIMING_UHS_SDR50;
758 break;
759 case 208000:
760 timing = SDMMC_TIMING_UHS_SDR104;
761 break;
762 default:
763 return 0;
764 }
765 } else {
766 switch (sf->csd.tran_speed) {
767 case 200000:
768 timing = SDMMC_TIMING_MMC_HS200;
769 break;
770 default:
771 return 0;
772 }
773 }
774
775 DPRINTF(("%s: execute tuning for timing %d\n", SDMMCDEVNAME(sc),
776 timing));
777
778 return sdmmc_chip_execute_tuning(sc->sc_sct, sc->sc_sch, timing);
779 }
780
781 static int
782 sdmmc_mem_sd_init(struct sdmmc_softc *sc, struct sdmmc_function *sf)
783 {
784 int support_func, best_func, bus_clock, error, i;
785 sdmmc_bitfield512_t status; /* Switch Function Status */
786 bool ddr = false;
787
788 /* change bus clock */
789 bus_clock = min(sc->sc_busclk, sf->csd.tran_speed);
790 error = sdmmc_chip_bus_clock(sc->sc_sct, sc->sc_sch, bus_clock, false);
791 if (error) {
792 aprint_error_dev(sc->sc_dev, "can't change bus clock\n");
793 return error;
794 }
795
796 error = sdmmc_mem_send_scr(sc, sf, sf->raw_scr);
797 if (error) {
798 aprint_error_dev(sc->sc_dev, "SD_SEND_SCR send failed.\n");
799 return error;
800 }
801 error = sdmmc_mem_decode_scr(sc, sf);
802 if (error)
803 return error;
804
805 if (ISSET(sc->sc_caps, SMC_CAPS_4BIT_MODE) &&
806 ISSET(sf->scr.bus_width, SCR_SD_BUS_WIDTHS_4BIT)) {
807 DPRINTF(("%s: change bus width\n", SDMMCDEVNAME(sc)));
808 error = sdmmc_set_bus_width(sf, 4);
809 if (error) {
810 aprint_error_dev(sc->sc_dev,
811 "can't change bus width (%d bit)\n", 4);
812 return error;
813 }
814 sf->width = 4;
815 }
816
817 best_func = 0;
818 if (sf->scr.sd_spec >= SCR_SD_SPEC_VER_1_10 &&
819 ISSET(sf->csd.ccc, SD_CSD_CCC_SWITCH)) {
820 DPRINTF(("%s: switch func mode 0\n", SDMMCDEVNAME(sc)));
821 error = sdmmc_mem_sd_switch(sf, 0, 1, 0, &status);
822 if (error) {
823 aprint_error_dev(sc->sc_dev,
824 "switch func mode 0 failed\n");
825 return error;
826 }
827
828 support_func = SFUNC_STATUS_GROUP(&status, 1);
829
830 if (!ISSET(sc->sc_flags, SMF_UHS_MODE) && support_func & 0x1c) {
831 /* XXX UHS-I card started in 1.8V mode, switch now */
832 error = sdmmc_mem_signal_voltage(sc,
833 SDMMC_SIGNAL_VOLTAGE_180);
834 if (error) {
835 aprint_error_dev(sc->sc_dev,
836 "failed to recover UHS card\n");
837 return error;
838 }
839 SET(sc->sc_flags, SMF_UHS_MODE);
840 }
841
842 for (i = 0; i < __arraycount(switch_group0_functions); i++) {
843 if (!(support_func & (1 << i)))
844 continue;
845 DPRINTF(("%s: card supports mode %s\n",
846 SDMMCDEVNAME(sc),
847 switch_group0_functions[i].name));
848 }
849
850 best_func = sdmmc_mem_select_transfer_mode(sc, support_func);
851
852 DPRINTF(("%s: using mode %s\n", SDMMCDEVNAME(sc),
853 switch_group0_functions[best_func].name));
854
855 if (best_func != 0) {
856 DPRINTF(("%s: switch func mode 1(func=%d)\n",
857 SDMMCDEVNAME(sc), best_func));
858 error =
859 sdmmc_mem_sd_switch(sf, 1, 1, best_func, &status);
860 if (error) {
861 aprint_error_dev(sc->sc_dev,
862 "switch func mode 1 failed:"
863 " group 1 function %d(0x%2x)\n",
864 best_func, support_func);
865 return error;
866 }
867 sf->csd.tran_speed =
868 switch_group0_functions[best_func].freq;
869
870 if (best_func == SD_ACCESS_MODE_DDR50)
871 ddr = true;
872
873 /* Wait 400KHz x 8 clock (2.5us * 8 + slop) */
874 delay(25);
875 }
876 }
877
878 /* update bus clock */
879 if (sc->sc_busclk > sf->csd.tran_speed)
880 sc->sc_busclk = sf->csd.tran_speed;
881 if (sc->sc_busclk == bus_clock && sc->sc_busddr == ddr)
882 return 0;
883
884 /* change bus clock */
885 error = sdmmc_chip_bus_clock(sc->sc_sct, sc->sc_sch, sc->sc_busclk,
886 ddr);
887 if (error) {
888 aprint_error_dev(sc->sc_dev, "can't change bus clock\n");
889 return error;
890 }
891
892 sc->sc_transfer_mode = switch_group0_functions[best_func].name;
893 sc->sc_busddr = ddr;
894
895 /* execute tuning (UHS) */
896 error = sdmmc_mem_execute_tuning(sc, sf);
897 if (error) {
898 aprint_error_dev(sc->sc_dev, "can't execute SD tuning\n");
899 return error;
900 }
901
902 return 0;
903 }
904
905 static int
906 sdmmc_mem_mmc_init(struct sdmmc_softc *sc, struct sdmmc_function *sf)
907 {
908 int width, value, hs_timing, bus_clock, error;
909 char ext_csd[512];
910 uint32_t sectors = 0;
911
912 sc->sc_transfer_mode = NULL;
913
914 /* change bus clock */
915 bus_clock = min(sc->sc_busclk, sf->csd.tran_speed);
916 error = sdmmc_chip_bus_clock(sc->sc_sct, sc->sc_sch, bus_clock, false);
917 if (error) {
918 aprint_error_dev(sc->sc_dev, "can't change bus clock\n");
919 return error;
920 }
921
922 if (sf->csd.mmcver >= MMC_CSD_MMCVER_4_0) {
923 error = sdmmc_mem_send_cxd_data(sc,
924 MMC_SEND_EXT_CSD, ext_csd, sizeof(ext_csd));
925 if (error) {
926 aprint_error_dev(sc->sc_dev,
927 "can't read EXT_CSD (error=%d)\n", error);
928 return error;
929 }
930 if ((sf->csd.csdver == MMC_CSD_CSDVER_EXT_CSD) &&
931 (ext_csd[EXT_CSD_STRUCTURE] > EXT_CSD_STRUCTURE_VER_1_2)) {
932 aprint_error_dev(sc->sc_dev,
933 "unrecognised future version (%d)\n",
934 ext_csd[EXT_CSD_STRUCTURE]);
935 return ENOTSUP;
936 }
937
938 sc->sc_transfer_mode = NULL;
939 if (ISSET(sc->sc_caps, SMC_CAPS_MMC_HS200) &&
940 ext_csd[EXT_CSD_CARD_TYPE] & EXT_CSD_CARD_TYPE_F_HS200_1_8V) {
941 sf->csd.tran_speed = 200000; /* 200MHz SDR */
942 hs_timing = 2;
943 } else if (ext_csd[EXT_CSD_CARD_TYPE] & EXT_CSD_CARD_TYPE_F_52M) {
944 sf->csd.tran_speed = 52000; /* 52MHz */
945 hs_timing = 1;
946 } else if (ext_csd[EXT_CSD_CARD_TYPE] & EXT_CSD_CARD_TYPE_F_26M) {
947 sf->csd.tran_speed = 26000; /* 26MHz */
948 hs_timing = 0;
949 } else {
950 aprint_error_dev(sc->sc_dev,
951 "unknown CARD_TYPE: 0x%x\n",
952 ext_csd[EXT_CSD_CARD_TYPE]);
953 return ENOTSUP;
954 }
955
956 if (ISSET(sc->sc_caps, SMC_CAPS_8BIT_MODE)) {
957 width = 8;
958 value = EXT_CSD_BUS_WIDTH_8;
959 } else if (ISSET(sc->sc_caps, SMC_CAPS_4BIT_MODE)) {
960 width = 4;
961 value = EXT_CSD_BUS_WIDTH_4;
962 } else {
963 width = 1;
964 value = EXT_CSD_BUS_WIDTH_1;
965 }
966
967 if (width != 1) {
968 error = sdmmc_mem_mmc_switch(sf, EXT_CSD_CMD_SET_NORMAL,
969 EXT_CSD_BUS_WIDTH, value);
970 if (error == 0)
971 error = sdmmc_chip_bus_width(sc->sc_sct,
972 sc->sc_sch, width);
973 else {
974 DPRINTF(("%s: can't change bus width"
975 " (%d bit)\n", SDMMCDEVNAME(sc), width));
976 return error;
977 }
978
 979 			/* XXXX: need bus test? (using CMD14 & CMD19) */
980 delay(10000);
981 }
982 sf->width = width;
983
984 if (hs_timing == 1 &&
985 !ISSET(sc->sc_caps, SMC_CAPS_MMC_HIGHSPEED)) {
986 hs_timing = 0;
987 }
988 if (hs_timing) {
989 error = sdmmc_mem_mmc_switch(sf, EXT_CSD_CMD_SET_NORMAL,
990 EXT_CSD_HS_TIMING, hs_timing);
991 if (error) {
992 aprint_error_dev(sc->sc_dev,
993 "can't change high speed %d, error %d\n",
994 hs_timing, error);
995 return error;
996 }
997 }
998
999 if (sc->sc_busclk > sf->csd.tran_speed)
1000 sc->sc_busclk = sf->csd.tran_speed;
1001 if (sc->sc_busclk != bus_clock) {
1002 error = sdmmc_chip_bus_clock(sc->sc_sct, sc->sc_sch,
1003 sc->sc_busclk, false);
1004 if (error) {
1005 aprint_error_dev(sc->sc_dev,
1006 "can't change bus clock\n");
1007 return error;
1008 }
1009 }
1010
1011 if (hs_timing) {
1012 error = sdmmc_mem_send_cxd_data(sc,
1013 MMC_SEND_EXT_CSD, ext_csd, sizeof(ext_csd));
1014 if (error) {
1015 aprint_error_dev(sc->sc_dev,
1016 "can't re-read EXT_CSD\n");
1017 return error;
1018 }
1019 if (ext_csd[EXT_CSD_HS_TIMING] != hs_timing) {
1020 aprint_error_dev(sc->sc_dev,
1021 "HS_TIMING set failed\n");
1022 return EINVAL;
1023 }
1024 }
1025
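		/*
		 * EXT_CSD SEC_COUNT is a little-endian 32-bit sector count;
		 * devices larger than 2GB report their capacity here and
		 * are addressed by sector.
		 */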
1026 sectors = ext_csd[EXT_CSD_SEC_COUNT + 0] << 0 |
1027 ext_csd[EXT_CSD_SEC_COUNT + 1] << 8 |
1028 ext_csd[EXT_CSD_SEC_COUNT + 2] << 16 |
1029 ext_csd[EXT_CSD_SEC_COUNT + 3] << 24;
1030 if (sectors > (2u * 1024 * 1024 * 1024) / 512) {
1031 SET(sf->flags, SFF_SDHC);
1032 sf->csd.capacity = sectors;
1033 }
1034
1035 if (hs_timing == 2) {
1036 sc->sc_transfer_mode = "HS200";
1037
1038 /* execute tuning (HS200) */
1039 error = sdmmc_mem_execute_tuning(sc, sf);
1040 if (error) {
1041 aprint_error_dev(sc->sc_dev,
1042 "can't execute MMC tuning\n");
1043 return error;
1044 }
1045 } else {
1046 sc->sc_transfer_mode = NULL;
1047 }
1048 } else {
1049 if (sc->sc_busclk > sf->csd.tran_speed)
1050 sc->sc_busclk = sf->csd.tran_speed;
1051 if (sc->sc_busclk != bus_clock) {
1052 error = sdmmc_chip_bus_clock(sc->sc_sct, sc->sc_sch,
1053 sc->sc_busclk, false);
1054 if (error) {
1055 aprint_error_dev(sc->sc_dev,
1056 "can't change bus clock\n");
1057 return error;
1058 }
1059 }
1060 }
1061
1062 return 0;
1063 }
1064
1065 static int
1066 sdmmc_mem_send_cid(struct sdmmc_softc *sc, sdmmc_response *resp)
1067 {
1068 struct sdmmc_command cmd;
1069 int error;
1070
1071 if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
1072 memset(&cmd, 0, sizeof cmd);
1073 cmd.c_opcode = MMC_ALL_SEND_CID;
1074 cmd.c_flags = SCF_CMD_BCR | SCF_RSP_R2 | SCF_TOUT_OK;
1075
1076 error = sdmmc_mmc_command(sc, &cmd);
1077 } else {
1078 error = sdmmc_mem_send_cxd_data(sc, MMC_SEND_CID, &cmd.c_resp,
1079 sizeof(cmd.c_resp));
1080 }
1081
1082 #ifdef SDMMC_DEBUG
1083 if (error == 0)
1084 sdmmc_dump_data("CID", cmd.c_resp, sizeof(cmd.c_resp));
1085 #endif
1086 if (error == 0 && resp != NULL)
1087 memcpy(resp, &cmd.c_resp, sizeof(*resp));
1088 return error;
1089 }
1090
1091 static int
1092 sdmmc_mem_send_csd(struct sdmmc_softc *sc, struct sdmmc_function *sf,
1093 sdmmc_response *resp)
1094 {
1095 struct sdmmc_command cmd;
1096 int error;
1097
1098 if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
1099 memset(&cmd, 0, sizeof cmd);
1100 cmd.c_opcode = MMC_SEND_CSD;
1101 cmd.c_arg = MMC_ARG_RCA(sf->rca);
1102 cmd.c_flags = SCF_CMD_AC | SCF_RSP_R2;
1103
1104 error = sdmmc_mmc_command(sc, &cmd);
1105 } else {
1106 error = sdmmc_mem_send_cxd_data(sc, MMC_SEND_CSD, &cmd.c_resp,
1107 sizeof(cmd.c_resp));
1108 }
1109
1110 #ifdef SDMMC_DEBUG
1111 if (error == 0)
1112 sdmmc_dump_data("CSD", cmd.c_resp, sizeof(cmd.c_resp));
1113 #endif
1114 if (error == 0 && resp != NULL)
1115 memcpy(resp, &cmd.c_resp, sizeof(*resp));
1116 return error;
1117 }
1118
1119 static int
1120 sdmmc_mem_send_scr(struct sdmmc_softc *sc, struct sdmmc_function *sf,
1121 uint32_t *scr)
1122 {
1123 struct sdmmc_command cmd;
1124 bus_dma_segment_t ds[1];
1125 void *ptr = NULL;
1126 int datalen = 8;
1127 int rseg;
1128 int error = 0;
1129
1130 /* Don't lock */
1131
1132 if (ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
1133 error = bus_dmamem_alloc(sc->sc_dmat, datalen, PAGE_SIZE, 0,
1134 ds, 1, &rseg, BUS_DMA_NOWAIT);
1135 if (error)
1136 goto out;
1137 error = bus_dmamem_map(sc->sc_dmat, ds, 1, datalen, &ptr,
1138 BUS_DMA_NOWAIT);
1139 if (error)
1140 goto dmamem_free;
1141 error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmap, ptr, datalen,
1142 NULL, BUS_DMA_NOWAIT|BUS_DMA_STREAMING|BUS_DMA_READ);
1143 if (error)
1144 goto dmamem_unmap;
1145
1146 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap, 0, datalen,
1147 BUS_DMASYNC_PREREAD);
1148 } else {
1149 ptr = malloc(datalen, M_DEVBUF, M_NOWAIT | M_ZERO);
 1150 		if (ptr == NULL) {
 1151 			error = ENOMEM;
 			goto out;
 		}
1152 }
1153
1154 memset(&cmd, 0, sizeof(cmd));
1155 cmd.c_data = ptr;
1156 cmd.c_datalen = datalen;
1157 cmd.c_blklen = datalen;
1158 cmd.c_arg = 0;
1159 cmd.c_flags = SCF_CMD_ADTC | SCF_CMD_READ | SCF_RSP_R1 | SCF_RSP_SPI_R1;
1160 cmd.c_opcode = SD_APP_SEND_SCR;
1161 if (ISSET(sc->sc_caps, SMC_CAPS_DMA))
1162 cmd.c_dmamap = sc->sc_dmap;
1163
1164 error = sdmmc_app_command(sc, sf, &cmd);
1165 if (error == 0) {
1166 if (ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
1167 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap, 0, datalen,
1168 BUS_DMASYNC_POSTREAD);
1169 }
1170 memcpy(scr, ptr, datalen);
1171 }
1172
1173 out:
1174 if (ptr != NULL) {
1175 if (ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
1176 bus_dmamap_unload(sc->sc_dmat, sc->sc_dmap);
1177 dmamem_unmap:
1178 bus_dmamem_unmap(sc->sc_dmat, ptr, datalen);
1179 dmamem_free:
1180 bus_dmamem_free(sc->sc_dmat, ds, rseg);
1181 } else {
1182 free(ptr, M_DEVBUF);
1183 }
1184 }
 1185 	DPRINTF(("%s: sdmmc_mem_send_scr: error = %d\n", SDMMCDEVNAME(sc),
1186 error));
1187
1188 #ifdef SDMMC_DEBUG
1189 if (error == 0)
1190 sdmmc_dump_data("SCR", scr, datalen);
1191 #endif
1192 return error;
1193 }
1194
1195 static int
1196 sdmmc_mem_decode_scr(struct sdmmc_softc *sc, struct sdmmc_function *sf)
1197 {
1198 sdmmc_response resp;
1199 int ver;
1200
1201 memset(resp, 0, sizeof(resp));
1202 /*
 1203 	 * Convert the big-endian raw SCR into the sdmmc_response word layout.
1204 */
1205 resp[0] = be32toh(sf->raw_scr[1]) >> 8; // LSW
1206 resp[1] = be32toh(sf->raw_scr[0]); // MSW
1207 resp[0] |= (resp[1] & 0xff) << 24;
1208 resp[1] >>= 8;
1209
1210 ver = SCR_STRUCTURE(resp);
1211 sf->scr.sd_spec = SCR_SD_SPEC(resp);
1212 sf->scr.bus_width = SCR_SD_BUS_WIDTHS(resp);
1213
1214 DPRINTF(("%s: sdmmc_mem_decode_scr: %08x%08x ver=%d, spec=%d, bus width=%d\n",
1215 SDMMCDEVNAME(sc), resp[1], resp[0],
1216 ver, sf->scr.sd_spec, sf->scr.bus_width));
1217
1218 if (ver != 0 && ver != 1) {
1219 DPRINTF(("%s: unknown structure version: %d\n",
1220 SDMMCDEVNAME(sc), ver));
1221 return EINVAL;
1222 }
1223 return 0;
1224 }
1225
1226 static int
1227 sdmmc_mem_send_cxd_data(struct sdmmc_softc *sc, int opcode, void *data,
1228 size_t datalen)
1229 {
1230 struct sdmmc_command cmd;
1231 bus_dma_segment_t ds[1];
1232 void *ptr = NULL;
1233 int rseg;
1234 int error = 0;
1235
1236 if (ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
1237 error = bus_dmamem_alloc(sc->sc_dmat, datalen, PAGE_SIZE, 0, ds,
1238 1, &rseg, BUS_DMA_NOWAIT);
1239 if (error)
1240 goto out;
1241 error = bus_dmamem_map(sc->sc_dmat, ds, 1, datalen, &ptr,
1242 BUS_DMA_NOWAIT);
1243 if (error)
1244 goto dmamem_free;
1245 error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmap, ptr, datalen,
1246 NULL, BUS_DMA_NOWAIT|BUS_DMA_STREAMING|BUS_DMA_READ);
1247 if (error)
1248 goto dmamem_unmap;
1249
1250 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap, 0, datalen,
1251 BUS_DMASYNC_PREREAD);
1252 } else {
1253 ptr = malloc(datalen, M_DEVBUF, M_NOWAIT | M_ZERO);
 1254 		if (ptr == NULL) {
 1255 			error = ENOMEM;
 			goto out;
 		}
1256 }
1257
1258 memset(&cmd, 0, sizeof(cmd));
1259 cmd.c_data = ptr;
1260 cmd.c_datalen = datalen;
1261 cmd.c_blklen = datalen;
1262 cmd.c_opcode = opcode;
1263 cmd.c_arg = 0;
1264 cmd.c_flags = SCF_CMD_ADTC | SCF_CMD_READ | SCF_RSP_SPI_R1;
1265 if (opcode == MMC_SEND_EXT_CSD)
1266 SET(cmd.c_flags, SCF_RSP_R1);
1267 else
1268 SET(cmd.c_flags, SCF_RSP_R2);
1269 if (ISSET(sc->sc_caps, SMC_CAPS_DMA))
1270 cmd.c_dmamap = sc->sc_dmap;
1271
1272 error = sdmmc_mmc_command(sc, &cmd);
1273 if (error == 0) {
1274 if (ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
1275 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap, 0, datalen,
1276 BUS_DMASYNC_POSTREAD);
1277 }
1278 memcpy(data, ptr, datalen);
1279 #ifdef SDMMC_DEBUG
1280 sdmmc_dump_data("CXD", data, datalen);
1281 #endif
1282 }
1283
1284 out:
1285 if (ptr != NULL) {
1286 if (ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
1287 bus_dmamap_unload(sc->sc_dmat, sc->sc_dmap);
1288 dmamem_unmap:
1289 bus_dmamem_unmap(sc->sc_dmat, ptr, datalen);
1290 dmamem_free:
1291 bus_dmamem_free(sc->sc_dmat, ds, rseg);
1292 } else {
1293 free(ptr, M_DEVBUF);
1294 }
1295 }
1296 return error;
1297 }
1298
1299 static int
1300 sdmmc_set_bus_width(struct sdmmc_function *sf, int width)
1301 {
1302 struct sdmmc_softc *sc = sf->sc;
1303 struct sdmmc_command cmd;
1304 int error;
1305
1306 if (ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE))
1307 return ENODEV;
1308
1309 memset(&cmd, 0, sizeof(cmd));
1310 cmd.c_opcode = SD_APP_SET_BUS_WIDTH;
1311 cmd.c_flags = SCF_RSP_R1 | SCF_CMD_AC;
1312
1313 switch (width) {
1314 case 1:
1315 cmd.c_arg = SD_ARG_BUS_WIDTH_1;
1316 break;
1317
1318 case 4:
1319 cmd.c_arg = SD_ARG_BUS_WIDTH_4;
1320 break;
1321
1322 default:
1323 return EINVAL;
1324 }
1325
1326 error = sdmmc_app_command(sc, sf, &cmd);
1327 if (error == 0)
1328 error = sdmmc_chip_bus_width(sc->sc_sct, sc->sc_sch, width);
1329 return error;
1330 }
1331
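/*
 * SD CMD6 (SWITCH_FUNC): bit 31 selects check (0) or switch (1) mode and
 * each function group occupies a 4-bit nibble of the argument; nibbles
 * left at 0xf request no change.  The card returns a 512-bit status block
 * as read data.
 */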
1332 static int
1333 sdmmc_mem_sd_switch(struct sdmmc_function *sf, int mode, int group,
1334 int function, sdmmc_bitfield512_t *status)
1335 {
1336 struct sdmmc_softc *sc = sf->sc;
1337 struct sdmmc_command cmd;
1338 bus_dma_segment_t ds[1];
1339 void *ptr = NULL;
1340 int gsft, rseg, error = 0;
1341 const int statlen = 64;
1342
1343 if (sf->scr.sd_spec >= SCR_SD_SPEC_VER_1_10 &&
1344 !ISSET(sf->csd.ccc, SD_CSD_CCC_SWITCH))
1345 return EINVAL;
1346
1347 if (group <= 0 || group > 6 ||
1348 function < 0 || function > 15)
1349 return EINVAL;
1350
1351 gsft = (group - 1) << 2;
1352
1353 if (ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
1354 error = bus_dmamem_alloc(sc->sc_dmat, statlen, PAGE_SIZE, 0, ds,
1355 1, &rseg, BUS_DMA_NOWAIT);
1356 if (error)
1357 goto out;
1358 error = bus_dmamem_map(sc->sc_dmat, ds, 1, statlen, &ptr,
1359 BUS_DMA_NOWAIT);
1360 if (error)
1361 goto dmamem_free;
1362 error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmap, ptr, statlen,
1363 NULL, BUS_DMA_NOWAIT|BUS_DMA_STREAMING|BUS_DMA_READ);
1364 if (error)
1365 goto dmamem_unmap;
1366
1367 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap, 0, statlen,
1368 BUS_DMASYNC_PREREAD);
1369 } else {
1370 ptr = malloc(statlen, M_DEVBUF, M_NOWAIT | M_ZERO);
 1371 		if (ptr == NULL) {
 1372 			error = ENOMEM;
 			goto out;
 		}
1373 }
1374
1375 memset(&cmd, 0, sizeof(cmd));
1376 cmd.c_data = ptr;
1377 cmd.c_datalen = statlen;
1378 cmd.c_blklen = statlen;
1379 cmd.c_opcode = SD_SEND_SWITCH_FUNC;
1380 cmd.c_arg =
1381 (!!mode << 31) | (function << gsft) | (0x00ffffff & ~(0xf << gsft));
1382 cmd.c_flags = SCF_CMD_ADTC | SCF_CMD_READ | SCF_RSP_R1 | SCF_RSP_SPI_R1;
1383 if (ISSET(sc->sc_caps, SMC_CAPS_DMA))
1384 cmd.c_dmamap = sc->sc_dmap;
1385
1386 error = sdmmc_mmc_command(sc, &cmd);
1387 if (error == 0) {
1388 if (ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
1389 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap, 0, statlen,
1390 BUS_DMASYNC_POSTREAD);
1391 }
1392 memcpy(status, ptr, statlen);
1393 }
1394
1395 out:
1396 if (ptr != NULL) {
1397 if (ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
1398 bus_dmamap_unload(sc->sc_dmat, sc->sc_dmap);
1399 dmamem_unmap:
1400 bus_dmamem_unmap(sc->sc_dmat, ptr, statlen);
1401 dmamem_free:
1402 bus_dmamem_free(sc->sc_dmat, ds, rseg);
1403 } else {
1404 free(ptr, M_DEVBUF);
1405 }
1406 }
1407
1408 if (error == 0)
1409 sdmmc_be512_to_bitfield512(status);
1410
1411 return error;
1412 }
1413
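/*
 * MMC CMD6 (SWITCH): write one EXT_CSD byte.  The argument packs the
 * access mode (write byte), the EXT_CSD index, the value and the command
 * set.  After switching HS_TIMING to HS200, poll SEND_STATUS until the
 * card is ready and check for a switch error.
 */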
1414 static int
1415 sdmmc_mem_mmc_switch(struct sdmmc_function *sf, uint8_t set, uint8_t index,
1416 uint8_t value)
1417 {
1418 struct sdmmc_softc *sc = sf->sc;
1419 struct sdmmc_command cmd;
1420 int error;
1421
1422 memset(&cmd, 0, sizeof(cmd));
1423 cmd.c_opcode = MMC_SWITCH;
1424 cmd.c_arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
1425 (index << 16) | (value << 8) | set;
1426 cmd.c_flags = SCF_RSP_SPI_R1B | SCF_RSP_R1B | SCF_CMD_AC;
1427
1428 error = sdmmc_mmc_command(sc, &cmd);
1429 if (error)
1430 return error;
1431
1432 if (index == EXT_CSD_HS_TIMING && value >= 2) {
1433 do {
1434 memset(&cmd, 0, sizeof(cmd));
1435 cmd.c_opcode = MMC_SEND_STATUS;
1436 if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE))
1437 cmd.c_arg = MMC_ARG_RCA(sf->rca);
1438 cmd.c_flags = SCF_CMD_AC | SCF_RSP_R1 | SCF_RSP_SPI_R2;
1439 error = sdmmc_mmc_command(sc, &cmd);
1440 if (error)
1441 break;
1442 if (ISSET(MMC_R1(cmd.c_resp), MMC_R1_SWITCH_ERROR)) {
1443 aprint_error_dev(sc->sc_dev, "switch error\n");
1444 return EINVAL;
1445 }
1446 /* XXX time out */
1447 } while (!ISSET(MMC_R1(cmd.c_resp), MMC_R1_READY_FOR_DATA));
1448
1449 if (error) {
1450 aprint_error_dev(sc->sc_dev,
1451 "error waiting for high speed switch: %d\n",
1452 error);
1453 return error;
1454 }
1455 }
1456
1457 return 0;
1458 }
1459
1460 /*
1461 * SPI mode function
1462 */
1463 static int
1464 sdmmc_mem_spi_read_ocr(struct sdmmc_softc *sc, uint32_t hcs, uint32_t *card_ocr)
1465 {
1466 struct sdmmc_command cmd;
1467 int error;
1468
1469 memset(&cmd, 0, sizeof(cmd));
1470 cmd.c_opcode = MMC_READ_OCR;
1471 cmd.c_arg = hcs ? MMC_OCR_HCS : 0;
1472 cmd.c_flags = SCF_RSP_SPI_R3;
1473
1474 error = sdmmc_mmc_command(sc, &cmd);
1475 if (error == 0 && card_ocr != NULL)
1476 *card_ocr = cmd.c_resp[1];
1477 DPRINTF(("%s: sdmmc_mem_spi_read_ocr: error=%d, ocr=%#x\n",
1478 SDMMCDEVNAME(sc), error, cmd.c_resp[1]));
1479 return error;
1480 }
1481
1482 /*
1483 * read/write function
1484 */
1485 /* read */
1486 static int
1487 sdmmc_mem_single_read_block(struct sdmmc_function *sf, uint32_t blkno,
1488 u_char *data, size_t datalen)
1489 {
1490 struct sdmmc_softc *sc = sf->sc;
1491 int error = 0;
1492 int i;
1493
1494 KASSERT((datalen % SDMMC_SECTOR_SIZE) == 0);
1495 KASSERT(!ISSET(sc->sc_caps, SMC_CAPS_DMA));
1496
1497 for (i = 0; i < datalen / SDMMC_SECTOR_SIZE; i++) {
1498 error = sdmmc_mem_read_block_subr(sf, sc->sc_dmap, blkno + i,
1499 data + i * SDMMC_SECTOR_SIZE, SDMMC_SECTOR_SIZE);
1500 if (error)
1501 break;
1502 }
1503 return error;
1504 }
1505
1506 /*
 1507  * Simulate a multi-segment DMA transfer on a single-segment controller.
1508 */
1509 static int
1510 sdmmc_mem_single_segment_dma_read_block(struct sdmmc_function *sf,
1511 uint32_t blkno, u_char *data, size_t datalen)
1512 {
1513 struct sdmmc_softc *sc = sf->sc;
1514 bool use_bbuf = false;
1515 int error = 0;
1516 int i;
1517
1518 for (i = 0; i < sc->sc_dmap->dm_nsegs; i++) {
1519 size_t len = sc->sc_dmap->dm_segs[i].ds_len;
1520 if ((len % SDMMC_SECTOR_SIZE) != 0) {
1521 use_bbuf = true;
1522 break;
1523 }
1524 }
1525 if (use_bbuf) {
1526 bus_dmamap_sync(sc->sc_dmat, sf->bbuf_dmap, 0, datalen,
1527 BUS_DMASYNC_PREREAD);
1528
1529 error = sdmmc_mem_read_block_subr(sf, sf->bbuf_dmap,
1530 blkno, data, datalen);
1531 if (error) {
1532 bus_dmamap_unload(sc->sc_dmat, sf->bbuf_dmap);
1533 return error;
1534 }
1535
1536 bus_dmamap_sync(sc->sc_dmat, sf->bbuf_dmap, 0, datalen,
1537 BUS_DMASYNC_POSTREAD);
1538
1539 /* Copy from bounce buffer */
1540 memcpy(data, sf->bbuf, datalen);
1541
1542 return 0;
1543 }
1544
1545 for (i = 0; i < sc->sc_dmap->dm_nsegs; i++) {
1546 size_t len = sc->sc_dmap->dm_segs[i].ds_len;
1547
1548 error = bus_dmamap_load(sc->sc_dmat, sf->sseg_dmap,
1549 data, len, NULL, BUS_DMA_NOWAIT|BUS_DMA_READ);
1550 if (error)
1551 return error;
1552
1553 bus_dmamap_sync(sc->sc_dmat, sf->sseg_dmap, 0, len,
1554 BUS_DMASYNC_PREREAD);
1555
1556 error = sdmmc_mem_read_block_subr(sf, sf->sseg_dmap,
1557 blkno, data, len);
1558 if (error) {
1559 bus_dmamap_unload(sc->sc_dmat, sf->sseg_dmap);
1560 return error;
1561 }
1562
1563 bus_dmamap_sync(sc->sc_dmat, sf->sseg_dmap, 0, len,
1564 BUS_DMASYNC_POSTREAD);
1565
1566 bus_dmamap_unload(sc->sc_dmat, sf->sseg_dmap);
1567
1568 blkno += len / SDMMC_SECTOR_SIZE;
1569 data += len;
1570 }
1571 return 0;
1572 }
1573
1574 static int
1575 sdmmc_mem_read_block_subr(struct sdmmc_function *sf, bus_dmamap_t dmap,
1576 uint32_t blkno, u_char *data, size_t datalen)
1577 {
1578 struct sdmmc_softc *sc = sf->sc;
1579 struct sdmmc_command cmd;
1580 int error;
1581
1582 if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
1583 error = sdmmc_select_card(sc, sf);
1584 if (error)
1585 goto out;
1586 }
1587
1588 memset(&cmd, 0, sizeof(cmd));
1589 cmd.c_data = data;
1590 cmd.c_datalen = datalen;
1591 cmd.c_blklen = SDMMC_SECTOR_SIZE;
1592 cmd.c_opcode = (cmd.c_datalen / cmd.c_blklen) > 1 ?
1593 MMC_READ_BLOCK_MULTIPLE : MMC_READ_BLOCK_SINGLE;
1594 cmd.c_arg = blkno;
1595 if (!ISSET(sf->flags, SFF_SDHC))
1596 cmd.c_arg <<= SDMMC_SECTOR_SIZE_SB;
1597 cmd.c_flags = SCF_CMD_ADTC | SCF_CMD_READ | SCF_RSP_R1 | SCF_RSP_SPI_R1;
1598 if (ISSET(sc->sc_caps, SMC_CAPS_DMA))
1599 cmd.c_dmamap = dmap;
1600
1601 sc->sc_ev_xfer.ev_count++;
1602
1603 error = sdmmc_mmc_command(sc, &cmd);
1604 if (error) {
1605 sc->sc_ev_xfer_error.ev_count++;
1606 goto out;
1607 }
1608
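	/*
	 * Bucket the transfer in the event counters by its alignment:
	 * __builtin_ctz() gives the largest power of two dividing the
	 * length, counted from 512 bytes (2^9) up to 64KB (2^16);
	 * anything else counts as unaligned.
	 */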
1609 const u_int counter = __builtin_ctz(cmd.c_datalen);
1610 if (counter >= 9 && counter <= 16) {
1611 sc->sc_ev_xfer_aligned[counter - 9].ev_count++;
1612 } else {
1613 sc->sc_ev_xfer_unaligned.ev_count++;
1614 }
1615
1616 if (!ISSET(sc->sc_caps, SMC_CAPS_AUTO_STOP)) {
1617 if (cmd.c_opcode == MMC_READ_BLOCK_MULTIPLE) {
1618 memset(&cmd, 0, sizeof cmd);
1619 cmd.c_opcode = MMC_STOP_TRANSMISSION;
1620 cmd.c_arg = MMC_ARG_RCA(sf->rca);
1621 cmd.c_flags = SCF_CMD_AC | SCF_RSP_R1B | SCF_RSP_SPI_R1B;
1622 error = sdmmc_mmc_command(sc, &cmd);
1623 if (error)
1624 goto out;
1625 }
1626 }
1627
1628 if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
1629 do {
1630 memset(&cmd, 0, sizeof(cmd));
1631 cmd.c_opcode = MMC_SEND_STATUS;
1632 if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE))
1633 cmd.c_arg = MMC_ARG_RCA(sf->rca);
1634 cmd.c_flags = SCF_CMD_AC | SCF_RSP_R1 | SCF_RSP_SPI_R2;
1635 error = sdmmc_mmc_command(sc, &cmd);
1636 if (error)
1637 break;
1638 /* XXX time out */
1639 } while (!ISSET(MMC_R1(cmd.c_resp), MMC_R1_READY_FOR_DATA));
1640 }
1641
1642 out:
1643 return error;
1644 }
1645
1646 int
1647 sdmmc_mem_read_block(struct sdmmc_function *sf, uint32_t blkno, u_char *data,
1648 size_t datalen)
1649 {
1650 struct sdmmc_softc *sc = sf->sc;
1651 int error;
1652
1653 SDMMC_LOCK(sc);
1654 mutex_enter(&sc->sc_mtx);
1655
1656 if (ISSET(sc->sc_caps, SMC_CAPS_SINGLE_ONLY)) {
1657 error = sdmmc_mem_single_read_block(sf, blkno, data, datalen);
1658 goto out;
1659 }
1660
1661 if (!ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
1662 error = sdmmc_mem_read_block_subr(sf, sc->sc_dmap, blkno, data,
1663 datalen);
1664 goto out;
1665 }
1666
1667 /* DMA transfer */
1668 error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmap, data, datalen, NULL,
1669 BUS_DMA_NOWAIT|BUS_DMA_READ);
1670 if (error)
1671 goto out;
1672
1673 #ifdef SDMMC_DEBUG
1674 printf("data=%p, datalen=%zu\n", data, datalen);
1675 for (int i = 0; i < sc->sc_dmap->dm_nsegs; i++) {
1676 printf("seg#%d: addr=%#lx, size=%#lx\n", i,
1677 (u_long)sc->sc_dmap->dm_segs[i].ds_addr,
1678 (u_long)sc->sc_dmap->dm_segs[i].ds_len);
1679 }
1680 #endif
1681
1682 if (sc->sc_dmap->dm_nsegs > 1
1683 && !ISSET(sc->sc_caps, SMC_CAPS_MULTI_SEG_DMA)) {
1684 error = sdmmc_mem_single_segment_dma_read_block(sf, blkno,
1685 data, datalen);
1686 goto unload;
1687 }
1688
1689 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap, 0, datalen,
1690 BUS_DMASYNC_PREREAD);
1691
1692 error = sdmmc_mem_read_block_subr(sf, sc->sc_dmap, blkno, data,
1693 datalen);
1694 if (error)
1695 goto unload;
1696
1697 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap, 0, datalen,
1698 BUS_DMASYNC_POSTREAD);
1699 unload:
1700 bus_dmamap_unload(sc->sc_dmat, sc->sc_dmap);
1701
1702 out:
1703 mutex_exit(&sc->sc_mtx);
1704 SDMMC_UNLOCK(sc);
1705
1706 return error;
1707 }
1708
1709 /* write */
1710 static int
1711 sdmmc_mem_single_write_block(struct sdmmc_function *sf, uint32_t blkno,
1712 u_char *data, size_t datalen)
1713 {
1714 struct sdmmc_softc *sc = sf->sc;
1715 int error = 0;
1716 int i;
1717
1718 KASSERT((datalen % SDMMC_SECTOR_SIZE) == 0);
1719 KASSERT(!ISSET(sc->sc_caps, SMC_CAPS_DMA));
1720
1721 for (i = 0; i < datalen / SDMMC_SECTOR_SIZE; i++) {
1722 error = sdmmc_mem_write_block_subr(sf, sc->sc_dmap, blkno + i,
1723 data + i * SDMMC_SECTOR_SIZE, SDMMC_SECTOR_SIZE);
1724 if (error)
1725 break;
1726 }
1727 return error;
1728 }
1729
1730 /*
 1731  * Simulate a multi-segment DMA transfer on a single-segment controller.
1732 */
1733 static int
1734 sdmmc_mem_single_segment_dma_write_block(struct sdmmc_function *sf,
1735 uint32_t blkno, u_char *data, size_t datalen)
1736 {
1737 struct sdmmc_softc *sc = sf->sc;
1738 bool use_bbuf = false;
1739 int error = 0;
1740 int i;
1741
1742 for (i = 0; i < sc->sc_dmap->dm_nsegs; i++) {
1743 size_t len = sc->sc_dmap->dm_segs[i].ds_len;
1744 if ((len % SDMMC_SECTOR_SIZE) != 0) {
1745 use_bbuf = true;
1746 break;
1747 }
1748 }
1749 if (use_bbuf) {
1750 /* Copy to bounce buffer */
1751 memcpy(sf->bbuf, data, datalen);
1752
1753 bus_dmamap_sync(sc->sc_dmat, sf->bbuf_dmap, 0, datalen,
1754 BUS_DMASYNC_PREWRITE);
1755
1756 error = sdmmc_mem_write_block_subr(sf, sf->bbuf_dmap,
1757 blkno, data, datalen);
1758 if (error) {
1759 bus_dmamap_unload(sc->sc_dmat, sf->bbuf_dmap);
1760 return error;
1761 }
1762
1763 bus_dmamap_sync(sc->sc_dmat, sf->bbuf_dmap, 0, datalen,
1764 BUS_DMASYNC_POSTWRITE);
1765
1766 return 0;
1767 }
1768
1769 for (i = 0; i < sc->sc_dmap->dm_nsegs; i++) {
1770 size_t len = sc->sc_dmap->dm_segs[i].ds_len;
1771
1772 error = bus_dmamap_load(sc->sc_dmat, sf->sseg_dmap,
1773 data, len, NULL, BUS_DMA_NOWAIT|BUS_DMA_WRITE);
1774 if (error)
1775 return error;
1776
1777 bus_dmamap_sync(sc->sc_dmat, sf->sseg_dmap, 0, len,
1778 BUS_DMASYNC_PREWRITE);
1779
1780 error = sdmmc_mem_write_block_subr(sf, sf->sseg_dmap,
1781 blkno, data, len);
1782 if (error) {
1783 bus_dmamap_unload(sc->sc_dmat, sf->sseg_dmap);
1784 return error;
1785 }
1786
1787 bus_dmamap_sync(sc->sc_dmat, sf->sseg_dmap, 0, len,
1788 BUS_DMASYNC_POSTWRITE);
1789
1790 bus_dmamap_unload(sc->sc_dmat, sf->sseg_dmap);
1791
1792 blkno += len / SDMMC_SECTOR_SIZE;
1793 data += len;
1794 }
1795
1796 return error;
1797 }
1798
1799 static int
1800 sdmmc_mem_write_block_subr(struct sdmmc_function *sf, bus_dmamap_t dmap,
1801 uint32_t blkno, u_char *data, size_t datalen)
1802 {
1803 struct sdmmc_softc *sc = sf->sc;
1804 struct sdmmc_command cmd;
1805 int error;
1806
1807 if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
1808 error = sdmmc_select_card(sc, sf);
1809 if (error)
1810 goto out;
1811 }
1812
1813 memset(&cmd, 0, sizeof(cmd));
1814 cmd.c_data = data;
1815 cmd.c_datalen = datalen;
1816 cmd.c_blklen = SDMMC_SECTOR_SIZE;
1817 cmd.c_opcode = (cmd.c_datalen / cmd.c_blklen) > 1 ?
1818 MMC_WRITE_BLOCK_MULTIPLE : MMC_WRITE_BLOCK_SINGLE;
1819 cmd.c_arg = blkno;
1820 if (!ISSET(sf->flags, SFF_SDHC))
1821 cmd.c_arg <<= SDMMC_SECTOR_SIZE_SB;
1822 cmd.c_flags = SCF_CMD_ADTC | SCF_RSP_R1;
1823 if (ISSET(sc->sc_caps, SMC_CAPS_DMA))
1824 cmd.c_dmamap = dmap;
1825
1826 sc->sc_ev_xfer.ev_count++;
1827
1828 error = sdmmc_mmc_command(sc, &cmd);
1829 if (error) {
1830 sc->sc_ev_xfer_error.ev_count++;
1831 goto out;
1832 }
1833
1834 const u_int counter = __builtin_ctz(cmd.c_datalen);
1835 if (counter >= 9 && counter <= 16) {
1836 sc->sc_ev_xfer_aligned[counter - 9].ev_count++;
1837 } else {
1838 sc->sc_ev_xfer_unaligned.ev_count++;
1839 }
1840
1841 if (!ISSET(sc->sc_caps, SMC_CAPS_AUTO_STOP)) {
1842 if (cmd.c_opcode == MMC_WRITE_BLOCK_MULTIPLE) {
1843 memset(&cmd, 0, sizeof(cmd));
1844 cmd.c_opcode = MMC_STOP_TRANSMISSION;
1845 cmd.c_flags = SCF_CMD_AC | SCF_RSP_R1B | SCF_RSP_SPI_R1B;
1846 error = sdmmc_mmc_command(sc, &cmd);
1847 if (error)
1848 goto out;
1849 }
1850 }
1851
1852 if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
1853 do {
1854 memset(&cmd, 0, sizeof(cmd));
1855 cmd.c_opcode = MMC_SEND_STATUS;
1856 if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE))
1857 cmd.c_arg = MMC_ARG_RCA(sf->rca);
1858 cmd.c_flags = SCF_CMD_AC | SCF_RSP_R1 | SCF_RSP_SPI_R2;
1859 error = sdmmc_mmc_command(sc, &cmd);
1860 if (error)
1861 break;
1862 /* XXX time out */
1863 } while (!ISSET(MMC_R1(cmd.c_resp), MMC_R1_READY_FOR_DATA));
1864 }
1865
1866 out:
1867 return error;
1868 }
1869
1870 int
1871 sdmmc_mem_write_block(struct sdmmc_function *sf, uint32_t blkno, u_char *data,
1872 size_t datalen)
1873 {
1874 struct sdmmc_softc *sc = sf->sc;
1875 int error;
1876
1877 SDMMC_LOCK(sc);
1878 mutex_enter(&sc->sc_mtx);
1879
1880 if (sdmmc_chip_write_protect(sc->sc_sct, sc->sc_sch)) {
1881 aprint_normal_dev(sc->sc_dev, "write-protected\n");
1882 error = EIO;
1883 goto out;
1884 }
1885
1886 if (ISSET(sc->sc_caps, SMC_CAPS_SINGLE_ONLY)) {
1887 error = sdmmc_mem_single_write_block(sf, blkno, data, datalen);
1888 goto out;
1889 }
1890
1891 if (!ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
1892 error = sdmmc_mem_write_block_subr(sf, sc->sc_dmap, blkno, data,
1893 datalen);
1894 goto out;
1895 }
1896
1897 /* DMA transfer */
1898 error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmap, data, datalen, NULL,
1899 BUS_DMA_NOWAIT|BUS_DMA_WRITE);
1900 if (error)
1901 goto out;
1902
1903 #ifdef SDMMC_DEBUG
1904 aprint_normal_dev(sc->sc_dev, "%s: data=%p, datalen=%zu\n",
1905 __func__, data, datalen);
1906 for (int i = 0; i < sc->sc_dmap->dm_nsegs; i++) {
1907 aprint_normal_dev(sc->sc_dev,
1908 "%s: seg#%d: addr=%#lx, size=%#lx\n", __func__, i,
1909 (u_long)sc->sc_dmap->dm_segs[i].ds_addr,
1910 (u_long)sc->sc_dmap->dm_segs[i].ds_len);
1911 }
1912 #endif
1913
1914 if (sc->sc_dmap->dm_nsegs > 1
1915 && !ISSET(sc->sc_caps, SMC_CAPS_MULTI_SEG_DMA)) {
1916 error = sdmmc_mem_single_segment_dma_write_block(sf, blkno,
1917 data, datalen);
1918 goto unload;
1919 }
1920
1921 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap, 0, datalen,
1922 BUS_DMASYNC_PREWRITE);
1923
1924 error = sdmmc_mem_write_block_subr(sf, sc->sc_dmap, blkno, data,
1925 datalen);
1926 if (error)
1927 goto unload;
1928
1929 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap, 0, datalen,
1930 BUS_DMASYNC_POSTWRITE);
1931 unload:
1932 bus_dmamap_unload(sc->sc_dmat, sc->sc_dmap);
1933
1934 out:
1935 mutex_exit(&sc->sc_mtx);
1936 SDMMC_UNLOCK(sc);
1937
1938 return error;
1939 }
1940