/*	$NetBSD: sdmmc_mem.c,v 1.47 2015/10/06 14:32:51 mlelstv Exp $	*/
/*	$OpenBSD: sdmmc_mem.c,v 1.10 2009/01/09 10:55:22 jsg Exp $	*/

/*
 * Copyright (c) 2006 Uwe Stuehler <uwe@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*-
 * Copyright (C) 2007, 2008, 2009, 2010 NONAKA Kimihiro <nonaka@netbsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/* Routines for SD/MMC memory cards. */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sdmmc_mem.c,v 1.47 2015/10/06 14:32:51 mlelstv Exp $");

#ifdef _KERNEL_OPT
#include "opt_sdmmc.h"
#endif

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/systm.h>
#include <sys/device.h>

#include <dev/sdmmc/sdmmcchip.h>
#include <dev/sdmmc/sdmmcreg.h>
#include <dev/sdmmc/sdmmcvar.h>

#ifdef SDMMC_DEBUG
#define DPRINTF(s)	do { printf s; } while (/*CONSTCOND*/0)
#else
#define DPRINTF(s)	do {} while (/*CONSTCOND*/0)
#endif

typedef struct { uint32_t _bits[512/32]; } __packed __aligned(4) sdmmc_bitfield512_t;

static int sdmmc_mem_sd_init(struct sdmmc_softc *, struct sdmmc_function *);
static int sdmmc_mem_mmc_init(struct sdmmc_softc *, struct sdmmc_function *);
static int sdmmc_mem_send_cid(struct sdmmc_softc *, sdmmc_response *);
static int sdmmc_mem_send_csd(struct sdmmc_softc *, struct sdmmc_function *,
    sdmmc_response *);
static int sdmmc_mem_send_scr(struct sdmmc_softc *, struct sdmmc_function *,
    uint32_t *scr);
static int sdmmc_mem_decode_scr(struct sdmmc_softc *, struct sdmmc_function *);
static int sdmmc_mem_send_cxd_data(struct sdmmc_softc *, int, void *, size_t);
static int sdmmc_set_bus_width(struct sdmmc_function *, int);
static int sdmmc_mem_sd_switch(struct sdmmc_function *, int, int, int, sdmmc_bitfield512_t *);
static int sdmmc_mem_mmc_switch(struct sdmmc_function *, uint8_t, uint8_t,
    uint8_t);
static int sdmmc_mem_signal_voltage(struct sdmmc_softc *, int);
static int sdmmc_mem_spi_read_ocr(struct sdmmc_softc *, uint32_t, uint32_t *);
static int sdmmc_mem_single_read_block(struct sdmmc_function *, uint32_t,
    u_char *, size_t);
static int sdmmc_mem_single_write_block(struct sdmmc_function *, uint32_t,
    u_char *, size_t);
static int sdmmc_mem_single_segment_dma_read_block(struct sdmmc_function *,
    uint32_t, u_char *, size_t);
static int sdmmc_mem_single_segment_dma_write_block(struct sdmmc_function *,
    uint32_t, u_char *, size_t);
static int sdmmc_mem_read_block_subr(struct sdmmc_function *, bus_dmamap_t,
    uint32_t, u_char *, size_t);
static int sdmmc_mem_write_block_subr(struct sdmmc_function *, bus_dmamap_t,
    uint32_t, u_char *, size_t);

static const struct {
	const char *name;
	int v;
	int freq;
} switch_group0_functions[] = {
	/* Default/SDR12 */
	{ "Default/SDR12",	0,			25000 },

	/* High-Speed/SDR25 */
	{ "High-Speed/SDR25",	SMC_CAPS_SD_HIGHSPEED,	50000 },

	/* SDR50 */
	{ "SDR50",		SMC_CAPS_UHS_SDR50,	100000 },

	/* SDR104 */
	{ "SDR104",		SMC_CAPS_UHS_SDR104,	208000 },

	/* DDR50 */
	{ "DDR50",		SMC_CAPS_UHS_DDR50,	50000 },
};
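
/*
 * The order of this table matters: sdmmc_mem_select_transfer_mode()
 * returns an SD_ACCESS_MODE_* group 1 function number, and
 * sdmmc_mem_sd_init() uses that number directly as an index here, so
 * the entries must stay in function-number order, e.g. entry 3
 * ("SDR104", 208000 kHz) has to line up with SD_ACCESS_MODE_SDR104.
 */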

/*
 * Initialize SD/MMC memory cards and memory in SDIO "combo" cards.
 */
int
sdmmc_mem_enable(struct sdmmc_softc *sc)
{
	uint32_t host_ocr;
	uint32_t card_ocr;
	uint32_t new_ocr;
	uint32_t ocr = 0;
	int error;

	SDMMC_LOCK(sc);

	/* Set host mode to SD "combo" card or SD memory-only. */
	CLR(sc->sc_flags, SMF_UHS_MODE);
	SET(sc->sc_flags, SMF_SD_MODE|SMF_MEM_MODE);

	if (ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE))
		sdmmc_spi_chip_initialize(sc->sc_spi_sct, sc->sc_sch);

	/* Reset memory (*must* do that before CMD55 or CMD1). */
	sdmmc_go_idle_state(sc);

	if (ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
		/* Check SD Ver.2 */
		error = sdmmc_mem_send_if_cond(sc, 0x1aa, &card_ocr);
		if (error == 0 && card_ocr == 0x1aa)
			SET(ocr, MMC_OCR_HCS);
	}

	/*
	 * Read the SD/MMC memory OCR value by issuing CMD55 followed
	 * by ACMD41 to read the OCR value from memory-only SD cards.
	 * MMC cards will not respond to CMD55 or ACMD41 and this is
	 * how we distinguish them from SD cards.
	 */
mmc_mode:
	error = sdmmc_mem_send_op_cond(sc,
	    ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE) ? ocr : 0, &card_ocr);
	if (error) {
		if (ISSET(sc->sc_flags, SMF_SD_MODE) &&
		    !ISSET(sc->sc_flags, SMF_IO_MODE)) {
			/* Not an SD card, switch to MMC mode. */
			DPRINTF(("%s: switch to MMC mode\n", SDMMCDEVNAME(sc)));
			CLR(sc->sc_flags, SMF_SD_MODE);
			goto mmc_mode;
		}
		if (!ISSET(sc->sc_flags, SMF_SD_MODE)) {
			DPRINTF(("%s: couldn't read memory OCR\n",
			    SDMMCDEVNAME(sc)));
			goto out;
		} else {
			/* Not a "combo" card. */
			CLR(sc->sc_flags, SMF_MEM_MODE);
			error = 0;
			goto out;
		}
	}
	if (ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
		/* get card OCR */
		error = sdmmc_mem_spi_read_ocr(sc, ocr, &card_ocr);
		if (error) {
			DPRINTF(("%s: couldn't read SPI memory OCR\n",
			    SDMMCDEVNAME(sc)));
			goto out;
		}
	}

	/* Set the lowest voltage supported by the card and host. */
	host_ocr = sdmmc_chip_host_ocr(sc->sc_sct, sc->sc_sch);
	error = sdmmc_set_bus_power(sc, host_ocr, card_ocr);
	if (error) {
		DPRINTF(("%s: couldn't supply voltage requested by card\n",
		    SDMMCDEVNAME(sc)));
		goto out;
	}

	/* Tell the card(s) to enter the idle state (again). */
	sdmmc_go_idle_state(sc);

	DPRINTF(("%s: host_ocr 0x%08x\n", SDMMCDEVNAME(sc), host_ocr));
	DPRINTF(("%s: card_ocr 0x%08x\n", SDMMCDEVNAME(sc), card_ocr));

	host_ocr &= card_ocr;	/* only allow the common voltages */
	if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
		if (ISSET(sc->sc_flags, SMF_SD_MODE)) {
			/* Check SD Ver.2 */
			error = sdmmc_mem_send_if_cond(sc, 0x1aa, &card_ocr);
			if (error == 0 && card_ocr == 0x1aa)
				SET(ocr, MMC_OCR_HCS);

			if (sdmmc_chip_host_ocr(sc->sc_sct, sc->sc_sch) & MMC_OCR_S18A)
				SET(ocr, MMC_OCR_S18A);
		} else {
			SET(ocr, MMC_OCR_ACCESS_MODE_SECTOR);
		}
	}
	host_ocr |= ocr;

	/* Send the new OCR value until all cards are ready. */
	error = sdmmc_mem_send_op_cond(sc, host_ocr, &new_ocr);
	if (error) {
		DPRINTF(("%s: couldn't send memory OCR\n", SDMMCDEVNAME(sc)));
		goto out;
	}

	if (ISSET(sc->sc_flags, SMF_SD_MODE) && ISSET(new_ocr, MMC_OCR_S18A)) {
		/*
		 * Card and host support low voltage mode, begin switch
		 * sequence.
		 */
		struct sdmmc_command cmd;
		memset(&cmd, 0, sizeof(cmd));
		cmd.c_arg = 0;
		cmd.c_flags = SCF_CMD_AC | SCF_RSP_R1;
		cmd.c_opcode = SD_VOLTAGE_SWITCH;
		DPRINTF(("%s: switching card to 1.8V\n", SDMMCDEVNAME(sc)));
		error = sdmmc_mmc_command(sc, &cmd);
		if (error) {
			DPRINTF(("%s: voltage switch command failed\n",
			    SDMMCDEVNAME(sc)));
			goto out;
		}

		error = sdmmc_mem_signal_voltage(sc, SDMMC_SIGNAL_VOLTAGE_180);
		if (error)
			goto out;

		SET(sc->sc_flags, SMF_UHS_MODE);
	}

out:
	SDMMC_UNLOCK(sc);

	if (error)
		printf("%s: %s failed with error %d\n", SDMMCDEVNAME(sc),
		    __func__, error);

	return error;
}
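
/*
 * Helper for the CMD11 (SD_VOLTAGE_SWITCH) sequence above and for
 * recovering UHS cards that powered up in 1.8V mode: stop the SD
 * clock, ask the host controller to change its signalling voltage,
 * let it settle, then restart the clock at 25000 kHz (SDR12).
 */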

static int
sdmmc_mem_signal_voltage(struct sdmmc_softc *sc, int signal_voltage)
{
	int error;

	/*
	 * Stop the clock
	 */
	error = sdmmc_chip_bus_clock(sc->sc_sct, sc->sc_sch,
	    SDMMC_SDCLK_OFF, false);
	if (error)
		goto out;

	delay(1000);

	/*
	 * Card switch command was successful, update host controller
	 * signal voltage setting.
	 */
	DPRINTF(("%s: switching host to %s\n", SDMMCDEVNAME(sc),
	    signal_voltage == SDMMC_SIGNAL_VOLTAGE_180 ? "1.8V" : "3.3V"));
	error = sdmmc_chip_signal_voltage(sc->sc_sct,
	    sc->sc_sch, signal_voltage);
	if (error)
		goto out;

	delay(5000);

	/*
	 * Switch to SDR12 timing
	 */
	error = sdmmc_chip_bus_clock(sc->sc_sct, sc->sc_sch, 25000,
	    false);
	if (error)
		goto out;

	delay(1000);

out:
	return error;
}

/*
 * Read the CSD and CID from all cards and assign each card a unique
 * relative card address (RCA). CMD2 is ignored by SDIO-only cards.
 */
void
sdmmc_mem_scan(struct sdmmc_softc *sc)
{
	sdmmc_response resp;
	struct sdmmc_function *sf;
	uint16_t next_rca;
	int error;
	int retry;

	SDMMC_LOCK(sc);

	/*
	 * CMD2 is a broadcast command understood by SD cards and MMC
	 * cards.  All cards begin to respond to the command, but back
	 * off if another card drives the CMD line to a different level.
	 * Only one card will get its entire response through.  That
	 * card remains silent once it has been assigned a RCA.
	 */
	for (retry = 0; retry < 100; retry++) {
		error = sdmmc_mem_send_cid(sc, &resp);
		if (error) {
			if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE) &&
			    error == ETIMEDOUT) {
				/* No more cards there. */
				break;
			}
			DPRINTF(("%s: couldn't read CID\n", SDMMCDEVNAME(sc)));
			break;
		}

		/* In MMC mode, find the next available RCA. */
		next_rca = 1;
		if (!ISSET(sc->sc_flags, SMF_SD_MODE)) {
			SIMPLEQ_FOREACH(sf, &sc->sf_head, sf_list)
				next_rca++;
		}

		/* Allocate a sdmmc_function structure. */
		sf = sdmmc_function_alloc(sc);
		sf->rca = next_rca;

		/*
		 * Remember the CID returned in the CMD2 response for
		 * later decoding.
		 */
		memcpy(sf->raw_cid, resp, sizeof(sf->raw_cid));

		/*
		 * Silence the card by assigning it a unique RCA, or
		 * querying it for its RCA in the case of SD.
		 */
		if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
			if (sdmmc_set_relative_addr(sc, sf) != 0) {
				aprint_error_dev(sc->sc_dev,
				    "couldn't set mem RCA\n");
				sdmmc_function_free(sf);
				break;
			}
		}

		/*
		 * If this is a memory-only card, the card responding
		 * first becomes an alias for SDIO function 0.
		 */
		if (sc->sc_fn0 == NULL)
			sc->sc_fn0 = sf;

		SIMPLEQ_INSERT_TAIL(&sc->sf_head, sf, sf_list);

		/* only one function in SPI mode */
		if (ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE))
			break;
	}

	if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE))
		/* Go to Data Transfer Mode, if possible. */
		sdmmc_chip_bus_rod(sc->sc_sct, sc->sc_sch, 0);

	/*
	 * All cards are either inactive or awaiting further commands.
	 * Read the CSDs and decode the raw CID for each card.
	 */
	SIMPLEQ_FOREACH(sf, &sc->sf_head, sf_list) {
		error = sdmmc_mem_send_csd(sc, sf, &resp);
		if (error) {
			SET(sf->flags, SFF_ERROR);
			continue;
		}

		if (sdmmc_decode_csd(sc, resp, sf) != 0 ||
		    sdmmc_decode_cid(sc, sf->raw_cid, sf) != 0) {
			SET(sf->flags, SFF_ERROR);
			continue;
		}

#ifdef SDMMC_DEBUG
		printf("%s: CID: ", SDMMCDEVNAME(sc));
		sdmmc_print_cid(&sf->cid);
#endif
	}

	SDMMC_UNLOCK(sc);
}
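
/*
 * A worked example of the TRAN_SPEED decoding done in sdmmc_decode_csd()
 * below (the tables there follow the CSD encoding): a typical
 * default-speed SD card reports TRAN_SPEED 0x32, i.e. rate unit 2
 * (10 Mbit/s) and time value 6 (2.5), so
 * speed_exponent[2] * speed_mantissa[6] / 10 = 10000 * 25 / 10 = 25000,
 * and csd->tran_speed is kept in kbit/s.
 */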

int
sdmmc_decode_csd(struct sdmmc_softc *sc, sdmmc_response resp,
    struct sdmmc_function *sf)
{
	/* TRAN_SPEED(2:0): transfer rate exponent */
	static const int speed_exponent[8] = {
		100 *    1,	/* 100 Kbits/s */
		  1 * 1000,	/*   1 Mbits/s */
		 10 * 1000,	/*  10 Mbits/s */
		100 * 1000,	/* 100 Mbits/s */
			 0,
			 0,
			 0,
			 0,
	};
	/* TRAN_SPEED(6:3): time mantissa */
	static const int speed_mantissa[16] = {
		0, 10, 12, 13, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60, 70, 80,
	};
	struct sdmmc_csd *csd = &sf->csd;
	int e, m;

	if (ISSET(sc->sc_flags, SMF_SD_MODE)) {
		/*
		 * CSD version 1.0 corresponds to SD system
		 * specification version 1.0 - 1.10. (SanDisk, 3.5.3)
		 */
		csd->csdver = SD_CSD_CSDVER(resp);
		switch (csd->csdver) {
		case SD_CSD_CSDVER_2_0:
			DPRINTF(("%s: SD Ver.2.0\n", SDMMCDEVNAME(sc)));
			SET(sf->flags, SFF_SDHC);
			csd->capacity = SD_CSD_V2_CAPACITY(resp);
			csd->read_bl_len = SD_CSD_V2_BL_LEN;
			break;

		case SD_CSD_CSDVER_1_0:
			DPRINTF(("%s: SD Ver.1.0\n", SDMMCDEVNAME(sc)));
			csd->capacity = SD_CSD_CAPACITY(resp);
			csd->read_bl_len = SD_CSD_READ_BL_LEN(resp);
			break;

		default:
			aprint_error_dev(sc->sc_dev,
			    "unknown SD CSD structure version 0x%x\n",
			    csd->csdver);
			return 1;
		}

		csd->mmcver = SD_CSD_MMCVER(resp);
		csd->write_bl_len = SD_CSD_WRITE_BL_LEN(resp);
		csd->r2w_factor = SD_CSD_R2W_FACTOR(resp);
		e = SD_CSD_SPEED_EXP(resp);
		m = SD_CSD_SPEED_MANT(resp);
		csd->tran_speed = speed_exponent[e] * speed_mantissa[m] / 10;
		csd->ccc = SD_CSD_CCC(resp);
	} else {
		csd->csdver = MMC_CSD_CSDVER(resp);
		if (csd->csdver == MMC_CSD_CSDVER_1_0) {
			aprint_error_dev(sc->sc_dev,
			    "unknown MMC CSD structure version 0x%x\n",
			    csd->csdver);
			return 1;
		}

		csd->mmcver = MMC_CSD_MMCVER(resp);
		csd->capacity = MMC_CSD_CAPACITY(resp);
		csd->read_bl_len = MMC_CSD_READ_BL_LEN(resp);
		csd->write_bl_len = MMC_CSD_WRITE_BL_LEN(resp);
		csd->r2w_factor = MMC_CSD_R2W_FACTOR(resp);
		e = MMC_CSD_TRAN_SPEED_EXP(resp);
		m = MMC_CSD_TRAN_SPEED_MANT(resp);
		csd->tran_speed = speed_exponent[e] * speed_mantissa[m] / 10;
	}
	if ((1 << csd->read_bl_len) > SDMMC_SECTOR_SIZE)
		csd->capacity *= (1 << csd->read_bl_len) / SDMMC_SECTOR_SIZE;

#ifdef SDMMC_DUMP_CSD
	sdmmc_print_csd(resp, csd);
#endif

	return 0;
}
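
/*
 * The CID layout differs between SD and MMC, and for MMC it also
 * depends on the spec version recorded in the CSD, which is why the
 * MMC branch below switches on sf->csd.mmcver (the V1 layout decoded
 * here carries no separate OID field).
 */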

int
sdmmc_decode_cid(struct sdmmc_softc *sc, sdmmc_response resp,
    struct sdmmc_function *sf)
{
	struct sdmmc_cid *cid = &sf->cid;

	if (ISSET(sc->sc_flags, SMF_SD_MODE)) {
		cid->mid = SD_CID_MID(resp);
		cid->oid = SD_CID_OID(resp);
		SD_CID_PNM_CPY(resp, cid->pnm);
		cid->rev = SD_CID_REV(resp);
		cid->psn = SD_CID_PSN(resp);
		cid->mdt = SD_CID_MDT(resp);
	} else {
		switch (sf->csd.mmcver) {
		case MMC_CSD_MMCVER_1_0:
		case MMC_CSD_MMCVER_1_4:
			cid->mid = MMC_CID_MID_V1(resp);
			MMC_CID_PNM_V1_CPY(resp, cid->pnm);
			cid->rev = MMC_CID_REV_V1(resp);
			cid->psn = MMC_CID_PSN_V1(resp);
			cid->mdt = MMC_CID_MDT_V1(resp);
			break;
		case MMC_CSD_MMCVER_2_0:
		case MMC_CSD_MMCVER_3_1:
		case MMC_CSD_MMCVER_4_0:
			cid->mid = MMC_CID_MID_V2(resp);
			cid->oid = MMC_CID_OID_V2(resp);
			MMC_CID_PNM_V2_CPY(resp, cid->pnm);
			cid->psn = MMC_CID_PSN_V2(resp);
			break;
		default:
			aprint_error_dev(sc->sc_dev, "unknown MMC version %d\n",
			    sf->csd.mmcver);
			return 1;
		}
	}
	return 0;
}

void
sdmmc_print_cid(struct sdmmc_cid *cid)
{

	printf("mid=0x%02x oid=0x%04x pnm=\"%s\" rev=0x%02x psn=0x%08x"
	    " mdt=%03x\n", cid->mid, cid->oid, cid->pnm, cid->rev, cid->psn,
	    cid->mdt);
}

#ifdef SDMMC_DUMP_CSD
void
sdmmc_print_csd(sdmmc_response resp, struct sdmmc_csd *csd)
{

	printf("csdver = %d\n", csd->csdver);
	printf("mmcver = %d\n", csd->mmcver);
	printf("capacity = 0x%08x\n", csd->capacity);
	printf("read_bl_len = %d\n", csd->read_bl_len);
	printf("write_bl_len = %d\n", csd->write_bl_len);
	printf("r2w_factor = %d\n", csd->r2w_factor);
	printf("tran_speed = %d\n", csd->tran_speed);
	printf("ccc = 0x%x\n", csd->ccc);
}
#endif

/*
 * Initialize an SD/MMC memory card.
 */
int
sdmmc_mem_init(struct sdmmc_softc *sc, struct sdmmc_function *sf)
{
	int error = 0;

	SDMMC_LOCK(sc);

	if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
		error = sdmmc_select_card(sc, sf);
		if (error)
			goto out;
	}

	error = sdmmc_mem_set_blocklen(sc, sf, SDMMC_SECTOR_SIZE);
	if (error)
		goto out;

	if (ISSET(sc->sc_flags, SMF_SD_MODE))
		error = sdmmc_mem_sd_init(sc, sf);
	else
		error = sdmmc_mem_mmc_init(sc, sf);

out:
	SDMMC_UNLOCK(sc);

	return error;
}

/*
 * Get or set the card's memory OCR value (SD or MMC).
 */
int
sdmmc_mem_send_op_cond(struct sdmmc_softc *sc, uint32_t ocr, uint32_t *ocrp)
{
	struct sdmmc_command cmd;
	int error;
	int retry;

	/* Don't lock */

	DPRINTF(("%s: sdmmc_mem_send_op_cond: ocr=%#x\n",
	    SDMMCDEVNAME(sc), ocr));

	/*
	 * If we change the OCR value, retry the command until the OCR
	 * we receive in response has the "CARD BUSY" bit set, meaning
	 * that all cards are ready for identification.
	 */
	for (retry = 0; retry < 100; retry++) {
		memset(&cmd, 0, sizeof(cmd));
		cmd.c_arg = !ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE) ?
		    ocr : (ocr & MMC_OCR_HCS);
		cmd.c_flags = SCF_CMD_BCR | SCF_RSP_R3 | SCF_RSP_SPI_R1;

		if (ISSET(sc->sc_flags, SMF_SD_MODE)) {
			cmd.c_opcode = SD_APP_OP_COND;
			error = sdmmc_app_command(sc, NULL, &cmd);
		} else {
			cmd.c_opcode = MMC_SEND_OP_COND;
			error = sdmmc_mmc_command(sc, &cmd);
		}
		if (error)
			break;

		if (ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
			if (!ISSET(MMC_SPI_R1(cmd.c_resp), R1_SPI_IDLE))
				break;
		} else {
			if (ISSET(MMC_R3(cmd.c_resp), MMC_OCR_MEM_READY) ||
			    ocr == 0)
				break;
		}

		error = ETIMEDOUT;
		sdmmc_delay(10000);
	}
	if (error == 0 &&
	    ocrp != NULL &&
	    !ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE))
		*ocrp = MMC_R3(cmd.c_resp);
	DPRINTF(("%s: sdmmc_mem_send_op_cond: error=%d, ocr=%#x\n",
	    SDMMCDEVNAME(sc), error, MMC_R3(cmd.c_resp)));
	return error;
}
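
/*
 * CMD8 (SEND_IF_COND) probe.  The 0x1aa argument used by
 * sdmmc_mem_enable() encodes a supply-voltage field of 0x1 (2.7-3.6V)
 * plus the 0xaa check pattern; a Ver.2.00 or later card echoes both
 * back in the R7 response, which is why callers compare the returned
 * value with the argument before setting MMC_OCR_HCS.
 */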

int
sdmmc_mem_send_if_cond(struct sdmmc_softc *sc, uint32_t ocr, uint32_t *ocrp)
{
	struct sdmmc_command cmd;
	int error;

	/* Don't lock */

	memset(&cmd, 0, sizeof(cmd));
	cmd.c_arg = ocr;
	cmd.c_flags = SCF_CMD_BCR | SCF_RSP_R7 | SCF_RSP_SPI_R7;
	cmd.c_opcode = SD_SEND_IF_COND;

	error = sdmmc_mmc_command(sc, &cmd);
	if (error == 0 && ocrp != NULL) {
		if (ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
			*ocrp = MMC_SPI_R7(cmd.c_resp);
		} else {
			*ocrp = MMC_R7(cmd.c_resp);
		}
		DPRINTF(("%s: sdmmc_mem_send_if_cond: error=%d, ocr=%#x\n",
		    SDMMCDEVNAME(sc), error, *ocrp));
	}
	return error;
}

/*
 * Set the read block length appropriately for this card, according to
 * the card CSD register value.
 */
int
sdmmc_mem_set_blocklen(struct sdmmc_softc *sc, struct sdmmc_function *sf,
    int block_len)
{
	struct sdmmc_command cmd;
	int error;

	/* Don't lock */

	memset(&cmd, 0, sizeof(cmd));
	cmd.c_opcode = MMC_SET_BLOCKLEN;
	cmd.c_arg = block_len;
	cmd.c_flags = SCF_CMD_AC | SCF_RSP_R1 | SCF_RSP_SPI_R1;

	error = sdmmc_mmc_command(sc, &cmd);

	DPRINTF(("%s: sdmmc_mem_set_blocklen: read_bl_len=%d sector_size=%d\n",
	    SDMMCDEVNAME(sc), 1 << sf->csd.read_bl_len, block_len));

	return error;
}

/* make 512-bit BE quantity __bitfield()-compatible */
static void
sdmmc_be512_to_bitfield512(sdmmc_bitfield512_t *buf) {
	size_t i;
	uint32_t tmp0, tmp1;
	const size_t bitswords = __arraycount(buf->_bits);
	for (i = 0; i < bitswords/2; i++) {
		tmp0 = buf->_bits[i];
		tmp1 = buf->_bits[bitswords - 1 - i];
		buf->_bits[i] = be32toh(tmp1);
		buf->_bits[bitswords - 1 - i] = be32toh(tmp0);
	}
}

static int
sdmmc_mem_select_transfer_mode(struct sdmmc_softc *sc, int support_func)
{
	if (ISSET(sc->sc_flags, SMF_UHS_MODE)) {
		if (ISSET(sc->sc_caps, SMC_CAPS_UHS_SDR104) &&
		    ISSET(support_func, 1 << SD_ACCESS_MODE_SDR104)) {
			return SD_ACCESS_MODE_SDR104;
		}
		if (ISSET(sc->sc_caps, SMC_CAPS_UHS_DDR50) &&
		    ISSET(support_func, 1 << SD_ACCESS_MODE_DDR50)) {
			return SD_ACCESS_MODE_DDR50;
		}
		if (ISSET(sc->sc_caps, SMC_CAPS_UHS_SDR50) &&
		    ISSET(support_func, 1 << SD_ACCESS_MODE_SDR50)) {
			return SD_ACCESS_MODE_SDR50;
		}
	}
	if (ISSET(sc->sc_caps, SMC_CAPS_SD_HIGHSPEED) &&
	    ISSET(support_func, 1 << SD_ACCESS_MODE_SDR25)) {
		return SD_ACCESS_MODE_SDR25;
	}
	return SD_ACCESS_MODE_SDR12;
}
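
/*
 * Ask the controller to run its tuning procedure for the timings that
 * require it.  This is keyed off csd.tran_speed as set up by the init
 * paths: 100000 (SDR50) and 208000 (SDR104) for SD cards in UHS mode,
 * 200000 (HS200) for eMMC; anything else returns 0 without tuning.
 */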

static int
sdmmc_mem_execute_tuning(struct sdmmc_softc *sc, struct sdmmc_function *sf)
{
	int timing = -1;

	if (!ISSET(sc->sc_flags, SMF_UHS_MODE))
		return 0;

	if (ISSET(sc->sc_flags, SMF_SD_MODE)) {
		if (!ISSET(sc->sc_flags, SMF_UHS_MODE))
			return 0;

		switch (sf->csd.tran_speed) {
		case 100000:
			timing = SDMMC_TIMING_UHS_SDR50;
			break;
		case 208000:
			timing = SDMMC_TIMING_UHS_SDR104;
			break;
		default:
			return 0;
		}
	} else {
		switch (sf->csd.tran_speed) {
		case 200000:
			timing = SDMMC_TIMING_MMC_HS200;
			break;
		default:
			return 0;
		}
	}

	DPRINTF(("%s: execute tuning for timing %d\n", SDMMCDEVNAME(sc),
	    timing));

	return sdmmc_chip_execute_tuning(sc->sc_sct, sc->sc_sch, timing);
}
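
/*
 * SD-specific half of sdmmc_mem_init(): bring the clock up to the CSD
 * rate, fetch and decode the SCR (ACMD51), widen the bus to 4 bits if
 * both sides allow it, query CMD6 group 1 to pick the fastest access
 * mode the host supports, switch to it, and finally run tuning when a
 * UHS mode was selected.
 */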

static int
sdmmc_mem_sd_init(struct sdmmc_softc *sc, struct sdmmc_function *sf)
{
	int support_func, best_func, bus_clock, error, i;
	sdmmc_bitfield512_t status;	/* Switch Function Status */
	bool ddr = false;

	/* change bus clock */
	bus_clock = min(sc->sc_busclk, sf->csd.tran_speed);
	error = sdmmc_chip_bus_clock(sc->sc_sct, sc->sc_sch, bus_clock, false);
	if (error) {
		aprint_error_dev(sc->sc_dev, "can't change bus clock\n");
		return error;
	}

	error = sdmmc_mem_send_scr(sc, sf, sf->raw_scr);
	if (error) {
		aprint_error_dev(sc->sc_dev, "SD_SEND_SCR send failed.\n");
		return error;
	}
	error = sdmmc_mem_decode_scr(sc, sf);
	if (error)
		return error;

	if (ISSET(sc->sc_caps, SMC_CAPS_4BIT_MODE) &&
	    ISSET(sf->scr.bus_width, SCR_SD_BUS_WIDTHS_4BIT)) {
		DPRINTF(("%s: change bus width\n", SDMMCDEVNAME(sc)));
		error = sdmmc_set_bus_width(sf, 4);
		if (error) {
			aprint_error_dev(sc->sc_dev,
			    "can't change bus width (%d bit)\n", 4);
			return error;
		}
		sf->width = 4;
	}

	best_func = 0;
	if (sf->scr.sd_spec >= SCR_SD_SPEC_VER_1_10 &&
	    ISSET(sf->csd.ccc, SD_CSD_CCC_SWITCH)) {
		DPRINTF(("%s: switch func mode 0\n", SDMMCDEVNAME(sc)));
		error = sdmmc_mem_sd_switch(sf, 0, 1, 0, &status);
		if (error) {
			aprint_error_dev(sc->sc_dev,
			    "switch func mode 0 failed\n");
			return error;
		}

		support_func = SFUNC_STATUS_GROUP(&status, 1);

		if (!ISSET(sc->sc_flags, SMF_UHS_MODE) && support_func & 0x1c) {
			/* XXX UHS-I card started in 1.8V mode, switch now */
			error = sdmmc_mem_signal_voltage(sc,
			    SDMMC_SIGNAL_VOLTAGE_180);
			if (error) {
				aprint_error_dev(sc->sc_dev,
				    "failed to recover UHS card\n");
				return error;
			}
			SET(sc->sc_flags, SMF_UHS_MODE);
		}

		for (i = 0; i < __arraycount(switch_group0_functions); i++) {
			if (!(support_func & (1 << i)))
				continue;
			DPRINTF(("%s: card supports mode %s\n",
			    SDMMCDEVNAME(sc),
			    switch_group0_functions[i].name));
		}

		best_func = sdmmc_mem_select_transfer_mode(sc, support_func);

		DPRINTF(("%s: using mode %s\n", SDMMCDEVNAME(sc),
		    switch_group0_functions[best_func].name));

		if (best_func != 0) {
			DPRINTF(("%s: switch func mode 1(func=%d)\n",
			    SDMMCDEVNAME(sc), best_func));
			error =
			    sdmmc_mem_sd_switch(sf, 1, 1, best_func, &status);
			if (error) {
				aprint_error_dev(sc->sc_dev,
				    "switch func mode 1 failed:"
				    " group 1 function %d(0x%2x)\n",
				    best_func, support_func);
				return error;
			}
			sf->csd.tran_speed =
			    switch_group0_functions[best_func].freq;

			if (best_func == SD_ACCESS_MODE_DDR50)
				ddr = true;

			/* Wait 400KHz x 8 clock (2.5us * 8 + slop) */
			delay(25);
		}
	}

	/* update bus clock */
	if (sc->sc_busclk > sf->csd.tran_speed)
		sc->sc_busclk = sf->csd.tran_speed;
	if (sc->sc_busclk == bus_clock && sc->sc_busddr == ddr)
		return 0;

	/* change bus clock */
	error = sdmmc_chip_bus_clock(sc->sc_sct, sc->sc_sch, sc->sc_busclk,
	    ddr);
	if (error) {
		aprint_error_dev(sc->sc_dev, "can't change bus clock\n");
		return error;
	}

	sc->sc_transfer_mode = switch_group0_functions[best_func].name;
	sc->sc_busddr = ddr;

	/* execute tuning (UHS) */
	error = sdmmc_mem_execute_tuning(sc, sf);
	if (error) {
		aprint_error_dev(sc->sc_dev, "can't execute SD tuning\n");
		return error;
	}

	return 0;
}
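
/*
 * MMC/eMMC-specific half of sdmmc_mem_init().  For MMC 4.0 and later
 * parts the EXT_CSD is read to pick the fastest CARD_TYPE the host
 * supports (HS200 at 200000, high speed at 52000, else 26000 kHz), the
 * bus width and HS_TIMING are set via CMD6 SWITCH, and the sector
 * count (assembled little-endian from the four EXT_CSD_SEC_COUNT
 * bytes) replaces the CSD capacity for devices larger than 2GB, which
 * are marked SFF_SDHC so block addressing is used.
 */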

static int
sdmmc_mem_mmc_init(struct sdmmc_softc *sc, struct sdmmc_function *sf)
{
	int width, value, hs_timing, bus_clock, error;
	char ext_csd[512];
	uint32_t sectors = 0;

	sc->sc_transfer_mode = NULL;

	/* change bus clock */
	bus_clock = min(sc->sc_busclk, sf->csd.tran_speed);
	error = sdmmc_chip_bus_clock(sc->sc_sct, sc->sc_sch, bus_clock, false);
	if (error) {
		aprint_error_dev(sc->sc_dev, "can't change bus clock\n");
		return error;
	}

	if (sf->csd.mmcver >= MMC_CSD_MMCVER_4_0) {
		error = sdmmc_mem_send_cxd_data(sc,
		    MMC_SEND_EXT_CSD, ext_csd, sizeof(ext_csd));
		if (error) {
			aprint_error_dev(sc->sc_dev,
			    "can't read EXT_CSD (error=%d)\n", error);
			return error;
		}
		if ((sf->csd.csdver == MMC_CSD_CSDVER_EXT_CSD) &&
		    (ext_csd[EXT_CSD_STRUCTURE] > EXT_CSD_STRUCTURE_VER_1_2)) {
			aprint_error_dev(sc->sc_dev,
			    "unrecognised future version (%d)\n",
			    ext_csd[EXT_CSD_STRUCTURE]);
			return ENOTSUP;
		}

		sc->sc_transfer_mode = NULL;
		if (ISSET(sc->sc_caps, SMC_CAPS_MMC_HS200) &&
		    ext_csd[EXT_CSD_CARD_TYPE] & EXT_CSD_CARD_TYPE_F_HS200_1_8V) {
			sf->csd.tran_speed = 200000;	/* 200MHz SDR */
			hs_timing = 2;
		} else if (ext_csd[EXT_CSD_CARD_TYPE] & EXT_CSD_CARD_TYPE_F_52M) {
			sf->csd.tran_speed = 52000;	/* 52MHz */
			hs_timing = 1;
		} else if (ext_csd[EXT_CSD_CARD_TYPE] & EXT_CSD_CARD_TYPE_F_26M) {
			sf->csd.tran_speed = 26000;	/* 26MHz */
			hs_timing = 0;
		} else {
			aprint_error_dev(sc->sc_dev,
			    "unknown CARD_TYPE: 0x%x\n",
			    ext_csd[EXT_CSD_CARD_TYPE]);
			return ENOTSUP;
		}

		if (ISSET(sc->sc_caps, SMC_CAPS_8BIT_MODE)) {
			width = 8;
			value = EXT_CSD_BUS_WIDTH_8;
		} else if (ISSET(sc->sc_caps, SMC_CAPS_4BIT_MODE)) {
			width = 4;
			value = EXT_CSD_BUS_WIDTH_4;
		} else {
			width = 1;
			value = EXT_CSD_BUS_WIDTH_1;
		}

		if (width != 1) {
			error = sdmmc_mem_mmc_switch(sf, EXT_CSD_CMD_SET_NORMAL,
			    EXT_CSD_BUS_WIDTH, value);
			if (error == 0)
				error = sdmmc_chip_bus_width(sc->sc_sct,
				    sc->sc_sch, width);
			else {
				DPRINTF(("%s: can't change bus width"
				    " (%d bit)\n", SDMMCDEVNAME(sc), width));
				return error;
			}

			/* XXXX: need bus test? (using CMD14 & CMD19) */
			delay(10000);
		}
		sf->width = width;

		if (hs_timing == 1 &&
		    !ISSET(sc->sc_caps, SMC_CAPS_MMC_HIGHSPEED)) {
			hs_timing = 0;
		}
		if (hs_timing) {
			error = sdmmc_mem_mmc_switch(sf, EXT_CSD_CMD_SET_NORMAL,
			    EXT_CSD_HS_TIMING, hs_timing);
			if (error) {
				aprint_error_dev(sc->sc_dev,
				    "can't change high speed %d, error %d\n",
				    hs_timing, error);
				return error;
			}
		}

		if (sc->sc_busclk > sf->csd.tran_speed)
			sc->sc_busclk = sf->csd.tran_speed;
		if (sc->sc_busclk != bus_clock) {
			error = sdmmc_chip_bus_clock(sc->sc_sct, sc->sc_sch,
			    sc->sc_busclk, false);
			if (error) {
				aprint_error_dev(sc->sc_dev,
				    "can't change bus clock\n");
				return error;
			}
		}

		if (hs_timing) {
			error = sdmmc_mem_send_cxd_data(sc,
			    MMC_SEND_EXT_CSD, ext_csd, sizeof(ext_csd));
			if (error) {
				aprint_error_dev(sc->sc_dev,
				    "can't re-read EXT_CSD\n");
				return error;
			}
			if (ext_csd[EXT_CSD_HS_TIMING] != hs_timing) {
				aprint_error_dev(sc->sc_dev,
				    "HS_TIMING set failed\n");
				return EINVAL;
			}
		}

		sectors = ext_csd[EXT_CSD_SEC_COUNT + 0] << 0 |
		    ext_csd[EXT_CSD_SEC_COUNT + 1] << 8 |
		    ext_csd[EXT_CSD_SEC_COUNT + 2] << 16 |
		    ext_csd[EXT_CSD_SEC_COUNT + 3] << 24;
		if (sectors > (2u * 1024 * 1024 * 1024) / 512) {
			SET(sf->flags, SFF_SDHC);
			sf->csd.capacity = sectors;
		}

		if (hs_timing == 2) {
			sc->sc_transfer_mode = "HS200";

			/* execute tuning (HS200) */
			error = sdmmc_mem_execute_tuning(sc, sf);
			if (error) {
				aprint_error_dev(sc->sc_dev,
				    "can't execute MMC tuning\n");
				return error;
			}
		} else {
			sc->sc_transfer_mode = NULL;
		}
	} else {
		if (sc->sc_busclk > sf->csd.tran_speed)
			sc->sc_busclk = sf->csd.tran_speed;
		if (sc->sc_busclk != bus_clock) {
			error = sdmmc_chip_bus_clock(sc->sc_sct, sc->sc_sch,
			    sc->sc_busclk, false);
			if (error) {
				aprint_error_dev(sc->sc_dev,
				    "can't change bus clock\n");
				return error;
			}
		}
	}

	return 0;
}
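
/*
 * In native mode the CID is taken from the R2 response to CMD2; in SPI
 * mode there is no R2, so it is read as a data block through
 * sdmmc_mem_send_cxd_data() instead.  The same split applies to the
 * CSD below.
 */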

static int
sdmmc_mem_send_cid(struct sdmmc_softc *sc, sdmmc_response *resp)
{
	struct sdmmc_command cmd;
	int error;

	if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
		memset(&cmd, 0, sizeof cmd);
		cmd.c_opcode = MMC_ALL_SEND_CID;
		cmd.c_flags = SCF_CMD_BCR | SCF_RSP_R2 | SCF_TOUT_OK;

		error = sdmmc_mmc_command(sc, &cmd);
	} else {
		error = sdmmc_mem_send_cxd_data(sc, MMC_SEND_CID, &cmd.c_resp,
		    sizeof(cmd.c_resp));
	}

#ifdef SDMMC_DEBUG
	if (error == 0)
		sdmmc_dump_data("CID", cmd.c_resp, sizeof(cmd.c_resp));
#endif
	if (error == 0 && resp != NULL)
		memcpy(resp, &cmd.c_resp, sizeof(*resp));
	return error;
}

static int
sdmmc_mem_send_csd(struct sdmmc_softc *sc, struct sdmmc_function *sf,
    sdmmc_response *resp)
{
	struct sdmmc_command cmd;
	int error;

	if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
		memset(&cmd, 0, sizeof cmd);
		cmd.c_opcode = MMC_SEND_CSD;
		cmd.c_arg = MMC_ARG_RCA(sf->rca);
		cmd.c_flags = SCF_CMD_AC | SCF_RSP_R2;

		error = sdmmc_mmc_command(sc, &cmd);
	} else {
		error = sdmmc_mem_send_cxd_data(sc, MMC_SEND_CSD, &cmd.c_resp,
		    sizeof(cmd.c_resp));
	}

#ifdef SDMMC_DEBUG
	if (error == 0)
		sdmmc_dump_data("CSD", cmd.c_resp, sizeof(cmd.c_resp));
#endif
	if (error == 0 && resp != NULL)
		memcpy(resp, &cmd.c_resp, sizeof(*resp));
	return error;
}
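
/*
 * Read the 8-byte SCR with ACMD51.  Unlike CID/CSD this is a data
 * transfer, so the buffer is either loaded into the shared DMA map or
 * bounced through a malloc'd buffer; the raw words are decoded
 * afterwards by sdmmc_mem_decode_scr().
 */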

static int
sdmmc_mem_send_scr(struct sdmmc_softc *sc, struct sdmmc_function *sf,
    uint32_t *scr)
{
	struct sdmmc_command cmd;
	bus_dma_segment_t ds[1];
	void *ptr = NULL;
	int datalen = 8;
	int rseg;
	int error = 0;

	/* Don't lock */

	if (ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
		error = bus_dmamem_alloc(sc->sc_dmat, datalen, PAGE_SIZE, 0,
		    ds, 1, &rseg, BUS_DMA_NOWAIT);
		if (error)
			goto out;
		error = bus_dmamem_map(sc->sc_dmat, ds, 1, datalen, &ptr,
		    BUS_DMA_NOWAIT);
		if (error)
			goto dmamem_free;
		error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmap, ptr, datalen,
		    NULL, BUS_DMA_NOWAIT|BUS_DMA_STREAMING|BUS_DMA_READ);
		if (error)
			goto dmamem_unmap;

		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap, 0, datalen,
		    BUS_DMASYNC_PREREAD);
	} else {
		ptr = malloc(datalen, M_DEVBUF, M_NOWAIT | M_ZERO);
		if (ptr == NULL)
			goto out;
	}

	memset(&cmd, 0, sizeof(cmd));
	cmd.c_data = ptr;
	cmd.c_datalen = datalen;
	cmd.c_blklen = datalen;
	cmd.c_arg = 0;
	cmd.c_flags = SCF_CMD_ADTC | SCF_CMD_READ | SCF_RSP_R1 | SCF_RSP_SPI_R1;
	cmd.c_opcode = SD_APP_SEND_SCR;
	if (ISSET(sc->sc_caps, SMC_CAPS_DMA))
		cmd.c_dmamap = sc->sc_dmap;

	error = sdmmc_app_command(sc, sf, &cmd);
	if (error == 0) {
		if (ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
			bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap, 0, datalen,
			    BUS_DMASYNC_POSTREAD);
		}
		memcpy(scr, ptr, datalen);
	}

out:
	if (ptr != NULL) {
		if (ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
			bus_dmamap_unload(sc->sc_dmat, sc->sc_dmap);
dmamem_unmap:
			bus_dmamem_unmap(sc->sc_dmat, ptr, datalen);
dmamem_free:
			bus_dmamem_free(sc->sc_dmat, ds, rseg);
		} else {
			free(ptr, M_DEVBUF);
		}
	}
	DPRINTF(("%s: sdmmc_mem_send_scr: error = %d\n", SDMMCDEVNAME(sc),
	    error));

#ifdef SDMMC_DEBUG
	if (error == 0)
		sdmmc_dump_data("SCR", scr, datalen);
#endif
	return error;
}
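
/*
 * The SCR arrives as a big-endian 64-bit quantity in raw_scr[].  The
 * shuffle below shifts the whole register right by 8 bits into resp[]
 * so the SCR_* accessor macros (written for the response layout) line
 * up; the bottom SCR byte, which holds no fields we use, is discarded.
 * E.g. with SCR bytes b0..b7 (b0 first on the wire), resp[1]:resp[0]
 * ends up as 0x00b0b1b2:b3b4b5b6.
 */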

static int
sdmmc_mem_decode_scr(struct sdmmc_softc *sc, struct sdmmc_function *sf)
{
	sdmmc_response resp;
	int ver;

	memset(resp, 0, sizeof(resp));
	/*
	 * Rearrange the raw SCR words received from the card into resp[]
	 * so the SCR_* response macros can be applied.
	 */
	resp[0] = be32toh(sf->raw_scr[1]) >> 8;		// LSW
	resp[1] = be32toh(sf->raw_scr[0]);		// MSW
	resp[0] |= (resp[1] & 0xff) << 24;
	resp[1] >>= 8;

	ver = SCR_STRUCTURE(resp);
	sf->scr.sd_spec = SCR_SD_SPEC(resp);
	sf->scr.bus_width = SCR_SD_BUS_WIDTHS(resp);

	DPRINTF(("%s: sdmmc_mem_decode_scr: %08x%08x ver=%d, spec=%d, bus width=%d\n",
	    SDMMCDEVNAME(sc), resp[1], resp[0],
	    ver, sf->scr.sd_spec, sf->scr.bus_width));

	if (ver != 0 && ver != 1) {
		DPRINTF(("%s: unknown structure version: %d\n",
		    SDMMCDEVNAME(sc), ver));
		return EINVAL;
	}
	return 0;
}

static int
sdmmc_mem_send_cxd_data(struct sdmmc_softc *sc, int opcode, void *data,
    size_t datalen)
{
	struct sdmmc_command cmd;
	bus_dma_segment_t ds[1];
	void *ptr = NULL;
	int rseg;
	int error = 0;

	if (ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
		error = bus_dmamem_alloc(sc->sc_dmat, datalen, PAGE_SIZE, 0, ds,
		    1, &rseg, BUS_DMA_NOWAIT);
		if (error)
			goto out;
		error = bus_dmamem_map(sc->sc_dmat, ds, 1, datalen, &ptr,
		    BUS_DMA_NOWAIT);
		if (error)
			goto dmamem_free;
		error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmap, ptr, datalen,
		    NULL, BUS_DMA_NOWAIT|BUS_DMA_STREAMING|BUS_DMA_READ);
		if (error)
			goto dmamem_unmap;

		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap, 0, datalen,
		    BUS_DMASYNC_PREREAD);
	} else {
		ptr = malloc(datalen, M_DEVBUF, M_NOWAIT | M_ZERO);
		if (ptr == NULL)
			goto out;
	}

	memset(&cmd, 0, sizeof(cmd));
	cmd.c_data = ptr;
	cmd.c_datalen = datalen;
	cmd.c_blklen = datalen;
	cmd.c_opcode = opcode;
	cmd.c_arg = 0;
	cmd.c_flags = SCF_CMD_ADTC | SCF_CMD_READ | SCF_RSP_SPI_R1;
	if (opcode == MMC_SEND_EXT_CSD)
		SET(cmd.c_flags, SCF_RSP_R1);
	else
		SET(cmd.c_flags, SCF_RSP_R2);
	if (ISSET(sc->sc_caps, SMC_CAPS_DMA))
		cmd.c_dmamap = sc->sc_dmap;

	error = sdmmc_mmc_command(sc, &cmd);
	if (error == 0) {
		if (ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
			bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap, 0, datalen,
			    BUS_DMASYNC_POSTREAD);
		}
		memcpy(data, ptr, datalen);
#ifdef SDMMC_DEBUG
		sdmmc_dump_data("CXD", data, datalen);
#endif
	}

out:
	if (ptr != NULL) {
		if (ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
			bus_dmamap_unload(sc->sc_dmat, sc->sc_dmap);
dmamem_unmap:
			bus_dmamem_unmap(sc->sc_dmat, ptr, datalen);
dmamem_free:
			bus_dmamem_free(sc->sc_dmat, ds, rseg);
		} else {
			free(ptr, M_DEVBUF);
		}
	}
	return error;
}

static int
sdmmc_set_bus_width(struct sdmmc_function *sf, int width)
{
	struct sdmmc_softc *sc = sf->sc;
	struct sdmmc_command cmd;
	int error;

	if (ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE))
		return ENODEV;

	memset(&cmd, 0, sizeof(cmd));
	cmd.c_opcode = SD_APP_SET_BUS_WIDTH;
	cmd.c_flags = SCF_RSP_R1 | SCF_CMD_AC;

	switch (width) {
	case 1:
		cmd.c_arg = SD_ARG_BUS_WIDTH_1;
		break;

	case 4:
		cmd.c_arg = SD_ARG_BUS_WIDTH_4;
		break;

	default:
		return EINVAL;
	}

	error = sdmmc_app_command(sc, sf, &cmd);
	if (error == 0)
		error = sdmmc_chip_bus_width(sc->sc_sct, sc->sc_sch, width);
	return error;
}
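
/*
 * CMD6 (SWITCH_FUNC).  The argument packs one 4-bit function number
 * per group, with 0xf meaning "no change", and bit 31 selects check
 * (0) or set (1) mode.  E.g. switching group 1 to SDR104 (function 3)
 * gives (1 << 31) | 0x00fffff0 | 3 = 0x80fffff3.  The 64-byte status
 * block comes back big-endian and is converted by
 * sdmmc_be512_to_bitfield512() so SFUNC_STATUS_GROUP() can be used.
 */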

static int
sdmmc_mem_sd_switch(struct sdmmc_function *sf, int mode, int group,
    int function, sdmmc_bitfield512_t *status)
{
	struct sdmmc_softc *sc = sf->sc;
	struct sdmmc_command cmd;
	bus_dma_segment_t ds[1];
	void *ptr = NULL;
	int gsft, rseg, error = 0;
	const int statlen = 64;

	if (sf->scr.sd_spec >= SCR_SD_SPEC_VER_1_10 &&
	    !ISSET(sf->csd.ccc, SD_CSD_CCC_SWITCH))
		return EINVAL;

	if (group <= 0 || group > 6 ||
	    function < 0 || function > 15)
		return EINVAL;

	gsft = (group - 1) << 2;

	if (ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
		error = bus_dmamem_alloc(sc->sc_dmat, statlen, PAGE_SIZE, 0, ds,
		    1, &rseg, BUS_DMA_NOWAIT);
		if (error)
			goto out;
		error = bus_dmamem_map(sc->sc_dmat, ds, 1, statlen, &ptr,
		    BUS_DMA_NOWAIT);
		if (error)
			goto dmamem_free;
		error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmap, ptr, statlen,
		    NULL, BUS_DMA_NOWAIT|BUS_DMA_STREAMING|BUS_DMA_READ);
		if (error)
			goto dmamem_unmap;

		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap, 0, statlen,
		    BUS_DMASYNC_PREREAD);
	} else {
		ptr = malloc(statlen, M_DEVBUF, M_NOWAIT | M_ZERO);
		if (ptr == NULL)
			goto out;
	}

	memset(&cmd, 0, sizeof(cmd));
	cmd.c_data = ptr;
	cmd.c_datalen = statlen;
	cmd.c_blklen = statlen;
	cmd.c_opcode = SD_SEND_SWITCH_FUNC;
	cmd.c_arg =
	    (!!mode << 31) | (function << gsft) | (0x00ffffff & ~(0xf << gsft));
	cmd.c_flags = SCF_CMD_ADTC | SCF_CMD_READ | SCF_RSP_R1 | SCF_RSP_SPI_R1;
	if (ISSET(sc->sc_caps, SMC_CAPS_DMA))
		cmd.c_dmamap = sc->sc_dmap;

	error = sdmmc_mmc_command(sc, &cmd);
	if (error == 0) {
		if (ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
			bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap, 0, statlen,
			    BUS_DMASYNC_POSTREAD);
		}
		memcpy(status, ptr, statlen);
	}

out:
	if (ptr != NULL) {
		if (ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
			bus_dmamap_unload(sc->sc_dmat, sc->sc_dmap);
dmamem_unmap:
			bus_dmamem_unmap(sc->sc_dmat, ptr, statlen);
dmamem_free:
			bus_dmamem_free(sc->sc_dmat, ds, rseg);
		} else {
			free(ptr, M_DEVBUF);
		}
	}

	if (error == 0)
		sdmmc_be512_to_bitfield512(status);

	return error;
}
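
/*
 * MMC CMD6 (SWITCH), write-byte form: the argument is
 * (access << 24) | (EXT_CSD index << 16) | (value << 8) | cmd_set.
 * As a rough example, assuming the usual JEDEC byte offsets
 * (HS_TIMING at byte 185, write-byte access 0x3), enabling high speed
 * would be 0x03b90100.
 */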

static int
sdmmc_mem_mmc_switch(struct sdmmc_function *sf, uint8_t set, uint8_t index,
    uint8_t value)
{
	struct sdmmc_softc *sc = sf->sc;
	struct sdmmc_command cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.c_opcode = MMC_SWITCH;
	cmd.c_arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
	    (index << 16) | (value << 8) | set;
	cmd.c_flags = SCF_RSP_SPI_R1B | SCF_RSP_R1B | SCF_CMD_AC;

	return sdmmc_mmc_command(sc, &cmd);
}

/*
 * SPI mode function
 */
static int
sdmmc_mem_spi_read_ocr(struct sdmmc_softc *sc, uint32_t hcs, uint32_t *card_ocr)
{
	struct sdmmc_command cmd;
	int error;

	memset(&cmd, 0, sizeof(cmd));
	cmd.c_opcode = MMC_READ_OCR;
	cmd.c_arg = hcs ? MMC_OCR_HCS : 0;
	cmd.c_flags = SCF_RSP_SPI_R3;

	error = sdmmc_mmc_command(sc, &cmd);
	if (error == 0 && card_ocr != NULL)
		*card_ocr = cmd.c_resp[1];
	DPRINTF(("%s: sdmmc_mem_spi_read_ocr: error=%d, ocr=%#x\n",
	    SDMMCDEVNAME(sc), error, cmd.c_resp[1]));
	return error;
}

/*
 * read/write function
 */
/* read */
static int
sdmmc_mem_single_read_block(struct sdmmc_function *sf, uint32_t blkno,
    u_char *data, size_t datalen)
{
	struct sdmmc_softc *sc = sf->sc;
	int error = 0;
	int i;

	KASSERT((datalen % SDMMC_SECTOR_SIZE) == 0);
	KASSERT(!ISSET(sc->sc_caps, SMC_CAPS_DMA));

	for (i = 0; i < datalen / SDMMC_SECTOR_SIZE; i++) {
		error = sdmmc_mem_read_block_subr(sf, sc->sc_dmap, blkno + i,
		    data + i * SDMMC_SECTOR_SIZE, SDMMC_SECTOR_SIZE);
		if (error)
			break;
	}
	return error;
}

/*
 * Simulate multi-segment dma transfer.
 */
static int
sdmmc_mem_single_segment_dma_read_block(struct sdmmc_function *sf,
    uint32_t blkno, u_char *data, size_t datalen)
{
	struct sdmmc_softc *sc = sf->sc;
	bool use_bbuf = false;
	int error = 0;
	int i;

	for (i = 0; i < sc->sc_dmap->dm_nsegs; i++) {
		size_t len = sc->sc_dmap->dm_segs[i].ds_len;
		if ((len % SDMMC_SECTOR_SIZE) != 0) {
			use_bbuf = true;
			break;
		}
	}
	if (use_bbuf) {
		bus_dmamap_sync(sc->sc_dmat, sf->bbuf_dmap, 0, datalen,
		    BUS_DMASYNC_PREREAD);

		error = sdmmc_mem_read_block_subr(sf, sf->bbuf_dmap,
		    blkno, data, datalen);
		if (error) {
			bus_dmamap_unload(sc->sc_dmat, sf->bbuf_dmap);
			return error;
		}

		bus_dmamap_sync(sc->sc_dmat, sf->bbuf_dmap, 0, datalen,
		    BUS_DMASYNC_POSTREAD);

		/* Copy from bounce buffer */
		memcpy(data, sf->bbuf, datalen);

		return 0;
	}

	for (i = 0; i < sc->sc_dmap->dm_nsegs; i++) {
		size_t len = sc->sc_dmap->dm_segs[i].ds_len;

		error = bus_dmamap_load(sc->sc_dmat, sf->sseg_dmap,
		    data, len, NULL, BUS_DMA_NOWAIT|BUS_DMA_READ);
		if (error)
			return error;

		bus_dmamap_sync(sc->sc_dmat, sf->sseg_dmap, 0, len,
		    BUS_DMASYNC_PREREAD);

		error = sdmmc_mem_read_block_subr(sf, sf->sseg_dmap,
		    blkno, data, len);
		if (error) {
			bus_dmamap_unload(sc->sc_dmat, sf->sseg_dmap);
			return error;
		}

		bus_dmamap_sync(sc->sc_dmat, sf->sseg_dmap, 0, len,
		    BUS_DMASYNC_POSTREAD);

		bus_dmamap_unload(sc->sc_dmat, sf->sseg_dmap);

		blkno += len / SDMMC_SECTOR_SIZE;
		data += len;
	}
	return 0;
}
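
/*
 * Issue the actual read: a single-block read for one sector, a
 * multiple-block read plus an explicit STOP_TRANSMISSION (unless the
 * host auto-stops) for more, then poll SEND_STATUS until the card
 * reports READY_FOR_DATA.  Standard-capacity cards are byte addressed,
 * so when SFF_SDHC is not set blkno is shifted left by
 * SDMMC_SECTOR_SIZE_SB (512-byte sectors), e.g. block 0x1000 becomes
 * byte address 0x200000; high-capacity cards take the block number
 * as-is.
 */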

static int
sdmmc_mem_read_block_subr(struct sdmmc_function *sf, bus_dmamap_t dmap,
    uint32_t blkno, u_char *data, size_t datalen)
{
	struct sdmmc_softc *sc = sf->sc;
	struct sdmmc_command cmd;
	int error;

	if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
		error = sdmmc_select_card(sc, sf);
		if (error)
			goto out;
	}

	memset(&cmd, 0, sizeof(cmd));
	cmd.c_data = data;
	cmd.c_datalen = datalen;
	cmd.c_blklen = SDMMC_SECTOR_SIZE;
	cmd.c_opcode = (cmd.c_datalen / cmd.c_blklen) > 1 ?
	    MMC_READ_BLOCK_MULTIPLE : MMC_READ_BLOCK_SINGLE;
	cmd.c_arg = blkno;
	if (!ISSET(sf->flags, SFF_SDHC))
		cmd.c_arg <<= SDMMC_SECTOR_SIZE_SB;
	cmd.c_flags = SCF_CMD_ADTC | SCF_CMD_READ | SCF_RSP_R1 | SCF_RSP_SPI_R1;
	if (ISSET(sc->sc_caps, SMC_CAPS_DMA))
		cmd.c_dmamap = dmap;

	error = sdmmc_mmc_command(sc, &cmd);
	if (error)
		goto out;

	if (!ISSET(sc->sc_caps, SMC_CAPS_AUTO_STOP)) {
		if (cmd.c_opcode == MMC_READ_BLOCK_MULTIPLE) {
			memset(&cmd, 0, sizeof cmd);
			cmd.c_opcode = MMC_STOP_TRANSMISSION;
			cmd.c_arg = MMC_ARG_RCA(sf->rca);
			cmd.c_flags = SCF_CMD_AC | SCF_RSP_R1B | SCF_RSP_SPI_R1B;
			error = sdmmc_mmc_command(sc, &cmd);
			if (error)
				goto out;
		}
	}

	if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
		do {
			memset(&cmd, 0, sizeof(cmd));
			cmd.c_opcode = MMC_SEND_STATUS;
			if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE))
				cmd.c_arg = MMC_ARG_RCA(sf->rca);
			cmd.c_flags = SCF_CMD_AC | SCF_RSP_R1 | SCF_RSP_SPI_R2;
			error = sdmmc_mmc_command(sc, &cmd);
			if (error)
				break;
			/* XXX time out */
		} while (!ISSET(MMC_R1(cmd.c_resp), MMC_R1_READY_FOR_DATA));
	}

out:
	return error;
}

int
sdmmc_mem_read_block(struct sdmmc_function *sf, uint32_t blkno, u_char *data,
    size_t datalen)
{
	struct sdmmc_softc *sc = sf->sc;
	int error;

	SDMMC_LOCK(sc);
	mutex_enter(&sc->sc_mtx);

	if (ISSET(sc->sc_caps, SMC_CAPS_SINGLE_ONLY)) {
		error = sdmmc_mem_single_read_block(sf, blkno, data, datalen);
		goto out;
	}

	if (!ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
		error = sdmmc_mem_read_block_subr(sf, sc->sc_dmap, blkno, data,
		    datalen);
		goto out;
	}

	/* DMA transfer */
	error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmap, data, datalen, NULL,
	    BUS_DMA_NOWAIT|BUS_DMA_READ);
	if (error)
		goto out;

#ifdef SDMMC_DEBUG
	printf("data=%p, datalen=%zu\n", data, datalen);
	for (int i = 0; i < sc->sc_dmap->dm_nsegs; i++) {
		printf("seg#%d: addr=%#lx, size=%#lx\n", i,
		    (u_long)sc->sc_dmap->dm_segs[i].ds_addr,
		    (u_long)sc->sc_dmap->dm_segs[i].ds_len);
	}
#endif

	if (sc->sc_dmap->dm_nsegs > 1
	    && !ISSET(sc->sc_caps, SMC_CAPS_MULTI_SEG_DMA)) {
		error = sdmmc_mem_single_segment_dma_read_block(sf, blkno,
		    data, datalen);
		goto unload;
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap, 0, datalen,
	    BUS_DMASYNC_PREREAD);

	error = sdmmc_mem_read_block_subr(sf, sc->sc_dmap, blkno, data,
	    datalen);
	if (error)
		goto unload;

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap, 0, datalen,
	    BUS_DMASYNC_POSTREAD);
unload:
	bus_dmamap_unload(sc->sc_dmat, sc->sc_dmap);

out:
	mutex_exit(&sc->sc_mtx);
	SDMMC_UNLOCK(sc);

	return error;
}
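
/*
 * The write path below mirrors the read path: a single-block loop for
 * SMC_CAPS_SINGLE_ONLY hosts, a bounce buffer or per-segment map for
 * hosts without multi-segment DMA, and PREWRITE/POSTWRITE syncs around
 * the transfer.  sdmmc_mem_write_block() additionally fails with EIO
 * when the host reports the card as write-protected.
 */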

/* write */
static int
sdmmc_mem_single_write_block(struct sdmmc_function *sf, uint32_t blkno,
    u_char *data, size_t datalen)
{
	struct sdmmc_softc *sc = sf->sc;
	int error = 0;
	int i;

	KASSERT((datalen % SDMMC_SECTOR_SIZE) == 0);
	KASSERT(!ISSET(sc->sc_caps, SMC_CAPS_DMA));

	for (i = 0; i < datalen / SDMMC_SECTOR_SIZE; i++) {
		error = sdmmc_mem_write_block_subr(sf, sc->sc_dmap, blkno + i,
		    data + i * SDMMC_SECTOR_SIZE, SDMMC_SECTOR_SIZE);
		if (error)
			break;
	}
	return error;
}

/*
 * Simulate multi-segment dma transfer.
 */
static int
sdmmc_mem_single_segment_dma_write_block(struct sdmmc_function *sf,
    uint32_t blkno, u_char *data, size_t datalen)
{
	struct sdmmc_softc *sc = sf->sc;
	bool use_bbuf = false;
	int error = 0;
	int i;

	for (i = 0; i < sc->sc_dmap->dm_nsegs; i++) {
		size_t len = sc->sc_dmap->dm_segs[i].ds_len;
		if ((len % SDMMC_SECTOR_SIZE) != 0) {
			use_bbuf = true;
			break;
		}
	}
	if (use_bbuf) {
		/* Copy to bounce buffer */
		memcpy(sf->bbuf, data, datalen);

		bus_dmamap_sync(sc->sc_dmat, sf->bbuf_dmap, 0, datalen,
		    BUS_DMASYNC_PREWRITE);

		error = sdmmc_mem_write_block_subr(sf, sf->bbuf_dmap,
		    blkno, data, datalen);
		if (error) {
			bus_dmamap_unload(sc->sc_dmat, sf->bbuf_dmap);
			return error;
		}

		bus_dmamap_sync(sc->sc_dmat, sf->bbuf_dmap, 0, datalen,
		    BUS_DMASYNC_POSTWRITE);

		return 0;
	}

	for (i = 0; i < sc->sc_dmap->dm_nsegs; i++) {
		size_t len = sc->sc_dmap->dm_segs[i].ds_len;

		error = bus_dmamap_load(sc->sc_dmat, sf->sseg_dmap,
		    data, len, NULL, BUS_DMA_NOWAIT|BUS_DMA_WRITE);
		if (error)
			return error;

		bus_dmamap_sync(sc->sc_dmat, sf->sseg_dmap, 0, len,
		    BUS_DMASYNC_PREWRITE);

		error = sdmmc_mem_write_block_subr(sf, sf->sseg_dmap,
		    blkno, data, len);
		if (error) {
			bus_dmamap_unload(sc->sc_dmat, sf->sseg_dmap);
			return error;
		}

		bus_dmamap_sync(sc->sc_dmat, sf->sseg_dmap, 0, len,
		    BUS_DMASYNC_POSTWRITE);

		bus_dmamap_unload(sc->sc_dmat, sf->sseg_dmap);

		blkno += len / SDMMC_SECTOR_SIZE;
		data += len;
	}

	return error;
}

static int
sdmmc_mem_write_block_subr(struct sdmmc_function *sf, bus_dmamap_t dmap,
    uint32_t blkno, u_char *data, size_t datalen)
{
	struct sdmmc_softc *sc = sf->sc;
	struct sdmmc_command cmd;
	int error;

	if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
		error = sdmmc_select_card(sc, sf);
		if (error)
			goto out;
	}

	memset(&cmd, 0, sizeof(cmd));
	cmd.c_data = data;
	cmd.c_datalen = datalen;
	cmd.c_blklen = SDMMC_SECTOR_SIZE;
	cmd.c_opcode = (cmd.c_datalen / cmd.c_blklen) > 1 ?
	    MMC_WRITE_BLOCK_MULTIPLE : MMC_WRITE_BLOCK_SINGLE;
	cmd.c_arg = blkno;
	if (!ISSET(sf->flags, SFF_SDHC))
		cmd.c_arg <<= SDMMC_SECTOR_SIZE_SB;
	cmd.c_flags = SCF_CMD_ADTC | SCF_RSP_R1;
	if (ISSET(sc->sc_caps, SMC_CAPS_DMA))
		cmd.c_dmamap = dmap;

	error = sdmmc_mmc_command(sc, &cmd);
	if (error)
		goto out;

	if (!ISSET(sc->sc_caps, SMC_CAPS_AUTO_STOP)) {
		if (cmd.c_opcode == MMC_WRITE_BLOCK_MULTIPLE) {
			memset(&cmd, 0, sizeof(cmd));
			cmd.c_opcode = MMC_STOP_TRANSMISSION;
			cmd.c_flags = SCF_CMD_AC | SCF_RSP_R1B | SCF_RSP_SPI_R1B;
			error = sdmmc_mmc_command(sc, &cmd);
			if (error)
				goto out;
		}
	}

	if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
		do {
			memset(&cmd, 0, sizeof(cmd));
			cmd.c_opcode = MMC_SEND_STATUS;
			if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE))
				cmd.c_arg = MMC_ARG_RCA(sf->rca);
			cmd.c_flags = SCF_CMD_AC | SCF_RSP_R1 | SCF_RSP_SPI_R2;
			error = sdmmc_mmc_command(sc, &cmd);
			if (error)
				break;
			/* XXX time out */
		} while (!ISSET(MMC_R1(cmd.c_resp), MMC_R1_READY_FOR_DATA));
	}

out:
	return error;
}

int
sdmmc_mem_write_block(struct sdmmc_function *sf, uint32_t blkno, u_char *data,
    size_t datalen)
{
	struct sdmmc_softc *sc = sf->sc;
	int error;

	SDMMC_LOCK(sc);
	mutex_enter(&sc->sc_mtx);

	if (sdmmc_chip_write_protect(sc->sc_sct, sc->sc_sch)) {
		aprint_normal_dev(sc->sc_dev, "write-protected\n");
		error = EIO;
		goto out;
	}

	if (ISSET(sc->sc_caps, SMC_CAPS_SINGLE_ONLY)) {
		error = sdmmc_mem_single_write_block(sf, blkno, data, datalen);
		goto out;
	}

	if (!ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
		error = sdmmc_mem_write_block_subr(sf, sc->sc_dmap, blkno, data,
		    datalen);
		goto out;
	}

	/* DMA transfer */
	error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmap, data, datalen, NULL,
	    BUS_DMA_NOWAIT|BUS_DMA_WRITE);
	if (error)
		goto out;

#ifdef SDMMC_DEBUG
	aprint_normal_dev(sc->sc_dev, "%s: data=%p, datalen=%zu\n",
	    __func__, data, datalen);
	for (int i = 0; i < sc->sc_dmap->dm_nsegs; i++) {
		aprint_normal_dev(sc->sc_dev,
		    "%s: seg#%d: addr=%#lx, size=%#lx\n", __func__, i,
		    (u_long)sc->sc_dmap->dm_segs[i].ds_addr,
		    (u_long)sc->sc_dmap->dm_segs[i].ds_len);
	}
#endif

	if (sc->sc_dmap->dm_nsegs > 1
	    && !ISSET(sc->sc_caps, SMC_CAPS_MULTI_SEG_DMA)) {
		error = sdmmc_mem_single_segment_dma_write_block(sf, blkno,
		    data, datalen);
		goto unload;
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap, 0, datalen,
	    BUS_DMASYNC_PREWRITE);

	error = sdmmc_mem_write_block_subr(sf, sc->sc_dmap, blkno, data,
	    datalen);
	if (error)
		goto unload;

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap, 0, datalen,
	    BUS_DMASYNC_POSTWRITE);
unload:
	bus_dmamap_unload(sc->sc_dmat, sc->sc_dmap);

out:
	mutex_exit(&sc->sc_mtx);
	SDMMC_UNLOCK(sc);

	return error;
}