1 /* $NetBSD: sdhc.c,v 1.107 2020/07/15 15:57:52 msaitoh Exp $ */ 2 /* $OpenBSD: sdhc.c,v 1.25 2009/01/13 19:44:20 grange Exp $ */ 3 4 /* 5 * Copyright (c) 2006 Uwe Stuehler <uwe@openbsd.org> 6 * 7 * Permission to use, copy, modify, and distribute this software for any 8 * purpose with or without fee is hereby granted, provided that the above 9 * copyright notice and this permission notice appear in all copies. 10 * 11 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 12 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 13 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 14 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 15 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 16 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 17 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 18 */ 19 20 /* 21 * SD Host Controller driver based on the SD Host Controller Standard 22 * Simplified Specification Version 1.00 (www.sdcard.com). 23 */ 24 25 #include <sys/cdefs.h> 26 __KERNEL_RCSID(0, "$NetBSD: sdhc.c,v 1.107 2020/07/15 15:57:52 msaitoh Exp $"); 27 28 #ifdef _KERNEL_OPT 29 #include "opt_sdmmc.h" 30 #endif 31 32 #include <sys/param.h> 33 #include <sys/device.h> 34 #include <sys/kernel.h> 35 #include <sys/malloc.h> 36 #include <sys/systm.h> 37 #include <sys/mutex.h> 38 #include <sys/condvar.h> 39 #include <sys/atomic.h> 40 41 #include <dev/sdmmc/sdhcreg.h> 42 #include <dev/sdmmc/sdhcvar.h> 43 #include <dev/sdmmc/sdmmcchip.h> 44 #include <dev/sdmmc/sdmmcreg.h> 45 #include <dev/sdmmc/sdmmcvar.h> 46 47 #ifdef SDHC_DEBUG 48 int sdhcdebug = 1; 49 #define DPRINTF(n,s) do { if ((n) <= sdhcdebug) printf s; } while (0) 50 void sdhc_dump_regs(struct sdhc_host *); 51 #else 52 #define DPRINTF(n,s) do {} while (0) 53 #endif 54 55 #define SDHC_COMMAND_TIMEOUT hz 56 #define SDHC_BUFFER_TIMEOUT hz 57 #define SDHC_TRANSFER_TIMEOUT hz 58 #define SDHC_DMA_TIMEOUT (hz*3) 59 #define SDHC_TUNING_TIMEOUT hz 60 61 struct sdhc_host { 62 struct sdhc_softc *sc; /* host controller device */ 63 64 bus_space_tag_t iot; /* host register set tag */ 65 bus_space_handle_t ioh; /* host register set handle */ 66 bus_size_t ios; /* host register space size */ 67 bus_dma_tag_t dmat; /* host DMA tag */ 68 69 device_t sdmmc; /* generic SD/MMC device */ 70 71 u_int clkbase; /* base clock frequency in KHz */ 72 int maxblklen; /* maximum block length */ 73 uint32_t ocr; /* OCR value from capabilities */ 74 75 uint8_t regs[14]; /* host controller state */ 76 77 uint16_t intr_status; /* soft interrupt status */ 78 uint16_t intr_error_status; /* soft error status */ 79 kmutex_t intr_lock; 80 kcondvar_t intr_cv; 81 82 callout_t tuning_timer; 83 int tuning_timing; 84 u_int tuning_timer_count; 85 u_int tuning_timer_pending; 86 87 int specver; /* spec. version */ 88 89 uint32_t flags; /* flags for this host */ 90 #define SHF_USE_DMA 0x0001 91 #define SHF_USE_4BIT_MODE 0x0002 92 #define SHF_USE_8BIT_MODE 0x0004 93 #define SHF_MODE_DMAEN 0x0008 /* needs SDHC_DMA_ENABLE in mode */ 94 #define SHF_USE_ADMA2_32 0x0010 95 #define SHF_USE_ADMA2_64 0x0020 96 #define SHF_USE_ADMA2_MASK 0x0030 97 98 bus_dmamap_t adma_map; 99 bus_dma_segment_t adma_segs[1]; 100 void *adma2; 101 102 uint8_t vdd; /* last vdd setting */ 103 }; 104 105 #define HDEVNAME(hp) (device_xname((hp)->sc->sc_dev)) 106 107 static uint8_t 108 hread1(struct sdhc_host *hp, bus_size_t reg) 109 { 110 111 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_32BIT_ACCESS)) 112 return bus_space_read_1(hp->iot, hp->ioh, reg); 113 return bus_space_read_4(hp->iot, hp->ioh, reg & -4) >> (8 * (reg & 3)); 114 } 115 116 static uint16_t 117 hread2(struct sdhc_host *hp, bus_size_t reg) 118 { 119 120 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_32BIT_ACCESS)) 121 return bus_space_read_2(hp->iot, hp->ioh, reg); 122 return bus_space_read_4(hp->iot, hp->ioh, reg & -4) >> (8 * (reg & 2)); 123 } 124 125 #define HREAD1(hp, reg) hread1(hp, reg) 126 #define HREAD2(hp, reg) hread2(hp, reg) 127 #define HREAD4(hp, reg) \ 128 (bus_space_read_4((hp)->iot, (hp)->ioh, (reg))) 129 130 131 static void 132 hwrite1(struct sdhc_host *hp, bus_size_t o, uint8_t val) 133 { 134 135 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_32BIT_ACCESS)) { 136 bus_space_write_1(hp->iot, hp->ioh, o, val); 137 } else { 138 const size_t shift = 8 * (o & 3); 139 o &= -4; 140 uint32_t tmp = bus_space_read_4(hp->iot, hp->ioh, o); 141 tmp = (val << shift) | (tmp & ~(0xff << shift)); 142 bus_space_write_4(hp->iot, hp->ioh, o, tmp); 143 } 144 } 145 146 static void 147 hwrite2(struct sdhc_host *hp, bus_size_t o, uint16_t val) 148 { 149 150 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_32BIT_ACCESS)) { 151 bus_space_write_2(hp->iot, hp->ioh, o, val); 152 } else { 153 const size_t shift = 8 * (o & 2); 154 o &= -4; 155 uint32_t tmp = bus_space_read_4(hp->iot, hp->ioh, o); 156 tmp = (val << shift) | (tmp & ~(0xffff << shift)); 157 bus_space_write_4(hp->iot, hp->ioh, o, tmp); 158 } 159 } 160 161 static void 162 hwrite4(struct sdhc_host *hp, bus_size_t o, uint32_t val) 163 { 164 165 bus_space_write_4(hp->iot, hp->ioh, o, val); 166 } 167 168 #define HWRITE1(hp, reg, val) hwrite1(hp, reg, val) 169 #define HWRITE2(hp, reg, val) hwrite2(hp, reg, val) 170 #define HWRITE4(hp, reg, val) hwrite4(hp, reg, val) 171 172 #define HCLR1(hp, reg, bits) \ 173 do if ((bits) != 0) HWRITE1((hp), (reg), HREAD1((hp), (reg)) & ~(bits)); while (0) 174 #define HCLR2(hp, reg, bits) \ 175 do if ((bits) != 0) HWRITE2((hp), (reg), HREAD2((hp), (reg)) & ~(bits)); while (0) 176 #define HCLR4(hp, reg, bits) \ 177 do if ((bits) != 0) HWRITE4((hp), (reg), HREAD4((hp), (reg)) & ~(bits)); while (0) 178 #define HSET1(hp, reg, bits) \ 179 do if ((bits) != 0) HWRITE1((hp), (reg), HREAD1((hp), (reg)) | (bits)); while (0) 180 #define HSET2(hp, reg, bits) \ 181 do if ((bits) != 0) HWRITE2((hp), (reg), HREAD2((hp), (reg)) | (bits)); while (0) 182 #define HSET4(hp, reg, bits) \ 183 do if ((bits) != 0) HWRITE4((hp), (reg), HREAD4((hp), (reg)) | (bits)); while (0) 184 185 static int sdhc_host_reset(sdmmc_chipset_handle_t); 186 static int sdhc_host_reset1(sdmmc_chipset_handle_t); 187 static uint32_t sdhc_host_ocr(sdmmc_chipset_handle_t); 188 static int sdhc_host_maxblklen(sdmmc_chipset_handle_t); 189 static int sdhc_card_detect(sdmmc_chipset_handle_t); 190 static int sdhc_write_protect(sdmmc_chipset_handle_t); 191 static int sdhc_bus_power(sdmmc_chipset_handle_t, uint32_t); 192 static int sdhc_bus_clock_ddr(sdmmc_chipset_handle_t, int, bool); 193 static int sdhc_bus_width(sdmmc_chipset_handle_t, int); 194 static int sdhc_bus_rod(sdmmc_chipset_handle_t, int); 195 static void sdhc_card_enable_intr(sdmmc_chipset_handle_t, int); 196 static void sdhc_card_intr_ack(sdmmc_chipset_handle_t); 197 static void sdhc_exec_command(sdmmc_chipset_handle_t, 198 struct sdmmc_command *); 199 static int sdhc_signal_voltage(sdmmc_chipset_handle_t, int); 200 static int sdhc_execute_tuning1(struct sdhc_host *, int); 201 static int sdhc_execute_tuning(sdmmc_chipset_handle_t, int); 202 static void sdhc_tuning_timer(void *); 203 static void sdhc_hw_reset(sdmmc_chipset_handle_t); 204 static int sdhc_start_command(struct sdhc_host *, struct sdmmc_command *); 205 static int sdhc_wait_state(struct sdhc_host *, uint32_t, uint32_t); 206 static int sdhc_soft_reset(struct sdhc_host *, int); 207 static int sdhc_wait_intr(struct sdhc_host *, int, int, bool); 208 static void sdhc_transfer_data(struct sdhc_host *, struct sdmmc_command *); 209 static int sdhc_transfer_data_dma(struct sdhc_host *, struct sdmmc_command *); 210 static int sdhc_transfer_data_pio(struct sdhc_host *, struct sdmmc_command *); 211 static void sdhc_read_data_pio(struct sdhc_host *, uint8_t *, u_int); 212 static void sdhc_write_data_pio(struct sdhc_host *, uint8_t *, u_int); 213 static void esdhc_read_data_pio(struct sdhc_host *, uint8_t *, u_int); 214 static void esdhc_write_data_pio(struct sdhc_host *, uint8_t *, u_int); 215 216 static struct sdmmc_chip_functions sdhc_functions = { 217 /* host controller reset */ 218 .host_reset = sdhc_host_reset, 219 220 /* host controller capabilities */ 221 .host_ocr = sdhc_host_ocr, 222 .host_maxblklen = sdhc_host_maxblklen, 223 224 /* card detection */ 225 .card_detect = sdhc_card_detect, 226 227 /* write protect */ 228 .write_protect = sdhc_write_protect, 229 230 /* bus power, clock frequency, width and ROD(OpenDrain/PushPull) */ 231 .bus_power = sdhc_bus_power, 232 .bus_clock = NULL, /* see sdhc_bus_clock_ddr */ 233 .bus_width = sdhc_bus_width, 234 .bus_rod = sdhc_bus_rod, 235 236 /* command execution */ 237 .exec_command = sdhc_exec_command, 238 239 /* card interrupt */ 240 .card_enable_intr = sdhc_card_enable_intr, 241 .card_intr_ack = sdhc_card_intr_ack, 242 243 /* UHS functions */ 244 .signal_voltage = sdhc_signal_voltage, 245 .bus_clock_ddr = sdhc_bus_clock_ddr, 246 .execute_tuning = sdhc_execute_tuning, 247 .hw_reset = sdhc_hw_reset, 248 }; 249 250 static int 251 sdhc_cfprint(void *aux, const char *pnp) 252 { 253 const struct sdmmcbus_attach_args * const saa = aux; 254 const struct sdhc_host * const hp = saa->saa_sch; 255 256 if (pnp) { 257 aprint_normal("sdmmc at %s", pnp); 258 } 259 for (size_t host = 0; host < hp->sc->sc_nhosts; host++) { 260 if (hp->sc->sc_host[host] == hp) { 261 aprint_normal(" slot %zu", host); 262 } 263 } 264 265 return UNCONF; 266 } 267 268 /* 269 * Called by attachment driver. For each SD card slot there is one SD 270 * host controller standard register set. (1.3) 271 */ 272 int 273 sdhc_host_found(struct sdhc_softc *sc, bus_space_tag_t iot, 274 bus_space_handle_t ioh, bus_size_t iosize) 275 { 276 struct sdmmcbus_attach_args saa; 277 struct sdhc_host *hp; 278 uint32_t caps, caps2; 279 uint16_t sdhcver; 280 int error; 281 282 /* Allocate one more host structure. */ 283 hp = malloc(sizeof(struct sdhc_host), M_DEVBUF, M_WAITOK|M_ZERO); 284 if (hp == NULL) { 285 aprint_error_dev(sc->sc_dev, 286 "couldn't alloc memory (sdhc host)\n"); 287 goto err1; 288 } 289 sc->sc_host[sc->sc_nhosts++] = hp; 290 291 /* Fill in the new host structure. */ 292 hp->sc = sc; 293 hp->iot = iot; 294 hp->ioh = ioh; 295 hp->ios = iosize; 296 hp->dmat = sc->sc_dmat; 297 298 mutex_init(&hp->intr_lock, MUTEX_DEFAULT, IPL_SDMMC); 299 cv_init(&hp->intr_cv, "sdhcintr"); 300 callout_init(&hp->tuning_timer, CALLOUT_MPSAFE); 301 callout_setfunc(&hp->tuning_timer, sdhc_tuning_timer, hp); 302 303 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_USDHC)) { 304 sdhcver = SDHC_SPEC_VERS_300 << SDHC_SPEC_VERS_SHIFT; 305 } else if (ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED)) { 306 sdhcver = HREAD4(hp, SDHC_ESDHC_HOST_CTL_VERSION); 307 } else if (iosize <= SDHC_HOST_CTL_VERSION) { 308 sdhcver = SDHC_SPEC_NOVERS << SDHC_SPEC_VERS_SHIFT; 309 } else { 310 sdhcver = HREAD2(hp, SDHC_HOST_CTL_VERSION); 311 } 312 aprint_normal_dev(sc->sc_dev, "SDHC "); 313 hp->specver = SDHC_SPEC_VERSION(sdhcver); 314 switch (SDHC_SPEC_VERSION(sdhcver)) { 315 case SDHC_SPEC_VERS_100: 316 aprint_normal("1.0"); 317 break; 318 case SDHC_SPEC_VERS_200: 319 aprint_normal("2.0"); 320 break; 321 case SDHC_SPEC_VERS_300: 322 aprint_normal("3.0"); 323 break; 324 case SDHC_SPEC_VERS_400: 325 aprint_normal("4.0"); 326 break; 327 case SDHC_SPEC_VERS_410: 328 aprint_normal("4.1"); 329 break; 330 case SDHC_SPEC_VERS_420: 331 aprint_normal("4.2"); 332 break; 333 case SDHC_SPEC_NOVERS: 334 hp->specver = -1; 335 aprint_normal("NO-VERS"); 336 break; 337 default: 338 aprint_normal("unknown version(0x%x)", 339 SDHC_SPEC_VERSION(sdhcver)); 340 break; 341 } 342 if (SDHC_SPEC_VERSION(sdhcver) != SDHC_SPEC_NOVERS) 343 aprint_normal(", rev %u", SDHC_VENDOR_VERSION(sdhcver)); 344 345 /* 346 * Reset the host controller and enable interrupts. 347 */ 348 (void)sdhc_host_reset(hp); 349 350 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_USDHC)) { 351 /* init uSDHC registers */ 352 HWRITE4(hp, SDHC_MMC_BOOT, 0); 353 HWRITE4(hp, SDHC_HOST_CTL, SDHC_USDHC_BURST_LEN_EN | 354 SDHC_USDHC_HOST_CTL_RESV23 | SDHC_USDHC_EMODE_LE); 355 HWRITE4(hp, SDHC_WATERMARK_LEVEL, 356 (0x10 << SDHC_WATERMARK_WR_BRST_SHIFT) | 357 (0x40 << SDHC_WATERMARK_WRITE_SHIFT) | 358 (0x10 << SDHC_WATERMARK_RD_BRST_SHIFT) | 359 (0x40 << SDHC_WATERMARK_READ_SHIFT)); 360 HSET4(hp, SDHC_VEND_SPEC, 361 SDHC_VEND_SPEC_MBO | 362 SDHC_VEND_SPEC_CARD_CLK_SOFT_EN | 363 SDHC_VEND_SPEC_IPG_PERCLK_SOFT_EN | 364 SDHC_VEND_SPEC_HCLK_SOFT_EN | 365 SDHC_VEND_SPEC_IPG_CLK_SOFT_EN | 366 SDHC_VEND_SPEC_AC12_WR_CHKBUSY_EN | 367 SDHC_VEND_SPEC_FRC_SDCLK_ON); 368 } 369 370 /* Determine host capabilities. */ 371 if (ISSET(sc->sc_flags, SDHC_FLAG_HOSTCAPS)) { 372 caps = sc->sc_caps; 373 caps2 = sc->sc_caps2; 374 } else if (ISSET(hp->sc->sc_flags, SDHC_FLAG_USDHC)) { 375 /* uSDHC capability register is little bit different */ 376 caps = HREAD4(hp, SDHC_CAPABILITIES); 377 caps |= SDHC_8BIT_SUPP; 378 if (caps & SDHC_ADMA1_SUPP) 379 caps |= SDHC_ADMA2_SUPP; 380 sc->sc_caps = caps; 381 /* uSDHC has no SDHC_CAPABILITIES2 register */ 382 caps2 = sc->sc_caps2 = SDHC_SDR50_SUPP | SDHC_DDR50_SUPP; 383 } else { 384 caps = sc->sc_caps = HREAD4(hp, SDHC_CAPABILITIES); 385 if (hp->specver >= SDHC_SPEC_VERS_300) { 386 caps2 = sc->sc_caps2 = HREAD4(hp, SDHC_CAPABILITIES2); 387 } else { 388 caps2 = sc->sc_caps2 = 0; 389 } 390 } 391 392 const u_int retuning_mode = (caps2 >> SDHC_RETUNING_MODES_SHIFT) & 393 SDHC_RETUNING_MODES_MASK; 394 if (retuning_mode == SDHC_RETUNING_MODE_1) { 395 hp->tuning_timer_count = (caps2 >> SDHC_TIMER_COUNT_SHIFT) & 396 SDHC_TIMER_COUNT_MASK; 397 if (hp->tuning_timer_count == 0xf) 398 hp->tuning_timer_count = 0; 399 if (hp->tuning_timer_count) 400 hp->tuning_timer_count = 401 1 << (hp->tuning_timer_count - 1); 402 } 403 404 /* 405 * Use DMA if the host system and the controller support it. 406 * Suports integrated or external DMA egine, with or without 407 * SDHC_DMA_ENABLE in the command. 408 */ 409 if (ISSET(sc->sc_flags, SDHC_FLAG_FORCE_DMA) || 410 (ISSET(sc->sc_flags, SDHC_FLAG_USE_DMA && 411 ISSET(caps, SDHC_DMA_SUPPORT)))) { 412 SET(hp->flags, SHF_USE_DMA); 413 414 if (ISSET(sc->sc_flags, SDHC_FLAG_USE_ADMA2) && 415 ISSET(caps, SDHC_ADMA2_SUPP)) { 416 SET(hp->flags, SHF_MODE_DMAEN); 417 /* 418 * 64-bit mode was present in the 2.00 spec, removed 419 * from 3.00, and re-added in 4.00 with a different 420 * descriptor layout. We only support 2.00 and 3.00 421 * descriptors for now. 422 */ 423 if (hp->specver == SDHC_SPEC_VERS_200 && 424 ISSET(caps, SDHC_64BIT_SYS_BUS)) { 425 SET(hp->flags, SHF_USE_ADMA2_64); 426 aprint_normal(", 64-bit ADMA2"); 427 } else { 428 SET(hp->flags, SHF_USE_ADMA2_32); 429 aprint_normal(", 32-bit ADMA2"); 430 } 431 } else { 432 if (!ISSET(sc->sc_flags, SDHC_FLAG_EXTERNAL_DMA) || 433 ISSET(sc->sc_flags, SDHC_FLAG_EXTDMA_DMAEN)) 434 SET(hp->flags, SHF_MODE_DMAEN); 435 if (sc->sc_vendor_transfer_data_dma) { 436 aprint_normal(", platform DMA"); 437 } else { 438 aprint_normal(", SDMA"); 439 } 440 } 441 } else { 442 aprint_normal(", PIO"); 443 } 444 445 /* 446 * Determine the base clock frequency. (2.2.24) 447 */ 448 if (hp->specver >= SDHC_SPEC_VERS_300) { 449 hp->clkbase = SDHC_BASE_V3_FREQ_KHZ(caps); 450 } else { 451 hp->clkbase = SDHC_BASE_FREQ_KHZ(caps); 452 } 453 if (hp->clkbase == 0 || 454 ISSET(sc->sc_flags, SDHC_FLAG_NO_CLKBASE)) { 455 if (sc->sc_clkbase == 0) { 456 /* The attachment driver must tell us. */ 457 aprint_error_dev(sc->sc_dev, 458 "unknown base clock frequency\n"); 459 goto err; 460 } 461 hp->clkbase = sc->sc_clkbase; 462 } 463 if (hp->clkbase < 10000 || hp->clkbase > 10000 * 256) { 464 /* SDHC 1.0 supports only 10-63 MHz. */ 465 aprint_error_dev(sc->sc_dev, 466 "base clock frequency out of range: %u MHz\n", 467 hp->clkbase / 1000); 468 goto err; 469 } 470 aprint_normal(", %u kHz", hp->clkbase); 471 472 /* 473 * XXX Set the data timeout counter value according to 474 * capabilities. (2.2.15) 475 */ 476 HWRITE1(hp, SDHC_TIMEOUT_CTL, SDHC_TIMEOUT_MAX); 477 #if 1 478 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED)) 479 HWRITE4(hp, SDHC_NINTR_STATUS, SDHC_CMD_TIMEOUT_ERROR << 16); 480 #endif 481 482 if (ISSET(caps, SDHC_EMBEDDED_SLOT)) 483 aprint_normal(", embedded slot"); 484 485 /* 486 * Determine SD bus voltage levels supported by the controller. 487 */ 488 aprint_normal(","); 489 if (ISSET(caps, SDHC_HIGH_SPEED_SUPP)) { 490 SET(hp->ocr, MMC_OCR_HCS); 491 aprint_normal(" HS"); 492 } 493 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_NO_1_8_V)) { 494 if (ISSET(caps2, SDHC_SDR50_SUPP)) { 495 SET(hp->ocr, MMC_OCR_S18A); 496 aprint_normal(" SDR50"); 497 } 498 if (ISSET(caps2, SDHC_DDR50_SUPP)) { 499 SET(hp->ocr, MMC_OCR_S18A); 500 aprint_normal(" DDR50"); 501 } 502 if (ISSET(caps2, SDHC_SDR104_SUPP)) { 503 SET(hp->ocr, MMC_OCR_S18A); 504 aprint_normal(" SDR104 HS200"); 505 } 506 if (ISSET(caps, SDHC_VOLTAGE_SUPP_1_8V)) { 507 SET(hp->ocr, MMC_OCR_1_65V_1_95V); 508 aprint_normal(" 1.8V"); 509 } 510 } 511 if (ISSET(caps, SDHC_VOLTAGE_SUPP_3_0V)) { 512 SET(hp->ocr, MMC_OCR_2_9V_3_0V | MMC_OCR_3_0V_3_1V); 513 aprint_normal(" 3.0V"); 514 } 515 if (ISSET(caps, SDHC_VOLTAGE_SUPP_3_3V)) { 516 SET(hp->ocr, MMC_OCR_3_2V_3_3V | MMC_OCR_3_3V_3_4V); 517 aprint_normal(" 3.3V"); 518 } 519 if (hp->specver >= SDHC_SPEC_VERS_300) { 520 aprint_normal(", re-tuning mode %d", retuning_mode + 1); 521 if (hp->tuning_timer_count) 522 aprint_normal(" (%us timer)", hp->tuning_timer_count); 523 } 524 525 /* 526 * Determine the maximum block length supported by the host 527 * controller. (2.2.24) 528 */ 529 switch((caps >> SDHC_MAX_BLK_LEN_SHIFT) & SDHC_MAX_BLK_LEN_MASK) { 530 case SDHC_MAX_BLK_LEN_512: 531 hp->maxblklen = 512; 532 break; 533 534 case SDHC_MAX_BLK_LEN_1024: 535 hp->maxblklen = 1024; 536 break; 537 538 case SDHC_MAX_BLK_LEN_2048: 539 hp->maxblklen = 2048; 540 break; 541 542 case SDHC_MAX_BLK_LEN_4096: 543 hp->maxblklen = 4096; 544 break; 545 546 default: 547 aprint_error_dev(sc->sc_dev, "max block length unknown\n"); 548 goto err; 549 } 550 aprint_normal(", %u byte blocks", hp->maxblklen); 551 aprint_normal("\n"); 552 553 if (ISSET(hp->flags, SHF_USE_ADMA2_MASK)) { 554 int rseg; 555 556 /* Allocate ADMA2 descriptor memory */ 557 error = bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE, PAGE_SIZE, 558 PAGE_SIZE, hp->adma_segs, 1, &rseg, BUS_DMA_WAITOK); 559 if (error) { 560 aprint_error_dev(sc->sc_dev, 561 "ADMA2 dmamem_alloc failed (%d)\n", error); 562 goto adma_done; 563 } 564 error = bus_dmamem_map(sc->sc_dmat, hp->adma_segs, rseg, 565 PAGE_SIZE, (void **)&hp->adma2, BUS_DMA_WAITOK); 566 if (error) { 567 aprint_error_dev(sc->sc_dev, 568 "ADMA2 dmamem_map failed (%d)\n", error); 569 goto adma_done; 570 } 571 error = bus_dmamap_create(sc->sc_dmat, PAGE_SIZE, 1, PAGE_SIZE, 572 0, BUS_DMA_WAITOK, &hp->adma_map); 573 if (error) { 574 aprint_error_dev(sc->sc_dev, 575 "ADMA2 dmamap_create failed (%d)\n", error); 576 goto adma_done; 577 } 578 error = bus_dmamap_load(sc->sc_dmat, hp->adma_map, 579 hp->adma2, PAGE_SIZE, NULL, 580 BUS_DMA_WAITOK|BUS_DMA_WRITE); 581 if (error) { 582 aprint_error_dev(sc->sc_dev, 583 "ADMA2 dmamap_load failed (%d)\n", error); 584 goto adma_done; 585 } 586 587 memset(hp->adma2, 0, PAGE_SIZE); 588 589 adma_done: 590 if (error) 591 CLR(hp->flags, SHF_USE_ADMA2_MASK); 592 } 593 594 /* 595 * Attach the generic SD/MMC bus driver. (The bus driver must 596 * not invoke any chipset functions before it is attached.) 597 */ 598 memset(&saa, 0, sizeof(saa)); 599 saa.saa_busname = "sdmmc"; 600 saa.saa_sct = &sdhc_functions; 601 saa.saa_sch = hp; 602 saa.saa_dmat = hp->dmat; 603 saa.saa_clkmax = hp->clkbase; 604 if (ISSET(sc->sc_flags, SDHC_FLAG_HAVE_CGM)) 605 saa.saa_clkmin = hp->clkbase / 256 / 2046; 606 else if (ISSET(sc->sc_flags, SDHC_FLAG_HAVE_DVS)) 607 saa.saa_clkmin = hp->clkbase / 256 / 16; 608 else if (hp->sc->sc_clkmsk != 0) 609 saa.saa_clkmin = hp->clkbase / (hp->sc->sc_clkmsk >> 610 (ffs(hp->sc->sc_clkmsk) - 1)); 611 else if (hp->specver >= SDHC_SPEC_VERS_300) 612 saa.saa_clkmin = hp->clkbase / 0x3ff; 613 else 614 saa.saa_clkmin = hp->clkbase / 256; 615 if (!ISSET(sc->sc_flags, SDHC_FLAG_NO_AUTO_STOP)) 616 saa.saa_caps |= SMC_CAPS_AUTO_STOP; 617 saa.saa_caps |= SMC_CAPS_4BIT_MODE; 618 if (ISSET(sc->sc_flags, SDHC_FLAG_8BIT_MODE)) 619 saa.saa_caps |= SMC_CAPS_8BIT_MODE; 620 if (ISSET(caps, SDHC_HIGH_SPEED_SUPP)) 621 saa.saa_caps |= SMC_CAPS_SD_HIGHSPEED; 622 if (ISSET(caps2, SDHC_SDR104_SUPP)) 623 saa.saa_caps |= SMC_CAPS_UHS_SDR104 | 624 SMC_CAPS_UHS_SDR50 | 625 SMC_CAPS_MMC_HS200; 626 if (ISSET(caps2, SDHC_SDR50_SUPP)) 627 saa.saa_caps |= SMC_CAPS_UHS_SDR50; 628 if (ISSET(caps2, SDHC_DDR50_SUPP)) 629 saa.saa_caps |= SMC_CAPS_UHS_DDR50; 630 if (ISSET(hp->flags, SHF_USE_DMA)) { 631 saa.saa_caps |= SMC_CAPS_DMA; 632 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED)) 633 saa.saa_caps |= SMC_CAPS_MULTI_SEG_DMA; 634 } 635 if (ISSET(sc->sc_flags, SDHC_FLAG_SINGLE_ONLY)) 636 saa.saa_caps |= SMC_CAPS_SINGLE_ONLY; 637 if (ISSET(sc->sc_flags, SDHC_FLAG_POLL_CARD_DET)) 638 saa.saa_caps |= SMC_CAPS_POLL_CARD_DET; 639 640 if (ISSET(sc->sc_flags, SDHC_FLAG_BROKEN_ADMA2_ZEROLEN)) 641 saa.saa_max_seg = 65535; 642 643 hp->sdmmc = config_found(sc->sc_dev, &saa, sdhc_cfprint); 644 645 return 0; 646 647 err: 648 callout_destroy(&hp->tuning_timer); 649 cv_destroy(&hp->intr_cv); 650 mutex_destroy(&hp->intr_lock); 651 free(hp, M_DEVBUF); 652 sc->sc_host[--sc->sc_nhosts] = NULL; 653 err1: 654 return 1; 655 } 656 657 int 658 sdhc_detach(struct sdhc_softc *sc, int flags) 659 { 660 struct sdhc_host *hp; 661 int rv = 0; 662 663 for (size_t n = 0; n < sc->sc_nhosts; n++) { 664 hp = sc->sc_host[n]; 665 if (hp == NULL) 666 continue; 667 if (hp->sdmmc != NULL) { 668 rv = config_detach(hp->sdmmc, flags); 669 if (rv) 670 break; 671 hp->sdmmc = NULL; 672 } 673 /* disable interrupts */ 674 if ((flags & DETACH_FORCE) == 0) { 675 mutex_enter(&hp->intr_lock); 676 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_32BIT_ACCESS)) { 677 HWRITE4(hp, SDHC_NINTR_SIGNAL_EN, 0); 678 } else { 679 HWRITE2(hp, SDHC_NINTR_SIGNAL_EN, 0); 680 } 681 sdhc_soft_reset(hp, SDHC_RESET_ALL); 682 mutex_exit(&hp->intr_lock); 683 } 684 callout_halt(&hp->tuning_timer, NULL); 685 callout_destroy(&hp->tuning_timer); 686 cv_destroy(&hp->intr_cv); 687 mutex_destroy(&hp->intr_lock); 688 if (hp->ios > 0) { 689 bus_space_unmap(hp->iot, hp->ioh, hp->ios); 690 hp->ios = 0; 691 } 692 if (ISSET(hp->flags, SHF_USE_ADMA2_MASK)) { 693 bus_dmamap_unload(sc->sc_dmat, hp->adma_map); 694 bus_dmamap_destroy(sc->sc_dmat, hp->adma_map); 695 bus_dmamem_unmap(sc->sc_dmat, hp->adma2, PAGE_SIZE); 696 bus_dmamem_free(sc->sc_dmat, hp->adma_segs, 1); 697 } 698 free(hp, M_DEVBUF); 699 sc->sc_host[n] = NULL; 700 } 701 702 return rv; 703 } 704 705 bool 706 sdhc_suspend(device_t dev, const pmf_qual_t *qual) 707 { 708 struct sdhc_softc *sc = device_private(dev); 709 struct sdhc_host *hp; 710 size_t i; 711 712 /* XXX poll for command completion or suspend command 713 * in progress */ 714 715 /* Save the host controller state. */ 716 for (size_t n = 0; n < sc->sc_nhosts; n++) { 717 hp = sc->sc_host[n]; 718 if (ISSET(sc->sc_flags, SDHC_FLAG_32BIT_ACCESS)) { 719 for (i = 0; i < sizeof hp->regs; i += 4) { 720 uint32_t v = HREAD4(hp, i); 721 hp->regs[i + 0] = (v >> 0); 722 hp->regs[i + 1] = (v >> 8); 723 if (i + 3 < sizeof hp->regs) { 724 hp->regs[i + 2] = (v >> 16); 725 hp->regs[i + 3] = (v >> 24); 726 } 727 } 728 } else { 729 for (i = 0; i < sizeof hp->regs; i++) { 730 hp->regs[i] = HREAD1(hp, i); 731 } 732 } 733 } 734 return true; 735 } 736 737 bool 738 sdhc_resume(device_t dev, const pmf_qual_t *qual) 739 { 740 struct sdhc_softc *sc = device_private(dev); 741 struct sdhc_host *hp; 742 size_t i; 743 744 /* Restore the host controller state. */ 745 for (size_t n = 0; n < sc->sc_nhosts; n++) { 746 hp = sc->sc_host[n]; 747 (void)sdhc_host_reset(hp); 748 if (ISSET(sc->sc_flags, SDHC_FLAG_32BIT_ACCESS)) { 749 for (i = 0; i < sizeof hp->regs; i += 4) { 750 if (i + 3 < sizeof hp->regs) { 751 HWRITE4(hp, i, 752 (hp->regs[i + 0] << 0) 753 | (hp->regs[i + 1] << 8) 754 | (hp->regs[i + 2] << 16) 755 | (hp->regs[i + 3] << 24)); 756 } else { 757 HWRITE4(hp, i, 758 (hp->regs[i + 0] << 0) 759 | (hp->regs[i + 1] << 8)); 760 } 761 } 762 } else { 763 for (i = 0; i < sizeof hp->regs; i++) { 764 HWRITE1(hp, i, hp->regs[i]); 765 } 766 } 767 } 768 return true; 769 } 770 771 bool 772 sdhc_shutdown(device_t dev, int flags) 773 { 774 struct sdhc_softc *sc = device_private(dev); 775 struct sdhc_host *hp; 776 777 /* XXX chip locks up if we don't disable it before reboot. */ 778 for (size_t i = 0; i < sc->sc_nhosts; i++) { 779 hp = sc->sc_host[i]; 780 (void)sdhc_host_reset(hp); 781 } 782 return true; 783 } 784 785 /* 786 * Reset the host controller. Called during initialization, when 787 * cards are removed, upon resume, and during error recovery. 788 */ 789 static int 790 sdhc_host_reset1(sdmmc_chipset_handle_t sch) 791 { 792 struct sdhc_host *hp = (struct sdhc_host *)sch; 793 uint32_t sdhcimask; 794 int error; 795 796 KASSERT(mutex_owned(&hp->intr_lock)); 797 798 /* Disable all interrupts. */ 799 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_32BIT_ACCESS)) { 800 HWRITE4(hp, SDHC_NINTR_SIGNAL_EN, 0); 801 } else { 802 HWRITE2(hp, SDHC_NINTR_SIGNAL_EN, 0); 803 } 804 805 /* Let sdhc_bus_power restore power */ 806 hp->vdd = 0; 807 808 /* 809 * Reset the entire host controller and wait up to 100ms for 810 * the controller to clear the reset bit. 811 */ 812 error = sdhc_soft_reset(hp, SDHC_RESET_ALL); 813 if (error) 814 goto out; 815 816 /* Set data timeout counter value to max for now. */ 817 HWRITE1(hp, SDHC_TIMEOUT_CTL, SDHC_TIMEOUT_MAX); 818 #if 1 819 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED)) 820 HWRITE4(hp, SDHC_NINTR_STATUS, SDHC_CMD_TIMEOUT_ERROR << 16); 821 #endif 822 823 /* Enable interrupts. */ 824 sdhcimask = SDHC_CARD_REMOVAL | SDHC_CARD_INSERTION | 825 SDHC_BUFFER_READ_READY | SDHC_BUFFER_WRITE_READY | 826 SDHC_DMA_INTERRUPT | SDHC_BLOCK_GAP_EVENT | 827 SDHC_TRANSFER_COMPLETE | SDHC_COMMAND_COMPLETE; 828 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_32BIT_ACCESS)) { 829 sdhcimask |= SDHC_EINTR_STATUS_MASK << 16; 830 HWRITE4(hp, SDHC_NINTR_STATUS_EN, sdhcimask); 831 sdhcimask ^= 832 (SDHC_EINTR_STATUS_MASK ^ SDHC_EINTR_SIGNAL_MASK) << 16; 833 sdhcimask ^= SDHC_BUFFER_READ_READY ^ SDHC_BUFFER_WRITE_READY; 834 HWRITE4(hp, SDHC_NINTR_SIGNAL_EN, sdhcimask); 835 } else { 836 HWRITE2(hp, SDHC_NINTR_STATUS_EN, sdhcimask); 837 HWRITE2(hp, SDHC_EINTR_STATUS_EN, SDHC_EINTR_STATUS_MASK); 838 sdhcimask ^= SDHC_BUFFER_READ_READY ^ SDHC_BUFFER_WRITE_READY; 839 HWRITE2(hp, SDHC_NINTR_SIGNAL_EN, sdhcimask); 840 HWRITE2(hp, SDHC_EINTR_SIGNAL_EN, SDHC_EINTR_SIGNAL_MASK); 841 } 842 843 out: 844 return error; 845 } 846 847 static int 848 sdhc_host_reset(sdmmc_chipset_handle_t sch) 849 { 850 struct sdhc_host *hp = (struct sdhc_host *)sch; 851 int error; 852 853 mutex_enter(&hp->intr_lock); 854 error = sdhc_host_reset1(sch); 855 mutex_exit(&hp->intr_lock); 856 857 return error; 858 } 859 860 static uint32_t 861 sdhc_host_ocr(sdmmc_chipset_handle_t sch) 862 { 863 struct sdhc_host *hp = (struct sdhc_host *)sch; 864 865 return hp->ocr; 866 } 867 868 static int 869 sdhc_host_maxblklen(sdmmc_chipset_handle_t sch) 870 { 871 struct sdhc_host *hp = (struct sdhc_host *)sch; 872 873 return hp->maxblklen; 874 } 875 876 /* 877 * Return non-zero if the card is currently inserted. 878 */ 879 static int 880 sdhc_card_detect(sdmmc_chipset_handle_t sch) 881 { 882 struct sdhc_host *hp = (struct sdhc_host *)sch; 883 int r; 884 885 if (hp->sc->sc_vendor_card_detect) 886 return (*hp->sc->sc_vendor_card_detect)(hp->sc); 887 888 r = ISSET(HREAD4(hp, SDHC_PRESENT_STATE), SDHC_CARD_INSERTED); 889 890 return r ? 1 : 0; 891 } 892 893 /* 894 * Return non-zero if the card is currently write-protected. 895 */ 896 static int 897 sdhc_write_protect(sdmmc_chipset_handle_t sch) 898 { 899 struct sdhc_host *hp = (struct sdhc_host *)sch; 900 int r; 901 902 if (hp->sc->sc_vendor_write_protect) 903 return (*hp->sc->sc_vendor_write_protect)(hp->sc); 904 905 r = ISSET(HREAD4(hp, SDHC_PRESENT_STATE), SDHC_WRITE_PROTECT_SWITCH); 906 907 return r ? 0 : 1; 908 } 909 910 /* 911 * Set or change SD bus voltage and enable or disable SD bus power. 912 * Return zero on success. 913 */ 914 static int 915 sdhc_bus_power(sdmmc_chipset_handle_t sch, uint32_t ocr) 916 { 917 struct sdhc_host *hp = (struct sdhc_host *)sch; 918 uint8_t vdd; 919 int error = 0; 920 const uint32_t pcmask = 921 ~(SDHC_BUS_POWER | (SDHC_VOLTAGE_MASK << SDHC_VOLTAGE_SHIFT)); 922 uint32_t reg; 923 924 mutex_enter(&hp->intr_lock); 925 926 /* 927 * Disable bus power before voltage change. 928 */ 929 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_32BIT_ACCESS) 930 && !ISSET(hp->sc->sc_flags, SDHC_FLAG_NO_PWR0)) { 931 hp->vdd = 0; 932 HWRITE1(hp, SDHC_POWER_CTL, 0); 933 } 934 935 /* If power is disabled, reset the host and return now. */ 936 if (ocr == 0) { 937 (void)sdhc_host_reset1(hp); 938 callout_halt(&hp->tuning_timer, &hp->intr_lock); 939 goto out; 940 } 941 942 /* 943 * Select the lowest voltage according to capabilities. 944 */ 945 ocr &= hp->ocr; 946 if (ISSET(ocr, MMC_OCR_1_65V_1_95V)) { 947 vdd = SDHC_VOLTAGE_1_8V; 948 } else if (ISSET(ocr, MMC_OCR_2_9V_3_0V|MMC_OCR_3_0V_3_1V)) { 949 vdd = SDHC_VOLTAGE_3_0V; 950 } else if (ISSET(ocr, MMC_OCR_3_2V_3_3V|MMC_OCR_3_3V_3_4V)) { 951 vdd = SDHC_VOLTAGE_3_3V; 952 } else { 953 /* Unsupported voltage level requested. */ 954 error = EINVAL; 955 goto out; 956 } 957 958 /* 959 * Did voltage change ? 960 */ 961 if (vdd == hp->vdd) 962 goto out; 963 964 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED)) { 965 /* 966 * Enable bus power. Wait at least 1 ms (or 74 clocks) plus 967 * voltage ramp until power rises. 968 */ 969 970 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_SINGLE_POWER_WRITE)) { 971 HWRITE1(hp, SDHC_POWER_CTL, 972 (vdd << SDHC_VOLTAGE_SHIFT) | SDHC_BUS_POWER); 973 } else { 974 reg = HREAD1(hp, SDHC_POWER_CTL) & pcmask; 975 HWRITE1(hp, SDHC_POWER_CTL, reg); 976 sdmmc_delay(1); 977 reg |= (vdd << SDHC_VOLTAGE_SHIFT); 978 HWRITE1(hp, SDHC_POWER_CTL, reg); 979 sdmmc_delay(1); 980 reg |= SDHC_BUS_POWER; 981 HWRITE1(hp, SDHC_POWER_CTL, reg); 982 sdmmc_delay(10000); 983 } 984 985 /* 986 * The host system may not power the bus due to battery low, 987 * etc. In that case, the host controller should clear the 988 * bus power bit. 989 */ 990 if (!ISSET(HREAD1(hp, SDHC_POWER_CTL), SDHC_BUS_POWER)) { 991 error = ENXIO; 992 goto out; 993 } 994 } 995 996 /* power successfully changed */ 997 hp->vdd = vdd; 998 999 out: 1000 mutex_exit(&hp->intr_lock); 1001 1002 return error; 1003 } 1004 1005 /* 1006 * Return the smallest possible base clock frequency divisor value 1007 * for the CLOCK_CTL register to produce `freq' (KHz). 1008 */ 1009 static bool 1010 sdhc_clock_divisor(struct sdhc_host *hp, u_int freq, u_int *divp) 1011 { 1012 u_int div; 1013 1014 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_HAVE_CGM)) { 1015 for (div = hp->clkbase / freq; div <= 0x3ff; div++) { 1016 if ((hp->clkbase / div) <= freq) { 1017 *divp = SDHC_SDCLK_CGM 1018 | ((div & 0x300) << SDHC_SDCLK_XDIV_SHIFT) 1019 | ((div & 0x0ff) << SDHC_SDCLK_DIV_SHIFT); 1020 //freq = hp->clkbase / div; 1021 return true; 1022 } 1023 } 1024 /* No divisor found. */ 1025 return false; 1026 } 1027 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_HAVE_DVS)) { 1028 u_int dvs = (hp->clkbase + freq - 1) / freq; 1029 u_int roundup = dvs & 1; 1030 for (dvs >>= 1, div = 1; div <= 256; div <<= 1, dvs >>= 1) { 1031 if (dvs + roundup <= 16) { 1032 dvs += roundup - 1; 1033 *divp = (div << SDHC_SDCLK_DIV_SHIFT) 1034 | (dvs << SDHC_SDCLK_DVS_SHIFT); 1035 DPRINTF(2, 1036 ("%s: divisor for freq %u is %u * %u\n", 1037 HDEVNAME(hp), freq, div * 2, dvs + 1)); 1038 //freq = hp->clkbase / (div * 2) * (dvs + 1); 1039 return true; 1040 } 1041 /* 1042 * If we drop bits, we need to round up the divisor. 1043 */ 1044 roundup |= dvs & 1; 1045 } 1046 /* No divisor found. */ 1047 return false; 1048 } 1049 if (hp->sc->sc_clkmsk != 0) { 1050 div = howmany(hp->clkbase, freq); 1051 if (div > (hp->sc->sc_clkmsk >> (ffs(hp->sc->sc_clkmsk) - 1))) 1052 return false; 1053 *divp = div << (ffs(hp->sc->sc_clkmsk) - 1); 1054 //freq = hp->clkbase / div; 1055 return true; 1056 } 1057 if (hp->specver >= SDHC_SPEC_VERS_300) { 1058 div = howmany(hp->clkbase, freq); 1059 div = div > 1 ? howmany(div, 2) : 0; 1060 if (div > 0x3ff) 1061 return false; 1062 *divp = (((div >> 8) & SDHC_SDCLK_XDIV_MASK) 1063 << SDHC_SDCLK_XDIV_SHIFT) | 1064 (((div >> 0) & SDHC_SDCLK_DIV_MASK) 1065 << SDHC_SDCLK_DIV_SHIFT); 1066 //freq = hp->clkbase / (div ? div * 2 : 1); 1067 return true; 1068 } else { 1069 for (div = 1; div <= 256; div *= 2) { 1070 if ((hp->clkbase / div) <= freq) { 1071 *divp = (div / 2) << SDHC_SDCLK_DIV_SHIFT; 1072 //freq = hp->clkbase / div; 1073 return true; 1074 } 1075 } 1076 /* No divisor found. */ 1077 return false; 1078 } 1079 /* No divisor found. */ 1080 return false; 1081 } 1082 1083 /* 1084 * Set or change SDCLK frequency or disable the SD clock. 1085 * Return zero on success. 1086 */ 1087 static int 1088 sdhc_bus_clock_ddr(sdmmc_chipset_handle_t sch, int freq, bool ddr) 1089 { 1090 struct sdhc_host *hp = (struct sdhc_host *)sch; 1091 u_int div; 1092 u_int timo; 1093 int16_t reg; 1094 int error = 0; 1095 bool present __diagused; 1096 1097 mutex_enter(&hp->intr_lock); 1098 1099 #ifdef DIAGNOSTIC 1100 present = ISSET(HREAD4(hp, SDHC_PRESENT_STATE), SDHC_CMD_INHIBIT_MASK); 1101 1102 /* Must not stop the clock if commands are in progress. */ 1103 if (present && sdhc_card_detect(hp)) { 1104 aprint_normal_dev(hp->sc->sc_dev, 1105 "%s: command in progress\n", __func__); 1106 } 1107 #endif 1108 1109 if (hp->sc->sc_vendor_bus_clock) { 1110 error = (*hp->sc->sc_vendor_bus_clock)(hp->sc, freq); 1111 if (error != 0) 1112 goto out; 1113 } 1114 1115 /* 1116 * Stop SD clock before changing the frequency. 1117 */ 1118 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_USDHC)) { 1119 HCLR4(hp, SDHC_VEND_SPEC, 1120 SDHC_VEND_SPEC_CARD_CLK_SOFT_EN | 1121 SDHC_VEND_SPEC_FRC_SDCLK_ON); 1122 if (freq == SDMMC_SDCLK_OFF) { 1123 goto out; 1124 } 1125 } else if (ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED)) { 1126 HCLR4(hp, SDHC_CLOCK_CTL, 0xfff8); 1127 if (freq == SDMMC_SDCLK_OFF) { 1128 HSET4(hp, SDHC_CLOCK_CTL, 0x80f0); 1129 goto out; 1130 } 1131 } else { 1132 HCLR2(hp, SDHC_CLOCK_CTL, SDHC_SDCLK_ENABLE); 1133 if (freq == SDMMC_SDCLK_OFF) 1134 goto out; 1135 } 1136 1137 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_USDHC)) { 1138 if (ddr) 1139 HSET4(hp, SDHC_MIX_CTRL, SDHC_USDHC_DDR_EN); 1140 else 1141 HCLR4(hp, SDHC_MIX_CTRL, SDHC_USDHC_DDR_EN); 1142 } else if (hp->specver >= SDHC_SPEC_VERS_300) { 1143 HCLR2(hp, SDHC_HOST_CTL2, SDHC_UHS_MODE_SELECT_MASK); 1144 if (freq > 100000) { 1145 HSET2(hp, SDHC_HOST_CTL2, SDHC_UHS_MODE_SELECT_SDR104); 1146 } else if (freq > 50000) { 1147 if (ddr) { 1148 HSET2(hp, SDHC_HOST_CTL2, 1149 SDHC_UHS_MODE_SELECT_DDR50); 1150 } else { 1151 HSET2(hp, SDHC_HOST_CTL2, 1152 SDHC_UHS_MODE_SELECT_SDR50); 1153 } 1154 } else if (freq > 25000) { 1155 if (ddr) { 1156 HSET2(hp, SDHC_HOST_CTL2, 1157 SDHC_UHS_MODE_SELECT_DDR50); 1158 } else { 1159 HSET2(hp, SDHC_HOST_CTL2, 1160 SDHC_UHS_MODE_SELECT_SDR25); 1161 } 1162 } else if (freq > 400) { 1163 HSET2(hp, SDHC_HOST_CTL2, SDHC_UHS_MODE_SELECT_SDR12); 1164 } 1165 } 1166 1167 /* 1168 * Slow down Ricoh 5U823 controller that isn't reliable 1169 * at 100MHz bus clock. 1170 */ 1171 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_SLOW_SDR50)) { 1172 if (freq == 100000) 1173 --freq; 1174 } 1175 1176 /* 1177 * Set the minimum base clock frequency divisor. 1178 */ 1179 if (!sdhc_clock_divisor(hp, freq, &div)) { 1180 /* Invalid base clock frequency or `freq' value. */ 1181 aprint_error_dev(hp->sc->sc_dev, 1182 "Invalid bus clock %d kHz\n", freq); 1183 error = EINVAL; 1184 goto out; 1185 } 1186 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_USDHC)) { 1187 if (ddr) { 1188 /* in ddr mode, divisor >>= 1 */ 1189 div = ((div >> 1) & (SDHC_SDCLK_DIV_MASK << 1190 SDHC_SDCLK_DIV_SHIFT)) | 1191 (div & (SDHC_SDCLK_DVS_MASK << 1192 SDHC_SDCLK_DVS_SHIFT)); 1193 } 1194 for (timo = 1000; timo > 0; timo--) { 1195 if (ISSET(HREAD4(hp, SDHC_PRESENT_STATE), SDHC_SDSTB)) 1196 break; 1197 sdmmc_delay(10); 1198 } 1199 HWRITE4(hp, SDHC_CLOCK_CTL, 1200 div | (SDHC_TIMEOUT_MAX << 16) | 0x0f); 1201 } else if (ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED)) { 1202 HWRITE4(hp, SDHC_CLOCK_CTL, 1203 div | (SDHC_TIMEOUT_MAX << 16)); 1204 } else { 1205 reg = HREAD2(hp, SDHC_CLOCK_CTL); 1206 reg &= (SDHC_INTCLK_STABLE | SDHC_INTCLK_ENABLE); 1207 HWRITE2(hp, SDHC_CLOCK_CTL, reg | div); 1208 } 1209 1210 /* 1211 * Start internal clock. Wait 10ms for stabilization. 1212 */ 1213 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_USDHC)) { 1214 HSET4(hp, SDHC_VEND_SPEC, 1215 SDHC_VEND_SPEC_CARD_CLK_SOFT_EN | 1216 SDHC_VEND_SPEC_FRC_SDCLK_ON); 1217 } else if (ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED)) { 1218 sdmmc_delay(10000); 1219 HSET4(hp, SDHC_CLOCK_CTL, 1220 8 | SDHC_INTCLK_ENABLE | SDHC_INTCLK_STABLE); 1221 } else { 1222 HSET2(hp, SDHC_CLOCK_CTL, SDHC_INTCLK_ENABLE); 1223 for (timo = 1000; timo > 0; timo--) { 1224 if (ISSET(HREAD2(hp, SDHC_CLOCK_CTL), 1225 SDHC_INTCLK_STABLE)) 1226 break; 1227 sdmmc_delay(10); 1228 } 1229 if (timo == 0) { 1230 error = ETIMEDOUT; 1231 DPRINTF(1,("%s: timeout\n", __func__)); 1232 goto out; 1233 } 1234 } 1235 1236 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED | SDHC_FLAG_USDHC)) { 1237 HSET1(hp, SDHC_SOFTWARE_RESET, SDHC_INIT_ACTIVE); 1238 /* 1239 * Sending 80 clocks at 400kHz takes 200us. 1240 * So delay for that time + slop and then 1241 * check a few times for completion. 1242 */ 1243 sdmmc_delay(210); 1244 for (timo = 10; timo > 0; timo--) { 1245 if (!ISSET(HREAD1(hp, SDHC_SOFTWARE_RESET), 1246 SDHC_INIT_ACTIVE)) 1247 break; 1248 sdmmc_delay(10); 1249 } 1250 DPRINTF(2,("%s: %u init spins\n", __func__, 10 - timo)); 1251 1252 /* 1253 * Enable SD clock. 1254 */ 1255 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_USDHC)) { 1256 HSET4(hp, SDHC_VEND_SPEC, 1257 SDHC_VEND_SPEC_CARD_CLK_SOFT_EN | 1258 SDHC_VEND_SPEC_FRC_SDCLK_ON); 1259 } else { 1260 HSET4(hp, SDHC_CLOCK_CTL, SDHC_SDCLK_ENABLE); 1261 } 1262 } else { 1263 /* 1264 * Enable SD clock. 1265 */ 1266 HSET2(hp, SDHC_CLOCK_CTL, SDHC_SDCLK_ENABLE); 1267 1268 if (freq > 25000 && 1269 !ISSET(hp->sc->sc_flags, SDHC_FLAG_NO_HS_BIT)) 1270 HSET1(hp, SDHC_HOST_CTL, SDHC_HIGH_SPEED); 1271 else 1272 HCLR1(hp, SDHC_HOST_CTL, SDHC_HIGH_SPEED); 1273 } 1274 1275 if (hp->sc->sc_vendor_bus_clock_post) { 1276 error = (*hp->sc->sc_vendor_bus_clock_post)(hp->sc, freq); 1277 if (error != 0) 1278 goto out; 1279 } 1280 1281 out: 1282 mutex_exit(&hp->intr_lock); 1283 1284 return error; 1285 } 1286 1287 static int 1288 sdhc_bus_width(sdmmc_chipset_handle_t sch, int width) 1289 { 1290 struct sdhc_host *hp = (struct sdhc_host *)sch; 1291 int reg; 1292 1293 switch (width) { 1294 case 1: 1295 case 4: 1296 break; 1297 1298 case 8: 1299 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_8BIT_MODE)) 1300 break; 1301 /* FALLTHROUGH */ 1302 default: 1303 DPRINTF(0,("%s: unsupported bus width (%d)\n", 1304 HDEVNAME(hp), width)); 1305 return 1; 1306 } 1307 1308 if (hp->sc->sc_vendor_bus_width) { 1309 const int error = hp->sc->sc_vendor_bus_width(hp->sc, width); 1310 if (error != 0) 1311 return error; 1312 } 1313 1314 mutex_enter(&hp->intr_lock); 1315 1316 reg = HREAD1(hp, SDHC_HOST_CTL); 1317 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED | SDHC_FLAG_USDHC)) { 1318 reg &= ~(SDHC_4BIT_MODE|SDHC_ESDHC_8BIT_MODE); 1319 if (width == 4) 1320 reg |= SDHC_4BIT_MODE; 1321 else if (width == 8) 1322 reg |= SDHC_ESDHC_8BIT_MODE; 1323 } else { 1324 reg &= ~SDHC_4BIT_MODE; 1325 if (hp->specver >= SDHC_SPEC_VERS_300) { 1326 reg &= ~SDHC_8BIT_MODE; 1327 } 1328 if (width == 4) { 1329 reg |= SDHC_4BIT_MODE; 1330 } else if (width == 8 && hp->specver >= SDHC_SPEC_VERS_300) { 1331 reg |= SDHC_8BIT_MODE; 1332 } 1333 } 1334 HWRITE1(hp, SDHC_HOST_CTL, reg); 1335 1336 mutex_exit(&hp->intr_lock); 1337 1338 return 0; 1339 } 1340 1341 static int 1342 sdhc_bus_rod(sdmmc_chipset_handle_t sch, int on) 1343 { 1344 struct sdhc_host *hp = (struct sdhc_host *)sch; 1345 1346 if (hp->sc->sc_vendor_rod) 1347 return (*hp->sc->sc_vendor_rod)(hp->sc, on); 1348 1349 return 0; 1350 } 1351 1352 static void 1353 sdhc_card_enable_intr(sdmmc_chipset_handle_t sch, int enable) 1354 { 1355 struct sdhc_host *hp = (struct sdhc_host *)sch; 1356 1357 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED | SDHC_FLAG_USDHC)) { 1358 mutex_enter(&hp->intr_lock); 1359 if (enable) { 1360 HSET2(hp, SDHC_NINTR_STATUS_EN, SDHC_CARD_INTERRUPT); 1361 HSET2(hp, SDHC_NINTR_SIGNAL_EN, SDHC_CARD_INTERRUPT); 1362 } else { 1363 HCLR2(hp, SDHC_NINTR_SIGNAL_EN, SDHC_CARD_INTERRUPT); 1364 HCLR2(hp, SDHC_NINTR_STATUS_EN, SDHC_CARD_INTERRUPT); 1365 } 1366 mutex_exit(&hp->intr_lock); 1367 } 1368 } 1369 1370 static void 1371 sdhc_card_intr_ack(sdmmc_chipset_handle_t sch) 1372 { 1373 struct sdhc_host *hp = (struct sdhc_host *)sch; 1374 1375 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED | SDHC_FLAG_USDHC)) { 1376 mutex_enter(&hp->intr_lock); 1377 HSET2(hp, SDHC_NINTR_STATUS_EN, SDHC_CARD_INTERRUPT); 1378 mutex_exit(&hp->intr_lock); 1379 } 1380 } 1381 1382 static int 1383 sdhc_signal_voltage(sdmmc_chipset_handle_t sch, int signal_voltage) 1384 { 1385 struct sdhc_host *hp = (struct sdhc_host *)sch; 1386 int error = 0; 1387 1388 if (hp->specver < SDHC_SPEC_VERS_300) 1389 return EINVAL; 1390 1391 mutex_enter(&hp->intr_lock); 1392 switch (signal_voltage) { 1393 case SDMMC_SIGNAL_VOLTAGE_180: 1394 if (hp->sc->sc_vendor_signal_voltage != NULL) { 1395 error = hp->sc->sc_vendor_signal_voltage(hp->sc, 1396 signal_voltage); 1397 if (error != 0) 1398 break; 1399 } 1400 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_USDHC)) 1401 HSET2(hp, SDHC_HOST_CTL2, SDHC_1_8V_SIGNAL_EN); 1402 break; 1403 case SDMMC_SIGNAL_VOLTAGE_330: 1404 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_USDHC)) 1405 HCLR2(hp, SDHC_HOST_CTL2, SDHC_1_8V_SIGNAL_EN); 1406 if (hp->sc->sc_vendor_signal_voltage != NULL) { 1407 error = hp->sc->sc_vendor_signal_voltage(hp->sc, 1408 signal_voltage); 1409 if (error != 0) 1410 break; 1411 } 1412 break; 1413 default: 1414 error = EINVAL; 1415 break; 1416 } 1417 mutex_exit(&hp->intr_lock); 1418 1419 return error; 1420 } 1421 1422 /* 1423 * Sampling clock tuning procedure (UHS) 1424 */ 1425 static int 1426 sdhc_execute_tuning1(struct sdhc_host *hp, int timing) 1427 { 1428 struct sdmmc_command cmd; 1429 uint8_t hostctl; 1430 int opcode, error, retry = 40; 1431 1432 KASSERT(mutex_owned(&hp->intr_lock)); 1433 1434 hp->tuning_timing = timing; 1435 1436 switch (timing) { 1437 case SDMMC_TIMING_MMC_HS200: 1438 opcode = MMC_SEND_TUNING_BLOCK_HS200; 1439 break; 1440 case SDMMC_TIMING_UHS_SDR50: 1441 if (!ISSET(hp->sc->sc_caps2, SDHC_TUNING_SDR50)) 1442 return 0; 1443 /* FALLTHROUGH */ 1444 case SDMMC_TIMING_UHS_SDR104: 1445 opcode = MMC_SEND_TUNING_BLOCK; 1446 break; 1447 default: 1448 return EINVAL; 1449 } 1450 1451 hostctl = HREAD1(hp, SDHC_HOST_CTL); 1452 1453 /* enable buffer read ready interrupt */ 1454 HSET2(hp, SDHC_NINTR_SIGNAL_EN, SDHC_BUFFER_READ_READY); 1455 HSET2(hp, SDHC_NINTR_STATUS_EN, SDHC_BUFFER_READ_READY); 1456 1457 /* disable DMA */ 1458 HCLR1(hp, SDHC_HOST_CTL, SDHC_DMA_SELECT); 1459 1460 /* reset tuning circuit */ 1461 HCLR2(hp, SDHC_HOST_CTL2, SDHC_SAMPLING_CLOCK_SEL); 1462 1463 /* start of tuning */ 1464 HWRITE2(hp, SDHC_HOST_CTL2, SDHC_EXECUTE_TUNING); 1465 1466 do { 1467 memset(&cmd, 0, sizeof(cmd)); 1468 cmd.c_opcode = opcode; 1469 cmd.c_arg = 0; 1470 cmd.c_flags = SCF_CMD_ADTC | SCF_CMD_READ | SCF_RSP_R1; 1471 if (ISSET(hostctl, SDHC_8BIT_MODE)) { 1472 cmd.c_blklen = cmd.c_datalen = 128; 1473 } else { 1474 cmd.c_blklen = cmd.c_datalen = 64; 1475 } 1476 1477 error = sdhc_start_command(hp, &cmd); 1478 if (error) 1479 break; 1480 1481 if (!sdhc_wait_intr(hp, SDHC_BUFFER_READ_READY, 1482 SDHC_TUNING_TIMEOUT, false)) { 1483 break; 1484 } 1485 1486 delay(1000); 1487 } while (HREAD2(hp, SDHC_HOST_CTL2) & SDHC_EXECUTE_TUNING && --retry); 1488 1489 /* disable buffer read ready interrupt */ 1490 HCLR2(hp, SDHC_NINTR_SIGNAL_EN, SDHC_BUFFER_READ_READY); 1491 HCLR2(hp, SDHC_NINTR_STATUS_EN, SDHC_BUFFER_READ_READY); 1492 1493 if (HREAD2(hp, SDHC_HOST_CTL2) & SDHC_EXECUTE_TUNING) { 1494 HCLR2(hp, SDHC_HOST_CTL2, 1495 SDHC_SAMPLING_CLOCK_SEL|SDHC_EXECUTE_TUNING); 1496 sdhc_soft_reset(hp, SDHC_RESET_DAT|SDHC_RESET_CMD); 1497 aprint_error_dev(hp->sc->sc_dev, 1498 "tuning did not complete, using fixed sampling clock\n"); 1499 return 0; /* tuning did not complete */ 1500 } 1501 1502 if ((HREAD2(hp, SDHC_HOST_CTL2) & SDHC_SAMPLING_CLOCK_SEL) == 0) { 1503 HCLR2(hp, SDHC_HOST_CTL2, 1504 SDHC_SAMPLING_CLOCK_SEL|SDHC_EXECUTE_TUNING); 1505 sdhc_soft_reset(hp, SDHC_RESET_DAT|SDHC_RESET_CMD); 1506 aprint_error_dev(hp->sc->sc_dev, 1507 "tuning failed, using fixed sampling clock\n"); 1508 return 0; /* tuning failed */ 1509 } 1510 1511 if (hp->tuning_timer_count) { 1512 callout_schedule(&hp->tuning_timer, 1513 hz * hp->tuning_timer_count); 1514 } 1515 1516 return 0; /* tuning completed */ 1517 } 1518 1519 static int 1520 sdhc_execute_tuning(sdmmc_chipset_handle_t sch, int timing) 1521 { 1522 struct sdhc_host *hp = (struct sdhc_host *)sch; 1523 int error; 1524 1525 mutex_enter(&hp->intr_lock); 1526 error = sdhc_execute_tuning1(hp, timing); 1527 mutex_exit(&hp->intr_lock); 1528 return error; 1529 } 1530 1531 static void 1532 sdhc_tuning_timer(void *arg) 1533 { 1534 struct sdhc_host *hp = arg; 1535 1536 atomic_swap_uint(&hp->tuning_timer_pending, 1); 1537 } 1538 1539 static void 1540 sdhc_hw_reset(sdmmc_chipset_handle_t sch) 1541 { 1542 struct sdhc_host *hp = (struct sdhc_host *)sch; 1543 struct sdhc_softc *sc = hp->sc; 1544 1545 if (sc->sc_vendor_hw_reset != NULL) 1546 sc->sc_vendor_hw_reset(sc, hp); 1547 } 1548 1549 static int 1550 sdhc_wait_state(struct sdhc_host *hp, uint32_t mask, uint32_t value) 1551 { 1552 uint32_t state; 1553 int timeout; 1554 1555 for (timeout = 100000; timeout > 0; timeout--) { 1556 if (((state = HREAD4(hp, SDHC_PRESENT_STATE)) & mask) == value) 1557 return 0; 1558 sdmmc_delay(10); 1559 } 1560 aprint_error_dev(hp->sc->sc_dev, "timeout waiting for mask %#x value %#x (state=%#x)\n", 1561 mask, value, state); 1562 return ETIMEDOUT; 1563 } 1564 1565 static void 1566 sdhc_exec_command(sdmmc_chipset_handle_t sch, struct sdmmc_command *cmd) 1567 { 1568 struct sdhc_host *hp = (struct sdhc_host *)sch; 1569 int error; 1570 bool probing; 1571 1572 mutex_enter(&hp->intr_lock); 1573 1574 if (atomic_cas_uint(&hp->tuning_timer_pending, 1, 0) == 1) { 1575 (void)sdhc_execute_tuning1(hp, hp->tuning_timing); 1576 } 1577 1578 if (cmd->c_data && 1579 ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED | SDHC_FLAG_USDHC)) { 1580 const uint16_t ready = SDHC_BUFFER_READ_READY | SDHC_BUFFER_WRITE_READY; 1581 if (ISSET(hp->flags, SHF_USE_DMA)) { 1582 HCLR2(hp, SDHC_NINTR_SIGNAL_EN, ready); 1583 HCLR2(hp, SDHC_NINTR_STATUS_EN, ready); 1584 } else { 1585 HSET2(hp, SDHC_NINTR_SIGNAL_EN, ready); 1586 HSET2(hp, SDHC_NINTR_STATUS_EN, ready); 1587 } 1588 } 1589 1590 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_NO_TIMEOUT)) { 1591 const uint16_t eintr = SDHC_CMD_TIMEOUT_ERROR; 1592 if (cmd->c_data != NULL) { 1593 HCLR2(hp, SDHC_EINTR_SIGNAL_EN, eintr); 1594 HCLR2(hp, SDHC_EINTR_STATUS_EN, eintr); 1595 } else { 1596 HSET2(hp, SDHC_EINTR_SIGNAL_EN, eintr); 1597 HSET2(hp, SDHC_EINTR_STATUS_EN, eintr); 1598 } 1599 } 1600 1601 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_STOP_WITH_TC)) { 1602 if (cmd->c_opcode == MMC_STOP_TRANSMISSION) 1603 SET(cmd->c_flags, SCF_RSP_BSY); 1604 } 1605 1606 /* 1607 * Start the MMC command, or mark `cmd' as failed and return. 1608 */ 1609 error = sdhc_start_command(hp, cmd); 1610 if (error) { 1611 cmd->c_error = error; 1612 goto out; 1613 } 1614 1615 /* 1616 * Wait until the command phase is done, or until the command 1617 * is marked done for any other reason. 1618 */ 1619 probing = (cmd->c_flags & SCF_TOUT_OK) != 0; 1620 if (!sdhc_wait_intr(hp, SDHC_COMMAND_COMPLETE, SDHC_COMMAND_TIMEOUT*3, probing)) { 1621 DPRINTF(1,("%s: timeout for command\n", __func__)); 1622 sdmmc_delay(50); 1623 cmd->c_error = ETIMEDOUT; 1624 goto out; 1625 } 1626 1627 /* 1628 * The host controller removes bits [0:7] from the response 1629 * data (CRC) and we pass the data up unchanged to the bus 1630 * driver (without padding). 1631 */ 1632 if (cmd->c_error == 0 && ISSET(cmd->c_flags, SCF_RSP_PRESENT)) { 1633 cmd->c_resp[0] = HREAD4(hp, SDHC_RESPONSE + 0); 1634 if (ISSET(cmd->c_flags, SCF_RSP_136)) { 1635 cmd->c_resp[1] = HREAD4(hp, SDHC_RESPONSE + 4); 1636 cmd->c_resp[2] = HREAD4(hp, SDHC_RESPONSE + 8); 1637 cmd->c_resp[3] = HREAD4(hp, SDHC_RESPONSE + 12); 1638 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_RSP136_CRC)) { 1639 cmd->c_resp[0] = (cmd->c_resp[0] >> 8) | 1640 (cmd->c_resp[1] << 24); 1641 cmd->c_resp[1] = (cmd->c_resp[1] >> 8) | 1642 (cmd->c_resp[2] << 24); 1643 cmd->c_resp[2] = (cmd->c_resp[2] >> 8) | 1644 (cmd->c_resp[3] << 24); 1645 cmd->c_resp[3] = (cmd->c_resp[3] >> 8); 1646 } 1647 } 1648 } 1649 DPRINTF(1,("%s: resp = %08x\n", HDEVNAME(hp), cmd->c_resp[0])); 1650 1651 /* 1652 * If the command has data to transfer in any direction, 1653 * execute the transfer now. 1654 */ 1655 if (cmd->c_error == 0 && cmd->c_data != NULL) 1656 sdhc_transfer_data(hp, cmd); 1657 else if (ISSET(cmd->c_flags, SCF_RSP_BSY)) { 1658 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_NO_BUSY_INTR) && 1659 !sdhc_wait_intr(hp, SDHC_TRANSFER_COMPLETE, hz * 10, false)) { 1660 DPRINTF(1,("%s: sdhc_exec_command: RSP_BSY\n", 1661 HDEVNAME(hp))); 1662 cmd->c_error = ETIMEDOUT; 1663 goto out; 1664 } 1665 } 1666 1667 out: 1668 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED) 1669 && !ISSET(hp->sc->sc_flags, SDHC_FLAG_NO_LED_ON)) { 1670 /* Turn off the LED. */ 1671 HCLR1(hp, SDHC_HOST_CTL, SDHC_LED_ON); 1672 } 1673 SET(cmd->c_flags, SCF_ITSDONE); 1674 1675 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_NO_AUTO_STOP) && 1676 cmd->c_opcode == MMC_STOP_TRANSMISSION) 1677 (void)sdhc_soft_reset(hp, SDHC_RESET_CMD|SDHC_RESET_DAT); 1678 1679 mutex_exit(&hp->intr_lock); 1680 1681 DPRINTF(1,("%s: cmd %d %s (flags=%08x error=%d)\n", HDEVNAME(hp), 1682 cmd->c_opcode, (cmd->c_error == 0) ? "done" : "abort", 1683 cmd->c_flags, cmd->c_error)); 1684 } 1685 1686 static int 1687 sdhc_start_command(struct sdhc_host *hp, struct sdmmc_command *cmd) 1688 { 1689 struct sdhc_softc * const sc = hp->sc; 1690 uint16_t blksize = 0; 1691 uint16_t blkcount = 0; 1692 uint16_t mode; 1693 uint16_t command; 1694 uint32_t pmask; 1695 int error; 1696 1697 KASSERT(mutex_owned(&hp->intr_lock)); 1698 1699 DPRINTF(1,("%s: start cmd %d arg=%08x data=%p dlen=%d flags=%08x, status=%#x\n", 1700 HDEVNAME(hp), cmd->c_opcode, cmd->c_arg, cmd->c_data, 1701 cmd->c_datalen, cmd->c_flags, HREAD4(hp, SDHC_NINTR_STATUS))); 1702 1703 /* 1704 * The maximum block length for commands should be the minimum 1705 * of the host buffer size and the card buffer size. (1.7.2) 1706 */ 1707 1708 /* Fragment the data into proper blocks. */ 1709 if (cmd->c_datalen > 0) { 1710 blksize = MIN(cmd->c_datalen, cmd->c_blklen); 1711 blkcount = cmd->c_datalen / blksize; 1712 if (cmd->c_datalen % blksize > 0) { 1713 /* XXX: Split this command. (1.7.4) */ 1714 aprint_error_dev(sc->sc_dev, 1715 "data not a multiple of %u bytes\n", blksize); 1716 return EINVAL; 1717 } 1718 } 1719 1720 /* Check limit imposed by 9-bit block count. (1.7.2) */ 1721 if (blkcount > SDHC_BLOCK_COUNT_MAX) { 1722 aprint_error_dev(sc->sc_dev, "too much data\n"); 1723 return EINVAL; 1724 } 1725 1726 /* Prepare transfer mode register value. (2.2.5) */ 1727 mode = SDHC_BLOCK_COUNT_ENABLE; 1728 if (ISSET(cmd->c_flags, SCF_CMD_READ)) 1729 mode |= SDHC_READ_MODE; 1730 if (blkcount > 1) { 1731 mode |= SDHC_MULTI_BLOCK_MODE; 1732 /* XXX only for memory commands? */ 1733 if (!ISSET(sc->sc_flags, SDHC_FLAG_NO_AUTO_STOP)) 1734 mode |= SDHC_AUTO_CMD12_ENABLE; 1735 } 1736 if (cmd->c_dmamap != NULL && cmd->c_datalen > 0 && 1737 ISSET(hp->flags, SHF_MODE_DMAEN)) { 1738 mode |= SDHC_DMA_ENABLE; 1739 } 1740 1741 /* 1742 * Prepare command register value. (2.2.6) 1743 */ 1744 command = (cmd->c_opcode & SDHC_COMMAND_INDEX_MASK) << SDHC_COMMAND_INDEX_SHIFT; 1745 1746 if (ISSET(cmd->c_flags, SCF_RSP_CRC)) 1747 command |= SDHC_CRC_CHECK_ENABLE; 1748 if (ISSET(cmd->c_flags, SCF_RSP_IDX)) 1749 command |= SDHC_INDEX_CHECK_ENABLE; 1750 if (cmd->c_datalen > 0) 1751 command |= SDHC_DATA_PRESENT_SELECT; 1752 1753 if (!ISSET(cmd->c_flags, SCF_RSP_PRESENT)) 1754 command |= SDHC_NO_RESPONSE; 1755 else if (ISSET(cmd->c_flags, SCF_RSP_136)) 1756 command |= SDHC_RESP_LEN_136; 1757 else if (ISSET(cmd->c_flags, SCF_RSP_BSY)) 1758 command |= SDHC_RESP_LEN_48_CHK_BUSY; 1759 else 1760 command |= SDHC_RESP_LEN_48; 1761 1762 /* Wait until command and optionally data inhibit bits are clear. (1.5) */ 1763 pmask = SDHC_CMD_INHIBIT_CMD; 1764 if (cmd->c_flags & (SCF_CMD_ADTC|SCF_RSP_BSY)) 1765 pmask |= SDHC_CMD_INHIBIT_DAT; 1766 error = sdhc_wait_state(hp, pmask, 0); 1767 if (error) { 1768 (void) sdhc_soft_reset(hp, SDHC_RESET_DAT|SDHC_RESET_CMD); 1769 device_printf(sc->sc_dev, "command or data phase inhibited\n"); 1770 return error; 1771 } 1772 1773 DPRINTF(1,("%s: writing cmd: blksize=%d blkcnt=%d mode=%04x cmd=%04x\n", 1774 HDEVNAME(hp), blksize, blkcount, mode, command)); 1775 1776 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED | SDHC_FLAG_USDHC)) { 1777 blksize |= (MAX(0, PAGE_SHIFT - 12) & SDHC_DMA_BOUNDARY_MASK) << 1778 SDHC_DMA_BOUNDARY_SHIFT; /* PAGE_SIZE DMA boundary */ 1779 } 1780 1781 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED)) { 1782 /* Alert the user not to remove the card. */ 1783 HSET1(hp, SDHC_HOST_CTL, SDHC_LED_ON); 1784 } 1785 1786 /* Set DMA start address. */ 1787 if (ISSET(hp->flags, SHF_USE_ADMA2_MASK) && cmd->c_data != NULL) { 1788 for (int seg = 0; seg < cmd->c_dmamap->dm_nsegs; seg++) { 1789 bus_addr_t paddr = 1790 cmd->c_dmamap->dm_segs[seg].ds_addr; 1791 uint16_t len = 1792 cmd->c_dmamap->dm_segs[seg].ds_len == 65536 ? 1793 0 : cmd->c_dmamap->dm_segs[seg].ds_len; 1794 uint16_t attr = 1795 SDHC_ADMA2_VALID | SDHC_ADMA2_ACT_TRANS; 1796 if (seg == cmd->c_dmamap->dm_nsegs - 1) { 1797 attr |= SDHC_ADMA2_END; 1798 } 1799 if (ISSET(hp->flags, SHF_USE_ADMA2_32)) { 1800 struct sdhc_adma2_descriptor32 *desc = 1801 hp->adma2; 1802 desc[seg].attribute = htole16(attr); 1803 desc[seg].length = htole16(len); 1804 desc[seg].address = htole32(paddr); 1805 } else { 1806 struct sdhc_adma2_descriptor64 *desc = 1807 hp->adma2; 1808 desc[seg].attribute = htole16(attr); 1809 desc[seg].length = htole16(len); 1810 desc[seg].address = htole32(paddr & 0xffffffff); 1811 desc[seg].address_hi = htole32( 1812 (uint64_t)paddr >> 32); 1813 } 1814 } 1815 if (ISSET(hp->flags, SHF_USE_ADMA2_32)) { 1816 struct sdhc_adma2_descriptor32 *desc = hp->adma2; 1817 desc[cmd->c_dmamap->dm_nsegs].attribute = htole16(0); 1818 } else { 1819 struct sdhc_adma2_descriptor64 *desc = hp->adma2; 1820 desc[cmd->c_dmamap->dm_nsegs].attribute = htole16(0); 1821 } 1822 bus_dmamap_sync(sc->sc_dmat, hp->adma_map, 0, PAGE_SIZE, 1823 BUS_DMASYNC_PREWRITE); 1824 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_USDHC)) { 1825 HCLR4(hp, SDHC_HOST_CTL, SDHC_USDHC_DMA_SELECT); 1826 HSET4(hp, SDHC_HOST_CTL, SDHC_USDHC_DMA_SELECT_ADMA2); 1827 } else { 1828 HCLR1(hp, SDHC_HOST_CTL, SDHC_DMA_SELECT); 1829 HSET1(hp, SDHC_HOST_CTL, SDHC_DMA_SELECT_ADMA2); 1830 } 1831 1832 const bus_addr_t desc_addr = hp->adma_map->dm_segs[0].ds_addr; 1833 1834 HWRITE4(hp, SDHC_ADMA_SYSTEM_ADDR, desc_addr & 0xffffffff); 1835 if (ISSET(hp->flags, SHF_USE_ADMA2_64)) { 1836 HWRITE4(hp, SDHC_ADMA_SYSTEM_ADDR + 4, 1837 (uint64_t)desc_addr >> 32); 1838 } 1839 } else if (ISSET(mode, SDHC_DMA_ENABLE) && 1840 !ISSET(sc->sc_flags, SDHC_FLAG_EXTERNAL_DMA)) { 1841 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_USDHC)) { 1842 HCLR4(hp, SDHC_HOST_CTL, SDHC_USDHC_DMA_SELECT); 1843 } 1844 HWRITE4(hp, SDHC_DMA_ADDR, cmd->c_dmamap->dm_segs[0].ds_addr); 1845 } 1846 1847 /* 1848 * Start a CPU data transfer. Writing to the high order byte 1849 * of the SDHC_COMMAND register triggers the SD command. (1.5) 1850 */ 1851 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_32BIT_ACCESS)) { 1852 HWRITE4(hp, SDHC_BLOCK_SIZE, blksize | (blkcount << 16)); 1853 HWRITE4(hp, SDHC_ARGUMENT, cmd->c_arg); 1854 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_USDHC)) { 1855 /* mode bits is in MIX_CTRL register on uSDHC */ 1856 HWRITE4(hp, SDHC_MIX_CTRL, mode | 1857 (HREAD4(hp, SDHC_MIX_CTRL) & ~SDHC_TRANSFER_MODE_MASK)); 1858 if (cmd->c_opcode == MMC_STOP_TRANSMISSION) 1859 command |= SDHC_COMMAND_TYPE_ABORT; 1860 HWRITE4(hp, SDHC_TRANSFER_MODE, command << 16); 1861 } else { 1862 HWRITE4(hp, SDHC_TRANSFER_MODE, mode | (command << 16)); 1863 } 1864 } else { 1865 HWRITE2(hp, SDHC_BLOCK_SIZE, blksize); 1866 HWRITE2(hp, SDHC_BLOCK_COUNT, blkcount); 1867 HWRITE4(hp, SDHC_ARGUMENT, cmd->c_arg); 1868 HWRITE2(hp, SDHC_TRANSFER_MODE, mode); 1869 HWRITE2(hp, SDHC_COMMAND, command); 1870 } 1871 1872 return 0; 1873 } 1874 1875 static void 1876 sdhc_transfer_data(struct sdhc_host *hp, struct sdmmc_command *cmd) 1877 { 1878 struct sdhc_softc *sc = hp->sc; 1879 int error; 1880 1881 KASSERT(mutex_owned(&hp->intr_lock)); 1882 1883 DPRINTF(1,("%s: data transfer: resp=%08x datalen=%u\n", HDEVNAME(hp), 1884 MMC_R1(cmd->c_resp), cmd->c_datalen)); 1885 1886 #ifdef SDHC_DEBUG 1887 /* XXX I forgot why I wanted to know when this happens :-( */ 1888 if ((cmd->c_opcode == 52 || cmd->c_opcode == 53) && 1889 ISSET(MMC_R1(cmd->c_resp), 0xcb00)) { 1890 aprint_error_dev(hp->sc->sc_dev, 1891 "CMD52/53 error response flags %#x\n", 1892 MMC_R1(cmd->c_resp) & 0xff00); 1893 } 1894 #endif 1895 1896 if (cmd->c_dmamap != NULL) { 1897 if (hp->sc->sc_vendor_transfer_data_dma != NULL) { 1898 error = hp->sc->sc_vendor_transfer_data_dma(sc, cmd); 1899 if (error == 0 && !sdhc_wait_intr(hp, 1900 SDHC_TRANSFER_COMPLETE, SDHC_DMA_TIMEOUT, false)) { 1901 DPRINTF(1,("%s: timeout\n", __func__)); 1902 error = ETIMEDOUT; 1903 } 1904 } else { 1905 error = sdhc_transfer_data_dma(hp, cmd); 1906 } 1907 } else 1908 error = sdhc_transfer_data_pio(hp, cmd); 1909 if (error) 1910 cmd->c_error = error; 1911 SET(cmd->c_flags, SCF_ITSDONE); 1912 1913 DPRINTF(1,("%s: data transfer done (error=%d)\n", 1914 HDEVNAME(hp), cmd->c_error)); 1915 } 1916 1917 static int 1918 sdhc_transfer_data_dma(struct sdhc_host *hp, struct sdmmc_command *cmd) 1919 { 1920 bus_dma_segment_t *dm_segs = cmd->c_dmamap->dm_segs; 1921 bus_addr_t posaddr; 1922 bus_addr_t segaddr; 1923 bus_size_t seglen; 1924 u_int seg = 0; 1925 int error = 0; 1926 int status; 1927 1928 KASSERT(mutex_owned(&hp->intr_lock)); 1929 KASSERT(HREAD2(hp, SDHC_NINTR_STATUS_EN) & SDHC_DMA_INTERRUPT); 1930 KASSERT(HREAD2(hp, SDHC_NINTR_SIGNAL_EN) & SDHC_DMA_INTERRUPT); 1931 KASSERT(HREAD2(hp, SDHC_NINTR_STATUS_EN) & SDHC_TRANSFER_COMPLETE); 1932 KASSERT(HREAD2(hp, SDHC_NINTR_SIGNAL_EN) & SDHC_TRANSFER_COMPLETE); 1933 1934 for (;;) { 1935 status = sdhc_wait_intr(hp, 1936 SDHC_DMA_INTERRUPT|SDHC_TRANSFER_COMPLETE, 1937 SDHC_DMA_TIMEOUT, false); 1938 1939 if (status & SDHC_TRANSFER_COMPLETE) { 1940 break; 1941 } 1942 if (!status) { 1943 DPRINTF(1,("%s: timeout\n", __func__)); 1944 error = ETIMEDOUT; 1945 break; 1946 } 1947 1948 if (ISSET(hp->flags, SHF_USE_ADMA2_MASK)) { 1949 continue; 1950 } 1951 1952 if ((status & SDHC_DMA_INTERRUPT) == 0) { 1953 continue; 1954 } 1955 1956 /* DMA Interrupt (boundary crossing) */ 1957 1958 segaddr = dm_segs[seg].ds_addr; 1959 seglen = dm_segs[seg].ds_len; 1960 posaddr = HREAD4(hp, SDHC_DMA_ADDR); 1961 1962 if ((seg == (cmd->c_dmamap->dm_nsegs-1)) && (posaddr == (segaddr + seglen))) { 1963 continue; 1964 } 1965 if ((posaddr >= segaddr) && (posaddr < (segaddr + seglen))) 1966 HWRITE4(hp, SDHC_DMA_ADDR, posaddr); 1967 else if ((posaddr >= segaddr) && (posaddr == (segaddr + seglen)) && (seg + 1) < cmd->c_dmamap->dm_nsegs) 1968 HWRITE4(hp, SDHC_DMA_ADDR, dm_segs[++seg].ds_addr); 1969 KASSERT(seg < cmd->c_dmamap->dm_nsegs); 1970 } 1971 1972 if (ISSET(hp->flags, SHF_USE_ADMA2_MASK)) { 1973 bus_dmamap_sync(hp->sc->sc_dmat, hp->adma_map, 0, 1974 PAGE_SIZE, BUS_DMASYNC_POSTWRITE); 1975 } 1976 1977 return error; 1978 } 1979 1980 static int 1981 sdhc_transfer_data_pio(struct sdhc_host *hp, struct sdmmc_command *cmd) 1982 { 1983 uint8_t *data = cmd->c_data; 1984 void (*pio_func)(struct sdhc_host *, uint8_t *, u_int); 1985 u_int len, datalen; 1986 u_int imask; 1987 u_int pmask; 1988 int error = 0; 1989 1990 KASSERT(mutex_owned(&hp->intr_lock)); 1991 1992 if (ISSET(cmd->c_flags, SCF_CMD_READ)) { 1993 imask = SDHC_BUFFER_READ_READY; 1994 pmask = SDHC_BUFFER_READ_ENABLE; 1995 if (ISSET(hp->sc->sc_flags, 1996 SDHC_FLAG_ENHANCED | SDHC_FLAG_USDHC)) { 1997 pio_func = esdhc_read_data_pio; 1998 } else { 1999 pio_func = sdhc_read_data_pio; 2000 } 2001 } else { 2002 imask = SDHC_BUFFER_WRITE_READY; 2003 pmask = SDHC_BUFFER_WRITE_ENABLE; 2004 if (ISSET(hp->sc->sc_flags, 2005 SDHC_FLAG_ENHANCED | SDHC_FLAG_USDHC)) { 2006 pio_func = esdhc_write_data_pio; 2007 } else { 2008 pio_func = sdhc_write_data_pio; 2009 } 2010 } 2011 datalen = cmd->c_datalen; 2012 2013 KASSERT(mutex_owned(&hp->intr_lock)); 2014 KASSERT(HREAD2(hp, SDHC_NINTR_STATUS_EN) & imask); 2015 KASSERT(HREAD2(hp, SDHC_NINTR_STATUS_EN) & SDHC_TRANSFER_COMPLETE); 2016 KASSERT(HREAD2(hp, SDHC_NINTR_SIGNAL_EN) & SDHC_TRANSFER_COMPLETE); 2017 2018 while (datalen > 0) { 2019 if (!ISSET(HREAD4(hp, SDHC_PRESENT_STATE), pmask)) { 2020 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_32BIT_ACCESS)) { 2021 HSET4(hp, SDHC_NINTR_SIGNAL_EN, imask); 2022 } else { 2023 HSET2(hp, SDHC_NINTR_SIGNAL_EN, imask); 2024 } 2025 if (!sdhc_wait_intr(hp, imask, SDHC_BUFFER_TIMEOUT, false)) { 2026 DPRINTF(1,("%s: timeout\n", __func__)); 2027 error = ETIMEDOUT; 2028 break; 2029 } 2030 2031 error = sdhc_wait_state(hp, pmask, pmask); 2032 if (error) 2033 break; 2034 } 2035 2036 len = MIN(datalen, cmd->c_blklen); 2037 (*pio_func)(hp, data, len); 2038 DPRINTF(2,("%s: pio data transfer %u @ %p\n", 2039 HDEVNAME(hp), len, data)); 2040 2041 data += len; 2042 datalen -= len; 2043 } 2044 2045 if (error == 0 && !sdhc_wait_intr(hp, SDHC_TRANSFER_COMPLETE, 2046 SDHC_TRANSFER_TIMEOUT, false)) { 2047 DPRINTF(1,("%s: timeout for transfer\n", __func__)); 2048 error = ETIMEDOUT; 2049 } 2050 2051 return error; 2052 } 2053 2054 static void 2055 sdhc_read_data_pio(struct sdhc_host *hp, uint8_t *data, u_int datalen) 2056 { 2057 2058 if (((__uintptr_t)data & 3) == 0) { 2059 while (datalen > 3) { 2060 *(uint32_t *)data = le32toh(HREAD4(hp, SDHC_DATA)); 2061 data += 4; 2062 datalen -= 4; 2063 } 2064 if (datalen > 1) { 2065 *(uint16_t *)data = le16toh(HREAD2(hp, SDHC_DATA)); 2066 data += 2; 2067 datalen -= 2; 2068 } 2069 if (datalen > 0) { 2070 *data = HREAD1(hp, SDHC_DATA); 2071 data += 1; 2072 datalen -= 1; 2073 } 2074 } else if (((__uintptr_t)data & 1) == 0) { 2075 while (datalen > 1) { 2076 *(uint16_t *)data = le16toh(HREAD2(hp, SDHC_DATA)); 2077 data += 2; 2078 datalen -= 2; 2079 } 2080 if (datalen > 0) { 2081 *data = HREAD1(hp, SDHC_DATA); 2082 data += 1; 2083 datalen -= 1; 2084 } 2085 } else { 2086 while (datalen > 0) { 2087 *data = HREAD1(hp, SDHC_DATA); 2088 data += 1; 2089 datalen -= 1; 2090 } 2091 } 2092 } 2093 2094 static void 2095 sdhc_write_data_pio(struct sdhc_host *hp, uint8_t *data, u_int datalen) 2096 { 2097 2098 if (((__uintptr_t)data & 3) == 0) { 2099 while (datalen > 3) { 2100 HWRITE4(hp, SDHC_DATA, htole32(*(uint32_t *)data)); 2101 data += 4; 2102 datalen -= 4; 2103 } 2104 if (datalen > 1) { 2105 HWRITE2(hp, SDHC_DATA, htole16(*(uint16_t *)data)); 2106 data += 2; 2107 datalen -= 2; 2108 } 2109 if (datalen > 0) { 2110 HWRITE1(hp, SDHC_DATA, *data); 2111 data += 1; 2112 datalen -= 1; 2113 } 2114 } else if (((__uintptr_t)data & 1) == 0) { 2115 while (datalen > 1) { 2116 HWRITE2(hp, SDHC_DATA, htole16(*(uint16_t *)data)); 2117 data += 2; 2118 datalen -= 2; 2119 } 2120 if (datalen > 0) { 2121 HWRITE1(hp, SDHC_DATA, *data); 2122 data += 1; 2123 datalen -= 1; 2124 } 2125 } else { 2126 while (datalen > 0) { 2127 HWRITE1(hp, SDHC_DATA, *data); 2128 data += 1; 2129 datalen -= 1; 2130 } 2131 } 2132 } 2133 2134 static void 2135 esdhc_read_data_pio(struct sdhc_host *hp, uint8_t *data, u_int datalen) 2136 { 2137 uint16_t status = HREAD2(hp, SDHC_NINTR_STATUS); 2138 uint32_t v; 2139 2140 const size_t watermark = (HREAD4(hp, SDHC_WATERMARK_LEVEL) >> SDHC_WATERMARK_READ_SHIFT) & SDHC_WATERMARK_READ_MASK; 2141 size_t count = 0; 2142 2143 while (datalen > 3 && !ISSET(status, SDHC_TRANSFER_COMPLETE)) { 2144 if (count == 0) { 2145 /* 2146 * If we've drained "watermark" words, we need to wait 2147 * a little bit so the read FIFO can refill. 2148 */ 2149 sdmmc_delay(10); 2150 count = watermark; 2151 } 2152 v = HREAD4(hp, SDHC_DATA); 2153 v = le32toh(v); 2154 *(uint32_t *)data = v; 2155 data += 4; 2156 datalen -= 4; 2157 status = HREAD2(hp, SDHC_NINTR_STATUS); 2158 count--; 2159 } 2160 if (datalen > 0 && !ISSET(status, SDHC_TRANSFER_COMPLETE)) { 2161 if (count == 0) { 2162 sdmmc_delay(10); 2163 } 2164 v = HREAD4(hp, SDHC_DATA); 2165 v = le32toh(v); 2166 do { 2167 *data++ = v; 2168 v >>= 8; 2169 } while (--datalen > 0); 2170 } 2171 } 2172 2173 static void 2174 esdhc_write_data_pio(struct sdhc_host *hp, uint8_t *data, u_int datalen) 2175 { 2176 uint16_t status = HREAD2(hp, SDHC_NINTR_STATUS); 2177 uint32_t v; 2178 2179 const size_t watermark = (HREAD4(hp, SDHC_WATERMARK_LEVEL) >> SDHC_WATERMARK_WRITE_SHIFT) & SDHC_WATERMARK_WRITE_MASK; 2180 size_t count = watermark; 2181 2182 while (datalen > 3 && !ISSET(status, SDHC_TRANSFER_COMPLETE)) { 2183 if (count == 0) { 2184 sdmmc_delay(10); 2185 count = watermark; 2186 } 2187 v = *(uint32_t *)data; 2188 v = htole32(v); 2189 HWRITE4(hp, SDHC_DATA, v); 2190 data += 4; 2191 datalen -= 4; 2192 status = HREAD2(hp, SDHC_NINTR_STATUS); 2193 count--; 2194 } 2195 if (datalen > 0 && !ISSET(status, SDHC_TRANSFER_COMPLETE)) { 2196 if (count == 0) { 2197 sdmmc_delay(10); 2198 } 2199 v = *(uint32_t *)data; 2200 v = htole32(v); 2201 HWRITE4(hp, SDHC_DATA, v); 2202 } 2203 } 2204 2205 /* Prepare for another command. */ 2206 static int 2207 sdhc_soft_reset(struct sdhc_host *hp, int mask) 2208 { 2209 int timo; 2210 2211 KASSERT(mutex_owned(&hp->intr_lock)); 2212 2213 DPRINTF(1,("%s: software reset reg=%08x\n", HDEVNAME(hp), mask)); 2214 2215 /* Request the reset. */ 2216 HWRITE1(hp, SDHC_SOFTWARE_RESET, mask); 2217 2218 /* 2219 * If necessary, wait for the controller to set the bits to 2220 * acknowledge the reset. 2221 */ 2222 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_WAIT_RESET) && 2223 ISSET(mask, (SDHC_RESET_DAT | SDHC_RESET_CMD))) { 2224 for (timo = 10000; timo > 0; timo--) { 2225 if (ISSET(HREAD1(hp, SDHC_SOFTWARE_RESET), mask)) 2226 break; 2227 /* Short delay because I worry we may miss it... */ 2228 sdmmc_delay(1); 2229 } 2230 if (timo == 0) { 2231 DPRINTF(1,("%s: timeout for reset on\n", __func__)); 2232 return ETIMEDOUT; 2233 } 2234 } 2235 2236 /* 2237 * Wait for the controller to clear the bits to indicate that 2238 * the reset has completed. 2239 */ 2240 for (timo = 10; timo > 0; timo--) { 2241 if (!ISSET(HREAD1(hp, SDHC_SOFTWARE_RESET), mask)) 2242 break; 2243 sdmmc_delay(10000); 2244 } 2245 if (timo == 0) { 2246 DPRINTF(1,("%s: timeout reg=%08x\n", HDEVNAME(hp), 2247 HREAD1(hp, SDHC_SOFTWARE_RESET))); 2248 return ETIMEDOUT; 2249 } 2250 2251 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED)) { 2252 HSET4(hp, SDHC_DMA_CTL, SDHC_DMA_SNOOP); 2253 } 2254 2255 return 0; 2256 } 2257 2258 static int 2259 sdhc_wait_intr(struct sdhc_host *hp, int mask, int timo, bool probing) 2260 { 2261 int status, error, nointr; 2262 2263 KASSERT(mutex_owned(&hp->intr_lock)); 2264 2265 mask |= SDHC_ERROR_INTERRUPT; 2266 2267 nointr = 0; 2268 status = hp->intr_status & mask; 2269 while (status == 0) { 2270 if (cv_timedwait(&hp->intr_cv, &hp->intr_lock, timo) 2271 == EWOULDBLOCK) { 2272 nointr = 1; 2273 break; 2274 } 2275 status = hp->intr_status & mask; 2276 } 2277 error = hp->intr_error_status; 2278 2279 DPRINTF(2,("%s: intr status %#x error %#x\n", HDEVNAME(hp), status, 2280 error)); 2281 2282 hp->intr_status &= ~status; 2283 hp->intr_error_status &= ~error; 2284 2285 if (ISSET(status, SDHC_ERROR_INTERRUPT)) { 2286 if (ISSET(error, SDHC_DMA_ERROR)) 2287 device_printf(hp->sc->sc_dev,"dma error\n"); 2288 if (ISSET(error, SDHC_ADMA_ERROR)) 2289 device_printf(hp->sc->sc_dev,"adma error\n"); 2290 if (ISSET(error, SDHC_AUTO_CMD12_ERROR)) 2291 device_printf(hp->sc->sc_dev,"auto_cmd12 error\n"); 2292 if (ISSET(error, SDHC_CURRENT_LIMIT_ERROR)) 2293 device_printf(hp->sc->sc_dev,"current limit error\n"); 2294 if (ISSET(error, SDHC_DATA_END_BIT_ERROR)) 2295 device_printf(hp->sc->sc_dev,"data end bit error\n"); 2296 if (ISSET(error, SDHC_DATA_CRC_ERROR)) 2297 device_printf(hp->sc->sc_dev,"data crc error\n"); 2298 if (ISSET(error, SDHC_DATA_TIMEOUT_ERROR)) 2299 device_printf(hp->sc->sc_dev,"data timeout error\n"); 2300 if (ISSET(error, SDHC_CMD_INDEX_ERROR)) 2301 device_printf(hp->sc->sc_dev,"cmd index error\n"); 2302 if (ISSET(error, SDHC_CMD_END_BIT_ERROR)) 2303 device_printf(hp->sc->sc_dev,"cmd end bit error\n"); 2304 if (ISSET(error, SDHC_CMD_CRC_ERROR)) 2305 device_printf(hp->sc->sc_dev,"cmd crc error\n"); 2306 if (ISSET(error, SDHC_CMD_TIMEOUT_ERROR)) { 2307 if (!probing) 2308 device_printf(hp->sc->sc_dev,"cmd timeout error\n"); 2309 #ifdef SDHC_DEBUG 2310 else if (sdhcdebug > 0) 2311 device_printf(hp->sc->sc_dev,"cmd timeout (expected)\n"); 2312 #endif 2313 } 2314 if ((error & ~SDHC_EINTR_STATUS_MASK) != 0) 2315 device_printf(hp->sc->sc_dev,"vendor error %#x\n", 2316 (error & ~SDHC_EINTR_STATUS_MASK)); 2317 if (error == 0) 2318 device_printf(hp->sc->sc_dev,"no error\n"); 2319 2320 /* Command timeout has higher priority than command complete. */ 2321 if (ISSET(error, SDHC_CMD_TIMEOUT_ERROR)) 2322 CLR(status, SDHC_COMMAND_COMPLETE); 2323 2324 /* Transfer complete has higher priority than data timeout. */ 2325 if (ISSET(status, SDHC_TRANSFER_COMPLETE)) 2326 CLR(error, SDHC_DATA_TIMEOUT_ERROR); 2327 } 2328 2329 if (nointr || 2330 (ISSET(status, SDHC_ERROR_INTERRUPT) && error)) { 2331 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED)) 2332 (void)sdhc_soft_reset(hp, SDHC_RESET_CMD|SDHC_RESET_DAT); 2333 hp->intr_error_status = 0; 2334 status = 0; 2335 } 2336 2337 return status; 2338 } 2339 2340 /* 2341 * Established by attachment driver at interrupt priority IPL_SDMMC. 2342 */ 2343 int 2344 sdhc_intr(void *arg) 2345 { 2346 struct sdhc_softc *sc = (struct sdhc_softc *)arg; 2347 struct sdhc_host *hp; 2348 int done = 0; 2349 uint16_t status; 2350 uint16_t error; 2351 2352 /* We got an interrupt, but we don't know from which slot. */ 2353 for (size_t host = 0; host < sc->sc_nhosts; host++) { 2354 hp = sc->sc_host[host]; 2355 if (hp == NULL) 2356 continue; 2357 2358 mutex_enter(&hp->intr_lock); 2359 2360 if (ISSET(sc->sc_flags, SDHC_FLAG_32BIT_ACCESS)) { 2361 /* Find out which interrupts are pending. */ 2362 uint32_t xstatus = HREAD4(hp, SDHC_NINTR_STATUS); 2363 status = xstatus; 2364 error = xstatus >> 16; 2365 if (ISSET(sc->sc_flags, SDHC_FLAG_USDHC) && 2366 (xstatus & SDHC_TRANSFER_COMPLETE) && 2367 !(xstatus & SDHC_DMA_INTERRUPT)) { 2368 /* read again due to uSDHC errata */ 2369 status = xstatus = HREAD4(hp, 2370 SDHC_NINTR_STATUS); 2371 error = xstatus >> 16; 2372 } 2373 if (ISSET(sc->sc_flags, 2374 SDHC_FLAG_ENHANCED | SDHC_FLAG_USDHC)) { 2375 if ((error & SDHC_NINTR_STATUS_MASK) != 0) 2376 SET(status, SDHC_ERROR_INTERRUPT); 2377 } 2378 if (error) 2379 xstatus |= SDHC_ERROR_INTERRUPT; 2380 else if (!ISSET(status, SDHC_NINTR_STATUS_MASK)) 2381 goto next_port; /* no interrupt for us */ 2382 /* Acknowledge the interrupts we are about to handle. */ 2383 HWRITE4(hp, SDHC_NINTR_STATUS, xstatus); 2384 } else { 2385 /* Find out which interrupts are pending. */ 2386 error = 0; 2387 status = HREAD2(hp, SDHC_NINTR_STATUS); 2388 if (!ISSET(status, SDHC_NINTR_STATUS_MASK)) 2389 goto next_port; /* no interrupt for us */ 2390 /* Acknowledge the interrupts we are about to handle. */ 2391 HWRITE2(hp, SDHC_NINTR_STATUS, status); 2392 if (ISSET(status, SDHC_ERROR_INTERRUPT)) { 2393 /* Acknowledge error interrupts. */ 2394 error = HREAD2(hp, SDHC_EINTR_STATUS); 2395 HWRITE2(hp, SDHC_EINTR_STATUS, error); 2396 } 2397 } 2398 2399 DPRINTF(2,("%s: interrupt status=%x error=%x\n", HDEVNAME(hp), 2400 status, error)); 2401 2402 /* Claim this interrupt. */ 2403 done = 1; 2404 2405 if (ISSET(status, SDHC_ERROR_INTERRUPT) && 2406 ISSET(error, SDHC_ADMA_ERROR)) { 2407 uint8_t adma_err = HREAD1(hp, SDHC_ADMA_ERROR_STATUS); 2408 printf("%s: ADMA error, status %02x\n", HDEVNAME(hp), 2409 adma_err); 2410 } 2411 2412 /* 2413 * Wake up the sdmmc event thread to scan for cards. 2414 */ 2415 if (ISSET(status, SDHC_CARD_REMOVAL|SDHC_CARD_INSERTION)) { 2416 if (hp->sdmmc != NULL) { 2417 sdmmc_needs_discover(hp->sdmmc); 2418 } 2419 if (ISSET(sc->sc_flags, 2420 SDHC_FLAG_ENHANCED | SDHC_FLAG_USDHC)) { 2421 HCLR4(hp, SDHC_NINTR_STATUS_EN, 2422 status & (SDHC_CARD_REMOVAL|SDHC_CARD_INSERTION)); 2423 HCLR4(hp, SDHC_NINTR_SIGNAL_EN, 2424 status & (SDHC_CARD_REMOVAL|SDHC_CARD_INSERTION)); 2425 } 2426 } 2427 2428 /* 2429 * Schedule re-tuning process (UHS). 2430 */ 2431 if (ISSET(status, SDHC_RETUNING_EVENT)) { 2432 atomic_swap_uint(&hp->tuning_timer_pending, 1); 2433 } 2434 2435 /* 2436 * Wake up the blocking process to service command 2437 * related interrupt(s). 2438 */ 2439 if (ISSET(status, SDHC_COMMAND_COMPLETE|SDHC_ERROR_INTERRUPT| 2440 SDHC_BUFFER_READ_READY|SDHC_BUFFER_WRITE_READY| 2441 SDHC_TRANSFER_COMPLETE|SDHC_DMA_INTERRUPT)) { 2442 hp->intr_error_status |= error; 2443 hp->intr_status |= status; 2444 if (ISSET(sc->sc_flags, 2445 SDHC_FLAG_ENHANCED | SDHC_FLAG_USDHC)) { 2446 HCLR4(hp, SDHC_NINTR_SIGNAL_EN, 2447 status & (SDHC_BUFFER_READ_READY|SDHC_BUFFER_WRITE_READY)); 2448 } 2449 cv_broadcast(&hp->intr_cv); 2450 } 2451 2452 /* 2453 * Service SD card interrupts. 2454 */ 2455 if (!ISSET(sc->sc_flags, SDHC_FLAG_ENHANCED | SDHC_FLAG_USDHC) 2456 && ISSET(status, SDHC_CARD_INTERRUPT)) { 2457 DPRINTF(0,("%s: card interrupt\n", HDEVNAME(hp))); 2458 HCLR2(hp, SDHC_NINTR_STATUS_EN, SDHC_CARD_INTERRUPT); 2459 sdmmc_card_intr(hp->sdmmc); 2460 } 2461 next_port: 2462 mutex_exit(&hp->intr_lock); 2463 } 2464 2465 return done; 2466 } 2467 2468 kmutex_t * 2469 sdhc_host_lock(struct sdhc_host *hp) 2470 { 2471 return &hp->intr_lock; 2472 } 2473 2474 uint8_t 2475 sdhc_host_read_1(struct sdhc_host *hp, int reg) 2476 { 2477 return HREAD1(hp, reg); 2478 } 2479 2480 uint16_t 2481 sdhc_host_read_2(struct sdhc_host *hp, int reg) 2482 { 2483 return HREAD2(hp, reg); 2484 } 2485 2486 uint32_t 2487 sdhc_host_read_4(struct sdhc_host *hp, int reg) 2488 { 2489 return HREAD4(hp, reg); 2490 } 2491 2492 void 2493 sdhc_host_write_1(struct sdhc_host *hp, int reg, uint8_t val) 2494 { 2495 HWRITE1(hp, reg, val); 2496 } 2497 2498 void 2499 sdhc_host_write_2(struct sdhc_host *hp, int reg, uint16_t val) 2500 { 2501 HWRITE2(hp, reg, val); 2502 } 2503 2504 void 2505 sdhc_host_write_4(struct sdhc_host *hp, int reg, uint32_t val) 2506 { 2507 HWRITE4(hp, reg, val); 2508 } 2509 2510 #ifdef SDHC_DEBUG 2511 void 2512 sdhc_dump_regs(struct sdhc_host *hp) 2513 { 2514 2515 printf("0x%02x PRESENT_STATE: %x\n", SDHC_PRESENT_STATE, 2516 HREAD4(hp, SDHC_PRESENT_STATE)); 2517 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED)) 2518 printf("0x%02x POWER_CTL: %x\n", SDHC_POWER_CTL, 2519 HREAD1(hp, SDHC_POWER_CTL)); 2520 printf("0x%02x NINTR_STATUS: %x\n", SDHC_NINTR_STATUS, 2521 HREAD2(hp, SDHC_NINTR_STATUS)); 2522 printf("0x%02x EINTR_STATUS: %x\n", SDHC_EINTR_STATUS, 2523 HREAD2(hp, SDHC_EINTR_STATUS)); 2524 printf("0x%02x NINTR_STATUS_EN: %x\n", SDHC_NINTR_STATUS_EN, 2525 HREAD2(hp, SDHC_NINTR_STATUS_EN)); 2526 printf("0x%02x EINTR_STATUS_EN: %x\n", SDHC_EINTR_STATUS_EN, 2527 HREAD2(hp, SDHC_EINTR_STATUS_EN)); 2528 printf("0x%02x NINTR_SIGNAL_EN: %x\n", SDHC_NINTR_SIGNAL_EN, 2529 HREAD2(hp, SDHC_NINTR_SIGNAL_EN)); 2530 printf("0x%02x EINTR_SIGNAL_EN: %x\n", SDHC_EINTR_SIGNAL_EN, 2531 HREAD2(hp, SDHC_EINTR_SIGNAL_EN)); 2532 printf("0x%02x CAPABILITIES: %x\n", SDHC_CAPABILITIES, 2533 HREAD4(hp, SDHC_CAPABILITIES)); 2534 printf("0x%02x MAX_CAPABILITIES: %x\n", SDHC_MAX_CAPABILITIES, 2535 HREAD4(hp, SDHC_MAX_CAPABILITIES)); 2536 } 2537 #endif 2538