1 /* $NetBSD: sdhc.c,v 1.93 2015/12/31 11:53:19 ryo Exp $ */ 2 /* $OpenBSD: sdhc.c,v 1.25 2009/01/13 19:44:20 grange Exp $ */ 3 4 /* 5 * Copyright (c) 2006 Uwe Stuehler <uwe@openbsd.org> 6 * 7 * Permission to use, copy, modify, and distribute this software for any 8 * purpose with or without fee is hereby granted, provided that the above 9 * copyright notice and this permission notice appear in all copies. 10 * 11 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 12 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 13 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 14 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 15 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 16 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 17 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 18 */ 19 20 /* 21 * SD Host Controller driver based on the SD Host Controller Standard 22 * Simplified Specification Version 1.00 (www.sdcard.org). 23 */ 24 25 #include <sys/cdefs.h> 26 __KERNEL_RCSID(0, "$NetBSD: sdhc.c,v 1.93 2015/12/31 11:53:19 ryo Exp $"); 27 28 #ifdef _KERNEL_OPT 29 #include "opt_sdmmc.h" 30 #endif 31 32 #include <sys/param.h> 33 #include <sys/device.h> 34 #include <sys/kernel.h> 35 #include <sys/malloc.h> 36 #include <sys/systm.h> 37 #include <sys/mutex.h> 38 #include <sys/condvar.h> 39 #include <sys/atomic.h> 40 41 #include <dev/sdmmc/sdhcreg.h> 42 #include <dev/sdmmc/sdhcvar.h> 43 #include <dev/sdmmc/sdmmcchip.h> 44 #include <dev/sdmmc/sdmmcreg.h> 45 #include <dev/sdmmc/sdmmcvar.h> 46 47 #ifdef SDHC_DEBUG 48 int sdhcdebug = 1; 49 #define DPRINTF(n,s) do { if ((n) <= sdhcdebug) printf s; } while (0) 50 void sdhc_dump_regs(struct sdhc_host *); 51 #else 52 #define DPRINTF(n,s) do {} while (0) 53 #endif 54 55 #define SDHC_COMMAND_TIMEOUT hz 56 #define SDHC_BUFFER_TIMEOUT hz 57 #define SDHC_TRANSFER_TIMEOUT hz 58 #define SDHC_DMA_TIMEOUT (hz*3) 59 #define SDHC_TUNING_TIMEOUT hz 60 61 struct sdhc_host { 62 struct sdhc_softc *sc; /* host controller device */ 63 64 bus_space_tag_t iot; /* host register set tag */ 65 bus_space_handle_t ioh; /* host register set handle */ 66 bus_size_t ios; /* host register space size */ 67 bus_dma_tag_t dmat; /* host DMA tag */ 68 69 device_t sdmmc; /* generic SD/MMC device */ 70 71 u_int clkbase; /* base clock frequency in KHz */ 72 int maxblklen; /* maximum block length */ 73 uint32_t ocr; /* OCR value from capabilities */ 74 75 uint8_t regs[14]; /* host controller state */ 76 77 uint16_t intr_status; /* soft interrupt status */ 78 uint16_t intr_error_status; /* soft error status */ 79 kmutex_t intr_lock; 80 kcondvar_t intr_cv; 81 82 callout_t tuning_timer; 83 int tuning_timing; 84 u_int tuning_timer_count; 85 u_int tuning_timer_pending; 86 87 int specver; /* spec.
version */ 88 89 uint32_t flags; /* flags for this host */ 90 #define SHF_USE_DMA 0x0001 91 #define SHF_USE_4BIT_MODE 0x0002 92 #define SHF_USE_8BIT_MODE 0x0004 93 #define SHF_MODE_DMAEN 0x0008 /* needs SDHC_DMA_ENABLE in mode */ 94 #define SHF_USE_ADMA2_32 0x0010 95 #define SHF_USE_ADMA2_64 0x0020 96 #define SHF_USE_ADMA2_MASK 0x0030 97 98 bus_dmamap_t adma_map; 99 bus_dma_segment_t adma_segs[1]; 100 void *adma2; 101 }; 102 103 #define HDEVNAME(hp) (device_xname((hp)->sc->sc_dev)) 104 105 static uint8_t 106 hread1(struct sdhc_host *hp, bus_size_t reg) 107 { 108 109 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_32BIT_ACCESS)) 110 return bus_space_read_1(hp->iot, hp->ioh, reg); 111 return bus_space_read_4(hp->iot, hp->ioh, reg & -4) >> (8 * (reg & 3)); 112 } 113 114 static uint16_t 115 hread2(struct sdhc_host *hp, bus_size_t reg) 116 { 117 118 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_32BIT_ACCESS)) 119 return bus_space_read_2(hp->iot, hp->ioh, reg); 120 return bus_space_read_4(hp->iot, hp->ioh, reg & -4) >> (8 * (reg & 2)); 121 } 122 123 #define HREAD1(hp, reg) hread1(hp, reg) 124 #define HREAD2(hp, reg) hread2(hp, reg) 125 #define HREAD4(hp, reg) \ 126 (bus_space_read_4((hp)->iot, (hp)->ioh, (reg))) 127 128 129 static void 130 hwrite1(struct sdhc_host *hp, bus_size_t o, uint8_t val) 131 { 132 133 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_32BIT_ACCESS)) { 134 bus_space_write_1(hp->iot, hp->ioh, o, val); 135 } else { 136 const size_t shift = 8 * (o & 3); 137 o &= -4; 138 uint32_t tmp = bus_space_read_4(hp->iot, hp->ioh, o); 139 tmp = (val << shift) | (tmp & ~(0xff << shift)); 140 bus_space_write_4(hp->iot, hp->ioh, o, tmp); 141 } 142 } 143 144 static void 145 hwrite2(struct sdhc_host *hp, bus_size_t o, uint16_t val) 146 { 147 148 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_32BIT_ACCESS)) { 149 bus_space_write_2(hp->iot, hp->ioh, o, val); 150 } else { 151 const size_t shift = 8 * (o & 2); 152 o &= -4; 153 uint32_t tmp = bus_space_read_4(hp->iot, hp->ioh, o); 154 tmp = (val << shift) | (tmp & ~(0xffff << shift)); 155 bus_space_write_4(hp->iot, hp->ioh, o, tmp); 156 } 157 } 158 159 #define HWRITE1(hp, reg, val) hwrite1(hp, reg, val) 160 #define HWRITE2(hp, reg, val) hwrite2(hp, reg, val) 161 #define HWRITE4(hp, reg, val) \ 162 bus_space_write_4((hp)->iot, (hp)->ioh, (reg), (val)) 163 164 #define HCLR1(hp, reg, bits) \ 165 do if (bits) HWRITE1((hp), (reg), HREAD1((hp), (reg)) & ~(bits)); while (0) 166 #define HCLR2(hp, reg, bits) \ 167 do if (bits) HWRITE2((hp), (reg), HREAD2((hp), (reg)) & ~(bits)); while (0) 168 #define HCLR4(hp, reg, bits) \ 169 do if (bits) HWRITE4((hp), (reg), HREAD4((hp), (reg)) & ~(bits)); while (0) 170 #define HSET1(hp, reg, bits) \ 171 do if (bits) HWRITE1((hp), (reg), HREAD1((hp), (reg)) | (bits)); while (0) 172 #define HSET2(hp, reg, bits) \ 173 do if (bits) HWRITE2((hp), (reg), HREAD2((hp), (reg)) | (bits)); while (0) 174 #define HSET4(hp, reg, bits) \ 175 do if (bits) HWRITE4((hp), (reg), HREAD4((hp), (reg)) | (bits)); while (0) 176 177 static int sdhc_host_reset(sdmmc_chipset_handle_t); 178 static int sdhc_host_reset1(sdmmc_chipset_handle_t); 179 static uint32_t sdhc_host_ocr(sdmmc_chipset_handle_t); 180 static int sdhc_host_maxblklen(sdmmc_chipset_handle_t); 181 static int sdhc_card_detect(sdmmc_chipset_handle_t); 182 static int sdhc_write_protect(sdmmc_chipset_handle_t); 183 static int sdhc_bus_power(sdmmc_chipset_handle_t, uint32_t); 184 static int sdhc_bus_clock_ddr(sdmmc_chipset_handle_t, int, bool); 185 static int sdhc_bus_width(sdmmc_chipset_handle_t, int); 186 static int 
sdhc_bus_rod(sdmmc_chipset_handle_t, int); 187 static void sdhc_card_enable_intr(sdmmc_chipset_handle_t, int); 188 static void sdhc_card_intr_ack(sdmmc_chipset_handle_t); 189 static void sdhc_exec_command(sdmmc_chipset_handle_t, 190 struct sdmmc_command *); 191 static int sdhc_signal_voltage(sdmmc_chipset_handle_t, int); 192 static int sdhc_execute_tuning1(struct sdhc_host *, int); 193 static int sdhc_execute_tuning(sdmmc_chipset_handle_t, int); 194 static void sdhc_tuning_timer(void *); 195 static int sdhc_start_command(struct sdhc_host *, struct sdmmc_command *); 196 static int sdhc_wait_state(struct sdhc_host *, uint32_t, uint32_t); 197 static int sdhc_soft_reset(struct sdhc_host *, int); 198 static int sdhc_wait_intr(struct sdhc_host *, int, int, bool); 199 static void sdhc_transfer_data(struct sdhc_host *, struct sdmmc_command *); 200 static int sdhc_transfer_data_dma(struct sdhc_host *, struct sdmmc_command *); 201 static int sdhc_transfer_data_pio(struct sdhc_host *, struct sdmmc_command *); 202 static void sdhc_read_data_pio(struct sdhc_host *, uint8_t *, u_int); 203 static void sdhc_write_data_pio(struct sdhc_host *, uint8_t *, u_int); 204 static void esdhc_read_data_pio(struct sdhc_host *, uint8_t *, u_int); 205 static void esdhc_write_data_pio(struct sdhc_host *, uint8_t *, u_int); 206 207 static struct sdmmc_chip_functions sdhc_functions = { 208 /* host controller reset */ 209 .host_reset = sdhc_host_reset, 210 211 /* host controller capabilities */ 212 .host_ocr = sdhc_host_ocr, 213 .host_maxblklen = sdhc_host_maxblklen, 214 215 /* card detection */ 216 .card_detect = sdhc_card_detect, 217 218 /* write protect */ 219 .write_protect = sdhc_write_protect, 220 221 /* bus power, clock frequency, width and ROD(OpenDrain/PushPull) */ 222 .bus_power = sdhc_bus_power, 223 .bus_clock = NULL, /* see sdhc_bus_clock_ddr */ 224 .bus_width = sdhc_bus_width, 225 .bus_rod = sdhc_bus_rod, 226 227 /* command execution */ 228 .exec_command = sdhc_exec_command, 229 230 /* card interrupt */ 231 .card_enable_intr = sdhc_card_enable_intr, 232 .card_intr_ack = sdhc_card_intr_ack, 233 234 /* UHS functions */ 235 .signal_voltage = sdhc_signal_voltage, 236 .bus_clock_ddr = sdhc_bus_clock_ddr, 237 .execute_tuning = sdhc_execute_tuning, 238 }; 239 240 static int 241 sdhc_cfprint(void *aux, const char *pnp) 242 { 243 const struct sdmmcbus_attach_args * const saa = aux; 244 const struct sdhc_host * const hp = saa->saa_sch; 245 246 if (pnp) { 247 aprint_normal("sdmmc at %s", pnp); 248 } 249 for (size_t host = 0; host < hp->sc->sc_nhosts; host++) { 250 if (hp->sc->sc_host[host] == hp) { 251 aprint_normal(" slot %zu", host); 252 } 253 } 254 255 return UNCONF; 256 } 257 258 /* 259 * Called by attachment driver. For each SD card slot there is one SD 260 * host controller standard register set. (1.3) 261 */ 262 int 263 sdhc_host_found(struct sdhc_softc *sc, bus_space_tag_t iot, 264 bus_space_handle_t ioh, bus_size_t iosize) 265 { 266 struct sdmmcbus_attach_args saa; 267 struct sdhc_host *hp; 268 uint32_t caps, caps2; 269 uint16_t sdhcver; 270 int error; 271 272 /* Allocate one more host structure. */ 273 hp = malloc(sizeof(struct sdhc_host), M_DEVBUF, M_WAITOK|M_ZERO); 274 if (hp == NULL) { 275 aprint_error_dev(sc->sc_dev, 276 "couldn't alloc memory (sdhc host)\n"); 277 goto err1; 278 } 279 sc->sc_host[sc->sc_nhosts++] = hp; 280 281 /* Fill in the new host structure. 
*/ 282 hp->sc = sc; 283 hp->iot = iot; 284 hp->ioh = ioh; 285 hp->ios = iosize; 286 hp->dmat = sc->sc_dmat; 287 288 mutex_init(&hp->intr_lock, MUTEX_DEFAULT, IPL_SDMMC); 289 cv_init(&hp->intr_cv, "sdhcintr"); 290 callout_init(&hp->tuning_timer, CALLOUT_MPSAFE); 291 callout_setfunc(&hp->tuning_timer, sdhc_tuning_timer, hp); 292 293 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_USDHC)) { 294 sdhcver = SDHC_SPEC_VERS_300 << SDHC_SPEC_VERS_SHIFT; 295 } else if (ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED)) { 296 sdhcver = HREAD4(hp, SDHC_ESDHC_HOST_CTL_VERSION); 297 } else { 298 sdhcver = HREAD2(hp, SDHC_HOST_CTL_VERSION); 299 } 300 aprint_normal_dev(sc->sc_dev, "SDHC "); 301 hp->specver = SDHC_SPEC_VERSION(sdhcver); 302 switch (SDHC_SPEC_VERSION(sdhcver)) { 303 case SDHC_SPEC_VERS_100: 304 aprint_normal("1.0"); 305 break; 306 307 case SDHC_SPEC_VERS_200: 308 aprint_normal("2.0"); 309 break; 310 311 case SDHC_SPEC_VERS_300: 312 aprint_normal("3.0"); 313 break; 314 315 case SDHC_SPEC_VERS_400: 316 aprint_normal("4.0"); 317 break; 318 319 default: 320 aprint_normal("unknown version (0x%x)", 321 SDHC_SPEC_VERSION(sdhcver)); 322 break; 323 } 324 aprint_normal(", rev %u", SDHC_VENDOR_VERSION(sdhcver)); 325 326 /* 327 * Reset the host controller and enable interrupts. 328 */ 329 (void)sdhc_host_reset(hp); 330 331 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_USDHC)) { 332 /* init uSDHC registers */ 333 HWRITE4(hp, SDHC_MMC_BOOT, 0); 334 HWRITE4(hp, SDHC_HOST_CTL, SDHC_USDHC_BURST_LEN_EN | 335 SDHC_USDHC_HOST_CTL_RESV23 | SDHC_USDHC_EMODE_LE); 336 HWRITE4(hp, SDHC_WATERMARK_LEVEL, 337 (0x10 << SDHC_WATERMARK_WR_BRST_SHIFT) | 338 (0x40 << SDHC_WATERMARK_WRITE_SHIFT) | 339 (0x10 << SDHC_WATERMARK_RD_BRST_SHIFT) | 340 (0x40 << SDHC_WATERMARK_READ_SHIFT)); 341 HSET4(hp, SDHC_VEND_SPEC, 342 SDHC_VEND_SPEC_MBO | 343 SDHC_VEND_SPEC_CARD_CLK_SOFT_EN | 344 SDHC_VEND_SPEC_IPG_PERCLK_SOFT_EN | 345 SDHC_VEND_SPEC_HCLK_SOFT_EN | 346 SDHC_VEND_SPEC_IPG_CLK_SOFT_EN | 347 SDHC_VEND_SPEC_AC12_WR_CHKBUSY_EN | 348 SDHC_VEND_SPEC_FRC_SDCLK_ON); 349 } 350 351 /* Determine host capabilities. */ 352 if (ISSET(sc->sc_flags, SDHC_FLAG_HOSTCAPS)) { 353 caps = sc->sc_caps; 354 caps2 = sc->sc_caps2; 355 } else if (ISSET(hp->sc->sc_flags, SDHC_FLAG_USDHC)) { 356 /* uSDHC capability register is a little bit different */ 357 caps = HREAD4(hp, SDHC_CAPABILITIES); 358 caps |= SDHC_8BIT_SUPP; 359 if (caps & SDHC_ADMA1_SUPP) 360 caps |= SDHC_ADMA2_SUPP; 361 sc->sc_caps = caps; 362 /* uSDHC has no SDHC_CAPABILITIES2 register */ 363 caps2 = sc->sc_caps2 = SDHC_SDR50_SUPP | SDHC_DDR50_SUPP; 364 } else { 365 caps = sc->sc_caps = HREAD4(hp, SDHC_CAPABILITIES); 366 if (hp->specver >= SDHC_SPEC_VERS_300) { 367 caps2 = sc->sc_caps2 = HREAD4(hp, SDHC_CAPABILITIES2); 368 } else { 369 caps2 = sc->sc_caps2 = 0; 370 } 371 } 372 373 const u_int retuning_mode = (caps2 >> SDHC_RETUNING_MODES_SHIFT) & 374 SDHC_RETUNING_MODES_MASK; 375 if (retuning_mode == SDHC_RETUNING_MODE_1) { 376 hp->tuning_timer_count = (caps2 >> SDHC_TIMER_COUNT_SHIFT) & 377 SDHC_TIMER_COUNT_MASK; 378 if (hp->tuning_timer_count == 0xf) 379 hp->tuning_timer_count = 0; 380 if (hp->tuning_timer_count) 381 hp->tuning_timer_count = 382 1 << (hp->tuning_timer_count - 1); 383 } 384 385 /* 386 * Use DMA if the host system and the controller support it. 387 * Supports an integrated or external DMA engine, with or without 388 * SDHC_DMA_ENABLE in the command.
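 * The selection below prefers ADMA2 (32-bit descriptors, or the 2.00-style
 * 64-bit layout) when SDHC_FLAG_USE_ADMA2 and the ADMA2 capability are both
 * present, then a vendor-supplied platform DMA engine, and finally plain
 * SDMA; SHF_MODE_DMAEN records whether SDHC_DMA_ENABLE must be set in the
 * transfer mode register for the engine that was picked.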
389 */ 390 if (ISSET(sc->sc_flags, SDHC_FLAG_FORCE_DMA) || 391 (ISSET(sc->sc_flags, SDHC_FLAG_USE_DMA) && 392 ISSET(caps, SDHC_DMA_SUPPORT))) { 393 SET(hp->flags, SHF_USE_DMA); 394 395 if (ISSET(sc->sc_flags, SDHC_FLAG_USE_ADMA2) && 396 ISSET(caps, SDHC_ADMA2_SUPP)) { 397 SET(hp->flags, SHF_MODE_DMAEN); 398 /* 399 * 64-bit mode was present in the 2.00 spec, removed 400 * from 3.00, and re-added in 4.00 with a different 401 * descriptor layout. We only support 2.00 and 3.00 402 * descriptors for now. 403 */ 404 if (hp->specver == SDHC_SPEC_VERS_200 && 405 ISSET(caps, SDHC_64BIT_SYS_BUS)) { 406 SET(hp->flags, SHF_USE_ADMA2_64); 407 aprint_normal(", 64-bit ADMA2"); 408 } else { 409 SET(hp->flags, SHF_USE_ADMA2_32); 410 aprint_normal(", 32-bit ADMA2"); 411 } 412 } else { 413 if (!ISSET(sc->sc_flags, SDHC_FLAG_EXTERNAL_DMA) || 414 ISSET(sc->sc_flags, SDHC_FLAG_EXTDMA_DMAEN)) 415 SET(hp->flags, SHF_MODE_DMAEN); 416 if (sc->sc_vendor_transfer_data_dma) { 417 aprint_normal(", platform DMA"); 418 } else { 419 aprint_normal(", SDMA"); 420 } 421 } 422 } else { 423 aprint_normal(", PIO"); 424 } 425 426 /* 427 * Determine the base clock frequency. (2.2.24) 428 */ 429 if (hp->specver >= SDHC_SPEC_VERS_300) { 430 hp->clkbase = SDHC_BASE_V3_FREQ_KHZ(caps); 431 } else { 432 hp->clkbase = SDHC_BASE_FREQ_KHZ(caps); 433 } 434 if (hp->clkbase == 0 || 435 ISSET(sc->sc_flags, SDHC_FLAG_NO_CLKBASE)) { 436 if (sc->sc_clkbase == 0) { 437 /* The attachment driver must tell us. */ 438 aprint_error_dev(sc->sc_dev, 439 "unknown base clock frequency\n"); 440 goto err; 441 } 442 hp->clkbase = sc->sc_clkbase; 443 } 444 if (hp->clkbase < 10000 || hp->clkbase > 10000 * 256) { 445 /* Sanity check; SDHC 1.0 supports only 10-63 MHz. */ 446 aprint_error_dev(sc->sc_dev, 447 "base clock frequency out of range: %u MHz\n", 448 hp->clkbase / 1000); 449 goto err; 450 } 451 aprint_normal(", %u kHz", hp->clkbase); 452 453 /* 454 * XXX Set the data timeout counter value according to 455 * capabilities. (2.2.15) 456 */ 457 HWRITE1(hp, SDHC_TIMEOUT_CTL, SDHC_TIMEOUT_MAX); 458 #if 1 459 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED)) 460 HWRITE4(hp, SDHC_NINTR_STATUS, SDHC_CMD_TIMEOUT_ERROR << 16); 461 #endif 462 463 if (ISSET(caps, SDHC_EMBEDDED_SLOT)) 464 aprint_normal(", embedded slot"); 465 466 /* 467 * Determine SD bus voltage levels supported by the controller. 468 */ 469 aprint_normal(","); 470 if (ISSET(caps, SDHC_HIGH_SPEED_SUPP)) { 471 SET(hp->ocr, MMC_OCR_HCS); 472 aprint_normal(" HS"); 473 } 474 if (ISSET(caps2, SDHC_SDR50_SUPP)) { 475 SET(hp->ocr, MMC_OCR_S18A); 476 aprint_normal(" SDR50"); 477 } 478 if (ISSET(caps2, SDHC_DDR50_SUPP)) { 479 SET(hp->ocr, MMC_OCR_S18A); 480 aprint_normal(" DDR50"); 481 } 482 if (ISSET(caps2, SDHC_SDR104_SUPP)) { 483 SET(hp->ocr, MMC_OCR_S18A); 484 aprint_normal(" SDR104 HS200"); 485 } 486 if (ISSET(caps, SDHC_VOLTAGE_SUPP_1_8V)) { 487 SET(hp->ocr, MMC_OCR_1_7V_1_8V | MMC_OCR_1_8V_1_9V); 488 aprint_normal(" 1.8V"); 489 } 490 if (ISSET(caps, SDHC_VOLTAGE_SUPP_3_0V)) { 491 SET(hp->ocr, MMC_OCR_2_9V_3_0V | MMC_OCR_3_0V_3_1V); 492 aprint_normal(" 3.0V"); 493 } 494 if (ISSET(caps, SDHC_VOLTAGE_SUPP_3_3V)) { 495 SET(hp->ocr, MMC_OCR_3_2V_3_3V | MMC_OCR_3_3V_3_4V); 496 aprint_normal(" 3.3V"); 497 } 498 if (hp->specver >= SDHC_SPEC_VERS_300) { 499 aprint_normal(", re-tuning mode %d", retuning_mode + 1); 500 if (hp->tuning_timer_count) 501 aprint_normal(" (%us timer)", hp->tuning_timer_count); 502 } 503 504 /* 505 * Determine the maximum block length supported by the host 506 * controller.
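 * The 2-bit MAX_BLK_LEN capability field decoded below selects 512, 1024,
 * 2048 or 4096 byte blocks.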
(2.2.24) 507 */ 508 switch((caps >> SDHC_MAX_BLK_LEN_SHIFT) & SDHC_MAX_BLK_LEN_MASK) { 509 case SDHC_MAX_BLK_LEN_512: 510 hp->maxblklen = 512; 511 break; 512 513 case SDHC_MAX_BLK_LEN_1024: 514 hp->maxblklen = 1024; 515 break; 516 517 case SDHC_MAX_BLK_LEN_2048: 518 hp->maxblklen = 2048; 519 break; 520 521 case SDHC_MAX_BLK_LEN_4096: 522 hp->maxblklen = 4096; 523 break; 524 525 default: 526 aprint_error_dev(sc->sc_dev, "max block length unknown\n"); 527 goto err; 528 } 529 aprint_normal(", %u byte blocks", hp->maxblklen); 530 aprint_normal("\n"); 531 532 if (ISSET(hp->flags, SHF_USE_ADMA2_MASK)) { 533 int rseg; 534 535 /* Allocate ADMA2 descriptor memory */ 536 error = bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE, PAGE_SIZE, 537 PAGE_SIZE, hp->adma_segs, 1, &rseg, BUS_DMA_WAITOK); 538 if (error) { 539 aprint_error_dev(sc->sc_dev, 540 "ADMA2 dmamem_alloc failed (%d)\n", error); 541 goto adma_done; 542 } 543 error = bus_dmamem_map(sc->sc_dmat, hp->adma_segs, rseg, 544 PAGE_SIZE, (void **)&hp->adma2, BUS_DMA_WAITOK); 545 if (error) { 546 aprint_error_dev(sc->sc_dev, 547 "ADMA2 dmamem_map failed (%d)\n", error); 548 goto adma_done; 549 } 550 error = bus_dmamap_create(sc->sc_dmat, PAGE_SIZE, 1, PAGE_SIZE, 551 0, BUS_DMA_WAITOK, &hp->adma_map); 552 if (error) { 553 aprint_error_dev(sc->sc_dev, 554 "ADMA2 dmamap_create failed (%d)\n", error); 555 goto adma_done; 556 } 557 error = bus_dmamap_load(sc->sc_dmat, hp->adma_map, 558 hp->adma2, PAGE_SIZE, NULL, 559 BUS_DMA_WAITOK|BUS_DMA_WRITE); 560 if (error) { 561 aprint_error_dev(sc->sc_dev, 562 "ADMA2 dmamap_load failed (%d)\n", error); 563 goto adma_done; 564 } 565 566 memset(hp->adma2, 0, PAGE_SIZE); 567 568 adma_done: 569 if (error) 570 CLR(hp->flags, SHF_USE_ADMA2_MASK); 571 } 572 573 /* 574 * Attach the generic SD/MMC bus driver. (The bus driver must 575 * not invoke any chipset functions before it is attached.) 
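 * saa_clkmin below is the slowest SDCLK each divider scheme can produce as
 * computed here: clkbase/0x3ff for the 3.0-style 10-bit divider,
 * clkbase/256 for the classic power-of-two divider, and the corresponding
 * limits for the CGM/DVS/clkmsk variants.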
576 */ 577 memset(&saa, 0, sizeof(saa)); 578 saa.saa_busname = "sdmmc"; 579 saa.saa_sct = &sdhc_functions; 580 saa.saa_sch = hp; 581 saa.saa_dmat = hp->dmat; 582 saa.saa_clkmax = hp->clkbase; 583 if (ISSET(sc->sc_flags, SDHC_FLAG_HAVE_CGM)) 584 saa.saa_clkmin = hp->clkbase / 256 / 2046; 585 else if (ISSET(sc->sc_flags, SDHC_FLAG_HAVE_DVS)) 586 saa.saa_clkmin = hp->clkbase / 256 / 16; 587 else if (hp->sc->sc_clkmsk != 0) 588 saa.saa_clkmin = hp->clkbase / (hp->sc->sc_clkmsk >> 589 (ffs(hp->sc->sc_clkmsk) - 1)); 590 else if (hp->specver >= SDHC_SPEC_VERS_300) 591 saa.saa_clkmin = hp->clkbase / 0x3ff; 592 else 593 saa.saa_clkmin = hp->clkbase / 256; 594 saa.saa_caps = SMC_CAPS_4BIT_MODE|SMC_CAPS_AUTO_STOP; 595 if (ISSET(sc->sc_flags, SDHC_FLAG_8BIT_MODE)) 596 saa.saa_caps |= SMC_CAPS_8BIT_MODE; 597 if (ISSET(caps, SDHC_HIGH_SPEED_SUPP)) 598 saa.saa_caps |= SMC_CAPS_SD_HIGHSPEED; 599 if (ISSET(caps2, SDHC_SDR104_SUPP)) 600 saa.saa_caps |= SMC_CAPS_UHS_SDR104 | 601 SMC_CAPS_UHS_SDR50 | 602 SMC_CAPS_MMC_HS200; 603 if (ISSET(caps2, SDHC_SDR50_SUPP)) 604 saa.saa_caps |= SMC_CAPS_UHS_SDR50; 605 if (ISSET(caps2, SDHC_DDR50_SUPP)) 606 saa.saa_caps |= SMC_CAPS_UHS_DDR50; 607 if (ISSET(hp->flags, SHF_USE_DMA)) { 608 saa.saa_caps |= SMC_CAPS_DMA; 609 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED)) 610 saa.saa_caps |= SMC_CAPS_MULTI_SEG_DMA; 611 } 612 if (ISSET(sc->sc_flags, SDHC_FLAG_SINGLE_ONLY)) 613 saa.saa_caps |= SMC_CAPS_SINGLE_ONLY; 614 if (ISSET(sc->sc_flags, SDHC_FLAG_POLL_CARD_DET)) 615 saa.saa_caps |= SMC_CAPS_POLL_CARD_DET; 616 hp->sdmmc = config_found(sc->sc_dev, &saa, sdhc_cfprint); 617 618 return 0; 619 620 err: 621 callout_destroy(&hp->tuning_timer); 622 cv_destroy(&hp->intr_cv); 623 mutex_destroy(&hp->intr_lock); 624 free(hp, M_DEVBUF); 625 sc->sc_host[--sc->sc_nhosts] = NULL; 626 err1: 627 return 1; 628 } 629 630 int 631 sdhc_detach(struct sdhc_softc *sc, int flags) 632 { 633 struct sdhc_host *hp; 634 int rv = 0; 635 636 for (size_t n = 0; n < sc->sc_nhosts; n++) { 637 hp = sc->sc_host[n]; 638 if (hp == NULL) 639 continue; 640 if (hp->sdmmc != NULL) { 641 rv = config_detach(hp->sdmmc, flags); 642 if (rv) 643 break; 644 hp->sdmmc = NULL; 645 } 646 /* disable interrupts */ 647 if ((flags & DETACH_FORCE) == 0) { 648 mutex_enter(&hp->intr_lock); 649 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_32BIT_ACCESS)) { 650 HWRITE4(hp, SDHC_NINTR_SIGNAL_EN, 0); 651 } else { 652 HWRITE2(hp, SDHC_NINTR_SIGNAL_EN, 0); 653 } 654 sdhc_soft_reset(hp, SDHC_RESET_ALL); 655 mutex_exit(&hp->intr_lock); 656 } 657 callout_halt(&hp->tuning_timer, NULL); 658 callout_destroy(&hp->tuning_timer); 659 cv_destroy(&hp->intr_cv); 660 mutex_destroy(&hp->intr_lock); 661 if (hp->ios > 0) { 662 bus_space_unmap(hp->iot, hp->ioh, hp->ios); 663 hp->ios = 0; 664 } 665 if (ISSET(hp->flags, SHF_USE_ADMA2_MASK)) { 666 bus_dmamap_unload(sc->sc_dmat, hp->adma_map); 667 bus_dmamap_destroy(sc->sc_dmat, hp->adma_map); 668 bus_dmamem_unmap(sc->sc_dmat, hp->adma2, PAGE_SIZE); 669 bus_dmamem_free(sc->sc_dmat, hp->adma_segs, 1); 670 } 671 free(hp, M_DEVBUF); 672 sc->sc_host[n] = NULL; 673 } 674 675 return rv; 676 } 677 678 bool 679 sdhc_suspend(device_t dev, const pmf_qual_t *qual) 680 { 681 struct sdhc_softc *sc = device_private(dev); 682 struct sdhc_host *hp; 683 size_t i; 684 685 /* XXX poll for command completion or suspend command 686 * in progress */ 687 688 /* Save the host controller state. 
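 * Only the first sizeof(hp->regs) (14) bytes of the register file are
 * captured, either byte by byte or as packed 32-bit reads on controllers
 * that only tolerate 32-bit access; sdhc_resume() writes them back after a
 * full host reset.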
*/ 689 for (size_t n = 0; n < sc->sc_nhosts; n++) { 690 hp = sc->sc_host[n]; 691 if (ISSET(sc->sc_flags, SDHC_FLAG_32BIT_ACCESS)) { 692 for (i = 0; i < sizeof hp->regs; i += 4) { 693 uint32_t v = HREAD4(hp, i); 694 hp->regs[i + 0] = (v >> 0); 695 hp->regs[i + 1] = (v >> 8); 696 if (i + 3 < sizeof hp->regs) { 697 hp->regs[i + 2] = (v >> 16); 698 hp->regs[i + 3] = (v >> 24); 699 } 700 } 701 } else { 702 for (i = 0; i < sizeof hp->regs; i++) { 703 hp->regs[i] = HREAD1(hp, i); 704 } 705 } 706 } 707 return true; 708 } 709 710 bool 711 sdhc_resume(device_t dev, const pmf_qual_t *qual) 712 { 713 struct sdhc_softc *sc = device_private(dev); 714 struct sdhc_host *hp; 715 size_t i; 716 717 /* Restore the host controller state. */ 718 for (size_t n = 0; n < sc->sc_nhosts; n++) { 719 hp = sc->sc_host[n]; 720 (void)sdhc_host_reset(hp); 721 if (ISSET(sc->sc_flags, SDHC_FLAG_32BIT_ACCESS)) { 722 for (i = 0; i < sizeof hp->regs; i += 4) { 723 if (i + 3 < sizeof hp->regs) { 724 HWRITE4(hp, i, 725 (hp->regs[i + 0] << 0) 726 | (hp->regs[i + 1] << 8) 727 | (hp->regs[i + 2] << 16) 728 | (hp->regs[i + 3] << 24)); 729 } else { 730 HWRITE4(hp, i, 731 (hp->regs[i + 0] << 0) 732 | (hp->regs[i + 1] << 8)); 733 } 734 } 735 } else { 736 for (i = 0; i < sizeof hp->regs; i++) { 737 HWRITE1(hp, i, hp->regs[i]); 738 } 739 } 740 } 741 return true; 742 } 743 744 bool 745 sdhc_shutdown(device_t dev, int flags) 746 { 747 struct sdhc_softc *sc = device_private(dev); 748 struct sdhc_host *hp; 749 750 /* XXX chip locks up if we don't disable it before reboot. */ 751 for (size_t i = 0; i < sc->sc_nhosts; i++) { 752 hp = sc->sc_host[i]; 753 (void)sdhc_host_reset(hp); 754 } 755 return true; 756 } 757 758 /* 759 * Reset the host controller. Called during initialization, when 760 * cards are removed, upon resume, and during error recovery. 761 */ 762 static int 763 sdhc_host_reset1(sdmmc_chipset_handle_t sch) 764 { 765 struct sdhc_host *hp = (struct sdhc_host *)sch; 766 uint32_t sdhcimask; 767 int error; 768 769 KASSERT(mutex_owned(&hp->intr_lock)); 770 771 /* Disable all interrupts. */ 772 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_32BIT_ACCESS)) { 773 HWRITE4(hp, SDHC_NINTR_SIGNAL_EN, 0); 774 } else { 775 HWRITE2(hp, SDHC_NINTR_SIGNAL_EN, 0); 776 } 777 778 /* 779 * Reset the entire host controller and wait up to 100ms for 780 * the controller to clear the reset bit. 781 */ 782 error = sdhc_soft_reset(hp, SDHC_RESET_ALL); 783 if (error) 784 goto out; 785 786 /* Set data timeout counter value to max for now. */ 787 HWRITE1(hp, SDHC_TIMEOUT_CTL, SDHC_TIMEOUT_MAX); 788 #if 1 789 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED)) 790 HWRITE4(hp, SDHC_NINTR_STATUS, SDHC_CMD_TIMEOUT_ERROR << 16); 791 #endif 792 793 /* Enable interrupts. 
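 * The full mask is written to the status-enable register first; the writes
 * below then derive the signal-enable mask from it, folding in the error
 * bits (for the 32-bit register layout) and leaving out the buffer-ready
 * bits, which the PIO path enables on demand while it waits for data.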
*/ 794 sdhcimask = SDHC_CARD_REMOVAL | SDHC_CARD_INSERTION | 795 SDHC_BUFFER_READ_READY | SDHC_BUFFER_WRITE_READY | 796 SDHC_DMA_INTERRUPT | SDHC_BLOCK_GAP_EVENT | 797 SDHC_TRANSFER_COMPLETE | SDHC_COMMAND_COMPLETE; 798 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_32BIT_ACCESS)) { 799 sdhcimask |= SDHC_EINTR_STATUS_MASK << 16; 800 HWRITE4(hp, SDHC_NINTR_STATUS_EN, sdhcimask); 801 sdhcimask ^= 802 (SDHC_EINTR_STATUS_MASK ^ SDHC_EINTR_SIGNAL_MASK) << 16; 803 sdhcimask ^= SDHC_BUFFER_READ_READY ^ SDHC_BUFFER_WRITE_READY; 804 HWRITE4(hp, SDHC_NINTR_SIGNAL_EN, sdhcimask); 805 } else { 806 HWRITE2(hp, SDHC_NINTR_STATUS_EN, sdhcimask); 807 HWRITE2(hp, SDHC_EINTR_STATUS_EN, SDHC_EINTR_STATUS_MASK); 808 sdhcimask ^= SDHC_BUFFER_READ_READY ^ SDHC_BUFFER_WRITE_READY; 809 HWRITE2(hp, SDHC_NINTR_SIGNAL_EN, sdhcimask); 810 HWRITE2(hp, SDHC_EINTR_SIGNAL_EN, SDHC_EINTR_SIGNAL_MASK); 811 } 812 813 out: 814 return error; 815 } 816 817 static int 818 sdhc_host_reset(sdmmc_chipset_handle_t sch) 819 { 820 struct sdhc_host *hp = (struct sdhc_host *)sch; 821 int error; 822 823 mutex_enter(&hp->intr_lock); 824 error = sdhc_host_reset1(sch); 825 mutex_exit(&hp->intr_lock); 826 827 return error; 828 } 829 830 static uint32_t 831 sdhc_host_ocr(sdmmc_chipset_handle_t sch) 832 { 833 struct sdhc_host *hp = (struct sdhc_host *)sch; 834 835 return hp->ocr; 836 } 837 838 static int 839 sdhc_host_maxblklen(sdmmc_chipset_handle_t sch) 840 { 841 struct sdhc_host *hp = (struct sdhc_host *)sch; 842 843 return hp->maxblklen; 844 } 845 846 /* 847 * Return non-zero if the card is currently inserted. 848 */ 849 static int 850 sdhc_card_detect(sdmmc_chipset_handle_t sch) 851 { 852 struct sdhc_host *hp = (struct sdhc_host *)sch; 853 int r; 854 855 if (hp->sc->sc_vendor_card_detect) 856 return (*hp->sc->sc_vendor_card_detect)(hp->sc); 857 858 r = ISSET(HREAD4(hp, SDHC_PRESENT_STATE), SDHC_CARD_INSERTED); 859 860 return r ? 1 : 0; 861 } 862 863 /* 864 * Return non-zero if the card is currently write-protected. 865 */ 866 static int 867 sdhc_write_protect(sdmmc_chipset_handle_t sch) 868 { 869 struct sdhc_host *hp = (struct sdhc_host *)sch; 870 int r; 871 872 if (hp->sc->sc_vendor_write_protect) 873 return (*hp->sc->sc_vendor_write_protect)(hp->sc); 874 875 r = ISSET(HREAD4(hp, SDHC_PRESENT_STATE), SDHC_WRITE_PROTECT_SWITCH); 876 877 return r ? 0 : 1; 878 } 879 880 /* 881 * Set or change SD bus voltage and enable or disable SD bus power. 882 * Return zero on success. 883 */ 884 static int 885 sdhc_bus_power(sdmmc_chipset_handle_t sch, uint32_t ocr) 886 { 887 struct sdhc_host *hp = (struct sdhc_host *)sch; 888 uint8_t vdd; 889 int error = 0; 890 const uint32_t pcmask = 891 ~(SDHC_BUS_POWER | (SDHC_VOLTAGE_MASK << SDHC_VOLTAGE_SHIFT)); 892 893 mutex_enter(&hp->intr_lock); 894 895 /* 896 * Disable bus power before voltage change. 897 */ 898 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_32BIT_ACCESS) 899 && !ISSET(hp->sc->sc_flags, SDHC_FLAG_NO_PWR0)) 900 HWRITE1(hp, SDHC_POWER_CTL, 0); 901 902 /* If power is disabled, reset the host and return now. */ 903 if (ocr == 0) { 904 (void)sdhc_host_reset1(hp); 905 callout_halt(&hp->tuning_timer, &hp->intr_lock); 906 goto out; 907 } 908 909 /* 910 * Select the lowest voltage according to capabilities. 
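 * For example, if both the caller's ocr and hp->ocr include the 1.7-1.9V
 * and the 3.2-3.4V windows, vdd resolves to SDHC_VOLTAGE_1_8V, because the
 * 1.8V ranges are tested first.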
911 */ 912 ocr &= hp->ocr; 913 if (ISSET(ocr, MMC_OCR_1_7V_1_8V|MMC_OCR_1_8V_1_9V)) { 914 vdd = SDHC_VOLTAGE_1_8V; 915 } else if (ISSET(ocr, MMC_OCR_2_9V_3_0V|MMC_OCR_3_0V_3_1V)) { 916 vdd = SDHC_VOLTAGE_3_0V; 917 } else if (ISSET(ocr, MMC_OCR_3_2V_3_3V|MMC_OCR_3_3V_3_4V)) { 918 vdd = SDHC_VOLTAGE_3_3V; 919 } else { 920 /* Unsupported voltage level requested. */ 921 error = EINVAL; 922 goto out; 923 } 924 925 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED)) { 926 /* 927 * Enable bus power. Wait at least 1 ms (or 74 clocks) plus 928 * voltage ramp until power rises. 929 */ 930 931 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_SINGLE_POWER_WRITE)) { 932 HWRITE1(hp, SDHC_POWER_CTL, 933 (vdd << SDHC_VOLTAGE_SHIFT) | SDHC_BUS_POWER); 934 } else { 935 HWRITE1(hp, SDHC_POWER_CTL, 936 HREAD1(hp, SDHC_POWER_CTL) & pcmask); 937 sdmmc_delay(1); 938 HWRITE1(hp, SDHC_POWER_CTL, 939 (vdd << SDHC_VOLTAGE_SHIFT)); 940 sdmmc_delay(1); 941 HSET1(hp, SDHC_POWER_CTL, SDHC_BUS_POWER); 942 sdmmc_delay(10000); 943 } 944 945 /* 946 * The host system may not power the bus due to battery low, 947 * etc. In that case, the host controller should clear the 948 * bus power bit. 949 */ 950 if (!ISSET(HREAD1(hp, SDHC_POWER_CTL), SDHC_BUS_POWER)) { 951 error = ENXIO; 952 goto out; 953 } 954 } 955 956 out: 957 mutex_exit(&hp->intr_lock); 958 959 return error; 960 } 961 962 /* 963 * Return the smallest possible base clock frequency divisor value 964 * for the CLOCK_CTL register to produce `freq' (KHz). 965 */ 966 static bool 967 sdhc_clock_divisor(struct sdhc_host *hp, u_int freq, u_int *divp) 968 { 969 u_int div; 970 971 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_HAVE_CGM)) { 972 for (div = hp->clkbase / freq; div <= 0x3ff; div++) { 973 if ((hp->clkbase / div) <= freq) { 974 *divp = SDHC_SDCLK_CGM 975 | ((div & 0x300) << SDHC_SDCLK_XDIV_SHIFT) 976 | ((div & 0x0ff) << SDHC_SDCLK_DIV_SHIFT); 977 //freq = hp->clkbase / div; 978 return true; 979 } 980 } 981 /* No divisor found. */ 982 return false; 983 } 984 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_HAVE_DVS)) { 985 u_int dvs = (hp->clkbase + freq - 1) / freq; 986 u_int roundup = dvs & 1; 987 for (dvs >>= 1, div = 1; div <= 256; div <<= 1, dvs >>= 1) { 988 if (dvs + roundup <= 16) { 989 dvs += roundup - 1; 990 *divp = (div << SDHC_SDCLK_DIV_SHIFT) 991 | (dvs << SDHC_SDCLK_DVS_SHIFT); 992 DPRINTF(2, 993 ("%s: divisor for freq %u is %u * %u\n", 994 HDEVNAME(hp), freq, div * 2, dvs + 1)); 995 //freq = hp->clkbase / (div * 2) * (dvs + 1); 996 return true; 997 } 998 /* 999 * If we drop bits, we need to round up the divisor. 1000 */ 1001 roundup |= dvs & 1; 1002 } 1003 /* No divisor found. */ 1004 return false; 1005 } 1006 if (hp->sc->sc_clkmsk != 0) { 1007 div = howmany(hp->clkbase, freq); 1008 if (div > (hp->sc->sc_clkmsk >> (ffs(hp->sc->sc_clkmsk) - 1))) 1009 return false; 1010 *divp = div << (ffs(hp->sc->sc_clkmsk) - 1); 1011 //freq = hp->clkbase / div; 1012 return true; 1013 } 1014 if (hp->specver >= SDHC_SPEC_VERS_300) { 1015 div = howmany(hp->clkbase, freq); 1016 div = div > 1 ? howmany(div, 2) : 0; 1017 if (div > 0x3ff) 1018 return false; 1019 *divp = (((div >> 8) & SDHC_SDCLK_XDIV_MASK) 1020 << SDHC_SDCLK_XDIV_SHIFT) | 1021 (((div >> 0) & SDHC_SDCLK_DIV_MASK) 1022 << SDHC_SDCLK_DIV_SHIFT); 1023 //freq = hp->clkbase / (div ? div * 2 : 1); 1024 return true; 1025 } else { 1026 for (div = 1; div <= 256; div *= 2) { 1027 if ((hp->clkbase / div) <= freq) { 1028 *divp = (div / 2) << SDHC_SDCLK_DIV_SHIFT; 1029 //freq = hp->clkbase / div; 1030 return true; 1031 } 1032 } 1033 /* No divisor found. 
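 * For example, with clkbase = 48000 kHz a 25000 kHz request picks div 2
 * (24000 kHz) and a 400 kHz request picks div 128 (375 kHz); requests below
 * clkbase/256 (about 188 kHz here) end up here with no usable divisor.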
*/ 1034 return false; 1035 } 1036 /* No divisor found. */ 1037 return false; 1038 } 1039 1040 /* 1041 * Set or change SDCLK frequency or disable the SD clock. 1042 * Return zero on success. 1043 */ 1044 static int 1045 sdhc_bus_clock_ddr(sdmmc_chipset_handle_t sch, int freq, bool ddr) 1046 { 1047 struct sdhc_host *hp = (struct sdhc_host *)sch; 1048 u_int div; 1049 u_int timo; 1050 int16_t reg; 1051 int error = 0; 1052 bool present __diagused; 1053 1054 mutex_enter(&hp->intr_lock); 1055 1056 #ifdef DIAGNOSTIC 1057 present = ISSET(HREAD4(hp, SDHC_PRESENT_STATE), SDHC_CMD_INHIBIT_MASK); 1058 1059 /* Must not stop the clock if commands are in progress. */ 1060 if (present && sdhc_card_detect(hp)) { 1061 aprint_normal_dev(hp->sc->sc_dev, 1062 "%s: command in progress\n", __func__); 1063 } 1064 #endif 1065 1066 if (hp->sc->sc_vendor_bus_clock) { 1067 error = (*hp->sc->sc_vendor_bus_clock)(hp->sc, freq); 1068 if (error != 0) 1069 goto out; 1070 } 1071 1072 /* 1073 * Stop SD clock before changing the frequency. 1074 */ 1075 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_USDHC)) { 1076 HCLR4(hp, SDHC_VEND_SPEC, 1077 SDHC_VEND_SPEC_CARD_CLK_SOFT_EN | 1078 SDHC_VEND_SPEC_FRC_SDCLK_ON); 1079 if (freq == SDMMC_SDCLK_OFF) { 1080 goto out; 1081 } 1082 } else if (ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED)) { 1083 HCLR4(hp, SDHC_CLOCK_CTL, 0xfff8); 1084 if (freq == SDMMC_SDCLK_OFF) { 1085 HSET4(hp, SDHC_CLOCK_CTL, 0x80f0); 1086 goto out; 1087 } 1088 } else { 1089 HCLR2(hp, SDHC_CLOCK_CTL, SDHC_SDCLK_ENABLE); 1090 if (freq == SDMMC_SDCLK_OFF) 1091 goto out; 1092 } 1093 1094 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_USDHC)) { 1095 if (ddr) 1096 HSET4(hp, SDHC_MIX_CTRL, SDHC_USDHC_DDR_EN); 1097 else 1098 HCLR4(hp, SDHC_MIX_CTRL, SDHC_USDHC_DDR_EN); 1099 } else if (hp->specver >= SDHC_SPEC_VERS_300) { 1100 HCLR2(hp, SDHC_HOST_CTL2, SDHC_UHS_MODE_SELECT_MASK); 1101 if (freq > 100000) { 1102 HSET2(hp, SDHC_HOST_CTL2, SDHC_UHS_MODE_SELECT_SDR104); 1103 } else if (freq > 50000) { 1104 HSET2(hp, SDHC_HOST_CTL2, SDHC_UHS_MODE_SELECT_SDR50); 1105 } else if (freq > 25000) { 1106 if (ddr) { 1107 HSET2(hp, SDHC_HOST_CTL2, 1108 SDHC_UHS_MODE_SELECT_DDR50); 1109 } else { 1110 HSET2(hp, SDHC_HOST_CTL2, 1111 SDHC_UHS_MODE_SELECT_SDR25); 1112 } 1113 } else if (freq > 400) { 1114 HSET2(hp, SDHC_HOST_CTL2, SDHC_UHS_MODE_SELECT_SDR12); 1115 } 1116 } 1117 1118 /* 1119 * Slow down Ricoh 5U823 controller that isn't reliable 1120 * at 100MHz bus clock. 1121 */ 1122 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_SLOW_SDR50)) { 1123 if (freq == 100000) 1124 --freq; 1125 } 1126 1127 /* 1128 * Set the minimum base clock frequency divisor. 1129 */ 1130 if (!sdhc_clock_divisor(hp, freq, &div)) { 1131 /* Invalid base clock frequency or `freq' value. 
*/ 1132 aprint_error_dev(hp->sc->sc_dev, 1133 "Invalid bus clock %d kHz\n", freq); 1134 error = EINVAL; 1135 goto out; 1136 } 1137 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_USDHC)) { 1138 if (ddr) { 1139 /* in ddr mode, divisor >>= 1 */ 1140 div = ((div >> 1) & (SDHC_SDCLK_DIV_MASK << 1141 SDHC_SDCLK_DIV_SHIFT)) | 1142 (div & (SDHC_SDCLK_DVS_MASK << 1143 SDHC_SDCLK_DVS_SHIFT)); 1144 } 1145 for (timo = 1000; timo > 0; timo--) { 1146 if (ISSET(HREAD4(hp, SDHC_PRESENT_STATE), SDHC_SDSTB)) 1147 break; 1148 sdmmc_delay(10); 1149 } 1150 HWRITE4(hp, SDHC_CLOCK_CTL, 1151 div | (SDHC_TIMEOUT_MAX << 16) | 0x0f); 1152 } else if (ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED)) { 1153 HWRITE4(hp, SDHC_CLOCK_CTL, 1154 div | (SDHC_TIMEOUT_MAX << 16)); 1155 } else { 1156 reg = HREAD2(hp, SDHC_CLOCK_CTL); 1157 reg &= (SDHC_INTCLK_STABLE | SDHC_INTCLK_ENABLE); 1158 HWRITE2(hp, SDHC_CLOCK_CTL, reg | div); 1159 } 1160 1161 /* 1162 * Start internal clock. Wait 10ms for stabilization. 1163 */ 1164 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_USDHC)) { 1165 HSET4(hp, SDHC_VEND_SPEC, 1166 SDHC_VEND_SPEC_CARD_CLK_SOFT_EN | 1167 SDHC_VEND_SPEC_FRC_SDCLK_ON); 1168 } else if (ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED)) { 1169 sdmmc_delay(10000); 1170 HSET4(hp, SDHC_CLOCK_CTL, 1171 8 | SDHC_INTCLK_ENABLE | SDHC_INTCLK_STABLE); 1172 } else { 1173 HSET2(hp, SDHC_CLOCK_CTL, SDHC_INTCLK_ENABLE); 1174 for (timo = 1000; timo > 0; timo--) { 1175 if (ISSET(HREAD2(hp, SDHC_CLOCK_CTL), 1176 SDHC_INTCLK_STABLE)) 1177 break; 1178 sdmmc_delay(10); 1179 } 1180 if (timo == 0) { 1181 error = ETIMEDOUT; 1182 DPRINTF(1,("%s: timeout\n", __func__)); 1183 goto out; 1184 } 1185 } 1186 1187 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED | SDHC_FLAG_USDHC)) { 1188 HSET1(hp, SDHC_SOFTWARE_RESET, SDHC_INIT_ACTIVE); 1189 /* 1190 * Sending 80 clocks at 400kHz takes 200us. 1191 * So delay for that time + slop and then 1192 * check a few times for completion. 1193 */ 1194 sdmmc_delay(210); 1195 for (timo = 10; timo > 0; timo--) { 1196 if (!ISSET(HREAD1(hp, SDHC_SOFTWARE_RESET), 1197 SDHC_INIT_ACTIVE)) 1198 break; 1199 sdmmc_delay(10); 1200 } 1201 DPRINTF(2,("%s: %u init spins\n", __func__, 10 - timo)); 1202 1203 /* 1204 * Enable SD clock. 1205 */ 1206 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_USDHC)) { 1207 HSET4(hp, SDHC_VEND_SPEC, 1208 SDHC_VEND_SPEC_CARD_CLK_SOFT_EN | 1209 SDHC_VEND_SPEC_FRC_SDCLK_ON); 1210 } else { 1211 HSET4(hp, SDHC_CLOCK_CTL, SDHC_SDCLK_ENABLE); 1212 } 1213 } else { 1214 /* 1215 * Enable SD clock. 
1216 */ 1217 HSET2(hp, SDHC_CLOCK_CTL, SDHC_SDCLK_ENABLE); 1218 1219 if (freq > 25000 && 1220 !ISSET(hp->sc->sc_flags, SDHC_FLAG_NO_HS_BIT)) 1221 HSET1(hp, SDHC_HOST_CTL, SDHC_HIGH_SPEED); 1222 else 1223 HCLR1(hp, SDHC_HOST_CTL, SDHC_HIGH_SPEED); 1224 } 1225 1226 out: 1227 mutex_exit(&hp->intr_lock); 1228 1229 return error; 1230 } 1231 1232 static int 1233 sdhc_bus_width(sdmmc_chipset_handle_t sch, int width) 1234 { 1235 struct sdhc_host *hp = (struct sdhc_host *)sch; 1236 int reg; 1237 1238 switch (width) { 1239 case 1: 1240 case 4: 1241 break; 1242 1243 case 8: 1244 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_8BIT_MODE)) 1245 break; 1246 /* FALLTHROUGH */ 1247 default: 1248 DPRINTF(0,("%s: unsupported bus width (%d)\n", 1249 HDEVNAME(hp), width)); 1250 return 1; 1251 } 1252 1253 if (hp->sc->sc_vendor_bus_width) { 1254 const int error = hp->sc->sc_vendor_bus_width(hp->sc, width); 1255 if (error != 0) 1256 return error; 1257 } 1258 1259 mutex_enter(&hp->intr_lock); 1260 1261 reg = HREAD1(hp, SDHC_HOST_CTL); 1262 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED | SDHC_FLAG_USDHC)) { 1263 reg &= ~(SDHC_4BIT_MODE|SDHC_ESDHC_8BIT_MODE); 1264 if (width == 4) 1265 reg |= SDHC_4BIT_MODE; 1266 else if (width == 8) 1267 reg |= SDHC_ESDHC_8BIT_MODE; 1268 } else { 1269 reg &= ~SDHC_4BIT_MODE; 1270 if (hp->specver >= SDHC_SPEC_VERS_300) { 1271 reg &= ~SDHC_8BIT_MODE; 1272 } 1273 if (width == 4) { 1274 reg |= SDHC_4BIT_MODE; 1275 } else if (width == 8 && hp->specver >= SDHC_SPEC_VERS_300) { 1276 reg |= SDHC_8BIT_MODE; 1277 } 1278 } 1279 HWRITE1(hp, SDHC_HOST_CTL, reg); 1280 1281 mutex_exit(&hp->intr_lock); 1282 1283 return 0; 1284 } 1285 1286 static int 1287 sdhc_bus_rod(sdmmc_chipset_handle_t sch, int on) 1288 { 1289 struct sdhc_host *hp = (struct sdhc_host *)sch; 1290 1291 if (hp->sc->sc_vendor_rod) 1292 return (*hp->sc->sc_vendor_rod)(hp->sc, on); 1293 1294 return 0; 1295 } 1296 1297 static void 1298 sdhc_card_enable_intr(sdmmc_chipset_handle_t sch, int enable) 1299 { 1300 struct sdhc_host *hp = (struct sdhc_host *)sch; 1301 1302 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED | SDHC_FLAG_USDHC)) { 1303 mutex_enter(&hp->intr_lock); 1304 if (enable) { 1305 HSET2(hp, SDHC_NINTR_STATUS_EN, SDHC_CARD_INTERRUPT); 1306 HSET2(hp, SDHC_NINTR_SIGNAL_EN, SDHC_CARD_INTERRUPT); 1307 } else { 1308 HCLR2(hp, SDHC_NINTR_SIGNAL_EN, SDHC_CARD_INTERRUPT); 1309 HCLR2(hp, SDHC_NINTR_STATUS_EN, SDHC_CARD_INTERRUPT); 1310 } 1311 mutex_exit(&hp->intr_lock); 1312 } 1313 } 1314 1315 static void 1316 sdhc_card_intr_ack(sdmmc_chipset_handle_t sch) 1317 { 1318 struct sdhc_host *hp = (struct sdhc_host *)sch; 1319 1320 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED | SDHC_FLAG_USDHC)) { 1321 mutex_enter(&hp->intr_lock); 1322 HSET2(hp, SDHC_NINTR_STATUS_EN, SDHC_CARD_INTERRUPT); 1323 mutex_exit(&hp->intr_lock); 1324 } 1325 } 1326 1327 static int 1328 sdhc_signal_voltage(sdmmc_chipset_handle_t sch, int signal_voltage) 1329 { 1330 struct sdhc_host *hp = (struct sdhc_host *)sch; 1331 1332 mutex_enter(&hp->intr_lock); 1333 switch (signal_voltage) { 1334 case SDMMC_SIGNAL_VOLTAGE_180: 1335 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_USDHC)) 1336 HSET2(hp, SDHC_HOST_CTL2, SDHC_1_8V_SIGNAL_EN); 1337 break; 1338 case SDMMC_SIGNAL_VOLTAGE_330: 1339 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_USDHC)) 1340 HCLR2(hp, SDHC_HOST_CTL2, SDHC_1_8V_SIGNAL_EN); 1341 break; 1342 default: 1343 return EINVAL; 1344 } 1345 mutex_exit(&hp->intr_lock); 1346 1347 return 0; 1348 } 1349 1350 /* 1351 * Sampling clock tuning procedure (UHS) 1352 */ 1353 static int 1354 
sdhc_execute_tuning1(struct sdhc_host *hp, int timing) 1355 { 1356 struct sdmmc_command cmd; 1357 uint8_t hostctl; 1358 int opcode, error, retry = 40; 1359 1360 KASSERT(mutex_owned(&hp->intr_lock)); 1361 1362 hp->tuning_timing = timing; 1363 1364 switch (timing) { 1365 case SDMMC_TIMING_MMC_HS200: 1366 opcode = MMC_SEND_TUNING_BLOCK_HS200; 1367 break; 1368 case SDMMC_TIMING_UHS_SDR50: 1369 if (!ISSET(hp->sc->sc_caps2, SDHC_TUNING_SDR50)) 1370 return 0; 1371 /* FALLTHROUGH */ 1372 case SDMMC_TIMING_UHS_SDR104: 1373 opcode = MMC_SEND_TUNING_BLOCK; 1374 break; 1375 default: 1376 return EINVAL; 1377 } 1378 1379 hostctl = HREAD1(hp, SDHC_HOST_CTL); 1380 1381 /* enable buffer read ready interrupt */ 1382 HSET2(hp, SDHC_NINTR_SIGNAL_EN, SDHC_BUFFER_READ_READY); 1383 HSET2(hp, SDHC_NINTR_STATUS_EN, SDHC_BUFFER_READ_READY); 1384 1385 /* disable DMA */ 1386 HCLR1(hp, SDHC_HOST_CTL, SDHC_DMA_SELECT); 1387 1388 /* reset tuning circuit */ 1389 HCLR2(hp, SDHC_HOST_CTL2, SDHC_SAMPLING_CLOCK_SEL); 1390 1391 /* start of tuning */ 1392 HWRITE2(hp, SDHC_HOST_CTL2, SDHC_EXECUTE_TUNING); 1393 1394 do { 1395 memset(&cmd, 0, sizeof(cmd)); 1396 cmd.c_opcode = opcode; 1397 cmd.c_arg = 0; 1398 cmd.c_flags = SCF_CMD_ADTC | SCF_CMD_READ | SCF_RSP_R1; 1399 if (ISSET(hostctl, SDHC_8BIT_MODE)) { 1400 cmd.c_blklen = cmd.c_datalen = 128; 1401 } else { 1402 cmd.c_blklen = cmd.c_datalen = 64; 1403 } 1404 1405 error = sdhc_start_command(hp, &cmd); 1406 if (error) 1407 break; 1408 1409 if (!sdhc_wait_intr(hp, SDHC_BUFFER_READ_READY, 1410 SDHC_TUNING_TIMEOUT, false)) { 1411 break; 1412 } 1413 1414 delay(1000); 1415 } while (HREAD2(hp, SDHC_HOST_CTL2) & SDHC_EXECUTE_TUNING && --retry); 1416 1417 /* disable buffer read ready interrupt */ 1418 HCLR2(hp, SDHC_NINTR_SIGNAL_EN, SDHC_BUFFER_READ_READY); 1419 HCLR2(hp, SDHC_NINTR_STATUS_EN, SDHC_BUFFER_READ_READY); 1420 1421 if (HREAD2(hp, SDHC_HOST_CTL2) & SDHC_EXECUTE_TUNING) { 1422 HCLR2(hp, SDHC_HOST_CTL2, 1423 SDHC_SAMPLING_CLOCK_SEL|SDHC_EXECUTE_TUNING); 1424 sdhc_soft_reset(hp, SDHC_RESET_DAT|SDHC_RESET_CMD); 1425 aprint_error_dev(hp->sc->sc_dev, 1426 "tuning did not complete, using fixed sampling clock\n"); 1427 return EIO; /* tuning did not complete */ 1428 } 1429 1430 if ((HREAD2(hp, SDHC_HOST_CTL2) & SDHC_SAMPLING_CLOCK_SEL) == 0) { 1431 HCLR2(hp, SDHC_HOST_CTL2, 1432 SDHC_SAMPLING_CLOCK_SEL|SDHC_EXECUTE_TUNING); 1433 sdhc_soft_reset(hp, SDHC_RESET_DAT|SDHC_RESET_CMD); 1434 aprint_error_dev(hp->sc->sc_dev, 1435 "tuning failed, using fixed sampling clock\n"); 1436 return EIO; /* tuning failed */ 1437 } 1438 1439 if (hp->tuning_timer_count) { 1440 callout_schedule(&hp->tuning_timer, 1441 hz * hp->tuning_timer_count); 1442 } 1443 1444 return 0; /* tuning completed */ 1445 } 1446 1447 static int 1448 sdhc_execute_tuning(sdmmc_chipset_handle_t sch, int timing) 1449 { 1450 struct sdhc_host *hp = (struct sdhc_host *)sch; 1451 int error; 1452 1453 mutex_enter(&hp->intr_lock); 1454 error = sdhc_execute_tuning1(hp, timing); 1455 mutex_exit(&hp->intr_lock); 1456 return error; 1457 } 1458 1459 static void 1460 sdhc_tuning_timer(void *arg) 1461 { 1462 struct sdhc_host *hp = arg; 1463 1464 atomic_swap_uint(&hp->tuning_timer_pending, 1); 1465 } 1466 1467 static int 1468 sdhc_wait_state(struct sdhc_host *hp, uint32_t mask, uint32_t value) 1469 { 1470 uint32_t state; 1471 int timeout; 1472 1473 for (timeout = 10000; timeout > 0; timeout--) { 1474 if (((state = HREAD4(hp, SDHC_PRESENT_STATE)) & mask) == value) 1475 return 0; 1476 sdmmc_delay(10); 1477 } 1478 aprint_error_dev(hp->sc->sc_dev, 
"timeout waiting for mask %#x value %#x (state=%#x)\n", 1479 mask, value, state); 1480 return ETIMEDOUT; 1481 } 1482 1483 static void 1484 sdhc_exec_command(sdmmc_chipset_handle_t sch, struct sdmmc_command *cmd) 1485 { 1486 struct sdhc_host *hp = (struct sdhc_host *)sch; 1487 int error; 1488 bool probing; 1489 1490 mutex_enter(&hp->intr_lock); 1491 1492 if (atomic_cas_uint(&hp->tuning_timer_pending, 1, 0) == 1) { 1493 (void)sdhc_execute_tuning1(hp, hp->tuning_timing); 1494 } 1495 1496 if (cmd->c_data && 1497 ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED | SDHC_FLAG_USDHC)) { 1498 const uint16_t ready = SDHC_BUFFER_READ_READY | SDHC_BUFFER_WRITE_READY; 1499 if (ISSET(hp->flags, SHF_USE_DMA)) { 1500 HCLR2(hp, SDHC_NINTR_SIGNAL_EN, ready); 1501 HCLR2(hp, SDHC_NINTR_STATUS_EN, ready); 1502 } else { 1503 HSET2(hp, SDHC_NINTR_SIGNAL_EN, ready); 1504 HSET2(hp, SDHC_NINTR_STATUS_EN, ready); 1505 } 1506 } 1507 1508 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_NO_TIMEOUT)) { 1509 const uint16_t eintr = SDHC_CMD_TIMEOUT_ERROR; 1510 if (cmd->c_data != NULL) { 1511 HCLR2(hp, SDHC_EINTR_SIGNAL_EN, eintr); 1512 HCLR2(hp, SDHC_EINTR_STATUS_EN, eintr); 1513 } else { 1514 HSET2(hp, SDHC_EINTR_SIGNAL_EN, eintr); 1515 HSET2(hp, SDHC_EINTR_STATUS_EN, eintr); 1516 } 1517 } 1518 1519 /* 1520 * Start the MMC command, or mark `cmd' as failed and return. 1521 */ 1522 error = sdhc_start_command(hp, cmd); 1523 if (error) { 1524 cmd->c_error = error; 1525 goto out; 1526 } 1527 1528 /* 1529 * Wait until the command phase is done, or until the command 1530 * is marked done for any other reason. 1531 */ 1532 probing = (cmd->c_flags & SCF_TOUT_OK) != 0; 1533 if (!sdhc_wait_intr(hp, SDHC_COMMAND_COMPLETE, SDHC_COMMAND_TIMEOUT, probing)) { 1534 DPRINTF(1,("%s: timeout for command\n", __func__)); 1535 cmd->c_error = ETIMEDOUT; 1536 goto out; 1537 } 1538 1539 /* 1540 * The host controller removes bits [0:7] from the response 1541 * data (CRC) and we pass the data up unchanged to the bus 1542 * driver (without padding). 1543 */ 1544 if (cmd->c_error == 0 && ISSET(cmd->c_flags, SCF_RSP_PRESENT)) { 1545 cmd->c_resp[0] = HREAD4(hp, SDHC_RESPONSE + 0); 1546 if (ISSET(cmd->c_flags, SCF_RSP_136)) { 1547 cmd->c_resp[1] = HREAD4(hp, SDHC_RESPONSE + 4); 1548 cmd->c_resp[2] = HREAD4(hp, SDHC_RESPONSE + 8); 1549 cmd->c_resp[3] = HREAD4(hp, SDHC_RESPONSE + 12); 1550 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_RSP136_CRC)) { 1551 cmd->c_resp[0] = (cmd->c_resp[0] >> 8) | 1552 (cmd->c_resp[1] << 24); 1553 cmd->c_resp[1] = (cmd->c_resp[1] >> 8) | 1554 (cmd->c_resp[2] << 24); 1555 cmd->c_resp[2] = (cmd->c_resp[2] >> 8) | 1556 (cmd->c_resp[3] << 24); 1557 cmd->c_resp[3] = (cmd->c_resp[3] >> 8); 1558 } 1559 } 1560 } 1561 DPRINTF(1,("%s: resp = %08x\n", HDEVNAME(hp), cmd->c_resp[0])); 1562 1563 /* 1564 * If the command has data to transfer in any direction, 1565 * execute the transfer now. 1566 */ 1567 if (cmd->c_error == 0 && cmd->c_data != NULL) 1568 sdhc_transfer_data(hp, cmd); 1569 else if (ISSET(cmd->c_flags, SCF_RSP_BSY)) { 1570 if (!sdhc_wait_intr(hp, SDHC_TRANSFER_COMPLETE, hz * 10, false)) { 1571 DPRINTF(1,("%s: sdhc_exec_command: RSP_BSY\n", 1572 HDEVNAME(hp))); 1573 cmd->c_error = ETIMEDOUT; 1574 goto out; 1575 } 1576 } 1577 1578 out: 1579 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED) 1580 && !ISSET(hp->sc->sc_flags, SDHC_FLAG_NO_LED_ON)) { 1581 /* Turn off the LED. 
*/ 1582 HCLR1(hp, SDHC_HOST_CTL, SDHC_LED_ON); 1583 } 1584 SET(cmd->c_flags, SCF_ITSDONE); 1585 1586 mutex_exit(&hp->intr_lock); 1587 1588 DPRINTF(1,("%s: cmd %d %s (flags=%08x error=%d)\n", HDEVNAME(hp), 1589 cmd->c_opcode, (cmd->c_error == 0) ? "done" : "abort", 1590 cmd->c_flags, cmd->c_error)); 1591 } 1592 1593 static int 1594 sdhc_start_command(struct sdhc_host *hp, struct sdmmc_command *cmd) 1595 { 1596 struct sdhc_softc * const sc = hp->sc; 1597 uint16_t blksize = 0; 1598 uint16_t blkcount = 0; 1599 uint16_t mode; 1600 uint16_t command; 1601 uint32_t pmask; 1602 int error; 1603 1604 KASSERT(mutex_owned(&hp->intr_lock)); 1605 1606 DPRINTF(1,("%s: start cmd %d arg=%08x data=%p dlen=%d flags=%08x, status=%#x\n", 1607 HDEVNAME(hp), cmd->c_opcode, cmd->c_arg, cmd->c_data, 1608 cmd->c_datalen, cmd->c_flags, HREAD4(hp, SDHC_NINTR_STATUS))); 1609 1610 /* 1611 * The maximum block length for commands should be the minimum 1612 * of the host buffer size and the card buffer size. (1.7.2) 1613 */ 1614 1615 /* Fragment the data into proper blocks. */ 1616 if (cmd->c_datalen > 0) { 1617 blksize = MIN(cmd->c_datalen, cmd->c_blklen); 1618 blkcount = cmd->c_datalen / blksize; 1619 if (cmd->c_datalen % blksize > 0) { 1620 /* XXX: Split this command. (1.7.4) */ 1621 aprint_error_dev(sc->sc_dev, 1622 "data not a multiple of %u bytes\n", blksize); 1623 return EINVAL; 1624 } 1625 } 1626 1627 /* Check limit imposed by 9-bit block count. (1.7.2) */ 1628 if (blkcount > SDHC_BLOCK_COUNT_MAX) { 1629 aprint_error_dev(sc->sc_dev, "too much data\n"); 1630 return EINVAL; 1631 } 1632 1633 /* Prepare transfer mode register value. (2.2.5) */ 1634 mode = SDHC_BLOCK_COUNT_ENABLE; 1635 if (ISSET(cmd->c_flags, SCF_CMD_READ)) 1636 mode |= SDHC_READ_MODE; 1637 if (blkcount > 1) { 1638 mode |= SDHC_MULTI_BLOCK_MODE; 1639 /* XXX only for memory commands? */ 1640 mode |= SDHC_AUTO_CMD12_ENABLE; 1641 } 1642 if (cmd->c_dmamap != NULL && cmd->c_datalen > 0 && 1643 ISSET(hp->flags, SHF_MODE_DMAEN)) { 1644 mode |= SDHC_DMA_ENABLE; 1645 } 1646 1647 /* 1648 * Prepare command register value. (2.2.6) 1649 */ 1650 command = (cmd->c_opcode & SDHC_COMMAND_INDEX_MASK) << SDHC_COMMAND_INDEX_SHIFT; 1651 1652 if (ISSET(cmd->c_flags, SCF_RSP_CRC)) 1653 command |= SDHC_CRC_CHECK_ENABLE; 1654 if (ISSET(cmd->c_flags, SCF_RSP_IDX)) 1655 command |= SDHC_INDEX_CHECK_ENABLE; 1656 if (cmd->c_datalen > 0) 1657 command |= SDHC_DATA_PRESENT_SELECT; 1658 1659 if (!ISSET(cmd->c_flags, SCF_RSP_PRESENT)) 1660 command |= SDHC_NO_RESPONSE; 1661 else if (ISSET(cmd->c_flags, SCF_RSP_136)) 1662 command |= SDHC_RESP_LEN_136; 1663 else if (ISSET(cmd->c_flags, SCF_RSP_BSY)) 1664 command |= SDHC_RESP_LEN_48_CHK_BUSY; 1665 else 1666 command |= SDHC_RESP_LEN_48; 1667 1668 /* Wait until command and optionally data inhibit bits are clear. 
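 * SDHC_CMD_INHIBIT_CMD gates every command; SDHC_CMD_INHIBIT_DAT is also
 * required to be clear for data transfers and for busy-signalling (R1b)
 * responses, which is what pmask encodes below.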
(1.5) */ 1669 pmask = SDHC_CMD_INHIBIT_CMD; 1670 if (cmd->c_flags & (SCF_CMD_ADTC|SCF_RSP_BSY)) 1671 pmask |= SDHC_CMD_INHIBIT_DAT; 1672 error = sdhc_wait_state(hp, pmask, 0); 1673 if (error) { 1674 (void) sdhc_soft_reset(hp, SDHC_RESET_DAT|SDHC_RESET_CMD); 1675 device_printf(sc->sc_dev, "command or data phase inhibited\n"); 1676 return error; 1677 } 1678 1679 DPRINTF(1,("%s: writing cmd: blksize=%d blkcnt=%d mode=%04x cmd=%04x\n", 1680 HDEVNAME(hp), blksize, blkcount, mode, command)); 1681 1682 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED | SDHC_FLAG_USDHC)) { 1683 blksize |= (MAX(0, PAGE_SHIFT - 12) & SDHC_DMA_BOUNDARY_MASK) << 1684 SDHC_DMA_BOUNDARY_SHIFT; /* PAGE_SIZE DMA boundary */ 1685 } 1686 1687 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED)) { 1688 /* Alert the user not to remove the card. */ 1689 HSET1(hp, SDHC_HOST_CTL, SDHC_LED_ON); 1690 } 1691 1692 /* Set DMA start address. */ 1693 if (ISSET(hp->flags, SHF_USE_ADMA2_MASK) && cmd->c_data != NULL) { 1694 for (int seg = 0; seg < cmd->c_dmamap->dm_nsegs; seg++) { 1695 bus_addr_t paddr = 1696 cmd->c_dmamap->dm_segs[seg].ds_addr; 1697 uint16_t len = 1698 cmd->c_dmamap->dm_segs[seg].ds_len == 65536 ? 1699 0 : cmd->c_dmamap->dm_segs[seg].ds_len; 1700 uint16_t attr = 1701 SDHC_ADMA2_VALID | SDHC_ADMA2_ACT_TRANS; 1702 if (seg == cmd->c_dmamap->dm_nsegs - 1) { 1703 attr |= SDHC_ADMA2_END; 1704 } 1705 if (ISSET(hp->flags, SHF_USE_ADMA2_32)) { 1706 struct sdhc_adma2_descriptor32 *desc = 1707 hp->adma2; 1708 desc[seg].attribute = htole16(attr); 1709 desc[seg].length = htole16(len); 1710 desc[seg].address = htole32(paddr); 1711 } else { 1712 struct sdhc_adma2_descriptor64 *desc = 1713 hp->adma2; 1714 desc[seg].attribute = htole16(attr); 1715 desc[seg].length = htole16(len); 1716 desc[seg].address = htole32(paddr & 0xffffffff); 1717 desc[seg].address_hi = htole32( 1718 (uint64_t)paddr >> 32); 1719 } 1720 } 1721 if (ISSET(hp->flags, SHF_USE_ADMA2_32)) { 1722 struct sdhc_adma2_descriptor32 *desc = hp->adma2; 1723 desc[cmd->c_dmamap->dm_nsegs].attribute = htole16(0); 1724 } else { 1725 struct sdhc_adma2_descriptor64 *desc = hp->adma2; 1726 desc[cmd->c_dmamap->dm_nsegs].attribute = htole16(0); 1727 } 1728 bus_dmamap_sync(sc->sc_dmat, hp->adma_map, 0, PAGE_SIZE, 1729 BUS_DMASYNC_PREWRITE); 1730 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_USDHC)) { 1731 HCLR4(hp, SDHC_HOST_CTL, SDHC_USDHC_DMA_SELECT); 1732 HSET4(hp, SDHC_HOST_CTL, SDHC_USDHC_DMA_SELECT_ADMA2); 1733 } else { 1734 HCLR1(hp, SDHC_HOST_CTL, SDHC_DMA_SELECT); 1735 HSET1(hp, SDHC_HOST_CTL, SDHC_DMA_SELECT_ADMA2); 1736 } 1737 1738 const bus_addr_t desc_addr = hp->adma_map->dm_segs[0].ds_addr; 1739 1740 HWRITE4(hp, SDHC_ADMA_SYSTEM_ADDR, desc_addr & 0xffffffff); 1741 if (ISSET(hp->flags, SHF_USE_ADMA2_64)) { 1742 HWRITE4(hp, SDHC_ADMA_SYSTEM_ADDR + 4, 1743 (uint64_t)desc_addr >> 32); 1744 } 1745 } else if (ISSET(mode, SDHC_DMA_ENABLE) && 1746 !ISSET(sc->sc_flags, SDHC_FLAG_EXTERNAL_DMA)) { 1747 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_USDHC)) { 1748 HCLR4(hp, SDHC_HOST_CTL, SDHC_USDHC_DMA_SELECT); 1749 } 1750 HWRITE4(hp, SDHC_DMA_ADDR, cmd->c_dmamap->dm_segs[0].ds_addr); 1751 } 1752 1753 /* 1754 * Start a CPU data transfer. Writing to the high order byte 1755 * of the SDHC_COMMAND register triggers the SD command. 
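 * On SDHC_FLAG_32BIT_ACCESS controllers the block size/count pair and the
 * transfer mode/command pair each share one 32-bit register, so a single
 * 4-byte write sets the mode and, through its upper half, issues the
 * command; on uSDHC the mode bits live in SDHC_MIX_CTRL, so only the
 * command value goes into the upper half of SDHC_TRANSFER_MODE.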
(1.5) 1756 */ 1757 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_32BIT_ACCESS)) { 1758 HWRITE4(hp, SDHC_BLOCK_SIZE, blksize | (blkcount << 16)); 1759 HWRITE4(hp, SDHC_ARGUMENT, cmd->c_arg); 1760 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_USDHC)) { 1761 /* mode bits is in MIX_CTRL register on uSDHC */ 1762 HWRITE4(hp, SDHC_MIX_CTRL, mode | 1763 (HREAD4(hp, SDHC_MIX_CTRL) & 1764 ~(SDHC_MULTI_BLOCK_MODE | 1765 SDHC_READ_MODE | 1766 SDHC_AUTO_CMD12_ENABLE | 1767 SDHC_BLOCK_COUNT_ENABLE | 1768 SDHC_DMA_ENABLE))); 1769 HWRITE4(hp, SDHC_TRANSFER_MODE, command << 16); 1770 } else { 1771 HWRITE4(hp, SDHC_TRANSFER_MODE, mode | (command << 16)); 1772 } 1773 } else { 1774 HWRITE2(hp, SDHC_BLOCK_SIZE, blksize); 1775 HWRITE2(hp, SDHC_BLOCK_COUNT, blkcount); 1776 HWRITE4(hp, SDHC_ARGUMENT, cmd->c_arg); 1777 HWRITE2(hp, SDHC_TRANSFER_MODE, mode); 1778 HWRITE2(hp, SDHC_COMMAND, command); 1779 } 1780 1781 return 0; 1782 } 1783 1784 static void 1785 sdhc_transfer_data(struct sdhc_host *hp, struct sdmmc_command *cmd) 1786 { 1787 struct sdhc_softc *sc = hp->sc; 1788 int error; 1789 1790 KASSERT(mutex_owned(&hp->intr_lock)); 1791 1792 DPRINTF(1,("%s: data transfer: resp=%08x datalen=%u\n", HDEVNAME(hp), 1793 MMC_R1(cmd->c_resp), cmd->c_datalen)); 1794 1795 #ifdef SDHC_DEBUG 1796 /* XXX I forgot why I wanted to know when this happens :-( */ 1797 if ((cmd->c_opcode == 52 || cmd->c_opcode == 53) && 1798 ISSET(MMC_R1(cmd->c_resp), 0xcb00)) { 1799 aprint_error_dev(hp->sc->sc_dev, 1800 "CMD52/53 error response flags %#x\n", 1801 MMC_R1(cmd->c_resp) & 0xff00); 1802 } 1803 #endif 1804 1805 if (cmd->c_dmamap != NULL) { 1806 if (hp->sc->sc_vendor_transfer_data_dma != NULL) { 1807 error = hp->sc->sc_vendor_transfer_data_dma(sc, cmd); 1808 if (error == 0 && !sdhc_wait_intr(hp, 1809 SDHC_TRANSFER_COMPLETE, SDHC_DMA_TIMEOUT, false)) { 1810 DPRINTF(1,("%s: timeout\n", __func__)); 1811 error = ETIMEDOUT; 1812 } 1813 } else { 1814 error = sdhc_transfer_data_dma(hp, cmd); 1815 } 1816 } else 1817 error = sdhc_transfer_data_pio(hp, cmd); 1818 if (error) 1819 cmd->c_error = error; 1820 SET(cmd->c_flags, SCF_ITSDONE); 1821 1822 DPRINTF(1,("%s: data transfer done (error=%d)\n", 1823 HDEVNAME(hp), cmd->c_error)); 1824 } 1825 1826 static int 1827 sdhc_transfer_data_dma(struct sdhc_host *hp, struct sdmmc_command *cmd) 1828 { 1829 bus_dma_segment_t *dm_segs = cmd->c_dmamap->dm_segs; 1830 bus_addr_t posaddr; 1831 bus_addr_t segaddr; 1832 bus_size_t seglen; 1833 u_int seg = 0; 1834 int error = 0; 1835 int status; 1836 1837 KASSERT(mutex_owned(&hp->intr_lock)); 1838 KASSERT(HREAD2(hp, SDHC_NINTR_STATUS_EN) & SDHC_DMA_INTERRUPT); 1839 KASSERT(HREAD2(hp, SDHC_NINTR_SIGNAL_EN) & SDHC_DMA_INTERRUPT); 1840 KASSERT(HREAD2(hp, SDHC_NINTR_STATUS_EN) & SDHC_TRANSFER_COMPLETE); 1841 KASSERT(HREAD2(hp, SDHC_NINTR_SIGNAL_EN) & SDHC_TRANSFER_COMPLETE); 1842 1843 for (;;) { 1844 status = sdhc_wait_intr(hp, 1845 SDHC_DMA_INTERRUPT|SDHC_TRANSFER_COMPLETE, 1846 SDHC_DMA_TIMEOUT, false); 1847 1848 if (status & SDHC_TRANSFER_COMPLETE) { 1849 break; 1850 } 1851 if (!status) { 1852 DPRINTF(1,("%s: timeout\n", __func__)); 1853 error = ETIMEDOUT; 1854 break; 1855 } 1856 1857 if (ISSET(hp->flags, SHF_USE_ADMA2_MASK)) { 1858 continue; 1859 } 1860 1861 if ((status & SDHC_DMA_INTERRUPT) == 0) { 1862 continue; 1863 } 1864 1865 /* DMA Interrupt (boundary crossing) */ 1866 1867 segaddr = dm_segs[seg].ds_addr; 1868 seglen = dm_segs[seg].ds_len; 1869 posaddr = HREAD4(hp, SDHC_DMA_ADDR); 1870 1871 if ((seg == (cmd->c_dmamap->dm_nsegs-1)) && (posaddr == (segaddr + seglen))) { 1872 
			continue;
		}
		if ((posaddr >= segaddr) && (posaddr < (segaddr + seglen)))
			HWRITE4(hp, SDHC_DMA_ADDR, posaddr);
		else if ((posaddr >= segaddr) && (posaddr == (segaddr + seglen)) && (seg + 1) < cmd->c_dmamap->dm_nsegs)
			HWRITE4(hp, SDHC_DMA_ADDR, dm_segs[++seg].ds_addr);
		KASSERT(seg < cmd->c_dmamap->dm_nsegs);
	}

	if (ISSET(hp->flags, SHF_USE_ADMA2_MASK)) {
		bus_dmamap_sync(hp->sc->sc_dmat, hp->adma_map, 0,
		    PAGE_SIZE, BUS_DMASYNC_POSTWRITE);
	}

	return error;
}

static int
sdhc_transfer_data_pio(struct sdhc_host *hp, struct sdmmc_command *cmd)
{
	uint8_t *data = cmd->c_data;
	void (*pio_func)(struct sdhc_host *, uint8_t *, u_int);
	u_int len, datalen;
	u_int imask;
	u_int pmask;
	int error = 0;

	KASSERT(mutex_owned(&hp->intr_lock));

	if (ISSET(cmd->c_flags, SCF_CMD_READ)) {
		imask = SDHC_BUFFER_READ_READY;
		pmask = SDHC_BUFFER_READ_ENABLE;
		if (ISSET(hp->sc->sc_flags,
		    SDHC_FLAG_ENHANCED | SDHC_FLAG_USDHC)) {
			pio_func = esdhc_read_data_pio;
		} else {
			pio_func = sdhc_read_data_pio;
		}
	} else {
		imask = SDHC_BUFFER_WRITE_READY;
		pmask = SDHC_BUFFER_WRITE_ENABLE;
		if (ISSET(hp->sc->sc_flags,
		    SDHC_FLAG_ENHANCED | SDHC_FLAG_USDHC)) {
			pio_func = esdhc_write_data_pio;
		} else {
			pio_func = sdhc_write_data_pio;
		}
	}
	datalen = cmd->c_datalen;

	KASSERT(mutex_owned(&hp->intr_lock));
	KASSERT(HREAD2(hp, SDHC_NINTR_STATUS_EN) & imask);
	KASSERT(HREAD2(hp, SDHC_NINTR_STATUS_EN) & SDHC_TRANSFER_COMPLETE);
	KASSERT(HREAD2(hp, SDHC_NINTR_SIGNAL_EN) & SDHC_TRANSFER_COMPLETE);

	while (datalen > 0) {
		if (!ISSET(HREAD4(hp, SDHC_PRESENT_STATE), pmask)) {
			if (ISSET(hp->sc->sc_flags, SDHC_FLAG_32BIT_ACCESS)) {
				HSET4(hp, SDHC_NINTR_SIGNAL_EN, imask);
			} else {
				HSET2(hp, SDHC_NINTR_SIGNAL_EN, imask);
			}
			if (!sdhc_wait_intr(hp, imask, SDHC_BUFFER_TIMEOUT, false)) {
				DPRINTF(1,("%s: timeout\n", __func__));
				error = ETIMEDOUT;
				break;
			}

			error = sdhc_wait_state(hp, pmask, pmask);
			if (error)
				break;
		}

		len = MIN(datalen, cmd->c_blklen);
		(*pio_func)(hp, data, len);
		DPRINTF(2,("%s: pio data transfer %u @ %p\n",
		    HDEVNAME(hp), len, data));

		data += len;
		datalen -= len;
	}

	if (error == 0 && !sdhc_wait_intr(hp, SDHC_TRANSFER_COMPLETE,
	    SDHC_TRANSFER_TIMEOUT, false)) {
		DPRINTF(1,("%s: timeout for transfer\n", __func__));
		error = ETIMEDOUT;
	}

	return error;
}

static void
sdhc_read_data_pio(struct sdhc_host *hp, uint8_t *data, u_int datalen)
{

	if (((__uintptr_t)data & 3) == 0) {
		while (datalen > 3) {
			*(uint32_t *)data = le32toh(HREAD4(hp, SDHC_DATA));
			data += 4;
			datalen -= 4;
		}
		if (datalen > 1) {
			*(uint16_t *)data = le16toh(HREAD2(hp, SDHC_DATA));
			data += 2;
			datalen -= 2;
		}
		if (datalen > 0) {
			*data = HREAD1(hp, SDHC_DATA);
			data += 1;
			datalen -= 1;
		}
	} else if (((__uintptr_t)data & 1) == 0) {
		while (datalen > 1) {
			*(uint16_t *)data = le16toh(HREAD2(hp, SDHC_DATA));
			data += 2;
			datalen -= 2;
		}
		if (datalen > 0) {
			*data = HREAD1(hp, SDHC_DATA);
			data += 1;
			datalen -= 1;
		}
	} else {
		while (datalen > 0) {
			*data = HREAD1(hp, SDHC_DATA);
			data += 1;
			datalen -= 1;
		}
	}
}

static void
sdhc_write_data_pio(struct sdhc_host *hp, uint8_t *data, u_int datalen)
{

	if (((__uintptr_t)data & 3) == 0) {
		while (datalen > 3) {
			HWRITE4(hp, SDHC_DATA, htole32(*(uint32_t *)data));
			data += 4;
			datalen -= 4;
		}
		if (datalen > 1) {
			HWRITE2(hp, SDHC_DATA, htole16(*(uint16_t *)data));
			data += 2;
			datalen -= 2;
		}
		if (datalen > 0) {
			HWRITE1(hp, SDHC_DATA, *data);
			data += 1;
			datalen -= 1;
		}
	} else if (((__uintptr_t)data & 1) == 0) {
		while (datalen > 1) {
			HWRITE2(hp, SDHC_DATA, htole16(*(uint16_t *)data));
			data += 2;
			datalen -= 2;
		}
		if (datalen > 0) {
			HWRITE1(hp, SDHC_DATA, *data);
			data += 1;
			datalen -= 1;
		}
	} else {
		while (datalen > 0) {
			HWRITE1(hp, SDHC_DATA, *data);
			data += 1;
			datalen -= 1;
		}
	}
}

static void
esdhc_read_data_pio(struct sdhc_host *hp, uint8_t *data, u_int datalen)
{
	uint16_t status = HREAD2(hp, SDHC_NINTR_STATUS);
	uint32_t v;

	const size_t watermark = (HREAD4(hp, SDHC_WATERMARK_LEVEL) >>
	    SDHC_WATERMARK_READ_SHIFT) & SDHC_WATERMARK_READ_MASK;
	size_t count = 0;

	while (datalen > 3 && !ISSET(status, SDHC_TRANSFER_COMPLETE)) {
		if (count == 0) {
			/*
			 * If we've drained "watermark" words, we need to wait
			 * a little bit so the read FIFO can refill.
			 */
			sdmmc_delay(10);
			count = watermark;
		}
		v = HREAD4(hp, SDHC_DATA);
		v = le32toh(v);
		*(uint32_t *)data = v;
		data += 4;
		datalen -= 4;
		status = HREAD2(hp, SDHC_NINTR_STATUS);
		count--;
	}
	if (datalen > 0 && !ISSET(status, SDHC_TRANSFER_COMPLETE)) {
		if (count == 0) {
			sdmmc_delay(10);
		}
		v = HREAD4(hp, SDHC_DATA);
		v = le32toh(v);
		do {
			*data++ = v;
			v >>= 8;
		} while (--datalen > 0);
	}
}

static void
esdhc_write_data_pio(struct sdhc_host *hp, uint8_t *data, u_int datalen)
{
	uint16_t status = HREAD2(hp, SDHC_NINTR_STATUS);
	uint32_t v;

	const size_t watermark = (HREAD4(hp, SDHC_WATERMARK_LEVEL) >>
	    SDHC_WATERMARK_WRITE_SHIFT) & SDHC_WATERMARK_WRITE_MASK;
	size_t count = watermark;

	while (datalen > 3 && !ISSET(status, SDHC_TRANSFER_COMPLETE)) {
		if (count == 0) {
			sdmmc_delay(10);
			count = watermark;
		}
		v = *(uint32_t *)data;
		v = htole32(v);
		HWRITE4(hp, SDHC_DATA, v);
		data += 4;
		datalen -= 4;
		status = HREAD2(hp, SDHC_NINTR_STATUS);
		count--;
	}
	if (datalen > 0 && !ISSET(status, SDHC_TRANSFER_COMPLETE)) {
		if (count == 0) {
			sdmmc_delay(10);
		}
		v = *(uint32_t *)data;
		v = htole32(v);
		HWRITE4(hp, SDHC_DATA, v);
	}
}

/* Prepare for another command. */
static int
sdhc_soft_reset(struct sdhc_host *hp, int mask)
{
	int timo;

	KASSERT(mutex_owned(&hp->intr_lock));

	DPRINTF(1,("%s: software reset reg=%08x\n", HDEVNAME(hp), mask));

	/* Request the reset. */
	HWRITE1(hp, SDHC_SOFTWARE_RESET, mask);

	/*
	 * If necessary, wait for the controller to set the bits to
	 * acknowledge the reset.
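	 * Only controllers with the SDHC_FLAG_WAIT_RESET quirk do this;
	 * on those the reset bits must be observed set here before we
	 * poll for them to clear again below.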
	 */
	if (ISSET(hp->sc->sc_flags, SDHC_FLAG_WAIT_RESET) &&
	    ISSET(mask, (SDHC_RESET_DAT | SDHC_RESET_CMD))) {
		for (timo = 10000; timo > 0; timo--) {
			if (ISSET(HREAD1(hp, SDHC_SOFTWARE_RESET), mask))
				break;
			/* Short delay because I worry we may miss it... */
			sdmmc_delay(1);
		}
		if (timo == 0) {
			DPRINTF(1,("%s: timeout for reset on\n", __func__));
			return ETIMEDOUT;
		}
	}

	/*
	 * Wait for the controller to clear the bits to indicate that
	 * the reset has completed.
	 */
	for (timo = 10; timo > 0; timo--) {
		if (!ISSET(HREAD1(hp, SDHC_SOFTWARE_RESET), mask))
			break;
		sdmmc_delay(10000);
	}
	if (timo == 0) {
		DPRINTF(1,("%s: timeout reg=%08x\n", HDEVNAME(hp),
		    HREAD1(hp, SDHC_SOFTWARE_RESET)));
		return ETIMEDOUT;
	}

	if (ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED)) {
		HSET4(hp, SDHC_DMA_CTL, SDHC_DMA_SNOOP);
	}

	return 0;
}

static int
sdhc_wait_intr(struct sdhc_host *hp, int mask, int timo, bool probing)
{
	int status, error, nointr;

	KASSERT(mutex_owned(&hp->intr_lock));

	mask |= SDHC_ERROR_INTERRUPT;

	nointr = 0;
	status = hp->intr_status & mask;
	while (status == 0) {
		if (cv_timedwait(&hp->intr_cv, &hp->intr_lock, timo)
		    == EWOULDBLOCK) {
			nointr = 1;
			break;
		}
		status = hp->intr_status & mask;
	}
	error = hp->intr_error_status;

	DPRINTF(2,("%s: intr status %#x error %#x\n", HDEVNAME(hp), status,
	    error));

	hp->intr_status &= ~status;
	hp->intr_error_status &= ~error;

	if (ISSET(status, SDHC_ERROR_INTERRUPT)) {
		if (ISSET(error, SDHC_DMA_ERROR))
			device_printf(hp->sc->sc_dev,"dma error\n");
		if (ISSET(error, SDHC_ADMA_ERROR))
			device_printf(hp->sc->sc_dev,"adma error\n");
		if (ISSET(error, SDHC_AUTO_CMD12_ERROR))
			device_printf(hp->sc->sc_dev,"auto_cmd12 error\n");
		if (ISSET(error, SDHC_CURRENT_LIMIT_ERROR))
			device_printf(hp->sc->sc_dev,"current limit error\n");
		if (ISSET(error, SDHC_DATA_END_BIT_ERROR))
			device_printf(hp->sc->sc_dev,"data end bit error\n");
		if (ISSET(error, SDHC_DATA_CRC_ERROR))
			device_printf(hp->sc->sc_dev,"data crc error\n");
		if (ISSET(error, SDHC_DATA_TIMEOUT_ERROR))
			device_printf(hp->sc->sc_dev,"data timeout error\n");
		if (ISSET(error, SDHC_CMD_INDEX_ERROR))
			device_printf(hp->sc->sc_dev,"cmd index error\n");
		if (ISSET(error, SDHC_CMD_END_BIT_ERROR))
			device_printf(hp->sc->sc_dev,"cmd end bit error\n");
		if (ISSET(error, SDHC_CMD_CRC_ERROR))
			device_printf(hp->sc->sc_dev,"cmd crc error\n");
		if (ISSET(error, SDHC_CMD_TIMEOUT_ERROR)) {
			if (!probing)
				device_printf(hp->sc->sc_dev,"cmd timeout error\n");
#ifdef SDHC_DEBUG
			else if (sdhcdebug > 0)
				device_printf(hp->sc->sc_dev,"cmd timeout (expected)\n");
#endif
		}
		if ((error & ~SDHC_EINTR_STATUS_MASK) != 0)
			device_printf(hp->sc->sc_dev,"vendor error %#x\n",
			    (error & ~SDHC_EINTR_STATUS_MASK));
		if (error == 0)
			device_printf(hp->sc->sc_dev,"no error\n");

		/* Command timeout has higher priority than command complete. */
		if (ISSET(error, SDHC_CMD_TIMEOUT_ERROR))
			CLR(status, SDHC_COMMAND_COMPLETE);

		/* Transfer complete has higher priority than data timeout.
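		 * (Analogous to the command-timeout vs. command-complete
		 * case handled just above.)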
		 */
		if (ISSET(status, SDHC_TRANSFER_COMPLETE))
			CLR(error, SDHC_DATA_TIMEOUT_ERROR);
	}

	if (nointr ||
	    (ISSET(status, SDHC_ERROR_INTERRUPT) && error)) {
		if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED))
			(void)sdhc_soft_reset(hp, SDHC_RESET_CMD|SDHC_RESET_DAT);
		hp->intr_error_status = 0;
		status = 0;
	}

	return status;
}

/*
 * Established by attachment driver at interrupt priority IPL_SDMMC.
 */
int
sdhc_intr(void *arg)
{
	struct sdhc_softc *sc = (struct sdhc_softc *)arg;
	struct sdhc_host *hp;
	int done = 0;
	uint16_t status;
	uint16_t error;

	/* We got an interrupt, but we don't know from which slot. */
	for (size_t host = 0; host < sc->sc_nhosts; host++) {
		hp = sc->sc_host[host];
		if (hp == NULL)
			continue;

		mutex_enter(&hp->intr_lock);

		if (ISSET(sc->sc_flags, SDHC_FLAG_32BIT_ACCESS)) {
			/* Find out which interrupts are pending. */
			uint32_t xstatus = HREAD4(hp, SDHC_NINTR_STATUS);
			status = xstatus;
			error = xstatus >> 16;
			if (ISSET(sc->sc_flags, SDHC_FLAG_USDHC) &&
			    (xstatus & SDHC_TRANSFER_COMPLETE) &&
			    !(xstatus & SDHC_DMA_INTERRUPT)) {
				/* read again due to uSDHC errata */
				status = xstatus = HREAD4(hp,
				    SDHC_NINTR_STATUS);
				error = xstatus >> 16;
			}
			if (ISSET(sc->sc_flags,
			    SDHC_FLAG_ENHANCED | SDHC_FLAG_USDHC)) {
				if ((error & SDHC_NINTR_STATUS_MASK) != 0)
					SET(status, SDHC_ERROR_INTERRUPT);
			}
			if (error)
				xstatus |= SDHC_ERROR_INTERRUPT;
			else if (!ISSET(status, SDHC_NINTR_STATUS_MASK))
				goto next_port; /* no interrupt for us */
			/* Acknowledge the interrupts we are about to handle. */
			HWRITE4(hp, SDHC_NINTR_STATUS, xstatus);
		} else {
			/* Find out which interrupts are pending. */
			error = 0;
			status = HREAD2(hp, SDHC_NINTR_STATUS);
			if (!ISSET(status, SDHC_NINTR_STATUS_MASK))
				goto next_port; /* no interrupt for us */
			/* Acknowledge the interrupts we are about to handle. */
			HWRITE2(hp, SDHC_NINTR_STATUS, status);
			if (ISSET(status, SDHC_ERROR_INTERRUPT)) {
				/* Acknowledge error interrupts. */
				error = HREAD2(hp, SDHC_EINTR_STATUS);
				HWRITE2(hp, SDHC_EINTR_STATUS, error);
			}
		}

		DPRINTF(2,("%s: interrupt status=%x error=%x\n", HDEVNAME(hp),
		    status, error));

		/* Claim this interrupt. */
		done = 1;

		if (ISSET(status, SDHC_ERROR_INTERRUPT) &&
		    ISSET(error, SDHC_ADMA_ERROR)) {
			uint8_t adma_err = HREAD1(hp, SDHC_ADMA_ERROR_STATUS);
			printf("%s: ADMA error, status %02x\n", HDEVNAME(hp),
			    adma_err);
		}

		/*
		 * Wake up the sdmmc event thread to scan for cards.
		 */
		if (ISSET(status, SDHC_CARD_REMOVAL|SDHC_CARD_INSERTION)) {
			if (hp->sdmmc != NULL) {
				sdmmc_needs_discover(hp->sdmmc);
			}
			if (ISSET(sc->sc_flags,
			    SDHC_FLAG_ENHANCED | SDHC_FLAG_USDHC)) {
				HCLR4(hp, SDHC_NINTR_STATUS_EN,
				    status & (SDHC_CARD_REMOVAL|SDHC_CARD_INSERTION));
				HCLR4(hp, SDHC_NINTR_SIGNAL_EN,
				    status & (SDHC_CARD_REMOVAL|SDHC_CARD_INSERTION));
			}
		}

		/*
		 * Schedule re-tuning process (UHS).
		 */
		if (ISSET(status, SDHC_RETUNING_EVENT)) {
			atomic_swap_uint(&hp->tuning_timer_pending, 1);
		}

		/*
		 * Wake up the blocking process to service command
		 * related interrupt(s).
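		 * sdhc_wait_intr() sleeps on intr_cv with intr_lock held and
		 * consumes the bits latched into hp->intr_status and
		 * hp->intr_error_status below.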
2347 */ 2348 if (ISSET(status, SDHC_COMMAND_COMPLETE|SDHC_ERROR_INTERRUPT| 2349 SDHC_BUFFER_READ_READY|SDHC_BUFFER_WRITE_READY| 2350 SDHC_TRANSFER_COMPLETE|SDHC_DMA_INTERRUPT)) { 2351 hp->intr_error_status |= error; 2352 hp->intr_status |= status; 2353 if (ISSET(sc->sc_flags, 2354 SDHC_FLAG_ENHANCED | SDHC_FLAG_USDHC)) { 2355 HCLR4(hp, SDHC_NINTR_SIGNAL_EN, 2356 status & (SDHC_BUFFER_READ_READY|SDHC_BUFFER_WRITE_READY)); 2357 } 2358 cv_broadcast(&hp->intr_cv); 2359 } 2360 2361 /* 2362 * Service SD card interrupts. 2363 */ 2364 if (!ISSET(sc->sc_flags, SDHC_FLAG_ENHANCED | SDHC_FLAG_USDHC) 2365 && ISSET(status, SDHC_CARD_INTERRUPT)) { 2366 DPRINTF(0,("%s: card interrupt\n", HDEVNAME(hp))); 2367 HCLR2(hp, SDHC_NINTR_STATUS_EN, SDHC_CARD_INTERRUPT); 2368 sdmmc_card_intr(hp->sdmmc); 2369 } 2370 next_port: 2371 mutex_exit(&hp->intr_lock); 2372 } 2373 2374 return done; 2375 } 2376 2377 kmutex_t * 2378 sdhc_host_lock(struct sdhc_host *hp) 2379 { 2380 return &hp->intr_lock; 2381 } 2382 2383 #ifdef SDHC_DEBUG 2384 void 2385 sdhc_dump_regs(struct sdhc_host *hp) 2386 { 2387 2388 printf("0x%02x PRESENT_STATE: %x\n", SDHC_PRESENT_STATE, 2389 HREAD4(hp, SDHC_PRESENT_STATE)); 2390 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED)) 2391 printf("0x%02x POWER_CTL: %x\n", SDHC_POWER_CTL, 2392 HREAD1(hp, SDHC_POWER_CTL)); 2393 printf("0x%02x NINTR_STATUS: %x\n", SDHC_NINTR_STATUS, 2394 HREAD2(hp, SDHC_NINTR_STATUS)); 2395 printf("0x%02x EINTR_STATUS: %x\n", SDHC_EINTR_STATUS, 2396 HREAD2(hp, SDHC_EINTR_STATUS)); 2397 printf("0x%02x NINTR_STATUS_EN: %x\n", SDHC_NINTR_STATUS_EN, 2398 HREAD2(hp, SDHC_NINTR_STATUS_EN)); 2399 printf("0x%02x EINTR_STATUS_EN: %x\n", SDHC_EINTR_STATUS_EN, 2400 HREAD2(hp, SDHC_EINTR_STATUS_EN)); 2401 printf("0x%02x NINTR_SIGNAL_EN: %x\n", SDHC_NINTR_SIGNAL_EN, 2402 HREAD2(hp, SDHC_NINTR_SIGNAL_EN)); 2403 printf("0x%02x EINTR_SIGNAL_EN: %x\n", SDHC_EINTR_SIGNAL_EN, 2404 HREAD2(hp, SDHC_EINTR_SIGNAL_EN)); 2405 printf("0x%02x CAPABILITIES: %x\n", SDHC_CAPABILITIES, 2406 HREAD4(hp, SDHC_CAPABILITIES)); 2407 printf("0x%02x MAX_CAPABILITIES: %x\n", SDHC_MAX_CAPABILITIES, 2408 HREAD4(hp, SDHC_MAX_CAPABILITIES)); 2409 } 2410 #endif 2411