/* $NetBSD: sunxi_mmc.c,v 1.17 2017/11/15 13:53:26 jmcneill Exp $ */

/*-
 * Copyright (c) 2014-2017 Jared McNeill <jmcneill@invisible.ca>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "opt_sunximmc.h"

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sunxi_mmc.c,v 1.17 2017/11/15 13:53:26 jmcneill Exp $");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/device.h>
#include <sys/intr.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/gpio.h>

#include <dev/sdmmc/sdmmcvar.h>
#include <dev/sdmmc/sdmmcchip.h>
#include <dev/sdmmc/sdmmc_ioreg.h>

#include <dev/fdt/fdtvar.h>

#include <arm/sunxi/sunxi_mmc.h>

#ifdef SUNXI_MMC_DEBUG
static int sunxi_mmc_debug = SUNXI_MMC_DEBUG;
#define DPRINTF(dev, fmt, ...)						\
	do {								\
		if (sunxi_mmc_debug & __BIT(device_unit(dev)))		\
			device_printf((dev), fmt, ##__VA_ARGS__);	\
	} while (0)
#else
#define DPRINTF(dev, fmt, ...)	((void)0)
#endif

enum sunxi_mmc_timing {
	SUNXI_MMC_TIMING_400K,
	SUNXI_MMC_TIMING_25M,
	SUNXI_MMC_TIMING_50M,
	SUNXI_MMC_TIMING_50M_DDR,
	SUNXI_MMC_TIMING_50M_DDR_8BIT,
};

struct sunxi_mmc_delay {
	u_int	output_phase;
	u_int	sample_phase;
};

static const struct sunxi_mmc_delay sun7i_mmc_delays[] = {
	[SUNXI_MMC_TIMING_400K]		= { 180, 180 },
	[SUNXI_MMC_TIMING_25M]		= { 180,  75 },
	[SUNXI_MMC_TIMING_50M]		= {  90, 120 },
	[SUNXI_MMC_TIMING_50M_DDR]	= {  60, 120 },
	[SUNXI_MMC_TIMING_50M_DDR_8BIT]	= {  90, 180 },
};

static const struct sunxi_mmc_delay sun9i_mmc_delays[] = {
	[SUNXI_MMC_TIMING_400K]		= { 180, 180 },
	[SUNXI_MMC_TIMING_25M]		= { 180,  75 },
	[SUNXI_MMC_TIMING_50M]		= { 150, 120 },
	[SUNXI_MMC_TIMING_50M_DDR]	= {  54,  36 },
	[SUNXI_MMC_TIMING_50M_DDR_8BIT]	= {  72,  72 },
};

#define SUNXI_MMC_NDESC		16

struct sunxi_mmc_softc;

static int	sunxi_mmc_match(device_t, cfdata_t, void *);
static void	sunxi_mmc_attach(device_t, device_t, void *);
static void	sunxi_mmc_attach_i(device_t);

static int	sunxi_mmc_intr(void *);
static int	sunxi_mmc_dmabounce_setup(struct sunxi_mmc_softc *);
static int	sunxi_mmc_idma_setup(struct sunxi_mmc_softc *);

static int	sunxi_mmc_host_reset(sdmmc_chipset_handle_t);
static uint32_t	sunxi_mmc_host_ocr(sdmmc_chipset_handle_t);
static int	sunxi_mmc_host_maxblklen(sdmmc_chipset_handle_t);
static int	sunxi_mmc_card_detect(sdmmc_chipset_handle_t);
static int	sunxi_mmc_write_protect(sdmmc_chipset_handle_t);
static int	sunxi_mmc_bus_power(sdmmc_chipset_handle_t, uint32_t);
static int	sunxi_mmc_bus_clock(sdmmc_chipset_handle_t, int, bool);
static int	sunxi_mmc_bus_width(sdmmc_chipset_handle_t, int);
static int	sunxi_mmc_bus_rod(sdmmc_chipset_handle_t, int);
static int	sunxi_mmc_signal_voltage(sdmmc_chipset_handle_t, int);
static void	sunxi_mmc_exec_command(sdmmc_chipset_handle_t,
		    struct sdmmc_command *);
static void	sunxi_mmc_card_enable_intr(sdmmc_chipset_handle_t, int);
static void	sunxi_mmc_card_intr_ack(sdmmc_chipset_handle_t);

static struct sdmmc_chip_functions sunxi_mmc_chip_functions = {
	.host_reset = sunxi_mmc_host_reset,
	.host_ocr = sunxi_mmc_host_ocr,
	.host_maxblklen = sunxi_mmc_host_maxblklen,
	.card_detect = sunxi_mmc_card_detect,
	.write_protect = sunxi_mmc_write_protect,
	.bus_power = sunxi_mmc_bus_power,
	.bus_clock_ddr = sunxi_mmc_bus_clock,
	.bus_width = sunxi_mmc_bus_width,
	.bus_rod = sunxi_mmc_bus_rod,
	.signal_voltage = sunxi_mmc_signal_voltage,
	.exec_command = sunxi_mmc_exec_command,
	.card_enable_intr = sunxi_mmc_card_enable_intr,
	.card_intr_ack = sunxi_mmc_card_intr_ack,
};

struct sunxi_mmc_config {
	u_int idma_xferlen;
	u_int flags;
#define SUNXI_MMC_FLAG_CALIB_REG	0x01
#define SUNXI_MMC_FLAG_NEW_TIMINGS	0x02
#define SUNXI_MMC_FLAG_MASK_DATA0	0x04
	const struct sunxi_mmc_delay *delays;
	uint32_t dma_ftrglevel;
};

struct sunxi_mmc_softc {
	device_t sc_dev;
	bus_space_tag_t sc_bst;
	bus_space_handle_t sc_bsh;
	bus_dma_tag_t sc_dmat;
	int sc_phandle;

	void *sc_ih;
	kmutex_t sc_intr_lock;
	kcondvar_t sc_intr_cv;
	kcondvar_t sc_idst_cv;

	int sc_mmc_width;
	int sc_mmc_present;

	device_t sc_sdmmc_dev;

	struct sunxi_mmc_config *sc_config;

	bus_dma_segment_t sc_idma_segs[1];
	int sc_idma_nsegs;
	bus_size_t sc_idma_size;
	bus_dmamap_t sc_idma_map;
	int sc_idma_ndesc;
	void *sc_idma_desc;

	bus_dmamap_t sc_dmabounce_map;
	void *sc_dmabounce_buf;
	size_t sc_dmabounce_buflen;

	uint32_t sc_intr_rint;
	uint32_t sc_idma_idst;

	struct clk *sc_clk_ahb;
	struct clk *sc_clk_mmc;
	struct clk *sc_clk_output;
	struct clk *sc_clk_sample;

	struct fdtbus_reset *sc_rst_ahb;

	struct fdtbus_gpio_pin *sc_gpio_cd;
	int sc_gpio_cd_inverted;
	struct fdtbus_gpio_pin *sc_gpio_wp;
	int sc_gpio_wp_inverted;

	struct fdtbus_regulator *sc_reg_vqmmc;

	struct fdtbus_mmc_pwrseq *sc_pwrseq;

	bool sc_non_removable;
	bool sc_broken_cd;
};

CFATTACH_DECL_NEW(sunxi_mmc, sizeof(struct sunxi_mmc_softc),
	sunxi_mmc_match, sunxi_mmc_attach, NULL, NULL);

#define MMC_WRITE(sc, reg, val)	\
	bus_space_write_4((sc)->sc_bst, (sc)->sc_bsh, (reg), (val))
#define MMC_READ(sc, reg) \
	bus_space_read_4((sc)->sc_bst, (sc)->sc_bsh, (reg))

static const struct sunxi_mmc_config sun4i_a10_mmc_config = {
	.idma_xferlen = 0x2000,
	.dma_ftrglevel = 0x20070008,
	.delays = NULL,
	.flags = 0,
};

static const struct sunxi_mmc_config sun5i_a13_mmc_config = {
	.idma_xferlen = 0x10000,
	.dma_ftrglevel = 0x20070008,
	.delays = NULL,
	.flags = 0,
};

static const struct sunxi_mmc_config sun7i_a20_mmc_config = {
	.idma_xferlen = 0x2000,
	.dma_ftrglevel = 0x20070008,
	.delays = sun7i_mmc_delays,
	.flags = 0,
};

static const struct sunxi_mmc_config sun8i_a83t_emmc_config = {
	.idma_xferlen = 0x10000,
	.dma_ftrglevel = 0x20070008,
	.delays = NULL,
	.flags = SUNXI_MMC_FLAG_NEW_TIMINGS,
};

static const struct sunxi_mmc_config sun9i_a80_mmc_config = {
	.idma_xferlen = 0x10000,
	.dma_ftrglevel = 0x200f0010,
	.delays = sun9i_mmc_delays,
	.flags = 0,
};

static const struct sunxi_mmc_config sun50i_a64_mmc_config = {
	.idma_xferlen = 0x10000,
	.dma_ftrglevel = 0x20070008,
	.delays = NULL,
	.flags = SUNXI_MMC_FLAG_CALIB_REG |
		 SUNXI_MMC_FLAG_NEW_TIMINGS |
		 SUNXI_MMC_FLAG_MASK_DATA0,
};

static const struct of_compat_data compat_data[] = {
	{ "allwinner,sun4i-a10-mmc",	(uintptr_t)&sun4i_a10_mmc_config },
	{ "allwinner,sun5i-a13-mmc",	(uintptr_t)&sun5i_a13_mmc_config },
	{ "allwinner,sun7i-a20-mmc",	(uintptr_t)&sun7i_a20_mmc_config },
	{ "allwinner,sun8i-a83t-emmc",	(uintptr_t)&sun8i_a83t_emmc_config },
	{ "allwinner,sun9i-a80-mmc",	(uintptr_t)&sun9i_a80_mmc_config },
	{ "allwinner,sun50i-a64-mmc",	(uintptr_t)&sun50i_a64_mmc_config },
	{ NULL }
};

static int
sunxi_mmc_match(device_t parent, cfdata_t cf, void *aux)
{
	struct fdt_attach_args * const faa = aux;

	return of_match_compat_data(faa->faa_phandle, compat_data);
}

static void
sunxi_mmc_attach(device_t parent, device_t self, void *aux)
{
	struct sunxi_mmc_softc * const sc = device_private(self);
	struct fdt_attach_args * const faa = aux;
	const int phandle = faa->faa_phandle;
	char intrstr[128];
	bus_addr_t addr;
	bus_size_t size;

	if (fdtbus_get_reg(phandle, 0, &addr, &size) != 0) {
		aprint_error(": couldn't get registers\n");
		return;
	}

	sc->sc_clk_ahb = fdtbus_clock_get(phandle, "ahb");
	sc->sc_clk_mmc = fdtbus_clock_get(phandle, "mmc");
	sc->sc_clk_output = fdtbus_clock_get(phandle, "output");
	sc->sc_clk_sample = fdtbus_clock_get(phandle, "sample");

#if notyet
	if (sc->sc_clk_ahb == NULL || sc->sc_clk_mmc == NULL ||
	    sc->sc_clk_output == NULL || sc->sc_clk_sample == NULL) {
#else
	if (sc->sc_clk_ahb == NULL || sc->sc_clk_mmc == NULL) {
#endif
		aprint_error(": couldn't get clocks\n");
		return;
	}

	sc->sc_rst_ahb = fdtbus_reset_get(phandle, "ahb");

	sc->sc_reg_vqmmc = fdtbus_regulator_acquire(phandle, "vqmmc-supply");

	sc->sc_pwrseq = fdtbus_mmc_pwrseq_get(phandle);

	if (clk_enable(sc->sc_clk_ahb) != 0 ||
	    clk_enable(sc->sc_clk_mmc) != 0) {
		aprint_error(": couldn't enable clocks\n");
		return;
	}

	if (sc->sc_rst_ahb != NULL) {
		if (fdtbus_reset_deassert(sc->sc_rst_ahb) != 0) {
			aprint_error(": couldn't de-assert resets\n");
			return;
		}
	}

	sc->sc_dev = self;
	sc->sc_phandle = phandle;
	sc->sc_config = (void *)of_search_compatible(phandle, compat_data)->data;
	sc->sc_bst = faa->faa_bst;
	sc->sc_dmat = faa->faa_dmat;
	mutex_init(&sc->sc_intr_lock, MUTEX_DEFAULT, IPL_BIO);
	cv_init(&sc->sc_intr_cv, "awinmmcirq");
	cv_init(&sc->sc_idst_cv, "awinmmcdma");

	if (bus_space_map(sc->sc_bst, addr, size, 0, &sc->sc_bsh) != 0) {
		aprint_error(": couldn't map registers\n");
		return;
	}

	aprint_naive("\n");
	aprint_normal(": SD/MMC controller\n");

	sc->sc_gpio_cd = fdtbus_gpio_acquire(phandle, "cd-gpios",
	    GPIO_PIN_INPUT);
	sc->sc_gpio_wp = fdtbus_gpio_acquire(phandle, "wp-gpios",
	    GPIO_PIN_INPUT);

	sc->sc_gpio_cd_inverted = of_hasprop(phandle, "cd-inverted") ? 0 : 1;
	sc->sc_gpio_wp_inverted = of_hasprop(phandle, "wp-inverted") ? 0 : 1;

	sc->sc_non_removable = of_hasprop(phandle, "non-removable");
	sc->sc_broken_cd = of_hasprop(phandle, "broken-cd");

	if (sunxi_mmc_dmabounce_setup(sc) != 0 ||
	    sunxi_mmc_idma_setup(sc) != 0) {
		aprint_error_dev(self, "failed to setup DMA\n");
		return;
	}

	if (!fdtbus_intr_str(phandle, 0, intrstr, sizeof(intrstr))) {
		aprint_error_dev(self, "failed to decode interrupt\n");
		return;
	}

	sc->sc_ih = fdtbus_intr_establish(phandle, 0, IPL_BIO, FDT_INTR_MPSAFE,
	    sunxi_mmc_intr, sc);
	if (sc->sc_ih == NULL) {
		aprint_error_dev(self, "failed to establish interrupt on %s\n",
		    intrstr);
		return;
	}
	aprint_normal_dev(self, "interrupting on %s\n", intrstr);

	config_interrupts(self, sunxi_mmc_attach_i);
}

static int
sunxi_mmc_dmabounce_setup(struct sunxi_mmc_softc *sc)
{
	bus_dma_segment_t ds[1];
	int error, rseg;

	sc->sc_dmabounce_buflen = sunxi_mmc_host_maxblklen(sc);
	error = bus_dmamem_alloc(sc->sc_dmat, sc->sc_dmabounce_buflen, 0,
	    sc->sc_dmabounce_buflen, ds, 1, &rseg, BUS_DMA_WAITOK);
	if (error)
		return error;
	error = bus_dmamem_map(sc->sc_dmat, ds, 1, sc->sc_dmabounce_buflen,
	    &sc->sc_dmabounce_buf, BUS_DMA_WAITOK);
	if (error)
		goto free;
	error = bus_dmamap_create(sc->sc_dmat, sc->sc_dmabounce_buflen, 1,
	    sc->sc_dmabounce_buflen, 0, BUS_DMA_WAITOK, &sc->sc_dmabounce_map);
	if (error)
		goto unmap;
	error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmabounce_map,
	    sc->sc_dmabounce_buf, sc->sc_dmabounce_buflen, NULL,
	    BUS_DMA_WAITOK);
	if (error)
		goto destroy;
	return 0;

destroy:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmabounce_map);
unmap:
	bus_dmamem_unmap(sc->sc_dmat, sc->sc_dmabounce_buf,
	    sc->sc_dmabounce_buflen);
free:
	bus_dmamem_free(sc->sc_dmat, ds, rseg);
	return error;
}

static int
sunxi_mmc_idma_setup(struct sunxi_mmc_softc *sc)
{
	int error;

	sc->sc_idma_ndesc = SUNXI_MMC_NDESC;
	sc->sc_idma_size = sizeof(struct sunxi_mmc_idma_descriptor) *
	    sc->sc_idma_ndesc;
	error = bus_dmamem_alloc(sc->sc_dmat, sc->sc_idma_size, 0,
	    sc->sc_idma_size, sc->sc_idma_segs, 1,
	    &sc->sc_idma_nsegs, BUS_DMA_WAITOK);
	if (error)
		return error;
	error = bus_dmamem_map(sc->sc_dmat, sc->sc_idma_segs,
	    sc->sc_idma_nsegs, sc->sc_idma_size,
	    &sc->sc_idma_desc, BUS_DMA_WAITOK);
	if (error)
		goto free;
	error = bus_dmamap_create(sc->sc_dmat, sc->sc_idma_size, 1,
	    sc->sc_idma_size, 0, BUS_DMA_WAITOK, &sc->sc_idma_map);
	if (error)
		goto unmap;
	error = bus_dmamap_load(sc->sc_dmat, sc->sc_idma_map,
	    sc->sc_idma_desc, sc->sc_idma_size, NULL, BUS_DMA_WAITOK);
	if (error)
		goto destroy;
	return 0;

destroy:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_idma_map);
unmap:
	bus_dmamem_unmap(sc->sc_dmat, sc->sc_idma_desc, sc->sc_idma_size);
free:
	bus_dmamem_free(sc->sc_dmat, sc->sc_idma_segs, sc->sc_idma_nsegs);
	return error;
}

static int
sunxi_mmc_set_clock(struct sunxi_mmc_softc *sc, u_int freq, bool ddr)
{
	const struct sunxi_mmc_delay *delays;
	int error, timing;

	if (freq <= 400) {
		timing = SUNXI_MMC_TIMING_400K;
	} else if (freq <= 25000) {
		timing = SUNXI_MMC_TIMING_25M;
	} else if (freq <= 52000) {
		if (ddr) {
			timing = sc->sc_mmc_width == 8 ?
			    SUNXI_MMC_TIMING_50M_DDR_8BIT :
			    SUNXI_MMC_TIMING_50M_DDR;
		} else {
			timing = SUNXI_MMC_TIMING_50M;
		}
	} else
		return EINVAL;

	error = clk_set_rate(sc->sc_clk_mmc, (freq * 1000) << ddr);
	if (error != 0)
		return error;

	if (sc->sc_config->delays == NULL)
		return 0;

	delays = &sc->sc_config->delays[timing];

	if (sc->sc_clk_sample) {
		error = clk_set_rate(sc->sc_clk_sample, delays->sample_phase);
		if (error != 0)
			return error;
	}
	if (sc->sc_clk_output) {
		error = clk_set_rate(sc->sc_clk_output, delays->output_phase);
		if (error != 0)
			return error;
	}

	return 0;
}

static void
sunxi_mmc_attach_i(device_t self)
{
	struct sunxi_mmc_softc *sc = device_private(self);
	struct sdmmcbus_attach_args saa;
	uint32_t width;

	if (sc->sc_pwrseq)
		fdtbus_mmc_pwrseq_pre_power_on(sc->sc_pwrseq);

	sunxi_mmc_host_reset(sc);
	sunxi_mmc_bus_width(sc, 1);
	sunxi_mmc_set_clock(sc, 400, false);

	if (sc->sc_pwrseq)
		fdtbus_mmc_pwrseq_post_power_on(sc->sc_pwrseq);

	if (of_getprop_uint32(sc->sc_phandle, "bus-width", &width) != 0)
		width = 4;

	memset(&saa, 0, sizeof(saa));
	saa.saa_busname = "sdmmc";
	saa.saa_sct = &sunxi_mmc_chip_functions;
	saa.saa_sch = sc;
	saa.saa_dmat = sc->sc_dmat;
	saa.saa_clkmin = 400;
	saa.saa_clkmax = 52000;
	saa.saa_caps = SMC_CAPS_DMA |
		       SMC_CAPS_MULTI_SEG_DMA |
		       SMC_CAPS_AUTO_STOP |
		       SMC_CAPS_SD_HIGHSPEED |
		       SMC_CAPS_MMC_HIGHSPEED |
		       SMC_CAPS_MMC_DDR52 |
		       SMC_CAPS_POLLING;
	if (width == 4)
		saa.saa_caps |= SMC_CAPS_4BIT_MODE;
	if (width == 8)
		saa.saa_caps |= SMC_CAPS_8BIT_MODE;

	if (sc->sc_gpio_cd)
		saa.saa_caps |= SMC_CAPS_POLL_CARD_DET;

	sc->sc_sdmmc_dev = config_found(self, &saa, NULL);
}

static int
sunxi_mmc_intr(void *priv)
{
	struct sunxi_mmc_softc *sc = priv;
	uint32_t idst, rint;

	mutex_enter(&sc->sc_intr_lock);
	idst = MMC_READ(sc, SUNXI_MMC_IDST);
	rint = MMC_READ(sc, SUNXI_MMC_RINT);
	if (!idst && !rint) {
		mutex_exit(&sc->sc_intr_lock);
		return 0;
	}
	MMC_WRITE(sc, SUNXI_MMC_IDST, idst);
	MMC_WRITE(sc, SUNXI_MMC_RINT, rint);

	DPRINTF(sc->sc_dev, "mmc intr idst=%08X rint=%08X\n",
	    idst, rint);

	if (idst != 0) {
		sc->sc_idma_idst |= idst;
		cv_broadcast(&sc->sc_idst_cv);
	}

	if ((rint & ~SUNXI_MMC_INT_SDIO_INT) != 0) {
		sc->sc_intr_rint |= (rint & ~SUNXI_MMC_INT_SDIO_INT);
		cv_broadcast(&sc->sc_intr_cv);
	}

	if ((rint & SUNXI_MMC_INT_SDIO_INT) != 0) {
		sdmmc_card_intr(sc->sc_sdmmc_dev);
	}

	mutex_exit(&sc->sc_intr_lock);

	return 1;
}

static int
sunxi_mmc_wait_rint(struct sunxi_mmc_softc *sc, uint32_t mask,
    int timeout, bool poll)
{
	int retry;
	int error;

	KASSERT(mutex_owned(&sc->sc_intr_lock));

	if (sc->sc_intr_rint & mask)
		return 0;

	if (poll)
		retry = timeout / hz * 1000;
	else
		retry = timeout / hz;

	while (retry > 0) {
		if (poll) {
			sc->sc_intr_rint |= MMC_READ(sc, SUNXI_MMC_RINT);
		} else {
			error = cv_timedwait(&sc->sc_intr_cv,
			    &sc->sc_intr_lock, hz);
			if (error && error != EWOULDBLOCK)
				return error;
		}
		if (sc->sc_intr_rint & mask)
			return 0;
		if (poll)
			delay(1000);
		--retry;
	}

	return ETIMEDOUT;
}

static int
sunxi_mmc_host_reset(sdmmc_chipset_handle_t sch)
{
	struct sunxi_mmc_softc *sc = sch;
	int retry = 1000;

	DPRINTF(sc->sc_dev, "host reset\n");

	MMC_WRITE(sc, SUNXI_MMC_GCTRL,
	    MMC_READ(sc, SUNXI_MMC_GCTRL) | SUNXI_MMC_GCTRL_RESET);
	while (--retry > 0) {
		if (!(MMC_READ(sc, SUNXI_MMC_GCTRL) & SUNXI_MMC_GCTRL_RESET))
			break;
		delay(100);
	}

	MMC_WRITE(sc, SUNXI_MMC_TIMEOUT, 0xffffffff);

	MMC_WRITE(sc, SUNXI_MMC_IMASK,
	    SUNXI_MMC_INT_CMD_DONE | SUNXI_MMC_INT_ERROR |
	    SUNXI_MMC_INT_DATA_OVER | SUNXI_MMC_INT_AUTO_CMD_DONE);

	MMC_WRITE(sc, SUNXI_MMC_GCTRL,
	    MMC_READ(sc, SUNXI_MMC_GCTRL) | SUNXI_MMC_GCTRL_INTEN);

	return 0;
}

static uint32_t
sunxi_mmc_host_ocr(sdmmc_chipset_handle_t sch)
{
	return MMC_OCR_3_2V_3_3V | MMC_OCR_3_3V_3_4V | MMC_OCR_HCS;
}

static int
sunxi_mmc_host_maxblklen(sdmmc_chipset_handle_t sch)
{
	return 8192;
}

static int
sunxi_mmc_card_detect(sdmmc_chipset_handle_t sch)
{
	struct sunxi_mmc_softc *sc = sch;

	if (sc->sc_non_removable || sc->sc_broken_cd) {
		/*
		 * Non-removable or broken card detect flag set in
		 * DT, assume always present
		 */
		return 1;
	} else if (sc->sc_gpio_cd != NULL) {
		/* Use card detect GPIO */
		int v = 0, i;
		for (i = 0; i < 5; i++) {
			v += (fdtbus_gpio_read(sc->sc_gpio_cd) ^
			    sc->sc_gpio_cd_inverted);
			delay(1000);
		}
		if (v == 5)
			sc->sc_mmc_present = 0;
		else if (v == 0)
			sc->sc_mmc_present = 1;
		return sc->sc_mmc_present;
	} else {
		/* Use CARD_PRESENT field of SD_STATUS register */
		const uint32_t present = MMC_READ(sc, SUNXI_MMC_STATUS) &
		    SUNXI_MMC_STATUS_CARD_PRESENT;
		return present != 0;
	}
}

static int
sunxi_mmc_write_protect(sdmmc_chipset_handle_t sch)
{
	struct sunxi_mmc_softc *sc = sch;

	if (sc->sc_gpio_wp == NULL) {
		return 0;	/* no write protect pin, assume rw */
	} else {
		return fdtbus_gpio_read(sc->sc_gpio_wp) ^
		    sc->sc_gpio_wp_inverted;
	}
}

static int
sunxi_mmc_bus_power(sdmmc_chipset_handle_t sch, uint32_t ocr)
{
	return 0;
}

static int
sunxi_mmc_update_clock(struct sunxi_mmc_softc *sc)
{
	uint32_t cmd;
	int retry;

	DPRINTF(sc->sc_dev, "update clock\n");

	cmd = SUNXI_MMC_CMD_START |
	      SUNXI_MMC_CMD_UPCLK_ONLY |
	      SUNXI_MMC_CMD_WAIT_PRE_OVER;
	MMC_WRITE(sc, SUNXI_MMC_CMD, cmd);
	retry = 0xfffff;
	while (--retry > 0) {
		if (!(MMC_READ(sc, SUNXI_MMC_CMD) & SUNXI_MMC_CMD_START))
			break;
		delay(10);
	}

	if (retry == 0) {
		aprint_error_dev(sc->sc_dev, "timeout updating clock\n");
		DPRINTF(sc->sc_dev, "GCTRL: 0x%08x\n",
		    MMC_READ(sc, SUNXI_MMC_GCTRL));
		DPRINTF(sc->sc_dev, "CLKCR: 0x%08x\n",
		    MMC_READ(sc, SUNXI_MMC_CLKCR));
		DPRINTF(sc->sc_dev, "TIMEOUT: 0x%08x\n",
		    MMC_READ(sc, SUNXI_MMC_TIMEOUT));
		DPRINTF(sc->sc_dev, "WIDTH: 0x%08x\n",
		    MMC_READ(sc, SUNXI_MMC_WIDTH));
		DPRINTF(sc->sc_dev, "CMD: 0x%08x\n",
		    MMC_READ(sc, SUNXI_MMC_CMD));
		DPRINTF(sc->sc_dev, "MINT: 0x%08x\n",
		    MMC_READ(sc, SUNXI_MMC_MINT));
		DPRINTF(sc->sc_dev, "RINT: 0x%08x\n",
		    MMC_READ(sc, SUNXI_MMC_RINT));
		DPRINTF(sc->sc_dev, "STATUS: 0x%08x\n",
		    MMC_READ(sc, SUNXI_MMC_STATUS));
		return ETIMEDOUT;
	}

	return 0;
}

static int
sunxi_mmc_bus_clock(sdmmc_chipset_handle_t sch, int freq, bool ddr)
{
	struct sunxi_mmc_softc *sc = sch;
	uint32_t clkcr, gctrl, ntsr;
	const u_int flags = sc->sc_config->flags;

	clkcr = MMC_READ(sc, SUNXI_MMC_CLKCR);
	if (clkcr & SUNXI_MMC_CLKCR_CARDCLKON) {
		/* Stop the card clock, masking DAT0 during the change
		 * on controllers that require it. */
		clkcr &= ~SUNXI_MMC_CLKCR_CARDCLKON;
		if (flags & SUNXI_MMC_FLAG_MASK_DATA0)
			clkcr |= SUNXI_MMC_CLKCR_MASK_DATA0;
		MMC_WRITE(sc, SUNXI_MMC_CLKCR, clkcr);
		if (sunxi_mmc_update_clock(sc) != 0)
			return 1;
		if (flags & SUNXI_MMC_FLAG_MASK_DATA0) {
			clkcr = MMC_READ(sc, SUNXI_MMC_CLKCR);
			clkcr &= ~SUNXI_MMC_CLKCR_MASK_DATA0;
			MMC_WRITE(sc, SUNXI_MMC_CLKCR, clkcr);
		}
	}

	if (freq) {

		clkcr &= ~SUNXI_MMC_CLKCR_DIV;
		clkcr |= __SHIFTIN(ddr, SUNXI_MMC_CLKCR_DIV);
		MMC_WRITE(sc, SUNXI_MMC_CLKCR, clkcr);

		if (flags & SUNXI_MMC_FLAG_NEW_TIMINGS) {
			ntsr = MMC_READ(sc, SUNXI_MMC_NTSR);
			ntsr |= SUNXI_MMC_NTSR_MODE_SELECT;
			MMC_WRITE(sc, SUNXI_MMC_NTSR, ntsr);
		}

		if (flags & SUNXI_MMC_FLAG_CALIB_REG)
			MMC_WRITE(sc, SUNXI_MMC_SAMP_DL, SUNXI_MMC_SAMP_DL_SW_EN);

		if (sunxi_mmc_update_clock(sc) != 0)
			return 1;

		gctrl = MMC_READ(sc, SUNXI_MMC_GCTRL);
		if (ddr)
			gctrl |= SUNXI_MMC_GCTRL_DDR_MODE;
		else
			gctrl &= ~SUNXI_MMC_GCTRL_DDR_MODE;
		MMC_WRITE(sc, SUNXI_MMC_GCTRL, gctrl);

		if (sunxi_mmc_set_clock(sc, freq, ddr) != 0)
			return 1;

		clkcr |= SUNXI_MMC_CLKCR_CARDCLKON;
		if (flags & SUNXI_MMC_FLAG_MASK_DATA0)
			clkcr |= SUNXI_MMC_CLKCR_MASK_DATA0;
		MMC_WRITE(sc, SUNXI_MMC_CLKCR, clkcr);
		if (sunxi_mmc_update_clock(sc) != 0)
			return 1;
		if (flags & SUNXI_MMC_FLAG_MASK_DATA0) {
			clkcr = MMC_READ(sc, SUNXI_MMC_CLKCR);
			clkcr &= ~SUNXI_MMC_CLKCR_MASK_DATA0;
			MMC_WRITE(sc, SUNXI_MMC_CLKCR, clkcr);
		}
	}

	return 0;
}

static int
sunxi_mmc_bus_width(sdmmc_chipset_handle_t sch, int width)
{
	struct sunxi_mmc_softc *sc = sch;

	DPRINTF(sc->sc_dev, "width = %d\n", width);

	switch (width) {
	case 1:
		MMC_WRITE(sc, SUNXI_MMC_WIDTH, SUNXI_MMC_WIDTH_1);
		break;
	case 4:
		MMC_WRITE(sc, SUNXI_MMC_WIDTH, SUNXI_MMC_WIDTH_4);
		break;
	case 8:
		MMC_WRITE(sc, SUNXI_MMC_WIDTH, SUNXI_MMC_WIDTH_8);
		break;
	default:
		return 1;
	}

	sc->sc_mmc_width = width;

	return 0;
}

static int
sunxi_mmc_bus_rod(sdmmc_chipset_handle_t sch, int on)
{
	return -1;
}

static int
sunxi_mmc_signal_voltage(sdmmc_chipset_handle_t sch, int signal_voltage)
{
	struct sunxi_mmc_softc *sc = sch;
	u_int uvol;
	int error;

	if (sc->sc_reg_vqmmc == NULL)
		return 0;

	switch (signal_voltage) {
	case SDMMC_SIGNAL_VOLTAGE_330:
		uvol = 3300000;
		break;
	case SDMMC_SIGNAL_VOLTAGE_180:
		uvol = 1800000;
		break;
	default:
		return EINVAL;
	}

	error = fdtbus_regulator_set_voltage(sc->sc_reg_vqmmc, uvol, uvol);
	if (error != 0)
		return error;

	return fdtbus_regulator_enable(sc->sc_reg_vqmmc);
}

static int
sunxi_mmc_dma_prepare(struct sunxi_mmc_softc *sc, struct sdmmc_command *cmd)
{
	struct sunxi_mmc_idma_descriptor *dma = sc->sc_idma_desc;
	bus_addr_t desc_paddr = sc->sc_idma_map->dm_segs[0].ds_addr;
	bus_dmamap_t map;
	bus_size_t off;
	int desc, resid, seg;
	uint32_t val;

	/*
	 * If the command includes a dma map use it, otherwise we need to
	 * bounce. This can happen for SDIO IO_RW_EXTENDED (CMD53) commands.
	 */
	if (cmd->c_dmamap) {
		map = cmd->c_dmamap;
	} else {
		if (cmd->c_datalen > sc->sc_dmabounce_buflen)
			return E2BIG;
		map = sc->sc_dmabounce_map;

		if (ISSET(cmd->c_flags, SCF_CMD_READ)) {
			memset(sc->sc_dmabounce_buf, 0, cmd->c_datalen);
			bus_dmamap_sync(sc->sc_dmat, sc->sc_dmabounce_map,
			    0, cmd->c_datalen, BUS_DMASYNC_PREREAD);
		} else {
			memcpy(sc->sc_dmabounce_buf, cmd->c_data,
			    cmd->c_datalen);
			bus_dmamap_sync(sc->sc_dmat, sc->sc_dmabounce_map,
			    0, cmd->c_datalen, BUS_DMASYNC_PREWRITE);
		}
	}

	desc = 0;
	for (seg = 0; seg < map->dm_nsegs; seg++) {
		bus_addr_t paddr = map->dm_segs[seg].ds_addr;
		bus_size_t len = map->dm_segs[seg].ds_len;
		resid = min(len, cmd->c_resid);
		off = 0;
		while (resid > 0) {
			if (desc == sc->sc_idma_ndesc)
				break;
			len = min(sc->sc_config->idma_xferlen, resid);
			dma[desc].dma_buf_size = htole32(len);
			dma[desc].dma_buf_addr = htole32(paddr + off);
			dma[desc].dma_config = htole32(SUNXI_MMC_IDMA_CONFIG_CH |
			    SUNXI_MMC_IDMA_CONFIG_OWN);
			cmd->c_resid -= len;
			resid -= len;
			off += len;
			if (desc == 0) {
				dma[desc].dma_config |= htole32(SUNXI_MMC_IDMA_CONFIG_FD);
			}
			if (cmd->c_resid == 0) {
				dma[desc].dma_config |= htole32(SUNXI_MMC_IDMA_CONFIG_LD);
				dma[desc].dma_config |= htole32(SUNXI_MMC_IDMA_CONFIG_ER);
				dma[desc].dma_next = 0;
			} else {
				dma[desc].dma_config |=
				    htole32(SUNXI_MMC_IDMA_CONFIG_DIC);
				dma[desc].dma_next = htole32(
				    desc_paddr + ((desc+1) *
				    sizeof(struct sunxi_mmc_idma_descriptor)));
			}
			++desc;
		}
	}
	if (desc == sc->sc_idma_ndesc) {
		aprint_error_dev(sc->sc_dev,
		    "not enough descriptors for %d byte transfer!\n",
		    cmd->c_datalen);
		return EIO;
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_idma_map, 0,
	    sc->sc_idma_size, BUS_DMASYNC_PREWRITE);

	sc->sc_idma_idst = 0;

	val = MMC_READ(sc, SUNXI_MMC_GCTRL);
	val |= SUNXI_MMC_GCTRL_DMAEN;
	val |= SUNXI_MMC_GCTRL_INTEN;
	MMC_WRITE(sc, SUNXI_MMC_GCTRL, val);
	val |= SUNXI_MMC_GCTRL_DMARESET;
	MMC_WRITE(sc, SUNXI_MMC_GCTRL, val);
	MMC_WRITE(sc, SUNXI_MMC_DMAC, SUNXI_MMC_DMAC_SOFTRESET);
	MMC_WRITE(sc, SUNXI_MMC_DMAC,
	    SUNXI_MMC_DMAC_IDMA_ON|SUNXI_MMC_DMAC_FIX_BURST);
	val = MMC_READ(sc, SUNXI_MMC_IDIE);
	val &= ~(SUNXI_MMC_IDST_RECEIVE_INT|SUNXI_MMC_IDST_TRANSMIT_INT);
	if (ISSET(cmd->c_flags, SCF_CMD_READ))
		val |= SUNXI_MMC_IDST_RECEIVE_INT;
	else
		val |= SUNXI_MMC_IDST_TRANSMIT_INT;
	MMC_WRITE(sc, SUNXI_MMC_IDIE, val);
	MMC_WRITE(sc, SUNXI_MMC_DLBA, desc_paddr);
	MMC_WRITE(sc, SUNXI_MMC_FTRGLEVEL, sc->sc_config->dma_ftrglevel);

	return 0;
}

static void
sunxi_mmc_dma_complete(struct sunxi_mmc_softc *sc, struct sdmmc_command *cmd)
{
	bus_dmamap_sync(sc->sc_dmat, sc->sc_idma_map, 0,
	    sc->sc_idma_size, BUS_DMASYNC_POSTWRITE);

	if (cmd->c_dmamap == NULL) {
		if (ISSET(cmd->c_flags, SCF_CMD_READ)) {
			bus_dmamap_sync(sc->sc_dmat, sc->sc_dmabounce_map,
			    0, cmd->c_datalen, BUS_DMASYNC_POSTREAD);
			memcpy(cmd->c_data, sc->sc_dmabounce_buf,
			    cmd->c_datalen);
		} else {
			bus_dmamap_sync(sc->sc_dmat, sc->sc_dmabounce_map,
			    0, cmd->c_datalen, BUS_DMASYNC_POSTWRITE);
		}
	}
}

static void
sunxi_mmc_exec_command(sdmmc_chipset_handle_t sch, struct sdmmc_command *cmd)
{
	struct sunxi_mmc_softc *sc = sch;
	uint32_t cmdval = SUNXI_MMC_CMD_START;
	const bool poll = (cmd->c_flags & SCF_POLL) != 0;
	int retry;

	DPRINTF(sc->sc_dev,
	    "opcode %d flags 0x%x data %p datalen %d blklen %d poll %d\n",
	    cmd->c_opcode, cmd->c_flags, cmd->c_data, cmd->c_datalen,
	    cmd->c_blklen, poll);

	mutex_enter(&sc->sc_intr_lock);

	if (cmd->c_opcode == 0)
		cmdval |= SUNXI_MMC_CMD_SEND_INIT_SEQ;
	if (cmd->c_flags & SCF_RSP_PRESENT)
		cmdval |= SUNXI_MMC_CMD_RSP_EXP;
	if (cmd->c_flags & SCF_RSP_136)
		cmdval |= SUNXI_MMC_CMD_LONG_RSP;
	if (cmd->c_flags & SCF_RSP_CRC)
		cmdval |= SUNXI_MMC_CMD_CHECK_RSP_CRC;

	if (cmd->c_datalen > 0) {
		unsigned int nblks;

		cmdval |= SUNXI_MMC_CMD_DATA_EXP | SUNXI_MMC_CMD_WAIT_PRE_OVER;
		if (!ISSET(cmd->c_flags, SCF_CMD_READ)) {
			cmdval |= SUNXI_MMC_CMD_WRITE;
		}

		nblks = cmd->c_datalen / cmd->c_blklen;
		if (nblks == 0 || (cmd->c_datalen % cmd->c_blklen) != 0)
			++nblks;

		if (nblks > 1) {
			cmdval |= SUNXI_MMC_CMD_SEND_AUTO_STOP;
		}

		MMC_WRITE(sc, SUNXI_MMC_BLKSZ, cmd->c_blklen);
		MMC_WRITE(sc, SUNXI_MMC_BYTECNT, nblks * cmd->c_blklen);
	}

	sc->sc_intr_rint = 0;

	MMC_WRITE(sc, SUNXI_MMC_A12A,
	    (cmdval & SUNXI_MMC_CMD_SEND_AUTO_STOP) ? 0 : 0xffff);

	MMC_WRITE(sc, SUNXI_MMC_ARG, cmd->c_arg);

	DPRINTF(sc->sc_dev, "cmdval = %08x\n", cmdval);

	if (cmd->c_datalen == 0) {
		MMC_WRITE(sc, SUNXI_MMC_CMD, cmdval | cmd->c_opcode);
	} else {
		cmd->c_resid = cmd->c_datalen;
		cmd->c_error = sunxi_mmc_dma_prepare(sc, cmd);
		MMC_WRITE(sc, SUNXI_MMC_CMD, cmdval | cmd->c_opcode);
		if (cmd->c_error == 0) {
			const uint32_t idst_mask =
			    SUNXI_MMC_IDST_ERROR | SUNXI_MMC_IDST_COMPLETE;
			retry = 10;
			while ((sc->sc_idma_idst & idst_mask) == 0) {
				if (retry-- == 0) {
					cmd->c_error = ETIMEDOUT;
					break;
				}
				cv_timedwait(&sc->sc_idst_cv,
				    &sc->sc_intr_lock, hz);
			}
		}
		sunxi_mmc_dma_complete(sc, cmd);
		if (sc->sc_idma_idst & SUNXI_MMC_IDST_ERROR) {
			cmd->c_error = EIO;
		} else if (!(sc->sc_idma_idst & SUNXI_MMC_IDST_COMPLETE)) {
			cmd->c_error = ETIMEDOUT;
		}
		if (cmd->c_error) {
			DPRINTF(sc->sc_dev,
			    "xfer failed, error %d\n", cmd->c_error);
			goto done;
		}
	}

	cmd->c_error = sunxi_mmc_wait_rint(sc,
	    SUNXI_MMC_INT_ERROR|SUNXI_MMC_INT_CMD_DONE, hz * 10, poll);
	if (cmd->c_error == 0 && (sc->sc_intr_rint & SUNXI_MMC_INT_ERROR)) {
		if (sc->sc_intr_rint & SUNXI_MMC_INT_RESP_TIMEOUT) {
			cmd->c_error = ETIMEDOUT;
		} else {
			cmd->c_error = EIO;
		}
	}
	if (cmd->c_error) {
		DPRINTF(sc->sc_dev,
		    "cmd failed, error %d\n", cmd->c_error);
		goto done;
	}

	if (cmd->c_datalen > 0) {
		cmd->c_error = sunxi_mmc_wait_rint(sc,
		    SUNXI_MMC_INT_ERROR|
		    SUNXI_MMC_INT_AUTO_CMD_DONE|
		    SUNXI_MMC_INT_DATA_OVER,
		    hz*10, poll);
		if (cmd->c_error == 0 &&
		    (sc->sc_intr_rint & SUNXI_MMC_INT_ERROR)) {
			cmd->c_error = ETIMEDOUT;
		}
		if (cmd->c_error) {
			DPRINTF(sc->sc_dev,
			    "data timeout, rint = %08x\n",
			    sc->sc_intr_rint);
			cmd->c_error = ETIMEDOUT;
			goto done;
		}
	}

	if (cmd->c_flags & SCF_RSP_PRESENT) {
		if (cmd->c_flags & SCF_RSP_136) {
			cmd->c_resp[0] = MMC_READ(sc, SUNXI_MMC_RESP0);
			cmd->c_resp[1] = MMC_READ(sc, SUNXI_MMC_RESP1);
			cmd->c_resp[2] = MMC_READ(sc, SUNXI_MMC_RESP2);
			cmd->c_resp[3] = MMC_READ(sc, SUNXI_MMC_RESP3);
			if (cmd->c_flags & SCF_RSP_CRC) {
				cmd->c_resp[0] = (cmd->c_resp[0] >> 8) |
				    (cmd->c_resp[1] << 24);
				cmd->c_resp[1] = (cmd->c_resp[1] >> 8) |
				    (cmd->c_resp[2] << 24);
				cmd->c_resp[2] = (cmd->c_resp[2] >> 8) |
				    (cmd->c_resp[3] << 24);
				cmd->c_resp[3] = (cmd->c_resp[3] >> 8);
			}
		} else {
			cmd->c_resp[0] = MMC_READ(sc, SUNXI_MMC_RESP0);
		}
	}

done:
	cmd->c_flags |= SCF_ITSDONE;
	mutex_exit(&sc->sc_intr_lock);

	if (cmd->c_error) {
		DPRINTF(sc->sc_dev, "i/o error %d\n", cmd->c_error);
		MMC_WRITE(sc, SUNXI_MMC_GCTRL,
		    MMC_READ(sc, SUNXI_MMC_GCTRL) |
		    SUNXI_MMC_GCTRL_DMARESET | SUNXI_MMC_GCTRL_FIFORESET);
		for (retry = 0; retry < 1000; retry++) {
			if (!(MMC_READ(sc, SUNXI_MMC_GCTRL) & SUNXI_MMC_GCTRL_RESET))
				break;
			delay(10);
		}
		sunxi_mmc_update_clock(sc);
	}

	MMC_WRITE(sc, SUNXI_MMC_GCTRL,
	    MMC_READ(sc, SUNXI_MMC_GCTRL) | SUNXI_MMC_GCTRL_FIFORESET);
}

static void
sunxi_mmc_card_enable_intr(sdmmc_chipset_handle_t sch, int enable)
{
	struct sunxi_mmc_softc *sc = sch;
	uint32_t imask;

	imask = MMC_READ(sc, SUNXI_MMC_IMASK);
	if (enable)
		imask |= SUNXI_MMC_INT_SDIO_INT;
	else
		imask &= ~SUNXI_MMC_INT_SDIO_INT;
	MMC_WRITE(sc, SUNXI_MMC_IMASK, imask);
}

static void
sunxi_mmc_card_intr_ack(sdmmc_chipset_handle_t sch)
{
	struct sunxi_mmc_softc *sc = sch;

	MMC_WRITE(sc, SUNXI_MMC_RINT, SUNXI_MMC_INT_SDIO_INT);
}