1 /* $NetBSD: sunxi_mmc.c,v 1.48 2021/08/07 16:18:45 thorpej Exp $ */
2
3 /*-
4 * Copyright (c) 2014-2017 Jared McNeill <jmcneill@invisible.ca>
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
21 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
22 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
23 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
24 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE.
27 */
28
29 #include "opt_sunximmc.h"
30
31 #include <sys/cdefs.h>
32 __KERNEL_RCSID(0, "$NetBSD: sunxi_mmc.c,v 1.48 2021/08/07 16:18:45 thorpej Exp $");
33
34 #include <sys/param.h>
35 #include <sys/bus.h>
36 #include <sys/device.h>
37 #include <sys/intr.h>
38 #include <sys/systm.h>
39 #include <sys/kernel.h>
40 #include <sys/gpio.h>
41
42 #include <dev/sdmmc/sdmmcvar.h>
43 #include <dev/sdmmc/sdmmcchip.h>
44 #include <dev/sdmmc/sdmmc_ioreg.h>
45
46 #include <dev/fdt/fdtvar.h>
47
48 #include <arm/sunxi/sunxi_mmc.h>
49
50 #ifdef SUNXI_MMC_DEBUG
51 static int sunxi_mmc_debug = SUNXI_MMC_DEBUG;
52 #define DPRINTF(dev, fmt, ...) \
53 do { \
54 if (sunxi_mmc_debug & __BIT(device_unit(dev))) \
55 device_printf((dev), fmt, ##__VA_ARGS__); \
56 } while (0)
57 #else
58 #define DPRINTF(dev, fmt, ...) ((void)0)
59 #endif
60
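/*
 * Bus timing modes.  For SoCs that provide a delay table, each mode
 * selects an output/sample clock phase pair from the tables below.
 */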
61 enum sunxi_mmc_timing {
62 SUNXI_MMC_TIMING_400K,
63 SUNXI_MMC_TIMING_25M,
64 SUNXI_MMC_TIMING_50M,
65 SUNXI_MMC_TIMING_50M_DDR,
66 SUNXI_MMC_TIMING_50M_DDR_8BIT,
67 };
68
69 struct sunxi_mmc_delay {
70 u_int output_phase;
71 u_int sample_phase;
72 };
73
74 static const struct sunxi_mmc_delay sun7i_mmc_delays[] = {
75 [SUNXI_MMC_TIMING_400K] = { 180, 180 },
76 [SUNXI_MMC_TIMING_25M] = { 180, 75 },
77 [SUNXI_MMC_TIMING_50M] = { 90, 120 },
78 [SUNXI_MMC_TIMING_50M_DDR] = { 60, 120 },
79 [SUNXI_MMC_TIMING_50M_DDR_8BIT] = { 90, 180 },
80 };
81
82 static const struct sunxi_mmc_delay sun9i_mmc_delays[] = {
83 [SUNXI_MMC_TIMING_400K] = { 180, 180 },
84 [SUNXI_MMC_TIMING_25M] = { 180, 75 },
85 [SUNXI_MMC_TIMING_50M] = { 150, 120 },
86 [SUNXI_MMC_TIMING_50M_DDR] = { 54, 36 },
87 [SUNXI_MMC_TIMING_50M_DDR_8BIT] = { 72, 72 },
88 };
89
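/*
 * Number of descriptors in the IDMA ring.  Each descriptor carries at
 * most idma_xferlen bytes, so this bounds the size of a single transfer.
 */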
90 #define SUNXI_MMC_NDESC 64
91
92 struct sunxi_mmc_softc;
93
94 static int sunxi_mmc_match(device_t, cfdata_t, void *);
95 static void sunxi_mmc_attach(device_t, device_t, void *);
96 static void sunxi_mmc_attach_i(device_t);
97
98 static int sunxi_mmc_intr(void *);
99 static int sunxi_mmc_dmabounce_setup(struct sunxi_mmc_softc *);
100 static int sunxi_mmc_idma_setup(struct sunxi_mmc_softc *);
101 static void sunxi_mmc_dma_complete(struct sunxi_mmc_softc *, struct sdmmc_command *);
102
103 static int sunxi_mmc_host_reset(sdmmc_chipset_handle_t);
104 static uint32_t sunxi_mmc_host_ocr(sdmmc_chipset_handle_t);
105 static int sunxi_mmc_host_maxblklen(sdmmc_chipset_handle_t);
106 static int sunxi_mmc_card_detect(sdmmc_chipset_handle_t);
107 static int sunxi_mmc_write_protect(sdmmc_chipset_handle_t);
108 static int sunxi_mmc_bus_power(sdmmc_chipset_handle_t, uint32_t);
109 static int sunxi_mmc_bus_clock(sdmmc_chipset_handle_t, int, bool);
110 static int sunxi_mmc_bus_width(sdmmc_chipset_handle_t, int);
111 static int sunxi_mmc_bus_rod(sdmmc_chipset_handle_t, int);
112 static int sunxi_mmc_signal_voltage(sdmmc_chipset_handle_t, int);
113 static int sunxi_mmc_execute_tuning(sdmmc_chipset_handle_t, int);
114 static void sunxi_mmc_exec_command(sdmmc_chipset_handle_t,
115 struct sdmmc_command *);
116 static void sunxi_mmc_card_enable_intr(sdmmc_chipset_handle_t, int);
117 static void sunxi_mmc_card_intr_ack(sdmmc_chipset_handle_t);
118
119 static struct sdmmc_chip_functions sunxi_mmc_chip_functions = {
120 .host_reset = sunxi_mmc_host_reset,
121 .host_ocr = sunxi_mmc_host_ocr,
122 .host_maxblklen = sunxi_mmc_host_maxblklen,
123 .card_detect = sunxi_mmc_card_detect,
124 .write_protect = sunxi_mmc_write_protect,
125 .bus_power = sunxi_mmc_bus_power,
126 .bus_clock_ddr = sunxi_mmc_bus_clock,
127 .bus_width = sunxi_mmc_bus_width,
128 .bus_rod = sunxi_mmc_bus_rod,
129 .signal_voltage = sunxi_mmc_signal_voltage,
130 .execute_tuning = sunxi_mmc_execute_tuning,
131 .exec_command = sunxi_mmc_exec_command,
132 .card_enable_intr = sunxi_mmc_card_enable_intr,
133 .card_intr_ack = sunxi_mmc_card_intr_ack,
134 };
135
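/*
 * Per-SoC configuration: maximum bytes per IDMA descriptor, feature
 * flags, an optional clock phase delay table, and the DMA FIFO
 * threshold register value.
 */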
136 struct sunxi_mmc_config {
137 u_int idma_xferlen;
138 u_int flags;
139 #define SUNXI_MMC_FLAG_CALIB_REG 0x01
140 #define SUNXI_MMC_FLAG_NEW_TIMINGS 0x02
141 #define SUNXI_MMC_FLAG_MASK_DATA0 0x04
142 #define SUNXI_MMC_FLAG_HS200 0x08
143 const struct sunxi_mmc_delay *delays;
144 uint32_t dma_ftrglevel;
145 };
146
147 struct sunxi_mmc_softc {
148 device_t sc_dev;
149 bus_space_tag_t sc_bst;
150 bus_space_handle_t sc_bsh;
151 bus_dma_tag_t sc_dmat;
152 int sc_phandle;
153
154 void *sc_ih;
155 kmutex_t sc_intr_lock;
156 kcondvar_t sc_intr_cv;
157
158 int sc_mmc_width;
159 int sc_mmc_present;
160
161 u_int sc_max_frequency;
162
163 device_t sc_sdmmc_dev;
164
165 const struct sunxi_mmc_config *sc_config;
166
167 bus_dma_segment_t sc_idma_segs[1];
168 int sc_idma_nsegs;
169 bus_size_t sc_idma_size;
170 bus_dmamap_t sc_idma_map;
171 int sc_idma_ndesc;
172 void *sc_idma_desc;
173
174 bus_dmamap_t sc_dmabounce_map;
175 void *sc_dmabounce_buf;
176 size_t sc_dmabounce_buflen;
177
178 struct clk *sc_clk_ahb;
179 struct clk *sc_clk_mmc;
180 struct clk *sc_clk_output;
181 struct clk *sc_clk_sample;
182
183 struct fdtbus_reset *sc_rst_ahb;
184
185 struct fdtbus_gpio_pin *sc_gpio_cd;
186 int sc_gpio_cd_inverted;
187 struct fdtbus_gpio_pin *sc_gpio_wp;
188 int sc_gpio_wp_inverted;
189
190 struct fdtbus_regulator *sc_reg_vmmc;
191 struct fdtbus_regulator *sc_reg_vqmmc;
192
193 struct fdtbus_mmc_pwrseq *sc_pwrseq;
194
195 bool sc_non_removable;
196 bool sc_broken_cd;
197
198 uint32_t sc_intr_card;
199 struct sdmmc_command *sc_curcmd;
200 bool sc_wait_dma;
201 bool sc_wait_cmd;
202 bool sc_wait_data;
203 };
204
205 CFATTACH_DECL_NEW(sunxi_mmc, sizeof(struct sunxi_mmc_softc),
206 sunxi_mmc_match, sunxi_mmc_attach, NULL, NULL);
207
208 #define MMC_WRITE(sc, reg, val) \
209 bus_space_write_4((sc)->sc_bst, (sc)->sc_bsh, (reg), (val))
210 #define MMC_READ(sc, reg) \
211 bus_space_read_4((sc)->sc_bst, (sc)->sc_bsh, (reg))
212
213 static const struct sunxi_mmc_config sun4i_a10_mmc_config = {
214 .idma_xferlen = 0x2000,
215 .dma_ftrglevel = 0x20070008,
216 .delays = NULL,
217 .flags = 0,
218 };
219
220 static const struct sunxi_mmc_config sun5i_a13_mmc_config = {
221 .idma_xferlen = 0x10000,
222 .dma_ftrglevel = 0x20070008,
223 .delays = NULL,
224 .flags = 0,
225 };
226
227 static const struct sunxi_mmc_config sun7i_a20_mmc_config = {
228 .idma_xferlen = 0x2000,
229 .dma_ftrglevel = 0x20070008,
230 .delays = sun7i_mmc_delays,
231 .flags = 0,
232 };
233
234 static const struct sunxi_mmc_config sun8i_a83t_emmc_config = {
235 .idma_xferlen = 0x10000,
236 .dma_ftrglevel = 0x20070008,
237 .delays = NULL,
238 .flags = SUNXI_MMC_FLAG_NEW_TIMINGS,
239 };
240
241 static const struct sunxi_mmc_config sun9i_a80_mmc_config = {
242 .idma_xferlen = 0x10000,
243 .dma_ftrglevel = 0x200f0010,
244 .delays = sun9i_mmc_delays,
245 .flags = 0,
246 };
247
248 static const struct sunxi_mmc_config sun50i_a64_mmc_config = {
249 .idma_xferlen = 0x10000,
250 .dma_ftrglevel = 0x20070008,
251 .delays = NULL,
252 .flags = SUNXI_MMC_FLAG_CALIB_REG |
253 SUNXI_MMC_FLAG_NEW_TIMINGS |
254 SUNXI_MMC_FLAG_MASK_DATA0,
255 };
256
257 static const struct sunxi_mmc_config sun50i_a64_emmc_config = {
258 .idma_xferlen = 0x2000,
259 .dma_ftrglevel = 0x20070008,
260 .delays = NULL,
261 .flags = SUNXI_MMC_FLAG_CALIB_REG |
262 SUNXI_MMC_FLAG_NEW_TIMINGS |
263 SUNXI_MMC_FLAG_HS200,
264 };
265
266 static const struct sunxi_mmc_config sun50i_h6_mmc_config = {
267 .idma_xferlen = 0x10000,
268 .dma_ftrglevel = 0x20070008,
269 .delays = NULL,
270 .flags = SUNXI_MMC_FLAG_CALIB_REG |
271 SUNXI_MMC_FLAG_NEW_TIMINGS |
272 SUNXI_MMC_FLAG_MASK_DATA0,
273 };
274
275 static const struct sunxi_mmc_config sun50i_h6_emmc_config = {
276 .idma_xferlen = 0x2000,
277 .dma_ftrglevel = 0x20070008,
278 .delays = NULL,
279 .flags = SUNXI_MMC_FLAG_CALIB_REG,
280 };
281
282 static const struct device_compatible_entry compat_data[] = {
283 { .compat = "allwinner,sun4i-a10-mmc",
284 .data = &sun4i_a10_mmc_config },
285 { .compat = "allwinner,sun5i-a13-mmc",
286 .data = &sun5i_a13_mmc_config },
287 { .compat = "allwinner,sun7i-a20-mmc",
288 .data = &sun7i_a20_mmc_config },
289 { .compat = "allwinner,sun8i-a83t-emmc",
290 .data = &sun8i_a83t_emmc_config },
291 { .compat = "allwinner,sun9i-a80-mmc",
292 .data = &sun9i_a80_mmc_config },
293 { .compat = "allwinner,sun50i-a64-mmc",
294 .data = &sun50i_a64_mmc_config },
295 { .compat = "allwinner,sun50i-a64-emmc",
296 .data = &sun50i_a64_emmc_config },
297 { .compat = "allwinner,sun50i-h6-mmc",
298 .data = &sun50i_h6_mmc_config },
299 { .compat = "allwinner,sun50i-h6-emmc",
300 .data = &sun50i_h6_emmc_config },
301
302 DEVICE_COMPAT_EOL
303 };
304
305 static int
306 sunxi_mmc_match(device_t parent, cfdata_t cf, void *aux)
307 {
308 struct fdt_attach_args * const faa = aux;
309
310 return of_compatible_match(faa->faa_phandle, compat_data);
311 }
312
313 static void
314 sunxi_mmc_attach(device_t parent, device_t self, void *aux)
315 {
316 struct sunxi_mmc_softc * const sc = device_private(self);
317 struct fdt_attach_args * const faa = aux;
318 const int phandle = faa->faa_phandle;
319 char intrstr[128];
320 bus_addr_t addr;
321 bus_size_t size;
322
323 if (fdtbus_get_reg(phandle, 0, &addr, &size) != 0) {
324 aprint_error(": couldn't get registers\n");
325 return;
326 }
327
328 sc->sc_clk_ahb = fdtbus_clock_get(phandle, "ahb");
329 sc->sc_clk_mmc = fdtbus_clock_get(phandle, "mmc");
330 sc->sc_clk_output = fdtbus_clock_get(phandle, "output");
331 sc->sc_clk_sample = fdtbus_clock_get(phandle, "sample");
332
333 #if notyet
334 if (sc->sc_clk_ahb == NULL || sc->sc_clk_mmc == NULL ||
335 sc->sc_clk_output == NULL || sc->sc_clk_sample == NULL) {
336 #else
337 if (sc->sc_clk_ahb == NULL || sc->sc_clk_mmc == NULL) {
338 #endif
339 aprint_error(": couldn't get clocks\n");
340 return;
341 }
342
343 sc->sc_rst_ahb = fdtbus_reset_get(phandle, "ahb");
344
345 sc->sc_pwrseq = fdtbus_mmc_pwrseq_get(phandle);
346
347 if (clk_enable(sc->sc_clk_ahb) != 0 ||
348 clk_enable(sc->sc_clk_mmc) != 0) {
349 aprint_error(": couldn't enable clocks\n");
350 return;
351 }
352
353 if (sc->sc_rst_ahb != NULL) {
354 if (fdtbus_reset_deassert(sc->sc_rst_ahb) != 0) {
355 aprint_error(": couldn't de-assert resets\n");
356 return;
357 }
358 }
359
360 sc->sc_dev = self;
361 sc->sc_phandle = phandle;
362 sc->sc_config = of_compatible_lookup(phandle, compat_data)->data;
363 sc->sc_bst = faa->faa_bst;
364 sc->sc_dmat = faa->faa_dmat;
365 mutex_init(&sc->sc_intr_lock, MUTEX_DEFAULT, IPL_BIO);
366 cv_init(&sc->sc_intr_cv, "sunximmcirq");
367
368 if (bus_space_map(sc->sc_bst, addr, size, 0, &sc->sc_bsh) != 0) {
369 aprint_error(": couldn't map registers\n");
370 return;
371 }
372
373 sc->sc_reg_vmmc = fdtbus_regulator_acquire(phandle, "vmmc-supply");
374 if (sc->sc_reg_vmmc != NULL && fdtbus_regulator_enable(sc->sc_reg_vmmc)) {
375 aprint_error(": couldn't enable vmmc-supply\n");
376 return;
377 }
378
379 aprint_naive("\n");
380 aprint_normal(": SD/MMC controller\n");
381
382 sc->sc_reg_vqmmc = fdtbus_regulator_acquire(phandle, "vqmmc-supply");
383
384 sc->sc_gpio_cd = fdtbus_gpio_acquire(phandle, "cd-gpios",
385 GPIO_PIN_INPUT);
386 sc->sc_gpio_wp = fdtbus_gpio_acquire(phandle, "wp-gpios",
387 GPIO_PIN_INPUT);
388
389 sc->sc_gpio_cd_inverted = of_hasprop(phandle, "cd-inverted") ? 0 : 1;
390 sc->sc_gpio_wp_inverted = of_hasprop(phandle, "wp-inverted") ? 0 : 1;
391
392 sc->sc_non_removable = of_hasprop(phandle, "non-removable");
393 sc->sc_broken_cd = of_hasprop(phandle, "broken-cd");
394
395 if (of_getprop_uint32(phandle, "max-frequency", &sc->sc_max_frequency))
396 sc->sc_max_frequency = 52000000;
397
398 if (sunxi_mmc_dmabounce_setup(sc) != 0 ||
399 sunxi_mmc_idma_setup(sc) != 0) {
400 aprint_error_dev(self, "failed to setup DMA\n");
401 return;
402 }
403
404 if (!fdtbus_intr_str(phandle, 0, intrstr, sizeof(intrstr))) {
405 aprint_error_dev(self, "failed to decode interrupt\n");
406 return;
407 }
408
409 sc->sc_ih = fdtbus_intr_establish_xname(phandle, 0, IPL_BIO,
410 FDT_INTR_MPSAFE, sunxi_mmc_intr, sc, device_xname(self));
411 if (sc->sc_ih == NULL) {
412 aprint_error_dev(self, "failed to establish interrupt on %s\n",
413 intrstr);
414 return;
415 }
416 aprint_normal_dev(self, "interrupting on %s\n", intrstr);
417
418 config_interrupts(self, sunxi_mmc_attach_i);
419 }
420
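/*
 * Allocate a single-segment bounce buffer, one maximum-sized block long,
 * for data commands that arrive without a DMA map, e.g. SDIO
 * IO_RW_EXTENDED (CMD53).
 */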
421 static int
422 sunxi_mmc_dmabounce_setup(struct sunxi_mmc_softc *sc)
423 {
424 bus_dma_segment_t ds[1];
425 int error, rseg;
426
427 sc->sc_dmabounce_buflen = sunxi_mmc_host_maxblklen(sc);
428 error = bus_dmamem_alloc(sc->sc_dmat, sc->sc_dmabounce_buflen, 0,
429 sc->sc_dmabounce_buflen, ds, 1, &rseg, BUS_DMA_WAITOK);
430 if (error)
431 return error;
432 error = bus_dmamem_map(sc->sc_dmat, ds, 1, sc->sc_dmabounce_buflen,
433 &sc->sc_dmabounce_buf, BUS_DMA_WAITOK);
434 if (error)
435 goto free;
436 error = bus_dmamap_create(sc->sc_dmat, sc->sc_dmabounce_buflen, 1,
437 sc->sc_dmabounce_buflen, 0, BUS_DMA_WAITOK, &sc->sc_dmabounce_map);
438 if (error)
439 goto unmap;
440 error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmabounce_map,
441 sc->sc_dmabounce_buf, sc->sc_dmabounce_buflen, NULL,
442 BUS_DMA_WAITOK);
443 if (error)
444 goto destroy;
445 return 0;
446
447 destroy:
448 bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmabounce_map);
449 unmap:
450 bus_dmamem_unmap(sc->sc_dmat, sc->sc_dmabounce_buf,
451 sc->sc_dmabounce_buflen);
452 free:
453 bus_dmamem_free(sc->sc_dmat, ds, rseg);
454 return error;
455 }
456
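/*
 * Allocate, map, and load the internal DMA (IDMA) descriptor ring used
 * to describe multi-segment transfers to the controller.
 */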
457 static int
458 sunxi_mmc_idma_setup(struct sunxi_mmc_softc *sc)
459 {
460 int error;
461
462 sc->sc_idma_ndesc = SUNXI_MMC_NDESC;
463 sc->sc_idma_size = sizeof(struct sunxi_mmc_idma_descriptor) *
464 sc->sc_idma_ndesc;
465 error = bus_dmamem_alloc(sc->sc_dmat, sc->sc_idma_size, 0,
466 sc->sc_idma_size, sc->sc_idma_segs, 1,
467 &sc->sc_idma_nsegs, BUS_DMA_WAITOK);
468 if (error)
469 return error;
470 error = bus_dmamem_map(sc->sc_dmat, sc->sc_idma_segs,
471 sc->sc_idma_nsegs, sc->sc_idma_size,
472 &sc->sc_idma_desc, BUS_DMA_WAITOK);
473 if (error)
474 goto free;
475 error = bus_dmamap_create(sc->sc_dmat, sc->sc_idma_size, 1,
476 sc->sc_idma_size, 0, BUS_DMA_WAITOK, &sc->sc_idma_map);
477 if (error)
478 goto unmap;
479 error = bus_dmamap_load(sc->sc_dmat, sc->sc_idma_map,
480 sc->sc_idma_desc, sc->sc_idma_size, NULL, BUS_DMA_WAITOK);
481 if (error)
482 goto destroy;
483 return 0;
484
485 destroy:
486 bus_dmamap_destroy(sc->sc_dmat, sc->sc_idma_map);
487 unmap:
488 bus_dmamem_unmap(sc->sc_dmat, sc->sc_idma_desc, sc->sc_idma_size);
489 free:
490 bus_dmamem_free(sc->sc_dmat, sc->sc_idma_segs, sc->sc_idma_nsegs);
491 return error;
492 }
493
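/*
 * Program the module clock for the requested card frequency (in kHz).
 * On SoCs with a delay table, also pick a timing mode and set the
 * corresponding output and sample clock phases.
 */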
494 static int
495 sunxi_mmc_set_clock(struct sunxi_mmc_softc *sc, u_int freq, bool ddr, bool dbl)
496 {
497 const struct sunxi_mmc_delay *delays;
498 int error, timing = SUNXI_MMC_TIMING_400K;
499
500 if (sc->sc_config->delays) {
501 if (freq <= 400) {
502 timing = SUNXI_MMC_TIMING_400K;
503 } else if (freq <= 25000) {
504 timing = SUNXI_MMC_TIMING_25M;
505 } else if (freq <= 52000) {
506 if (ddr) {
507 timing = sc->sc_mmc_width == 8 ?
508 SUNXI_MMC_TIMING_50M_DDR_8BIT :
509 SUNXI_MMC_TIMING_50M_DDR;
510 } else {
511 timing = SUNXI_MMC_TIMING_50M;
512 }
513 } else
514 return EINVAL;
515 }
516 if (sc->sc_max_frequency) {
517 if (freq * 1000 > sc->sc_max_frequency)
518 return EINVAL;
519 }
520
521 error = clk_set_rate(sc->sc_clk_mmc, (freq * 1000) << dbl);
522 if (error != 0)
523 return error;
524
525 if (sc->sc_config->delays == NULL)
526 return 0;
527
528 delays = &sc->sc_config->delays[timing];
529
530 if (sc->sc_clk_sample) {
531 error = clk_set_rate(sc->sc_clk_sample, delays->sample_phase);
532 if (error != 0)
533 return error;
534 }
535 if (sc->sc_clk_output) {
536 error = clk_set_rate(sc->sc_clk_output, delays->output_phase);
537 if (error != 0)
538 return error;
539 }
540
541 return 0;
542 }
543
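/* Pulse the eMMC hardware reset line (used with "cap-mmc-hw-reset"). */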
544 static void
545 sunxi_mmc_hw_reset(struct sunxi_mmc_softc *sc)
546 {
547 MMC_WRITE(sc, SUNXI_MMC_HWRST, 0);
548 delay(1000);
549 MMC_WRITE(sc, SUNXI_MMC_HWRST, 1);
550 delay(1000);
551 }
552
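/*
 * Deferred attach, run from config_interrupts(): power the card up,
 * reset the host, set an initial 400 kHz clock, and attach sdmmc(4)
 * with capabilities derived from the DT and the per-SoC flags.
 */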
553 static void
554 sunxi_mmc_attach_i(device_t self)
555 {
556 struct sunxi_mmc_softc *sc = device_private(self);
557 const u_int flags = sc->sc_config->flags;
558 struct sdmmcbus_attach_args saa;
559 uint32_t width;
560 const bool supports_hs200 =
561 of_hasprop(sc->sc_phandle, "mmc-hs200-1_2v") |
562 of_hasprop(sc->sc_phandle, "mmc-hs200-1_8v");
563
564 const bool supports_ddr =
565 of_hasprop(sc->sc_phandle, "mmc-ddr-1_2v") |
566 of_hasprop(sc->sc_phandle, "mmc-ddr-1_8v") |
567 of_hasprop(sc->sc_phandle, "mmc-ddr-3_3v");
568
569 if (sc->sc_pwrseq)
570 fdtbus_mmc_pwrseq_pre_power_on(sc->sc_pwrseq);
571
572 if (of_hasprop(sc->sc_phandle, "cap-mmc-hw-reset"))
573 sunxi_mmc_hw_reset(sc);
574
575 sunxi_mmc_host_reset(sc);
576 sunxi_mmc_bus_width(sc, 1);
577 sunxi_mmc_set_clock(sc, 400, false, false);
578
579 if (sc->sc_pwrseq)
580 fdtbus_mmc_pwrseq_post_power_on(sc->sc_pwrseq);
581
582 if (of_getprop_uint32(sc->sc_phandle, "bus-width", &width) != 0)
583 width = 4;
584
585 memset(&saa, 0, sizeof(saa));
586 saa.saa_busname = "sdmmc";
587 saa.saa_sct = &sunxi_mmc_chip_functions;
588 saa.saa_sch = sc;
589 saa.saa_dmat = sc->sc_dmat;
590 saa.saa_clkmin = 400;
591 saa.saa_clkmax = sc->sc_max_frequency / 1000;
592 saa.saa_caps = SMC_CAPS_DMA |
593 SMC_CAPS_MULTI_SEG_DMA |
594 SMC_CAPS_AUTO_STOP |
595 SMC_CAPS_SD_HIGHSPEED |
596 SMC_CAPS_MMC_HIGHSPEED;
597
598 if ((sc->sc_config->delays || (flags & SUNXI_MMC_FLAG_NEW_TIMINGS)) &&
599 supports_ddr)
600 saa.saa_caps |= SMC_CAPS_MMC_DDR52;
601
602 if ((flags & SUNXI_MMC_FLAG_HS200) != 0 && supports_hs200)
603 saa.saa_caps |= SMC_CAPS_MMC_HS200;
604
605 if (width == 4)
606 saa.saa_caps |= SMC_CAPS_4BIT_MODE;
607 if (width == 8)
608 saa.saa_caps |= SMC_CAPS_8BIT_MODE;
609
610 if (sc->sc_gpio_cd)
611 saa.saa_caps |= SMC_CAPS_POLL_CARD_DET;
612
613 sc->sc_sdmmc_dev = config_found(self, &saa, NULL, CFARGS_NONE);
614 }
615
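/*
 * Interrupt handler: acknowledge pending IDMA and controller status,
 * dispatch SDIO card interrupts, record errors, and clear the
 * command/data/DMA wait flags; once all are clear, the current command
 * is marked done and the thread sleeping in exec_command is woken.
 */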
616 static int
617 sunxi_mmc_intr(void *priv)
618 {
619 struct sunxi_mmc_softc *sc = priv;
620 struct sdmmc_command *cmd;
621 uint32_t idst, mint, imask;
622
623 mutex_enter(&sc->sc_intr_lock);
624 idst = MMC_READ(sc, SUNXI_MMC_IDST);
625 mint = MMC_READ(sc, SUNXI_MMC_MINT);
626 if (!idst && !mint) {
627 mutex_exit(&sc->sc_intr_lock);
628 return 0;
629 }
630 MMC_WRITE(sc, SUNXI_MMC_IDST, idst);
631 MMC_WRITE(sc, SUNXI_MMC_RINT, mint);
632
633 cmd = sc->sc_curcmd;
634
635 DPRINTF(sc->sc_dev, "mmc intr idst=%08X mint=%08X\n",
636 idst, mint);
637
638 /* Handle SDIO card interrupt */
639 if ((mint & SUNXI_MMC_INT_SDIO_INT) != 0) {
640 imask = MMC_READ(sc, SUNXI_MMC_IMASK);
641 MMC_WRITE(sc, SUNXI_MMC_IMASK, imask & ~SUNXI_MMC_INT_SDIO_INT);
642 sdmmc_card_intr(sc->sc_sdmmc_dev);
643 }
644
645 /* Error interrupts take priority over command and transfer interrupts */
646 if (cmd != NULL && (mint & SUNXI_MMC_INT_ERROR) != 0) {
647 imask = MMC_READ(sc, SUNXI_MMC_IMASK);
648 MMC_WRITE(sc, SUNXI_MMC_IMASK, imask & ~SUNXI_MMC_INT_ERROR);
649 if ((mint & SUNXI_MMC_INT_RESP_TIMEOUT) != 0) {
650 cmd->c_error = ETIMEDOUT;
651 /* Wait for command to complete */
652 sc->sc_wait_data = sc->sc_wait_dma = false;
653 if (cmd->c_opcode != SD_IO_SEND_OP_COND &&
654 cmd->c_opcode != SD_IO_RW_DIRECT &&
655 !ISSET(cmd->c_flags, SCF_TOUT_OK))
656 device_printf(sc->sc_dev, "host controller timeout, mint=0x%08x\n", mint);
657 } else {
658 device_printf(sc->sc_dev, "host controller error, mint=0x%08x\n", mint);
659 cmd->c_error = EIO;
660 SET(cmd->c_flags, SCF_ITSDONE);
661 goto done;
662 }
663 }
664
665 if (cmd != NULL && (idst & SUNXI_MMC_IDST_RECEIVE_INT) != 0) {
666 MMC_WRITE(sc, SUNXI_MMC_IDIE, 0);
667 if (sc->sc_wait_dma == false)
668 device_printf(sc->sc_dev, "unexpected DMA receive interrupt\n");
669 sc->sc_wait_dma = false;
670 }
671
672 if (cmd != NULL && (mint & SUNXI_MMC_INT_CMD_DONE) != 0) {
673 imask = MMC_READ(sc, SUNXI_MMC_IMASK);
674 MMC_WRITE(sc, SUNXI_MMC_IMASK, imask & ~SUNXI_MMC_INT_CMD_DONE);
675 if (sc->sc_wait_cmd == false)
676 device_printf(sc->sc_dev, "unexpected command complete interrupt\n");
677 sc->sc_wait_cmd = false;
678 }
679
680 const uint32_t dmadone_mask = SUNXI_MMC_INT_AUTO_CMD_DONE|SUNXI_MMC_INT_DATA_OVER;
681 if (cmd != NULL && (mint & dmadone_mask) != 0) {
682 imask = MMC_READ(sc, SUNXI_MMC_IMASK);
683 MMC_WRITE(sc, SUNXI_MMC_IMASK, imask & ~dmadone_mask);
684 if (sc->sc_wait_data == false)
685 device_printf(sc->sc_dev, "unexpected data complete interrupt\n");
686 sc->sc_wait_data = false;
687 }
688
689 if (cmd != NULL &&
690 sc->sc_wait_dma == false &&
691 sc->sc_wait_cmd == false &&
692 sc->sc_wait_data == false) {
693 SET(cmd->c_flags, SCF_ITSDONE);
694 }
695
696 done:
697 if (cmd != NULL && ISSET(cmd->c_flags, SCF_ITSDONE)) {
698 cv_broadcast(&sc->sc_intr_cv);
699 }
700
701 mutex_exit(&sc->sc_intr_lock);
702
703 return 1;
704 }
705
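/*
 * Soft-reset the controller, set the maximum data/response timeout,
 * mask and clear all interrupt sources, and re-enable the global
 * interrupt.
 */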
706 static int
707 sunxi_mmc_host_reset(sdmmc_chipset_handle_t sch)
708 {
709 struct sunxi_mmc_softc *sc = sch;
710 uint32_t gctrl;
711 int retry = 1000;
712
713 DPRINTF(sc->sc_dev, "host reset\n");
714
715 gctrl = MMC_READ(sc, SUNXI_MMC_GCTRL);
716 gctrl |= SUNXI_MMC_GCTRL_RESET;
717 MMC_WRITE(sc, SUNXI_MMC_GCTRL, gctrl);
718 while (--retry > 0) {
719 if (!(MMC_READ(sc, SUNXI_MMC_GCTRL) & SUNXI_MMC_GCTRL_RESET))
720 break;
721 delay(100);
722 }
723
724 MMC_WRITE(sc, SUNXI_MMC_TIMEOUT, 0xffffffff);
725
726 MMC_WRITE(sc, SUNXI_MMC_IMASK, 0);
727
728 MMC_WRITE(sc, SUNXI_MMC_RINT, 0xffffffff);
729
730 gctrl = MMC_READ(sc, SUNXI_MMC_GCTRL);
731 gctrl |= SUNXI_MMC_GCTRL_INTEN;
732 gctrl &= ~SUNXI_MMC_GCTRL_WAIT_MEM_ACCESS_DONE;
733 gctrl &= ~SUNXI_MMC_GCTRL_ACCESS_BY_AHB;
734 MMC_WRITE(sc, SUNXI_MMC_GCTRL, gctrl);
735
736 return 0;
737 }
738
739 static uint32_t
740 sunxi_mmc_host_ocr(sdmmc_chipset_handle_t sch)
741 {
742 return MMC_OCR_3_2V_3_3V | MMC_OCR_3_3V_3_4V | MMC_OCR_HCS;
743 }
744
745 static int
746 sunxi_mmc_host_maxblklen(sdmmc_chipset_handle_t sch)
747 {
748 return 8192;
749 }
750
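/*
 * Card detect: non-removable or broken-cd devices are always treated as
 * present; otherwise debounce the card-detect GPIO (five reads, 1 ms
 * apart, changing state only when all five agree) or fall back to the
 * controller's CARD_PRESENT status bit.
 */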
751 static int
752 sunxi_mmc_card_detect(sdmmc_chipset_handle_t sch)
753 {
754 struct sunxi_mmc_softc *sc = sch;
755
756 if (sc->sc_non_removable || sc->sc_broken_cd) {
757 /*
758 * Non-removable or broken card detect flag set in
759 * DT, assume always present
760 */
761 return 1;
762 } else if (sc->sc_gpio_cd != NULL) {
763 /* Use card detect GPIO */
764 int v = 0, i;
765 for (i = 0; i < 5; i++) {
766 v += (fdtbus_gpio_read(sc->sc_gpio_cd) ^
767 sc->sc_gpio_cd_inverted);
768 delay(1000);
769 }
770 if (v == 5)
771 sc->sc_mmc_present = 0;
772 else if (v == 0)
773 sc->sc_mmc_present = 1;
774 return sc->sc_mmc_present;
775 } else {
776 /* Use CARD_PRESENT field of SD_STATUS register */
777 const uint32_t present = MMC_READ(sc, SUNXI_MMC_STATUS) &
778 SUNXI_MMC_STATUS_CARD_PRESENT;
779 return present != 0;
780 }
781 }
782
783 static int
784 sunxi_mmc_write_protect(sdmmc_chipset_handle_t sch)
785 {
786 struct sunxi_mmc_softc *sc = sch;
787
788 if (sc->sc_gpio_wp == NULL) {
789 return 0; /* no write protect pin, assume rw */
790 } else {
791 return fdtbus_gpio_read(sc->sc_gpio_wp) ^
792 sc->sc_gpio_wp_inverted;
793 }
794 }
795
796 static int
797 sunxi_mmc_bus_power(sdmmc_chipset_handle_t sch, uint32_t ocr)
798 {
799 return 0;
800 }
801
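/*
 * Latch the current clock settings into the card clock domain by
 * issuing an "update clock only" command and polling for completion.
 */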
802 static int
803 sunxi_mmc_update_clock(struct sunxi_mmc_softc *sc)
804 {
805 uint32_t cmd;
806 int retry;
807
808 DPRINTF(sc->sc_dev, "update clock\n");
809
810 cmd = SUNXI_MMC_CMD_START |
811 SUNXI_MMC_CMD_UPCLK_ONLY |
812 SUNXI_MMC_CMD_WAIT_PRE_OVER;
813 MMC_WRITE(sc, SUNXI_MMC_CMD, cmd);
814 retry = 100000;
815 while (--retry > 0) {
816 if (!(MMC_READ(sc, SUNXI_MMC_CMD) & SUNXI_MMC_CMD_START))
817 break;
818 delay(10);
819 }
820
821 if (retry == 0) {
822 aprint_error_dev(sc->sc_dev, "timeout updating clock\n");
823 DPRINTF(sc->sc_dev, "GCTRL: 0x%08x\n",
824 MMC_READ(sc, SUNXI_MMC_GCTRL));
825 DPRINTF(sc->sc_dev, "CLKCR: 0x%08x\n",
826 MMC_READ(sc, SUNXI_MMC_CLKCR));
827 DPRINTF(sc->sc_dev, "TIMEOUT: 0x%08x\n",
828 MMC_READ(sc, SUNXI_MMC_TIMEOUT));
829 DPRINTF(sc->sc_dev, "WIDTH: 0x%08x\n",
830 MMC_READ(sc, SUNXI_MMC_WIDTH));
831 DPRINTF(sc->sc_dev, "CMD: 0x%08x\n",
832 MMC_READ(sc, SUNXI_MMC_CMD));
833 DPRINTF(sc->sc_dev, "MINT: 0x%08x\n",
834 MMC_READ(sc, SUNXI_MMC_MINT));
835 DPRINTF(sc->sc_dev, "RINT: 0x%08x\n",
836 MMC_READ(sc, SUNXI_MMC_RINT));
837 DPRINTF(sc->sc_dev, "STATUS: 0x%08x\n",
838 MMC_READ(sc, SUNXI_MMC_STATUS));
839 return ETIMEDOUT;
840 }
841
842 return 0;
843 }
844
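/*
 * sdmmc bus_clock_ddr callback: gate the card clock, program the
 * controller divider and timing mode (the module clock runs at twice
 * the card clock for DDR modes that require it), set the module clock
 * rate, and re-enable the card clock, masking DATA0 across the switch
 * on SoCs that need it.
 */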
845 static int
846 sunxi_mmc_bus_clock(sdmmc_chipset_handle_t sch, int freq, bool ddr)
847 {
848 struct sunxi_mmc_softc *sc = sch;
849 uint32_t clkcr, gctrl, ntsr;
850 const u_int flags = sc->sc_config->flags;
851 bool dbl = 0;
852
853 clkcr = MMC_READ(sc, SUNXI_MMC_CLKCR);
854 if (clkcr & SUNXI_MMC_CLKCR_CARDCLKON) {
855 clkcr &= ~SUNXI_MMC_CLKCR_CARDCLKON;
856 if (flags & SUNXI_MMC_FLAG_MASK_DATA0)
857 clkcr |= SUNXI_MMC_CLKCR_MASK_DATA0;
858 MMC_WRITE(sc, SUNXI_MMC_CLKCR, clkcr);
859 if (sunxi_mmc_update_clock(sc) != 0)
860 return 1;
861 if (flags & SUNXI_MMC_FLAG_MASK_DATA0) {
862 clkcr = MMC_READ(sc, SUNXI_MMC_CLKCR);
863 clkcr &= ~SUNXI_MMC_CLKCR_MASK_DATA0;
864 MMC_WRITE(sc, SUNXI_MMC_CLKCR, clkcr);
865 }
866 }
867
868 if (freq) {
869 /* For 8-bit DDR in the old timing modes, and all DDR in the new
870 * timing modes, the module clock has to be 2x the card clock.
871 */
872 if (ddr && ((flags & SUNXI_MMC_FLAG_NEW_TIMINGS) ||
873 sc->sc_mmc_width == 8))
874 dbl = 1;
875
876 clkcr &= ~SUNXI_MMC_CLKCR_DIV;
877 clkcr |= __SHIFTIN(dbl, SUNXI_MMC_CLKCR_DIV);
878 MMC_WRITE(sc, SUNXI_MMC_CLKCR, clkcr);
879
880 if (flags & SUNXI_MMC_FLAG_NEW_TIMINGS) {
881 ntsr = MMC_READ(sc, SUNXI_MMC_NTSR);
882 ntsr |= SUNXI_MMC_NTSR_MODE_SELECT;
883 MMC_WRITE(sc, SUNXI_MMC_NTSR, ntsr);
884 }
885
886 if (flags & SUNXI_MMC_FLAG_CALIB_REG)
887 MMC_WRITE(sc, SUNXI_MMC_SAMP_DL, SUNXI_MMC_SAMP_DL_SW_EN);
888
889 if (sunxi_mmc_update_clock(sc) != 0)
890 return 1;
891
892 gctrl = MMC_READ(sc, SUNXI_MMC_GCTRL);
893 if (ddr)
894 gctrl |= SUNXI_MMC_GCTRL_DDR_MODE;
895 else
896 gctrl &= ~SUNXI_MMC_GCTRL_DDR_MODE;
897 MMC_WRITE(sc, SUNXI_MMC_GCTRL, gctrl);
898
899 if (sunxi_mmc_set_clock(sc, freq, ddr, dbl) != 0)
900 return 1;
901
902 clkcr |= SUNXI_MMC_CLKCR_CARDCLKON;
903 if (flags & SUNXI_MMC_FLAG_MASK_DATA0)
904 clkcr |= SUNXI_MMC_CLKCR_MASK_DATA0;
905 MMC_WRITE(sc, SUNXI_MMC_CLKCR, clkcr);
906 if (sunxi_mmc_update_clock(sc) != 0)
907 return 1;
908 if (flags & SUNXI_MMC_FLAG_MASK_DATA0) {
909 clkcr = MMC_READ(sc, SUNXI_MMC_CLKCR);
910 clkcr &= ~SUNXI_MMC_CLKCR_MASK_DATA0;
911 MMC_WRITE(sc, SUNXI_MMC_CLKCR, clkcr);
912 }
913 }
914
915 return 0;
916 }
917
918 static int
919 sunxi_mmc_bus_width(sdmmc_chipset_handle_t sch, int width)
920 {
921 struct sunxi_mmc_softc *sc = sch;
922
923 DPRINTF(sc->sc_dev, "width = %d\n", width);
924
925 switch (width) {
926 case 1:
927 MMC_WRITE(sc, SUNXI_MMC_WIDTH, SUNXI_MMC_WIDTH_1);
928 break;
929 case 4:
930 MMC_WRITE(sc, SUNXI_MMC_WIDTH, SUNXI_MMC_WIDTH_4);
931 break;
932 case 8:
933 MMC_WRITE(sc, SUNXI_MMC_WIDTH, SUNXI_MMC_WIDTH_8);
934 break;
935 default:
936 return 1;
937 }
938
939 sc->sc_mmc_width = width;
940
941 return 0;
942 }
943
944 static int
945 sunxi_mmc_bus_rod(sdmmc_chipset_handle_t sch, int on)
946 {
947 return -1;
948 }
949
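/*
 * Switch the signalling voltage via the vqmmc regulator; treated as a
 * no-op when no regulator exists or it cannot supply the requested
 * voltage.
 */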
950 static int
951 sunxi_mmc_signal_voltage(sdmmc_chipset_handle_t sch, int signal_voltage)
952 {
953 struct sunxi_mmc_softc *sc = sch;
954 u_int uvol;
955 int error;
956
957 if (sc->sc_reg_vqmmc == NULL)
958 return 0;
959
960 switch (signal_voltage) {
961 case SDMMC_SIGNAL_VOLTAGE_330:
962 uvol = 3300000;
963 break;
964 case SDMMC_SIGNAL_VOLTAGE_180:
965 uvol = 1800000;
966 break;
967 default:
968 return EINVAL;
969 }
970
971 error = fdtbus_regulator_supports_voltage(sc->sc_reg_vqmmc, uvol, uvol);
972 if (error != 0)
973 return 0;
974
975 error = fdtbus_regulator_set_voltage(sc->sc_reg_vqmmc, uvol, uvol);
976 if (error != 0)
977 return error;
978
979 return fdtbus_regulator_enable(sc->sc_reg_vqmmc);
980 }
981
982 static int
983 sunxi_mmc_execute_tuning(sdmmc_chipset_handle_t sch, int timing)
984 {
985 switch (timing) {
986 case SDMMC_TIMING_MMC_HS200:
987 break;
988 default:
989 return EINVAL;
990 }
991
992 return 0;
993 }
994
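/*
 * Build the IDMA descriptor chain for a data command, bouncing through
 * the internal buffer when no DMA map was supplied, then arm the IDMA
 * engine.
 */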
995 static int
996 sunxi_mmc_dma_prepare(struct sunxi_mmc_softc *sc, struct sdmmc_command *cmd)
997 {
998 struct sunxi_mmc_idma_descriptor *dma = sc->sc_idma_desc;
999 bus_addr_t desc_paddr = sc->sc_idma_map->dm_segs[0].ds_addr;
1000 bus_dmamap_t map;
1001 bus_size_t off;
1002 int desc, resid, seg;
1003 uint32_t val;
1004
1005 /*
1006 * If the command includes a dma map use it, otherwise we need to
1007 * bounce. This can happen for SDIO IO_RW_EXTENDED (CMD53) commands.
1008 */
1009 if (cmd->c_dmamap) {
1010 map = cmd->c_dmamap;
1011 } else {
1012 if (cmd->c_datalen > sc->sc_dmabounce_buflen)
1013 return E2BIG;
1014 map = sc->sc_dmabounce_map;
1015
1016 if (ISSET(cmd->c_flags, SCF_CMD_READ)) {
1017 memset(sc->sc_dmabounce_buf, 0, cmd->c_datalen);
1018 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmabounce_map,
1019 0, cmd->c_datalen, BUS_DMASYNC_PREREAD);
1020 } else {
1021 memcpy(sc->sc_dmabounce_buf, cmd->c_data,
1022 cmd->c_datalen);
1023 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmabounce_map,
1024 0, cmd->c_datalen, BUS_DMASYNC_PREWRITE);
1025 }
1026 }
1027
1028 desc = 0;
1029 for (seg = 0; seg < map->dm_nsegs; seg++) {
1030 bus_addr_t paddr = map->dm_segs[seg].ds_addr;
1031 bus_size_t len = map->dm_segs[seg].ds_len;
1032 resid = uimin(len, cmd->c_resid);
1033 off = 0;
1034 while (resid > 0) {
1035 if (desc == sc->sc_idma_ndesc)
1036 break;
1037 len = uimin(sc->sc_config->idma_xferlen, resid);
1038 dma[desc].dma_buf_size = htole32(len);
1039 dma[desc].dma_buf_addr = htole32(paddr + off);
1040 dma[desc].dma_config = htole32(SUNXI_MMC_IDMA_CONFIG_CH |
1041 SUNXI_MMC_IDMA_CONFIG_OWN);
1042 cmd->c_resid -= len;
1043 resid -= len;
1044 off += len;
1045 if (desc == 0) {
1046 dma[desc].dma_config |= htole32(SUNXI_MMC_IDMA_CONFIG_FD);
1047 }
1048 if (cmd->c_resid == 0) {
1049 dma[desc].dma_config |= htole32(SUNXI_MMC_IDMA_CONFIG_LD);
1050 dma[desc].dma_config |= htole32(SUNXI_MMC_IDMA_CONFIG_ER);
1051 dma[desc].dma_next = 0;
1052 } else {
1053 dma[desc].dma_config |=
1054 htole32(SUNXI_MMC_IDMA_CONFIG_DIC);
1055 dma[desc].dma_next = htole32(
1056 desc_paddr + ((desc+1) *
1057 sizeof(struct sunxi_mmc_idma_descriptor)));
1058 }
1059 ++desc;
1060 }
1061 }
1062 if (desc == sc->sc_idma_ndesc) {
1063 aprint_error_dev(sc->sc_dev,
1064 "not enough descriptors for %d byte transfer! "
1065 "there are %u segments with a max xfer length of %u\n",
1066 cmd->c_datalen, map->dm_nsegs, sc->sc_config->idma_xferlen);
1067 return EIO;
1068 }
1069
1070 bus_dmamap_sync(sc->sc_dmat, sc->sc_idma_map, 0,
1071 sc->sc_idma_size, BUS_DMASYNC_PREWRITE);
1072
1073 MMC_WRITE(sc, SUNXI_MMC_DLBA, desc_paddr);
1074 MMC_WRITE(sc, SUNXI_MMC_FTRGLEVEL, sc->sc_config->dma_ftrglevel);
1075
1076 val = MMC_READ(sc, SUNXI_MMC_GCTRL);
1077 val |= SUNXI_MMC_GCTRL_DMAEN;
1078 MMC_WRITE(sc, SUNXI_MMC_GCTRL, val);
1079 val |= SUNXI_MMC_GCTRL_DMARESET;
1080 MMC_WRITE(sc, SUNXI_MMC_GCTRL, val);
1081
1082 MMC_WRITE(sc, SUNXI_MMC_DMAC, SUNXI_MMC_DMAC_SOFTRESET);
1083 if (ISSET(cmd->c_flags, SCF_CMD_READ))
1084 val = SUNXI_MMC_IDST_RECEIVE_INT;
1085 else
1086 val = 0;
1087 MMC_WRITE(sc, SUNXI_MMC_IDIE, val);
1088 MMC_WRITE(sc, SUNXI_MMC_DMAC,
1089 SUNXI_MMC_DMAC_IDMA_ON|SUNXI_MMC_DMAC_FIX_BURST);
1090
1091 return 0;
1092 }
1093
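/*
 * Stop the IDMA engine after a transfer and, for bounced commands,
 * sync the bounce buffer and copy read data back to the caller.
 */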
1094 static void
1095 sunxi_mmc_dma_complete(struct sunxi_mmc_softc *sc, struct sdmmc_command *cmd)
1096 {
1097 MMC_WRITE(sc, SUNXI_MMC_DMAC, 0);
1098 MMC_WRITE(sc, SUNXI_MMC_IDIE, 0);
1099
1100 bus_dmamap_sync(sc->sc_dmat, sc->sc_idma_map, 0,
1101 sc->sc_idma_size, BUS_DMASYNC_POSTWRITE);
1102
1103 if (cmd->c_dmamap == NULL) {
1104 if (ISSET(cmd->c_flags, SCF_CMD_READ)) {
1105 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmabounce_map,
1106 0, cmd->c_datalen, BUS_DMASYNC_POSTREAD);
1107 memcpy(cmd->c_data, sc->sc_dmabounce_buf,
1108 cmd->c_datalen);
1109 } else {
1110 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmabounce_map,
1111 0, cmd->c_datalen, BUS_DMASYNC_POSTWRITE);
1112 }
1113 }
1114 }
1115
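/*
 * Issue a command and sleep until the interrupt handler marks it done
 * or a 15 second timeout expires; then collect the response and, on
 * error, reset the FIFO and DMA engine and re-latch the clock.
 */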
1116 static void
1117 sunxi_mmc_exec_command(sdmmc_chipset_handle_t sch, struct sdmmc_command *cmd)
1118 {
1119 struct sunxi_mmc_softc *sc = sch;
1120 uint32_t cmdval = SUNXI_MMC_CMD_START;
1121 uint32_t imask;
1122 int retry, error;
1123
1124 DPRINTF(sc->sc_dev,
1125 "opcode %d flags 0x%x data %p datalen %d blklen %d\n",
1126 cmd->c_opcode, cmd->c_flags, cmd->c_data, cmd->c_datalen,
1127 cmd->c_blklen);
1128
1129 mutex_enter(&sc->sc_intr_lock);
1130 if (sc->sc_curcmd != NULL) {
1131 device_printf(sc->sc_dev,
1132 "WARNING: driver submitted a command while the controller was busy\n");
1133 cmd->c_error = EBUSY;
1134 SET(cmd->c_flags, SCF_ITSDONE);
1135 mutex_exit(&sc->sc_intr_lock);
1136 return;
1137 }
1138 sc->sc_curcmd = cmd;
1139
1140 if (cmd->c_opcode == 0)
1141 cmdval |= SUNXI_MMC_CMD_SEND_INIT_SEQ;
1142 if (cmd->c_flags & SCF_RSP_PRESENT)
1143 cmdval |= SUNXI_MMC_CMD_RSP_EXP;
1144 if (cmd->c_flags & SCF_RSP_136)
1145 cmdval |= SUNXI_MMC_CMD_LONG_RSP;
1146 if (cmd->c_flags & SCF_RSP_CRC)
1147 cmdval |= SUNXI_MMC_CMD_CHECK_RSP_CRC;
1148
1149 imask = SUNXI_MMC_INT_ERROR | SUNXI_MMC_INT_CMD_DONE;
1150
1151 if (cmd->c_datalen > 0) {
1152 unsigned int nblks;
1153
1154 cmdval |= SUNXI_MMC_CMD_DATA_EXP | SUNXI_MMC_CMD_WAIT_PRE_OVER;
1155 if (!ISSET(cmd->c_flags, SCF_CMD_READ)) {
1156 cmdval |= SUNXI_MMC_CMD_WRITE;
1157 }
1158
1159 nblks = cmd->c_datalen / cmd->c_blklen;
1160 if (nblks == 0 || (cmd->c_datalen % cmd->c_blklen) != 0)
1161 ++nblks;
1162
1163 if (nblks > 1) {
1164 cmdval |= SUNXI_MMC_CMD_SEND_AUTO_STOP;
1165 imask |= SUNXI_MMC_INT_AUTO_CMD_DONE;
1166 } else {
1167 imask |= SUNXI_MMC_INT_DATA_OVER;
1168 }
1169
1170 MMC_WRITE(sc, SUNXI_MMC_BLKSZ, cmd->c_blklen);
1171 MMC_WRITE(sc, SUNXI_MMC_BYTECNT, nblks * cmd->c_blklen);
1172 }
1173
1174 MMC_WRITE(sc, SUNXI_MMC_IMASK, imask | sc->sc_intr_card);
1175 MMC_WRITE(sc, SUNXI_MMC_RINT, 0x7fff);
1176
1177 MMC_WRITE(sc, SUNXI_MMC_A12A,
1178 (cmdval & SUNXI_MMC_CMD_SEND_AUTO_STOP) ? 0 : 0xffff);
1179
1180 MMC_WRITE(sc, SUNXI_MMC_ARG, cmd->c_arg);
1181
1182 cmd->c_resid = cmd->c_datalen;
1183 if (cmd->c_resid > 0) {
1184 cmd->c_error = sunxi_mmc_dma_prepare(sc, cmd);
1185 if (cmd->c_error != 0) {
1186 SET(cmd->c_flags, SCF_ITSDONE);
1187 goto done;
1188 }
1189 sc->sc_wait_dma = ISSET(cmd->c_flags, SCF_CMD_READ);
1190 sc->sc_wait_data = true;
1191 } else {
1192 sc->sc_wait_dma = false;
1193 sc->sc_wait_data = false;
1194 }
1195 sc->sc_wait_cmd = true;
1196
1197 DPRINTF(sc->sc_dev, "cmdval = %08x\n", cmdval);
1198
1199 MMC_WRITE(sc, SUNXI_MMC_CMD, cmdval | cmd->c_opcode);
1200
1201 struct bintime timeout = { .sec = 15, .frac = 0 };
1202 const struct bintime epsilon = { .sec = 1, .frac = 0 };
1203 while (!ISSET(cmd->c_flags, SCF_ITSDONE)) {
1204 error = cv_timedwaitbt(&sc->sc_intr_cv,
1205 &sc->sc_intr_lock, &timeout, &epsilon);
1206 if (error != 0) {
1207 cmd->c_error = error;
1208 SET(cmd->c_flags, SCF_ITSDONE);
1209 goto done;
1210 }
1211 }
1212
1213 if (cmd->c_error == 0 && cmd->c_datalen > 0)
1214 sunxi_mmc_dma_complete(sc, cmd);
1215
1216 if (cmd->c_flags & SCF_RSP_PRESENT) {
1217 if (cmd->c_flags & SCF_RSP_136) {
1218 cmd->c_resp[0] = MMC_READ(sc, SUNXI_MMC_RESP0);
1219 cmd->c_resp[1] = MMC_READ(sc, SUNXI_MMC_RESP1);
1220 cmd->c_resp[2] = MMC_READ(sc, SUNXI_MMC_RESP2);
1221 cmd->c_resp[3] = MMC_READ(sc, SUNXI_MMC_RESP3);
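			/*
			 * The controller returns the full 136-bit response;
			 * sdmmc(4) expects it with the CRC7/end-bit byte
			 * stripped, so shift the whole value right by 8 bits.
			 */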
1222 if (cmd->c_flags & SCF_RSP_CRC) {
1223 cmd->c_resp[0] = (cmd->c_resp[0] >> 8) |
1224 (cmd->c_resp[1] << 24);
1225 cmd->c_resp[1] = (cmd->c_resp[1] >> 8) |
1226 (cmd->c_resp[2] << 24);
1227 cmd->c_resp[2] = (cmd->c_resp[2] >> 8) |
1228 (cmd->c_resp[3] << 24);
1229 cmd->c_resp[3] = (cmd->c_resp[3] >> 8);
1230 }
1231 } else {
1232 cmd->c_resp[0] = MMC_READ(sc, SUNXI_MMC_RESP0);
1233 }
1234 }
1235
1236 done:
1237 KASSERT(ISSET(cmd->c_flags, SCF_ITSDONE));
1238 MMC_WRITE(sc, SUNXI_MMC_IMASK, sc->sc_intr_card);
1239 MMC_WRITE(sc, SUNXI_MMC_RINT, 0x7fff);
1240 MMC_WRITE(sc, SUNXI_MMC_IDST, 0x337);
1241 sc->sc_curcmd = NULL;
1242 mutex_exit(&sc->sc_intr_lock);
1243
1244 if (cmd->c_error) {
1245 DPRINTF(sc->sc_dev, "i/o error %d\n", cmd->c_error);
1246 MMC_WRITE(sc, SUNXI_MMC_GCTRL,
1247 MMC_READ(sc, SUNXI_MMC_GCTRL) |
1248 SUNXI_MMC_GCTRL_DMARESET | SUNXI_MMC_GCTRL_FIFORESET);
1249 for (retry = 0; retry < 1000; retry++) {
1250 if (!(MMC_READ(sc, SUNXI_MMC_GCTRL) & SUNXI_MMC_GCTRL_RESET))
1251 break;
1252 delay(10);
1253 }
1254 sunxi_mmc_update_clock(sc);
1255 }
1256
1257 MMC_WRITE(sc, SUNXI_MMC_GCTRL,
1258 MMC_READ(sc, SUNXI_MMC_GCTRL) | SUNXI_MMC_GCTRL_FIFORESET);
1259 }
1260
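/*
 * Enable or disable delivery of the SDIO card interrupt.  The interrupt
 * handler masks it when it fires; card_intr_ack() re-enables it.
 */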
1261 static void
1262 sunxi_mmc_card_enable_intr(sdmmc_chipset_handle_t sch, int enable)
1263 {
1264 struct sunxi_mmc_softc *sc = sch;
1265 uint32_t imask;
1266
1267 mutex_enter(&sc->sc_intr_lock);
1268 imask = MMC_READ(sc, SUNXI_MMC_IMASK);
1269 if (enable)
1270 imask |= SUNXI_MMC_INT_SDIO_INT;
1271 else
1272 imask &= ~SUNXI_MMC_INT_SDIO_INT;
1273 sc->sc_intr_card = imask & SUNXI_MMC_INT_SDIO_INT;
1274 MMC_WRITE(sc, SUNXI_MMC_IMASK, imask);
1275 mutex_exit(&sc->sc_intr_lock);
1276 }
1277
1278 static void
1279 sunxi_mmc_card_intr_ack(sdmmc_chipset_handle_t sch)
1280 {
1281 struct sunxi_mmc_softc *sc = sch;
1282 uint32_t imask;
1283
1284 mutex_enter(&sc->sc_intr_lock);
1285 imask = MMC_READ(sc, SUNXI_MMC_IMASK);
1286 MMC_WRITE(sc, SUNXI_MMC_IMASK, imask | sc->sc_intr_card);
1287 mutex_exit(&sc->sc_intr_lock);
1288 }
1289