1 /* $NetBSD: sunxi_mmc.c,v 1.22 2018/04/15 18:24:52 jmcneill Exp $ */
2 
3 /*-
4  * Copyright (c) 2014-2017 Jared McNeill <jmcneill@invisible.ca>
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
21  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
22  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
23  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
24  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  */
28 
29 #include "opt_sunximmc.h"
30 
31 #include <sys/cdefs.h>
32 __KERNEL_RCSID(0, "$NetBSD: sunxi_mmc.c,v 1.22 2018/04/15 18:24:52 jmcneill Exp $");
33 
34 #include <sys/param.h>
35 #include <sys/bus.h>
36 #include <sys/device.h>
37 #include <sys/intr.h>
38 #include <sys/systm.h>
39 #include <sys/kernel.h>
40 #include <sys/gpio.h>
41 
42 #include <dev/sdmmc/sdmmcvar.h>
43 #include <dev/sdmmc/sdmmcchip.h>
44 #include <dev/sdmmc/sdmmc_ioreg.h>
45 
46 #include <dev/fdt/fdtvar.h>
47 
48 #include <arm/sunxi/sunxi_mmc.h>
49 
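/*
 * When built with options SUNXI_MMC_DEBUG, sunxi_mmc_debug is a bitmask
 * of device unit numbers for which DPRINTF output is enabled.
 */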
50 #ifdef SUNXI_MMC_DEBUG
51 static int sunxi_mmc_debug = SUNXI_MMC_DEBUG;
52 #define	DPRINTF(dev, fmt, ...)						\
53 do {									\
54 	if (sunxi_mmc_debug & __BIT(device_unit(dev)))			\
55 		device_printf((dev), fmt, ##__VA_ARGS__);		\
56 } while (0)
57 #else
58 #define	DPRINTF(dev, fmt, ...)		((void)0)
59 #endif
60 
61 enum sunxi_mmc_timing {
62 	SUNXI_MMC_TIMING_400K,
63 	SUNXI_MMC_TIMING_25M,
64 	SUNXI_MMC_TIMING_50M,
65 	SUNXI_MMC_TIMING_50M_DDR,
66 	SUNXI_MMC_TIMING_50M_DDR_8BIT,
67 };
68 
69 struct sunxi_mmc_delay {
70 	u_int	output_phase;
71 	u_int	sample_phase;
72 };
73 
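/*
 * Per-timing clock phase delays, applied by sunxi_mmc_set_clock() via
 * clk_set_rate() on the controller's "output" and "sample" phase clocks.
 * The values appear to be phase offsets in degrees.
 */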
74 static const struct sunxi_mmc_delay sun7i_mmc_delays[] = {
75 	[SUNXI_MMC_TIMING_400K]		= { 180,	180 },
76 	[SUNXI_MMC_TIMING_25M]		= { 180,	 75 },
77 	[SUNXI_MMC_TIMING_50M]		= {  90,	120 },
78 	[SUNXI_MMC_TIMING_50M_DDR]	= {  60,	120 },
79 	[SUNXI_MMC_TIMING_50M_DDR_8BIT]	= {  90,	180 },
80 };
81 
82 static const struct sunxi_mmc_delay sun9i_mmc_delays[] = {
83 	[SUNXI_MMC_TIMING_400K]		= { 180,	180 },
84 	[SUNXI_MMC_TIMING_25M]		= { 180,	 75 },
85 	[SUNXI_MMC_TIMING_50M]		= { 150,	120 },
86 	[SUNXI_MMC_TIMING_50M_DDR]	= {  54,	 36 },
87 	[SUNXI_MMC_TIMING_50M_DDR_8BIT]	= {  72,	 72 },
88 };
89 
90 #define SUNXI_MMC_NDESC		64
91 
92 struct sunxi_mmc_softc;
93 
94 static int	sunxi_mmc_match(device_t, cfdata_t, void *);
95 static void	sunxi_mmc_attach(device_t, device_t, void *);
96 static void	sunxi_mmc_attach_i(device_t);
97 
98 static int	sunxi_mmc_intr(void *);
99 static int	sunxi_mmc_dmabounce_setup(struct sunxi_mmc_softc *);
100 static int	sunxi_mmc_idma_setup(struct sunxi_mmc_softc *);
101 
102 static int	sunxi_mmc_host_reset(sdmmc_chipset_handle_t);
103 static uint32_t	sunxi_mmc_host_ocr(sdmmc_chipset_handle_t);
104 static int	sunxi_mmc_host_maxblklen(sdmmc_chipset_handle_t);
105 static int	sunxi_mmc_card_detect(sdmmc_chipset_handle_t);
106 static int	sunxi_mmc_write_protect(sdmmc_chipset_handle_t);
107 static int	sunxi_mmc_bus_power(sdmmc_chipset_handle_t, uint32_t);
108 static int	sunxi_mmc_bus_clock(sdmmc_chipset_handle_t, int, bool);
109 static int	sunxi_mmc_bus_width(sdmmc_chipset_handle_t, int);
110 static int	sunxi_mmc_bus_rod(sdmmc_chipset_handle_t, int);
111 static int	sunxi_mmc_signal_voltage(sdmmc_chipset_handle_t, int);
112 static void	sunxi_mmc_exec_command(sdmmc_chipset_handle_t,
113 				      struct sdmmc_command *);
114 static void	sunxi_mmc_card_enable_intr(sdmmc_chipset_handle_t, int);
115 static void	sunxi_mmc_card_intr_ack(sdmmc_chipset_handle_t);
116 
117 static struct sdmmc_chip_functions sunxi_mmc_chip_functions = {
118 	.host_reset = sunxi_mmc_host_reset,
119 	.host_ocr = sunxi_mmc_host_ocr,
120 	.host_maxblklen = sunxi_mmc_host_maxblklen,
121 	.card_detect = sunxi_mmc_card_detect,
122 	.write_protect = sunxi_mmc_write_protect,
123 	.bus_power = sunxi_mmc_bus_power,
124 	.bus_clock_ddr = sunxi_mmc_bus_clock,
125 	.bus_width = sunxi_mmc_bus_width,
126 	.bus_rod = sunxi_mmc_bus_rod,
127 	.signal_voltage = sunxi_mmc_signal_voltage,
128 	.exec_command = sunxi_mmc_exec_command,
129 	.card_enable_intr = sunxi_mmc_card_enable_intr,
130 	.card_intr_ack = sunxi_mmc_card_intr_ack,
131 };
132 
133 struct sunxi_mmc_config {
134 	u_int idma_xferlen;
135 	u_int flags;
136 #define	SUNXI_MMC_FLAG_CALIB_REG	0x01
137 #define	SUNXI_MMC_FLAG_NEW_TIMINGS	0x02
138 #define	SUNXI_MMC_FLAG_MASK_DATA0	0x04
139 	const struct sunxi_mmc_delay *delays;
140 	uint32_t dma_ftrglevel;
141 };
142 
143 struct sunxi_mmc_softc {
144 	device_t sc_dev;
145 	bus_space_tag_t sc_bst;
146 	bus_space_handle_t sc_bsh;
147 	bus_dma_tag_t sc_dmat;
148 	int sc_phandle;
149 
150 	void *sc_ih;
151 	kmutex_t sc_intr_lock;
152 	kcondvar_t sc_intr_cv;
153 	kcondvar_t sc_idst_cv;
154 
155 	int sc_mmc_width;
156 	int sc_mmc_present;
157 
158 	device_t sc_sdmmc_dev;
159 
160 	struct sunxi_mmc_config *sc_config;
161 
162 	bus_dma_segment_t sc_idma_segs[1];
163 	int sc_idma_nsegs;
164 	bus_size_t sc_idma_size;
165 	bus_dmamap_t sc_idma_map;
166 	int sc_idma_ndesc;
167 	void *sc_idma_desc;
168 
169 	bus_dmamap_t sc_dmabounce_map;
170 	void *sc_dmabounce_buf;
171 	size_t sc_dmabounce_buflen;
172 
173 	uint32_t sc_intr_rint;
174 	uint32_t sc_idma_idst;
175 
176 	struct clk *sc_clk_ahb;
177 	struct clk *sc_clk_mmc;
178 	struct clk *sc_clk_output;
179 	struct clk *sc_clk_sample;
180 
181 	struct fdtbus_reset *sc_rst_ahb;
182 
183 	struct fdtbus_gpio_pin *sc_gpio_cd;
184 	int sc_gpio_cd_inverted;
185 	struct fdtbus_gpio_pin *sc_gpio_wp;
186 	int sc_gpio_wp_inverted;
187 
188 	struct fdtbus_regulator *sc_reg_vqmmc;
189 
190 	struct fdtbus_mmc_pwrseq *sc_pwrseq;
191 
192 	bool sc_non_removable;
193 	bool sc_broken_cd;
194 };
195 
196 CFATTACH_DECL_NEW(sunxi_mmc, sizeof(struct sunxi_mmc_softc),
197 	sunxi_mmc_match, sunxi_mmc_attach, NULL, NULL);
198 
199 #define MMC_WRITE(sc, reg, val)	\
200 	bus_space_write_4((sc)->sc_bst, (sc)->sc_bsh, (reg), (val))
201 #define MMC_READ(sc, reg) \
202 	bus_space_read_4((sc)->sc_bst, (sc)->sc_bsh, (reg))
203 
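/*
 * Per-SoC configuration: the maximum transfer length of a single IDMA
 * descriptor, the value written to the FIFO threshold register
 * (FTRGLEVEL), an optional clock phase delay table, and feature flags.
 */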
204 static const struct sunxi_mmc_config sun4i_a10_mmc_config = {
205 	.idma_xferlen = 0x2000,
206 	.dma_ftrglevel = 0x20070008,
207 	.delays = NULL,
208 	.flags = 0,
209 };
210 
211 static const struct sunxi_mmc_config sun5i_a13_mmc_config = {
212 	.idma_xferlen = 0x10000,
213 	.dma_ftrglevel = 0x20070008,
214 	.delays = NULL,
215 	.flags = 0,
216 };
217 
218 static const struct sunxi_mmc_config sun7i_a20_mmc_config = {
219 	.idma_xferlen = 0x2000,
220 	.dma_ftrglevel = 0x20070008,
221 	.delays = sun7i_mmc_delays,
222 	.flags = 0,
223 };
224 
225 static const struct sunxi_mmc_config sun8i_a83t_emmc_config = {
226 	.idma_xferlen = 0x10000,
227 	.dma_ftrglevel = 0x20070008,
228 	.delays = NULL,
229 	.flags = SUNXI_MMC_FLAG_NEW_TIMINGS,
230 };
231 
232 static const struct sunxi_mmc_config sun9i_a80_mmc_config = {
233 	.idma_xferlen = 0x10000,
234 	.dma_ftrglevel = 0x200f0010,
235 	.delays = sun9i_mmc_delays,
236 	.flags = 0,
237 };
238 
239 static const struct sunxi_mmc_config sun50i_a64_mmc_config = {
240 	.idma_xferlen = 0x10000,
241 	.dma_ftrglevel = 0x20070008,
242 	.delays = NULL,
243 	.flags = SUNXI_MMC_FLAG_CALIB_REG |
244 		 SUNXI_MMC_FLAG_NEW_TIMINGS |
245 		 SUNXI_MMC_FLAG_MASK_DATA0,
246 };
247 
248 static const struct sunxi_mmc_config sun50i_a64_emmc_config = {
249 	.idma_xferlen = 0x2000,
250 	.dma_ftrglevel = 0x20070008,
251 	.delays = NULL,
252 	.flags = SUNXI_MMC_FLAG_CALIB_REG,
253 };
254 
255 static const struct sunxi_mmc_config sun50i_h6_mmc_config = {
256 	.idma_xferlen = 0x10000,
257 	.dma_ftrglevel = 0x20070008,
258 	.delays = NULL,
259 	.flags = SUNXI_MMC_FLAG_CALIB_REG |
260 		 SUNXI_MMC_FLAG_NEW_TIMINGS |
261 		 SUNXI_MMC_FLAG_MASK_DATA0,
262 };
263 
264 static const struct sunxi_mmc_config sun50i_h6_emmc_config = {
265 	.idma_xferlen = 0x2000,
266 	.dma_ftrglevel = 0x20070008,
267 	.delays = NULL,
268 	.flags = SUNXI_MMC_FLAG_CALIB_REG,
269 };
270 
271 static const struct of_compat_data compat_data[] = {
272 	{ "allwinner,sun4i-a10-mmc",	(uintptr_t)&sun4i_a10_mmc_config },
273 	{ "allwinner,sun5i-a13-mmc",	(uintptr_t)&sun5i_a13_mmc_config },
274 	{ "allwinner,sun7i-a20-mmc",	(uintptr_t)&sun7i_a20_mmc_config },
275 	{ "allwinner,sun8i-a83t-emmc",	(uintptr_t)&sun8i_a83t_emmc_config },
276 	{ "allwinner,sun9i-a80-mmc",	(uintptr_t)&sun9i_a80_mmc_config },
277 	{ "allwinner,sun50i-a64-mmc",	(uintptr_t)&sun50i_a64_mmc_config },
278 	{ "allwinner,sun50i-a64-emmc",	(uintptr_t)&sun50i_a64_emmc_config },
279 	{ "allwinner,sun50i-h6-mmc",	(uintptr_t)&sun50i_h6_mmc_config },
280 	{ "allwinner,sun50i-h6-emmc",	(uintptr_t)&sun50i_h6_emmc_config },
281 	{ NULL }
282 };
283 
284 static int
285 sunxi_mmc_match(device_t parent, cfdata_t cf, void *aux)
286 {
287 	struct fdt_attach_args * const faa = aux;
288 
289 	return of_match_compat_data(faa->faa_phandle, compat_data);
290 }
291 
292 static void
293 sunxi_mmc_attach(device_t parent, device_t self, void *aux)
294 {
295 	struct sunxi_mmc_softc * const sc = device_private(self);
296 	struct fdt_attach_args * const faa = aux;
297 	const int phandle = faa->faa_phandle;
298 	char intrstr[128];
299 	bus_addr_t addr;
300 	bus_size_t size;
301 
302 	if (fdtbus_get_reg(phandle, 0, &addr, &size) != 0) {
303 		aprint_error(": couldn't get registers\n");
304 		return;
305 	}
306 
307 	sc->sc_clk_ahb = fdtbus_clock_get(phandle, "ahb");
308 	sc->sc_clk_mmc = fdtbus_clock_get(phandle, "mmc");
309 	sc->sc_clk_output = fdtbus_clock_get(phandle, "output");
310 	sc->sc_clk_sample = fdtbus_clock_get(phandle, "sample");
311 
312 #if notyet
313 	if (sc->sc_clk_ahb == NULL || sc->sc_clk_mmc == NULL ||
314 	    sc->sc_clk_output == NULL || sc->sc_clk_sample == NULL) {
315 #else
316 	if (sc->sc_clk_ahb == NULL || sc->sc_clk_mmc == NULL) {
317 #endif
318 		aprint_error(": couldn't get clocks\n");
319 		return;
320 	}
321 
322 	sc->sc_rst_ahb = fdtbus_reset_get(phandle, "ahb");
323 
324 	sc->sc_reg_vqmmc = fdtbus_regulator_acquire(phandle, "vqmmc-supply");
325 
326 	sc->sc_pwrseq = fdtbus_mmc_pwrseq_get(phandle);
327 
328 	if (clk_enable(sc->sc_clk_ahb) != 0 ||
329 	    clk_enable(sc->sc_clk_mmc) != 0) {
330 		aprint_error(": couldn't enable clocks\n");
331 		return;
332 	}
333 
334 	if (sc->sc_rst_ahb != NULL) {
335 		if (fdtbus_reset_deassert(sc->sc_rst_ahb) != 0) {
336 			aprint_error(": couldn't de-assert resets\n");
337 			return;
338 		}
339 	}
340 
341 	sc->sc_dev = self;
342 	sc->sc_phandle = phandle;
343 	sc->sc_config = (void *)of_search_compatible(phandle, compat_data)->data;
344 	sc->sc_bst = faa->faa_bst;
345 	sc->sc_dmat = faa->faa_dmat;
346 	mutex_init(&sc->sc_intr_lock, MUTEX_DEFAULT, IPL_BIO);
347 	cv_init(&sc->sc_intr_cv, "awinmmcirq");
348 	cv_init(&sc->sc_idst_cv, "awinmmcdma");
349 
350 	if (bus_space_map(sc->sc_bst, addr, size, 0, &sc->sc_bsh) != 0) {
351 		aprint_error(": couldn't map registers\n");
352 		return;
353 	}
354 
355 	aprint_naive("\n");
356 	aprint_normal(": SD/MMC controller\n");
357 
358 	sc->sc_gpio_cd = fdtbus_gpio_acquire(phandle, "cd-gpios",
359 	    GPIO_PIN_INPUT);
360 	sc->sc_gpio_wp = fdtbus_gpio_acquire(phandle, "wp-gpios",
361 	    GPIO_PIN_INPUT);
362 
363 	sc->sc_gpio_cd_inverted = of_hasprop(phandle, "cd-inverted") ? 0 : 1;
364 	sc->sc_gpio_wp_inverted = of_hasprop(phandle, "wp-inverted") ? 0 : 1;
365 
366 	sc->sc_non_removable = of_hasprop(phandle, "non-removable");
367 	sc->sc_broken_cd = of_hasprop(phandle, "broken-cd");
368 
369 	if (sunxi_mmc_dmabounce_setup(sc) != 0 ||
370 	    sunxi_mmc_idma_setup(sc) != 0) {
371 		aprint_error_dev(self, "failed to setup DMA\n");
372 		return;
373 	}
374 
375 	if (!fdtbus_intr_str(phandle, 0, intrstr, sizeof(intrstr))) {
376 		aprint_error_dev(self, "failed to decode interrupt\n");
377 		return;
378 	}
379 
380 	sc->sc_ih = fdtbus_intr_establish(phandle, 0, IPL_BIO, FDT_INTR_MPSAFE,
381 	    sunxi_mmc_intr, sc);
382 	if (sc->sc_ih == NULL) {
383 		aprint_error_dev(self, "failed to establish interrupt on %s\n",
384 		    intrstr);
385 		return;
386 	}
387 	aprint_normal_dev(self, "interrupting on %s\n", intrstr);
388 
389 	config_interrupts(self, sunxi_mmc_attach_i);
390 }
391 
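/*
 * Allocate a single DMA bounce buffer, sized to the host's maximum block
 * length.  sunxi_mmc_dma_prepare() stages data through it for commands
 * that arrive without a pre-loaded dmamap (e.g. SDIO CMD53).
 */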
392 static int
393 sunxi_mmc_dmabounce_setup(struct sunxi_mmc_softc *sc)
394 {
395 	bus_dma_segment_t ds[1];
396 	int error, rseg;
397 
398 	sc->sc_dmabounce_buflen = sunxi_mmc_host_maxblklen(sc);
399 	error = bus_dmamem_alloc(sc->sc_dmat, sc->sc_dmabounce_buflen, 0,
400 	    sc->sc_dmabounce_buflen, ds, 1, &rseg, BUS_DMA_WAITOK);
401 	if (error)
402 		return error;
403 	error = bus_dmamem_map(sc->sc_dmat, ds, 1, sc->sc_dmabounce_buflen,
404 	    &sc->sc_dmabounce_buf, BUS_DMA_WAITOK);
405 	if (error)
406 		goto free;
407 	error = bus_dmamap_create(sc->sc_dmat, sc->sc_dmabounce_buflen, 1,
408 	    sc->sc_dmabounce_buflen, 0, BUS_DMA_WAITOK, &sc->sc_dmabounce_map);
409 	if (error)
410 		goto unmap;
411 	error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmabounce_map,
412 	    sc->sc_dmabounce_buf, sc->sc_dmabounce_buflen, NULL,
413 	    BUS_DMA_WAITOK);
414 	if (error)
415 		goto destroy;
416 	return 0;
417 
418 destroy:
419 	bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmabounce_map);
420 unmap:
421 	bus_dmamem_unmap(sc->sc_dmat, sc->sc_dmabounce_buf,
422 	    sc->sc_dmabounce_buflen);
423 free:
424 	bus_dmamem_free(sc->sc_dmat, ds, rseg);
425 	return error;
426 }
427 
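/*
 * Allocate and map a contiguous array of SUNXI_MMC_NDESC internal DMA
 * descriptors.  sunxi_mmc_dma_prepare() links them into a chain by
 * physical address for each data transfer.
 */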
428 static int
429 sunxi_mmc_idma_setup(struct sunxi_mmc_softc *sc)
430 {
431 	int error;
432 
433 	sc->sc_idma_ndesc = SUNXI_MMC_NDESC;
434 	sc->sc_idma_size = sizeof(struct sunxi_mmc_idma_descriptor) *
435 	    sc->sc_idma_ndesc;
436 	error = bus_dmamem_alloc(sc->sc_dmat, sc->sc_idma_size, 0,
437 	    sc->sc_idma_size, sc->sc_idma_segs, 1,
438 	    &sc->sc_idma_nsegs, BUS_DMA_WAITOK);
439 	if (error)
440 		return error;
441 	error = bus_dmamem_map(sc->sc_dmat, sc->sc_idma_segs,
442 	    sc->sc_idma_nsegs, sc->sc_idma_size,
443 	    &sc->sc_idma_desc, BUS_DMA_WAITOK);
444 	if (error)
445 		goto free;
446 	error = bus_dmamap_create(sc->sc_dmat, sc->sc_idma_size, 1,
447 	    sc->sc_idma_size, 0, BUS_DMA_WAITOK, &sc->sc_idma_map);
448 	if (error)
449 		goto unmap;
450 	error = bus_dmamap_load(sc->sc_dmat, sc->sc_idma_map,
451 	    sc->sc_idma_desc, sc->sc_idma_size, NULL, BUS_DMA_WAITOK);
452 	if (error)
453 		goto destroy;
454 	return 0;
455 
456 destroy:
457 	bus_dmamap_destroy(sc->sc_dmat, sc->sc_idma_map);
458 unmap:
459 	bus_dmamem_unmap(sc->sc_dmat, sc->sc_idma_desc, sc->sc_idma_size);
460 free:
461 	bus_dmamem_free(sc->sc_dmat, sc->sc_idma_segs, sc->sc_idma_nsegs);
462 	return error;
463 }
464 
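/*
 * Program the card clock: pick a timing bucket from the requested
 * frequency (in kHz), set the mmc module clock to freq * 1000 Hz
 * (doubled for DDR), and, where the SoC provides a delay table, apply
 * the corresponding phase delays to the sample/output clocks.
 */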
465 static int
466 sunxi_mmc_set_clock(struct sunxi_mmc_softc *sc, u_int freq, bool ddr)
467 {
468 	const struct sunxi_mmc_delay *delays;
469 	int error, timing;
470 
471 	if (freq <= 400) {
472 		timing = SUNXI_MMC_TIMING_400K;
473 	} else if (freq <= 25000) {
474 		timing = SUNXI_MMC_TIMING_25M;
475 	} else if (freq <= 52000) {
476 		if (ddr) {
477 			timing = sc->sc_mmc_width == 8 ?
478 			    SUNXI_MMC_TIMING_50M_DDR_8BIT :
479 			    SUNXI_MMC_TIMING_50M_DDR;
480 		} else {
481 			timing = SUNXI_MMC_TIMING_50M;
482 		}
483 	} else
484 		return EINVAL;
485 
486 	error = clk_set_rate(sc->sc_clk_mmc, (freq * 1000) << ddr);
487 	if (error != 0)
488 		return error;
489 
490 	if (sc->sc_config->delays == NULL)
491 		return 0;
492 
493 	delays = &sc->sc_config->delays[timing];
494 
495 	if (sc->sc_clk_sample) {
496 		error = clk_set_rate(sc->sc_clk_sample, delays->sample_phase);
497 		if (error != 0)
498 			return error;
499 	}
500 	if (sc->sc_clk_output) {
501 		error = clk_set_rate(sc->sc_clk_output, delays->output_phase);
502 		if (error != 0)
503 			return error;
504 	}
505 
506 	return 0;
507 }
508 
509 static void
510 sunxi_mmc_attach_i(device_t self)
511 {
512 	struct sunxi_mmc_softc *sc = device_private(self);
513 	struct sdmmcbus_attach_args saa;
514 	uint32_t width;
515 
516 	if (sc->sc_pwrseq)
517 		fdtbus_mmc_pwrseq_pre_power_on(sc->sc_pwrseq);
518 
519 	sunxi_mmc_host_reset(sc);
520 	sunxi_mmc_bus_width(sc, 1);
521 	sunxi_mmc_set_clock(sc, 400, false);
522 
523 	if (sc->sc_pwrseq)
524 		fdtbus_mmc_pwrseq_post_power_on(sc->sc_pwrseq);
525 
526 	if (of_getprop_uint32(sc->sc_phandle, "bus-width", &width) != 0)
527 		width = 4;
528 
529 	memset(&saa, 0, sizeof(saa));
530 	saa.saa_busname = "sdmmc";
531 	saa.saa_sct = &sunxi_mmc_chip_functions;
532 	saa.saa_sch = sc;
533 	saa.saa_dmat = sc->sc_dmat;
534 	saa.saa_clkmin = 400;
535 	saa.saa_clkmax = 52000;
536 	saa.saa_caps = SMC_CAPS_DMA |
537 		       SMC_CAPS_MULTI_SEG_DMA |
538 		       SMC_CAPS_AUTO_STOP |
539 		       SMC_CAPS_SD_HIGHSPEED |
540 		       SMC_CAPS_MMC_HIGHSPEED |
541 		       SMC_CAPS_MMC_DDR52 |
542 		       SMC_CAPS_POLLING;
543 	if (width == 4)
544 		saa.saa_caps |= SMC_CAPS_4BIT_MODE;
545 	if (width == 8)
546 		saa.saa_caps |= SMC_CAPS_8BIT_MODE;
547 
548 	if (sc->sc_gpio_cd)
549 		saa.saa_caps |= SMC_CAPS_POLL_CARD_DET;
550 
551 	sc->sc_sdmmc_dev = config_found(self, &saa, NULL);
552 }
553 
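/*
 * Interrupt handler: latch and acknowledge the IDMA status (IDST) and
 * raw interrupt (RINT) bits, wake any waiters in sunxi_mmc_exec_command()
 * and sunxi_mmc_wait_rint(), and forward SDIO card interrupts to the
 * sdmmc layer (the SDIO bit itself is acknowledged later through
 * sunxi_mmc_card_intr_ack()).
 */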
554 static int
555 sunxi_mmc_intr(void *priv)
556 {
557 	struct sunxi_mmc_softc *sc = priv;
558 	uint32_t idst, rint, imask;
559 
560 	mutex_enter(&sc->sc_intr_lock);
561 	idst = MMC_READ(sc, SUNXI_MMC_IDST);
562 	rint = MMC_READ(sc, SUNXI_MMC_RINT);
563 	if (!idst && !rint) {
564 		mutex_exit(&sc->sc_intr_lock);
565 		return 0;
566 	}
567 	MMC_WRITE(sc, SUNXI_MMC_IDST, idst);
568 	MMC_WRITE(sc, SUNXI_MMC_RINT, rint & ~SUNXI_MMC_INT_SDIO_INT);
569 
570 	DPRINTF(sc->sc_dev, "mmc intr idst=%08X rint=%08X\n",
571 	    idst, rint);
572 
573 	if (idst != 0) {
574 		MMC_WRITE(sc, SUNXI_MMC_IDIE, 0);
575 		sc->sc_idma_idst |= idst;
576 		cv_broadcast(&sc->sc_idst_cv);
577 	}
578 
579 	if ((rint & ~SUNXI_MMC_INT_SDIO_INT) != 0) {
580 		imask = MMC_READ(sc, SUNXI_MMC_IMASK);
581 		MMC_WRITE(sc, SUNXI_MMC_IMASK, imask & ~SUNXI_MMC_INT_SDIO_INT);
582 		sc->sc_intr_rint |= (rint & ~SUNXI_MMC_INT_SDIO_INT);
583 		cv_broadcast(&sc->sc_intr_cv);
584 	}
585 
586 	if ((rint & SUNXI_MMC_INT_SDIO_INT) != 0) {
587 		sdmmc_card_intr(sc->sc_sdmmc_dev);
588 	}
589 
590 	mutex_exit(&sc->sc_intr_lock);
591 
592 	return 1;
593 }
594 
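/*
 * Wait for any of the interrupt bits in 'mask' to accumulate in
 * sc_intr_rint, either by polling RINT directly in 1 ms steps or by
 * sleeping on sc_intr_cv until the interrupt handler posts them.
 * 'timeout' is in ticks.
 */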
595 static int
596 sunxi_mmc_wait_rint(struct sunxi_mmc_softc *sc, uint32_t mask,
597     int timeout, bool poll)
598 {
599 	int retry;
600 	int error;
601 
602 	KASSERT(mutex_owned(&sc->sc_intr_lock));
603 
604 	if (sc->sc_intr_rint & mask)
605 		return 0;
606 
607 	if (poll)
608 		retry = timeout / hz * 1000;
609 	else
610 		retry = timeout / hz;
611 
612 	while (retry > 0) {
613 		if (poll) {
614 			sc->sc_intr_rint |= MMC_READ(sc, SUNXI_MMC_RINT);
615 		} else {
616 			error = cv_timedwait(&sc->sc_intr_cv,
617 			    &sc->sc_intr_lock, hz);
618 			if (error && error != EWOULDBLOCK)
619 				return error;
620 		}
621 		if (sc->sc_intr_rint & mask)
622 			return 0;
623 		if (poll)
624 			delay(1000);
625 		--retry;
626 	}
627 
628 	return ETIMEDOUT;
629 }
630 
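/*
 * Soft-reset the controller, set the data/response timeout to the
 * maximum, mask and clear all interrupts, then re-enable the global
 * interrupt and clear the ACCESS_BY_AHB and WAIT_MEM_ACCESS_DONE bits
 * so transfers go through the internal DMA engine.
 */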
631 static int
632 sunxi_mmc_host_reset(sdmmc_chipset_handle_t sch)
633 {
634 	struct sunxi_mmc_softc *sc = sch;
635 	uint32_t gctrl;
636 	int retry = 1000;
637 
638 	DPRINTF(sc->sc_dev, "host reset\n");
639 
640 	gctrl = MMC_READ(sc, SUNXI_MMC_GCTRL);
641 	gctrl |= SUNXI_MMC_GCTRL_RESET;
642 	MMC_WRITE(sc, SUNXI_MMC_GCTRL, gctrl);
643 	while (--retry > 0) {
644 		if (!(MMC_READ(sc, SUNXI_MMC_GCTRL) & SUNXI_MMC_GCTRL_RESET))
645 			break;
646 		delay(100);
647 	}
648 
649 	MMC_WRITE(sc, SUNXI_MMC_TIMEOUT, 0xffffffff);
650 
651 	MMC_WRITE(sc, SUNXI_MMC_IMASK, 0);
652 
653 	MMC_WRITE(sc, SUNXI_MMC_RINT, 0xffffffff);
654 
655 	gctrl = MMC_READ(sc, SUNXI_MMC_GCTRL);
656 	gctrl |= SUNXI_MMC_GCTRL_INTEN;
657 	gctrl &= ~SUNXI_MMC_GCTRL_WAIT_MEM_ACCESS_DONE;
658 	gctrl &= ~SUNXI_MMC_GCTRL_ACCESS_BY_AHB;
659 	MMC_WRITE(sc, SUNXI_MMC_GCTRL, gctrl);
660 
661 	return 0;
662 }
663 
664 static uint32_t
665 sunxi_mmc_host_ocr(sdmmc_chipset_handle_t sch)
666 {
667 	return MMC_OCR_3_2V_3_3V | MMC_OCR_3_3V_3_4V | MMC_OCR_HCS;
668 }
669 
670 static int
671 sunxi_mmc_host_maxblklen(sdmmc_chipset_handle_t sch)
672 {
673 	return 8192;
674 }
675 
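/*
 * Card detect: the "non-removable" and "broken-cd" devicetree properties
 * force the card to be reported as present; otherwise a card-detect GPIO
 * is sampled five times, 1 ms apart, as a simple debounce (the cached
 * presence state only changes when all five samples agree), and as a
 * last resort the CARD_PRESENT bit in the STATUS register is used.
 */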
676 static int
677 sunxi_mmc_card_detect(sdmmc_chipset_handle_t sch)
678 {
679 	struct sunxi_mmc_softc *sc = sch;
680 
681 	if (sc->sc_non_removable || sc->sc_broken_cd) {
682 		/*
683 		 * The "non-removable" or "broken-cd" property is set in
684 		 * the DT; assume the card is always present.
685 		 */
686 		return 1;
687 	} else if (sc->sc_gpio_cd != NULL) {
688 		/* Use card detect GPIO */
689 		int v = 0, i;
690 		for (i = 0; i < 5; i++) {
691 			v += (fdtbus_gpio_read(sc->sc_gpio_cd) ^
692 			    sc->sc_gpio_cd_inverted);
693 			delay(1000);
694 		}
695 		if (v == 5)
696 			sc->sc_mmc_present = 0;
697 		else if (v == 0)
698 			sc->sc_mmc_present = 1;
699 		return sc->sc_mmc_present;
700 	} else {
701 		/* Use CARD_PRESENT field of SD_STATUS register */
702 		const uint32_t present = MMC_READ(sc, SUNXI_MMC_STATUS) &
703 		    SUNXI_MMC_STATUS_CARD_PRESENT;
704 		return present != 0;
705 	}
706 }
707 
708 static int
709 sunxi_mmc_write_protect(sdmmc_chipset_handle_t sch)
710 {
711 	struct sunxi_mmc_softc *sc = sch;
712 
713 	if (sc->sc_gpio_wp == NULL) {
714 		return 0;	/* no write protect pin, assume rw */
715 	} else {
716 		return fdtbus_gpio_read(sc->sc_gpio_wp) ^
717 		    sc->sc_gpio_wp_inverted;
718 	}
719 }
720 
721 static int
722 sunxi_mmc_bus_power(sdmmc_chipset_handle_t sch, uint32_t ocr)
723 {
724 	return 0;
725 }
726 
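/*
 * Ask the controller to latch the new clock settings into the card clock
 * by issuing a command with the "update clock only" bit set, then poll
 * until the controller clears the start bit.
 */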
727 static int
728 sunxi_mmc_update_clock(struct sunxi_mmc_softc *sc)
729 {
730 	uint32_t cmd;
731 	int retry;
732 
733 	DPRINTF(sc->sc_dev, "update clock\n");
734 
735 	cmd = SUNXI_MMC_CMD_START |
736 	      SUNXI_MMC_CMD_UPCLK_ONLY |
737 	      SUNXI_MMC_CMD_WAIT_PRE_OVER;
738 	MMC_WRITE(sc, SUNXI_MMC_CMD, cmd);
739 	retry = 0xfffff;
740 	while (--retry > 0) {
741 		if (!(MMC_READ(sc, SUNXI_MMC_CMD) & SUNXI_MMC_CMD_START))
742 			break;
743 		delay(10);
744 	}
745 
746 	if (retry == 0) {
747 		aprint_error_dev(sc->sc_dev, "timeout updating clock\n");
748 		DPRINTF(sc->sc_dev, "GCTRL: 0x%08x\n",
749 		    MMC_READ(sc, SUNXI_MMC_GCTRL));
750 		DPRINTF(sc->sc_dev, "CLKCR: 0x%08x\n",
751 		    MMC_READ(sc, SUNXI_MMC_CLKCR));
752 		DPRINTF(sc->sc_dev, "TIMEOUT: 0x%08x\n",
753 		    MMC_READ(sc, SUNXI_MMC_TIMEOUT));
754 		DPRINTF(sc->sc_dev, "WIDTH: 0x%08x\n",
755 		    MMC_READ(sc, SUNXI_MMC_WIDTH));
756 		DPRINTF(sc->sc_dev, "CMD: 0x%08x\n",
757 		    MMC_READ(sc, SUNXI_MMC_CMD));
758 		DPRINTF(sc->sc_dev, "MINT: 0x%08x\n",
759 		    MMC_READ(sc, SUNXI_MMC_MINT));
760 		DPRINTF(sc->sc_dev, "RINT: 0x%08x\n",
761 		    MMC_READ(sc, SUNXI_MMC_RINT));
762 		DPRINTF(sc->sc_dev, "STATUS: 0x%08x\n",
763 		    MMC_READ(sc, SUNXI_MMC_STATUS));
764 		return ETIMEDOUT;
765 	}
766 
767 	return 0;
768 }
769 
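/*
 * Change the card clock.  If the card clock is running it is gated off
 * first and the change latched with sunxi_mmc_update_clock().  When a
 * new frequency is requested, the CLKCR divider is programmed (non-zero
 * only for DDR, where sunxi_mmc_set_clock() doubles the module clock),
 * the new timing mode and sample delay calibration are selected on SoCs
 * that need them, DDR mode is set in GCTRL, the module clock rate and
 * phase delays are applied, and the card clock is gated back on, again
 * latching each step with sunxi_mmc_update_clock().  Controllers with
 * SUNXI_MMC_FLAG_MASK_DATA0 mask DATA0 while the clock is switched.
 */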
770 static int
771 sunxi_mmc_bus_clock(sdmmc_chipset_handle_t sch, int freq, bool ddr)
772 {
773 	struct sunxi_mmc_softc *sc = sch;
774 	uint32_t clkcr, gctrl, ntsr;
775 	const u_int flags = sc->sc_config->flags;
776 
777 	clkcr = MMC_READ(sc, SUNXI_MMC_CLKCR);
778 	if (clkcr & SUNXI_MMC_CLKCR_CARDCLKON) {
779 		clkcr &= ~SUNXI_MMC_CLKCR_CARDCLKON;
780 		if (flags & SUNXI_MMC_FLAG_MASK_DATA0)
781 			clkcr |= SUNXI_MMC_CLKCR_MASK_DATA0;
782 		MMC_WRITE(sc, SUNXI_MMC_CLKCR, clkcr);
783 		if (sunxi_mmc_update_clock(sc) != 0)
784 			return 1;
785 		if (flags & SUNXI_MMC_FLAG_MASK_DATA0) {
786 			clkcr = MMC_READ(sc, SUNXI_MMC_CLKCR);
787 			clkcr &= ~SUNXI_MMC_CLKCR_MASK_DATA0;
788 			MMC_WRITE(sc, SUNXI_MMC_CLKCR, clkcr);
789 		}
790 	}
791 
792 	if (freq) {
793 
794 		clkcr &= ~SUNXI_MMC_CLKCR_DIV;
795 		clkcr |= __SHIFTIN(ddr, SUNXI_MMC_CLKCR_DIV);
796 		MMC_WRITE(sc, SUNXI_MMC_CLKCR, clkcr);
797 
798 		if (flags & SUNXI_MMC_FLAG_NEW_TIMINGS) {
799 			ntsr = MMC_READ(sc, SUNXI_MMC_NTSR);
800 			ntsr |= SUNXI_MMC_NTSR_MODE_SELECT;
801 			MMC_WRITE(sc, SUNXI_MMC_NTSR, ntsr);
802 		}
803 
804 		if (flags & SUNXI_MMC_FLAG_CALIB_REG)
805 			MMC_WRITE(sc, SUNXI_MMC_SAMP_DL, SUNXI_MMC_SAMP_DL_SW_EN);
806 
807 		if (sunxi_mmc_update_clock(sc) != 0)
808 			return 1;
809 
810 		gctrl = MMC_READ(sc, SUNXI_MMC_GCTRL);
811 		if (ddr)
812 			gctrl |= SUNXI_MMC_GCTRL_DDR_MODE;
813 		else
814 			gctrl &= ~SUNXI_MMC_GCTRL_DDR_MODE;
815 		MMC_WRITE(sc, SUNXI_MMC_GCTRL, gctrl);
816 
817 		if (sunxi_mmc_set_clock(sc, freq, ddr) != 0)
818 			return 1;
819 
820 		clkcr |= SUNXI_MMC_CLKCR_CARDCLKON;
821 		if (flags & SUNXI_MMC_FLAG_MASK_DATA0)
822 			clkcr |= SUNXI_MMC_CLKCR_MASK_DATA0;
823 		MMC_WRITE(sc, SUNXI_MMC_CLKCR, clkcr);
824 		if (sunxi_mmc_update_clock(sc) != 0)
825 			return 1;
826 		if (flags & SUNXI_MMC_FLAG_MASK_DATA0) {
827 			clkcr = MMC_READ(sc, SUNXI_MMC_CLKCR);
828 			clkcr &= ~SUNXI_MMC_CLKCR_MASK_DATA0;
829 			MMC_WRITE(sc, SUNXI_MMC_CLKCR, clkcr);
830 		}
831 	}
832 
833 	return 0;
834 }
835 
836 static int
837 sunxi_mmc_bus_width(sdmmc_chipset_handle_t sch, int width)
838 {
839 	struct sunxi_mmc_softc *sc = sch;
840 
841 	DPRINTF(sc->sc_dev, "width = %d\n", width);
842 
843 	switch (width) {
844 	case 1:
845 		MMC_WRITE(sc, SUNXI_MMC_WIDTH, SUNXI_MMC_WIDTH_1);
846 		break;
847 	case 4:
848 		MMC_WRITE(sc, SUNXI_MMC_WIDTH, SUNXI_MMC_WIDTH_4);
849 		break;
850 	case 8:
851 		MMC_WRITE(sc, SUNXI_MMC_WIDTH, SUNXI_MMC_WIDTH_8);
852 		break;
853 	default:
854 		return 1;
855 	}
856 
857 	sc->sc_mmc_width = width;
858 
859 	return 0;
860 }
861 
862 static int
863 sunxi_mmc_bus_rod(sdmmc_chipset_handle_t sch, int on)
864 {
865 	return -1;
866 }
867 
868 static int
869 sunxi_mmc_signal_voltage(sdmmc_chipset_handle_t sch, int signal_voltage)
870 {
871 	struct sunxi_mmc_softc *sc = sch;
872 	u_int uvol;
873 	int error;
874 
875 	if (sc->sc_reg_vqmmc == NULL)
876 		return 0;
877 
878 	switch (signal_voltage) {
879 	case SDMMC_SIGNAL_VOLTAGE_330:
880 		uvol = 3300000;
881 		break;
882 	case SDMMC_SIGNAL_VOLTAGE_180:
883 		uvol = 1800000;
884 		break;
885 	default:
886 		return EINVAL;
887 	}
888 
889 	error = fdtbus_regulator_set_voltage(sc->sc_reg_vqmmc, uvol, uvol);
890 	if (error != 0)
891 		return error;
892 
893 	return fdtbus_regulator_enable(sc->sc_reg_vqmmc);
894 }
895 
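/*
 * Build the IDMA descriptor chain for a data command.  Each dmamap
 * segment is split into chunks of at most idma_xferlen bytes; the first
 * descriptor gets the FD flag, the last LD and ER, and intermediate ones
 * DIC, with descriptors linked by physical address.  The DMA engine is
 * then reset and enabled.  Commands without a dmamap are staged through
 * the bounce buffer.
 */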
896 static int
897 sunxi_mmc_dma_prepare(struct sunxi_mmc_softc *sc, struct sdmmc_command *cmd)
898 {
899 	struct sunxi_mmc_idma_descriptor *dma = sc->sc_idma_desc;
900 	bus_addr_t desc_paddr = sc->sc_idma_map->dm_segs[0].ds_addr;
901 	bus_dmamap_t map;
902 	bus_size_t off;
903 	int desc, resid, seg;
904 	uint32_t val;
905 
906 	/*
907 	 * If the command includes a dma map use it, otherwise we need to
908 	 * bounce. This can happen for SDIO IO_RW_EXTENDED (CMD53) commands.
909 	 */
910 	if (cmd->c_dmamap) {
911 		map = cmd->c_dmamap;
912 	} else {
913 		if (cmd->c_datalen > sc->sc_dmabounce_buflen)
914 			return E2BIG;
915 		map = sc->sc_dmabounce_map;
916 
917 		if (ISSET(cmd->c_flags, SCF_CMD_READ)) {
918 			memset(sc->sc_dmabounce_buf, 0, cmd->c_datalen);
919 			bus_dmamap_sync(sc->sc_dmat, sc->sc_dmabounce_map,
920 			    0, cmd->c_datalen, BUS_DMASYNC_PREREAD);
921 		} else {
922 			memcpy(sc->sc_dmabounce_buf, cmd->c_data,
923 			    cmd->c_datalen);
924 			bus_dmamap_sync(sc->sc_dmat, sc->sc_dmabounce_map,
925 			    0, cmd->c_datalen, BUS_DMASYNC_PREWRITE);
926 		}
927 	}
928 
929 	desc = 0;
930 	for (seg = 0; seg < map->dm_nsegs; seg++) {
931 		bus_addr_t paddr = map->dm_segs[seg].ds_addr;
932 		bus_size_t len = map->dm_segs[seg].ds_len;
933 		resid = min(len, cmd->c_resid);
934 		off = 0;
935 		while (resid > 0) {
936 			if (desc == sc->sc_idma_ndesc)
937 				break;
938 			len = min(sc->sc_config->idma_xferlen, resid);
939 			dma[desc].dma_buf_size = htole32(len);
940 			dma[desc].dma_buf_addr = htole32(paddr + off);
941 			dma[desc].dma_config = htole32(SUNXI_MMC_IDMA_CONFIG_CH |
942 					       SUNXI_MMC_IDMA_CONFIG_OWN);
943 			cmd->c_resid -= len;
944 			resid -= len;
945 			off += len;
946 			if (desc == 0) {
947 				dma[desc].dma_config |= htole32(SUNXI_MMC_IDMA_CONFIG_FD);
948 			}
949 			if (cmd->c_resid == 0) {
950 				dma[desc].dma_config |= htole32(SUNXI_MMC_IDMA_CONFIG_LD);
951 				dma[desc].dma_config |= htole32(SUNXI_MMC_IDMA_CONFIG_ER);
952 				dma[desc].dma_next = 0;
953 			} else {
954 				dma[desc].dma_config |=
955 				    htole32(SUNXI_MMC_IDMA_CONFIG_DIC);
956 				dma[desc].dma_next = htole32(
957 				    desc_paddr + ((desc+1) *
958 				    sizeof(struct sunxi_mmc_idma_descriptor)));
959 			}
960 			++desc;
961 		}
962 	}
963 	if (desc == sc->sc_idma_ndesc) {
964 		aprint_error_dev(sc->sc_dev,
965 		    "not enough descriptors for %d byte transfer! "
966 		    "there are %u segments with a max xfer length of %u\n",
967 		    cmd->c_datalen, map->dm_nsegs, sc->sc_config->idma_xferlen);
968 		return EIO;
969 	}
970 
971 	bus_dmamap_sync(sc->sc_dmat, sc->sc_idma_map, 0,
972 	    sc->sc_idma_size, BUS_DMASYNC_PREWRITE);
973 
974 	sc->sc_idma_idst = 0;
975 
976 	MMC_WRITE(sc, SUNXI_MMC_DLBA, desc_paddr);
977 	MMC_WRITE(sc, SUNXI_MMC_FTRGLEVEL, sc->sc_config->dma_ftrglevel);
978 
979 	val = MMC_READ(sc, SUNXI_MMC_GCTRL);
980 	val |= SUNXI_MMC_GCTRL_DMAEN;
981 	MMC_WRITE(sc, SUNXI_MMC_GCTRL, val);
982 	val |= SUNXI_MMC_GCTRL_DMARESET;
983 	MMC_WRITE(sc, SUNXI_MMC_GCTRL, val);
984 
985 	MMC_WRITE(sc, SUNXI_MMC_DMAC, SUNXI_MMC_DMAC_SOFTRESET);
986 	if (ISSET(cmd->c_flags, SCF_CMD_READ))
987 		val = SUNXI_MMC_IDST_RECEIVE_INT;
988 	else
989 		val = 0;
990 	MMC_WRITE(sc, SUNXI_MMC_IDIE, val);
991 	MMC_WRITE(sc, SUNXI_MMC_DMAC,
992 	    SUNXI_MMC_DMAC_IDMA_ON|SUNXI_MMC_DMAC_FIX_BURST);
993 
994 	return 0;
995 }
996 
997 static void
998 sunxi_mmc_dma_complete(struct sunxi_mmc_softc *sc, struct sdmmc_command *cmd)
999 {
1000 	MMC_WRITE(sc, SUNXI_MMC_DMAC, 0);
1001 
1002 	bus_dmamap_sync(sc->sc_dmat, sc->sc_idma_map, 0,
1003 	    sc->sc_idma_size, BUS_DMASYNC_POSTWRITE);
1004 
1005 	if (cmd->c_dmamap == NULL) {
1006 		if (ISSET(cmd->c_flags, SCF_CMD_READ)) {
1007 			bus_dmamap_sync(sc->sc_dmat, sc->sc_dmabounce_map,
1008 			    0, cmd->c_datalen, BUS_DMASYNC_POSTREAD);
1009 			memcpy(cmd->c_data, sc->sc_dmabounce_buf,
1010 			    cmd->c_datalen);
1011 		} else {
1012 			bus_dmamap_sync(sc->sc_dmat, sc->sc_dmabounce_map,
1013 			    0, cmd->c_datalen, BUS_DMASYNC_POSTWRITE);
1014 		}
1015 	}
1016 }
1017 
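/*
 * Execute a single MMC/SD command.  The command word is assembled from
 * the sdmmc flags; data commands additionally program the block size,
 * the total byte count and the IDMA engine, and multi-block transfers
 * use the controller's auto-stop.  Completion is detected through
 * sunxi_mmc_wait_rint(), either polled or interrupt driven, after which
 * the response registers are copied out and the FIFO is reset.
 */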
1018 static void
1019 sunxi_mmc_exec_command(sdmmc_chipset_handle_t sch, struct sdmmc_command *cmd)
1020 {
1021 	struct sunxi_mmc_softc *sc = sch;
1022 	uint32_t cmdval = SUNXI_MMC_CMD_START;
1023 	uint32_t imask, oimask;
1024 	const bool poll = (cmd->c_flags & SCF_POLL) != 0;
1025 	int retry;
1026 
1027 	DPRINTF(sc->sc_dev,
1028 	    "opcode %d flags 0x%x data %p datalen %d blklen %d poll %d\n",
1029 	    cmd->c_opcode, cmd->c_flags, cmd->c_data, cmd->c_datalen,
1030 	    cmd->c_blklen, poll);
1031 
1032 	mutex_enter(&sc->sc_intr_lock);
1033 
1034 	if (cmd->c_opcode == 0)
1035 		cmdval |= SUNXI_MMC_CMD_SEND_INIT_SEQ;
1036 	if (cmd->c_flags & SCF_RSP_PRESENT)
1037 		cmdval |= SUNXI_MMC_CMD_RSP_EXP;
1038 	if (cmd->c_flags & SCF_RSP_136)
1039 		cmdval |= SUNXI_MMC_CMD_LONG_RSP;
1040 	if (cmd->c_flags & SCF_RSP_CRC)
1041 		cmdval |= SUNXI_MMC_CMD_CHECK_RSP_CRC;
1042 
1043 	imask = oimask = MMC_READ(sc, SUNXI_MMC_IMASK);
1044 	imask |= SUNXI_MMC_INT_ERROR;
1045 
1046 	if (cmd->c_datalen > 0) {
1047 		unsigned int nblks;
1048 
1049 		cmdval |= SUNXI_MMC_CMD_DATA_EXP | SUNXI_MMC_CMD_WAIT_PRE_OVER;
1050 		if (!ISSET(cmd->c_flags, SCF_CMD_READ)) {
1051 			cmdval |= SUNXI_MMC_CMD_WRITE;
1052 		}
1053 
1054 		nblks = cmd->c_datalen / cmd->c_blklen;
1055 		if (nblks == 0 || (cmd->c_datalen % cmd->c_blklen) != 0)
1056 			++nblks;
1057 
1058 		if (nblks > 1) {
1059 			cmdval |= SUNXI_MMC_CMD_SEND_AUTO_STOP;
1060 			imask |= SUNXI_MMC_INT_AUTO_CMD_DONE;
1061 		} else {
1062 			imask |= SUNXI_MMC_INT_DATA_OVER;
1063 		}
1064 
1065 		MMC_WRITE(sc, SUNXI_MMC_BLKSZ, cmd->c_blklen);
1066 		MMC_WRITE(sc, SUNXI_MMC_BYTECNT, nblks * cmd->c_blklen);
1067 	} else {
1068 		imask |= SUNXI_MMC_INT_CMD_DONE;
1069 	}
1070 
1071 	MMC_WRITE(sc, SUNXI_MMC_IMASK, imask);
1072 	MMC_WRITE(sc, SUNXI_MMC_RINT, 0xffff);
1073 
1074 	sc->sc_intr_rint = 0;
1075 
1076 	MMC_WRITE(sc, SUNXI_MMC_A12A,
1077 	    (cmdval & SUNXI_MMC_CMD_SEND_AUTO_STOP) ? 0 : 0xffff);
1078 
1079 	MMC_WRITE(sc, SUNXI_MMC_ARG, cmd->c_arg);
1080 
1081 	DPRINTF(sc->sc_dev, "cmdval = %08x\n", cmdval);
1082 
1083 	if (cmd->c_datalen == 0) {
1084 		MMC_WRITE(sc, SUNXI_MMC_CMD, cmdval | cmd->c_opcode);
1085 	} else {
1086 		cmd->c_resid = cmd->c_datalen;
1087 		cmd->c_error = sunxi_mmc_dma_prepare(sc, cmd);
1088 		MMC_WRITE(sc, SUNXI_MMC_CMD, cmdval | cmd->c_opcode);
1089 		if (cmd->c_error == 0 && ISSET(cmd->c_flags, SCF_CMD_READ)) {
1090 			const uint32_t idst_mask = SUNXI_MMC_IDST_RECEIVE_INT;
1091 
1092 			retry = 10;
1093 			while ((sc->sc_idma_idst & idst_mask) == 0) {
1094 				if (retry-- == 0) {
1095 					cmd->c_error = ETIMEDOUT;
1096 					break;
1097 				}
1098 				cv_timedwait(&sc->sc_idst_cv,
1099 				    &sc->sc_intr_lock, hz);
1100 			}
1101 		}
1102 	}
1103 
1104 	cmd->c_error = sunxi_mmc_wait_rint(sc,
1105 	    SUNXI_MMC_INT_ERROR|SUNXI_MMC_INT_CMD_DONE, hz * 10, poll);
1106 	if (cmd->c_error == 0 && (sc->sc_intr_rint & SUNXI_MMC_INT_ERROR)) {
1107 		if (sc->sc_intr_rint & SUNXI_MMC_INT_RESP_TIMEOUT) {
1108 			cmd->c_error = ETIMEDOUT;
1109 		} else {
1110 			cmd->c_error = EIO;
1111 		}
1112 	}
1113 	if (cmd->c_error) {
1114 		DPRINTF(sc->sc_dev,
1115 		    "cmd failed, error %d\n", cmd->c_error);
1116 		goto done;
1117 	}
1118 
1119 	if (cmd->c_datalen > 0) {
1120 		sunxi_mmc_dma_complete(sc, cmd);
1121 
1122 		cmd->c_error = sunxi_mmc_wait_rint(sc,
1123 		    SUNXI_MMC_INT_ERROR|
1124 		    SUNXI_MMC_INT_AUTO_CMD_DONE|
1125 		    SUNXI_MMC_INT_DATA_OVER,
1126 		    hz*10, poll);
1127 		if (cmd->c_error == 0 &&
1128 		    (sc->sc_intr_rint & SUNXI_MMC_INT_ERROR)) {
1129 			cmd->c_error = ETIMEDOUT;
1130 		}
1131 		if (cmd->c_error) {
1132 			DPRINTF(sc->sc_dev,
1133 			    "data timeout, rint = %08x\n",
1134 			    sc->sc_intr_rint);
1135 			cmd->c_error = ETIMEDOUT;
1136 			goto done;
1137 		}
1138 	}
1139 
1140 	if (cmd->c_flags & SCF_RSP_PRESENT) {
1141 		if (cmd->c_flags & SCF_RSP_136) {
1142 			cmd->c_resp[0] = MMC_READ(sc, SUNXI_MMC_RESP0);
1143 			cmd->c_resp[1] = MMC_READ(sc, SUNXI_MMC_RESP1);
1144 			cmd->c_resp[2] = MMC_READ(sc, SUNXI_MMC_RESP2);
1145 			cmd->c_resp[3] = MMC_READ(sc, SUNXI_MMC_RESP3);
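			/*
			 * For long responses the raw registers include a
			 * low-order byte the sdmmc layer does not expect
			 * (presumably the response CRC); shift the 128-bit
			 * value right by 8 bits to discard it.
			 */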
1146 			if (cmd->c_flags & SCF_RSP_CRC) {
1147 				cmd->c_resp[0] = (cmd->c_resp[0] >> 8) |
1148 				    (cmd->c_resp[1] << 24);
1149 				cmd->c_resp[1] = (cmd->c_resp[1] >> 8) |
1150 				    (cmd->c_resp[2] << 24);
1151 				cmd->c_resp[2] = (cmd->c_resp[2] >> 8) |
1152 				    (cmd->c_resp[3] << 24);
1153 				cmd->c_resp[3] = (cmd->c_resp[3] >> 8);
1154 			}
1155 		} else {
1156 			cmd->c_resp[0] = MMC_READ(sc, SUNXI_MMC_RESP0);
1157 		}
1158 	}
1159 
1160 done:
1161 	cmd->c_flags |= SCF_ITSDONE;
1162 	MMC_WRITE(sc, SUNXI_MMC_IMASK, oimask);
1163 	MMC_WRITE(sc, SUNXI_MMC_RINT, 0xffff);
1164 	MMC_WRITE(sc, SUNXI_MMC_IDST, 0x337);
1165 	mutex_exit(&sc->sc_intr_lock);
1166 
1167 	if (cmd->c_error) {
1168 		DPRINTF(sc->sc_dev, "i/o error %d\n", cmd->c_error);
1169 		MMC_WRITE(sc, SUNXI_MMC_GCTRL,
1170 		    MMC_READ(sc, SUNXI_MMC_GCTRL) |
1171 		      SUNXI_MMC_GCTRL_DMARESET | SUNXI_MMC_GCTRL_FIFORESET);
1172 		for (retry = 0; retry < 1000; retry++) {
1173 			if (!(MMC_READ(sc, SUNXI_MMC_GCTRL) & SUNXI_MMC_GCTRL_RESET))
1174 				break;
1175 			delay(10);
1176 		}
1177 		sunxi_mmc_update_clock(sc);
1178 	}
1179 
1180 	MMC_WRITE(sc, SUNXI_MMC_GCTRL,
1181 	    MMC_READ(sc, SUNXI_MMC_GCTRL) | SUNXI_MMC_GCTRL_FIFORESET);
1182 }
1183 
1184 static void
1185 sunxi_mmc_card_enable_intr(sdmmc_chipset_handle_t sch, int enable)
1186 {
1187 	struct sunxi_mmc_softc *sc = sch;
1188 	uint32_t imask;
1189 
1190 	imask = MMC_READ(sc, SUNXI_MMC_IMASK);
1191 	if (enable)
1192 		imask |= SUNXI_MMC_INT_SDIO_INT;
1193 	else
1194 		imask &= ~SUNXI_MMC_INT_SDIO_INT;
1195 	MMC_WRITE(sc, SUNXI_MMC_IMASK, imask);
1196 }
1197 
1198 static void
1199 sunxi_mmc_card_intr_ack(sdmmc_chipset_handle_t sch)
1200 {
1201 	struct sunxi_mmc_softc *sc = sch;
1202 
1203 	MMC_WRITE(sc, SUNXI_MMC_RINT, SUNXI_MMC_INT_SDIO_INT);
1204 }
1205