/* $NetBSD: sunxi_mmc.c,v 1.26 2018/06/13 11:17:02 jmcneill Exp $ */

/*-
 * Copyright (c) 2014-2017 Jared McNeill <jmcneill@invisible.ca>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "opt_sunximmc.h"

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sunxi_mmc.c,v 1.26 2018/06/13 11:17:02 jmcneill Exp $");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/device.h>
#include <sys/intr.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/gpio.h>

#include <dev/sdmmc/sdmmcvar.h>
#include <dev/sdmmc/sdmmcchip.h>
#include <dev/sdmmc/sdmmc_ioreg.h>

#include <dev/fdt/fdtvar.h>

#include <arm/sunxi/sunxi_mmc.h>

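/*
 * With options SUNXI_MMC_DEBUG, sunxi_mmc_debug is a bitmask of device
 * units for which DPRINTF() output is enabled (bit n enables unit n).
 */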
#ifdef SUNXI_MMC_DEBUG
static int sunxi_mmc_debug = SUNXI_MMC_DEBUG;
#define	DPRINTF(dev, fmt, ...)						\
do {									\
	if (sunxi_mmc_debug & __BIT(device_unit(dev)))			\
		device_printf((dev), fmt, ##__VA_ARGS__);		\
} while (0)
#else
#define	DPRINTF(dev, fmt, ...)		((void)0)
#endif

enum sunxi_mmc_timing {
	SUNXI_MMC_TIMING_400K,
	SUNXI_MMC_TIMING_25M,
	SUNXI_MMC_TIMING_50M,
	SUNXI_MMC_TIMING_50M_DDR,
	SUNXI_MMC_TIMING_50M_DDR_8BIT,
};

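/*
 * Per-timing output/sample phase settings, programmed into the "output"
 * and "sample" clocks by sunxi_mmc_set_clock() on SoCs that provide a
 * delay table.
 */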
struct sunxi_mmc_delay {
	u_int	output_phase;
	u_int	sample_phase;
};

static const struct sunxi_mmc_delay sun7i_mmc_delays[] = {
	[SUNXI_MMC_TIMING_400K]		= { 180,	180 },
	[SUNXI_MMC_TIMING_25M]		= { 180,	 75 },
	[SUNXI_MMC_TIMING_50M]		= {  90,	120 },
	[SUNXI_MMC_TIMING_50M_DDR]	= {  60,	120 },
	[SUNXI_MMC_TIMING_50M_DDR_8BIT]	= {  90,	180 },
};

static const struct sunxi_mmc_delay sun9i_mmc_delays[] = {
	[SUNXI_MMC_TIMING_400K]		= { 180,	180 },
	[SUNXI_MMC_TIMING_25M]		= { 180,	 75 },
	[SUNXI_MMC_TIMING_50M]		= { 150,	120 },
	[SUNXI_MMC_TIMING_50M_DDR]	= {  54,	 36 },
	[SUNXI_MMC_TIMING_50M_DDR_8BIT]	= {  72,	 72 },
};

#define SUNXI_MMC_NDESC		64

struct sunxi_mmc_softc;

static int	sunxi_mmc_match(device_t, cfdata_t, void *);
static void	sunxi_mmc_attach(device_t, device_t, void *);
static void	sunxi_mmc_attach_i(device_t);

static int	sunxi_mmc_intr(void *);
static int	sunxi_mmc_dmabounce_setup(struct sunxi_mmc_softc *);
static int	sunxi_mmc_idma_setup(struct sunxi_mmc_softc *);

static int	sunxi_mmc_host_reset(sdmmc_chipset_handle_t);
static uint32_t	sunxi_mmc_host_ocr(sdmmc_chipset_handle_t);
static int	sunxi_mmc_host_maxblklen(sdmmc_chipset_handle_t);
static int	sunxi_mmc_card_detect(sdmmc_chipset_handle_t);
static int	sunxi_mmc_write_protect(sdmmc_chipset_handle_t);
static int	sunxi_mmc_bus_power(sdmmc_chipset_handle_t, uint32_t);
static int	sunxi_mmc_bus_clock(sdmmc_chipset_handle_t, int, bool);
static int	sunxi_mmc_bus_width(sdmmc_chipset_handle_t, int);
static int	sunxi_mmc_bus_rod(sdmmc_chipset_handle_t, int);
static int	sunxi_mmc_signal_voltage(sdmmc_chipset_handle_t, int);
static int	sunxi_mmc_execute_tuning(sdmmc_chipset_handle_t, int);
static void	sunxi_mmc_exec_command(sdmmc_chipset_handle_t,
				      struct sdmmc_command *);
static void	sunxi_mmc_card_enable_intr(sdmmc_chipset_handle_t, int);
static void	sunxi_mmc_card_intr_ack(sdmmc_chipset_handle_t);

static struct sdmmc_chip_functions sunxi_mmc_chip_functions = {
	.host_reset = sunxi_mmc_host_reset,
	.host_ocr = sunxi_mmc_host_ocr,
	.host_maxblklen = sunxi_mmc_host_maxblklen,
	.card_detect = sunxi_mmc_card_detect,
	.write_protect = sunxi_mmc_write_protect,
	.bus_power = sunxi_mmc_bus_power,
	.bus_clock_ddr = sunxi_mmc_bus_clock,
	.bus_width = sunxi_mmc_bus_width,
	.bus_rod = sunxi_mmc_bus_rod,
	.signal_voltage = sunxi_mmc_signal_voltage,
	.execute_tuning = sunxi_mmc_execute_tuning,
	.exec_command = sunxi_mmc_exec_command,
	.card_enable_intr = sunxi_mmc_card_enable_intr,
	.card_intr_ack = sunxi_mmc_card_intr_ack,
};

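/*
 * Per-SoC controller configuration, selected at attach time from the
 * matching entry in compat_data below.
 */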
struct sunxi_mmc_config {
	u_int idma_xferlen;
	u_int flags;
#define	SUNXI_MMC_FLAG_CALIB_REG	0x01
#define	SUNXI_MMC_FLAG_NEW_TIMINGS	0x02
#define	SUNXI_MMC_FLAG_MASK_DATA0	0x04
#define	SUNXI_MMC_FLAG_HS200		0x08
	const struct sunxi_mmc_delay *delays;
	uint32_t dma_ftrglevel;
};

struct sunxi_mmc_softc {
	device_t sc_dev;
	bus_space_tag_t sc_bst;
	bus_space_handle_t sc_bsh;
	bus_dma_tag_t sc_dmat;
	int sc_phandle;

	void *sc_ih;
	kmutex_t sc_intr_lock;
	kcondvar_t sc_intr_cv;
	kcondvar_t sc_idst_cv;

	int sc_mmc_width;
	int sc_mmc_present;

	u_int sc_max_frequency;

	device_t sc_sdmmc_dev;

	struct sunxi_mmc_config *sc_config;

	bus_dma_segment_t sc_idma_segs[1];
	int sc_idma_nsegs;
	bus_size_t sc_idma_size;
	bus_dmamap_t sc_idma_map;
	int sc_idma_ndesc;
	void *sc_idma_desc;

	bus_dmamap_t sc_dmabounce_map;
	void *sc_dmabounce_buf;
	size_t sc_dmabounce_buflen;

	uint32_t sc_intr_rint;
	uint32_t sc_idma_idst;

	struct clk *sc_clk_ahb;
	struct clk *sc_clk_mmc;
	struct clk *sc_clk_output;
	struct clk *sc_clk_sample;

	struct fdtbus_reset *sc_rst_ahb;

	struct fdtbus_gpio_pin *sc_gpio_cd;
	int sc_gpio_cd_inverted;
	struct fdtbus_gpio_pin *sc_gpio_wp;
	int sc_gpio_wp_inverted;

	struct fdtbus_regulator *sc_reg_vqmmc;

	struct fdtbus_mmc_pwrseq *sc_pwrseq;

	bool sc_non_removable;
	bool sc_broken_cd;
};

CFATTACH_DECL_NEW(sunxi_mmc, sizeof(struct sunxi_mmc_softc),
	sunxi_mmc_match, sunxi_mmc_attach, NULL, NULL);

#define MMC_WRITE(sc, reg, val)	\
	bus_space_write_4((sc)->sc_bst, (sc)->sc_bsh, (reg), (val))
#define MMC_READ(sc, reg) \
	bus_space_read_4((sc)->sc_bst, (sc)->sc_bsh, (reg))

static const struct sunxi_mmc_config sun4i_a10_mmc_config = {
	.idma_xferlen = 0x2000,
	.dma_ftrglevel = 0x20070008,
	.delays = NULL,
	.flags = 0,
};

static const struct sunxi_mmc_config sun5i_a13_mmc_config = {
	.idma_xferlen = 0x10000,
	.dma_ftrglevel = 0x20070008,
	.delays = NULL,
	.flags = 0,
};

static const struct sunxi_mmc_config sun7i_a20_mmc_config = {
	.idma_xferlen = 0x2000,
	.dma_ftrglevel = 0x20070008,
	.delays = sun7i_mmc_delays,
	.flags = 0,
};

static const struct sunxi_mmc_config sun8i_a83t_emmc_config = {
	.idma_xferlen = 0x10000,
	.dma_ftrglevel = 0x20070008,
	.delays = NULL,
	.flags = SUNXI_MMC_FLAG_NEW_TIMINGS,
};

static const struct sunxi_mmc_config sun9i_a80_mmc_config = {
	.idma_xferlen = 0x10000,
	.dma_ftrglevel = 0x200f0010,
	.delays = sun9i_mmc_delays,
	.flags = 0,
};

static const struct sunxi_mmc_config sun50i_a64_mmc_config = {
	.idma_xferlen = 0x10000,
	.dma_ftrglevel = 0x20070008,
	.delays = NULL,
	.flags = SUNXI_MMC_FLAG_CALIB_REG |
		 SUNXI_MMC_FLAG_NEW_TIMINGS |
		 SUNXI_MMC_FLAG_MASK_DATA0,
};

static const struct sunxi_mmc_config sun50i_a64_emmc_config = {
	.idma_xferlen = 0x2000,
	.dma_ftrglevel = 0x20070008,
	.delays = NULL,
	.flags = SUNXI_MMC_FLAG_CALIB_REG,
};

static const struct sunxi_mmc_config sun50i_h6_mmc_config = {
	.idma_xferlen = 0x10000,
	.dma_ftrglevel = 0x20070008,
	.delays = NULL,
	.flags = SUNXI_MMC_FLAG_CALIB_REG |
		 SUNXI_MMC_FLAG_NEW_TIMINGS |
		 SUNXI_MMC_FLAG_MASK_DATA0,
};

static const struct sunxi_mmc_config sun50i_h6_emmc_config = {
	.idma_xferlen = 0x2000,
	.dma_ftrglevel = 0x20070008,
	.delays = NULL,
	.flags = SUNXI_MMC_FLAG_CALIB_REG,
};

static const struct of_compat_data compat_data[] = {
	{ "allwinner,sun4i-a10-mmc",	(uintptr_t)&sun4i_a10_mmc_config },
	{ "allwinner,sun5i-a13-mmc",	(uintptr_t)&sun5i_a13_mmc_config },
	{ "allwinner,sun7i-a20-mmc",	(uintptr_t)&sun7i_a20_mmc_config },
	{ "allwinner,sun8i-a83t-emmc",	(uintptr_t)&sun8i_a83t_emmc_config },
	{ "allwinner,sun9i-a80-mmc",	(uintptr_t)&sun9i_a80_mmc_config },
	{ "allwinner,sun50i-a64-mmc",	(uintptr_t)&sun50i_a64_mmc_config },
	{ "allwinner,sun50i-a64-emmc",	(uintptr_t)&sun50i_a64_emmc_config },
	{ "allwinner,sun50i-h6-mmc",	(uintptr_t)&sun50i_h6_mmc_config },
	{ "allwinner,sun50i-h6-emmc",	(uintptr_t)&sun50i_h6_emmc_config },
	{ NULL }
};

static int
sunxi_mmc_match(device_t parent, cfdata_t cf, void *aux)
{
	struct fdt_attach_args * const faa = aux;

	return of_match_compat_data(faa->faa_phandle, compat_data);
}

static void
sunxi_mmc_attach(device_t parent, device_t self, void *aux)
{
	struct sunxi_mmc_softc * const sc = device_private(self);
	struct fdt_attach_args * const faa = aux;
	const int phandle = faa->faa_phandle;
	char intrstr[128];
	bus_addr_t addr;
	bus_size_t size;

	if (fdtbus_get_reg(phandle, 0, &addr, &size) != 0) {
		aprint_error(": couldn't get registers\n");
		return;
	}

	sc->sc_clk_ahb = fdtbus_clock_get(phandle, "ahb");
	sc->sc_clk_mmc = fdtbus_clock_get(phandle, "mmc");
	sc->sc_clk_output = fdtbus_clock_get(phandle, "output");
	sc->sc_clk_sample = fdtbus_clock_get(phandle, "sample");

#if notyet
	if (sc->sc_clk_ahb == NULL || sc->sc_clk_mmc == NULL ||
	    sc->sc_clk_output == NULL || sc->sc_clk_sample == NULL) {
#else
	if (sc->sc_clk_ahb == NULL || sc->sc_clk_mmc == NULL) {
#endif
		aprint_error(": couldn't get clocks\n");
		return;
	}

	sc->sc_rst_ahb = fdtbus_reset_get(phandle, "ahb");

	sc->sc_reg_vqmmc = fdtbus_regulator_acquire(phandle, "vqmmc-supply");

	sc->sc_pwrseq = fdtbus_mmc_pwrseq_get(phandle);

	if (clk_enable(sc->sc_clk_ahb) != 0 ||
	    clk_enable(sc->sc_clk_mmc) != 0) {
		aprint_error(": couldn't enable clocks\n");
		return;
	}

	if (sc->sc_rst_ahb != NULL) {
		if (fdtbus_reset_deassert(sc->sc_rst_ahb) != 0) {
			aprint_error(": couldn't de-assert resets\n");
			return;
		}
	}

	sc->sc_dev = self;
	sc->sc_phandle = phandle;
	sc->sc_config = (void *)of_search_compatible(phandle, compat_data)->data;
	sc->sc_bst = faa->faa_bst;
	sc->sc_dmat = faa->faa_dmat;
	mutex_init(&sc->sc_intr_lock, MUTEX_DEFAULT, IPL_BIO);
	cv_init(&sc->sc_intr_cv, "awinmmcirq");
	cv_init(&sc->sc_idst_cv, "awinmmcdma");

	if (bus_space_map(sc->sc_bst, addr, size, 0, &sc->sc_bsh) != 0) {
		aprint_error(": couldn't map registers\n");
		return;
	}

	aprint_naive("\n");
	aprint_normal(": SD/MMC controller\n");

	sc->sc_gpio_cd = fdtbus_gpio_acquire(phandle, "cd-gpios",
	    GPIO_PIN_INPUT);
	sc->sc_gpio_wp = fdtbus_gpio_acquire(phandle, "wp-gpios",
	    GPIO_PIN_INPUT);

	sc->sc_gpio_cd_inverted = of_hasprop(phandle, "cd-inverted") ? 0 : 1;
	sc->sc_gpio_wp_inverted = of_hasprop(phandle, "wp-inverted") ? 0 : 1;

	sc->sc_non_removable = of_hasprop(phandle, "non-removable");
	sc->sc_broken_cd = of_hasprop(phandle, "broken-cd");

	if (of_getprop_uint32(phandle, "max-frequency", &sc->sc_max_frequency))
		sc->sc_max_frequency = 52000000;

	if (sunxi_mmc_dmabounce_setup(sc) != 0 ||
	    sunxi_mmc_idma_setup(sc) != 0) {
		aprint_error_dev(self, "failed to setup DMA\n");
		return;
	}

	if (!fdtbus_intr_str(phandle, 0, intrstr, sizeof(intrstr))) {
		aprint_error_dev(self, "failed to decode interrupt\n");
		return;
	}

	sc->sc_ih = fdtbus_intr_establish(phandle, 0, IPL_BIO, FDT_INTR_MPSAFE,
	    sunxi_mmc_intr, sc);
	if (sc->sc_ih == NULL) {
		aprint_error_dev(self, "failed to establish interrupt on %s\n",
		    intrstr);
		return;
	}
	aprint_normal_dev(self, "interrupting on %s\n", intrstr);

	config_interrupts(self, sunxi_mmc_attach_i);
}

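/*
 * Allocate and map a single-segment bounce buffer, sized to the host's
 * maximum block length.  It is used by sunxi_mmc_dma_prepare() for data
 * commands that arrive without a DMA map (e.g. SDIO CMD53).
 */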
static int
sunxi_mmc_dmabounce_setup(struct sunxi_mmc_softc *sc)
{
	bus_dma_segment_t ds[1];
	int error, rseg;

	sc->sc_dmabounce_buflen = sunxi_mmc_host_maxblklen(sc);
	error = bus_dmamem_alloc(sc->sc_dmat, sc->sc_dmabounce_buflen, 0,
	    sc->sc_dmabounce_buflen, ds, 1, &rseg, BUS_DMA_WAITOK);
	if (error)
		return error;
	error = bus_dmamem_map(sc->sc_dmat, ds, 1, sc->sc_dmabounce_buflen,
	    &sc->sc_dmabounce_buf, BUS_DMA_WAITOK);
	if (error)
		goto free;
	error = bus_dmamap_create(sc->sc_dmat, sc->sc_dmabounce_buflen, 1,
	    sc->sc_dmabounce_buflen, 0, BUS_DMA_WAITOK, &sc->sc_dmabounce_map);
	if (error)
		goto unmap;
	error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmabounce_map,
	    sc->sc_dmabounce_buf, sc->sc_dmabounce_buflen, NULL,
	    BUS_DMA_WAITOK);
	if (error)
		goto destroy;
	return 0;

destroy:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmabounce_map);
unmap:
	bus_dmamem_unmap(sc->sc_dmat, sc->sc_dmabounce_buf,
	    sc->sc_dmabounce_buflen);
free:
	bus_dmamem_free(sc->sc_dmat, ds, rseg);
	return error;
}

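/*
 * Allocate, map, and load the internal DMA controller's descriptor ring
 * (SUNXI_MMC_NDESC descriptors in one physically contiguous segment).
 */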
static int
sunxi_mmc_idma_setup(struct sunxi_mmc_softc *sc)
{
	int error;

	sc->sc_idma_ndesc = SUNXI_MMC_NDESC;
	sc->sc_idma_size = sizeof(struct sunxi_mmc_idma_descriptor) *
	    sc->sc_idma_ndesc;
	error = bus_dmamem_alloc(sc->sc_dmat, sc->sc_idma_size, 0,
	    sc->sc_idma_size, sc->sc_idma_segs, 1,
	    &sc->sc_idma_nsegs, BUS_DMA_WAITOK);
	if (error)
		return error;
	error = bus_dmamem_map(sc->sc_dmat, sc->sc_idma_segs,
	    sc->sc_idma_nsegs, sc->sc_idma_size,
	    &sc->sc_idma_desc, BUS_DMA_WAITOK);
	if (error)
		goto free;
	error = bus_dmamap_create(sc->sc_dmat, sc->sc_idma_size, 1,
	    sc->sc_idma_size, 0, BUS_DMA_WAITOK, &sc->sc_idma_map);
	if (error)
		goto unmap;
	error = bus_dmamap_load(sc->sc_dmat, sc->sc_idma_map,
	    sc->sc_idma_desc, sc->sc_idma_size, NULL, BUS_DMA_WAITOK);
	if (error)
		goto destroy;
	return 0;

destroy:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_idma_map);
unmap:
	bus_dmamem_unmap(sc->sc_dmat, sc->sc_idma_desc, sc->sc_idma_size);
free:
	bus_dmamem_free(sc->sc_dmat, sc->sc_idma_segs, sc->sc_idma_nsegs);
	return error;
}

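/*
 * Set the card clock.  "freq" is in kHz; the module clock is run at twice
 * the card clock in DDR mode.  On SoCs with a delay table, a timing mode
 * is chosen from the frequency and the corresponding output/sample phases
 * are programmed as well.
 */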
static int
sunxi_mmc_set_clock(struct sunxi_mmc_softc *sc, u_int freq, bool ddr)
{
	const struct sunxi_mmc_delay *delays;
	int error, timing = SUNXI_MMC_TIMING_400K;

	if (sc->sc_config->delays) {
		if (freq <= 400) {
			timing = SUNXI_MMC_TIMING_400K;
		} else if (freq <= 25000) {
			timing = SUNXI_MMC_TIMING_25M;
		} else if (freq <= 52000) {
			if (ddr) {
				timing = sc->sc_mmc_width == 8 ?
				    SUNXI_MMC_TIMING_50M_DDR_8BIT :
				    SUNXI_MMC_TIMING_50M_DDR;
			} else {
				timing = SUNXI_MMC_TIMING_50M;
			}
		} else
			return EINVAL;
	}
	if (sc->sc_max_frequency) {
		if (freq * 1000 > sc->sc_max_frequency)
			return EINVAL;
	}

	error = clk_set_rate(sc->sc_clk_mmc, (freq * 1000) << ddr);
	if (error != 0)
		return error;

	if (sc->sc_config->delays == NULL)
		return 0;

	delays = &sc->sc_config->delays[timing];

	if (sc->sc_clk_sample) {
		error = clk_set_rate(sc->sc_clk_sample, delays->sample_phase);
		if (error != 0)
			return error;
	}
	if (sc->sc_clk_output) {
		error = clk_set_rate(sc->sc_clk_output, delays->output_phase);
		if (error != 0)
			return error;
	}

	return 0;
}

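/*
 * Deferred attach, run via config_interrupts() once interrupts work:
 * run the power sequence, reset the host, drop to 1-bit/400 kHz, and
 * attach the sdmmc bus with capabilities derived from the device tree
 * and the SoC configuration.
 */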
static void
sunxi_mmc_attach_i(device_t self)
{
	struct sunxi_mmc_softc *sc = device_private(self);
	const u_int flags = sc->sc_config->flags;
	struct sdmmcbus_attach_args saa;
	uint32_t width;

	if (sc->sc_pwrseq)
		fdtbus_mmc_pwrseq_pre_power_on(sc->sc_pwrseq);

	sunxi_mmc_host_reset(sc);
	sunxi_mmc_bus_width(sc, 1);
	sunxi_mmc_set_clock(sc, 400, false);

	if (sc->sc_pwrseq)
		fdtbus_mmc_pwrseq_post_power_on(sc->sc_pwrseq);

	if (of_getprop_uint32(sc->sc_phandle, "bus-width", &width) != 0)
		width = 4;

	memset(&saa, 0, sizeof(saa));
	saa.saa_busname = "sdmmc";
	saa.saa_sct = &sunxi_mmc_chip_functions;
	saa.saa_sch = sc;
	saa.saa_dmat = sc->sc_dmat;
	saa.saa_clkmin = 400;
	saa.saa_clkmax = sc->sc_max_frequency / 1000;
	saa.saa_caps = SMC_CAPS_DMA |
		       SMC_CAPS_MULTI_SEG_DMA |
		       SMC_CAPS_AUTO_STOP |
		       SMC_CAPS_SD_HIGHSPEED |
		       SMC_CAPS_MMC_HIGHSPEED |
		       SMC_CAPS_POLLING;

	if (sc->sc_config->delays || (flags & SUNXI_MMC_FLAG_NEW_TIMINGS))
		saa.saa_caps |= SMC_CAPS_MMC_DDR52;

	if (flags & SUNXI_MMC_FLAG_HS200)
		saa.saa_caps |= SMC_CAPS_MMC_HS200;

	if (width == 4)
		saa.saa_caps |= SMC_CAPS_4BIT_MODE;
	if (width == 8)
		saa.saa_caps |= SMC_CAPS_8BIT_MODE;

	if (sc->sc_gpio_cd)
		saa.saa_caps |= SMC_CAPS_POLL_CARD_DET;

	sc->sc_sdmmc_dev = config_found(self, &saa, NULL);
}

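/*
 * Interrupt handler.  Acknowledge the pending IDST (internal DMA) and RINT
 * (command/data) status bits, wake the waiters on sc_idst_cv/sc_intr_cv,
 * and forward SDIO card interrupts to the sdmmc layer.
 */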
static int
sunxi_mmc_intr(void *priv)
{
	struct sunxi_mmc_softc *sc = priv;
	uint32_t idst, rint, imask;

	mutex_enter(&sc->sc_intr_lock);
	idst = MMC_READ(sc, SUNXI_MMC_IDST);
	rint = MMC_READ(sc, SUNXI_MMC_RINT);
	if (!idst && !rint) {
		mutex_exit(&sc->sc_intr_lock);
		return 0;
	}
	MMC_WRITE(sc, SUNXI_MMC_IDST, idst);
	MMC_WRITE(sc, SUNXI_MMC_RINT, rint & ~SUNXI_MMC_INT_SDIO_INT);

	DPRINTF(sc->sc_dev, "mmc intr idst=%08X rint=%08X\n",
	    idst, rint);

	if (idst != 0) {
		MMC_WRITE(sc, SUNXI_MMC_IDIE, 0);
		sc->sc_idma_idst |= idst;
		cv_broadcast(&sc->sc_idst_cv);
	}

	if ((rint & ~SUNXI_MMC_INT_SDIO_INT) != 0) {
		imask = MMC_READ(sc, SUNXI_MMC_IMASK);
		MMC_WRITE(sc, SUNXI_MMC_IMASK, imask & ~SUNXI_MMC_INT_SDIO_INT);
		sc->sc_intr_rint |= (rint & ~SUNXI_MMC_INT_SDIO_INT);
		cv_broadcast(&sc->sc_intr_cv);
	}

	if ((rint & SUNXI_MMC_INT_SDIO_INT) != 0) {
		sdmmc_card_intr(sc->sc_sdmmc_dev);
	}

	mutex_exit(&sc->sc_intr_lock);

	return 1;
}

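/*
 * Wait for one of the bits in "mask" to be collected in sc_intr_rint.
 * In polling mode RINT is read directly with 1ms delays between reads;
 * otherwise we sleep on sc_intr_cv until the interrupt handler posts the
 * bits or the timeout (in ticks) expires.
 */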
static int
sunxi_mmc_wait_rint(struct sunxi_mmc_softc *sc, uint32_t mask,
    int timeout, bool poll)
{
	int retry;
	int error;

	KASSERT(mutex_owned(&sc->sc_intr_lock));

	if (sc->sc_intr_rint & mask)
		return 0;

	if (poll)
		retry = timeout / hz * 1000;
	else
		retry = timeout / hz;

	while (retry > 0) {
		if (poll) {
			sc->sc_intr_rint |= MMC_READ(sc, SUNXI_MMC_RINT);
		} else {
			error = cv_timedwait(&sc->sc_intr_cv,
			    &sc->sc_intr_lock, hz);
			if (error && error != EWOULDBLOCK)
				return error;
		}
		if (sc->sc_intr_rint & mask)
			return 0;
		if (poll)
			delay(1000);
		--retry;
	}

	return ETIMEDOUT;
}

static int
sunxi_mmc_host_reset(sdmmc_chipset_handle_t sch)
{
	struct sunxi_mmc_softc *sc = sch;
	uint32_t gctrl;
	int retry = 1000;

	DPRINTF(sc->sc_dev, "host reset\n");

	gctrl = MMC_READ(sc, SUNXI_MMC_GCTRL);
	gctrl |= SUNXI_MMC_GCTRL_RESET;
	MMC_WRITE(sc, SUNXI_MMC_GCTRL, gctrl);
	while (--retry > 0) {
		if (!(MMC_READ(sc, SUNXI_MMC_GCTRL) & SUNXI_MMC_GCTRL_RESET))
			break;
		delay(100);
	}

	MMC_WRITE(sc, SUNXI_MMC_TIMEOUT, 0xffffffff);

	MMC_WRITE(sc, SUNXI_MMC_IMASK, 0);

	MMC_WRITE(sc, SUNXI_MMC_RINT, 0xffffffff);

	gctrl = MMC_READ(sc, SUNXI_MMC_GCTRL);
	gctrl |= SUNXI_MMC_GCTRL_INTEN;
	gctrl &= ~SUNXI_MMC_GCTRL_WAIT_MEM_ACCESS_DONE;
	gctrl &= ~SUNXI_MMC_GCTRL_ACCESS_BY_AHB;
	MMC_WRITE(sc, SUNXI_MMC_GCTRL, gctrl);

	return 0;
}

static uint32_t
sunxi_mmc_host_ocr(sdmmc_chipset_handle_t sch)
{
	return MMC_OCR_3_2V_3_3V | MMC_OCR_3_3V_3_4V | MMC_OCR_HCS;
}

static int
sunxi_mmc_host_maxblklen(sdmmc_chipset_handle_t sch)
{
	return 8192;
}

static int
sunxi_mmc_card_detect(sdmmc_chipset_handle_t sch)
{
	struct sunxi_mmc_softc *sc = sch;

	if (sc->sc_non_removable || sc->sc_broken_cd) {
		/*
		 * Non-removable or broken card detect flag set in
		 * DT, assume always present
		 */
		return 1;
	} else if (sc->sc_gpio_cd != NULL) {
		/* Use card detect GPIO */
		int v = 0, i;
		for (i = 0; i < 5; i++) {
			v += (fdtbus_gpio_read(sc->sc_gpio_cd) ^
			    sc->sc_gpio_cd_inverted);
			delay(1000);
		}
		if (v == 5)
			sc->sc_mmc_present = 0;
		else if (v == 0)
			sc->sc_mmc_present = 1;
		return sc->sc_mmc_present;
	} else {
		/* Use CARD_PRESENT field of SD_STATUS register */
		const uint32_t present = MMC_READ(sc, SUNXI_MMC_STATUS) &
		    SUNXI_MMC_STATUS_CARD_PRESENT;
		return present != 0;
	}
}

static int
sunxi_mmc_write_protect(sdmmc_chipset_handle_t sch)
{
	struct sunxi_mmc_softc *sc = sch;

	if (sc->sc_gpio_wp == NULL) {
		return 0;	/* no write protect pin, assume rw */
	} else {
		return fdtbus_gpio_read(sc->sc_gpio_wp) ^
		    sc->sc_gpio_wp_inverted;
	}
}

static int
sunxi_mmc_bus_power(sdmmc_chipset_handle_t sch, uint32_t ocr)
{
	return 0;
}

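/*
 * Tell the controller to latch the current CLKCR settings by issuing an
 * update-clock-only command and busy-waiting for it to complete.
 */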
static int
sunxi_mmc_update_clock(struct sunxi_mmc_softc *sc)
{
	uint32_t cmd;
	int retry;

	DPRINTF(sc->sc_dev, "update clock\n");

	cmd = SUNXI_MMC_CMD_START |
	      SUNXI_MMC_CMD_UPCLK_ONLY |
	      SUNXI_MMC_CMD_WAIT_PRE_OVER;
	MMC_WRITE(sc, SUNXI_MMC_CMD, cmd);
	retry = 0xfffff;
	while (--retry > 0) {
		if (!(MMC_READ(sc, SUNXI_MMC_CMD) & SUNXI_MMC_CMD_START))
			break;
		delay(10);
	}

	if (retry == 0) {
		aprint_error_dev(sc->sc_dev, "timeout updating clock\n");
		DPRINTF(sc->sc_dev, "GCTRL: 0x%08x\n",
		    MMC_READ(sc, SUNXI_MMC_GCTRL));
		DPRINTF(sc->sc_dev, "CLKCR: 0x%08x\n",
		    MMC_READ(sc, SUNXI_MMC_CLKCR));
		DPRINTF(sc->sc_dev, "TIMEOUT: 0x%08x\n",
		    MMC_READ(sc, SUNXI_MMC_TIMEOUT));
		DPRINTF(sc->sc_dev, "WIDTH: 0x%08x\n",
		    MMC_READ(sc, SUNXI_MMC_WIDTH));
		DPRINTF(sc->sc_dev, "CMD: 0x%08x\n",
		    MMC_READ(sc, SUNXI_MMC_CMD));
		DPRINTF(sc->sc_dev, "MINT: 0x%08x\n",
		    MMC_READ(sc, SUNXI_MMC_MINT));
		DPRINTF(sc->sc_dev, "RINT: 0x%08x\n",
		    MMC_READ(sc, SUNXI_MMC_RINT));
		DPRINTF(sc->sc_dev, "STATUS: 0x%08x\n",
		    MMC_READ(sc, SUNXI_MMC_STATUS));
		return ETIMEDOUT;
	}

	return 0;
}

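/*
 * Change the bus clock.  The card clock is first gated, then the divider,
 * timing mode, DDR mode, and module clock rate are reprogrammed, and the
 * card clock is re-enabled.  On SoCs with SUNXI_MMC_FLAG_MASK_DATA0 the
 * DATA0 line is masked while the clock is being updated.
 */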
static int
sunxi_mmc_bus_clock(sdmmc_chipset_handle_t sch, int freq, bool ddr)
{
	struct sunxi_mmc_softc *sc = sch;
	uint32_t clkcr, gctrl, ntsr;
	const u_int flags = sc->sc_config->flags;

	clkcr = MMC_READ(sc, SUNXI_MMC_CLKCR);
	if (clkcr & SUNXI_MMC_CLKCR_CARDCLKON) {
		clkcr &= ~SUNXI_MMC_CLKCR_CARDCLKON;
		if (flags & SUNXI_MMC_FLAG_MASK_DATA0)
			clkcr |= SUNXI_MMC_CLKCR_MASK_DATA0;
		MMC_WRITE(sc, SUNXI_MMC_CLKCR, clkcr);
		if (sunxi_mmc_update_clock(sc) != 0)
			return 1;
		if (flags & SUNXI_MMC_FLAG_MASK_DATA0) {
			clkcr = MMC_READ(sc, SUNXI_MMC_CLKCR);
			clkcr &= ~SUNXI_MMC_CLKCR_MASK_DATA0;
			MMC_WRITE(sc, SUNXI_MMC_CLKCR, clkcr);
		}
	}

	if (freq) {

		clkcr &= ~SUNXI_MMC_CLKCR_DIV;
		clkcr |= __SHIFTIN(ddr, SUNXI_MMC_CLKCR_DIV);
		MMC_WRITE(sc, SUNXI_MMC_CLKCR, clkcr);

		if (flags & SUNXI_MMC_FLAG_NEW_TIMINGS) {
			ntsr = MMC_READ(sc, SUNXI_MMC_NTSR);
			ntsr |= SUNXI_MMC_NTSR_MODE_SELECT;
			MMC_WRITE(sc, SUNXI_MMC_NTSR, ntsr);
		}

		if (flags & SUNXI_MMC_FLAG_CALIB_REG)
			MMC_WRITE(sc, SUNXI_MMC_SAMP_DL, SUNXI_MMC_SAMP_DL_SW_EN);

		if (sunxi_mmc_update_clock(sc) != 0)
			return 1;

		gctrl = MMC_READ(sc, SUNXI_MMC_GCTRL);
		if (ddr)
			gctrl |= SUNXI_MMC_GCTRL_DDR_MODE;
		else
			gctrl &= ~SUNXI_MMC_GCTRL_DDR_MODE;
		MMC_WRITE(sc, SUNXI_MMC_GCTRL, gctrl);

		if (sunxi_mmc_set_clock(sc, freq, ddr) != 0)
			return 1;

		clkcr |= SUNXI_MMC_CLKCR_CARDCLKON;
		if (flags & SUNXI_MMC_FLAG_MASK_DATA0)
			clkcr |= SUNXI_MMC_CLKCR_MASK_DATA0;
		MMC_WRITE(sc, SUNXI_MMC_CLKCR, clkcr);
		if (sunxi_mmc_update_clock(sc) != 0)
			return 1;
		if (flags & SUNXI_MMC_FLAG_MASK_DATA0) {
			clkcr = MMC_READ(sc, SUNXI_MMC_CLKCR);
			clkcr &= ~SUNXI_MMC_CLKCR_MASK_DATA0;
			MMC_WRITE(sc, SUNXI_MMC_CLKCR, clkcr);
		}
	}

	return 0;
}

static int
sunxi_mmc_bus_width(sdmmc_chipset_handle_t sch, int width)
{
	struct sunxi_mmc_softc *sc = sch;

	DPRINTF(sc->sc_dev, "width = %d\n", width);

	switch (width) {
	case 1:
		MMC_WRITE(sc, SUNXI_MMC_WIDTH, SUNXI_MMC_WIDTH_1);
		break;
	case 4:
		MMC_WRITE(sc, SUNXI_MMC_WIDTH, SUNXI_MMC_WIDTH_4);
		break;
	case 8:
		MMC_WRITE(sc, SUNXI_MMC_WIDTH, SUNXI_MMC_WIDTH_8);
		break;
	default:
		return 1;
	}

	sc->sc_mmc_width = width;

	return 0;
}

static int
sunxi_mmc_bus_rod(sdmmc_chipset_handle_t sch, int on)
{
	return -1;
}

static int
sunxi_mmc_signal_voltage(sdmmc_chipset_handle_t sch, int signal_voltage)
{
	struct sunxi_mmc_softc *sc = sch;
	u_int uvol;
	int error;

	if (sc->sc_reg_vqmmc == NULL)
		return 0;

	switch (signal_voltage) {
	case SDMMC_SIGNAL_VOLTAGE_330:
		uvol = 3300000;
		break;
	case SDMMC_SIGNAL_VOLTAGE_180:
		uvol = 1800000;
		break;
	default:
		return EINVAL;
	}

	error = fdtbus_regulator_set_voltage(sc->sc_reg_vqmmc, uvol, uvol);
	if (error != 0)
		return error;

	return fdtbus_regulator_enable(sc->sc_reg_vqmmc);
}

static int
sunxi_mmc_execute_tuning(sdmmc_chipset_handle_t sch, int timing)
{
	switch (timing) {
	case SDMMC_TIMING_MMC_HS200:
		break;
	default:
		return EINVAL;
	}

	return 0;
}

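/*
 * Build the IDMA descriptor chain for a data command.  If the command has
 * no DMA map of its own the transfer is staged through the bounce buffer.
 * Each segment is split into chunks of at most idma_xferlen bytes, the
 * descriptor ring is synced, and the internal DMA controller is armed.
 */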
static int
sunxi_mmc_dma_prepare(struct sunxi_mmc_softc *sc, struct sdmmc_command *cmd)
{
	struct sunxi_mmc_idma_descriptor *dma = sc->sc_idma_desc;
	bus_addr_t desc_paddr = sc->sc_idma_map->dm_segs[0].ds_addr;
	bus_dmamap_t map;
	bus_size_t off;
	int desc, resid, seg;
	uint32_t val;

	/*
	 * If the command includes a dma map use it, otherwise we need to
	 * bounce. This can happen for SDIO IO_RW_EXTENDED (CMD53) commands.
	 */
	if (cmd->c_dmamap) {
		map = cmd->c_dmamap;
	} else {
		if (cmd->c_datalen > sc->sc_dmabounce_buflen)
			return E2BIG;
		map = sc->sc_dmabounce_map;

		if (ISSET(cmd->c_flags, SCF_CMD_READ)) {
			memset(sc->sc_dmabounce_buf, 0, cmd->c_datalen);
			bus_dmamap_sync(sc->sc_dmat, sc->sc_dmabounce_map,
			    0, cmd->c_datalen, BUS_DMASYNC_PREREAD);
		} else {
			memcpy(sc->sc_dmabounce_buf, cmd->c_data,
			    cmd->c_datalen);
			bus_dmamap_sync(sc->sc_dmat, sc->sc_dmabounce_map,
			    0, cmd->c_datalen, BUS_DMASYNC_PREWRITE);
		}
	}

	desc = 0;
	for (seg = 0; seg < map->dm_nsegs; seg++) {
		bus_addr_t paddr = map->dm_segs[seg].ds_addr;
		bus_size_t len = map->dm_segs[seg].ds_len;
		resid = min(len, cmd->c_resid);
		off = 0;
		while (resid > 0) {
			if (desc == sc->sc_idma_ndesc)
				break;
			len = min(sc->sc_config->idma_xferlen, resid);
			dma[desc].dma_buf_size = htole32(len);
			dma[desc].dma_buf_addr = htole32(paddr + off);
			dma[desc].dma_config = htole32(SUNXI_MMC_IDMA_CONFIG_CH |
					       SUNXI_MMC_IDMA_CONFIG_OWN);
			cmd->c_resid -= len;
			resid -= len;
			off += len;
			if (desc == 0) {
				dma[desc].dma_config |= htole32(SUNXI_MMC_IDMA_CONFIG_FD);
			}
			if (cmd->c_resid == 0) {
				dma[desc].dma_config |= htole32(SUNXI_MMC_IDMA_CONFIG_LD);
				dma[desc].dma_config |= htole32(SUNXI_MMC_IDMA_CONFIG_ER);
				dma[desc].dma_next = 0;
			} else {
				dma[desc].dma_config |=
				    htole32(SUNXI_MMC_IDMA_CONFIG_DIC);
				dma[desc].dma_next = htole32(
				    desc_paddr + ((desc+1) *
				    sizeof(struct sunxi_mmc_idma_descriptor)));
			}
			++desc;
		}
	}
	if (desc == sc->sc_idma_ndesc) {
		aprint_error_dev(sc->sc_dev,
		    "not enough descriptors for %d byte transfer! "
		    "there are %u segments with a max xfer length of %u\n",
		    cmd->c_datalen, map->dm_nsegs, sc->sc_config->idma_xferlen);
		return EIO;
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_idma_map, 0,
	    sc->sc_idma_size, BUS_DMASYNC_PREWRITE);

	sc->sc_idma_idst = 0;

	MMC_WRITE(sc, SUNXI_MMC_DLBA, desc_paddr);
	MMC_WRITE(sc, SUNXI_MMC_FTRGLEVEL, sc->sc_config->dma_ftrglevel);

	val = MMC_READ(sc, SUNXI_MMC_GCTRL);
	val |= SUNXI_MMC_GCTRL_DMAEN;
	MMC_WRITE(sc, SUNXI_MMC_GCTRL, val);
	val |= SUNXI_MMC_GCTRL_DMARESET;
	MMC_WRITE(sc, SUNXI_MMC_GCTRL, val);

	MMC_WRITE(sc, SUNXI_MMC_DMAC, SUNXI_MMC_DMAC_SOFTRESET);
	if (ISSET(cmd->c_flags, SCF_CMD_READ))
		val = SUNXI_MMC_IDST_RECEIVE_INT;
	else
		val = 0;
	MMC_WRITE(sc, SUNXI_MMC_IDIE, val);
	MMC_WRITE(sc, SUNXI_MMC_DMAC,
	    SUNXI_MMC_DMAC_IDMA_ON|SUNXI_MMC_DMAC_FIX_BURST);

	return 0;
}

static void
sunxi_mmc_dma_complete(struct sunxi_mmc_softc *sc, struct sdmmc_command *cmd)
{
	MMC_WRITE(sc, SUNXI_MMC_DMAC, 0);

	bus_dmamap_sync(sc->sc_dmat, sc->sc_idma_map, 0,
	    sc->sc_idma_size, BUS_DMASYNC_POSTWRITE);

	if (cmd->c_dmamap == NULL) {
		if (ISSET(cmd->c_flags, SCF_CMD_READ)) {
			bus_dmamap_sync(sc->sc_dmat, sc->sc_dmabounce_map,
			    0, cmd->c_datalen, BUS_DMASYNC_POSTREAD);
			memcpy(cmd->c_data, sc->sc_dmabounce_buf,
			    cmd->c_datalen);
		} else {
			bus_dmamap_sync(sc->sc_dmat, sc->sc_dmabounce_map,
			    0, cmd->c_datalen, BUS_DMASYNC_POSTWRITE);
		}
	}
}

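/*
 * Execute an sdmmc command.  The command word and interrupt mask are built
 * from the command flags; data transfers go through the IDMA engine set up
 * by sunxi_mmc_dma_prepare().  Completion is detected either by polling
 * RINT or by waiting for the interrupt handler, and the response (long or
 * short) is read back from the RESP registers.  On error the FIFO and DMA
 * state machines are reset and the clock is re-latched.
 */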
static void
sunxi_mmc_exec_command(sdmmc_chipset_handle_t sch, struct sdmmc_command *cmd)
{
	struct sunxi_mmc_softc *sc = sch;
	uint32_t cmdval = SUNXI_MMC_CMD_START;
	uint32_t imask, oimask;
	const bool poll = (cmd->c_flags & SCF_POLL) != 0;
	int retry;

	DPRINTF(sc->sc_dev,
	    "opcode %d flags 0x%x data %p datalen %d blklen %d poll %d\n",
	    cmd->c_opcode, cmd->c_flags, cmd->c_data, cmd->c_datalen,
	    cmd->c_blklen, poll);

	mutex_enter(&sc->sc_intr_lock);

	if (cmd->c_opcode == 0)
		cmdval |= SUNXI_MMC_CMD_SEND_INIT_SEQ;
	if (cmd->c_flags & SCF_RSP_PRESENT)
		cmdval |= SUNXI_MMC_CMD_RSP_EXP;
	if (cmd->c_flags & SCF_RSP_136)
		cmdval |= SUNXI_MMC_CMD_LONG_RSP;
	if (cmd->c_flags & SCF_RSP_CRC)
		cmdval |= SUNXI_MMC_CMD_CHECK_RSP_CRC;

	imask = oimask = MMC_READ(sc, SUNXI_MMC_IMASK);
	imask |= SUNXI_MMC_INT_ERROR;

	if (cmd->c_datalen > 0) {
		unsigned int nblks;

		cmdval |= SUNXI_MMC_CMD_DATA_EXP | SUNXI_MMC_CMD_WAIT_PRE_OVER;
		if (!ISSET(cmd->c_flags, SCF_CMD_READ)) {
			cmdval |= SUNXI_MMC_CMD_WRITE;
		}

		nblks = cmd->c_datalen / cmd->c_blklen;
		if (nblks == 0 || (cmd->c_datalen % cmd->c_blklen) != 0)
			++nblks;

		if (nblks > 1) {
			cmdval |= SUNXI_MMC_CMD_SEND_AUTO_STOP;
			imask |= SUNXI_MMC_INT_AUTO_CMD_DONE;
		} else {
			imask |= SUNXI_MMC_INT_DATA_OVER;
		}

		MMC_WRITE(sc, SUNXI_MMC_BLKSZ, cmd->c_blklen);
		MMC_WRITE(sc, SUNXI_MMC_BYTECNT, nblks * cmd->c_blklen);
	} else {
		imask |= SUNXI_MMC_INT_CMD_DONE;
	}

	MMC_WRITE(sc, SUNXI_MMC_IMASK, imask);
	MMC_WRITE(sc, SUNXI_MMC_RINT, 0xffff);

	sc->sc_intr_rint = 0;

	MMC_WRITE(sc, SUNXI_MMC_A12A,
	    (cmdval & SUNXI_MMC_CMD_SEND_AUTO_STOP) ? 0 : 0xffff);

	MMC_WRITE(sc, SUNXI_MMC_ARG, cmd->c_arg);

	DPRINTF(sc->sc_dev, "cmdval = %08x\n", cmdval);

	if (cmd->c_datalen == 0) {
		MMC_WRITE(sc, SUNXI_MMC_CMD, cmdval | cmd->c_opcode);
	} else {
		cmd->c_resid = cmd->c_datalen;
		cmd->c_error = sunxi_mmc_dma_prepare(sc, cmd);
		MMC_WRITE(sc, SUNXI_MMC_CMD, cmdval | cmd->c_opcode);
		if (cmd->c_error == 0 && ISSET(cmd->c_flags, SCF_CMD_READ)) {
			const uint32_t idst_mask = SUNXI_MMC_IDST_RECEIVE_INT;

			retry = 10;
			while ((sc->sc_idma_idst & idst_mask) == 0) {
				if (retry-- == 0) {
					cmd->c_error = ETIMEDOUT;
					break;
				}
				cv_timedwait(&sc->sc_idst_cv,
				    &sc->sc_intr_lock, hz);
			}
		}
	}

	cmd->c_error = sunxi_mmc_wait_rint(sc,
	    SUNXI_MMC_INT_ERROR|SUNXI_MMC_INT_CMD_DONE, hz * 10, poll);
	if (cmd->c_error == 0 && (sc->sc_intr_rint & SUNXI_MMC_INT_ERROR)) {
		if (sc->sc_intr_rint & SUNXI_MMC_INT_RESP_TIMEOUT) {
			cmd->c_error = ETIMEDOUT;
		} else {
			cmd->c_error = EIO;
		}
	}
	if (cmd->c_error) {
		DPRINTF(sc->sc_dev,
		    "cmd failed, error %d\n", cmd->c_error);
		goto done;
	}

	if (cmd->c_datalen > 0) {
		sunxi_mmc_dma_complete(sc, cmd);

		cmd->c_error = sunxi_mmc_wait_rint(sc,
		    SUNXI_MMC_INT_ERROR|
		    SUNXI_MMC_INT_AUTO_CMD_DONE|
		    SUNXI_MMC_INT_DATA_OVER,
		    hz*10, poll);
		if (cmd->c_error == 0 &&
		    (sc->sc_intr_rint & SUNXI_MMC_INT_ERROR)) {
			cmd->c_error = ETIMEDOUT;
		}
		if (cmd->c_error) {
			DPRINTF(sc->sc_dev,
			    "data timeout, rint = %08x\n",
			    sc->sc_intr_rint);
			cmd->c_error = ETIMEDOUT;
			goto done;
		}
	}

	if (cmd->c_flags & SCF_RSP_PRESENT) {
		if (cmd->c_flags & SCF_RSP_136) {
			cmd->c_resp[0] = MMC_READ(sc, SUNXI_MMC_RESP0);
			cmd->c_resp[1] = MMC_READ(sc, SUNXI_MMC_RESP1);
			cmd->c_resp[2] = MMC_READ(sc, SUNXI_MMC_RESP2);
			cmd->c_resp[3] = MMC_READ(sc, SUNXI_MMC_RESP3);
			if (cmd->c_flags & SCF_RSP_CRC) {
				cmd->c_resp[0] = (cmd->c_resp[0] >> 8) |
				    (cmd->c_resp[1] << 24);
				cmd->c_resp[1] = (cmd->c_resp[1] >> 8) |
				    (cmd->c_resp[2] << 24);
				cmd->c_resp[2] = (cmd->c_resp[2] >> 8) |
				    (cmd->c_resp[3] << 24);
				cmd->c_resp[3] = (cmd->c_resp[3] >> 8);
			}
		} else {
			cmd->c_resp[0] = MMC_READ(sc, SUNXI_MMC_RESP0);
		}
	}

done:
	cmd->c_flags |= SCF_ITSDONE;
	MMC_WRITE(sc, SUNXI_MMC_IMASK, oimask);
	MMC_WRITE(sc, SUNXI_MMC_RINT, 0xffff);
	MMC_WRITE(sc, SUNXI_MMC_IDST, 0x337);
	mutex_exit(&sc->sc_intr_lock);

	if (cmd->c_error) {
		DPRINTF(sc->sc_dev, "i/o error %d\n", cmd->c_error);
		MMC_WRITE(sc, SUNXI_MMC_GCTRL,
		    MMC_READ(sc, SUNXI_MMC_GCTRL) |
		      SUNXI_MMC_GCTRL_DMARESET | SUNXI_MMC_GCTRL_FIFORESET);
		for (retry = 0; retry < 1000; retry++) {
			if (!(MMC_READ(sc, SUNXI_MMC_GCTRL) & SUNXI_MMC_GCTRL_RESET))
				break;
			delay(10);
		}
		sunxi_mmc_update_clock(sc);
	}

	MMC_WRITE(sc, SUNXI_MMC_GCTRL,
	    MMC_READ(sc, SUNXI_MMC_GCTRL) | SUNXI_MMC_GCTRL_FIFORESET);
}

static void
sunxi_mmc_card_enable_intr(sdmmc_chipset_handle_t sch, int enable)
{
	struct sunxi_mmc_softc *sc = sch;
	uint32_t imask;

	imask = MMC_READ(sc, SUNXI_MMC_IMASK);
	if (enable)
		imask |= SUNXI_MMC_INT_SDIO_INT;
	else
		imask &= ~SUNXI_MMC_INT_SDIO_INT;
	MMC_WRITE(sc, SUNXI_MMC_IMASK, imask);
}

static void
sunxi_mmc_card_intr_ack(sdmmc_chipset_handle_t sch)
{
	struct sunxi_mmc_softc *sc = sch;

	MMC_WRITE(sc, SUNXI_MMC_RINT, SUNXI_MMC_INT_SDIO_INT);
}
1239