xref: /netbsd-src/sys/arch/arm/sunxi/sunxi_mmc.c (revision 8ecbf5f02b752fcb7debe1a8fab1dc82602bc760)
1 /* $NetBSD: sunxi_mmc.c,v 1.41 2020/03/07 00:51:10 macallan Exp $ */
2 
3 /*-
4  * Copyright (c) 2014-2017 Jared McNeill <jmcneill@invisible.ca>
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
21  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
22  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
23  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
24  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  */
28 
29 #include "opt_sunximmc.h"
30 
31 #include <sys/cdefs.h>
32 __KERNEL_RCSID(0, "$NetBSD: sunxi_mmc.c,v 1.41 2020/03/07 00:51:10 macallan Exp $");
33 
34 #include <sys/param.h>
35 #include <sys/bus.h>
36 #include <sys/device.h>
37 #include <sys/intr.h>
38 #include <sys/systm.h>
39 #include <sys/kernel.h>
40 #include <sys/gpio.h>
41 
42 #include <dev/sdmmc/sdmmcvar.h>
43 #include <dev/sdmmc/sdmmcchip.h>
44 #include <dev/sdmmc/sdmmc_ioreg.h>
45 
46 #include <dev/fdt/fdtvar.h>
47 
48 #include <arm/sunxi/sunxi_mmc.h>
49 
50 #ifdef SUNXI_MMC_DEBUG
51 static int sunxi_mmc_debug = SUNXI_MMC_DEBUG;
52 #define	DPRINTF(dev, fmt, ...)						\
53 do {									\
54 	if (sunxi_mmc_debug & __BIT(device_unit(dev)))			\
55 		device_printf((dev), fmt, ##__VA_ARGS__);		\
56 } while (0)
57 #else
58 #define	DPRINTF(dev, fmt, ...)		((void)0)
59 #endif
60 
/*
 * Timing buckets used to index the per-SoC clock delay tables
 * (selected in sunxi_mmc_set_clock() from the requested card clock).
 */
enum sunxi_mmc_timing {
	SUNXI_MMC_TIMING_400K,		/* <= 400 kHz (card identification) */
	SUNXI_MMC_TIMING_25M,		/* <= 25 MHz */
	SUNXI_MMC_TIMING_50M,		/* <= 52 MHz, SDR */
	SUNXI_MMC_TIMING_50M_DDR,	/* <= 52 MHz, DDR, 1/4-bit bus */
	SUNXI_MMC_TIMING_50M_DDR_8BIT,	/* <= 52 MHz, DDR, 8-bit bus */
};
68 
/*
 * Per-timing clock phase delays, applied through the "output" and
 * "sample" clocks via clk_set_rate() (values presumably in degrees —
 * see the delay tables below; confirm against the SoC clock driver).
 */
struct sunxi_mmc_delay {
	u_int	output_phase;
	u_int	sample_phase;
};
73 
/* Output/sample phase delays for sun7i (A20) class controllers. */
static const struct sunxi_mmc_delay sun7i_mmc_delays[] = {
	[SUNXI_MMC_TIMING_400K]		= { 180,	180 },
	[SUNXI_MMC_TIMING_25M]		= { 180,	 75 },
	[SUNXI_MMC_TIMING_50M]		= {  90,	120 },
	[SUNXI_MMC_TIMING_50M_DDR]	= {  60,	120 },
	[SUNXI_MMC_TIMING_50M_DDR_8BIT]	= {  90,	180 },
};
81 
/* Output/sample phase delays for sun9i (A80) class controllers. */
static const struct sunxi_mmc_delay sun9i_mmc_delays[] = {
	[SUNXI_MMC_TIMING_400K]		= { 180,	180 },
	[SUNXI_MMC_TIMING_25M]		= { 180,	 75 },
	[SUNXI_MMC_TIMING_50M]		= { 150,	120 },
	[SUNXI_MMC_TIMING_50M_DDR]	= {  54,	 36 },
	[SUNXI_MMC_TIMING_50M_DDR_8BIT]	= {  72,	 72 },
};
89 
90 #define SUNXI_MMC_NDESC		64
91 
92 struct sunxi_mmc_softc;
93 
94 static int	sunxi_mmc_match(device_t, cfdata_t, void *);
95 static void	sunxi_mmc_attach(device_t, device_t, void *);
96 static void	sunxi_mmc_attach_i(device_t);
97 
98 static int	sunxi_mmc_intr(void *);
99 static int	sunxi_mmc_dmabounce_setup(struct sunxi_mmc_softc *);
100 static int	sunxi_mmc_idma_setup(struct sunxi_mmc_softc *);
101 static void	sunxi_mmc_dma_complete(struct sunxi_mmc_softc *, struct sdmmc_command *);
102 
103 static int	sunxi_mmc_host_reset(sdmmc_chipset_handle_t);
104 static uint32_t	sunxi_mmc_host_ocr(sdmmc_chipset_handle_t);
105 static int	sunxi_mmc_host_maxblklen(sdmmc_chipset_handle_t);
106 static int	sunxi_mmc_card_detect(sdmmc_chipset_handle_t);
107 static int	sunxi_mmc_write_protect(sdmmc_chipset_handle_t);
108 static int	sunxi_mmc_bus_power(sdmmc_chipset_handle_t, uint32_t);
109 static int	sunxi_mmc_bus_clock(sdmmc_chipset_handle_t, int, bool);
110 static int	sunxi_mmc_bus_width(sdmmc_chipset_handle_t, int);
111 static int	sunxi_mmc_bus_rod(sdmmc_chipset_handle_t, int);
112 static int	sunxi_mmc_signal_voltage(sdmmc_chipset_handle_t, int);
113 static int	sunxi_mmc_execute_tuning(sdmmc_chipset_handle_t, int);
114 static void	sunxi_mmc_exec_command(sdmmc_chipset_handle_t,
115 				      struct sdmmc_command *);
116 static void	sunxi_mmc_card_enable_intr(sdmmc_chipset_handle_t, int);
117 static void	sunxi_mmc_card_intr_ack(sdmmc_chipset_handle_t);
118 
/*
 * sdmmc(4) chipset vector handed to the sdmmc bus layer via
 * sdmmcbus_attach_args; these callbacks receive the softc as
 * the sdmmc_chipset_handle_t.
 */
static struct sdmmc_chip_functions sunxi_mmc_chip_functions = {
	.host_reset = sunxi_mmc_host_reset,
	.host_ocr = sunxi_mmc_host_ocr,
	.host_maxblklen = sunxi_mmc_host_maxblklen,
	.card_detect = sunxi_mmc_card_detect,
	.write_protect = sunxi_mmc_write_protect,
	.bus_power = sunxi_mmc_bus_power,
	.bus_clock_ddr = sunxi_mmc_bus_clock,
	.bus_width = sunxi_mmc_bus_width,
	.bus_rod = sunxi_mmc_bus_rod,
	.signal_voltage = sunxi_mmc_signal_voltage,
	.execute_tuning = sunxi_mmc_execute_tuning,
	.exec_command = sunxi_mmc_exec_command,
	.card_enable_intr = sunxi_mmc_card_enable_intr,
	.card_intr_ack = sunxi_mmc_card_intr_ack,
};
135 
/*
 * Per-SoC controller configuration, selected at attach time through
 * the compat_data table.
 */
struct sunxi_mmc_config {
	u_int idma_xferlen;	/* max bytes per IDMA descriptor */
	u_int flags;		/* SUNXI_MMC_FLAG_* quirk bits below */
#define	SUNXI_MMC_FLAG_CALIB_REG	0x01	/* has SAMP_DL calibration reg */
#define	SUNXI_MMC_FLAG_NEW_TIMINGS	0x02	/* use new timing mode (NTSR) */
#define	SUNXI_MMC_FLAG_MASK_DATA0	0x04	/* mask DATA0 while gating clock */
#define	SUNXI_MMC_FLAG_HS200		0x08	/* eMMC HS200 capable */
	const struct sunxi_mmc_delay *delays;	/* phase table, or NULL */
	uint32_t dma_ftrglevel;	/* value for the FTRGLEVEL register */
};
146 
/* Software state for one MMC controller instance. */
struct sunxi_mmc_softc {
	device_t sc_dev;
	bus_space_tag_t sc_bst;
	bus_space_handle_t sc_bsh;
	bus_dma_tag_t sc_dmat;
	int sc_phandle;			/* FDT node of this controller */

	void *sc_ih;			/* interrupt handle */
	kmutex_t sc_intr_lock;		/* protects the sc_wait_* state below */
	kcondvar_t sc_intr_cv;		/* signalled when a command finishes */

	int sc_mmc_width;		/* current bus width (1/4/8) */
	int sc_mmc_present;		/* cached card-detect state (GPIO path) */

	u_int sc_max_frequency;		/* DT "max-frequency" limit, in Hz */

	device_t sc_sdmmc_dev;		/* attached sdmmc(4) child */

	struct sunxi_mmc_config *sc_config;	/* per-SoC quirks/parameters */

	/* IDMA descriptor ring (SUNXI_MMC_NDESC entries) */
	bus_dma_segment_t sc_idma_segs[1];
	int sc_idma_nsegs;
	bus_size_t sc_idma_size;
	bus_dmamap_t sc_idma_map;
	int sc_idma_ndesc;
	void *sc_idma_desc;

	/* Bounce buffer for commands without their own dmamap */
	bus_dmamap_t sc_dmabounce_map;
	void *sc_dmabounce_buf;
	size_t sc_dmabounce_buflen;

	struct clk *sc_clk_ahb;
	struct clk *sc_clk_mmc;
	struct clk *sc_clk_output;	/* optional phase clock */
	struct clk *sc_clk_sample;	/* optional phase clock */

	struct fdtbus_reset *sc_rst_ahb;

	struct fdtbus_gpio_pin *sc_gpio_cd;	/* card-detect GPIO, optional */
	int sc_gpio_cd_inverted;
	struct fdtbus_gpio_pin *sc_gpio_wp;	/* write-protect GPIO, optional */
	int sc_gpio_wp_inverted;

	struct fdtbus_regulator *sc_reg_vmmc;	/* card supply */
	struct fdtbus_regulator *sc_reg_vqmmc;	/* I/O voltage supply */

	struct fdtbus_mmc_pwrseq *sc_pwrseq;

	bool sc_non_removable;		/* DT "non-removable" */
	bool sc_broken_cd;		/* DT "broken-cd" */

	/* In-flight command state, owned by sc_intr_lock */
	uint32_t sc_intr_card;
	struct sdmmc_command *sc_curcmd;
	bool sc_wait_dma;
	bool sc_wait_cmd;
	bool sc_wait_data;
};
204 
205 CFATTACH_DECL_NEW(sunxi_mmc, sizeof(struct sunxi_mmc_softc),
206 	sunxi_mmc_match, sunxi_mmc_attach, NULL, NULL);
207 
208 #define MMC_WRITE(sc, reg, val)	\
209 	bus_space_write_4((sc)->sc_bst, (sc)->sc_bsh, (reg), (val))
210 #define MMC_READ(sc, reg) \
211 	bus_space_read_4((sc)->sc_bst, (sc)->sc_bsh, (reg))
212 
/* A10: 8 KB IDMA transfer limit, no delay table, no quirks. */
static const struct sunxi_mmc_config sun4i_a10_mmc_config = {
	.idma_xferlen = 0x2000,
	.dma_ftrglevel = 0x20070008,
	.delays = NULL,
	.flags = 0,
};
219 
/* A13: 64 KB IDMA transfer limit, no delay table, no quirks. */
static const struct sunxi_mmc_config sun5i_a13_mmc_config = {
	.idma_xferlen = 0x10000,
	.dma_ftrglevel = 0x20070008,
	.delays = NULL,
	.flags = 0,
};
226 
/* A20: uses the sun7i phase delay table. */
static const struct sunxi_mmc_config sun7i_a20_mmc_config = {
	.idma_xferlen = 0x2000,
	.dma_ftrglevel = 0x20070008,
	.delays = sun7i_mmc_delays,
	.flags = 0,
};
233 
/* A83T eMMC: new timing mode, no delay table. */
static const struct sunxi_mmc_config sun8i_a83t_emmc_config = {
	.idma_xferlen = 0x10000,
	.dma_ftrglevel = 0x20070008,
	.delays = NULL,
	.flags = SUNXI_MMC_FLAG_NEW_TIMINGS,
};
240 
/* A80: distinct FIFO threshold, uses the sun9i phase delay table. */
static const struct sunxi_mmc_config sun9i_a80_mmc_config = {
	.idma_xferlen = 0x10000,
	.dma_ftrglevel = 0x200f0010,
	.delays = sun9i_mmc_delays,
	.flags = 0,
};
247 
/* A64 SD/MMC: calibration register, new timings, DATA0 masking quirk. */
static const struct sunxi_mmc_config sun50i_a64_mmc_config = {
	.idma_xferlen = 0x10000,
	.dma_ftrglevel = 0x20070008,
	.delays = NULL,
	.flags = SUNXI_MMC_FLAG_CALIB_REG |
		 SUNXI_MMC_FLAG_NEW_TIMINGS |
		 SUNXI_MMC_FLAG_MASK_DATA0,
};
256 
/* A64 eMMC: calibration register, new timings, HS200 capable. */
static const struct sunxi_mmc_config sun50i_a64_emmc_config = {
	.idma_xferlen = 0x2000,
	.dma_ftrglevel = 0x20070008,
	.delays = NULL,
	.flags = SUNXI_MMC_FLAG_CALIB_REG |
		 SUNXI_MMC_FLAG_NEW_TIMINGS |
		 SUNXI_MMC_FLAG_HS200,
};
265 
/* H6 SD/MMC: same quirk set as the A64 SD/MMC controller. */
static const struct sunxi_mmc_config sun50i_h6_mmc_config = {
	.idma_xferlen = 0x10000,
	.dma_ftrglevel = 0x20070008,
	.delays = NULL,
	.flags = SUNXI_MMC_FLAG_CALIB_REG |
		 SUNXI_MMC_FLAG_NEW_TIMINGS |
		 SUNXI_MMC_FLAG_MASK_DATA0,
};
274 
/* H6 eMMC: calibration register only. */
static const struct sunxi_mmc_config sun50i_h6_emmc_config = {
	.idma_xferlen = 0x2000,
	.dma_ftrglevel = 0x20070008,
	.delays = NULL,
	.flags = SUNXI_MMC_FLAG_CALIB_REG,
};
281 
/* DT compatible strings mapped to the per-SoC configuration above. */
static const struct of_compat_data compat_data[] = {
	{ "allwinner,sun4i-a10-mmc",	(uintptr_t)&sun4i_a10_mmc_config },
	{ "allwinner,sun5i-a13-mmc",	(uintptr_t)&sun5i_a13_mmc_config },
	{ "allwinner,sun7i-a20-mmc",	(uintptr_t)&sun7i_a20_mmc_config },
	{ "allwinner,sun8i-a83t-emmc",	(uintptr_t)&sun8i_a83t_emmc_config },
	{ "allwinner,sun9i-a80-mmc",	(uintptr_t)&sun9i_a80_mmc_config },
	{ "allwinner,sun50i-a64-mmc",	(uintptr_t)&sun50i_a64_mmc_config },
	{ "allwinner,sun50i-a64-emmc",	(uintptr_t)&sun50i_a64_emmc_config },
	{ "allwinner,sun50i-h6-mmc",	(uintptr_t)&sun50i_h6_mmc_config },
	{ "allwinner,sun50i-h6-emmc",	(uintptr_t)&sun50i_h6_emmc_config },
	{ NULL }
};
294 
295 static int
296 sunxi_mmc_match(device_t parent, cfdata_t cf, void *aux)
297 {
298 	struct fdt_attach_args * const faa = aux;
299 
300 	return of_match_compat_data(faa->faa_phandle, compat_data);
301 }
302 
/*
 * Autoconf attach: map registers, bring up clocks/resets/regulators,
 * set up DMA resources and the interrupt handler, then defer bus
 * attachment to sunxi_mmc_attach_i() once interrupts are running.
 */
static void
sunxi_mmc_attach(device_t parent, device_t self, void *aux)
{
	struct sunxi_mmc_softc * const sc = device_private(self);
	struct fdt_attach_args * const faa = aux;
	const int phandle = faa->faa_phandle;
	char intrstr[128];
	bus_addr_t addr;
	bus_size_t size;

	if (fdtbus_get_reg(phandle, 0, &addr, &size) != 0) {
		aprint_error(": couldn't get registers\n");
		return;
	}

	sc->sc_clk_ahb = fdtbus_clock_get(phandle, "ahb");
	sc->sc_clk_mmc = fdtbus_clock_get(phandle, "mmc");
	/* Phase clocks are optional on some SoCs (see #if notyet below). */
	sc->sc_clk_output = fdtbus_clock_get(phandle, "output");
	sc->sc_clk_sample = fdtbus_clock_get(phandle, "sample");

#if notyet
	if (sc->sc_clk_ahb == NULL || sc->sc_clk_mmc == NULL ||
	    sc->sc_clk_output == NULL || sc->sc_clk_sample == NULL) {
#else
	if (sc->sc_clk_ahb == NULL || sc->sc_clk_mmc == NULL) {
#endif
		aprint_error(": couldn't get clocks\n");
		return;
	}

	sc->sc_rst_ahb = fdtbus_reset_get(phandle, "ahb");

	sc->sc_pwrseq = fdtbus_mmc_pwrseq_get(phandle);

	if (clk_enable(sc->sc_clk_ahb) != 0 ||
	    clk_enable(sc->sc_clk_mmc) != 0) {
		aprint_error(": couldn't enable clocks\n");
		return;
	}

	/* The AHB reset is optional; only deassert it when present. */
	if (sc->sc_rst_ahb != NULL) {
		if (fdtbus_reset_deassert(sc->sc_rst_ahb) != 0) {
			aprint_error(": couldn't de-assert resets\n");
			return;
		}
	}

	sc->sc_dev = self;
	sc->sc_phandle = phandle;
	sc->sc_config = (void *)of_search_compatible(phandle, compat_data)->data;
	sc->sc_bst = faa->faa_bst;
	sc->sc_dmat = faa->faa_dmat;
	/* IPL_BIO: the interrupt handler takes this lock. */
	mutex_init(&sc->sc_intr_lock, MUTEX_DEFAULT, IPL_BIO);
	cv_init(&sc->sc_intr_cv, "sunximmcirq");

	if (bus_space_map(sc->sc_bst, addr, size, 0, &sc->sc_bsh) != 0) {
		aprint_error(": couldn't map registers\n");
		return;
	}

	sc->sc_reg_vmmc = fdtbus_regulator_acquire(phandle, "vmmc-supply");
	if (sc->sc_reg_vmmc != NULL && fdtbus_regulator_enable(sc->sc_reg_vmmc)) {
		aprint_error(": couldn't enable vmmc-supply\n");
		return;
	}

	aprint_naive("\n");
	aprint_normal(": SD/MMC controller\n");

	/* vqmmc is only switched on demand from sunxi_mmc_signal_voltage(). */
	sc->sc_reg_vqmmc = fdtbus_regulator_acquire(phandle, "vqmmc-supply");

	sc->sc_gpio_cd = fdtbus_gpio_acquire(phandle, "cd-gpios",
	    GPIO_PIN_INPUT);
	sc->sc_gpio_wp = fdtbus_gpio_acquire(phandle, "wp-gpios",
	    GPIO_PIN_INPUT);

	/* Default polarity is active-low; DT properties invert it. */
	sc->sc_gpio_cd_inverted = of_hasprop(phandle, "cd-inverted") ? 0 : 1;
	sc->sc_gpio_wp_inverted = of_hasprop(phandle, "wp-inverted") ? 0 : 1;

	sc->sc_non_removable = of_hasprop(phandle, "non-removable");
	sc->sc_broken_cd = of_hasprop(phandle, "broken-cd");

	/* Default bus clock ceiling when DT gives no "max-frequency". */
	if (of_getprop_uint32(phandle, "max-frequency", &sc->sc_max_frequency))
		sc->sc_max_frequency = 52000000;

	if (sunxi_mmc_dmabounce_setup(sc) != 0 ||
	    sunxi_mmc_idma_setup(sc) != 0) {
		aprint_error_dev(self, "failed to setup DMA\n");
		return;
	}

	if (!fdtbus_intr_str(phandle, 0, intrstr, sizeof(intrstr))) {
		aprint_error_dev(self, "failed to decode interrupt\n");
		return;
	}

	sc->sc_ih = fdtbus_intr_establish(phandle, 0, IPL_BIO, FDT_INTR_MPSAFE,
	    sunxi_mmc_intr, sc);
	if (sc->sc_ih == NULL) {
		aprint_error_dev(self, "failed to establish interrupt on %s\n",
		    intrstr);
		return;
	}
	aprint_normal_dev(self, "interrupting on %s\n", intrstr);

	/* Finish attachment once interrupt delivery is available. */
	config_interrupts(self, sunxi_mmc_attach_i);
}
410 
/*
 * Allocate and map a single-segment DMA bounce buffer, used for
 * commands that arrive without their own dmamap (see
 * sunxi_mmc_dma_prepare()).  Sized to the host maximum block length.
 * Returns 0 on success or a bus_dma error, unwinding via goto cleanup.
 */
static int
sunxi_mmc_dmabounce_setup(struct sunxi_mmc_softc *sc)
{
	bus_dma_segment_t ds[1];
	int error, rseg;

	sc->sc_dmabounce_buflen = sunxi_mmc_host_maxblklen(sc);
	error = bus_dmamem_alloc(sc->sc_dmat, sc->sc_dmabounce_buflen, 0,
	    sc->sc_dmabounce_buflen, ds, 1, &rseg, BUS_DMA_WAITOK);
	if (error)
		return error;
	error = bus_dmamem_map(sc->sc_dmat, ds, 1, sc->sc_dmabounce_buflen,
	    &sc->sc_dmabounce_buf, BUS_DMA_WAITOK);
	if (error)
		goto free;
	error = bus_dmamap_create(sc->sc_dmat, sc->sc_dmabounce_buflen, 1,
	    sc->sc_dmabounce_buflen, 0, BUS_DMA_WAITOK, &sc->sc_dmabounce_map);
	if (error)
		goto unmap;
	error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmabounce_map,
	    sc->sc_dmabounce_buf, sc->sc_dmabounce_buflen, NULL,
	    BUS_DMA_WAITOK);
	if (error)
		goto destroy;
	return 0;

destroy:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmabounce_map);
unmap:
	bus_dmamem_unmap(sc->sc_dmat, sc->sc_dmabounce_buf,
	    sc->sc_dmabounce_buflen);
free:
	bus_dmamem_free(sc->sc_dmat, ds, rseg);
	return error;
}
446 
/*
 * Allocate, map and load the IDMA descriptor ring (SUNXI_MMC_NDESC
 * descriptors in one physically contiguous segment).  Returns 0 on
 * success or a bus_dma error, unwinding via goto cleanup.
 */
static int
sunxi_mmc_idma_setup(struct sunxi_mmc_softc *sc)
{
	int error;

	sc->sc_idma_ndesc = SUNXI_MMC_NDESC;
	sc->sc_idma_size = sizeof(struct sunxi_mmc_idma_descriptor) *
	    sc->sc_idma_ndesc;
	error = bus_dmamem_alloc(sc->sc_dmat, sc->sc_idma_size, 0,
	    sc->sc_idma_size, sc->sc_idma_segs, 1,
	    &sc->sc_idma_nsegs, BUS_DMA_WAITOK);
	if (error)
		return error;
	error = bus_dmamem_map(sc->sc_dmat, sc->sc_idma_segs,
	    sc->sc_idma_nsegs, sc->sc_idma_size,
	    &sc->sc_idma_desc, BUS_DMA_WAITOK);
	if (error)
		goto free;
	error = bus_dmamap_create(sc->sc_dmat, sc->sc_idma_size, 1,
	    sc->sc_idma_size, 0, BUS_DMA_WAITOK, &sc->sc_idma_map);
	if (error)
		goto unmap;
	error = bus_dmamap_load(sc->sc_dmat, sc->sc_idma_map,
	    sc->sc_idma_desc, sc->sc_idma_size, NULL, BUS_DMA_WAITOK);
	if (error)
		goto destroy;
	return 0;

destroy:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_idma_map);
unmap:
	bus_dmamem_unmap(sc->sc_dmat, sc->sc_idma_desc, sc->sc_idma_size);
free:
	bus_dmamem_free(sc->sc_dmat, sc->sc_idma_segs, sc->sc_idma_nsegs);
	return error;
}
483 
484 static int
485 sunxi_mmc_set_clock(struct sunxi_mmc_softc *sc, u_int freq, bool ddr, bool dbl)
486 {
487 	const struct sunxi_mmc_delay *delays;
488 	int error, timing = SUNXI_MMC_TIMING_400K;
489 
490 	if (sc->sc_config->delays) {
491 		if (freq <= 400) {
492 			timing = SUNXI_MMC_TIMING_400K;
493 		} else if (freq <= 25000) {
494 			timing = SUNXI_MMC_TIMING_25M;
495 		} else if (freq <= 52000) {
496 			if (ddr) {
497 				timing = sc->sc_mmc_width == 8 ?
498 				    SUNXI_MMC_TIMING_50M_DDR_8BIT :
499 				    SUNXI_MMC_TIMING_50M_DDR;
500 			} else {
501 				timing = SUNXI_MMC_TIMING_50M;
502 			}
503 		} else
504 			return EINVAL;
505 	}
506 	if (sc->sc_max_frequency) {
507 		if (freq * 1000 > sc->sc_max_frequency)
508 			return EINVAL;
509 	}
510 
511 	error = clk_set_rate(sc->sc_clk_mmc, (freq * 1000) << dbl);
512 	if (error != 0)
513 		return error;
514 
515 	if (sc->sc_config->delays == NULL)
516 		return 0;
517 
518 	delays = &sc->sc_config->delays[timing];
519 
520 	if (sc->sc_clk_sample) {
521 		error = clk_set_rate(sc->sc_clk_sample, delays->sample_phase);
522 		if (error != 0)
523 			return error;
524 	}
525 	if (sc->sc_clk_output) {
526 		error = clk_set_rate(sc->sc_clk_output, delays->output_phase);
527 		if (error != 0)
528 			return error;
529 	}
530 
531 	return 0;
532 }
533 
534 static void
535 sunxi_mmc_hw_reset(struct sunxi_mmc_softc *sc)
536 {
537 	MMC_WRITE(sc, SUNXI_MMC_HWRST, 0);
538 	delay(1000);
539 	MMC_WRITE(sc, SUNXI_MMC_HWRST, 1);
540 	delay(1000);
541 }
542 
543 static void
544 sunxi_mmc_attach_i(device_t self)
545 {
546 	struct sunxi_mmc_softc *sc = device_private(self);
547 	const u_int flags = sc->sc_config->flags;
548 	struct sdmmcbus_attach_args saa;
549 	uint32_t width;
550 	const bool supports_hs200 =
551 		of_hasprop(sc->sc_phandle, "mmc-hs200-1_2v") |
552 		of_hasprop(sc->sc_phandle, "mmc-hs200-1_8v");
553 
554 	const bool supports_ddr =
555 		of_hasprop(sc->sc_phandle, "mmc-ddr-1_2v") |
556 		of_hasprop(sc->sc_phandle, "mmc-ddr-1_8v") |
557 		of_hasprop(sc->sc_phandle, "mmc-ddr-3_3v");
558 
559 	if (sc->sc_pwrseq)
560 		fdtbus_mmc_pwrseq_pre_power_on(sc->sc_pwrseq);
561 
562 	if (of_hasprop(sc->sc_phandle, "cap-mmc-hw-reset"))
563 		sunxi_mmc_hw_reset(sc);
564 
565 	sunxi_mmc_host_reset(sc);
566 	sunxi_mmc_bus_width(sc, 1);
567 	sunxi_mmc_set_clock(sc, 400, false, false);
568 
569 	if (sc->sc_pwrseq)
570 		fdtbus_mmc_pwrseq_post_power_on(sc->sc_pwrseq);
571 
572 	if (of_getprop_uint32(sc->sc_phandle, "bus-width", &width) != 0)
573 		width = 4;
574 
575 	memset(&saa, 0, sizeof(saa));
576 	saa.saa_busname = "sdmmc";
577 	saa.saa_sct = &sunxi_mmc_chip_functions;
578 	saa.saa_sch = sc;
579 	saa.saa_dmat = sc->sc_dmat;
580 	saa.saa_clkmin = 400;
581 	saa.saa_clkmax = sc->sc_max_frequency / 1000;
582 	saa.saa_caps = SMC_CAPS_DMA |
583 		       SMC_CAPS_MULTI_SEG_DMA |
584 		       SMC_CAPS_AUTO_STOP |
585 		       SMC_CAPS_SD_HIGHSPEED |
586 		       SMC_CAPS_MMC_HIGHSPEED;
587 
588 	if ((sc->sc_config->delays || (flags & SUNXI_MMC_FLAG_NEW_TIMINGS)) &&
589 	     supports_ddr)
590 		saa.saa_caps |= SMC_CAPS_MMC_DDR52;
591 
592 	if ((flags & SUNXI_MMC_FLAG_HS200) != 0 && supports_hs200)
593 		saa.saa_caps |= SMC_CAPS_MMC_HS200;
594 
595 	if (width == 4)
596 		saa.saa_caps |= SMC_CAPS_4BIT_MODE;
597 	if (width == 8)
598 		saa.saa_caps |= SMC_CAPS_8BIT_MODE;
599 
600 	if (sc->sc_gpio_cd)
601 		saa.saa_caps |= SMC_CAPS_POLL_CARD_DET;
602 
603 	sc->sc_sdmmc_dev = config_found(self, &saa, NULL);
604 }
605 
/*
 * Interrupt handler.  Reads and acks both the IDMA (IDST) and MMC
 * (MINT/RINT) interrupt status, then clears the sc_wait_{dma,cmd,data}
 * flags as the corresponding events arrive.  When all flags are clear
 * the current command is marked done and sunxi_mmc_exec_command's
 * waiter is woken via sc_intr_cv.  Returns 1 if the interrupt was ours.
 */
static int
sunxi_mmc_intr(void *priv)
{
	struct sunxi_mmc_softc *sc = priv;
	struct sdmmc_command *cmd;
	uint32_t idst, mint, imask;

	mutex_enter(&sc->sc_intr_lock);
	idst = MMC_READ(sc, SUNXI_MMC_IDST);
	mint = MMC_READ(sc, SUNXI_MMC_MINT);
	if (!idst && !mint) {
		/* Not our interrupt. */
		mutex_exit(&sc->sc_intr_lock);
		return 0;
	}
	/* Ack everything we saw (write-1-to-clear). */
	MMC_WRITE(sc, SUNXI_MMC_IDST, idst);
	MMC_WRITE(sc, SUNXI_MMC_RINT, mint);

	cmd = sc->sc_curcmd;

	DPRINTF(sc->sc_dev, "mmc intr idst=%08X mint=%08X\n",
	    idst, mint);

	/* Handle SDIO card interrupt */
	if ((mint & SUNXI_MMC_INT_SDIO_INT) != 0) {
		/* Mask it until the stack acks via card_intr_ack. */
		imask = MMC_READ(sc, SUNXI_MMC_IMASK);
		MMC_WRITE(sc, SUNXI_MMC_IMASK, imask & ~SUNXI_MMC_INT_SDIO_INT);
		sdmmc_card_intr(sc->sc_sdmmc_dev);
	}

	/* Error interrupts take priority over command and transfer interrupts */
	if (cmd != NULL && (mint & SUNXI_MMC_INT_ERROR) != 0) {
		imask = MMC_READ(sc, SUNXI_MMC_IMASK);
		MMC_WRITE(sc, SUNXI_MMC_IMASK, imask & ~SUNXI_MMC_INT_ERROR);
		if ((mint & SUNXI_MMC_INT_RESP_TIMEOUT) != 0) {
			cmd->c_error = ETIMEDOUT;
			/* Wait for command to complete */
			sc->sc_wait_data = sc->sc_wait_dma = false;
			/*
			 * Response timeouts are normal during SDIO probing
			 * and when the caller tolerates them (SCF_TOUT_OK);
			 * only log the unexpected ones.
			 */
			if (cmd->c_opcode != SD_IO_SEND_OP_COND &&
			    cmd->c_opcode != SD_IO_RW_DIRECT &&
			    !ISSET(cmd->c_flags, SCF_TOUT_OK))
				device_printf(sc->sc_dev, "host controller timeout, mint=0x%08x\n", mint);
		} else {
			device_printf(sc->sc_dev, "host controller error, mint=0x%08x\n", mint);
			cmd->c_error = EIO;
			SET(cmd->c_flags, SCF_ITSDONE);
			goto done;
		}
	}

	/* DMA receive complete (read direction only, see dma_prepare). */
	if (cmd != NULL && (idst & SUNXI_MMC_IDST_RECEIVE_INT) != 0) {
		MMC_WRITE(sc, SUNXI_MMC_IDIE, 0);
		if (sc->sc_wait_dma == false)
			device_printf(sc->sc_dev, "unexpected DMA receive interrupt\n");
		sc->sc_wait_dma = false;
	}

	/* Command phase complete. */
	if (cmd != NULL && (mint & SUNXI_MMC_INT_CMD_DONE) != 0) {
		imask = MMC_READ(sc, SUNXI_MMC_IMASK);
		MMC_WRITE(sc, SUNXI_MMC_IMASK, imask & ~SUNXI_MMC_INT_CMD_DONE);
		if (sc->sc_wait_cmd == false)
			device_printf(sc->sc_dev, "unexpected command complete interrupt\n");
		sc->sc_wait_cmd = false;
	}

	/* Data phase complete (auto-stop done or data-over). */
	const uint32_t dmadone_mask = SUNXI_MMC_INT_AUTO_CMD_DONE|SUNXI_MMC_INT_DATA_OVER;
	if (cmd != NULL && (mint & dmadone_mask) != 0) {
		imask = MMC_READ(sc, SUNXI_MMC_IMASK);
		MMC_WRITE(sc, SUNXI_MMC_IMASK, imask & ~dmadone_mask);
		if (sc->sc_wait_data == false)
			device_printf(sc->sc_dev, "unexpected data complete interrupt\n");
		sc->sc_wait_data = false;
	}

	/* All phases finished: the command is done. */
	if (cmd != NULL &&
	    sc->sc_wait_dma == false &&
	    sc->sc_wait_cmd == false &&
	    sc->sc_wait_data == false) {
		SET(cmd->c_flags, SCF_ITSDONE);
	}

done:
	if (cmd != NULL && ISSET(cmd->c_flags, SCF_ITSDONE)) {
		cv_broadcast(&sc->sc_intr_cv);
	}

	mutex_exit(&sc->sc_intr_lock);

	return 1;
}
695 
696 static int
697 sunxi_mmc_host_reset(sdmmc_chipset_handle_t sch)
698 {
699 	struct sunxi_mmc_softc *sc = sch;
700 	uint32_t gctrl;
701 	int retry = 1000;
702 
703 	DPRINTF(sc->sc_dev, "host reset\n");
704 
705 	gctrl = MMC_READ(sc, SUNXI_MMC_GCTRL);
706 	gctrl |= SUNXI_MMC_GCTRL_RESET;
707 	MMC_WRITE(sc, SUNXI_MMC_GCTRL, gctrl);
708 	while (--retry > 0) {
709 		if (!(MMC_READ(sc, SUNXI_MMC_GCTRL) & SUNXI_MMC_GCTRL_RESET))
710 			break;
711 		delay(100);
712 	}
713 
714 	MMC_WRITE(sc, SUNXI_MMC_TIMEOUT, 0xffffffff);
715 
716 	MMC_WRITE(sc, SUNXI_MMC_IMASK, 0);
717 
718 	MMC_WRITE(sc, SUNXI_MMC_RINT, 0xffffffff);
719 
720 	gctrl = MMC_READ(sc, SUNXI_MMC_GCTRL);
721 	gctrl |= SUNXI_MMC_GCTRL_INTEN;
722 	gctrl &= ~SUNXI_MMC_GCTRL_WAIT_MEM_ACCESS_DONE;
723 	gctrl &= ~SUNXI_MMC_GCTRL_ACCESS_BY_AHB;
724 	MMC_WRITE(sc, SUNXI_MMC_GCTRL, gctrl);
725 
726 	return 0;
727 }
728 
729 static uint32_t
730 sunxi_mmc_host_ocr(sdmmc_chipset_handle_t sch)
731 {
732 	return MMC_OCR_3_2V_3_3V | MMC_OCR_3_3V_3_4V | MMC_OCR_HCS;
733 }
734 
/*
 * sdmmc host_maxblklen callback.  The 8 KB value also sizes the DMA
 * bounce buffer (see sunxi_mmc_dmabounce_setup()).
 */
static int
sunxi_mmc_host_maxblklen(sdmmc_chipset_handle_t sch)
{
	return 8192;
}
740 
/*
 * sdmmc card_detect callback.  Returns nonzero when a card is present,
 * using (in order of preference): DT override flags, a card-detect
 * GPIO with software debounce, or the controller's own status bit.
 */
static int
sunxi_mmc_card_detect(sdmmc_chipset_handle_t sch)
{
	struct sunxi_mmc_softc *sc = sch;

	if (sc->sc_non_removable || sc->sc_broken_cd) {
		/*
		 * Non-removable or broken card detect flag set in
		 * DT, assume always present
		 */
		return 1;
	} else if (sc->sc_gpio_cd != NULL) {
		/* Use card detect GPIO */
		int v = 0, i;
		/*
		 * Debounce: sample the pin 5 times, 1 ms apart.  Only a
		 * unanimous reading updates the cached state; anything
		 * mixed returns the previous value unchanged.
		 */
		for (i = 0; i < 5; i++) {
			v += (fdtbus_gpio_read(sc->sc_gpio_cd) ^
			    sc->sc_gpio_cd_inverted);
			delay(1000);
		}
		if (v == 5)
			sc->sc_mmc_present = 0;
		else if (v == 0)
			sc->sc_mmc_present = 1;
		return sc->sc_mmc_present;
	} else {
		/* Use CARD_PRESENT field of SD_STATUS register */
		const uint32_t present = MMC_READ(sc, SUNXI_MMC_STATUS) &
		    SUNXI_MMC_STATUS_CARD_PRESENT;
		return present != 0;
	}
}
772 
773 static int
774 sunxi_mmc_write_protect(sdmmc_chipset_handle_t sch)
775 {
776 	struct sunxi_mmc_softc *sc = sch;
777 
778 	if (sc->sc_gpio_wp == NULL) {
779 		return 0;	/* no write protect pin, assume rw */
780 	} else {
781 		return fdtbus_gpio_read(sc->sc_gpio_wp) ^
782 		    sc->sc_gpio_wp_inverted;
783 	}
784 }
785 
/*
 * sdmmc bus_power callback.  Power is handled by regulators at attach
 * time, so there is nothing to do here; always succeeds.
 */
static int
sunxi_mmc_bus_power(sdmmc_chipset_handle_t sch, uint32_t ocr)
{
	return 0;
}
791 
/*
 * Issue an "update clock" command (no data, no response) so the
 * controller latches new CLKCR settings, polling for completion.
 * Returns 0 on success or ETIMEDOUT, dumping registers with DPRINTF
 * on failure.
 */
static int
sunxi_mmc_update_clock(struct sunxi_mmc_softc *sc)
{
	uint32_t cmd;
	int retry;

	DPRINTF(sc->sc_dev, "update clock\n");

	cmd = SUNXI_MMC_CMD_START |
	      SUNXI_MMC_CMD_UPCLK_ONLY |
	      SUNXI_MMC_CMD_WAIT_PRE_OVER;
	MMC_WRITE(sc, SUNXI_MMC_CMD, cmd);
	/* The START bit self-clears when the controller is done. */
	retry = 100000;
	while (--retry > 0) {
		if (!(MMC_READ(sc, SUNXI_MMC_CMD) & SUNXI_MMC_CMD_START))
			break;
		delay(10);
	}

	if (retry == 0) {
		aprint_error_dev(sc->sc_dev, "timeout updating clock\n");
		DPRINTF(sc->sc_dev, "GCTRL: 0x%08x\n",
		    MMC_READ(sc, SUNXI_MMC_GCTRL));
		DPRINTF(sc->sc_dev, "CLKCR: 0x%08x\n",
		    MMC_READ(sc, SUNXI_MMC_CLKCR));
		DPRINTF(sc->sc_dev, "TIMEOUT: 0x%08x\n",
		    MMC_READ(sc, SUNXI_MMC_TIMEOUT));
		DPRINTF(sc->sc_dev, "WIDTH: 0x%08x\n",
		    MMC_READ(sc, SUNXI_MMC_WIDTH));
		DPRINTF(sc->sc_dev, "CMD: 0x%08x\n",
		    MMC_READ(sc, SUNXI_MMC_CMD));
		DPRINTF(sc->sc_dev, "MINT: 0x%08x\n",
		    MMC_READ(sc, SUNXI_MMC_MINT));
		DPRINTF(sc->sc_dev, "RINT: 0x%08x\n",
		    MMC_READ(sc, SUNXI_MMC_RINT));
		DPRINTF(sc->sc_dev, "STATUS: 0x%08x\n",
		    MMC_READ(sc, SUNXI_MMC_STATUS));
		return ETIMEDOUT;
	}

	return 0;
}
834 
835 static int
836 sunxi_mmc_bus_clock(sdmmc_chipset_handle_t sch, int freq, bool ddr)
837 {
838 	struct sunxi_mmc_softc *sc = sch;
839 	uint32_t clkcr, gctrl, ntsr;
840 	const u_int flags = sc->sc_config->flags;
841 	bool dbl = 0;
842 
843 	clkcr = MMC_READ(sc, SUNXI_MMC_CLKCR);
844 	if (clkcr & SUNXI_MMC_CLKCR_CARDCLKON) {
845 		clkcr &= ~SUNXI_MMC_CLKCR_CARDCLKON;
846 		if (flags & SUNXI_MMC_CLKCR_MASK_DATA0)
847 			clkcr |= SUNXI_MMC_CLKCR_MASK_DATA0;
848 		MMC_WRITE(sc, SUNXI_MMC_CLKCR, clkcr);
849 		if (sunxi_mmc_update_clock(sc) != 0)
850 			return 1;
851 		if (flags & SUNXI_MMC_CLKCR_MASK_DATA0) {
852 			clkcr = MMC_READ(sc, SUNXI_MMC_CLKCR);
853 			clkcr &= ~SUNXI_MMC_CLKCR_MASK_DATA0;
854 			MMC_WRITE(sc, SUNXI_MMC_CLKCR, clkcr);
855 		}
856 	}
857 
858 	if (freq) {
859 		/* For 8bits ddr in old timing modes, and all ddr in new
860 		 * timing modes, the module clock has to be 2x the card clock.
861 		 */
862 		if (ddr && ((flags & SUNXI_MMC_FLAG_NEW_TIMINGS) ||
863 		    sc->sc_mmc_width == 8))
864 			dbl = 1;
865 
866 		clkcr &= ~SUNXI_MMC_CLKCR_DIV;
867 		clkcr |= __SHIFTIN(dbl, SUNXI_MMC_CLKCR_DIV);
868 		MMC_WRITE(sc, SUNXI_MMC_CLKCR, clkcr);
869 
870 		if (flags & SUNXI_MMC_FLAG_NEW_TIMINGS) {
871 			ntsr = MMC_READ(sc, SUNXI_MMC_NTSR);
872 			ntsr |= SUNXI_MMC_NTSR_MODE_SELECT;
873 			MMC_WRITE(sc, SUNXI_MMC_NTSR, ntsr);
874 		}
875 
876 		if (flags & SUNXI_MMC_FLAG_CALIB_REG)
877 			MMC_WRITE(sc, SUNXI_MMC_SAMP_DL, SUNXI_MMC_SAMP_DL_SW_EN);
878 
879 		if (sunxi_mmc_update_clock(sc) != 0)
880 			return 1;
881 
882 		gctrl = MMC_READ(sc, SUNXI_MMC_GCTRL);
883 		if (ddr)
884 			gctrl |= SUNXI_MMC_GCTRL_DDR_MODE;
885 		else
886 			gctrl &= ~SUNXI_MMC_GCTRL_DDR_MODE;
887 		MMC_WRITE(sc, SUNXI_MMC_GCTRL, gctrl);
888 
889 		if (sunxi_mmc_set_clock(sc, freq, ddr, dbl) != 0)
890 			return 1;
891 
892 		clkcr |= SUNXI_MMC_CLKCR_CARDCLKON;
893 		if (flags & SUNXI_MMC_CLKCR_MASK_DATA0)
894 			clkcr |= SUNXI_MMC_CLKCR_MASK_DATA0;
895 		MMC_WRITE(sc, SUNXI_MMC_CLKCR, clkcr);
896 		if (sunxi_mmc_update_clock(sc) != 0)
897 			return 1;
898 		if (flags & SUNXI_MMC_CLKCR_MASK_DATA0) {
899 			clkcr = MMC_READ(sc, SUNXI_MMC_CLKCR);
900 			clkcr &= ~SUNXI_MMC_CLKCR_MASK_DATA0;
901 			MMC_WRITE(sc, SUNXI_MMC_CLKCR, clkcr);
902 		}
903 	}
904 
905 	return 0;
906 }
907 
908 static int
909 sunxi_mmc_bus_width(sdmmc_chipset_handle_t sch, int width)
910 {
911 	struct sunxi_mmc_softc *sc = sch;
912 
913 	DPRINTF(sc->sc_dev, "width = %d\n", width);
914 
915 	switch (width) {
916 	case 1:
917 		MMC_WRITE(sc, SUNXI_MMC_WIDTH, SUNXI_MMC_WIDTH_1);
918 		break;
919 	case 4:
920 		MMC_WRITE(sc, SUNXI_MMC_WIDTH, SUNXI_MMC_WIDTH_4);
921 		break;
922 	case 8:
923 		MMC_WRITE(sc, SUNXI_MMC_WIDTH, SUNXI_MMC_WIDTH_8);
924 		break;
925 	default:
926 		return 1;
927 	}
928 
929 	sc->sc_mmc_width = width;
930 
931 	return 0;
932 }
933 
/*
 * sdmmc bus_rod callback.  Open-drain mode switching is not supported;
 * return -1 to tell the sdmmc layer so.
 */
static int
sunxi_mmc_bus_rod(sdmmc_chipset_handle_t sch, int on)
{
	return -1;
}
939 
940 static int
941 sunxi_mmc_signal_voltage(sdmmc_chipset_handle_t sch, int signal_voltage)
942 {
943 	struct sunxi_mmc_softc *sc = sch;
944 	u_int uvol;
945 	int error;
946 
947 	if (sc->sc_reg_vqmmc == NULL)
948 		return 0;
949 
950 	switch (signal_voltage) {
951 	case SDMMC_SIGNAL_VOLTAGE_330:
952 		uvol = 3300000;
953 		break;
954 	case SDMMC_SIGNAL_VOLTAGE_180:
955 		uvol = 1800000;
956 		break;
957 	default:
958 		return EINVAL;
959 	}
960 
961 	error = fdtbus_regulator_supports_voltage(sc->sc_reg_vqmmc, uvol, uvol);
962 	if (error != 0)
963 		return 0;
964 
965 	error = fdtbus_regulator_set_voltage(sc->sc_reg_vqmmc, uvol, uvol);
966 	if (error != 0)
967 		return error;
968 
969 	return fdtbus_regulator_enable(sc->sc_reg_vqmmc);
970 }
971 
972 static int
973 sunxi_mmc_execute_tuning(sdmmc_chipset_handle_t sch, int timing)
974 {
975 	switch (timing) {
976 	case SDMMC_TIMING_MMC_HS200:
977 		break;
978 	default:
979 		return EINVAL;
980 	}
981 
982 	return 0;
983 }
984 
/*
 * Build the IDMA descriptor chain for a data command and start the
 * controller's internal DMA engine.  Returns 0 on success, E2BIG when a
 * bounced transfer does not fit in the bounce buffer, or EIO when the
 * transfer requires more descriptors than were allocated at attach time.
 */
static int
sunxi_mmc_dma_prepare(struct sunxi_mmc_softc *sc, struct sdmmc_command *cmd)
{
	struct sunxi_mmc_idma_descriptor *dma = sc->sc_idma_desc;
	bus_addr_t desc_paddr = sc->sc_idma_map->dm_segs[0].ds_addr;
	bus_dmamap_t map;
	bus_size_t off;
	int desc, resid, seg;
	uint32_t val;

	/*
	 * If the command includes a dma map use it, otherwise we need to
	 * bounce. This can happen for SDIO IO_RW_EXTENDED (CMD53) commands.
	 */
	if (cmd->c_dmamap) {
		map = cmd->c_dmamap;
	} else {
		if (cmd->c_datalen > sc->sc_dmabounce_buflen)
			return E2BIG;
		map = sc->sc_dmabounce_map;

		if (ISSET(cmd->c_flags, SCF_CMD_READ)) {
			/* Read: device fills the bounce buffer; clear it first. */
			memset(sc->sc_dmabounce_buf, 0, cmd->c_datalen);
			bus_dmamap_sync(sc->sc_dmat, sc->sc_dmabounce_map,
			    0, cmd->c_datalen, BUS_DMASYNC_PREREAD);
		} else {
			/* Write: stage the caller's data into the bounce buffer. */
			memcpy(sc->sc_dmabounce_buf, cmd->c_data,
			    cmd->c_datalen);
			bus_dmamap_sync(sc->sc_dmat, sc->sc_dmabounce_map,
			    0, cmd->c_datalen, BUS_DMASYNC_PREWRITE);
		}
	}

	/*
	 * Walk the DMA segments and split each one into descriptors of at
	 * most idma_xferlen bytes.  The first descriptor gets FD, the one
	 * that exhausts c_resid gets LD/ER and a NULL next pointer; all
	 * intermediate descriptors chain to their physical successor.
	 */
	desc = 0;
	for (seg = 0; seg < map->dm_nsegs; seg++) {
		bus_addr_t paddr = map->dm_segs[seg].ds_addr;
		bus_size_t len = map->dm_segs[seg].ds_len;
		resid = uimin(len, cmd->c_resid);
		off = 0;
		while (resid > 0) {
			/* Out of descriptors; detected after the loop. */
			if (desc == sc->sc_idma_ndesc)
				break;
			len = uimin(sc->sc_config->idma_xferlen, resid);
			dma[desc].dma_buf_size = htole32(len);
			dma[desc].dma_buf_addr = htole32(paddr + off);
			/* OWN hands the descriptor to the IDMA engine. */
			dma[desc].dma_config = htole32(SUNXI_MMC_IDMA_CONFIG_CH |
					       SUNXI_MMC_IDMA_CONFIG_OWN);
			cmd->c_resid -= len;
			resid -= len;
			off += len;
			if (desc == 0) {
				/* First descriptor of the chain. */
				dma[desc].dma_config |= htole32(SUNXI_MMC_IDMA_CONFIG_FD);
			}
			if (cmd->c_resid == 0) {
				/* Last descriptor: end of ring, no successor. */
				dma[desc].dma_config |= htole32(SUNXI_MMC_IDMA_CONFIG_LD);
				dma[desc].dma_config |= htole32(SUNXI_MMC_IDMA_CONFIG_ER);
				dma[desc].dma_next = 0;
			} else {
				dma[desc].dma_config |=
				    htole32(SUNXI_MMC_IDMA_CONFIG_DIC);
				dma[desc].dma_next = htole32(
				    desc_paddr + ((desc+1) *
				    sizeof(struct sunxi_mmc_idma_descriptor)));
			}
			++desc;
		}
	}
	if (desc == sc->sc_idma_ndesc) {
		aprint_error_dev(sc->sc_dev,
		    "not enough descriptors for %d byte transfer! "
		    "there are %u segments with a max xfer length of %u\n",
		    cmd->c_datalen, map->dm_nsegs, sc->sc_config->idma_xferlen);
		return EIO;
	}

	/* Flush the descriptor list before the device reads it. */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_idma_map, 0,
	    sc->sc_idma_size, BUS_DMASYNC_PREWRITE);

	/* Descriptor list base address and FIFO threshold. */
	MMC_WRITE(sc, SUNXI_MMC_DLBA, desc_paddr);
	MMC_WRITE(sc, SUNXI_MMC_FTRGLEVEL, sc->sc_config->dma_ftrglevel);

	/* Enable global DMA mode, then pulse the DMA reset bit. */
	val = MMC_READ(sc, SUNXI_MMC_GCTRL);
	val |= SUNXI_MMC_GCTRL_DMAEN;
	MMC_WRITE(sc, SUNXI_MMC_GCTRL, val);
	val |= SUNXI_MMC_GCTRL_DMARESET;
	MMC_WRITE(sc, SUNXI_MMC_GCTRL, val);

	/* Soft-reset the IDMA controller itself. */
	MMC_WRITE(sc, SUNXI_MMC_DMAC, SUNXI_MMC_DMAC_SOFTRESET);
	/* Only reads need the receive-complete IDMA interrupt. */
	if (ISSET(cmd->c_flags, SCF_CMD_READ))
		val = SUNXI_MMC_IDST_RECEIVE_INT;
	else
		val = 0;
	MMC_WRITE(sc, SUNXI_MMC_IDIE, val);
	MMC_WRITE(sc, SUNXI_MMC_DMAC,
	    SUNXI_MMC_DMAC_IDMA_ON|SUNXI_MMC_DMAC_FIX_BURST);

	return 0;
}
1083 
1084 static void
1085 sunxi_mmc_dma_complete(struct sunxi_mmc_softc *sc, struct sdmmc_command *cmd)
1086 {
1087 	MMC_WRITE(sc, SUNXI_MMC_DMAC, 0);
1088 	MMC_WRITE(sc, SUNXI_MMC_IDIE, 0);
1089 
1090 	bus_dmamap_sync(sc->sc_dmat, sc->sc_idma_map, 0,
1091 	    sc->sc_idma_size, BUS_DMASYNC_POSTWRITE);
1092 
1093 	if (cmd->c_dmamap == NULL) {
1094 		if (ISSET(cmd->c_flags, SCF_CMD_READ)) {
1095 			bus_dmamap_sync(sc->sc_dmat, sc->sc_dmabounce_map,
1096 			    0, cmd->c_datalen, BUS_DMASYNC_POSTREAD);
1097 			memcpy(cmd->c_data, sc->sc_dmabounce_buf,
1098 			    cmd->c_datalen);
1099 		} else {
1100 			bus_dmamap_sync(sc->sc_dmat, sc->sc_dmabounce_map,
1101 			    0, cmd->c_datalen, BUS_DMASYNC_POSTWRITE);
1102 		}
1103 	}
1104 }
1105 
/*
 * Submit a command to the controller and sleep until it completes or
 * times out.  Completion status is delivered in cmd->c_error and
 * SCF_ITSDONE; this function never returns before the command is done.
 * Runs under sc_intr_lock; the interrupt handler wakes us via
 * sc_intr_cv.
 */
static void
sunxi_mmc_exec_command(sdmmc_chipset_handle_t sch, struct sdmmc_command *cmd)
{
	struct sunxi_mmc_softc *sc = sch;
	uint32_t cmdval = SUNXI_MMC_CMD_START;
	uint32_t imask;
	int retry, error;

	DPRINTF(sc->sc_dev,
	    "opcode %d flags 0x%x data %p datalen %d blklen %d\n",
	    cmd->c_opcode, cmd->c_flags, cmd->c_data, cmd->c_datalen,
	    cmd->c_blklen);

	mutex_enter(&sc->sc_intr_lock);
	/* Only one command may be in flight at a time. */
	if (sc->sc_curcmd != NULL) {
		device_printf(sc->sc_dev,
		    "WARNING: driver submitted a command while the controller was busy\n");
		cmd->c_error = EBUSY;
		SET(cmd->c_flags, SCF_ITSDONE);
		mutex_exit(&sc->sc_intr_lock);
		return;
	}
	sc->sc_curcmd = cmd;

	/* Translate sdmmc command flags into controller CMD register bits. */
	if (cmd->c_opcode == 0)
		cmdval |= SUNXI_MMC_CMD_SEND_INIT_SEQ;
	if (cmd->c_flags & SCF_RSP_PRESENT)
		cmdval |= SUNXI_MMC_CMD_RSP_EXP;
	if (cmd->c_flags & SCF_RSP_136)
		cmdval |= SUNXI_MMC_CMD_LONG_RSP;
	if (cmd->c_flags & SCF_RSP_CRC)
		cmdval |= SUNXI_MMC_CMD_CHECK_RSP_CRC;

	imask = SUNXI_MMC_INT_ERROR | SUNXI_MMC_INT_CMD_DONE;

	if (cmd->c_datalen > 0) {
		unsigned int nblks;

		cmdval |= SUNXI_MMC_CMD_DATA_EXP | SUNXI_MMC_CMD_WAIT_PRE_OVER;
		if (!ISSET(cmd->c_flags, SCF_CMD_READ)) {
			cmdval |= SUNXI_MMC_CMD_WRITE;
		}

		/* Round the block count up for partial trailing blocks. */
		nblks = cmd->c_datalen / cmd->c_blklen;
		if (nblks == 0 || (cmd->c_datalen % cmd->c_blklen) != 0)
			++nblks;

		/*
		 * Multi-block transfers use the controller's auto CMD12;
		 * single-block transfers wait for data-over instead.
		 */
		if (nblks > 1) {
			cmdval |= SUNXI_MMC_CMD_SEND_AUTO_STOP;
			imask |= SUNXI_MMC_INT_AUTO_CMD_DONE;
		} else {
			imask |= SUNXI_MMC_INT_DATA_OVER;
		}

		MMC_WRITE(sc, SUNXI_MMC_BLKSZ, cmd->c_blklen);
		MMC_WRITE(sc, SUNXI_MMC_BYTECNT, nblks * cmd->c_blklen);
	}

	/* Unmask the interrupts we need and clear any stale raw status. */
	MMC_WRITE(sc, SUNXI_MMC_IMASK, imask | sc->sc_intr_card);
	MMC_WRITE(sc, SUNXI_MMC_RINT, 0x7fff);

	MMC_WRITE(sc, SUNXI_MMC_A12A,
	    (cmdval & SUNXI_MMC_CMD_SEND_AUTO_STOP) ? 0 : 0xffff);

	MMC_WRITE(sc, SUNXI_MMC_ARG, cmd->c_arg);

	/* Set up DMA for data commands before starting the command. */
	cmd->c_resid = cmd->c_datalen;
	if (cmd->c_resid > 0) {
		cmd->c_error = sunxi_mmc_dma_prepare(sc, cmd);
		if (cmd->c_error != 0) {
			SET(cmd->c_flags, SCF_ITSDONE);
			goto done;
		}
		sc->sc_wait_dma = ISSET(cmd->c_flags, SCF_CMD_READ);
		sc->sc_wait_data = true;
	} else {
		sc->sc_wait_dma = false;
		sc->sc_wait_data = false;
	}
	sc->sc_wait_cmd = true;

	DPRINTF(sc->sc_dev, "cmdval = %08x\n", cmdval);

	/* Fire the command; the interrupt handler signals completion. */
	MMC_WRITE(sc, SUNXI_MMC_CMD, cmdval | cmd->c_opcode);

	/* Sleep until done, with a 15 second overall timeout. */
	struct bintime timeout = { .sec = 15, .frac = 0 };
	const struct bintime epsilon = { .sec = 1, .frac = 0 };
	while (!ISSET(cmd->c_flags, SCF_ITSDONE)) {
		error = cv_timedwaitbt(&sc->sc_intr_cv,
		    &sc->sc_intr_lock, &timeout, &epsilon);
		if (error != 0) {
			cmd->c_error = error;
			SET(cmd->c_flags, SCF_ITSDONE);
			goto done;
		}
	}

	if (cmd->c_error == 0 && cmd->c_datalen > 0)
		sunxi_mmc_dma_complete(sc, cmd);

	if (cmd->c_flags & SCF_RSP_PRESENT) {
		if (cmd->c_flags & SCF_RSP_136) {
			cmd->c_resp[0] = MMC_READ(sc, SUNXI_MMC_RESP0);
			cmd->c_resp[1] = MMC_READ(sc, SUNXI_MMC_RESP1);
			cmd->c_resp[2] = MMC_READ(sc, SUNXI_MMC_RESP2);
			cmd->c_resp[3] = MMC_READ(sc, SUNXI_MMC_RESP3);
			/*
			 * Shift the 128-bit response right by 8 bits,
			 * dropping the low byte across the four words.
			 */
			if (cmd->c_flags & SCF_RSP_CRC) {
				cmd->c_resp[0] = (cmd->c_resp[0] >> 8) |
				    (cmd->c_resp[1] << 24);
				cmd->c_resp[1] = (cmd->c_resp[1] >> 8) |
				    (cmd->c_resp[2] << 24);
				cmd->c_resp[2] = (cmd->c_resp[2] >> 8) |
				    (cmd->c_resp[3] << 24);
				cmd->c_resp[3] = (cmd->c_resp[3] >> 8);
			}
		} else {
			cmd->c_resp[0] = MMC_READ(sc, SUNXI_MMC_RESP0);
		}
	}

done:
	KASSERT(ISSET(cmd->c_flags, SCF_ITSDONE));
	/* Restore the interrupt mask to card interrupts only and ack status. */
	MMC_WRITE(sc, SUNXI_MMC_IMASK, sc->sc_intr_card);
	MMC_WRITE(sc, SUNXI_MMC_RINT, 0x7fff);
	MMC_WRITE(sc, SUNXI_MMC_IDST, 0x337);
	sc->sc_curcmd = NULL;
	mutex_exit(&sc->sc_intr_lock);

	if (cmd->c_error) {
		DPRINTF(sc->sc_dev, "i/o error %d\n", cmd->c_error);
		/* Recover the controller: reset DMA and FIFO, reload clock. */
		MMC_WRITE(sc, SUNXI_MMC_GCTRL,
		    MMC_READ(sc, SUNXI_MMC_GCTRL) |
		      SUNXI_MMC_GCTRL_DMARESET | SUNXI_MMC_GCTRL_FIFORESET);
		/* Wait up to ~10ms for the reset bit to self-clear. */
		for (retry = 0; retry < 1000; retry++) {
			if (!(MMC_READ(sc, SUNXI_MMC_GCTRL) & SUNXI_MMC_GCTRL_RESET))
				break;
			delay(10);
		}
		sunxi_mmc_update_clock(sc);
	}

	/* Always leave the FIFO clean for the next command. */
	MMC_WRITE(sc, SUNXI_MMC_GCTRL,
	    MMC_READ(sc, SUNXI_MMC_GCTRL) | SUNXI_MMC_GCTRL_FIFORESET);
}
1250 
1251 static void
1252 sunxi_mmc_card_enable_intr(sdmmc_chipset_handle_t sch, int enable)
1253 {
1254 	struct sunxi_mmc_softc *sc = sch;
1255 	uint32_t imask;
1256 
1257 	mutex_enter(&sc->sc_intr_lock);
1258 	imask = MMC_READ(sc, SUNXI_MMC_IMASK);
1259 	if (enable)
1260 		imask |= SUNXI_MMC_INT_SDIO_INT;
1261 	else
1262 		imask &= ~SUNXI_MMC_INT_SDIO_INT;
1263 	sc->sc_intr_card = imask & SUNXI_MMC_INT_SDIO_INT;
1264 	MMC_WRITE(sc, SUNXI_MMC_IMASK, imask);
1265 	mutex_exit(&sc->sc_intr_lock);
1266 }
1267 
1268 static void
1269 sunxi_mmc_card_intr_ack(sdmmc_chipset_handle_t sch)
1270 {
1271 	struct sunxi_mmc_softc *sc = sch;
1272 	uint32_t imask;
1273 
1274 	mutex_enter(&sc->sc_intr_lock);
1275 	imask = MMC_READ(sc, SUNXI_MMC_IMASK);
1276 	MMC_WRITE(sc, SUNXI_MMC_IMASK, imask | sc->sc_intr_card);
1277 	mutex_exit(&sc->sc_intr_lock);
1278 }
1279