1 /*	$NetBSD: sdhc.c,v 1.117 2022/11/02 10:38:04 jmcneill Exp $	*/
2 /*	$OpenBSD: sdhc.c,v 1.25 2009/01/13 19:44:20 grange Exp $	*/
3 
4 /*
5  * Copyright (c) 2006 Uwe Stuehler <uwe@openbsd.org>
6  *
7  * Permission to use, copy, modify, and distribute this software for any
8  * purpose with or without fee is hereby granted, provided that the above
9  * copyright notice and this permission notice appear in all copies.
10  *
11  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 /*
21  * SD Host Controller driver based on the SD Host Controller Standard
22  * Simplified Specification Version 1.00 (www.sdcard.org).
23  */
24 
25 #include <sys/cdefs.h>
26 __KERNEL_RCSID(0, "$NetBSD: sdhc.c,v 1.117 2022/11/02 10:38:04 jmcneill Exp $");
27 
28 #ifdef _KERNEL_OPT
29 #include "opt_sdmmc.h"
30 #endif
31 
32 #include <sys/param.h>
33 #include <sys/device.h>
34 #include <sys/kernel.h>
35 #include <sys/malloc.h>
36 #include <sys/systm.h>
37 #include <sys/mutex.h>
38 #include <sys/condvar.h>
39 #include <sys/atomic.h>
40 
41 #include <dev/sdmmc/sdhcreg.h>
42 #include <dev/sdmmc/sdhcvar.h>
43 #include <dev/sdmmc/sdmmcchip.h>
44 #include <dev/sdmmc/sdmmcreg.h>
45 #include <dev/sdmmc/sdmmcvar.h>
46 
47 #ifdef SDHC_DEBUG
48 int sdhcdebug = 1;
49 #define DPRINTF(n,s)	do { if ((n) <= sdhcdebug) printf s; } while (0)
50 void	sdhc_dump_regs(struct sdhc_host *);
51 #else
52 #define DPRINTF(n,s)	do {} while (0)
53 #endif
54 
55 #define SDHC_COMMAND_TIMEOUT	hz
56 #define SDHC_BUFFER_TIMEOUT	hz
57 #define SDHC_TRANSFER_TIMEOUT	hz
58 #define SDHC_DMA_TIMEOUT	(hz*3)
59 #define SDHC_TUNING_TIMEOUT	hz
60 
61 struct sdhc_host {
62 	struct sdhc_softc *sc;		/* host controller device */
63 
64 	bus_space_tag_t iot;		/* host register set tag */
65 	bus_space_handle_t ioh;		/* host register set handle */
66 	bus_size_t ios;			/* host register space size */
67 	bus_dma_tag_t dmat;		/* host DMA tag */
68 
69 	device_t sdmmc;			/* generic SD/MMC device */
70 
71 	u_int clkbase;			/* base clock frequency in kHz */
72 	int maxblklen;			/* maximum block length */
73 	uint32_t ocr;			/* OCR value from capabilities */
74 
75 	uint8_t regs[14];		/* host controller state */
76 
77 	uint16_t intr_status;		/* soft interrupt status */
78 	uint16_t intr_error_status;	/* soft error status */
79 	kmutex_t intr_lock;
80 	kmutex_t bus_clock_lock;
81 	kcondvar_t intr_cv;
82 
83 	callout_t tuning_timer;
84 	int tuning_timing;
85 	u_int tuning_timer_count;
86 	u_int tuning_timer_pending;
87 
88 	int specver;			/* spec. version */
89 
90 	uint32_t flags;			/* flags for this host */
91 #define SHF_USE_DMA		0x0001
92 #define SHF_USE_4BIT_MODE	0x0002
93 #define SHF_USE_8BIT_MODE	0x0004
94 #define SHF_MODE_DMAEN		0x0008 /* needs SDHC_DMA_ENABLE in mode */
95 #define SHF_USE_ADMA2_32	0x0010
96 #define SHF_USE_ADMA2_64	0x0020
97 #define SHF_USE_ADMA2_MASK	0x0030
98 
99 	bus_dmamap_t		adma_map;
100 	bus_dma_segment_t	adma_segs[1];
101 	void			*adma2;
102 
103 	uint8_t			vdd;	/* last vdd setting */
104 };
105 
106 #define HDEVNAME(hp)	(device_xname((hp)->sc->sc_dev))
107 
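/*
 * Register access helpers.  Controllers with SDHC_FLAG_32BIT_ACCESS only
 * tolerate 32-bit MMIO, so the 8- and 16-bit accessors below emulate
 * narrow accesses with shifts and read-modify-write cycles on the
 * containing 32-bit word.
 */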
108 static uint8_t
109 hread1(struct sdhc_host *hp, bus_size_t reg)
110 {
111 
112 	if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_32BIT_ACCESS))
113 		return bus_space_read_1(hp->iot, hp->ioh, reg);
114 	return bus_space_read_4(hp->iot, hp->ioh, reg & -4) >> (8 * (reg & 3));
115 }
116 
117 static uint16_t
118 hread2(struct sdhc_host *hp, bus_size_t reg)
119 {
120 
121 	if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_32BIT_ACCESS))
122 		return bus_space_read_2(hp->iot, hp->ioh, reg);
123 	return bus_space_read_4(hp->iot, hp->ioh, reg & -4) >> (8 * (reg & 2));
124 }
125 
126 #define HREAD1(hp, reg)		hread1(hp, reg)
127 #define HREAD2(hp, reg)		hread2(hp, reg)
128 #define HREAD4(hp, reg)		\
129 	(bus_space_read_4((hp)->iot, (hp)->ioh, (reg)))
130 
131 
132 static void
133 hwrite1(struct sdhc_host *hp, bus_size_t o, uint8_t val)
134 {
135 
136 	if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_32BIT_ACCESS)) {
137 		bus_space_write_1(hp->iot, hp->ioh, o, val);
138 	} else {
139 		const size_t shift = 8 * (o & 3);
140 		o &= -4;
141 		uint32_t tmp = bus_space_read_4(hp->iot, hp->ioh, o);
142 		tmp = (val << shift) | (tmp & ~(0xffU << shift));
143 		bus_space_write_4(hp->iot, hp->ioh, o, tmp);
144 	}
145 }
146 
147 static void
148 hwrite2(struct sdhc_host *hp, bus_size_t o, uint16_t val)
149 {
150 
151 	if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_32BIT_ACCESS)) {
152 		bus_space_write_2(hp->iot, hp->ioh, o, val);
153 	} else {
154 		const size_t shift = 8 * (o & 2);
155 		o &= -4;
156 		uint32_t tmp = bus_space_read_4(hp->iot, hp->ioh, o);
157 		tmp = (val << shift) | (tmp & ~(0xffffU << shift));
158 		bus_space_write_4(hp->iot, hp->ioh, o, tmp);
159 	}
160 }
161 
162 static void
163 hwrite4(struct sdhc_host *hp, bus_size_t o, uint32_t val)
164 {
165 
166 	bus_space_write_4(hp->iot, hp->ioh, o, val);
167 }
168 
169 #define HWRITE1(hp, reg, val)		hwrite1(hp, reg, val)
170 #define HWRITE2(hp, reg, val)		hwrite2(hp, reg, val)
171 #define HWRITE4(hp, reg, val)		hwrite4(hp, reg, val)
172 
173 #define HCLR1(hp, reg, bits)						\
174 	do if ((bits) != 0) HWRITE1((hp), (reg), HREAD1((hp), (reg)) & ~(bits)); while (0)
175 #define HCLR2(hp, reg, bits)						\
176 	do if ((bits) != 0) HWRITE2((hp), (reg), HREAD2((hp), (reg)) & ~(bits)); while (0)
177 #define HCLR4(hp, reg, bits)						\
178 	do if ((bits) != 0) HWRITE4((hp), (reg), HREAD4((hp), (reg)) & ~(bits)); while (0)
179 #define HSET1(hp, reg, bits)						\
180 	do if ((bits) != 0) HWRITE1((hp), (reg), HREAD1((hp), (reg)) | (bits)); while (0)
181 #define HSET2(hp, reg, bits)						\
182 	do if ((bits) != 0) HWRITE2((hp), (reg), HREAD2((hp), (reg)) | (bits)); while (0)
183 #define HSET4(hp, reg, bits)						\
184 	do if ((bits) != 0) HWRITE4((hp), (reg), HREAD4((hp), (reg)) | (bits)); while (0)
185 
186 static int	sdhc_host_reset(sdmmc_chipset_handle_t);
187 static int	sdhc_host_reset1(sdmmc_chipset_handle_t);
188 static uint32_t	sdhc_host_ocr(sdmmc_chipset_handle_t);
189 static int	sdhc_host_maxblklen(sdmmc_chipset_handle_t);
190 static int	sdhc_card_detect(sdmmc_chipset_handle_t);
191 static int	sdhc_write_protect(sdmmc_chipset_handle_t);
192 static int	sdhc_bus_power(sdmmc_chipset_handle_t, uint32_t);
193 static int	sdhc_bus_clock_ddr(sdmmc_chipset_handle_t, int, bool);
194 static int	sdhc_bus_width(sdmmc_chipset_handle_t, int);
195 static int	sdhc_bus_rod(sdmmc_chipset_handle_t, int);
196 static void	sdhc_card_enable_intr(sdmmc_chipset_handle_t, int);
197 static void	sdhc_card_intr_ack(sdmmc_chipset_handle_t);
198 static void	sdhc_exec_command(sdmmc_chipset_handle_t,
199 		    struct sdmmc_command *);
200 static int	sdhc_signal_voltage(sdmmc_chipset_handle_t, int);
201 static int	sdhc_execute_tuning1(struct sdhc_host *, int);
202 static int	sdhc_execute_tuning(sdmmc_chipset_handle_t, int);
203 static void	sdhc_tuning_timer(void *);
204 static void	sdhc_hw_reset(sdmmc_chipset_handle_t);
205 static int	sdhc_start_command(struct sdhc_host *, struct sdmmc_command *);
206 static int	sdhc_wait_state(struct sdhc_host *, uint32_t, uint32_t);
207 static int	sdhc_soft_reset(struct sdhc_host *, int);
208 static int	sdhc_wait_intr(struct sdhc_host *, int, int, bool);
209 static void	sdhc_transfer_data(struct sdhc_host *, struct sdmmc_command *);
210 static int	sdhc_transfer_data_dma(struct sdhc_host *, struct sdmmc_command *);
211 static int	sdhc_transfer_data_pio(struct sdhc_host *, struct sdmmc_command *);
212 static void	sdhc_read_data_pio(struct sdhc_host *, uint8_t *, u_int);
213 static void	sdhc_write_data_pio(struct sdhc_host *, uint8_t *, u_int);
214 static void	esdhc_read_data_pio(struct sdhc_host *, uint8_t *, u_int);
215 static void	esdhc_write_data_pio(struct sdhc_host *, uint8_t *, u_int);
216 
217 static struct sdmmc_chip_functions sdhc_functions = {
218 	/* host controller reset */
219 	.host_reset = sdhc_host_reset,
220 
221 	/* host controller capabilities */
222 	.host_ocr = sdhc_host_ocr,
223 	.host_maxblklen = sdhc_host_maxblklen,
224 
225 	/* card detection */
226 	.card_detect = sdhc_card_detect,
227 
228 	/* write protect */
229 	.write_protect = sdhc_write_protect,
230 
231 	/* bus power, clock frequency, width and ROD (open-drain/push-pull) */
232 	.bus_power = sdhc_bus_power,
233 	.bus_clock = NULL,	/* see sdhc_bus_clock_ddr */
234 	.bus_width = sdhc_bus_width,
235 	.bus_rod = sdhc_bus_rod,
236 
237 	/* command execution */
238 	.exec_command = sdhc_exec_command,
239 
240 	/* card interrupt */
241 	.card_enable_intr = sdhc_card_enable_intr,
242 	.card_intr_ack = sdhc_card_intr_ack,
243 
244 	/* UHS functions */
245 	.signal_voltage = sdhc_signal_voltage,
246 	.bus_clock_ddr = sdhc_bus_clock_ddr,
247 	.execute_tuning = sdhc_execute_tuning,
248 	.hw_reset = sdhc_hw_reset,
249 };
250 
251 static int
252 sdhc_cfprint(void *aux, const char *pnp)
253 {
254 	const struct sdmmcbus_attach_args * const saa = aux;
255 	const struct sdhc_host * const hp = saa->saa_sch;
256 
257 	if (pnp) {
258 		aprint_normal("sdmmc at %s", pnp);
259 	}
260 	for (size_t host = 0; host < hp->sc->sc_nhosts; host++) {
261 		if (hp->sc->sc_host[host] == hp) {
262 			aprint_normal(" slot %zu", host);
263 		}
264 	}
265 
266 	return UNCONF;
267 }
268 
269 /*
270  * Called by attachment driver.  For each SD card slot there is one SD
271  * host controller standard register set. (1.3)
272  */
273 int
274 sdhc_host_found(struct sdhc_softc *sc, bus_space_tag_t iot,
275     bus_space_handle_t ioh, bus_size_t iosize)
276 {
277 	struct sdmmcbus_attach_args saa;
278 	struct sdhc_host *hp;
279 	uint32_t caps, caps2;
280 	uint16_t sdhcver;
281 	int error;
282 
283 	/* Allocate one more host structure. */
284 	hp = malloc(sizeof(struct sdhc_host), M_DEVBUF, M_WAITOK|M_ZERO);
285 	if (hp == NULL) {
286 		aprint_error_dev(sc->sc_dev,
287 		    "couldn't alloc memory (sdhc host)\n");
288 		goto err1;
289 	}
290 	sc->sc_host[sc->sc_nhosts++] = hp;
291 
292 	/* Fill in the new host structure. */
293 	hp->sc = sc;
294 	hp->iot = iot;
295 	hp->ioh = ioh;
296 	hp->ios = iosize;
297 	hp->dmat = sc->sc_dmat;
298 
299 	mutex_init(&hp->intr_lock, MUTEX_DEFAULT, IPL_SDMMC);
300 	mutex_init(&hp->bus_clock_lock, MUTEX_DEFAULT, IPL_NONE);
301 	cv_init(&hp->intr_cv, "sdhcintr");
302 	callout_init(&hp->tuning_timer, CALLOUT_MPSAFE);
303 	callout_setfunc(&hp->tuning_timer, sdhc_tuning_timer, hp);
304 
305 	if (ISSET(hp->sc->sc_flags, SDHC_FLAG_USDHC)) {
306 		sdhcver = SDHC_SPEC_VERS_300 << SDHC_SPEC_VERS_SHIFT;
307 	} else if (ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED)) {
308 		sdhcver = HREAD4(hp, SDHC_ESDHC_HOST_CTL_VERSION);
309 	} else if (iosize <= SDHC_HOST_CTL_VERSION) {
310 		sdhcver = SDHC_SPEC_NOVERS << SDHC_SPEC_VERS_SHIFT;
311 	} else {
312 		sdhcver = HREAD2(hp, SDHC_HOST_CTL_VERSION);
313 	}
314 	aprint_normal_dev(sc->sc_dev, "SDHC ");
315 	hp->specver = SDHC_SPEC_VERSION(sdhcver);
316 	switch (SDHC_SPEC_VERSION(sdhcver)) {
317 	case SDHC_SPEC_VERS_100:
318 		aprint_normal("1.0");
319 		break;
320 	case SDHC_SPEC_VERS_200:
321 		aprint_normal("2.0");
322 		break;
323 	case SDHC_SPEC_VERS_300:
324 		aprint_normal("3.0");
325 		break;
326 	case SDHC_SPEC_VERS_400:
327 		aprint_normal("4.0");
328 		break;
329 	case SDHC_SPEC_VERS_410:
330 		aprint_normal("4.1");
331 		break;
332 	case SDHC_SPEC_VERS_420:
333 		aprint_normal("4.2");
334 		break;
335 	case SDHC_SPEC_NOVERS:
336 		hp->specver = -1;
337 		aprint_normal("NO-VERS");
338 		break;
339 	default:
340 		aprint_normal("unknown version(0x%x)",
341 		    SDHC_SPEC_VERSION(sdhcver));
342 		break;
343 	}
344 	if (SDHC_SPEC_VERSION(sdhcver) != SDHC_SPEC_NOVERS)
345 		aprint_normal(", rev %u", SDHC_VENDOR_VERSION(sdhcver));
346 
347 	/*
348 	 * Reset the host controller and enable interrupts.
349 	 */
350 	(void)sdhc_host_reset(hp);
351 
352 	if (ISSET(hp->sc->sc_flags, SDHC_FLAG_USDHC)) {
353 		/* init uSDHC registers */
354 		HWRITE4(hp, SDHC_MMC_BOOT, 0);
355 		HWRITE4(hp, SDHC_HOST_CTL, SDHC_USDHC_BURST_LEN_EN |
356 		    SDHC_USDHC_HOST_CTL_RESV23 | SDHC_USDHC_EMODE_LE);
357 		HWRITE4(hp, SDHC_WATERMARK_LEVEL,
358 		    (0x10 << SDHC_WATERMARK_WR_BRST_SHIFT) |
359 		    (0x40 << SDHC_WATERMARK_WRITE_SHIFT) |
360 		    (0x10 << SDHC_WATERMARK_RD_BRST_SHIFT) |
361 		    (0x40 << SDHC_WATERMARK_READ_SHIFT));
362 		HSET4(hp, SDHC_VEND_SPEC,
363 		    SDHC_VEND_SPEC_MBO |
364 		    SDHC_VEND_SPEC_CARD_CLK_SOFT_EN |
365 		    SDHC_VEND_SPEC_IPG_PERCLK_SOFT_EN |
366 		    SDHC_VEND_SPEC_HCLK_SOFT_EN |
367 		    SDHC_VEND_SPEC_IPG_CLK_SOFT_EN |
368 		    SDHC_VEND_SPEC_AC12_WR_CHKBUSY_EN |
369 		    SDHC_VEND_SPEC_FRC_SDCLK_ON);
370 	}
371 
372 	/* Determine host capabilities. */
373 	if (ISSET(sc->sc_flags, SDHC_FLAG_HOSTCAPS)) {
374 		caps = sc->sc_caps;
375 		caps2 = sc->sc_caps2;
376 	} else if (ISSET(hp->sc->sc_flags, SDHC_FLAG_USDHC)) {
377 		/* uSDHC capability register is a little different */
378 		caps = HREAD4(hp, SDHC_CAPABILITIES);
379 		caps |= SDHC_8BIT_SUPP;
380 		if (caps & SDHC_ADMA1_SUPP)
381 			caps |= SDHC_ADMA2_SUPP;
382 		sc->sc_caps = caps;
383 		/* uSDHC has no SDHC_CAPABILITIES2 register */
384 		caps2 = sc->sc_caps2 = SDHC_SDR50_SUPP | SDHC_DDR50_SUPP;
385 	} else {
386 		caps = sc->sc_caps = HREAD4(hp, SDHC_CAPABILITIES);
387 		if (hp->specver >= SDHC_SPEC_VERS_300) {
388 			caps2 = sc->sc_caps2 = HREAD4(hp, SDHC_CAPABILITIES2);
389 		} else {
390 			caps2 = sc->sc_caps2 = 0;
391 		}
392 	}
393 
394 	aprint_verbose(", caps <%08x/%08x>", caps, caps2);
395 
396 	const u_int retuning_mode = (caps2 >> SDHC_RETUNING_MODES_SHIFT) &
397 	    SDHC_RETUNING_MODES_MASK;
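	/*
	 * Re-tuning mode 1 leaves periodic re-tuning to the host driver;
	 * the capability field encodes the timer period as 2^(n-1) seconds,
	 * with 0xf meaning the period must be obtained some other way.
	 */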
398 	if (retuning_mode == SDHC_RETUNING_MODE_1) {
399 		hp->tuning_timer_count = (caps2 >> SDHC_TIMER_COUNT_SHIFT) &
400 		    SDHC_TIMER_COUNT_MASK;
401 		if (hp->tuning_timer_count == 0xf)
402 			hp->tuning_timer_count = 0;
403 		if (hp->tuning_timer_count)
404 			hp->tuning_timer_count =
405 			    1 << (hp->tuning_timer_count - 1);
406 	}
407 
408 	/*
409 	 * Use DMA if the host system and the controller support it.
410 	 * Supports an integrated or external DMA engine, with or without
411 	 * SDHC_DMA_ENABLE in the command.
412 	 */
413 	if (ISSET(sc->sc_flags, SDHC_FLAG_FORCE_DMA) ||
414 	    (ISSET(sc->sc_flags, SDHC_FLAG_USE_DMA) &&
415 	     ISSET(caps, SDHC_DMA_SUPPORT))) {
416 		SET(hp->flags, SHF_USE_DMA);
417 
418 		if (ISSET(caps, SDHC_ADMA2_SUPP) &&
419 		    !ISSET(sc->sc_flags, SDHC_FLAG_BROKEN_ADMA)) {
420 			SET(hp->flags, SHF_MODE_DMAEN);
421 			/*
422 			 * 64-bit mode was present in the 2.00 spec, removed
423 			 * from 3.00, and re-added in 4.00 with a different
424 			 * descriptor layout. We only support 2.00 and 3.00
425 			 * descriptors for now.
426 			 */
427 			if (hp->specver == SDHC_SPEC_VERS_200 &&
428 			    ISSET(caps, SDHC_64BIT_SYS_BUS)) {
429 				SET(hp->flags, SHF_USE_ADMA2_64);
430 				aprint_normal(", 64-bit ADMA2");
431 			} else {
432 				SET(hp->flags, SHF_USE_ADMA2_32);
433 				aprint_normal(", 32-bit ADMA2");
434 			}
435 		} else {
436 			if (!ISSET(sc->sc_flags, SDHC_FLAG_EXTERNAL_DMA) ||
437 			    ISSET(sc->sc_flags, SDHC_FLAG_EXTDMA_DMAEN))
438 				SET(hp->flags, SHF_MODE_DMAEN);
439 			if (sc->sc_vendor_transfer_data_dma) {
440 				aprint_normal(", platform DMA");
441 			} else {
442 				aprint_normal(", SDMA");
443 			}
444 		}
445 	} else {
446 		aprint_normal(", PIO");
447 	}
448 
449 	/*
450 	 * Determine the base clock frequency. (2.2.24)
451 	 */
452 	if (hp->specver >= SDHC_SPEC_VERS_300) {
453 		hp->clkbase = SDHC_BASE_V3_FREQ_KHZ(caps);
454 	} else {
455 		hp->clkbase = SDHC_BASE_FREQ_KHZ(caps);
456 	}
457 	if (hp->clkbase == 0 ||
458 	    ISSET(sc->sc_flags, SDHC_FLAG_NO_CLKBASE)) {
459 		if (sc->sc_clkbase == 0) {
460 			/* The attachment driver must tell us. */
461 			aprint_error_dev(sc->sc_dev,
462 			    "unknown base clock frequency\n");
463 			goto err;
464 		}
465 		hp->clkbase = sc->sc_clkbase;
466 	}
467 	if (hp->clkbase < 10000 || hp->clkbase > 10000 * 256) {
468 		/* Sanity check; SDHC 1.0 supports only 10-63 MHz. */
469 		aprint_error_dev(sc->sc_dev,
470 		    "base clock frequency out of range: %u MHz\n",
471 		    hp->clkbase / 1000);
472 		goto err;
473 	}
474 	aprint_normal(", %u kHz", hp->clkbase);
475 
476 	/*
477 	 * XXX Set the data timeout counter value according to
478 	 * capabilities. (2.2.15)
479 	 */
480 	HWRITE1(hp, SDHC_TIMEOUT_CTL, SDHC_TIMEOUT_MAX);
481 #if 1
482 	if (ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED))
483 		HWRITE4(hp, SDHC_NINTR_STATUS, SDHC_CMD_TIMEOUT_ERROR << 16);
484 #endif
485 
486 	if (ISSET(caps, SDHC_EMBEDDED_SLOT))
487 		aprint_normal(", embedded slot");
488 
489 	/*
490 	 * Determine SD bus voltage levels supported by the controller.
491 	 */
492 	aprint_normal(",");
493 	if (ISSET(caps, SDHC_HIGH_SPEED_SUPP)) {
494 		SET(hp->ocr, MMC_OCR_HCS);
495 		aprint_normal(" HS");
496 	}
497 	if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_NO_1_8_V)) {
498 		if (ISSET(caps2, SDHC_SDR50_SUPP)) {
499 			SET(hp->ocr, MMC_OCR_S18A);
500 			aprint_normal(" SDR50");
501 		}
502 		if (ISSET(caps2, SDHC_DDR50_SUPP)) {
503 			SET(hp->ocr, MMC_OCR_S18A);
504 			aprint_normal(" DDR50");
505 		}
506 		if (ISSET(caps2, SDHC_SDR104_SUPP)) {
507 			SET(hp->ocr, MMC_OCR_S18A);
508 			aprint_normal(" SDR104 HS200");
509 		}
510 		if (ISSET(caps, SDHC_VOLTAGE_SUPP_1_8V)) {
511 			SET(hp->ocr, MMC_OCR_1_65V_1_95V);
512 			aprint_normal(" 1.8V");
513 		}
514 	}
515 	if (ISSET(caps, SDHC_VOLTAGE_SUPP_3_0V)) {
516 		SET(hp->ocr, MMC_OCR_2_9V_3_0V | MMC_OCR_3_0V_3_1V);
517 		aprint_normal(" 3.0V");
518 	}
519 	if (ISSET(caps, SDHC_VOLTAGE_SUPP_3_3V)) {
520 		SET(hp->ocr, MMC_OCR_3_2V_3_3V | MMC_OCR_3_3V_3_4V);
521 		aprint_normal(" 3.3V");
522 	}
523 	if (hp->specver >= SDHC_SPEC_VERS_300) {
524 		aprint_normal(", re-tuning mode %d", retuning_mode + 1);
525 		if (hp->tuning_timer_count)
526 			aprint_normal(" (%us timer)", hp->tuning_timer_count);
527 	}
528 
529 	/*
530 	 * Determine the maximum block length supported by the host
531 	 * controller. (2.2.24)
532 	 */
533 	switch((caps >> SDHC_MAX_BLK_LEN_SHIFT) & SDHC_MAX_BLK_LEN_MASK) {
534 	case SDHC_MAX_BLK_LEN_512:
535 		hp->maxblklen = 512;
536 		break;
537 
538 	case SDHC_MAX_BLK_LEN_1024:
539 		hp->maxblklen = 1024;
540 		break;
541 
542 	case SDHC_MAX_BLK_LEN_2048:
543 		hp->maxblklen = 2048;
544 		break;
545 
546 	case SDHC_MAX_BLK_LEN_4096:
547 		hp->maxblklen = 4096;
548 		break;
549 
550 	default:
551 		aprint_error_dev(sc->sc_dev, "max block length unknown\n");
552 		goto err;
553 	}
554 	aprint_normal(", %u byte blocks", hp->maxblklen);
555 	aprint_normal("\n");
556 
557 	if (ISSET(hp->flags, SHF_USE_ADMA2_MASK)) {
558 		int rseg;
559 
560 		/* Allocate ADMA2 descriptor memory */
561 		error = bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE, PAGE_SIZE,
562 		    PAGE_SIZE, hp->adma_segs, 1, &rseg, BUS_DMA_WAITOK);
563 		if (error) {
564 			aprint_error_dev(sc->sc_dev,
565 			    "ADMA2 dmamem_alloc failed (%d)\n", error);
566 			goto adma_done;
567 		}
568 		error = bus_dmamem_map(sc->sc_dmat, hp->adma_segs, rseg,
569 		    PAGE_SIZE, (void **)&hp->adma2, BUS_DMA_WAITOK);
570 		if (error) {
571 			aprint_error_dev(sc->sc_dev,
572 			    "ADMA2 dmamem_map failed (%d)\n", error);
573 			goto adma_done;
574 		}
575 		error = bus_dmamap_create(sc->sc_dmat, PAGE_SIZE, 1, PAGE_SIZE,
576 		    0, BUS_DMA_WAITOK, &hp->adma_map);
577 		if (error) {
578 			aprint_error_dev(sc->sc_dev,
579 			    "ADMA2 dmamap_create failed (%d)\n", error);
580 			goto adma_done;
581 		}
582 		error = bus_dmamap_load(sc->sc_dmat, hp->adma_map,
583 		    hp->adma2, PAGE_SIZE, NULL,
584 		    BUS_DMA_WAITOK|BUS_DMA_WRITE);
585 		if (error) {
586 			aprint_error_dev(sc->sc_dev,
587 			    "ADMA2 dmamap_load failed (%d)\n", error);
588 			goto adma_done;
589 		}
590 
591 		memset(hp->adma2, 0, PAGE_SIZE);
592 
593 adma_done:
594 		if (error)
595 			CLR(hp->flags, SHF_USE_ADMA2_MASK);
596 	}
597 
598 	/*
599 	 * Attach the generic SD/MMC bus driver.  (The bus driver must
600 	 * not invoke any chipset functions before it is attached.)
601 	 */
602 	memset(&saa, 0, sizeof(saa));
603 	saa.saa_busname = "sdmmc";
604 	saa.saa_sct = &sdhc_functions;
605 	saa.saa_sch = hp;
606 	saa.saa_dmat = hp->dmat;
607 	saa.saa_clkmax = hp->clkbase;
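	/*
	 * Advertise the slowest SDCLK frequency the divider scheme in use
	 * can produce (CGM, DVS, a vendor clock mask, or the standard
	 * SDHC 3.0/2.0 divisors) so the sdmmc layer knows the usable
	 * clock range.
	 */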
608 	if (ISSET(sc->sc_flags, SDHC_FLAG_HAVE_CGM))
609 		saa.saa_clkmin = hp->clkbase / 256 / 2046;
610 	else if (ISSET(sc->sc_flags, SDHC_FLAG_HAVE_DVS))
611 		saa.saa_clkmin = hp->clkbase / 256 / 16;
612 	else if (hp->sc->sc_clkmsk != 0)
613 		saa.saa_clkmin = hp->clkbase / (hp->sc->sc_clkmsk >>
614 		    (ffs(hp->sc->sc_clkmsk) - 1));
615 	else if (hp->specver >= SDHC_SPEC_VERS_300)
616 		saa.saa_clkmin = hp->clkbase / 0x3ff;
617 	else
618 		saa.saa_clkmin = hp->clkbase / 256;
619 	if (!ISSET(sc->sc_flags, SDHC_FLAG_NO_AUTO_STOP))
620 		saa.saa_caps |= SMC_CAPS_AUTO_STOP;
621 	saa.saa_caps |= SMC_CAPS_4BIT_MODE;
622 	if (ISSET(sc->sc_flags, SDHC_FLAG_8BIT_MODE))
623 		saa.saa_caps |= SMC_CAPS_8BIT_MODE;
624 	if (ISSET(caps, SDHC_HIGH_SPEED_SUPP))
625 		saa.saa_caps |= SMC_CAPS_SD_HIGHSPEED |
626 				SMC_CAPS_MMC_HIGHSPEED;
627 	if (ISSET(caps2, SDHC_SDR104_SUPP))
628 		saa.saa_caps |= SMC_CAPS_UHS_SDR104 |
629 				SMC_CAPS_UHS_SDR50 |
630 				SMC_CAPS_MMC_HS200;
631 	if (ISSET(caps2, SDHC_SDR50_SUPP))
632 		saa.saa_caps |= SMC_CAPS_UHS_SDR50;
633 	if (ISSET(caps2, SDHC_DDR50_SUPP))
634 		saa.saa_caps |= SMC_CAPS_UHS_DDR50;
635 	if (ISSET(hp->flags, SHF_USE_DMA)) {
636 		saa.saa_caps |= SMC_CAPS_DMA;
637 		if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED))
638 			saa.saa_caps |= SMC_CAPS_MULTI_SEG_DMA;
639 	}
640 	if (ISSET(sc->sc_flags, SDHC_FLAG_SINGLE_ONLY))
641 		saa.saa_caps |= SMC_CAPS_SINGLE_ONLY;
642 	if (ISSET(sc->sc_flags, SDHC_FLAG_POLL_CARD_DET))
643 		saa.saa_caps |= SMC_CAPS_POLL_CARD_DET;
644 
645 	if (ISSET(sc->sc_flags, SDHC_FLAG_BROKEN_ADMA2_ZEROLEN))
646 		saa.saa_max_seg = 65535;
647 
648 	hp->sdmmc = config_found(sc->sc_dev, &saa, sdhc_cfprint, CFARGS_NONE);
649 
650 	return 0;
651 
652 err:
653 	callout_destroy(&hp->tuning_timer);
654 	cv_destroy(&hp->intr_cv);
655 	mutex_destroy(&hp->bus_clock_lock);
656 	mutex_destroy(&hp->intr_lock);
657 	free(hp, M_DEVBUF);
658 	sc->sc_host[--sc->sc_nhosts] = NULL;
659 err1:
660 	return 1;
661 }
662 
663 int
664 sdhc_detach(struct sdhc_softc *sc, int flags)
665 {
666 	struct sdhc_host *hp;
667 	int rv = 0;
668 
669 	for (size_t n = 0; n < sc->sc_nhosts; n++) {
670 		hp = sc->sc_host[n];
671 		if (hp == NULL)
672 			continue;
673 		if (hp->sdmmc != NULL) {
674 			rv = config_detach(hp->sdmmc, flags);
675 			if (rv)
676 				break;
677 			hp->sdmmc = NULL;
678 		}
679 		/* disable interrupts */
680 		if ((flags & DETACH_FORCE) == 0) {
681 			mutex_enter(&hp->intr_lock);
682 			if (ISSET(hp->sc->sc_flags, SDHC_FLAG_32BIT_ACCESS)) {
683 				HWRITE4(hp, SDHC_NINTR_SIGNAL_EN, 0);
684 			} else {
685 				HWRITE2(hp, SDHC_NINTR_SIGNAL_EN, 0);
686 			}
687 			sdhc_soft_reset(hp, SDHC_RESET_ALL);
688 			mutex_exit(&hp->intr_lock);
689 		}
690 		callout_halt(&hp->tuning_timer, NULL);
691 		callout_destroy(&hp->tuning_timer);
692 		cv_destroy(&hp->intr_cv);
693 		mutex_destroy(&hp->intr_lock);
694 		if (hp->ios > 0) {
695 			bus_space_unmap(hp->iot, hp->ioh, hp->ios);
696 			hp->ios = 0;
697 		}
698 		if (ISSET(hp->flags, SHF_USE_ADMA2_MASK)) {
699 			bus_dmamap_unload(sc->sc_dmat, hp->adma_map);
700 			bus_dmamap_destroy(sc->sc_dmat, hp->adma_map);
701 			bus_dmamem_unmap(sc->sc_dmat, hp->adma2, PAGE_SIZE);
702 			bus_dmamem_free(sc->sc_dmat, hp->adma_segs, 1);
703 		}
704 		free(hp, M_DEVBUF);
705 		sc->sc_host[n] = NULL;
706 	}
707 
708 	return rv;
709 }
710 
711 bool
712 sdhc_suspend(device_t dev, const pmf_qual_t *qual)
713 {
714 	struct sdhc_softc *sc = device_private(dev);
715 	struct sdhc_host *hp;
716 	size_t i;
717 
718 	/* XXX poll for command completion or suspend command
719 	 * in progress */
720 
721 	/* Save the host controller state. */
722 	for (size_t n = 0; n < sc->sc_nhosts; n++) {
723 		hp = sc->sc_host[n];
724 		if (ISSET(sc->sc_flags, SDHC_FLAG_32BIT_ACCESS)) {
725 			for (i = 0; i < sizeof hp->regs; i += 4) {
726 				uint32_t v = HREAD4(hp, i);
727 				hp->regs[i + 0] = (v >> 0);
728 				hp->regs[i + 1] = (v >> 8);
729 				if (i + 3 < sizeof hp->regs) {
730 					hp->regs[i + 2] = (v >> 16);
731 					hp->regs[i + 3] = (v >> 24);
732 				}
733 			}
734 		} else {
735 			for (i = 0; i < sizeof hp->regs; i++) {
736 				hp->regs[i] = HREAD1(hp, i);
737 			}
738 		}
739 	}
740 	return true;
741 }
742 
743 bool
744 sdhc_resume(device_t dev, const pmf_qual_t *qual)
745 {
746 	struct sdhc_softc *sc = device_private(dev);
747 	struct sdhc_host *hp;
748 	size_t i;
749 
750 	/* Restore the host controller state. */
751 	for (size_t n = 0; n < sc->sc_nhosts; n++) {
752 		hp = sc->sc_host[n];
753 		(void)sdhc_host_reset(hp);
754 		if (ISSET(sc->sc_flags, SDHC_FLAG_32BIT_ACCESS)) {
755 			for (i = 0; i < sizeof hp->regs; i += 4) {
756 				if (i + 3 < sizeof hp->regs) {
757 					HWRITE4(hp, i,
758 					    (hp->regs[i + 0] << 0)
759 					    | (hp->regs[i + 1] << 8)
760 					    | (hp->regs[i + 2] << 16)
761 					    | (hp->regs[i + 3] << 24));
762 				} else {
763 					HWRITE4(hp, i,
764 					    (hp->regs[i + 0] << 0)
765 					    | (hp->regs[i + 1] << 8));
766 				}
767 			}
768 		} else {
769 			for (i = 0; i < sizeof hp->regs; i++) {
770 				HWRITE1(hp, i, hp->regs[i]);
771 			}
772 		}
773 	}
774 	return true;
775 }
776 
777 bool
778 sdhc_shutdown(device_t dev, int flags)
779 {
780 	struct sdhc_softc *sc = device_private(dev);
781 	struct sdhc_host *hp;
782 
783 	/* XXX chip locks up if we don't disable it before reboot. */
784 	for (size_t i = 0; i < sc->sc_nhosts; i++) {
785 		hp = sc->sc_host[i];
786 		(void)sdhc_host_reset(hp);
787 	}
788 	return true;
789 }
790 
791 /*
792  * Reset the host controller.  Called during initialization, when
793  * cards are removed, upon resume, and during error recovery.
794  */
795 static int
796 sdhc_host_reset1(sdmmc_chipset_handle_t sch)
797 {
798 	struct sdhc_host *hp = (struct sdhc_host *)sch;
799 	uint32_t sdhcimask;
800 	int error;
801 
802 	KASSERT(mutex_owned(&hp->intr_lock));
803 
804 	/* Disable all interrupts. */
805 	if (ISSET(hp->sc->sc_flags, SDHC_FLAG_32BIT_ACCESS)) {
806 		HWRITE4(hp, SDHC_NINTR_SIGNAL_EN, 0);
807 	} else {
808 		HWRITE2(hp, SDHC_NINTR_SIGNAL_EN, 0);
809 	}
810 
811 	/* Let sdhc_bus_power restore power */
812 	hp->vdd = 0;
813 
814 	/*
815 	 * Reset the entire host controller and wait up to 100ms for
816 	 * the controller to clear the reset bit.
817 	 */
818 	error = sdhc_soft_reset(hp, SDHC_RESET_ALL);
819 	if (error)
820 		goto out;
821 
822 	/* Set data timeout counter value to max for now. */
823 	HWRITE1(hp, SDHC_TIMEOUT_CTL, SDHC_TIMEOUT_MAX);
824 #if 1
825 	if (ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED))
826 		HWRITE4(hp, SDHC_NINTR_STATUS, SDHC_CMD_TIMEOUT_ERROR << 16);
827 #endif
828 
829 	/* Enable interrupts. */
830 	sdhcimask = SDHC_CARD_REMOVAL | SDHC_CARD_INSERTION |
831 	    SDHC_BUFFER_READ_READY | SDHC_BUFFER_WRITE_READY |
832 	    SDHC_DMA_INTERRUPT | SDHC_BLOCK_GAP_EVENT |
833 	    SDHC_TRANSFER_COMPLETE | SDHC_COMMAND_COMPLETE;
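	/*
	 * Enable status reporting for the events above plus all error
	 * conditions, and derive the interrupt signal enables from the
	 * same mask; the buffer-ready bits are kept status-only here and
	 * switched on elsewhere when actually needed (eSDHC/uSDHC PIO in
	 * sdhc_exec_command() and the tuning code).
	 */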
834 	if (ISSET(hp->sc->sc_flags, SDHC_FLAG_32BIT_ACCESS)) {
835 		sdhcimask |= SDHC_EINTR_STATUS_MASK << 16;
836 		HWRITE4(hp, SDHC_NINTR_STATUS_EN, sdhcimask);
837 		sdhcimask ^=
838 		    (SDHC_EINTR_STATUS_MASK ^ SDHC_EINTR_SIGNAL_MASK) << 16;
839 		sdhcimask ^= SDHC_BUFFER_READ_READY ^ SDHC_BUFFER_WRITE_READY;
840 		HWRITE4(hp, SDHC_NINTR_SIGNAL_EN, sdhcimask);
841 	} else {
842 		HWRITE2(hp, SDHC_NINTR_STATUS_EN, sdhcimask);
843 		HWRITE2(hp, SDHC_EINTR_STATUS_EN, SDHC_EINTR_STATUS_MASK);
844 		sdhcimask ^= SDHC_BUFFER_READ_READY ^ SDHC_BUFFER_WRITE_READY;
845 		HWRITE2(hp, SDHC_NINTR_SIGNAL_EN, sdhcimask);
846 		HWRITE2(hp, SDHC_EINTR_SIGNAL_EN, SDHC_EINTR_SIGNAL_MASK);
847 	}
848 
849 out:
850 	return error;
851 }
852 
853 static int
854 sdhc_host_reset(sdmmc_chipset_handle_t sch)
855 {
856 	struct sdhc_host *hp = (struct sdhc_host *)sch;
857 	int error;
858 
859 	mutex_enter(&hp->intr_lock);
860 	error = sdhc_host_reset1(sch);
861 	mutex_exit(&hp->intr_lock);
862 
863 	return error;
864 }
865 
866 static uint32_t
867 sdhc_host_ocr(sdmmc_chipset_handle_t sch)
868 {
869 	struct sdhc_host *hp = (struct sdhc_host *)sch;
870 
871 	return hp->ocr;
872 }
873 
874 static int
875 sdhc_host_maxblklen(sdmmc_chipset_handle_t sch)
876 {
877 	struct sdhc_host *hp = (struct sdhc_host *)sch;
878 
879 	return hp->maxblklen;
880 }
881 
882 /*
883  * Return non-zero if the card is currently inserted.
884  */
885 static int
886 sdhc_card_detect(sdmmc_chipset_handle_t sch)
887 {
888 	struct sdhc_host *hp = (struct sdhc_host *)sch;
889 	int r;
890 
891 	if (hp->sc->sc_vendor_card_detect)
892 		return (*hp->sc->sc_vendor_card_detect)(hp->sc);
893 
894 	r = ISSET(HREAD4(hp, SDHC_PRESENT_STATE), SDHC_CARD_INSERTED);
895 
896 	return r ? 1 : 0;
897 }
898 
899 /*
900  * Return non-zero if the card is currently write-protected.
901  */
902 static int
903 sdhc_write_protect(sdmmc_chipset_handle_t sch)
904 {
905 	struct sdhc_host *hp = (struct sdhc_host *)sch;
906 	int r;
907 
908 	if (hp->sc->sc_vendor_write_protect)
909 		return (*hp->sc->sc_vendor_write_protect)(hp->sc);
910 
911 	r = ISSET(HREAD4(hp, SDHC_PRESENT_STATE), SDHC_WRITE_PROTECT_SWITCH);
912 
913 	return r ? 0 : 1;
914 }
915 
916 /*
917  * Set or change SD bus voltage and enable or disable SD bus power.
918  * Return zero on success.
919  */
920 static int
921 sdhc_bus_power(sdmmc_chipset_handle_t sch, uint32_t ocr)
922 {
923 	struct sdhc_host *hp = (struct sdhc_host *)sch;
924 	uint8_t vdd;
925 	int error = 0;
926 	const uint32_t pcmask =
927 	    ~(SDHC_BUS_POWER | (SDHC_VOLTAGE_MASK << SDHC_VOLTAGE_SHIFT));
928 	uint32_t reg;
929 
930 	mutex_enter(&hp->intr_lock);
931 
932 	/*
933 	 * Disable bus power before voltage change.
934 	 */
935 	if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_32BIT_ACCESS)
936 	    && !ISSET(hp->sc->sc_flags, SDHC_FLAG_NO_PWR0)) {
937 		hp->vdd = 0;
938 		HWRITE1(hp, SDHC_POWER_CTL, 0);
939 	}
940 
941 	/* If power is disabled, reset the host and return now. */
942 	if (ocr == 0) {
943 		(void)sdhc_host_reset1(hp);
944 		callout_halt(&hp->tuning_timer, &hp->intr_lock);
945 		goto out;
946 	}
947 
948 	/*
949 	 * Select the lowest voltage according to capabilities.
950 	 */
951 	ocr &= hp->ocr;
952 	if (ISSET(ocr, MMC_OCR_1_65V_1_95V)) {
953 		vdd = SDHC_VOLTAGE_1_8V;
954 	} else if (ISSET(ocr, MMC_OCR_2_9V_3_0V|MMC_OCR_3_0V_3_1V)) {
955 		vdd = SDHC_VOLTAGE_3_0V;
956 	} else if (ISSET(ocr, MMC_OCR_3_2V_3_3V|MMC_OCR_3_3V_3_4V)) {
957 		vdd = SDHC_VOLTAGE_3_3V;
958 	} else {
959 		/* Unsupported voltage level requested. */
960 		error = EINVAL;
961 		goto out;
962 	}
963 
964 	/*
965 	 * Did the voltage change?
966 	 */
967 	if (vdd == hp->vdd)
968 		goto out;
969 
970 	if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED)) {
971 		/*
972 		 * Enable bus power.  Wait at least 1 ms (or 74 clocks) plus
973 		 * voltage ramp until power rises.
974 		 */
975 
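		/*
		 * Controllers flagged SDHC_FLAG_SINGLE_POWER_WRITE take the
		 * voltage select and bus-power enable in a single register
		 * write; all others get the staged power-up sequence with
		 * settling delays.
		 */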
976 		if (ISSET(hp->sc->sc_flags, SDHC_FLAG_SINGLE_POWER_WRITE)) {
977 			HWRITE1(hp, SDHC_POWER_CTL,
978 			    (vdd << SDHC_VOLTAGE_SHIFT) | SDHC_BUS_POWER);
979 		} else {
980 			reg = HREAD1(hp, SDHC_POWER_CTL) & pcmask;
981 			HWRITE1(hp, SDHC_POWER_CTL, reg);
982 			sdmmc_delay(1);
983 			reg |= (vdd << SDHC_VOLTAGE_SHIFT);
984 			HWRITE1(hp, SDHC_POWER_CTL, reg);
985 			sdmmc_delay(1);
986 			reg |= SDHC_BUS_POWER;
987 			HWRITE1(hp, SDHC_POWER_CTL, reg);
988 			sdmmc_delay(10000);
989 		}
990 
991 		/*
992 		 * The host system may not power the bus due to a low battery,
993 		 * etc.  In that case, the host controller should clear the
994 		 * bus power bit.
995 		 */
996 		if (!ISSET(HREAD1(hp, SDHC_POWER_CTL), SDHC_BUS_POWER)) {
997 			error = ENXIO;
998 			goto out;
999 		}
1000 	}
1001 
1002 	/* power successfully changed */
1003 	hp->vdd = vdd;
1004 
1005 out:
1006 	mutex_exit(&hp->intr_lock);
1007 
1008 	return error;
1009 }
1010 
1011 /*
1012  * Return the smallest possible base clock frequency divisor value
1013  * for the CLOCK_CTL register to produce `freq' (kHz).
1014  */
1015 static bool
1016 sdhc_clock_divisor(struct sdhc_host *hp, u_int freq, u_int *divp)
1017 {
1018 	u_int div;
1019 
1020 	if (ISSET(hp->sc->sc_flags, SDHC_FLAG_HAVE_CGM)) {
1021 		for (div = hp->clkbase / freq; div <= 0x3ff; div++) {
1022 			if ((hp->clkbase / div) <= freq) {
1023 				*divp = SDHC_SDCLK_CGM
1024 				    | ((div & 0x300) << SDHC_SDCLK_XDIV_SHIFT)
1025 				    | ((div & 0x0ff) << SDHC_SDCLK_DIV_SHIFT);
1026 				//freq = hp->clkbase / div;
1027 				return true;
1028 			}
1029 		}
1030 		/* No divisor found. */
1031 		return false;
1032 	}
1033 	if (ISSET(hp->sc->sc_flags, SDHC_FLAG_HAVE_DVS)) {
1034 		u_int dvs = (hp->clkbase + freq - 1) / freq;
1035 		u_int roundup = dvs & 1;
1036 		for (dvs >>= 1, div = 1; div <= 256; div <<= 1, dvs >>= 1) {
1037 			if (dvs + roundup <= 16) {
1038 				dvs += roundup - 1;
1039 				*divp = (div << SDHC_SDCLK_DIV_SHIFT)
1040 				    |   (dvs << SDHC_SDCLK_DVS_SHIFT);
1041 				DPRINTF(2,
1042 				    ("%s: divisor for freq %u is %u * %u\n",
1043 				    HDEVNAME(hp), freq, div * 2, dvs + 1));
1044 				//freq = hp->clkbase / (div * 2) * (dvs + 1);
1045 				return true;
1046 			}
1047 			/*
1048 			 * If we drop bits, we need to round up the divisor.
1049 			 */
1050 			roundup |= dvs & 1;
1051 		}
1052 		/* No divisor found. */
1053 		return false;
1054 	}
1055 	if (hp->sc->sc_clkmsk != 0) {
1056 		div = howmany(hp->clkbase, freq);
1057 		if (div > (hp->sc->sc_clkmsk >> (ffs(hp->sc->sc_clkmsk) - 1)))
1058 			return false;
1059 		*divp = div << (ffs(hp->sc->sc_clkmsk) - 1);
1060 		//freq = hp->clkbase / div;
1061 		return true;
1062 	}
1063 	if (hp->specver >= SDHC_SPEC_VERS_300) {
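		/*
		 * SDHC 3.0 uses a 10-bit divisor N with SDCLK = base/(2*N),
		 * where N = 0 selects the undivided base clock; round up so
		 * the result never exceeds the requested frequency.
		 */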
1064 		div = howmany(hp->clkbase, freq);
1065 		div = div > 1 ? howmany(div, 2) : 0;
1066 		if (div > 0x3ff)
1067 			return false;
1068 		*divp = (((div >> 8) & SDHC_SDCLK_XDIV_MASK)
1069 			 << SDHC_SDCLK_XDIV_SHIFT) |
1070 			(((div >> 0) & SDHC_SDCLK_DIV_MASK)
1071 			 << SDHC_SDCLK_DIV_SHIFT);
1072 		//freq = hp->clkbase / (div ? div * 2 : 1);
1073 		return true;
1074 	} else {
1075 		for (div = 1; div <= 256; div *= 2) {
1076 			if ((hp->clkbase / div) <= freq) {
1077 				*divp = (div / 2) << SDHC_SDCLK_DIV_SHIFT;
1078 				//freq = hp->clkbase / div;
1079 				return true;
1080 			}
1081 		}
1082 		/* No divisor found. */
1083 		return false;
1084 	}
1085 	/* No divisor found. */
1086 	return false;
1087 }
1088 
1089 /*
1090  * Set or change SDCLK frequency or disable the SD clock.
1091  * Return zero on success.
1092  */
1093 static int
1094 sdhc_bus_clock_ddr(sdmmc_chipset_handle_t sch, int freq, bool ddr)
1095 {
1096 	struct sdhc_host *hp = (struct sdhc_host *)sch;
1097 	u_int div;
1098 	u_int timo;
1099 	int16_t reg;
1100 	int error = 0;
1101 	bool present __diagused;
1102 
1103 #ifdef DIAGNOSTIC
1104 	present = ISSET(HREAD4(hp, SDHC_PRESENT_STATE), SDHC_CMD_INHIBIT_MASK);
1105 
1106 	/* Must not stop the clock if commands are in progress. */
1107 	if (present && sdhc_card_detect(hp)) {
1108 		aprint_normal_dev(hp->sc->sc_dev,
1109 		    "%s: command in progress\n", __func__);
1110 	}
1111 #endif
1112 
1113 	if (hp->sc->sc_vendor_bus_clock) {
1114 		mutex_enter(&hp->bus_clock_lock);
1115 		error = (*hp->sc->sc_vendor_bus_clock)(hp->sc, freq);
1116 		mutex_exit(&hp->bus_clock_lock);
1117 		if (error != 0)
1118 			return error;
1119 	}
1120 
1121 	mutex_enter(&hp->intr_lock);
1122 
1123 	/*
1124 	 * Stop SD clock before changing the frequency.
1125 	 */
1126 	if (ISSET(hp->sc->sc_flags, SDHC_FLAG_USDHC)) {
1127 		HCLR4(hp, SDHC_VEND_SPEC,
1128 		    SDHC_VEND_SPEC_CARD_CLK_SOFT_EN |
1129 		    SDHC_VEND_SPEC_FRC_SDCLK_ON);
1130 		if (freq == SDMMC_SDCLK_OFF) {
1131 			goto out;
1132 		}
1133 	} else if (ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED)) {
1134 		HCLR4(hp, SDHC_CLOCK_CTL, 0xfff8);
1135 		if (freq == SDMMC_SDCLK_OFF) {
1136 			HSET4(hp, SDHC_CLOCK_CTL, 0x80f0);
1137 			goto out;
1138 		}
1139 	} else {
1140 		HCLR2(hp, SDHC_CLOCK_CTL, SDHC_SDCLK_ENABLE);
1141 		if (freq == SDMMC_SDCLK_OFF)
1142 			goto out;
1143 	}
1144 
1145 	if (ISSET(hp->sc->sc_flags, SDHC_FLAG_USDHC)) {
1146 		if (ddr)
1147 			HSET4(hp, SDHC_MIX_CTRL, SDHC_USDHC_DDR_EN);
1148 		else
1149 			HCLR4(hp, SDHC_MIX_CTRL, SDHC_USDHC_DDR_EN);
1150 	} else if (hp->specver >= SDHC_SPEC_VERS_300) {
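		/*
		 * Pick the UHS timing that matches the target frequency
		 * (in kHz): >100 MHz SDR104, >50 MHz SDR50 or DDR50,
		 * >25 MHz SDR25 or DDR50, >400 kHz SDR12.
		 */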
1151 		HCLR2(hp, SDHC_HOST_CTL2, SDHC_UHS_MODE_SELECT_MASK);
1152 		if (freq > 100000) {
1153 			HSET2(hp, SDHC_HOST_CTL2, SDHC_UHS_MODE_SELECT_SDR104);
1154 		} else if (freq > 50000) {
1155 			if (ddr) {
1156 				HSET2(hp, SDHC_HOST_CTL2,
1157 				    SDHC_UHS_MODE_SELECT_DDR50);
1158 			} else {
1159 				HSET2(hp, SDHC_HOST_CTL2,
1160 				    SDHC_UHS_MODE_SELECT_SDR50);
1161 			}
1162 		} else if (freq > 25000) {
1163 			if (ddr) {
1164 				HSET2(hp, SDHC_HOST_CTL2,
1165 				    SDHC_UHS_MODE_SELECT_DDR50);
1166 			} else {
1167 				HSET2(hp, SDHC_HOST_CTL2,
1168 				    SDHC_UHS_MODE_SELECT_SDR25);
1169 			}
1170 		} else if (freq > 400) {
1171 			HSET2(hp, SDHC_HOST_CTL2, SDHC_UHS_MODE_SELECT_SDR12);
1172 		}
1173 	}
1174 
1175 	/*
1176 	 * Slow down the Ricoh 5U823 controller, which isn't reliable
1177 	 * at a 100 MHz bus clock.
1178 	 */
1179 	if (ISSET(hp->sc->sc_flags, SDHC_FLAG_SLOW_SDR50)) {
1180 		if (freq == 100000)
1181 			--freq;
1182 	}
1183 
1184 	/*
1185 	 * Set the minimum base clock frequency divisor.
1186 	 */
1187 	if (!sdhc_clock_divisor(hp, freq, &div)) {
1188 		/* Invalid base clock frequency or `freq' value. */
1189 		aprint_error_dev(hp->sc->sc_dev,
1190 			"Invalid bus clock %d kHz\n", freq);
1191 		error = EINVAL;
1192 		goto out;
1193 	}
1194 	if (ISSET(hp->sc->sc_flags, SDHC_FLAG_USDHC)) {
1195 		if (ddr) {
1196 			/* in ddr mode, divisor >>= 1 */
1197 			div = ((div >> 1) & (SDHC_SDCLK_DIV_MASK <<
1198 			    SDHC_SDCLK_DIV_SHIFT)) |
1199 			    (div & (SDHC_SDCLK_DVS_MASK <<
1200 			    SDHC_SDCLK_DVS_SHIFT));
1201 		}
1202 		for (timo = 1000; timo > 0; timo--) {
1203 			if (ISSET(HREAD4(hp, SDHC_PRESENT_STATE), SDHC_SDSTB))
1204 				break;
1205 			sdmmc_delay(10);
1206 		}
1207 		HWRITE4(hp, SDHC_CLOCK_CTL,
1208 		    div | (SDHC_TIMEOUT_MAX << 16) | 0x0f);
1209 	} else if (ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED)) {
1210 		HWRITE4(hp, SDHC_CLOCK_CTL,
1211 		    div | (SDHC_TIMEOUT_MAX << 16));
1212 	} else {
1213 		reg = HREAD2(hp, SDHC_CLOCK_CTL);
1214 		reg &= (SDHC_INTCLK_STABLE | SDHC_INTCLK_ENABLE);
1215 		HWRITE2(hp, SDHC_CLOCK_CTL, reg | div);
1216 	}
1217 
1218 	/*
1219 	 * Start internal clock.  Wait 10ms for stabilization.
1220 	 */
1221 	if (ISSET(hp->sc->sc_flags, SDHC_FLAG_USDHC)) {
1222 		HSET4(hp, SDHC_VEND_SPEC,
1223 		    SDHC_VEND_SPEC_CARD_CLK_SOFT_EN |
1224 		    SDHC_VEND_SPEC_FRC_SDCLK_ON);
1225 	} else if (ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED)) {
1226 		sdmmc_delay(10000);
1227 		HSET4(hp, SDHC_CLOCK_CTL,
1228 		    8 | SDHC_INTCLK_ENABLE | SDHC_INTCLK_STABLE);
1229 	} else {
1230 		HSET2(hp, SDHC_CLOCK_CTL, SDHC_INTCLK_ENABLE);
1231 		for (timo = 1000; timo > 0; timo--) {
1232 			if (ISSET(HREAD2(hp, SDHC_CLOCK_CTL),
1233 			    SDHC_INTCLK_STABLE))
1234 				break;
1235 			sdmmc_delay(10);
1236 		}
1237 		if (timo == 0) {
1238 			error = ETIMEDOUT;
1239 			DPRINTF(1,("%s: timeout\n", __func__));
1240 			goto out;
1241 		}
1242 	}
1243 
1244 	if (ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED | SDHC_FLAG_USDHC)) {
1245 		HSET1(hp, SDHC_SOFTWARE_RESET, SDHC_INIT_ACTIVE);
1246 		/*
1247 		 * Sending 80 clocks at 400kHz takes 200us.
1248 		 * So delay for that time + slop and then
1249 		 * check a few times for completion.
1250 		 */
1251 		sdmmc_delay(210);
1252 		for (timo = 10; timo > 0; timo--) {
1253 			if (!ISSET(HREAD1(hp, SDHC_SOFTWARE_RESET),
1254 			    SDHC_INIT_ACTIVE))
1255 				break;
1256 			sdmmc_delay(10);
1257 		}
1258 		DPRINTF(2,("%s: %u init spins\n", __func__, 10 - timo));
1259 
1260 		/*
1261 		 * Enable SD clock.
1262 		 */
1263 		if (ISSET(hp->sc->sc_flags, SDHC_FLAG_USDHC)) {
1264 			HSET4(hp, SDHC_VEND_SPEC,
1265 			    SDHC_VEND_SPEC_CARD_CLK_SOFT_EN |
1266 			    SDHC_VEND_SPEC_FRC_SDCLK_ON);
1267 		} else {
1268 			HSET4(hp, SDHC_CLOCK_CTL, SDHC_SDCLK_ENABLE);
1269 		}
1270 	} else {
1271 		/*
1272 		 * Enable SD clock.
1273 		 */
1274 		HSET2(hp, SDHC_CLOCK_CTL, SDHC_SDCLK_ENABLE);
1275 
1276 		if (freq > 25000 &&
1277 		    !ISSET(hp->sc->sc_flags, SDHC_FLAG_NO_HS_BIT))
1278 			HSET1(hp, SDHC_HOST_CTL, SDHC_HIGH_SPEED);
1279 		else
1280 			HCLR1(hp, SDHC_HOST_CTL, SDHC_HIGH_SPEED);
1281 	}
1282 
1283 	mutex_exit(&hp->intr_lock);
1284 
1285 	if (hp->sc->sc_vendor_bus_clock_post) {
1286 		mutex_enter(&hp->bus_clock_lock);
1287 		error = (*hp->sc->sc_vendor_bus_clock_post)(hp->sc, freq);
1288 		mutex_exit(&hp->bus_clock_lock);
1289 	}
1290 	return error;
1291 
1292 out:
1293 	mutex_exit(&hp->intr_lock);
1294 
1295 	return error;
1296 }
1297 
1298 static int
1299 sdhc_bus_width(sdmmc_chipset_handle_t sch, int width)
1300 {
1301 	struct sdhc_host *hp = (struct sdhc_host *)sch;
1302 	int reg;
1303 
1304 	switch (width) {
1305 	case 1:
1306 	case 4:
1307 		break;
1308 
1309 	case 8:
1310 		if (ISSET(hp->sc->sc_flags, SDHC_FLAG_8BIT_MODE))
1311 			break;
1312 		/* FALLTHROUGH */
1313 	default:
1314 		DPRINTF(0,("%s: unsupported bus width (%d)\n",
1315 		    HDEVNAME(hp), width));
1316 		return 1;
1317 	}
1318 
1319 	if (hp->sc->sc_vendor_bus_width) {
1320 		const int error = hp->sc->sc_vendor_bus_width(hp->sc, width);
1321 		if (error != 0)
1322 			return error;
1323 	}
1324 
1325 	mutex_enter(&hp->intr_lock);
1326 
1327 	reg = HREAD1(hp, SDHC_HOST_CTL);
1328 	if (ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED | SDHC_FLAG_USDHC)) {
1329 		reg &= ~(SDHC_4BIT_MODE|SDHC_ESDHC_8BIT_MODE);
1330 		if (width == 4)
1331 			reg |= SDHC_4BIT_MODE;
1332 		else if (width == 8)
1333 			reg |= SDHC_ESDHC_8BIT_MODE;
1334 	} else {
1335 		reg &= ~SDHC_4BIT_MODE;
1336 		if (hp->specver >= SDHC_SPEC_VERS_300) {
1337 			reg &= ~SDHC_8BIT_MODE;
1338 		}
1339 		if (width == 4) {
1340 			reg |= SDHC_4BIT_MODE;
1341 		} else if (width == 8 && hp->specver >= SDHC_SPEC_VERS_300) {
1342 			reg |= SDHC_8BIT_MODE;
1343 		}
1344 	}
1345 	HWRITE1(hp, SDHC_HOST_CTL, reg);
1346 
1347 	mutex_exit(&hp->intr_lock);
1348 
1349 	return 0;
1350 }
1351 
1352 static int
1353 sdhc_bus_rod(sdmmc_chipset_handle_t sch, int on)
1354 {
1355 	struct sdhc_host *hp = (struct sdhc_host *)sch;
1356 
1357 	if (hp->sc->sc_vendor_rod)
1358 		return (*hp->sc->sc_vendor_rod)(hp->sc, on);
1359 
1360 	return 0;
1361 }
1362 
1363 static void
1364 sdhc_card_enable_intr(sdmmc_chipset_handle_t sch, int enable)
1365 {
1366 	struct sdhc_host *hp = (struct sdhc_host *)sch;
1367 
1368 	if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED | SDHC_FLAG_USDHC)) {
1369 		mutex_enter(&hp->intr_lock);
1370 		if (enable) {
1371 			HSET2(hp, SDHC_NINTR_STATUS_EN, SDHC_CARD_INTERRUPT);
1372 			HSET2(hp, SDHC_NINTR_SIGNAL_EN, SDHC_CARD_INTERRUPT);
1373 		} else {
1374 			HCLR2(hp, SDHC_NINTR_SIGNAL_EN, SDHC_CARD_INTERRUPT);
1375 			HCLR2(hp, SDHC_NINTR_STATUS_EN, SDHC_CARD_INTERRUPT);
1376 		}
1377 		mutex_exit(&hp->intr_lock);
1378 	}
1379 }
1380 
1381 static void
1382 sdhc_card_intr_ack(sdmmc_chipset_handle_t sch)
1383 {
1384 	struct sdhc_host *hp = (struct sdhc_host *)sch;
1385 
1386 	if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED | SDHC_FLAG_USDHC)) {
1387 		mutex_enter(&hp->intr_lock);
1388 		HSET2(hp, SDHC_NINTR_STATUS_EN, SDHC_CARD_INTERRUPT);
1389 		mutex_exit(&hp->intr_lock);
1390 	}
1391 }
1392 
1393 static int
1394 sdhc_signal_voltage(sdmmc_chipset_handle_t sch, int signal_voltage)
1395 {
1396 	struct sdhc_host *hp = (struct sdhc_host *)sch;
1397 	int error = 0;
1398 
1399 	if (hp->specver < SDHC_SPEC_VERS_300)
1400 		return EINVAL;
1401 
1402 	mutex_enter(&hp->intr_lock);
1403 	switch (signal_voltage) {
1404 	case SDMMC_SIGNAL_VOLTAGE_180:
1405 		if (hp->sc->sc_vendor_signal_voltage != NULL) {
1406 			error = hp->sc->sc_vendor_signal_voltage(hp->sc,
1407 			    signal_voltage);
1408 			if (error != 0)
1409 				break;
1410 		}
1411 		if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_USDHC))
1412 			HSET2(hp, SDHC_HOST_CTL2, SDHC_1_8V_SIGNAL_EN);
1413 		break;
1414 	case SDMMC_SIGNAL_VOLTAGE_330:
1415 		if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_USDHC))
1416 			HCLR2(hp, SDHC_HOST_CTL2, SDHC_1_8V_SIGNAL_EN);
1417 		if (hp->sc->sc_vendor_signal_voltage != NULL) {
1418 			error = hp->sc->sc_vendor_signal_voltage(hp->sc,
1419 			    signal_voltage);
1420 			if (error != 0)
1421 				break;
1422 		}
1423 		break;
1424 	default:
1425 		error = EINVAL;
1426 		break;
1427 	}
1428 	mutex_exit(&hp->intr_lock);
1429 
1430 	return error;
1431 }
1432 
1433 /*
1434  * Sampling clock tuning procedure (UHS)
1435  */
1436 static int
1437 sdhc_execute_tuning1(struct sdhc_host *hp, int timing)
1438 {
1439 	struct sdmmc_command cmd;
1440 	uint8_t hostctl;
1441 	int opcode, error, retry = 40;
1442 
1443 	KASSERT(mutex_owned(&hp->intr_lock));
1444 
1445 	hp->tuning_timing = timing;
1446 
1447 	switch (timing) {
1448 	case SDMMC_TIMING_MMC_HS200:
1449 		opcode = MMC_SEND_TUNING_BLOCK_HS200;
1450 		break;
1451 	case SDMMC_TIMING_UHS_SDR50:
1452 		if (!ISSET(hp->sc->sc_caps2, SDHC_TUNING_SDR50))
1453 			return 0;
1454 		/* FALLTHROUGH */
1455 	case SDMMC_TIMING_UHS_SDR104:
1456 		opcode = MMC_SEND_TUNING_BLOCK;
1457 		break;
1458 	default:
1459 		return EINVAL;
1460 	}
1461 
1462 	hostctl = HREAD1(hp, SDHC_HOST_CTL);
1463 
1464 	/* enable buffer read ready interrupt */
1465 	HSET2(hp, SDHC_NINTR_SIGNAL_EN, SDHC_BUFFER_READ_READY);
1466 	HSET2(hp, SDHC_NINTR_STATUS_EN, SDHC_BUFFER_READ_READY);
1467 
1468 	/* disable DMA */
1469 	HCLR1(hp, SDHC_HOST_CTL, SDHC_DMA_SELECT);
1470 
1471 	/* reset tuning circuit */
1472 	HCLR2(hp, SDHC_HOST_CTL2, SDHC_SAMPLING_CLOCK_SEL);
1473 
1474 	/* start of tuning */
1475 	HWRITE2(hp, SDHC_HOST_CTL2, SDHC_EXECUTE_TUNING);
1476 
1477 	do {
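	/*
	 * Send tuning blocks (CMD19, or CMD21 for HS200) until the
	 * controller clears SDHC_EXECUTE_TUNING to signal completion,
	 * giving up after roughly 40 attempts.
	 */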
1478 		memset(&cmd, 0, sizeof(cmd));
1479 		cmd.c_opcode = opcode;
1480 		cmd.c_arg = 0;
1481 		cmd.c_flags = SCF_CMD_ADTC | SCF_CMD_READ | SCF_RSP_R1;
1482 		if (ISSET(hostctl, SDHC_8BIT_MODE)) {
1483 			cmd.c_blklen = cmd.c_datalen = 128;
1484 		} else {
1485 			cmd.c_blklen = cmd.c_datalen = 64;
1486 		}
1487 
1488 		error = sdhc_start_command(hp, &cmd);
1489 		if (error)
1490 			break;
1491 
1492 		if (!sdhc_wait_intr(hp, SDHC_BUFFER_READ_READY,
1493 		    SDHC_TUNING_TIMEOUT, false)) {
1494 			break;
1495 		}
1496 
1497 		delay(1000);
1498 	} while (HREAD2(hp, SDHC_HOST_CTL2) & SDHC_EXECUTE_TUNING && --retry);
1499 
1500 	/* disable buffer read ready interrupt */
1501 	HCLR2(hp, SDHC_NINTR_SIGNAL_EN, SDHC_BUFFER_READ_READY);
1502 	HCLR2(hp, SDHC_NINTR_STATUS_EN, SDHC_BUFFER_READ_READY);
1503 
1504 	if (HREAD2(hp, SDHC_HOST_CTL2) & SDHC_EXECUTE_TUNING) {
1505 		HCLR2(hp, SDHC_HOST_CTL2,
1506 		    SDHC_SAMPLING_CLOCK_SEL|SDHC_EXECUTE_TUNING);
1507 		sdhc_soft_reset(hp, SDHC_RESET_DAT|SDHC_RESET_CMD);
1508 		aprint_error_dev(hp->sc->sc_dev,
1509 		    "tuning did not complete, using fixed sampling clock\n");
1510 		return 0;		/* tuning did not complete */
1511 	}
1512 
1513 	if ((HREAD2(hp, SDHC_HOST_CTL2) & SDHC_SAMPLING_CLOCK_SEL) == 0) {
1514 		HCLR2(hp, SDHC_HOST_CTL2,
1515 		    SDHC_SAMPLING_CLOCK_SEL|SDHC_EXECUTE_TUNING);
1516 		sdhc_soft_reset(hp, SDHC_RESET_DAT|SDHC_RESET_CMD);
1517 		aprint_error_dev(hp->sc->sc_dev,
1518 		    "tuning failed, using fixed sampling clock\n");
1519 		return 0;		/* tuning failed */
1520 	}
1521 
1522 	if (hp->tuning_timer_count) {
1523 		callout_schedule(&hp->tuning_timer,
1524 		    hz * hp->tuning_timer_count);
1525 	}
1526 
1527 	return 0;		/* tuning completed */
1528 }
1529 
1530 static int
1531 sdhc_execute_tuning(sdmmc_chipset_handle_t sch, int timing)
1532 {
1533 	struct sdhc_host *hp = (struct sdhc_host *)sch;
1534 	int error;
1535 
1536 	mutex_enter(&hp->intr_lock);
1537 	error = sdhc_execute_tuning1(hp, timing);
1538 	mutex_exit(&hp->intr_lock);
1539 	return error;
1540 }
1541 
1542 static void
1543 sdhc_tuning_timer(void *arg)
1544 {
1545 	struct sdhc_host *hp = arg;
1546 
1547 	atomic_swap_uint(&hp->tuning_timer_pending, 1);
1548 }
1549 
1550 static void
1551 sdhc_hw_reset(sdmmc_chipset_handle_t sch)
1552 {
1553 	struct sdhc_host *hp = (struct sdhc_host *)sch;
1554 	struct sdhc_softc *sc = hp->sc;
1555 
1556 	if (sc->sc_vendor_hw_reset != NULL)
1557 		sc->sc_vendor_hw_reset(sc, hp);
1558 }
1559 
1560 static int
1561 sdhc_wait_state(struct sdhc_host *hp, uint32_t mask, uint32_t value)
1562 {
1563 	uint32_t state;
1564 	int timeout;
1565 
1566 	for (timeout = 100000; timeout > 0; timeout--) {
1567 		if (((state = HREAD4(hp, SDHC_PRESENT_STATE)) & mask) == value)
1568 			return 0;
1569 		sdmmc_delay(10);
1570 	}
1571 	aprint_error_dev(hp->sc->sc_dev, "timeout waiting for mask %#x value %#x (state=%#x)\n",
1572 	    mask, value, state);
1573 	return ETIMEDOUT;
1574 }
1575 
1576 static void
1577 sdhc_exec_command(sdmmc_chipset_handle_t sch, struct sdmmc_command *cmd)
1578 {
1579 	struct sdhc_host *hp = (struct sdhc_host *)sch;
1580 	int error;
1581 	bool probing;
1582 
1583 	mutex_enter(&hp->intr_lock);
1584 
1585 	if (atomic_cas_uint(&hp->tuning_timer_pending, 1, 0) == 1) {
1586 		(void)sdhc_execute_tuning1(hp, hp->tuning_timing);
1587 	}
1588 
1589 	if (cmd->c_data &&
1590 	    ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED | SDHC_FLAG_USDHC)) {
1591 		const uint16_t ready = SDHC_BUFFER_READ_READY | SDHC_BUFFER_WRITE_READY;
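		/*
		 * On eSDHC/uSDHC the buffer-ready interrupts are only useful
		 * for PIO; mask them when DMA will move the data and enable
		 * them otherwise.
		 */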
1592 		if (ISSET(hp->flags, SHF_USE_DMA)) {
1593 			HCLR2(hp, SDHC_NINTR_SIGNAL_EN, ready);
1594 			HCLR2(hp, SDHC_NINTR_STATUS_EN, ready);
1595 		} else {
1596 			HSET2(hp, SDHC_NINTR_SIGNAL_EN, ready);
1597 			HSET2(hp, SDHC_NINTR_STATUS_EN, ready);
1598 		}
1599 	}
1600 
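	/*
	 * For controllers with the NO_TIMEOUT quirk, suppress the command
	 * timeout error interrupt while a data command is outstanding and
	 * re-enable it for command-only transactions.
	 */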
1601 	if (ISSET(hp->sc->sc_flags, SDHC_FLAG_NO_TIMEOUT)) {
1602 		const uint16_t eintr = SDHC_CMD_TIMEOUT_ERROR;
1603 		if (cmd->c_data != NULL) {
1604 			HCLR2(hp, SDHC_EINTR_SIGNAL_EN, eintr);
1605 			HCLR2(hp, SDHC_EINTR_STATUS_EN, eintr);
1606 		} else {
1607 			HSET2(hp, SDHC_EINTR_SIGNAL_EN, eintr);
1608 			HSET2(hp, SDHC_EINTR_STATUS_EN, eintr);
1609 		}
1610 	}
1611 
1612 	if (ISSET(hp->sc->sc_flags, SDHC_FLAG_STOP_WITH_TC)) {
1613 		if (cmd->c_opcode == MMC_STOP_TRANSMISSION)
1614 			SET(cmd->c_flags, SCF_RSP_BSY);
1615 	}
1616 
1617 	/*
1618 	 * Start the MMC command, or mark `cmd' as failed and return.
1619 	 */
1620 	error = sdhc_start_command(hp, cmd);
1621 	if (error) {
1622 		cmd->c_error = error;
1623 		goto out;
1624 	}
1625 
1626 	/*
1627 	 * Wait until the command phase is done, or until the command
1628 	 * is marked done for any other reason.
1629 	 */
1630 	probing = (cmd->c_flags & SCF_TOUT_OK) != 0;
1631 	if (!sdhc_wait_intr(hp, SDHC_COMMAND_COMPLETE, SDHC_COMMAND_TIMEOUT*3, probing)) {
1632 		DPRINTF(1,("%s: timeout for command\n", __func__));
1633 		sdmmc_delay(50);
1634 		cmd->c_error = ETIMEDOUT;
1635 		goto out;
1636 	}
1637 
1638 	/*
1639 	 * The host controller removes bits [0:7] from the response
1640 	 * data (CRC) and we pass the data up unchanged to the bus
1641 	 * driver (without padding).
1642 	 */
1643 	if (cmd->c_error == 0 && ISSET(cmd->c_flags, SCF_RSP_PRESENT)) {
1644 		cmd->c_resp[0] = HREAD4(hp, SDHC_RESPONSE + 0);
1645 		if (ISSET(cmd->c_flags, SCF_RSP_136)) {
1646 			cmd->c_resp[1] = HREAD4(hp, SDHC_RESPONSE + 4);
1647 			cmd->c_resp[2] = HREAD4(hp, SDHC_RESPONSE + 8);
1648 			cmd->c_resp[3] = HREAD4(hp, SDHC_RESPONSE + 12);
1649 			if (ISSET(hp->sc->sc_flags, SDHC_FLAG_RSP136_CRC)) {
1650 				cmd->c_resp[0] = (cmd->c_resp[0] >> 8) |
1651 				    (cmd->c_resp[1] << 24);
1652 				cmd->c_resp[1] = (cmd->c_resp[1] >> 8) |
1653 				    (cmd->c_resp[2] << 24);
1654 				cmd->c_resp[2] = (cmd->c_resp[2] >> 8) |
1655 				    (cmd->c_resp[3] << 24);
1656 				cmd->c_resp[3] = (cmd->c_resp[3] >> 8);
1657 			}
1658 		}
1659 	}
1660 	DPRINTF(1,("%s: resp = %08x\n", HDEVNAME(hp), cmd->c_resp[0]));
1661 
1662 	/*
1663 	 * If the command has data to transfer in any direction,
1664 	 * execute the transfer now.
1665 	 */
1666 	if (cmd->c_error == 0 && cmd->c_data != NULL)
1667 		sdhc_transfer_data(hp, cmd);
1668 	else if (ISSET(cmd->c_flags, SCF_RSP_BSY)) {
1669 		if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_NO_BUSY_INTR) &&
1670 		    !sdhc_wait_intr(hp, SDHC_TRANSFER_COMPLETE, hz * 10, false)) {
1671 			DPRINTF(1,("%s: sdhc_exec_command: RSP_BSY\n",
1672 			    HDEVNAME(hp)));
1673 			cmd->c_error = ETIMEDOUT;
1674 			goto out;
1675 		}
1676 	}
1677 
1678 out:
1679 	if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED)
1680 	    && !ISSET(hp->sc->sc_flags, SDHC_FLAG_NO_LED_ON)) {
1681 		/* Turn off the LED. */
1682 		HCLR1(hp, SDHC_HOST_CTL, SDHC_LED_ON);
1683 	}
1684 	SET(cmd->c_flags, SCF_ITSDONE);
1685 
1686 	if (ISSET(hp->sc->sc_flags, SDHC_FLAG_NO_AUTO_STOP) &&
1687 	    cmd->c_opcode == MMC_STOP_TRANSMISSION)
1688 		(void)sdhc_soft_reset(hp, SDHC_RESET_CMD|SDHC_RESET_DAT);
1689 
1690 	mutex_exit(&hp->intr_lock);
1691 
1692 	DPRINTF(1,("%s: cmd %d %s (flags=%08x error=%d)\n", HDEVNAME(hp),
1693 	    cmd->c_opcode, (cmd->c_error == 0) ? "done" : "abort",
1694 	    cmd->c_flags, cmd->c_error));
1695 }
1696 
1697 static int
1698 sdhc_start_command(struct sdhc_host *hp, struct sdmmc_command *cmd)
1699 {
1700 	struct sdhc_softc * const sc = hp->sc;
1701 	uint16_t blksize = 0;
1702 	uint16_t blkcount = 0;
1703 	uint16_t mode;
1704 	uint16_t command;
1705 	uint32_t pmask;
1706 	int error;
1707 
1708 	KASSERT(mutex_owned(&hp->intr_lock));
1709 
1710 	DPRINTF(1,("%s: start cmd %d arg=%08x data=%p dlen=%d flags=%08x, status=%#x\n",
1711 	    HDEVNAME(hp), cmd->c_opcode, cmd->c_arg, cmd->c_data,
1712 	    cmd->c_datalen, cmd->c_flags, HREAD4(hp, SDHC_NINTR_STATUS)));
1713 
1714 	/*
1715 	 * The maximum block length for commands should be the minimum
1716 	 * of the host buffer size and the card buffer size. (1.7.2)
1717 	 */
1718 
1719 	/* Fragment the data into proper blocks. */
1720 	if (cmd->c_datalen > 0) {
1721 		blksize = MIN(cmd->c_datalen, cmd->c_blklen);
1722 		blkcount = cmd->c_datalen / blksize;
1723 		if (cmd->c_datalen % blksize > 0) {
1724 			/* XXX: Split this command. (1.7.4) */
1725 			aprint_error_dev(sc->sc_dev,
1726 			    "data not a multiple of %u bytes\n", blksize);
1727 			return EINVAL;
1728 		}
1729 	}
1730 
1731 	/* Check limit imposed by 9-bit block count. (1.7.2) */
1732 	if (blkcount > SDHC_BLOCK_COUNT_MAX) {
1733 		aprint_error_dev(sc->sc_dev, "too much data\n");
1734 		return EINVAL;
1735 	}
1736 
1737 	/* Prepare transfer mode register value. (2.2.5) */
1738 	mode = 0;
1739 	if (ISSET(cmd->c_flags, SCF_CMD_READ))
1740 		mode |= SDHC_READ_MODE;
1741 	if (blkcount > 0) {
1742 		mode |= SDHC_BLOCK_COUNT_ENABLE;
1743 		if (blkcount > 1) {
1744 			mode |= SDHC_MULTI_BLOCK_MODE;
1745 			if (!ISSET(sc->sc_flags, SDHC_FLAG_NO_AUTO_STOP)
1746 			    && !ISSET(cmd->c_flags, SCF_NO_STOP))
1747 				mode |= SDHC_AUTO_CMD12_ENABLE;
1748 		}
1749 	}
1750 	if (cmd->c_dmamap != NULL && cmd->c_datalen > 0 &&
1751 	    ISSET(hp->flags,  SHF_MODE_DMAEN)) {
1752 		mode |= SDHC_DMA_ENABLE;
1753 	}
1754 
1755 	/*
1756 	 * Prepare command register value. (2.2.6)
1757 	 */
1758 	command = (cmd->c_opcode & SDHC_COMMAND_INDEX_MASK) << SDHC_COMMAND_INDEX_SHIFT;
1759 
1760 	if (ISSET(cmd->c_flags, SCF_RSP_CRC))
1761 		command |= SDHC_CRC_CHECK_ENABLE;
1762 	if (ISSET(cmd->c_flags, SCF_RSP_IDX))
1763 		command |= SDHC_INDEX_CHECK_ENABLE;
1764 	if (cmd->c_datalen > 0)
1765 		command |= SDHC_DATA_PRESENT_SELECT;
1766 
1767 	if (!ISSET(cmd->c_flags, SCF_RSP_PRESENT))
1768 		command |= SDHC_NO_RESPONSE;
1769 	else if (ISSET(cmd->c_flags, SCF_RSP_136))
1770 		command |= SDHC_RESP_LEN_136;
1771 	else if (ISSET(cmd->c_flags, SCF_RSP_BSY))
1772 		command |= SDHC_RESP_LEN_48_CHK_BUSY;
1773 	else
1774 		command |= SDHC_RESP_LEN_48;
1775 
1776 	/* Wait until command and optionally data inhibit bits are clear. (1.5) */
1777 	pmask = SDHC_CMD_INHIBIT_CMD;
1778 	if (cmd->c_flags & (SCF_CMD_ADTC|SCF_RSP_BSY))
1779 		pmask |= SDHC_CMD_INHIBIT_DAT;
1780 	error = sdhc_wait_state(hp, pmask, 0);
1781 	if (error) {
1782 		(void) sdhc_soft_reset(hp, SDHC_RESET_DAT|SDHC_RESET_CMD);
1783 		device_printf(sc->sc_dev, "command or data phase inhibited\n");
1784 		return error;
1785 	}
1786 
1787 	DPRINTF(1,("%s: writing cmd: blksize=%d blkcnt=%d mode=%04x cmd=%04x\n",
1788 	    HDEVNAME(hp), blksize, blkcount, mode, command));
1789 
1790 	if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED | SDHC_FLAG_USDHC)) {
1791 		blksize |= (MAX(0, PAGE_SHIFT - 12) & SDHC_DMA_BOUNDARY_MASK) <<
1792 		    SDHC_DMA_BOUNDARY_SHIFT;	/* PAGE_SIZE DMA boundary */
1793 	}
1794 
1795 	if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED)) {
1796 		/* Alert the user not to remove the card. */
1797 		HSET1(hp, SDHC_HOST_CTL, SDHC_LED_ON);
1798 	}
1799 
1800 	/* Set DMA start address. */
1801 	if (ISSET(hp->flags, SHF_USE_ADMA2_MASK) && cmd->c_data != NULL) {
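		/*
		 * Build one ADMA2 "transfer data" descriptor per DMA
		 * segment.  In the ADMA2 format a 16-bit length of 0
		 * stands for 65536 bytes and the final descriptor carries
		 * the END attribute, e.g. for a two-segment map:
		 *
		 *	desc[0] = { VALID|ACT_TRANS,      len0, addr0 }
		 *	desc[1] = { VALID|ACT_TRANS|END,  len1, addr1 }
		 */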
1802 		for (int seg = 0; seg < cmd->c_dmamap->dm_nsegs; seg++) {
1803 			bus_addr_t paddr =
1804 			    cmd->c_dmamap->dm_segs[seg].ds_addr;
1805 			uint16_t len =
1806 			    cmd->c_dmamap->dm_segs[seg].ds_len == 65536 ?
1807 			    0 : cmd->c_dmamap->dm_segs[seg].ds_len;
1808 			uint16_t attr =
1809 			    SDHC_ADMA2_VALID | SDHC_ADMA2_ACT_TRANS;
1810 			if (seg == cmd->c_dmamap->dm_nsegs - 1) {
1811 				attr |= SDHC_ADMA2_END;
1812 			}
1813 			if (ISSET(hp->flags, SHF_USE_ADMA2_32)) {
1814 				struct sdhc_adma2_descriptor32 *desc =
1815 				    hp->adma2;
1816 				desc[seg].attribute = htole16(attr);
1817 				desc[seg].length = htole16(len);
1818 				desc[seg].address = htole32(paddr);
1819 			} else {
1820 				struct sdhc_adma2_descriptor64 *desc =
1821 				    hp->adma2;
1822 				desc[seg].attribute = htole16(attr);
1823 				desc[seg].length = htole16(len);
1824 				desc[seg].address = htole32(paddr & 0xffffffff);
1825 				desc[seg].address_hi = htole32(
1826 				    (uint64_t)paddr >> 32);
1827 			}
1828 		}
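		/*
		 * Terminate the table: the entry after the last segment
		 * gets a zero (invalid) attribute word.
		 */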
1829 		if (ISSET(hp->flags, SHF_USE_ADMA2_32)) {
1830 			struct sdhc_adma2_descriptor32 *desc = hp->adma2;
1831 			desc[cmd->c_dmamap->dm_nsegs].attribute = htole16(0);
1832 		} else {
1833 			struct sdhc_adma2_descriptor64 *desc = hp->adma2;
1834 			desc[cmd->c_dmamap->dm_nsegs].attribute = htole16(0);
1835 		}
1836 		bus_dmamap_sync(sc->sc_dmat, hp->adma_map, 0, PAGE_SIZE,
1837 		    BUS_DMASYNC_PREWRITE);
1838 
1839 		const bus_addr_t desc_addr = hp->adma_map->dm_segs[0].ds_addr;
1840 		HWRITE4(hp, SDHC_ADMA_SYSTEM_ADDR, desc_addr & 0xffffffff);
1841 		if (ISSET(hp->flags, SHF_USE_ADMA2_64)) {
1842 			HWRITE4(hp, SDHC_ADMA_SYSTEM_ADDR + 4,
1843 			    (uint64_t)desc_addr >> 32);
1844 		}
1845 
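		/* Point the controller's DMA select bits at ADMA2. */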
1846 		if (ISSET(hp->sc->sc_flags, SDHC_FLAG_USDHC)) {
1847 			HCLR4(hp, SDHC_HOST_CTL, SDHC_USDHC_DMA_SELECT);
1848 			HSET4(hp, SDHC_HOST_CTL, SDHC_USDHC_DMA_SELECT_ADMA2);
1849 		} else {
1850 			HCLR1(hp, SDHC_HOST_CTL, SDHC_DMA_SELECT);
1851 			HSET1(hp, SDHC_HOST_CTL, SDHC_DMA_SELECT_ADMA2);
1852 		}
1853 	} else if (ISSET(mode, SDHC_DMA_ENABLE) &&
1854 	    !ISSET(sc->sc_flags, SDHC_FLAG_EXTERNAL_DMA)) {
1855 		if (ISSET(hp->sc->sc_flags, SDHC_FLAG_USDHC)) {
1856 			HCLR4(hp, SDHC_HOST_CTL, SDHC_USDHC_DMA_SELECT);
1857 		}
1858 		HWRITE4(hp, SDHC_DMA_ADDR, cmd->c_dmamap->dm_segs[0].ds_addr);
1859 	}
1860 
1861 	/*
1862 	 * Start a CPU data transfer.  Writing to the high order byte
1863 	 * of the SDHC_COMMAND register triggers the SD command. (1.5)
1864 	 */
1865 	if (ISSET(hp->sc->sc_flags, SDHC_FLAG_32BIT_ACCESS)) {
1866 		HWRITE4(hp, SDHC_BLOCK_SIZE, blksize | (blkcount << 16));
1867 		HWRITE4(hp, SDHC_ARGUMENT, cmd->c_arg);
1868 		if (ISSET(hp->sc->sc_flags, SDHC_FLAG_USDHC)) {
1869 			/* mode bits is in MIX_CTRL register on uSDHC */
1870 			/* mode bits are in the MIX_CTRL register on uSDHC */
1871 			    (HREAD4(hp, SDHC_MIX_CTRL) & ~SDHC_TRANSFER_MODE_MASK));
1872 			if (cmd->c_opcode == MMC_STOP_TRANSMISSION)
1873 				command |= SDHC_COMMAND_TYPE_ABORT;
1874 			HWRITE4(hp, SDHC_TRANSFER_MODE, command << 16);
1875 		} else {
1876 			HWRITE4(hp, SDHC_TRANSFER_MODE, mode | (command << 16));
1877 		}
1878 	} else {
1879 		HWRITE2(hp, SDHC_BLOCK_SIZE, blksize);
1880 		HWRITE2(hp, SDHC_BLOCK_COUNT, blkcount);
1881 		HWRITE4(hp, SDHC_ARGUMENT, cmd->c_arg);
1882 		HWRITE2(hp, SDHC_TRANSFER_MODE, mode);
1883 		HWRITE2(hp, SDHC_COMMAND, command);
1884 	}
1885 
1886 	return 0;
1887 }
1888 
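/*
 * Complete the data phase of a command: by DMA when the command carries
 * a dmamap (using the vendor hook if one is provided), otherwise by
 * programmed I/O.
 */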
1889 static void
1890 sdhc_transfer_data(struct sdhc_host *hp, struct sdmmc_command *cmd)
1891 {
1892 	struct sdhc_softc *sc = hp->sc;
1893 	int error;
1894 
1895 	KASSERT(mutex_owned(&hp->intr_lock));
1896 
1897 	DPRINTF(1,("%s: data transfer: resp=%08x datalen=%u\n", HDEVNAME(hp),
1898 	    MMC_R1(cmd->c_resp), cmd->c_datalen));
1899 
1900 #ifdef SDHC_DEBUG
1901 	/* XXX I forgot why I wanted to know when this happens :-( */
1902 	if ((cmd->c_opcode == 52 || cmd->c_opcode == 53) &&
1903 	    ISSET(MMC_R1(cmd->c_resp), 0xcb00)) {
1904 		aprint_error_dev(hp->sc->sc_dev,
1905 		    "CMD52/53 error response flags %#x\n",
1906 		    MMC_R1(cmd->c_resp) & 0xff00);
1907 	}
1908 #endif
1909 
1910 	if (cmd->c_dmamap != NULL) {
1911 		if (hp->sc->sc_vendor_transfer_data_dma != NULL) {
1912 			error = hp->sc->sc_vendor_transfer_data_dma(sc, cmd);
1913 			if (error == 0 && !sdhc_wait_intr(hp,
1914 			    SDHC_TRANSFER_COMPLETE, SDHC_DMA_TIMEOUT, false)) {
1915 				DPRINTF(1,("%s: timeout\n", __func__));
1916 				error = ETIMEDOUT;
1917 			}
1918 		} else {
1919 			error = sdhc_transfer_data_dma(hp, cmd);
1920 		}
1921 	} else
1922 		error = sdhc_transfer_data_pio(hp, cmd);
1923 	if (error)
1924 		cmd->c_error = error;
1925 	SET(cmd->c_flags, SCF_ITSDONE);
1926 
1927 	DPRINTF(1,("%s: data transfer done (error=%d)\n",
1928 	    HDEVNAME(hp), cmd->c_error));
1929 }
1930 
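/*
 * Wait for a DMA transfer to finish, restarting the controller at each
 * SDMA buffer-boundary interrupt.  ADMA2 transfers need no intervention,
 * so for those we simply wait for the transfer-complete interrupt.
 */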
1931 static int
1932 sdhc_transfer_data_dma(struct sdhc_host *hp, struct sdmmc_command *cmd)
1933 {
1934 	bus_dma_segment_t *dm_segs = cmd->c_dmamap->dm_segs;
1935 	bus_addr_t posaddr;
1936 	bus_addr_t segaddr;
1937 	bus_size_t seglen;
1938 	u_int seg = 0;
1939 	int error = 0;
1940 	int status;
1941 
1942 	KASSERT(mutex_owned(&hp->intr_lock));
1943 	KASSERT(HREAD2(hp, SDHC_NINTR_STATUS_EN) & SDHC_DMA_INTERRUPT);
1944 	KASSERT(HREAD2(hp, SDHC_NINTR_SIGNAL_EN) & SDHC_DMA_INTERRUPT);
1945 	KASSERT(HREAD2(hp, SDHC_NINTR_STATUS_EN) & SDHC_TRANSFER_COMPLETE);
1946 	KASSERT(HREAD2(hp, SDHC_NINTR_SIGNAL_EN) & SDHC_TRANSFER_COMPLETE);
1947 
1948 	for (;;) {
1949 		status = sdhc_wait_intr(hp,
1950 		    SDHC_DMA_INTERRUPT|SDHC_TRANSFER_COMPLETE,
1951 		    SDHC_DMA_TIMEOUT, false);
1952 
1953 		if (status & SDHC_TRANSFER_COMPLETE) {
1954 			break;
1955 		}
1956 		if (!status) {
1957 			DPRINTF(1,("%s: timeout\n", __func__));
1958 			error = ETIMEDOUT;
1959 			break;
1960 		}
1961 
1962 		if (ISSET(hp->flags, SHF_USE_ADMA2_MASK)) {
1963 			continue;
1964 		}
1965 
1966 		if ((status & SDHC_DMA_INTERRUPT) == 0) {
1967 			continue;
1968 		}
1969 
1970 		/* DMA Interrupt (boundary crossing) */
1971 
1972 		segaddr = dm_segs[seg].ds_addr;
1973 		seglen = dm_segs[seg].ds_len;
1974 		posaddr = HREAD4(hp, SDHC_DMA_ADDR);
1975 
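		/*
		 * If the controller stopped inside the current segment,
		 * rewrite the system address to resume; if it stopped
		 * exactly at the end of a segment, continue with the next
		 * one.  At the end of the last segment there is nothing
		 * left to do.
		 */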
1976 		if ((seg == (cmd->c_dmamap->dm_nsegs-1)) && (posaddr == (segaddr + seglen))) {
1977 			continue;
1978 		}
1979 		if ((posaddr >= segaddr) && (posaddr < (segaddr + seglen)))
1980 			HWRITE4(hp, SDHC_DMA_ADDR, posaddr);
1981 		else if ((posaddr >= segaddr) && (posaddr == (segaddr + seglen)) && (seg + 1) < cmd->c_dmamap->dm_nsegs)
1982 			HWRITE4(hp, SDHC_DMA_ADDR, dm_segs[++seg].ds_addr);
1983 		KASSERT(seg < cmd->c_dmamap->dm_nsegs);
1984 	}
1985 
1986 	if (ISSET(hp->flags, SHF_USE_ADMA2_MASK)) {
1987 		bus_dmamap_sync(hp->sc->sc_dmat, hp->adma_map, 0,
1988 		    PAGE_SIZE, BUS_DMASYNC_POSTWRITE);
1989 	}
1990 
1991 	return error;
1992 }
1993 
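/*
 * Move the command's data through the controller's buffer port with
 * programmed I/O, one block at a time, waiting for the buffer to become
 * ready between blocks.
 */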
1994 static int
1995 sdhc_transfer_data_pio(struct sdhc_host *hp, struct sdmmc_command *cmd)
1996 {
1997 	uint8_t *data = cmd->c_data;
1998 	void (*pio_func)(struct sdhc_host *, uint8_t *, u_int);
1999 	u_int len, datalen;
2000 	u_int imask;
2001 	u_int pmask;
2002 	int error = 0;
2003 
2004 	KASSERT(mutex_owned(&hp->intr_lock));
2005 
2006 	if (ISSET(cmd->c_flags, SCF_CMD_READ)) {
2007 		imask = SDHC_BUFFER_READ_READY;
2008 		pmask = SDHC_BUFFER_READ_ENABLE;
2009 		if (ISSET(hp->sc->sc_flags,
2010 		    SDHC_FLAG_ENHANCED | SDHC_FLAG_USDHC)) {
2011 			pio_func = esdhc_read_data_pio;
2012 		} else {
2013 			pio_func = sdhc_read_data_pio;
2014 		}
2015 	} else {
2016 		imask = SDHC_BUFFER_WRITE_READY;
2017 		pmask = SDHC_BUFFER_WRITE_ENABLE;
2018 		if (ISSET(hp->sc->sc_flags,
2019 		    SDHC_FLAG_ENHANCED | SDHC_FLAG_USDHC)) {
2020 			pio_func = esdhc_write_data_pio;
2021 		} else {
2022 			pio_func = sdhc_write_data_pio;
2023 		}
2024 	}
2025 	datalen = cmd->c_datalen;
2026 
2027 	KASSERT(mutex_owned(&hp->intr_lock));
2028 	KASSERT(HREAD2(hp, SDHC_NINTR_STATUS_EN) & imask);
2029 	KASSERT(HREAD2(hp, SDHC_NINTR_STATUS_EN) & SDHC_TRANSFER_COMPLETE);
2030 	KASSERT(HREAD2(hp, SDHC_NINTR_SIGNAL_EN) & SDHC_TRANSFER_COMPLETE);
2031 
2032 	while (datalen > 0) {
2033 		if (!ISSET(HREAD4(hp, SDHC_PRESENT_STATE), pmask)) {
2034 			if (ISSET(hp->sc->sc_flags, SDHC_FLAG_32BIT_ACCESS)) {
2035 				HSET4(hp, SDHC_NINTR_SIGNAL_EN, imask);
2036 			} else {
2037 				HSET2(hp, SDHC_NINTR_SIGNAL_EN, imask);
2038 			}
2039 			if (!sdhc_wait_intr(hp, imask, SDHC_BUFFER_TIMEOUT, false)) {
2040 				DPRINTF(1,("%s: timeout\n", __func__));
2041 				error = ETIMEDOUT;
2042 				break;
2043 			}
2044 
2045 			error = sdhc_wait_state(hp, pmask, pmask);
2046 			if (error)
2047 				break;
2048 		}
2049 
2050 		len = MIN(datalen, cmd->c_blklen);
2051 		(*pio_func)(hp, data, len);
2052 		DPRINTF(2,("%s: pio data transfer %u @ %p\n",
2053 		    HDEVNAME(hp), len, data));
2054 
2055 		data += len;
2056 		datalen -= len;
2057 	}
2058 
2059 	if (error == 0 && !sdhc_wait_intr(hp, SDHC_TRANSFER_COMPLETE,
2060 	    SDHC_TRANSFER_TIMEOUT, false)) {
2061 		DPRINTF(1,("%s: timeout for transfer\n", __func__));
2062 		error = ETIMEDOUT;
2063 	}
2064 
2065 	return error;
2066 }
2067 
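/*
 * Drain the read buffer port into the caller's buffer, using the widest
 * register access the buffer's alignment allows.
 */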
2068 static void
2069 sdhc_read_data_pio(struct sdhc_host *hp, uint8_t *data, u_int datalen)
2070 {
2071 
2072 	if (((__uintptr_t)data & 3) == 0) {
2073 		while (datalen > 3) {
2074 			*(uint32_t *)data = le32toh(HREAD4(hp, SDHC_DATA));
2075 			data += 4;
2076 			datalen -= 4;
2077 		}
2078 		if (datalen > 1) {
2079 			*(uint16_t *)data = le16toh(HREAD2(hp, SDHC_DATA));
2080 			data += 2;
2081 			datalen -= 2;
2082 		}
2083 		if (datalen > 0) {
2084 			*data = HREAD1(hp, SDHC_DATA);
2085 			data += 1;
2086 			datalen -= 1;
2087 		}
2088 	} else if (((__uintptr_t)data & 1) == 0) {
2089 		while (datalen > 1) {
2090 			*(uint16_t *)data = le16toh(HREAD2(hp, SDHC_DATA));
2091 			data += 2;
2092 			datalen -= 2;
2093 		}
2094 		if (datalen > 0) {
2095 			*data = HREAD1(hp, SDHC_DATA);
2096 			data += 1;
2097 			datalen -= 1;
2098 		}
2099 	} else {
2100 		while (datalen > 0) {
2101 			*data = HREAD1(hp, SDHC_DATA);
2102 			data += 1;
2103 			datalen -= 1;
2104 		}
2105 	}
2106 }
2107 
2108 static void
2109 sdhc_write_data_pio(struct sdhc_host *hp, uint8_t *data, u_int datalen)
2110 {
2111 
2112 	if (((__uintptr_t)data & 3) == 0) {
2113 		while (datalen > 3) {
2114 			HWRITE4(hp, SDHC_DATA, htole32(*(uint32_t *)data));
2115 			data += 4;
2116 			datalen -= 4;
2117 		}
2118 		if (datalen > 1) {
2119 			HWRITE2(hp, SDHC_DATA, htole16(*(uint16_t *)data));
2120 			data += 2;
2121 			datalen -= 2;
2122 		}
2123 		if (datalen > 0) {
2124 			HWRITE1(hp, SDHC_DATA, *data);
2125 			data += 1;
2126 			datalen -= 1;
2127 		}
2128 	} else if (((__uintptr_t)data & 1) == 0) {
2129 		while (datalen > 1) {
2130 			HWRITE2(hp, SDHC_DATA, htole16(*(uint16_t *)data));
2131 			data += 2;
2132 			datalen -= 2;
2133 		}
2134 		if (datalen > 0) {
2135 			HWRITE1(hp, SDHC_DATA, *data);
2136 			data += 1;
2137 			datalen -= 1;
2138 		}
2139 	} else {
2140 		while (datalen > 0) {
2141 			HWRITE1(hp, SDHC_DATA, *data);
2142 			data += 1;
2143 			datalen -= 1;
2144 		}
2145 	}
2146 }
2147 
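/*
 * PIO read for enhanced (eSDHC/uSDHC) hosts: the data port is always
 * read 32 bits at a time and paced by the read watermark level.
 */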
2148 static void
2149 esdhc_read_data_pio(struct sdhc_host *hp, uint8_t *data, u_int datalen)
2150 {
2151 	uint16_t status = HREAD2(hp, SDHC_NINTR_STATUS);
2152 	uint32_t v;
2153 
2154 	const size_t watermark = (HREAD4(hp, SDHC_WATERMARK_LEVEL) >> SDHC_WATERMARK_READ_SHIFT) & SDHC_WATERMARK_READ_MASK;
2155 	size_t count = 0;
2156 
2157 	while (datalen > 3 && !ISSET(status, SDHC_TRANSFER_COMPLETE)) {
2158 		if (count == 0) {
2159 			/*
2160 			 * If we've drained "watermark" words, we need to wait
2161 			 * a little bit so the read FIFO can refill.
2162 			 */
2163 			sdmmc_delay(10);
2164 			count = watermark;
2165 		}
2166 		v = HREAD4(hp, SDHC_DATA);
2167 		v = le32toh(v);
2168 		*(uint32_t *)data = v;
2169 		data += 4;
2170 		datalen -= 4;
2171 		status = HREAD2(hp, SDHC_NINTR_STATUS);
2172 		count--;
2173 	}
2174 	if (datalen > 0 && !ISSET(status, SDHC_TRANSFER_COMPLETE)) {
2175 		if (count == 0) {
2176 			sdmmc_delay(10);
2177 		}
2178 		v = HREAD4(hp, SDHC_DATA);
2179 		v = le32toh(v);
2180 		do {
2181 			*data++ = v;
2182 			v >>= 8;
2183 		} while (--datalen > 0);
2184 	}
2185 }
2186 
2187 static void
2188 esdhc_write_data_pio(struct sdhc_host *hp, uint8_t *data, u_int datalen)
2189 {
2190 	uint16_t status = HREAD2(hp, SDHC_NINTR_STATUS);
2191 	uint32_t v;
2192 
2193 	const size_t watermark = (HREAD4(hp, SDHC_WATERMARK_LEVEL) >> SDHC_WATERMARK_WRITE_SHIFT) & SDHC_WATERMARK_WRITE_MASK;
2194 	size_t count = watermark;
2195 
2196 	while (datalen > 3 && !ISSET(status, SDHC_TRANSFER_COMPLETE)) {
2197 		if (count == 0) {
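			/*
			 * After pushing "watermark" words, pause briefly
			 * so the write FIFO can drain (mirrors the read
			 * path above).
			 */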
2198 			sdmmc_delay(10);
2199 			count = watermark;
2200 		}
2201 		v = *(uint32_t *)data;
2202 		v = htole32(v);
2203 		HWRITE4(hp, SDHC_DATA, v);
2204 		data += 4;
2205 		datalen -= 4;
2206 		status = HREAD2(hp, SDHC_NINTR_STATUS);
2207 		count--;
2208 	}
2209 	if (datalen > 0 && !ISSET(status, SDHC_TRANSFER_COMPLETE)) {
2210 		if (count == 0) {
2211 			sdmmc_delay(10);
2212 		}
2213 		v = *(uint32_t *)data;
2214 		v = htole32(v);
2215 		HWRITE4(hp, SDHC_DATA, v);
2216 	}
2217 }
2218 
2219 /* Prepare for another command. */
2220 static int
2221 sdhc_soft_reset(struct sdhc_host *hp, int mask)
2222 {
2223 	int timo;
2224 
2225 	KASSERT(mutex_owned(&hp->intr_lock));
2226 
2227 	DPRINTF(1,("%s: software reset reg=%08x\n", HDEVNAME(hp), mask));
2228 
2229 	/* Request the reset.  */
2230 	HWRITE1(hp, SDHC_SOFTWARE_RESET, mask);
2231 
2232 	/*
2233 	 * If necessary, wait for the controller to set the bits to
2234 	 * acknowledge the reset.
2235 	 */
2236 	if (ISSET(hp->sc->sc_flags, SDHC_FLAG_WAIT_RESET) &&
2237 	    ISSET(mask, (SDHC_RESET_DAT | SDHC_RESET_CMD))) {
2238 		for (timo = 10000; timo > 0; timo--) {
2239 			if (ISSET(HREAD1(hp, SDHC_SOFTWARE_RESET), mask))
2240 				break;
2241 			/* Short delay so we do not miss the acknowledgement. */
2242 			sdmmc_delay(1);
2243 		}
2244 		if (timo == 0) {
2245 			DPRINTF(1,("%s: timeout for reset on\n", __func__));
2246 			return ETIMEDOUT;
2247 		}
2248 	}
2249 
2250 	/*
2251 	 * Wait for the controller to clear the bits to indicate that
2252 	 * the reset has completed.
2253 	 */
2254 	for (timo = 10; timo > 0; timo--) {
2255 		if (!ISSET(HREAD1(hp, SDHC_SOFTWARE_RESET), mask))
2256 			break;
2257 		sdmmc_delay(10000);
2258 	}
2259 	if (timo == 0) {
2260 		DPRINTF(1,("%s: timeout reg=%08x\n", HDEVNAME(hp),
2261 		    HREAD1(hp, SDHC_SOFTWARE_RESET)));
2262 		return ETIMEDOUT;
2263 	}
2264 
2265 	if (ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED)) {
2266 		HSET4(hp, SDHC_DMA_CTL, SDHC_DMA_SNOOP);
2267 	}
2268 
2269 	return 0;
2270 }
2271 
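/*
 * Wait for one of the interrupt bits in "mask" to be posted by
 * sdhc_intr(), or for "timo" ticks to pass.  Returns the matched status
 * bits, or 0 on timeout or error.
 */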
2272 static int
2273 sdhc_wait_intr(struct sdhc_host *hp, int mask, int timo, bool probing)
2274 {
2275 	int status, error, nointr;
2276 
2277 	KASSERT(mutex_owned(&hp->intr_lock));
2278 
2279 	mask |= SDHC_ERROR_INTERRUPT;
2280 
2281 	nointr = 0;
2282 	status = hp->intr_status & mask;
2283 	while (status == 0) {
2284 		if (cv_timedwait(&hp->intr_cv, &hp->intr_lock, timo)
2285 		    == EWOULDBLOCK) {
2286 			nointr = 1;
2287 			break;
2288 		}
2289 		status = hp->intr_status & mask;
2290 	}
2291 	error = hp->intr_error_status;
2292 
2293 	DPRINTF(2,("%s: intr status %#x error %#x\n", HDEVNAME(hp), status,
2294 	    error));
2295 
2296 	hp->intr_status &= ~status;
2297 	hp->intr_error_status &= ~error;
2298 
2299 	if (ISSET(status, SDHC_ERROR_INTERRUPT)) {
2300 		if (ISSET(error, SDHC_DMA_ERROR))
2301 			device_printf(hp->sc->sc_dev,"dma error\n");
2302 		if (ISSET(error, SDHC_ADMA_ERROR))
2303 			device_printf(hp->sc->sc_dev,"adma error\n");
2304 		if (ISSET(error, SDHC_AUTO_CMD12_ERROR))
2305 			device_printf(hp->sc->sc_dev,"auto_cmd12 error\n");
2306 		if (ISSET(error, SDHC_CURRENT_LIMIT_ERROR))
2307 			device_printf(hp->sc->sc_dev,"current limit error\n");
2308 		if (ISSET(error, SDHC_DATA_END_BIT_ERROR))
2309 			device_printf(hp->sc->sc_dev,"data end bit error\n");
2310 		if (ISSET(error, SDHC_DATA_CRC_ERROR))
2311 			device_printf(hp->sc->sc_dev,"data crc error\n");
2312 		if (ISSET(error, SDHC_DATA_TIMEOUT_ERROR))
2313 			device_printf(hp->sc->sc_dev,"data timeout error\n");
2314 		if (ISSET(error, SDHC_CMD_INDEX_ERROR))
2315 			device_printf(hp->sc->sc_dev,"cmd index error\n");
2316 		if (ISSET(error, SDHC_CMD_END_BIT_ERROR))
2317 			device_printf(hp->sc->sc_dev,"cmd end bit error\n");
2318 		if (ISSET(error, SDHC_CMD_CRC_ERROR))
2319 			device_printf(hp->sc->sc_dev,"cmd crc error\n");
2320 		if (ISSET(error, SDHC_CMD_TIMEOUT_ERROR)) {
2321 			if (!probing)
2322 				device_printf(hp->sc->sc_dev,"cmd timeout error\n");
2323 #ifdef SDHC_DEBUG
2324 			else if (sdhcdebug > 0)
2325 				device_printf(hp->sc->sc_dev,"cmd timeout (expected)\n");
2326 #endif
2327 		}
2328 		if ((error & ~SDHC_EINTR_STATUS_MASK) != 0)
2329 			device_printf(hp->sc->sc_dev,"vendor error %#x\n",
2330 				(error & ~SDHC_EINTR_STATUS_MASK));
2331 		if (error == 0)
2332 			device_printf(hp->sc->sc_dev,"no error\n");
2333 
2334 		/* Command timeout has higher priority than command complete. */
2335 		if (ISSET(error, SDHC_CMD_TIMEOUT_ERROR))
2336 			CLR(status, SDHC_COMMAND_COMPLETE);
2337 
2338 		/* Transfer complete has higher priority than data timeout. */
2339 		if (ISSET(status, SDHC_TRANSFER_COMPLETE))
2340 			CLR(error, SDHC_DATA_TIMEOUT_ERROR);
2341 	}
2342 
2343 	if (nointr ||
2344 	    (ISSET(status, SDHC_ERROR_INTERRUPT) && error)) {
2345 		if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED))
2346 			(void)sdhc_soft_reset(hp, SDHC_RESET_CMD|SDHC_RESET_DAT);
2347 		hp->intr_error_status = 0;
2348 		status = 0;
2349 	}
2350 
2351 	return status;
2352 }
2353 
2354 /*
2355  * Established by the attachment driver at interrupt priority IPL_SDMMC.
2356  */
2357 int
2358 sdhc_intr(void *arg)
2359 {
2360 	struct sdhc_softc *sc = (struct sdhc_softc *)arg;
2361 	struct sdhc_host *hp;
2362 	int done = 0;
2363 	uint16_t status;
2364 	uint16_t error;
2365 
2366 	/* We got an interrupt, but we don't know from which slot. */
2367 	for (size_t host = 0; host < sc->sc_nhosts; host++) {
2368 		hp = sc->sc_host[host];
2369 		if (hp == NULL)
2370 			continue;
2371 
2372 		mutex_enter(&hp->intr_lock);
2373 
2374 		if (ISSET(sc->sc_flags, SDHC_FLAG_32BIT_ACCESS)) {
2375 			/* Find out which interrupts are pending. */
2376 			uint32_t xstatus = HREAD4(hp, SDHC_NINTR_STATUS);
2377 			status = xstatus;
2378 			error = xstatus >> 16;
2379 			if (ISSET(sc->sc_flags, SDHC_FLAG_USDHC) &&
2380 			    (xstatus & SDHC_TRANSFER_COMPLETE) &&
2381 			    !(xstatus & SDHC_DMA_INTERRUPT)) {
2382 				/* read again due to uSDHC errata */
2383 				status = xstatus = HREAD4(hp,
2384 				    SDHC_NINTR_STATUS);
2385 				error = xstatus >> 16;
2386 			}
2387 			if (ISSET(sc->sc_flags,
2388 			    SDHC_FLAG_ENHANCED | SDHC_FLAG_USDHC)) {
2389 				if ((error & SDHC_NINTR_STATUS_MASK) != 0)
2390 					SET(status, SDHC_ERROR_INTERRUPT);
2391 			}
2392 			if (error)
2393 				xstatus |= SDHC_ERROR_INTERRUPT;
2394 			else if (!ISSET(status, SDHC_NINTR_STATUS_MASK))
2395 				goto next_port; /* no interrupt for us */
2396 			/* Acknowledge the interrupts we are about to handle. */
2397 			HWRITE4(hp, SDHC_NINTR_STATUS, xstatus);
2398 		} else {
2399 			/* Find out which interrupts are pending. */
2400 			error = 0;
2401 			status = HREAD2(hp, SDHC_NINTR_STATUS);
2402 			if (!ISSET(status, SDHC_NINTR_STATUS_MASK))
2403 				goto next_port; /* no interrupt for us */
2404 			/* Acknowledge the interrupts we are about to handle. */
2405 			HWRITE2(hp, SDHC_NINTR_STATUS, status);
2406 			if (ISSET(status, SDHC_ERROR_INTERRUPT)) {
2407 				/* Acknowledge error interrupts. */
2408 				error = HREAD2(hp, SDHC_EINTR_STATUS);
2409 				HWRITE2(hp, SDHC_EINTR_STATUS, error);
2410 			}
2411 		}
2412 
2413 		DPRINTF(2,("%s: interrupt status=%x error=%x\n", HDEVNAME(hp),
2414 		    status, error));
2415 
2416 		/* Claim this interrupt. */
2417 		done = 1;
2418 
2419 		if (ISSET(status, SDHC_ERROR_INTERRUPT) &&
2420 		    ISSET(error, SDHC_ADMA_ERROR)) {
2421 			uint8_t adma_err = HREAD1(hp, SDHC_ADMA_ERROR_STATUS);
2422 			printf("%s: ADMA error, status %02x\n", HDEVNAME(hp),
2423 			    adma_err);
2424 		}
2425 
2426 		/*
2427 		 * Wake up the sdmmc event thread to scan for cards.
2428 		 */
2429 		if (ISSET(status, SDHC_CARD_REMOVAL|SDHC_CARD_INSERTION)) {
2430 			if (hp->sdmmc != NULL) {
2431 				sdmmc_needs_discover(hp->sdmmc);
2432 			}
2433 			if (ISSET(sc->sc_flags,
2434 			    SDHC_FLAG_ENHANCED | SDHC_FLAG_USDHC)) {
2435 				HCLR4(hp, SDHC_NINTR_STATUS_EN,
2436 				    status & (SDHC_CARD_REMOVAL|SDHC_CARD_INSERTION));
2437 				HCLR4(hp, SDHC_NINTR_SIGNAL_EN,
2438 				    status & (SDHC_CARD_REMOVAL|SDHC_CARD_INSERTION));
2439 			}
2440 		}
2441 
2442 		/*
2443 		 * Schedule re-tuning process (UHS).
2444 		 */
2445 		if (ISSET(status, SDHC_RETUNING_EVENT)) {
2446 			atomic_swap_uint(&hp->tuning_timer_pending, 1);
2447 		}
2448 
2449 		/*
2450 		 * Wake up the blocking process to service command
2451 		 * related interrupt(s).
2452 		 */
2453 		if (ISSET(status, SDHC_COMMAND_COMPLETE|SDHC_ERROR_INTERRUPT|
2454 		    SDHC_BUFFER_READ_READY|SDHC_BUFFER_WRITE_READY|
2455 		    SDHC_TRANSFER_COMPLETE|SDHC_DMA_INTERRUPT)) {
2456 			hp->intr_error_status |= error;
2457 			hp->intr_status |= status;
2458 			if (ISSET(sc->sc_flags,
2459 			    SDHC_FLAG_ENHANCED | SDHC_FLAG_USDHC)) {
2460 				HCLR4(hp, SDHC_NINTR_SIGNAL_EN,
2461 				    status & (SDHC_BUFFER_READ_READY|SDHC_BUFFER_WRITE_READY));
2462 			}
2463 			cv_broadcast(&hp->intr_cv);
2464 		}
2465 
2466 		/*
2467 		 * Service SD card interrupts.
2468 		 */
2469 		if (!ISSET(sc->sc_flags, SDHC_FLAG_ENHANCED | SDHC_FLAG_USDHC)
2470 		    && ISSET(status, SDHC_CARD_INTERRUPT)) {
2471 			DPRINTF(0,("%s: card interrupt\n", HDEVNAME(hp)));
2472 			HCLR2(hp, SDHC_NINTR_STATUS_EN, SDHC_CARD_INTERRUPT);
2473 			sdmmc_card_intr(hp->sdmmc);
2474 		}
2475 next_port:
2476 		mutex_exit(&hp->intr_lock);
2477 	}
2478 
2479 	return done;
2480 }
2481 
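/*
 * Small exported helpers giving attachment front-ends access to the
 * host's interrupt lock and raw register space.
 */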
2482 kmutex_t *
2483 sdhc_host_lock(struct sdhc_host *hp)
2484 {
2485 	return &hp->intr_lock;
2486 }
2487 
2488 uint8_t
2489 sdhc_host_read_1(struct sdhc_host *hp, int reg)
2490 {
2491 	return HREAD1(hp, reg);
2492 }
2493 
2494 uint16_t
2495 sdhc_host_read_2(struct sdhc_host *hp, int reg)
2496 {
2497 	return HREAD2(hp, reg);
2498 }
2499 
2500 uint32_t
2501 sdhc_host_read_4(struct sdhc_host *hp, int reg)
2502 {
2503 	return HREAD4(hp, reg);
2504 }
2505 
2506 void
2507 sdhc_host_write_1(struct sdhc_host *hp, int reg, uint8_t val)
2508 {
2509 	HWRITE1(hp, reg, val);
2510 }
2511 
2512 void
2513 sdhc_host_write_2(struct sdhc_host *hp, int reg, uint16_t val)
2514 {
2515 	HWRITE2(hp, reg, val);
2516 }
2517 
2518 void
2519 sdhc_host_write_4(struct sdhc_host *hp, int reg, uint32_t val)
2520 {
2521 	HWRITE4(hp, reg, val);
2522 }
2523 
2524 #ifdef SDHC_DEBUG
2525 void
2526 sdhc_dump_regs(struct sdhc_host *hp)
2527 {
2528 
2529 	printf("0x%02x PRESENT_STATE:    %x\n", SDHC_PRESENT_STATE,
2530 	    HREAD4(hp, SDHC_PRESENT_STATE));
2531 	if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED))
2532 		printf("0x%02x POWER_CTL:        %x\n", SDHC_POWER_CTL,
2533 		    HREAD1(hp, SDHC_POWER_CTL));
2534 	printf("0x%02x NINTR_STATUS:     %x\n", SDHC_NINTR_STATUS,
2535 	    HREAD2(hp, SDHC_NINTR_STATUS));
2536 	printf("0x%02x EINTR_STATUS:     %x\n", SDHC_EINTR_STATUS,
2537 	    HREAD2(hp, SDHC_EINTR_STATUS));
2538 	printf("0x%02x NINTR_STATUS_EN:  %x\n", SDHC_NINTR_STATUS_EN,
2539 	    HREAD2(hp, SDHC_NINTR_STATUS_EN));
2540 	printf("0x%02x EINTR_STATUS_EN:  %x\n", SDHC_EINTR_STATUS_EN,
2541 	    HREAD2(hp, SDHC_EINTR_STATUS_EN));
2542 	printf("0x%02x NINTR_SIGNAL_EN:  %x\n", SDHC_NINTR_SIGNAL_EN,
2543 	    HREAD2(hp, SDHC_NINTR_SIGNAL_EN));
2544 	printf("0x%02x EINTR_SIGNAL_EN:  %x\n", SDHC_EINTR_SIGNAL_EN,
2545 	    HREAD2(hp, SDHC_EINTR_SIGNAL_EN));
2546 	printf("0x%02x CAPABILITIES:     %x\n", SDHC_CAPABILITIES,
2547 	    HREAD4(hp, SDHC_CAPABILITIES));
2548 	printf("0x%02x MAX_CAPABILITIES: %x\n", SDHC_MAX_CAPABILITIES,
2549 	    HREAD4(hp, SDHC_MAX_CAPABILITIES));
2550 }
2551 #endif
2552