xref: /netbsd-src/sys/dev/sdmmc/sdhc.c (revision 345cf9fb81bd0411c53e25d62cd93bdcaa865312)
1 /*	$NetBSD: sdhc.c,v 1.118 2024/01/20 00:22:11 jmcneill Exp $	*/
2 /*	$OpenBSD: sdhc.c,v 1.25 2009/01/13 19:44:20 grange Exp $	*/
3 
4 /*
5  * Copyright (c) 2006 Uwe Stuehler <uwe@openbsd.org>
6  *
7  * Permission to use, copy, modify, and distribute this software for any
8  * purpose with or without fee is hereby granted, provided that the above
9  * copyright notice and this permission notice appear in all copies.
10  *
11  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 /*
21  * SD Host Controller driver based on the SD Host Controller Standard
22  * Simplified Specification Version 1.00 (www.sdcard.org).
23  */
24 
25 #include <sys/cdefs.h>
26 __KERNEL_RCSID(0, "$NetBSD: sdhc.c,v 1.118 2024/01/20 00:22:11 jmcneill Exp $");
27 
28 #ifdef _KERNEL_OPT
29 #include "opt_sdmmc.h"
30 #endif
31 
32 #include <sys/param.h>
33 #include <sys/device.h>
34 #include <sys/kernel.h>
35 #include <sys/malloc.h>
36 #include <sys/systm.h>
37 #include <sys/mutex.h>
38 #include <sys/condvar.h>
39 #include <sys/atomic.h>
40 
41 #include <dev/sdmmc/sdhcreg.h>
42 #include <dev/sdmmc/sdhcvar.h>
43 #include <dev/sdmmc/sdmmcchip.h>
44 #include <dev/sdmmc/sdmmcreg.h>
45 #include <dev/sdmmc/sdmmcvar.h>
46 
47 #ifdef SDHC_DEBUG
48 int sdhcdebug = 1;
49 #define DPRINTF(n,s)	do { if ((n) <= sdhcdebug) printf s; } while (0)
50 void	sdhc_dump_regs(struct sdhc_host *);
51 #else
52 #define DPRINTF(n,s)	do {} while (0)
53 #endif
54 
55 #define SDHC_COMMAND_TIMEOUT	hz
56 #define SDHC_BUFFER_TIMEOUT	hz
57 #define SDHC_TRANSFER_TIMEOUT	hz
58 #define SDHC_DMA_TIMEOUT	(hz*3)
59 #define SDHC_TUNING_TIMEOUT	hz
60 
61 struct sdhc_host {
62 	struct sdhc_softc *sc;		/* host controller device */
63 
64 	bus_space_tag_t iot;		/* host register set tag */
65 	bus_space_handle_t ioh;		/* host register set handle */
66 	bus_size_t ios;			/* host register space size */
67 	bus_dma_tag_t dmat;		/* host DMA tag */
68 
69 	device_t sdmmc;			/* generic SD/MMC device */
70 
71 	u_int clkbase;			/* base clock frequency in kHz */
72 	int maxblklen;			/* maximum block length */
73 	uint32_t ocr;			/* OCR value from capabilities */
74 
75 	uint8_t regs[14];		/* host controller state */
76 
77 	uint16_t intr_status;		/* soft interrupt status */
78 	uint16_t intr_error_status;	/* soft error status */
79 	kmutex_t intr_lock;
80 	kmutex_t bus_clock_lock;
81 	kcondvar_t intr_cv;
82 
83 	callout_t tuning_timer;
84 	int tuning_timing;
85 	u_int tuning_timer_count;
86 	u_int tuning_timer_pending;
87 
88 	int specver;			/* spec. version */
89 
90 	uint32_t flags;			/* flags for this host */
91 #define SHF_USE_DMA		0x0001
92 #define SHF_USE_4BIT_MODE	0x0002
93 #define SHF_USE_8BIT_MODE	0x0004
94 #define SHF_MODE_DMAEN		0x0008 /* needs SDHC_DMA_ENABLE in mode */
95 #define SHF_USE_ADMA2_32	0x0010
96 #define SHF_USE_ADMA2_64	0x0020
97 #define SHF_USE_ADMA2_MASK	0x0030
98 
99 	bus_dmamap_t		adma_map;
100 	bus_dma_segment_t	adma_segs[1];
101 	void			*adma2;
102 
103 	uint8_t			vdd;	/* last vdd setting */
104 };
105 
106 #define HDEVNAME(hp)	(device_xname((hp)->sc->sc_dev))
107 
108 static uint8_t
109 hread1(struct sdhc_host *hp, bus_size_t reg)
110 {
111 
112 	if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_32BIT_ACCESS))
113 		return bus_space_read_1(hp->iot, hp->ioh, reg);
114 	return bus_space_read_4(hp->iot, hp->ioh, reg & -4) >> (8 * (reg & 3));
115 }
116 
117 static uint16_t
118 hread2(struct sdhc_host *hp, bus_size_t reg)
119 {
120 
121 	if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_32BIT_ACCESS))
122 		return bus_space_read_2(hp->iot, hp->ioh, reg);
123 	return bus_space_read_4(hp->iot, hp->ioh, reg & -4) >> (8 * (reg & 2));
124 }
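/*
 * Hosts flagged SDHC_FLAG_32BIT_ACCESS only tolerate 32-bit register
 * accesses, so the byte and halfword reads above are synthesized from an
 * aligned 32-bit read of the containing word followed by a shift into the
 * wanted lane (e.g. a byte read at offset 0x2f reads the word at 0x2c and
 * shifts right by 24).
 */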
125 
126 #define HREAD1(hp, reg)		hread1(hp, reg)
127 #define HREAD2(hp, reg)		hread2(hp, reg)
128 #define HREAD4(hp, reg)		\
129 	(bus_space_read_4((hp)->iot, (hp)->ioh, (reg)))
130 
131 
132 static void
133 hwrite1(struct sdhc_host *hp, bus_size_t o, uint8_t val)
134 {
135 
136 	if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_32BIT_ACCESS)) {
137 		bus_space_write_1(hp->iot, hp->ioh, o, val);
138 	} else {
139 		const size_t shift = 8 * (o & 3);
140 		o &= -4;
141 		uint32_t tmp = bus_space_read_4(hp->iot, hp->ioh, o);
142 		tmp = (val << shift) | (tmp & ~(0xffU << shift));
143 		bus_space_write_4(hp->iot, hp->ioh, o, tmp);
144 	}
145 	if (hp->sc->sc_write_delay != 0) {
146 		delay(hp->sc->sc_write_delay);
147 	}
148 }
149 
150 static void
151 hwrite2(struct sdhc_host *hp, bus_size_t o, uint16_t val)
152 {
153 
154 	if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_32BIT_ACCESS)) {
155 		bus_space_write_2(hp->iot, hp->ioh, o, val);
156 	} else {
157 		const size_t shift = 8 * (o & 2);
158 		o &= -4;
159 		uint32_t tmp = bus_space_read_4(hp->iot, hp->ioh, o);
160 		tmp = (val << shift) | (tmp & ~(0xffffU << shift));
161 		bus_space_write_4(hp->iot, hp->ioh, o, tmp);
162 	}
163 	if (hp->sc->sc_write_delay != 0) {
164 		delay(hp->sc->sc_write_delay);
165 	}
166 }
167 
168 static void
169 hwrite4(struct sdhc_host *hp, bus_size_t o, uint32_t val)
170 {
171 
172 	bus_space_write_4(hp->iot, hp->ioh, o, val);
173 	if (hp->sc->sc_write_delay != 0) {
174 		delay(hp->sc->sc_write_delay);
175 	}
176 }
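/*
 * On SDHC_FLAG_32BIT_ACCESS hosts the sub-word writes above become 32-bit
 * read-modify-write cycles, so they also rewrite whatever else shares the
 * containing word; keep that in mind for neighbouring registers with
 * write-1-to-clear semantics.  The optional sc_write_delay pause after
 * every write is presumably there to pace controllers that need settling
 * time between back-to-back register accesses.
 */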
177 
178 #define HWRITE1(hp, reg, val)		hwrite1(hp, reg, val)
179 #define HWRITE2(hp, reg, val)		hwrite2(hp, reg, val)
180 #define HWRITE4(hp, reg, val)		hwrite4(hp, reg, val)
181 
182 #define HCLR1(hp, reg, bits)						\
183 	do if ((bits) != 0) HWRITE1((hp), (reg), HREAD1((hp), (reg)) & ~(bits)); while (0)
184 #define HCLR2(hp, reg, bits)						\
185 	do if ((bits) != 0) HWRITE2((hp), (reg), HREAD2((hp), (reg)) & ~(bits)); while (0)
186 #define HCLR4(hp, reg, bits)						\
187 	do if ((bits) != 0) HWRITE4((hp), (reg), HREAD4((hp), (reg)) & ~(bits)); while (0)
188 #define HSET1(hp, reg, bits)						\
189 	do if ((bits) != 0) HWRITE1((hp), (reg), HREAD1((hp), (reg)) | (bits)); while (0)
190 #define HSET2(hp, reg, bits)						\
191 	do if ((bits) != 0) HWRITE2((hp), (reg), HREAD2((hp), (reg)) | (bits)); while (0)
192 #define HSET4(hp, reg, bits)						\
193 	do if ((bits) != 0) HWRITE4((hp), (reg), HREAD4((hp), (reg)) | (bits)); while (0)
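/*
 * The HSETn/HCLRn macros are simple read-modify-write helpers; the
 * "if ((bits) != 0)" guard skips the register access entirely when the
 * mask is zero, presumably so that a mask that folds to a constant 0 in
 * some configuration costs nothing.
 */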
194 
195 static int	sdhc_host_reset(sdmmc_chipset_handle_t);
196 static int	sdhc_host_reset1(sdmmc_chipset_handle_t);
197 static uint32_t	sdhc_host_ocr(sdmmc_chipset_handle_t);
198 static int	sdhc_host_maxblklen(sdmmc_chipset_handle_t);
199 static int	sdhc_card_detect(sdmmc_chipset_handle_t);
200 static int	sdhc_write_protect(sdmmc_chipset_handle_t);
201 static int	sdhc_bus_power(sdmmc_chipset_handle_t, uint32_t);
202 static int	sdhc_bus_clock_ddr(sdmmc_chipset_handle_t, int, bool);
203 static int	sdhc_bus_width(sdmmc_chipset_handle_t, int);
204 static int	sdhc_bus_rod(sdmmc_chipset_handle_t, int);
205 static void	sdhc_card_enable_intr(sdmmc_chipset_handle_t, int);
206 static void	sdhc_card_intr_ack(sdmmc_chipset_handle_t);
207 static void	sdhc_exec_command(sdmmc_chipset_handle_t,
208 		    struct sdmmc_command *);
209 static int	sdhc_signal_voltage(sdmmc_chipset_handle_t, int);
210 static int	sdhc_execute_tuning1(struct sdhc_host *, int);
211 static int	sdhc_execute_tuning(sdmmc_chipset_handle_t, int);
212 static void	sdhc_tuning_timer(void *);
213 static void	sdhc_hw_reset(sdmmc_chipset_handle_t);
214 static int	sdhc_start_command(struct sdhc_host *, struct sdmmc_command *);
215 static int	sdhc_wait_state(struct sdhc_host *, uint32_t, uint32_t);
216 static int	sdhc_soft_reset(struct sdhc_host *, int);
217 static int	sdhc_wait_intr(struct sdhc_host *, int, int, bool);
218 static void	sdhc_transfer_data(struct sdhc_host *, struct sdmmc_command *);
219 static int	sdhc_transfer_data_dma(struct sdhc_host *, struct sdmmc_command *);
220 static int	sdhc_transfer_data_pio(struct sdhc_host *, struct sdmmc_command *);
221 static void	sdhc_read_data_pio(struct sdhc_host *, uint8_t *, u_int);
222 static void	sdhc_write_data_pio(struct sdhc_host *, uint8_t *, u_int);
223 static void	esdhc_read_data_pio(struct sdhc_host *, uint8_t *, u_int);
224 static void	esdhc_write_data_pio(struct sdhc_host *, uint8_t *, u_int);
225 
226 static struct sdmmc_chip_functions sdhc_functions = {
227 	/* host controller reset */
228 	.host_reset = sdhc_host_reset,
229 
230 	/* host controller capabilities */
231 	.host_ocr = sdhc_host_ocr,
232 	.host_maxblklen = sdhc_host_maxblklen,
233 
234 	/* card detection */
235 	.card_detect = sdhc_card_detect,
236 
237 	/* write protect */
238 	.write_protect = sdhc_write_protect,
239 
240 	/* bus power, clock frequency, width and ROD(OpenDrain/PushPull) */
241 	.bus_power = sdhc_bus_power,
242 	.bus_clock = NULL,	/* see sdhc_bus_clock_ddr */
243 	.bus_width = sdhc_bus_width,
244 	.bus_rod = sdhc_bus_rod,
245 
246 	/* command execution */
247 	.exec_command = sdhc_exec_command,
248 
249 	/* card interrupt */
250 	.card_enable_intr = sdhc_card_enable_intr,
251 	.card_intr_ack = sdhc_card_intr_ack,
252 
253 	/* UHS functions */
254 	.signal_voltage = sdhc_signal_voltage,
255 	.bus_clock_ddr = sdhc_bus_clock_ddr,
256 	.execute_tuning = sdhc_execute_tuning,
257 	.hw_reset = sdhc_hw_reset,
258 };
259 
260 static int
261 sdhc_cfprint(void *aux, const char *pnp)
262 {
263 	const struct sdmmcbus_attach_args * const saa = aux;
264 	const struct sdhc_host * const hp = saa->saa_sch;
265 
266 	if (pnp) {
267 		aprint_normal("sdmmc at %s", pnp);
268 	}
269 	for (size_t host = 0; host < hp->sc->sc_nhosts; host++) {
270 		if (hp->sc->sc_host[host] == hp) {
271 			aprint_normal(" slot %zu", host);
272 		}
273 	}
274 
275 	return UNCONF;
276 }
277 
278 /*
279  * Called by attachment driver.  For each SD card slot there is one SD
280  * host controller standard register set. (1.3)
281  */
282 int
283 sdhc_host_found(struct sdhc_softc *sc, bus_space_tag_t iot,
284     bus_space_handle_t ioh, bus_size_t iosize)
285 {
286 	struct sdmmcbus_attach_args saa;
287 	struct sdhc_host *hp;
288 	uint32_t caps, caps2;
289 	uint16_t sdhcver;
290 	int error;
291 
292 	/* Allocate one more host structure. */
293 	hp = malloc(sizeof(struct sdhc_host), M_DEVBUF, M_WAITOK|M_ZERO);
294 	if (hp == NULL) {
295 		aprint_error_dev(sc->sc_dev,
296 		    "couldn't alloc memory (sdhc host)\n");
297 		goto err1;
298 	}
299 	sc->sc_host[sc->sc_nhosts++] = hp;
300 
301 	/* Fill in the new host structure. */
302 	hp->sc = sc;
303 	hp->iot = iot;
304 	hp->ioh = ioh;
305 	hp->ios = iosize;
306 	hp->dmat = sc->sc_dmat;
307 
308 	mutex_init(&hp->intr_lock, MUTEX_DEFAULT, IPL_SDMMC);
309 	mutex_init(&hp->bus_clock_lock, MUTEX_DEFAULT, IPL_NONE);
310 	cv_init(&hp->intr_cv, "sdhcintr");
311 	callout_init(&hp->tuning_timer, CALLOUT_MPSAFE);
312 	callout_setfunc(&hp->tuning_timer, sdhc_tuning_timer, hp);
313 
314 	if (ISSET(hp->sc->sc_flags, SDHC_FLAG_USDHC)) {
315 		sdhcver = SDHC_SPEC_VERS_300 << SDHC_SPEC_VERS_SHIFT;
316 	} else if (ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED)) {
317 		sdhcver = HREAD4(hp, SDHC_ESDHC_HOST_CTL_VERSION);
318 	} else if (iosize <= SDHC_HOST_CTL_VERSION) {
319 		sdhcver = SDHC_SPEC_NOVERS << SDHC_SPEC_VERS_SHIFT;
320 	} else {
321 		sdhcver = HREAD2(hp, SDHC_HOST_CTL_VERSION);
322 	}
323 	aprint_normal_dev(sc->sc_dev, "SDHC ");
324 	hp->specver = SDHC_SPEC_VERSION(sdhcver);
325 	switch (SDHC_SPEC_VERSION(sdhcver)) {
326 	case SDHC_SPEC_VERS_100:
327 		aprint_normal("1.0");
328 		break;
329 	case SDHC_SPEC_VERS_200:
330 		aprint_normal("2.0");
331 		break;
332 	case SDHC_SPEC_VERS_300:
333 		aprint_normal("3.0");
334 		break;
335 	case SDHC_SPEC_VERS_400:
336 		aprint_normal("4.0");
337 		break;
338 	case SDHC_SPEC_VERS_410:
339 		aprint_normal("4.1");
340 		break;
341 	case SDHC_SPEC_VERS_420:
342 		aprint_normal("4.2");
343 		break;
344 	case SDHC_SPEC_NOVERS:
345 		hp->specver = -1;
346 		aprint_normal("NO-VERS");
347 		break;
348 	default:
349 		aprint_normal("unknown version(0x%x)",
350 		    SDHC_SPEC_VERSION(sdhcver));
351 		break;
352 	}
353 	if (SDHC_SPEC_VERSION(sdhcver) != SDHC_SPEC_NOVERS)
354 		aprint_normal(", rev %u", SDHC_VENDOR_VERSION(sdhcver));
355 
356 	/*
357 	 * Reset the host controller and enable interrupts.
358 	 */
359 	(void)sdhc_host_reset(hp);
360 
361 	if (ISSET(hp->sc->sc_flags, SDHC_FLAG_USDHC)) {
362 		/* init uSDHC registers */
363 		HWRITE4(hp, SDHC_MMC_BOOT, 0);
364 		HWRITE4(hp, SDHC_HOST_CTL, SDHC_USDHC_BURST_LEN_EN |
365 		    SDHC_USDHC_HOST_CTL_RESV23 | SDHC_USDHC_EMODE_LE);
366 		HWRITE4(hp, SDHC_WATERMARK_LEVEL,
367 		    (0x10 << SDHC_WATERMARK_WR_BRST_SHIFT) |
368 		    (0x40 << SDHC_WATERMARK_WRITE_SHIFT) |
369 		    (0x10 << SDHC_WATERMARK_RD_BRST_SHIFT) |
370 		    (0x40 << SDHC_WATERMARK_READ_SHIFT));
371 		HSET4(hp, SDHC_VEND_SPEC,
372 		    SDHC_VEND_SPEC_MBO |
373 		    SDHC_VEND_SPEC_CARD_CLK_SOFT_EN |
374 		    SDHC_VEND_SPEC_IPG_PERCLK_SOFT_EN |
375 		    SDHC_VEND_SPEC_HCLK_SOFT_EN |
376 		    SDHC_VEND_SPEC_IPG_CLK_SOFT_EN |
377 		    SDHC_VEND_SPEC_AC12_WR_CHKBUSY_EN |
378 		    SDHC_VEND_SPEC_FRC_SDCLK_ON);
379 	}
380 
381 	/* Determine host capabilities. */
382 	if (ISSET(sc->sc_flags, SDHC_FLAG_HOSTCAPS)) {
383 		caps = sc->sc_caps;
384 		caps2 = sc->sc_caps2;
385 	} else if (ISSET(hp->sc->sc_flags, SDHC_FLAG_USDHC)) {
386 		/* the uSDHC capability register is laid out a little differently */
387 		caps = HREAD4(hp, SDHC_CAPABILITIES);
388 		caps |= SDHC_8BIT_SUPP;
389 		if (caps & SDHC_ADMA1_SUPP)
390 			caps |= SDHC_ADMA2_SUPP;
391 		sc->sc_caps = caps;
392 		/* uSDHC has no SDHC_CAPABILITIES2 register */
393 		caps2 = sc->sc_caps2 = SDHC_SDR50_SUPP | SDHC_DDR50_SUPP;
394 	} else {
395 		caps = sc->sc_caps = HREAD4(hp, SDHC_CAPABILITIES);
396 		if (hp->specver >= SDHC_SPEC_VERS_300) {
397 			caps2 = sc->sc_caps2 = HREAD4(hp, SDHC_CAPABILITIES2);
398 		} else {
399 			caps2 = sc->sc_caps2 = 0;
400 		}
401 	}
402 
403 	aprint_verbose(", caps <%08x/%08x>", caps, caps2);
404 
405 	const u_int retuning_mode = (caps2 >> SDHC_RETUNING_MODES_SHIFT) &
406 	    SDHC_RETUNING_MODES_MASK;
407 	if (retuning_mode == SDHC_RETUNING_MODE_1) {
408 		hp->tuning_timer_count = (caps2 >> SDHC_TIMER_COUNT_SHIFT) &
409 		    SDHC_TIMER_COUNT_MASK;
410 		if (hp->tuning_timer_count == 0xf)
411 			hp->tuning_timer_count = 0;
412 		if (hp->tuning_timer_count)
413 			hp->tuning_timer_count =
414 			    1 << (hp->tuning_timer_count - 1);
415 	}
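	/*
	 * The 4-bit timer-count field decoded above follows the SDHC 3.0
	 * capability encoding: 0 means no timer, 0xf means "information from
	 * other source" (treated here as no timer), and any other value n
	 * means a re-tuning interval of 2^(n-1) seconds, so e.g. a raw value
	 * of 4 yields an 8 second timer.
	 */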
416 
417 	/*
418 	 * Use DMA if the host system and the controller support it.
419 	 * Supports an integrated or external DMA engine, with or without
420 	 * SDHC_DMA_ENABLE in the command.
421 	 */
422 	if (ISSET(sc->sc_flags, SDHC_FLAG_FORCE_DMA) ||
423 	    (ISSET(sc->sc_flags, SDHC_FLAG_USE_DMA) &&
424 	     ISSET(caps, SDHC_DMA_SUPPORT))) {
425 		SET(hp->flags, SHF_USE_DMA);
426 
427 		if (ISSET(caps, SDHC_ADMA2_SUPP) &&
428 		    !ISSET(sc->sc_flags, SDHC_FLAG_BROKEN_ADMA)) {
429 			SET(hp->flags, SHF_MODE_DMAEN);
430 			/*
431 			 * 64-bit mode was present in the 2.00 spec, removed
432 			 * from 3.00, and re-added in 4.00 with a different
433 			 * descriptor layout. We only support 2.00 and 3.00
434 			 * descriptors for now.
435 			 */
436 			if (hp->specver == SDHC_SPEC_VERS_200 &&
437 			    ISSET(caps, SDHC_64BIT_SYS_BUS)) {
438 				SET(hp->flags, SHF_USE_ADMA2_64);
439 				aprint_normal(", 64-bit ADMA2");
440 			} else {
441 				SET(hp->flags, SHF_USE_ADMA2_32);
442 				aprint_normal(", 32-bit ADMA2");
443 			}
444 		} else {
445 			if (!ISSET(sc->sc_flags, SDHC_FLAG_EXTERNAL_DMA) ||
446 			    ISSET(sc->sc_flags, SDHC_FLAG_EXTDMA_DMAEN))
447 				SET(hp->flags, SHF_MODE_DMAEN);
448 			if (sc->sc_vendor_transfer_data_dma) {
449 				aprint_normal(", platform DMA");
450 			} else {
451 				aprint_normal(", SDMA");
452 			}
453 		}
454 	} else {
455 		aprint_normal(", PIO");
456 	}
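	/*
	 * Summary of the choice above: ADMA2 (32- or 64-bit descriptors)
	 * when the controller advertises it and it isn't flagged broken,
	 * otherwise a vendor-supplied DMA routine or single-segment SDMA,
	 * and plain PIO when DMA cannot be used at all.
	 */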
457 
458 	/*
459 	 * Determine the base clock frequency. (2.2.24)
460 	 */
461 	if (hp->specver >= SDHC_SPEC_VERS_300) {
462 		hp->clkbase = SDHC_BASE_V3_FREQ_KHZ(caps);
463 	} else {
464 		hp->clkbase = SDHC_BASE_FREQ_KHZ(caps);
465 	}
466 	if (hp->clkbase == 0 ||
467 	    ISSET(sc->sc_flags, SDHC_FLAG_NO_CLKBASE)) {
468 		if (sc->sc_clkbase == 0) {
469 			/* The attachment driver must tell us. */
470 			aprint_error_dev(sc->sc_dev,
471 			    "unknown base clock frequency\n");
472 			goto err;
473 		}
474 		hp->clkbase = sc->sc_clkbase;
475 	}
476 	if (hp->clkbase < 10000 || hp->clkbase > 10000 * 256) {
477 		/* SDHC 1.0 supports only 10-63 MHz. */
478 		aprint_error_dev(sc->sc_dev,
479 		    "base clock frequency out of range: %u MHz\n",
480 		    hp->clkbase / 1000);
481 		goto err;
482 	}
483 	aprint_normal(", %u kHz", hp->clkbase);
484 
485 	/*
486 	 * XXX Set the data timeout counter value according to
487 	 * capabilities. (2.2.15)
488 	 */
489 	HWRITE1(hp, SDHC_TIMEOUT_CTL, SDHC_TIMEOUT_MAX);
490 #if 1
491 	if (ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED))
492 		HWRITE4(hp, SDHC_NINTR_STATUS, SDHC_CMD_TIMEOUT_ERROR << 16);
493 #endif
494 
495 	if (ISSET(caps, SDHC_EMBEDDED_SLOT))
496 		aprint_normal(", embedded slot");
497 
498 	/*
499 	 * Determine SD bus voltage levels supported by the controller.
500 	 */
501 	aprint_normal(",");
502 	if (ISSET(caps, SDHC_HIGH_SPEED_SUPP)) {
503 		SET(hp->ocr, MMC_OCR_HCS);
504 		aprint_normal(" HS");
505 	}
506 	if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_NO_1_8_V)) {
507 		if (ISSET(caps2, SDHC_SDR50_SUPP)) {
508 			SET(hp->ocr, MMC_OCR_S18A);
509 			aprint_normal(" SDR50");
510 		}
511 		if (ISSET(caps2, SDHC_DDR50_SUPP)) {
512 			SET(hp->ocr, MMC_OCR_S18A);
513 			aprint_normal(" DDR50");
514 		}
515 		if (ISSET(caps2, SDHC_SDR104_SUPP)) {
516 			SET(hp->ocr, MMC_OCR_S18A);
517 			aprint_normal(" SDR104 HS200");
518 		}
519 		if (ISSET(caps, SDHC_VOLTAGE_SUPP_1_8V)) {
520 			SET(hp->ocr, MMC_OCR_1_65V_1_95V);
521 			aprint_normal(" 1.8V");
522 		}
523 	}
524 	if (ISSET(caps, SDHC_VOLTAGE_SUPP_3_0V)) {
525 		SET(hp->ocr, MMC_OCR_2_9V_3_0V | MMC_OCR_3_0V_3_1V);
526 		aprint_normal(" 3.0V");
527 	}
528 	if (ISSET(caps, SDHC_VOLTAGE_SUPP_3_3V)) {
529 		SET(hp->ocr, MMC_OCR_3_2V_3_3V | MMC_OCR_3_3V_3_4V);
530 		aprint_normal(" 3.3V");
531 	}
532 	if (hp->specver >= SDHC_SPEC_VERS_300) {
533 		aprint_normal(", re-tuning mode %d", retuning_mode + 1);
534 		if (hp->tuning_timer_count)
535 			aprint_normal(" (%us timer)", hp->tuning_timer_count);
536 	}
537 
538 	/*
539 	 * Determine the maximum block length supported by the host
540 	 * controller. (2.2.24)
541 	 */
542 	switch((caps >> SDHC_MAX_BLK_LEN_SHIFT) & SDHC_MAX_BLK_LEN_MASK) {
543 	case SDHC_MAX_BLK_LEN_512:
544 		hp->maxblklen = 512;
545 		break;
546 
547 	case SDHC_MAX_BLK_LEN_1024:
548 		hp->maxblklen = 1024;
549 		break;
550 
551 	case SDHC_MAX_BLK_LEN_2048:
552 		hp->maxblklen = 2048;
553 		break;
554 
555 	case SDHC_MAX_BLK_LEN_4096:
556 		hp->maxblklen = 4096;
557 		break;
558 
559 	default:
560 		aprint_error_dev(sc->sc_dev, "max block length unknown\n");
561 		goto err;
562 	}
563 	aprint_normal(", %u byte blocks", hp->maxblklen);
564 	aprint_normal("\n");
565 
566 	if (ISSET(hp->flags, SHF_USE_ADMA2_MASK)) {
567 		int rseg;
568 
569 		/* Allocate ADMA2 descriptor memory */
570 		error = bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE, PAGE_SIZE,
571 		    PAGE_SIZE, hp->adma_segs, 1, &rseg, BUS_DMA_WAITOK);
572 		if (error) {
573 			aprint_error_dev(sc->sc_dev,
574 			    "ADMA2 dmamem_alloc failed (%d)\n", error);
575 			goto adma_done;
576 		}
577 		error = bus_dmamem_map(sc->sc_dmat, hp->adma_segs, rseg,
578 		    PAGE_SIZE, (void **)&hp->adma2, BUS_DMA_WAITOK);
579 		if (error) {
580 			aprint_error_dev(sc->sc_dev,
581 			    "ADMA2 dmamem_map failed (%d)\n", error);
582 			goto adma_done;
583 		}
584 		error = bus_dmamap_create(sc->sc_dmat, PAGE_SIZE, 1, PAGE_SIZE,
585 		    0, BUS_DMA_WAITOK, &hp->adma_map);
586 		if (error) {
587 			aprint_error_dev(sc->sc_dev,
588 			    "ADMA2 dmamap_create failed (%d)\n", error);
589 			goto adma_done;
590 		}
591 		error = bus_dmamap_load(sc->sc_dmat, hp->adma_map,
592 		    hp->adma2, PAGE_SIZE, NULL,
593 		    BUS_DMA_WAITOK|BUS_DMA_WRITE);
594 		if (error) {
595 			aprint_error_dev(sc->sc_dev,
596 			    "ADMA2 dmamap_load failed (%d)\n", error);
597 			goto adma_done;
598 		}
599 
600 		memset(hp->adma2, 0, PAGE_SIZE);
601 
602 adma_done:
603 		if (error)
604 			CLR(hp->flags, SHF_USE_ADMA2_MASK);
605 	}
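	/*
	 * The ADMA2 descriptor table lives in one DMA-safe page that is
	 * allocated and mapped once here and rewritten for every transfer;
	 * if any of the allocation steps above failed, ADMA2 is simply
	 * switched off again and the host falls back to SDMA or PIO.
	 */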
606 
607 	/*
608 	 * Attach the generic SD/MMC bus driver.  (The bus driver must
609 	 * not invoke any chipset functions before it is attached.)
610 	 */
611 	memset(&saa, 0, sizeof(saa));
612 	saa.saa_busname = "sdmmc";
613 	saa.saa_sct = &sdhc_functions;
614 	saa.saa_sch = hp;
615 	saa.saa_dmat = hp->dmat;
616 	saa.saa_clkmax = hp->clkbase;
617 	if (ISSET(sc->sc_flags, SDHC_FLAG_HAVE_CGM))
618 		saa.saa_clkmin = hp->clkbase / 256 / 2046;
619 	else if (ISSET(sc->sc_flags, SDHC_FLAG_HAVE_DVS))
620 		saa.saa_clkmin = hp->clkbase / 256 / 16;
621 	else if (hp->sc->sc_clkmsk != 0)
622 		saa.saa_clkmin = hp->clkbase / (hp->sc->sc_clkmsk >>
623 		    (ffs(hp->sc->sc_clkmsk) - 1));
624 	else if (hp->specver >= SDHC_SPEC_VERS_300)
625 		saa.saa_clkmin = hp->clkbase / 0x3ff;
626 	else
627 		saa.saa_clkmin = hp->clkbase / 256;
628 	if (!ISSET(sc->sc_flags, SDHC_FLAG_NO_AUTO_STOP))
629 		saa.saa_caps |= SMC_CAPS_AUTO_STOP;
630 	saa.saa_caps |= SMC_CAPS_4BIT_MODE;
631 	if (ISSET(sc->sc_flags, SDHC_FLAG_8BIT_MODE))
632 		saa.saa_caps |= SMC_CAPS_8BIT_MODE;
633 	if (ISSET(caps, SDHC_HIGH_SPEED_SUPP))
634 		saa.saa_caps |= SMC_CAPS_SD_HIGHSPEED |
635 				SMC_CAPS_MMC_HIGHSPEED;
636 	if (ISSET(caps2, SDHC_SDR104_SUPP))
637 		saa.saa_caps |= SMC_CAPS_UHS_SDR104 |
638 				SMC_CAPS_UHS_SDR50 |
639 				SMC_CAPS_MMC_HS200;
640 	if (ISSET(caps2, SDHC_SDR50_SUPP))
641 		saa.saa_caps |= SMC_CAPS_UHS_SDR50;
642 	if (ISSET(caps2, SDHC_DDR50_SUPP))
643 		saa.saa_caps |= SMC_CAPS_UHS_DDR50;
644 	if (ISSET(hp->flags, SHF_USE_DMA)) {
645 		saa.saa_caps |= SMC_CAPS_DMA;
646 		if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED))
647 			saa.saa_caps |= SMC_CAPS_MULTI_SEG_DMA;
648 	}
649 	if (ISSET(sc->sc_flags, SDHC_FLAG_SINGLE_ONLY))
650 		saa.saa_caps |= SMC_CAPS_SINGLE_ONLY;
651 	if (ISSET(sc->sc_flags, SDHC_FLAG_POLL_CARD_DET))
652 		saa.saa_caps |= SMC_CAPS_POLL_CARD_DET;
653 
654 	if (ISSET(sc->sc_flags, SDHC_FLAG_BROKEN_ADMA2_ZEROLEN))
655 		saa.saa_max_seg = 65535;
656 
657 	hp->sdmmc = config_found(sc->sc_dev, &saa, sdhc_cfprint, CFARGS_NONE);
658 
659 	return 0;
660 
661 err:
662 	callout_destroy(&hp->tuning_timer);
663 	cv_destroy(&hp->intr_cv);
664 	mutex_destroy(&hp->bus_clock_lock);
665 	mutex_destroy(&hp->intr_lock);
666 	free(hp, M_DEVBUF);
667 	sc->sc_host[--sc->sc_nhosts] = NULL;
668 err1:
669 	return 1;
670 }
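/*
 * Illustrative sketch only (not part of this driver): a bus front-end is
 * expected to fill in the shared sdhc_softc (sc_dev, sc_dmat, sc_flags, ...),
 * map the controller's register window and then call sdhc_host_found() once
 * per slot.  The names below (mysdhc_attach, msc, faa) are hypothetical.
 *
 *	// in mysdhc_attach(), hypothetical front-end
 *	if (bus_space_map(iot, addr, size, 0, &ioh) != 0) {
 *		aprint_error_dev(self, "couldn't map registers\n");
 *		return;
 *	}
 *	if (sdhc_host_found(&msc->sc_sdhc, iot, ioh, size) != 0)
 *		aprint_error_dev(self, "couldn't initialize host\n");
 */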
671 
672 int
673 sdhc_detach(struct sdhc_softc *sc, int flags)
674 {
675 	struct sdhc_host *hp;
676 	int rv = 0;
677 
678 	for (size_t n = 0; n < sc->sc_nhosts; n++) {
679 		hp = sc->sc_host[n];
680 		if (hp == NULL)
681 			continue;
682 		if (hp->sdmmc != NULL) {
683 			rv = config_detach(hp->sdmmc, flags);
684 			if (rv)
685 				break;
686 			hp->sdmmc = NULL;
687 		}
688 		/* disable interrupts */
689 		if ((flags & DETACH_FORCE) == 0) {
690 			mutex_enter(&hp->intr_lock);
691 			if (ISSET(hp->sc->sc_flags, SDHC_FLAG_32BIT_ACCESS)) {
692 				HWRITE4(hp, SDHC_NINTR_SIGNAL_EN, 0);
693 			} else {
694 				HWRITE2(hp, SDHC_NINTR_SIGNAL_EN, 0);
695 			}
696 			sdhc_soft_reset(hp, SDHC_RESET_ALL);
697 			mutex_exit(&hp->intr_lock);
698 		}
699 		callout_halt(&hp->tuning_timer, NULL);
700 		callout_destroy(&hp->tuning_timer);
701 		cv_destroy(&hp->intr_cv);
702 		mutex_destroy(&hp->intr_lock);
703 		if (hp->ios > 0) {
704 			bus_space_unmap(hp->iot, hp->ioh, hp->ios);
705 			hp->ios = 0;
706 		}
707 		if (ISSET(hp->flags, SHF_USE_ADMA2_MASK)) {
708 			bus_dmamap_unload(sc->sc_dmat, hp->adma_map);
709 			bus_dmamap_destroy(sc->sc_dmat, hp->adma_map);
710 			bus_dmamem_unmap(sc->sc_dmat, hp->adma2, PAGE_SIZE);
711 			bus_dmamem_free(sc->sc_dmat, hp->adma_segs, 1);
712 		}
713 		free(hp, M_DEVBUF);
714 		sc->sc_host[n] = NULL;
715 	}
716 
717 	return rv;
718 }
719 
720 bool
721 sdhc_suspend(device_t dev, const pmf_qual_t *qual)
722 {
723 	struct sdhc_softc *sc = device_private(dev);
724 	struct sdhc_host *hp;
725 	size_t i;
726 
727 	/* XXX poll for command completion or suspend command
728 	 * in progress */
729 
730 	/* Save the host controller state. */
731 	for (size_t n = 0; n < sc->sc_nhosts; n++) {
732 		hp = sc->sc_host[n];
733 		if (ISSET(sc->sc_flags, SDHC_FLAG_32BIT_ACCESS)) {
734 			for (i = 0; i < sizeof hp->regs; i += 4) {
735 				uint32_t v = HREAD4(hp, i);
736 				hp->regs[i + 0] = (v >> 0);
737 				hp->regs[i + 1] = (v >> 8);
738 				if (i + 3 < sizeof hp->regs) {
739 					hp->regs[i + 2] = (v >> 16);
740 					hp->regs[i + 3] = (v >> 24);
741 				}
742 			}
743 		} else {
744 			for (i = 0; i < sizeof hp->regs; i++) {
745 				hp->regs[i] = HREAD1(hp, i);
746 			}
747 		}
748 	}
749 	return true;
750 }
751 
752 bool
753 sdhc_resume(device_t dev, const pmf_qual_t *qual)
754 {
755 	struct sdhc_softc *sc = device_private(dev);
756 	struct sdhc_host *hp;
757 	size_t i;
758 
759 	/* Restore the host controller state. */
760 	for (size_t n = 0; n < sc->sc_nhosts; n++) {
761 		hp = sc->sc_host[n];
762 		(void)sdhc_host_reset(hp);
763 		if (ISSET(sc->sc_flags, SDHC_FLAG_32BIT_ACCESS)) {
764 			for (i = 0; i < sizeof hp->regs; i += 4) {
765 				if (i + 3 < sizeof hp->regs) {
766 					HWRITE4(hp, i,
767 					    (hp->regs[i + 0] << 0)
768 					    | (hp->regs[i + 1] << 8)
769 					    | (hp->regs[i + 2] << 16)
770 					    | (hp->regs[i + 3] << 24));
771 				} else {
772 					HWRITE4(hp, i,
773 					    (hp->regs[i + 0] << 0)
774 					    | (hp->regs[i + 1] << 8));
775 				}
776 			}
777 		} else {
778 			for (i = 0; i < sizeof hp->regs; i++) {
779 				HWRITE1(hp, i, hp->regs[i]);
780 			}
781 		}
782 	}
783 	return true;
784 }
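/*
 * Suspend/resume only snapshot the first sizeof(hp->regs) (14) bytes of the
 * register file, which in the standard layout covers the SDMA address,
 * block size/count, argument and transfer-mode registers; on resume the
 * controller is reset first and those bytes are written back.
 */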
785 
786 bool
787 sdhc_shutdown(device_t dev, int flags)
788 {
789 	struct sdhc_softc *sc = device_private(dev);
790 	struct sdhc_host *hp;
791 
792 	/* XXX chip locks up if we don't disable it before reboot. */
793 	for (size_t i = 0; i < sc->sc_nhosts; i++) {
794 		hp = sc->sc_host[i];
795 		(void)sdhc_host_reset(hp);
796 	}
797 	return true;
798 }
799 
800 /*
801  * Reset the host controller.  Called during initialization, when
802  * cards are removed, upon resume, and during error recovery.
803  */
804 static int
805 sdhc_host_reset1(sdmmc_chipset_handle_t sch)
806 {
807 	struct sdhc_host *hp = (struct sdhc_host *)sch;
808 	uint32_t sdhcimask;
809 	int error;
810 
811 	KASSERT(mutex_owned(&hp->intr_lock));
812 
813 	/* Disable all interrupts. */
814 	if (ISSET(hp->sc->sc_flags, SDHC_FLAG_32BIT_ACCESS)) {
815 		HWRITE4(hp, SDHC_NINTR_SIGNAL_EN, 0);
816 	} else {
817 		HWRITE2(hp, SDHC_NINTR_SIGNAL_EN, 0);
818 	}
819 
820 	/* Let sdhc_bus_power restore power */
821 	hp->vdd = 0;
822 
823 	/*
824 	 * Reset the entire host controller and wait up to 100ms for
825 	 * the controller to clear the reset bit.
826 	 */
827 	error = sdhc_soft_reset(hp, SDHC_RESET_ALL);
828 	if (error)
829 		goto out;
830 
831 	/* Set data timeout counter value to max for now. */
832 	HWRITE1(hp, SDHC_TIMEOUT_CTL, SDHC_TIMEOUT_MAX);
833 #if 1
834 	if (ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED))
835 		HWRITE4(hp, SDHC_NINTR_STATUS, SDHC_CMD_TIMEOUT_ERROR << 16);
836 #endif
837 
838 	/* Enable interrupts. */
839 	sdhcimask = SDHC_CARD_REMOVAL | SDHC_CARD_INSERTION |
840 	    SDHC_BUFFER_READ_READY | SDHC_BUFFER_WRITE_READY |
841 	    SDHC_DMA_INTERRUPT | SDHC_BLOCK_GAP_EVENT |
842 	    SDHC_TRANSFER_COMPLETE | SDHC_COMMAND_COMPLETE;
843 	if (ISSET(hp->sc->sc_flags, SDHC_FLAG_32BIT_ACCESS)) {
844 		sdhcimask |= SDHC_EINTR_STATUS_MASK << 16;
845 		HWRITE4(hp, SDHC_NINTR_STATUS_EN, sdhcimask);
846 		sdhcimask ^=
847 		    (SDHC_EINTR_STATUS_MASK ^ SDHC_EINTR_SIGNAL_MASK) << 16;
848 		sdhcimask ^= SDHC_BUFFER_READ_READY ^ SDHC_BUFFER_WRITE_READY;
849 		HWRITE4(hp, SDHC_NINTR_SIGNAL_EN, sdhcimask);
850 	} else {
851 		HWRITE2(hp, SDHC_NINTR_STATUS_EN, sdhcimask);
852 		HWRITE2(hp, SDHC_EINTR_STATUS_EN, SDHC_EINTR_STATUS_MASK);
853 		sdhcimask ^= SDHC_BUFFER_READ_READY ^ SDHC_BUFFER_WRITE_READY;
854 		HWRITE2(hp, SDHC_NINTR_SIGNAL_EN, sdhcimask);
855 		HWRITE2(hp, SDHC_EINTR_SIGNAL_EN, SDHC_EINTR_SIGNAL_MASK);
856 	}
857 
858 out:
859 	return error;
860 }
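/*
 * Note on the mask juggling above: every listed event is enabled in the
 * status-enable register, but the XOR with (SDHC_BUFFER_READ_READY ^
 * SDHC_BUFFER_WRITE_READY) strips both buffer-ready bits from the
 * signal-enable mask, so those conditions are visible in the status
 * register without raising an interrupt; the tuning code and the
 * enhanced/uSDHC PIO path re-enable them explicitly when they need them.
 */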
861 
862 static int
863 sdhc_host_reset(sdmmc_chipset_handle_t sch)
864 {
865 	struct sdhc_host *hp = (struct sdhc_host *)sch;
866 	int error;
867 
868 	mutex_enter(&hp->intr_lock);
869 	error = sdhc_host_reset1(sch);
870 	mutex_exit(&hp->intr_lock);
871 
872 	return error;
873 }
874 
875 static uint32_t
876 sdhc_host_ocr(sdmmc_chipset_handle_t sch)
877 {
878 	struct sdhc_host *hp = (struct sdhc_host *)sch;
879 
880 	return hp->ocr;
881 }
882 
883 static int
884 sdhc_host_maxblklen(sdmmc_chipset_handle_t sch)
885 {
886 	struct sdhc_host *hp = (struct sdhc_host *)sch;
887 
888 	return hp->maxblklen;
889 }
890 
891 /*
892  * Return non-zero if the card is currently inserted.
893  */
894 static int
895 sdhc_card_detect(sdmmc_chipset_handle_t sch)
896 {
897 	struct sdhc_host *hp = (struct sdhc_host *)sch;
898 	int r;
899 
900 	if (hp->sc->sc_vendor_card_detect)
901 		return (*hp->sc->sc_vendor_card_detect)(hp->sc);
902 
903 	r = ISSET(HREAD4(hp, SDHC_PRESENT_STATE), SDHC_CARD_INSERTED);
904 
905 	return r ? 1 : 0;
906 }
907 
908 /*
909  * Return non-zero if the card is currently write-protected.
910  */
911 static int
912 sdhc_write_protect(sdmmc_chipset_handle_t sch)
913 {
914 	struct sdhc_host *hp = (struct sdhc_host *)sch;
915 	int r;
916 
917 	if (hp->sc->sc_vendor_write_protect)
918 		return (*hp->sc->sc_vendor_write_protect)(hp->sc);
919 
920 	r = ISSET(HREAD4(hp, SDHC_PRESENT_STATE), SDHC_WRITE_PROTECT_SWITCH);
921 
922 	return r ? 0 : 1;
923 }
924 
925 /*
926  * Set or change SD bus voltage and enable or disable SD bus power.
927  * Return zero on success.
928  */
929 static int
930 sdhc_bus_power(sdmmc_chipset_handle_t sch, uint32_t ocr)
931 {
932 	struct sdhc_host *hp = (struct sdhc_host *)sch;
933 	uint8_t vdd;
934 	int error = 0;
935 	const uint32_t pcmask =
936 	    ~(SDHC_BUS_POWER | (SDHC_VOLTAGE_MASK << SDHC_VOLTAGE_SHIFT));
937 	uint32_t reg;
938 
939 	mutex_enter(&hp->intr_lock);
940 
941 	/*
942 	 * Disable bus power before voltage change.
943 	 */
944 	if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_32BIT_ACCESS)
945 	    && !ISSET(hp->sc->sc_flags, SDHC_FLAG_NO_PWR0)) {
946 		hp->vdd = 0;
947 		HWRITE1(hp, SDHC_POWER_CTL, 0);
948 	}
949 
950 	/* If power is disabled, reset the host and return now. */
951 	if (ocr == 0) {
952 		(void)sdhc_host_reset1(hp);
953 		callout_halt(&hp->tuning_timer, &hp->intr_lock);
954 		goto out;
955 	}
956 
957 	/*
958 	 * Select the lowest voltage according to capabilities.
959 	 */
960 	ocr &= hp->ocr;
961 	if (ISSET(ocr, MMC_OCR_1_65V_1_95V)) {
962 		vdd = SDHC_VOLTAGE_1_8V;
963 	} else if (ISSET(ocr, MMC_OCR_2_9V_3_0V|MMC_OCR_3_0V_3_1V)) {
964 		vdd = SDHC_VOLTAGE_3_0V;
965 	} else if (ISSET(ocr, MMC_OCR_3_2V_3_3V|MMC_OCR_3_3V_3_4V)) {
966 		vdd = SDHC_VOLTAGE_3_3V;
967 	} else {
968 		/* Unsupported voltage level requested. */
969 		error = EINVAL;
970 		goto out;
971 	}
972 
973 	/*
974 	 * Did voltage change ?
975 	 */
976 	if (vdd == hp->vdd)
977 		goto out;
978 
979 	if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED)) {
980 		/*
981 		 * Enable bus power.  Wait at least 1 ms (or 74 clocks) plus
982 		 * voltage ramp until power rises.
983 		 */
984 
985 		if (ISSET(hp->sc->sc_flags, SDHC_FLAG_SINGLE_POWER_WRITE)) {
986 			HWRITE1(hp, SDHC_POWER_CTL,
987 			    (vdd << SDHC_VOLTAGE_SHIFT) | SDHC_BUS_POWER);
988 		} else {
989 			reg = HREAD1(hp, SDHC_POWER_CTL) & pcmask;
990 			HWRITE1(hp, SDHC_POWER_CTL, reg);
991 			sdmmc_delay(1);
992 			reg |= (vdd << SDHC_VOLTAGE_SHIFT);
993 			HWRITE1(hp, SDHC_POWER_CTL, reg);
994 			sdmmc_delay(1);
995 			reg |= SDHC_BUS_POWER;
996 			HWRITE1(hp, SDHC_POWER_CTL, reg);
997 			sdmmc_delay(10000);
998 		}
999 
1000 		/*
1001 		 * The host system may not power the bus due to battery low,
1002 		 * etc.  In that case, the host controller should clear the
1003 		 * bus power bit.
1004 		 */
1005 		if (!ISSET(HREAD1(hp, SDHC_POWER_CTL), SDHC_BUS_POWER)) {
1006 			error = ENXIO;
1007 			goto out;
1008 		}
1009 	}
1010 
1011 	/* power successfully changed */
1012 	hp->vdd = vdd;
1013 
1014 out:
1015 	mutex_exit(&hp->intr_lock);
1016 
1017 	return error;
1018 }
1019 
1020 /*
1021  * Return the smallest possible base clock frequency divisor value
1022  * for the CLOCK_CTL register to produce `freq' (kHz).
1023  */
1024 static bool
1025 sdhc_clock_divisor(struct sdhc_host *hp, u_int freq, u_int *divp)
1026 {
1027 	u_int div;
1028 
1029 	if (ISSET(hp->sc->sc_flags, SDHC_FLAG_HAVE_CGM)) {
1030 		for (div = hp->clkbase / freq; div <= 0x3ff; div++) {
1031 			if ((hp->clkbase / div) <= freq) {
1032 				*divp = SDHC_SDCLK_CGM
1033 				    | ((div & 0x300) << SDHC_SDCLK_XDIV_SHIFT)
1034 				    | ((div & 0x0ff) << SDHC_SDCLK_DIV_SHIFT);
1035 				//freq = hp->clkbase / div;
1036 				return true;
1037 			}
1038 		}
1039 		/* No divisor found. */
1040 		return false;
1041 	}
1042 	if (ISSET(hp->sc->sc_flags, SDHC_FLAG_HAVE_DVS)) {
1043 		u_int dvs = (hp->clkbase + freq - 1) / freq;
1044 		u_int roundup = dvs & 1;
1045 		for (dvs >>= 1, div = 1; div <= 256; div <<= 1, dvs >>= 1) {
1046 			if (dvs + roundup <= 16) {
1047 				dvs += roundup - 1;
1048 				*divp = (div << SDHC_SDCLK_DIV_SHIFT)
1049 				    |   (dvs << SDHC_SDCLK_DVS_SHIFT);
1050 				DPRINTF(2,
1051 				    ("%s: divisor for freq %u is %u * %u\n",
1052 				    HDEVNAME(hp), freq, div * 2, dvs + 1));
1053 				//freq = hp->clkbase / (div * 2) * (dvs + 1);
1054 				return true;
1055 			}
1056 			/*
1057 			 * If we drop bits, we need to round up the divisor.
1058 			 */
1059 			roundup |= dvs & 1;
1060 		}
1061 		/* No divisor found. */
1062 		return false;
1063 	}
1064 	if (hp->sc->sc_clkmsk != 0) {
1065 		div = howmany(hp->clkbase, freq);
1066 		if (div > (hp->sc->sc_clkmsk >> (ffs(hp->sc->sc_clkmsk) - 1)))
1067 			return false;
1068 		*divp = div << (ffs(hp->sc->sc_clkmsk) - 1);
1069 		//freq = hp->clkbase / div;
1070 		return true;
1071 	}
1072 	if (hp->specver >= SDHC_SPEC_VERS_300) {
1073 		div = howmany(hp->clkbase, freq);
1074 		div = div > 1 ? howmany(div, 2) : 0;
1075 		if (div > 0x3ff)
1076 			return false;
1077 		*divp = (((div >> 8) & SDHC_SDCLK_XDIV_MASK)
1078 			 << SDHC_SDCLK_XDIV_SHIFT) |
1079 			(((div >> 0) & SDHC_SDCLK_DIV_MASK)
1080 			 << SDHC_SDCLK_DIV_SHIFT);
1081 		//freq = hp->clkbase / (div ? div * 2 : 1);
1082 		return true;
1083 	} else {
1084 		for (div = 1; div <= 256; div *= 2) {
1085 			if ((hp->clkbase / div) <= freq) {
1086 				*divp = (div / 2) << SDHC_SDCLK_DIV_SHIFT;
1087 				//freq = hp->clkbase / div;
1088 				return true;
1089 			}
1090 		}
1091 		/* No divisor found. */
1092 		return false;
1093 	}
1094 	/* No divisor found. */
1095 	return false;
1096 }
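/*
 * Worked example for the generic SDHC 3.0 path above: with a 200000 kHz
 * base clock and a requested 400 kHz, div = howmany(200000, 400) = 500,
 * then howmany(500, 2) = 250, and the controller divides by 2*250, giving
 * exactly 400 kHz; a request for 50000 kHz yields div = 2 and an SDCLK of
 * 200000 / (2*2) = 50000 kHz.
 */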
1097 
1098 /*
1099  * Set or change SDCLK frequency or disable the SD clock.
1100  * Return zero on success.
1101  */
1102 static int
1103 sdhc_bus_clock_ddr(sdmmc_chipset_handle_t sch, int freq, bool ddr)
1104 {
1105 	struct sdhc_host *hp = (struct sdhc_host *)sch;
1106 	u_int div;
1107 	u_int timo;
1108 	int16_t reg;
1109 	int error = 0;
1110 	bool present __diagused;
1111 
1112 #ifdef DIAGNOSTIC
1113 	present = ISSET(HREAD4(hp, SDHC_PRESENT_STATE), SDHC_CMD_INHIBIT_MASK);
1114 
1115 	/* Must not stop the clock if commands are in progress. */
1116 	if (present && sdhc_card_detect(hp)) {
1117 		aprint_normal_dev(hp->sc->sc_dev,
1118 		    "%s: command in progress\n", __func__);
1119 	}
1120 #endif
1121 
1122 	if (hp->sc->sc_vendor_bus_clock) {
1123 		mutex_enter(&hp->bus_clock_lock);
1124 		error = (*hp->sc->sc_vendor_bus_clock)(hp->sc, freq);
1125 		mutex_exit(&hp->bus_clock_lock);
1126 		if (error != 0)
1127 			return error;
1128 	}
1129 
1130 	mutex_enter(&hp->intr_lock);
1131 
1132 	/*
1133 	 * Stop SD clock before changing the frequency.
1134 	 */
1135 	if (ISSET(hp->sc->sc_flags, SDHC_FLAG_USDHC)) {
1136 		HCLR4(hp, SDHC_VEND_SPEC,
1137 		    SDHC_VEND_SPEC_CARD_CLK_SOFT_EN |
1138 		    SDHC_VEND_SPEC_FRC_SDCLK_ON);
1139 		if (freq == SDMMC_SDCLK_OFF) {
1140 			goto out;
1141 		}
1142 	} else if (ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED)) {
1143 		HCLR4(hp, SDHC_CLOCK_CTL, 0xfff8);
1144 		if (freq == SDMMC_SDCLK_OFF) {
1145 			HSET4(hp, SDHC_CLOCK_CTL, 0x80f0);
1146 			goto out;
1147 		}
1148 	} else {
1149 		HCLR2(hp, SDHC_CLOCK_CTL, SDHC_SDCLK_ENABLE);
1150 		if (freq == SDMMC_SDCLK_OFF)
1151 			goto out;
1152 	}
1153 
1154 	if (ISSET(hp->sc->sc_flags, SDHC_FLAG_USDHC)) {
1155 		if (ddr)
1156 			HSET4(hp, SDHC_MIX_CTRL, SDHC_USDHC_DDR_EN);
1157 		else
1158 			HCLR4(hp, SDHC_MIX_CTRL, SDHC_USDHC_DDR_EN);
1159 	} else if (hp->specver >= SDHC_SPEC_VERS_300) {
1160 		HCLR2(hp, SDHC_HOST_CTL2, SDHC_UHS_MODE_SELECT_MASK);
1161 		if (freq > 100000) {
1162 			HSET2(hp, SDHC_HOST_CTL2, SDHC_UHS_MODE_SELECT_SDR104);
1163 		} else if (freq > 50000) {
1164 			if (ddr) {
1165 				HSET2(hp, SDHC_HOST_CTL2,
1166 				    SDHC_UHS_MODE_SELECT_DDR50);
1167 			} else {
1168 				HSET2(hp, SDHC_HOST_CTL2,
1169 				    SDHC_UHS_MODE_SELECT_SDR50);
1170 			}
1171 		} else if (freq > 25000) {
1172 			if (ddr) {
1173 				HSET2(hp, SDHC_HOST_CTL2,
1174 				    SDHC_UHS_MODE_SELECT_DDR50);
1175 			} else {
1176 				HSET2(hp, SDHC_HOST_CTL2,
1177 				    SDHC_UHS_MODE_SELECT_SDR25);
1178 			}
1179 		} else if (freq > 400) {
1180 			HSET2(hp, SDHC_HOST_CTL2, SDHC_UHS_MODE_SELECT_SDR12);
1181 		}
1182 	}
1183 
1184 	/*
1185 	 * Slow down the Ricoh 5U823 controller, which isn't reliable
1186 	 * at a 100 MHz bus clock.
1187 	 */
1188 	if (ISSET(hp->sc->sc_flags, SDHC_FLAG_SLOW_SDR50)) {
1189 		if (freq == 100000)
1190 			--freq;
1191 	}
1192 
1193 	/*
1194 	 * Set the minimum base clock frequency divisor.
1195 	 */
1196 	if (!sdhc_clock_divisor(hp, freq, &div)) {
1197 		/* Invalid base clock frequency or `freq' value. */
1198 		aprint_error_dev(hp->sc->sc_dev,
1199 			"Invalid bus clock %d kHz\n", freq);
1200 		error = EINVAL;
1201 		goto out;
1202 	}
1203 	if (ISSET(hp->sc->sc_flags, SDHC_FLAG_USDHC)) {
1204 		if (ddr) {
1205 			/* in DDR mode, halve the divisor */
1206 			div = ((div >> 1) & (SDHC_SDCLK_DIV_MASK <<
1207 			    SDHC_SDCLK_DIV_SHIFT)) |
1208 			    (div & (SDHC_SDCLK_DVS_MASK <<
1209 			    SDHC_SDCLK_DVS_SHIFT));
1210 		}
1211 		for (timo = 1000; timo > 0; timo--) {
1212 			if (ISSET(HREAD4(hp, SDHC_PRESENT_STATE), SDHC_SDSTB))
1213 				break;
1214 			sdmmc_delay(10);
1215 		}
1216 		HWRITE4(hp, SDHC_CLOCK_CTL,
1217 		    div | (SDHC_TIMEOUT_MAX << 16) | 0x0f);
1218 	} else if (ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED)) {
1219 		HWRITE4(hp, SDHC_CLOCK_CTL,
1220 		    div | (SDHC_TIMEOUT_MAX << 16));
1221 	} else {
1222 		reg = HREAD2(hp, SDHC_CLOCK_CTL);
1223 		reg &= (SDHC_INTCLK_STABLE | SDHC_INTCLK_ENABLE);
1224 		HWRITE2(hp, SDHC_CLOCK_CTL, reg | div);
1225 	}
1226 
1227 	/*
1228 	 * Start internal clock.  Wait 10ms for stabilization.
1229 	 */
1230 	if (ISSET(hp->sc->sc_flags, SDHC_FLAG_USDHC)) {
1231 		HSET4(hp, SDHC_VEND_SPEC,
1232 		    SDHC_VEND_SPEC_CARD_CLK_SOFT_EN |
1233 		    SDHC_VEND_SPEC_FRC_SDCLK_ON);
1234 	} else if (ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED)) {
1235 		sdmmc_delay(10000);
1236 		HSET4(hp, SDHC_CLOCK_CTL,
1237 		    8 | SDHC_INTCLK_ENABLE | SDHC_INTCLK_STABLE);
1238 	} else {
1239 		HSET2(hp, SDHC_CLOCK_CTL, SDHC_INTCLK_ENABLE);
1240 		for (timo = 1000; timo > 0; timo--) {
1241 			if (ISSET(HREAD2(hp, SDHC_CLOCK_CTL),
1242 			    SDHC_INTCLK_STABLE))
1243 				break;
1244 			sdmmc_delay(10);
1245 		}
1246 		if (timo == 0) {
1247 			error = ETIMEDOUT;
1248 			DPRINTF(1,("%s: timeout\n", __func__));
1249 			goto out;
1250 		}
1251 	}
1252 
1253 	if (ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED | SDHC_FLAG_USDHC)) {
1254 		HSET1(hp, SDHC_SOFTWARE_RESET, SDHC_INIT_ACTIVE);
1255 		/*
1256 		 * Sending 80 clocks at 400kHz takes 200us.
1257 		 * So delay for that time + slop and then
1258 		 * check a few times for completion.
1259 		 */
1260 		sdmmc_delay(210);
1261 		for (timo = 10; timo > 0; timo--) {
1262 			if (!ISSET(HREAD1(hp, SDHC_SOFTWARE_RESET),
1263 			    SDHC_INIT_ACTIVE))
1264 				break;
1265 			sdmmc_delay(10);
1266 		}
1267 		DPRINTF(2,("%s: %u init spins\n", __func__, 10 - timo));
1268 
1269 		/*
1270 		 * Enable SD clock.
1271 		 */
1272 		if (ISSET(hp->sc->sc_flags, SDHC_FLAG_USDHC)) {
1273 			HSET4(hp, SDHC_VEND_SPEC,
1274 			    SDHC_VEND_SPEC_CARD_CLK_SOFT_EN |
1275 			    SDHC_VEND_SPEC_FRC_SDCLK_ON);
1276 		} else {
1277 			HSET4(hp, SDHC_CLOCK_CTL, SDHC_SDCLK_ENABLE);
1278 		}
1279 	} else {
1280 		/*
1281 		 * Enable SD clock.
1282 		 */
1283 		HSET2(hp, SDHC_CLOCK_CTL, SDHC_SDCLK_ENABLE);
1284 
1285 		if (freq > 25000 &&
1286 		    !ISSET(hp->sc->sc_flags, SDHC_FLAG_NO_HS_BIT))
1287 			HSET1(hp, SDHC_HOST_CTL, SDHC_HIGH_SPEED);
1288 		else
1289 			HCLR1(hp, SDHC_HOST_CTL, SDHC_HIGH_SPEED);
1290 	}
1291 
1292 	mutex_exit(&hp->intr_lock);
1293 
1294 	if (hp->sc->sc_vendor_bus_clock_post) {
1295 		mutex_enter(&hp->bus_clock_lock);
1296 		error = (*hp->sc->sc_vendor_bus_clock_post)(hp->sc, freq);
1297 		mutex_exit(&hp->bus_clock_lock);
1298 	}
1299 	return error;
1300 
1301 out:
1302 	mutex_exit(&hp->intr_lock);
1303 
1304 	return error;
1305 }
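/*
 * The clock-change sequence above follows the host standard: gate SDCLK,
 * select the UHS timing or DDR mode where supported, program the divisor,
 * start the internal clock and wait for it to stabilize, run the 80-clock
 * initialization sequence on enhanced/uSDHC parts, and only then re-enable
 * SDCLK (setting the high-speed bit on standard controllers above 25 MHz).
 */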
1306 
1307 static int
1308 sdhc_bus_width(sdmmc_chipset_handle_t sch, int width)
1309 {
1310 	struct sdhc_host *hp = (struct sdhc_host *)sch;
1311 	int reg;
1312 
1313 	switch (width) {
1314 	case 1:
1315 	case 4:
1316 		break;
1317 
1318 	case 8:
1319 		if (ISSET(hp->sc->sc_flags, SDHC_FLAG_8BIT_MODE))
1320 			break;
1321 		/* FALLTHROUGH */
1322 	default:
1323 		DPRINTF(0,("%s: unsupported bus width (%d)\n",
1324 		    HDEVNAME(hp), width));
1325 		return 1;
1326 	}
1327 
1328 	if (hp->sc->sc_vendor_bus_width) {
1329 		const int error = hp->sc->sc_vendor_bus_width(hp->sc, width);
1330 		if (error != 0)
1331 			return error;
1332 	}
1333 
1334 	mutex_enter(&hp->intr_lock);
1335 
1336 	reg = HREAD1(hp, SDHC_HOST_CTL);
1337 	if (ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED | SDHC_FLAG_USDHC)) {
1338 		reg &= ~(SDHC_4BIT_MODE|SDHC_ESDHC_8BIT_MODE);
1339 		if (width == 4)
1340 			reg |= SDHC_4BIT_MODE;
1341 		else if (width == 8)
1342 			reg |= SDHC_ESDHC_8BIT_MODE;
1343 	} else {
1344 		reg &= ~SDHC_4BIT_MODE;
1345 		if (hp->specver >= SDHC_SPEC_VERS_300) {
1346 			reg &= ~SDHC_8BIT_MODE;
1347 		}
1348 		if (width == 4) {
1349 			reg |= SDHC_4BIT_MODE;
1350 		} else if (width == 8 && hp->specver >= SDHC_SPEC_VERS_300) {
1351 			reg |= SDHC_8BIT_MODE;
1352 		}
1353 	}
1354 	HWRITE1(hp, SDHC_HOST_CTL, reg);
1355 
1356 	mutex_exit(&hp->intr_lock);
1357 
1358 	return 0;
1359 }
1360 
1361 static int
1362 sdhc_bus_rod(sdmmc_chipset_handle_t sch, int on)
1363 {
1364 	struct sdhc_host *hp = (struct sdhc_host *)sch;
1365 
1366 	if (hp->sc->sc_vendor_rod)
1367 		return (*hp->sc->sc_vendor_rod)(hp->sc, on);
1368 
1369 	return 0;
1370 }
1371 
1372 static void
1373 sdhc_card_enable_intr(sdmmc_chipset_handle_t sch, int enable)
1374 {
1375 	struct sdhc_host *hp = (struct sdhc_host *)sch;
1376 
1377 	if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED | SDHC_FLAG_USDHC)) {
1378 		mutex_enter(&hp->intr_lock);
1379 		if (enable) {
1380 			HSET2(hp, SDHC_NINTR_STATUS_EN, SDHC_CARD_INTERRUPT);
1381 			HSET2(hp, SDHC_NINTR_SIGNAL_EN, SDHC_CARD_INTERRUPT);
1382 		} else {
1383 			HCLR2(hp, SDHC_NINTR_SIGNAL_EN, SDHC_CARD_INTERRUPT);
1384 			HCLR2(hp, SDHC_NINTR_STATUS_EN, SDHC_CARD_INTERRUPT);
1385 		}
1386 		mutex_exit(&hp->intr_lock);
1387 	}
1388 }
1389 
1390 static void
1391 sdhc_card_intr_ack(sdmmc_chipset_handle_t sch)
1392 {
1393 	struct sdhc_host *hp = (struct sdhc_host *)sch;
1394 
1395 	if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED | SDHC_FLAG_USDHC)) {
1396 		mutex_enter(&hp->intr_lock);
1397 		HSET2(hp, SDHC_NINTR_STATUS_EN, SDHC_CARD_INTERRUPT);
1398 		mutex_exit(&hp->intr_lock);
1399 	}
1400 }
1401 
1402 static int
1403 sdhc_signal_voltage(sdmmc_chipset_handle_t sch, int signal_voltage)
1404 {
1405 	struct sdhc_host *hp = (struct sdhc_host *)sch;
1406 	int error = 0;
1407 
1408 	if (hp->specver < SDHC_SPEC_VERS_300)
1409 		return EINVAL;
1410 
1411 	mutex_enter(&hp->intr_lock);
1412 	switch (signal_voltage) {
1413 	case SDMMC_SIGNAL_VOLTAGE_180:
1414 		if (hp->sc->sc_vendor_signal_voltage != NULL) {
1415 			error = hp->sc->sc_vendor_signal_voltage(hp->sc,
1416 			    signal_voltage);
1417 			if (error != 0)
1418 				break;
1419 		}
1420 		if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_USDHC))
1421 			HSET2(hp, SDHC_HOST_CTL2, SDHC_1_8V_SIGNAL_EN);
1422 		break;
1423 	case SDMMC_SIGNAL_VOLTAGE_330:
1424 		if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_USDHC))
1425 			HCLR2(hp, SDHC_HOST_CTL2, SDHC_1_8V_SIGNAL_EN);
1426 		if (hp->sc->sc_vendor_signal_voltage != NULL) {
1427 			error = hp->sc->sc_vendor_signal_voltage(hp->sc,
1428 			    signal_voltage);
1429 			if (error != 0)
1430 				break;
1431 		}
1432 		break;
1433 	default:
1434 		error = EINVAL;
1435 		break;
1436 	}
1437 	mutex_exit(&hp->intr_lock);
1438 
1439 	return error;
1440 }
1441 
1442 /*
1443  * Sampling clock tuning procedure (UHS)
1444  */
1445 static int
1446 sdhc_execute_tuning1(struct sdhc_host *hp, int timing)
1447 {
1448 	struct sdmmc_command cmd;
1449 	uint8_t hostctl;
1450 	int opcode, error, retry = 40;
1451 
1452 	KASSERT(mutex_owned(&hp->intr_lock));
1453 
1454 	hp->tuning_timing = timing;
1455 
1456 	switch (timing) {
1457 	case SDMMC_TIMING_MMC_HS200:
1458 		opcode = MMC_SEND_TUNING_BLOCK_HS200;
1459 		break;
1460 	case SDMMC_TIMING_UHS_SDR50:
1461 		if (!ISSET(hp->sc->sc_caps2, SDHC_TUNING_SDR50))
1462 			return 0;
1463 		/* FALLTHROUGH */
1464 	case SDMMC_TIMING_UHS_SDR104:
1465 		opcode = MMC_SEND_TUNING_BLOCK;
1466 		break;
1467 	default:
1468 		return EINVAL;
1469 	}
1470 
1471 	hostctl = HREAD1(hp, SDHC_HOST_CTL);
1472 
1473 	/* enable buffer read ready interrupt */
1474 	HSET2(hp, SDHC_NINTR_SIGNAL_EN, SDHC_BUFFER_READ_READY);
1475 	HSET2(hp, SDHC_NINTR_STATUS_EN, SDHC_BUFFER_READ_READY);
1476 
1477 	/* disable DMA */
1478 	HCLR1(hp, SDHC_HOST_CTL, SDHC_DMA_SELECT);
1479 
1480 	/* reset tuning circuit */
1481 	HCLR2(hp, SDHC_HOST_CTL2, SDHC_SAMPLING_CLOCK_SEL);
1482 
1483 	/* start of tuning */
1484 	HWRITE2(hp, SDHC_HOST_CTL2, SDHC_EXECUTE_TUNING);
1485 
1486 	do {
1487 		memset(&cmd, 0, sizeof(cmd));
1488 		cmd.c_opcode = opcode;
1489 		cmd.c_arg = 0;
1490 		cmd.c_flags = SCF_CMD_ADTC | SCF_CMD_READ | SCF_RSP_R1;
1491 		if (ISSET(hostctl, SDHC_8BIT_MODE)) {
1492 			cmd.c_blklen = cmd.c_datalen = 128;
1493 		} else {
1494 			cmd.c_blklen = cmd.c_datalen = 64;
1495 		}
1496 
1497 		error = sdhc_start_command(hp, &cmd);
1498 		if (error)
1499 			break;
1500 
1501 		if (!sdhc_wait_intr(hp, SDHC_BUFFER_READ_READY,
1502 		    SDHC_TUNING_TIMEOUT, false)) {
1503 			break;
1504 		}
1505 
1506 		delay(1000);
1507 	} while (HREAD2(hp, SDHC_HOST_CTL2) & SDHC_EXECUTE_TUNING && --retry);
1508 
1509 	/* disable buffer read ready interrupt */
1510 	HCLR2(hp, SDHC_NINTR_SIGNAL_EN, SDHC_BUFFER_READ_READY);
1511 	HCLR2(hp, SDHC_NINTR_STATUS_EN, SDHC_BUFFER_READ_READY);
1512 
1513 	if (HREAD2(hp, SDHC_HOST_CTL2) & SDHC_EXECUTE_TUNING) {
1514 		HCLR2(hp, SDHC_HOST_CTL2,
1515 		    SDHC_SAMPLING_CLOCK_SEL|SDHC_EXECUTE_TUNING);
1516 		sdhc_soft_reset(hp, SDHC_RESET_DAT|SDHC_RESET_CMD);
1517 		aprint_error_dev(hp->sc->sc_dev,
1518 		    "tuning did not complete, using fixed sampling clock\n");
1519 		return 0;		/* tuning did not complete */
1520 	}
1521 
1522 	if ((HREAD2(hp, SDHC_HOST_CTL2) & SDHC_SAMPLING_CLOCK_SEL) == 0) {
1523 		HCLR2(hp, SDHC_HOST_CTL2,
1524 		    SDHC_SAMPLING_CLOCK_SEL|SDHC_EXECUTE_TUNING);
1525 		sdhc_soft_reset(hp, SDHC_RESET_DAT|SDHC_RESET_CMD);
1526 		aprint_error_dev(hp->sc->sc_dev,
1527 		    "tuning failed, using fixed sampling clock\n");
1528 		return 0;		/* tuning failed */
1529 	}
1530 
1531 	if (hp->tuning_timer_count) {
1532 		callout_schedule(&hp->tuning_timer,
1533 		    hz * hp->tuning_timer_count);
1534 	}
1535 
1536 	return 0;		/* tuning completed */
1537 }
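/*
 * Tuning, in short: with SDHC_EXECUTE_TUNING set, the card is repeatedly
 * sent tuning blocks (CMD19 for SD, CMD21 for eMMC HS200), up to 40
 * attempts, until the controller clears the bit.  If it never clears, or
 * it clears without selecting the tuned clock (SDHC_SAMPLING_CLOCK_SEL
 * still 0), the tuning circuit is reset and the fixed sampling clock is
 * used; otherwise a periodic re-tune is scheduled when the capabilities
 * advertise a re-tuning timer.
 */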
1538 
1539 static int
1540 sdhc_execute_tuning(sdmmc_chipset_handle_t sch, int timing)
1541 {
1542 	struct sdhc_host *hp = (struct sdhc_host *)sch;
1543 	int error;
1544 
1545 	mutex_enter(&hp->intr_lock);
1546 	error = sdhc_execute_tuning1(hp, timing);
1547 	mutex_exit(&hp->intr_lock);
1548 	return error;
1549 }
1550 
1551 static void
1552 sdhc_tuning_timer(void *arg)
1553 {
1554 	struct sdhc_host *hp = arg;
1555 
1556 	atomic_swap_uint(&hp->tuning_timer_pending, 1);
1557 }
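/*
 * The callout deliberately touches no registers itself: it only sets the
 * pending flag, and the actual re-tune runs lazily at the start of the
 * next sdhc_exec_command() while the intr_lock is held.
 */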
1558 
1559 static void
1560 sdhc_hw_reset(sdmmc_chipset_handle_t sch)
1561 {
1562 	struct sdhc_host *hp = (struct sdhc_host *)sch;
1563 	struct sdhc_softc *sc = hp->sc;
1564 
1565 	if (sc->sc_vendor_hw_reset != NULL)
1566 		sc->sc_vendor_hw_reset(sc, hp);
1567 }
1568 
1569 static int
1570 sdhc_wait_state(struct sdhc_host *hp, uint32_t mask, uint32_t value)
1571 {
1572 	uint32_t state;
1573 	int timeout;
1574 
1575 	for (timeout = 100000; timeout > 0; timeout--) {
1576 		if (((state = HREAD4(hp, SDHC_PRESENT_STATE)) & mask) == value)
1577 			return 0;
1578 		sdmmc_delay(10);
1579 	}
1580 	aprint_error_dev(hp->sc->sc_dev, "timeout waiting for mask %#x value %#x (state=%#x)\n",
1581 	    mask, value, state);
1582 	return ETIMEDOUT;
1583 }
1584 
1585 static void
1586 sdhc_exec_command(sdmmc_chipset_handle_t sch, struct sdmmc_command *cmd)
1587 {
1588 	struct sdhc_host *hp = (struct sdhc_host *)sch;
1589 	int error;
1590 	bool probing;
1591 
1592 	mutex_enter(&hp->intr_lock);
1593 
1594 	if (atomic_cas_uint(&hp->tuning_timer_pending, 1, 0) == 1) {
1595 		(void)sdhc_execute_tuning1(hp, hp->tuning_timing);
1596 	}
1597 
1598 	if (cmd->c_data &&
1599 	    ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED | SDHC_FLAG_USDHC)) {
1600 		const uint16_t ready = SDHC_BUFFER_READ_READY | SDHC_BUFFER_WRITE_READY;
1601 		if (ISSET(hp->flags, SHF_USE_DMA)) {
1602 			HCLR2(hp, SDHC_NINTR_SIGNAL_EN, ready);
1603 			HCLR2(hp, SDHC_NINTR_STATUS_EN, ready);
1604 		} else {
1605 			HSET2(hp, SDHC_NINTR_SIGNAL_EN, ready);
1606 			HSET2(hp, SDHC_NINTR_STATUS_EN, ready);
1607 		}
1608 	}
1609 
1610 	if (ISSET(hp->sc->sc_flags, SDHC_FLAG_NO_TIMEOUT)) {
1611 		const uint16_t eintr = SDHC_CMD_TIMEOUT_ERROR;
1612 		if (cmd->c_data != NULL) {
1613 			HCLR2(hp, SDHC_EINTR_SIGNAL_EN, eintr);
1614 			HCLR2(hp, SDHC_EINTR_STATUS_EN, eintr);
1615 		} else {
1616 			HSET2(hp, SDHC_EINTR_SIGNAL_EN, eintr);
1617 			HSET2(hp, SDHC_EINTR_STATUS_EN, eintr);
1618 		}
1619 	}
1620 
1621 	if (ISSET(hp->sc->sc_flags, SDHC_FLAG_STOP_WITH_TC)) {
1622 		if (cmd->c_opcode == MMC_STOP_TRANSMISSION)
1623 			SET(cmd->c_flags, SCF_RSP_BSY);
1624 	}
1625 
1626 	/*
1627 	 * Start the MMC command, or mark `cmd' as failed and return.
1628 	 */
1629 	error = sdhc_start_command(hp, cmd);
1630 	if (error) {
1631 		cmd->c_error = error;
1632 		goto out;
1633 	}
1634 
1635 	/*
1636 	 * Wait until the command phase is done, or until the command
1637 	 * is marked done for any other reason.
1638 	 */
1639 	probing = (cmd->c_flags & SCF_TOUT_OK) != 0;
1640 	if (!sdhc_wait_intr(hp, SDHC_COMMAND_COMPLETE, SDHC_COMMAND_TIMEOUT*3, probing)) {
1641 		DPRINTF(1,("%s: timeout for command\n", __func__));
1642 		sdmmc_delay(50);
1643 		cmd->c_error = ETIMEDOUT;
1644 		goto out;
1645 	}
1646 
1647 	/*
1648 	 * The host controller removes bits [0:7] from the response
1649 	 * data (CRC) and we pass the data up unchanged to the bus
1650 	 * driver (without padding).
1651 	 */
1652 	if (cmd->c_error == 0 && ISSET(cmd->c_flags, SCF_RSP_PRESENT)) {
1653 		cmd->c_resp[0] = HREAD4(hp, SDHC_RESPONSE + 0);
1654 		if (ISSET(cmd->c_flags, SCF_RSP_136)) {
1655 			cmd->c_resp[1] = HREAD4(hp, SDHC_RESPONSE + 4);
1656 			cmd->c_resp[2] = HREAD4(hp, SDHC_RESPONSE + 8);
1657 			cmd->c_resp[3] = HREAD4(hp, SDHC_RESPONSE + 12);
1658 			if (ISSET(hp->sc->sc_flags, SDHC_FLAG_RSP136_CRC)) {
1659 				cmd->c_resp[0] = (cmd->c_resp[0] >> 8) |
1660 				    (cmd->c_resp[1] << 24);
1661 				cmd->c_resp[1] = (cmd->c_resp[1] >> 8) |
1662 				    (cmd->c_resp[2] << 24);
1663 				cmd->c_resp[2] = (cmd->c_resp[2] >> 8) |
1664 				    (cmd->c_resp[3] << 24);
1665 				cmd->c_resp[3] = (cmd->c_resp[3] >> 8);
1666 			}
1667 		}
1668 	}
1669 	DPRINTF(1,("%s: resp = %08x\n", HDEVNAME(hp), cmd->c_resp[0]));
1670 
1671 	/*
1672 	 * If the command has data to transfer in any direction,
1673 	 * execute the transfer now.
1674 	 */
1675 	if (cmd->c_error == 0 && cmd->c_data != NULL)
1676 		sdhc_transfer_data(hp, cmd);
1677 	else if (ISSET(cmd->c_flags, SCF_RSP_BSY)) {
1678 		if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_NO_BUSY_INTR) &&
1679 		    !sdhc_wait_intr(hp, SDHC_TRANSFER_COMPLETE, hz * 10, false)) {
1680 			DPRINTF(1,("%s: sdhc_exec_command: RSP_BSY\n",
1681 			    HDEVNAME(hp)));
1682 			cmd->c_error = ETIMEDOUT;
1683 			goto out;
1684 		}
1685 	}
1686 
1687 out:
1688 	if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED)
1689 	    && !ISSET(hp->sc->sc_flags, SDHC_FLAG_NO_LED_ON)) {
1690 		/* Turn off the LED. */
1691 		HCLR1(hp, SDHC_HOST_CTL, SDHC_LED_ON);
1692 	}
1693 	SET(cmd->c_flags, SCF_ITSDONE);
1694 
1695 	if (ISSET(hp->sc->sc_flags, SDHC_FLAG_NO_AUTO_STOP) &&
1696 	    cmd->c_opcode == MMC_STOP_TRANSMISSION)
1697 		(void)sdhc_soft_reset(hp, SDHC_RESET_CMD|SDHC_RESET_DAT);
1698 
1699 	mutex_exit(&hp->intr_lock);
1700 
1701 	DPRINTF(1,("%s: cmd %d %s (flags=%08x error=%d)\n", HDEVNAME(hp),
1702 	    cmd->c_opcode, (cmd->c_error == 0) ? "done" : "abort",
1703 	    cmd->c_flags, cmd->c_error));
1704 }
1705 
1706 static int
1707 sdhc_start_command(struct sdhc_host *hp, struct sdmmc_command *cmd)
1708 {
1709 	struct sdhc_softc * const sc = hp->sc;
1710 	uint16_t blksize = 0;
1711 	uint16_t blkcount = 0;
1712 	uint16_t mode;
1713 	uint16_t command;
1714 	uint32_t pmask;
1715 	int error;
1716 
1717 	KASSERT(mutex_owned(&hp->intr_lock));
1718 
1719 	DPRINTF(1,("%s: start cmd %d arg=%08x data=%p dlen=%d flags=%08x, status=%#x\n",
1720 	    HDEVNAME(hp), cmd->c_opcode, cmd->c_arg, cmd->c_data,
1721 	    cmd->c_datalen, cmd->c_flags, HREAD4(hp, SDHC_NINTR_STATUS)));
1722 
1723 	/*
1724 	 * The maximum block length for commands should be the minimum
1725 	 * of the host buffer size and the card buffer size. (1.7.2)
1726 	 */
1727 
1728 	/* Fragment the data into proper blocks. */
1729 	if (cmd->c_datalen > 0) {
1730 		blksize = MIN(cmd->c_datalen, cmd->c_blklen);
1731 		blkcount = cmd->c_datalen / blksize;
1732 		if (cmd->c_datalen % blksize > 0) {
1733 			/* XXX: Split this command. (1.7.4) */
1734 			aprint_error_dev(sc->sc_dev,
1735 			    "data not a multiple of %u bytes\n", blksize);
1736 			return EINVAL;
1737 		}
1738 	}
1739 
1740 	/* Check limit imposed by 9-bit block count. (1.7.2) */
1741 	if (blkcount > SDHC_BLOCK_COUNT_MAX) {
1742 		aprint_error_dev(sc->sc_dev, "too much data\n");
1743 		return EINVAL;
1744 	}
1745 
1746 	/* Prepare transfer mode register value. (2.2.5) */
1747 	mode = 0;
1748 	if (ISSET(cmd->c_flags, SCF_CMD_READ))
1749 		mode |= SDHC_READ_MODE;
1750 	if (blkcount > 0) {
1751 		mode |= SDHC_BLOCK_COUNT_ENABLE;
1752 		if (blkcount > 1) {
1753 			mode |= SDHC_MULTI_BLOCK_MODE;
1754 			if (!ISSET(sc->sc_flags, SDHC_FLAG_NO_AUTO_STOP)
1755 			    && !ISSET(cmd->c_flags, SCF_NO_STOP))
1756 				mode |= SDHC_AUTO_CMD12_ENABLE;
1757 		}
1758 	}
1759 	if (cmd->c_dmamap != NULL && cmd->c_datalen > 0 &&
1760 	    ISSET(hp->flags,  SHF_MODE_DMAEN)) {
1761 		mode |= SDHC_DMA_ENABLE;
1762 	}
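	/*
	 * Example of the resulting mode word: a 4 KiB DMA read issued as
	 * 8 x 512-byte blocks ends up with blksize = 512, blkcount = 8 and
	 * mode = SDHC_READ_MODE | SDHC_BLOCK_COUNT_ENABLE |
	 * SDHC_MULTI_BLOCK_MODE | SDHC_AUTO_CMD12_ENABLE (assuming auto-stop
	 * is not disabled), plus SDHC_DMA_ENABLE when the host uses the
	 * DMAEN-style engine.
	 */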
1763 
1764 	/*
1765 	 * Prepare command register value. (2.2.6)
1766 	 */
1767 	command = (cmd->c_opcode & SDHC_COMMAND_INDEX_MASK) << SDHC_COMMAND_INDEX_SHIFT;
1768 
1769 	if (ISSET(cmd->c_flags, SCF_RSP_CRC))
1770 		command |= SDHC_CRC_CHECK_ENABLE;
1771 	if (ISSET(cmd->c_flags, SCF_RSP_IDX))
1772 		command |= SDHC_INDEX_CHECK_ENABLE;
1773 	if (cmd->c_datalen > 0)
1774 		command |= SDHC_DATA_PRESENT_SELECT;
1775 
1776 	if (!ISSET(cmd->c_flags, SCF_RSP_PRESENT))
1777 		command |= SDHC_NO_RESPONSE;
1778 	else if (ISSET(cmd->c_flags, SCF_RSP_136))
1779 		command |= SDHC_RESP_LEN_136;
1780 	else if (ISSET(cmd->c_flags, SCF_RSP_BSY))
1781 		command |= SDHC_RESP_LEN_48_CHK_BUSY;
1782 	else
1783 		command |= SDHC_RESP_LEN_48;
1784 
1785 	/* Wait until command and optionally data inhibit bits are clear. (1.5) */
1786 	pmask = SDHC_CMD_INHIBIT_CMD;
1787 	if (cmd->c_flags & (SCF_CMD_ADTC|SCF_RSP_BSY))
1788 		pmask |= SDHC_CMD_INHIBIT_DAT;
1789 	error = sdhc_wait_state(hp, pmask, 0);
1790 	if (error) {
1791 		(void) sdhc_soft_reset(hp, SDHC_RESET_DAT|SDHC_RESET_CMD);
1792 		device_printf(sc->sc_dev, "command or data phase inhibited\n");
1793 		return error;
1794 	}
1795 
1796 	DPRINTF(1,("%s: writing cmd: blksize=%d blkcnt=%d mode=%04x cmd=%04x\n",
1797 	    HDEVNAME(hp), blksize, blkcount, mode, command));
1798 
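	/*
	 * The upper bits of the block size register select the SDMA buffer
	 * boundary, encoded as 4 KiB << n; MAX(0, PAGE_SHIFT - 12) therefore
	 * selects a PAGE_SIZE boundary (clamped to a 4 KiB minimum).
	 */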
1799 	if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED | SDHC_FLAG_USDHC)) {
1800 		blksize |= (MAX(0, PAGE_SHIFT - 12) & SDHC_DMA_BOUNDARY_MASK) <<
1801 		    SDHC_DMA_BOUNDARY_SHIFT;	/* PAGE_SIZE DMA boundary */
1802 	}
1803 
1804 	if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED)) {
1805 		/* Alert the user not to remove the card. */
1806 		HSET1(hp, SDHC_HOST_CTL, SDHC_LED_ON);
1807 	}
1808 
1809 	/* Set DMA start address. */
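	/*
	 * For ADMA2, build one descriptor per DMA segment in the
	 * pre-allocated table: a 65536-byte segment is encoded as length 0,
	 * the last segment is marked END, and the table is terminated by a
	 * descriptor with the VALID bit clear before being flushed and
	 * handed to the controller through SDHC_ADMA_SYSTEM_ADDR.
	 */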
1810 	if (ISSET(hp->flags, SHF_USE_ADMA2_MASK) && cmd->c_data != NULL) {
1811 		for (int seg = 0; seg < cmd->c_dmamap->dm_nsegs; seg++) {
1812 			bus_addr_t paddr =
1813 			    cmd->c_dmamap->dm_segs[seg].ds_addr;
1814 			uint16_t len =
1815 			    cmd->c_dmamap->dm_segs[seg].ds_len == 65536 ?
1816 			    0 : cmd->c_dmamap->dm_segs[seg].ds_len;
1817 			uint16_t attr =
1818 			    SDHC_ADMA2_VALID | SDHC_ADMA2_ACT_TRANS;
1819 			if (seg == cmd->c_dmamap->dm_nsegs - 1) {
1820 				attr |= SDHC_ADMA2_END;
1821 			}
1822 			if (ISSET(hp->flags, SHF_USE_ADMA2_32)) {
1823 				struct sdhc_adma2_descriptor32 *desc =
1824 				    hp->adma2;
1825 				desc[seg].attribute = htole16(attr);
1826 				desc[seg].length = htole16(len);
1827 				desc[seg].address = htole32(paddr);
1828 			} else {
1829 				struct sdhc_adma2_descriptor64 *desc =
1830 				    hp->adma2;
1831 				desc[seg].attribute = htole16(attr);
1832 				desc[seg].length = htole16(len);
1833 				desc[seg].address = htole32(paddr & 0xffffffff);
1834 				desc[seg].address_hi = htole32(
1835 				    (uint64_t)paddr >> 32);
1836 			}
1837 		}
1838 		if (ISSET(hp->flags, SHF_USE_ADMA2_32)) {
1839 			struct sdhc_adma2_descriptor32 *desc = hp->adma2;
1840 			desc[cmd->c_dmamap->dm_nsegs].attribute = htole16(0);
1841 		} else {
1842 			struct sdhc_adma2_descriptor64 *desc = hp->adma2;
1843 			desc[cmd->c_dmamap->dm_nsegs].attribute = htole16(0);
1844 		}
1845 		bus_dmamap_sync(sc->sc_dmat, hp->adma_map, 0, PAGE_SIZE,
1846 		    BUS_DMASYNC_PREWRITE);
1847 
1848 		const bus_addr_t desc_addr = hp->adma_map->dm_segs[0].ds_addr;
1849 		HWRITE4(hp, SDHC_ADMA_SYSTEM_ADDR, desc_addr & 0xffffffff);
1850 		if (ISSET(hp->flags, SHF_USE_ADMA2_64)) {
1851 			HWRITE4(hp, SDHC_ADMA_SYSTEM_ADDR + 4,
1852 			    (uint64_t)desc_addr >> 32);
1853 		}
1854 
1855 		if (ISSET(hp->sc->sc_flags, SDHC_FLAG_USDHC)) {
1856 			HCLR4(hp, SDHC_HOST_CTL, SDHC_USDHC_DMA_SELECT);
1857 			HSET4(hp, SDHC_HOST_CTL, SDHC_USDHC_DMA_SELECT_ADMA2);
1858 		} else {
1859 			HCLR1(hp, SDHC_HOST_CTL, SDHC_DMA_SELECT);
1860 			HSET1(hp, SDHC_HOST_CTL, SDHC_DMA_SELECT_ADMA2);
1861 		}
1862 	} else if (ISSET(mode, SDHC_DMA_ENABLE) &&
1863 	    !ISSET(sc->sc_flags, SDHC_FLAG_EXTERNAL_DMA)) {
1864 		if (ISSET(hp->sc->sc_flags, SDHC_FLAG_USDHC)) {
1865 			HCLR4(hp, SDHC_HOST_CTL, SDHC_USDHC_DMA_SELECT);
1866 		}
1867 		HWRITE4(hp, SDHC_DMA_ADDR, cmd->c_dmamap->dm_segs[0].ds_addr);
1868 	}
1869 
1870 	/*
1871 	 * Start a CPU data transfer.  Writing to the high order byte
1872 	 * of the SDHC_COMMAND register triggers the SD command. (1.5)
1873 	 */
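	/*
	 * Controllers flagged SDHC_FLAG_32BIT_ACCESS require 32-bit register
	 * accesses, so adjacent 16-bit registers (block size and count,
	 * transfer mode and command) are written as combined words.
	 */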
1874 	if (ISSET(hp->sc->sc_flags, SDHC_FLAG_32BIT_ACCESS)) {
1875 		HWRITE4(hp, SDHC_BLOCK_SIZE, blksize | (blkcount << 16));
1876 		HWRITE4(hp, SDHC_ARGUMENT, cmd->c_arg);
1877 		if (ISSET(hp->sc->sc_flags, SDHC_FLAG_USDHC)) {
1878 			/* mode bits are in the MIX_CTRL register on uSDHC */
1879 			HWRITE4(hp, SDHC_MIX_CTRL, mode |
1880 			    (HREAD4(hp, SDHC_MIX_CTRL) & ~SDHC_TRANSFER_MODE_MASK));
1881 			if (cmd->c_opcode == MMC_STOP_TRANSMISSION)
1882 				command |= SDHC_COMMAND_TYPE_ABORT;
1883 			HWRITE4(hp, SDHC_TRANSFER_MODE, command << 16);
1884 		} else {
1885 			HWRITE4(hp, SDHC_TRANSFER_MODE, mode | (command << 16));
1886 		}
1887 	} else {
1888 		HWRITE2(hp, SDHC_BLOCK_SIZE, blksize);
1889 		HWRITE2(hp, SDHC_BLOCK_COUNT, blkcount);
1890 		HWRITE4(hp, SDHC_ARGUMENT, cmd->c_arg);
1891 		HWRITE2(hp, SDHC_TRANSFER_MODE, mode);
1892 		HWRITE2(hp, SDHC_COMMAND, command);
1893 	}
1894 
1895 	return 0;
1896 }
1897 
1898 static void
1899 sdhc_transfer_data(struct sdhc_host *hp, struct sdmmc_command *cmd)
1900 {
1901 	struct sdhc_softc *sc = hp->sc;
1902 	int error;
1903 
1904 	KASSERT(mutex_owned(&hp->intr_lock));
1905 
1906 	DPRINTF(1,("%s: data transfer: resp=%08x datalen=%u\n", HDEVNAME(hp),
1907 	    MMC_R1(cmd->c_resp), cmd->c_datalen));
1908 
1909 #ifdef SDHC_DEBUG
1910 	/* XXX I forgot why I wanted to know when this happens :-( */
1911 	if ((cmd->c_opcode == 52 || cmd->c_opcode == 53) &&
1912 	    ISSET(MMC_R1(cmd->c_resp), 0xcb00)) {
1913 		aprint_error_dev(hp->sc->sc_dev,
1914 		    "CMD52/53 error response flags %#x\n",
1915 		    MMC_R1(cmd->c_resp) & 0xff00);
1916 	}
1917 #endif
1918 
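	/*
	 * DMA transfers go through the attachment's vendor hook when one is
	 * provided, otherwise through the generic SDMA/ADMA2 completion
	 * path; commands without a DMA map are transferred by PIO.
	 */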
1919 	if (cmd->c_dmamap != NULL) {
1920 		if (hp->sc->sc_vendor_transfer_data_dma != NULL) {
1921 			error = hp->sc->sc_vendor_transfer_data_dma(sc, cmd);
1922 			if (error == 0 && !sdhc_wait_intr(hp,
1923 			    SDHC_TRANSFER_COMPLETE, SDHC_DMA_TIMEOUT, false)) {
1924 				DPRINTF(1,("%s: timeout\n", __func__));
1925 				error = ETIMEDOUT;
1926 			}
1927 		} else {
1928 			error = sdhc_transfer_data_dma(hp, cmd);
1929 		}
1930 	} else
1931 		error = sdhc_transfer_data_pio(hp, cmd);
1932 	if (error)
1933 		cmd->c_error = error;
1934 	SET(cmd->c_flags, SCF_ITSDONE);
1935 
1936 	DPRINTF(1,("%s: data transfer done (error=%d)\n",
1937 	    HDEVNAME(hp), cmd->c_error));
1938 }
1939 
1940 static int
1941 sdhc_transfer_data_dma(struct sdhc_host *hp, struct sdmmc_command *cmd)
1942 {
1943 	bus_dma_segment_t *dm_segs = cmd->c_dmamap->dm_segs;
1944 	bus_addr_t posaddr;
1945 	bus_addr_t segaddr;
1946 	bus_size_t seglen;
1947 	u_int seg = 0;
1948 	int error = 0;
1949 	int status;
1950 
1951 	KASSERT(mutex_owned(&hp->intr_lock));
1952 	KASSERT(HREAD2(hp, SDHC_NINTR_STATUS_EN) & SDHC_DMA_INTERRUPT);
1953 	KASSERT(HREAD2(hp, SDHC_NINTR_SIGNAL_EN) & SDHC_DMA_INTERRUPT);
1954 	KASSERT(HREAD2(hp, SDHC_NINTR_STATUS_EN) & SDHC_TRANSFER_COMPLETE);
1955 	KASSERT(HREAD2(hp, SDHC_NINTR_SIGNAL_EN) & SDHC_TRANSFER_COMPLETE);
1956 
1957 	for (;;) {
1958 		status = sdhc_wait_intr(hp,
1959 		    SDHC_DMA_INTERRUPT|SDHC_TRANSFER_COMPLETE,
1960 		    SDHC_DMA_TIMEOUT, false);
1961 
1962 		if (status & SDHC_TRANSFER_COMPLETE) {
1963 			break;
1964 		}
1965 		if (!status) {
1966 			DPRINTF(1,("%s: timeout\n", __func__));
1967 			error = ETIMEDOUT;
1968 			break;
1969 		}
1970 
1971 		if (ISSET(hp->flags, SHF_USE_ADMA2_MASK)) {
1972 			continue;
1973 		}
1974 
1975 		if ((status & SDHC_DMA_INTERRUPT) == 0) {
1976 			continue;
1977 		}
1978 
1979 		/* DMA Interrupt (boundary crossing) */
1980 
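		/*
		 * The SDMA engine pauses and raises this interrupt whenever
		 * it crosses the programmed buffer boundary; rewriting
		 * SDHC_DMA_ADDR (or loading the next dmamap segment) lets
		 * the transfer continue.
		 */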
1981 		segaddr = dm_segs[seg].ds_addr;
1982 		seglen = dm_segs[seg].ds_len;
1983 		posaddr = HREAD4(hp, SDHC_DMA_ADDR);
1984 
1985 		if ((seg == (cmd->c_dmamap->dm_nsegs-1)) && (posaddr == (segaddr + seglen))) {
1986 			continue;
1987 		}
1988 		if ((posaddr >= segaddr) && (posaddr < (segaddr + seglen)))
1989 			HWRITE4(hp, SDHC_DMA_ADDR, posaddr);
1990 		else if ((posaddr >= segaddr) && (posaddr == (segaddr + seglen)) && (seg + 1) < cmd->c_dmamap->dm_nsegs)
1991 			HWRITE4(hp, SDHC_DMA_ADDR, dm_segs[++seg].ds_addr);
1992 		KASSERT(seg < cmd->c_dmamap->dm_nsegs);
1993 	}
1994 
1995 	if (ISSET(hp->flags, SHF_USE_ADMA2_MASK)) {
1996 		bus_dmamap_sync(hp->sc->sc_dmat, hp->adma_map, 0,
1997 		    PAGE_SIZE, BUS_DMASYNC_POSTWRITE);
1998 	}
1999 
2000 	return error;
2001 }
2002 
2003 static int
2004 sdhc_transfer_data_pio(struct sdhc_host *hp, struct sdmmc_command *cmd)
2005 {
2006 	uint8_t *data = cmd->c_data;
2007 	void (*pio_func)(struct sdhc_host *, uint8_t *, u_int);
2008 	u_int len, datalen;
2009 	u_int imask;
2010 	u_int pmask;
2011 	int error = 0;
2012 
2013 	KASSERT(mutex_owned(&hp->intr_lock));
2014 
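	/*
	 * Pick the buffer-ready interrupt, the present-state bit to poll,
	 * and the PIO copy routine for this transfer direction; enhanced
	 * and uSDHC controllers use their own watermark-based data port
	 * routines.
	 */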
2015 	if (ISSET(cmd->c_flags, SCF_CMD_READ)) {
2016 		imask = SDHC_BUFFER_READ_READY;
2017 		pmask = SDHC_BUFFER_READ_ENABLE;
2018 		if (ISSET(hp->sc->sc_flags,
2019 		    SDHC_FLAG_ENHANCED | SDHC_FLAG_USDHC)) {
2020 			pio_func = esdhc_read_data_pio;
2021 		} else {
2022 			pio_func = sdhc_read_data_pio;
2023 		}
2024 	} else {
2025 		imask = SDHC_BUFFER_WRITE_READY;
2026 		pmask = SDHC_BUFFER_WRITE_ENABLE;
2027 		if (ISSET(hp->sc->sc_flags,
2028 		    SDHC_FLAG_ENHANCED | SDHC_FLAG_USDHC)) {
2029 			pio_func = esdhc_write_data_pio;
2030 		} else {
2031 			pio_func = sdhc_write_data_pio;
2032 		}
2033 	}
2034 	datalen = cmd->c_datalen;
2035 
2036 	KASSERT(mutex_owned(&hp->intr_lock));
2037 	KASSERT(HREAD2(hp, SDHC_NINTR_STATUS_EN) & imask);
2038 	KASSERT(HREAD2(hp, SDHC_NINTR_STATUS_EN) & SDHC_TRANSFER_COMPLETE);
2039 	KASSERT(HREAD2(hp, SDHC_NINTR_SIGNAL_EN) & SDHC_TRANSFER_COMPLETE);
2040 
2041 	while (datalen > 0) {
2042 		if (!ISSET(HREAD4(hp, SDHC_PRESENT_STATE), pmask)) {
2043 			if (ISSET(hp->sc->sc_flags, SDHC_FLAG_32BIT_ACCESS)) {
2044 				HSET4(hp, SDHC_NINTR_SIGNAL_EN, imask);
2045 			} else {
2046 				HSET2(hp, SDHC_NINTR_SIGNAL_EN, imask);
2047 			}
2048 			if (!sdhc_wait_intr(hp, imask, SDHC_BUFFER_TIMEOUT, false)) {
2049 				DPRINTF(1,("%s: timeout\n", __func__));
2050 				error = ETIMEDOUT;
2051 				break;
2052 			}
2053 
2054 			error = sdhc_wait_state(hp, pmask, pmask);
2055 			if (error)
2056 				break;
2057 		}
2058 
2059 		len = MIN(datalen, cmd->c_blklen);
2060 		(*pio_func)(hp, data, len);
2061 		DPRINTF(2,("%s: pio data transfer %u @ %p\n",
2062 		    HDEVNAME(hp), len, data));
2063 
2064 		data += len;
2065 		datalen -= len;
2066 	}
2067 
2068 	if (error == 0 && !sdhc_wait_intr(hp, SDHC_TRANSFER_COMPLETE,
2069 	    SDHC_TRANSFER_TIMEOUT, false)) {
2070 		DPRINTF(1,("%s: timeout for transfer\n", __func__));
2071 		error = ETIMEDOUT;
2072 	}
2073 
2074 	return error;
2075 }
2076 
2077 static void
2078 sdhc_read_data_pio(struct sdhc_host *hp, uint8_t *data, u_int datalen)
2079 {
2080 
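	/*
	 * Use the widest data port accesses the buffer alignment allows:
	 * 32-bit words when 4-byte aligned, 16-bit when 2-byte aligned,
	 * single bytes otherwise.
	 */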
2081 	if (((__uintptr_t)data & 3) == 0) {
2082 		while (datalen > 3) {
2083 			*(uint32_t *)data = le32toh(HREAD4(hp, SDHC_DATA));
2084 			data += 4;
2085 			datalen -= 4;
2086 		}
2087 		if (datalen > 1) {
2088 			*(uint16_t *)data = le16toh(HREAD2(hp, SDHC_DATA));
2089 			data += 2;
2090 			datalen -= 2;
2091 		}
2092 		if (datalen > 0) {
2093 			*data = HREAD1(hp, SDHC_DATA);
2094 			data += 1;
2095 			datalen -= 1;
2096 		}
2097 	} else if (((__uintptr_t)data & 1) == 0) {
2098 		while (datalen > 1) {
2099 			*(uint16_t *)data = le16toh(HREAD2(hp, SDHC_DATA));
2100 			data += 2;
2101 			datalen -= 2;
2102 		}
2103 		if (datalen > 0) {
2104 			*data = HREAD1(hp, SDHC_DATA);
2105 			data += 1;
2106 			datalen -= 1;
2107 		}
2108 	} else {
2109 		while (datalen > 0) {
2110 			*data = HREAD1(hp, SDHC_DATA);
2111 			data += 1;
2112 			datalen -= 1;
2113 		}
2114 	}
2115 }
2116 
2117 static void
2118 sdhc_write_data_pio(struct sdhc_host *hp, uint8_t *data, u_int datalen)
2119 {
2120 
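	/*
	 * As in sdhc_read_data_pio(), pick the access width from the
	 * buffer alignment.
	 */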
2121 	if (((__uintptr_t)data & 3) == 0) {
2122 		while (datalen > 3) {
2123 			HWRITE4(hp, SDHC_DATA, htole32(*(uint32_t *)data));
2124 			data += 4;
2125 			datalen -= 4;
2126 		}
2127 		if (datalen > 1) {
2128 			HWRITE2(hp, SDHC_DATA, htole16(*(uint16_t *)data));
2129 			data += 2;
2130 			datalen -= 2;
2131 		}
2132 		if (datalen > 0) {
2133 			HWRITE1(hp, SDHC_DATA, *data);
2134 			data += 1;
2135 			datalen -= 1;
2136 		}
2137 	} else if (((__uintptr_t)data & 1) == 0) {
2138 		while (datalen > 1) {
2139 			HWRITE2(hp, SDHC_DATA, htole16(*(uint16_t *)data));
2140 			data += 2;
2141 			datalen -= 2;
2142 		}
2143 		if (datalen > 0) {
2144 			HWRITE1(hp, SDHC_DATA, *data);
2145 			data += 1;
2146 			datalen -= 1;
2147 		}
2148 	} else {
2149 		while (datalen > 0) {
2150 			HWRITE1(hp, SDHC_DATA, *data);
2151 			data += 1;
2152 			datalen -= 1;
2153 		}
2154 	}
2155 }
2156 
2157 static void
2158 esdhc_read_data_pio(struct sdhc_host *hp, uint8_t *data, u_int datalen)
2159 {
2160 	uint16_t status = HREAD2(hp, SDHC_NINTR_STATUS);
2161 	uint32_t v;
2162 
2163 	const size_t watermark = (HREAD4(hp, SDHC_WATERMARK_LEVEL) >> SDHC_WATERMARK_READ_SHIFT) & SDHC_WATERMARK_READ_MASK;
2164 	size_t count = 0;
2165 
2166 	while (datalen > 3 && !ISSET(status, SDHC_TRANSFER_COMPLETE)) {
2167 		if (count == 0) {
2168 			/*
2169 			 * If we've drained "watermark" words, we need to wait
2170 			 * a little bit so the read FIFO can refill.
2171 			 */
2172 			sdmmc_delay(10);
2173 			count = watermark;
2174 		}
2175 		v = HREAD4(hp, SDHC_DATA);
2176 		v = le32toh(v);
2177 		*(uint32_t *)data = v;
2178 		data += 4;
2179 		datalen -= 4;
2180 		status = HREAD2(hp, SDHC_NINTR_STATUS);
2181 		count--;
2182 	}
2183 	if (datalen > 0 && !ISSET(status, SDHC_TRANSFER_COMPLETE)) {
2184 		if (count == 0) {
2185 			sdmmc_delay(10);
2186 		}
2187 		v = HREAD4(hp, SDHC_DATA);
2188 		v = le32toh(v);
2189 		do {
2190 			*data++ = v;
2191 			v >>= 8;
2192 		} while (--datalen > 0);
2193 	}
2194 }
2195 
2196 static void
2197 esdhc_write_data_pio(struct sdhc_host *hp, uint8_t *data, u_int datalen)
2198 {
2199 	uint16_t status = HREAD2(hp, SDHC_NINTR_STATUS);
2200 	uint32_t v;
2201 
2202 	const size_t watermark = (HREAD4(hp, SDHC_WATERMARK_LEVEL) >> SDHC_WATERMARK_WRITE_SHIFT) & SDHC_WATERMARK_WRITE_MASK;
2203 	size_t count = watermark;
2204 
2205 	while (datalen > 3 && !ISSET(status, SDHC_TRANSFER_COMPLETE)) {
2206 		if (count == 0) {
2207 			sdmmc_delay(10);
2208 			count = watermark;
2209 		}
2210 		v = *(uint32_t *)data;
2211 		v = htole32(v);
2212 		HWRITE4(hp, SDHC_DATA, v);
2213 		data += 4;
2214 		datalen -= 4;
2215 		status = HREAD2(hp, SDHC_NINTR_STATUS);
2216 		count--;
2217 	}
2218 	if (datalen > 0 && !ISSET(status, SDHC_TRANSFER_COMPLETE)) {
2219 		if (count == 0) {
2220 			sdmmc_delay(10);
2221 		}
2222 		v = *(uint32_t *)data;
2223 		v = htole32(v);
2224 		HWRITE4(hp, SDHC_DATA, v);
2225 	}
2226 }
2227 
2228 /* Prepare for another command. */
2229 static int
2230 sdhc_soft_reset(struct sdhc_host *hp, int mask)
2231 {
2232 	int timo;
2233 
2234 	KASSERT(mutex_owned(&hp->intr_lock));
2235 
2236 	DPRINTF(1,("%s: software reset reg=%08x\n", HDEVNAME(hp), mask));
2237 
2238 	/* Request the reset.  */
2239 	HWRITE1(hp, SDHC_SOFTWARE_RESET, mask);
2240 
2241 	/*
2242 	 * If necessary, wait for the controller to set the bits to
2243 	 * acknowledge the reset.
2244 	 */
2245 	if (ISSET(hp->sc->sc_flags, SDHC_FLAG_WAIT_RESET) &&
2246 	    ISSET(mask, (SDHC_RESET_DAT | SDHC_RESET_CMD))) {
2247 		for (timo = 10000; timo > 0; timo--) {
2248 			if (ISSET(HREAD1(hp, SDHC_SOFTWARE_RESET), mask))
2249 				break;
2250 			/* Short delay because I worry we may miss it...  */
2251 			sdmmc_delay(1);
2252 		}
2253 		if (timo == 0) {
2254 			DPRINTF(1,("%s: timeout for reset on\n", __func__));
2255 			return ETIMEDOUT;
2256 		}
2257 	}
2258 
2259 	/*
2260 	 * Wait for the controller to clear the bits to indicate that
2261 	 * the reset has completed.
2262 	 */
2263 	for (timo = 10; timo > 0; timo--) {
2264 		if (!ISSET(HREAD1(hp, SDHC_SOFTWARE_RESET), mask))
2265 			break;
2266 		sdmmc_delay(10000);
2267 	}
2268 	if (timo == 0) {
2269 		DPRINTF(1,("%s: timeout reg=%08x\n", HDEVNAME(hp),
2270 		    HREAD1(hp, SDHC_SOFTWARE_RESET)));
2271 		return ETIMEDOUT;
2272 	}
2273 
2274 	if (ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED)) {
2275 		HSET4(hp, SDHC_DMA_CTL, SDHC_DMA_SNOOP);
2276 	}
2277 
2278 	return 0;
2279 }
2280 
2281 static int
2282 sdhc_wait_intr(struct sdhc_host *hp, int mask, int timo, bool probing)
2283 {
2284 	int status, error, nointr;
2285 
2286 	KASSERT(mutex_owned(&hp->intr_lock));
2287 
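	/*
	 * Also wait for the error interrupt so that a failing command
	 * wakes us immediately instead of running out the timeout.
	 */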
2288 	mask |= SDHC_ERROR_INTERRUPT;
2289 
2290 	nointr = 0;
2291 	status = hp->intr_status & mask;
2292 	while (status == 0) {
2293 		if (cv_timedwait(&hp->intr_cv, &hp->intr_lock, timo)
2294 		    == EWOULDBLOCK) {
2295 			nointr = 1;
2296 			break;
2297 		}
2298 		status = hp->intr_status & mask;
2299 	}
2300 	error = hp->intr_error_status;
2301 
2302 	DPRINTF(2,("%s: intr status %#x error %#x\n", HDEVNAME(hp), status,
2303 	    error));
2304 
2305 	hp->intr_status &= ~status;
2306 	hp->intr_error_status &= ~error;
2307 
2308 	if (ISSET(status, SDHC_ERROR_INTERRUPT)) {
2309 		if (ISSET(error, SDHC_DMA_ERROR))
2310 			device_printf(hp->sc->sc_dev,"dma error\n");
2311 		if (ISSET(error, SDHC_ADMA_ERROR))
2312 			device_printf(hp->sc->sc_dev,"adma error\n");
2313 		if (ISSET(error, SDHC_AUTO_CMD12_ERROR))
2314 			device_printf(hp->sc->sc_dev,"auto_cmd12 error\n");
2315 		if (ISSET(error, SDHC_CURRENT_LIMIT_ERROR))
2316 			device_printf(hp->sc->sc_dev,"current limit error\n");
2317 		if (ISSET(error, SDHC_DATA_END_BIT_ERROR))
2318 			device_printf(hp->sc->sc_dev,"data end bit error\n");
2319 		if (ISSET(error, SDHC_DATA_CRC_ERROR))
2320 			device_printf(hp->sc->sc_dev,"data crc error\n");
2321 		if (ISSET(error, SDHC_DATA_TIMEOUT_ERROR))
2322 			device_printf(hp->sc->sc_dev,"data timeout error\n");
2323 		if (ISSET(error, SDHC_CMD_INDEX_ERROR))
2324 			device_printf(hp->sc->sc_dev,"cmd index error\n");
2325 		if (ISSET(error, SDHC_CMD_END_BIT_ERROR))
2326 			device_printf(hp->sc->sc_dev,"cmd end bit error\n");
2327 		if (ISSET(error, SDHC_CMD_CRC_ERROR))
2328 			device_printf(hp->sc->sc_dev,"cmd crc error\n");
2329 		if (ISSET(error, SDHC_CMD_TIMEOUT_ERROR)) {
2330 			if (!probing)
2331 				device_printf(hp->sc->sc_dev,"cmd timeout error\n");
2332 #ifdef SDHC_DEBUG
2333 			else if (sdhcdebug > 0)
2334 				device_printf(hp->sc->sc_dev,"cmd timeout (expected)\n");
2335 #endif
2336 		}
2337 		if ((error & ~SDHC_EINTR_STATUS_MASK) != 0)
2338 			device_printf(hp->sc->sc_dev,"vendor error %#x\n",
2339 				(error & ~SDHC_EINTR_STATUS_MASK));
2340 		if (error == 0)
2341 			device_printf(hp->sc->sc_dev,"no error\n");
2342 
2343 		/* Command timeout has higher priority than command complete. */
2344 		if (ISSET(error, SDHC_CMD_TIMEOUT_ERROR))
2345 			CLR(status, SDHC_COMMAND_COMPLETE);
2346 
2347 		/* Transfer complete has higher priority than data timeout. */
2348 		if (ISSET(status, SDHC_TRANSFER_COMPLETE))
2349 			CLR(error, SDHC_DATA_TIMEOUT_ERROR);
2350 	}
2351 
2352 	if (nointr ||
2353 	    (ISSET(status, SDHC_ERROR_INTERRUPT) && error)) {
2354 		if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED))
2355 			(void)sdhc_soft_reset(hp, SDHC_RESET_CMD|SDHC_RESET_DAT);
2356 		hp->intr_error_status = 0;
2357 		status = 0;
2358 	}
2359 
2360 	return status;
2361 }
2362 
2363 /*
2364  * Interrupt handler, established by the attachment driver at IPL_SDMMC.
2365  */
2366 int
2367 sdhc_intr(void *arg)
2368 {
2369 	struct sdhc_softc *sc = (struct sdhc_softc *)arg;
2370 	struct sdhc_host *hp;
2371 	int done = 0;
2372 	uint16_t status;
2373 	uint16_t error;
2374 
2375 	/* We got an interrupt, but we don't know from which slot. */
2376 	for (size_t host = 0; host < sc->sc_nhosts; host++) {
2377 		hp = sc->sc_host[host];
2378 		if (hp == NULL)
2379 			continue;
2380 
2381 		mutex_enter(&hp->intr_lock);
2382 
2383 		if (ISSET(sc->sc_flags, SDHC_FLAG_32BIT_ACCESS)) {
2384 			/* Find out which interrupts are pending. */
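			/*
			 * With 32-bit access the normal and error interrupt
			 * status registers are read as a single word: normal
			 * status in the low 16 bits, error status in the
			 * high 16 bits.
			 */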
2385 			uint32_t xstatus = HREAD4(hp, SDHC_NINTR_STATUS);
2386 			status = xstatus;
2387 			error = xstatus >> 16;
2388 			if (ISSET(sc->sc_flags, SDHC_FLAG_USDHC) &&
2389 			    (xstatus & SDHC_TRANSFER_COMPLETE) &&
2390 			    !(xstatus & SDHC_DMA_INTERRUPT)) {
2391 				/* read again due to uSDHC errata */
2392 				status = xstatus = HREAD4(hp,
2393 				    SDHC_NINTR_STATUS);
2394 				error = xstatus >> 16;
2395 			}
2396 			if (ISSET(sc->sc_flags,
2397 			    SDHC_FLAG_ENHANCED | SDHC_FLAG_USDHC)) {
2398 				if ((error & SDHC_NINTR_STATUS_MASK) != 0)
2399 					SET(status, SDHC_ERROR_INTERRUPT);
2400 			}
2401 			if (error)
2402 				xstatus |= SDHC_ERROR_INTERRUPT;
2403 			else if (!ISSET(status, SDHC_NINTR_STATUS_MASK))
2404 				goto next_port; /* no interrupt for us */
2405 			/* Acknowledge the interrupts we are about to handle. */
2406 			HWRITE4(hp, SDHC_NINTR_STATUS, xstatus);
2407 		} else {
2408 			/* Find out which interrupts are pending. */
2409 			error = 0;
2410 			status = HREAD2(hp, SDHC_NINTR_STATUS);
2411 			if (!ISSET(status, SDHC_NINTR_STATUS_MASK))
2412 				goto next_port; /* no interrupt for us */
2413 			/* Acknowledge the interrupts we are about to handle. */
2414 			HWRITE2(hp, SDHC_NINTR_STATUS, status);
2415 			if (ISSET(status, SDHC_ERROR_INTERRUPT)) {
2416 				/* Acknowledge error interrupts. */
2417 				error = HREAD2(hp, SDHC_EINTR_STATUS);
2418 				HWRITE2(hp, SDHC_EINTR_STATUS, error);
2419 			}
2420 		}
2421 
2422 		DPRINTF(2,("%s: interrupt status=%x error=%x\n", HDEVNAME(hp),
2423 		    status, error));
2424 
2425 		/* Claim this interrupt. */
2426 		done = 1;
2427 
2428 		if (ISSET(status, SDHC_ERROR_INTERRUPT) &&
2429 		    ISSET(error, SDHC_ADMA_ERROR)) {
2430 			uint8_t adma_err = HREAD1(hp, SDHC_ADMA_ERROR_STATUS);
2431 			printf("%s: ADMA error, status %02x\n", HDEVNAME(hp),
2432 			    adma_err);
2433 		}
2434 
2435 		/*
2436 		 * Wake up the sdmmc event thread to scan for cards.
2437 		 */
2438 		if (ISSET(status, SDHC_CARD_REMOVAL|SDHC_CARD_INSERTION)) {
2439 			if (hp->sdmmc != NULL) {
2440 				sdmmc_needs_discover(hp->sdmmc);
2441 			}
2442 			if (ISSET(sc->sc_flags,
2443 			    SDHC_FLAG_ENHANCED | SDHC_FLAG_USDHC)) {
2444 				HCLR4(hp, SDHC_NINTR_STATUS_EN,
2445 				    status & (SDHC_CARD_REMOVAL|SDHC_CARD_INSERTION));
2446 				HCLR4(hp, SDHC_NINTR_SIGNAL_EN,
2447 				    status & (SDHC_CARD_REMOVAL|SDHC_CARD_INSERTION));
2448 			}
2449 		}
2450 
2451 		/*
2452 		 * Schedule re-tuning process (UHS).
2453 		 */
2454 		if (ISSET(status, SDHC_RETUNING_EVENT)) {
2455 			atomic_swap_uint(&hp->tuning_timer_pending, 1);
2456 		}
2457 
2458 		/*
2459 		 * Wake up the blocking process to service command
2460 		 * related interrupt(s).
2461 		 */
2462 		if (ISSET(status, SDHC_COMMAND_COMPLETE|SDHC_ERROR_INTERRUPT|
2463 		    SDHC_BUFFER_READ_READY|SDHC_BUFFER_WRITE_READY|
2464 		    SDHC_TRANSFER_COMPLETE|SDHC_DMA_INTERRUPT)) {
2465 			hp->intr_error_status |= error;
2466 			hp->intr_status |= status;
2467 			if (ISSET(sc->sc_flags,
2468 			    SDHC_FLAG_ENHANCED | SDHC_FLAG_USDHC)) {
2469 				HCLR4(hp, SDHC_NINTR_SIGNAL_EN,
2470 				    status & (SDHC_BUFFER_READ_READY|SDHC_BUFFER_WRITE_READY));
2471 			}
2472 			cv_broadcast(&hp->intr_cv);
2473 		}
2474 
2475 		/*
2476 		 * Service SD card interrupts.
2477 		 */
2478 		if (!ISSET(sc->sc_flags, SDHC_FLAG_ENHANCED | SDHC_FLAG_USDHC)
2479 		    && ISSET(status, SDHC_CARD_INTERRUPT)) {
2480 			DPRINTF(0,("%s: card interrupt\n", HDEVNAME(hp)));
2481 			HCLR2(hp, SDHC_NINTR_STATUS_EN, SDHC_CARD_INTERRUPT);
2482 			sdmmc_card_intr(hp->sdmmc);
2483 		}
2484 next_port:
2485 		mutex_exit(&hp->intr_lock);
2486 	}
2487 
2488 	return done;
2489 }
2490 
2491 kmutex_t *
2492 sdhc_host_lock(struct sdhc_host *hp)
2493 {
2494 	return &hp->intr_lock;
2495 }
2496 
2497 uint8_t
2498 sdhc_host_read_1(struct sdhc_host *hp, int reg)
2499 {
2500 	return HREAD1(hp, reg);
2501 }
2502 
2503 uint16_t
2504 sdhc_host_read_2(struct sdhc_host *hp, int reg)
2505 {
2506 	return HREAD2(hp, reg);
2507 }
2508 
2509 uint32_t
2510 sdhc_host_read_4(struct sdhc_host *hp, int reg)
2511 {
2512 	return HREAD4(hp, reg);
2513 }
2514 
2515 void
2516 sdhc_host_write_1(struct sdhc_host *hp, int reg, uint8_t val)
2517 {
2518 	HWRITE1(hp, reg, val);
2519 }
2520 
2521 void
2522 sdhc_host_write_2(struct sdhc_host *hp, int reg, uint16_t val)
2523 {
2524 	HWRITE2(hp, reg, val);
2525 }
2526 
2527 void
2528 sdhc_host_write_4(struct sdhc_host *hp, int reg, uint32_t val)
2529 {
2530 	HWRITE4(hp, reg, val);
2531 }
2532 
2533 #ifdef SDHC_DEBUG
2534 void
2535 sdhc_dump_regs(struct sdhc_host *hp)
2536 {
2537 
2538 	printf("0x%02x PRESENT_STATE:    %x\n", SDHC_PRESENT_STATE,
2539 	    HREAD4(hp, SDHC_PRESENT_STATE));
2540 	if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED))
2541 		printf("0x%02x POWER_CTL:        %x\n", SDHC_POWER_CTL,
2542 		    HREAD1(hp, SDHC_POWER_CTL));
2543 	printf("0x%02x NINTR_STATUS:     %x\n", SDHC_NINTR_STATUS,
2544 	    HREAD2(hp, SDHC_NINTR_STATUS));
2545 	printf("0x%02x EINTR_STATUS:     %x\n", SDHC_EINTR_STATUS,
2546 	    HREAD2(hp, SDHC_EINTR_STATUS));
2547 	printf("0x%02x NINTR_STATUS_EN:  %x\n", SDHC_NINTR_STATUS_EN,
2548 	    HREAD2(hp, SDHC_NINTR_STATUS_EN));
2549 	printf("0x%02x EINTR_STATUS_EN:  %x\n", SDHC_EINTR_STATUS_EN,
2550 	    HREAD2(hp, SDHC_EINTR_STATUS_EN));
2551 	printf("0x%02x NINTR_SIGNAL_EN:  %x\n", SDHC_NINTR_SIGNAL_EN,
2552 	    HREAD2(hp, SDHC_NINTR_SIGNAL_EN));
2553 	printf("0x%02x EINTR_SIGNAL_EN:  %x\n", SDHC_EINTR_SIGNAL_EN,
2554 	    HREAD2(hp, SDHC_EINTR_SIGNAL_EN));
2555 	printf("0x%02x CAPABILITIES:     %x\n", SDHC_CAPABILITIES,
2556 	    HREAD4(hp, SDHC_CAPABILITIES));
2557 	printf("0x%02x MAX_CAPABILITIES: %x\n", SDHC_MAX_CAPABILITIES,
2558 	    HREAD4(hp, SDHC_MAX_CAPABILITIES));
2559 }
2560 #endif
2561