xref: /netbsd-src/sys/dev/sdmmc/sdhc.c (revision 63aea4bd5b445e491ff0389fe27ec78b3099dba3)
1 /*	$NetBSD: sdhc.c,v 1.91 2015/11/03 07:59:29 mlelstv Exp $	*/
2 /*	$OpenBSD: sdhc.c,v 1.25 2009/01/13 19:44:20 grange Exp $	*/
3 
4 /*
5  * Copyright (c) 2006 Uwe Stuehler <uwe@openbsd.org>
6  *
7  * Permission to use, copy, modify, and distribute this software for any
8  * purpose with or without fee is hereby granted, provided that the above
9  * copyright notice and this permission notice appear in all copies.
10  *
11  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 /*
21  * SD Host Controller driver based on the SD Host Controller Standard
22  * Simplified Specification Version 1.00 (www.sdcard.org).
23  */
24 
25 #include <sys/cdefs.h>
26 __KERNEL_RCSID(0, "$NetBSD: sdhc.c,v 1.91 2015/11/03 07:59:29 mlelstv Exp $");
27 
28 #ifdef _KERNEL_OPT
29 #include "opt_sdmmc.h"
30 #endif
31 
32 #include <sys/param.h>
33 #include <sys/device.h>
34 #include <sys/kernel.h>
35 #include <sys/malloc.h>
36 #include <sys/systm.h>
37 #include <sys/mutex.h>
38 #include <sys/condvar.h>
39 #include <sys/atomic.h>
40 
41 #include <dev/sdmmc/sdhcreg.h>
42 #include <dev/sdmmc/sdhcvar.h>
43 #include <dev/sdmmc/sdmmcchip.h>
44 #include <dev/sdmmc/sdmmcreg.h>
45 #include <dev/sdmmc/sdmmcvar.h>
46 
47 #ifdef SDHC_DEBUG
48 int sdhcdebug = 1;
49 #define DPRINTF(n,s)	do { if ((n) <= sdhcdebug) printf s; } while (0)
50 void	sdhc_dump_regs(struct sdhc_host *);
51 #else
52 #define DPRINTF(n,s)	do {} while (0)
53 #endif
54 
55 #define SDHC_COMMAND_TIMEOUT	hz
56 #define SDHC_BUFFER_TIMEOUT	hz
57 #define SDHC_TRANSFER_TIMEOUT	hz
58 #define SDHC_DMA_TIMEOUT	(hz*3)
59 #define SDHC_TUNING_TIMEOUT	hz
60 
61 struct sdhc_host {
62 	struct sdhc_softc *sc;		/* host controller device */
63 
64 	bus_space_tag_t iot;		/* host register set tag */
65 	bus_space_handle_t ioh;		/* host register set handle */
66 	bus_size_t ios;			/* host register space size */
67 	bus_dma_tag_t dmat;		/* host DMA tag */
68 
69 	device_t sdmmc;			/* generic SD/MMC device */
70 
71 	u_int clkbase;			/* base clock frequency in kHz */
72 	int maxblklen;			/* maximum block length */
73 	uint32_t ocr;			/* OCR value from capabilities */
74 
75 	uint8_t regs[14];		/* host controller state */
76 
77 	uint16_t intr_status;		/* soft interrupt status */
78 	uint16_t intr_error_status;	/* soft error status */
79 	kmutex_t intr_lock;
80 	kcondvar_t intr_cv;
81 
82 	callout_t tuning_timer;
83 	int tuning_timing;
84 	u_int tuning_timer_count;
85 	u_int tuning_timer_pending;
86 
87 	int specver;			/* spec. version */
88 
89 	uint32_t flags;			/* flags for this host */
90 #define SHF_USE_DMA		0x0001
91 #define SHF_USE_4BIT_MODE	0x0002
92 #define SHF_USE_8BIT_MODE	0x0004
93 #define SHF_MODE_DMAEN		0x0008 /* needs SDHC_DMA_ENABLE in mode */
94 #define SHF_USE_ADMA2_32	0x0010
95 #define SHF_USE_ADMA2_64	0x0020
96 #define SHF_USE_ADMA2_MASK	0x0030
97 
98 	bus_dmamap_t		adma_map;
99 	bus_dma_segment_t	adma_segs[1];
100 	void			*adma2;
101 };
102 
103 #define HDEVNAME(hp)	(device_xname((hp)->sc->sc_dev))
104 
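/*
 * Register access helpers.  Hosts flagged SDHC_FLAG_32BIT_ACCESS only
 * tolerate 32-bit MMIO, so sub-word reads are emulated by reading the
 * containing 32-bit word and shifting the wanted byte/halfword down.
 */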
105 static uint8_t
106 hread1(struct sdhc_host *hp, bus_size_t reg)
107 {
108 
109 	if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_32BIT_ACCESS))
110 		return bus_space_read_1(hp->iot, hp->ioh, reg);
111 	return bus_space_read_4(hp->iot, hp->ioh, reg & -4) >> (8 * (reg & 3));
112 }
113 
114 static uint16_t
115 hread2(struct sdhc_host *hp, bus_size_t reg)
116 {
117 
118 	if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_32BIT_ACCESS))
119 		return bus_space_read_2(hp->iot, hp->ioh, reg);
120 	return bus_space_read_4(hp->iot, hp->ioh, reg & -4) >> (8 * (reg & 2));
121 }
122 
123 #define HREAD1(hp, reg)		hread1(hp, reg)
124 #define HREAD2(hp, reg)		hread2(hp, reg)
125 #define HREAD4(hp, reg)		\
126 	(bus_space_read_4((hp)->iot, (hp)->ioh, (reg)))
127 
128 
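/*
 * Likewise, sub-word writes on 32-bit-only hosts are emulated with a
 * read-modify-write of the containing 32-bit word.
 */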
129 static void
130 hwrite1(struct sdhc_host *hp, bus_size_t o, uint8_t val)
131 {
132 
133 	if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_32BIT_ACCESS)) {
134 		bus_space_write_1(hp->iot, hp->ioh, o, val);
135 	} else {
136 		const size_t shift = 8 * (o & 3);
137 		o &= -4;
138 		uint32_t tmp = bus_space_read_4(hp->iot, hp->ioh, o);
139 		tmp = (val << shift) | (tmp & ~(0xff << shift));
140 		bus_space_write_4(hp->iot, hp->ioh, o, tmp);
141 	}
142 }
143 
144 static void
145 hwrite2(struct sdhc_host *hp, bus_size_t o, uint16_t val)
146 {
147 
148 	if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_32BIT_ACCESS)) {
149 		bus_space_write_2(hp->iot, hp->ioh, o, val);
150 	} else {
151 		const size_t shift = 8 * (o & 2);
152 		o &= -4;
153 		uint32_t tmp = bus_space_read_4(hp->iot, hp->ioh, o);
154 		tmp = (val << shift) | (tmp & ~(0xffff << shift));
155 		bus_space_write_4(hp->iot, hp->ioh, o, tmp);
156 	}
157 }
158 
159 #define HWRITE1(hp, reg, val)		hwrite1(hp, reg, val)
160 #define HWRITE2(hp, reg, val)		hwrite2(hp, reg, val)
161 #define HWRITE4(hp, reg, val)						\
162 	bus_space_write_4((hp)->iot, (hp)->ioh, (reg), (val))
163 
164 #define HCLR1(hp, reg, bits)						\
165 	do if (bits) HWRITE1((hp), (reg), HREAD1((hp), (reg)) & ~(bits)); while (0)
166 #define HCLR2(hp, reg, bits)						\
167 	do if (bits) HWRITE2((hp), (reg), HREAD2((hp), (reg)) & ~(bits)); while (0)
168 #define HCLR4(hp, reg, bits)						\
169 	do if (bits) HWRITE4((hp), (reg), HREAD4((hp), (reg)) & ~(bits)); while (0)
170 #define HSET1(hp, reg, bits)						\
171 	do if (bits) HWRITE1((hp), (reg), HREAD1((hp), (reg)) | (bits)); while (0)
172 #define HSET2(hp, reg, bits)						\
173 	do if (bits) HWRITE2((hp), (reg), HREAD2((hp), (reg)) | (bits)); while (0)
174 #define HSET4(hp, reg, bits)						\
175 	do if (bits) HWRITE4((hp), (reg), HREAD4((hp), (reg)) | (bits)); while (0)
176 
177 static int	sdhc_host_reset(sdmmc_chipset_handle_t);
178 static int	sdhc_host_reset1(sdmmc_chipset_handle_t);
179 static uint32_t	sdhc_host_ocr(sdmmc_chipset_handle_t);
180 static int	sdhc_host_maxblklen(sdmmc_chipset_handle_t);
181 static int	sdhc_card_detect(sdmmc_chipset_handle_t);
182 static int	sdhc_write_protect(sdmmc_chipset_handle_t);
183 static int	sdhc_bus_power(sdmmc_chipset_handle_t, uint32_t);
184 static int	sdhc_bus_clock_ddr(sdmmc_chipset_handle_t, int, bool);
185 static int	sdhc_bus_width(sdmmc_chipset_handle_t, int);
186 static int	sdhc_bus_rod(sdmmc_chipset_handle_t, int);
187 static void	sdhc_card_enable_intr(sdmmc_chipset_handle_t, int);
188 static void	sdhc_card_intr_ack(sdmmc_chipset_handle_t);
189 static void	sdhc_exec_command(sdmmc_chipset_handle_t,
190 		    struct sdmmc_command *);
191 static int	sdhc_signal_voltage(sdmmc_chipset_handle_t, int);
192 static int	sdhc_execute_tuning1(struct sdhc_host *, int);
193 static int	sdhc_execute_tuning(sdmmc_chipset_handle_t, int);
194 static void	sdhc_tuning_timer(void *);
195 static int	sdhc_start_command(struct sdhc_host *, struct sdmmc_command *);
196 static int	sdhc_wait_state(struct sdhc_host *, uint32_t, uint32_t);
197 static int	sdhc_soft_reset(struct sdhc_host *, int);
198 static int	sdhc_wait_intr(struct sdhc_host *, int, int, bool);
199 static void	sdhc_transfer_data(struct sdhc_host *, struct sdmmc_command *);
200 static int	sdhc_transfer_data_dma(struct sdhc_host *, struct sdmmc_command *);
201 static int	sdhc_transfer_data_pio(struct sdhc_host *, struct sdmmc_command *);
202 static void	sdhc_read_data_pio(struct sdhc_host *, uint8_t *, u_int);
203 static void	sdhc_write_data_pio(struct sdhc_host *, uint8_t *, u_int);
204 static void	esdhc_read_data_pio(struct sdhc_host *, uint8_t *, u_int);
205 static void	esdhc_write_data_pio(struct sdhc_host *, uint8_t *, u_int);
206 
207 static struct sdmmc_chip_functions sdhc_functions = {
208 	/* host controller reset */
209 	.host_reset = sdhc_host_reset,
210 
211 	/* host controller capabilities */
212 	.host_ocr = sdhc_host_ocr,
213 	.host_maxblklen = sdhc_host_maxblklen,
214 
215 	/* card detection */
216 	.card_detect = sdhc_card_detect,
217 
218 	/* write protect */
219 	.write_protect = sdhc_write_protect,
220 
221 	/* bus power, clock frequency, width and ROD(OpenDrain/PushPull) */
222 	.bus_power = sdhc_bus_power,
223 	.bus_clock = NULL,	/* see sdhc_bus_clock_ddr */
224 	.bus_width = sdhc_bus_width,
225 	.bus_rod = sdhc_bus_rod,
226 
227 	/* command execution */
228 	.exec_command = sdhc_exec_command,
229 
230 	/* card interrupt */
231 	.card_enable_intr = sdhc_card_enable_intr,
232 	.card_intr_ack = sdhc_card_intr_ack,
233 
234 	/* UHS functions */
235 	.signal_voltage = sdhc_signal_voltage,
236 	.bus_clock_ddr = sdhc_bus_clock_ddr,
237 	.execute_tuning = sdhc_execute_tuning,
238 };
239 
240 static int
241 sdhc_cfprint(void *aux, const char *pnp)
242 {
243 	const struct sdmmcbus_attach_args * const saa = aux;
244 	const struct sdhc_host * const hp = saa->saa_sch;
245 
246 	if (pnp) {
247 		aprint_normal("sdmmc at %s", pnp);
248 	}
249 	for (size_t host = 0; host < hp->sc->sc_nhosts; host++) {
250 		if (hp->sc->sc_host[host] == hp) {
251 			aprint_normal(" slot %zu", host);
252 		}
253 	}
254 
255 	return UNCONF;
256 }
257 
258 /*
259  * Called by attachment driver.  For each SD card slot there is one SD
260  * host controller standard register set. (1.3)
261  */
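/*
 * Illustrative sketch only (the front-end softc layout and names below are
 * assumptions, not part of this file): a bus attachment maps one register
 * window per slot and hands it to sdhc_host_found(), e.g.
 *
 *	if (bus_space_map(iot, addr, size, 0, &ioh) == 0)
 *		(void)sdhc_host_found(&fsc->sc, iot, ioh, size);
 *
 * The softc's sc_dev, sc_dmat and any SDHC_FLAG_* quirks must already be
 * initialized, since they are consulted below.
 */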
262 int
263 sdhc_host_found(struct sdhc_softc *sc, bus_space_tag_t iot,
264     bus_space_handle_t ioh, bus_size_t iosize)
265 {
266 	struct sdmmcbus_attach_args saa;
267 	struct sdhc_host *hp;
268 	uint32_t caps, caps2;
269 	uint16_t sdhcver;
270 	int error;
271 
272 	/* Allocate one more host structure. */
273 	hp = malloc(sizeof(struct sdhc_host), M_DEVBUF, M_WAITOK|M_ZERO);
274 	if (hp == NULL) {
275 		aprint_error_dev(sc->sc_dev,
276 		    "couldn't alloc memory (sdhc host)\n");
277 		goto err1;
278 	}
279 	sc->sc_host[sc->sc_nhosts++] = hp;
280 
281 	/* Fill in the new host structure. */
282 	hp->sc = sc;
283 	hp->iot = iot;
284 	hp->ioh = ioh;
285 	hp->ios = iosize;
286 	hp->dmat = sc->sc_dmat;
287 
288 	mutex_init(&hp->intr_lock, MUTEX_DEFAULT, IPL_SDMMC);
289 	cv_init(&hp->intr_cv, "sdhcintr");
290 	callout_init(&hp->tuning_timer, CALLOUT_MPSAFE);
291 	callout_setfunc(&hp->tuning_timer, sdhc_tuning_timer, hp);
292 
293 	if (ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED)) {
294 		sdhcver = HREAD4(hp, SDHC_ESDHC_HOST_CTL_VERSION);
295 	} else {
296 		sdhcver = HREAD2(hp, SDHC_HOST_CTL_VERSION);
297 	}
298 	aprint_normal_dev(sc->sc_dev, "SDHC ");
299 	hp->specver = SDHC_SPEC_VERSION(sdhcver);
300 	switch (SDHC_SPEC_VERSION(sdhcver)) {
301 	case SDHC_SPEC_VERS_100:
302 		aprint_normal("1.0");
303 		break;
304 
305 	case SDHC_SPEC_VERS_200:
306 		aprint_normal("2.0");
307 		break;
308 
309 	case SDHC_SPEC_VERS_300:
310 		aprint_normal("3.0");
311 		break;
312 
313 	case SDHC_SPEC_VERS_400:
314 		aprint_normal("4.0");
315 		break;
316 
317 	default:
318 		aprint_normal("unknown version(0x%x)",
319 		    SDHC_SPEC_VERSION(sdhcver));
320 		break;
321 	}
322 	aprint_normal(", rev %u", SDHC_VENDOR_VERSION(sdhcver));
323 
324 	/*
325 	 * Reset the host controller and enable interrupts.
326 	 */
327 	(void)sdhc_host_reset(hp);
328 
329 	/* Determine host capabilities. */
330 	if (ISSET(sc->sc_flags, SDHC_FLAG_HOSTCAPS)) {
331 		caps = sc->sc_caps;
332 		caps2 = sc->sc_caps2;
333 	} else {
334 		caps = sc->sc_caps = HREAD4(hp, SDHC_CAPABILITIES);
335 		if (hp->specver >= SDHC_SPEC_VERS_300) {
336 			caps2 = sc->sc_caps2 = HREAD4(hp, SDHC_CAPABILITIES2);
337 		} else {
338 			caps2 = sc->sc_caps2 = 0;
339 		}
340 	}
341 
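	/*
	 * Capability word 2 also describes re-tuning.  In re-tuning mode 1
	 * the timer field decoded below encodes the interval as 2^(n-1)
	 * seconds, e.g. a raw value of 4 yields an 8 second timer; a raw
	 * value of 0xf is treated as "no timer".
	 */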
342 	const u_int retuning_mode = (caps2 >> SDHC_RETUNING_MODES_SHIFT) &
343 	    SDHC_RETUNING_MODES_MASK;
344 	if (retuning_mode == SDHC_RETUNING_MODE_1) {
345 		hp->tuning_timer_count = (caps2 >> SDHC_TIMER_COUNT_SHIFT) &
346 		    SDHC_TIMER_COUNT_MASK;
347 		if (hp->tuning_timer_count == 0xf)
348 			hp->tuning_timer_count = 0;
349 		if (hp->tuning_timer_count)
350 			hp->tuning_timer_count =
351 			    1 << (hp->tuning_timer_count - 1);
352 	}
353 
354 	/*
355 	 * Use DMA if the host system and the controller support it.
356 	 * Supports an integrated or external DMA engine, with or without
357 	 * SDHC_DMA_ENABLE in the command.
358 	 */
359 	if (ISSET(sc->sc_flags, SDHC_FLAG_FORCE_DMA) ||
360 	    (ISSET(sc->sc_flags, SDHC_FLAG_USE_DMA) &&
361 	     ISSET(caps, SDHC_DMA_SUPPORT))) {
362 		SET(hp->flags, SHF_USE_DMA);
363 
364 		if (ISSET(sc->sc_flags, SDHC_FLAG_USE_ADMA2) &&
365 		    ISSET(caps, SDHC_ADMA2_SUPP)) {
366 			SET(hp->flags, SHF_MODE_DMAEN);
367 			/*
368 			 * 64-bit mode was present in the 2.00 spec, removed
369 			 * from 3.00, and re-added in 4.00 with a different
370 			 * descriptor layout. We only support 2.00 and 3.00
371 			 * descriptors for now.
372 			 */
373 			if (hp->specver == SDHC_SPEC_VERS_200 &&
374 			    ISSET(caps, SDHC_64BIT_SYS_BUS)) {
375 				SET(hp->flags, SHF_USE_ADMA2_64);
376 				aprint_normal(", 64-bit ADMA2");
377 			} else {
378 				SET(hp->flags, SHF_USE_ADMA2_32);
379 				aprint_normal(", 32-bit ADMA2");
380 			}
381 		} else {
382 			if (!ISSET(sc->sc_flags, SDHC_FLAG_EXTERNAL_DMA) ||
383 			    ISSET(sc->sc_flags, SDHC_FLAG_EXTDMA_DMAEN))
384 				SET(hp->flags, SHF_MODE_DMAEN);
385 			if (sc->sc_vendor_transfer_data_dma) {
386 				aprint_normal(", platform DMA");
387 			} else {
388 				aprint_normal(", SDMA");
389 			}
390 		}
391 	} else {
392 		aprint_normal(", PIO");
393 	}
394 
395 	/*
396 	 * Determine the base clock frequency. (2.2.24)
397 	 */
398 	if (hp->specver >= SDHC_SPEC_VERS_300) {
399 		hp->clkbase = SDHC_BASE_V3_FREQ_KHZ(caps);
400 	} else {
401 		hp->clkbase = SDHC_BASE_FREQ_KHZ(caps);
402 	}
403 	if (hp->clkbase == 0 ||
404 	    ISSET(sc->sc_flags, SDHC_FLAG_NO_CLKBASE)) {
405 		if (sc->sc_clkbase == 0) {
406 			/* The attachment driver must tell us. */
407 			aprint_error_dev(sc->sc_dev,
408 			    "unknown base clock frequency\n");
409 			goto err;
410 		}
411 		hp->clkbase = sc->sc_clkbase;
412 	}
413 	if (hp->clkbase < 10000 || hp->clkbase > 10000 * 256) {
414 		/* SDHC 1.0 supports only 10-63 MHz. */
415 		aprint_error_dev(sc->sc_dev,
416 		    "base clock frequency out of range: %u MHz\n",
417 		    hp->clkbase / 1000);
418 		goto err;
419 	}
420 	aprint_normal(", %u kHz", hp->clkbase);
421 
422 	/*
423 	 * XXX Set the data timeout counter value according to
424 	 * capabilities. (2.2.15)
425 	 */
426 	HWRITE1(hp, SDHC_TIMEOUT_CTL, SDHC_TIMEOUT_MAX);
427 #if 1
428 	if (ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED))
429 		HWRITE4(hp, SDHC_NINTR_STATUS, SDHC_CMD_TIMEOUT_ERROR << 16);
430 #endif
431 
432 	if (ISSET(caps, SDHC_EMBEDDED_SLOT))
433 		aprint_normal(", embedded slot");
434 
435 	/*
436 	 * Determine SD bus voltage levels supported by the controller.
437 	 */
438 	aprint_normal(",");
439 	if (ISSET(caps, SDHC_HIGH_SPEED_SUPP)) {
440 		SET(hp->ocr, MMC_OCR_HCS);
441 		aprint_normal(" HS");
442 	}
443 	if (ISSET(caps2, SDHC_SDR50_SUPP)) {
444 		SET(hp->ocr, MMC_OCR_S18A);
445 		aprint_normal(" SDR50");
446 	}
447 	if (ISSET(caps2, SDHC_DDR50_SUPP)) {
448 		SET(hp->ocr, MMC_OCR_S18A);
449 		aprint_normal(" DDR50");
450 	}
451 	if (ISSET(caps2, SDHC_SDR104_SUPP)) {
452 		SET(hp->ocr, MMC_OCR_S18A);
453 		aprint_normal(" SDR104 HS200");
454 	}
455 	if (ISSET(caps, SDHC_VOLTAGE_SUPP_1_8V)) {
456 		SET(hp->ocr, MMC_OCR_1_7V_1_8V | MMC_OCR_1_8V_1_9V);
457 		aprint_normal(" 1.8V");
458 	}
459 	if (ISSET(caps, SDHC_VOLTAGE_SUPP_3_0V)) {
460 		SET(hp->ocr, MMC_OCR_2_9V_3_0V | MMC_OCR_3_0V_3_1V);
461 		aprint_normal(" 3.0V");
462 	}
463 	if (ISSET(caps, SDHC_VOLTAGE_SUPP_3_3V)) {
464 		SET(hp->ocr, MMC_OCR_3_2V_3_3V | MMC_OCR_3_3V_3_4V);
465 		aprint_normal(" 3.3V");
466 	}
467 	if (hp->specver >= SDHC_SPEC_VERS_300) {
468 		aprint_normal(", re-tuning mode %d", retuning_mode + 1);
469 		if (hp->tuning_timer_count)
470 			aprint_normal(" (%us timer)", hp->tuning_timer_count);
471 	}
472 
473 	/*
474 	 * Determine the maximum block length supported by the host
475 	 * controller. (2.2.24)
476 	 */
477 	switch((caps >> SDHC_MAX_BLK_LEN_SHIFT) & SDHC_MAX_BLK_LEN_MASK) {
478 	case SDHC_MAX_BLK_LEN_512:
479 		hp->maxblklen = 512;
480 		break;
481 
482 	case SDHC_MAX_BLK_LEN_1024:
483 		hp->maxblklen = 1024;
484 		break;
485 
486 	case SDHC_MAX_BLK_LEN_2048:
487 		hp->maxblklen = 2048;
488 		break;
489 
490 	case SDHC_MAX_BLK_LEN_4096:
491 		hp->maxblklen = 4096;
492 		break;
493 
494 	default:
495 		aprint_error_dev(sc->sc_dev, "max block length unknown\n");
496 		goto err;
497 	}
498 	aprint_normal(", %u byte blocks", hp->maxblklen);
499 	aprint_normal("\n");
500 
501 	if (ISSET(hp->flags, SHF_USE_ADMA2_MASK)) {
502 		int rseg;
503 
504 		/* Allocate ADMA2 descriptor memory */
505 		error = bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE, PAGE_SIZE,
506 		    PAGE_SIZE, hp->adma_segs, 1, &rseg, BUS_DMA_WAITOK);
507 		if (error) {
508 			aprint_error_dev(sc->sc_dev,
509 			    "ADMA2 dmamem_alloc failed (%d)\n", error);
510 			goto adma_done;
511 		}
512 		error = bus_dmamem_map(sc->sc_dmat, hp->adma_segs, rseg,
513 		    PAGE_SIZE, (void **)&hp->adma2, BUS_DMA_WAITOK);
514 		if (error) {
515 			aprint_error_dev(sc->sc_dev,
516 			    "ADMA2 dmamem_map failed (%d)\n", error);
517 			goto adma_done;
518 		}
519 		error = bus_dmamap_create(sc->sc_dmat, PAGE_SIZE, 1, PAGE_SIZE,
520 		    0, BUS_DMA_WAITOK, &hp->adma_map);
521 		if (error) {
522 			aprint_error_dev(sc->sc_dev,
523 			    "ADMA2 dmamap_create failed (%d)\n", error);
524 			goto adma_done;
525 		}
526 		error = bus_dmamap_load(sc->sc_dmat, hp->adma_map,
527 		    hp->adma2, PAGE_SIZE, NULL,
528 		    BUS_DMA_WAITOK|BUS_DMA_WRITE);
529 		if (error) {
530 			aprint_error_dev(sc->sc_dev,
531 			    "ADMA2 dmamap_load failed (%d)\n", error);
532 			goto adma_done;
533 		}
534 
535 		memset(hp->adma2, 0, PAGE_SIZE);
536 
537 adma_done:
538 		if (error)
539 			CLR(hp->flags, SHF_USE_ADMA2_MASK);
540 	}
541 
542 	/*
543 	 * Attach the generic SD/MMC bus driver.  (The bus driver must
544 	 * not invoke any chipset functions before it is attached.)
545 	 */
546 	memset(&saa, 0, sizeof(saa));
547 	saa.saa_busname = "sdmmc";
548 	saa.saa_sct = &sdhc_functions;
549 	saa.saa_sch = hp;
550 	saa.saa_dmat = hp->dmat;
551 	saa.saa_clkmax = hp->clkbase;
552 	if (ISSET(sc->sc_flags, SDHC_FLAG_HAVE_CGM))
553 		saa.saa_clkmin = hp->clkbase / 256 / 2046;
554 	else if (ISSET(sc->sc_flags, SDHC_FLAG_HAVE_DVS))
555 		saa.saa_clkmin = hp->clkbase / 256 / 16;
556 	else if (hp->sc->sc_clkmsk != 0)
557 		saa.saa_clkmin = hp->clkbase / (hp->sc->sc_clkmsk >>
558 		    (ffs(hp->sc->sc_clkmsk) - 1));
559 	else if (hp->specver >= SDHC_SPEC_VERS_300)
560 		saa.saa_clkmin = hp->clkbase / 0x3ff;
561 	else
562 		saa.saa_clkmin = hp->clkbase / 256;
563 	saa.saa_caps = SMC_CAPS_4BIT_MODE|SMC_CAPS_AUTO_STOP;
564 	if (ISSET(sc->sc_flags, SDHC_FLAG_8BIT_MODE))
565 		saa.saa_caps |= SMC_CAPS_8BIT_MODE;
566 	if (ISSET(caps, SDHC_HIGH_SPEED_SUPP))
567 		saa.saa_caps |= SMC_CAPS_SD_HIGHSPEED;
568 	if (ISSET(caps2, SDHC_SDR104_SUPP))
569 		saa.saa_caps |= SMC_CAPS_UHS_SDR104 |
570 				SMC_CAPS_UHS_SDR50 |
571 				SMC_CAPS_MMC_HS200;
572 	if (ISSET(caps2, SDHC_SDR50_SUPP))
573 		saa.saa_caps |= SMC_CAPS_UHS_SDR50;
574 	if (ISSET(caps2, SDHC_DDR50_SUPP))
575 		saa.saa_caps |= SMC_CAPS_UHS_DDR50;
576 	if (ISSET(hp->flags, SHF_USE_DMA)) {
577 		saa.saa_caps |= SMC_CAPS_DMA;
578 		if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED))
579 			saa.saa_caps |= SMC_CAPS_MULTI_SEG_DMA;
580 	}
581 	if (ISSET(sc->sc_flags, SDHC_FLAG_SINGLE_ONLY))
582 		saa.saa_caps |= SMC_CAPS_SINGLE_ONLY;
583 	if (ISSET(sc->sc_flags, SDHC_FLAG_POLL_CARD_DET))
584 		saa.saa_caps |= SMC_CAPS_POLL_CARD_DET;
585 	hp->sdmmc = config_found(sc->sc_dev, &saa, sdhc_cfprint);
586 
587 	return 0;
588 
589 err:
590 	callout_destroy(&hp->tuning_timer);
591 	cv_destroy(&hp->intr_cv);
592 	mutex_destroy(&hp->intr_lock);
593 	free(hp, M_DEVBUF);
594 	sc->sc_host[--sc->sc_nhosts] = NULL;
595 err1:
596 	return 1;
597 }
598 
599 int
600 sdhc_detach(struct sdhc_softc *sc, int flags)
601 {
602 	struct sdhc_host *hp;
603 	int rv = 0;
604 
605 	for (size_t n = 0; n < sc->sc_nhosts; n++) {
606 		hp = sc->sc_host[n];
607 		if (hp == NULL)
608 			continue;
609 		if (hp->sdmmc != NULL) {
610 			rv = config_detach(hp->sdmmc, flags);
611 			if (rv)
612 				break;
613 			hp->sdmmc = NULL;
614 		}
615 		/* disable interrupts */
616 		if ((flags & DETACH_FORCE) == 0) {
617 			mutex_enter(&hp->intr_lock);
618 			if (ISSET(hp->sc->sc_flags, SDHC_FLAG_32BIT_ACCESS)) {
619 				HWRITE4(hp, SDHC_NINTR_SIGNAL_EN, 0);
620 			} else {
621 				HWRITE2(hp, SDHC_NINTR_SIGNAL_EN, 0);
622 			}
623 			sdhc_soft_reset(hp, SDHC_RESET_ALL);
624 			mutex_exit(&hp->intr_lock);
625 		}
626 		callout_halt(&hp->tuning_timer, NULL);
627 		callout_destroy(&hp->tuning_timer);
628 		cv_destroy(&hp->intr_cv);
629 		mutex_destroy(&hp->intr_lock);
630 		if (hp->ios > 0) {
631 			bus_space_unmap(hp->iot, hp->ioh, hp->ios);
632 			hp->ios = 0;
633 		}
634 		if (ISSET(hp->flags, SHF_USE_ADMA2_MASK)) {
635 			bus_dmamap_unload(sc->sc_dmat, hp->adma_map);
636 			bus_dmamap_destroy(sc->sc_dmat, hp->adma_map);
637 			bus_dmamem_unmap(sc->sc_dmat, hp->adma2, PAGE_SIZE);
638 			bus_dmamem_free(sc->sc_dmat, hp->adma_segs, 1);
639 		}
640 		free(hp, M_DEVBUF);
641 		sc->sc_host[n] = NULL;
642 	}
643 
644 	return rv;
645 }
646 
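/*
 * Power management: hp->regs[] shadows the first sizeof(hp->regs) bytes of
 * the slot's register file.  sdhc_suspend() snapshots them and sdhc_resume()
 * replays them after a full controller reset.
 */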
647 bool
648 sdhc_suspend(device_t dev, const pmf_qual_t *qual)
649 {
650 	struct sdhc_softc *sc = device_private(dev);
651 	struct sdhc_host *hp;
652 	size_t i;
653 
654 	/* XXX poll for command completion or suspend command
655 	 * in progress */
656 
657 	/* Save the host controller state. */
658 	for (size_t n = 0; n < sc->sc_nhosts; n++) {
659 		hp = sc->sc_host[n];
660 		if (ISSET(sc->sc_flags, SDHC_FLAG_32BIT_ACCESS)) {
661 			for (i = 0; i < sizeof hp->regs; i += 4) {
662 				uint32_t v = HREAD4(hp, i);
663 				hp->regs[i + 0] = (v >> 0);
664 				hp->regs[i + 1] = (v >> 8);
665 				if (i + 3 < sizeof hp->regs) {
666 					hp->regs[i + 2] = (v >> 16);
667 					hp->regs[i + 3] = (v >> 24);
668 				}
669 			}
670 		} else {
671 			for (i = 0; i < sizeof hp->regs; i++) {
672 				hp->regs[i] = HREAD1(hp, i);
673 			}
674 		}
675 	}
676 	return true;
677 }
678 
679 bool
680 sdhc_resume(device_t dev, const pmf_qual_t *qual)
681 {
682 	struct sdhc_softc *sc = device_private(dev);
683 	struct sdhc_host *hp;
684 	size_t i;
685 
686 	/* Restore the host controller state. */
687 	for (size_t n = 0; n < sc->sc_nhosts; n++) {
688 		hp = sc->sc_host[n];
689 		(void)sdhc_host_reset(hp);
690 		if (ISSET(sc->sc_flags, SDHC_FLAG_32BIT_ACCESS)) {
691 			for (i = 0; i < sizeof hp->regs; i += 4) {
692 				if (i + 3 < sizeof hp->regs) {
693 					HWRITE4(hp, i,
694 					    (hp->regs[i + 0] << 0)
695 					    | (hp->regs[i + 1] << 8)
696 					    | (hp->regs[i + 2] << 16)
697 					    | (hp->regs[i + 3] << 24));
698 				} else {
699 					HWRITE4(hp, i,
700 					    (hp->regs[i + 0] << 0)
701 					    | (hp->regs[i + 1] << 8));
702 				}
703 			}
704 		} else {
705 			for (i = 0; i < sizeof hp->regs; i++) {
706 				HWRITE1(hp, i, hp->regs[i]);
707 			}
708 		}
709 	}
710 	return true;
711 }
712 
713 bool
714 sdhc_shutdown(device_t dev, int flags)
715 {
716 	struct sdhc_softc *sc = device_private(dev);
717 	struct sdhc_host *hp;
718 
719 	/* XXX chip locks up if we don't disable it before reboot. */
720 	for (size_t i = 0; i < sc->sc_nhosts; i++) {
721 		hp = sc->sc_host[i];
722 		(void)sdhc_host_reset(hp);
723 	}
724 	return true;
725 }
726 
727 /*
728  * Reset the host controller.  Called during initialization, when
729  * cards are removed, upon resume, and during error recovery.
730  */
731 static int
732 sdhc_host_reset1(sdmmc_chipset_handle_t sch)
733 {
734 	struct sdhc_host *hp = (struct sdhc_host *)sch;
735 	uint32_t sdhcimask;
736 	int error;
737 
738 	KASSERT(mutex_owned(&hp->intr_lock));
739 
740 	/* Disable all interrupts. */
741 	if (ISSET(hp->sc->sc_flags, SDHC_FLAG_32BIT_ACCESS)) {
742 		HWRITE4(hp, SDHC_NINTR_SIGNAL_EN, 0);
743 	} else {
744 		HWRITE2(hp, SDHC_NINTR_SIGNAL_EN, 0);
745 	}
746 
747 	/*
748 	 * Reset the entire host controller and wait up to 100ms for
749 	 * the controller to clear the reset bit.
750 	 */
751 	error = sdhc_soft_reset(hp, SDHC_RESET_ALL);
752 	if (error)
753 		goto out;
754 
755 	/* Set data timeout counter value to max for now. */
756 	HWRITE1(hp, SDHC_TIMEOUT_CTL, SDHC_TIMEOUT_MAX);
757 #if 1
758 	if (ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED))
759 		HWRITE4(hp, SDHC_NINTR_STATUS, SDHC_CMD_TIMEOUT_ERROR << 16);
760 #endif
761 
762 	/* Enable interrupts. */
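	/*
	 * sdhcimask is written to the status-enable register(s) as is; the
	 * XORs below then derive the signal-enable value from it.  On
	 * 32-bit-access hosts the upper half switches from the error-status
	 * mask to the error-signal mask, and in both layouts the
	 * buffer-ready bits are dropped: those are only signal-enabled on
	 * demand during PIO transfers (see sdhc_transfer_data_pio()).
	 */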
763 	sdhcimask = SDHC_CARD_REMOVAL | SDHC_CARD_INSERTION |
764 	    SDHC_BUFFER_READ_READY | SDHC_BUFFER_WRITE_READY |
765 	    SDHC_DMA_INTERRUPT | SDHC_BLOCK_GAP_EVENT |
766 	    SDHC_TRANSFER_COMPLETE | SDHC_COMMAND_COMPLETE;
767 	if (ISSET(hp->sc->sc_flags, SDHC_FLAG_32BIT_ACCESS)) {
768 		sdhcimask |= SDHC_EINTR_STATUS_MASK << 16;
769 		HWRITE4(hp, SDHC_NINTR_STATUS_EN, sdhcimask);
770 		sdhcimask ^=
771 		    (SDHC_EINTR_STATUS_MASK ^ SDHC_EINTR_SIGNAL_MASK) << 16;
772 		sdhcimask ^= SDHC_BUFFER_READ_READY ^ SDHC_BUFFER_WRITE_READY;
773 		HWRITE4(hp, SDHC_NINTR_SIGNAL_EN, sdhcimask);
774 	} else {
775 		HWRITE2(hp, SDHC_NINTR_STATUS_EN, sdhcimask);
776 		HWRITE2(hp, SDHC_EINTR_STATUS_EN, SDHC_EINTR_STATUS_MASK);
777 		sdhcimask ^= SDHC_BUFFER_READ_READY ^ SDHC_BUFFER_WRITE_READY;
778 		HWRITE2(hp, SDHC_NINTR_SIGNAL_EN, sdhcimask);
779 		HWRITE2(hp, SDHC_EINTR_SIGNAL_EN, SDHC_EINTR_SIGNAL_MASK);
780 	}
781 
782 out:
783 	return error;
784 }
785 
786 static int
787 sdhc_host_reset(sdmmc_chipset_handle_t sch)
788 {
789 	struct sdhc_host *hp = (struct sdhc_host *)sch;
790 	int error;
791 
792 	mutex_enter(&hp->intr_lock);
793 	error = sdhc_host_reset1(sch);
794 	mutex_exit(&hp->intr_lock);
795 
796 	return error;
797 }
798 
799 static uint32_t
800 sdhc_host_ocr(sdmmc_chipset_handle_t sch)
801 {
802 	struct sdhc_host *hp = (struct sdhc_host *)sch;
803 
804 	return hp->ocr;
805 }
806 
807 static int
808 sdhc_host_maxblklen(sdmmc_chipset_handle_t sch)
809 {
810 	struct sdhc_host *hp = (struct sdhc_host *)sch;
811 
812 	return hp->maxblklen;
813 }
814 
815 /*
816  * Return non-zero if the card is currently inserted.
817  */
818 static int
819 sdhc_card_detect(sdmmc_chipset_handle_t sch)
820 {
821 	struct sdhc_host *hp = (struct sdhc_host *)sch;
822 	int r;
823 
824 	if (hp->sc->sc_vendor_card_detect)
825 		return (*hp->sc->sc_vendor_card_detect)(hp->sc);
826 
827 	r = ISSET(HREAD4(hp, SDHC_PRESENT_STATE), SDHC_CARD_INSERTED);
828 
829 	return r ? 1 : 0;
830 }
831 
832 /*
833  * Return non-zero if the card is currently write-protected.
834  */
835 static int
836 sdhc_write_protect(sdmmc_chipset_handle_t sch)
837 {
838 	struct sdhc_host *hp = (struct sdhc_host *)sch;
839 	int r;
840 
841 	if (hp->sc->sc_vendor_write_protect)
842 		return (*hp->sc->sc_vendor_write_protect)(hp->sc);
843 
844 	r = ISSET(HREAD4(hp, SDHC_PRESENT_STATE), SDHC_WRITE_PROTECT_SWITCH);
845 
846 	return r ? 0 : 1;
847 }
848 
849 /*
850  * Set or change SD bus voltage and enable or disable SD bus power.
851  * Return zero on success.
852  */
853 static int
854 sdhc_bus_power(sdmmc_chipset_handle_t sch, uint32_t ocr)
855 {
856 	struct sdhc_host *hp = (struct sdhc_host *)sch;
857 	uint8_t vdd;
858 	int error = 0;
859 	const uint32_t pcmask =
860 	    ~(SDHC_BUS_POWER | (SDHC_VOLTAGE_MASK << SDHC_VOLTAGE_SHIFT));
861 
862 	mutex_enter(&hp->intr_lock);
863 
864 	/*
865 	 * Disable bus power before voltage change.
866 	 */
867 	if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_32BIT_ACCESS)
868 	    && !ISSET(hp->sc->sc_flags, SDHC_FLAG_NO_PWR0))
869 		HWRITE1(hp, SDHC_POWER_CTL, 0);
870 
871 	/* If power is disabled, reset the host and return now. */
872 	if (ocr == 0) {
873 		(void)sdhc_host_reset1(hp);
874 		callout_halt(&hp->tuning_timer, &hp->intr_lock);
875 		goto out;
876 	}
877 
878 	/*
879 	 * Select the lowest voltage according to capabilities.
880 	 */
881 	ocr &= hp->ocr;
882 	if (ISSET(ocr, MMC_OCR_1_7V_1_8V|MMC_OCR_1_8V_1_9V)) {
883 		vdd = SDHC_VOLTAGE_1_8V;
884 	} else if (ISSET(ocr, MMC_OCR_2_9V_3_0V|MMC_OCR_3_0V_3_1V)) {
885 		vdd = SDHC_VOLTAGE_3_0V;
886 	} else if (ISSET(ocr, MMC_OCR_3_2V_3_3V|MMC_OCR_3_3V_3_4V)) {
887 		vdd = SDHC_VOLTAGE_3_3V;
888 	} else {
889 		/* Unsupported voltage level requested. */
890 		error = EINVAL;
891 		goto out;
892 	}
893 
894 	if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED)) {
895 		/*
896 		 * Enable bus power.  Wait at least 1 ms (or 74 clocks) plus
897 		 * voltage ramp until power rises.
898 		 */
899 
900 		if (ISSET(hp->sc->sc_flags, SDHC_FLAG_SINGLE_POWER_WRITE)) {
901 			HWRITE1(hp, SDHC_POWER_CTL,
902 			    (vdd << SDHC_VOLTAGE_SHIFT) | SDHC_BUS_POWER);
903 		} else {
904 			HWRITE1(hp, SDHC_POWER_CTL,
905 			    HREAD1(hp, SDHC_POWER_CTL) & pcmask);
906 			sdmmc_delay(1);
907 			HWRITE1(hp, SDHC_POWER_CTL,
908 			    (vdd << SDHC_VOLTAGE_SHIFT));
909 			sdmmc_delay(1);
910 			HSET1(hp, SDHC_POWER_CTL, SDHC_BUS_POWER);
911 			sdmmc_delay(10000);
912 		}
913 
914 		/*
915 		 * The host system may not power the bus due to battery low,
916 		 * etc.  In that case, the host controller should clear the
917 		 * bus power bit.
918 		 */
919 		if (!ISSET(HREAD1(hp, SDHC_POWER_CTL), SDHC_BUS_POWER)) {
920 			error = ENXIO;
921 			goto out;
922 		}
923 	}
924 
925 out:
926 	mutex_exit(&hp->intr_lock);
927 
928 	return error;
929 }
930 
931 /*
932  * Return the smallest possible base clock frequency divisor value
933  * for the CLOCK_CTL register to produce `freq' (kHz).
934  */
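/*
 * Worked example for the SDHC 3.0 10-bit divisor path below, assuming a
 * 100 MHz base clock (clkbase = 100000) and freq = 400 kHz:
 * howmany(100000, 400) = 250, halved and rounded up to div = 125, and the
 * controller then divides the base clock by 2 * 125 = 250, giving 400 kHz.
 */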
935 static bool
936 sdhc_clock_divisor(struct sdhc_host *hp, u_int freq, u_int *divp)
937 {
938 	u_int div;
939 
940 	if (ISSET(hp->sc->sc_flags, SDHC_FLAG_HAVE_CGM)) {
941 		for (div = hp->clkbase / freq; div <= 0x3ff; div++) {
942 			if ((hp->clkbase / div) <= freq) {
943 				*divp = SDHC_SDCLK_CGM
944 				    | ((div & 0x300) << SDHC_SDCLK_XDIV_SHIFT)
945 				    | ((div & 0x0ff) << SDHC_SDCLK_DIV_SHIFT);
946 				//freq = hp->clkbase / div;
947 				return true;
948 			}
949 		}
950 		/* No divisor found. */
951 		return false;
952 	}
953 	if (ISSET(hp->sc->sc_flags, SDHC_FLAG_HAVE_DVS)) {
954 		u_int dvs = (hp->clkbase + freq - 1) / freq;
955 		u_int roundup = dvs & 1;
956 		for (dvs >>= 1, div = 1; div <= 256; div <<= 1, dvs >>= 1) {
957 			if (dvs + roundup <= 16) {
958 				dvs += roundup - 1;
959 				*divp = (div << SDHC_SDCLK_DIV_SHIFT)
960 				    |   (dvs << SDHC_SDCLK_DVS_SHIFT);
961 				DPRINTF(2,
962 				    ("%s: divisor for freq %u is %u * %u\n",
963 				    HDEVNAME(hp), freq, div * 2, dvs + 1));
964 				//freq = hp->clkbase / (div * 2) * (dvs + 1);
965 				return true;
966 			}
967 			/*
968 			 * If we drop bits, we need to round up the divisor.
969 			 */
970 			roundup |= dvs & 1;
971 		}
972 		/* No divisor found. */
973 		return false;
974 	}
975 	if (hp->sc->sc_clkmsk != 0) {
976 		div = howmany(hp->clkbase, freq);
977 		if (div > (hp->sc->sc_clkmsk >> (ffs(hp->sc->sc_clkmsk) - 1)))
978 			return false;
979 		*divp = div << (ffs(hp->sc->sc_clkmsk) - 1);
980 		//freq = hp->clkbase / div;
981 		return true;
982 	}
983 	if (hp->specver >= SDHC_SPEC_VERS_300) {
984 		div = howmany(hp->clkbase, freq);
985 		div = div > 1 ? howmany(div, 2) : 0;
986 		if (div > 0x3ff)
987 			return false;
988 		*divp = (((div >> 8) & SDHC_SDCLK_XDIV_MASK)
989 			 << SDHC_SDCLK_XDIV_SHIFT) |
990 			(((div >> 0) & SDHC_SDCLK_DIV_MASK)
991 			 << SDHC_SDCLK_DIV_SHIFT);
992 		//freq = hp->clkbase / (div ? div * 2 : 1);
993 		return true;
994 	} else {
995 		for (div = 1; div <= 256; div *= 2) {
996 			if ((hp->clkbase / div) <= freq) {
997 				*divp = (div / 2) << SDHC_SDCLK_DIV_SHIFT;
998 				//freq = hp->clkbase / div;
999 				return true;
1000 			}
1001 		}
1002 		/* No divisor found. */
1003 		return false;
1004 	}
1005 	/* No divisor found. */
1006 	return false;
1007 }
1008 
1009 /*
1010  * Set or change SDCLK frequency or disable the SD clock.
1011  * Return zero on success.
1012  */
1013 static int
1014 sdhc_bus_clock_ddr(sdmmc_chipset_handle_t sch, int freq, bool ddr)
1015 {
1016 	struct sdhc_host *hp = (struct sdhc_host *)sch;
1017 	u_int div;
1018 	u_int timo;
1019 	int16_t reg;
1020 	int error = 0;
1021 	bool present __diagused;
1022 
1023 	mutex_enter(&hp->intr_lock);
1024 
1025 #ifdef DIAGNOSTIC
1026 	present = ISSET(HREAD4(hp, SDHC_PRESENT_STATE), SDHC_CMD_INHIBIT_MASK);
1027 
1028 	/* Must not stop the clock if commands are in progress. */
1029 	if (present && sdhc_card_detect(hp)) {
1030 		aprint_normal_dev(hp->sc->sc_dev,
1031 		    "%s: command in progress\n", __func__);
1032 	}
1033 #endif
1034 
1035 	if (hp->sc->sc_vendor_bus_clock) {
1036 		error = (*hp->sc->sc_vendor_bus_clock)(hp->sc, freq);
1037 		if (error != 0)
1038 			goto out;
1039 	}
1040 
1041 	/*
1042 	 * Stop SD clock before changing the frequency.
1043 	 */
1044 	if (ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED)) {
1045 		HCLR4(hp, SDHC_CLOCK_CTL, 0xfff8);
1046 		if (freq == SDMMC_SDCLK_OFF) {
1047 			HSET4(hp, SDHC_CLOCK_CTL, 0x80f0);
1048 			goto out;
1049 		}
1050 	} else {
1051 		HCLR2(hp, SDHC_CLOCK_CTL, SDHC_SDCLK_ENABLE);
1052 		if (freq == SDMMC_SDCLK_OFF)
1053 			goto out;
1054 	}
1055 
1056 	if (hp->specver >= SDHC_SPEC_VERS_300) {
1057 		HCLR2(hp, SDHC_HOST_CTL2, SDHC_UHS_MODE_SELECT_MASK);
1058 		if (freq > 100000) {
1059 			HSET2(hp, SDHC_HOST_CTL2, SDHC_UHS_MODE_SELECT_SDR104);
1060 		} else if (freq > 50000) {
1061 			HSET2(hp, SDHC_HOST_CTL2, SDHC_UHS_MODE_SELECT_SDR50);
1062 		} else if (freq > 25000) {
1063 			if (ddr) {
1064 				HSET2(hp, SDHC_HOST_CTL2,
1065 				    SDHC_UHS_MODE_SELECT_DDR50);
1066 			} else {
1067 				HSET2(hp, SDHC_HOST_CTL2,
1068 				    SDHC_UHS_MODE_SELECT_SDR25);
1069 			}
1070 		} else if (freq > 400) {
1071 			HSET2(hp, SDHC_HOST_CTL2, SDHC_UHS_MODE_SELECT_SDR12);
1072 		}
1073 	}
1074 
1075 	/*
1076 	 * Slow down Ricoh 5U823 controller that isn't reliable
1077 	 * at 100MHz bus clock.
1078 	 */
1079 	if (ISSET(hp->sc->sc_flags, SDHC_FLAG_SLOW_SDR50)) {
1080 		if (freq == 100000)
1081 			--freq;
1082 	}
1083 
1084 	/*
1085 	 * Set the minimum base clock frequency divisor.
1086 	 */
1087 	if (!sdhc_clock_divisor(hp, freq, &div)) {
1088 		/* Invalid base clock frequency or `freq' value. */
1089 		aprint_error_dev(hp->sc->sc_dev,
1090 			"Invalid bus clock %d kHz\n", freq);
1091 		error = EINVAL;
1092 		goto out;
1093 	}
1094 	if (ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED)) {
1095 		HWRITE4(hp, SDHC_CLOCK_CTL,
1096 		    div | (SDHC_TIMEOUT_MAX << 16));
1097 	} else {
1098 		reg = HREAD2(hp, SDHC_CLOCK_CTL);
1099 		reg &= (SDHC_INTCLK_STABLE | SDHC_INTCLK_ENABLE);
1100 		HWRITE2(hp, SDHC_CLOCK_CTL, reg | div);
1101 	}
1102 
1103 	/*
1104 	 * Start internal clock.  Wait 10ms for stabilization.
1105 	 */
1106 	if (ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED)) {
1107 		sdmmc_delay(10000);
1108 		HSET4(hp, SDHC_CLOCK_CTL,
1109 		    8 | SDHC_INTCLK_ENABLE | SDHC_INTCLK_STABLE);
1110 	} else {
1111 		HSET2(hp, SDHC_CLOCK_CTL, SDHC_INTCLK_ENABLE);
1112 		for (timo = 1000; timo > 0; timo--) {
1113 			if (ISSET(HREAD2(hp, SDHC_CLOCK_CTL),
1114 			    SDHC_INTCLK_STABLE))
1115 				break;
1116 			sdmmc_delay(10);
1117 		}
1118 		if (timo == 0) {
1119 			error = ETIMEDOUT;
1120 			DPRINTF(1,("%s: timeout\n", __func__));
1121 			goto out;
1122 		}
1123 	}
1124 
1125 	if (ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED)) {
1126 		HSET1(hp, SDHC_SOFTWARE_RESET, SDHC_INIT_ACTIVE);
1127 		/*
1128 		 * Sending 80 clocks at 400kHz takes 200us.
1129 		 * So delay for that time + slop and then
1130 		 * check a few times for completion.
1131 		 */
1132 		sdmmc_delay(210);
1133 		for (timo = 10; timo > 0; timo--) {
1134 			if (!ISSET(HREAD1(hp, SDHC_SOFTWARE_RESET),
1135 			    SDHC_INIT_ACTIVE))
1136 				break;
1137 			sdmmc_delay(10);
1138 		}
1139 		DPRINTF(2,("%s: %u init spins\n", __func__, 10 - timo));
1140 
1141 		/*
1142 		 * Enable SD clock.
1143 		 */
1144 		HSET4(hp, SDHC_CLOCK_CTL, SDHC_SDCLK_ENABLE);
1145 	} else {
1146 		/*
1147 		 * Enable SD clock.
1148 		 */
1149 		HSET2(hp, SDHC_CLOCK_CTL, SDHC_SDCLK_ENABLE);
1150 
1151 		if (freq > 25000 &&
1152 		    !ISSET(hp->sc->sc_flags, SDHC_FLAG_NO_HS_BIT))
1153 			HSET1(hp, SDHC_HOST_CTL, SDHC_HIGH_SPEED);
1154 		else
1155 			HCLR1(hp, SDHC_HOST_CTL, SDHC_HIGH_SPEED);
1156 	}
1157 
1158 out:
1159 	mutex_exit(&hp->intr_lock);
1160 
1161 	return error;
1162 }
1163 
1164 static int
1165 sdhc_bus_width(sdmmc_chipset_handle_t sch, int width)
1166 {
1167 	struct sdhc_host *hp = (struct sdhc_host *)sch;
1168 	int reg;
1169 
1170 	switch (width) {
1171 	case 1:
1172 	case 4:
1173 		break;
1174 
1175 	case 8:
1176 		if (ISSET(hp->sc->sc_flags, SDHC_FLAG_8BIT_MODE))
1177 			break;
1178 		/* FALLTHROUGH */
1179 	default:
1180 		DPRINTF(0,("%s: unsupported bus width (%d)\n",
1181 		    HDEVNAME(hp), width));
1182 		return 1;
1183 	}
1184 
1185 	if (hp->sc->sc_vendor_bus_width) {
1186 		const int error = hp->sc->sc_vendor_bus_width(hp->sc, width);
1187 		if (error != 0)
1188 			return error;
1189 	}
1190 
1191 	mutex_enter(&hp->intr_lock);
1192 
1193 	reg = HREAD1(hp, SDHC_HOST_CTL);
1194 	if (ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED)) {
1195 		reg &= ~(SDHC_4BIT_MODE|SDHC_ESDHC_8BIT_MODE);
1196 		if (width == 4)
1197 			reg |= SDHC_4BIT_MODE;
1198 		else if (width == 8)
1199 			reg |= SDHC_ESDHC_8BIT_MODE;
1200 	} else {
1201 		reg &= ~SDHC_4BIT_MODE;
1202 		if (hp->specver >= SDHC_SPEC_VERS_300) {
1203 			reg &= ~SDHC_8BIT_MODE;
1204 		}
1205 		if (width == 4) {
1206 			reg |= SDHC_4BIT_MODE;
1207 		} else if (width == 8 && hp->specver >= SDHC_SPEC_VERS_300) {
1208 			reg |= SDHC_8BIT_MODE;
1209 		}
1210 	}
1211 	HWRITE1(hp, SDHC_HOST_CTL, reg);
1212 
1213 	mutex_exit(&hp->intr_lock);
1214 
1215 	return 0;
1216 }
1217 
1218 static int
1219 sdhc_bus_rod(sdmmc_chipset_handle_t sch, int on)
1220 {
1221 	struct sdhc_host *hp = (struct sdhc_host *)sch;
1222 
1223 	if (hp->sc->sc_vendor_rod)
1224 		return (*hp->sc->sc_vendor_rod)(hp->sc, on);
1225 
1226 	return 0;
1227 }
1228 
1229 static void
1230 sdhc_card_enable_intr(sdmmc_chipset_handle_t sch, int enable)
1231 {
1232 	struct sdhc_host *hp = (struct sdhc_host *)sch;
1233 
1234 	if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED)) {
1235 		mutex_enter(&hp->intr_lock);
1236 		if (enable) {
1237 			HSET2(hp, SDHC_NINTR_STATUS_EN, SDHC_CARD_INTERRUPT);
1238 			HSET2(hp, SDHC_NINTR_SIGNAL_EN, SDHC_CARD_INTERRUPT);
1239 		} else {
1240 			HCLR2(hp, SDHC_NINTR_SIGNAL_EN, SDHC_CARD_INTERRUPT);
1241 			HCLR2(hp, SDHC_NINTR_STATUS_EN, SDHC_CARD_INTERRUPT);
1242 		}
1243 		mutex_exit(&hp->intr_lock);
1244 	}
1245 }
1246 
1247 static void
1248 sdhc_card_intr_ack(sdmmc_chipset_handle_t sch)
1249 {
1250 	struct sdhc_host *hp = (struct sdhc_host *)sch;
1251 
1252 	if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED)) {
1253 		mutex_enter(&hp->intr_lock);
1254 		HSET2(hp, SDHC_NINTR_STATUS_EN, SDHC_CARD_INTERRUPT);
1255 		mutex_exit(&hp->intr_lock);
1256 	}
1257 }
1258 
1259 static int
1260 sdhc_signal_voltage(sdmmc_chipset_handle_t sch, int signal_voltage)
1261 {
1262 	struct sdhc_host *hp = (struct sdhc_host *)sch;
1263 
1264 	mutex_enter(&hp->intr_lock);
1265 	switch (signal_voltage) {
1266 	case SDMMC_SIGNAL_VOLTAGE_180:
1267 		HSET2(hp, SDHC_HOST_CTL2, SDHC_1_8V_SIGNAL_EN);
1268 		break;
1269 	case SDMMC_SIGNAL_VOLTAGE_330:
1270 		HCLR2(hp, SDHC_HOST_CTL2, SDHC_1_8V_SIGNAL_EN);
1271 		break;
1272 	default:
1273 		mutex_exit(&hp->intr_lock);
		return EINVAL;
1274 	}
1275 	mutex_exit(&hp->intr_lock);
1276 
1277 	return 0;
1278 }
1279 
1280 /*
1281  * Sampling clock tuning procedure (UHS)
1282  */
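/*
 * The loop below follows the standard procedure: set SDHC_EXECUTE_TUNING,
 * then repeatedly issue the tuning command (CMD19 for SD, CMD21 for eMMC
 * HS200) and wait for Buffer Read Ready until the controller clears the
 * bit, giving up after roughly 40 attempts.  Success is indicated by the
 * controller leaving SDHC_SAMPLING_CLOCK_SEL set; otherwise we fall back
 * to the fixed sampling clock.
 */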
1283 static int
1284 sdhc_execute_tuning1(struct sdhc_host *hp, int timing)
1285 {
1286 	struct sdmmc_command cmd;
1287 	uint8_t hostctl;
1288 	int opcode, error, retry = 40;
1289 
1290 	KASSERT(mutex_owned(&hp->intr_lock));
1291 
1292 	hp->tuning_timing = timing;
1293 
1294 	switch (timing) {
1295 	case SDMMC_TIMING_MMC_HS200:
1296 		opcode = MMC_SEND_TUNING_BLOCK_HS200;
1297 		break;
1298 	case SDMMC_TIMING_UHS_SDR50:
1299 		if (!ISSET(hp->sc->sc_caps2, SDHC_TUNING_SDR50))
1300 			return 0;
1301 		/* FALLTHROUGH */
1302 	case SDMMC_TIMING_UHS_SDR104:
1303 		opcode = MMC_SEND_TUNING_BLOCK;
1304 		break;
1305 	default:
1306 		return EINVAL;
1307 	}
1308 
1309 	hostctl = HREAD1(hp, SDHC_HOST_CTL);
1310 
1311 	/* enable buffer read ready interrupt */
1312 	HSET2(hp, SDHC_NINTR_SIGNAL_EN, SDHC_BUFFER_READ_READY);
1313 	HSET2(hp, SDHC_NINTR_STATUS_EN, SDHC_BUFFER_READ_READY);
1314 
1315 	/* disable DMA */
1316 	HCLR1(hp, SDHC_HOST_CTL, SDHC_DMA_SELECT);
1317 
1318 	/* reset tuning circuit */
1319 	HCLR2(hp, SDHC_HOST_CTL2, SDHC_SAMPLING_CLOCK_SEL);
1320 
1321 	/* start of tuning */
1322 	HWRITE2(hp, SDHC_HOST_CTL2, SDHC_EXECUTE_TUNING);
1323 
1324 	do {
1325 		memset(&cmd, 0, sizeof(cmd));
1326 		cmd.c_opcode = opcode;
1327 		cmd.c_arg = 0;
1328 		cmd.c_flags = SCF_CMD_ADTC | SCF_CMD_READ | SCF_RSP_R1;
1329 		if (ISSET(hostctl, SDHC_8BIT_MODE)) {
1330 			cmd.c_blklen = cmd.c_datalen = 128;
1331 		} else {
1332 			cmd.c_blklen = cmd.c_datalen = 64;
1333 		}
1334 
1335 		error = sdhc_start_command(hp, &cmd);
1336 		if (error)
1337 			break;
1338 
1339 		if (!sdhc_wait_intr(hp, SDHC_BUFFER_READ_READY,
1340 		    SDHC_TUNING_TIMEOUT, false)) {
1341 			break;
1342 		}
1343 
1344 		delay(1000);
1345 	} while (HREAD2(hp, SDHC_HOST_CTL2) & SDHC_EXECUTE_TUNING && --retry);
1346 
1347 	/* disable buffer read ready interrupt */
1348 	HCLR2(hp, SDHC_NINTR_SIGNAL_EN, SDHC_BUFFER_READ_READY);
1349 	HCLR2(hp, SDHC_NINTR_STATUS_EN, SDHC_BUFFER_READ_READY);
1350 
1351 	if (HREAD2(hp, SDHC_HOST_CTL2) & SDHC_EXECUTE_TUNING) {
1352 		HCLR2(hp, SDHC_HOST_CTL2,
1353 		    SDHC_SAMPLING_CLOCK_SEL|SDHC_EXECUTE_TUNING);
1354 		sdhc_soft_reset(hp, SDHC_RESET_DAT|SDHC_RESET_CMD);
1355 		aprint_error_dev(hp->sc->sc_dev,
1356 		    "tuning did not complete, using fixed sampling clock\n");
1357 		return EIO;		/* tuning did not complete */
1358 	}
1359 
1360 	if ((HREAD2(hp, SDHC_HOST_CTL2) & SDHC_SAMPLING_CLOCK_SEL) == 0) {
1361 		HCLR2(hp, SDHC_HOST_CTL2,
1362 		    SDHC_SAMPLING_CLOCK_SEL|SDHC_EXECUTE_TUNING);
1363 		sdhc_soft_reset(hp, SDHC_RESET_DAT|SDHC_RESET_CMD);
1364 		aprint_error_dev(hp->sc->sc_dev,
1365 		    "tuning failed, using fixed sampling clock\n");
1366 		return EIO;		/* tuning failed */
1367 	}
1368 
1369 	if (hp->tuning_timer_count) {
1370 		callout_schedule(&hp->tuning_timer,
1371 		    hz * hp->tuning_timer_count);
1372 	}
1373 
1374 	return 0;		/* tuning completed */
1375 }
1376 
1377 static int
1378 sdhc_execute_tuning(sdmmc_chipset_handle_t sch, int timing)
1379 {
1380 	struct sdhc_host *hp = (struct sdhc_host *)sch;
1381 	int error;
1382 
1383 	mutex_enter(&hp->intr_lock);
1384 	error = sdhc_execute_tuning1(hp, timing);
1385 	mutex_exit(&hp->intr_lock);
1386 	return error;
1387 }
1388 
1389 static void
1390 sdhc_tuning_timer(void *arg)
1391 {
1392 	struct sdhc_host *hp = arg;
1393 
1394 	atomic_swap_uint(&hp->tuning_timer_pending, 1);
1395 }
1396 
1397 static int
1398 sdhc_wait_state(struct sdhc_host *hp, uint32_t mask, uint32_t value)
1399 {
1400 	uint32_t state;
1401 	int timeout;
1402 
1403 	for (timeout = 10000; timeout > 0; timeout--) {
1404 		if (((state = HREAD4(hp, SDHC_PRESENT_STATE)) & mask) == value)
1405 			return 0;
1406 		sdmmc_delay(10);
1407 	}
1408 	aprint_error_dev(hp->sc->sc_dev, "timeout waiting for mask %#x value %#x (state=%#x)\n",
1409 	    mask, value, state);
1410 	return ETIMEDOUT;
1411 }
1412 
1413 static void
1414 sdhc_exec_command(sdmmc_chipset_handle_t sch, struct sdmmc_command *cmd)
1415 {
1416 	struct sdhc_host *hp = (struct sdhc_host *)sch;
1417 	int error;
1418 	bool probing;
1419 
1420 	mutex_enter(&hp->intr_lock);
1421 
1422 	if (atomic_cas_uint(&hp->tuning_timer_pending, 1, 0) == 1) {
1423 		(void)sdhc_execute_tuning1(hp, hp->tuning_timing);
1424 	}
1425 
1426 	if (cmd->c_data && ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED)) {
1427 		const uint16_t ready = SDHC_BUFFER_READ_READY | SDHC_BUFFER_WRITE_READY;
1428 		if (ISSET(hp->flags, SHF_USE_DMA)) {
1429 			HCLR2(hp, SDHC_NINTR_SIGNAL_EN, ready);
1430 			HCLR2(hp, SDHC_NINTR_STATUS_EN, ready);
1431 		} else {
1432 			HSET2(hp, SDHC_NINTR_SIGNAL_EN, ready);
1433 			HSET2(hp, SDHC_NINTR_STATUS_EN, ready);
1434 		}
1435 	}
1436 
1437 	if (ISSET(hp->sc->sc_flags, SDHC_FLAG_NO_TIMEOUT)) {
1438 		const uint16_t eintr = SDHC_CMD_TIMEOUT_ERROR;
1439 		if (cmd->c_data != NULL) {
1440 			HCLR2(hp, SDHC_EINTR_SIGNAL_EN, eintr);
1441 			HCLR2(hp, SDHC_EINTR_STATUS_EN, eintr);
1442 		} else {
1443 			HSET2(hp, SDHC_EINTR_SIGNAL_EN, eintr);
1444 			HSET2(hp, SDHC_EINTR_STATUS_EN, eintr);
1445 		}
1446 	}
1447 
1448 	/*
1449 	 * Start the MMC command, or mark `cmd' as failed and return.
1450 	 */
1451 	error = sdhc_start_command(hp, cmd);
1452 	if (error) {
1453 		cmd->c_error = error;
1454 		goto out;
1455 	}
1456 
1457 	/*
1458 	 * Wait until the command phase is done, or until the command
1459 	 * is marked done for any other reason.
1460 	 */
1461 	probing = (cmd->c_flags & SCF_TOUT_OK) != 0;
1462 	if (!sdhc_wait_intr(hp, SDHC_COMMAND_COMPLETE, SDHC_COMMAND_TIMEOUT, probing)) {
1463 		DPRINTF(1,("%s: timeout for command\n", __func__));
1464 		cmd->c_error = ETIMEDOUT;
1465 		goto out;
1466 	}
1467 
1468 	/*
1469 	 * The host controller removes bits [0:7] from the response
1470 	 * data (CRC) and we pass the data up unchanged to the bus
1471 	 * driver (without padding).
1472 	 */
1473 	if (cmd->c_error == 0 && ISSET(cmd->c_flags, SCF_RSP_PRESENT)) {
1474 		cmd->c_resp[0] = HREAD4(hp, SDHC_RESPONSE + 0);
1475 		if (ISSET(cmd->c_flags, SCF_RSP_136)) {
1476 			cmd->c_resp[1] = HREAD4(hp, SDHC_RESPONSE + 4);
1477 			cmd->c_resp[2] = HREAD4(hp, SDHC_RESPONSE + 8);
1478 			cmd->c_resp[3] = HREAD4(hp, SDHC_RESPONSE + 12);
1479 			if (ISSET(hp->sc->sc_flags, SDHC_FLAG_RSP136_CRC)) {
1480 				cmd->c_resp[0] = (cmd->c_resp[0] >> 8) |
1481 				    (cmd->c_resp[1] << 24);
1482 				cmd->c_resp[1] = (cmd->c_resp[1] >> 8) |
1483 				    (cmd->c_resp[2] << 24);
1484 				cmd->c_resp[2] = (cmd->c_resp[2] >> 8) |
1485 				    (cmd->c_resp[3] << 24);
1486 				cmd->c_resp[3] = (cmd->c_resp[3] >> 8);
1487 			}
1488 		}
1489 	}
1490 	DPRINTF(1,("%s: resp = %08x\n", HDEVNAME(hp), cmd->c_resp[0]));
1491 
1492 	/*
1493 	 * If the command has data to transfer in any direction,
1494 	 * execute the transfer now.
1495 	 */
1496 	if (cmd->c_error == 0 && cmd->c_data != NULL)
1497 		sdhc_transfer_data(hp, cmd);
1498 	else if (ISSET(cmd->c_flags, SCF_RSP_BSY)) {
1499 		if (!sdhc_wait_intr(hp, SDHC_TRANSFER_COMPLETE, hz * 10, false)) {
1500 			DPRINTF(1,("%s: sdhc_exec_command: RSP_BSY\n",
1501 			    HDEVNAME(hp)));
1502 			cmd->c_error = ETIMEDOUT;
1503 			goto out;
1504 		}
1505 	}
1506 
1507 out:
1508 	if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED)
1509 	    && !ISSET(hp->sc->sc_flags, SDHC_FLAG_NO_LED_ON)) {
1510 		/* Turn off the LED. */
1511 		HCLR1(hp, SDHC_HOST_CTL, SDHC_LED_ON);
1512 	}
1513 	SET(cmd->c_flags, SCF_ITSDONE);
1514 
1515 	mutex_exit(&hp->intr_lock);
1516 
1517 	DPRINTF(1,("%s: cmd %d %s (flags=%08x error=%d)\n", HDEVNAME(hp),
1518 	    cmd->c_opcode, (cmd->c_error == 0) ? "done" : "abort",
1519 	    cmd->c_flags, cmd->c_error));
1520 }
1521 
1522 static int
1523 sdhc_start_command(struct sdhc_host *hp, struct sdmmc_command *cmd)
1524 {
1525 	struct sdhc_softc * const sc = hp->sc;
1526 	uint16_t blksize = 0;
1527 	uint16_t blkcount = 0;
1528 	uint16_t mode;
1529 	uint16_t command;
1530 	uint32_t pmask;
1531 	int error;
1532 
1533 	KASSERT(mutex_owned(&hp->intr_lock));
1534 
1535 	DPRINTF(1,("%s: start cmd %d arg=%08x data=%p dlen=%d flags=%08x, status=%#x\n",
1536 	    HDEVNAME(hp), cmd->c_opcode, cmd->c_arg, cmd->c_data,
1537 	    cmd->c_datalen, cmd->c_flags, HREAD4(hp, SDHC_NINTR_STATUS)));
1538 
1539 	/*
1540 	 * The maximum block length for commands should be the minimum
1541 	 * of the host buffer size and the card buffer size. (1.7.2)
1542 	 */
1543 
1544 	/* Fragment the data into proper blocks. */
1545 	if (cmd->c_datalen > 0) {
1546 		blksize = MIN(cmd->c_datalen, cmd->c_blklen);
1547 		blkcount = cmd->c_datalen / blksize;
1548 		if (cmd->c_datalen % blksize > 0) {
1549 			/* XXX: Split this command. (1.7.4) */
1550 			aprint_error_dev(sc->sc_dev,
1551 			    "data not a multiple of %u bytes\n", blksize);
1552 			return EINVAL;
1553 		}
1554 	}
1555 
1556 	/* Check limit imposed by 9-bit block count. (1.7.2) */
1557 	if (blkcount > SDHC_BLOCK_COUNT_MAX) {
1558 		aprint_error_dev(sc->sc_dev, "too much data\n");
1559 		return EINVAL;
1560 	}
1561 
1562 	/* Prepare transfer mode register value. (2.2.5) */
1563 	mode = SDHC_BLOCK_COUNT_ENABLE;
1564 	if (ISSET(cmd->c_flags, SCF_CMD_READ))
1565 		mode |= SDHC_READ_MODE;
1566 	if (blkcount > 1) {
1567 		mode |= SDHC_MULTI_BLOCK_MODE;
1568 		/* XXX only for memory commands? */
1569 		mode |= SDHC_AUTO_CMD12_ENABLE;
1570 	}
1571 	if (cmd->c_dmamap != NULL && cmd->c_datalen > 0 &&
1572 	    ISSET(hp->flags,  SHF_MODE_DMAEN)) {
1573 		mode |= SDHC_DMA_ENABLE;
1574 	}
1575 
1576 	/*
1577 	 * Prepare command register value. (2.2.6)
1578 	 */
1579 	command = (cmd->c_opcode & SDHC_COMMAND_INDEX_MASK) << SDHC_COMMAND_INDEX_SHIFT;
1580 
1581 	if (ISSET(cmd->c_flags, SCF_RSP_CRC))
1582 		command |= SDHC_CRC_CHECK_ENABLE;
1583 	if (ISSET(cmd->c_flags, SCF_RSP_IDX))
1584 		command |= SDHC_INDEX_CHECK_ENABLE;
1585 	if (cmd->c_datalen > 0)
1586 		command |= SDHC_DATA_PRESENT_SELECT;
1587 
1588 	if (!ISSET(cmd->c_flags, SCF_RSP_PRESENT))
1589 		command |= SDHC_NO_RESPONSE;
1590 	else if (ISSET(cmd->c_flags, SCF_RSP_136))
1591 		command |= SDHC_RESP_LEN_136;
1592 	else if (ISSET(cmd->c_flags, SCF_RSP_BSY))
1593 		command |= SDHC_RESP_LEN_48_CHK_BUSY;
1594 	else
1595 		command |= SDHC_RESP_LEN_48;
1596 
1597 	/* Wait until command and optionally data inhibit bits are clear. (1.5) */
1598 	pmask = SDHC_CMD_INHIBIT_CMD;
1599 	if (cmd->c_flags & (SCF_CMD_ADTC|SCF_RSP_BSY))
1600 		pmask |= SDHC_CMD_INHIBIT_DAT;
1601 	error = sdhc_wait_state(hp, pmask, 0);
1602 	if (error) {
1603 		(void) sdhc_soft_reset(hp, SDHC_RESET_DAT|SDHC_RESET_CMD);
1604 		device_printf(sc->sc_dev, "command or data phase inhibited\n");
1605 		return error;
1606 	}
1607 
1608 	DPRINTF(1,("%s: writing cmd: blksize=%d blkcnt=%d mode=%04x cmd=%04x\n",
1609 	    HDEVNAME(hp), blksize, blkcount, mode, command));
1610 
1611 	if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED)) {
1612 		blksize |= (MAX(0, PAGE_SHIFT - 12) & SDHC_DMA_BOUNDARY_MASK) <<
1613 		    SDHC_DMA_BOUNDARY_SHIFT;	/* PAGE_SIZE DMA boundary */
1614 	}
1615 
1616 	if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED)) {
1617 		/* Alert the user not to remove the card. */
1618 		HSET1(hp, SDHC_HOST_CTL, SDHC_LED_ON);
1619 	}
1620 
1621 	/* Set DMA start address. */
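	/*
	 * For ADMA2 the transfer is described by a descriptor table in
	 * hp->adma2, one entry per DMA segment: each carries
	 * VALID|ACT_TRANS, the segment length (0 encodes the maximum of
	 * 65536 bytes) and the segment address, END is set on the last
	 * entry, and a cleared attribute word terminates the table.
	 */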
1622 	if (ISSET(hp->flags, SHF_USE_ADMA2_MASK) && cmd->c_data != NULL) {
1623 		for (int seg = 0; seg < cmd->c_dmamap->dm_nsegs; seg++) {
1624 			bus_addr_t paddr =
1625 			    cmd->c_dmamap->dm_segs[seg].ds_addr;
1626 			uint16_t len =
1627 			    cmd->c_dmamap->dm_segs[seg].ds_len == 65536 ?
1628 			    0 : cmd->c_dmamap->dm_segs[seg].ds_len;
1629 			uint16_t attr =
1630 			    SDHC_ADMA2_VALID | SDHC_ADMA2_ACT_TRANS;
1631 			if (seg == cmd->c_dmamap->dm_nsegs - 1) {
1632 				attr |= SDHC_ADMA2_END;
1633 			}
1634 			if (ISSET(hp->flags, SHF_USE_ADMA2_32)) {
1635 				struct sdhc_adma2_descriptor32 *desc =
1636 				    hp->adma2;
1637 				desc[seg].attribute = htole16(attr);
1638 				desc[seg].length = htole16(len);
1639 				desc[seg].address = htole32(paddr);
1640 			} else {
1641 				struct sdhc_adma2_descriptor64 *desc =
1642 				    hp->adma2;
1643 				desc[seg].attribute = htole16(attr);
1644 				desc[seg].length = htole16(len);
1645 				desc[seg].address = htole32(paddr & 0xffffffff);
1646 				desc[seg].address_hi = htole32(
1647 				    (uint64_t)paddr >> 32);
1648 			}
1649 		}
1650 		if (ISSET(hp->flags, SHF_USE_ADMA2_32)) {
1651 			struct sdhc_adma2_descriptor32 *desc = hp->adma2;
1652 			desc[cmd->c_dmamap->dm_nsegs].attribute = htole16(0);
1653 		} else {
1654 			struct sdhc_adma2_descriptor64 *desc = hp->adma2;
1655 			desc[cmd->c_dmamap->dm_nsegs].attribute = htole16(0);
1656 		}
1657 		bus_dmamap_sync(sc->sc_dmat, hp->adma_map, 0, PAGE_SIZE,
1658 		    BUS_DMASYNC_PREWRITE);
1659 		HCLR1(hp, SDHC_HOST_CTL, SDHC_DMA_SELECT);
1660 		HSET1(hp, SDHC_HOST_CTL, SDHC_DMA_SELECT_ADMA2);
1661 
1662 		const bus_addr_t desc_addr = hp->adma_map->dm_segs[0].ds_addr;
1663 
1664 		HWRITE4(hp, SDHC_ADMA_SYSTEM_ADDR, desc_addr & 0xffffffff);
1665 		if (ISSET(hp->flags, SHF_USE_ADMA2_64)) {
1666 			HWRITE4(hp, SDHC_ADMA_SYSTEM_ADDR + 4,
1667 			    (uint64_t)desc_addr >> 32);
1668 		}
1669 	} else if (ISSET(mode, SDHC_DMA_ENABLE) &&
1670 	    !ISSET(sc->sc_flags, SDHC_FLAG_EXTERNAL_DMA)) {
1671 		HWRITE4(hp, SDHC_DMA_ADDR, cmd->c_dmamap->dm_segs[0].ds_addr);
1672 	}
1673 
1674 	/*
1675 	 * Start a CPU data transfer.  Writing to the high order byte
1676 	 * of the SDHC_COMMAND register triggers the SD command. (1.5)
1677 	 */
1678 	if (ISSET(hp->sc->sc_flags, SDHC_FLAG_32BIT_ACCESS)) {
1679 		HWRITE4(hp, SDHC_BLOCK_SIZE, blksize | (blkcount << 16));
1680 		HWRITE4(hp, SDHC_ARGUMENT, cmd->c_arg);
1681 		HWRITE4(hp, SDHC_TRANSFER_MODE, mode | (command << 16));
1682 	} else {
1683 		HWRITE2(hp, SDHC_BLOCK_SIZE, blksize);
1684 		HWRITE2(hp, SDHC_BLOCK_COUNT, blkcount);
1685 		HWRITE4(hp, SDHC_ARGUMENT, cmd->c_arg);
1686 		HWRITE2(hp, SDHC_TRANSFER_MODE, mode);
1687 		HWRITE2(hp, SDHC_COMMAND, command);
1688 	}
1689 
1690 	return 0;
1691 }
1692 
1693 static void
1694 sdhc_transfer_data(struct sdhc_host *hp, struct sdmmc_command *cmd)
1695 {
1696 	struct sdhc_softc *sc = hp->sc;
1697 	int error;
1698 
1699 	KASSERT(mutex_owned(&hp->intr_lock));
1700 
1701 	DPRINTF(1,("%s: data transfer: resp=%08x datalen=%u\n", HDEVNAME(hp),
1702 	    MMC_R1(cmd->c_resp), cmd->c_datalen));
1703 
1704 #ifdef SDHC_DEBUG
1705 	/* XXX I forgot why I wanted to know when this happens :-( */
1706 	if ((cmd->c_opcode == 52 || cmd->c_opcode == 53) &&
1707 	    ISSET(MMC_R1(cmd->c_resp), 0xcb00)) {
1708 		aprint_error_dev(hp->sc->sc_dev,
1709 		    "CMD52/53 error response flags %#x\n",
1710 		    MMC_R1(cmd->c_resp) & 0xff00);
1711 	}
1712 #endif
1713 
1714 	if (cmd->c_dmamap != NULL) {
1715 		if (hp->sc->sc_vendor_transfer_data_dma != NULL) {
1716 			error = hp->sc->sc_vendor_transfer_data_dma(sc, cmd);
1717 			if (error == 0 && !sdhc_wait_intr(hp,
1718 			    SDHC_TRANSFER_COMPLETE, SDHC_DMA_TIMEOUT, false)) {
1719 				DPRINTF(1,("%s: timeout\n", __func__));
1720 				error = ETIMEDOUT;
1721 			}
1722 		} else {
1723 			error = sdhc_transfer_data_dma(hp, cmd);
1724 		}
1725 	} else
1726 		error = sdhc_transfer_data_pio(hp, cmd);
1727 	if (error)
1728 		cmd->c_error = error;
1729 	SET(cmd->c_flags, SCF_ITSDONE);
1730 
1731 	DPRINTF(1,("%s: data transfer done (error=%d)\n",
1732 	    HDEVNAME(hp), cmd->c_error));
1733 }
1734 
1735 static int
1736 sdhc_transfer_data_dma(struct sdhc_host *hp, struct sdmmc_command *cmd)
1737 {
1738 	bus_dma_segment_t *dm_segs = cmd->c_dmamap->dm_segs;
1739 	bus_addr_t posaddr;
1740 	bus_addr_t segaddr;
1741 	bus_size_t seglen;
1742 	u_int seg = 0;
1743 	int error = 0;
1744 	int status;
1745 
1746 	KASSERT(mutex_owned(&hp->intr_lock));
1747 	KASSERT(HREAD2(hp, SDHC_NINTR_STATUS_EN) & SDHC_DMA_INTERRUPT);
1748 	KASSERT(HREAD2(hp, SDHC_NINTR_SIGNAL_EN) & SDHC_DMA_INTERRUPT);
1749 	KASSERT(HREAD2(hp, SDHC_NINTR_STATUS_EN) & SDHC_TRANSFER_COMPLETE);
1750 	KASSERT(HREAD2(hp, SDHC_NINTR_SIGNAL_EN) & SDHC_TRANSFER_COMPLETE);
1751 
1752 	for (;;) {
1753 		status = sdhc_wait_intr(hp,
1754 		    SDHC_DMA_INTERRUPT|SDHC_TRANSFER_COMPLETE,
1755 		    SDHC_DMA_TIMEOUT, false);
1756 
1757 		if (status & SDHC_TRANSFER_COMPLETE) {
1758 			break;
1759 		}
1760 		if (!status) {
1761 			DPRINTF(1,("%s: timeout\n", __func__));
1762 			error = ETIMEDOUT;
1763 			break;
1764 		}
1765 
1766 		if (ISSET(hp->flags, SHF_USE_ADMA2_MASK)) {
1767 			continue;
1768 		}
1769 
1770 		if ((status & SDHC_DMA_INTERRUPT) == 0) {
1771 			continue;
1772 		}
1773 
1774 		/* DMA Interrupt (boundary crossing) */
1775 
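		/*
		 * In SDMA mode the controller pauses at every DMA boundary
		 * and raises SDHC_DMA_INTERRUPT.  Rewriting SDHC_DMA_ADDR
		 * with the current position resumes the transfer; once the
		 * position reaches the end of the current segment, the next
		 * segment's start address is loaded instead.
		 */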
1776 		segaddr = dm_segs[seg].ds_addr;
1777 		seglen = dm_segs[seg].ds_len;
1778 		posaddr = HREAD4(hp, SDHC_DMA_ADDR);
1779 
1780 		if ((seg == (cmd->c_dmamap->dm_nsegs-1)) && (posaddr == (segaddr + seglen))) {
1781 			continue;
1782 		}
1783 		if ((posaddr >= segaddr) && (posaddr < (segaddr + seglen)))
1784 			HWRITE4(hp, SDHC_DMA_ADDR, posaddr);
1785 		else if ((posaddr >= segaddr) && (posaddr == (segaddr + seglen)) && (seg + 1) < cmd->c_dmamap->dm_nsegs)
1786 			HWRITE4(hp, SDHC_DMA_ADDR, dm_segs[++seg].ds_addr);
1787 		KASSERT(seg < cmd->c_dmamap->dm_nsegs);
1788 	}
1789 
1790 	if (ISSET(hp->flags, SHF_USE_ADMA2_MASK)) {
1791 		bus_dmamap_sync(hp->sc->sc_dmat, hp->adma_map, 0,
1792 		    PAGE_SIZE, BUS_DMASYNC_POSTWRITE);
1793 	}
1794 
1795 	return error;
1796 }
1797 
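/*
 * Move data through the data port by programmed I/O, one block at a
 * time.  Each iteration waits for the controller to signal that its
 * buffer is ready (SDHC_BUFFER_READ_READY/SDHC_BUFFER_WRITE_READY) and
 * then copies up to c_blklen bytes with the selected PIO helper.
 */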
1798 static int
1799 sdhc_transfer_data_pio(struct sdhc_host *hp, struct sdmmc_command *cmd)
1800 {
1801 	uint8_t *data = cmd->c_data;
1802 	void (*pio_func)(struct sdhc_host *, uint8_t *, u_int);
1803 	u_int len, datalen;
1804 	u_int imask;
1805 	u_int pmask;
1806 	int error = 0;
1807 
1808 	KASSERT(mutex_owned(&hp->intr_lock));
1809 
1810 	if (ISSET(cmd->c_flags, SCF_CMD_READ)) {
1811 		imask = SDHC_BUFFER_READ_READY;
1812 		pmask = SDHC_BUFFER_READ_ENABLE;
1813 		if (ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED)) {
1814 			pio_func = esdhc_read_data_pio;
1815 		} else {
1816 			pio_func = sdhc_read_data_pio;
1817 		}
1818 	} else {
1819 		imask = SDHC_BUFFER_WRITE_READY;
1820 		pmask = SDHC_BUFFER_WRITE_ENABLE;
1821 		if (ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED)) {
1822 			pio_func = esdhc_write_data_pio;
1823 		} else {
1824 			pio_func = sdhc_write_data_pio;
1825 		}
1826 	}
1827 	datalen = cmd->c_datalen;
1828 
1829 	KASSERT(mutex_owned(&hp->intr_lock));
1830 	KASSERT(HREAD2(hp, SDHC_NINTR_STATUS_EN) & imask);
1831 	KASSERT(HREAD2(hp, SDHC_NINTR_STATUS_EN) & SDHC_TRANSFER_COMPLETE);
1832 	KASSERT(HREAD2(hp, SDHC_NINTR_SIGNAL_EN) & SDHC_TRANSFER_COMPLETE);
1833 
1834 	while (datalen > 0) {
1835 		if (!ISSET(HREAD4(hp, SDHC_PRESENT_STATE), imask)) {
1836 			if (ISSET(hp->sc->sc_flags, SDHC_FLAG_32BIT_ACCESS)) {
1837 				HSET4(hp, SDHC_NINTR_SIGNAL_EN, imask);
1838 			} else {
1839 				HSET2(hp, SDHC_NINTR_SIGNAL_EN, imask);
1840 			}
1841 			if (!sdhc_wait_intr(hp, imask, SDHC_BUFFER_TIMEOUT, false)) {
1842 				DPRINTF(1,("%s: timeout\n", __func__));
1843 				error = ETIMEDOUT;
1844 				break;
1845 			}
1846 
1847 			error = sdhc_wait_state(hp, pmask, pmask);
1848 			if (error)
1849 				break;
1850 		}
1851 
1852 		len = MIN(datalen, cmd->c_blklen);
1853 		(*pio_func)(hp, data, len);
1854 		DPRINTF(2,("%s: pio data transfer %u @ %p\n",
1855 		    HDEVNAME(hp), len, data));
1856 
1857 		data += len;
1858 		datalen -= len;
1859 	}
1860 
1861 	if (error == 0 && !sdhc_wait_intr(hp, SDHC_TRANSFER_COMPLETE,
1862 	    SDHC_TRANSFER_TIMEOUT, false)) {
1863 		DPRINTF(1,("%s: timeout for transfer\n", __func__));
1864 		error = ETIMEDOUT;
1865 	}
1866 
1867 	return error;
1868 }
1869 
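/*
 * Plain PIO copy helpers: drain or fill the data port using the widest
 * access the buffer's alignment allows (32-bit, then 16-bit, then
 * single-byte accesses for any remainder).
 */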
1870 static void
1871 sdhc_read_data_pio(struct sdhc_host *hp, uint8_t *data, u_int datalen)
1872 {
1873 
1874 	if (((__uintptr_t)data & 3) == 0) {
1875 		while (datalen > 3) {
1876 			*(uint32_t *)data = le32toh(HREAD4(hp, SDHC_DATA));
1877 			data += 4;
1878 			datalen -= 4;
1879 		}
1880 		if (datalen > 1) {
1881 			*(uint16_t *)data = le16toh(HREAD2(hp, SDHC_DATA));
1882 			data += 2;
1883 			datalen -= 2;
1884 		}
1885 		if (datalen > 0) {
1886 			*data = HREAD1(hp, SDHC_DATA);
1887 			data += 1;
1888 			datalen -= 1;
1889 		}
1890 	} else if (((__uintptr_t)data & 1) == 0) {
1891 		while (datalen > 1) {
1892 			*(uint16_t *)data = le16toh(HREAD2(hp, SDHC_DATA));
1893 			data += 2;
1894 			datalen -= 2;
1895 		}
1896 		if (datalen > 0) {
1897 			*data = HREAD1(hp, SDHC_DATA);
1898 			data += 1;
1899 			datalen -= 1;
1900 		}
1901 	} else {
1902 		while (datalen > 0) {
1903 			*data = HREAD1(hp, SDHC_DATA);
1904 			data += 1;
1905 			datalen -= 1;
1906 		}
1907 	}
1908 }
1909 
1910 static void
1911 sdhc_write_data_pio(struct sdhc_host *hp, uint8_t *data, u_int datalen)
1912 {
1913 
1914 	if (((__uintptr_t)data & 3) == 0) {
1915 		while (datalen > 3) {
1916 			HWRITE4(hp, SDHC_DATA, htole32(*(uint32_t *)data));
1917 			data += 4;
1918 			datalen -= 4;
1919 		}
1920 		if (datalen > 1) {
1921 			HWRITE2(hp, SDHC_DATA, htole16(*(uint16_t *)data));
1922 			data += 2;
1923 			datalen -= 2;
1924 		}
1925 		if (datalen > 0) {
1926 			HWRITE1(hp, SDHC_DATA, *data);
1927 			data += 1;
1928 			datalen -= 1;
1929 		}
1930 	} else if (((__uintptr_t)data & 1) == 0) {
1931 		while (datalen > 1) {
1932 			HWRITE2(hp, SDHC_DATA, htole16(*(uint16_t *)data));
1933 			data += 2;
1934 			datalen -= 2;
1935 		}
1936 		if (datalen > 0) {
1937 			HWRITE1(hp, SDHC_DATA, *data);
1938 			data += 1;
1939 			datalen -= 1;
1940 		}
1941 	} else {
1942 		while (datalen > 0) {
1943 			HWRITE1(hp, SDHC_DATA, *data);
1944 			data += 1;
1945 			datalen -= 1;
1946 		}
1947 	}
1948 }
1949 
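/*
 * PIO helpers for controllers flagged SDHC_FLAG_ENHANCED (eSDHC-style).
 * The data port is always accessed 32 bits at a time and the watermark
 * level indicates how many words can be moved before the FIFO needs a
 * short pause (sdmmc_delay) to catch up; both loops also stop early if
 * the controller reports SDHC_TRANSFER_COMPLETE.
 */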
1950 static void
1951 esdhc_read_data_pio(struct sdhc_host *hp, uint8_t *data, u_int datalen)
1952 {
1953 	uint16_t status = HREAD2(hp, SDHC_NINTR_STATUS);
1954 	uint32_t v;
1955 
1956 	const size_t watermark =
	    (HREAD4(hp, SDHC_WATERMARK_LEVEL) >> SDHC_WATERMARK_READ_SHIFT) &
	    SDHC_WATERMARK_READ_MASK;
1957 	size_t count = 0;
1958 
1959 	while (datalen > 3 && !ISSET(status, SDHC_TRANSFER_COMPLETE)) {
1960 		if (count == 0) {
1961 			/*
1962 			 * If we've drained "watermark" words, we need to wait
1963 			 * a little bit so the read FIFO can refill.
1964 			 */
1965 			sdmmc_delay(10);
1966 			count = watermark;
1967 		}
1968 		v = HREAD4(hp, SDHC_DATA);
1969 		v = le32toh(v);
1970 		*(uint32_t *)data = v;
1971 		data += 4;
1972 		datalen -= 4;
1973 		status = HREAD2(hp, SDHC_NINTR_STATUS);
1974 		count--;
1975 	}
1976 	if (datalen > 0 && !ISSET(status, SDHC_TRANSFER_COMPLETE)) {
1977 		if (count == 0) {
1978 			sdmmc_delay(10);
1979 		}
1980 		v = HREAD4(hp, SDHC_DATA);
1981 		v = le32toh(v);
1982 		do {
1983 			*data++ = v;
1984 			v >>= 8;
1985 		} while (--datalen > 0);
1986 	}
1987 }
1988 
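/*
 * Note that the final partial word, if any, is still written with a
 * full 32-bit access, so up to three bytes beyond 'datalen' are read
 * from the source buffer; this presumably relies on the caller's
 * buffer being safe to over-read.
 */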
1989 static void
1990 esdhc_write_data_pio(struct sdhc_host *hp, uint8_t *data, u_int datalen)
1991 {
1992 	uint16_t status = HREAD2(hp, SDHC_NINTR_STATUS);
1993 	uint32_t v;
1994 
1995 	const size_t watermark =
	    (HREAD4(hp, SDHC_WATERMARK_LEVEL) >> SDHC_WATERMARK_WRITE_SHIFT) &
	    SDHC_WATERMARK_WRITE_MASK;
1996 	size_t count = watermark;
1997 
1998 	while (datalen > 3 && !ISSET(status, SDHC_TRANSFER_COMPLETE)) {
1999 		if (count == 0) {
2000 			sdmmc_delay(10);
2001 			count = watermark;
2002 		}
2003 		v = *(uint32_t *)data;
2004 		v = htole32(v);
2005 		HWRITE4(hp, SDHC_DATA, v);
2006 		data += 4;
2007 		datalen -= 4;
2008 		status = HREAD2(hp, SDHC_NINTR_STATUS);
2009 		count--;
2010 	}
2011 	if (datalen > 0 && !ISSET(status, SDHC_TRANSFER_COMPLETE)) {
2012 		if (count == 0) {
2013 			sdmmc_delay(10);
2014 		}
2015 		v = *(uint32_t *)data;
2016 		v = htole32(v);
2017 		HWRITE4(hp, SDHC_DATA, v);
2018 	}
2019 }
2020 
2021 /*
 * Prepare for another command: request a software reset of the parts of
 * the controller selected by 'mask' (SDHC_RESET_CMD, SDHC_RESET_DAT or
 * SDHC_RESET_ALL) and wait for the reset to complete.
 */
2022 static int
2023 sdhc_soft_reset(struct sdhc_host *hp, int mask)
2024 {
2025 	int timo;
2026 
2027 	KASSERT(mutex_owned(&hp->intr_lock));
2028 
2029 	DPRINTF(1,("%s: software reset reg=%08x\n", HDEVNAME(hp), mask));
2030 
2031 	/* Request the reset.  */
2032 	HWRITE1(hp, SDHC_SOFTWARE_RESET, mask);
2033 
2034 	/*
2035 	 * If necessary, wait for the controller to set the bits to
2036 	 * acknowledge the reset.
2037 	 */
2038 	if (ISSET(hp->sc->sc_flags, SDHC_FLAG_WAIT_RESET) &&
2039 	    ISSET(mask, (SDHC_RESET_DAT | SDHC_RESET_CMD))) {
2040 		for (timo = 10000; timo > 0; timo--) {
2041 			if (ISSET(HREAD1(hp, SDHC_SOFTWARE_RESET), mask))
2042 				break;
2043 			/* Short delay because I worry we may miss it...  */
2044 			sdmmc_delay(1);
2045 		}
2046 		if (timo == 0) {
2047 			DPRINTF(1,("%s: timeout waiting for reset ack\n", __func__));
2048 			return ETIMEDOUT;
2049 		}
2050 	}
2051 
2052 	/*
2053 	 * Wait for the controller to clear the bits to indicate that
2054 	 * the reset has completed.
2055 	 */
2056 	for (timo = 10; timo > 0; timo--) {
2057 		if (!ISSET(HREAD1(hp, SDHC_SOFTWARE_RESET), mask))
2058 			break;
2059 		sdmmc_delay(10000);
2060 	}
2061 	if (timo == 0) {
2062 		DPRINTF(1,("%s: timeout reg=%08x\n", HDEVNAME(hp),
2063 		    HREAD1(hp, SDHC_SOFTWARE_RESET)));
2064 		return ETIMEDOUT;
2065 	}
2066 
2067 	if (ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED)) {
2068 		HSET4(hp, SDHC_DMA_CTL, SDHC_DMA_SNOOP);
2069 	}
2070 
2071 	return 0;
2072 }
2073 
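/*
 * Sleep until at least one of the interrupt bits in 'mask' (or an error
 * interrupt) has been posted by sdhc_intr(), or until 'timo' ticks have
 * elapsed.  On error or timeout the CMD and DAT lines are reset (unless
 * the controller is flagged SDHC_FLAG_ENHANCED) and 0 is returned;
 * otherwise the matched status bits are returned.
 *
 * A typical caller waits for a single event and treats a zero return
 * as a timeout, e.g. (sketch):
 *
 *	if (!sdhc_wait_intr(hp, SDHC_TRANSFER_COMPLETE,
 *	    SDHC_TRANSFER_TIMEOUT, false))
 *		error = ETIMEDOUT;
 */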
2074 static int
2075 sdhc_wait_intr(struct sdhc_host *hp, int mask, int timo, bool probing)
2076 {
2077 	int status, error, nointr;
2078 
2079 	KASSERT(mutex_owned(&hp->intr_lock));
2080 
2081 	mask |= SDHC_ERROR_INTERRUPT;
2082 
2083 	nointr = 0;
2084 	status = hp->intr_status & mask;
2085 	while (status == 0) {
2086 		if (cv_timedwait(&hp->intr_cv, &hp->intr_lock, timo)
2087 		    == EWOULDBLOCK) {
2088 			nointr = 1;
2089 			break;
2090 		}
2091 		status = hp->intr_status & mask;
2092 	}
2093 	error = hp->intr_error_status;
2094 
2095 	DPRINTF(2,("%s: intr status %#x error %#x\n", HDEVNAME(hp), status,
2096 	    error));
2097 
2098 	hp->intr_status &= ~status;
2099 	hp->intr_error_status &= ~error;
2100 
2101 	if (ISSET(status, SDHC_ERROR_INTERRUPT)) {
2102 		if (ISSET(error, SDHC_DMA_ERROR))
2103 			device_printf(hp->sc->sc_dev,"dma error\n");
2104 		if (ISSET(error, SDHC_ADMA_ERROR))
2105 			device_printf(hp->sc->sc_dev,"adma error\n");
2106 		if (ISSET(error, SDHC_AUTO_CMD12_ERROR))
2107 			device_printf(hp->sc->sc_dev,"auto_cmd12 error\n");
2108 		if (ISSET(error, SDHC_CURRENT_LIMIT_ERROR))
2109 			device_printf(hp->sc->sc_dev,"current limit error\n");
2110 		if (ISSET(error, SDHC_DATA_END_BIT_ERROR))
2111 			device_printf(hp->sc->sc_dev,"data end bit error\n");
2112 		if (ISSET(error, SDHC_DATA_CRC_ERROR))
2113 			device_printf(hp->sc->sc_dev,"data crc error\n");
2114 		if (ISSET(error, SDHC_DATA_TIMEOUT_ERROR))
2115 			device_printf(hp->sc->sc_dev,"data timeout error\n");
2116 		if (ISSET(error, SDHC_CMD_INDEX_ERROR))
2117 			device_printf(hp->sc->sc_dev,"cmd index error\n");
2118 		if (ISSET(error, SDHC_CMD_END_BIT_ERROR))
2119 			device_printf(hp->sc->sc_dev,"cmd end bit error\n");
2120 		if (ISSET(error, SDHC_CMD_CRC_ERROR))
2121 			device_printf(hp->sc->sc_dev,"cmd crc error\n");
2122 		if (ISSET(error, SDHC_CMD_TIMEOUT_ERROR)) {
2123 			if (!probing)
2124 				device_printf(hp->sc->sc_dev,"cmd timeout error\n");
2125 #ifdef SDHC_DEBUG
2126 			else if (sdhcdebug > 0)
2127 				device_printf(hp->sc->sc_dev,"cmd timeout (expected)\n");
2128 #endif
2129 		}
2130 		if ((error & ~SDHC_EINTR_STATUS_MASK) != 0)
2131 			device_printf(hp->sc->sc_dev,"vendor error %#x\n",
2132 				(error & ~SDHC_EINTR_STATUS_MASK));
2133 		if (error == 0)
2134 			device_printf(hp->sc->sc_dev,"no error\n");
2135 
2136 		/* Command timeout has higher priority than command complete. */
2137 		if (ISSET(error, SDHC_CMD_TIMEOUT_ERROR))
2138 			CLR(status, SDHC_COMMAND_COMPLETE);
2139 
2140 		/* Transfer complete has higher priority than data timeout. */
2141 		if (ISSET(status, SDHC_TRANSFER_COMPLETE))
2142 			CLR(error, SDHC_DATA_TIMEOUT_ERROR);
2143 	}
2144 
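	/*
	 * On timeout, or when a genuine error was reported, reset the
	 * CMD and DAT lines so the controller is ready for the next
	 * command (skipped for SDHC_FLAG_ENHANCED controllers) and
	 * return no status bits to the caller.
	 */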
2145 	if (nointr ||
2146 	    (ISSET(status, SDHC_ERROR_INTERRUPT) && error)) {
2147 		if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED))
2148 			(void)sdhc_soft_reset(hp, SDHC_RESET_CMD|SDHC_RESET_DAT);
2149 		hp->intr_error_status = 0;
2150 		status = 0;
2151 	}
2152 
2153 	return status;
2154 }
2155 
2156 /*
2157  * Interrupt handler, established by the attachment driver at interrupt
2158  * priority IPL_SDMMC.  Checks every host on the controller, acknowledges
 * the pending status and error bits, and wakes any thread sleeping in
 * sdhc_wait_intr().
 */
2159 int
2160 sdhc_intr(void *arg)
2161 {
2162 	struct sdhc_softc *sc = (struct sdhc_softc *)arg;
2163 	struct sdhc_host *hp;
2164 	int done = 0;
2165 	uint16_t status;
2166 	uint16_t error;
2167 
2168 	/* We got an interrupt, but we don't know from which slot. */
2169 	for (size_t host = 0; host < sc->sc_nhosts; host++) {
2170 		hp = sc->sc_host[host];
2171 		if (hp == NULL)
2172 			continue;
2173 
2174 		mutex_enter(&hp->intr_lock);
2175 
2176 		if (ISSET(sc->sc_flags, SDHC_FLAG_32BIT_ACCESS)) {
2177 			/* Find out which interrupts are pending. */
2178 			uint32_t xstatus = HREAD4(hp, SDHC_NINTR_STATUS);
2179 			status = xstatus;
2180 			error = xstatus >> 16;
2181 			if (ISSET(sc->sc_flags, SDHC_FLAG_ENHANCED)) {
2182 				if ((error & SDHC_NINTR_STATUS_MASK) != 0)
2183 					SET(status, SDHC_ERROR_INTERRUPT);
2184 			}
2185 			if (error)
2186 				xstatus |= SDHC_ERROR_INTERRUPT;
2187 			else if (!ISSET(status, SDHC_NINTR_STATUS_MASK))
2188 				goto next_port; /* no interrupt for us */
2189 			/* Acknowledge the interrupts we are about to handle. */
2190 			HWRITE4(hp, SDHC_NINTR_STATUS, xstatus);
2191 		} else {
2192 			/* Find out which interrupts are pending. */
2193 			error = 0;
2194 			status = HREAD2(hp, SDHC_NINTR_STATUS);
2195 			if (!ISSET(status, SDHC_NINTR_STATUS_MASK))
2196 				goto next_port; /* no interrupt for us */
2197 			/* Acknowledge the interrupts we are about to handle. */
2198 			HWRITE2(hp, SDHC_NINTR_STATUS, status);
2199 			if (ISSET(status, SDHC_ERROR_INTERRUPT)) {
2200 				/* Acknowledge error interrupts. */
2201 				error = HREAD2(hp, SDHC_EINTR_STATUS);
2202 				HWRITE2(hp, SDHC_EINTR_STATUS, error);
2203 			}
2204 		}
2205 
2206 		DPRINTF(2,("%s: interrupt status=%x error=%x\n", HDEVNAME(hp),
2207 		    status, error));
2208 
2209 		/* Claim this interrupt. */
2210 		done = 1;
2211 
2212 		if (ISSET(status, SDHC_ERROR_INTERRUPT) &&
2213 		    ISSET(error, SDHC_ADMA_ERROR)) {
2214 			uint8_t adma_err = HREAD1(hp, SDHC_ADMA_ERROR_STATUS);
2215 			printf("%s: ADMA error, status %02x\n", HDEVNAME(hp),
2216 			    adma_err);
2217 		}
2218 
2219 		/*
2220 		 * Wake up the sdmmc event thread to scan for cards.
2221 		 */
2222 		if (ISSET(status, SDHC_CARD_REMOVAL|SDHC_CARD_INSERTION)) {
2223 			if (hp->sdmmc != NULL) {
2224 				sdmmc_needs_discover(hp->sdmmc);
2225 			}
2226 			if (ISSET(sc->sc_flags, SDHC_FLAG_ENHANCED)) {
2227 				HCLR4(hp, SDHC_NINTR_STATUS_EN,
2228 				    status & (SDHC_CARD_REMOVAL|SDHC_CARD_INSERTION));
2229 				HCLR4(hp, SDHC_NINTR_SIGNAL_EN,
2230 				    status & (SDHC_CARD_REMOVAL|SDHC_CARD_INSERTION));
2231 			}
2232 		}
2233 
2234 		/*
2235 		 * Schedule re-tuning process (UHS).
2236 		 */
2237 		if (ISSET(status, SDHC_RETUNING_EVENT)) {
2238 			atomic_swap_uint(&hp->tuning_timer_pending, 1);
2239 		}
2240 
2241 		/*
2242 		 * Wake up the blocking process to service command
2243 		 * related interrupt(s).
2244 		 */
2245 		if (ISSET(status, SDHC_COMMAND_COMPLETE|SDHC_ERROR_INTERRUPT|
2246 		    SDHC_BUFFER_READ_READY|SDHC_BUFFER_WRITE_READY|
2247 		    SDHC_TRANSFER_COMPLETE|SDHC_DMA_INTERRUPT)) {
2248 			hp->intr_error_status |= error;
2249 			hp->intr_status |= status;
2250 			if (ISSET(sc->sc_flags, SDHC_FLAG_ENHANCED)) {
2251 				HCLR4(hp, SDHC_NINTR_SIGNAL_EN,
2252 				    status & (SDHC_BUFFER_READ_READY|SDHC_BUFFER_WRITE_READY));
2253 			}
2254 			cv_broadcast(&hp->intr_cv);
2255 		}
2256 
2257 		/*
2258 		 * Service SD card interrupts.
2259 		 */
2260 		if (!ISSET(sc->sc_flags, SDHC_FLAG_ENHANCED)
2261 		    && ISSET(status, SDHC_CARD_INTERRUPT)) {
2262 			DPRINTF(0,("%s: card interrupt\n", HDEVNAME(hp)));
2263 			HCLR2(hp, SDHC_NINTR_STATUS_EN, SDHC_CARD_INTERRUPT);
2264 			sdmmc_card_intr(hp->sdmmc);
2265 		}
2266 next_port:
2267 		mutex_exit(&hp->intr_lock);
2268 	}
2269 
2270 	return done;
2271 }
2272 
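/*
 * Export the per-host interrupt lock so the sdmmc layer can serialize
 * with this driver's interrupt handling.
 */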
2273 kmutex_t *
2274 sdhc_host_lock(struct sdhc_host *hp)
2275 {
2276 	return &hp->intr_lock;
2277 }
2278 
2279 #ifdef SDHC_DEBUG
2280 void
2281 sdhc_dump_regs(struct sdhc_host *hp)
2282 {
2283 
2284 	printf("0x%02x PRESENT_STATE:    %x\n", SDHC_PRESENT_STATE,
2285 	    HREAD4(hp, SDHC_PRESENT_STATE));
2286 	if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED))
2287 		printf("0x%02x POWER_CTL:        %x\n", SDHC_POWER_CTL,
2288 		    HREAD1(hp, SDHC_POWER_CTL));
2289 	printf("0x%02x NINTR_STATUS:     %x\n", SDHC_NINTR_STATUS,
2290 	    HREAD2(hp, SDHC_NINTR_STATUS));
2291 	printf("0x%02x EINTR_STATUS:     %x\n", SDHC_EINTR_STATUS,
2292 	    HREAD2(hp, SDHC_EINTR_STATUS));
2293 	printf("0x%02x NINTR_STATUS_EN:  %x\n", SDHC_NINTR_STATUS_EN,
2294 	    HREAD2(hp, SDHC_NINTR_STATUS_EN));
2295 	printf("0x%02x EINTR_STATUS_EN:  %x\n", SDHC_EINTR_STATUS_EN,
2296 	    HREAD2(hp, SDHC_EINTR_STATUS_EN));
2297 	printf("0x%02x NINTR_SIGNAL_EN:  %x\n", SDHC_NINTR_SIGNAL_EN,
2298 	    HREAD2(hp, SDHC_NINTR_SIGNAL_EN));
2299 	printf("0x%02x EINTR_SIGNAL_EN:  %x\n", SDHC_EINTR_SIGNAL_EN,
2300 	    HREAD2(hp, SDHC_EINTR_SIGNAL_EN));
2301 	printf("0x%02x CAPABILITIES:     %x\n", SDHC_CAPABILITIES,
2302 	    HREAD4(hp, SDHC_CAPABILITIES));
2303 	printf("0x%02x MAX_CAPABILITIES: %x\n", SDHC_MAX_CAPABILITIES,
2304 	    HREAD4(hp, SDHC_MAX_CAPABILITIES));
2305 }
2306 #endif
2307