1 /* $NetBSD: sdhc.c,v 1.119 2024/05/09 01:33:13 dyoung Exp $ */
2 /* $OpenBSD: sdhc.c,v 1.25 2009/01/13 19:44:20 grange Exp $ */
3
4 /*
5 * Copyright (c) 2006 Uwe Stuehler <uwe@openbsd.org>
6 *
7 * Permission to use, copy, modify, and distribute this software for any
8 * purpose with or without fee is hereby granted, provided that the above
9 * copyright notice and this permission notice appear in all copies.
10 *
11 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18 */
19
20 /*
21 * SD Host Controller driver based on the SD Host Controller Standard
22  * Simplified Specification Version 1.00 (www.sdcard.org).
23 */
24
25 #include <sys/cdefs.h>
26 __KERNEL_RCSID(0, "$NetBSD: sdhc.c,v 1.119 2024/05/09 01:33:13 dyoung Exp $");
27
28 #ifdef _KERNEL_OPT
29 #include "opt_sdmmc.h"
30 #endif
31
32 #include <sys/param.h>
33 #include <sys/device.h>
34 #include <sys/kernel.h>
35 #include <sys/malloc.h>
36 #include <sys/systm.h>
37 #include <sys/mutex.h>
38 #include <sys/condvar.h>
39 #include <sys/atomic.h>
40
41 #include <dev/sdmmc/sdhcreg.h>
42 #include <dev/sdmmc/sdhcvar.h>
43 #include <dev/sdmmc/sdmmcchip.h>
44 #include <dev/sdmmc/sdmmcreg.h>
45 #include <dev/sdmmc/sdmmcvar.h>
46
47 #ifdef SDHC_DEBUG
48 int sdhcdebug = 1;
49 #define DPRINTF(n,s) do { if ((n) <= sdhcdebug) printf s; } while (0)
50 void sdhc_dump_regs(struct sdhc_host *);
51 #else
52 #define DPRINTF(n,s) do {} while (0)
53 #endif
54
55 #define SDHC_COMMAND_TIMEOUT hz
56 #define SDHC_BUFFER_TIMEOUT hz
57 #define SDHC_TRANSFER_TIMEOUT hz
58 #define SDHC_DMA_TIMEOUT (hz*3)
59 #define SDHC_TUNING_TIMEOUT hz
60
61 struct sdhc_host {
62 struct sdhc_softc *sc; /* host controller device */
63
64 bus_space_tag_t iot; /* host register set tag */
65 bus_space_handle_t ioh; /* host register set handle */
66 bus_size_t ios; /* host register space size */
67 bus_dma_tag_t dmat; /* host DMA tag */
68
69 device_t sdmmc; /* generic SD/MMC device */
70
71 u_int clkbase; /* base clock frequency in KHz */
72 int maxblklen; /* maximum block length */
73 uint32_t ocr; /* OCR value from capabilities */
74
75 uint8_t regs[14]; /* host controller state */
76
77 uint16_t intr_status; /* soft interrupt status */
78 uint16_t intr_error_status; /* soft error status */
79 kmutex_t intr_lock;
80 kmutex_t bus_clock_lock;
81 kcondvar_t intr_cv;
82
83 callout_t tuning_timer;
84 int tuning_timing;
85 u_int tuning_timer_count;
86 u_int tuning_timer_pending;
87
88 int specver; /* spec. version */
89
90 uint32_t flags; /* flags for this host */
91 #define SHF_USE_DMA 0x0001
92 #define SHF_USE_4BIT_MODE 0x0002
93 #define SHF_USE_8BIT_MODE 0x0004
94 #define SHF_MODE_DMAEN 0x0008 /* needs SDHC_DMA_ENABLE in mode */
95 #define SHF_USE_ADMA2_32 0x0010
96 #define SHF_USE_ADMA2_64 0x0020
97 #define SHF_USE_ADMA2_MASK 0x0030
98
99 bus_dmamap_t adma_map;
100 bus_dma_segment_t adma_segs[1];
101 void *adma2;
102
103 uint8_t vdd; /* last vdd setting */
104 };
105
106 #define HDEVNAME(hp) (device_xname((hp)->sc->sc_dev))
107
108 static uint8_t
109 hread1(struct sdhc_host *hp, bus_size_t reg)
110 {
111
112 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_32BIT_ACCESS))
113 return bus_space_read_1(hp->iot, hp->ioh, reg);
114 return bus_space_read_4(hp->iot, hp->ioh, reg & -4) >> (8 * (reg & 3));
115 }
116
117 static uint16_t
118 hread2(struct sdhc_host *hp, bus_size_t reg)
119 {
120
121 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_32BIT_ACCESS))
122 return bus_space_read_2(hp->iot, hp->ioh, reg);
123 return bus_space_read_4(hp->iot, hp->ioh, reg & -4) >> (8 * (reg & 2));
124 }
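
/*
 * Illustrative example (not part of the driver): on a controller that
 * tolerates only 32-bit accesses (SDHC_FLAG_32BIT_ACCESS), a 1-byte read
 * of, say, offset 0x29 is emulated by reading the aligned word at 0x28
 * and shifting right by 8 * (0x29 & 3) = 8 bits:
 *
 *	uint32_t w = bus_space_read_4(hp->iot, hp->ioh, 0x28);
 *	uint8_t  v = (uint8_t)(w >> 8);
 *
 * hread2() does the same for 16-bit registers, shifting by 0 or 16 bits
 * depending on which half of the aligned word is addressed.
 */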
125
126 #define HREAD1(hp, reg) hread1(hp, reg)
127 #define HREAD2(hp, reg) hread2(hp, reg)
128 #define HREAD4(hp, reg) \
129 (bus_space_read_4((hp)->iot, (hp)->ioh, (reg)))
130
131
132 static void
133 hwrite1(struct sdhc_host *hp, bus_size_t o, uint8_t val)
134 {
135
136 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_32BIT_ACCESS)) {
137 bus_space_write_1(hp->iot, hp->ioh, o, val);
138 } else {
139 const size_t shift = 8 * (o & 3);
140 o &= -4;
141 uint32_t tmp = bus_space_read_4(hp->iot, hp->ioh, o);
142 tmp = (val << shift) | (tmp & ~(0xffU << shift));
143 bus_space_write_4(hp->iot, hp->ioh, o, tmp);
144 }
145 if (hp->sc->sc_write_delay != 0) {
146 delay(hp->sc->sc_write_delay);
147 }
148 }
149
150 static void
151 hwrite2(struct sdhc_host *hp, bus_size_t o, uint16_t val)
152 {
153
154 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_32BIT_ACCESS)) {
155 bus_space_write_2(hp->iot, hp->ioh, o, val);
156 } else {
157 const size_t shift = 8 * (o & 2);
158 o &= -4;
159 uint32_t tmp = bus_space_read_4(hp->iot, hp->ioh, o);
160 tmp = (val << shift) | (tmp & ~(0xffffU << shift));
161 bus_space_write_4(hp->iot, hp->ioh, o, tmp);
162 }
163 if (hp->sc->sc_write_delay != 0) {
164 delay(hp->sc->sc_write_delay);
165 }
166 }
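
/*
 * For reference: with SDHC_FLAG_32BIT_ACCESS the sub-word writes above are
 * read-modify-write operations on the containing 32-bit word.  For example,
 * a 16-bit write of 0x1234 to offset 0x2e merges into the word at 0x2c
 * roughly like this (illustrative only):
 *
 *	uint32_t tmp = bus_space_read_4(hp->iot, hp->ioh, 0x2c);
 *	tmp = (0x1234 << 16) | (tmp & 0x0000ffff);
 *	bus_space_write_4(hp->iot, hp->ioh, 0x2c, tmp);
 *
 * so the neighbouring register sharing that word is rewritten with its
 * current contents.
 */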
167
168 static void
169 hwrite4(struct sdhc_host *hp, bus_size_t o, uint32_t val)
170 {
171
172 bus_space_write_4(hp->iot, hp->ioh, o, val);
173 if (hp->sc->sc_write_delay != 0) {
174 delay(hp->sc->sc_write_delay);
175 }
176 }
177
178 #define HWRITE1(hp, reg, val) hwrite1(hp, reg, val)
179 #define HWRITE2(hp, reg, val) hwrite2(hp, reg, val)
180 #define HWRITE4(hp, reg, val) hwrite4(hp, reg, val)
181
182 #define HCLR1(hp, reg, bits) \
183 do if ((bits) != 0) HWRITE1((hp), (reg), HREAD1((hp), (reg)) & ~(bits)); while (0)
184 #define HCLR2(hp, reg, bits) \
185 do if ((bits) != 0) HWRITE2((hp), (reg), HREAD2((hp), (reg)) & ~(bits)); while (0)
186 #define HCLR4(hp, reg, bits) \
187 do if ((bits) != 0) HWRITE4((hp), (reg), HREAD4((hp), (reg)) & ~(bits)); while (0)
188 #define HSET1(hp, reg, bits) \
189 do if ((bits) != 0) HWRITE1((hp), (reg), HREAD1((hp), (reg)) | (bits)); while (0)
190 #define HSET2(hp, reg, bits) \
191 do if ((bits) != 0) HWRITE2((hp), (reg), HREAD2((hp), (reg)) | (bits)); while (0)
192 #define HSET4(hp, reg, bits) \
193 do if ((bits) != 0) HWRITE4((hp), (reg), HREAD4((hp), (reg)) | (bits)); while (0)
194
195 static int sdhc_host_reset(sdmmc_chipset_handle_t);
196 static int sdhc_host_reset1(sdmmc_chipset_handle_t);
197 static uint32_t sdhc_host_ocr(sdmmc_chipset_handle_t);
198 static int sdhc_host_maxblklen(sdmmc_chipset_handle_t);
199 static int sdhc_card_detect(sdmmc_chipset_handle_t);
200 static int sdhc_write_protect(sdmmc_chipset_handle_t);
201 static int sdhc_bus_power(sdmmc_chipset_handle_t, uint32_t);
202 static int sdhc_bus_clock_ddr(sdmmc_chipset_handle_t, int, bool);
203 static int sdhc_bus_width(sdmmc_chipset_handle_t, int);
204 static int sdhc_bus_rod(sdmmc_chipset_handle_t, int);
205 static void sdhc_card_enable_intr(sdmmc_chipset_handle_t, int);
206 static void sdhc_card_intr_ack(sdmmc_chipset_handle_t);
207 static void sdhc_exec_command(sdmmc_chipset_handle_t,
208 struct sdmmc_command *);
209 static int sdhc_signal_voltage(sdmmc_chipset_handle_t, int);
210 static int sdhc_execute_tuning1(struct sdhc_host *, int);
211 static int sdhc_execute_tuning(sdmmc_chipset_handle_t, int);
212 static void sdhc_tuning_timer(void *);
213 static void sdhc_hw_reset(sdmmc_chipset_handle_t);
214 static int sdhc_start_command(struct sdhc_host *, struct sdmmc_command *);
215 static int sdhc_wait_state(struct sdhc_host *, uint32_t, uint32_t);
216 static int sdhc_soft_reset(struct sdhc_host *, int);
217 static int sdhc_wait_intr(struct sdhc_host *, int, int, bool);
218 static void sdhc_transfer_data(struct sdhc_host *, struct sdmmc_command *);
219 static int sdhc_transfer_data_dma(struct sdhc_host *, struct sdmmc_command *);
220 static int sdhc_transfer_data_pio(struct sdhc_host *, struct sdmmc_command *);
221 static void sdhc_read_data_pio(struct sdhc_host *, uint8_t *, u_int);
222 static void sdhc_write_data_pio(struct sdhc_host *, uint8_t *, u_int);
223 static void esdhc_read_data_pio(struct sdhc_host *, uint8_t *, u_int);
224 static void esdhc_write_data_pio(struct sdhc_host *, uint8_t *, u_int);
225
226 static struct sdmmc_chip_functions sdhc_functions = {
227 /* host controller reset */
228 .host_reset = sdhc_host_reset,
229
230 /* host controller capabilities */
231 .host_ocr = sdhc_host_ocr,
232 .host_maxblklen = sdhc_host_maxblklen,
233
234 /* card detection */
235 .card_detect = sdhc_card_detect,
236
237 /* write protect */
238 .write_protect = sdhc_write_protect,
239
240 /* bus power, clock frequency, width and ROD (open-drain/push-pull) */
241 .bus_power = sdhc_bus_power,
242 .bus_clock = NULL, /* see sdhc_bus_clock_ddr */
243 .bus_width = sdhc_bus_width,
244 .bus_rod = sdhc_bus_rod,
245
246 /* command execution */
247 .exec_command = sdhc_exec_command,
248
249 /* card interrupt */
250 .card_enable_intr = sdhc_card_enable_intr,
251 .card_intr_ack = sdhc_card_intr_ack,
252
253 /* UHS functions */
254 .signal_voltage = sdhc_signal_voltage,
255 .bus_clock_ddr = sdhc_bus_clock_ddr,
256 .execute_tuning = sdhc_execute_tuning,
257 .hw_reset = sdhc_hw_reset,
258 };
259
260 static int
261 sdhc_cfprint(void *aux, const char *pnp)
262 {
263 const struct sdmmcbus_attach_args * const saa = aux;
264 const struct sdhc_host * const hp = saa->saa_sch;
265
266 if (pnp) {
267 aprint_normal("sdmmc at %s", pnp);
268 }
269 for (size_t host = 0; host < hp->sc->sc_nhosts; host++) {
270 if (hp->sc->sc_host[host] == hp) {
271 aprint_normal(" slot %zu", host);
272 }
273 }
274
275 return UNCONF;
276 }
277
278 /*
279 * Called by attachment driver. For each SD card slot there is one SD
280 * host controller standard register set. (1.3)
281 */
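/*
 * A minimal sketch of that call sequence (hypothetical front-end code,
 * not taken from any particular attachment driver):
 *
 *	sc->sc_dev = self;
 *	sc->sc_dmat = dmat;
 *	if (sdhc_host_found(sc, iot, ioh, iosize) != 0)
 *		aprint_error_dev(self, "couldn't initialize host\n");
 *
 * sdhc_host_found() returns 0 on success and non-zero on failure, so a
 * front-end with several slots simply repeats the call per register set.
 */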
282 int
283 sdhc_host_found(struct sdhc_softc *sc, bus_space_tag_t iot,
284 bus_space_handle_t ioh, bus_size_t iosize)
285 {
286 struct sdmmcbus_attach_args saa;
287 struct sdhc_host *hp;
288 uint32_t caps, caps2;
289 uint16_t sdhcver;
290 int error;
291
292 /* Allocate one more host structure. */
293 hp = malloc(sizeof(struct sdhc_host), M_DEVBUF, M_WAITOK|M_ZERO);
294 if (hp == NULL) {
295 aprint_error_dev(sc->sc_dev,
296 "couldn't alloc memory (sdhc host)\n");
297 goto err1;
298 }
299 sc->sc_host[sc->sc_nhosts++] = hp;
300
301 /* Fill in the new host structure. */
302 hp->sc = sc;
303 hp->iot = iot;
304 hp->ioh = ioh;
305 hp->ios = iosize;
306 hp->dmat = sc->sc_dmat;
307
308 mutex_init(&hp->intr_lock, MUTEX_DEFAULT, IPL_SDMMC);
309 mutex_init(&hp->bus_clock_lock, MUTEX_DEFAULT, IPL_NONE);
310 cv_init(&hp->intr_cv, "sdhcintr");
311 callout_init(&hp->tuning_timer, CALLOUT_MPSAFE);
312 callout_setfunc(&hp->tuning_timer, sdhc_tuning_timer, hp);
313
314 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_USDHC)) {
315 sdhcver = SDHC_SPEC_VERS_300 << SDHC_SPEC_VERS_SHIFT;
316 } else if (ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED)) {
317 sdhcver = HREAD4(hp, SDHC_ESDHC_HOST_CTL_VERSION);
318 } else if (iosize <= SDHC_HOST_CTL_VERSION) {
319 sdhcver = SDHC_SPEC_NOVERS << SDHC_SPEC_VERS_SHIFT;
320 } else {
321 sdhcver = HREAD2(hp, SDHC_HOST_CTL_VERSION);
322 }
323 aprint_normal_dev(sc->sc_dev, "SDHC ");
324 hp->specver = SDHC_SPEC_VERSION(sdhcver);
325 switch (SDHC_SPEC_VERSION(sdhcver)) {
326 case SDHC_SPEC_VERS_100:
327 aprint_normal("1.0");
328 break;
329 case SDHC_SPEC_VERS_200:
330 aprint_normal("2.0");
331 break;
332 case SDHC_SPEC_VERS_300:
333 aprint_normal("3.0");
334 break;
335 case SDHC_SPEC_VERS_400:
336 aprint_normal("4.0");
337 break;
338 case SDHC_SPEC_VERS_410:
339 aprint_normal("4.1");
340 break;
341 case SDHC_SPEC_VERS_420:
342 aprint_normal("4.2");
343 break;
344 case SDHC_SPEC_NOVERS:
345 hp->specver = -1;
346 aprint_normal("NO-VERS");
347 break;
348 default:
349 aprint_normal("unknown version(0x%x)",
350 SDHC_SPEC_VERSION(sdhcver));
351 break;
352 }
353 if (SDHC_SPEC_VERSION(sdhcver) != SDHC_SPEC_NOVERS)
354 aprint_normal(", rev %u", SDHC_VENDOR_VERSION(sdhcver));
355
356 /*
357 * Reset the host controller and enable interrupts.
358 */
359 (void)sdhc_host_reset(hp);
360
361 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_USDHC)) {
362 /* init uSDHC registers */
363 HWRITE4(hp, SDHC_MMC_BOOT, 0);
364 HWRITE4(hp, SDHC_HOST_CTL, SDHC_USDHC_BURST_LEN_EN |
365 SDHC_USDHC_HOST_CTL_RESV23 | SDHC_USDHC_EMODE_LE);
366 HWRITE4(hp, SDHC_WATERMARK_LEVEL,
367 (0x10 << SDHC_WATERMARK_WR_BRST_SHIFT) |
368 (0x40 << SDHC_WATERMARK_WRITE_SHIFT) |
369 (0x10 << SDHC_WATERMARK_RD_BRST_SHIFT) |
370 (0x40 << SDHC_WATERMARK_READ_SHIFT));
371 HSET4(hp, SDHC_VEND_SPEC,
372 SDHC_VEND_SPEC_MBO |
373 SDHC_VEND_SPEC_CARD_CLK_SOFT_EN |
374 SDHC_VEND_SPEC_IPG_PERCLK_SOFT_EN |
375 SDHC_VEND_SPEC_HCLK_SOFT_EN |
376 SDHC_VEND_SPEC_IPG_CLK_SOFT_EN |
377 SDHC_VEND_SPEC_AC12_WR_CHKBUSY_EN |
378 SDHC_VEND_SPEC_FRC_SDCLK_ON);
379 }
380
381 /* Determine host capabilities. */
382 if (ISSET(sc->sc_flags, SDHC_FLAG_HOSTCAPS)) {
383 caps = sc->sc_caps;
384 caps2 = sc->sc_caps2;
385 } else if (ISSET(hp->sc->sc_flags, SDHC_FLAG_USDHC)) {
386 /* uSDHC capability register is a little bit different */
387 caps = HREAD4(hp, SDHC_CAPABILITIES);
388 caps |= SDHC_8BIT_SUPP;
389 if (caps & SDHC_ADMA1_SUPP)
390 caps |= SDHC_ADMA2_SUPP;
391 sc->sc_caps = caps;
392 /* uSDHC has no SDHC_CAPABILITIES2 register */
393 caps2 = sc->sc_caps2 = SDHC_SDR50_SUPP | SDHC_DDR50_SUPP;
394 } else {
395 caps = sc->sc_caps = HREAD4(hp, SDHC_CAPABILITIES);
396 if (hp->specver >= SDHC_SPEC_VERS_300) {
397 caps2 = sc->sc_caps2 = HREAD4(hp, SDHC_CAPABILITIES2);
398 } else {
399 caps2 = sc->sc_caps2 = 0;
400 }
401 }
402
403 aprint_verbose(", caps <%08x/%08x>", caps, caps2);
404
405 const u_int retuning_mode = (caps2 >> SDHC_RETUNING_MODES_SHIFT) &
406 SDHC_RETUNING_MODES_MASK;
407 if (retuning_mode == SDHC_RETUNING_MODE_1) {
408 hp->tuning_timer_count = (caps2 >> SDHC_TIMER_COUNT_SHIFT) &
409 SDHC_TIMER_COUNT_MASK;
410 if (hp->tuning_timer_count == 0xf)
411 hp->tuning_timer_count = 0;
412 if (hp->tuning_timer_count)
413 hp->tuning_timer_count =
414 1 << (hp->tuning_timer_count - 1);
415 }
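
	/*
	 * Worked example: a Capabilities2 timer-count field of 0x4 yields
	 * a re-tuning interval of 1 << (4 - 1) = 8 seconds; the value 0xf
	 * is treated as "no timer" above.
	 */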
416
417 /*
418 * Use DMA if the host system and the controller support it.
419  * Supports an integrated or external DMA engine, with or without
420 * SDHC_DMA_ENABLE in the command.
421 */
422 if (ISSET(sc->sc_flags, SDHC_FLAG_FORCE_DMA) ||
423 (ISSET(sc->sc_flags, SDHC_FLAG_USE_DMA) &&
424 ISSET(caps, SDHC_DMA_SUPPORT))) {
425 SET(hp->flags, SHF_USE_DMA);
426
427 if (ISSET(caps, SDHC_ADMA2_SUPP) &&
428 !ISSET(sc->sc_flags, SDHC_FLAG_BROKEN_ADMA)) {
429 SET(hp->flags, SHF_MODE_DMAEN);
430 /*
431 * 64-bit mode was present in the 2.00 spec, removed
432 * from 3.00, and re-added in 4.00 with a different
433 * descriptor layout. We only support 2.00 and 3.00
434 * descriptors for now.
435 */
436 if (hp->specver == SDHC_SPEC_VERS_200 &&
437 ISSET(caps, SDHC_64BIT_SYS_BUS)) {
438 SET(hp->flags, SHF_USE_ADMA2_64);
439 aprint_normal(", 64-bit ADMA2");
440 } else {
441 SET(hp->flags, SHF_USE_ADMA2_32);
442 aprint_normal(", 32-bit ADMA2");
443 }
444 } else {
445 if (!ISSET(sc->sc_flags, SDHC_FLAG_EXTERNAL_DMA) ||
446 ISSET(sc->sc_flags, SDHC_FLAG_EXTDMA_DMAEN))
447 SET(hp->flags, SHF_MODE_DMAEN);
448 if (sc->sc_vendor_transfer_data_dma) {
449 aprint_normal(", platform DMA");
450 } else {
451 aprint_normal(", SDMA");
452 }
453 }
454 } else {
455 aprint_normal(", PIO");
456 }
457
458 /*
459 * Determine the base clock frequency. (2.2.24)
460 */
461 if (hp->specver >= SDHC_SPEC_VERS_300) {
462 hp->clkbase = SDHC_BASE_V3_FREQ_KHZ(caps);
463 } else {
464 hp->clkbase = SDHC_BASE_FREQ_KHZ(caps);
465 }
466 if (hp->clkbase == 0 ||
467 ISSET(sc->sc_flags, SDHC_FLAG_NO_CLKBASE)) {
468 if (sc->sc_clkbase == 0) {
469 /* The attachment driver must tell us. */
470 aprint_error_dev(sc->sc_dev,
471 "unknown base clock frequency\n");
472 goto err;
473 }
474 hp->clkbase = sc->sc_clkbase;
475 }
476 if (hp->clkbase < 10000 || hp->clkbase > 10000 * 256) {
477 /* SDHC 1.0 supports only 10-63 MHz. */
478 aprint_error_dev(sc->sc_dev,
479 "base clock frequency out of range: %u MHz\n",
480 hp->clkbase / 1000);
481 goto err;
482 }
483 aprint_normal(", %u kHz", hp->clkbase);
484
485 /*
486 * XXX Set the data timeout counter value according to
487 * capabilities. (2.2.15)
488 */
489 HWRITE1(hp, SDHC_TIMEOUT_CTL, SDHC_TIMEOUT_MAX);
490 #if 1
491 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED))
492 HWRITE4(hp, SDHC_NINTR_STATUS, SDHC_CMD_TIMEOUT_ERROR << 16);
493 #endif
494
495 if (ISSET(caps, SDHC_EMBEDDED_SLOT))
496 aprint_normal(", embedded slot");
497
498 /*
499 * Determine SD bus voltage levels supported by the controller.
500 */
501 aprint_normal(",");
502 if (ISSET(caps, SDHC_HIGH_SPEED_SUPP)) {
503 SET(hp->ocr, MMC_OCR_HCS);
504 aprint_normal(" HS");
505 }
506 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_NO_1_8_V)) {
507 if (ISSET(caps2, SDHC_SDR50_SUPP)) {
508 SET(hp->ocr, MMC_OCR_S18A);
509 aprint_normal(" SDR50");
510 }
511 if (ISSET(caps2, SDHC_DDR50_SUPP)) {
512 SET(hp->ocr, MMC_OCR_S18A);
513 aprint_normal(" DDR50");
514 }
515 if (ISSET(caps2, SDHC_SDR104_SUPP)) {
516 SET(hp->ocr, MMC_OCR_S18A);
517 aprint_normal(" SDR104 HS200");
518 }
519 if (ISSET(caps, SDHC_VOLTAGE_SUPP_1_8V)) {
520 SET(hp->ocr, MMC_OCR_1_65V_1_95V);
521 aprint_normal(" 1.8V");
522 }
523 }
524 if (ISSET(caps, SDHC_VOLTAGE_SUPP_3_0V)) {
525 SET(hp->ocr, MMC_OCR_2_9V_3_0V | MMC_OCR_3_0V_3_1V);
526 aprint_normal(" 3.0V");
527 }
528 if (ISSET(caps, SDHC_VOLTAGE_SUPP_3_3V)) {
529 SET(hp->ocr, MMC_OCR_3_2V_3_3V | MMC_OCR_3_3V_3_4V);
530 aprint_normal(" 3.3V");
531 }
532 if (hp->specver >= SDHC_SPEC_VERS_300) {
533 aprint_normal(", re-tuning mode %d", retuning_mode + 1);
534 if (hp->tuning_timer_count)
535 aprint_normal(" (%us timer)", hp->tuning_timer_count);
536 }
537
538 /*
539 * Determine the maximum block length supported by the host
540 * controller. (2.2.24)
541 */
542 switch ((caps >> SDHC_MAX_BLK_LEN_SHIFT) & SDHC_MAX_BLK_LEN_MASK) {
543 case SDHC_MAX_BLK_LEN_512:
544 hp->maxblklen = 512;
545 break;
546
547 case SDHC_MAX_BLK_LEN_1024:
548 hp->maxblklen = 1024;
549 break;
550
551 case SDHC_MAX_BLK_LEN_2048:
552 hp->maxblklen = 2048;
553 break;
554
555 case SDHC_MAX_BLK_LEN_4096:
556 hp->maxblklen = 4096;
557 break;
558
559 default:
560 aprint_error_dev(sc->sc_dev, "max block length unknown\n");
561 goto err;
562 }
563 aprint_normal(", %u byte blocks", hp->maxblklen);
564 aprint_normal("\n");
565
566 if (ISSET(hp->flags, SHF_USE_ADMA2_MASK)) {
567 int rseg;
568
569 /* Allocate ADMA2 descriptor memory */
570 error = bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE, PAGE_SIZE,
571 PAGE_SIZE, hp->adma_segs, 1, &rseg, BUS_DMA_WAITOK);
572 if (error) {
573 aprint_error_dev(sc->sc_dev,
574 "ADMA2 dmamem_alloc failed (%d)\n", error);
575 goto adma_done;
576 }
577 error = bus_dmamem_map(sc->sc_dmat, hp->adma_segs, rseg,
578 PAGE_SIZE, (void **)&hp->adma2, BUS_DMA_WAITOK);
579 if (error) {
580 aprint_error_dev(sc->sc_dev,
581 "ADMA2 dmamem_map failed (%d)\n", error);
582 goto adma_done;
583 }
584 error = bus_dmamap_create(sc->sc_dmat, PAGE_SIZE, 1, PAGE_SIZE,
585 0, BUS_DMA_WAITOK, &hp->adma_map);
586 if (error) {
587 aprint_error_dev(sc->sc_dev,
588 "ADMA2 dmamap_create failed (%d)\n", error);
589 goto adma_done;
590 }
591 error = bus_dmamap_load(sc->sc_dmat, hp->adma_map,
592 hp->adma2, PAGE_SIZE, NULL,
593 BUS_DMA_WAITOK|BUS_DMA_WRITE);
594 if (error) {
595 aprint_error_dev(sc->sc_dev,
596 "ADMA2 dmamap_load failed (%d)\n", error);
597 goto adma_done;
598 }
599
600 memset(hp->adma2, 0, PAGE_SIZE);
601
602 adma_done:
603 if (error)
604 CLR(hp->flags, SHF_USE_ADMA2_MASK);
605 }
606
607 /*
608 * Attach the generic SD/MMC bus driver. (The bus driver must
609 * not invoke any chipset functions before it is attached.)
610 */
611 memset(&saa, 0, sizeof(saa));
612 saa.saa_busname = "sdmmc";
613 saa.saa_sct = &sdhc_functions;
614 saa.saa_sch = hp;
615 saa.saa_dmat = hp->dmat;
616 saa.saa_clkmax = hp->clkbase;
617 if (ISSET(sc->sc_flags, SDHC_FLAG_HAVE_CGM))
618 saa.saa_clkmin = hp->clkbase / 256 / 2046;
619 else if (ISSET(sc->sc_flags, SDHC_FLAG_HAVE_DVS))
620 saa.saa_clkmin = hp->clkbase / 256 / 16;
621 else if (hp->sc->sc_clkmsk != 0)
622 saa.saa_clkmin = hp->clkbase / (hp->sc->sc_clkmsk >>
623 (ffs(hp->sc->sc_clkmsk) - 1));
624 else if (hp->specver >= SDHC_SPEC_VERS_300)
625 saa.saa_clkmin = hp->clkbase / 0x3ff;
626 else
627 saa.saa_clkmin = hp->clkbase / 256;
628 if (!ISSET(sc->sc_flags, SDHC_FLAG_NO_AUTO_STOP))
629 saa.saa_caps |= SMC_CAPS_AUTO_STOP;
630 saa.saa_caps |= SMC_CAPS_4BIT_MODE;
631 if (ISSET(sc->sc_flags, SDHC_FLAG_8BIT_MODE))
632 saa.saa_caps |= SMC_CAPS_8BIT_MODE;
633 if (ISSET(caps, SDHC_HIGH_SPEED_SUPP))
634 saa.saa_caps |= SMC_CAPS_SD_HIGHSPEED |
635 SMC_CAPS_MMC_HIGHSPEED;
636 if (ISSET(caps2, SDHC_SDR104_SUPP))
637 saa.saa_caps |= SMC_CAPS_UHS_SDR104 |
638 SMC_CAPS_UHS_SDR50 |
639 SMC_CAPS_MMC_HS200;
640 if (ISSET(caps2, SDHC_SDR50_SUPP))
641 saa.saa_caps |= SMC_CAPS_UHS_SDR50;
642 if (ISSET(caps2, SDHC_DDR50_SUPP))
643 saa.saa_caps |= SMC_CAPS_UHS_DDR50;
644 if (ISSET(hp->flags, SHF_USE_DMA)) {
645 saa.saa_caps |= SMC_CAPS_DMA;
646 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED))
647 saa.saa_caps |= SMC_CAPS_MULTI_SEG_DMA;
648 }
649 if (ISSET(sc->sc_flags, SDHC_FLAG_SINGLE_ONLY))
650 saa.saa_caps |= SMC_CAPS_SINGLE_ONLY;
651 if (ISSET(sc->sc_flags, SDHC_FLAG_POLL_CARD_DET))
652 saa.saa_caps |= SMC_CAPS_POLL_CARD_DET;
653
654 if (ISSET(sc->sc_flags, SDHC_FLAG_BROKEN_ADMA2_ZEROLEN))
655 saa.saa_max_seg = 65535;
656
657 hp->sdmmc = config_found(sc->sc_dev, &saa, sdhc_cfprint, CFARGS_NONE);
658
659 return 0;
660
661 err:
662 callout_destroy(&hp->tuning_timer);
663 cv_destroy(&hp->intr_cv);
664 mutex_destroy(&hp->bus_clock_lock);
665 mutex_destroy(&hp->intr_lock);
666 free(hp, M_DEVBUF);
667 sc->sc_host[--sc->sc_nhosts] = NULL;
668 err1:
669 return 1;
670 }
671
672 int
673 sdhc_detach(struct sdhc_softc *sc, int flags)
674 {
675 struct sdhc_host *hp;
676 int rv = 0;
677
678 for (size_t n = 0; n < sc->sc_nhosts; n++) {
679 hp = sc->sc_host[n];
680 if (hp == NULL)
681 continue;
682 if (hp->sdmmc != NULL) {
683 rv = config_detach(hp->sdmmc, flags);
684 if (rv)
685 break;
686 hp->sdmmc = NULL;
687 }
688 /* disable interrupts */
689 if ((flags & DETACH_FORCE) == 0) {
690 mutex_enter(&hp->intr_lock);
691 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_32BIT_ACCESS)) {
692 HWRITE4(hp, SDHC_NINTR_SIGNAL_EN, 0);
693 } else {
694 HWRITE2(hp, SDHC_NINTR_SIGNAL_EN, 0);
695 }
696 sdhc_soft_reset(hp, SDHC_RESET_ALL);
697 mutex_exit(&hp->intr_lock);
698 }
699 callout_halt(&hp->tuning_timer, NULL);
700 callout_destroy(&hp->tuning_timer);
701 cv_destroy(&hp->intr_cv);
702 mutex_destroy(&hp->intr_lock);
703 if (hp->ios > 0) {
704 bus_space_unmap(hp->iot, hp->ioh, hp->ios);
705 hp->ios = 0;
706 }
707 if (ISSET(hp->flags, SHF_USE_ADMA2_MASK)) {
708 bus_dmamap_unload(sc->sc_dmat, hp->adma_map);
709 bus_dmamap_destroy(sc->sc_dmat, hp->adma_map);
710 bus_dmamem_unmap(sc->sc_dmat, hp->adma2, PAGE_SIZE);
711 bus_dmamem_free(sc->sc_dmat, hp->adma_segs, 1);
712 }
713 free(hp, M_DEVBUF);
714 sc->sc_host[n] = NULL;
715 }
716
717 return rv;
718 }
719
720 bool
721 sdhc_suspend(device_t dev, const pmf_qual_t *qual)
722 {
723 struct sdhc_softc *sc = device_private(dev);
724 struct sdhc_host *hp;
725 size_t i;
726
727 /* XXX poll for command completion or suspend command
728 * in progress */
729
730 /* Save the host controller state. */
731 for (size_t n = 0; n < sc->sc_nhosts; n++) {
732 hp = sc->sc_host[n];
733 if (ISSET(sc->sc_flags, SDHC_FLAG_32BIT_ACCESS)) {
734 for (i = 0; i < sizeof hp->regs; i += 4) {
735 uint32_t v = HREAD4(hp, i);
736 hp->regs[i + 0] = (v >> 0);
737 hp->regs[i + 1] = (v >> 8);
738 if (i + 3 < sizeof hp->regs) {
739 hp->regs[i + 2] = (v >> 16);
740 hp->regs[i + 3] = (v >> 24);
741 }
742 }
743 } else {
744 for (i = 0; i < sizeof hp->regs; i++) {
745 hp->regs[i] = HREAD1(hp, i);
746 }
747 }
748 }
749 return true;
750 }
751
752 bool
753 sdhc_resume(device_t dev, const pmf_qual_t *qual)
754 {
755 struct sdhc_softc *sc = device_private(dev);
756 struct sdhc_host *hp;
757 size_t i;
758
759 /* Restore the host controller state. */
760 for (size_t n = 0; n < sc->sc_nhosts; n++) {
761 hp = sc->sc_host[n];
762 (void)sdhc_host_reset(hp);
763 if (ISSET(sc->sc_flags, SDHC_FLAG_32BIT_ACCESS)) {
764 for (i = 0; i < sizeof hp->regs; i += 4) {
765 if (i + 3 < sizeof hp->regs) {
766 HWRITE4(hp, i,
767 (hp->regs[i + 0] << 0)
768 | (hp->regs[i + 1] << 8)
769 | (hp->regs[i + 2] << 16)
770 | (hp->regs[i + 3] << 24));
771 } else {
772 HWRITE4(hp, i,
773 (hp->regs[i + 0] << 0)
774 | (hp->regs[i + 1] << 8));
775 }
776 }
777 } else {
778 for (i = 0; i < sizeof hp->regs; i++) {
779 HWRITE1(hp, i, hp->regs[i]);
780 }
781 }
782 }
783 return true;
784 }
785
786 bool
787 sdhc_shutdown(device_t dev, int flags)
788 {
789 struct sdhc_softc *sc = device_private(dev);
790 struct sdhc_host *hp;
791
792 /* XXX chip locks up if we don't disable it before reboot. */
793 for (size_t i = 0; i < sc->sc_nhosts; i++) {
794 hp = sc->sc_host[i];
795 (void)sdhc_host_reset(hp);
796 }
797 return true;
798 }
799
800 /*
801 * Reset the host controller. Called during initialization, when
802 * cards are removed, upon resume, and during error recovery.
803 */
804 static int
805 sdhc_host_reset1(sdmmc_chipset_handle_t sch)
806 {
807 struct sdhc_host *hp = (struct sdhc_host *)sch;
808 uint32_t sdhcimask;
809 int error;
810
811 KASSERT(mutex_owned(&hp->intr_lock));
812
813 /* Disable all interrupts. */
814 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_32BIT_ACCESS)) {
815 HWRITE4(hp, SDHC_NINTR_SIGNAL_EN, 0);
816 } else {
817 HWRITE2(hp, SDHC_NINTR_SIGNAL_EN, 0);
818 }
819
820 /* Let sdhc_bus_power restore power */
821 hp->vdd = 0;
822
823 /*
824 * Reset the entire host controller and wait up to 100ms for
825 * the controller to clear the reset bit.
826 */
827 error = sdhc_soft_reset(hp, SDHC_RESET_ALL);
828 if (error)
829 goto out;
830
831 /* Set data timeout counter value to max for now. */
832 HWRITE1(hp, SDHC_TIMEOUT_CTL, SDHC_TIMEOUT_MAX);
833 #if 1
834 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED))
835 HWRITE4(hp, SDHC_NINTR_STATUS, SDHC_CMD_TIMEOUT_ERROR << 16);
836 #endif
837
838 /* Enable interrupts. */
839 sdhcimask = SDHC_CARD_REMOVAL | SDHC_CARD_INSERTION |
840 SDHC_BUFFER_READ_READY | SDHC_BUFFER_WRITE_READY |
841 SDHC_DMA_INTERRUPT | SDHC_BLOCK_GAP_EVENT |
842 SDHC_TRANSFER_COMPLETE | SDHC_COMMAND_COMPLETE;
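	/*
	 * Note (descriptive only): the XOR operations below flip bits in
	 * the mask.  In the 32-bit path the first XOR replaces the error
	 * *status* mask with the error *signal* mask in the upper 16 bits;
	 * in both paths the second XOR clears BUFFER_READ_READY and
	 * BUFFER_WRITE_READY again, so those conditions are latched in the
	 * status register but do not by themselves raise an interrupt.
	 */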
843 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_32BIT_ACCESS)) {
844 sdhcimask |= SDHC_EINTR_STATUS_MASK << 16;
845 HWRITE4(hp, SDHC_NINTR_STATUS_EN, sdhcimask);
846 sdhcimask ^=
847 (SDHC_EINTR_STATUS_MASK ^ SDHC_EINTR_SIGNAL_MASK) << 16;
848 sdhcimask ^= SDHC_BUFFER_READ_READY ^ SDHC_BUFFER_WRITE_READY;
849 HWRITE4(hp, SDHC_NINTR_SIGNAL_EN, sdhcimask);
850 } else {
851 HWRITE2(hp, SDHC_NINTR_STATUS_EN, sdhcimask);
852 HWRITE2(hp, SDHC_EINTR_STATUS_EN, SDHC_EINTR_STATUS_MASK);
853 sdhcimask ^= SDHC_BUFFER_READ_READY ^ SDHC_BUFFER_WRITE_READY;
854 HWRITE2(hp, SDHC_NINTR_SIGNAL_EN, sdhcimask);
855 HWRITE2(hp, SDHC_EINTR_SIGNAL_EN, SDHC_EINTR_SIGNAL_MASK);
856 }
857
858 out:
859 return error;
860 }
861
862 static int
863 sdhc_host_reset(sdmmc_chipset_handle_t sch)
864 {
865 struct sdhc_host *hp = (struct sdhc_host *)sch;
866 int error;
867
868 mutex_enter(&hp->intr_lock);
869 error = sdhc_host_reset1(sch);
870 mutex_exit(&hp->intr_lock);
871
872 return error;
873 }
874
875 static uint32_t
876 sdhc_host_ocr(sdmmc_chipset_handle_t sch)
877 {
878 struct sdhc_host *hp = (struct sdhc_host *)sch;
879
880 return hp->ocr;
881 }
882
883 static int
884 sdhc_host_maxblklen(sdmmc_chipset_handle_t sch)
885 {
886 struct sdhc_host *hp = (struct sdhc_host *)sch;
887
888 return hp->maxblklen;
889 }
890
891 /*
892 * Return non-zero if the card is currently inserted.
893 */
894 static int
895 sdhc_card_detect(sdmmc_chipset_handle_t sch)
896 {
897 struct sdhc_host *hp = (struct sdhc_host *)sch;
898 int r;
899
900 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_NON_REMOVABLE))
901 return 1;
902
903 if (hp->sc->sc_vendor_card_detect)
904 return (*hp->sc->sc_vendor_card_detect)(hp->sc);
905
906 r = ISSET(HREAD4(hp, SDHC_PRESENT_STATE), SDHC_CARD_INSERTED);
907
908 return r ? 1 : 0;
909 }
910
911 /*
912 * Return non-zero if the card is currently write-protected.
913 */
914 static int
915 sdhc_write_protect(sdmmc_chipset_handle_t sch)
916 {
917 struct sdhc_host *hp = (struct sdhc_host *)sch;
918 int r;
919
920 if (hp->sc->sc_vendor_write_protect)
921 return (*hp->sc->sc_vendor_write_protect)(hp->sc);
922
923 r = ISSET(HREAD4(hp, SDHC_PRESENT_STATE), SDHC_WRITE_PROTECT_SWITCH);
924
925 return r ? 0 : 1;
926 }
927
928 /*
929 * Set or change SD bus voltage and enable or disable SD bus power.
930 * Return zero on success.
931 */
932 static int
933 sdhc_bus_power(sdmmc_chipset_handle_t sch, uint32_t ocr)
934 {
935 struct sdhc_host *hp = (struct sdhc_host *)sch;
936 uint8_t vdd;
937 int error = 0;
938 const uint32_t pcmask =
939 ~(SDHC_BUS_POWER | (SDHC_VOLTAGE_MASK << SDHC_VOLTAGE_SHIFT));
940 uint32_t reg;
941
942 mutex_enter(&hp->intr_lock);
943
944 /*
945 * Disable bus power before voltage change.
946 */
947 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_32BIT_ACCESS)
948 && !ISSET(hp->sc->sc_flags, SDHC_FLAG_NO_PWR0)) {
949 hp->vdd = 0;
950 HWRITE1(hp, SDHC_POWER_CTL, 0);
951 }
952
953 /* If power is disabled, reset the host and return now. */
954 if (ocr == 0) {
955 (void)sdhc_host_reset1(hp);
956 callout_halt(&hp->tuning_timer, &hp->intr_lock);
957 goto out;
958 }
959
960 /*
961 * Select the lowest voltage according to capabilities.
962 */
963 ocr &= hp->ocr;
964 if (ISSET(ocr, MMC_OCR_1_65V_1_95V)) {
965 vdd = SDHC_VOLTAGE_1_8V;
966 } else if (ISSET(ocr, MMC_OCR_2_9V_3_0V|MMC_OCR_3_0V_3_1V)) {
967 vdd = SDHC_VOLTAGE_3_0V;
968 } else if (ISSET(ocr, MMC_OCR_3_2V_3_3V|MMC_OCR_3_3V_3_4V)) {
969 vdd = SDHC_VOLTAGE_3_3V;
970 } else {
971 /* Unsupported voltage level requested. */
972 error = EINVAL;
973 goto out;
974 }
975
976 /*
977  * Did the voltage change?
978 */
979 if (vdd == hp->vdd)
980 goto out;
981
982 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED)) {
983 /*
984 * Enable bus power. Wait at least 1 ms (or 74 clocks) plus
985 * voltage ramp until power rises.
986 */
987
988 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_SINGLE_POWER_WRITE)) {
989 HWRITE1(hp, SDHC_POWER_CTL,
990 (vdd << SDHC_VOLTAGE_SHIFT) | SDHC_BUS_POWER);
991 } else {
992 reg = HREAD1(hp, SDHC_POWER_CTL) & pcmask;
993 HWRITE1(hp, SDHC_POWER_CTL, reg);
994 sdmmc_delay(1);
995 reg |= (vdd << SDHC_VOLTAGE_SHIFT);
996 HWRITE1(hp, SDHC_POWER_CTL, reg);
997 sdmmc_delay(1);
998 reg |= SDHC_BUS_POWER;
999 HWRITE1(hp, SDHC_POWER_CTL, reg);
1000 sdmmc_delay(10000);
1001 }
1002
1003 /*
1004 * The host system may not power the bus due to battery low,
1005 * etc. In that case, the host controller should clear the
1006 * bus power bit.
1007 */
1008 if (!ISSET(HREAD1(hp, SDHC_POWER_CTL), SDHC_BUS_POWER)) {
1009 error = ENXIO;
1010 goto out;
1011 }
1012 }
1013
1014 /* power successfully changed */
1015 hp->vdd = vdd;
1016
1017 out:
1018 mutex_exit(&hp->intr_lock);
1019
1020 return error;
1021 }
1022
1023 /*
1024  * Compute the smallest base clock divisor value for CLOCK_CTL that yields
1025  * at most `freq' (kHz); store it in *divp and return true (false if none).
1026  */
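/*
 * Worked example (SDHC 3.0 10-bit divisor path below): with a 200000 kHz
 * base clock and a requested 400 kHz SDCLK, div = howmany(200000, 400) = 500,
 * then howmany(500, 2) = 250, and the resulting clock is
 * 200000 / (2 * 250) = 400 kHz.  The pre-3.0 path instead walks the
 * power-of-two divisors 1..256 and takes the first one that brings the
 * clock down to `freq' or below.
 */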
1027 static bool
1028 sdhc_clock_divisor(struct sdhc_host *hp, u_int freq, u_int *divp)
1029 {
1030 u_int div;
1031
1032 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_HAVE_CGM)) {
1033 for (div = hp->clkbase / freq; div <= 0x3ff; div++) {
1034 if ((hp->clkbase / div) <= freq) {
1035 *divp = SDHC_SDCLK_CGM
1036 | ((div & 0x300) << SDHC_SDCLK_XDIV_SHIFT)
1037 | ((div & 0x0ff) << SDHC_SDCLK_DIV_SHIFT);
1038 //freq = hp->clkbase / div;
1039 return true;
1040 }
1041 }
1042 /* No divisor found. */
1043 return false;
1044 }
1045 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_HAVE_DVS)) {
1046 u_int dvs = (hp->clkbase + freq - 1) / freq;
1047 u_int roundup = dvs & 1;
1048 for (dvs >>= 1, div = 1; div <= 256; div <<= 1, dvs >>= 1) {
1049 if (dvs + roundup <= 16) {
1050 dvs += roundup - 1;
1051 *divp = (div << SDHC_SDCLK_DIV_SHIFT)
1052 | (dvs << SDHC_SDCLK_DVS_SHIFT);
1053 DPRINTF(2,
1054 ("%s: divisor for freq %u is %u * %u\n",
1055 HDEVNAME(hp), freq, div * 2, dvs + 1));
1056 //freq = hp->clkbase / (div * 2) * (dvs + 1);
1057 return true;
1058 }
1059 /*
1060 * If we drop bits, we need to round up the divisor.
1061 */
1062 roundup |= dvs & 1;
1063 }
1064 /* No divisor found. */
1065 return false;
1066 }
1067 if (hp->sc->sc_clkmsk != 0) {
1068 div = howmany(hp->clkbase, freq);
1069 if (div > (hp->sc->sc_clkmsk >> (ffs(hp->sc->sc_clkmsk) - 1)))
1070 return false;
1071 *divp = div << (ffs(hp->sc->sc_clkmsk) - 1);
1072 //freq = hp->clkbase / div;
1073 return true;
1074 }
1075 if (hp->specver >= SDHC_SPEC_VERS_300) {
1076 div = howmany(hp->clkbase, freq);
1077 div = div > 1 ? howmany(div, 2) : 0;
1078 if (div > 0x3ff)
1079 return false;
1080 *divp = (((div >> 8) & SDHC_SDCLK_XDIV_MASK)
1081 << SDHC_SDCLK_XDIV_SHIFT) |
1082 (((div >> 0) & SDHC_SDCLK_DIV_MASK)
1083 << SDHC_SDCLK_DIV_SHIFT);
1084 //freq = hp->clkbase / (div ? div * 2 : 1);
1085 return true;
1086 } else {
1087 for (div = 1; div <= 256; div *= 2) {
1088 if ((hp->clkbase / div) <= freq) {
1089 *divp = (div / 2) << SDHC_SDCLK_DIV_SHIFT;
1090 //freq = hp->clkbase / div;
1091 return true;
1092 }
1093 }
1094 /* No divisor found. */
1095 return false;
1096 }
1097 /* No divisor found. */
1098 return false;
1099 }
1100
1101 /*
1102 * Set or change SDCLK frequency or disable the SD clock.
1103 * Return zero on success.
1104 */
1105 static int
1106 sdhc_bus_clock_ddr(sdmmc_chipset_handle_t sch, int freq, bool ddr)
1107 {
1108 struct sdhc_host *hp = (struct sdhc_host *)sch;
1109 u_int div;
1110 u_int timo;
1111 int16_t reg;
1112 int error = 0;
1113 bool present __diagused;
1114
1115 #ifdef DIAGNOSTIC
1116 present = ISSET(HREAD4(hp, SDHC_PRESENT_STATE), SDHC_CMD_INHIBIT_MASK);
1117
1118 /* Must not stop the clock if commands are in progress. */
1119 if (present && sdhc_card_detect(hp)) {
1120 aprint_normal_dev(hp->sc->sc_dev,
1121 "%s: command in progress\n", __func__);
1122 }
1123 #endif
1124
1125 if (hp->sc->sc_vendor_bus_clock) {
1126 mutex_enter(&hp->bus_clock_lock);
1127 error = (*hp->sc->sc_vendor_bus_clock)(hp->sc, freq);
1128 mutex_exit(&hp->bus_clock_lock);
1129 if (error != 0)
1130 return error;
1131 }
1132
1133 mutex_enter(&hp->intr_lock);
1134
1135 /*
1136 * Stop SD clock before changing the frequency.
1137 */
1138 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_USDHC)) {
1139 HCLR4(hp, SDHC_VEND_SPEC,
1140 SDHC_VEND_SPEC_CARD_CLK_SOFT_EN |
1141 SDHC_VEND_SPEC_FRC_SDCLK_ON);
1142 if (freq == SDMMC_SDCLK_OFF) {
1143 goto out;
1144 }
1145 } else if (ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED)) {
1146 HCLR4(hp, SDHC_CLOCK_CTL, 0xfff8);
1147 if (freq == SDMMC_SDCLK_OFF) {
1148 HSET4(hp, SDHC_CLOCK_CTL, 0x80f0);
1149 goto out;
1150 }
1151 } else {
1152 HCLR2(hp, SDHC_CLOCK_CTL, SDHC_SDCLK_ENABLE);
1153 if (freq == SDMMC_SDCLK_OFF)
1154 goto out;
1155 }
1156
1157 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_USDHC)) {
1158 if (ddr)
1159 HSET4(hp, SDHC_MIX_CTRL, SDHC_USDHC_DDR_EN);
1160 else
1161 HCLR4(hp, SDHC_MIX_CTRL, SDHC_USDHC_DDR_EN);
1162 } else if (hp->specver >= SDHC_SPEC_VERS_300) {
1163 HCLR2(hp, SDHC_HOST_CTL2, SDHC_UHS_MODE_SELECT_MASK);
1164 if (freq > 100000) {
1165 HSET2(hp, SDHC_HOST_CTL2, SDHC_UHS_MODE_SELECT_SDR104);
1166 } else if (freq > 50000) {
1167 if (ddr) {
1168 HSET2(hp, SDHC_HOST_CTL2,
1169 SDHC_UHS_MODE_SELECT_DDR50);
1170 } else {
1171 HSET2(hp, SDHC_HOST_CTL2,
1172 SDHC_UHS_MODE_SELECT_SDR50);
1173 }
1174 } else if (freq > 25000) {
1175 if (ddr) {
1176 HSET2(hp, SDHC_HOST_CTL2,
1177 SDHC_UHS_MODE_SELECT_DDR50);
1178 } else {
1179 HSET2(hp, SDHC_HOST_CTL2,
1180 SDHC_UHS_MODE_SELECT_SDR25);
1181 }
1182 } else if (freq > 400) {
1183 HSET2(hp, SDHC_HOST_CTL2, SDHC_UHS_MODE_SELECT_SDR12);
1184 }
1185 }
1186
1187 /*
1188  * Slow down the Ricoh 5U823 controller, which isn't reliable
1189  * at a 100 MHz bus clock.
1190 */
1191 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_SLOW_SDR50)) {
1192 if (freq == 100000)
1193 --freq;
1194 }
1195
1196 /*
1197 * Set the minimum base clock frequency divisor.
1198 */
1199 if (!sdhc_clock_divisor(hp, freq, &div)) {
1200 /* Invalid base clock frequency or `freq' value. */
1201 aprint_error_dev(hp->sc->sc_dev,
1202 "Invalid bus clock %d kHz\n", freq);
1203 error = EINVAL;
1204 goto out;
1205 }
1206 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_USDHC)) {
1207 if (ddr) {
1208 /* in ddr mode, divisor >>= 1 */
1209 div = ((div >> 1) & (SDHC_SDCLK_DIV_MASK <<
1210 SDHC_SDCLK_DIV_SHIFT)) |
1211 (div & (SDHC_SDCLK_DVS_MASK <<
1212 SDHC_SDCLK_DVS_SHIFT));
1213 }
1214 for (timo = 1000; timo > 0; timo--) {
1215 if (ISSET(HREAD4(hp, SDHC_PRESENT_STATE), SDHC_SDSTB))
1216 break;
1217 sdmmc_delay(10);
1218 }
1219 HWRITE4(hp, SDHC_CLOCK_CTL,
1220 div | (SDHC_TIMEOUT_MAX << 16) | 0x0f);
1221 } else if (ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED)) {
1222 HWRITE4(hp, SDHC_CLOCK_CTL,
1223 div | (SDHC_TIMEOUT_MAX << 16));
1224 } else {
1225 reg = HREAD2(hp, SDHC_CLOCK_CTL);
1226 reg &= (SDHC_INTCLK_STABLE | SDHC_INTCLK_ENABLE);
1227 HWRITE2(hp, SDHC_CLOCK_CTL, reg | div);
1228 }
1229
1230 /*
1231 * Start internal clock. Wait 10ms for stabilization.
1232 */
1233 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_USDHC)) {
1234 HSET4(hp, SDHC_VEND_SPEC,
1235 SDHC_VEND_SPEC_CARD_CLK_SOFT_EN |
1236 SDHC_VEND_SPEC_FRC_SDCLK_ON);
1237 } else if (ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED)) {
1238 sdmmc_delay(10000);
1239 HSET4(hp, SDHC_CLOCK_CTL,
1240 8 | SDHC_INTCLK_ENABLE | SDHC_INTCLK_STABLE);
1241 } else {
1242 HSET2(hp, SDHC_CLOCK_CTL, SDHC_INTCLK_ENABLE);
1243 for (timo = 1000; timo > 0; timo--) {
1244 if (ISSET(HREAD2(hp, SDHC_CLOCK_CTL),
1245 SDHC_INTCLK_STABLE))
1246 break;
1247 sdmmc_delay(10);
1248 }
1249 if (timo == 0) {
1250 error = ETIMEDOUT;
1251 DPRINTF(1,("%s: timeout\n", __func__));
1252 goto out;
1253 }
1254 }
1255
1256 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED | SDHC_FLAG_USDHC)) {
1257 HSET1(hp, SDHC_SOFTWARE_RESET, SDHC_INIT_ACTIVE);
1258 /*
1259 * Sending 80 clocks at 400kHz takes 200us.
1260 * So delay for that time + slop and then
1261 * check a few times for completion.
1262 */
1263 sdmmc_delay(210);
1264 for (timo = 10; timo > 0; timo--) {
1265 if (!ISSET(HREAD1(hp, SDHC_SOFTWARE_RESET),
1266 SDHC_INIT_ACTIVE))
1267 break;
1268 sdmmc_delay(10);
1269 }
1270 DPRINTF(2,("%s: %u init spins\n", __func__, 10 - timo));
1271
1272 /*
1273 * Enable SD clock.
1274 */
1275 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_USDHC)) {
1276 HSET4(hp, SDHC_VEND_SPEC,
1277 SDHC_VEND_SPEC_CARD_CLK_SOFT_EN |
1278 SDHC_VEND_SPEC_FRC_SDCLK_ON);
1279 } else {
1280 HSET4(hp, SDHC_CLOCK_CTL, SDHC_SDCLK_ENABLE);
1281 }
1282 } else {
1283 /*
1284 * Enable SD clock.
1285 */
1286 HSET2(hp, SDHC_CLOCK_CTL, SDHC_SDCLK_ENABLE);
1287
1288 if (freq > 25000 &&
1289 !ISSET(hp->sc->sc_flags, SDHC_FLAG_NO_HS_BIT))
1290 HSET1(hp, SDHC_HOST_CTL, SDHC_HIGH_SPEED);
1291 else
1292 HCLR1(hp, SDHC_HOST_CTL, SDHC_HIGH_SPEED);
1293 }
1294
1295 mutex_exit(&hp->intr_lock);
1296
1297 if (hp->sc->sc_vendor_bus_clock_post) {
1298 mutex_enter(&hp->bus_clock_lock);
1299 error = (*hp->sc->sc_vendor_bus_clock_post)(hp->sc, freq);
1300 mutex_exit(&hp->bus_clock_lock);
1301 }
1302 return error;
1303
1304 out:
1305 mutex_exit(&hp->intr_lock);
1306
1307 return error;
1308 }
1309
1310 static int
1311 sdhc_bus_width(sdmmc_chipset_handle_t sch, int width)
1312 {
1313 struct sdhc_host *hp = (struct sdhc_host *)sch;
1314 int reg;
1315
1316 switch (width) {
1317 case 1:
1318 case 4:
1319 break;
1320
1321 case 8:
1322 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_8BIT_MODE))
1323 break;
1324 /* FALLTHROUGH */
1325 default:
1326 DPRINTF(0,("%s: unsupported bus width (%d)\n",
1327 HDEVNAME(hp), width));
1328 return 1;
1329 }
1330
1331 if (hp->sc->sc_vendor_bus_width) {
1332 const int error = hp->sc->sc_vendor_bus_width(hp->sc, width);
1333 if (error != 0)
1334 return error;
1335 }
1336
1337 mutex_enter(&hp->intr_lock);
1338
1339 reg = HREAD1(hp, SDHC_HOST_CTL);
1340 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED | SDHC_FLAG_USDHC)) {
1341 reg &= ~(SDHC_4BIT_MODE|SDHC_ESDHC_8BIT_MODE);
1342 if (width == 4)
1343 reg |= SDHC_4BIT_MODE;
1344 else if (width == 8)
1345 reg |= SDHC_ESDHC_8BIT_MODE;
1346 } else {
1347 reg &= ~SDHC_4BIT_MODE;
1348 if (hp->specver >= SDHC_SPEC_VERS_300) {
1349 reg &= ~SDHC_8BIT_MODE;
1350 }
1351 if (width == 4) {
1352 reg |= SDHC_4BIT_MODE;
1353 } else if (width == 8 && hp->specver >= SDHC_SPEC_VERS_300) {
1354 reg |= SDHC_8BIT_MODE;
1355 }
1356 }
1357 HWRITE1(hp, SDHC_HOST_CTL, reg);
1358
1359 mutex_exit(&hp->intr_lock);
1360
1361 return 0;
1362 }
1363
1364 static int
1365 sdhc_bus_rod(sdmmc_chipset_handle_t sch, int on)
1366 {
1367 struct sdhc_host *hp = (struct sdhc_host *)sch;
1368
1369 if (hp->sc->sc_vendor_rod)
1370 return (*hp->sc->sc_vendor_rod)(hp->sc, on);
1371
1372 return 0;
1373 }
1374
1375 static void
1376 sdhc_card_enable_intr(sdmmc_chipset_handle_t sch, int enable)
1377 {
1378 struct sdhc_host *hp = (struct sdhc_host *)sch;
1379
1380 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED | SDHC_FLAG_USDHC)) {
1381 mutex_enter(&hp->intr_lock);
1382 if (enable) {
1383 HSET2(hp, SDHC_NINTR_STATUS_EN, SDHC_CARD_INTERRUPT);
1384 HSET2(hp, SDHC_NINTR_SIGNAL_EN, SDHC_CARD_INTERRUPT);
1385 } else {
1386 HCLR2(hp, SDHC_NINTR_SIGNAL_EN, SDHC_CARD_INTERRUPT);
1387 HCLR2(hp, SDHC_NINTR_STATUS_EN, SDHC_CARD_INTERRUPT);
1388 }
1389 mutex_exit(&hp->intr_lock);
1390 }
1391 }
1392
1393 static void
1394 sdhc_card_intr_ack(sdmmc_chipset_handle_t sch)
1395 {
1396 struct sdhc_host *hp = (struct sdhc_host *)sch;
1397
1398 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED | SDHC_FLAG_USDHC)) {
1399 mutex_enter(&hp->intr_lock);
1400 HSET2(hp, SDHC_NINTR_STATUS_EN, SDHC_CARD_INTERRUPT);
1401 mutex_exit(&hp->intr_lock);
1402 }
1403 }
1404
1405 static int
1406 sdhc_signal_voltage(sdmmc_chipset_handle_t sch, int signal_voltage)
1407 {
1408 struct sdhc_host *hp = (struct sdhc_host *)sch;
1409 int error = 0;
1410
1411 if (hp->specver < SDHC_SPEC_VERS_300)
1412 return EINVAL;
1413
1414 mutex_enter(&hp->intr_lock);
1415 switch (signal_voltage) {
1416 case SDMMC_SIGNAL_VOLTAGE_180:
1417 if (hp->sc->sc_vendor_signal_voltage != NULL) {
1418 error = hp->sc->sc_vendor_signal_voltage(hp->sc,
1419 signal_voltage);
1420 if (error != 0)
1421 break;
1422 }
1423 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_USDHC))
1424 HSET2(hp, SDHC_HOST_CTL2, SDHC_1_8V_SIGNAL_EN);
1425 break;
1426 case SDMMC_SIGNAL_VOLTAGE_330:
1427 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_USDHC))
1428 HCLR2(hp, SDHC_HOST_CTL2, SDHC_1_8V_SIGNAL_EN);
1429 if (hp->sc->sc_vendor_signal_voltage != NULL) {
1430 error = hp->sc->sc_vendor_signal_voltage(hp->sc,
1431 signal_voltage);
1432 if (error != 0)
1433 break;
1434 }
1435 break;
1436 default:
1437 error = EINVAL;
1438 break;
1439 }
1440 mutex_exit(&hp->intr_lock);
1441
1442 return error;
1443 }
1444
1445 /*
1446 * Sampling clock tuning procedure (UHS)
1447 */
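/*
 * In outline (matching the code below): the buffer-read-ready interrupt is
 * enabled, DMA is disabled, SDHC_EXECUTE_TUNING is set, and the tuning
 * command (MMC_SEND_TUNING_BLOCK_HS200 for HS200, MMC_SEND_TUNING_BLOCK
 * otherwise) is issued repeatedly, up to 40 times, until the controller
 * clears SDHC_EXECUTE_TUNING.  If tuning does not converge, the sampling
 * clock select is cleared and the host falls back to the fixed sampling
 * clock.
 */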
1448 static int
1449 sdhc_execute_tuning1(struct sdhc_host *hp, int timing)
1450 {
1451 struct sdmmc_command cmd;
1452 uint8_t hostctl;
1453 int opcode, error, retry = 40;
1454
1455 KASSERT(mutex_owned(&hp->intr_lock));
1456
1457 hp->tuning_timing = timing;
1458
1459 switch (timing) {
1460 case SDMMC_TIMING_MMC_HS200:
1461 opcode = MMC_SEND_TUNING_BLOCK_HS200;
1462 break;
1463 case SDMMC_TIMING_UHS_SDR50:
1464 if (!ISSET(hp->sc->sc_caps2, SDHC_TUNING_SDR50))
1465 return 0;
1466 /* FALLTHROUGH */
1467 case SDMMC_TIMING_UHS_SDR104:
1468 opcode = MMC_SEND_TUNING_BLOCK;
1469 break;
1470 default:
1471 return EINVAL;
1472 }
1473
1474 hostctl = HREAD1(hp, SDHC_HOST_CTL);
1475
1476 /* enable buffer read ready interrupt */
1477 HSET2(hp, SDHC_NINTR_SIGNAL_EN, SDHC_BUFFER_READ_READY);
1478 HSET2(hp, SDHC_NINTR_STATUS_EN, SDHC_BUFFER_READ_READY);
1479
1480 /* disable DMA */
1481 HCLR1(hp, SDHC_HOST_CTL, SDHC_DMA_SELECT);
1482
1483 /* reset tuning circuit */
1484 HCLR2(hp, SDHC_HOST_CTL2, SDHC_SAMPLING_CLOCK_SEL);
1485
1486 /* start of tuning */
1487 HWRITE2(hp, SDHC_HOST_CTL2, SDHC_EXECUTE_TUNING);
1488
1489 do {
1490 memset(&cmd, 0, sizeof(cmd));
1491 cmd.c_opcode = opcode;
1492 cmd.c_arg = 0;
1493 cmd.c_flags = SCF_CMD_ADTC | SCF_CMD_READ | SCF_RSP_R1;
1494 if (ISSET(hostctl, SDHC_8BIT_MODE)) {
1495 cmd.c_blklen = cmd.c_datalen = 128;
1496 } else {
1497 cmd.c_blklen = cmd.c_datalen = 64;
1498 }
1499
1500 error = sdhc_start_command(hp, &cmd);
1501 if (error)
1502 break;
1503
1504 if (!sdhc_wait_intr(hp, SDHC_BUFFER_READ_READY,
1505 SDHC_TUNING_TIMEOUT, false)) {
1506 break;
1507 }
1508
1509 delay(1000);
1510 } while (HREAD2(hp, SDHC_HOST_CTL2) & SDHC_EXECUTE_TUNING && --retry);
1511
1512 /* disable buffer read ready interrupt */
1513 HCLR2(hp, SDHC_NINTR_SIGNAL_EN, SDHC_BUFFER_READ_READY);
1514 HCLR2(hp, SDHC_NINTR_STATUS_EN, SDHC_BUFFER_READ_READY);
1515
1516 if (HREAD2(hp, SDHC_HOST_CTL2) & SDHC_EXECUTE_TUNING) {
1517 HCLR2(hp, SDHC_HOST_CTL2,
1518 SDHC_SAMPLING_CLOCK_SEL|SDHC_EXECUTE_TUNING);
1519 sdhc_soft_reset(hp, SDHC_RESET_DAT|SDHC_RESET_CMD);
1520 aprint_error_dev(hp->sc->sc_dev,
1521 "tuning did not complete, using fixed sampling clock\n");
1522 return 0; /* tuning did not complete */
1523 }
1524
1525 if ((HREAD2(hp, SDHC_HOST_CTL2) & SDHC_SAMPLING_CLOCK_SEL) == 0) {
1526 HCLR2(hp, SDHC_HOST_CTL2,
1527 SDHC_SAMPLING_CLOCK_SEL|SDHC_EXECUTE_TUNING);
1528 sdhc_soft_reset(hp, SDHC_RESET_DAT|SDHC_RESET_CMD);
1529 aprint_error_dev(hp->sc->sc_dev,
1530 "tuning failed, using fixed sampling clock\n");
1531 return 0; /* tuning failed */
1532 }
1533
1534 if (hp->tuning_timer_count) {
1535 callout_schedule(&hp->tuning_timer,
1536 hz * hp->tuning_timer_count);
1537 }
1538
1539 return 0; /* tuning completed */
1540 }
1541
1542 static int
1543 sdhc_execute_tuning(sdmmc_chipset_handle_t sch, int timing)
1544 {
1545 struct sdhc_host *hp = (struct sdhc_host *)sch;
1546 int error;
1547
1548 mutex_enter(&hp->intr_lock);
1549 error = sdhc_execute_tuning1(hp, timing);
1550 mutex_exit(&hp->intr_lock);
1551 return error;
1552 }
1553
1554 static void
1555 sdhc_tuning_timer(void *arg)
1556 {
1557 struct sdhc_host *hp = arg;
1558
1559 atomic_swap_uint(&hp->tuning_timer_pending, 1);
1560 }
1561
1562 static void
1563 sdhc_hw_reset(sdmmc_chipset_handle_t sch)
1564 {
1565 struct sdhc_host *hp = (struct sdhc_host *)sch;
1566 struct sdhc_softc *sc = hp->sc;
1567
1568 if (sc->sc_vendor_hw_reset != NULL)
1569 sc->sc_vendor_hw_reset(sc, hp);
1570 }
1571
1572 static int
1573 sdhc_wait_state(struct sdhc_host *hp, uint32_t mask, uint32_t value)
1574 {
1575 uint32_t state;
1576 int timeout;
1577
1578 for (timeout = 100000; timeout > 0; timeout--) {
1579 if (((state = HREAD4(hp, SDHC_PRESENT_STATE)) & mask) == value)
1580 return 0;
1581 sdmmc_delay(10);
1582 }
1583 aprint_error_dev(hp->sc->sc_dev, "timeout waiting for mask %#x value %#x (state=%#x)\n",
1584 mask, value, state);
1585 return ETIMEDOUT;
1586 }
1587
1588 static void
1589 sdhc_exec_command(sdmmc_chipset_handle_t sch, struct sdmmc_command *cmd)
1590 {
1591 struct sdhc_host *hp = (struct sdhc_host *)sch;
1592 int error;
1593 bool probing;
1594
1595 mutex_enter(&hp->intr_lock);
1596
1597 if (atomic_cas_uint(&hp->tuning_timer_pending, 1, 0) == 1) {
1598 (void)sdhc_execute_tuning1(hp, hp->tuning_timing);
1599 }
1600
1601 if (cmd->c_data &&
1602 ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED | SDHC_FLAG_USDHC)) {
1603 const uint16_t ready = SDHC_BUFFER_READ_READY | SDHC_BUFFER_WRITE_READY;
1604 if (ISSET(hp->flags, SHF_USE_DMA)) {
1605 HCLR2(hp, SDHC_NINTR_SIGNAL_EN, ready);
1606 HCLR2(hp, SDHC_NINTR_STATUS_EN, ready);
1607 } else {
1608 HSET2(hp, SDHC_NINTR_SIGNAL_EN, ready);
1609 HSET2(hp, SDHC_NINTR_STATUS_EN, ready);
1610 }
1611 }
1612
1613 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_NO_TIMEOUT)) {
1614 const uint16_t eintr = SDHC_CMD_TIMEOUT_ERROR;
1615 if (cmd->c_data != NULL) {
1616 HCLR2(hp, SDHC_EINTR_SIGNAL_EN, eintr);
1617 HCLR2(hp, SDHC_EINTR_STATUS_EN, eintr);
1618 } else {
1619 HSET2(hp, SDHC_EINTR_SIGNAL_EN, eintr);
1620 HSET2(hp, SDHC_EINTR_STATUS_EN, eintr);
1621 }
1622 }
1623
1624 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_STOP_WITH_TC)) {
1625 if (cmd->c_opcode == MMC_STOP_TRANSMISSION)
1626 SET(cmd->c_flags, SCF_RSP_BSY);
1627 }
1628
1629 /*
1630 * Start the MMC command, or mark `cmd' as failed and return.
1631 */
1632 error = sdhc_start_command(hp, cmd);
1633 if (error) {
1634 cmd->c_error = error;
1635 goto out;
1636 }
1637
1638 /*
1639 * Wait until the command phase is done, or until the command
1640 * is marked done for any other reason.
1641 */
1642 probing = (cmd->c_flags & SCF_TOUT_OK) != 0;
1643 if (!sdhc_wait_intr(hp, SDHC_COMMAND_COMPLETE, SDHC_COMMAND_TIMEOUT*3, probing)) {
1644 DPRINTF(1,("%s: timeout for command\n", __func__));
1645 sdmmc_delay(50);
1646 cmd->c_error = ETIMEDOUT;
1647 goto out;
1648 }
1649
1650 /*
1651 * The host controller removes bits [0:7] from the response
1652 * data (CRC) and we pass the data up unchanged to the bus
1653 * driver (without padding).
1654 */
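	/*
	 * Example of the SDHC_FLAG_RSP136_CRC fixup below: controllers with
	 * that quirk leave the CRC byte in the 136-bit response, so each
	 * 32-bit response word is shifted right by 8 bits, pulling in the
	 * low byte of the next word, to line the fields up with what the
	 * rest of the stack expects.
	 */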
1655 if (cmd->c_error == 0 && ISSET(cmd->c_flags, SCF_RSP_PRESENT)) {
1656 cmd->c_resp[0] = HREAD4(hp, SDHC_RESPONSE + 0);
1657 if (ISSET(cmd->c_flags, SCF_RSP_136)) {
1658 cmd->c_resp[1] = HREAD4(hp, SDHC_RESPONSE + 4);
1659 cmd->c_resp[2] = HREAD4(hp, SDHC_RESPONSE + 8);
1660 cmd->c_resp[3] = HREAD4(hp, SDHC_RESPONSE + 12);
1661 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_RSP136_CRC)) {
1662 cmd->c_resp[0] = (cmd->c_resp[0] >> 8) |
1663 (cmd->c_resp[1] << 24);
1664 cmd->c_resp[1] = (cmd->c_resp[1] >> 8) |
1665 (cmd->c_resp[2] << 24);
1666 cmd->c_resp[2] = (cmd->c_resp[2] >> 8) |
1667 (cmd->c_resp[3] << 24);
1668 cmd->c_resp[3] = (cmd->c_resp[3] >> 8);
1669 }
1670 }
1671 }
1672 DPRINTF(1,("%s: resp = %08x\n", HDEVNAME(hp), cmd->c_resp[0]));
1673
1674 /*
1675 * If the command has data to transfer in any direction,
1676 * execute the transfer now.
1677 */
1678 if (cmd->c_error == 0 && cmd->c_data != NULL)
1679 sdhc_transfer_data(hp, cmd);
1680 else if (ISSET(cmd->c_flags, SCF_RSP_BSY)) {
1681 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_NO_BUSY_INTR) &&
1682 !sdhc_wait_intr(hp, SDHC_TRANSFER_COMPLETE, hz * 10, false)) {
1683 DPRINTF(1,("%s: sdhc_exec_command: RSP_BSY\n",
1684 HDEVNAME(hp)));
1685 cmd->c_error = ETIMEDOUT;
1686 goto out;
1687 }
1688 }
1689
1690 out:
1691 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED)
1692 && !ISSET(hp->sc->sc_flags, SDHC_FLAG_NO_LED_ON)) {
1693 /* Turn off the LED. */
1694 HCLR1(hp, SDHC_HOST_CTL, SDHC_LED_ON);
1695 }
1696 SET(cmd->c_flags, SCF_ITSDONE);
1697
1698 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_NO_AUTO_STOP) &&
1699 cmd->c_opcode == MMC_STOP_TRANSMISSION)
1700 (void)sdhc_soft_reset(hp, SDHC_RESET_CMD|SDHC_RESET_DAT);
1701
1702 mutex_exit(&hp->intr_lock);
1703
1704 DPRINTF(1,("%s: cmd %d %s (flags=%08x error=%d)\n", HDEVNAME(hp),
1705 cmd->c_opcode, (cmd->c_error == 0) ? "done" : "abort",
1706 cmd->c_flags, cmd->c_error));
1707 }
1708
1709 static int
1710 sdhc_start_command(struct sdhc_host *hp, struct sdmmc_command *cmd)
1711 {
1712 struct sdhc_softc * const sc = hp->sc;
1713 uint16_t blksize = 0;
1714 uint16_t blkcount = 0;
1715 uint16_t mode;
1716 uint16_t command;
1717 uint32_t pmask;
1718 int error;
1719
1720 KASSERT(mutex_owned(&hp->intr_lock));
1721
1722 DPRINTF(1,("%s: start cmd %d arg=%08x data=%p dlen=%d flags=%08x, status=%#x\n",
1723 HDEVNAME(hp), cmd->c_opcode, cmd->c_arg, cmd->c_data,
1724 cmd->c_datalen, cmd->c_flags, HREAD4(hp, SDHC_NINTR_STATUS)));
1725
1726 /*
1727 * The maximum block length for commands should be the minimum
1728 * of the host buffer size and the card buffer size. (1.7.2)
1729 */
1730
1731 /* Fragment the data into proper blocks. */
1732 if (cmd->c_datalen > 0) {
1733 blksize = MIN(cmd->c_datalen, cmd->c_blklen);
1734 blkcount = cmd->c_datalen / blksize;
1735 if (cmd->c_datalen % blksize > 0) {
1736 /* XXX: Split this command. (1.7.4) */
1737 aprint_error_dev(sc->sc_dev,
1738 "data not a multiple of %u bytes\n", blksize);
1739 return EINVAL;
1740 }
1741 }
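
	/*
	 * For instance (illustrative numbers only), a 65536-byte read with
	 * cmd->c_blklen == 512 becomes blksize = 512 and blkcount = 128;
	 * a request that is not a multiple of the block size is rejected
	 * above rather than split.
	 */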
1742
1743 /* Check limit imposed by 9-bit block count. (1.7.2) */
1744 if (blkcount > SDHC_BLOCK_COUNT_MAX) {
1745 aprint_error_dev(sc->sc_dev, "too much data\n");
1746 return EINVAL;
1747 }
1748
1749 /* Prepare transfer mode register value. (2.2.5) */
1750 mode = 0;
1751 if (ISSET(cmd->c_flags, SCF_CMD_READ))
1752 mode |= SDHC_READ_MODE;
1753 if (blkcount > 0) {
1754 mode |= SDHC_BLOCK_COUNT_ENABLE;
1755 if (blkcount > 1) {
1756 mode |= SDHC_MULTI_BLOCK_MODE;
1757 if (!ISSET(sc->sc_flags, SDHC_FLAG_NO_AUTO_STOP)
1758 && !ISSET(cmd->c_flags, SCF_NO_STOP))
1759 mode |= SDHC_AUTO_CMD12_ENABLE;
1760 }
1761 }
1762 if (cmd->c_dmamap != NULL && cmd->c_datalen > 0 &&
1763 ISSET(hp->flags, SHF_MODE_DMAEN)) {
1764 mode |= SDHC_DMA_ENABLE;
1765 }
1766
1767 /*
1768 * Prepare command register value. (2.2.6)
1769 */
1770 command = (cmd->c_opcode & SDHC_COMMAND_INDEX_MASK) << SDHC_COMMAND_INDEX_SHIFT;
1771
1772 if (ISSET(cmd->c_flags, SCF_RSP_CRC))
1773 command |= SDHC_CRC_CHECK_ENABLE;
1774 if (ISSET(cmd->c_flags, SCF_RSP_IDX))
1775 command |= SDHC_INDEX_CHECK_ENABLE;
1776 if (cmd->c_datalen > 0)
1777 command |= SDHC_DATA_PRESENT_SELECT;
1778
1779 if (!ISSET(cmd->c_flags, SCF_RSP_PRESENT))
1780 command |= SDHC_NO_RESPONSE;
1781 else if (ISSET(cmd->c_flags, SCF_RSP_136))
1782 command |= SDHC_RESP_LEN_136;
1783 else if (ISSET(cmd->c_flags, SCF_RSP_BSY))
1784 command |= SDHC_RESP_LEN_48_CHK_BUSY;
1785 else
1786 command |= SDHC_RESP_LEN_48;
1787
1788 /* Wait until command and optionally data inhibit bits are clear. (1.5) */
1789 pmask = SDHC_CMD_INHIBIT_CMD;
1790 if (cmd->c_flags & (SCF_CMD_ADTC|SCF_RSP_BSY))
1791 pmask |= SDHC_CMD_INHIBIT_DAT;
1792 error = sdhc_wait_state(hp, pmask, 0);
1793 if (error) {
1794 (void) sdhc_soft_reset(hp, SDHC_RESET_DAT|SDHC_RESET_CMD);
1795 device_printf(sc->sc_dev, "command or data phase inhibited\n");
1796 return error;
1797 }
1798
1799 DPRINTF(1,("%s: writing cmd: blksize=%d blkcnt=%d mode=%04x cmd=%04x\n",
1800 HDEVNAME(hp), blksize, blkcount, mode, command));
1801
1802 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED | SDHC_FLAG_USDHC)) {
1803 blksize |= (MAX(0, PAGE_SHIFT - 12) & SDHC_DMA_BOUNDARY_MASK) <<
1804 SDHC_DMA_BOUNDARY_SHIFT; /* PAGE_SIZE DMA boundary */
1805 }
1806
1807 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED)) {
1808 /* Alert the user not to remove the card. */
1809 HSET1(hp, SDHC_HOST_CTL, SDHC_LED_ON);
1810 }
1811
1812 /* Set DMA start address. */
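	/*
	 * Sketch of the ADMA2 descriptor table built below (one entry per
	 * DMA segment): each descriptor carries an attribute word with
	 * SDHC_ADMA2_VALID and SDHC_ADMA2_ACT_TRANS (plus SDHC_ADMA2_END on
	 * the last segment), a 16-bit length where 0 encodes a full
	 * 65536-byte segment, and the segment's bus address (split into
	 * low/high words for the 64-bit descriptor format).
	 */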
1813 if (ISSET(hp->flags, SHF_USE_ADMA2_MASK) && cmd->c_data != NULL) {
1814 for (int seg = 0; seg < cmd->c_dmamap->dm_nsegs; seg++) {
1815 bus_addr_t paddr =
1816 cmd->c_dmamap->dm_segs[seg].ds_addr;
1817 uint16_t len =
1818 cmd->c_dmamap->dm_segs[seg].ds_len == 65536 ?
1819 0 : cmd->c_dmamap->dm_segs[seg].ds_len;
1820 uint16_t attr =
1821 SDHC_ADMA2_VALID | SDHC_ADMA2_ACT_TRANS;
1822 if (seg == cmd->c_dmamap->dm_nsegs - 1) {
1823 attr |= SDHC_ADMA2_END;
1824 }
1825 if (ISSET(hp->flags, SHF_USE_ADMA2_32)) {
1826 struct sdhc_adma2_descriptor32 *desc =
1827 hp->adma2;
1828 desc[seg].attribute = htole16(attr);
1829 desc[seg].length = htole16(len);
1830 desc[seg].address = htole32(paddr);
1831 } else {
1832 struct sdhc_adma2_descriptor64 *desc =
1833 hp->adma2;
1834 desc[seg].attribute = htole16(attr);
1835 desc[seg].length = htole16(len);
1836 desc[seg].address = htole32(paddr & 0xffffffff);
1837 desc[seg].address_hi = htole32(
1838 (uint64_t)paddr >> 32);
1839 }
1840 }
1841 if (ISSET(hp->flags, SHF_USE_ADMA2_32)) {
1842 struct sdhc_adma2_descriptor32 *desc = hp->adma2;
1843 desc[cmd->c_dmamap->dm_nsegs].attribute = htole16(0);
1844 } else {
1845 struct sdhc_adma2_descriptor64 *desc = hp->adma2;
1846 desc[cmd->c_dmamap->dm_nsegs].attribute = htole16(0);
1847 }
1848 bus_dmamap_sync(sc->sc_dmat, hp->adma_map, 0, PAGE_SIZE,
1849 BUS_DMASYNC_PREWRITE);
1850
1851 const bus_addr_t desc_addr = hp->adma_map->dm_segs[0].ds_addr;
1852 HWRITE4(hp, SDHC_ADMA_SYSTEM_ADDR, desc_addr & 0xffffffff);
1853 if (ISSET(hp->flags, SHF_USE_ADMA2_64)) {
1854 HWRITE4(hp, SDHC_ADMA_SYSTEM_ADDR + 4,
1855 (uint64_t)desc_addr >> 32);
1856 }
1857
1858 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_USDHC)) {
1859 HCLR4(hp, SDHC_HOST_CTL, SDHC_USDHC_DMA_SELECT);
1860 HSET4(hp, SDHC_HOST_CTL, SDHC_USDHC_DMA_SELECT_ADMA2);
1861 } else {
1862 HCLR1(hp, SDHC_HOST_CTL, SDHC_DMA_SELECT);
1863 HSET1(hp, SDHC_HOST_CTL, SDHC_DMA_SELECT_ADMA2);
1864 }
1865 } else if (ISSET(mode, SDHC_DMA_ENABLE) &&
1866 !ISSET(sc->sc_flags, SDHC_FLAG_EXTERNAL_DMA)) {
1867 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_USDHC)) {
1868 HCLR4(hp, SDHC_HOST_CTL, SDHC_USDHC_DMA_SELECT);
1869 }
1870 HWRITE4(hp, SDHC_DMA_ADDR, cmd->c_dmamap->dm_segs[0].ds_addr);
1871 }
1872
1873 /*
1874 * Start a CPU data transfer. Writing to the high order byte
1875 * of the SDHC_COMMAND register triggers the SD command. (1.5)
1876 */
1877 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_32BIT_ACCESS)) {
1878 HWRITE4(hp, SDHC_BLOCK_SIZE, blksize | (blkcount << 16));
1879 HWRITE4(hp, SDHC_ARGUMENT, cmd->c_arg);
1880 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_USDHC)) {
1881 /* mode bits live in the MIX_CTRL register on uSDHC */
1882 HWRITE4(hp, SDHC_MIX_CTRL, mode |
1883 (HREAD4(hp, SDHC_MIX_CTRL) & ~SDHC_TRANSFER_MODE_MASK));
1884 if (cmd->c_opcode == MMC_STOP_TRANSMISSION)
1885 command |= SDHC_COMMAND_TYPE_ABORT;
1886 HWRITE4(hp, SDHC_TRANSFER_MODE, command << 16);
1887 } else {
1888 HWRITE4(hp, SDHC_TRANSFER_MODE, mode | (command << 16));
1889 }
1890 } else {
1891 HWRITE2(hp, SDHC_BLOCK_SIZE, blksize);
1892 HWRITE2(hp, SDHC_BLOCK_COUNT, blkcount);
1893 HWRITE4(hp, SDHC_ARGUMENT, cmd->c_arg);
1894 HWRITE2(hp, SDHC_TRANSFER_MODE, mode);
1895 HWRITE2(hp, SDHC_COMMAND, command);
1896 }
1897
1898 return 0;
1899 }
1900
1901 static void
1902 sdhc_transfer_data(struct sdhc_host *hp, struct sdmmc_command *cmd)
1903 {
1904 struct sdhc_softc *sc = hp->sc;
1905 int error;
1906
1907 KASSERT(mutex_owned(&hp->intr_lock));
1908
1909 DPRINTF(1,("%s: data transfer: resp=%08x datalen=%u\n", HDEVNAME(hp),
1910 MMC_R1(cmd->c_resp), cmd->c_datalen));
1911
1912 #ifdef SDHC_DEBUG
1913 /* XXX I forgot why I wanted to know when this happens :-( */
1914 if ((cmd->c_opcode == 52 || cmd->c_opcode == 53) &&
1915 ISSET(MMC_R1(cmd->c_resp), 0xcb00)) {
1916 aprint_error_dev(hp->sc->sc_dev,
1917 "CMD52/53 error response flags %#x\n",
1918 MMC_R1(cmd->c_resp) & 0xff00);
1919 }
1920 #endif
1921
1922 if (cmd->c_dmamap != NULL) {
1923 if (hp->sc->sc_vendor_transfer_data_dma != NULL) {
1924 error = hp->sc->sc_vendor_transfer_data_dma(sc, cmd);
1925 if (error == 0 && !sdhc_wait_intr(hp,
1926 SDHC_TRANSFER_COMPLETE, SDHC_DMA_TIMEOUT, false)) {
1927 DPRINTF(1,("%s: timeout\n", __func__));
1928 error = ETIMEDOUT;
1929 }
1930 } else {
1931 error = sdhc_transfer_data_dma(hp, cmd);
1932 }
1933 } else
1934 error = sdhc_transfer_data_pio(hp, cmd);
1935 if (error)
1936 cmd->c_error = error;
1937 SET(cmd->c_flags, SCF_ITSDONE);
1938
1939 DPRINTF(1,("%s: data transfer done (error=%d)\n",
1940 HDEVNAME(hp), cmd->c_error));
1941 }
1942
1943 static int
1944 sdhc_transfer_data_dma(struct sdhc_host *hp, struct sdmmc_command *cmd)
1945 {
1946 bus_dma_segment_t *dm_segs = cmd->c_dmamap->dm_segs;
1947 bus_addr_t posaddr;
1948 bus_addr_t segaddr;
1949 bus_size_t seglen;
1950 u_int seg = 0;
1951 int error = 0;
1952 int status;
1953
1954 KASSERT(mutex_owned(&hp->intr_lock));
1955 KASSERT(HREAD2(hp, SDHC_NINTR_STATUS_EN) & SDHC_DMA_INTERRUPT);
1956 KASSERT(HREAD2(hp, SDHC_NINTR_SIGNAL_EN) & SDHC_DMA_INTERRUPT);
1957 KASSERT(HREAD2(hp, SDHC_NINTR_STATUS_EN) & SDHC_TRANSFER_COMPLETE);
1958 KASSERT(HREAD2(hp, SDHC_NINTR_SIGNAL_EN) & SDHC_TRANSFER_COMPLETE);
1959
1960 for (;;) {
1961 status = sdhc_wait_intr(hp,
1962 SDHC_DMA_INTERRUPT|SDHC_TRANSFER_COMPLETE,
1963 SDHC_DMA_TIMEOUT, false);
1964
1965 if (status & SDHC_TRANSFER_COMPLETE) {
1966 break;
1967 }
1968 if (!status) {
1969 DPRINTF(1,("%s: timeout\n", __func__));
1970 error = ETIMEDOUT;
1971 break;
1972 }
1973
1974 if (ISSET(hp->flags, SHF_USE_ADMA2_MASK)) {
1975 continue;
1976 }
1977
1978 if ((status & SDHC_DMA_INTERRUPT) == 0) {
1979 continue;
1980 }
1981
1982 /* DMA Interrupt (boundary crossing) */
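/*
 * The SDMA engine has paused at the buffer boundary programmed into
 * SDHC_BLOCK_SIZE.  Re-read its current position and either resume
 * within the current segment or, once the segment is exhausted,
 * restart at the next segment's physical address.
 */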
1983
1984 segaddr = dm_segs[seg].ds_addr;
1985 seglen = dm_segs[seg].ds_len;
1986 posaddr = HREAD4(hp, SDHC_DMA_ADDR);
1987
1988 if ((seg == (cmd->c_dmamap->dm_nsegs-1)) && (posaddr == (segaddr + seglen))) {
1989 continue;
1990 }
1991 if ((posaddr >= segaddr) && (posaddr < (segaddr + seglen)))
1992 HWRITE4(hp, SDHC_DMA_ADDR, posaddr);
1993 else if ((posaddr >= segaddr) && (posaddr == (segaddr + seglen)) && (seg + 1) < cmd->c_dmamap->dm_nsegs)
1994 HWRITE4(hp, SDHC_DMA_ADDR, dm_segs[++seg].ds_addr);
1995 KASSERT(seg < cmd->c_dmamap->dm_nsegs);
1996 }
1997
1998 if (ISSET(hp->flags, SHF_USE_ADMA2_MASK)) {
1999 bus_dmamap_sync(hp->sc->sc_dmat, hp->adma_map, 0,
2000 PAGE_SIZE, BUS_DMASYNC_POSTWRITE);
2001 }
2002
2003 return error;
2004 }
2005
2006 static int
2007 sdhc_transfer_data_pio(struct sdhc_host *hp, struct sdmmc_command *cmd)
2008 {
2009 uint8_t *data = cmd->c_data;
2010 void (*pio_func)(struct sdhc_host *, uint8_t *, u_int);
2011 u_int len, datalen;
2012 u_int imask;
2013 u_int pmask;
2014 int error = 0;
2015
2016 KASSERT(mutex_owned(&hp->intr_lock));
2017
2018 if (ISSET(cmd->c_flags, SCF_CMD_READ)) {
2019 imask = SDHC_BUFFER_READ_READY;
2020 pmask = SDHC_BUFFER_READ_ENABLE;
2021 if (ISSET(hp->sc->sc_flags,
2022 SDHC_FLAG_ENHANCED | SDHC_FLAG_USDHC)) {
2023 pio_func = esdhc_read_data_pio;
2024 } else {
2025 pio_func = sdhc_read_data_pio;
2026 }
2027 } else {
2028 imask = SDHC_BUFFER_WRITE_READY;
2029 pmask = SDHC_BUFFER_WRITE_ENABLE;
2030 if (ISSET(hp->sc->sc_flags,
2031 SDHC_FLAG_ENHANCED | SDHC_FLAG_USDHC)) {
2032 pio_func = esdhc_write_data_pio;
2033 } else {
2034 pio_func = sdhc_write_data_pio;
2035 }
2036 }
2037 datalen = cmd->c_datalen;
2038
2039 KASSERT(mutex_owned(&hp->intr_lock));
2040 KASSERT(HREAD2(hp, SDHC_NINTR_STATUS_EN) & imask);
2041 KASSERT(HREAD2(hp, SDHC_NINTR_STATUS_EN) & SDHC_TRANSFER_COMPLETE);
2042 KASSERT(HREAD2(hp, SDHC_NINTR_SIGNAL_EN) & SDHC_TRANSFER_COMPLETE);
2043
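/*
 * Move the data through the data port one block at a time: wait for the
 * buffer-ready interrupt, confirm the matching present-state bit, then
 * copy up to c_blklen bytes with the selected PIO routine.
 */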
2044 while (datalen > 0) {
2045 if (!ISSET(HREAD4(hp, SDHC_PRESENT_STATE), pmask)) {
2046 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_32BIT_ACCESS)) {
2047 HSET4(hp, SDHC_NINTR_SIGNAL_EN, imask);
2048 } else {
2049 HSET2(hp, SDHC_NINTR_SIGNAL_EN, imask);
2050 }
2051 if (!sdhc_wait_intr(hp, imask, SDHC_BUFFER_TIMEOUT, false)) {
2052 DPRINTF(1,("%s: timeout\n", __func__));
2053 error = ETIMEDOUT;
2054 break;
2055 }
2056
2057 error = sdhc_wait_state(hp, pmask, pmask);
2058 if (error)
2059 break;
2060 }
2061
2062 len = MIN(datalen, cmd->c_blklen);
2063 (*pio_func)(hp, data, len);
2064 DPRINTF(2,("%s: pio data transfer %u @ %p\n",
2065 HDEVNAME(hp), len, data));
2066
2067 data += len;
2068 datalen -= len;
2069 }
2070
2071 if (error == 0 && !sdhc_wait_intr(hp, SDHC_TRANSFER_COMPLETE,
2072 SDHC_TRANSFER_TIMEOUT, false)) {
2073 DPRINTF(1,("%s: timeout for transfer\n", __func__));
2074 error = ETIMEDOUT;
2075 }
2076
2077 return error;
2078 }
2079
2080 static void
2081 sdhc_read_data_pio(struct sdhc_host *hp, uint8_t *data, u_int datalen)
2082 {
2083
2084 if (((__uintptr_t)data & 3) == 0) {
2085 while (datalen > 3) {
2086 *(uint32_t *)data = le32toh(HREAD4(hp, SDHC_DATA));
2087 data += 4;
2088 datalen -= 4;
2089 }
2090 if (datalen > 1) {
2091 *(uint16_t *)data = le16toh(HREAD2(hp, SDHC_DATA));
2092 data += 2;
2093 datalen -= 2;
2094 }
2095 if (datalen > 0) {
2096 *data = HREAD1(hp, SDHC_DATA);
2097 data += 1;
2098 datalen -= 1;
2099 }
2100 } else if (((__uintptr_t)data & 1) == 0) {
2101 while (datalen > 1) {
2102 *(uint16_t *)data = le16toh(HREAD2(hp, SDHC_DATA));
2103 data += 2;
2104 datalen -= 2;
2105 }
2106 if (datalen > 0) {
2107 *data = HREAD1(hp, SDHC_DATA);
2108 data += 1;
2109 datalen -= 1;
2110 }
2111 } else {
2112 while (datalen > 0) {
2113 *data = HREAD1(hp, SDHC_DATA);
2114 data += 1;
2115 datalen -= 1;
2116 }
2117 }
2118 }
2119
2120 static void
2121 sdhc_write_data_pio(struct sdhc_host *hp, uint8_t *data, u_int datalen)
2122 {
2123
2124 if (((__uintptr_t)data & 3) == 0) {
2125 while (datalen > 3) {
2126 HWRITE4(hp, SDHC_DATA, htole32(*(uint32_t *)data));
2127 data += 4;
2128 datalen -= 4;
2129 }
2130 if (datalen > 1) {
2131 HWRITE2(hp, SDHC_DATA, htole16(*(uint16_t *)data));
2132 data += 2;
2133 datalen -= 2;
2134 }
2135 if (datalen > 0) {
2136 HWRITE1(hp, SDHC_DATA, *data);
2137 data += 1;
2138 datalen -= 1;
2139 }
2140 } else if (((__uintptr_t)data & 1) == 0) {
2141 while (datalen > 1) {
2142 HWRITE2(hp, SDHC_DATA, htole16(*(uint16_t *)data));
2143 data += 2;
2144 datalen -= 2;
2145 }
2146 if (datalen > 0) {
2147 HWRITE1(hp, SDHC_DATA, *data);
2148 data += 1;
2149 datalen -= 1;
2150 }
2151 } else {
2152 while (datalen > 0) {
2153 HWRITE1(hp, SDHC_DATA, *data);
2154 data += 1;
2155 datalen -= 1;
2156 }
2157 }
2158 }
2159
2160 static void
2161 esdhc_read_data_pio(struct sdhc_host *hp, uint8_t *data, u_int datalen)
2162 {
2163 uint16_t status = HREAD2(hp, SDHC_NINTR_STATUS);
2164 uint32_t v;
2165
2166 const size_t watermark = (HREAD4(hp, SDHC_WATERMARK_LEVEL) >> SDHC_WATERMARK_READ_SHIFT) & SDHC_WATERMARK_READ_MASK;
2167 size_t count = 0;
2168
2169 while (datalen > 3 && !ISSET(status, SDHC_TRANSFER_COMPLETE)) {
2170 if (count == 0) {
2171 /*
2172 * If we've drained "watermark" words, we need to wait
2173 * a little bit so the read FIFO can refill.
2174 */
2175 sdmmc_delay(10);
2176 count = watermark;
2177 }
2178 v = HREAD4(hp, SDHC_DATA);
2179 v = le32toh(v);
2180 *(uint32_t *)data = v;
2181 data += 4;
2182 datalen -= 4;
2183 status = HREAD2(hp, SDHC_NINTR_STATUS);
2184 count--;
2185 }
2186 if (datalen > 0 && !ISSET(status, SDHC_TRANSFER_COMPLETE)) {
2187 if (count == 0) {
2188 sdmmc_delay(10);
2189 }
2190 v = HREAD4(hp, SDHC_DATA);
2191 v = le32toh(v);
2192 do {
2193 *data++ = v;
2194 v >>= 8;
2195 } while (--datalen > 0);
2196 }
2197 }
2198
2199 static void
2200 esdhc_write_data_pio(struct sdhc_host *hp, uint8_t *data, u_int datalen)
2201 {
2202 uint16_t status = HREAD2(hp, SDHC_NINTR_STATUS);
2203 uint32_t v;
2204
2205 const size_t watermark = (HREAD4(hp, SDHC_WATERMARK_LEVEL) >> SDHC_WATERMARK_WRITE_SHIFT) & SDHC_WATERMARK_WRITE_MASK;
2206 size_t count = watermark;
2207
2208 while (datalen > 3 && !ISSET(status, SDHC_TRANSFER_COMPLETE)) {
2209 if (count == 0) {
2210 sdmmc_delay(10);
2211 count = watermark;
2212 }
2213 v = *(uint32_t *)data;
2214 v = htole32(v);
2215 HWRITE4(hp, SDHC_DATA, v);
2216 data += 4;
2217 datalen -= 4;
2218 status = HREAD2(hp, SDHC_NINTR_STATUS);
2219 count--;
2220 }
2221 if (datalen > 0 && !ISSET(status, SDHC_TRANSFER_COMPLETE)) {
2222 if (count == 0) {
2223 sdmmc_delay(10);
2224 }
2225 v = *(uint32_t *)data;
2226 v = htole32(v);
2227 HWRITE4(hp, SDHC_DATA, v);
2228 }
2229 }
2230
2231 /* Prepare for another command. */
2232 static int
2233 sdhc_soft_reset(struct sdhc_host *hp, int mask)
2234 {
2235 int timo;
2236
2237 KASSERT(mutex_owned(&hp->intr_lock));
2238
2239 DPRINTF(1,("%s: software reset reg=%08x\n", HDEVNAME(hp), mask));
2240
2241 /* Request the reset. */
2242 HWRITE1(hp, SDHC_SOFTWARE_RESET, mask);
2243
2244 /*
2245 * If necessary, wait for the controller to set the bits to
2246 * acknowledge the reset.
2247 */
2248 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_WAIT_RESET) &&
2249 ISSET(mask, (SDHC_RESET_DAT | SDHC_RESET_CMD))) {
2250 for (timo = 10000; timo > 0; timo--) {
2251 if (ISSET(HREAD1(hp, SDHC_SOFTWARE_RESET), mask))
2252 break;
2253 /* Short delay because I worry we may miss it... */
2254 sdmmc_delay(1);
2255 }
2256 if (timo == 0) {
2257 DPRINTF(1,("%s: timeout for reset on\n", __func__));
2258 return ETIMEDOUT;
2259 }
2260 }
2261
2262 /*
2263 * Wait for the controller to clear the bits to indicate that
2264 * the reset has completed.
2265 */
2266 for (timo = 10; timo > 0; timo--) {
2267 if (!ISSET(HREAD1(hp, SDHC_SOFTWARE_RESET), mask))
2268 break;
2269 sdmmc_delay(10000);
2270 }
2271 if (timo == 0) {
2272 DPRINTF(1,("%s: timeout reg=%08x\n", HDEVNAME(hp),
2273 HREAD1(hp, SDHC_SOFTWARE_RESET)));
2274 return ETIMEDOUT;
2275 }
2276
2277 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED)) {
2278 HSET4(hp, SDHC_DMA_CTL, SDHC_DMA_SNOOP);
2279 }
2280
2281 return 0;
2282 }
2283
2284 static int
2285 sdhc_wait_intr(struct sdhc_host *hp, int mask, int timo, bool probing)
2286 {
2287 int status, error, nointr;
2288
2289 KASSERT(mutex_owned(&hp->intr_lock));
2290
2291 mask |= SDHC_ERROR_INTERRUPT;
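/*
 * Always include the error bit in the mask so that an error interrupt
 * terminates the wait.  On an error or a timeout the host is soft-reset
 * below (except on enhanced controllers) and 0 is returned, which
 * callers treat as failure.
 */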
2292
2293 nointr = 0;
2294 status = hp->intr_status & mask;
2295 while (status == 0) {
2296 if (cv_timedwait(&hp->intr_cv, &hp->intr_lock, timo)
2297 == EWOULDBLOCK) {
2298 nointr = 1;
2299 break;
2300 }
2301 status = hp->intr_status & mask;
2302 }
2303 error = hp->intr_error_status;
2304
2305 DPRINTF(2,("%s: intr status %#x error %#x\n", HDEVNAME(hp), status,
2306 error));
2307
2308 hp->intr_status &= ~status;
2309 hp->intr_error_status &= ~error;
2310
2311 if (ISSET(status, SDHC_ERROR_INTERRUPT)) {
2312 if (ISSET(error, SDHC_DMA_ERROR))
2313 device_printf(hp->sc->sc_dev,"dma error\n");
2314 if (ISSET(error, SDHC_ADMA_ERROR))
2315 device_printf(hp->sc->sc_dev,"adma error\n");
2316 if (ISSET(error, SDHC_AUTO_CMD12_ERROR))
2317 device_printf(hp->sc->sc_dev,"auto_cmd12 error\n");
2318 if (ISSET(error, SDHC_CURRENT_LIMIT_ERROR))
2319 device_printf(hp->sc->sc_dev,"current limit error\n");
2320 if (ISSET(error, SDHC_DATA_END_BIT_ERROR))
2321 device_printf(hp->sc->sc_dev,"data end bit error\n");
2322 if (ISSET(error, SDHC_DATA_CRC_ERROR))
2323 device_printf(hp->sc->sc_dev,"data crc error\n");
2324 if (ISSET(error, SDHC_DATA_TIMEOUT_ERROR))
2325 device_printf(hp->sc->sc_dev,"data timeout error\n");
2326 if (ISSET(error, SDHC_CMD_INDEX_ERROR))
2327 device_printf(hp->sc->sc_dev,"cmd index error\n");
2328 if (ISSET(error, SDHC_CMD_END_BIT_ERROR))
2329 device_printf(hp->sc->sc_dev,"cmd end bit error\n");
2330 if (ISSET(error, SDHC_CMD_CRC_ERROR))
2331 device_printf(hp->sc->sc_dev,"cmd crc error\n");
2332 if (ISSET(error, SDHC_CMD_TIMEOUT_ERROR)) {
2333 if (!probing)
2334 device_printf(hp->sc->sc_dev,"cmd timeout error\n");
2335 #ifdef SDHC_DEBUG
2336 else if (sdhcdebug > 0)
2337 device_printf(hp->sc->sc_dev,"cmd timeout (expected)\n");
2338 #endif
2339 }
2340 if ((error & ~SDHC_EINTR_STATUS_MASK) != 0)
2341 device_printf(hp->sc->sc_dev,"vendor error %#x\n",
2342 (error & ~SDHC_EINTR_STATUS_MASK));
2343 if (error == 0)
2344 device_printf(hp->sc->sc_dev,"no error\n");
2345
2346 /* Command timeout has higher priority than command complete. */
2347 if (ISSET(error, SDHC_CMD_TIMEOUT_ERROR))
2348 CLR(status, SDHC_COMMAND_COMPLETE);
2349
2350 /* Transfer complete has higher priority than data timeout. */
2351 if (ISSET(status, SDHC_TRANSFER_COMPLETE))
2352 CLR(error, SDHC_DATA_TIMEOUT_ERROR);
2353 }
2354
2355 if (nointr ||
2356 (ISSET(status, SDHC_ERROR_INTERRUPT) && error)) {
2357 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED))
2358 (void)sdhc_soft_reset(hp, SDHC_RESET_CMD|SDHC_RESET_DAT);
2359 hp->intr_error_status = 0;
2360 status = 0;
2361 }
2362
2363 return status;
2364 }
2365
2366 /*
2367 * Established by attachment driver at interrupt priority IPL_SDMMC.
2368 */
2369 int
2370 sdhc_intr(void *arg)
2371 {
2372 struct sdhc_softc *sc = (struct sdhc_softc *)arg;
2373 struct sdhc_host *hp;
2374 int done = 0;
2375 uint16_t status;
2376 uint16_t error;
2377
2378 /* We got an interrupt, but we don't know from which slot. */
2379 for (size_t host = 0; host < sc->sc_nhosts; host++) {
2380 hp = sc->sc_host[host];
2381 if (hp == NULL)
2382 continue;
2383
2384 mutex_enter(&hp->intr_lock);
2385
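/*
 * On controllers that only support 32-bit register access the normal
 * and error interrupt status registers are read as a single 32-bit
 * word, with the error bits in the upper half; otherwise they are read
 * and acknowledged as two separate 16-bit registers.
 */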
2386 if (ISSET(sc->sc_flags, SDHC_FLAG_32BIT_ACCESS)) {
2387 /* Find out which interrupts are pending. */
2388 uint32_t xstatus = HREAD4(hp, SDHC_NINTR_STATUS);
2389 status = xstatus;
2390 error = xstatus >> 16;
2391 if (ISSET(sc->sc_flags, SDHC_FLAG_USDHC) &&
2392 (xstatus & SDHC_TRANSFER_COMPLETE) &&
2393 !(xstatus & SDHC_DMA_INTERRUPT)) {
2394 /* read again due to uSDHC errata */
2395 status = xstatus = HREAD4(hp,
2396 SDHC_NINTR_STATUS);
2397 error = xstatus >> 16;
2398 }
2399 if (ISSET(sc->sc_flags,
2400 SDHC_FLAG_ENHANCED | SDHC_FLAG_USDHC)) {
2401 if ((error & SDHC_NINTR_STATUS_MASK) != 0)
2402 SET(status, SDHC_ERROR_INTERRUPT);
2403 }
2404 if (error)
2405 xstatus |= SDHC_ERROR_INTERRUPT;
2406 else if (!ISSET(status, SDHC_NINTR_STATUS_MASK))
2407 goto next_port; /* no interrupt for us */
2408 /* Acknowledge the interrupts we are about to handle. */
2409 HWRITE4(hp, SDHC_NINTR_STATUS, xstatus);
2410 } else {
2411 /* Find out which interrupts are pending. */
2412 error = 0;
2413 status = HREAD2(hp, SDHC_NINTR_STATUS);
2414 if (!ISSET(status, SDHC_NINTR_STATUS_MASK))
2415 goto next_port; /* no interrupt for us */
2416 /* Acknowledge the interrupts we are about to handle. */
2417 HWRITE2(hp, SDHC_NINTR_STATUS, status);
2418 if (ISSET(status, SDHC_ERROR_INTERRUPT)) {
2419 /* Acknowledge error interrupts. */
2420 error = HREAD2(hp, SDHC_EINTR_STATUS);
2421 HWRITE2(hp, SDHC_EINTR_STATUS, error);
2422 }
2423 }
2424
2425 DPRINTF(2,("%s: interrupt status=%x error=%x\n", HDEVNAME(hp),
2426 status, error));
2427
2428 /* Claim this interrupt. */
2429 done = 1;
2430
2431 if (ISSET(status, SDHC_ERROR_INTERRUPT) &&
2432 ISSET(error, SDHC_ADMA_ERROR)) {
2433 uint8_t adma_err = HREAD1(hp, SDHC_ADMA_ERROR_STATUS);
2434 printf("%s: ADMA error, status %02x\n", HDEVNAME(hp),
2435 adma_err);
2436 }
2437
2438 /*
2439 * Wake up the sdmmc event thread to scan for cards.
2440 */
2441 if (ISSET(status, SDHC_CARD_REMOVAL|SDHC_CARD_INSERTION)) {
2442 if (hp->sdmmc != NULL) {
2443 sdmmc_needs_discover(hp->sdmmc);
2444 }
2445 if (ISSET(sc->sc_flags,
2446 SDHC_FLAG_ENHANCED | SDHC_FLAG_USDHC)) {
2447 HCLR4(hp, SDHC_NINTR_STATUS_EN,
2448 status & (SDHC_CARD_REMOVAL|SDHC_CARD_INSERTION));
2449 HCLR4(hp, SDHC_NINTR_SIGNAL_EN,
2450 status & (SDHC_CARD_REMOVAL|SDHC_CARD_INSERTION));
2451 }
2452 }
2453
2454 /*
2455 * Schedule re-tuning process (UHS).
2456 */
2457 if (ISSET(status, SDHC_RETUNING_EVENT)) {
2458 atomic_swap_uint(&hp->tuning_timer_pending, 1);
2459 }
2460
2461 /*
2462 * Wake up the blocking process to service command
2463 * related interrupt(s).
2464 */
2465 if (ISSET(status, SDHC_COMMAND_COMPLETE|SDHC_ERROR_INTERRUPT|
2466 SDHC_BUFFER_READ_READY|SDHC_BUFFER_WRITE_READY|
2467 SDHC_TRANSFER_COMPLETE|SDHC_DMA_INTERRUPT)) {
2468 hp->intr_error_status |= error;
2469 hp->intr_status |= status;
2470 if (ISSET(sc->sc_flags,
2471 SDHC_FLAG_ENHANCED | SDHC_FLAG_USDHC)) {
2472 HCLR4(hp, SDHC_NINTR_SIGNAL_EN,
2473 status & (SDHC_BUFFER_READ_READY|SDHC_BUFFER_WRITE_READY));
2474 }
2475 cv_broadcast(&hp->intr_cv);
2476 }
2477
2478 /*
2479 * Service SD card interrupts.
2480 */
2481 if (!ISSET(sc->sc_flags, SDHC_FLAG_ENHANCED | SDHC_FLAG_USDHC)
2482 && ISSET(status, SDHC_CARD_INTERRUPT)) {
2483 DPRINTF(0,("%s: card interrupt\n", HDEVNAME(hp)));
2484 HCLR2(hp, SDHC_NINTR_STATUS_EN, SDHC_CARD_INTERRUPT);
2485 sdmmc_card_intr(hp->sdmmc);
2486 }
2487 next_port:
2488 mutex_exit(&hp->intr_lock);
2489 }
2490
2491 return done;
2492 }
2493
2494 kmutex_t *
2495 sdhc_host_lock(struct sdhc_host *hp)
2496 {
2497 return &hp->intr_lock;
2498 }
2499
2500 uint8_t
2501 sdhc_host_read_1(struct sdhc_host *hp, int reg)
2502 {
2503 return HREAD1(hp, reg);
2504 }
2505
2506 uint16_t
2507 sdhc_host_read_2(struct sdhc_host *hp, int reg)
2508 {
2509 return HREAD2(hp, reg);
2510 }
2511
2512 uint32_t
2513 sdhc_host_read_4(struct sdhc_host *hp, int reg)
2514 {
2515 return HREAD4(hp, reg);
2516 }
2517
2518 void
2519 sdhc_host_write_1(struct sdhc_host *hp, int reg, uint8_t val)
2520 {
2521 HWRITE1(hp, reg, val);
2522 }
2523
2524 void
2525 sdhc_host_write_2(struct sdhc_host *hp, int reg, uint16_t val)
2526 {
2527 HWRITE2(hp, reg, val);
2528 }
2529
2530 void
2531 sdhc_host_write_4(struct sdhc_host *hp, int reg, uint32_t val)
2532 {
2533 HWRITE4(hp, reg, val);
2534 }
2535
2536 #ifdef SDHC_DEBUG
2537 void
2538 sdhc_dump_regs(struct sdhc_host *hp)
2539 {
2540
2541 printf("0x%02x PRESENT_STATE: %x\n", SDHC_PRESENT_STATE,
2542 HREAD4(hp, SDHC_PRESENT_STATE));
2543 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED))
2544 printf("0x%02x POWER_CTL: %x\n", SDHC_POWER_CTL,
2545 HREAD1(hp, SDHC_POWER_CTL));
2546 printf("0x%02x NINTR_STATUS: %x\n", SDHC_NINTR_STATUS,
2547 HREAD2(hp, SDHC_NINTR_STATUS));
2548 printf("0x%02x EINTR_STATUS: %x\n", SDHC_EINTR_STATUS,
2549 HREAD2(hp, SDHC_EINTR_STATUS));
2550 printf("0x%02x NINTR_STATUS_EN: %x\n", SDHC_NINTR_STATUS_EN,
2551 HREAD2(hp, SDHC_NINTR_STATUS_EN));
2552 printf("0x%02x EINTR_STATUS_EN: %x\n", SDHC_EINTR_STATUS_EN,
2553 HREAD2(hp, SDHC_EINTR_STATUS_EN));
2554 printf("0x%02x NINTR_SIGNAL_EN: %x\n", SDHC_NINTR_SIGNAL_EN,
2555 HREAD2(hp, SDHC_NINTR_SIGNAL_EN));
2556 printf("0x%02x EINTR_SIGNAL_EN: %x\n", SDHC_EINTR_SIGNAL_EN,
2557 HREAD2(hp, SDHC_EINTR_SIGNAL_EN));
2558 printf("0x%02x CAPABILITIES: %x\n", SDHC_CAPABILITIES,
2559 HREAD4(hp, SDHC_CAPABILITIES));
2560 printf("0x%02x MAX_CAPABILITIES: %x\n", SDHC_MAX_CAPABILITIES,
2561 HREAD4(hp, SDHC_MAX_CAPABILITIES));
2562 }
2563 #endif
2564