xref: /netbsd-src/sys/dev/sdmmc/sdmmc_mem.c (revision bdc22b2e01993381dcefeff2bc9b56ca75a4235c)
1 /*	$NetBSD: sdmmc_mem.c,v 1.64 2018/02/07 14:42:07 bouyer Exp $	*/
2 /*	$OpenBSD: sdmmc_mem.c,v 1.10 2009/01/09 10:55:22 jsg Exp $	*/
3 
4 /*
5  * Copyright (c) 2006 Uwe Stuehler <uwe@openbsd.org>
6  *
7  * Permission to use, copy, modify, and distribute this software for any
8  * purpose with or without fee is hereby granted, provided that the above
9  * copyright notice and this permission notice appear in all copies.
10  *
11  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 /*-
21  * Copyright (C) 2007, 2008, 2009, 2010 NONAKA Kimihiro <nonaka@netbsd.org>
22  * All rights reserved.
23  *
24  * Redistribution and use in source and binary forms, with or without
25  * modification, are permitted provided that the following conditions
26  * are met:
27  * 1. Redistributions of source code must retain the above copyright
28  *    notice, this list of conditions and the following disclaimer.
29  * 2. Redistributions in binary form must reproduce the above copyright
30  *    notice, this list of conditions and the following disclaimer in the
31  *    documentation and/or other materials provided with the distribution.
32  *
33  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
34  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
35  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
36  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
37  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
38  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
39  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
40  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
41  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
42  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
43  */
44 
45 /* Routines for SD/MMC memory cards. */
46 
47 #include <sys/cdefs.h>
48 __KERNEL_RCSID(0, "$NetBSD: sdmmc_mem.c,v 1.64 2018/02/07 14:42:07 bouyer Exp $");
49 
50 #ifdef _KERNEL_OPT
51 #include "opt_sdmmc.h"
52 #endif
53 
54 #include <sys/param.h>
55 #include <sys/kernel.h>
56 #include <sys/malloc.h>
57 #include <sys/systm.h>
58 #include <sys/device.h>
59 #include <sys/bitops.h>
60 #include <sys/evcnt.h>
61 
62 #include <dev/sdmmc/sdmmcchip.h>
63 #include <dev/sdmmc/sdmmcreg.h>
64 #include <dev/sdmmc/sdmmcvar.h>
65 
66 #ifdef SDMMC_DEBUG
67 #define DPRINTF(s)	do { printf s; } while (/*CONSTCOND*/0)
68 #else
69 #define DPRINTF(s)	do {} while (/*CONSTCOND*/0)
70 #endif
71 
72 typedef struct { uint32_t _bits[512/32]; } __packed __aligned(4) sdmmc_bitfield512_t;
73 
74 static int sdmmc_mem_sd_init(struct sdmmc_softc *, struct sdmmc_function *);
75 static int sdmmc_mem_mmc_init(struct sdmmc_softc *, struct sdmmc_function *);
76 static int sdmmc_mem_send_cid(struct sdmmc_softc *, sdmmc_response *);
77 static int sdmmc_mem_send_csd(struct sdmmc_softc *, struct sdmmc_function *,
78     sdmmc_response *);
79 static int sdmmc_mem_send_scr(struct sdmmc_softc *, struct sdmmc_function *,
80     uint32_t *scr);
81 static int sdmmc_mem_decode_scr(struct sdmmc_softc *, struct sdmmc_function *);
82 static int sdmmc_mem_send_ssr(struct sdmmc_softc *, struct sdmmc_function *,
83     sdmmc_bitfield512_t *);
84 static int sdmmc_mem_decode_ssr(struct sdmmc_softc *, struct sdmmc_function *,
85     sdmmc_bitfield512_t *);
86 static int sdmmc_mem_send_cxd_data(struct sdmmc_softc *, int, void *, size_t);
87 static int sdmmc_set_bus_width(struct sdmmc_function *, int);
88 static int sdmmc_mem_sd_switch(struct sdmmc_function *, int, int, int, sdmmc_bitfield512_t *);
89 static int sdmmc_mem_mmc_switch(struct sdmmc_function *, uint8_t, uint8_t,
90     uint8_t, bool);
91 static int sdmmc_mem_signal_voltage(struct sdmmc_softc *, int);
92 static int sdmmc_mem_spi_read_ocr(struct sdmmc_softc *, uint32_t, uint32_t *);
93 static int sdmmc_mem_single_read_block(struct sdmmc_function *, uint32_t,
94     u_char *, size_t);
95 static int sdmmc_mem_single_write_block(struct sdmmc_function *, uint32_t,
96     u_char *, size_t);
97 static int sdmmc_mem_single_segment_dma_read_block(struct sdmmc_function *,
98     uint32_t, u_char *, size_t);
99 static int sdmmc_mem_single_segment_dma_write_block(struct sdmmc_function *,
100     uint32_t, u_char *, size_t);
101 static int sdmmc_mem_read_block_subr(struct sdmmc_function *, bus_dmamap_t,
102     uint32_t, u_char *, size_t);
103 static int sdmmc_mem_write_block_subr(struct sdmmc_function *, bus_dmamap_t,
104     uint32_t, u_char *, size_t);
105 
106 static const struct {
107 	const char *name;
108 	int v;
109 	int freq;
110 } switch_group0_functions[] = {
111 	/* Default/SDR12 */
112 	{ "Default/SDR12",	 0,			 25000 },
113 
114 	/* High-Speed/SDR25 */
115 	{ "High-Speed/SDR25",	SMC_CAPS_SD_HIGHSPEED,	 50000 },
116 
117 	/* SDR50 */
118 	{ "SDR50",		SMC_CAPS_UHS_SDR50,	100000 },
119 
120 	/* SDR104 */
121 	{ "SDR104",		SMC_CAPS_UHS_SDR104,	208000 },
122 
123 	/* DDR50 */
124 	{ "DDR50",		SMC_CAPS_UHS_DDR50,	 50000 },
125 };
126 
127 /*
128  * Initialize SD/MMC memory cards and memory in SDIO "combo" cards.
129  */
130 int
131 sdmmc_mem_enable(struct sdmmc_softc *sc)
132 {
133 	uint32_t host_ocr;
134 	uint32_t card_ocr;
135 	uint32_t new_ocr;
136 	uint32_t ocr = 0;
137 	int error;
138 
139 	SDMMC_LOCK(sc);
140 
141 	/* Set host mode to SD "combo" card or SD memory-only. */
142 	CLR(sc->sc_flags, SMF_UHS_MODE);
143 	SET(sc->sc_flags, SMF_SD_MODE|SMF_MEM_MODE);
144 
145 	if (ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE))
146 		sdmmc_spi_chip_initialize(sc->sc_spi_sct, sc->sc_sch);
147 
148 	/* Reset memory (*must* do that before CMD55 or CMD1). */
149 	sdmmc_go_idle_state(sc);
150 
151 	if (ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
152 		/* Check SD Ver.2 */
153 		error = sdmmc_mem_send_if_cond(sc, 0x1aa, &card_ocr);
154 		if (error == 0 && card_ocr == 0x1aa)
155 			SET(ocr, MMC_OCR_HCS);
156 	}
157 
158 	/*
159 	 * Read the SD/MMC memory OCR value by issuing CMD55 followed
160 	 * by ACMD41 to read the OCR value from memory-only SD cards.
161 	 * MMC cards will not respond to CMD55 or ACMD41 and this is
162 	 * how we distinguish them from SD cards.
163 	 */
164 mmc_mode:
165 	error = sdmmc_mem_send_op_cond(sc,
166 	    ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE) ? ocr : 0, &card_ocr);
167 	if (error) {
168 		if (ISSET(sc->sc_flags, SMF_SD_MODE) &&
169 		    !ISSET(sc->sc_flags, SMF_IO_MODE)) {
170 			/* Not a SD card, switch to MMC mode. */
171 			DPRINTF(("%s: switch to MMC mode\n", SDMMCDEVNAME(sc)));
172 			CLR(sc->sc_flags, SMF_SD_MODE);
173 			goto mmc_mode;
174 		}
175 		if (!ISSET(sc->sc_flags, SMF_SD_MODE)) {
176 			DPRINTF(("%s: couldn't read memory OCR\n",
177 			    SDMMCDEVNAME(sc)));
178 			goto out;
179 		} else {
180 			/* Not a "combo" card. */
181 			CLR(sc->sc_flags, SMF_MEM_MODE);
182 			error = 0;
183 			goto out;
184 		}
185 	}
186 	if (ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
187 		/* get card OCR */
188 		error = sdmmc_mem_spi_read_ocr(sc, ocr, &card_ocr);
189 		if (error) {
190 			DPRINTF(("%s: couldn't read SPI memory OCR\n",
191 			    SDMMCDEVNAME(sc)));
192 			goto out;
193 		}
194 	}
195 
196 	/* Set the lowest voltage supported by the card and host. */
197 	host_ocr = sdmmc_chip_host_ocr(sc->sc_sct, sc->sc_sch);
198 	error = sdmmc_set_bus_power(sc, host_ocr, card_ocr);
199 	if (error) {
200 		DPRINTF(("%s: couldn't supply voltage requested by card\n",
201 		    SDMMCDEVNAME(sc)));
202 		goto out;
203 	}
204 
205 	DPRINTF(("%s: host_ocr 0x%08x\n", SDMMCDEVNAME(sc), host_ocr));
206 	DPRINTF(("%s: card_ocr 0x%08x\n", SDMMCDEVNAME(sc), card_ocr));
207 
208 	host_ocr &= card_ocr; /* only allow the common voltages */
209 	if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
210 		if (ISSET(sc->sc_flags, SMF_SD_MODE)) {
211 			/* Tell the card(s) to enter the idle state (again). */
212 			sdmmc_go_idle_state(sc);
213 			/* Check SD Ver.2 */
214 			error = sdmmc_mem_send_if_cond(sc, 0x1aa, &card_ocr);
215 			if (error == 0 && card_ocr == 0x1aa)
216 				SET(ocr, MMC_OCR_HCS);
217 
218 			if (sdmmc_chip_host_ocr(sc->sc_sct, sc->sc_sch) & MMC_OCR_S18A)
219 				SET(ocr, MMC_OCR_S18A);
220 		} else {
221 			SET(ocr, MMC_OCR_ACCESS_MODE_SECTOR);
222 		}
223 	}
224 	host_ocr |= ocr;
225 
226 	/* Send the new OCR value until all cards are ready. */
227 	error = sdmmc_mem_send_op_cond(sc, host_ocr, &new_ocr);
228 	if (error) {
229 		DPRINTF(("%s: couldn't send memory OCR\n", SDMMCDEVNAME(sc)));
230 		goto out;
231 	}
232 
233 	if (ISSET(sc->sc_flags, SMF_SD_MODE) && ISSET(new_ocr, MMC_OCR_S18A)) {
234 		/*
235 		 * Card and host support low voltage mode, begin switch
236 		 * sequence.
237 		 */
238 		struct sdmmc_command cmd;
239 		memset(&cmd, 0, sizeof(cmd));
240 		cmd.c_arg = 0;
241 		cmd.c_flags = SCF_CMD_AC | SCF_RSP_R1;
242 		cmd.c_opcode = SD_VOLTAGE_SWITCH;
243 		DPRINTF(("%s: switching card to 1.8V\n", SDMMCDEVNAME(sc)));
244 		error = sdmmc_mmc_command(sc, &cmd);
245 		if (error) {
246 			DPRINTF(("%s: voltage switch command failed\n",
247 			    SDMMCDEVNAME(sc)));
248 			goto out;
249 		}
250 
251 		error = sdmmc_mem_signal_voltage(sc, SDMMC_SIGNAL_VOLTAGE_180);
252 		if (error)
253 			goto out;
254 
255 		SET(sc->sc_flags, SMF_UHS_MODE);
256 	}
257 
258 out:
259 	SDMMC_UNLOCK(sc);
260 
261 	if (error)
262 		printf("%s: %s failed with error %d\n", SDMMCDEVNAME(sc),
263 		    __func__, error);
264 
265 	return error;
266 }
267 
268 static int
269 sdmmc_mem_signal_voltage(struct sdmmc_softc *sc, int signal_voltage)
270 {
271 	int error;
272 
273 	/*
274 	 * Stop the clock
275 	 */
276 	error = sdmmc_chip_bus_clock(sc->sc_sct, sc->sc_sch,
277 	    SDMMC_SDCLK_OFF, false);
278 	if (error)
279 		goto out;
280 
281 	delay(1000);
282 
283 	/*
284 	 * Card switch command was successful, update host controller
285 	 * signal voltage setting.
286 	 */
287 	DPRINTF(("%s: switching host to %s\n", SDMMCDEVNAME(sc),
288 	    signal_voltage == SDMMC_SIGNAL_VOLTAGE_180 ? "1.8V" : "3.3V"));
289 	error = sdmmc_chip_signal_voltage(sc->sc_sct,
290 	    sc->sc_sch, signal_voltage);
291 	if (error)
292 		goto out;
293 
294 	delay(5000);
295 
296 	/*
297 	 * Switch to SDR12 timing
298 	 */
299 	error = sdmmc_chip_bus_clock(sc->sc_sct, sc->sc_sch, 25000,
300 	    false);
301 	if (error)
302 		goto out;
303 
304 	delay(1000);
305 
306 out:
307 	return error;
308 }
309 
310 /*
311  * Read the CSD and CID from all cards and assign each card a unique
312  * relative card address (RCA).  CMD2 is ignored by SDIO-only cards.
313  */
314 void
315 sdmmc_mem_scan(struct sdmmc_softc *sc)
316 {
317 	sdmmc_response resp;
318 	struct sdmmc_function *sf;
319 	uint16_t next_rca;
320 	int error;
321 	int retry;
322 
323 	SDMMC_LOCK(sc);
324 
325 	/*
326 	 * CMD2 is a broadcast command understood by SD cards and MMC
327 	 * cards.  All cards begin to respond to the command, but back
328 	 * off if another card drives the CMD line to a different level.
329 	 * Only one card will get its entire response through.  That
330 	 * card remains silent once it has been assigned a RCA.
331 	 */
332 	for (retry = 0; retry < 100; retry++) {
333 		error = sdmmc_mem_send_cid(sc, &resp);
334 		if (error) {
335 			if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE) &&
336 			    error == ETIMEDOUT) {
337 				/* No more cards there. */
338 				break;
339 			}
340 			DPRINTF(("%s: couldn't read CID\n", SDMMCDEVNAME(sc)));
341 			break;
342 		}
343 
344 		/* In MMC mode, find the next available RCA. */
345 		next_rca = 1;
346 		if (!ISSET(sc->sc_flags, SMF_SD_MODE)) {
347 			SIMPLEQ_FOREACH(sf, &sc->sf_head, sf_list)
348 				next_rca++;
349 		}
350 
351 		/* Allocate a sdmmc_function structure. */
352 		sf = sdmmc_function_alloc(sc);
353 		sf->rca = next_rca;
354 
355 		/*
356 		 * Remember the CID returned in the CMD2 response for
357 		 * later decoding.
358 		 */
359 		memcpy(sf->raw_cid, resp, sizeof(sf->raw_cid));
360 
361 		/*
362 		 * Silence the card by assigning it a unique RCA, or
363 		 * querying it for its RCA in the case of SD.
364 		 */
365 		if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
366 			if (sdmmc_set_relative_addr(sc, sf) != 0) {
367 				aprint_error_dev(sc->sc_dev,
368 				    "couldn't set mem RCA\n");
369 				sdmmc_function_free(sf);
370 				break;
371 			}
372 		}
373 
374 		/*
375 		 * If this is a memory-only card, the card responding
376 		 * first becomes an alias for SDIO function 0.
377 		 */
378 		if (sc->sc_fn0 == NULL)
379 			sc->sc_fn0 = sf;
380 
381 		SIMPLEQ_INSERT_TAIL(&sc->sf_head, sf, sf_list);
382 
383 		/* only one function in SPI mode */
384 		if (ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE))
385 			break;
386 	}
387 
388 	if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE))
389 		/* Go to Data Transfer Mode, if possible. */
390 		sdmmc_chip_bus_rod(sc->sc_sct, sc->sc_sch, 0);
391 
392 	/*
393 	 * All cards are either inactive or awaiting further commands.
394 	 * Read the CSDs and decode the raw CID for each card.
395 	 */
396 	SIMPLEQ_FOREACH(sf, &sc->sf_head, sf_list) {
397 		error = sdmmc_mem_send_csd(sc, sf, &resp);
398 		if (error) {
399 			SET(sf->flags, SFF_ERROR);
400 			continue;
401 		}
402 
403 		if (sdmmc_decode_csd(sc, resp, sf) != 0 ||
404 		    sdmmc_decode_cid(sc, sf->raw_cid, sf) != 0) {
405 			SET(sf->flags, SFF_ERROR);
406 			continue;
407 		}
408 
409 #ifdef SDMMC_DEBUG
410 		printf("%s: CID: ", SDMMCDEVNAME(sc));
411 		sdmmc_print_cid(&sf->cid);
412 #endif
413 	}
414 
415 	SDMMC_UNLOCK(sc);
416 }
417 
418 int
419 sdmmc_decode_csd(struct sdmmc_softc *sc, sdmmc_response resp,
420     struct sdmmc_function *sf)
421 {
422 	/* TRAN_SPEED(2:0): transfer rate exponent */
423 	static const int speed_exponent[8] = {
424 		100 *    1,	/* 100 Kbits/s */
425 		  1 * 1000,	/*   1 Mbits/s */
426 		 10 * 1000,	/*  10 Mbits/s */
427 		100 * 1000,	/* 100 Mbits/s */
428 		         0,
429 		         0,
430 		         0,
431 		         0,
432 	};
433 	/* TRAN_SPEED(6:3): time mantissa */
434 	static const int speed_mantissa[16] = {
435 		0, 10, 12, 13, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60, 70, 80,
436 	};
437 	struct sdmmc_csd *csd = &sf->csd;
438 	int e, m;
439 
440 	if (ISSET(sc->sc_flags, SMF_SD_MODE)) {
441 		/*
442 		 * CSD version 1.0 corresponds to SD system
443 		 * specification version 1.0 - 1.10. (SanDisk, 3.5.3)
444 		 */
445 		csd->csdver = SD_CSD_CSDVER(resp);
446 		switch (csd->csdver) {
447 		case SD_CSD_CSDVER_2_0:
448 			DPRINTF(("%s: SD Ver.2.0\n", SDMMCDEVNAME(sc)));
449 			SET(sf->flags, SFF_SDHC);
450 			csd->capacity = SD_CSD_V2_CAPACITY(resp);
451 			csd->read_bl_len = SD_CSD_V2_BL_LEN;
452 			break;
453 
454 		case SD_CSD_CSDVER_1_0:
455 			DPRINTF(("%s: SD Ver.1.0\n", SDMMCDEVNAME(sc)));
456 			csd->capacity = SD_CSD_CAPACITY(resp);
457 			csd->read_bl_len = SD_CSD_READ_BL_LEN(resp);
458 			break;
459 
460 		default:
461 			aprint_error_dev(sc->sc_dev,
462 			    "unknown SD CSD structure version 0x%x\n",
463 			    csd->csdver);
464 			return 1;
465 		}
466 
467 		csd->mmcver = SD_CSD_MMCVER(resp);
468 		csd->write_bl_len = SD_CSD_WRITE_BL_LEN(resp);
469 		csd->r2w_factor = SD_CSD_R2W_FACTOR(resp);
470 		e = SD_CSD_SPEED_EXP(resp);
471 		m = SD_CSD_SPEED_MANT(resp);
472 		csd->tran_speed = speed_exponent[e] * speed_mantissa[m] / 10;
473 		csd->ccc = SD_CSD_CCC(resp);
474 	} else {
475 		csd->csdver = MMC_CSD_CSDVER(resp);
476 		if (csd->csdver == MMC_CSD_CSDVER_1_0) {
477 			aprint_error_dev(sc->sc_dev,
478 			    "unknown MMC CSD structure version 0x%x\n",
479 			    csd->csdver);
480 			return 1;
481 		}
482 
483 		csd->mmcver = MMC_CSD_MMCVER(resp);
484 		csd->capacity = MMC_CSD_CAPACITY(resp);
485 		csd->read_bl_len = MMC_CSD_READ_BL_LEN(resp);
486 		csd->write_bl_len = MMC_CSD_WRITE_BL_LEN(resp);
487 		csd->r2w_factor = MMC_CSD_R2W_FACTOR(resp);
488 		e = MMC_CSD_TRAN_SPEED_EXP(resp);
489 		m = MMC_CSD_TRAN_SPEED_MANT(resp);
490 		csd->tran_speed = speed_exponent[e] * speed_mantissa[m] / 10;
491 	}
492 	if ((1 << csd->read_bl_len) > SDMMC_SECTOR_SIZE)
493 		csd->capacity *= (1 << csd->read_bl_len) / SDMMC_SECTOR_SIZE;
494 
495 #ifdef SDMMC_DUMP_CSD
496 	sdmmc_print_csd(resp, csd);
497 #endif
498 
499 	return 0;
500 }
501 
502 int
503 sdmmc_decode_cid(struct sdmmc_softc *sc, sdmmc_response resp,
504     struct sdmmc_function *sf)
505 {
506 	struct sdmmc_cid *cid = &sf->cid;
507 
508 	if (ISSET(sc->sc_flags, SMF_SD_MODE)) {
509 		cid->mid = SD_CID_MID(resp);
510 		cid->oid = SD_CID_OID(resp);
511 		SD_CID_PNM_CPY(resp, cid->pnm);
512 		cid->rev = SD_CID_REV(resp);
513 		cid->psn = SD_CID_PSN(resp);
514 		cid->mdt = SD_CID_MDT(resp);
515 	} else {
516 		switch(sf->csd.mmcver) {
517 		case MMC_CSD_MMCVER_1_0:
518 		case MMC_CSD_MMCVER_1_4:
519 			cid->mid = MMC_CID_MID_V1(resp);
520 			MMC_CID_PNM_V1_CPY(resp, cid->pnm);
521 			cid->rev = MMC_CID_REV_V1(resp);
522 			cid->psn = MMC_CID_PSN_V1(resp);
523 			cid->mdt = MMC_CID_MDT_V1(resp);
524 			break;
525 		case MMC_CSD_MMCVER_2_0:
526 		case MMC_CSD_MMCVER_3_1:
527 		case MMC_CSD_MMCVER_4_0:
528 			cid->mid = MMC_CID_MID_V2(resp);
529 			cid->oid = MMC_CID_OID_V2(resp);
530 			MMC_CID_PNM_V2_CPY(resp, cid->pnm);
531 			cid->psn = MMC_CID_PSN_V2(resp);
532 			break;
533 		default:
534 			aprint_error_dev(sc->sc_dev, "unknown MMC version %d\n",
535 			    sf->csd.mmcver);
536 			return 1;
537 		}
538 	}
539 	return 0;
540 }
541 
542 void
543 sdmmc_print_cid(struct sdmmc_cid *cid)
544 {
545 
546 	printf("mid=0x%02x oid=0x%04x pnm=\"%s\" rev=0x%02x psn=0x%08x"
547 	    " mdt=%03x\n", cid->mid, cid->oid, cid->pnm, cid->rev, cid->psn,
548 	    cid->mdt);
549 }
550 
#ifdef SDMMC_DUMP_CSD
/*
 * Dump the decoded CSD fields (compile-time debugging aid).
 */
void
sdmmc_print_csd(sdmmc_response resp, struct sdmmc_csd *csd)
{

	printf("csdver = %d\n", csd->csdver);
	printf("mmcver = %d\n", csd->mmcver);
	printf("capacity = 0x%08x\n", csd->capacity);
	printf("read_bl_len = %d\n", csd->read_bl_len);
	printf("write_bl_len = %d\n", csd->write_bl_len);
	printf("r2w_factor = %d\n", csd->r2w_factor);
	printf("tran_speed = %d\n", csd->tran_speed);
	printf("ccc = 0x%x\n", csd->ccc);
}
#endif
566 
567 /*
568  * Initialize a SD/MMC memory card.
569  */
570 int
571 sdmmc_mem_init(struct sdmmc_softc *sc, struct sdmmc_function *sf)
572 {
573 	int error = 0;
574 
575 	SDMMC_LOCK(sc);
576 
577 	if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
578 		error = sdmmc_select_card(sc, sf);
579 		if (error)
580 			goto out;
581 	}
582 
583 	error = sdmmc_mem_set_blocklen(sc, sf, SDMMC_SECTOR_SIZE);
584 	if (error)
585 		goto out;
586 
587 	if (ISSET(sc->sc_flags, SMF_SD_MODE))
588 		error = sdmmc_mem_sd_init(sc, sf);
589 	else
590 		error = sdmmc_mem_mmc_init(sc, sf);
591 
592 out:
593 	SDMMC_UNLOCK(sc);
594 
595 	return error;
596 }
597 
598 /*
599  * Get or set the card's memory OCR value (SD or MMC).
600  */
601 int
602 sdmmc_mem_send_op_cond(struct sdmmc_softc *sc, uint32_t ocr, uint32_t *ocrp)
603 {
604 	struct sdmmc_command cmd;
605 	int error;
606 	int retry;
607 
608 	/* Don't lock */
609 
610 	DPRINTF(("%s: sdmmc_mem_send_op_cond: ocr=%#x\n",
611 	    SDMMCDEVNAME(sc), ocr));
612 
613 	/*
614 	 * If we change the OCR value, retry the command until the OCR
615 	 * we receive in response has the "CARD BUSY" bit set, meaning
616 	 * that all cards are ready for identification.
617 	 */
618 	for (retry = 0; retry < 100; retry++) {
619 		memset(&cmd, 0, sizeof(cmd));
620 		cmd.c_arg = !ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE) ?
621 		    ocr : (ocr & MMC_OCR_HCS);
622 		cmd.c_flags = SCF_CMD_BCR | SCF_RSP_R3 | SCF_RSP_SPI_R1
623 		    | SCF_TOUT_OK;
624 
625 		if (ISSET(sc->sc_flags, SMF_SD_MODE)) {
626 			cmd.c_opcode = SD_APP_OP_COND;
627 			error = sdmmc_app_command(sc, NULL, &cmd);
628 		} else {
629 			cmd.c_opcode = MMC_SEND_OP_COND;
630 			error = sdmmc_mmc_command(sc, &cmd);
631 		}
632 		if (error)
633 			break;
634 
635 		if (ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
636 			if (!ISSET(MMC_SPI_R1(cmd.c_resp), R1_SPI_IDLE))
637 				break;
638 		} else {
639 			if (ISSET(MMC_R3(cmd.c_resp), MMC_OCR_MEM_READY) ||
640 			    ocr == 0)
641 				break;
642 		}
643 
644 		error = ETIMEDOUT;
645 		sdmmc_delay(10000);
646 	}
647 	if (ocrp != NULL) {
648 		if (error == 0 &&
649 		    !ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
650 			*ocrp = MMC_R3(cmd.c_resp);
651 		} else {
652 			*ocrp = ocr;
653 		}
654 	}
655 	DPRINTF(("%s: sdmmc_mem_send_op_cond: error=%d, ocr=%#x\n",
656 	    SDMMCDEVNAME(sc), error, MMC_R3(cmd.c_resp)));
657 	return error;
658 }
659 
660 int
661 sdmmc_mem_send_if_cond(struct sdmmc_softc *sc, uint32_t ocr, uint32_t *ocrp)
662 {
663 	struct sdmmc_command cmd;
664 	int error;
665 
666 	/* Don't lock */
667 
668 	memset(&cmd, 0, sizeof(cmd));
669 	cmd.c_arg = ocr;
670 	cmd.c_flags = SCF_CMD_BCR | SCF_RSP_R7 | SCF_RSP_SPI_R7;
671 	cmd.c_opcode = SD_SEND_IF_COND;
672 
673 	error = sdmmc_mmc_command(sc, &cmd);
674 	if (error == 0 && ocrp != NULL) {
675 		if (ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
676 			*ocrp = MMC_SPI_R7(cmd.c_resp);
677 		} else {
678 			*ocrp = MMC_R7(cmd.c_resp);
679 		}
680 		DPRINTF(("%s: sdmmc_mem_send_if_cond: error=%d, ocr=%#x\n",
681 		    SDMMCDEVNAME(sc), error, *ocrp));
682 	}
683 	return error;
684 }
685 
686 /*
687  * Set the read block length appropriately for this card, according to
688  * the card CSD register value.
689  */
690 int
691 sdmmc_mem_set_blocklen(struct sdmmc_softc *sc, struct sdmmc_function *sf,
692    int block_len)
693 {
694 	struct sdmmc_command cmd;
695 	int error;
696 
697 	/* Don't lock */
698 
699 	memset(&cmd, 0, sizeof(cmd));
700 	cmd.c_opcode = MMC_SET_BLOCKLEN;
701 	cmd.c_arg = block_len;
702 	cmd.c_flags = SCF_CMD_AC | SCF_RSP_R1 | SCF_RSP_SPI_R1;
703 
704 	error = sdmmc_mmc_command(sc, &cmd);
705 
706 	DPRINTF(("%s: sdmmc_mem_set_blocklen: read_bl_len=%d sector_size=%d\n",
707 	    SDMMCDEVNAME(sc), 1 << sf->csd.read_bl_len, block_len));
708 
709 	return error;
710 }
711 
712 /* make 512-bit BE quantity __bitfield()-compatible */
713 static void
714 sdmmc_be512_to_bitfield512(sdmmc_bitfield512_t *buf) {
715 	size_t i;
716 	uint32_t tmp0, tmp1;
717 	const size_t bitswords = __arraycount(buf->_bits);
718 	for (i = 0; i < bitswords/2; i++) {
719 		tmp0 = buf->_bits[i];
720 		tmp1 = buf->_bits[bitswords - 1 - i];
721 		buf->_bits[i] = be32toh(tmp1);
722 		buf->_bits[bitswords - 1 - i] = be32toh(tmp0);
723 	}
724 }
725 
726 static int
727 sdmmc_mem_select_transfer_mode(struct sdmmc_softc *sc, int support_func)
728 {
729 	if (ISSET(sc->sc_flags, SMF_UHS_MODE)) {
730 		if (ISSET(sc->sc_caps, SMC_CAPS_UHS_SDR104) &&
731 		    ISSET(support_func, 1 << SD_ACCESS_MODE_SDR104)) {
732 			return SD_ACCESS_MODE_SDR104;
733 		}
734 		if (ISSET(sc->sc_caps, SMC_CAPS_UHS_DDR50) &&
735 		    ISSET(support_func, 1 << SD_ACCESS_MODE_DDR50)) {
736 			return SD_ACCESS_MODE_DDR50;
737 		}
738 		if (ISSET(sc->sc_caps, SMC_CAPS_UHS_SDR50) &&
739 		    ISSET(support_func, 1 << SD_ACCESS_MODE_SDR50)) {
740 			return SD_ACCESS_MODE_SDR50;
741 		}
742 	}
743 	if (ISSET(sc->sc_caps, SMC_CAPS_SD_HIGHSPEED) &&
744 	    ISSET(support_func, 1 << SD_ACCESS_MODE_SDR25)) {
745 		return SD_ACCESS_MODE_SDR25;
746 	}
747 	return SD_ACCESS_MODE_SDR12;
748 }
749 
750 static int
751 sdmmc_mem_execute_tuning(struct sdmmc_softc *sc, struct sdmmc_function *sf)
752 {
753 	int timing = -1;
754 
755 	if (ISSET(sc->sc_flags, SMF_SD_MODE)) {
756 		if (!ISSET(sc->sc_flags, SMF_UHS_MODE))
757 			return 0;
758 
759 		switch (sf->csd.tran_speed) {
760 		case 100000:
761 			timing = SDMMC_TIMING_UHS_SDR50;
762 			break;
763 		case 208000:
764 			timing = SDMMC_TIMING_UHS_SDR104;
765 			break;
766 		default:
767 			return 0;
768 		}
769 	} else {
770 		switch (sf->csd.tran_speed) {
771 		case 200000:
772 			timing = SDMMC_TIMING_MMC_HS200;
773 			break;
774 		default:
775 			return 0;
776 		}
777 	}
778 
779 	DPRINTF(("%s: execute tuning for timing %d\n", SDMMCDEVNAME(sc),
780 	    timing));
781 
782 	return sdmmc_chip_execute_tuning(sc->sc_sct, sc->sc_sch, timing);
783 }
784 
785 static int
786 sdmmc_mem_sd_init(struct sdmmc_softc *sc, struct sdmmc_function *sf)
787 {
788 	int support_func, best_func, bus_clock, error, i;
789 	sdmmc_bitfield512_t status;
790 	bool ddr = false;
791 
792 	/* change bus clock */
793 	bus_clock = min(sc->sc_busclk, sf->csd.tran_speed);
794 	error = sdmmc_chip_bus_clock(sc->sc_sct, sc->sc_sch, bus_clock, false);
795 	if (error) {
796 		aprint_error_dev(sc->sc_dev, "can't change bus clock\n");
797 		return error;
798 	}
799 
800 	error = sdmmc_mem_send_scr(sc, sf, sf->raw_scr);
801 	if (error) {
802 		aprint_error_dev(sc->sc_dev, "SD_SEND_SCR send failed.\n");
803 		return error;
804 	}
805 	error = sdmmc_mem_decode_scr(sc, sf);
806 	if (error)
807 		return error;
808 
809 	if (ISSET(sc->sc_caps, SMC_CAPS_4BIT_MODE) &&
810 	    ISSET(sf->scr.bus_width, SCR_SD_BUS_WIDTHS_4BIT)) {
811 		DPRINTF(("%s: change bus width\n", SDMMCDEVNAME(sc)));
812 		error = sdmmc_set_bus_width(sf, 4);
813 		if (error) {
814 			aprint_error_dev(sc->sc_dev,
815 			    "can't change bus width (%d bit)\n", 4);
816 			return error;
817 		}
818 		sf->width = 4;
819 	}
820 
821 	best_func = 0;
822 	if (sf->scr.sd_spec >= SCR_SD_SPEC_VER_1_10 &&
823 	    ISSET(sf->csd.ccc, SD_CSD_CCC_SWITCH)) {
824 		DPRINTF(("%s: switch func mode 0\n", SDMMCDEVNAME(sc)));
825 		error = sdmmc_mem_sd_switch(sf, 0, 1, 0, &status);
826 		if (error) {
827 			aprint_error_dev(sc->sc_dev,
828 			    "switch func mode 0 failed\n");
829 			return error;
830 		}
831 
832 		support_func = SFUNC_STATUS_GROUP(&status, 1);
833 
834 		if (!ISSET(sc->sc_flags, SMF_UHS_MODE) && support_func & 0x1c) {
835 			/* XXX UHS-I card started in 1.8V mode, switch now */
836 			error = sdmmc_mem_signal_voltage(sc,
837 			    SDMMC_SIGNAL_VOLTAGE_180);
838 			if (error) {
839 				aprint_error_dev(sc->sc_dev,
840 				    "failed to recover UHS card\n");
841 				return error;
842 			}
843 			SET(sc->sc_flags, SMF_UHS_MODE);
844 		}
845 
846 		for (i = 0; i < __arraycount(switch_group0_functions); i++) {
847 			if (!(support_func & (1 << i)))
848 				continue;
849 			DPRINTF(("%s: card supports mode %s\n",
850 			    SDMMCDEVNAME(sc),
851 			    switch_group0_functions[i].name));
852 		}
853 
854 		best_func = sdmmc_mem_select_transfer_mode(sc, support_func);
855 
856 		DPRINTF(("%s: using mode %s\n", SDMMCDEVNAME(sc),
857 		    switch_group0_functions[best_func].name));
858 
859 		if (best_func != 0) {
860 			DPRINTF(("%s: switch func mode 1(func=%d)\n",
861 			    SDMMCDEVNAME(sc), best_func));
862 			error =
863 			    sdmmc_mem_sd_switch(sf, 1, 1, best_func, &status);
864 			if (error) {
865 				aprint_error_dev(sc->sc_dev,
866 				    "switch func mode 1 failed:"
867 				    " group 1 function %d(0x%2x)\n",
868 				    best_func, support_func);
869 				return error;
870 			}
871 			sf->csd.tran_speed =
872 			    switch_group0_functions[best_func].freq;
873 
874 			if (best_func == SD_ACCESS_MODE_DDR50)
875 				ddr = true;
876 
877 			/* Wait 400KHz x 8 clock (2.5us * 8 + slop) */
878 			delay(25);
879 		}
880 	}
881 
882 	/* update bus clock */
883 	if (sc->sc_busclk > sf->csd.tran_speed)
884 		sc->sc_busclk = sf->csd.tran_speed;
885 	if (sc->sc_busclk == bus_clock && sc->sc_busddr == ddr)
886 		return 0;
887 
888 	/* change bus clock */
889 	error = sdmmc_chip_bus_clock(sc->sc_sct, sc->sc_sch, sc->sc_busclk,
890 	    ddr);
891 	if (error) {
892 		aprint_error_dev(sc->sc_dev, "can't change bus clock\n");
893 		return error;
894 	}
895 
896 	sc->sc_transfer_mode = switch_group0_functions[best_func].name;
897 	sc->sc_busddr = ddr;
898 
899 	/* get card status */
900 	error = sdmmc_mem_send_ssr(sc, sf, &status);
901 	if (error) {
902 		aprint_error_dev(sc->sc_dev, "can't get SD status: %d\n",
903 		    error);
904 		return error;
905 	}
906 	sdmmc_mem_decode_ssr(sc, sf, &status);
907 
908 	/* execute tuning (UHS) */
909 	error = sdmmc_mem_execute_tuning(sc, sf);
910 	if (error) {
911 		aprint_error_dev(sc->sc_dev, "can't execute SD tuning\n");
912 		return error;
913 	}
914 
915 	return 0;
916 }
917 
918 static int
919 sdmmc_mem_mmc_init(struct sdmmc_softc *sc, struct sdmmc_function *sf)
920 {
921 	int width, value, hs_timing, bus_clock, error;
922 	uint8_t ext_csd[512];
923 	uint32_t sectors = 0;
924 	bool ddr = false;
925 
926 	sc->sc_transfer_mode = NULL;
927 
928 	/* change bus clock */
929 	bus_clock = min(sc->sc_busclk, sf->csd.tran_speed);
930 	error = sdmmc_chip_bus_clock(sc->sc_sct, sc->sc_sch, bus_clock, false);
931 	if (error) {
932 		aprint_error_dev(sc->sc_dev, "can't change bus clock\n");
933 		return error;
934 	}
935 
936 	if (sf->csd.mmcver >= MMC_CSD_MMCVER_4_0) {
937 		error = sdmmc_mem_send_cxd_data(sc,
938 		    MMC_SEND_EXT_CSD, ext_csd, sizeof(ext_csd));
939 		if (error) {
940 			aprint_error_dev(sc->sc_dev,
941 			    "can't read EXT_CSD (error=%d)\n", error);
942 			return error;
943 		}
944 		if ((sf->csd.csdver == MMC_CSD_CSDVER_EXT_CSD) &&
945 		    (ext_csd[EXT_CSD_STRUCTURE] > EXT_CSD_STRUCTURE_VER_1_2)) {
946 			aprint_error_dev(sc->sc_dev,
947 			    "unrecognised future version (%d)\n",
948 				ext_csd[EXT_CSD_STRUCTURE]);
949 			return ENOTSUP;
950 		}
951 		sf->ext_csd.rev = ext_csd[EXT_CSD_REV];
952 
953 		if (ISSET(sc->sc_caps, SMC_CAPS_MMC_HS200) &&
954 		    ext_csd[EXT_CSD_CARD_TYPE] & EXT_CSD_CARD_TYPE_F_HS200_1_8V) {
955 			sf->csd.tran_speed = 200000;	/* 200MHz SDR */
956 			hs_timing = EXT_CSD_HS_TIMING_HS200;
957 		} else if (ISSET(sc->sc_caps, SMC_CAPS_MMC_DDR52) &&
958 		    ext_csd[EXT_CSD_CARD_TYPE] & EXT_CSD_CARD_TYPE_F_DDR52_1_8V) {
959 			sf->csd.tran_speed = 52000;	/* 52MHz */
960 			hs_timing = EXT_CSD_HS_TIMING_HIGHSPEED;
961 			ddr = true;
962 		} else if (ext_csd[EXT_CSD_CARD_TYPE] & EXT_CSD_CARD_TYPE_F_52M) {
963 			sf->csd.tran_speed = 52000;	/* 52MHz */
964 			hs_timing = EXT_CSD_HS_TIMING_HIGHSPEED;
965 		} else if (ext_csd[EXT_CSD_CARD_TYPE] & EXT_CSD_CARD_TYPE_F_26M) {
966 			sf->csd.tran_speed = 26000;	/* 26MHz */
967 			hs_timing = EXT_CSD_HS_TIMING_LEGACY;
968 		} else {
969 			aprint_error_dev(sc->sc_dev,
970 			    "unknown CARD_TYPE: 0x%x\n",
971 			    ext_csd[EXT_CSD_CARD_TYPE]);
972 			return ENOTSUP;
973 		}
974 
975 		if (ISSET(sc->sc_caps, SMC_CAPS_8BIT_MODE)) {
976 			width = 8;
977 			value = EXT_CSD_BUS_WIDTH_8;
978 		} else if (ISSET(sc->sc_caps, SMC_CAPS_4BIT_MODE)) {
979 			width = 4;
980 			value = EXT_CSD_BUS_WIDTH_4;
981 		} else {
982 			width = 1;
983 			value = EXT_CSD_BUS_WIDTH_1;
984 		}
985 
986 		if (width != 1) {
987 			error = sdmmc_mem_mmc_switch(sf, EXT_CSD_CMD_SET_NORMAL,
988 			    EXT_CSD_BUS_WIDTH, value, false);
989 			if (error == 0)
990 				error = sdmmc_chip_bus_width(sc->sc_sct,
991 				    sc->sc_sch, width);
992 			else {
993 				DPRINTF(("%s: can't change bus width"
994 				    " (%d bit)\n", SDMMCDEVNAME(sc), width));
995 				return error;
996 			}
997 
998 			/* XXXX: need bus test? (using by CMD14 & CMD19) */
999 			delay(10000);
1000 		}
1001 		sf->width = width;
1002 
1003 		if (hs_timing == EXT_CSD_HS_TIMING_HIGHSPEED &&
1004 		    !ISSET(sc->sc_caps, SMC_CAPS_MMC_HIGHSPEED)) {
1005 			hs_timing = EXT_CSD_HS_TIMING_LEGACY;
1006 		}
1007 		if (hs_timing != EXT_CSD_HS_TIMING_LEGACY) {
1008 			error = sdmmc_mem_mmc_switch(sf, EXT_CSD_CMD_SET_NORMAL,
1009 			    EXT_CSD_HS_TIMING, hs_timing, false);
1010 			if (error) {
1011 				aprint_error_dev(sc->sc_dev,
1012 				    "can't change high speed %d, error %d\n",
1013 				    hs_timing, error);
1014 				return error;
1015 			}
1016 		}
1017 
1018 		if (sc->sc_busclk > sf->csd.tran_speed)
1019 			sc->sc_busclk = sf->csd.tran_speed;
1020 		if (sc->sc_busclk != bus_clock) {
1021 			error = sdmmc_chip_bus_clock(sc->sc_sct, sc->sc_sch,
1022 			    sc->sc_busclk, false);
1023 			if (error) {
1024 				aprint_error_dev(sc->sc_dev,
1025 				    "can't change bus clock\n");
1026 				return error;
1027 			}
1028 		}
1029 
1030 		if (hs_timing != EXT_CSD_HS_TIMING_LEGACY) {
1031 			error = sdmmc_mem_send_cxd_data(sc,
1032 			    MMC_SEND_EXT_CSD, ext_csd, sizeof(ext_csd));
1033 			if (error) {
1034 				aprint_error_dev(sc->sc_dev,
1035 				    "can't re-read EXT_CSD\n");
1036 				return error;
1037 			}
1038 			if (ext_csd[EXT_CSD_HS_TIMING] != hs_timing) {
1039 				aprint_error_dev(sc->sc_dev,
1040 				    "HS_TIMING set failed\n");
1041 				return EINVAL;
1042 			}
1043 		}
1044 
1045 		/*
1046 		 * HS_TIMING must be set to “0x1” before setting BUS_WIDTH
1047 		 * for dual data rate operation
1048 		 */
1049 		if (ddr &&
1050 		    hs_timing == EXT_CSD_HS_TIMING_HIGHSPEED &&
1051 		    width > 1) {
1052 			error = sdmmc_mem_mmc_switch(sf,
1053 			    EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BUS_WIDTH,
1054 			    (width == 8) ? EXT_CSD_BUS_WIDTH_8_DDR :
1055 			      EXT_CSD_BUS_WIDTH_4_DDR, false);
1056 			if (error) {
1057 				DPRINTF(("%s: can't switch to DDR"
1058 				    " (%d bit)\n", SDMMCDEVNAME(sc), width));
1059 				return error;
1060 			}
1061 
1062 			delay(10000);
1063 
1064 			error = sdmmc_mem_signal_voltage(sc,
1065 			    SDMMC_SIGNAL_VOLTAGE_180);
1066 			if (error) {
1067 				aprint_error_dev(sc->sc_dev,
1068 				    "can't switch signaling voltage\n");
1069 				return error;
1070 			}
1071 
1072 			error = sdmmc_chip_bus_clock(sc->sc_sct, sc->sc_sch,
1073 			    sc->sc_busclk, ddr);
1074 			if (error) {
1075 				aprint_error_dev(sc->sc_dev,
1076 				    "can't change bus clock\n");
1077 				return error;
1078 			}
1079 
1080 			delay(10000);
1081 
1082 			sc->sc_transfer_mode = "DDR52";
1083 			sc->sc_busddr = ddr;
1084 		}
1085 
1086 		sectors = ext_csd[EXT_CSD_SEC_COUNT + 0] << 0 |
1087 		    ext_csd[EXT_CSD_SEC_COUNT + 1] << 8  |
1088 		    ext_csd[EXT_CSD_SEC_COUNT + 2] << 16 |
1089 		    ext_csd[EXT_CSD_SEC_COUNT + 3] << 24;
1090 		if (sectors > (2u * 1024 * 1024 * 1024) / 512) {
1091 			SET(sf->flags, SFF_SDHC);
1092 			sf->csd.capacity = sectors;
1093 		}
1094 
1095 		if (hs_timing == EXT_CSD_HS_TIMING_HS200) {
1096 			sc->sc_transfer_mode = "HS200";
1097 
1098 			/* execute tuning (HS200) */
1099 			error = sdmmc_mem_execute_tuning(sc, sf);
1100 			if (error) {
1101 				aprint_error_dev(sc->sc_dev,
1102 				    "can't execute MMC tuning\n");
1103 				return error;
1104 			}
1105 		}
1106 
1107 		if (sf->ext_csd.rev >= 5) {
1108 			sf->ext_csd.rst_n_function =
1109 			    ext_csd[EXT_CSD_RST_N_FUNCTION];
1110 		}
1111 
1112 		if (sf->ext_csd.rev >= 6) {
1113 			sf->ext_csd.cache_size =
1114 			    le32dec(&ext_csd[EXT_CSD_CACHE_SIZE]) * 1024;
1115 		}
1116 		if (sf->ext_csd.cache_size > 0) {
1117 			/* eMMC cache present, enable it */
1118 			error = sdmmc_mem_mmc_switch(sf,
1119 			    EXT_CSD_CMD_SET_NORMAL, EXT_CSD_CACHE_CTRL,
1120 			    EXT_CSD_CACHE_CTRL_CACHE_EN, false);
1121 			if (error) {
1122 				aprint_error_dev(sc->sc_dev,
1123 				    "can't enable cache: %d\n", error);
1124 			} else {
1125 				SET(sf->flags, SFF_CACHE_ENABLED);
1126 			}
1127 		}
1128 	} else {
1129 		if (sc->sc_busclk > sf->csd.tran_speed)
1130 			sc->sc_busclk = sf->csd.tran_speed;
1131 		if (sc->sc_busclk != bus_clock) {
1132 			error = sdmmc_chip_bus_clock(sc->sc_sct, sc->sc_sch,
1133 			    sc->sc_busclk, false);
1134 			if (error) {
1135 				aprint_error_dev(sc->sc_dev,
1136 				    "can't change bus clock\n");
1137 				return error;
1138 			}
1139 		}
1140 	}
1141 
1142 	return 0;
1143 }
1144 
1145 static int
1146 sdmmc_mem_send_cid(struct sdmmc_softc *sc, sdmmc_response *resp)
1147 {
1148 	struct sdmmc_command cmd;
1149 	int error;
1150 
1151 	if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
1152 		memset(&cmd, 0, sizeof cmd);
1153 		cmd.c_opcode = MMC_ALL_SEND_CID;
1154 		cmd.c_flags = SCF_CMD_BCR | SCF_RSP_R2 | SCF_TOUT_OK;
1155 
1156 		error = sdmmc_mmc_command(sc, &cmd);
1157 	} else {
1158 		error = sdmmc_mem_send_cxd_data(sc, MMC_SEND_CID, &cmd.c_resp,
1159 		    sizeof(cmd.c_resp));
1160 	}
1161 
1162 #ifdef SDMMC_DEBUG
1163 	if (error == 0)
1164 		sdmmc_dump_data("CID", cmd.c_resp, sizeof(cmd.c_resp));
1165 #endif
1166 	if (error == 0 && resp != NULL)
1167 		memcpy(resp, &cmd.c_resp, sizeof(*resp));
1168 	return error;
1169 }
1170 
1171 static int
1172 sdmmc_mem_send_csd(struct sdmmc_softc *sc, struct sdmmc_function *sf,
1173     sdmmc_response *resp)
1174 {
1175 	struct sdmmc_command cmd;
1176 	int error;
1177 
1178 	if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
1179 		memset(&cmd, 0, sizeof cmd);
1180 		cmd.c_opcode = MMC_SEND_CSD;
1181 		cmd.c_arg = MMC_ARG_RCA(sf->rca);
1182 		cmd.c_flags = SCF_CMD_AC | SCF_RSP_R2;
1183 
1184 		error = sdmmc_mmc_command(sc, &cmd);
1185 	} else {
1186 		error = sdmmc_mem_send_cxd_data(sc, MMC_SEND_CSD, &cmd.c_resp,
1187 		    sizeof(cmd.c_resp));
1188 	}
1189 
1190 #ifdef SDMMC_DEBUG
1191 	if (error == 0)
1192 		sdmmc_dump_data("CSD", cmd.c_resp, sizeof(cmd.c_resp));
1193 #endif
1194 	if (error == 0 && resp != NULL)
1195 		memcpy(resp, &cmd.c_resp, sizeof(*resp));
1196 	return error;
1197 }
1198 
1199 static int
1200 sdmmc_mem_send_scr(struct sdmmc_softc *sc, struct sdmmc_function *sf,
1201     uint32_t *scr)
1202 {
1203 	struct sdmmc_command cmd;
1204 	bus_dma_segment_t ds[1];
1205 	void *ptr = NULL;
1206 	int datalen = 8;
1207 	int rseg;
1208 	int error = 0;
1209 
1210 	/* Don't lock */
1211 
1212 	if (ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
1213 		error = bus_dmamem_alloc(sc->sc_dmat, datalen, PAGE_SIZE, 0,
1214 		    ds, 1, &rseg, BUS_DMA_NOWAIT);
1215 		if (error)
1216 			goto out;
1217 		error = bus_dmamem_map(sc->sc_dmat, ds, 1, datalen, &ptr,
1218 		    BUS_DMA_NOWAIT);
1219 		if (error)
1220 			goto dmamem_free;
1221 		error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmap, ptr, datalen,
1222 		    NULL, BUS_DMA_NOWAIT|BUS_DMA_STREAMING|BUS_DMA_READ);
1223 		if (error)
1224 			goto dmamem_unmap;
1225 
1226 		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap, 0, datalen,
1227 		    BUS_DMASYNC_PREREAD);
1228 	} else {
1229 		ptr = malloc(datalen, M_DEVBUF, M_NOWAIT | M_ZERO);
1230 		if (ptr == NULL)
1231 			goto out;
1232 	}
1233 
1234 	memset(&cmd, 0, sizeof(cmd));
1235 	cmd.c_data = ptr;
1236 	cmd.c_datalen = datalen;
1237 	cmd.c_blklen = datalen;
1238 	cmd.c_arg = 0;
1239 	cmd.c_flags = SCF_CMD_ADTC | SCF_CMD_READ | SCF_RSP_R1 | SCF_RSP_SPI_R1;
1240 	cmd.c_opcode = SD_APP_SEND_SCR;
1241 	if (ISSET(sc->sc_caps, SMC_CAPS_DMA))
1242 		cmd.c_dmamap = sc->sc_dmap;
1243 
1244 	error = sdmmc_app_command(sc, sf, &cmd);
1245 	if (error == 0) {
1246 		if (ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
1247 			bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap, 0, datalen,
1248 			    BUS_DMASYNC_POSTREAD);
1249 		}
1250 		memcpy(scr, ptr, datalen);
1251 	}
1252 
1253 out:
1254 	if (ptr != NULL) {
1255 		if (ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
1256 			bus_dmamap_unload(sc->sc_dmat, sc->sc_dmap);
1257 dmamem_unmap:
1258 			bus_dmamem_unmap(sc->sc_dmat, ptr, datalen);
1259 dmamem_free:
1260 			bus_dmamem_free(sc->sc_dmat, ds, rseg);
1261 		} else {
1262 			free(ptr, M_DEVBUF);
1263 		}
1264 	}
1265 	DPRINTF(("%s: sdmem_mem_send_scr: error = %d\n", SDMMCDEVNAME(sc),
1266 	    error));
1267 
1268 #ifdef SDMMC_DEBUG
1269 	if (error == 0)
1270 		sdmmc_dump_data("SCR", scr, datalen);
1271 #endif
1272 	return error;
1273 }
1274 
1275 static int
1276 sdmmc_mem_decode_scr(struct sdmmc_softc *sc, struct sdmmc_function *sf)
1277 {
1278 	sdmmc_response resp;
1279 	int ver;
1280 
1281 	memset(resp, 0, sizeof(resp));
1282 	/*
1283 	 * Change the raw-scr received from the DMA stream to resp.
1284 	 */
1285 	resp[0] = be32toh(sf->raw_scr[1]) >> 8;		// LSW
1286 	resp[1] = be32toh(sf->raw_scr[0]);		// MSW
1287 	resp[0] |= (resp[1] & 0xff) << 24;
1288 	resp[1] >>= 8;
1289 
1290 	ver = SCR_STRUCTURE(resp);
1291 	sf->scr.sd_spec = SCR_SD_SPEC(resp);
1292 	sf->scr.bus_width = SCR_SD_BUS_WIDTHS(resp);
1293 
1294 	DPRINTF(("%s: sdmmc_mem_decode_scr: %08x%08x ver=%d, spec=%d, bus width=%d\n",
1295 	    SDMMCDEVNAME(sc), resp[1], resp[0],
1296 	    ver, sf->scr.sd_spec, sf->scr.bus_width));
1297 
1298 	if (ver != 0 && ver != 1) {
1299 		DPRINTF(("%s: unknown structure version: %d\n",
1300 		    SDMMCDEVNAME(sc), ver));
1301 		return EINVAL;
1302 	}
1303 	return 0;
1304 }
1305 
1306 static int
1307 sdmmc_mem_send_ssr(struct sdmmc_softc *sc, struct sdmmc_function *sf,
1308     sdmmc_bitfield512_t *ssr)
1309 {
1310 	struct sdmmc_command cmd;
1311 	bus_dma_segment_t ds[1];
1312 	void *ptr = NULL;
1313 	int datalen = 64;
1314 	int rseg;
1315 	int error = 0;
1316 
1317 	/* Don't lock */
1318 
1319 	if (ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
1320 		error = bus_dmamem_alloc(sc->sc_dmat, datalen, PAGE_SIZE, 0,
1321 		    ds, 1, &rseg, BUS_DMA_NOWAIT);
1322 		if (error)
1323 			goto out;
1324 		error = bus_dmamem_map(sc->sc_dmat, ds, 1, datalen, &ptr,
1325 		    BUS_DMA_NOWAIT);
1326 		if (error)
1327 			goto dmamem_free;
1328 		error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmap, ptr, datalen,
1329 		    NULL, BUS_DMA_NOWAIT|BUS_DMA_STREAMING|BUS_DMA_READ);
1330 		if (error)
1331 			goto dmamem_unmap;
1332 
1333 		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap, 0, datalen,
1334 		    BUS_DMASYNC_PREREAD);
1335 	} else {
1336 		ptr = malloc(datalen, M_DEVBUF, M_NOWAIT | M_ZERO);
1337 		if (ptr == NULL)
1338 			goto out;
1339 	}
1340 
1341 	memset(&cmd, 0, sizeof(cmd));
1342 	cmd.c_data = ptr;
1343 	cmd.c_datalen = datalen;
1344 	cmd.c_blklen = datalen;
1345 	cmd.c_arg = 0;
1346 	cmd.c_flags = SCF_CMD_ADTC | SCF_CMD_READ | SCF_RSP_R1 | SCF_RSP_SPI_R1;
1347 	cmd.c_opcode = SD_APP_SD_STATUS;
1348 	if (ISSET(sc->sc_caps, SMC_CAPS_DMA))
1349 		cmd.c_dmamap = sc->sc_dmap;
1350 
1351 	error = sdmmc_app_command(sc, sf, &cmd);
1352 	if (error == 0) {
1353 		if (ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
1354 			bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap, 0, datalen,
1355 			    BUS_DMASYNC_POSTREAD);
1356 		}
1357 		memcpy(ssr, ptr, datalen);
1358 	}
1359 
1360 out:
1361 	if (ptr != NULL) {
1362 		if (ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
1363 			bus_dmamap_unload(sc->sc_dmat, sc->sc_dmap);
1364 dmamem_unmap:
1365 			bus_dmamem_unmap(sc->sc_dmat, ptr, datalen);
1366 dmamem_free:
1367 			bus_dmamem_free(sc->sc_dmat, ds, rseg);
1368 		} else {
1369 			free(ptr, M_DEVBUF);
1370 		}
1371 	}
1372 	DPRINTF(("%s: sdmem_mem_send_ssr: error = %d\n", SDMMCDEVNAME(sc),
1373 	    error));
1374 
1375 	if (error == 0)
1376 		sdmmc_be512_to_bitfield512(ssr);
1377 
1378 #ifdef SDMMC_DEBUG
1379 	if (error == 0)
1380 		sdmmc_dump_data("SSR", ssr, datalen);
1381 #endif
1382 	return error;
1383 }
1384 
1385 static int
1386 sdmmc_mem_decode_ssr(struct sdmmc_softc *sc, struct sdmmc_function *sf,
1387     sdmmc_bitfield512_t *ssr_bitfield)
1388 {
1389 	uint32_t *ssr = (uint32_t *)ssr_bitfield;
1390 	int speed_class_val, bus_width_val;
1391 
1392 	const int bus_width = SSR_DAT_BUS_WIDTH(ssr);
1393 	const int speed_class = SSR_SPEED_CLASS(ssr);
1394 	const int uhs_speed_grade = SSR_UHS_SPEED_GRADE(ssr);
1395 	const int video_speed_class = SSR_VIDEO_SPEED_CLASS(ssr);
1396 	const int app_perf_class = SSR_APP_PERF_CLASS(ssr);
1397 
1398 	switch (speed_class) {
1399 	case SSR_SPEED_CLASS_0:	speed_class_val = 0; break;
1400 	case SSR_SPEED_CLASS_2: speed_class_val = 2; break;
1401 	case SSR_SPEED_CLASS_4: speed_class_val = 4; break;
1402 	case SSR_SPEED_CLASS_6: speed_class_val = 6; break;
1403 	case SSR_SPEED_CLASS_10: speed_class_val = 10; break;
1404 	default: speed_class_val = -1; break;
1405 	}
1406 
1407 	switch (bus_width) {
1408 	case SSR_DAT_BUS_WIDTH_1: bus_width_val = 1; break;
1409 	case SSR_DAT_BUS_WIDTH_4: bus_width_val = 4; break;
1410 	default: bus_width_val = -1;
1411 	}
1412 
1413 	/*
1414 	 * Log card status
1415 	 */
1416 	device_printf(sc->sc_dev, "SD card status:");
1417 	if (bus_width_val != -1)
1418 		printf(" %d-bit", bus_width_val);
1419 	else
1420 		printf(" unknown bus width");
1421 	if (speed_class_val != -1)
1422 		printf(", C%d", speed_class_val);
1423 	if (uhs_speed_grade)
1424 		printf(", U%d", uhs_speed_grade);
1425 	if (video_speed_class)
1426 		printf(", V%d", video_speed_class);
1427 	if (app_perf_class)
1428 		printf(", A%d", app_perf_class);
1429 	printf("\n");
1430 
1431 	return 0;
1432 }
1433 
1434 static int
1435 sdmmc_mem_send_cxd_data(struct sdmmc_softc *sc, int opcode, void *data,
1436     size_t datalen)
1437 {
1438 	struct sdmmc_command cmd;
1439 	bus_dma_segment_t ds[1];
1440 	void *ptr = NULL;
1441 	int rseg;
1442 	int error = 0;
1443 
1444 	if (ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
1445 		error = bus_dmamem_alloc(sc->sc_dmat, datalen, PAGE_SIZE, 0, ds,
1446 		    1, &rseg, BUS_DMA_NOWAIT);
1447 		if (error)
1448 			goto out;
1449 		error = bus_dmamem_map(sc->sc_dmat, ds, 1, datalen, &ptr,
1450 		    BUS_DMA_NOWAIT);
1451 		if (error)
1452 			goto dmamem_free;
1453 		error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmap, ptr, datalen,
1454 		    NULL, BUS_DMA_NOWAIT|BUS_DMA_STREAMING|BUS_DMA_READ);
1455 		if (error)
1456 			goto dmamem_unmap;
1457 
1458 		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap, 0, datalen,
1459 		    BUS_DMASYNC_PREREAD);
1460 	} else {
1461 		ptr = malloc(datalen, M_DEVBUF, M_NOWAIT | M_ZERO);
1462 		if (ptr == NULL)
1463 			goto out;
1464 	}
1465 
1466 	memset(&cmd, 0, sizeof(cmd));
1467 	cmd.c_data = ptr;
1468 	cmd.c_datalen = datalen;
1469 	cmd.c_blklen = datalen;
1470 	cmd.c_opcode = opcode;
1471 	cmd.c_arg = 0;
1472 	cmd.c_flags = SCF_CMD_ADTC | SCF_CMD_READ | SCF_RSP_SPI_R1;
1473 	if (opcode == MMC_SEND_EXT_CSD)
1474 		SET(cmd.c_flags, SCF_RSP_R1);
1475 	else
1476 		SET(cmd.c_flags, SCF_RSP_R2);
1477 	if (ISSET(sc->sc_caps, SMC_CAPS_DMA))
1478 		cmd.c_dmamap = sc->sc_dmap;
1479 
1480 	error = sdmmc_mmc_command(sc, &cmd);
1481 	if (error == 0) {
1482 		if (ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
1483 			bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap, 0, datalen,
1484 			    BUS_DMASYNC_POSTREAD);
1485 		}
1486 		memcpy(data, ptr, datalen);
1487 #ifdef SDMMC_DEBUG
1488 		sdmmc_dump_data("CXD", data, datalen);
1489 #endif
1490 	}
1491 
1492 out:
1493 	if (ptr != NULL) {
1494 		if (ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
1495 			bus_dmamap_unload(sc->sc_dmat, sc->sc_dmap);
1496 dmamem_unmap:
1497 			bus_dmamem_unmap(sc->sc_dmat, ptr, datalen);
1498 dmamem_free:
1499 			bus_dmamem_free(sc->sc_dmat, ds, rseg);
1500 		} else {
1501 			free(ptr, M_DEVBUF);
1502 		}
1503 	}
1504 	return error;
1505 }
1506 
1507 static int
1508 sdmmc_set_bus_width(struct sdmmc_function *sf, int width)
1509 {
1510 	struct sdmmc_softc *sc = sf->sc;
1511 	struct sdmmc_command cmd;
1512 	int error;
1513 
1514 	if (ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE))
1515 		return ENODEV;
1516 
1517 	memset(&cmd, 0, sizeof(cmd));
1518 	cmd.c_opcode = SD_APP_SET_BUS_WIDTH;
1519 	cmd.c_flags = SCF_RSP_R1 | SCF_CMD_AC;
1520 
1521 	switch (width) {
1522 	case 1:
1523 		cmd.c_arg = SD_ARG_BUS_WIDTH_1;
1524 		break;
1525 
1526 	case 4:
1527 		cmd.c_arg = SD_ARG_BUS_WIDTH_4;
1528 		break;
1529 
1530 	default:
1531 		return EINVAL;
1532 	}
1533 
1534 	error = sdmmc_app_command(sc, sf, &cmd);
1535 	if (error == 0)
1536 		error = sdmmc_chip_bus_width(sc->sc_sct, sc->sc_sch, width);
1537 	return error;
1538 }
1539 
1540 static int
1541 sdmmc_mem_sd_switch(struct sdmmc_function *sf, int mode, int group,
1542     int function, sdmmc_bitfield512_t *status)
1543 {
1544 	struct sdmmc_softc *sc = sf->sc;
1545 	struct sdmmc_command cmd;
1546 	bus_dma_segment_t ds[1];
1547 	void *ptr = NULL;
1548 	int gsft, rseg, error = 0;
1549 	const int statlen = 64;
1550 
1551 	if (sf->scr.sd_spec >= SCR_SD_SPEC_VER_1_10 &&
1552 	    !ISSET(sf->csd.ccc, SD_CSD_CCC_SWITCH))
1553 		return EINVAL;
1554 
1555 	if (group <= 0 || group > 6 ||
1556 	    function < 0 || function > 15)
1557 		return EINVAL;
1558 
1559 	gsft = (group - 1) << 2;
1560 
1561 	if (ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
1562 		error = bus_dmamem_alloc(sc->sc_dmat, statlen, PAGE_SIZE, 0, ds,
1563 		    1, &rseg, BUS_DMA_NOWAIT);
1564 		if (error)
1565 			goto out;
1566 		error = bus_dmamem_map(sc->sc_dmat, ds, 1, statlen, &ptr,
1567 		    BUS_DMA_NOWAIT);
1568 		if (error)
1569 			goto dmamem_free;
1570 		error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmap, ptr, statlen,
1571 		    NULL, BUS_DMA_NOWAIT|BUS_DMA_STREAMING|BUS_DMA_READ);
1572 		if (error)
1573 			goto dmamem_unmap;
1574 
1575 		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap, 0, statlen,
1576 		    BUS_DMASYNC_PREREAD);
1577 	} else {
1578 		ptr = malloc(statlen, M_DEVBUF, M_NOWAIT | M_ZERO);
1579 		if (ptr == NULL)
1580 			goto out;
1581 	}
1582 
1583 	memset(&cmd, 0, sizeof(cmd));
1584 	cmd.c_data = ptr;
1585 	cmd.c_datalen = statlen;
1586 	cmd.c_blklen = statlen;
1587 	cmd.c_opcode = SD_SEND_SWITCH_FUNC;
1588 	cmd.c_arg =
1589 	    (!!mode << 31) | (function << gsft) | (0x00ffffff & ~(0xf << gsft));
1590 	cmd.c_flags = SCF_CMD_ADTC | SCF_CMD_READ | SCF_RSP_R1 | SCF_RSP_SPI_R1;
1591 	if (ISSET(sc->sc_caps, SMC_CAPS_DMA))
1592 		cmd.c_dmamap = sc->sc_dmap;
1593 
1594 	error = sdmmc_mmc_command(sc, &cmd);
1595 	if (error == 0) {
1596 		if (ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
1597 			bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap, 0, statlen,
1598 			    BUS_DMASYNC_POSTREAD);
1599 		}
1600 		memcpy(status, ptr, statlen);
1601 	}
1602 
1603 out:
1604 	if (ptr != NULL) {
1605 		if (ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
1606 			bus_dmamap_unload(sc->sc_dmat, sc->sc_dmap);
1607 dmamem_unmap:
1608 			bus_dmamem_unmap(sc->sc_dmat, ptr, statlen);
1609 dmamem_free:
1610 			bus_dmamem_free(sc->sc_dmat, ds, rseg);
1611 		} else {
1612 			free(ptr, M_DEVBUF);
1613 		}
1614 	}
1615 
1616 	if (error == 0)
1617 		sdmmc_be512_to_bitfield512(status);
1618 
1619 	return error;
1620 }
1621 
/*
 * Issue an MMC SWITCH command (CMD6) writing 'value' into EXT_CSD
 * byte 'index' with command-set 'set'.  When 'poll' is set the
 * command is issued in polling mode (SCF_POLL).
 *
 * For HS_TIMING values >= 2 (HS200 and above) the switch is followed
 * by CMD13 (SEND_STATUS) polling until the card reports
 * READY_FOR_DATA; a SWITCH_ERROR bit in the status aborts with
 * EINVAL.
 */
static int
sdmmc_mem_mmc_switch(struct sdmmc_function *sf, uint8_t set, uint8_t index,
    uint8_t value, bool poll)
{
	struct sdmmc_softc *sc = sf->sc;
	struct sdmmc_command cmd;
	int error;

	memset(&cmd, 0, sizeof(cmd));
	cmd.c_opcode = MMC_SWITCH;
	/* CMD6 argument: access mode, EXT_CSD index, value, cmd set. */
	cmd.c_arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
	    (index << 16) | (value << 8) | set;
	cmd.c_flags = SCF_RSP_SPI_R1B | SCF_RSP_R1B | SCF_CMD_AC;

	if (poll)
		cmd.c_flags |= SCF_POLL;

	error = sdmmc_mmc_command(sc, &cmd);
	if (error)
		return error;

	/* Busy-wait for completion of fast-timing switches only. */
	if (index == EXT_CSD_HS_TIMING && value >= 2) {
		do {
			memset(&cmd, 0, sizeof(cmd));
			cmd.c_opcode = MMC_SEND_STATUS;
			if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE))
				cmd.c_arg = MMC_ARG_RCA(sf->rca);
			cmd.c_flags = SCF_CMD_AC | SCF_RSP_R1 | SCF_RSP_SPI_R2;
			if (poll)
				cmd.c_flags |= SCF_POLL;
			error = sdmmc_mmc_command(sc, &cmd);
			if (error)
				break;
			if (ISSET(MMC_R1(cmd.c_resp), MMC_R1_SWITCH_ERROR)) {
				aprint_error_dev(sc->sc_dev, "switch error\n");
				return EINVAL;
			}
			/* XXX time out */
		} while (!ISSET(MMC_R1(cmd.c_resp), MMC_R1_READY_FOR_DATA));

		if (error) {
			aprint_error_dev(sc->sc_dev,
			    "error waiting for high speed switch: %d\n",
			    error);
			return error;
		}
	}

	return 0;
}
1672 
1673 /*
1674  * SPI mode function
1675  */
1676 static int
1677 sdmmc_mem_spi_read_ocr(struct sdmmc_softc *sc, uint32_t hcs, uint32_t *card_ocr)
1678 {
1679 	struct sdmmc_command cmd;
1680 	int error;
1681 
1682 	memset(&cmd, 0, sizeof(cmd));
1683 	cmd.c_opcode = MMC_READ_OCR;
1684 	cmd.c_arg = hcs ? MMC_OCR_HCS : 0;
1685 	cmd.c_flags = SCF_RSP_SPI_R3;
1686 
1687 	error = sdmmc_mmc_command(sc, &cmd);
1688 	if (error == 0 && card_ocr != NULL)
1689 		*card_ocr = cmd.c_resp[1];
1690 	DPRINTF(("%s: sdmmc_mem_spi_read_ocr: error=%d, ocr=%#x\n",
1691 	    SDMMCDEVNAME(sc), error, cmd.c_resp[1]));
1692 	return error;
1693 }
1694 
1695 /*
1696  * read/write function
1697  */
1698 /* read */
1699 static int
1700 sdmmc_mem_single_read_block(struct sdmmc_function *sf, uint32_t blkno,
1701     u_char *data, size_t datalen)
1702 {
1703 	struct sdmmc_softc *sc = sf->sc;
1704 	int error = 0;
1705 	int i;
1706 
1707 	KASSERT((datalen % SDMMC_SECTOR_SIZE) == 0);
1708 	KASSERT(!ISSET(sc->sc_caps, SMC_CAPS_DMA));
1709 
1710 	for (i = 0; i < datalen / SDMMC_SECTOR_SIZE; i++) {
1711 		error = sdmmc_mem_read_block_subr(sf, sc->sc_dmap, blkno + i,
1712 		    data + i * SDMMC_SECTOR_SIZE, SDMMC_SECTOR_SIZE);
1713 		if (error)
1714 			break;
1715 	}
1716 	return error;
1717 }
1718 
1719 /*
1720  * Simulate multi-segment dma transfer.
1721  */
/*
 * Simulate multi-segment dma transfer.
 *
 * Called when the loaded sc_dmap has more than one segment but the
 * host cannot do multi-segment DMA.  If any segment length is not a
 * multiple of the sector size, the whole transfer is done through the
 * pre-allocated bounce buffer (sf->bbuf / sf->bbuf_dmap) and copied
 * out afterwards.  Otherwise each segment is loaded into the
 * single-segment map sf->sseg_dmap and transferred in turn.
 */
static int
sdmmc_mem_single_segment_dma_read_block(struct sdmmc_function *sf,
    uint32_t blkno, u_char *data, size_t datalen)
{
	struct sdmmc_softc *sc = sf->sc;
	bool use_bbuf = false;
	int error = 0;
	int i;

	/* Any partial-sector segment forces the bounce-buffer path. */
	for (i = 0; i < sc->sc_dmap->dm_nsegs; i++) {
		size_t len = sc->sc_dmap->dm_segs[i].ds_len;
		if ((len % SDMMC_SECTOR_SIZE) != 0) {
			use_bbuf = true;
			break;
		}
	}
	if (use_bbuf) {
		bus_dmamap_sync(sc->sc_dmat, sf->bbuf_dmap, 0, datalen,
		    BUS_DMASYNC_PREREAD);

		error = sdmmc_mem_read_block_subr(sf, sf->bbuf_dmap,
		    blkno, data, datalen);
		if (error) {
			bus_dmamap_unload(sc->sc_dmat, sf->bbuf_dmap);
			return error;
		}

		bus_dmamap_sync(sc->sc_dmat, sf->bbuf_dmap, 0, datalen,
		    BUS_DMASYNC_POSTREAD);

		/* Copy from bounce buffer */
		memcpy(data, sf->bbuf, datalen);

		return 0;
	}

	/* Sector-aligned segments: transfer one segment at a time. */
	for (i = 0; i < sc->sc_dmap->dm_nsegs; i++) {
		size_t len = sc->sc_dmap->dm_segs[i].ds_len;

		error = bus_dmamap_load(sc->sc_dmat, sf->sseg_dmap,
		    data, len, NULL, BUS_DMA_NOWAIT|BUS_DMA_READ);
		if (error)
			return error;

		bus_dmamap_sync(sc->sc_dmat, sf->sseg_dmap, 0, len,
		    BUS_DMASYNC_PREREAD);

		error = sdmmc_mem_read_block_subr(sf, sf->sseg_dmap,
		    blkno, data, len);
		if (error) {
			bus_dmamap_unload(sc->sc_dmat, sf->sseg_dmap);
			return error;
		}

		bus_dmamap_sync(sc->sc_dmat, sf->sseg_dmap, 0, len,
		    BUS_DMASYNC_POSTREAD);

		bus_dmamap_unload(sc->sc_dmat, sf->sseg_dmap);

		/* Advance to the blocks covered by the next segment. */
		blkno += len / SDMMC_SECTOR_SIZE;
		data += len;
	}
	return 0;
}
1786 
/*
 * Issue a single CMD17/CMD18 read of 'datalen' bytes starting at
 * sector 'blkno' into 'data' (via 'dmap' on DMA-capable hosts).
 * Selects the card first (non-SPI), sends STOP_TRANSMISSION after a
 * multi-block read when the host lacks auto-stop, then polls CMD13
 * until the card is READY_FOR_DATA again.
 */
static int
sdmmc_mem_read_block_subr(struct sdmmc_function *sf, bus_dmamap_t dmap,
    uint32_t blkno, u_char *data, size_t datalen)
{
	struct sdmmc_softc *sc = sf->sc;
	struct sdmmc_command cmd;
	int error;

	if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
		error = sdmmc_select_card(sc, sf);
		if (error)
			goto out;
	}

	memset(&cmd, 0, sizeof(cmd));
	cmd.c_data = data;
	cmd.c_datalen = datalen;
	cmd.c_blklen = SDMMC_SECTOR_SIZE;
	cmd.c_opcode = (cmd.c_datalen / cmd.c_blklen) > 1 ?
	    MMC_READ_BLOCK_MULTIPLE : MMC_READ_BLOCK_SINGLE;
	cmd.c_arg = blkno;
	/* Standard-capacity cards address by byte, not by sector. */
	if (!ISSET(sf->flags, SFF_SDHC))
		cmd.c_arg <<= SDMMC_SECTOR_SIZE_SB;
	cmd.c_flags = SCF_CMD_ADTC | SCF_CMD_READ | SCF_RSP_R1 | SCF_RSP_SPI_R1;
	if (ISSET(sf->flags, SFF_SDHC))
		cmd.c_flags |= SCF_XFER_SDHC;
	if (ISSET(sc->sc_caps, SMC_CAPS_DMA))
		cmd.c_dmamap = dmap;

	sc->sc_ev_xfer.ev_count++;

	error = sdmmc_mmc_command(sc, &cmd);
	if (error) {
		sc->sc_ev_xfer_error.ev_count++;
		goto out;
	}

	/* Bucket transfer sizes 512B..64KB by power of two for stats. */
	const u_int counter = __builtin_ctz(cmd.c_datalen);
	if (counter >= 9 && counter <= 16) {
		sc->sc_ev_xfer_aligned[counter - 9].ev_count++;
	} else {
		sc->sc_ev_xfer_unaligned.ev_count++;
	}

	if (!ISSET(sc->sc_caps, SMC_CAPS_AUTO_STOP)) {
		if (cmd.c_opcode == MMC_READ_BLOCK_MULTIPLE) {
			memset(&cmd, 0, sizeof cmd);
			cmd.c_opcode = MMC_STOP_TRANSMISSION;
			cmd.c_arg = MMC_ARG_RCA(sf->rca);
			cmd.c_flags = SCF_CMD_AC | SCF_RSP_R1B | SCF_RSP_SPI_R1B;
			error = sdmmc_mmc_command(sc, &cmd);
			if (error)
				goto out;
		}
	}

	/* Wait until the card leaves the data/busy state. */
	if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
		do {
			memset(&cmd, 0, sizeof(cmd));
			cmd.c_opcode = MMC_SEND_STATUS;
			if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE))
				cmd.c_arg = MMC_ARG_RCA(sf->rca);
			cmd.c_flags = SCF_CMD_AC | SCF_RSP_R1 | SCF_RSP_SPI_R2;
			error = sdmmc_mmc_command(sc, &cmd);
			if (error)
				break;
			/* XXX time out */
		} while (!ISSET(MMC_R1(cmd.c_resp), MMC_R1_READY_FOR_DATA));
	}

out:
	return error;
}
1860 
/*
 * Public entry point: read 'datalen' bytes starting at sector 'blkno'
 * into 'data', serialized by the softc mutex.  Dispatches to the
 * single-block path (SMC_CAPS_SINGLE_ONLY), the PIO path (no DMA), a
 * segment-at-a-time simulation (multi-segment map on a host without
 * multi-segment DMA), or a straight DMA transfer.
 */
int
sdmmc_mem_read_block(struct sdmmc_function *sf, uint32_t blkno, u_char *data,
    size_t datalen)
{
	struct sdmmc_softc *sc = sf->sc;
	int error;

	SDMMC_LOCK(sc);
	mutex_enter(&sc->sc_mtx);

	/* Host can only issue single-block commands. */
	if (ISSET(sc->sc_caps, SMC_CAPS_SINGLE_ONLY)) {
		error = sdmmc_mem_single_read_block(sf, blkno, data, datalen);
		goto out;
	}

	/* No DMA: transfer directly without loading a map. */
	if (!ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
		error = sdmmc_mem_read_block_subr(sf, sc->sc_dmap, blkno, data,
		    datalen);
		goto out;
	}

	/* DMA transfer */
	error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmap, data, datalen, NULL,
	    BUS_DMA_NOWAIT|BUS_DMA_READ);
	if (error)
		goto out;

#ifdef SDMMC_DEBUG
	printf("data=%p, datalen=%zu\n", data, datalen);
	for (int i = 0; i < sc->sc_dmap->dm_nsegs; i++) {
		printf("seg#%d: addr=%#lx, size=%#lx\n", i,
		    (u_long)sc->sc_dmap->dm_segs[i].ds_addr,
		    (u_long)sc->sc_dmap->dm_segs[i].ds_len);
	}
#endif

	/* Scattered buffer on a single-segment host: simulate it. */
	if (sc->sc_dmap->dm_nsegs > 1
	    && !ISSET(sc->sc_caps, SMC_CAPS_MULTI_SEG_DMA)) {
		error = sdmmc_mem_single_segment_dma_read_block(sf, blkno,
		    data, datalen);
		goto unload;
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap, 0, datalen,
	    BUS_DMASYNC_PREREAD);

	error = sdmmc_mem_read_block_subr(sf, sc->sc_dmap, blkno, data,
	    datalen);
	if (error)
		goto unload;

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap, 0, datalen,
	    BUS_DMASYNC_POSTREAD);
unload:
	bus_dmamap_unload(sc->sc_dmat, sc->sc_dmap);

out:
	mutex_exit(&sc->sc_mtx);
	SDMMC_UNLOCK(sc);

	return error;
}
1923 
1924 /* write */
1925 static int
1926 sdmmc_mem_single_write_block(struct sdmmc_function *sf, uint32_t blkno,
1927     u_char *data, size_t datalen)
1928 {
1929 	struct sdmmc_softc *sc = sf->sc;
1930 	int error = 0;
1931 	int i;
1932 
1933 	KASSERT((datalen % SDMMC_SECTOR_SIZE) == 0);
1934 	KASSERT(!ISSET(sc->sc_caps, SMC_CAPS_DMA));
1935 
1936 	for (i = 0; i < datalen / SDMMC_SECTOR_SIZE; i++) {
1937 		error = sdmmc_mem_write_block_subr(sf, sc->sc_dmap, blkno + i,
1938 		    data + i * SDMMC_SECTOR_SIZE, SDMMC_SECTOR_SIZE);
1939 		if (error)
1940 			break;
1941 	}
1942 	return error;
1943 }
1944 
1945 /*
1946  * Simulate multi-segment dma transfer.
1947  */
/*
 * Simulate multi-segment dma transfer.
 *
 * Write-side counterpart of the single-segment read simulation.  If
 * any segment of the loaded sc_dmap is not sector-aligned the data is
 * first copied into the bounce buffer (sf->bbuf / sf->bbuf_dmap) and
 * written in one command; otherwise each segment is loaded into
 * sf->sseg_dmap and written in turn.
 */
static int
sdmmc_mem_single_segment_dma_write_block(struct sdmmc_function *sf,
    uint32_t blkno, u_char *data, size_t datalen)
{
	struct sdmmc_softc *sc = sf->sc;
	bool use_bbuf = false;
	int error = 0;
	int i;

	/* Any partial-sector segment forces the bounce-buffer path. */
	for (i = 0; i < sc->sc_dmap->dm_nsegs; i++) {
		size_t len = sc->sc_dmap->dm_segs[i].ds_len;
		if ((len % SDMMC_SECTOR_SIZE) != 0) {
			use_bbuf = true;
			break;
		}
	}
	if (use_bbuf) {
		/* Copy to bounce buffer */
		memcpy(sf->bbuf, data, datalen);

		bus_dmamap_sync(sc->sc_dmat, sf->bbuf_dmap, 0, datalen,
		    BUS_DMASYNC_PREWRITE);

		error = sdmmc_mem_write_block_subr(sf, sf->bbuf_dmap,
		    blkno, data, datalen);
		if (error) {
			bus_dmamap_unload(sc->sc_dmat, sf->bbuf_dmap);
			return error;
		}

		bus_dmamap_sync(sc->sc_dmat, sf->bbuf_dmap, 0, datalen,
		    BUS_DMASYNC_POSTWRITE);

		return 0;
	}

	/* Sector-aligned segments: transfer one segment at a time. */
	for (i = 0; i < sc->sc_dmap->dm_nsegs; i++) {
		size_t len = sc->sc_dmap->dm_segs[i].ds_len;

		error = bus_dmamap_load(sc->sc_dmat, sf->sseg_dmap,
		    data, len, NULL, BUS_DMA_NOWAIT|BUS_DMA_WRITE);
		if (error)
			return error;

		bus_dmamap_sync(sc->sc_dmat, sf->sseg_dmap, 0, len,
		    BUS_DMASYNC_PREWRITE);

		error = sdmmc_mem_write_block_subr(sf, sf->sseg_dmap,
		    blkno, data, len);
		if (error) {
			bus_dmamap_unload(sc->sc_dmat, sf->sseg_dmap);
			return error;
		}

		bus_dmamap_sync(sc->sc_dmat, sf->sseg_dmap, 0, len,
		    BUS_DMASYNC_POSTWRITE);

		bus_dmamap_unload(sc->sc_dmat, sf->sseg_dmap);

		/* Advance to the blocks covered by the next segment. */
		blkno += len / SDMMC_SECTOR_SIZE;
		data += len;
	}

	return error;
}
2013 
/*
 * Issue a single CMD24/CMD25 write of 'datalen' bytes starting at
 * sector 'blkno' from 'data' (via 'dmap' on DMA-capable hosts).
 * Selects the card first (non-SPI).  For SD multi-block writes,
 * ACMD23 pre-erases the block count as a performance hint.  Sends
 * STOP_TRANSMISSION after a multi-block write when the host lacks
 * auto-stop, then polls CMD13 until the card is READY_FOR_DATA.
 */
static int
sdmmc_mem_write_block_subr(struct sdmmc_function *sf, bus_dmamap_t dmap,
    uint32_t blkno, u_char *data, size_t datalen)
{
	struct sdmmc_softc *sc = sf->sc;
	struct sdmmc_command cmd;
	int error;

	if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
		error = sdmmc_select_card(sc, sf);
		if (error)
			goto out;
	}

	const int nblk = howmany(datalen, SDMMC_SECTOR_SIZE);
	if (ISSET(sc->sc_flags, SMF_SD_MODE) && nblk > 1) {
		/* Set the number of write blocks to be pre-erased */
		memset(&cmd, 0, sizeof(cmd));
		cmd.c_opcode = SD_APP_SET_WR_BLK_ERASE_COUNT;
		cmd.c_flags = SCF_RSP_R1 | SCF_RSP_SPI_R1 | SCF_CMD_AC;
		cmd.c_arg = nblk;
		error = sdmmc_app_command(sc, sf, &cmd);
		if (error)
			goto out;
	}

	memset(&cmd, 0, sizeof(cmd));
	cmd.c_data = data;
	cmd.c_datalen = datalen;
	cmd.c_blklen = SDMMC_SECTOR_SIZE;
	cmd.c_opcode = (cmd.c_datalen / cmd.c_blklen) > 1 ?
	    MMC_WRITE_BLOCK_MULTIPLE : MMC_WRITE_BLOCK_SINGLE;
	cmd.c_arg = blkno;
	/* Standard-capacity cards address by byte, not by sector. */
	if (!ISSET(sf->flags, SFF_SDHC))
		cmd.c_arg <<= SDMMC_SECTOR_SIZE_SB;
	cmd.c_flags = SCF_CMD_ADTC | SCF_RSP_R1;
	if (ISSET(sf->flags, SFF_SDHC))
		cmd.c_flags |= SCF_XFER_SDHC;
	if (ISSET(sc->sc_caps, SMC_CAPS_DMA))
		cmd.c_dmamap = dmap;

	sc->sc_ev_xfer.ev_count++;

	error = sdmmc_mmc_command(sc, &cmd);
	if (error) {
		sc->sc_ev_xfer_error.ev_count++;
		goto out;
	}

	/* Bucket transfer sizes 512B..64KB by power of two for stats. */
	const u_int counter = __builtin_ctz(cmd.c_datalen);
	if (counter >= 9 && counter <= 16) {
		sc->sc_ev_xfer_aligned[counter - 9].ev_count++;
	} else {
		sc->sc_ev_xfer_unaligned.ev_count++;
	}

	if (!ISSET(sc->sc_caps, SMC_CAPS_AUTO_STOP)) {
		if (cmd.c_opcode == MMC_WRITE_BLOCK_MULTIPLE) {
			memset(&cmd, 0, sizeof(cmd));
			cmd.c_opcode = MMC_STOP_TRANSMISSION;
			cmd.c_flags = SCF_CMD_AC | SCF_RSP_R1B | SCF_RSP_SPI_R1B;
			error = sdmmc_mmc_command(sc, &cmd);
			if (error)
				goto out;
		}
	}

	/* Wait until the card finishes programming the data. */
	if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
		do {
			memset(&cmd, 0, sizeof(cmd));
			cmd.c_opcode = MMC_SEND_STATUS;
			if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE))
				cmd.c_arg = MMC_ARG_RCA(sf->rca);
			cmd.c_flags = SCF_CMD_AC | SCF_RSP_R1 | SCF_RSP_SPI_R2;
			error = sdmmc_mmc_command(sc, &cmd);
			if (error)
				break;
			/* XXX time out */
		} while (!ISSET(MMC_R1(cmd.c_resp), MMC_R1_READY_FOR_DATA));
	}

out:
	return error;
}
2098 
/*
 * Write `datalen' bytes from `data' to the card starting at sector
 * `blkno'.  Selects between PIO, single-block-only, and DMA transfer
 * strategies based on the controller capability bits in sc_caps.
 *
 * Returns 0 on success or an errno value (EIO if the media is
 * write-protected, or whatever the underlying transfer reports).
 */
int
sdmmc_mem_write_block(struct sdmmc_function *sf, uint32_t blkno, u_char *data,
    size_t datalen)
{
	struct sdmmc_softc *sc = sf->sc;
	int error;

	SDMMC_LOCK(sc);
	mutex_enter(&sc->sc_mtx);

	/* Refuse the write outright if the slot reports write protect. */
	if (sdmmc_chip_write_protect(sc->sc_sct, sc->sc_sch)) {
		aprint_normal_dev(sc->sc_dev, "write-protected\n");
		error = EIO;
		goto out;
	}

	/* Controller can only do one block per command. */
	if (ISSET(sc->sc_caps, SMC_CAPS_SINGLE_ONLY)) {
		error = sdmmc_mem_single_write_block(sf, blkno, data, datalen);
		goto out;
	}

	/* No DMA support: issue the transfer directly (PIO). */
	if (!ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
		error = sdmmc_mem_write_block_subr(sf, sc->sc_dmap, blkno, data,
		    datalen);
		goto out;
	}

	/* DMA transfer */
	error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmap, data, datalen, NULL,
	    BUS_DMA_NOWAIT|BUS_DMA_WRITE);
	if (error)
		goto out;

#ifdef SDMMC_DEBUG
	aprint_normal_dev(sc->sc_dev, "%s: data=%p, datalen=%zu\n",
	    __func__, data, datalen);
	for (int i = 0; i < sc->sc_dmap->dm_nsegs; i++) {
		aprint_normal_dev(sc->sc_dev,
		    "%s: seg#%d: addr=%#lx, size=%#lx\n", __func__, i,
		    (u_long)sc->sc_dmap->dm_segs[i].ds_addr,
		    (u_long)sc->sc_dmap->dm_segs[i].ds_len);
	}
#endif

	/*
	 * The map has multiple segments but the controller can only
	 * handle one; fall back to the bounce-buffer path.  That path
	 * does its own syncs, so skip straight to unload.
	 */
	if (sc->sc_dmap->dm_nsegs > 1
	    && !ISSET(sc->sc_caps, SMC_CAPS_MULTI_SEG_DMA)) {
		error = sdmmc_mem_single_segment_dma_write_block(sf, blkno,
		    data, datalen);
		goto unload;
	}

	/* Flush CPU-written data to memory before the device reads it. */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap, 0, datalen,
	    BUS_DMASYNC_PREWRITE);

	error = sdmmc_mem_write_block_subr(sf, sc->sc_dmap, blkno, data,
	    datalen);
	if (error)
		goto unload;

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap, 0, datalen,
	    BUS_DMASYNC_POSTWRITE);
unload:
	bus_dmamap_unload(sc->sc_dmat, sc->sc_dmap);

out:
	mutex_exit(&sc->sc_mtx);
	SDMMC_UNLOCK(sc);

	return error;
}
2169 
2170 int
2171 sdmmc_mem_discard(struct sdmmc_function *sf, uint32_t sblkno, uint32_t eblkno)
2172 {
2173 	struct sdmmc_softc *sc = sf->sc;
2174 	struct sdmmc_command cmd;
2175 	int error;
2176 
2177 	if (ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE))
2178 		return ENODEV;	/* XXX not tested */
2179 
2180 	if (eblkno < sblkno)
2181 		return EINVAL;
2182 
2183 	SDMMC_LOCK(sc);
2184 	mutex_enter(&sc->sc_mtx);
2185 
2186 	/* Set the address of the first write block to be erased */
2187 	memset(&cmd, 0, sizeof(cmd));
2188 	cmd.c_opcode = ISSET(sc->sc_flags, SMF_SD_MODE) ?
2189 	    SD_ERASE_WR_BLK_START : MMC_TAG_ERASE_GROUP_START;
2190 	cmd.c_arg = sblkno;
2191 	if (!ISSET(sf->flags, SFF_SDHC))
2192 		cmd.c_arg <<= SDMMC_SECTOR_SIZE_SB;
2193 	cmd.c_flags = SCF_CMD_AC | SCF_RSP_R1;
2194 	error = sdmmc_mmc_command(sc, &cmd);
2195 	if (error)
2196 		goto out;
2197 
2198 	/* Set the address of the last write block to be erased */
2199 	memset(&cmd, 0, sizeof(cmd));
2200 	cmd.c_opcode = ISSET(sc->sc_flags, SMF_SD_MODE) ?
2201 	    SD_ERASE_WR_BLK_END : MMC_TAG_ERASE_GROUP_END;
2202 	cmd.c_arg = eblkno;
2203 	if (!ISSET(sf->flags, SFF_SDHC))
2204 		cmd.c_arg <<= SDMMC_SECTOR_SIZE_SB;
2205 	cmd.c_flags = SCF_CMD_AC | SCF_RSP_R1;
2206 	error = sdmmc_mmc_command(sc, &cmd);
2207 	if (error)
2208 		goto out;
2209 
2210 	/* Start the erase operation */
2211 	memset(&cmd, 0, sizeof(cmd));
2212 	cmd.c_opcode = MMC_ERASE;
2213 	cmd.c_flags = SCF_CMD_AC | SCF_RSP_R1B;
2214 	error = sdmmc_mmc_command(sc, &cmd);
2215 	if (error)
2216 		goto out;
2217 
2218 out:
2219 	mutex_exit(&sc->sc_mtx);
2220 	SDMMC_UNLOCK(sc);
2221 
2222 #ifdef SDMMC_DEBUG
2223 	device_printf(sc->sc_dev, "discard blk %u-%u error %d\n",
2224 	    sblkno, eblkno, error);
2225 #endif
2226 
2227 	return error;
2228 }
2229 
2230 int
2231 sdmmc_mem_flush_cache(struct sdmmc_function *sf, bool poll)
2232 {
2233 	struct sdmmc_softc *sc = sf->sc;
2234 	int error;
2235 
2236 	if (!ISSET(sf->flags, SFF_CACHE_ENABLED))
2237 		return 0;
2238 
2239 	SDMMC_LOCK(sc);
2240 	mutex_enter(&sc->sc_mtx);
2241 
2242 	error = sdmmc_mem_mmc_switch(sf,
2243 	    EXT_CSD_CMD_SET_NORMAL, EXT_CSD_FLUSH_CACHE,
2244 	    EXT_CSD_FLUSH_CACHE_FLUSH, poll);
2245 
2246 	mutex_exit(&sc->sc_mtx);
2247 	SDMMC_UNLOCK(sc);
2248 
2249 #ifdef SDMMC_DEBUG
2250 	device_printf(sc->sc_dev, "mmc flush cache error %d\n", error);
2251 #endif
2252 
2253 	return error;
2254 }
2255