xref: /netbsd-src/sys/dev/sdmmc/sdmmc_mem.c (revision 796c32c94f6e154afc9de0f63da35c91bb739b45)
1 /*	$NetBSD: sdmmc_mem.c,v 1.63 2017/09/12 13:43:37 jmcneill Exp $	*/
2 /*	$OpenBSD: sdmmc_mem.c,v 1.10 2009/01/09 10:55:22 jsg Exp $	*/
3 
4 /*
5  * Copyright (c) 2006 Uwe Stuehler <uwe@openbsd.org>
6  *
7  * Permission to use, copy, modify, and distribute this software for any
8  * purpose with or without fee is hereby granted, provided that the above
9  * copyright notice and this permission notice appear in all copies.
10  *
11  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 /*-
21  * Copyright (C) 2007, 2008, 2009, 2010 NONAKA Kimihiro <nonaka@netbsd.org>
22  * All rights reserved.
23  *
24  * Redistribution and use in source and binary forms, with or without
25  * modification, are permitted provided that the following conditions
26  * are met:
27  * 1. Redistributions of source code must retain the above copyright
28  *    notice, this list of conditions and the following disclaimer.
29  * 2. Redistributions in binary form must reproduce the above copyright
30  *    notice, this list of conditions and the following disclaimer in the
31  *    documentation and/or other materials provided with the distribution.
32  *
33  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
34  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
35  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
36  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
37  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
38  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
39  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
40  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
41  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
42  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
43  */
44 
45 /* Routines for SD/MMC memory cards. */
46 
47 #include <sys/cdefs.h>
48 __KERNEL_RCSID(0, "$NetBSD: sdmmc_mem.c,v 1.63 2017/09/12 13:43:37 jmcneill Exp $");
49 
50 #ifdef _KERNEL_OPT
51 #include "opt_sdmmc.h"
52 #endif
53 
54 #include <sys/param.h>
55 #include <sys/kernel.h>
56 #include <sys/malloc.h>
57 #include <sys/systm.h>
58 #include <sys/device.h>
59 #include <sys/bitops.h>
60 #include <sys/evcnt.h>
61 
62 #include <dev/sdmmc/sdmmcchip.h>
63 #include <dev/sdmmc/sdmmcreg.h>
64 #include <dev/sdmmc/sdmmcvar.h>
65 
66 #ifdef SDMMC_DEBUG
67 #define DPRINTF(s)	do { printf s; } while (/*CONSTCOND*/0)
68 #else
69 #define DPRINTF(s)	do {} while (/*CONSTCOND*/0)
70 #endif
71 
72 typedef struct { uint32_t _bits[512/32]; } __packed __aligned(4) sdmmc_bitfield512_t;
73 
74 static int sdmmc_mem_sd_init(struct sdmmc_softc *, struct sdmmc_function *);
75 static int sdmmc_mem_mmc_init(struct sdmmc_softc *, struct sdmmc_function *);
76 static int sdmmc_mem_send_cid(struct sdmmc_softc *, sdmmc_response *);
77 static int sdmmc_mem_send_csd(struct sdmmc_softc *, struct sdmmc_function *,
78     sdmmc_response *);
79 static int sdmmc_mem_send_scr(struct sdmmc_softc *, struct sdmmc_function *,
80     uint32_t *scr);
81 static int sdmmc_mem_decode_scr(struct sdmmc_softc *, struct sdmmc_function *);
82 static int sdmmc_mem_send_ssr(struct sdmmc_softc *, struct sdmmc_function *,
83     sdmmc_bitfield512_t *);
84 static int sdmmc_mem_decode_ssr(struct sdmmc_softc *, struct sdmmc_function *,
85     sdmmc_bitfield512_t *);
86 static int sdmmc_mem_send_cxd_data(struct sdmmc_softc *, int, void *, size_t);
87 static int sdmmc_set_bus_width(struct sdmmc_function *, int);
88 static int sdmmc_mem_sd_switch(struct sdmmc_function *, int, int, int, sdmmc_bitfield512_t *);
89 static int sdmmc_mem_mmc_switch(struct sdmmc_function *, uint8_t, uint8_t,
90     uint8_t, bool);
91 static int sdmmc_mem_signal_voltage(struct sdmmc_softc *, int);
92 static int sdmmc_mem_spi_read_ocr(struct sdmmc_softc *, uint32_t, uint32_t *);
93 static int sdmmc_mem_single_read_block(struct sdmmc_function *, uint32_t,
94     u_char *, size_t);
95 static int sdmmc_mem_single_write_block(struct sdmmc_function *, uint32_t,
96     u_char *, size_t);
97 static int sdmmc_mem_single_segment_dma_read_block(struct sdmmc_function *,
98     uint32_t, u_char *, size_t);
99 static int sdmmc_mem_single_segment_dma_write_block(struct sdmmc_function *,
100     uint32_t, u_char *, size_t);
101 static int sdmmc_mem_read_block_subr(struct sdmmc_function *, bus_dmamap_t,
102     uint32_t, u_char *, size_t);
103 static int sdmmc_mem_write_block_subr(struct sdmmc_function *, bus_dmamap_t,
104     uint32_t, u_char *, size_t);
105 
106 static const struct {
107 	const char *name;
108 	int v;
109 	int freq;
110 } switch_group0_functions[] = {
111 	/* Default/SDR12 */
112 	{ "Default/SDR12",	 0,			 25000 },
113 
114 	/* High-Speed/SDR25 */
115 	{ "High-Speed/SDR25",	SMC_CAPS_SD_HIGHSPEED,	 50000 },
116 
117 	/* SDR50 */
118 	{ "SDR50",		SMC_CAPS_UHS_SDR50,	100000 },
119 
120 	/* SDR104 */
121 	{ "SDR104",		SMC_CAPS_UHS_SDR104,	208000 },
122 
123 	/* DDR50 */
124 	{ "DDR50",		SMC_CAPS_UHS_DDR50,	 50000 },
125 };
126 
127 /*
128  * Initialize SD/MMC memory cards and memory in SDIO "combo" cards.
129  */
/*
 * Probe/voltage-negotiation sequence (per the SD/MMC specifications):
 *   1. CMD0 (GO_IDLE_STATE) resets the card.
 *   2. ACMD41 probe: SD cards answer, MMC cards time out, which is how
 *      the two are told apart (on timeout, clear SMF_SD_MODE and retry
 *      with CMD1 via the mmc_mode label).
 *   3. Negotiate a common OCR voltage window between host and card.
 *   4. CMD8 (SEND_IF_COND, arg 0x1aa) detects SD Ver.2; a matching echo
 *      lets us request HCS (high capacity) in the final OCR.
 *   5. Re-send the OCR until the card reports ready.
 *   6. If both sides advertise S18A, issue CMD11 (VOLTAGE_SWITCH) and
 *      move signalling to 1.8V, enabling UHS mode.
 *
 * Returns 0 on success or an errno-style error (also printed).
 */
130 int
131 sdmmc_mem_enable(struct sdmmc_softc *sc)
132 {
133 	uint32_t host_ocr;
134 	uint32_t card_ocr;
135 	uint32_t new_ocr;
136 	uint32_t ocr = 0;
137 	int error;
138 
139 	SDMMC_LOCK(sc);
140 
141 	/* Set host mode to SD "combo" card or SD memory-only. */
142 	CLR(sc->sc_flags, SMF_UHS_MODE);
143 	SET(sc->sc_flags, SMF_SD_MODE|SMF_MEM_MODE);
144 
145 	if (ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE))
146 		sdmmc_spi_chip_initialize(sc->sc_spi_sct, sc->sc_sch);
147 
148 	/* Reset memory (*must* do that before CMD55 or CMD1). */
149 	sdmmc_go_idle_state(sc);
150 
151 	if (ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
152 		/* Check SD Ver.2 */
153 		error = sdmmc_mem_send_if_cond(sc, 0x1aa, &card_ocr);
154 		if (error == 0 && card_ocr == 0x1aa)
155 			SET(ocr, MMC_OCR_HCS);
156 	}
157 
158 	/*
159 	 * Read the SD/MMC memory OCR value by issuing CMD55 followed
160 	 * by ACMD41 to read the OCR value from memory-only SD cards.
161 	 * MMC cards will not respond to CMD55 or ACMD41 and this is
162 	 * how we distinguish them from SD cards.
163 	 */
164 mmc_mode:
165 	error = sdmmc_mem_send_op_cond(sc,
166 	    ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE) ? ocr : 0, &card_ocr);
167 	if (error) {
168 		if (ISSET(sc->sc_flags, SMF_SD_MODE) &&
169 		    !ISSET(sc->sc_flags, SMF_IO_MODE)) {
170 			/* Not a SD card, switch to MMC mode. */
171 			DPRINTF(("%s: switch to MMC mode\n", SDMMCDEVNAME(sc)));
172 			CLR(sc->sc_flags, SMF_SD_MODE);
173 			goto mmc_mode;
174 		}
175 		if (!ISSET(sc->sc_flags, SMF_SD_MODE)) {
176 			DPRINTF(("%s: couldn't read memory OCR\n",
177 			    SDMMCDEVNAME(sc)));
178 			goto out;
179 		} else {
180 			/* Not a "combo" card. */
181 			CLR(sc->sc_flags, SMF_MEM_MODE);
182 			error = 0;
183 			goto out;
184 		}
185 	}
186 	if (ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
187 		/* get card OCR */
188 		error = sdmmc_mem_spi_read_ocr(sc, ocr, &card_ocr);
189 		if (error) {
190 			DPRINTF(("%s: couldn't read SPI memory OCR\n",
191 			    SDMMCDEVNAME(sc)));
192 			goto out;
193 		}
194 	}
195 
196 	/* Set the lowest voltage supported by the card and host. */
197 	host_ocr = sdmmc_chip_host_ocr(sc->sc_sct, sc->sc_sch);
198 	error = sdmmc_set_bus_power(sc, host_ocr, card_ocr);
199 	if (error) {
200 		DPRINTF(("%s: couldn't supply voltage requested by card\n",
201 		    SDMMCDEVNAME(sc)));
202 		goto out;
203 	}
204 
205 	DPRINTF(("%s: host_ocr 0x%08x\n", SDMMCDEVNAME(sc), host_ocr));
206 	DPRINTF(("%s: card_ocr 0x%08x\n", SDMMCDEVNAME(sc), card_ocr));
207 
208 	host_ocr &= card_ocr; /* only allow the common voltages */
209 	if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
210 		if (ISSET(sc->sc_flags, SMF_SD_MODE)) {
211 			/* Tell the card(s) to enter the idle state (again). */
212 			sdmmc_go_idle_state(sc);
213 			/* Check SD Ver.2 */
214 			error = sdmmc_mem_send_if_cond(sc, 0x1aa, &card_ocr);
215 			if (error == 0 && card_ocr == 0x1aa)
216 				SET(ocr, MMC_OCR_HCS);
217 
218 			if (sdmmc_chip_host_ocr(sc->sc_sct, sc->sc_sch) & MMC_OCR_S18A)
219 				SET(ocr, MMC_OCR_S18A);
220 		} else {
221 			SET(ocr, MMC_OCR_ACCESS_MODE_SECTOR);
222 		}
223 	}
	/* Merge the HCS/S18A/sector-mode request bits into the OCR we send. */
224 	host_ocr |= ocr;
225 
226 	/* Send the new OCR value until all cards are ready. */
227 	error = sdmmc_mem_send_op_cond(sc, host_ocr, &new_ocr);
228 	if (error) {
229 		DPRINTF(("%s: couldn't send memory OCR\n", SDMMCDEVNAME(sc)));
230 		goto out;
231 	}
232 
233 	if (ISSET(sc->sc_flags, SMF_SD_MODE) && ISSET(new_ocr, MMC_OCR_S18A)) {
234 		/*
235 		 * Card and host support low voltage mode, begin switch
236 		 * sequence.
237 		 */
238 		struct sdmmc_command cmd;
239 		memset(&cmd, 0, sizeof(cmd));
240 		cmd.c_arg = 0;
241 		cmd.c_flags = SCF_CMD_AC | SCF_RSP_R1;
242 		cmd.c_opcode = SD_VOLTAGE_SWITCH;
243 		DPRINTF(("%s: switching card to 1.8V\n", SDMMCDEVNAME(sc)));
244 		error = sdmmc_mmc_command(sc, &cmd);
245 		if (error) {
246 			DPRINTF(("%s: voltage switch command failed\n",
247 			    SDMMCDEVNAME(sc)));
248 			goto out;
249 		}
250 
251 		error = sdmmc_mem_signal_voltage(sc, SDMMC_SIGNAL_VOLTAGE_180);
252 		if (error)
253 			goto out;
254 
255 		SET(sc->sc_flags, SMF_UHS_MODE);
256 	}
257 
258 out:
259 	SDMMC_UNLOCK(sc);
260 
261 	if (error)
262 		printf("%s: %s failed with error %d\n", SDMMCDEVNAME(sc),
263 		    __func__, error);
264 
265 	return error;
266 }
267 
/*
 * Perform the host side of a signal-voltage change: gate the card
 * clock off, reprogram the host controller's signalling voltage, then
 * restart the clock at SDR12 speed (25 MHz).  The fixed delays give
 * the card/host time to settle between steps (voltage-switch timing;
 * exact values chosen by the original author — see SD spec CMD11
 * sequence).  Returns 0 on success or the chip-layer error.
 */
268 static int
269 sdmmc_mem_signal_voltage(struct sdmmc_softc *sc, int signal_voltage)
270 {
271 	int error;
272 
273 	/*
274 	 * Stop the clock
275 	 */
276 	error = sdmmc_chip_bus_clock(sc->sc_sct, sc->sc_sch,
277 	    SDMMC_SDCLK_OFF, false);
278 	if (error)
279 		goto out;
280 
281 	delay(1000);
282 
283 	/*
284 	 * Card switch command was successful, update host controller
285 	 * signal voltage setting.
286 	 */
287 	DPRINTF(("%s: switching host to %s\n", SDMMCDEVNAME(sc),
288 	    signal_voltage == SDMMC_SIGNAL_VOLTAGE_180 ? "1.8V" : "3.3V"));
289 	error = sdmmc_chip_signal_voltage(sc->sc_sct,
290 	    sc->sc_sch, signal_voltage);
291 	if (error)
292 		goto out;
293 
294 	delay(5000);
295 
296 	/*
297 	 * Switch to SDR12 timing
298 	 */
299 	error = sdmmc_chip_bus_clock(sc->sc_sct, sc->sc_sch, 25000,
300 	    false);
301 	if (error)
302 		goto out;
303 
304 	delay(1000);
305 
306 out:
307 	return error;
308 }
309 
310 /*
311  * Read the CSD and CID from all cards and assign each card a unique
312  * relative card address (RCA).  CMD2 is ignored by SDIO-only cards.
313  */
/*
 * Each pass of the loop enumerates one card: CMD2 (ALL_SEND_CID)
 * fetches a CID, a sdmmc_function is allocated for it, an RCA is
 * assigned (or queried, for SD) via sdmmc_set_relative_addr(), and the
 * function is queued on sc->sf_head.  Enumeration ends when CMD2 times
 * out (no more cards).  Afterwards every discovered card's CSD is read
 * and the CSD/CID are decoded; cards that fail are flagged SFF_ERROR
 * but stay on the list.
 */
314 void
315 sdmmc_mem_scan(struct sdmmc_softc *sc)
316 {
317 	sdmmc_response resp;
318 	struct sdmmc_function *sf;
319 	uint16_t next_rca;
320 	int error;
321 	int retry;
322 
323 	SDMMC_LOCK(sc);
324 
325 	/*
326 	 * CMD2 is a broadcast command understood by SD cards and MMC
327 	 * cards.  All cards begin to respond to the command, but back
328 	 * off if another card drives the CMD line to a different level.
329 	 * Only one card will get its entire response through.  That
330 	 * card remains silent once it has been assigned a RCA.
331 	 */
332 	for (retry = 0; retry < 100; retry++) {
333 		error = sdmmc_mem_send_cid(sc, &resp);
334 		if (error) {
335 			if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE) &&
336 			    error == ETIMEDOUT) {
337 				/* No more cards there. */
338 				break;
339 			}
340 			DPRINTF(("%s: couldn't read CID\n", SDMMCDEVNAME(sc)));
341 			break;
342 		}
343 
344 		/* In MMC mode, find the next available RCA. */
345 		next_rca = 1;
346 		if (!ISSET(sc->sc_flags, SMF_SD_MODE)) {
347 			SIMPLEQ_FOREACH(sf, &sc->sf_head, sf_list)
348 				next_rca++;
349 		}
350 
351 		/* Allocate a sdmmc_function structure. */
352 		sf = sdmmc_function_alloc(sc);
		/*
		 * NOTE(review): the allocation result is used unchecked --
		 * presumably sdmmc_function_alloc() cannot fail (sleeping
		 * allocation); confirm.
		 */
353 		sf->rca = next_rca;
354 
355 		/*
356 		 * Remember the CID returned in the CMD2 response for
357 		 * later decoding.
358 		 */
359 		memcpy(sf->raw_cid, resp, sizeof(sf->raw_cid));
360 
361 		/*
362 		 * Silence the card by assigning it a unique RCA, or
363 		 * querying it for its RCA in the case of SD.
364 		 */
365 		if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
366 			if (sdmmc_set_relative_addr(sc, sf) != 0) {
367 				aprint_error_dev(sc->sc_dev,
368 				    "couldn't set mem RCA\n");
369 				sdmmc_function_free(sf);
370 				break;
371 			}
372 		}
373 
374 		/*
375 		 * If this is a memory-only card, the card responding
376 		 * first becomes an alias for SDIO function 0.
377 		 */
378 		if (sc->sc_fn0 == NULL)
379 			sc->sc_fn0 = sf;
380 
381 		SIMPLEQ_INSERT_TAIL(&sc->sf_head, sf, sf_list);
382 
383 		/* only one function in SPI mode */
384 		if (ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE))
385 			break;
386 	}
387 
388 	if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE))
389 		/* Go to Data Transfer Mode, if possible. */
390 		sdmmc_chip_bus_rod(sc->sc_sct, sc->sc_sch, 0);
391 
392 	/*
393 	 * All cards are either inactive or awaiting further commands.
394 	 * Read the CSDs and decode the raw CID for each card.
395 	 */
396 	SIMPLEQ_FOREACH(sf, &sc->sf_head, sf_list) {
397 		error = sdmmc_mem_send_csd(sc, sf, &resp);
398 		if (error) {
399 			SET(sf->flags, SFF_ERROR);
400 			continue;
401 		}
402 
403 		if (sdmmc_decode_csd(sc, resp, sf) != 0 ||
404 		    sdmmc_decode_cid(sc, sf->raw_cid, sf) != 0) {
405 			SET(sf->flags, SFF_ERROR);
406 			continue;
407 		}
408 
409 #ifdef SDMMC_DEBUG
410 		printf("%s: CID: ", SDMMCDEVNAME(sc));
411 		sdmmc_print_cid(&sf->cid);
412 #endif
413 	}
414 
415 	SDMMC_UNLOCK(sc);
416 }
417 
/*
 * Decode the raw CSD response into sf->csd.  Handles the SD CSD v1.0
 * (byte-addressed) and v2.0 (SDHC, fixed 512-byte sector) layouts and
 * the MMC layout.  tran_speed is reconstructed from the TRAN_SPEED
 * exponent/mantissa fields (result in Kbit/s, per the speed tables
 * below).  capacity is normalized to 512-byte sectors at the end.
 * Returns 0 on success, 1 on an unrecognized CSD structure version.
 */
418 int
419 sdmmc_decode_csd(struct sdmmc_softc *sc, sdmmc_response resp,
420     struct sdmmc_function *sf)
421 {
422 	/* TRAN_SPEED(2:0): transfer rate exponent */
423 	static const int speed_exponent[8] = {
424 		100 *    1,	/* 100 Kbits/s */
425 		  1 * 1000,	/*   1 Mbits/s */
426 		 10 * 1000,	/*  10 Mbits/s */
427 		100 * 1000,	/* 100 Mbits/s */
428 		         0,
429 		         0,
430 		         0,
431 		         0,
432 	};
433 	/* TRAN_SPEED(6:3): time mantissa */
434 	static const int speed_mantissa[16] = {
435 		0, 10, 12, 13, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60, 70, 80,
436 	};
437 	struct sdmmc_csd *csd = &sf->csd;
438 	int e, m;
439 
440 	if (ISSET(sc->sc_flags, SMF_SD_MODE)) {
441 		/*
442 		 * CSD version 1.0 corresponds to SD system
443 		 * specification version 1.0 - 1.10. (SanDisk, 3.5.3)
444 		 */
445 		csd->csdver = SD_CSD_CSDVER(resp);
446 		switch (csd->csdver) {
447 		case SD_CSD_CSDVER_2_0:
448 			DPRINTF(("%s: SD Ver.2.0\n", SDMMCDEVNAME(sc)));
449 			SET(sf->flags, SFF_SDHC);
450 			csd->capacity = SD_CSD_V2_CAPACITY(resp);
451 			csd->read_bl_len = SD_CSD_V2_BL_LEN;
452 			break;
453 
454 		case SD_CSD_CSDVER_1_0:
455 			DPRINTF(("%s: SD Ver.1.0\n", SDMMCDEVNAME(sc)));
456 			csd->capacity = SD_CSD_CAPACITY(resp);
457 			csd->read_bl_len = SD_CSD_READ_BL_LEN(resp);
458 			break;
459 
460 		default:
461 			aprint_error_dev(sc->sc_dev,
462 			    "unknown SD CSD structure version 0x%x\n",
463 			    csd->csdver);
464 			return 1;
465 		}
466 
467 		csd->mmcver = SD_CSD_MMCVER(resp);
468 		csd->write_bl_len = SD_CSD_WRITE_BL_LEN(resp);
469 		csd->r2w_factor = SD_CSD_R2W_FACTOR(resp);
470 		e = SD_CSD_SPEED_EXP(resp);
471 		m = SD_CSD_SPEED_MANT(resp);
472 		csd->tran_speed = speed_exponent[e] * speed_mantissa[m] / 10;
473 		csd->ccc = SD_CSD_CCC(resp);
474 	} else {
475 		csd->csdver = MMC_CSD_CSDVER(resp);
476 		if (csd->csdver == MMC_CSD_CSDVER_1_0) {
477 			aprint_error_dev(sc->sc_dev,
478 			    "unknown MMC CSD structure version 0x%x\n",
479 			    csd->csdver);
480 			return 1;
481 		}
482 
483 		csd->mmcver = MMC_CSD_MMCVER(resp);
484 		csd->capacity = MMC_CSD_CAPACITY(resp);
485 		csd->read_bl_len = MMC_CSD_READ_BL_LEN(resp);
486 		csd->write_bl_len = MMC_CSD_WRITE_BL_LEN(resp);
487 		csd->r2w_factor = MMC_CSD_R2W_FACTOR(resp);
488 		e = MMC_CSD_TRAN_SPEED_EXP(resp);
489 		m = MMC_CSD_TRAN_SPEED_MANT(resp);
490 		csd->tran_speed = speed_exponent[e] * speed_mantissa[m] / 10;
491 	}
	/* Scale capacity so it always counts SDMMC_SECTOR_SIZE units. */
492 	if ((1 << csd->read_bl_len) > SDMMC_SECTOR_SIZE)
493 		csd->capacity *= (1 << csd->read_bl_len) / SDMMC_SECTOR_SIZE;
494 
495 #ifdef SDMMC_DUMP_CSD
496 	sdmmc_print_csd(resp, csd);
497 #endif
498 
499 	return 0;
500 }
501 
/*
 * Decode the raw CID into sf->cid.  SD cards use a single fixed
 * layout; MMC cards use a version-dependent layout selected by the
 * spec version previously decoded from the CSD (sf->csd.mmcver).
 * Returns 0 on success, 1 on an unknown MMC version.
 */
502 int
503 sdmmc_decode_cid(struct sdmmc_softc *sc, sdmmc_response resp,
504     struct sdmmc_function *sf)
505 {
506 	struct sdmmc_cid *cid = &sf->cid;
507 
508 	if (ISSET(sc->sc_flags, SMF_SD_MODE)) {
509 		cid->mid = SD_CID_MID(resp);
510 		cid->oid = SD_CID_OID(resp);
511 		SD_CID_PNM_CPY(resp, cid->pnm);
512 		cid->rev = SD_CID_REV(resp);
513 		cid->psn = SD_CID_PSN(resp);
514 		cid->mdt = SD_CID_MDT(resp);
515 	} else {
516 		switch(sf->csd.mmcver) {
517 		case MMC_CSD_MMCVER_1_0:
518 		case MMC_CSD_MMCVER_1_4:
519 			cid->mid = MMC_CID_MID_V1(resp);
520 			MMC_CID_PNM_V1_CPY(resp, cid->pnm);
521 			cid->rev = MMC_CID_REV_V1(resp);
522 			cid->psn = MMC_CID_PSN_V1(resp);
523 			cid->mdt = MMC_CID_MDT_V1(resp);
524 			break;
525 		case MMC_CSD_MMCVER_2_0:
526 		case MMC_CSD_MMCVER_3_1:
527 		case MMC_CSD_MMCVER_4_0:
528 			cid->mid = MMC_CID_MID_V2(resp);
529 			cid->oid = MMC_CID_OID_V2(resp);
530 			MMC_CID_PNM_V2_CPY(resp, cid->pnm);
531 			cid->psn = MMC_CID_PSN_V2(resp);
532 			break;
533 		default:
534 			aprint_error_dev(sc->sc_dev, "unknown MMC version %d\n",
535 			    sf->csd.mmcver);
536 			return 1;
537 		}
538 	}
539 	return 0;
540 }
541 
/* Dump a decoded CID to the console (debug aid used under SDMMC_DEBUG). */
542 void
543 sdmmc_print_cid(struct sdmmc_cid *cid)
544 {
545 
546 	printf("mid=0x%02x oid=0x%04x pnm=\"%s\" rev=0x%02x psn=0x%08x"
547 	    " mdt=%03x\n", cid->mid, cid->oid, cid->pnm, cid->rev, cid->psn,
548 	    cid->mdt);
549 }
550 
551 #ifdef SDMMC_DUMP_CSD
/* Dump a decoded CSD to the console (built only with SDMMC_DUMP_CSD). */
552 void
553 sdmmc_print_csd(sdmmc_response resp, struct sdmmc_csd *csd)
554 {
555 
556 	printf("csdver = %d\n", csd->csdver);
557 	printf("mmcver = %d\n", csd->mmcver);
558 	printf("capacity = 0x%08x\n", csd->capacity);
559 	printf("read_bl_len = %d\n", csd->read_bl_len);
560 	printf("write_bl_len = %d\n", csd->write_bl_len);
561 	printf("r2w_factor = %d\n", csd->r2w_factor);
562 	printf("tran_speed = %d\n", csd->tran_speed);
563 	printf("ccc = 0x%x\n", csd->ccc);
564 }
565 #endif
566 
567 /*
568  * Initialize a SD/MMC memory card.
569  */
/*
 * Selects the card (CMD7, non-SPI only), sets a 512-byte block length
 * (CMD16), then runs the SD- or MMC-specific bus setup (bus width,
 * speed mode, clock).  Called after sdmmc_mem_scan() has identified
 * the function.  Returns 0 or an errno-style error.
 */
570 int
571 sdmmc_mem_init(struct sdmmc_softc *sc, struct sdmmc_function *sf)
572 {
573 	int error = 0;
574 
575 	SDMMC_LOCK(sc);
576 
577 	if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
578 		error = sdmmc_select_card(sc, sf);
579 		if (error)
580 			goto out;
581 	}
582 
583 	error = sdmmc_mem_set_blocklen(sc, sf, SDMMC_SECTOR_SIZE);
584 	if (error)
585 		goto out;
586 
587 	if (ISSET(sc->sc_flags, SMF_SD_MODE))
588 		error = sdmmc_mem_sd_init(sc, sf);
589 	else
590 		error = sdmmc_mem_mmc_init(sc, sf);
591 
592 out:
593 	SDMMC_UNLOCK(sc);
594 
595 	return error;
596 }
597 
598 /*
599  * Get or set the card's memory OCR value (SD or MMC).
600  */
/*
 * Polls ACMD41 (SD) or CMD1 (MMC) up to 100 times, ~10ms apart
 * (roughly one second total), until the card reports ready via
 * MMC_OCR_MEM_READY.  With ocr == 0 a single successful query
 * suffices: the caller only wants to read the card's OCR.  In SPI
 * mode readiness is signalled by the R1 IDLE bit clearing instead,
 * and only the HCS bit of the OCR argument is forwarded.
 * *ocrp (if non-NULL, non-SPI) receives the R3 response on success.
 */
601 int
602 sdmmc_mem_send_op_cond(struct sdmmc_softc *sc, uint32_t ocr, uint32_t *ocrp)
603 {
604 	struct sdmmc_command cmd;
605 	int error;
606 	int retry;
607 
608 	/* Don't lock */
609 
610 	DPRINTF(("%s: sdmmc_mem_send_op_cond: ocr=%#x\n",
611 	    SDMMCDEVNAME(sc), ocr));
612 
613 	/*
614 	 * If we change the OCR value, retry the command until the OCR
615 	 * we receive in response has the "CARD BUSY" bit set, meaning
616 	 * that all cards are ready for identification.
617 	 */
618 	for (retry = 0; retry < 100; retry++) {
619 		memset(&cmd, 0, sizeof(cmd));
620 		cmd.c_arg = !ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE) ?
621 		    ocr : (ocr & MMC_OCR_HCS);
622 		cmd.c_flags = SCF_CMD_BCR | SCF_RSP_R3 | SCF_RSP_SPI_R1
623 		    | SCF_TOUT_OK;
624 
625 		if (ISSET(sc->sc_flags, SMF_SD_MODE)) {
626 			cmd.c_opcode = SD_APP_OP_COND;
627 			error = sdmmc_app_command(sc, NULL, &cmd);
628 		} else {
629 			cmd.c_opcode = MMC_SEND_OP_COND;
630 			error = sdmmc_mmc_command(sc, &cmd);
631 		}
632 		if (error)
633 			break;
634 
635 		if (ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
636 			if (!ISSET(MMC_SPI_R1(cmd.c_resp), R1_SPI_IDLE))
637 				break;
638 		} else {
639 			if (ISSET(MMC_R3(cmd.c_resp), MMC_OCR_MEM_READY) ||
640 			    ocr == 0)
641 				break;
642 		}
643 
644 		error = ETIMEDOUT;
645 		sdmmc_delay(10000);
646 	}
647 	if (error == 0 &&
648 	    ocrp != NULL &&
649 	    !ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE))
650 		*ocrp = MMC_R3(cmd.c_resp);
651 	DPRINTF(("%s: sdmmc_mem_send_op_cond: error=%d, ocr=%#x\n",
652 	    SDMMCDEVNAME(sc), error, MMC_R3(cmd.c_resp)));
653 	return error;
654 }
655 
/*
 * CMD8 (SEND_IF_COND): probe for SD Ver.2 support.  The card echoes
 * the supplied voltage/check pattern (callers pass 0x1aa) in its R7
 * response; the caller compares the echo to decide whether the card
 * understands the 2.0 command set.  *ocrp receives the (SPI or native)
 * R7 response on success.  Returns 0 or the command error.
 */
656 int
657 sdmmc_mem_send_if_cond(struct sdmmc_softc *sc, uint32_t ocr, uint32_t *ocrp)
658 {
659 	struct sdmmc_command cmd;
660 	int error;
661 
662 	/* Don't lock */
663 
664 	memset(&cmd, 0, sizeof(cmd));
665 	cmd.c_arg = ocr;
666 	cmd.c_flags = SCF_CMD_BCR | SCF_RSP_R7 | SCF_RSP_SPI_R7;
667 	cmd.c_opcode = SD_SEND_IF_COND;
668 
669 	error = sdmmc_mmc_command(sc, &cmd);
670 	if (error == 0 && ocrp != NULL) {
671 		if (ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
672 			*ocrp = MMC_SPI_R7(cmd.c_resp);
673 		} else {
674 			*ocrp = MMC_R7(cmd.c_resp);
675 		}
676 		DPRINTF(("%s: sdmmc_mem_send_if_cond: error=%d, ocr=%#x\n",
677 		    SDMMCDEVNAME(sc), error, *ocrp));
678 	}
679 	return error;
680 }
681 
682 /*
683  * Set the read block length appropriately for this card, according to
684  * the card CSD register value.
685  */
/*
 * Issues CMD16 (SET_BLOCKLEN) with block_len as the argument; callers
 * pass SDMMC_SECTOR_SIZE.  Returns the command's error status.
 */
686 int
687 sdmmc_mem_set_blocklen(struct sdmmc_softc *sc, struct sdmmc_function *sf,
688    int block_len)
689 {
690 	struct sdmmc_command cmd;
691 	int error;
692 
693 	/* Don't lock */
694 
695 	memset(&cmd, 0, sizeof(cmd));
696 	cmd.c_opcode = MMC_SET_BLOCKLEN;
697 	cmd.c_arg = block_len;
698 	cmd.c_flags = SCF_CMD_AC | SCF_RSP_R1 | SCF_RSP_SPI_R1;
699 
700 	error = sdmmc_mmc_command(sc, &cmd);
701 
702 	DPRINTF(("%s: sdmmc_mem_set_blocklen: read_bl_len=%d sector_size=%d\n",
703 	    SDMMCDEVNAME(sc), 1 << sf->csd.read_bl_len, block_len));
704 
705 	return error;
706 }
707 
708 /* make 512-bit BE quantity __bitfield()-compatible */
709 static void
710 sdmmc_be512_to_bitfield512(sdmmc_bitfield512_t *buf) {
711 	size_t i;
712 	uint32_t tmp0, tmp1;
713 	const size_t bitswords = __arraycount(buf->_bits);
714 	for (i = 0; i < bitswords/2; i++) {
715 		tmp0 = buf->_bits[i];
716 		tmp1 = buf->_bits[bitswords - 1 - i];
717 		buf->_bits[i] = be32toh(tmp1);
718 		buf->_bits[bitswords - 1 - i] = be32toh(tmp0);
719 	}
720 }
721 
722 static int
723 sdmmc_mem_select_transfer_mode(struct sdmmc_softc *sc, int support_func)
724 {
725 	if (ISSET(sc->sc_flags, SMF_UHS_MODE)) {
726 		if (ISSET(sc->sc_caps, SMC_CAPS_UHS_SDR104) &&
727 		    ISSET(support_func, 1 << SD_ACCESS_MODE_SDR104)) {
728 			return SD_ACCESS_MODE_SDR104;
729 		}
730 		if (ISSET(sc->sc_caps, SMC_CAPS_UHS_DDR50) &&
731 		    ISSET(support_func, 1 << SD_ACCESS_MODE_DDR50)) {
732 			return SD_ACCESS_MODE_DDR50;
733 		}
734 		if (ISSET(sc->sc_caps, SMC_CAPS_UHS_SDR50) &&
735 		    ISSET(support_func, 1 << SD_ACCESS_MODE_SDR50)) {
736 			return SD_ACCESS_MODE_SDR50;
737 		}
738 	}
739 	if (ISSET(sc->sc_caps, SMC_CAPS_SD_HIGHSPEED) &&
740 	    ISSET(support_func, 1 << SD_ACCESS_MODE_SDR25)) {
741 		return SD_ACCESS_MODE_SDR25;
742 	}
743 	return SD_ACCESS_MODE_SDR12;
744 }
745 
746 static int
747 sdmmc_mem_execute_tuning(struct sdmmc_softc *sc, struct sdmmc_function *sf)
748 {
749 	int timing = -1;
750 
751 	if (ISSET(sc->sc_flags, SMF_SD_MODE)) {
752 		if (!ISSET(sc->sc_flags, SMF_UHS_MODE))
753 			return 0;
754 
755 		switch (sf->csd.tran_speed) {
756 		case 100000:
757 			timing = SDMMC_TIMING_UHS_SDR50;
758 			break;
759 		case 208000:
760 			timing = SDMMC_TIMING_UHS_SDR104;
761 			break;
762 		default:
763 			return 0;
764 		}
765 	} else {
766 		switch (sf->csd.tran_speed) {
767 		case 200000:
768 			timing = SDMMC_TIMING_MMC_HS200;
769 			break;
770 		default:
771 			return 0;
772 		}
773 	}
774 
775 	DPRINTF(("%s: execute tuning for timing %d\n", SDMMCDEVNAME(sc),
776 	    timing));
777 
778 	return sdmmc_chip_execute_tuning(sc->sc_sct, sc->sc_sch, timing);
779 }
780 
/*
 * SD-specific bus setup: start the clock at the CSD-advertised rate,
 * read and decode the SCR, widen the bus to 4 bits when both sides
 * allow it, query the CMD6 (SWITCH_FUNC) group-1 modes and select the
 * best one, raise the bus clock (enabling DDR for DDR50), read the SD
 * status register (SSR), and finally run tuning for UHS timings.
 * Returns 0 or the first error encountered.
 */
781 static int
782 sdmmc_mem_sd_init(struct sdmmc_softc *sc, struct sdmmc_function *sf)
783 {
784 	int support_func, best_func, bus_clock, error, i;
785 	sdmmc_bitfield512_t status;
786 	bool ddr = false;
787 
788 	/* change bus clock */
789 	bus_clock = min(sc->sc_busclk, sf->csd.tran_speed);
790 	error = sdmmc_chip_bus_clock(sc->sc_sct, sc->sc_sch, bus_clock, false);
791 	if (error) {
792 		aprint_error_dev(sc->sc_dev, "can't change bus clock\n");
793 		return error;
794 	}
795 
796 	error = sdmmc_mem_send_scr(sc, sf, sf->raw_scr);
797 	if (error) {
798 		aprint_error_dev(sc->sc_dev, "SD_SEND_SCR send failed.\n");
799 		return error;
800 	}
801 	error = sdmmc_mem_decode_scr(sc, sf);
802 	if (error)
803 		return error;
804 
805 	if (ISSET(sc->sc_caps, SMC_CAPS_4BIT_MODE) &&
806 	    ISSET(sf->scr.bus_width, SCR_SD_BUS_WIDTHS_4BIT)) {
807 		DPRINTF(("%s: change bus width\n", SDMMCDEVNAME(sc)));
808 		error = sdmmc_set_bus_width(sf, 4);
809 		if (error) {
810 			aprint_error_dev(sc->sc_dev,
811 			    "can't change bus width (%d bit)\n", 4);
812 			return error;
813 		}
814 		sf->width = 4;
815 	}
816 
817 	best_func = 0;
818 	if (sf->scr.sd_spec >= SCR_SD_SPEC_VER_1_10 &&
819 	    ISSET(sf->csd.ccc, SD_CSD_CCC_SWITCH)) {
820 		DPRINTF(("%s: switch func mode 0\n", SDMMCDEVNAME(sc)));
821 		error = sdmmc_mem_sd_switch(sf, 0, 1, 0, &status);
822 		if (error) {
823 			aprint_error_dev(sc->sc_dev,
824 			    "switch func mode 0 failed\n");
825 			return error;
826 		}
827 
828 		support_func = SFUNC_STATUS_GROUP(&status, 1);
829 
		/* 0x1c = SDR50|SDR104|DDR50 support bits in group 1. */
830 		if (!ISSET(sc->sc_flags, SMF_UHS_MODE) && support_func & 0x1c) {
831 			/* XXX UHS-I card started in 1.8V mode, switch now */
832 			error = sdmmc_mem_signal_voltage(sc,
833 			    SDMMC_SIGNAL_VOLTAGE_180);
834 			if (error) {
835 				aprint_error_dev(sc->sc_dev,
836 				    "failed to recover UHS card\n");
837 				return error;
838 			}
839 			SET(sc->sc_flags, SMF_UHS_MODE);
840 		}
841 
842 		for (i = 0; i < __arraycount(switch_group0_functions); i++) {
843 			if (!(support_func & (1 << i)))
844 				continue;
845 			DPRINTF(("%s: card supports mode %s\n",
846 			    SDMMCDEVNAME(sc),
847 			    switch_group0_functions[i].name));
848 		}
849 
850 		best_func = sdmmc_mem_select_transfer_mode(sc, support_func);
851 
852 		DPRINTF(("%s: using mode %s\n", SDMMCDEVNAME(sc),
853 		    switch_group0_functions[best_func].name));
854 
855 		if (best_func != 0) {
856 			DPRINTF(("%s: switch func mode 1(func=%d)\n",
857 			    SDMMCDEVNAME(sc), best_func));
858 			error =
859 			    sdmmc_mem_sd_switch(sf, 1, 1, best_func, &status);
860 			if (error) {
861 				aprint_error_dev(sc->sc_dev,
862 				    "switch func mode 1 failed:"
863 				    " group 1 function %d(0x%2x)\n",
864 				    best_func, support_func);
865 				return error;
866 			}
867 			sf->csd.tran_speed =
868 			    switch_group0_functions[best_func].freq;
869 
870 			if (best_func == SD_ACCESS_MODE_DDR50)
871 				ddr = true;
872 
873 			/* Wait 400KHz x 8 clock (2.5us * 8 + slop) */
874 			delay(25);
875 		}
876 	}
877 
878 	/* update bus clock */
879 	if (sc->sc_busclk > sf->csd.tran_speed)
880 		sc->sc_busclk = sf->csd.tran_speed;
	/*
	 * NOTE(review): when neither the clock nor the DDR mode changes we
	 * return here without setting sc_transfer_mode, reading the SSR or
	 * running tuning -- presumably fine for the default mode; confirm.
	 */
881 	if (sc->sc_busclk == bus_clock && sc->sc_busddr == ddr)
882 		return 0;
883 
884 	/* change bus clock */
885 	error = sdmmc_chip_bus_clock(sc->sc_sct, sc->sc_sch, sc->sc_busclk,
886 	    ddr);
887 	if (error) {
888 		aprint_error_dev(sc->sc_dev, "can't change bus clock\n");
889 		return error;
890 	}
891 
892 	sc->sc_transfer_mode = switch_group0_functions[best_func].name;
893 	sc->sc_busddr = ddr;
894 
895 	/* get card status */
896 	error = sdmmc_mem_send_ssr(sc, sf, &status);
897 	if (error) {
898 		aprint_error_dev(sc->sc_dev, "can't get SD status: %d\n",
899 		    error);
900 		return error;
901 	}
902 	sdmmc_mem_decode_ssr(sc, sf, &status);
903 
904 	/* execute tuning (UHS) */
905 	error = sdmmc_mem_execute_tuning(sc, sf);
906 	if (error) {
907 		aprint_error_dev(sc->sc_dev, "can't execute SD tuning\n");
908 		return error;
909 	}
910 
911 	return 0;
912 }
913 
914 static int
915 sdmmc_mem_mmc_init(struct sdmmc_softc *sc, struct sdmmc_function *sf)
916 {
917 	int width, value, hs_timing, bus_clock, error;
918 	uint8_t ext_csd[512];
919 	uint32_t sectors = 0;
920 	bool ddr = false;
921 
922 	sc->sc_transfer_mode = NULL;
923 
924 	/* change bus clock */
925 	bus_clock = min(sc->sc_busclk, sf->csd.tran_speed);
926 	error = sdmmc_chip_bus_clock(sc->sc_sct, sc->sc_sch, bus_clock, false);
927 	if (error) {
928 		aprint_error_dev(sc->sc_dev, "can't change bus clock\n");
929 		return error;
930 	}
931 
932 	if (sf->csd.mmcver >= MMC_CSD_MMCVER_4_0) {
933 		error = sdmmc_mem_send_cxd_data(sc,
934 		    MMC_SEND_EXT_CSD, ext_csd, sizeof(ext_csd));
935 		if (error) {
936 			aprint_error_dev(sc->sc_dev,
937 			    "can't read EXT_CSD (error=%d)\n", error);
938 			return error;
939 		}
940 		if ((sf->csd.csdver == MMC_CSD_CSDVER_EXT_CSD) &&
941 		    (ext_csd[EXT_CSD_STRUCTURE] > EXT_CSD_STRUCTURE_VER_1_2)) {
942 			aprint_error_dev(sc->sc_dev,
943 			    "unrecognised future version (%d)\n",
944 				ext_csd[EXT_CSD_STRUCTURE]);
945 			return ENOTSUP;
946 		}
947 		sf->ext_csd.rev = ext_csd[EXT_CSD_REV];
948 
949 		if (ISSET(sc->sc_caps, SMC_CAPS_MMC_HS200) &&
950 		    ext_csd[EXT_CSD_CARD_TYPE] & EXT_CSD_CARD_TYPE_F_HS200_1_8V) {
951 			sf->csd.tran_speed = 200000;	/* 200MHz SDR */
952 			hs_timing = EXT_CSD_HS_TIMING_HS200;
953 		} else if (ISSET(sc->sc_caps, SMC_CAPS_MMC_DDR52) &&
954 		    ext_csd[EXT_CSD_CARD_TYPE] & EXT_CSD_CARD_TYPE_F_DDR52_1_8V) {
955 			sf->csd.tran_speed = 52000;	/* 52MHz */
956 			hs_timing = EXT_CSD_HS_TIMING_HIGHSPEED;
957 			ddr = true;
958 		} else if (ext_csd[EXT_CSD_CARD_TYPE] & EXT_CSD_CARD_TYPE_F_52M) {
959 			sf->csd.tran_speed = 52000;	/* 52MHz */
960 			hs_timing = EXT_CSD_HS_TIMING_HIGHSPEED;
961 		} else if (ext_csd[EXT_CSD_CARD_TYPE] & EXT_CSD_CARD_TYPE_F_26M) {
962 			sf->csd.tran_speed = 26000;	/* 26MHz */
963 			hs_timing = EXT_CSD_HS_TIMING_LEGACY;
964 		} else {
965 			aprint_error_dev(sc->sc_dev,
966 			    "unknown CARD_TYPE: 0x%x\n",
967 			    ext_csd[EXT_CSD_CARD_TYPE]);
968 			return ENOTSUP;
969 		}
970 
971 		if (ISSET(sc->sc_caps, SMC_CAPS_8BIT_MODE)) {
972 			width = 8;
973 			value = EXT_CSD_BUS_WIDTH_8;
974 		} else if (ISSET(sc->sc_caps, SMC_CAPS_4BIT_MODE)) {
975 			width = 4;
976 			value = EXT_CSD_BUS_WIDTH_4;
977 		} else {
978 			width = 1;
979 			value = EXT_CSD_BUS_WIDTH_1;
980 		}
981 
982 		if (width != 1) {
983 			error = sdmmc_mem_mmc_switch(sf, EXT_CSD_CMD_SET_NORMAL,
984 			    EXT_CSD_BUS_WIDTH, value, false);
985 			if (error == 0)
986 				error = sdmmc_chip_bus_width(sc->sc_sct,
987 				    sc->sc_sch, width);
988 			else {
989 				DPRINTF(("%s: can't change bus width"
990 				    " (%d bit)\n", SDMMCDEVNAME(sc), width));
991 				return error;
992 			}
993 
994 			/* XXXX: need bus test? (using by CMD14 & CMD19) */
995 			delay(10000);
996 		}
997 		sf->width = width;
998 
999 		if (hs_timing == EXT_CSD_HS_TIMING_HIGHSPEED &&
1000 		    !ISSET(sc->sc_caps, SMC_CAPS_MMC_HIGHSPEED)) {
1001 			hs_timing = EXT_CSD_HS_TIMING_LEGACY;
1002 		}
1003 		if (hs_timing != EXT_CSD_HS_TIMING_LEGACY) {
1004 			error = sdmmc_mem_mmc_switch(sf, EXT_CSD_CMD_SET_NORMAL,
1005 			    EXT_CSD_HS_TIMING, hs_timing, false);
1006 			if (error) {
1007 				aprint_error_dev(sc->sc_dev,
1008 				    "can't change high speed %d, error %d\n",
1009 				    hs_timing, error);
1010 				return error;
1011 			}
1012 		}
1013 
1014 		if (sc->sc_busclk > sf->csd.tran_speed)
1015 			sc->sc_busclk = sf->csd.tran_speed;
1016 		if (sc->sc_busclk != bus_clock) {
1017 			error = sdmmc_chip_bus_clock(sc->sc_sct, sc->sc_sch,
1018 			    sc->sc_busclk, false);
1019 			if (error) {
1020 				aprint_error_dev(sc->sc_dev,
1021 				    "can't change bus clock\n");
1022 				return error;
1023 			}
1024 		}
1025 
1026 		if (hs_timing != EXT_CSD_HS_TIMING_LEGACY) {
1027 			error = sdmmc_mem_send_cxd_data(sc,
1028 			    MMC_SEND_EXT_CSD, ext_csd, sizeof(ext_csd));
1029 			if (error) {
1030 				aprint_error_dev(sc->sc_dev,
1031 				    "can't re-read EXT_CSD\n");
1032 				return error;
1033 			}
1034 			if (ext_csd[EXT_CSD_HS_TIMING] != hs_timing) {
1035 				aprint_error_dev(sc->sc_dev,
1036 				    "HS_TIMING set failed\n");
1037 				return EINVAL;
1038 			}
1039 		}
1040 
1041 		/*
1042 		 * HS_TIMING must be set to “0x1” before setting BUS_WIDTH
1043 		 * for dual data rate operation
1044 		 */
1045 		if (ddr &&
1046 		    hs_timing == EXT_CSD_HS_TIMING_HIGHSPEED &&
1047 		    width > 1) {
1048 			error = sdmmc_mem_mmc_switch(sf,
1049 			    EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BUS_WIDTH,
1050 			    (width == 8) ? EXT_CSD_BUS_WIDTH_8_DDR :
1051 			      EXT_CSD_BUS_WIDTH_4_DDR, false);
1052 			if (error) {
1053 				DPRINTF(("%s: can't switch to DDR"
1054 				    " (%d bit)\n", SDMMCDEVNAME(sc), width));
1055 				return error;
1056 			}
1057 
1058 			delay(10000);
1059 
1060 			error = sdmmc_mem_signal_voltage(sc,
1061 			    SDMMC_SIGNAL_VOLTAGE_180);
1062 			if (error) {
1063 				aprint_error_dev(sc->sc_dev,
1064 				    "can't switch signaling voltage\n");
1065 				return error;
1066 			}
1067 
1068 			error = sdmmc_chip_bus_clock(sc->sc_sct, sc->sc_sch,
1069 			    sc->sc_busclk, ddr);
1070 			if (error) {
1071 				aprint_error_dev(sc->sc_dev,
1072 				    "can't change bus clock\n");
1073 				return error;
1074 			}
1075 
1076 			delay(10000);
1077 
1078 			sc->sc_transfer_mode = "DDR52";
1079 			sc->sc_busddr = ddr;
1080 		}
1081 
1082 		sectors = ext_csd[EXT_CSD_SEC_COUNT + 0] << 0 |
1083 		    ext_csd[EXT_CSD_SEC_COUNT + 1] << 8  |
1084 		    ext_csd[EXT_CSD_SEC_COUNT + 2] << 16 |
1085 		    ext_csd[EXT_CSD_SEC_COUNT + 3] << 24;
1086 		if (sectors > (2u * 1024 * 1024 * 1024) / 512) {
1087 			SET(sf->flags, SFF_SDHC);
1088 			sf->csd.capacity = sectors;
1089 		}
1090 
1091 		if (hs_timing == EXT_CSD_HS_TIMING_HS200) {
1092 			sc->sc_transfer_mode = "HS200";
1093 
1094 			/* execute tuning (HS200) */
1095 			error = sdmmc_mem_execute_tuning(sc, sf);
1096 			if (error) {
1097 				aprint_error_dev(sc->sc_dev,
1098 				    "can't execute MMC tuning\n");
1099 				return error;
1100 			}
1101 		}
1102 
1103 		if (sf->ext_csd.rev >= 5) {
1104 			sf->ext_csd.rst_n_function =
1105 			    ext_csd[EXT_CSD_RST_N_FUNCTION];
1106 		}
1107 
1108 		if (sf->ext_csd.rev >= 6) {
1109 			sf->ext_csd.cache_size =
1110 			    le32dec(&ext_csd[EXT_CSD_CACHE_SIZE]) * 1024;
1111 		}
1112 		if (sf->ext_csd.cache_size > 0) {
1113 			/* eMMC cache present, enable it */
1114 			error = sdmmc_mem_mmc_switch(sf,
1115 			    EXT_CSD_CMD_SET_NORMAL, EXT_CSD_CACHE_CTRL,
1116 			    EXT_CSD_CACHE_CTRL_CACHE_EN, false);
1117 			if (error) {
1118 				aprint_error_dev(sc->sc_dev,
1119 				    "can't enable cache: %d\n", error);
1120 			} else {
1121 				SET(sf->flags, SFF_CACHE_ENABLED);
1122 			}
1123 		}
1124 	} else {
1125 		if (sc->sc_busclk > sf->csd.tran_speed)
1126 			sc->sc_busclk = sf->csd.tran_speed;
1127 		if (sc->sc_busclk != bus_clock) {
1128 			error = sdmmc_chip_bus_clock(sc->sc_sct, sc->sc_sch,
1129 			    sc->sc_busclk, false);
1130 			if (error) {
1131 				aprint_error_dev(sc->sc_dev,
1132 				    "can't change bus clock\n");
1133 				return error;
1134 			}
1135 		}
1136 	}
1137 
1138 	return 0;
1139 }
1140 
1141 static int
1142 sdmmc_mem_send_cid(struct sdmmc_softc *sc, sdmmc_response *resp)
1143 {
1144 	struct sdmmc_command cmd;
1145 	int error;
1146 
1147 	if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
1148 		memset(&cmd, 0, sizeof cmd);
1149 		cmd.c_opcode = MMC_ALL_SEND_CID;
1150 		cmd.c_flags = SCF_CMD_BCR | SCF_RSP_R2 | SCF_TOUT_OK;
1151 
1152 		error = sdmmc_mmc_command(sc, &cmd);
1153 	} else {
1154 		error = sdmmc_mem_send_cxd_data(sc, MMC_SEND_CID, &cmd.c_resp,
1155 		    sizeof(cmd.c_resp));
1156 	}
1157 
1158 #ifdef SDMMC_DEBUG
1159 	if (error == 0)
1160 		sdmmc_dump_data("CID", cmd.c_resp, sizeof(cmd.c_resp));
1161 #endif
1162 	if (error == 0 && resp != NULL)
1163 		memcpy(resp, &cmd.c_resp, sizeof(*resp));
1164 	return error;
1165 }
1166 
1167 static int
1168 sdmmc_mem_send_csd(struct sdmmc_softc *sc, struct sdmmc_function *sf,
1169     sdmmc_response *resp)
1170 {
1171 	struct sdmmc_command cmd;
1172 	int error;
1173 
1174 	if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
1175 		memset(&cmd, 0, sizeof cmd);
1176 		cmd.c_opcode = MMC_SEND_CSD;
1177 		cmd.c_arg = MMC_ARG_RCA(sf->rca);
1178 		cmd.c_flags = SCF_CMD_AC | SCF_RSP_R2;
1179 
1180 		error = sdmmc_mmc_command(sc, &cmd);
1181 	} else {
1182 		error = sdmmc_mem_send_cxd_data(sc, MMC_SEND_CSD, &cmd.c_resp,
1183 		    sizeof(cmd.c_resp));
1184 	}
1185 
1186 #ifdef SDMMC_DEBUG
1187 	if (error == 0)
1188 		sdmmc_dump_data("CSD", cmd.c_resp, sizeof(cmd.c_resp));
1189 #endif
1190 	if (error == 0 && resp != NULL)
1191 		memcpy(resp, &cmd.c_resp, sizeof(*resp));
1192 	return error;
1193 }
1194 
1195 static int
1196 sdmmc_mem_send_scr(struct sdmmc_softc *sc, struct sdmmc_function *sf,
1197     uint32_t *scr)
1198 {
1199 	struct sdmmc_command cmd;
1200 	bus_dma_segment_t ds[1];
1201 	void *ptr = NULL;
1202 	int datalen = 8;
1203 	int rseg;
1204 	int error = 0;
1205 
1206 	/* Don't lock */
1207 
1208 	if (ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
1209 		error = bus_dmamem_alloc(sc->sc_dmat, datalen, PAGE_SIZE, 0,
1210 		    ds, 1, &rseg, BUS_DMA_NOWAIT);
1211 		if (error)
1212 			goto out;
1213 		error = bus_dmamem_map(sc->sc_dmat, ds, 1, datalen, &ptr,
1214 		    BUS_DMA_NOWAIT);
1215 		if (error)
1216 			goto dmamem_free;
1217 		error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmap, ptr, datalen,
1218 		    NULL, BUS_DMA_NOWAIT|BUS_DMA_STREAMING|BUS_DMA_READ);
1219 		if (error)
1220 			goto dmamem_unmap;
1221 
1222 		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap, 0, datalen,
1223 		    BUS_DMASYNC_PREREAD);
1224 	} else {
1225 		ptr = malloc(datalen, M_DEVBUF, M_NOWAIT | M_ZERO);
1226 		if (ptr == NULL)
1227 			goto out;
1228 	}
1229 
1230 	memset(&cmd, 0, sizeof(cmd));
1231 	cmd.c_data = ptr;
1232 	cmd.c_datalen = datalen;
1233 	cmd.c_blklen = datalen;
1234 	cmd.c_arg = 0;
1235 	cmd.c_flags = SCF_CMD_ADTC | SCF_CMD_READ | SCF_RSP_R1 | SCF_RSP_SPI_R1;
1236 	cmd.c_opcode = SD_APP_SEND_SCR;
1237 	if (ISSET(sc->sc_caps, SMC_CAPS_DMA))
1238 		cmd.c_dmamap = sc->sc_dmap;
1239 
1240 	error = sdmmc_app_command(sc, sf, &cmd);
1241 	if (error == 0) {
1242 		if (ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
1243 			bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap, 0, datalen,
1244 			    BUS_DMASYNC_POSTREAD);
1245 		}
1246 		memcpy(scr, ptr, datalen);
1247 	}
1248 
1249 out:
1250 	if (ptr != NULL) {
1251 		if (ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
1252 			bus_dmamap_unload(sc->sc_dmat, sc->sc_dmap);
1253 dmamem_unmap:
1254 			bus_dmamem_unmap(sc->sc_dmat, ptr, datalen);
1255 dmamem_free:
1256 			bus_dmamem_free(sc->sc_dmat, ds, rseg);
1257 		} else {
1258 			free(ptr, M_DEVBUF);
1259 		}
1260 	}
1261 	DPRINTF(("%s: sdmem_mem_send_scr: error = %d\n", SDMMCDEVNAME(sc),
1262 	    error));
1263 
1264 #ifdef SDMMC_DEBUG
1265 	if (error == 0)
1266 		sdmmc_dump_data("SCR", scr, datalen);
1267 #endif
1268 	return error;
1269 }
1270 
1271 static int
1272 sdmmc_mem_decode_scr(struct sdmmc_softc *sc, struct sdmmc_function *sf)
1273 {
1274 	sdmmc_response resp;
1275 	int ver;
1276 
1277 	memset(resp, 0, sizeof(resp));
1278 	/*
1279 	 * Change the raw-scr received from the DMA stream to resp.
1280 	 */
1281 	resp[0] = be32toh(sf->raw_scr[1]) >> 8;		// LSW
1282 	resp[1] = be32toh(sf->raw_scr[0]);		// MSW
1283 	resp[0] |= (resp[1] & 0xff) << 24;
1284 	resp[1] >>= 8;
1285 
1286 	ver = SCR_STRUCTURE(resp);
1287 	sf->scr.sd_spec = SCR_SD_SPEC(resp);
1288 	sf->scr.bus_width = SCR_SD_BUS_WIDTHS(resp);
1289 
1290 	DPRINTF(("%s: sdmmc_mem_decode_scr: %08x%08x ver=%d, spec=%d, bus width=%d\n",
1291 	    SDMMCDEVNAME(sc), resp[1], resp[0],
1292 	    ver, sf->scr.sd_spec, sf->scr.bus_width));
1293 
1294 	if (ver != 0 && ver != 1) {
1295 		DPRINTF(("%s: unknown structure version: %d\n",
1296 		    SDMMCDEVNAME(sc), ver));
1297 		return EINVAL;
1298 	}
1299 	return 0;
1300 }
1301 
1302 static int
1303 sdmmc_mem_send_ssr(struct sdmmc_softc *sc, struct sdmmc_function *sf,
1304     sdmmc_bitfield512_t *ssr)
1305 {
1306 	struct sdmmc_command cmd;
1307 	bus_dma_segment_t ds[1];
1308 	void *ptr = NULL;
1309 	int datalen = 64;
1310 	int rseg;
1311 	int error = 0;
1312 
1313 	/* Don't lock */
1314 
1315 	if (ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
1316 		error = bus_dmamem_alloc(sc->sc_dmat, datalen, PAGE_SIZE, 0,
1317 		    ds, 1, &rseg, BUS_DMA_NOWAIT);
1318 		if (error)
1319 			goto out;
1320 		error = bus_dmamem_map(sc->sc_dmat, ds, 1, datalen, &ptr,
1321 		    BUS_DMA_NOWAIT);
1322 		if (error)
1323 			goto dmamem_free;
1324 		error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmap, ptr, datalen,
1325 		    NULL, BUS_DMA_NOWAIT|BUS_DMA_STREAMING|BUS_DMA_READ);
1326 		if (error)
1327 			goto dmamem_unmap;
1328 
1329 		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap, 0, datalen,
1330 		    BUS_DMASYNC_PREREAD);
1331 	} else {
1332 		ptr = malloc(datalen, M_DEVBUF, M_NOWAIT | M_ZERO);
1333 		if (ptr == NULL)
1334 			goto out;
1335 	}
1336 
1337 	memset(&cmd, 0, sizeof(cmd));
1338 	cmd.c_data = ptr;
1339 	cmd.c_datalen = datalen;
1340 	cmd.c_blklen = datalen;
1341 	cmd.c_arg = 0;
1342 	cmd.c_flags = SCF_CMD_ADTC | SCF_CMD_READ | SCF_RSP_R1 | SCF_RSP_SPI_R1;
1343 	cmd.c_opcode = SD_APP_SD_STATUS;
1344 	if (ISSET(sc->sc_caps, SMC_CAPS_DMA))
1345 		cmd.c_dmamap = sc->sc_dmap;
1346 
1347 	error = sdmmc_app_command(sc, sf, &cmd);
1348 	if (error == 0) {
1349 		if (ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
1350 			bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap, 0, datalen,
1351 			    BUS_DMASYNC_POSTREAD);
1352 		}
1353 		memcpy(ssr, ptr, datalen);
1354 	}
1355 
1356 out:
1357 	if (ptr != NULL) {
1358 		if (ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
1359 			bus_dmamap_unload(sc->sc_dmat, sc->sc_dmap);
1360 dmamem_unmap:
1361 			bus_dmamem_unmap(sc->sc_dmat, ptr, datalen);
1362 dmamem_free:
1363 			bus_dmamem_free(sc->sc_dmat, ds, rseg);
1364 		} else {
1365 			free(ptr, M_DEVBUF);
1366 		}
1367 	}
1368 	DPRINTF(("%s: sdmem_mem_send_ssr: error = %d\n", SDMMCDEVNAME(sc),
1369 	    error));
1370 
1371 	if (error == 0)
1372 		sdmmc_be512_to_bitfield512(ssr);
1373 
1374 #ifdef SDMMC_DEBUG
1375 	if (error == 0)
1376 		sdmmc_dump_data("SSR", ssr, datalen);
1377 #endif
1378 	return error;
1379 }
1380 
1381 static int
1382 sdmmc_mem_decode_ssr(struct sdmmc_softc *sc, struct sdmmc_function *sf,
1383     sdmmc_bitfield512_t *ssr_bitfield)
1384 {
1385 	uint32_t *ssr = (uint32_t *)ssr_bitfield;
1386 	int speed_class_val, bus_width_val;
1387 
1388 	const int bus_width = SSR_DAT_BUS_WIDTH(ssr);
1389 	const int speed_class = SSR_SPEED_CLASS(ssr);
1390 	const int uhs_speed_grade = SSR_UHS_SPEED_GRADE(ssr);
1391 	const int video_speed_class = SSR_VIDEO_SPEED_CLASS(ssr);
1392 	const int app_perf_class = SSR_APP_PERF_CLASS(ssr);
1393 
1394 	switch (speed_class) {
1395 	case SSR_SPEED_CLASS_0:	speed_class_val = 0; break;
1396 	case SSR_SPEED_CLASS_2: speed_class_val = 2; break;
1397 	case SSR_SPEED_CLASS_4: speed_class_val = 4; break;
1398 	case SSR_SPEED_CLASS_6: speed_class_val = 6; break;
1399 	case SSR_SPEED_CLASS_10: speed_class_val = 10; break;
1400 	default: speed_class_val = -1; break;
1401 	}
1402 
1403 	switch (bus_width) {
1404 	case SSR_DAT_BUS_WIDTH_1: bus_width_val = 1; break;
1405 	case SSR_DAT_BUS_WIDTH_4: bus_width_val = 4; break;
1406 	default: bus_width_val = -1;
1407 	}
1408 
1409 	/*
1410 	 * Log card status
1411 	 */
1412 	device_printf(sc->sc_dev, "SD card status:");
1413 	if (bus_width_val != -1)
1414 		printf(" %d-bit", bus_width_val);
1415 	else
1416 		printf(" unknown bus width");
1417 	if (speed_class_val != -1)
1418 		printf(", C%d", speed_class_val);
1419 	if (uhs_speed_grade)
1420 		printf(", U%d", uhs_speed_grade);
1421 	if (video_speed_class)
1422 		printf(", V%d", video_speed_class);
1423 	if (app_perf_class)
1424 		printf(", A%d", app_perf_class);
1425 	printf("\n");
1426 
1427 	return 0;
1428 }
1429 
1430 static int
1431 sdmmc_mem_send_cxd_data(struct sdmmc_softc *sc, int opcode, void *data,
1432     size_t datalen)
1433 {
1434 	struct sdmmc_command cmd;
1435 	bus_dma_segment_t ds[1];
1436 	void *ptr = NULL;
1437 	int rseg;
1438 	int error = 0;
1439 
1440 	if (ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
1441 		error = bus_dmamem_alloc(sc->sc_dmat, datalen, PAGE_SIZE, 0, ds,
1442 		    1, &rseg, BUS_DMA_NOWAIT);
1443 		if (error)
1444 			goto out;
1445 		error = bus_dmamem_map(sc->sc_dmat, ds, 1, datalen, &ptr,
1446 		    BUS_DMA_NOWAIT);
1447 		if (error)
1448 			goto dmamem_free;
1449 		error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmap, ptr, datalen,
1450 		    NULL, BUS_DMA_NOWAIT|BUS_DMA_STREAMING|BUS_DMA_READ);
1451 		if (error)
1452 			goto dmamem_unmap;
1453 
1454 		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap, 0, datalen,
1455 		    BUS_DMASYNC_PREREAD);
1456 	} else {
1457 		ptr = malloc(datalen, M_DEVBUF, M_NOWAIT | M_ZERO);
1458 		if (ptr == NULL)
1459 			goto out;
1460 	}
1461 
1462 	memset(&cmd, 0, sizeof(cmd));
1463 	cmd.c_data = ptr;
1464 	cmd.c_datalen = datalen;
1465 	cmd.c_blklen = datalen;
1466 	cmd.c_opcode = opcode;
1467 	cmd.c_arg = 0;
1468 	cmd.c_flags = SCF_CMD_ADTC | SCF_CMD_READ | SCF_RSP_SPI_R1;
1469 	if (opcode == MMC_SEND_EXT_CSD)
1470 		SET(cmd.c_flags, SCF_RSP_R1);
1471 	else
1472 		SET(cmd.c_flags, SCF_RSP_R2);
1473 	if (ISSET(sc->sc_caps, SMC_CAPS_DMA))
1474 		cmd.c_dmamap = sc->sc_dmap;
1475 
1476 	error = sdmmc_mmc_command(sc, &cmd);
1477 	if (error == 0) {
1478 		if (ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
1479 			bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap, 0, datalen,
1480 			    BUS_DMASYNC_POSTREAD);
1481 		}
1482 		memcpy(data, ptr, datalen);
1483 #ifdef SDMMC_DEBUG
1484 		sdmmc_dump_data("CXD", data, datalen);
1485 #endif
1486 	}
1487 
1488 out:
1489 	if (ptr != NULL) {
1490 		if (ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
1491 			bus_dmamap_unload(sc->sc_dmat, sc->sc_dmap);
1492 dmamem_unmap:
1493 			bus_dmamem_unmap(sc->sc_dmat, ptr, datalen);
1494 dmamem_free:
1495 			bus_dmamem_free(sc->sc_dmat, ds, rseg);
1496 		} else {
1497 			free(ptr, M_DEVBUF);
1498 		}
1499 	}
1500 	return error;
1501 }
1502 
1503 static int
1504 sdmmc_set_bus_width(struct sdmmc_function *sf, int width)
1505 {
1506 	struct sdmmc_softc *sc = sf->sc;
1507 	struct sdmmc_command cmd;
1508 	int error;
1509 
1510 	if (ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE))
1511 		return ENODEV;
1512 
1513 	memset(&cmd, 0, sizeof(cmd));
1514 	cmd.c_opcode = SD_APP_SET_BUS_WIDTH;
1515 	cmd.c_flags = SCF_RSP_R1 | SCF_CMD_AC;
1516 
1517 	switch (width) {
1518 	case 1:
1519 		cmd.c_arg = SD_ARG_BUS_WIDTH_1;
1520 		break;
1521 
1522 	case 4:
1523 		cmd.c_arg = SD_ARG_BUS_WIDTH_4;
1524 		break;
1525 
1526 	default:
1527 		return EINVAL;
1528 	}
1529 
1530 	error = sdmmc_app_command(sc, sf, &cmd);
1531 	if (error == 0)
1532 		error = sdmmc_chip_bus_width(sc->sc_sct, sc->sc_sch, width);
1533 	return error;
1534 }
1535 
1536 static int
1537 sdmmc_mem_sd_switch(struct sdmmc_function *sf, int mode, int group,
1538     int function, sdmmc_bitfield512_t *status)
1539 {
1540 	struct sdmmc_softc *sc = sf->sc;
1541 	struct sdmmc_command cmd;
1542 	bus_dma_segment_t ds[1];
1543 	void *ptr = NULL;
1544 	int gsft, rseg, error = 0;
1545 	const int statlen = 64;
1546 
1547 	if (sf->scr.sd_spec >= SCR_SD_SPEC_VER_1_10 &&
1548 	    !ISSET(sf->csd.ccc, SD_CSD_CCC_SWITCH))
1549 		return EINVAL;
1550 
1551 	if (group <= 0 || group > 6 ||
1552 	    function < 0 || function > 15)
1553 		return EINVAL;
1554 
1555 	gsft = (group - 1) << 2;
1556 
1557 	if (ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
1558 		error = bus_dmamem_alloc(sc->sc_dmat, statlen, PAGE_SIZE, 0, ds,
1559 		    1, &rseg, BUS_DMA_NOWAIT);
1560 		if (error)
1561 			goto out;
1562 		error = bus_dmamem_map(sc->sc_dmat, ds, 1, statlen, &ptr,
1563 		    BUS_DMA_NOWAIT);
1564 		if (error)
1565 			goto dmamem_free;
1566 		error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmap, ptr, statlen,
1567 		    NULL, BUS_DMA_NOWAIT|BUS_DMA_STREAMING|BUS_DMA_READ);
1568 		if (error)
1569 			goto dmamem_unmap;
1570 
1571 		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap, 0, statlen,
1572 		    BUS_DMASYNC_PREREAD);
1573 	} else {
1574 		ptr = malloc(statlen, M_DEVBUF, M_NOWAIT | M_ZERO);
1575 		if (ptr == NULL)
1576 			goto out;
1577 	}
1578 
1579 	memset(&cmd, 0, sizeof(cmd));
1580 	cmd.c_data = ptr;
1581 	cmd.c_datalen = statlen;
1582 	cmd.c_blklen = statlen;
1583 	cmd.c_opcode = SD_SEND_SWITCH_FUNC;
1584 	cmd.c_arg =
1585 	    (!!mode << 31) | (function << gsft) | (0x00ffffff & ~(0xf << gsft));
1586 	cmd.c_flags = SCF_CMD_ADTC | SCF_CMD_READ | SCF_RSP_R1 | SCF_RSP_SPI_R1;
1587 	if (ISSET(sc->sc_caps, SMC_CAPS_DMA))
1588 		cmd.c_dmamap = sc->sc_dmap;
1589 
1590 	error = sdmmc_mmc_command(sc, &cmd);
1591 	if (error == 0) {
1592 		if (ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
1593 			bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap, 0, statlen,
1594 			    BUS_DMASYNC_POSTREAD);
1595 		}
1596 		memcpy(status, ptr, statlen);
1597 	}
1598 
1599 out:
1600 	if (ptr != NULL) {
1601 		if (ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
1602 			bus_dmamap_unload(sc->sc_dmat, sc->sc_dmap);
1603 dmamem_unmap:
1604 			bus_dmamem_unmap(sc->sc_dmat, ptr, statlen);
1605 dmamem_free:
1606 			bus_dmamem_free(sc->sc_dmat, ds, rseg);
1607 		} else {
1608 			free(ptr, M_DEVBUF);
1609 		}
1610 	}
1611 
1612 	if (error == 0)
1613 		sdmmc_be512_to_bitfield512(status);
1614 
1615 	return error;
1616 }
1617 
/*
 * Write one byte of an MMC card's EXT_CSD register with CMD6
 * (MMC_SWITCH, write-byte mode): 'index' selects the EXT_CSD byte,
 * 'value' its new contents, 'set' the command set.  With 'poll' the
 * commands are issued with SCF_POLL.  When EXT_CSD_HS_TIMING is set to
 * a value >= 2, the card status is polled with CMD13 until the card
 * reports READY_FOR_DATA, failing with EINVAL on a SWITCH_ERROR.
 */
static int
sdmmc_mem_mmc_switch(struct sdmmc_function *sf, uint8_t set, uint8_t index,
    uint8_t value, bool poll)
{
	struct sdmmc_softc *sc = sf->sc;
	struct sdmmc_command cmd;
	int error;

	memset(&cmd, 0, sizeof(cmd));
	cmd.c_opcode = MMC_SWITCH;
	/* CMD6 argument: access mode, EXT_CSD index, value, cmd set. */
	cmd.c_arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
	    (index << 16) | (value << 8) | set;
	cmd.c_flags = SCF_RSP_SPI_R1B | SCF_RSP_R1B | SCF_CMD_AC;

	if (poll)
		cmd.c_flags |= SCF_POLL;

	error = sdmmc_mmc_command(sc, &cmd);
	if (error)
		return error;

	/* After switching HS_TIMING to >= 2, wait for the card to settle. */
	if (index == EXT_CSD_HS_TIMING && value >= 2) {
		do {
			memset(&cmd, 0, sizeof(cmd));
			cmd.c_opcode = MMC_SEND_STATUS;
			if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE))
				cmd.c_arg = MMC_ARG_RCA(sf->rca);
			cmd.c_flags = SCF_CMD_AC | SCF_RSP_R1 | SCF_RSP_SPI_R2;
			if (poll)
				cmd.c_flags |= SCF_POLL;
			error = sdmmc_mmc_command(sc, &cmd);
			if (error)
				break;
			if (ISSET(MMC_R1(cmd.c_resp), MMC_R1_SWITCH_ERROR)) {
				aprint_error_dev(sc->sc_dev, "switch error\n");
				return EINVAL;
			}
			/* XXX time out */
		} while (!ISSET(MMC_R1(cmd.c_resp), MMC_R1_READY_FOR_DATA));

		if (error) {
			aprint_error_dev(sc->sc_dev,
			    "error waiting for high speed switch: %d\n",
			    error);
			return error;
		}
	}

	return 0;
}
1668 
1669 /*
1670  * SPI mode function
1671  */
1672 static int
1673 sdmmc_mem_spi_read_ocr(struct sdmmc_softc *sc, uint32_t hcs, uint32_t *card_ocr)
1674 {
1675 	struct sdmmc_command cmd;
1676 	int error;
1677 
1678 	memset(&cmd, 0, sizeof(cmd));
1679 	cmd.c_opcode = MMC_READ_OCR;
1680 	cmd.c_arg = hcs ? MMC_OCR_HCS : 0;
1681 	cmd.c_flags = SCF_RSP_SPI_R3;
1682 
1683 	error = sdmmc_mmc_command(sc, &cmd);
1684 	if (error == 0 && card_ocr != NULL)
1685 		*card_ocr = cmd.c_resp[1];
1686 	DPRINTF(("%s: sdmmc_mem_spi_read_ocr: error=%d, ocr=%#x\n",
1687 	    SDMMCDEVNAME(sc), error, cmd.c_resp[1]));
1688 	return error;
1689 }
1690 
1691 /*
1692  * read/write function
1693  */
1694 /* read */
1695 static int
1696 sdmmc_mem_single_read_block(struct sdmmc_function *sf, uint32_t blkno,
1697     u_char *data, size_t datalen)
1698 {
1699 	struct sdmmc_softc *sc = sf->sc;
1700 	int error = 0;
1701 	int i;
1702 
1703 	KASSERT((datalen % SDMMC_SECTOR_SIZE) == 0);
1704 	KASSERT(!ISSET(sc->sc_caps, SMC_CAPS_DMA));
1705 
1706 	for (i = 0; i < datalen / SDMMC_SECTOR_SIZE; i++) {
1707 		error = sdmmc_mem_read_block_subr(sf, sc->sc_dmap, blkno + i,
1708 		    data + i * SDMMC_SECTOR_SIZE, SDMMC_SECTOR_SIZE);
1709 		if (error)
1710 			break;
1711 	}
1712 	return error;
1713 }
1714 
1715 /*
1716  * Simulate multi-segment dma transfer.
1717  */
static int
sdmmc_mem_single_segment_dma_read_block(struct sdmmc_function *sf,
    uint32_t blkno, u_char *data, size_t datalen)
{
	struct sdmmc_softc *sc = sf->sc;
	bool use_bbuf = false;
	int error = 0;
	int i;

	/*
	 * The caller has loaded the transfer into sc_dmap, but this
	 * controller handles only one DMA segment per command.  If any
	 * segment is not sector-aligned in length, fall back to the
	 * bounce buffer (sf->bbuf / bbuf_dmap — presumably set up at
	 * attach time; not visible here) and do one transfer; otherwise
	 * issue one command per segment via sseg_dmap.
	 */
	for (i = 0; i < sc->sc_dmap->dm_nsegs; i++) {
		size_t len = sc->sc_dmap->dm_segs[i].ds_len;
		if ((len % SDMMC_SECTOR_SIZE) != 0) {
			use_bbuf = true;
			break;
		}
	}
	if (use_bbuf) {
		bus_dmamap_sync(sc->sc_dmat, sf->bbuf_dmap, 0, datalen,
		    BUS_DMASYNC_PREREAD);

		/* DMA lands in the bounce buffer via bbuf_dmap. */
		error = sdmmc_mem_read_block_subr(sf, sf->bbuf_dmap,
		    blkno, data, datalen);
		if (error) {
			bus_dmamap_unload(sc->sc_dmat, sf->bbuf_dmap);
			return error;
		}

		bus_dmamap_sync(sc->sc_dmat, sf->bbuf_dmap, 0, datalen,
		    BUS_DMASYNC_POSTREAD);

		/* Copy from bounce buffer */
		memcpy(data, sf->bbuf, datalen);

		return 0;
	}

	/* All segments sector-aligned: one command per segment. */
	for (i = 0; i < sc->sc_dmap->dm_nsegs; i++) {
		size_t len = sc->sc_dmap->dm_segs[i].ds_len;

		error = bus_dmamap_load(sc->sc_dmat, sf->sseg_dmap,
		    data, len, NULL, BUS_DMA_NOWAIT|BUS_DMA_READ);
		if (error)
			return error;

		bus_dmamap_sync(sc->sc_dmat, sf->sseg_dmap, 0, len,
		    BUS_DMASYNC_PREREAD);

		error = sdmmc_mem_read_block_subr(sf, sf->sseg_dmap,
		    blkno, data, len);
		if (error) {
			bus_dmamap_unload(sc->sc_dmat, sf->sseg_dmap);
			return error;
		}

		bus_dmamap_sync(sc->sc_dmat, sf->sseg_dmap, 0, len,
		    BUS_DMASYNC_POSTREAD);

		bus_dmamap_unload(sc->sc_dmat, sf->sseg_dmap);

		/* Advance to the next segment's blocks and data. */
		blkno += len / SDMMC_SECTOR_SIZE;
		data += len;
	}
	return 0;
}
1782 
/*
 * Issue a single read command (CMD17 for one block, CMD18 for many)
 * for 'datalen' bytes starting at sector 'blkno', using 'dmap' when
 * the controller does DMA.  Handles card selection, the explicit
 * CMD12 stop when the controller lacks auto-stop, and waits (CMD13)
 * until the card is ready for data again.
 */
static int
sdmmc_mem_read_block_subr(struct sdmmc_function *sf, bus_dmamap_t dmap,
    uint32_t blkno, u_char *data, size_t datalen)
{
	struct sdmmc_softc *sc = sf->sc;
	struct sdmmc_command cmd;
	int error;

	if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
		error = sdmmc_select_card(sc, sf);
		if (error)
			goto out;
	}

	memset(&cmd, 0, sizeof(cmd));
	cmd.c_data = data;
	cmd.c_datalen = datalen;
	cmd.c_blklen = SDMMC_SECTOR_SIZE;
	cmd.c_opcode = (cmd.c_datalen / cmd.c_blklen) > 1 ?
	    MMC_READ_BLOCK_MULTIPLE : MMC_READ_BLOCK_SINGLE;
	cmd.c_arg = blkno;
	/* Standard-capacity cards are addressed in bytes, not sectors. */
	if (!ISSET(sf->flags, SFF_SDHC))
		cmd.c_arg <<= SDMMC_SECTOR_SIZE_SB;
	cmd.c_flags = SCF_CMD_ADTC | SCF_CMD_READ | SCF_RSP_R1 | SCF_RSP_SPI_R1;
	if (ISSET(sf->flags, SFF_SDHC))
		cmd.c_flags |= SCF_XFER_SDHC;
	if (ISSET(sc->sc_caps, SMC_CAPS_DMA))
		cmd.c_dmamap = dmap;

	sc->sc_ev_xfer.ev_count++;

	error = sdmmc_mmc_command(sc, &cmd);
	if (error) {
		sc->sc_ev_xfer_error.ev_count++;
		goto out;
	}

	/* Bucket transfer sizes (512B..64KiB powers of two) for stats. */
	const u_int counter = __builtin_ctz(cmd.c_datalen);
	if (counter >= 9 && counter <= 16) {
		sc->sc_ev_xfer_aligned[counter - 9].ev_count++;
	} else {
		sc->sc_ev_xfer_unaligned.ev_count++;
	}

	/* Controller doesn't send CMD12 itself: stop the transfer. */
	if (!ISSET(sc->sc_caps, SMC_CAPS_AUTO_STOP)) {
		if (cmd.c_opcode == MMC_READ_BLOCK_MULTIPLE) {
			memset(&cmd, 0, sizeof cmd);
			cmd.c_opcode = MMC_STOP_TRANSMISSION;
			cmd.c_arg = MMC_ARG_RCA(sf->rca);
			cmd.c_flags = SCF_CMD_AC | SCF_RSP_R1B | SCF_RSP_SPI_R1B;
			error = sdmmc_mmc_command(sc, &cmd);
			if (error)
				goto out;
		}
	}

	/* Poll CMD13 until the card reports READY_FOR_DATA. */
	if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
		do {
			memset(&cmd, 0, sizeof(cmd));
			cmd.c_opcode = MMC_SEND_STATUS;
			if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE))
				cmd.c_arg = MMC_ARG_RCA(sf->rca);
			cmd.c_flags = SCF_CMD_AC | SCF_RSP_R1 | SCF_RSP_SPI_R2;
			error = sdmmc_mmc_command(sc, &cmd);
			if (error)
				break;
			/* XXX time out */
		} while (!ISSET(MMC_R1(cmd.c_resp), MMC_R1_READY_FOR_DATA));
	}

out:
	return error;
}
1856 
/*
 * Public entry point: read 'datalen' bytes starting at sector 'blkno'
 * into 'data'.  Dispatches to the single-block, PIO, or DMA paths
 * depending on controller capabilities; for DMA the buffer is loaded
 * into sc_dmap and synced around the transfer.
 */
int
sdmmc_mem_read_block(struct sdmmc_function *sf, uint32_t blkno, u_char *data,
    size_t datalen)
{
	struct sdmmc_softc *sc = sf->sc;
	int error;

	SDMMC_LOCK(sc);
	mutex_enter(&sc->sc_mtx);

	/* Controller can only do one block per command. */
	if (ISSET(sc->sc_caps, SMC_CAPS_SINGLE_ONLY)) {
		error = sdmmc_mem_single_read_block(sf, blkno, data, datalen);
		goto out;
	}

	/* PIO path. */
	if (!ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
		error = sdmmc_mem_read_block_subr(sf, sc->sc_dmap, blkno, data,
		    datalen);
		goto out;
	}

	/* DMA transfer */
	error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmap, data, datalen, NULL,
	    BUS_DMA_NOWAIT|BUS_DMA_READ);
	if (error)
		goto out;

#ifdef SDMMC_DEBUG
	printf("data=%p, datalen=%zu\n", data, datalen);
	for (int i = 0; i < sc->sc_dmap->dm_nsegs; i++) {
		printf("seg#%d: addr=%#lx, size=%#lx\n", i,
		    (u_long)sc->sc_dmap->dm_segs[i].ds_addr,
		    (u_long)sc->sc_dmap->dm_segs[i].ds_len);
	}
#endif

	/* Multi-segment map but single-segment controller: emulate. */
	if (sc->sc_dmap->dm_nsegs > 1
	    && !ISSET(sc->sc_caps, SMC_CAPS_MULTI_SEG_DMA)) {
		error = sdmmc_mem_single_segment_dma_read_block(sf, blkno,
		    data, datalen);
		goto unload;
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap, 0, datalen,
	    BUS_DMASYNC_PREREAD);

	error = sdmmc_mem_read_block_subr(sf, sc->sc_dmap, blkno, data,
	    datalen);
	if (error)
		goto unload;

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap, 0, datalen,
	    BUS_DMASYNC_POSTREAD);
unload:
	bus_dmamap_unload(sc->sc_dmat, sc->sc_dmap);

out:
	mutex_exit(&sc->sc_mtx);
	SDMMC_UNLOCK(sc);

	return error;
}
1919 
1920 /* write */
1921 static int
1922 sdmmc_mem_single_write_block(struct sdmmc_function *sf, uint32_t blkno,
1923     u_char *data, size_t datalen)
1924 {
1925 	struct sdmmc_softc *sc = sf->sc;
1926 	int error = 0;
1927 	int i;
1928 
1929 	KASSERT((datalen % SDMMC_SECTOR_SIZE) == 0);
1930 	KASSERT(!ISSET(sc->sc_caps, SMC_CAPS_DMA));
1931 
1932 	for (i = 0; i < datalen / SDMMC_SECTOR_SIZE; i++) {
1933 		error = sdmmc_mem_write_block_subr(sf, sc->sc_dmap, blkno + i,
1934 		    data + i * SDMMC_SECTOR_SIZE, SDMMC_SECTOR_SIZE);
1935 		if (error)
1936 			break;
1937 	}
1938 	return error;
1939 }
1940 
1941 /*
1942  * Simulate multi-segment dma transfer.
1943  */
static int
sdmmc_mem_single_segment_dma_write_block(struct sdmmc_function *sf,
    uint32_t blkno, u_char *data, size_t datalen)
{
	struct sdmmc_softc *sc = sf->sc;
	bool use_bbuf = false;
	int error = 0;
	int i;

	/*
	 * The caller has loaded the transfer into sc_dmap, but this
	 * controller handles only one DMA segment per command.  If any
	 * segment is not sector-aligned in length, fall back to the
	 * bounce buffer (sf->bbuf / bbuf_dmap — presumably set up at
	 * attach time; not visible here) and do one transfer; otherwise
	 * issue one command per segment via sseg_dmap.
	 */
	for (i = 0; i < sc->sc_dmap->dm_nsegs; i++) {
		size_t len = sc->sc_dmap->dm_segs[i].ds_len;
		if ((len % SDMMC_SECTOR_SIZE) != 0) {
			use_bbuf = true;
			break;
		}
	}
	if (use_bbuf) {
		/* Copy to bounce buffer */
		memcpy(sf->bbuf, data, datalen);

		bus_dmamap_sync(sc->sc_dmat, sf->bbuf_dmap, 0, datalen,
		    BUS_DMASYNC_PREWRITE);

		/* DMA pulls from the bounce buffer via bbuf_dmap. */
		error = sdmmc_mem_write_block_subr(sf, sf->bbuf_dmap,
		    blkno, data, datalen);
		if (error) {
			bus_dmamap_unload(sc->sc_dmat, sf->bbuf_dmap);
			return error;
		}

		bus_dmamap_sync(sc->sc_dmat, sf->bbuf_dmap, 0, datalen,
		    BUS_DMASYNC_POSTWRITE);

		return 0;
	}

	/* All segments sector-aligned: one command per segment. */
	for (i = 0; i < sc->sc_dmap->dm_nsegs; i++) {
		size_t len = sc->sc_dmap->dm_segs[i].ds_len;

		error = bus_dmamap_load(sc->sc_dmat, sf->sseg_dmap,
		    data, len, NULL, BUS_DMA_NOWAIT|BUS_DMA_WRITE);
		if (error)
			return error;

		bus_dmamap_sync(sc->sc_dmat, sf->sseg_dmap, 0, len,
		    BUS_DMASYNC_PREWRITE);

		error = sdmmc_mem_write_block_subr(sf, sf->sseg_dmap,
		    blkno, data, len);
		if (error) {
			bus_dmamap_unload(sc->sc_dmat, sf->sseg_dmap);
			return error;
		}

		bus_dmamap_sync(sc->sc_dmat, sf->sseg_dmap, 0, len,
		    BUS_DMASYNC_POSTWRITE);

		bus_dmamap_unload(sc->sc_dmat, sf->sseg_dmap);

		/* Advance to the next segment's blocks and data. */
		blkno += len / SDMMC_SECTOR_SIZE;
		data += len;
	}

	return error;
}
2009 
/*
 * Issue a single write command (CMD24 for one block, CMD25 for many)
 * for 'datalen' bytes starting at sector 'blkno', using 'dmap' when
 * the controller does DMA.  For SD multi-block writes, ACMD23 first
 * announces the number of blocks to pre-erase.  Handles the explicit
 * CMD12 stop when the controller lacks auto-stop, and waits (CMD13)
 * until the card is ready for data again.
 */
static int
sdmmc_mem_write_block_subr(struct sdmmc_function *sf, bus_dmamap_t dmap,
    uint32_t blkno, u_char *data, size_t datalen)
{
	struct sdmmc_softc *sc = sf->sc;
	struct sdmmc_command cmd;
	int error;

	if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
		error = sdmmc_select_card(sc, sf);
		if (error)
			goto out;
	}

	const int nblk = howmany(datalen, SDMMC_SECTOR_SIZE);
	if (ISSET(sc->sc_flags, SMF_SD_MODE) && nblk > 1) {
		/* Set the number of write blocks to be pre-erased */
		memset(&cmd, 0, sizeof(cmd));
		cmd.c_opcode = SD_APP_SET_WR_BLK_ERASE_COUNT;
		cmd.c_flags = SCF_RSP_R1 | SCF_RSP_SPI_R1 | SCF_CMD_AC;
		cmd.c_arg = nblk;
		error = sdmmc_app_command(sc, sf, &cmd);
		if (error)
			goto out;
	}

	memset(&cmd, 0, sizeof(cmd));
	cmd.c_data = data;
	cmd.c_datalen = datalen;
	cmd.c_blklen = SDMMC_SECTOR_SIZE;
	cmd.c_opcode = (cmd.c_datalen / cmd.c_blklen) > 1 ?
	    MMC_WRITE_BLOCK_MULTIPLE : MMC_WRITE_BLOCK_SINGLE;
	cmd.c_arg = blkno;
	/* Standard-capacity cards are addressed in bytes, not sectors. */
	if (!ISSET(sf->flags, SFF_SDHC))
		cmd.c_arg <<= SDMMC_SECTOR_SIZE_SB;
	cmd.c_flags = SCF_CMD_ADTC | SCF_RSP_R1;
	if (ISSET(sf->flags, SFF_SDHC))
		cmd.c_flags |= SCF_XFER_SDHC;
	if (ISSET(sc->sc_caps, SMC_CAPS_DMA))
		cmd.c_dmamap = dmap;

	sc->sc_ev_xfer.ev_count++;

	error = sdmmc_mmc_command(sc, &cmd);
	if (error) {
		sc->sc_ev_xfer_error.ev_count++;
		goto out;
	}

	/* Bucket transfer sizes (512B..64KiB powers of two) for stats. */
	const u_int counter = __builtin_ctz(cmd.c_datalen);
	if (counter >= 9 && counter <= 16) {
		sc->sc_ev_xfer_aligned[counter - 9].ev_count++;
	} else {
		sc->sc_ev_xfer_unaligned.ev_count++;
	}

	/* Controller doesn't send CMD12 itself: stop the transfer. */
	if (!ISSET(sc->sc_caps, SMC_CAPS_AUTO_STOP)) {
		if (cmd.c_opcode == MMC_WRITE_BLOCK_MULTIPLE) {
			memset(&cmd, 0, sizeof(cmd));
			cmd.c_opcode = MMC_STOP_TRANSMISSION;
			cmd.c_flags = SCF_CMD_AC | SCF_RSP_R1B | SCF_RSP_SPI_R1B;
			error = sdmmc_mmc_command(sc, &cmd);
			if (error)
				goto out;
		}
	}

	/* Poll CMD13 until the card reports READY_FOR_DATA. */
	if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
		do {
			memset(&cmd, 0, sizeof(cmd));
			cmd.c_opcode = MMC_SEND_STATUS;
			if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE))
				cmd.c_arg = MMC_ARG_RCA(sf->rca);
			cmd.c_flags = SCF_CMD_AC | SCF_RSP_R1 | SCF_RSP_SPI_R2;
			error = sdmmc_mmc_command(sc, &cmd);
			if (error)
				break;
			/* XXX time out */
		} while (!ISSET(MMC_R1(cmd.c_resp), MMC_R1_READY_FOR_DATA));
	}

out:
	return error;
}
2094 
/*
 * Write 'datalen' bytes from 'data' to the card starting at block
 * 'blkno', selecting between single-block, PIO, and DMA transfer
 * strategies based on the controller's capabilities.
 * Returns 0 on success or an errno value.
 */
int
sdmmc_mem_write_block(struct sdmmc_function *sf, uint32_t blkno, u_char *data,
    size_t datalen)
{
	struct sdmmc_softc *sc = sf->sc;
	int error;

	SDMMC_LOCK(sc);
	mutex_enter(&sc->sc_mtx);

	/* Refuse the transfer outright if the slot is write-protected. */
	if (sdmmc_chip_write_protect(sc->sc_sct, sc->sc_sch)) {
		aprint_normal_dev(sc->sc_dev, "write-protected\n");
		error = EIO;
		goto out;
	}

	/* Controller can only issue one block per command. */
	if (ISSET(sc->sc_caps, SMC_CAPS_SINGLE_ONLY)) {
		error = sdmmc_mem_single_write_block(sf, blkno, data, datalen);
		goto out;
	}

	/* No DMA support: PIO transfer straight from the caller's buffer. */
	if (!ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
		error = sdmmc_mem_write_block_subr(sf, sc->sc_dmap, blkno, data,
		    datalen);
		goto out;
	}

	/* DMA transfer */
	error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmap, data, datalen, NULL,
	    BUS_DMA_NOWAIT|BUS_DMA_WRITE);
	if (error)
		goto out;

#ifdef SDMMC_DEBUG
	aprint_normal_dev(sc->sc_dev, "%s: data=%p, datalen=%zu\n",
	    __func__, data, datalen);
	for (int i = 0; i < sc->sc_dmap->dm_nsegs; i++) {
		aprint_normal_dev(sc->sc_dev,
		    "%s: seg#%d: addr=%#lx, size=%#lx\n", __func__, i,
		    (u_long)sc->sc_dmap->dm_segs[i].ds_addr,
		    (u_long)sc->sc_dmap->dm_segs[i].ds_len);
	}
#endif

	/*
	 * The buffer mapped to multiple segments but the controller
	 * handles only one segment per transfer: fall back to the
	 * segment-at-a-time path.
	 */
	if (sc->sc_dmap->dm_nsegs > 1
	    && !ISSET(sc->sc_caps, SMC_CAPS_MULTI_SEG_DMA)) {
		error = sdmmc_mem_single_segment_dma_write_block(sf, blkno,
		    data, datalen);
		goto unload;
	}

	/* Make the caller's data visible to the device before the command. */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap, 0, datalen,
	    BUS_DMASYNC_PREWRITE);

	error = sdmmc_mem_write_block_subr(sf, sc->sc_dmap, blkno, data,
	    datalen);
	if (error)
		goto unload;

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap, 0, datalen,
	    BUS_DMASYNC_POSTWRITE);
unload:
	bus_dmamap_unload(sc->sc_dmat, sc->sc_dmap);

out:
	mutex_exit(&sc->sc_mtx);
	SDMMC_UNLOCK(sc);

	return error;
}
2165 
2166 int
2167 sdmmc_mem_discard(struct sdmmc_function *sf, uint32_t sblkno, uint32_t eblkno)
2168 {
2169 	struct sdmmc_softc *sc = sf->sc;
2170 	struct sdmmc_command cmd;
2171 	int error;
2172 
2173 	if (ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE))
2174 		return ENODEV;	/* XXX not tested */
2175 
2176 	if (eblkno < sblkno)
2177 		return EINVAL;
2178 
2179 	SDMMC_LOCK(sc);
2180 	mutex_enter(&sc->sc_mtx);
2181 
2182 	/* Set the address of the first write block to be erased */
2183 	memset(&cmd, 0, sizeof(cmd));
2184 	cmd.c_opcode = ISSET(sc->sc_flags, SMF_SD_MODE) ?
2185 	    SD_ERASE_WR_BLK_START : MMC_TAG_ERASE_GROUP_START;
2186 	cmd.c_arg = sblkno;
2187 	if (!ISSET(sf->flags, SFF_SDHC))
2188 		cmd.c_arg <<= SDMMC_SECTOR_SIZE_SB;
2189 	cmd.c_flags = SCF_CMD_AC | SCF_RSP_R1;
2190 	error = sdmmc_mmc_command(sc, &cmd);
2191 	if (error)
2192 		goto out;
2193 
2194 	/* Set the address of the last write block to be erased */
2195 	memset(&cmd, 0, sizeof(cmd));
2196 	cmd.c_opcode = ISSET(sc->sc_flags, SMF_SD_MODE) ?
2197 	    SD_ERASE_WR_BLK_END : MMC_TAG_ERASE_GROUP_END;
2198 	cmd.c_arg = eblkno;
2199 	if (!ISSET(sf->flags, SFF_SDHC))
2200 		cmd.c_arg <<= SDMMC_SECTOR_SIZE_SB;
2201 	cmd.c_flags = SCF_CMD_AC | SCF_RSP_R1;
2202 	error = sdmmc_mmc_command(sc, &cmd);
2203 	if (error)
2204 		goto out;
2205 
2206 	/* Start the erase operation */
2207 	memset(&cmd, 0, sizeof(cmd));
2208 	cmd.c_opcode = MMC_ERASE;
2209 	cmd.c_flags = SCF_CMD_AC | SCF_RSP_R1B;
2210 	error = sdmmc_mmc_command(sc, &cmd);
2211 	if (error)
2212 		goto out;
2213 
2214 out:
2215 	mutex_exit(&sc->sc_mtx);
2216 	SDMMC_UNLOCK(sc);
2217 
2218 #ifdef SDMMC_DEBUG
2219 	device_printf(sc->sc_dev, "discard blk %u-%u error %d\n",
2220 	    sblkno, eblkno, error);
2221 #endif
2222 
2223 	return error;
2224 }
2225 
2226 int
2227 sdmmc_mem_flush_cache(struct sdmmc_function *sf, bool poll)
2228 {
2229 	struct sdmmc_softc *sc = sf->sc;
2230 	int error;
2231 
2232 	if (!ISSET(sf->flags, SFF_CACHE_ENABLED))
2233 		return 0;
2234 
2235 	SDMMC_LOCK(sc);
2236 	mutex_enter(&sc->sc_mtx);
2237 
2238 	error = sdmmc_mem_mmc_switch(sf,
2239 	    EXT_CSD_CMD_SET_NORMAL, EXT_CSD_FLUSH_CACHE,
2240 	    EXT_CSD_FLUSH_CACHE_FLUSH, poll);
2241 
2242 	mutex_exit(&sc->sc_mtx);
2243 	SDMMC_UNLOCK(sc);
2244 
2245 #ifdef SDMMC_DEBUG
2246 	device_printf(sc->sc_dev, "mmc flush cache error %d\n", error);
2247 #endif
2248 
2249 	return error;
2250 }
2251