xref: /netbsd-src/sys/dev/sdmmc/sdmmc_mem.c (revision 181254a7b1bdde6873432bffef2d2decc4b5c22f)
1 /*	$NetBSD: sdmmc_mem.c,v 1.72 2020/05/11 09:51:47 jdc Exp $	*/
2 /*	$OpenBSD: sdmmc_mem.c,v 1.10 2009/01/09 10:55:22 jsg Exp $	*/
3 
4 /*
5  * Copyright (c) 2006 Uwe Stuehler <uwe@openbsd.org>
6  *
7  * Permission to use, copy, modify, and distribute this software for any
8  * purpose with or without fee is hereby granted, provided that the above
9  * copyright notice and this permission notice appear in all copies.
10  *
11  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 /*-
21  * Copyright (C) 2007, 2008, 2009, 2010 NONAKA Kimihiro <nonaka@netbsd.org>
22  * All rights reserved.
23  *
24  * Redistribution and use in source and binary forms, with or without
25  * modification, are permitted provided that the following conditions
26  * are met:
27  * 1. Redistributions of source code must retain the above copyright
28  *    notice, this list of conditions and the following disclaimer.
29  * 2. Redistributions in binary form must reproduce the above copyright
30  *    notice, this list of conditions and the following disclaimer in the
31  *    documentation and/or other materials provided with the distribution.
32  *
33  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
34  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
35  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
36  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
37  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
38  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
39  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
40  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
41  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
42  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
43  */
44 
45 /* Routines for SD/MMC memory cards. */
46 
47 #include <sys/cdefs.h>
48 __KERNEL_RCSID(0, "$NetBSD: sdmmc_mem.c,v 1.72 2020/05/11 09:51:47 jdc Exp $");
49 
50 #ifdef _KERNEL_OPT
51 #include "opt_sdmmc.h"
52 #endif
53 
54 #include <sys/param.h>
55 #include <sys/kernel.h>
56 #include <sys/malloc.h>
57 #include <sys/systm.h>
58 #include <sys/device.h>
59 #include <sys/bitops.h>
60 #include <sys/evcnt.h>
61 
62 #include <dev/sdmmc/sdmmcchip.h>
63 #include <dev/sdmmc/sdmmcreg.h>
64 #include <dev/sdmmc/sdmmcvar.h>
65 
66 #ifdef SDMMC_DEBUG
67 #define DPRINTF(s)	do { printf s; } while (/*CONSTCOND*/0)
68 #else
69 #define DPRINTF(s)	do {} while (/*CONSTCOND*/0)
70 #endif
71 
/*
 * 512-bit big-endian register image (e.g. SD switch/SSR status), stored as
 * sixteen 32-bit words; sdmmc_be512_to_bitfield512() converts it so the
 * __bitfield()-style SFUNC_STATUS_GROUP() accessors can index into it.
 */
typedef struct { uint32_t _bits[512/32]; } __packed __aligned(4) sdmmc_bitfield512_t;
73 
74 static int sdmmc_mem_sd_init(struct sdmmc_softc *, struct sdmmc_function *);
75 static int sdmmc_mem_mmc_init(struct sdmmc_softc *, struct sdmmc_function *);
76 static int sdmmc_mem_send_cid(struct sdmmc_softc *, sdmmc_response *);
77 static int sdmmc_mem_send_csd(struct sdmmc_softc *, struct sdmmc_function *,
78     sdmmc_response *);
79 static int sdmmc_mem_send_scr(struct sdmmc_softc *, struct sdmmc_function *,
80     uint32_t *scr);
81 static int sdmmc_mem_decode_scr(struct sdmmc_softc *, struct sdmmc_function *);
82 static int sdmmc_mem_send_ssr(struct sdmmc_softc *, struct sdmmc_function *,
83     sdmmc_bitfield512_t *);
84 static int sdmmc_mem_decode_ssr(struct sdmmc_softc *, struct sdmmc_function *,
85     sdmmc_bitfield512_t *);
86 static int sdmmc_mem_send_cxd_data(struct sdmmc_softc *, int, void *, size_t);
87 static int sdmmc_set_bus_width(struct sdmmc_function *, int);
88 static int sdmmc_mem_sd_switch(struct sdmmc_function *, int, int, int, sdmmc_bitfield512_t *);
89 static int sdmmc_mem_mmc_switch(struct sdmmc_function *, uint8_t, uint8_t,
90     uint8_t, bool);
91 static int sdmmc_mem_signal_voltage(struct sdmmc_softc *, int);
92 static int sdmmc_mem_spi_read_ocr(struct sdmmc_softc *, uint32_t, uint32_t *);
93 static int sdmmc_mem_single_read_block(struct sdmmc_function *, uint32_t,
94     u_char *, size_t);
95 static int sdmmc_mem_single_write_block(struct sdmmc_function *, uint32_t,
96     u_char *, size_t);
97 static int sdmmc_mem_single_segment_dma_read_block(struct sdmmc_function *,
98     uint32_t, u_char *, size_t);
99 static int sdmmc_mem_single_segment_dma_write_block(struct sdmmc_function *,
100     uint32_t, u_char *, size_t);
101 static int sdmmc_mem_read_block_subr(struct sdmmc_function *, bus_dmamap_t,
102     uint32_t, u_char *, size_t);
103 static int sdmmc_mem_write_block_subr(struct sdmmc_function *, bus_dmamap_t,
104     uint32_t, u_char *, size_t);
105 
/*
 * CMD6 (SWITCH_FUNC) group 1 access modes, indexed by function number.
 * 'v' is the host capability bit (SMC_CAPS_*) required to use the mode,
 * 'freq' the resulting maximum bus clock in kHz (assigned to
 * sf->csd.tran_speed after a successful switch).
 */
static const struct {
	const char *name;
	int v;
	int freq;
} switch_group0_functions[] = {
	/* Default/SDR12 */
	{ "Default/SDR12",	 0,			 25000 },

	/* High-Speed/SDR25 */
	{ "High-Speed/SDR25",	SMC_CAPS_SD_HIGHSPEED,	 50000 },

	/* SDR50 */
	{ "SDR50",		SMC_CAPS_UHS_SDR50,	100000 },

	/* SDR104 */
	{ "SDR104",		SMC_CAPS_UHS_SDR104,	208000 },

	/* DDR50 */
	{ "DDR50",		SMC_CAPS_UHS_DDR50,	 50000 },
};
126 
/* Maximum bus clock in kHz for each EXT_CSD HS_TIMING value (eMMC). */
static const int sdmmc_mmc_timings[] = {
	[EXT_CSD_HS_TIMING_LEGACY]	= 26000,
	[EXT_CSD_HS_TIMING_HIGHSPEED]	= 52000,
	[EXT_CSD_HS_TIMING_HS200]	= 200000
};
132 
133 /*
134  * Initialize SD/MMC memory cards and memory in SDIO "combo" cards.
135  */
/*
 * Negotiate power-up with the memory portion of the card stack.
 * Distinguishes SD from MMC, agrees on an operating voltage via the OCR,
 * and (for SD) optionally performs the 1.8V signalling switch for UHS.
 * Returns 0 on success or an errno-style error.
 */
int
sdmmc_mem_enable(struct sdmmc_softc *sc)
{
	uint32_t host_ocr;
	uint32_t card_ocr;
	uint32_t new_ocr;
	uint32_t ocr = 0;	/* extra OCR bits we request (HCS, S18A, ...) */
	int error;

	SDMMC_LOCK(sc);

	/* Set host mode to SD "combo" card or SD memory-only. */
	CLR(sc->sc_flags, SMF_UHS_MODE);
	SET(sc->sc_flags, SMF_SD_MODE|SMF_MEM_MODE);

	if (ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE))
		sdmmc_spi_chip_initialize(sc->sc_spi_sct, sc->sc_sch);

	/* Reset memory (*must* do that before CMD55 or CMD1). */
	sdmmc_go_idle_state(sc);

	if (ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
		/* Check SD Ver.2 */
		error = sdmmc_mem_send_if_cond(sc, 0x1aa, &card_ocr);
		if (error == 0 && card_ocr == 0x1aa)
			SET(ocr, MMC_OCR_HCS);
	}

	/*
	 * Read the SD/MMC memory OCR value by issuing CMD55 followed
	 * by ACMD41 to read the OCR value from memory-only SD cards.
	 * MMC cards will not respond to CMD55 or ACMD41 and this is
	 * how we distinguish them from SD cards.
	 */
mmc_mode:
	error = sdmmc_mem_send_op_cond(sc,
	    ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE) ? ocr : 0, &card_ocr);
	if (error) {
		if (ISSET(sc->sc_flags, SMF_SD_MODE) &&
		    !ISSET(sc->sc_flags, SMF_IO_MODE)) {
			/* Not a SD card, switch to MMC mode. */
			DPRINTF(("%s: switch to MMC mode\n", SDMMCDEVNAME(sc)));
			CLR(sc->sc_flags, SMF_SD_MODE);
			goto mmc_mode;
		}
		if (!ISSET(sc->sc_flags, SMF_SD_MODE)) {
			/* Failed in MMC mode too: no memory present. */
			DPRINTF(("%s: couldn't read memory OCR\n",
			    SDMMCDEVNAME(sc)));
			goto out;
		} else {
			/* Not a "combo" card. */
			CLR(sc->sc_flags, SMF_MEM_MODE);
			error = 0;
			goto out;
		}
	}
	if (ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
		/* get card OCR */
		error = sdmmc_mem_spi_read_ocr(sc, ocr, &card_ocr);
		if (error) {
			DPRINTF(("%s: couldn't read SPI memory OCR\n",
			    SDMMCDEVNAME(sc)));
			goto out;
		}
	}

	/* Set the lowest voltage supported by the card and host. */
	host_ocr = sdmmc_chip_host_ocr(sc->sc_sct, sc->sc_sch);
	error = sdmmc_set_bus_power(sc, host_ocr, card_ocr);
	if (error) {
		DPRINTF(("%s: couldn't supply voltage requested by card\n",
		    SDMMCDEVNAME(sc)));
		goto out;
	}

	DPRINTF(("%s: host_ocr 0x%08x\n", SDMMCDEVNAME(sc), host_ocr));
	DPRINTF(("%s: card_ocr 0x%08x\n", SDMMCDEVNAME(sc), card_ocr));

	host_ocr &= card_ocr; /* only allow the common voltages */
	if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
		if (ISSET(sc->sc_flags, SMF_SD_MODE)) {
			/* Tell the card(s) to enter the idle state (again). */
			sdmmc_go_idle_state(sc);
			/* Check SD Ver.2 */
			error = sdmmc_mem_send_if_cond(sc, 0x1aa, &card_ocr);
			if (error == 0 && card_ocr == 0x1aa)
				SET(ocr, MMC_OCR_HCS);

			/* Advertise 1.8V signalling if the host supports it. */
			if (sdmmc_chip_host_ocr(sc->sc_sct, sc->sc_sch) & MMC_OCR_S18A)
				SET(ocr, MMC_OCR_S18A);
		} else {
			/* MMC: request sector-addressed access mode. */
			SET(ocr, MMC_OCR_ACCESS_MODE_SECTOR);
		}
	}
	host_ocr |= ocr;

	/* Send the new OCR value until all cards are ready. */
	error = sdmmc_mem_send_op_cond(sc, host_ocr, &new_ocr);
	if (error) {
		DPRINTF(("%s: couldn't send memory OCR\n", SDMMCDEVNAME(sc)));
		goto out;
	}

	if (ISSET(sc->sc_flags, SMF_SD_MODE) && ISSET(new_ocr, MMC_OCR_S18A)) {
		/*
		 * Card and host support low voltage mode, begin switch
		 * sequence.
		 */
		struct sdmmc_command cmd;
		memset(&cmd, 0, sizeof(cmd));
		cmd.c_arg = 0;
		cmd.c_flags = SCF_CMD_AC | SCF_RSP_R1;
		cmd.c_opcode = SD_VOLTAGE_SWITCH;
		DPRINTF(("%s: switching card to 1.8V\n", SDMMCDEVNAME(sc)));
		error = sdmmc_mmc_command(sc, &cmd);
		if (error) {
			DPRINTF(("%s: voltage switch command failed\n",
			    SDMMCDEVNAME(sc)));
			goto out;
		}

		/* CMD11 accepted: retrain the host side at 1.8V. */
		error = sdmmc_mem_signal_voltage(sc, SDMMC_SIGNAL_VOLTAGE_180);
		if (error)
			goto out;

		SET(sc->sc_flags, SMF_UHS_MODE);
	}

out:
	SDMMC_UNLOCK(sc);

	if (error)
		printf("%s: %s failed with error %d\n", SDMMCDEVNAME(sc),
		    __func__, error);

	return error;
}
273 
274 static int
275 sdmmc_mem_signal_voltage(struct sdmmc_softc *sc, int signal_voltage)
276 {
277 	int error;
278 
279 	/*
280 	 * Stop the clock
281 	 */
282 	error = sdmmc_chip_bus_clock(sc->sc_sct, sc->sc_sch,
283 	    SDMMC_SDCLK_OFF, false);
284 	if (error)
285 		goto out;
286 
287 	delay(1000);
288 
289 	/*
290 	 * Card switch command was successful, update host controller
291 	 * signal voltage setting.
292 	 */
293 	DPRINTF(("%s: switching host to %s\n", SDMMCDEVNAME(sc),
294 	    signal_voltage == SDMMC_SIGNAL_VOLTAGE_180 ? "1.8V" : "3.3V"));
295 	error = sdmmc_chip_signal_voltage(sc->sc_sct,
296 	    sc->sc_sch, signal_voltage);
297 	if (error)
298 		goto out;
299 
300 	delay(5000);
301 
302 	/*
303 	 * Switch to SDR12 timing
304 	 */
305 	error = sdmmc_chip_bus_clock(sc->sc_sct, sc->sc_sch, 25000,
306 	    false);
307 	if (error)
308 		goto out;
309 
310 	delay(1000);
311 
312 out:
313 	return error;
314 }
315 
316 /*
317  * Read the CSD and CID from all cards and assign each card a unique
318  * relative card address (RCA).  CMD2 is ignored by SDIO-only cards.
319  */
void
sdmmc_mem_scan(struct sdmmc_softc *sc)
{
	sdmmc_response resp;
	struct sdmmc_function *sf;
	uint16_t next_rca;
	int error;
	int retry;

	SDMMC_LOCK(sc);

	/*
	 * CMD2 is a broadcast command understood by SD cards and MMC
	 * cards.  All cards begin to respond to the command, but back
	 * off if another card drives the CMD line to a different level.
	 * Only one card will get its entire response through.  That
	 * card remains silent once it has been assigned a RCA.
	 */
	for (retry = 0; retry < 100; retry++) {
		error = sdmmc_mem_send_cid(sc, &resp);
		if (error) {
			if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE) &&
			    error == ETIMEDOUT) {
				/* No more cards there. */
				break;
			}
			DPRINTF(("%s: couldn't read CID\n", SDMMCDEVNAME(sc)));
			break;
		}

		/* In MMC mode, find the next available RCA. */
		next_rca = 1;
		if (!ISSET(sc->sc_flags, SMF_SD_MODE)) {
			/* One RCA per function already on the list. */
			SIMPLEQ_FOREACH(sf, &sc->sf_head, sf_list)
				next_rca++;
		}

		/* Allocate a sdmmc_function structure. */
		sf = sdmmc_function_alloc(sc);
		sf->rca = next_rca;

		/*
		 * Remember the CID returned in the CMD2 response for
		 * later decoding.
		 */
		memcpy(sf->raw_cid, resp, sizeof(sf->raw_cid));

		/*
		 * Silence the card by assigning it a unique RCA, or
		 * querying it for its RCA in the case of SD.
		 */
		if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
			if (sdmmc_set_relative_addr(sc, sf) != 0) {
				aprint_error_dev(sc->sc_dev,
				    "couldn't set mem RCA\n");
				/* Free the function we just allocated. */
				sdmmc_function_free(sf);
				break;
			}
		}

		/*
		 * If this is a memory-only card, the card responding
		 * first becomes an alias for SDIO function 0.
		 */
		if (sc->sc_fn0 == NULL)
			sc->sc_fn0 = sf;

		SIMPLEQ_INSERT_TAIL(&sc->sf_head, sf, sf_list);

		/* only one function in SPI mode */
		if (ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE))
			break;
	}

	if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE))
		/* Go to Data Transfer Mode, if possible. */
		sdmmc_chip_bus_rod(sc->sc_sct, sc->sc_sch, 0);

	/*
	 * All cards are either inactive or awaiting further commands.
	 * Read the CSDs and decode the raw CID for each card.
	 */
	SIMPLEQ_FOREACH(sf, &sc->sf_head, sf_list) {
		error = sdmmc_mem_send_csd(sc, sf, &resp);
		if (error) {
			/* Mark the card bad but keep scanning others. */
			SET(sf->flags, SFF_ERROR);
			continue;
		}

		if (sdmmc_decode_csd(sc, resp, sf) != 0 ||
		    sdmmc_decode_cid(sc, sf->raw_cid, sf) != 0) {
			SET(sf->flags, SFF_ERROR);
			continue;
		}

#ifdef SDMMC_DEBUG
		printf("%s: CID: ", SDMMCDEVNAME(sc));
		sdmmc_print_cid(&sf->cid);
#endif
	}

	SDMMC_UNLOCK(sc);
}
423 
/*
 * Decode the raw CSD response into sf->csd.  Handles the SD 1.0/2.0 and
 * MMC layouts.  Returns 0 on success, 1 on an unrecognized CSD version.
 */
int
sdmmc_decode_csd(struct sdmmc_softc *sc, sdmmc_response resp,
    struct sdmmc_function *sf)
{
	/* TRAN_SPEED(2:0): transfer rate exponent */
	static const int speed_exponent[8] = {
		100 *    1,	/* 100 Kbits/s */
		  1 * 1000,	/*   1 Mbits/s */
		 10 * 1000,	/*  10 Mbits/s */
		100 * 1000,	/* 100 Mbits/s */
		         0,
		         0,
		         0,
		         0,
	};
	/* TRAN_SPEED(6:3): time mantissa */
	static const int speed_mantissa[16] = {
		0, 10, 12, 13, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60, 70, 80,
	};
	struct sdmmc_csd *csd = &sf->csd;
	int e, m;

	if (ISSET(sc->sc_flags, SMF_SD_MODE)) {
		/*
		 * CSD version 1.0 corresponds to SD system
		 * specification version 1.0 - 1.10. (SanDisk, 3.5.3)
		 */
		csd->csdver = SD_CSD_CSDVER(resp);
		switch (csd->csdver) {
		case SD_CSD_CSDVER_2_0:
			/* SDHC/SDXC: capacity field with fixed block length. */
			DPRINTF(("%s: SD Ver.2.0\n", SDMMCDEVNAME(sc)));
			SET(sf->flags, SFF_SDHC);
			csd->capacity = SD_CSD_V2_CAPACITY(resp);
			csd->read_bl_len = SD_CSD_V2_BL_LEN;
			break;

		case SD_CSD_CSDVER_1_0:
			DPRINTF(("%s: SD Ver.1.0\n", SDMMCDEVNAME(sc)));
			csd->capacity = SD_CSD_CAPACITY(resp);
			csd->read_bl_len = SD_CSD_READ_BL_LEN(resp);
			break;

		default:
			aprint_error_dev(sc->sc_dev,
			    "unknown SD CSD structure version 0x%x\n",
			    csd->csdver);
			return 1;
		}

		csd->mmcver = SD_CSD_MMCVER(resp);
		csd->write_bl_len = SD_CSD_WRITE_BL_LEN(resp);
		csd->r2w_factor = SD_CSD_R2W_FACTOR(resp);
		/* tran_speed = exponent * mantissa/10 (kbit/s). */
		e = SD_CSD_SPEED_EXP(resp);
		m = SD_CSD_SPEED_MANT(resp);
		csd->tran_speed = speed_exponent[e] * speed_mantissa[m] / 10;
		csd->ccc = SD_CSD_CCC(resp);
	} else {
		csd->csdver = MMC_CSD_CSDVER(resp);
		if (csd->csdver == MMC_CSD_CSDVER_1_0) {
			/* CSD structure version 1.0 MMC is not supported. */
			aprint_error_dev(sc->sc_dev,
			    "unknown MMC CSD structure version 0x%x\n",
			    csd->csdver);
			return 1;
		}

		csd->mmcver = MMC_CSD_MMCVER(resp);
		csd->capacity = MMC_CSD_CAPACITY(resp);
		csd->read_bl_len = MMC_CSD_READ_BL_LEN(resp);
		csd->write_bl_len = MMC_CSD_WRITE_BL_LEN(resp);
		csd->r2w_factor = MMC_CSD_R2W_FACTOR(resp);
		e = MMC_CSD_TRAN_SPEED_EXP(resp);
		m = MMC_CSD_TRAN_SPEED_MANT(resp);
		csd->tran_speed = speed_exponent[e] * speed_mantissa[m] / 10;
	}
	/* Rescale capacity to SDMMC_SECTOR_SIZE units if blocks are larger. */
	if ((1 << csd->read_bl_len) > SDMMC_SECTOR_SIZE)
		csd->capacity *= (1 << csd->read_bl_len) / SDMMC_SECTOR_SIZE;

#ifdef SDMMC_DUMP_CSD
	sdmmc_print_csd(resp, csd);
#endif

	return 0;
}
507 
/*
 * Decode the raw CID response into sf->cid.  The layout depends on the
 * card type and (for MMC) on sf->csd.mmcver, so sdmmc_decode_csd() must
 * have run first.  Returns 0 on success, 1 on an unknown MMC version.
 */
int
sdmmc_decode_cid(struct sdmmc_softc *sc, sdmmc_response resp,
    struct sdmmc_function *sf)
{
	struct sdmmc_cid *cid = &sf->cid;

	if (ISSET(sc->sc_flags, SMF_SD_MODE)) {
		cid->mid = SD_CID_MID(resp);
		cid->oid = SD_CID_OID(resp);
		SD_CID_PNM_CPY(resp, cid->pnm);
		cid->rev = SD_CID_REV(resp);
		cid->psn = SD_CID_PSN(resp);
		cid->mdt = SD_CID_MDT(resp);
	} else {
		switch(sf->csd.mmcver) {
		case MMC_CSD_MMCVER_1_0:
		case MMC_CSD_MMCVER_1_4:
			/* V1 layout has no OID field decoded here. */
			cid->mid = MMC_CID_MID_V1(resp);
			MMC_CID_PNM_V1_CPY(resp, cid->pnm);
			cid->rev = MMC_CID_REV_V1(resp);
			cid->psn = MMC_CID_PSN_V1(resp);
			cid->mdt = MMC_CID_MDT_V1(resp);
			break;
		case MMC_CSD_MMCVER_2_0:
		case MMC_CSD_MMCVER_3_1:
		case MMC_CSD_MMCVER_4_0:
			/* V2 layout; rev and mdt are not decoded here. */
			cid->mid = MMC_CID_MID_V2(resp);
			cid->oid = MMC_CID_OID_V2(resp);
			MMC_CID_PNM_V2_CPY(resp, cid->pnm);
			cid->psn = MMC_CID_PSN_V2(resp);
			break;
		default:
			aprint_error_dev(sc->sc_dev, "unknown MMC version %d\n",
			    sf->csd.mmcver);
			return 1;
		}
	}
	return 0;
}
547 
/* Print a decoded CID (manufacturer, product name, serial, date) to console. */
void
sdmmc_print_cid(struct sdmmc_cid *cid)
{

	printf("mid=0x%02x oid=0x%04x pnm=\"%s\" rev=0x%02x psn=0x%08x"
	    " mdt=%03x\n", cid->mid, cid->oid, cid->pnm, cid->rev, cid->psn,
	    cid->mdt);
}
556 
#ifdef SDMMC_DUMP_CSD
/* Debug dump of the decoded CSD fields (SDMMC_DUMP_CSD builds only). */
void
sdmmc_print_csd(sdmmc_response resp, struct sdmmc_csd *csd)
{

	printf("csdver = %d\n", csd->csdver);
	printf("mmcver = %d\n", csd->mmcver);
	printf("capacity = 0x%08x\n", csd->capacity);
	printf("read_bl_len = %d\n", csd->read_bl_len);
	printf("write_bl_len = %d\n", csd->write_bl_len);
	printf("r2w_factor = %d\n", csd->r2w_factor);
	printf("tran_speed = %d\n", csd->tran_speed);
	printf("ccc = 0x%x\n", csd->ccc);
}
#endif
572 
573 /*
574  * Initialize a SD/MMC memory card.
575  */
576 int
577 sdmmc_mem_init(struct sdmmc_softc *sc, struct sdmmc_function *sf)
578 {
579 	int error = 0;
580 
581 	SDMMC_LOCK(sc);
582 
583 	if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
584 		error = sdmmc_select_card(sc, sf);
585 		if (error)
586 			goto out;
587 	}
588 
589 	error = sdmmc_mem_set_blocklen(sc, sf, SDMMC_SECTOR_SIZE);
590 	if (error)
591 		goto out;
592 
593 	if (ISSET(sc->sc_flags, SMF_SD_MODE))
594 		error = sdmmc_mem_sd_init(sc, sf);
595 	else
596 		error = sdmmc_mem_mmc_init(sc, sf);
597 
598 	if (error != 0)
599 		SET(sf->flags, SFF_ERROR);
600 
601 out:
602 	SDMMC_UNLOCK(sc);
603 
604 	return error;
605 }
606 
607 /*
608  * Get or set the card's memory OCR value (SD or MMC).
609  */
int
sdmmc_mem_send_op_cond(struct sdmmc_softc *sc, uint32_t ocr, uint32_t *ocrp)
{
	struct sdmmc_command cmd;
	int error;
	int retry;

	/* Don't lock */

	DPRINTF(("%s: sdmmc_mem_send_op_cond: ocr=%#x\n",
	    SDMMCDEVNAME(sc), ocr));

	/*
	 * If we change the OCR value, retry the command until the OCR
	 * we receive in response has the "CARD BUSY" bit set, meaning
	 * that all cards are ready for identification.
	 */
	for (retry = 0; retry < 100; retry++) {
		memset(&cmd, 0, sizeof(cmd));
		/* In SPI mode only the HCS bit is passed as the argument. */
		cmd.c_arg = !ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE) ?
		    ocr : (ocr & MMC_OCR_HCS);
		cmd.c_flags = SCF_CMD_BCR | SCF_RSP_R3 | SCF_RSP_SPI_R1
		    | SCF_TOUT_OK;

		/* SD uses ACMD41 (CMD55+CMD41); MMC uses CMD1. */
		if (ISSET(sc->sc_flags, SMF_SD_MODE)) {
			cmd.c_opcode = SD_APP_OP_COND;
			error = sdmmc_app_command(sc, NULL, &cmd);
		} else {
			cmd.c_opcode = MMC_SEND_OP_COND;
			error = sdmmc_mmc_command(sc, &cmd);
		}
		if (error)
			break;

		if (ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
			/* SPI: done once the card has left the idle state. */
			if (!ISSET(MMC_SPI_R1(cmd.c_resp), R1_SPI_IDLE))
				break;
		} else {
			/* Native: done when busy clears (or probing, ocr==0). */
			if (ISSET(MMC_R3(cmd.c_resp), MMC_OCR_MEM_READY) ||
			    ocr == 0)
				break;
		}

		/* Card still busy: wait 10ms before the next attempt. */
		error = ETIMEDOUT;
		sdmmc_pause(10000, NULL);
	}
	if (ocrp != NULL) {
		if (error == 0 &&
		    !ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
			*ocrp = MMC_R3(cmd.c_resp);
		} else {
			/* SPI (or failure): echo back the requested OCR. */
			*ocrp = ocr;
		}
	}
	DPRINTF(("%s: sdmmc_mem_send_op_cond: error=%d, ocr=%#x\n",
	    SDMMCDEVNAME(sc), error, MMC_R3(cmd.c_resp)));
	return error;
}
668 
669 int
670 sdmmc_mem_send_if_cond(struct sdmmc_softc *sc, uint32_t ocr, uint32_t *ocrp)
671 {
672 	struct sdmmc_command cmd;
673 	int error;
674 
675 	/* Don't lock */
676 
677 	memset(&cmd, 0, sizeof(cmd));
678 	cmd.c_arg = ocr;
679 	cmd.c_flags = SCF_CMD_BCR | SCF_RSP_R7 | SCF_RSP_SPI_R7 | SCF_TOUT_OK;
680 	cmd.c_opcode = SD_SEND_IF_COND;
681 
682 	error = sdmmc_mmc_command(sc, &cmd);
683 	if (error == 0 && ocrp != NULL) {
684 		if (ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
685 			*ocrp = MMC_SPI_R7(cmd.c_resp);
686 		} else {
687 			*ocrp = MMC_R7(cmd.c_resp);
688 		}
689 		DPRINTF(("%s: sdmmc_mem_send_if_cond: error=%d, ocr=%#x\n",
690 		    SDMMCDEVNAME(sc), error, *ocrp));
691 	}
692 	return error;
693 }
694 
695 /*
696  * Set the read block length appropriately for this card, according to
697  * the card CSD register value.
698  */
699 int
700 sdmmc_mem_set_blocklen(struct sdmmc_softc *sc, struct sdmmc_function *sf,
701    int block_len)
702 {
703 	struct sdmmc_command cmd;
704 	int error;
705 
706 	/* Don't lock */
707 
708 	memset(&cmd, 0, sizeof(cmd));
709 	cmd.c_opcode = MMC_SET_BLOCKLEN;
710 	cmd.c_arg = block_len;
711 	cmd.c_flags = SCF_CMD_AC | SCF_RSP_R1 | SCF_RSP_SPI_R1;
712 
713 	error = sdmmc_mmc_command(sc, &cmd);
714 
715 	DPRINTF(("%s: sdmmc_mem_set_blocklen: read_bl_len=%d sector_size=%d\n",
716 	    SDMMCDEVNAME(sc), 1 << sf->csd.read_bl_len, block_len));
717 
718 	return error;
719 }
720 
721 /* make 512-bit BE quantity __bitfield()-compatible */
722 static void
723 sdmmc_be512_to_bitfield512(sdmmc_bitfield512_t *buf) {
724 	size_t i;
725 	uint32_t tmp0, tmp1;
726 	const size_t bitswords = __arraycount(buf->_bits);
727 	for (i = 0; i < bitswords/2; i++) {
728 		tmp0 = buf->_bits[i];
729 		tmp1 = buf->_bits[bitswords - 1 - i];
730 		buf->_bits[i] = be32toh(tmp1);
731 		buf->_bits[bitswords - 1 - i] = be32toh(tmp0);
732 	}
733 }
734 
735 static int
736 sdmmc_mem_select_transfer_mode(struct sdmmc_softc *sc, int support_func)
737 {
738 	if (ISSET(sc->sc_flags, SMF_UHS_MODE)) {
739 		if (ISSET(sc->sc_caps, SMC_CAPS_UHS_SDR104) &&
740 		    ISSET(support_func, 1 << SD_ACCESS_MODE_SDR104)) {
741 			return SD_ACCESS_MODE_SDR104;
742 		}
743 		if (ISSET(sc->sc_caps, SMC_CAPS_UHS_DDR50) &&
744 		    ISSET(support_func, 1 << SD_ACCESS_MODE_DDR50)) {
745 			return SD_ACCESS_MODE_DDR50;
746 		}
747 		if (ISSET(sc->sc_caps, SMC_CAPS_UHS_SDR50) &&
748 		    ISSET(support_func, 1 << SD_ACCESS_MODE_SDR50)) {
749 			return SD_ACCESS_MODE_SDR50;
750 		}
751 	}
752 	if (ISSET(sc->sc_caps, SMC_CAPS_SD_HIGHSPEED) &&
753 	    ISSET(support_func, 1 << SD_ACCESS_MODE_SDR25)) {
754 		return SD_ACCESS_MODE_SDR25;
755 	}
756 	return SD_ACCESS_MODE_SDR12;
757 }
758 
759 static int
760 sdmmc_mem_execute_tuning(struct sdmmc_softc *sc, struct sdmmc_function *sf)
761 {
762 	int timing = -1;
763 
764 	if (ISSET(sc->sc_flags, SMF_SD_MODE)) {
765 		if (!ISSET(sc->sc_flags, SMF_UHS_MODE))
766 			return 0;
767 
768 		switch (sf->csd.tran_speed) {
769 		case 100000:
770 			timing = SDMMC_TIMING_UHS_SDR50;
771 			break;
772 		case 208000:
773 			timing = SDMMC_TIMING_UHS_SDR104;
774 			break;
775 		default:
776 			return 0;
777 		}
778 	} else {
779 		switch (sf->csd.tran_speed) {
780 		case 200000:
781 			timing = SDMMC_TIMING_MMC_HS200;
782 			break;
783 		default:
784 			return 0;
785 		}
786 	}
787 
788 	DPRINTF(("%s: execute tuning for timing %d\n", SDMMCDEVNAME(sc),
789 	    timing));
790 
791 	return sdmmc_chip_execute_tuning(sc->sc_sct, sc->sc_sch, timing);
792 }
793 
/*
 * SD-specific bring-up: read and decode the SCR, widen the bus to 4 bits
 * if possible, negotiate the fastest CMD6 access mode, update the bus
 * clock, read the SD status register, and run tuning for UHS modes.
 */
static int
sdmmc_mem_sd_init(struct sdmmc_softc *sc, struct sdmmc_function *sf)
{
	int support_func, best_func, bus_clock, error, i;
	sdmmc_bitfield512_t status;
	bool ddr = false;

	/* change bus clock */
	bus_clock = uimin(sc->sc_busclk, sf->csd.tran_speed);
	error = sdmmc_chip_bus_clock(sc->sc_sct, sc->sc_sch, bus_clock, false);
	if (error) {
		aprint_error_dev(sc->sc_dev, "can't change bus clock\n");
		return error;
	}

	error = sdmmc_mem_send_scr(sc, sf, sf->raw_scr);
	if (error) {
		aprint_error_dev(sc->sc_dev, "SD_SEND_SCR send failed.\n");
		return error;
	}
	error = sdmmc_mem_decode_scr(sc, sf);
	if (error)
		return error;

	/* Switch to a 4-bit bus if both host and card support it. */
	if (ISSET(sc->sc_caps, SMC_CAPS_4BIT_MODE) &&
	    ISSET(sf->scr.bus_width, SCR_SD_BUS_WIDTHS_4BIT)) {
		DPRINTF(("%s: change bus width\n", SDMMCDEVNAME(sc)));
		error = sdmmc_set_bus_width(sf, 4);
		if (error) {
			aprint_error_dev(sc->sc_dev,
			    "can't change bus width (%d bit)\n", 4);
			return error;
		}
		sf->width = 4;
	}

	best_func = 0;
	/* CMD6 is only defined for spec >= 1.10 cards with the SWITCH class. */
	if (sf->scr.sd_spec >= SCR_SD_SPEC_VER_1_10 &&
	    ISSET(sf->csd.ccc, SD_CSD_CCC_SWITCH)) {
		DPRINTF(("%s: switch func mode 0\n", SDMMCDEVNAME(sc)));
		/* Mode 0 (inquiry): ask which group 1 functions are supported. */
		error = sdmmc_mem_sd_switch(sf, 0, 1, 0, &status);
		if (error) {
			if (error == ENOTSUP) {
				/* Not supported by controller */
				goto skipswitchfuncs;
			} else {
				aprint_error_dev(sc->sc_dev,
				    "switch func mode 0 failed\n");
				return error;
			}
		}

		support_func = SFUNC_STATUS_GROUP(&status, 1);

		/* 0x1c = SDR50/SDR104/DDR50 support bits. */
		if (!ISSET(sc->sc_flags, SMF_UHS_MODE) && support_func & 0x1c) {
			/* XXX UHS-I card started in 1.8V mode, switch now */
			error = sdmmc_mem_signal_voltage(sc,
			    SDMMC_SIGNAL_VOLTAGE_180);
			if (error) {
				aprint_error_dev(sc->sc_dev,
				    "failed to recover UHS card\n");
				return error;
			}
			SET(sc->sc_flags, SMF_UHS_MODE);
		}

		for (i = 0; i < __arraycount(switch_group0_functions); i++) {
			if (!(support_func & (1 << i)))
				continue;
			DPRINTF(("%s: card supports mode %s\n",
			    SDMMCDEVNAME(sc),
			    switch_group0_functions[i].name));
		}

		best_func = sdmmc_mem_select_transfer_mode(sc, support_func);

		DPRINTF(("%s: using mode %s\n", SDMMCDEVNAME(sc),
		    switch_group0_functions[best_func].name));

		if (best_func != 0) {
			DPRINTF(("%s: switch func mode 1(func=%d)\n",
			    SDMMCDEVNAME(sc), best_func));
			/* Mode 1 (set): actually switch to the chosen function. */
			error =
			    sdmmc_mem_sd_switch(sf, 1, 1, best_func, &status);
			if (error) {
				aprint_error_dev(sc->sc_dev,
				    "switch func mode 1 failed:"
				    " group 1 function %d(0x%2x)\n",
				    best_func, support_func);
				return error;
			}
			sf->csd.tran_speed =
			    switch_group0_functions[best_func].freq;

			if (best_func == SD_ACCESS_MODE_DDR50)
				ddr = true;

			/* Wait 400KHz x 8 clock (2.5us * 8 + slop) */
			delay(25);
		}
	}
skipswitchfuncs:

	/* update bus clock */
	if (sc->sc_busclk > sf->csd.tran_speed)
		sc->sc_busclk = sf->csd.tran_speed;
	/* Nothing changed: no reclock, SSR read or tuning required. */
	if (sc->sc_busclk == bus_clock && sc->sc_busddr == ddr)
		return 0;

	/* change bus clock */
	error = sdmmc_chip_bus_clock(sc->sc_sct, sc->sc_sch, sc->sc_busclk,
	    ddr);
	if (error) {
		aprint_error_dev(sc->sc_dev, "can't change bus clock\n");
		return error;
	}

	sc->sc_transfer_mode = switch_group0_functions[best_func].name;
	sc->sc_busddr = ddr;

	/* get card status */
	error = sdmmc_mem_send_ssr(sc, sf, &status);
	if (error) {
		aprint_error_dev(sc->sc_dev, "can't get SD status: %d\n",
		    error);
		return error;
	}
	sdmmc_mem_decode_ssr(sc, sf, &status);

	/* execute tuning (UHS) */
	error = sdmmc_mem_execute_tuning(sc, sf);
	if (error) {
		aprint_error_dev(sc->sc_dev, "can't execute SD tuning\n");
		return error;
	}

	return 0;
}
932 
933 static int
934 sdmmc_mem_mmc_init(struct sdmmc_softc *sc, struct sdmmc_function *sf)
935 {
936 	int width, value, hs_timing, bus_clock, error;
937 	uint8_t ext_csd[512];
938 	uint32_t sectors = 0;
939 	bool ddr = false;
940 
941 	sc->sc_transfer_mode = NULL;
942 
943 	/* change bus clock */
944 	bus_clock = uimin(sc->sc_busclk, sf->csd.tran_speed);
945 	error = sdmmc_chip_bus_clock(sc->sc_sct, sc->sc_sch, bus_clock, false);
946 	if (error) {
947 		aprint_error_dev(sc->sc_dev, "can't change bus clock\n");
948 		return error;
949 	}
950 
951 	if (sf->csd.mmcver >= MMC_CSD_MMCVER_4_0) {
952 		error = sdmmc_mem_send_cxd_data(sc,
953 		    MMC_SEND_EXT_CSD, ext_csd, sizeof(ext_csd));
954 		if (error) {
955 			aprint_error_dev(sc->sc_dev,
956 			    "can't read EXT_CSD (error=%d)\n", error);
957 			return error;
958 		}
959 		if ((sf->csd.csdver == MMC_CSD_CSDVER_EXT_CSD) &&
960 		    (ext_csd[EXT_CSD_STRUCTURE] > EXT_CSD_STRUCTURE_VER_1_2)) {
961 			aprint_error_dev(sc->sc_dev,
962 			    "unrecognised future version (%d)\n",
963 				ext_csd[EXT_CSD_STRUCTURE]);
964 			return ENOTSUP;
965 		}
966 		sf->ext_csd.rev = ext_csd[EXT_CSD_REV];
967 
968 		if (ISSET(sc->sc_caps, SMC_CAPS_MMC_HS200) &&
969 		    ext_csd[EXT_CSD_CARD_TYPE] & EXT_CSD_CARD_TYPE_F_HS200_1_8V) {
970 			hs_timing = EXT_CSD_HS_TIMING_HS200;
971 		} else if (ISSET(sc->sc_caps, SMC_CAPS_MMC_DDR52) &&
972 		    ext_csd[EXT_CSD_CARD_TYPE] & EXT_CSD_CARD_TYPE_F_DDR52_1_8V) {
973 			hs_timing = EXT_CSD_HS_TIMING_HIGHSPEED;
974 			ddr = true;
975 		} else if (ext_csd[EXT_CSD_CARD_TYPE] & EXT_CSD_CARD_TYPE_F_52M) {
976 			hs_timing = EXT_CSD_HS_TIMING_HIGHSPEED;
977 		} else if (ext_csd[EXT_CSD_CARD_TYPE] & EXT_CSD_CARD_TYPE_F_26M) {
978 			hs_timing = EXT_CSD_HS_TIMING_LEGACY;
979 		} else {
980 			aprint_error_dev(sc->sc_dev,
981 			    "unknown CARD_TYPE: 0x%x\n",
982 			    ext_csd[EXT_CSD_CARD_TYPE]);
983 			return ENOTSUP;
984 		}
985 
986 		if (ISSET(sc->sc_caps, SMC_CAPS_8BIT_MODE)) {
987 			width = 8;
988 			value = EXT_CSD_BUS_WIDTH_8;
989 		} else if (ISSET(sc->sc_caps, SMC_CAPS_4BIT_MODE)) {
990 			width = 4;
991 			value = EXT_CSD_BUS_WIDTH_4;
992 		} else {
993 			width = 1;
994 			value = EXT_CSD_BUS_WIDTH_1;
995 		}
996 
997 		if (width != 1) {
998 			error = sdmmc_mem_mmc_switch(sf, EXT_CSD_CMD_SET_NORMAL,
999 			    EXT_CSD_BUS_WIDTH, value, false);
1000 			if (error == 0)
1001 				error = sdmmc_chip_bus_width(sc->sc_sct,
1002 				    sc->sc_sch, width);
1003 			else {
1004 				DPRINTF(("%s: can't change bus width"
1005 				    " (%d bit)\n", SDMMCDEVNAME(sc), width));
1006 				return error;
1007 			}
1008 
1009 			/* XXXX: need bus test? (using by CMD14 & CMD19) */
1010 			delay(10000);
1011 		}
1012 		sf->width = width;
1013 
1014 		if (hs_timing == EXT_CSD_HS_TIMING_HIGHSPEED &&
1015 		    !ISSET(sc->sc_caps, SMC_CAPS_MMC_HIGHSPEED)) {
1016 			hs_timing = EXT_CSD_HS_TIMING_LEGACY;
1017 		}
1018 
1019 		const int target_timing = hs_timing;
1020 		if (hs_timing != EXT_CSD_HS_TIMING_LEGACY) {
1021 			while (hs_timing >= EXT_CSD_HS_TIMING_LEGACY) {
1022 				error = sdmmc_mem_mmc_switch(sf, EXT_CSD_CMD_SET_NORMAL,
1023 				    EXT_CSD_HS_TIMING, hs_timing, false);
1024 				if (error == 0 || hs_timing == EXT_CSD_HS_TIMING_LEGACY)
1025 					break;
1026 				hs_timing--;
1027 			}
1028 		}
1029 		if (hs_timing != target_timing) {
1030 			aprint_debug_dev(sc->sc_dev,
1031 			    "card failed to switch to timing mode %d, using %d\n",
1032 			    target_timing, hs_timing);
1033 		}
1034 
1035 		KASSERT(hs_timing < __arraycount(sdmmc_mmc_timings));
1036 		sf->csd.tran_speed = sdmmc_mmc_timings[hs_timing];
1037 
1038 		if (sc->sc_busclk > sf->csd.tran_speed)
1039 			sc->sc_busclk = sf->csd.tran_speed;
1040 		if (sc->sc_busclk != bus_clock) {
1041 			error = sdmmc_chip_bus_clock(sc->sc_sct, sc->sc_sch,
1042 			    sc->sc_busclk, false);
1043 			if (error) {
1044 				aprint_error_dev(sc->sc_dev,
1045 				    "can't change bus clock\n");
1046 				return error;
1047 			}
1048 		}
1049 
1050 		if (hs_timing != EXT_CSD_HS_TIMING_LEGACY) {
1051 			error = sdmmc_mem_send_cxd_data(sc,
1052 			    MMC_SEND_EXT_CSD, ext_csd, sizeof(ext_csd));
1053 			if (error) {
1054 				aprint_error_dev(sc->sc_dev,
1055 				    "can't re-read EXT_CSD\n");
1056 				return error;
1057 			}
1058 			if (ext_csd[EXT_CSD_HS_TIMING] != hs_timing) {
1059 				aprint_error_dev(sc->sc_dev,
1060 				    "HS_TIMING set failed\n");
1061 				return EINVAL;
1062 			}
1063 		}
1064 
1065 		/*
1066 		 * HS_TIMING must be set to 0x1 before setting BUS_WIDTH
1067 		 * for dual data rate operation
1068 		 */
1069 		if (ddr &&
1070 		    hs_timing == EXT_CSD_HS_TIMING_HIGHSPEED &&
1071 		    width > 1) {
1072 			error = sdmmc_mem_mmc_switch(sf,
1073 			    EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BUS_WIDTH,
1074 			    (width == 8) ? EXT_CSD_BUS_WIDTH_8_DDR :
1075 			      EXT_CSD_BUS_WIDTH_4_DDR, false);
1076 			if (error) {
1077 				DPRINTF(("%s: can't switch to DDR"
1078 				    " (%d bit)\n", SDMMCDEVNAME(sc), width));
1079 				return error;
1080 			}
1081 
1082 			delay(10000);
1083 
1084 			error = sdmmc_mem_signal_voltage(sc,
1085 			    SDMMC_SIGNAL_VOLTAGE_180);
1086 			if (error) {
1087 				aprint_error_dev(sc->sc_dev,
1088 				    "can't switch signaling voltage\n");
1089 				return error;
1090 			}
1091 
1092 			error = sdmmc_chip_bus_clock(sc->sc_sct, sc->sc_sch,
1093 			    sc->sc_busclk, ddr);
1094 			if (error) {
1095 				aprint_error_dev(sc->sc_dev,
1096 				    "can't change bus clock\n");
1097 				return error;
1098 			}
1099 
1100 			delay(10000);
1101 
1102 			sc->sc_transfer_mode = "DDR52";
1103 			sc->sc_busddr = ddr;
1104 		}
1105 
1106 		sectors = ext_csd[EXT_CSD_SEC_COUNT + 0] << 0 |
1107 		    ext_csd[EXT_CSD_SEC_COUNT + 1] << 8  |
1108 		    ext_csd[EXT_CSD_SEC_COUNT + 2] << 16 |
1109 		    ext_csd[EXT_CSD_SEC_COUNT + 3] << 24;
1110 		if (sectors > (2u * 1024 * 1024 * 1024) / 512) {
1111 			SET(sf->flags, SFF_SDHC);
1112 			sf->csd.capacity = sectors;
1113 		}
1114 
1115 		if (hs_timing == EXT_CSD_HS_TIMING_HS200) {
1116 			sc->sc_transfer_mode = "HS200";
1117 
1118 			/* execute tuning (HS200) */
1119 			error = sdmmc_mem_execute_tuning(sc, sf);
1120 			if (error) {
1121 				aprint_error_dev(sc->sc_dev,
1122 				    "can't execute MMC tuning\n");
1123 				return error;
1124 			}
1125 		}
1126 
1127 		if (sf->ext_csd.rev >= 5) {
1128 			sf->ext_csd.rst_n_function =
1129 			    ext_csd[EXT_CSD_RST_N_FUNCTION];
1130 		}
1131 
1132 		if (sf->ext_csd.rev >= 6) {
1133 			sf->ext_csd.cache_size =
1134 			    le32dec(&ext_csd[EXT_CSD_CACHE_SIZE]) * 1024;
1135 		}
1136 		if (sf->ext_csd.cache_size > 0) {
1137 			/* eMMC cache present, enable it */
1138 			error = sdmmc_mem_mmc_switch(sf,
1139 			    EXT_CSD_CMD_SET_NORMAL, EXT_CSD_CACHE_CTRL,
1140 			    EXT_CSD_CACHE_CTRL_CACHE_EN, false);
1141 			if (error) {
1142 				aprint_error_dev(sc->sc_dev,
1143 				    "can't enable cache: %d\n", error);
1144 			} else {
1145 				SET(sf->flags, SFF_CACHE_ENABLED);
1146 			}
1147 		}
1148 	} else {
1149 		if (sc->sc_busclk > sf->csd.tran_speed)
1150 			sc->sc_busclk = sf->csd.tran_speed;
1151 		if (sc->sc_busclk != bus_clock) {
1152 			error = sdmmc_chip_bus_clock(sc->sc_sct, sc->sc_sch,
1153 			    sc->sc_busclk, false);
1154 			if (error) {
1155 				aprint_error_dev(sc->sc_dev,
1156 				    "can't change bus clock\n");
1157 				return error;
1158 			}
1159 		}
1160 	}
1161 
1162 	return 0;
1163 }
1164 
1165 static int
1166 sdmmc_mem_send_cid(struct sdmmc_softc *sc, sdmmc_response *resp)
1167 {
1168 	struct sdmmc_command cmd;
1169 	int error;
1170 
1171 	if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
1172 		memset(&cmd, 0, sizeof cmd);
1173 		cmd.c_opcode = MMC_ALL_SEND_CID;
1174 		cmd.c_flags = SCF_CMD_BCR | SCF_RSP_R2 | SCF_TOUT_OK;
1175 
1176 		error = sdmmc_mmc_command(sc, &cmd);
1177 	} else {
1178 		error = sdmmc_mem_send_cxd_data(sc, MMC_SEND_CID, &cmd.c_resp,
1179 		    sizeof(cmd.c_resp));
1180 	}
1181 
1182 #ifdef SDMMC_DEBUG
1183 	if (error == 0)
1184 		sdmmc_dump_data("CID", cmd.c_resp, sizeof(cmd.c_resp));
1185 #endif
1186 	if (error == 0 && resp != NULL)
1187 		memcpy(resp, &cmd.c_resp, sizeof(*resp));
1188 	return error;
1189 }
1190 
1191 static int
1192 sdmmc_mem_send_csd(struct sdmmc_softc *sc, struct sdmmc_function *sf,
1193     sdmmc_response *resp)
1194 {
1195 	struct sdmmc_command cmd;
1196 	int error;
1197 
1198 	if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
1199 		memset(&cmd, 0, sizeof cmd);
1200 		cmd.c_opcode = MMC_SEND_CSD;
1201 		cmd.c_arg = MMC_ARG_RCA(sf->rca);
1202 		cmd.c_flags = SCF_CMD_AC | SCF_RSP_R2;
1203 
1204 		error = sdmmc_mmc_command(sc, &cmd);
1205 	} else {
1206 		error = sdmmc_mem_send_cxd_data(sc, MMC_SEND_CSD, &cmd.c_resp,
1207 		    sizeof(cmd.c_resp));
1208 	}
1209 
1210 #ifdef SDMMC_DEBUG
1211 	if (error == 0)
1212 		sdmmc_dump_data("CSD", cmd.c_resp, sizeof(cmd.c_resp));
1213 #endif
1214 	if (error == 0 && resp != NULL)
1215 		memcpy(resp, &cmd.c_resp, sizeof(*resp));
1216 	return error;
1217 }
1218 
1219 static int
1220 sdmmc_mem_send_scr(struct sdmmc_softc *sc, struct sdmmc_function *sf,
1221     uint32_t *scr)
1222 {
1223 	struct sdmmc_command cmd;
1224 	bus_dma_segment_t ds[1];
1225 	void *ptr = NULL;
1226 	int datalen = 8;
1227 	int rseg;
1228 	int error = 0;
1229 
1230 	/* Don't lock */
1231 
1232 	if (ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
1233 		error = bus_dmamem_alloc(sc->sc_dmat, datalen, PAGE_SIZE, 0,
1234 		    ds, 1, &rseg, BUS_DMA_NOWAIT);
1235 		if (error)
1236 			goto out;
1237 		error = bus_dmamem_map(sc->sc_dmat, ds, 1, datalen, &ptr,
1238 		    BUS_DMA_NOWAIT);
1239 		if (error)
1240 			goto dmamem_free;
1241 		error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmap, ptr, datalen,
1242 		    NULL, BUS_DMA_NOWAIT|BUS_DMA_STREAMING|BUS_DMA_READ);
1243 		if (error)
1244 			goto dmamem_unmap;
1245 
1246 		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap, 0, datalen,
1247 		    BUS_DMASYNC_PREREAD);
1248 	} else {
1249 		ptr = malloc(datalen, M_DEVBUF, M_NOWAIT | M_ZERO);
1250 		if (ptr == NULL)
1251 			goto out;
1252 	}
1253 
1254 	memset(&cmd, 0, sizeof(cmd));
1255 	cmd.c_data = ptr;
1256 	cmd.c_datalen = datalen;
1257 	cmd.c_blklen = datalen;
1258 	cmd.c_arg = 0;
1259 	cmd.c_flags = SCF_CMD_ADTC | SCF_CMD_READ | SCF_RSP_R1 | SCF_RSP_SPI_R1;
1260 	cmd.c_opcode = SD_APP_SEND_SCR;
1261 	if (ISSET(sc->sc_caps, SMC_CAPS_DMA))
1262 		cmd.c_dmamap = sc->sc_dmap;
1263 
1264 	error = sdmmc_app_command(sc, sf, &cmd);
1265 	if (error == 0) {
1266 		if (ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
1267 			bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap, 0, datalen,
1268 			    BUS_DMASYNC_POSTREAD);
1269 		}
1270 		memcpy(scr, ptr, datalen);
1271 	}
1272 
1273 out:
1274 	if (ptr != NULL) {
1275 		if (ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
1276 			bus_dmamap_unload(sc->sc_dmat, sc->sc_dmap);
1277 dmamem_unmap:
1278 			bus_dmamem_unmap(sc->sc_dmat, ptr, datalen);
1279 dmamem_free:
1280 			bus_dmamem_free(sc->sc_dmat, ds, rseg);
1281 		} else {
1282 			free(ptr, M_DEVBUF);
1283 		}
1284 	}
1285 	DPRINTF(("%s: sdmem_mem_send_scr: error = %d\n", SDMMCDEVNAME(sc),
1286 	    error));
1287 
1288 #ifdef SDMMC_DEBUG
1289 	if (error == 0)
1290 		sdmmc_dump_data("SCR", scr, datalen);
1291 #endif
1292 	return error;
1293 }
1294 
1295 static int
1296 sdmmc_mem_decode_scr(struct sdmmc_softc *sc, struct sdmmc_function *sf)
1297 {
1298 	sdmmc_response resp;
1299 	int ver;
1300 
1301 	memset(resp, 0, sizeof(resp));
1302 	/*
1303 	 * Change the raw-scr received from the DMA stream to resp.
1304 	 */
1305 	resp[0] = be32toh(sf->raw_scr[1]) >> 8;		// LSW
1306 	resp[1] = be32toh(sf->raw_scr[0]);		// MSW
1307 	resp[0] |= (resp[1] & 0xff) << 24;
1308 	resp[1] >>= 8;
1309 
1310 	ver = SCR_STRUCTURE(resp);
1311 	sf->scr.sd_spec = SCR_SD_SPEC(resp);
1312 	sf->scr.bus_width = SCR_SD_BUS_WIDTHS(resp);
1313 
1314 	DPRINTF(("%s: sdmmc_mem_decode_scr: %08x%08x ver=%d, spec=%d, bus width=%d\n",
1315 	    SDMMCDEVNAME(sc), resp[1], resp[0],
1316 	    ver, sf->scr.sd_spec, sf->scr.bus_width));
1317 
1318 	if (ver != 0 && ver != 1) {
1319 		DPRINTF(("%s: unknown structure version: %d\n",
1320 		    SDMMCDEVNAME(sc), ver));
1321 		return EINVAL;
1322 	}
1323 	return 0;
1324 }
1325 
1326 static int
1327 sdmmc_mem_send_ssr(struct sdmmc_softc *sc, struct sdmmc_function *sf,
1328     sdmmc_bitfield512_t *ssr)
1329 {
1330 	struct sdmmc_command cmd;
1331 	bus_dma_segment_t ds[1];
1332 	void *ptr = NULL;
1333 	int datalen = 64;
1334 	int rseg;
1335 	int error = 0;
1336 
1337 	/* Don't lock */
1338 
1339 	if (ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
1340 		error = bus_dmamem_alloc(sc->sc_dmat, datalen, PAGE_SIZE, 0,
1341 		    ds, 1, &rseg, BUS_DMA_NOWAIT);
1342 		if (error)
1343 			goto out;
1344 		error = bus_dmamem_map(sc->sc_dmat, ds, 1, datalen, &ptr,
1345 		    BUS_DMA_NOWAIT);
1346 		if (error)
1347 			goto dmamem_free;
1348 		error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmap, ptr, datalen,
1349 		    NULL, BUS_DMA_NOWAIT|BUS_DMA_STREAMING|BUS_DMA_READ);
1350 		if (error)
1351 			goto dmamem_unmap;
1352 
1353 		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap, 0, datalen,
1354 		    BUS_DMASYNC_PREREAD);
1355 	} else {
1356 		ptr = malloc(datalen, M_DEVBUF, M_NOWAIT | M_ZERO);
1357 		if (ptr == NULL)
1358 			goto out;
1359 	}
1360 
1361 	memset(&cmd, 0, sizeof(cmd));
1362 	cmd.c_data = ptr;
1363 	cmd.c_datalen = datalen;
1364 	cmd.c_blklen = datalen;
1365 	cmd.c_arg = 0;
1366 	cmd.c_flags = SCF_CMD_ADTC | SCF_CMD_READ | SCF_RSP_R1 | SCF_RSP_SPI_R1;
1367 	cmd.c_opcode = SD_APP_SD_STATUS;
1368 	if (ISSET(sc->sc_caps, SMC_CAPS_DMA))
1369 		cmd.c_dmamap = sc->sc_dmap;
1370 
1371 	error = sdmmc_app_command(sc, sf, &cmd);
1372 	if (error == 0) {
1373 		if (ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
1374 			bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap, 0, datalen,
1375 			    BUS_DMASYNC_POSTREAD);
1376 		}
1377 		memcpy(ssr, ptr, datalen);
1378 	}
1379 
1380 out:
1381 	if (ptr != NULL) {
1382 		if (ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
1383 			bus_dmamap_unload(sc->sc_dmat, sc->sc_dmap);
1384 dmamem_unmap:
1385 			bus_dmamem_unmap(sc->sc_dmat, ptr, datalen);
1386 dmamem_free:
1387 			bus_dmamem_free(sc->sc_dmat, ds, rseg);
1388 		} else {
1389 			free(ptr, M_DEVBUF);
1390 		}
1391 	}
1392 	DPRINTF(("%s: sdmem_mem_send_ssr: error = %d\n", SDMMCDEVNAME(sc),
1393 	    error));
1394 
1395 	if (error == 0)
1396 		sdmmc_be512_to_bitfield512(ssr);
1397 
1398 #ifdef SDMMC_DEBUG
1399 	if (error == 0)
1400 		sdmmc_dump_data("SSR", ssr, datalen);
1401 #endif
1402 	return error;
1403 }
1404 
1405 static int
1406 sdmmc_mem_decode_ssr(struct sdmmc_softc *sc, struct sdmmc_function *sf,
1407     sdmmc_bitfield512_t *ssr_bitfield)
1408 {
1409 	uint32_t *ssr = (uint32_t *)ssr_bitfield;
1410 	int speed_class_val, bus_width_val;
1411 
1412 	const int bus_width = SSR_DAT_BUS_WIDTH(ssr);
1413 	const int speed_class = SSR_SPEED_CLASS(ssr);
1414 	const int uhs_speed_grade = SSR_UHS_SPEED_GRADE(ssr);
1415 	const int video_speed_class = SSR_VIDEO_SPEED_CLASS(ssr);
1416 	const int app_perf_class = SSR_APP_PERF_CLASS(ssr);
1417 
1418 	switch (speed_class) {
1419 	case SSR_SPEED_CLASS_0:	speed_class_val = 0; break;
1420 	case SSR_SPEED_CLASS_2: speed_class_val = 2; break;
1421 	case SSR_SPEED_CLASS_4: speed_class_val = 4; break;
1422 	case SSR_SPEED_CLASS_6: speed_class_val = 6; break;
1423 	case SSR_SPEED_CLASS_10: speed_class_val = 10; break;
1424 	default: speed_class_val = -1; break;
1425 	}
1426 
1427 	switch (bus_width) {
1428 	case SSR_DAT_BUS_WIDTH_1: bus_width_val = 1; break;
1429 	case SSR_DAT_BUS_WIDTH_4: bus_width_val = 4; break;
1430 	default: bus_width_val = -1;
1431 	}
1432 
1433 	/*
1434 	 * Log card status
1435 	 */
1436 	device_printf(sc->sc_dev, "SD card status:");
1437 	if (bus_width_val != -1)
1438 		printf(" %d-bit", bus_width_val);
1439 	else
1440 		printf(" unknown bus width");
1441 	if (speed_class_val != -1)
1442 		printf(", C%d", speed_class_val);
1443 	if (uhs_speed_grade)
1444 		printf(", U%d", uhs_speed_grade);
1445 	if (video_speed_class)
1446 		printf(", V%d", video_speed_class);
1447 	if (app_perf_class)
1448 		printf(", A%d", app_perf_class);
1449 	printf("\n");
1450 
1451 	return 0;
1452 }
1453 
1454 static int
1455 sdmmc_mem_send_cxd_data(struct sdmmc_softc *sc, int opcode, void *data,
1456     size_t datalen)
1457 {
1458 	struct sdmmc_command cmd;
1459 	bus_dma_segment_t ds[1];
1460 	void *ptr = NULL;
1461 	int rseg;
1462 	int error = 0;
1463 
1464 	if (ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
1465 		error = bus_dmamem_alloc(sc->sc_dmat, datalen, PAGE_SIZE, 0, ds,
1466 		    1, &rseg, BUS_DMA_NOWAIT);
1467 		if (error)
1468 			goto out;
1469 		error = bus_dmamem_map(sc->sc_dmat, ds, 1, datalen, &ptr,
1470 		    BUS_DMA_NOWAIT);
1471 		if (error)
1472 			goto dmamem_free;
1473 		error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmap, ptr, datalen,
1474 		    NULL, BUS_DMA_NOWAIT|BUS_DMA_STREAMING|BUS_DMA_READ);
1475 		if (error)
1476 			goto dmamem_unmap;
1477 
1478 		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap, 0, datalen,
1479 		    BUS_DMASYNC_PREREAD);
1480 	} else {
1481 		ptr = malloc(datalen, M_DEVBUF, M_NOWAIT | M_ZERO);
1482 		if (ptr == NULL)
1483 			goto out;
1484 	}
1485 
1486 	memset(&cmd, 0, sizeof(cmd));
1487 	cmd.c_data = ptr;
1488 	cmd.c_datalen = datalen;
1489 	cmd.c_blklen = datalen;
1490 	cmd.c_opcode = opcode;
1491 	cmd.c_arg = 0;
1492 	cmd.c_flags = SCF_CMD_ADTC | SCF_CMD_READ | SCF_RSP_SPI_R1;
1493 	if (opcode == MMC_SEND_EXT_CSD)
1494 		SET(cmd.c_flags, SCF_RSP_R1);
1495 	else
1496 		SET(cmd.c_flags, SCF_RSP_R2);
1497 	if (ISSET(sc->sc_caps, SMC_CAPS_DMA))
1498 		cmd.c_dmamap = sc->sc_dmap;
1499 
1500 	error = sdmmc_mmc_command(sc, &cmd);
1501 	if (error == 0) {
1502 		if (ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
1503 			bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap, 0, datalen,
1504 			    BUS_DMASYNC_POSTREAD);
1505 		}
1506 		memcpy(data, ptr, datalen);
1507 #ifdef SDMMC_DEBUG
1508 		sdmmc_dump_data("CXD", data, datalen);
1509 #endif
1510 	}
1511 
1512 out:
1513 	if (ptr != NULL) {
1514 		if (ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
1515 			bus_dmamap_unload(sc->sc_dmat, sc->sc_dmap);
1516 dmamem_unmap:
1517 			bus_dmamem_unmap(sc->sc_dmat, ptr, datalen);
1518 dmamem_free:
1519 			bus_dmamem_free(sc->sc_dmat, ds, rseg);
1520 		} else {
1521 			free(ptr, M_DEVBUF);
1522 		}
1523 	}
1524 	return error;
1525 }
1526 
1527 static int
1528 sdmmc_set_bus_width(struct sdmmc_function *sf, int width)
1529 {
1530 	struct sdmmc_softc *sc = sf->sc;
1531 	struct sdmmc_command cmd;
1532 	int error;
1533 
1534 	if (ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE))
1535 		return ENODEV;
1536 
1537 	memset(&cmd, 0, sizeof(cmd));
1538 	cmd.c_opcode = SD_APP_SET_BUS_WIDTH;
1539 	cmd.c_flags = SCF_RSP_R1 | SCF_CMD_AC;
1540 
1541 	switch (width) {
1542 	case 1:
1543 		cmd.c_arg = SD_ARG_BUS_WIDTH_1;
1544 		break;
1545 
1546 	case 4:
1547 		cmd.c_arg = SD_ARG_BUS_WIDTH_4;
1548 		break;
1549 
1550 	default:
1551 		return EINVAL;
1552 	}
1553 
1554 	error = sdmmc_app_command(sc, sf, &cmd);
1555 	if (error == 0)
1556 		error = sdmmc_chip_bus_width(sc->sc_sct, sc->sc_sch, width);
1557 	return error;
1558 }
1559 
1560 static int
1561 sdmmc_mem_sd_switch(struct sdmmc_function *sf, int mode, int group,
1562     int function, sdmmc_bitfield512_t *status)
1563 {
1564 	struct sdmmc_softc *sc = sf->sc;
1565 	struct sdmmc_command cmd;
1566 	bus_dma_segment_t ds[1];
1567 	void *ptr = NULL;
1568 	int gsft, rseg, error = 0;
1569 	const int statlen = 64;
1570 
1571 	if (sf->scr.sd_spec >= SCR_SD_SPEC_VER_1_10 &&
1572 	    !ISSET(sf->csd.ccc, SD_CSD_CCC_SWITCH))
1573 		return EINVAL;
1574 
1575 	if (group <= 0 || group > 6 ||
1576 	    function < 0 || function > 15)
1577 		return EINVAL;
1578 
1579 	gsft = (group - 1) << 2;
1580 
1581 	if (ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
1582 		error = bus_dmamem_alloc(sc->sc_dmat, statlen, PAGE_SIZE, 0, ds,
1583 		    1, &rseg, BUS_DMA_NOWAIT);
1584 		if (error)
1585 			goto out;
1586 		error = bus_dmamem_map(sc->sc_dmat, ds, 1, statlen, &ptr,
1587 		    BUS_DMA_NOWAIT);
1588 		if (error)
1589 			goto dmamem_free;
1590 		error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmap, ptr, statlen,
1591 		    NULL, BUS_DMA_NOWAIT|BUS_DMA_STREAMING|BUS_DMA_READ);
1592 		if (error)
1593 			goto dmamem_unmap;
1594 
1595 		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap, 0, statlen,
1596 		    BUS_DMASYNC_PREREAD);
1597 	} else {
1598 		ptr = malloc(statlen, M_DEVBUF, M_NOWAIT | M_ZERO);
1599 		if (ptr == NULL)
1600 			goto out;
1601 	}
1602 
1603 	memset(&cmd, 0, sizeof(cmd));
1604 	cmd.c_data = ptr;
1605 	cmd.c_datalen = statlen;
1606 	cmd.c_blklen = statlen;
1607 	cmd.c_opcode = SD_SEND_SWITCH_FUNC;
1608 	cmd.c_arg =
1609 	    (!!mode << 31) | (function << gsft) | (0x00ffffff & ~(0xf << gsft));
1610 	cmd.c_flags = SCF_CMD_ADTC | SCF_CMD_READ | SCF_RSP_R1 | SCF_RSP_SPI_R1;
1611 	if (ISSET(sc->sc_caps, SMC_CAPS_DMA))
1612 		cmd.c_dmamap = sc->sc_dmap;
1613 
1614 	error = sdmmc_mmc_command(sc, &cmd);
1615 	if (error == 0) {
1616 		if (ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
1617 			bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap, 0, statlen,
1618 			    BUS_DMASYNC_POSTREAD);
1619 		}
1620 		memcpy(status, ptr, statlen);
1621 	}
1622 
1623 out:
1624 	if (ptr != NULL) {
1625 		if (ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
1626 			bus_dmamap_unload(sc->sc_dmat, sc->sc_dmap);
1627 dmamem_unmap:
1628 			bus_dmamem_unmap(sc->sc_dmat, ptr, statlen);
1629 dmamem_free:
1630 			bus_dmamem_free(sc->sc_dmat, ds, rseg);
1631 		} else {
1632 			free(ptr, M_DEVBUF);
1633 		}
1634 	}
1635 
1636 	if (error == 0)
1637 		sdmmc_be512_to_bitfield512(status);
1638 
1639 	return error;
1640 }
1641 
/*
 * Issue MMC SWITCH (CMD6) to write one byte of EXT_CSD:
 * EXT_CSD[index] = value, using command set `set'.
 *
 * For a cache flush, or an HS_TIMING value of 2 or greater
 * (HS200/HS400 range), the card may stay busy after the R1b response,
 * so poll CMD13 (SEND_STATUS) until READY_FOR_DATA is set, failing
 * with EINVAL if the card reports SWITCH_ERROR.
 *
 * `poll' makes both the switch and the status commands use polling
 * (SCF_POLL) instead of interrupts.
 */
static int
sdmmc_mem_mmc_switch(struct sdmmc_function *sf, uint8_t set, uint8_t index,
    uint8_t value, bool poll)
{
	struct sdmmc_softc *sc = sf->sc;
	struct sdmmc_command cmd;
	int error;

	memset(&cmd, 0, sizeof(cmd));
	cmd.c_opcode = MMC_SWITCH;
	/* Argument layout: access mode | index | value | cmd set. */
	cmd.c_arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
	    (index << 16) | (value << 8) | set;
	cmd.c_flags = SCF_RSP_SPI_R1B | SCF_RSP_R1B | SCF_CMD_AC;

	if (poll)
		cmd.c_flags |= SCF_POLL;

	error = sdmmc_mmc_command(sc, &cmd);
	if (error)
		return error;

	if (index == EXT_CSD_FLUSH_CACHE || (index == EXT_CSD_HS_TIMING && value >= 2)) {
		/* Wait for the card to finish the switch internally. */
		do {
			memset(&cmd, 0, sizeof(cmd));
			cmd.c_opcode = MMC_SEND_STATUS;
			if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE))
				cmd.c_arg = MMC_ARG_RCA(sf->rca);
			cmd.c_flags = SCF_CMD_AC | SCF_RSP_R1 | SCF_RSP_SPI_R2;
			if (poll)
				cmd.c_flags |= SCF_POLL;
			error = sdmmc_mmc_command(sc, &cmd);
			if (error)
				break;
			if (ISSET(MMC_R1(cmd.c_resp), MMC_R1_SWITCH_ERROR)) {
				aprint_error_dev(sc->sc_dev, "switch error\n");
				return EINVAL;
			}
			/* XXX time out */
		} while (!ISSET(MMC_R1(cmd.c_resp), MMC_R1_READY_FOR_DATA));

		if (error) {
			aprint_error_dev(sc->sc_dev,
			    "error waiting for data ready after switch command: %d\n",
			    error);
			return error;
		}
	}

	return 0;
}
1692 
1693 /*
1694  * SPI mode function
1695  */
1696 static int
1697 sdmmc_mem_spi_read_ocr(struct sdmmc_softc *sc, uint32_t hcs, uint32_t *card_ocr)
1698 {
1699 	struct sdmmc_command cmd;
1700 	int error;
1701 
1702 	memset(&cmd, 0, sizeof(cmd));
1703 	cmd.c_opcode = MMC_READ_OCR;
1704 	cmd.c_arg = hcs ? MMC_OCR_HCS : 0;
1705 	cmd.c_flags = SCF_RSP_SPI_R3;
1706 
1707 	error = sdmmc_mmc_command(sc, &cmd);
1708 	if (error == 0 && card_ocr != NULL)
1709 		*card_ocr = cmd.c_resp[1];
1710 	DPRINTF(("%s: sdmmc_mem_spi_read_ocr: error=%d, ocr=%#x\n",
1711 	    SDMMCDEVNAME(sc), error, cmd.c_resp[1]));
1712 	return error;
1713 }
1714 
1715 /*
1716  * read/write function
1717  */
1718 /* read */
1719 static int
1720 sdmmc_mem_single_read_block(struct sdmmc_function *sf, uint32_t blkno,
1721     u_char *data, size_t datalen)
1722 {
1723 	struct sdmmc_softc *sc = sf->sc;
1724 	int error = 0;
1725 	int i;
1726 
1727 	KASSERT((datalen % SDMMC_SECTOR_SIZE) == 0);
1728 	KASSERT(!ISSET(sc->sc_caps, SMC_CAPS_DMA));
1729 
1730 	for (i = 0; i < datalen / SDMMC_SECTOR_SIZE; i++) {
1731 		error = sdmmc_mem_read_block_subr(sf, sc->sc_dmap, blkno + i,
1732 		    data + i * SDMMC_SECTOR_SIZE, SDMMC_SECTOR_SIZE);
1733 		if (error)
1734 			break;
1735 	}
1736 	return error;
1737 }
1738 
1739 /*
1740  * Simulate multi-segment dma transfer.
1741  */
static int
sdmmc_mem_single_segment_dma_read_block(struct sdmmc_function *sf,
    uint32_t blkno, u_char *data, size_t datalen)
{
	struct sdmmc_softc *sc = sf->sc;
	bool use_bbuf = false;
	int error = 0;
	int i;

	/*
	 * The transfer (already loaded into sc->sc_dmap by the caller)
	 * must be issued one segment at a time.  If any segment is not
	 * a multiple of the sector size, fall back to the per-function
	 * bounce buffer instead.
	 */
	for (i = 0; i < sc->sc_dmap->dm_nsegs; i++) {
		size_t len = sc->sc_dmap->dm_segs[i].ds_len;
		if ((len % SDMMC_SECTOR_SIZE) != 0) {
			use_bbuf = true;
			break;
		}
	}
	if (use_bbuf) {
		/*
		 * Bounce-buffer path: DMA into sf->bbuf (via its
		 * pre-loaded map sf->bbuf_dmap), then copy out.
		 * NOTE(review): `data' is passed as c_data while the
		 * DMA map is bbuf_dmap; presumably c_data is unused by
		 * DMA-capable controllers — confirm.
		 */
		bus_dmamap_sync(sc->sc_dmat, sf->bbuf_dmap, 0, datalen,
		    BUS_DMASYNC_PREREAD);

		error = sdmmc_mem_read_block_subr(sf, sf->bbuf_dmap,
		    blkno, data, datalen);
		if (error) {
			bus_dmamap_unload(sc->sc_dmat, sf->bbuf_dmap);
			return error;
		}

		bus_dmamap_sync(sc->sc_dmat, sf->bbuf_dmap, 0, datalen,
		    BUS_DMASYNC_POSTREAD);

		/* Copy from bounce buffer */
		memcpy(data, sf->bbuf, datalen);

		return 0;
	}

	/*
	 * All segments are sector-aligned: load each segment into the
	 * single-segment map sf->sseg_dmap and transfer it separately.
	 */
	for (i = 0; i < sc->sc_dmap->dm_nsegs; i++) {
		size_t len = sc->sc_dmap->dm_segs[i].ds_len;

		error = bus_dmamap_load(sc->sc_dmat, sf->sseg_dmap,
		    data, len, NULL, BUS_DMA_NOWAIT|BUS_DMA_READ);
		if (error)
			return error;

		bus_dmamap_sync(sc->sc_dmat, sf->sseg_dmap, 0, len,
		    BUS_DMASYNC_PREREAD);

		error = sdmmc_mem_read_block_subr(sf, sf->sseg_dmap,
		    blkno, data, len);
		if (error) {
			bus_dmamap_unload(sc->sc_dmat, sf->sseg_dmap);
			return error;
		}

		bus_dmamap_sync(sc->sc_dmat, sf->sseg_dmap, 0, len,
		    BUS_DMASYNC_POSTREAD);

		bus_dmamap_unload(sc->sc_dmat, sf->sseg_dmap);

		/* Advance past the segment just transferred. */
		blkno += len / SDMMC_SECTOR_SIZE;
		data += len;
	}
	return 0;
}
1806 
/*
 * Core read: select the card (non-SPI), issue CMD17/CMD18 for
 * `datalen' bytes at `blkno', send STOP_TRANSMISSION after a
 * multi-block read when the controller lacks auto-stop, and finally
 * poll SEND_STATUS until the card is ready again (non-SPI).
 * `dmap' is the (already loaded and synced) map used when the
 * controller does DMA.
 */
static int
sdmmc_mem_read_block_subr(struct sdmmc_function *sf, bus_dmamap_t dmap,
    uint32_t blkno, u_char *data, size_t datalen)
{
	struct sdmmc_softc *sc = sf->sc;
	struct sdmmc_command cmd;
	int error;

	if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
		error = sdmmc_select_card(sc, sf);
		if (error)
			goto out;
	}

	memset(&cmd, 0, sizeof(cmd));
	cmd.c_data = data;
	cmd.c_datalen = datalen;
	cmd.c_blklen = SDMMC_SECTOR_SIZE;
	cmd.c_opcode = (cmd.c_datalen / cmd.c_blklen) > 1 ?
	    MMC_READ_BLOCK_MULTIPLE : MMC_READ_BLOCK_SINGLE;
	cmd.c_arg = blkno;
	/* Non-SDHC cards address by byte, not by sector. */
	if (!ISSET(sf->flags, SFF_SDHC))
		cmd.c_arg <<= SDMMC_SECTOR_SIZE_SB;
	cmd.c_flags = SCF_CMD_ADTC | SCF_CMD_READ | SCF_RSP_R1 | SCF_RSP_SPI_R1;
	if (ISSET(sf->flags, SFF_SDHC))
		cmd.c_flags |= SCF_XFER_SDHC;
	if (ISSET(sc->sc_caps, SMC_CAPS_DMA))
		cmd.c_dmamap = dmap;

	sc->sc_ev_xfer.ev_count++;

	error = sdmmc_mmc_command(sc, &cmd);
	if (error) {
		sc->sc_ev_xfer_error.ev_count++;
		goto out;
	}

	/*
	 * Bucket the transfer size by its power-of-two alignment:
	 * ctz(datalen) in [9..16] covers 512B..64KB-aligned lengths.
	 */
	const u_int counter = __builtin_ctz(cmd.c_datalen);
	if (counter >= 9 && counter <= 16) {
		sc->sc_ev_xfer_aligned[counter - 9].ev_count++;
	} else {
		sc->sc_ev_xfer_unaligned.ev_count++;
	}

	if (!ISSET(sc->sc_caps, SMC_CAPS_AUTO_STOP)) {
		if (cmd.c_opcode == MMC_READ_BLOCK_MULTIPLE) {
			memset(&cmd, 0, sizeof cmd);
			cmd.c_opcode = MMC_STOP_TRANSMISSION;
			cmd.c_arg = MMC_ARG_RCA(sf->rca);
			cmd.c_flags = SCF_CMD_AC | SCF_RSP_R1B | SCF_RSP_SPI_R1B;
			error = sdmmc_mmc_command(sc, &cmd);
			if (error)
				goto out;
		}
	}

	if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
		/* Wait until the card leaves its busy state. */
		do {
			memset(&cmd, 0, sizeof(cmd));
			cmd.c_opcode = MMC_SEND_STATUS;
			if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE))
				cmd.c_arg = MMC_ARG_RCA(sf->rca);
			cmd.c_flags = SCF_CMD_AC | SCF_RSP_R1 | SCF_RSP_SPI_R2;
			error = sdmmc_mmc_command(sc, &cmd);
			if (error)
				break;
			/* XXX time out */
		} while (!ISSET(MMC_R1(cmd.c_resp), MMC_R1_READY_FOR_DATA));
	}

out:
	return error;
}
1880 
/*
 * Public entry point: read `datalen' bytes at sector `blkno' into
 * `data'.  Serializes on the softc lock and dispatches to the
 * single-block, PIO, bounce/per-segment, or direct-DMA strategy
 * depending on the controller's capabilities.
 */
int
sdmmc_mem_read_block(struct sdmmc_function *sf, uint32_t blkno, u_char *data,
    size_t datalen)
{
	struct sdmmc_softc *sc = sf->sc;
	int error;

	SDMMC_LOCK(sc);
	mutex_enter(&sc->sc_mtx);

	/* Controller can only do one block per command. */
	if (ISSET(sc->sc_caps, SMC_CAPS_SINGLE_ONLY)) {
		error = sdmmc_mem_single_read_block(sf, blkno, data, datalen);
		goto out;
	}

	/* No DMA: plain PIO transfer. */
	if (!ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
		error = sdmmc_mem_read_block_subr(sf, sc->sc_dmap, blkno, data,
		    datalen);
		goto out;
	}

	/* DMA transfer */
	error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmap, data, datalen, NULL,
	    BUS_DMA_NOWAIT|BUS_DMA_READ);
	if (error)
		goto out;

#ifdef SDMMC_DEBUG
	printf("data=%p, datalen=%zu\n", data, datalen);
	for (int i = 0; i < sc->sc_dmap->dm_nsegs; i++) {
		printf("seg#%d: addr=%#lx, size=%#lx\n", i,
		    (u_long)sc->sc_dmap->dm_segs[i].ds_addr,
		    (u_long)sc->sc_dmap->dm_segs[i].ds_len);
	}
#endif

	/*
	 * Multi-segment mapping on a controller that only handles a
	 * single segment: simulate it segment by segment.
	 */
	if (sc->sc_dmap->dm_nsegs > 1
	    && !ISSET(sc->sc_caps, SMC_CAPS_MULTI_SEG_DMA)) {
		error = sdmmc_mem_single_segment_dma_read_block(sf, blkno,
		    data, datalen);
		goto unload;
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap, 0, datalen,
	    BUS_DMASYNC_PREREAD);

	error = sdmmc_mem_read_block_subr(sf, sc->sc_dmap, blkno, data,
	    datalen);
	if (error)
		goto unload;

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap, 0, datalen,
	    BUS_DMASYNC_POSTREAD);
unload:
	bus_dmamap_unload(sc->sc_dmat, sc->sc_dmap);

out:
	mutex_exit(&sc->sc_mtx);
	SDMMC_UNLOCK(sc);

	return error;
}
1943 
1944 /* write */
1945 static int
1946 sdmmc_mem_single_write_block(struct sdmmc_function *sf, uint32_t blkno,
1947     u_char *data, size_t datalen)
1948 {
1949 	struct sdmmc_softc *sc = sf->sc;
1950 	int error = 0;
1951 	int i;
1952 
1953 	KASSERT((datalen % SDMMC_SECTOR_SIZE) == 0);
1954 	KASSERT(!ISSET(sc->sc_caps, SMC_CAPS_DMA));
1955 
1956 	for (i = 0; i < datalen / SDMMC_SECTOR_SIZE; i++) {
1957 		error = sdmmc_mem_write_block_subr(sf, sc->sc_dmap, blkno + i,
1958 		    data + i * SDMMC_SECTOR_SIZE, SDMMC_SECTOR_SIZE);
1959 		if (error)
1960 			break;
1961 	}
1962 	return error;
1963 }
1964 
1965 /*
1966  * Simulate multi-segment dma transfer.
1967  */
static int
sdmmc_mem_single_segment_dma_write_block(struct sdmmc_function *sf,
    uint32_t blkno, u_char *data, size_t datalen)
{
	struct sdmmc_softc *sc = sf->sc;
	bool use_bbuf = false;
	int error = 0;
	int i;

	/*
	 * The transfer (already loaded into sc->sc_dmap by the caller)
	 * must be issued one segment at a time.  If any segment is not
	 * a multiple of the sector size, fall back to the per-function
	 * bounce buffer instead.
	 */
	for (i = 0; i < sc->sc_dmap->dm_nsegs; i++) {
		size_t len = sc->sc_dmap->dm_segs[i].ds_len;
		if ((len % SDMMC_SECTOR_SIZE) != 0) {
			use_bbuf = true;
			break;
		}
	}
	if (use_bbuf) {
		/* Copy to bounce buffer */
		memcpy(sf->bbuf, data, datalen);

		bus_dmamap_sync(sc->sc_dmat, sf->bbuf_dmap, 0, datalen,
		    BUS_DMASYNC_PREWRITE);

		/*
		 * NOTE(review): `data' is passed as c_data while the
		 * DMA map is bbuf_dmap (which covers sf->bbuf);
		 * presumably c_data is unused by DMA-capable
		 * controllers — confirm, else this should be sf->bbuf.
		 */
		error = sdmmc_mem_write_block_subr(sf, sf->bbuf_dmap,
		    blkno, data, datalen);
		if (error) {
			bus_dmamap_unload(sc->sc_dmat, sf->bbuf_dmap);
			return error;
		}

		bus_dmamap_sync(sc->sc_dmat, sf->bbuf_dmap, 0, datalen,
		    BUS_DMASYNC_POSTWRITE);

		return 0;
	}

	/*
	 * All segments are sector-aligned: load each segment into the
	 * single-segment map sf->sseg_dmap and transfer it separately.
	 */
	for (i = 0; i < sc->sc_dmap->dm_nsegs; i++) {
		size_t len = sc->sc_dmap->dm_segs[i].ds_len;

		error = bus_dmamap_load(sc->sc_dmat, sf->sseg_dmap,
		    data, len, NULL, BUS_DMA_NOWAIT|BUS_DMA_WRITE);
		if (error)
			return error;

		bus_dmamap_sync(sc->sc_dmat, sf->sseg_dmap, 0, len,
		    BUS_DMASYNC_PREWRITE);

		error = sdmmc_mem_write_block_subr(sf, sf->sseg_dmap,
		    blkno, data, len);
		if (error) {
			bus_dmamap_unload(sc->sc_dmat, sf->sseg_dmap);
			return error;
		}

		bus_dmamap_sync(sc->sc_dmat, sf->sseg_dmap, 0, len,
		    BUS_DMASYNC_POSTWRITE);

		bus_dmamap_unload(sc->sc_dmat, sf->sseg_dmap);

		/* Advance past the segment just transferred. */
		blkno += len / SDMMC_SECTOR_SIZE;
		data += len;
	}

	return error;
}
2033 
/*
 * Core write: select the card (non-SPI), pre-erase hint via ACMD23
 * for multi-block SD writes, issue CMD24/CMD25 for `datalen' bytes at
 * `blkno', send STOP_TRANSMISSION after a multi-block write when the
 * controller lacks auto-stop, and finally poll SEND_STATUS until the
 * card is ready again (non-SPI).  `dmap' is the (already loaded and
 * synced) map used when the controller does DMA.
 */
static int
sdmmc_mem_write_block_subr(struct sdmmc_function *sf, bus_dmamap_t dmap,
    uint32_t blkno, u_char *data, size_t datalen)
{
	struct sdmmc_softc *sc = sf->sc;
	struct sdmmc_command cmd;
	int error;

	if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
		error = sdmmc_select_card(sc, sf);
		if (error)
			goto out;
	}

	const int nblk = howmany(datalen, SDMMC_SECTOR_SIZE);
	if (ISSET(sc->sc_flags, SMF_SD_MODE) && nblk > 1) {
		/* Set the number of write blocks to be pre-erased */
		memset(&cmd, 0, sizeof(cmd));
		cmd.c_opcode = SD_APP_SET_WR_BLK_ERASE_COUNT;
		cmd.c_flags = SCF_RSP_R1 | SCF_RSP_SPI_R1 | SCF_CMD_AC;
		cmd.c_arg = nblk;
		error = sdmmc_app_command(sc, sf, &cmd);
		if (error)
			goto out;
	}

	memset(&cmd, 0, sizeof(cmd));
	cmd.c_data = data;
	cmd.c_datalen = datalen;
	cmd.c_blklen = SDMMC_SECTOR_SIZE;
	cmd.c_opcode = (cmd.c_datalen / cmd.c_blklen) > 1 ?
	    MMC_WRITE_BLOCK_MULTIPLE : MMC_WRITE_BLOCK_SINGLE;
	cmd.c_arg = blkno;
	/* Non-SDHC cards address by byte, not by sector. */
	if (!ISSET(sf->flags, SFF_SDHC))
		cmd.c_arg <<= SDMMC_SECTOR_SIZE_SB;
	cmd.c_flags = SCF_CMD_ADTC | SCF_RSP_R1;
	if (ISSET(sf->flags, SFF_SDHC))
		cmd.c_flags |= SCF_XFER_SDHC;
	if (ISSET(sc->sc_caps, SMC_CAPS_DMA))
		cmd.c_dmamap = dmap;

	sc->sc_ev_xfer.ev_count++;

	error = sdmmc_mmc_command(sc, &cmd);
	if (error) {
		sc->sc_ev_xfer_error.ev_count++;
		goto out;
	}

	/*
	 * Bucket the transfer size by its power-of-two alignment:
	 * ctz(datalen) in [9..16] covers 512B..64KB-aligned lengths.
	 */
	const u_int counter = __builtin_ctz(cmd.c_datalen);
	if (counter >= 9 && counter <= 16) {
		sc->sc_ev_xfer_aligned[counter - 9].ev_count++;
	} else {
		sc->sc_ev_xfer_unaligned.ev_count++;
	}

	if (!ISSET(sc->sc_caps, SMC_CAPS_AUTO_STOP)) {
		if (cmd.c_opcode == MMC_WRITE_BLOCK_MULTIPLE) {
			memset(&cmd, 0, sizeof(cmd));
			cmd.c_opcode = MMC_STOP_TRANSMISSION;
			cmd.c_flags = SCF_CMD_AC | SCF_RSP_R1B | SCF_RSP_SPI_R1B;
			error = sdmmc_mmc_command(sc, &cmd);
			if (error)
				goto out;
		}
	}

	if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
		/* Wait until the card leaves its busy (programming) state. */
		do {
			memset(&cmd, 0, sizeof(cmd));
			cmd.c_opcode = MMC_SEND_STATUS;
			if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE))
				cmd.c_arg = MMC_ARG_RCA(sf->rca);
			cmd.c_flags = SCF_CMD_AC | SCF_RSP_R1 | SCF_RSP_SPI_R2;
			error = sdmmc_mmc_command(sc, &cmd);
			if (error)
				break;
			/* XXX time out */
		} while (!ISSET(MMC_R1(cmd.c_resp), MMC_R1_READY_FOR_DATA));
	}

out:
	return error;
}
2118 
/*
 * Public entry point for writing `datalen' bytes at sector `blkno'.
 * Takes the softc lock, rejects writes to write-protected media, and
 * dispatches to one of three strategies depending on controller
 * capabilities: sector-at-a-time (SMC_CAPS_SINGLE_ONLY), PIO, or DMA
 * (with a single-segment fallback when the map has multiple segments
 * but the controller lacks SMC_CAPS_MULTI_SEG_DMA).
 * Returns 0 on success or an errno value.
 */
int
sdmmc_mem_write_block(struct sdmmc_function *sf, uint32_t blkno, u_char *data,
    size_t datalen)
{
	struct sdmmc_softc *sc = sf->sc;
	int error;

	SDMMC_LOCK(sc);
	mutex_enter(&sc->sc_mtx);

	/* Honour the mechanical/host write-protect indication. */
	if (sdmmc_chip_write_protect(sc->sc_sct, sc->sc_sch)) {
		aprint_normal_dev(sc->sc_dev, "write-protected\n");
		error = EIO;
		goto out;
	}

	/* Controller can only move one sector per command. */
	if (ISSET(sc->sc_caps, SMC_CAPS_SINGLE_ONLY)) {
		error = sdmmc_mem_single_write_block(sf, blkno, data, datalen);
		goto out;
	}

	/* No DMA support: plain PIO transfer. */
	if (!ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
		error = sdmmc_mem_write_block_subr(sf, sc->sc_dmap, blkno, data,
		    datalen);
		goto out;
	}

	/* DMA transfer */
	error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmap, data, datalen, NULL,
	    BUS_DMA_NOWAIT|BUS_DMA_WRITE);
	if (error)
		goto out;

#ifdef SDMMC_DEBUG
	aprint_normal_dev(sc->sc_dev, "%s: data=%p, datalen=%zu\n",
	    __func__, data, datalen);
	for (int i = 0; i < sc->sc_dmap->dm_nsegs; i++) {
		aprint_normal_dev(sc->sc_dev,
		    "%s: seg#%d: addr=%#lx, size=%#lx\n", __func__, i,
		    (u_long)sc->sc_dmap->dm_segs[i].ds_addr,
		    (u_long)sc->sc_dmap->dm_segs[i].ds_len);
	}
#endif

	/*
	 * If the buffer mapped to more than one DMA segment and the
	 * controller cannot chain segments, fall back to a bounce
	 * strategy that issues one segment at a time.
	 */
	if (sc->sc_dmap->dm_nsegs > 1
	    && !ISSET(sc->sc_caps, SMC_CAPS_MULTI_SEG_DMA)) {
		error = sdmmc_mem_single_segment_dma_write_block(sf, blkno,
		    data, datalen);
		goto unload;
	}

	/* Flush CPU caches before the device reads the buffer. */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap, 0, datalen,
	    BUS_DMASYNC_PREWRITE);

	error = sdmmc_mem_write_block_subr(sf, sc->sc_dmap, blkno, data,
	    datalen);
	if (error)
		goto unload;

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap, 0, datalen,
	    BUS_DMASYNC_POSTWRITE);
unload:
	bus_dmamap_unload(sc->sc_dmat, sc->sc_dmap);

out:
	mutex_exit(&sc->sc_mtx);
	SDMMC_UNLOCK(sc);

	return error;
}
2189 
2190 int
2191 sdmmc_mem_discard(struct sdmmc_function *sf, uint32_t sblkno, uint32_t eblkno)
2192 {
2193 	struct sdmmc_softc *sc = sf->sc;
2194 	struct sdmmc_command cmd;
2195 	int error;
2196 
2197 	if (ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE))
2198 		return ENODEV;	/* XXX not tested */
2199 
2200 	if (eblkno < sblkno)
2201 		return EINVAL;
2202 
2203 	SDMMC_LOCK(sc);
2204 	mutex_enter(&sc->sc_mtx);
2205 
2206 	/* Set the address of the first write block to be erased */
2207 	memset(&cmd, 0, sizeof(cmd));
2208 	cmd.c_opcode = ISSET(sc->sc_flags, SMF_SD_MODE) ?
2209 	    SD_ERASE_WR_BLK_START : MMC_TAG_ERASE_GROUP_START;
2210 	cmd.c_arg = sblkno;
2211 	if (!ISSET(sf->flags, SFF_SDHC))
2212 		cmd.c_arg <<= SDMMC_SECTOR_SIZE_SB;
2213 	cmd.c_flags = SCF_CMD_AC | SCF_RSP_R1;
2214 	error = sdmmc_mmc_command(sc, &cmd);
2215 	if (error)
2216 		goto out;
2217 
2218 	/* Set the address of the last write block to be erased */
2219 	memset(&cmd, 0, sizeof(cmd));
2220 	cmd.c_opcode = ISSET(sc->sc_flags, SMF_SD_MODE) ?
2221 	    SD_ERASE_WR_BLK_END : MMC_TAG_ERASE_GROUP_END;
2222 	cmd.c_arg = eblkno;
2223 	if (!ISSET(sf->flags, SFF_SDHC))
2224 		cmd.c_arg <<= SDMMC_SECTOR_SIZE_SB;
2225 	cmd.c_flags = SCF_CMD_AC | SCF_RSP_R1;
2226 	error = sdmmc_mmc_command(sc, &cmd);
2227 	if (error)
2228 		goto out;
2229 
2230 	/* Start the erase operation */
2231 	memset(&cmd, 0, sizeof(cmd));
2232 	cmd.c_opcode = MMC_ERASE;
2233 	cmd.c_flags = SCF_CMD_AC | SCF_RSP_R1B;
2234 	error = sdmmc_mmc_command(sc, &cmd);
2235 	if (error)
2236 		goto out;
2237 
2238 out:
2239 	mutex_exit(&sc->sc_mtx);
2240 	SDMMC_UNLOCK(sc);
2241 
2242 #ifdef SDMMC_DEBUG
2243 	device_printf(sc->sc_dev, "discard blk %u-%u error %d\n",
2244 	    sblkno, eblkno, error);
2245 #endif
2246 
2247 	return error;
2248 }
2249 
2250 int
2251 sdmmc_mem_flush_cache(struct sdmmc_function *sf, bool poll)
2252 {
2253 	struct sdmmc_softc *sc = sf->sc;
2254 	int error;
2255 
2256 	if (!ISSET(sf->flags, SFF_CACHE_ENABLED))
2257 		return 0;
2258 
2259 	SDMMC_LOCK(sc);
2260 	mutex_enter(&sc->sc_mtx);
2261 
2262 	error = sdmmc_mem_mmc_switch(sf,
2263 	    EXT_CSD_CMD_SET_NORMAL, EXT_CSD_FLUSH_CACHE,
2264 	    EXT_CSD_FLUSH_CACHE_FLUSH, poll);
2265 
2266 	mutex_exit(&sc->sc_mtx);
2267 	SDMMC_UNLOCK(sc);
2268 
2269 #ifdef SDMMC_DEBUG
2270 	device_printf(sc->sc_dev, "mmc flush cache error %d\n", error);
2271 #endif
2272 
2273 	return error;
2274 }
2275