xref: /netbsd-src/sys/dev/sdmmc/sdmmc_mem.c (revision e6c7e151de239c49d2e38720a061ed9d1fa99309)
1 /*	$NetBSD: sdmmc_mem.c,v 1.71 2020/01/04 22:28:26 mlelstv Exp $	*/
2 /*	$OpenBSD: sdmmc_mem.c,v 1.10 2009/01/09 10:55:22 jsg Exp $	*/
3 
4 /*
5  * Copyright (c) 2006 Uwe Stuehler <uwe@openbsd.org>
6  *
7  * Permission to use, copy, modify, and distribute this software for any
8  * purpose with or without fee is hereby granted, provided that the above
9  * copyright notice and this permission notice appear in all copies.
10  *
11  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 /*-
21  * Copyright (C) 2007, 2008, 2009, 2010 NONAKA Kimihiro <nonaka@netbsd.org>
22  * All rights reserved.
23  *
24  * Redistribution and use in source and binary forms, with or without
25  * modification, are permitted provided that the following conditions
26  * are met:
27  * 1. Redistributions of source code must retain the above copyright
28  *    notice, this list of conditions and the following disclaimer.
29  * 2. Redistributions in binary form must reproduce the above copyright
30  *    notice, this list of conditions and the following disclaimer in the
31  *    documentation and/or other materials provided with the distribution.
32  *
33  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
34  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
35  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
36  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
37  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
38  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
39  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
40  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
41  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
42  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
43  */
44 
45 /* Routines for SD/MMC memory cards. */
46 
47 #include <sys/cdefs.h>
48 __KERNEL_RCSID(0, "$NetBSD: sdmmc_mem.c,v 1.71 2020/01/04 22:28:26 mlelstv Exp $");
49 
50 #ifdef _KERNEL_OPT
51 #include "opt_sdmmc.h"
52 #endif
53 
54 #include <sys/param.h>
55 #include <sys/kernel.h>
56 #include <sys/malloc.h>
57 #include <sys/systm.h>
58 #include <sys/device.h>
59 #include <sys/bitops.h>
60 #include <sys/evcnt.h>
61 
62 #include <dev/sdmmc/sdmmcchip.h>
63 #include <dev/sdmmc/sdmmcreg.h>
64 #include <dev/sdmmc/sdmmcvar.h>
65 
66 #ifdef SDMMC_DEBUG
67 #define DPRINTF(s)	do { printf s; } while (/*CONSTCOND*/0)
68 #else
69 #define DPRINTF(s)	do {} while (/*CONSTCOND*/0)
70 #endif
71 
/*
 * 512-bit response buffer (CMD6 switch status / ACMD13 SD status),
 * stored as sixteen 32-bit words so it can be queried bitwise once
 * converted from big-endian by sdmmc_be512_to_bitfield512().
 */
typedef struct { uint32_t _bits[512/32]; } __packed __aligned(4) sdmmc_bitfield512_t;
73 
74 static int sdmmc_mem_sd_init(struct sdmmc_softc *, struct sdmmc_function *);
75 static int sdmmc_mem_mmc_init(struct sdmmc_softc *, struct sdmmc_function *);
76 static int sdmmc_mem_send_cid(struct sdmmc_softc *, sdmmc_response *);
77 static int sdmmc_mem_send_csd(struct sdmmc_softc *, struct sdmmc_function *,
78     sdmmc_response *);
79 static int sdmmc_mem_send_scr(struct sdmmc_softc *, struct sdmmc_function *,
80     uint32_t *scr);
81 static int sdmmc_mem_decode_scr(struct sdmmc_softc *, struct sdmmc_function *);
82 static int sdmmc_mem_send_ssr(struct sdmmc_softc *, struct sdmmc_function *,
83     sdmmc_bitfield512_t *);
84 static int sdmmc_mem_decode_ssr(struct sdmmc_softc *, struct sdmmc_function *,
85     sdmmc_bitfield512_t *);
86 static int sdmmc_mem_send_cxd_data(struct sdmmc_softc *, int, void *, size_t);
87 static int sdmmc_set_bus_width(struct sdmmc_function *, int);
88 static int sdmmc_mem_sd_switch(struct sdmmc_function *, int, int, int, sdmmc_bitfield512_t *);
89 static int sdmmc_mem_mmc_switch(struct sdmmc_function *, uint8_t, uint8_t,
90     uint8_t, bool);
91 static int sdmmc_mem_signal_voltage(struct sdmmc_softc *, int);
92 static int sdmmc_mem_spi_read_ocr(struct sdmmc_softc *, uint32_t, uint32_t *);
93 static int sdmmc_mem_single_read_block(struct sdmmc_function *, uint32_t,
94     u_char *, size_t);
95 static int sdmmc_mem_single_write_block(struct sdmmc_function *, uint32_t,
96     u_char *, size_t);
97 static int sdmmc_mem_single_segment_dma_read_block(struct sdmmc_function *,
98     uint32_t, u_char *, size_t);
99 static int sdmmc_mem_single_segment_dma_write_block(struct sdmmc_function *,
100     uint32_t, u_char *, size_t);
101 static int sdmmc_mem_read_block_subr(struct sdmmc_function *, bus_dmamap_t,
102     uint32_t, u_char *, size_t);
103 static int sdmmc_mem_write_block_subr(struct sdmmc_function *, bus_dmamap_t,
104     uint32_t, u_char *, size_t);
105 
/*
 * SD CMD6 (SWITCH_FUNC) group 1 "access mode" functions.  The table
 * index matches the SD access-mode function number; `v' is the host
 * capability bit required to use the mode and `freq' the resulting
 * maximum bus clock in kHz.
 */
static const struct {
	const char *name;	/* human-readable mode name */
	int v;			/* required SMC_CAPS_* host capability */
	int freq;		/* max clock in kHz */
} switch_group0_functions[] = {
	/* Default/SDR12 */
	{ "Default/SDR12",	 0,			 25000 },

	/* High-Speed/SDR25 */
	{ "High-Speed/SDR25",	SMC_CAPS_SD_HIGHSPEED,	 50000 },

	/* SDR50 */
	{ "SDR50",		SMC_CAPS_UHS_SDR50,	100000 },

	/* SDR104 */
	{ "SDR104",		SMC_CAPS_UHS_SDR104,	208000 },

	/* DDR50 */
	{ "DDR50",		SMC_CAPS_UHS_DDR50,	 50000 },
};
126 
/* Maximum bus clock in kHz, indexed by EXT_CSD HS_TIMING value. */
static const int sdmmc_mmc_timings[] = {
	[EXT_CSD_HS_TIMING_LEGACY]	= 26000,
	[EXT_CSD_HS_TIMING_HIGHSPEED]	= 52000,
	[EXT_CSD_HS_TIMING_HS200]	= 200000
};
132 
133 /*
134  * Initialize SD/MMC memory cards and memory in SDIO "combo" cards.
135  */
int
sdmmc_mem_enable(struct sdmmc_softc *sc)
{
	uint32_t host_ocr;
	uint32_t card_ocr;
	uint32_t new_ocr;
	uint32_t ocr = 0;	/* extra OCR bits (HCS/S18A) to request */
	int error;

	SDMMC_LOCK(sc);

	/* Set host mode to SD "combo" card or SD memory-only. */
	CLR(sc->sc_flags, SMF_UHS_MODE);
	SET(sc->sc_flags, SMF_SD_MODE|SMF_MEM_MODE);

	if (ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE))
		sdmmc_spi_chip_initialize(sc->sc_spi_sct, sc->sc_sch);

	/* Reset memory (*must* do that before CMD55 or CMD1). */
	sdmmc_go_idle_state(sc);

	if (ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
		/*
		 * Check SD Ver.2: a correct CMD8 echo (0x1aa) means the
		 * card understands the HCS bit in ACMD41.
		 */
		error = sdmmc_mem_send_if_cond(sc, 0x1aa, &card_ocr);
		if (error == 0 && card_ocr == 0x1aa)
			SET(ocr, MMC_OCR_HCS);
	}

	/*
	 * Read the SD/MMC memory OCR value by issuing CMD55 followed
	 * by ACMD41 to read the OCR value from memory-only SD cards.
	 * MMC cards will not respond to CMD55 or ACMD41 and this is
	 * how we distinguish them from SD cards.
	 */
mmc_mode:
	error = sdmmc_mem_send_op_cond(sc,
	    ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE) ? ocr : 0, &card_ocr);
	if (error) {
		if (ISSET(sc->sc_flags, SMF_SD_MODE) &&
		    !ISSET(sc->sc_flags, SMF_IO_MODE)) {
			/* Not a SD card, switch to MMC mode. */
			DPRINTF(("%s: switch to MMC mode\n", SDMMCDEVNAME(sc)));
			CLR(sc->sc_flags, SMF_SD_MODE);
			goto mmc_mode;
		}
		if (!ISSET(sc->sc_flags, SMF_SD_MODE)) {
			DPRINTF(("%s: couldn't read memory OCR\n",
			    SDMMCDEVNAME(sc)));
			goto out;
		} else {
			/* Not a "combo" card. */
			CLR(sc->sc_flags, SMF_MEM_MODE);
			error = 0;
			goto out;
		}
	}
	if (ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
		/* get card OCR */
		error = sdmmc_mem_spi_read_ocr(sc, ocr, &card_ocr);
		if (error) {
			DPRINTF(("%s: couldn't read SPI memory OCR\n",
			    SDMMCDEVNAME(sc)));
			goto out;
		}
	}

	/* Set the lowest voltage supported by the card and host. */
	host_ocr = sdmmc_chip_host_ocr(sc->sc_sct, sc->sc_sch);
	error = sdmmc_set_bus_power(sc, host_ocr, card_ocr);
	if (error) {
		DPRINTF(("%s: couldn't supply voltage requested by card\n",
		    SDMMCDEVNAME(sc)));
		goto out;
	}

	DPRINTF(("%s: host_ocr 0x%08x\n", SDMMCDEVNAME(sc), host_ocr));
	DPRINTF(("%s: card_ocr 0x%08x\n", SDMMCDEVNAME(sc), card_ocr));

	host_ocr &= card_ocr; /* only allow the common voltages */
	if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
		if (ISSET(sc->sc_flags, SMF_SD_MODE)) {
			/* Tell the card(s) to enter the idle state (again). */
			sdmmc_go_idle_state(sc);
			/* Check SD Ver.2 (see SPI-mode note above). */
			error = sdmmc_mem_send_if_cond(sc, 0x1aa, &card_ocr);
			if (error == 0 && card_ocr == 0x1aa)
				SET(ocr, MMC_OCR_HCS);

			/* Request 1.8V signalling if the host supports it. */
			if (sdmmc_chip_host_ocr(sc->sc_sct, sc->sc_sch) & MMC_OCR_S18A)
				SET(ocr, MMC_OCR_S18A);
		} else {
			/* MMC: ask for sector (block) addressing. */
			SET(ocr, MMC_OCR_ACCESS_MODE_SECTOR);
		}
	}
	host_ocr |= ocr;

	/* Send the new OCR value until all cards are ready. */
	error = sdmmc_mem_send_op_cond(sc, host_ocr, &new_ocr);
	if (error) {
		DPRINTF(("%s: couldn't send memory OCR\n", SDMMCDEVNAME(sc)));
		goto out;
	}

	if (ISSET(sc->sc_flags, SMF_SD_MODE) && ISSET(new_ocr, MMC_OCR_S18A)) {
		/*
		 * Card and host support low voltage mode, begin switch
		 * sequence (CMD11 followed by the host-side voltage change).
		 */
		struct sdmmc_command cmd;
		memset(&cmd, 0, sizeof(cmd));
		cmd.c_arg = 0;
		cmd.c_flags = SCF_CMD_AC | SCF_RSP_R1;
		cmd.c_opcode = SD_VOLTAGE_SWITCH;
		DPRINTF(("%s: switching card to 1.8V\n", SDMMCDEVNAME(sc)));
		error = sdmmc_mmc_command(sc, &cmd);
		if (error) {
			DPRINTF(("%s: voltage switch command failed\n",
			    SDMMCDEVNAME(sc)));
			goto out;
		}

		error = sdmmc_mem_signal_voltage(sc, SDMMC_SIGNAL_VOLTAGE_180);
		if (error)
			goto out;

		SET(sc->sc_flags, SMF_UHS_MODE);
	}

out:
	SDMMC_UNLOCK(sc);

	if (error)
		printf("%s: %s failed with error %d\n", SDMMCDEVNAME(sc),
		    __func__, error);

	return error;
}
273 
274 static int
275 sdmmc_mem_signal_voltage(struct sdmmc_softc *sc, int signal_voltage)
276 {
277 	int error;
278 
279 	/*
280 	 * Stop the clock
281 	 */
282 	error = sdmmc_chip_bus_clock(sc->sc_sct, sc->sc_sch,
283 	    SDMMC_SDCLK_OFF, false);
284 	if (error)
285 		goto out;
286 
287 	delay(1000);
288 
289 	/*
290 	 * Card switch command was successful, update host controller
291 	 * signal voltage setting.
292 	 */
293 	DPRINTF(("%s: switching host to %s\n", SDMMCDEVNAME(sc),
294 	    signal_voltage == SDMMC_SIGNAL_VOLTAGE_180 ? "1.8V" : "3.3V"));
295 	error = sdmmc_chip_signal_voltage(sc->sc_sct,
296 	    sc->sc_sch, signal_voltage);
297 	if (error)
298 		goto out;
299 
300 	delay(5000);
301 
302 	/*
303 	 * Switch to SDR12 timing
304 	 */
305 	error = sdmmc_chip_bus_clock(sc->sc_sct, sc->sc_sch, 25000,
306 	    false);
307 	if (error)
308 		goto out;
309 
310 	delay(1000);
311 
312 out:
313 	return error;
314 }
315 
316 /*
317  * Read the CSD and CID from all cards and assign each card a unique
318  * relative card address (RCA).  CMD2 is ignored by SDIO-only cards.
319  */
void
sdmmc_mem_scan(struct sdmmc_softc *sc)
{
	sdmmc_response resp;
	struct sdmmc_function *sf;
	uint16_t next_rca;
	int error;
	int retry;

	SDMMC_LOCK(sc);

	/*
	 * CMD2 is a broadcast command understood by SD cards and MMC
	 * cards.  All cards begin to respond to the command, but back
	 * off if another card drives the CMD line to a different level.
	 * Only one card will get its entire response through.  That
	 * card remains silent once it has been assigned a RCA.
	 */
	for (retry = 0; retry < 100; retry++) {
		error = sdmmc_mem_send_cid(sc, &resp);
		if (error) {
			if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE) &&
			    error == ETIMEDOUT) {
				/* No more cards there. */
				break;
			}
			DPRINTF(("%s: couldn't read CID\n", SDMMCDEVNAME(sc)));
			break;
		}

		/*
		 * In MMC mode, find the next available RCA: the host
		 * assigns RCAs, so count the functions already found.
		 * (In SD mode the card proposes its own RCA via CMD3.)
		 */
		next_rca = 1;
		if (!ISSET(sc->sc_flags, SMF_SD_MODE)) {
			SIMPLEQ_FOREACH(sf, &sc->sf_head, sf_list)
				next_rca++;
		}

		/* Allocate a sdmmc_function structure. */
		sf = sdmmc_function_alloc(sc);
		sf->rca = next_rca;

		/*
		 * Remember the CID returned in the CMD2 response for
		 * later decoding.
		 */
		memcpy(sf->raw_cid, resp, sizeof(sf->raw_cid));

		/*
		 * Silence the card by assigning it a unique RCA, or
		 * querying it for its RCA in the case of SD.
		 */
		if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
			if (sdmmc_set_relative_addr(sc, sf) != 0) {
				aprint_error_dev(sc->sc_dev,
				    "couldn't set mem RCA\n");
				sdmmc_function_free(sf);
				break;
			}
		}

		/*
		 * If this is a memory-only card, the card responding
		 * first becomes an alias for SDIO function 0.
		 */
		if (sc->sc_fn0 == NULL)
			sc->sc_fn0 = sf;

		SIMPLEQ_INSERT_TAIL(&sc->sf_head, sf, sf_list);

		/* only one function in SPI mode */
		if (ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE))
			break;
	}

	if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE))
		/* Go to Data Transfer Mode, if possible. */
		sdmmc_chip_bus_rod(sc->sc_sct, sc->sc_sch, 0);

	/*
	 * All cards are either inactive or awaiting further commands.
	 * Read the CSDs and decode the raw CID for each card.  Cards
	 * whose registers can't be read or decoded are kept on the
	 * list but flagged SFF_ERROR.
	 */
	SIMPLEQ_FOREACH(sf, &sc->sf_head, sf_list) {
		error = sdmmc_mem_send_csd(sc, sf, &resp);
		if (error) {
			SET(sf->flags, SFF_ERROR);
			continue;
		}

		if (sdmmc_decode_csd(sc, resp, sf) != 0 ||
		    sdmmc_decode_cid(sc, sf->raw_cid, sf) != 0) {
			SET(sf->flags, SFF_ERROR);
			continue;
		}

#ifdef SDMMC_DEBUG
		printf("%s: CID: ", SDMMCDEVNAME(sc));
		sdmmc_print_cid(&sf->cid);
#endif
	}

	SDMMC_UNLOCK(sc);
}
423 
/*
 * Decode the raw CSD response into sf->csd.  Handles both the SD
 * layouts (CSD version 1.0 and 2.0/SDHC) and the MMC layout.
 * Returns 0 on success, 1 on an unrecognized CSD structure version.
 */
int
sdmmc_decode_csd(struct sdmmc_softc *sc, sdmmc_response resp,
    struct sdmmc_function *sf)
{
	/* TRAN_SPEED(2:0): transfer rate exponent */
	static const int speed_exponent[8] = {
		100 *    1,	/* 100 Kbits/s */
		  1 * 1000,	/*   1 Mbits/s */
		 10 * 1000,	/*  10 Mbits/s */
		100 * 1000,	/* 100 Mbits/s */
		         0,	/* reserved encodings decode to 0 */
		         0,
		         0,
		         0,
	};
	/* TRAN_SPEED(6:3): time mantissa (x10 to avoid fractions) */
	static const int speed_mantissa[16] = {
		0, 10, 12, 13, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60, 70, 80,
	};
	struct sdmmc_csd *csd = &sf->csd;
	int e, m;

	if (ISSET(sc->sc_flags, SMF_SD_MODE)) {
		/*
		 * CSD version 1.0 corresponds to SD system
		 * specification version 1.0 - 1.10. (SanDisk, 3.5.3)
		 */
		csd->csdver = SD_CSD_CSDVER(resp);
		switch (csd->csdver) {
		case SD_CSD_CSDVER_2_0:
			/* Version 2.0: high-capacity (SDHC) layout. */
			DPRINTF(("%s: SD Ver.2.0\n", SDMMCDEVNAME(sc)));
			SET(sf->flags, SFF_SDHC);
			csd->capacity = SD_CSD_V2_CAPACITY(resp);
			csd->read_bl_len = SD_CSD_V2_BL_LEN;
			break;

		case SD_CSD_CSDVER_1_0:
			DPRINTF(("%s: SD Ver.1.0\n", SDMMCDEVNAME(sc)));
			csd->capacity = SD_CSD_CAPACITY(resp);
			csd->read_bl_len = SD_CSD_READ_BL_LEN(resp);
			break;

		default:
			aprint_error_dev(sc->sc_dev,
			    "unknown SD CSD structure version 0x%x\n",
			    csd->csdver);
			return 1;
		}

		csd->mmcver = SD_CSD_MMCVER(resp);
		csd->write_bl_len = SD_CSD_WRITE_BL_LEN(resp);
		csd->r2w_factor = SD_CSD_R2W_FACTOR(resp);
		e = SD_CSD_SPEED_EXP(resp);
		m = SD_CSD_SPEED_MANT(resp);
		/* tran_speed in kbit/s; /10 undoes the mantissa scaling. */
		csd->tran_speed = speed_exponent[e] * speed_mantissa[m] / 10;
		csd->ccc = SD_CSD_CCC(resp);
	} else {
		csd->csdver = MMC_CSD_CSDVER(resp);
		if (csd->csdver == MMC_CSD_CSDVER_1_0) {
			aprint_error_dev(sc->sc_dev,
			    "unknown MMC CSD structure version 0x%x\n",
			    csd->csdver);
			return 1;
		}

		csd->mmcver = MMC_CSD_MMCVER(resp);
		csd->capacity = MMC_CSD_CAPACITY(resp);
		csd->read_bl_len = MMC_CSD_READ_BL_LEN(resp);
		csd->write_bl_len = MMC_CSD_WRITE_BL_LEN(resp);
		csd->r2w_factor = MMC_CSD_R2W_FACTOR(resp);
		e = MMC_CSD_TRAN_SPEED_EXP(resp);
		m = MMC_CSD_TRAN_SPEED_MANT(resp);
		csd->tran_speed = speed_exponent[e] * speed_mantissa[m] / 10;
	}
	/* Normalize capacity to SDMMC_SECTOR_SIZE-sized sectors. */
	if ((1 << csd->read_bl_len) > SDMMC_SECTOR_SIZE)
		csd->capacity *= (1 << csd->read_bl_len) / SDMMC_SECTOR_SIZE;

#ifdef SDMMC_DUMP_CSD
	sdmmc_print_csd(resp, csd);
#endif

	return 0;
}
507 
/*
 * Decode the raw CID response into sf->cid.  The layout depends on
 * whether the card is SD or MMC, and for MMC also on the MMC version
 * decoded earlier from the CSD.  Returns 0 on success, 1 for an
 * unrecognized MMC version.
 *
 * NOTE(review): fields not present in a given layout (oid for MMC v1,
 * rev/mdt for MMC v2+) are left untouched here — presumably zero from
 * allocation; confirm against sdmmc_function_alloc().
 */
int
sdmmc_decode_cid(struct sdmmc_softc *sc, sdmmc_response resp,
    struct sdmmc_function *sf)
{
	struct sdmmc_cid *cid = &sf->cid;

	if (ISSET(sc->sc_flags, SMF_SD_MODE)) {
		cid->mid = SD_CID_MID(resp);
		cid->oid = SD_CID_OID(resp);
		SD_CID_PNM_CPY(resp, cid->pnm);
		cid->rev = SD_CID_REV(resp);
		cid->psn = SD_CID_PSN(resp);
		cid->mdt = SD_CID_MDT(resp);
	} else {
		switch(sf->csd.mmcver) {
		case MMC_CSD_MMCVER_1_0:
		case MMC_CSD_MMCVER_1_4:
			cid->mid = MMC_CID_MID_V1(resp);
			MMC_CID_PNM_V1_CPY(resp, cid->pnm);
			cid->rev = MMC_CID_REV_V1(resp);
			cid->psn = MMC_CID_PSN_V1(resp);
			cid->mdt = MMC_CID_MDT_V1(resp);
			break;
		case MMC_CSD_MMCVER_2_0:
		case MMC_CSD_MMCVER_3_1:
		case MMC_CSD_MMCVER_4_0:
			cid->mid = MMC_CID_MID_V2(resp);
			cid->oid = MMC_CID_OID_V2(resp);
			MMC_CID_PNM_V2_CPY(resp, cid->pnm);
			cid->psn = MMC_CID_PSN_V2(resp);
			break;
		default:
			aprint_error_dev(sc->sc_dev, "unknown MMC version %d\n",
			    sf->csd.mmcver);
			return 1;
		}
	}
	return 0;
}
547 
/* Print the decoded CID fields on a single line (debug aid). */
void
sdmmc_print_cid(struct sdmmc_cid *cid)
{

	printf("mid=0x%02x oid=0x%04x pnm=\"%s\" rev=0x%02x psn=0x%08x"
	    " mdt=%03x\n", cid->mid, cid->oid, cid->pnm, cid->rev, cid->psn,
	    cid->mdt);
}
556 
557 #ifdef SDMMC_DUMP_CSD
/* Dump every decoded CSD field, one per line (SDMMC_DUMP_CSD only). */
void
sdmmc_print_csd(sdmmc_response resp, struct sdmmc_csd *csd)
{

	printf("csdver = %d\n", csd->csdver);
	printf("mmcver = %d\n", csd->mmcver);
	printf("capacity = 0x%08x\n", csd->capacity);
	printf("read_bl_len = %d\n", csd->read_bl_len);
	printf("write_bl_len = %d\n", csd->write_bl_len);
	printf("r2w_factor = %d\n", csd->r2w_factor);
	printf("tran_speed = %d\n", csd->tran_speed);
	printf("ccc = 0x%x\n", csd->ccc);
}
571 #endif
572 
573 /*
574  * Initialize a SD/MMC memory card.
575  */
576 int
577 sdmmc_mem_init(struct sdmmc_softc *sc, struct sdmmc_function *sf)
578 {
579 	int error = 0;
580 
581 	SDMMC_LOCK(sc);
582 
583 	if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
584 		error = sdmmc_select_card(sc, sf);
585 		if (error)
586 			goto out;
587 	}
588 
589 	error = sdmmc_mem_set_blocklen(sc, sf, SDMMC_SECTOR_SIZE);
590 	if (error)
591 		goto out;
592 
593 	if (ISSET(sc->sc_flags, SMF_SD_MODE))
594 		error = sdmmc_mem_sd_init(sc, sf);
595 	else
596 		error = sdmmc_mem_mmc_init(sc, sf);
597 
598 	if (error != 0)
599 		SET(sf->flags, SFF_ERROR);
600 
601 out:
602 	SDMMC_UNLOCK(sc);
603 
604 	return error;
605 }
606 
607 /*
608  * Get or set the card's memory OCR value (SD or MMC).
609  */
/*
 * Get or set the card's memory OCR value (SD or MMC).
 * Issues ACMD41 (SD) or CMD1 (MMC), retrying up to 100 times until the
 * card reports ready.  On success *ocrp receives the card's OCR (native
 * bus mode) or echoes the requested ocr (SPI mode / failure).
 */
int
sdmmc_mem_send_op_cond(struct sdmmc_softc *sc, uint32_t ocr, uint32_t *ocrp)
{
	struct sdmmc_command cmd;
	int error;
	int retry;

	/* Don't lock */

	DPRINTF(("%s: sdmmc_mem_send_op_cond: ocr=%#x\n",
	    SDMMCDEVNAME(sc), ocr));

	/*
	 * If we change the OCR value, retry the command until the OCR
	 * we receive in response has the "CARD BUSY" bit set, meaning
	 * that all cards are ready for identification.
	 */
	for (retry = 0; retry < 100; retry++) {
		memset(&cmd, 0, sizeof(cmd));
		/* In SPI mode only the HCS bit is meaningful in the arg. */
		cmd.c_arg = !ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE) ?
		    ocr : (ocr & MMC_OCR_HCS);
		cmd.c_flags = SCF_CMD_BCR | SCF_RSP_R3 | SCF_RSP_SPI_R1
		    | SCF_TOUT_OK;

		if (ISSET(sc->sc_flags, SMF_SD_MODE)) {
			cmd.c_opcode = SD_APP_OP_COND;
			error = sdmmc_app_command(sc, NULL, &cmd);
		} else {
			cmd.c_opcode = MMC_SEND_OP_COND;
			error = sdmmc_mmc_command(sc, &cmd);
		}
		if (error)
			break;

		if (ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
			/* SPI: card is ready once it leaves the idle state. */
			if (!ISSET(MMC_SPI_R1(cmd.c_resp), R1_SPI_IDLE))
				break;
		} else {
			/* ocr == 0 is a pure inquiry; don't wait for ready. */
			if (ISSET(MMC_R3(cmd.c_resp), MMC_OCR_MEM_READY) ||
			    ocr == 0)
				break;
		}

		error = ETIMEDOUT;
		sdmmc_pause(10000, NULL);
	}
	if (ocrp != NULL) {
		if (error == 0 &&
		    !ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
			*ocrp = MMC_R3(cmd.c_resp);
		} else {
			*ocrp = ocr;
		}
	}
	DPRINTF(("%s: sdmmc_mem_send_op_cond: error=%d, ocr=%#x\n",
	    SDMMCDEVNAME(sc), error, MMC_R3(cmd.c_resp)));
	return error;
}
668 
669 int
670 sdmmc_mem_send_if_cond(struct sdmmc_softc *sc, uint32_t ocr, uint32_t *ocrp)
671 {
672 	struct sdmmc_command cmd;
673 	int error;
674 
675 	/* Don't lock */
676 
677 	memset(&cmd, 0, sizeof(cmd));
678 	cmd.c_arg = ocr;
679 	cmd.c_flags = SCF_CMD_BCR | SCF_RSP_R7 | SCF_RSP_SPI_R7 | SCF_TOUT_OK;
680 	cmd.c_opcode = SD_SEND_IF_COND;
681 
682 	error = sdmmc_mmc_command(sc, &cmd);
683 	if (error == 0 && ocrp != NULL) {
684 		if (ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
685 			*ocrp = MMC_SPI_R7(cmd.c_resp);
686 		} else {
687 			*ocrp = MMC_R7(cmd.c_resp);
688 		}
689 		DPRINTF(("%s: sdmmc_mem_send_if_cond: error=%d, ocr=%#x\n",
690 		    SDMMCDEVNAME(sc), error, *ocrp));
691 	}
692 	return error;
693 }
694 
695 /*
696  * Set the read block length appropriately for this card, according to
697  * the card CSD register value.
698  */
699 int
700 sdmmc_mem_set_blocklen(struct sdmmc_softc *sc, struct sdmmc_function *sf,
701    int block_len)
702 {
703 	struct sdmmc_command cmd;
704 	int error;
705 
706 	/* Don't lock */
707 
708 	memset(&cmd, 0, sizeof(cmd));
709 	cmd.c_opcode = MMC_SET_BLOCKLEN;
710 	cmd.c_arg = block_len;
711 	cmd.c_flags = SCF_CMD_AC | SCF_RSP_R1 | SCF_RSP_SPI_R1;
712 
713 	error = sdmmc_mmc_command(sc, &cmd);
714 
715 	DPRINTF(("%s: sdmmc_mem_set_blocklen: read_bl_len=%d sector_size=%d\n",
716 	    SDMMCDEVNAME(sc), 1 << sf->csd.read_bl_len, block_len));
717 
718 	return error;
719 }
720 
721 /* make 512-bit BE quantity __bitfield()-compatible */
722 static void
723 sdmmc_be512_to_bitfield512(sdmmc_bitfield512_t *buf) {
724 	size_t i;
725 	uint32_t tmp0, tmp1;
726 	const size_t bitswords = __arraycount(buf->_bits);
727 	for (i = 0; i < bitswords/2; i++) {
728 		tmp0 = buf->_bits[i];
729 		tmp1 = buf->_bits[bitswords - 1 - i];
730 		buf->_bits[i] = be32toh(tmp1);
731 		buf->_bits[bitswords - 1 - i] = be32toh(tmp0);
732 	}
733 }
734 
735 static int
736 sdmmc_mem_select_transfer_mode(struct sdmmc_softc *sc, int support_func)
737 {
738 	if (ISSET(sc->sc_flags, SMF_UHS_MODE)) {
739 		if (ISSET(sc->sc_caps, SMC_CAPS_UHS_SDR104) &&
740 		    ISSET(support_func, 1 << SD_ACCESS_MODE_SDR104)) {
741 			return SD_ACCESS_MODE_SDR104;
742 		}
743 		if (ISSET(sc->sc_caps, SMC_CAPS_UHS_DDR50) &&
744 		    ISSET(support_func, 1 << SD_ACCESS_MODE_DDR50)) {
745 			return SD_ACCESS_MODE_DDR50;
746 		}
747 		if (ISSET(sc->sc_caps, SMC_CAPS_UHS_SDR50) &&
748 		    ISSET(support_func, 1 << SD_ACCESS_MODE_SDR50)) {
749 			return SD_ACCESS_MODE_SDR50;
750 		}
751 	}
752 	if (ISSET(sc->sc_caps, SMC_CAPS_SD_HIGHSPEED) &&
753 	    ISSET(support_func, 1 << SD_ACCESS_MODE_SDR25)) {
754 		return SD_ACCESS_MODE_SDR25;
755 	}
756 	return SD_ACCESS_MODE_SDR12;
757 }
758 
759 static int
760 sdmmc_mem_execute_tuning(struct sdmmc_softc *sc, struct sdmmc_function *sf)
761 {
762 	int timing = -1;
763 
764 	if (ISSET(sc->sc_flags, SMF_SD_MODE)) {
765 		if (!ISSET(sc->sc_flags, SMF_UHS_MODE))
766 			return 0;
767 
768 		switch (sf->csd.tran_speed) {
769 		case 100000:
770 			timing = SDMMC_TIMING_UHS_SDR50;
771 			break;
772 		case 208000:
773 			timing = SDMMC_TIMING_UHS_SDR104;
774 			break;
775 		default:
776 			return 0;
777 		}
778 	} else {
779 		switch (sf->csd.tran_speed) {
780 		case 200000:
781 			timing = SDMMC_TIMING_MMC_HS200;
782 			break;
783 		default:
784 			return 0;
785 		}
786 	}
787 
788 	DPRINTF(("%s: execute tuning for timing %d\n", SDMMCDEVNAME(sc),
789 	    timing));
790 
791 	return sdmmc_chip_execute_tuning(sc->sc_sct, sc->sc_sch, timing);
792 }
793 
/*
 * SD-specific card bring-up: read and decode the SCR, widen the bus to
 * 4 bits if possible, probe and switch the CMD6 group 1 access mode,
 * raise the bus clock, read the SD status register, and run tuning for
 * UHS modes.  Returns 0 on success.
 */
static int
sdmmc_mem_sd_init(struct sdmmc_softc *sc, struct sdmmc_function *sf)
{
	int support_func, best_func, bus_clock, error, i;
	sdmmc_bitfield512_t status;
	bool ddr = false;

	/* change bus clock */
	bus_clock = uimin(sc->sc_busclk, sf->csd.tran_speed);
	error = sdmmc_chip_bus_clock(sc->sc_sct, sc->sc_sch, bus_clock, false);
	if (error) {
		aprint_error_dev(sc->sc_dev, "can't change bus clock\n");
		return error;
	}

	error = sdmmc_mem_send_scr(sc, sf, sf->raw_scr);
	if (error) {
		aprint_error_dev(sc->sc_dev, "SD_SEND_SCR send failed.\n");
		return error;
	}
	error = sdmmc_mem_decode_scr(sc, sf);
	if (error)
		return error;

	/* Widen the bus to 4 bits if both host and card support it. */
	if (ISSET(sc->sc_caps, SMC_CAPS_4BIT_MODE) &&
	    ISSET(sf->scr.bus_width, SCR_SD_BUS_WIDTHS_4BIT)) {
		DPRINTF(("%s: change bus width\n", SDMMCDEVNAME(sc)));
		error = sdmmc_set_bus_width(sf, 4);
		if (error) {
			aprint_error_dev(sc->sc_dev,
			    "can't change bus width (%d bit)\n", 4);
			return error;
		}
		sf->width = 4;
	}

	best_func = 0;
	/* CMD6 requires spec >= 1.10 and the SWITCH command class. */
	if (sf->scr.sd_spec >= SCR_SD_SPEC_VER_1_10 &&
	    ISSET(sf->csd.ccc, SD_CSD_CCC_SWITCH)) {
		/* Mode 0 = inquiry: ask which functions the card supports. */
		DPRINTF(("%s: switch func mode 0\n", SDMMCDEVNAME(sc)));
		error = sdmmc_mem_sd_switch(sf, 0, 1, 0, &status);
		if (error) {
			aprint_error_dev(sc->sc_dev,
			    "switch func mode 0 failed\n");
			return error;
		}

		support_func = SFUNC_STATUS_GROUP(&status, 1);

		if (!ISSET(sc->sc_flags, SMF_UHS_MODE) && support_func & 0x1c) {
			/* XXX UHS-I card started in 1.8V mode, switch now */
			error = sdmmc_mem_signal_voltage(sc,
			    SDMMC_SIGNAL_VOLTAGE_180);
			if (error) {
				aprint_error_dev(sc->sc_dev,
				    "failed to recover UHS card\n");
				return error;
			}
			SET(sc->sc_flags, SMF_UHS_MODE);
		}

		for (i = 0; i < __arraycount(switch_group0_functions); i++) {
			if (!(support_func & (1 << i)))
				continue;
			DPRINTF(("%s: card supports mode %s\n",
			    SDMMCDEVNAME(sc),
			    switch_group0_functions[i].name));
		}

		best_func = sdmmc_mem_select_transfer_mode(sc, support_func);

		DPRINTF(("%s: using mode %s\n", SDMMCDEVNAME(sc),
		    switch_group0_functions[best_func].name));

		if (best_func != 0) {
			/* Mode 1 = set: actually switch the access mode. */
			DPRINTF(("%s: switch func mode 1(func=%d)\n",
			    SDMMCDEVNAME(sc), best_func));
			error =
			    sdmmc_mem_sd_switch(sf, 1, 1, best_func, &status);
			if (error) {
				aprint_error_dev(sc->sc_dev,
				    "switch func mode 1 failed:"
				    " group 1 function %d(0x%2x)\n",
				    best_func, support_func);
				return error;
			}
			sf->csd.tran_speed =
			    switch_group0_functions[best_func].freq;

			if (best_func == SD_ACCESS_MODE_DDR50)
				ddr = true;

			/* Wait 400KHz x 8 clock (2.5us * 8 + slop) */
			delay(25);
		}
	}

	/* update bus clock */
	if (sc->sc_busclk > sf->csd.tran_speed)
		sc->sc_busclk = sf->csd.tran_speed;
	/* Nothing left to do if neither clock nor DDR mode changed. */
	if (sc->sc_busclk == bus_clock && sc->sc_busddr == ddr)
		return 0;

	/* change bus clock */
	error = sdmmc_chip_bus_clock(sc->sc_sct, sc->sc_sch, sc->sc_busclk,
	    ddr);
	if (error) {
		aprint_error_dev(sc->sc_dev, "can't change bus clock\n");
		return error;
	}

	sc->sc_transfer_mode = switch_group0_functions[best_func].name;
	sc->sc_busddr = ddr;

	/* get card status */
	error = sdmmc_mem_send_ssr(sc, sf, &status);
	if (error) {
		aprint_error_dev(sc->sc_dev, "can't get SD status: %d\n",
		    error);
		return error;
	}
	sdmmc_mem_decode_ssr(sc, sf, &status);

	/* execute tuning (UHS) */
	error = sdmmc_mem_execute_tuning(sc, sf);
	if (error) {
		aprint_error_dev(sc->sc_dev, "can't execute SD tuning\n");
		return error;
	}

	return 0;
}
926 
927 static int
928 sdmmc_mem_mmc_init(struct sdmmc_softc *sc, struct sdmmc_function *sf)
929 {
930 	int width, value, hs_timing, bus_clock, error;
931 	uint8_t ext_csd[512];
932 	uint32_t sectors = 0;
933 	bool ddr = false;
934 
935 	sc->sc_transfer_mode = NULL;
936 
937 	/* change bus clock */
938 	bus_clock = uimin(sc->sc_busclk, sf->csd.tran_speed);
939 	error = sdmmc_chip_bus_clock(sc->sc_sct, sc->sc_sch, bus_clock, false);
940 	if (error) {
941 		aprint_error_dev(sc->sc_dev, "can't change bus clock\n");
942 		return error;
943 	}
944 
945 	if (sf->csd.mmcver >= MMC_CSD_MMCVER_4_0) {
946 		error = sdmmc_mem_send_cxd_data(sc,
947 		    MMC_SEND_EXT_CSD, ext_csd, sizeof(ext_csd));
948 		if (error) {
949 			aprint_error_dev(sc->sc_dev,
950 			    "can't read EXT_CSD (error=%d)\n", error);
951 			return error;
952 		}
953 		if ((sf->csd.csdver == MMC_CSD_CSDVER_EXT_CSD) &&
954 		    (ext_csd[EXT_CSD_STRUCTURE] > EXT_CSD_STRUCTURE_VER_1_2)) {
955 			aprint_error_dev(sc->sc_dev,
956 			    "unrecognised future version (%d)\n",
957 				ext_csd[EXT_CSD_STRUCTURE]);
958 			return ENOTSUP;
959 		}
960 		sf->ext_csd.rev = ext_csd[EXT_CSD_REV];
961 
962 		if (ISSET(sc->sc_caps, SMC_CAPS_MMC_HS200) &&
963 		    ext_csd[EXT_CSD_CARD_TYPE] & EXT_CSD_CARD_TYPE_F_HS200_1_8V) {
964 			hs_timing = EXT_CSD_HS_TIMING_HS200;
965 		} else if (ISSET(sc->sc_caps, SMC_CAPS_MMC_DDR52) &&
966 		    ext_csd[EXT_CSD_CARD_TYPE] & EXT_CSD_CARD_TYPE_F_DDR52_1_8V) {
967 			hs_timing = EXT_CSD_HS_TIMING_HIGHSPEED;
968 			ddr = true;
969 		} else if (ext_csd[EXT_CSD_CARD_TYPE] & EXT_CSD_CARD_TYPE_F_52M) {
970 			hs_timing = EXT_CSD_HS_TIMING_HIGHSPEED;
971 		} else if (ext_csd[EXT_CSD_CARD_TYPE] & EXT_CSD_CARD_TYPE_F_26M) {
972 			hs_timing = EXT_CSD_HS_TIMING_LEGACY;
973 		} else {
974 			aprint_error_dev(sc->sc_dev,
975 			    "unknown CARD_TYPE: 0x%x\n",
976 			    ext_csd[EXT_CSD_CARD_TYPE]);
977 			return ENOTSUP;
978 		}
979 
980 		if (ISSET(sc->sc_caps, SMC_CAPS_8BIT_MODE)) {
981 			width = 8;
982 			value = EXT_CSD_BUS_WIDTH_8;
983 		} else if (ISSET(sc->sc_caps, SMC_CAPS_4BIT_MODE)) {
984 			width = 4;
985 			value = EXT_CSD_BUS_WIDTH_4;
986 		} else {
987 			width = 1;
988 			value = EXT_CSD_BUS_WIDTH_1;
989 		}
990 
991 		if (width != 1) {
992 			error = sdmmc_mem_mmc_switch(sf, EXT_CSD_CMD_SET_NORMAL,
993 			    EXT_CSD_BUS_WIDTH, value, false);
994 			if (error == 0)
995 				error = sdmmc_chip_bus_width(sc->sc_sct,
996 				    sc->sc_sch, width);
997 			else {
998 				DPRINTF(("%s: can't change bus width"
999 				    " (%d bit)\n", SDMMCDEVNAME(sc), width));
1000 				return error;
1001 			}
1002 
1003 			/* XXXX: need bus test? (using by CMD14 & CMD19) */
1004 			delay(10000);
1005 		}
1006 		sf->width = width;
1007 
1008 		if (hs_timing == EXT_CSD_HS_TIMING_HIGHSPEED &&
1009 		    !ISSET(sc->sc_caps, SMC_CAPS_MMC_HIGHSPEED)) {
1010 			hs_timing = EXT_CSD_HS_TIMING_LEGACY;
1011 		}
1012 
1013 		const int target_timing = hs_timing;
1014 		if (hs_timing != EXT_CSD_HS_TIMING_LEGACY) {
1015 			while (hs_timing >= EXT_CSD_HS_TIMING_LEGACY) {
1016 				error = sdmmc_mem_mmc_switch(sf, EXT_CSD_CMD_SET_NORMAL,
1017 				    EXT_CSD_HS_TIMING, hs_timing, false);
1018 				if (error == 0 || hs_timing == EXT_CSD_HS_TIMING_LEGACY)
1019 					break;
1020 				hs_timing--;
1021 			}
1022 		}
1023 		if (hs_timing != target_timing) {
1024 			aprint_debug_dev(sc->sc_dev,
1025 			    "card failed to switch to timing mode %d, using %d\n",
1026 			    target_timing, hs_timing);
1027 		}
1028 
1029 		KASSERT(hs_timing < __arraycount(sdmmc_mmc_timings));
1030 		sf->csd.tran_speed = sdmmc_mmc_timings[hs_timing];
1031 
1032 		if (sc->sc_busclk > sf->csd.tran_speed)
1033 			sc->sc_busclk = sf->csd.tran_speed;
1034 		if (sc->sc_busclk != bus_clock) {
1035 			error = sdmmc_chip_bus_clock(sc->sc_sct, sc->sc_sch,
1036 			    sc->sc_busclk, false);
1037 			if (error) {
1038 				aprint_error_dev(sc->sc_dev,
1039 				    "can't change bus clock\n");
1040 				return error;
1041 			}
1042 		}
1043 
1044 		if (hs_timing != EXT_CSD_HS_TIMING_LEGACY) {
1045 			error = sdmmc_mem_send_cxd_data(sc,
1046 			    MMC_SEND_EXT_CSD, ext_csd, sizeof(ext_csd));
1047 			if (error) {
1048 				aprint_error_dev(sc->sc_dev,
1049 				    "can't re-read EXT_CSD\n");
1050 				return error;
1051 			}
1052 			if (ext_csd[EXT_CSD_HS_TIMING] != hs_timing) {
1053 				aprint_error_dev(sc->sc_dev,
1054 				    "HS_TIMING set failed\n");
1055 				return EINVAL;
1056 			}
1057 		}
1058 
1059 		/*
1060 		 * HS_TIMING must be set to 0x1 before setting BUS_WIDTH
1061 		 * for dual data rate operation
1062 		 */
1063 		if (ddr &&
1064 		    hs_timing == EXT_CSD_HS_TIMING_HIGHSPEED &&
1065 		    width > 1) {
1066 			error = sdmmc_mem_mmc_switch(sf,
1067 			    EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BUS_WIDTH,
1068 			    (width == 8) ? EXT_CSD_BUS_WIDTH_8_DDR :
1069 			      EXT_CSD_BUS_WIDTH_4_DDR, false);
1070 			if (error) {
1071 				DPRINTF(("%s: can't switch to DDR"
1072 				    " (%d bit)\n", SDMMCDEVNAME(sc), width));
1073 				return error;
1074 			}
1075 
1076 			delay(10000);
1077 
1078 			error = sdmmc_mem_signal_voltage(sc,
1079 			    SDMMC_SIGNAL_VOLTAGE_180);
1080 			if (error) {
1081 				aprint_error_dev(sc->sc_dev,
1082 				    "can't switch signaling voltage\n");
1083 				return error;
1084 			}
1085 
1086 			error = sdmmc_chip_bus_clock(sc->sc_sct, sc->sc_sch,
1087 			    sc->sc_busclk, ddr);
1088 			if (error) {
1089 				aprint_error_dev(sc->sc_dev,
1090 				    "can't change bus clock\n");
1091 				return error;
1092 			}
1093 
1094 			delay(10000);
1095 
1096 			sc->sc_transfer_mode = "DDR52";
1097 			sc->sc_busddr = ddr;
1098 		}
1099 
1100 		sectors = ext_csd[EXT_CSD_SEC_COUNT + 0] << 0 |
1101 		    ext_csd[EXT_CSD_SEC_COUNT + 1] << 8  |
1102 		    ext_csd[EXT_CSD_SEC_COUNT + 2] << 16 |
1103 		    ext_csd[EXT_CSD_SEC_COUNT + 3] << 24;
1104 		if (sectors > (2u * 1024 * 1024 * 1024) / 512) {
1105 			SET(sf->flags, SFF_SDHC);
1106 			sf->csd.capacity = sectors;
1107 		}
1108 
1109 		if (hs_timing == EXT_CSD_HS_TIMING_HS200) {
1110 			sc->sc_transfer_mode = "HS200";
1111 
1112 			/* execute tuning (HS200) */
1113 			error = sdmmc_mem_execute_tuning(sc, sf);
1114 			if (error) {
1115 				aprint_error_dev(sc->sc_dev,
1116 				    "can't execute MMC tuning\n");
1117 				return error;
1118 			}
1119 		}
1120 
1121 		if (sf->ext_csd.rev >= 5) {
1122 			sf->ext_csd.rst_n_function =
1123 			    ext_csd[EXT_CSD_RST_N_FUNCTION];
1124 		}
1125 
1126 		if (sf->ext_csd.rev >= 6) {
1127 			sf->ext_csd.cache_size =
1128 			    le32dec(&ext_csd[EXT_CSD_CACHE_SIZE]) * 1024;
1129 		}
1130 		if (sf->ext_csd.cache_size > 0) {
1131 			/* eMMC cache present, enable it */
1132 			error = sdmmc_mem_mmc_switch(sf,
1133 			    EXT_CSD_CMD_SET_NORMAL, EXT_CSD_CACHE_CTRL,
1134 			    EXT_CSD_CACHE_CTRL_CACHE_EN, false);
1135 			if (error) {
1136 				aprint_error_dev(sc->sc_dev,
1137 				    "can't enable cache: %d\n", error);
1138 			} else {
1139 				SET(sf->flags, SFF_CACHE_ENABLED);
1140 			}
1141 		}
1142 	} else {
1143 		if (sc->sc_busclk > sf->csd.tran_speed)
1144 			sc->sc_busclk = sf->csd.tran_speed;
1145 		if (sc->sc_busclk != bus_clock) {
1146 			error = sdmmc_chip_bus_clock(sc->sc_sct, sc->sc_sch,
1147 			    sc->sc_busclk, false);
1148 			if (error) {
1149 				aprint_error_dev(sc->sc_dev,
1150 				    "can't change bus clock\n");
1151 				return error;
1152 			}
1153 		}
1154 	}
1155 
1156 	return 0;
1157 }
1158 
1159 static int
1160 sdmmc_mem_send_cid(struct sdmmc_softc *sc, sdmmc_response *resp)
1161 {
1162 	struct sdmmc_command cmd;
1163 	int error;
1164 
1165 	if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
1166 		memset(&cmd, 0, sizeof cmd);
1167 		cmd.c_opcode = MMC_ALL_SEND_CID;
1168 		cmd.c_flags = SCF_CMD_BCR | SCF_RSP_R2 | SCF_TOUT_OK;
1169 
1170 		error = sdmmc_mmc_command(sc, &cmd);
1171 	} else {
1172 		error = sdmmc_mem_send_cxd_data(sc, MMC_SEND_CID, &cmd.c_resp,
1173 		    sizeof(cmd.c_resp));
1174 	}
1175 
1176 #ifdef SDMMC_DEBUG
1177 	if (error == 0)
1178 		sdmmc_dump_data("CID", cmd.c_resp, sizeof(cmd.c_resp));
1179 #endif
1180 	if (error == 0 && resp != NULL)
1181 		memcpy(resp, &cmd.c_resp, sizeof(*resp));
1182 	return error;
1183 }
1184 
1185 static int
1186 sdmmc_mem_send_csd(struct sdmmc_softc *sc, struct sdmmc_function *sf,
1187     sdmmc_response *resp)
1188 {
1189 	struct sdmmc_command cmd;
1190 	int error;
1191 
1192 	if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
1193 		memset(&cmd, 0, sizeof cmd);
1194 		cmd.c_opcode = MMC_SEND_CSD;
1195 		cmd.c_arg = MMC_ARG_RCA(sf->rca);
1196 		cmd.c_flags = SCF_CMD_AC | SCF_RSP_R2;
1197 
1198 		error = sdmmc_mmc_command(sc, &cmd);
1199 	} else {
1200 		error = sdmmc_mem_send_cxd_data(sc, MMC_SEND_CSD, &cmd.c_resp,
1201 		    sizeof(cmd.c_resp));
1202 	}
1203 
1204 #ifdef SDMMC_DEBUG
1205 	if (error == 0)
1206 		sdmmc_dump_data("CSD", cmd.c_resp, sizeof(cmd.c_resp));
1207 #endif
1208 	if (error == 0 && resp != NULL)
1209 		memcpy(resp, &cmd.c_resp, sizeof(*resp));
1210 	return error;
1211 }
1212 
1213 static int
1214 sdmmc_mem_send_scr(struct sdmmc_softc *sc, struct sdmmc_function *sf,
1215     uint32_t *scr)
1216 {
1217 	struct sdmmc_command cmd;
1218 	bus_dma_segment_t ds[1];
1219 	void *ptr = NULL;
1220 	int datalen = 8;
1221 	int rseg;
1222 	int error = 0;
1223 
1224 	/* Don't lock */
1225 
1226 	if (ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
1227 		error = bus_dmamem_alloc(sc->sc_dmat, datalen, PAGE_SIZE, 0,
1228 		    ds, 1, &rseg, BUS_DMA_NOWAIT);
1229 		if (error)
1230 			goto out;
1231 		error = bus_dmamem_map(sc->sc_dmat, ds, 1, datalen, &ptr,
1232 		    BUS_DMA_NOWAIT);
1233 		if (error)
1234 			goto dmamem_free;
1235 		error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmap, ptr, datalen,
1236 		    NULL, BUS_DMA_NOWAIT|BUS_DMA_STREAMING|BUS_DMA_READ);
1237 		if (error)
1238 			goto dmamem_unmap;
1239 
1240 		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap, 0, datalen,
1241 		    BUS_DMASYNC_PREREAD);
1242 	} else {
1243 		ptr = malloc(datalen, M_DEVBUF, M_NOWAIT | M_ZERO);
1244 		if (ptr == NULL)
1245 			goto out;
1246 	}
1247 
1248 	memset(&cmd, 0, sizeof(cmd));
1249 	cmd.c_data = ptr;
1250 	cmd.c_datalen = datalen;
1251 	cmd.c_blklen = datalen;
1252 	cmd.c_arg = 0;
1253 	cmd.c_flags = SCF_CMD_ADTC | SCF_CMD_READ | SCF_RSP_R1 | SCF_RSP_SPI_R1;
1254 	cmd.c_opcode = SD_APP_SEND_SCR;
1255 	if (ISSET(sc->sc_caps, SMC_CAPS_DMA))
1256 		cmd.c_dmamap = sc->sc_dmap;
1257 
1258 	error = sdmmc_app_command(sc, sf, &cmd);
1259 	if (error == 0) {
1260 		if (ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
1261 			bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap, 0, datalen,
1262 			    BUS_DMASYNC_POSTREAD);
1263 		}
1264 		memcpy(scr, ptr, datalen);
1265 	}
1266 
1267 out:
1268 	if (ptr != NULL) {
1269 		if (ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
1270 			bus_dmamap_unload(sc->sc_dmat, sc->sc_dmap);
1271 dmamem_unmap:
1272 			bus_dmamem_unmap(sc->sc_dmat, ptr, datalen);
1273 dmamem_free:
1274 			bus_dmamem_free(sc->sc_dmat, ds, rseg);
1275 		} else {
1276 			free(ptr, M_DEVBUF);
1277 		}
1278 	}
1279 	DPRINTF(("%s: sdmem_mem_send_scr: error = %d\n", SDMMCDEVNAME(sc),
1280 	    error));
1281 
1282 #ifdef SDMMC_DEBUG
1283 	if (error == 0)
1284 		sdmmc_dump_data("SCR", scr, datalen);
1285 #endif
1286 	return error;
1287 }
1288 
1289 static int
1290 sdmmc_mem_decode_scr(struct sdmmc_softc *sc, struct sdmmc_function *sf)
1291 {
1292 	sdmmc_response resp;
1293 	int ver;
1294 
1295 	memset(resp, 0, sizeof(resp));
1296 	/*
1297 	 * Change the raw-scr received from the DMA stream to resp.
1298 	 */
1299 	resp[0] = be32toh(sf->raw_scr[1]) >> 8;		// LSW
1300 	resp[1] = be32toh(sf->raw_scr[0]);		// MSW
1301 	resp[0] |= (resp[1] & 0xff) << 24;
1302 	resp[1] >>= 8;
1303 
1304 	ver = SCR_STRUCTURE(resp);
1305 	sf->scr.sd_spec = SCR_SD_SPEC(resp);
1306 	sf->scr.bus_width = SCR_SD_BUS_WIDTHS(resp);
1307 
1308 	DPRINTF(("%s: sdmmc_mem_decode_scr: %08x%08x ver=%d, spec=%d, bus width=%d\n",
1309 	    SDMMCDEVNAME(sc), resp[1], resp[0],
1310 	    ver, sf->scr.sd_spec, sf->scr.bus_width));
1311 
1312 	if (ver != 0 && ver != 1) {
1313 		DPRINTF(("%s: unknown structure version: %d\n",
1314 		    SDMMCDEVNAME(sc), ver));
1315 		return EINVAL;
1316 	}
1317 	return 0;
1318 }
1319 
1320 static int
1321 sdmmc_mem_send_ssr(struct sdmmc_softc *sc, struct sdmmc_function *sf,
1322     sdmmc_bitfield512_t *ssr)
1323 {
1324 	struct sdmmc_command cmd;
1325 	bus_dma_segment_t ds[1];
1326 	void *ptr = NULL;
1327 	int datalen = 64;
1328 	int rseg;
1329 	int error = 0;
1330 
1331 	/* Don't lock */
1332 
1333 	if (ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
1334 		error = bus_dmamem_alloc(sc->sc_dmat, datalen, PAGE_SIZE, 0,
1335 		    ds, 1, &rseg, BUS_DMA_NOWAIT);
1336 		if (error)
1337 			goto out;
1338 		error = bus_dmamem_map(sc->sc_dmat, ds, 1, datalen, &ptr,
1339 		    BUS_DMA_NOWAIT);
1340 		if (error)
1341 			goto dmamem_free;
1342 		error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmap, ptr, datalen,
1343 		    NULL, BUS_DMA_NOWAIT|BUS_DMA_STREAMING|BUS_DMA_READ);
1344 		if (error)
1345 			goto dmamem_unmap;
1346 
1347 		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap, 0, datalen,
1348 		    BUS_DMASYNC_PREREAD);
1349 	} else {
1350 		ptr = malloc(datalen, M_DEVBUF, M_NOWAIT | M_ZERO);
1351 		if (ptr == NULL)
1352 			goto out;
1353 	}
1354 
1355 	memset(&cmd, 0, sizeof(cmd));
1356 	cmd.c_data = ptr;
1357 	cmd.c_datalen = datalen;
1358 	cmd.c_blklen = datalen;
1359 	cmd.c_arg = 0;
1360 	cmd.c_flags = SCF_CMD_ADTC | SCF_CMD_READ | SCF_RSP_R1 | SCF_RSP_SPI_R1;
1361 	cmd.c_opcode = SD_APP_SD_STATUS;
1362 	if (ISSET(sc->sc_caps, SMC_CAPS_DMA))
1363 		cmd.c_dmamap = sc->sc_dmap;
1364 
1365 	error = sdmmc_app_command(sc, sf, &cmd);
1366 	if (error == 0) {
1367 		if (ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
1368 			bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap, 0, datalen,
1369 			    BUS_DMASYNC_POSTREAD);
1370 		}
1371 		memcpy(ssr, ptr, datalen);
1372 	}
1373 
1374 out:
1375 	if (ptr != NULL) {
1376 		if (ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
1377 			bus_dmamap_unload(sc->sc_dmat, sc->sc_dmap);
1378 dmamem_unmap:
1379 			bus_dmamem_unmap(sc->sc_dmat, ptr, datalen);
1380 dmamem_free:
1381 			bus_dmamem_free(sc->sc_dmat, ds, rseg);
1382 		} else {
1383 			free(ptr, M_DEVBUF);
1384 		}
1385 	}
1386 	DPRINTF(("%s: sdmem_mem_send_ssr: error = %d\n", SDMMCDEVNAME(sc),
1387 	    error));
1388 
1389 	if (error == 0)
1390 		sdmmc_be512_to_bitfield512(ssr);
1391 
1392 #ifdef SDMMC_DEBUG
1393 	if (error == 0)
1394 		sdmmc_dump_data("SSR", ssr, datalen);
1395 #endif
1396 	return error;
1397 }
1398 
1399 static int
1400 sdmmc_mem_decode_ssr(struct sdmmc_softc *sc, struct sdmmc_function *sf,
1401     sdmmc_bitfield512_t *ssr_bitfield)
1402 {
1403 	uint32_t *ssr = (uint32_t *)ssr_bitfield;
1404 	int speed_class_val, bus_width_val;
1405 
1406 	const int bus_width = SSR_DAT_BUS_WIDTH(ssr);
1407 	const int speed_class = SSR_SPEED_CLASS(ssr);
1408 	const int uhs_speed_grade = SSR_UHS_SPEED_GRADE(ssr);
1409 	const int video_speed_class = SSR_VIDEO_SPEED_CLASS(ssr);
1410 	const int app_perf_class = SSR_APP_PERF_CLASS(ssr);
1411 
1412 	switch (speed_class) {
1413 	case SSR_SPEED_CLASS_0:	speed_class_val = 0; break;
1414 	case SSR_SPEED_CLASS_2: speed_class_val = 2; break;
1415 	case SSR_SPEED_CLASS_4: speed_class_val = 4; break;
1416 	case SSR_SPEED_CLASS_6: speed_class_val = 6; break;
1417 	case SSR_SPEED_CLASS_10: speed_class_val = 10; break;
1418 	default: speed_class_val = -1; break;
1419 	}
1420 
1421 	switch (bus_width) {
1422 	case SSR_DAT_BUS_WIDTH_1: bus_width_val = 1; break;
1423 	case SSR_DAT_BUS_WIDTH_4: bus_width_val = 4; break;
1424 	default: bus_width_val = -1;
1425 	}
1426 
1427 	/*
1428 	 * Log card status
1429 	 */
1430 	device_printf(sc->sc_dev, "SD card status:");
1431 	if (bus_width_val != -1)
1432 		printf(" %d-bit", bus_width_val);
1433 	else
1434 		printf(" unknown bus width");
1435 	if (speed_class_val != -1)
1436 		printf(", C%d", speed_class_val);
1437 	if (uhs_speed_grade)
1438 		printf(", U%d", uhs_speed_grade);
1439 	if (video_speed_class)
1440 		printf(", V%d", video_speed_class);
1441 	if (app_perf_class)
1442 		printf(", A%d", app_perf_class);
1443 	printf("\n");
1444 
1445 	return 0;
1446 }
1447 
1448 static int
1449 sdmmc_mem_send_cxd_data(struct sdmmc_softc *sc, int opcode, void *data,
1450     size_t datalen)
1451 {
1452 	struct sdmmc_command cmd;
1453 	bus_dma_segment_t ds[1];
1454 	void *ptr = NULL;
1455 	int rseg;
1456 	int error = 0;
1457 
1458 	if (ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
1459 		error = bus_dmamem_alloc(sc->sc_dmat, datalen, PAGE_SIZE, 0, ds,
1460 		    1, &rseg, BUS_DMA_NOWAIT);
1461 		if (error)
1462 			goto out;
1463 		error = bus_dmamem_map(sc->sc_dmat, ds, 1, datalen, &ptr,
1464 		    BUS_DMA_NOWAIT);
1465 		if (error)
1466 			goto dmamem_free;
1467 		error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmap, ptr, datalen,
1468 		    NULL, BUS_DMA_NOWAIT|BUS_DMA_STREAMING|BUS_DMA_READ);
1469 		if (error)
1470 			goto dmamem_unmap;
1471 
1472 		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap, 0, datalen,
1473 		    BUS_DMASYNC_PREREAD);
1474 	} else {
1475 		ptr = malloc(datalen, M_DEVBUF, M_NOWAIT | M_ZERO);
1476 		if (ptr == NULL)
1477 			goto out;
1478 	}
1479 
1480 	memset(&cmd, 0, sizeof(cmd));
1481 	cmd.c_data = ptr;
1482 	cmd.c_datalen = datalen;
1483 	cmd.c_blklen = datalen;
1484 	cmd.c_opcode = opcode;
1485 	cmd.c_arg = 0;
1486 	cmd.c_flags = SCF_CMD_ADTC | SCF_CMD_READ | SCF_RSP_SPI_R1;
1487 	if (opcode == MMC_SEND_EXT_CSD)
1488 		SET(cmd.c_flags, SCF_RSP_R1);
1489 	else
1490 		SET(cmd.c_flags, SCF_RSP_R2);
1491 	if (ISSET(sc->sc_caps, SMC_CAPS_DMA))
1492 		cmd.c_dmamap = sc->sc_dmap;
1493 
1494 	error = sdmmc_mmc_command(sc, &cmd);
1495 	if (error == 0) {
1496 		if (ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
1497 			bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap, 0, datalen,
1498 			    BUS_DMASYNC_POSTREAD);
1499 		}
1500 		memcpy(data, ptr, datalen);
1501 #ifdef SDMMC_DEBUG
1502 		sdmmc_dump_data("CXD", data, datalen);
1503 #endif
1504 	}
1505 
1506 out:
1507 	if (ptr != NULL) {
1508 		if (ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
1509 			bus_dmamap_unload(sc->sc_dmat, sc->sc_dmap);
1510 dmamem_unmap:
1511 			bus_dmamem_unmap(sc->sc_dmat, ptr, datalen);
1512 dmamem_free:
1513 			bus_dmamem_free(sc->sc_dmat, ds, rseg);
1514 		} else {
1515 			free(ptr, M_DEVBUF);
1516 		}
1517 	}
1518 	return error;
1519 }
1520 
1521 static int
1522 sdmmc_set_bus_width(struct sdmmc_function *sf, int width)
1523 {
1524 	struct sdmmc_softc *sc = sf->sc;
1525 	struct sdmmc_command cmd;
1526 	int error;
1527 
1528 	if (ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE))
1529 		return ENODEV;
1530 
1531 	memset(&cmd, 0, sizeof(cmd));
1532 	cmd.c_opcode = SD_APP_SET_BUS_WIDTH;
1533 	cmd.c_flags = SCF_RSP_R1 | SCF_CMD_AC;
1534 
1535 	switch (width) {
1536 	case 1:
1537 		cmd.c_arg = SD_ARG_BUS_WIDTH_1;
1538 		break;
1539 
1540 	case 4:
1541 		cmd.c_arg = SD_ARG_BUS_WIDTH_4;
1542 		break;
1543 
1544 	default:
1545 		return EINVAL;
1546 	}
1547 
1548 	error = sdmmc_app_command(sc, sf, &cmd);
1549 	if (error == 0)
1550 		error = sdmmc_chip_bus_width(sc->sc_sct, sc->sc_sch, width);
1551 	return error;
1552 }
1553 
1554 static int
1555 sdmmc_mem_sd_switch(struct sdmmc_function *sf, int mode, int group,
1556     int function, sdmmc_bitfield512_t *status)
1557 {
1558 	struct sdmmc_softc *sc = sf->sc;
1559 	struct sdmmc_command cmd;
1560 	bus_dma_segment_t ds[1];
1561 	void *ptr = NULL;
1562 	int gsft, rseg, error = 0;
1563 	const int statlen = 64;
1564 
1565 	if (sf->scr.sd_spec >= SCR_SD_SPEC_VER_1_10 &&
1566 	    !ISSET(sf->csd.ccc, SD_CSD_CCC_SWITCH))
1567 		return EINVAL;
1568 
1569 	if (group <= 0 || group > 6 ||
1570 	    function < 0 || function > 15)
1571 		return EINVAL;
1572 
1573 	gsft = (group - 1) << 2;
1574 
1575 	if (ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
1576 		error = bus_dmamem_alloc(sc->sc_dmat, statlen, PAGE_SIZE, 0, ds,
1577 		    1, &rseg, BUS_DMA_NOWAIT);
1578 		if (error)
1579 			goto out;
1580 		error = bus_dmamem_map(sc->sc_dmat, ds, 1, statlen, &ptr,
1581 		    BUS_DMA_NOWAIT);
1582 		if (error)
1583 			goto dmamem_free;
1584 		error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmap, ptr, statlen,
1585 		    NULL, BUS_DMA_NOWAIT|BUS_DMA_STREAMING|BUS_DMA_READ);
1586 		if (error)
1587 			goto dmamem_unmap;
1588 
1589 		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap, 0, statlen,
1590 		    BUS_DMASYNC_PREREAD);
1591 	} else {
1592 		ptr = malloc(statlen, M_DEVBUF, M_NOWAIT | M_ZERO);
1593 		if (ptr == NULL)
1594 			goto out;
1595 	}
1596 
1597 	memset(&cmd, 0, sizeof(cmd));
1598 	cmd.c_data = ptr;
1599 	cmd.c_datalen = statlen;
1600 	cmd.c_blklen = statlen;
1601 	cmd.c_opcode = SD_SEND_SWITCH_FUNC;
1602 	cmd.c_arg =
1603 	    (!!mode << 31) | (function << gsft) | (0x00ffffff & ~(0xf << gsft));
1604 	cmd.c_flags = SCF_CMD_ADTC | SCF_CMD_READ | SCF_RSP_R1 | SCF_RSP_SPI_R1;
1605 	if (ISSET(sc->sc_caps, SMC_CAPS_DMA))
1606 		cmd.c_dmamap = sc->sc_dmap;
1607 
1608 	error = sdmmc_mmc_command(sc, &cmd);
1609 	if (error == 0) {
1610 		if (ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
1611 			bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap, 0, statlen,
1612 			    BUS_DMASYNC_POSTREAD);
1613 		}
1614 		memcpy(status, ptr, statlen);
1615 	}
1616 
1617 out:
1618 	if (ptr != NULL) {
1619 		if (ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
1620 			bus_dmamap_unload(sc->sc_dmat, sc->sc_dmap);
1621 dmamem_unmap:
1622 			bus_dmamem_unmap(sc->sc_dmat, ptr, statlen);
1623 dmamem_free:
1624 			bus_dmamem_free(sc->sc_dmat, ds, rseg);
1625 		} else {
1626 			free(ptr, M_DEVBUF);
1627 		}
1628 	}
1629 
1630 	if (error == 0)
1631 		sdmmc_be512_to_bitfield512(status);
1632 
1633 	return error;
1634 }
1635 
/*
 * Write one byte of the eMMC EXT_CSD register via CMD6 (SWITCH), using
 * the WRITE_BYTE access mode: 'set', 'index' and 'value' are packed
 * into the command argument.  When 'poll' is set the command (and any
 * follow-up status polling) is issued with SCF_POLL.
 *
 * For cache flushes and for switches into HS200-or-faster timing modes
 * (value >= 2) the card can stay busy past the R1b response, so CMD13
 * (SEND_STATUS) is polled until READY_FOR_DATA; a reported
 * SWITCH_ERROR fails the operation with EINVAL.
 */
static int
sdmmc_mem_mmc_switch(struct sdmmc_function *sf, uint8_t set, uint8_t index,
    uint8_t value, bool poll)
{
	struct sdmmc_softc *sc = sf->sc;
	struct sdmmc_command cmd;
	int error;

	memset(&cmd, 0, sizeof(cmd));
	cmd.c_opcode = MMC_SWITCH;
	cmd.c_arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
	    (index << 16) | (value << 8) | set;
	cmd.c_flags = SCF_RSP_SPI_R1B | SCF_RSP_R1B | SCF_CMD_AC;

	if (poll)
		cmd.c_flags |= SCF_POLL;

	error = sdmmc_mmc_command(sc, &cmd);
	if (error)
		return error;

	/* Only these switches require waiting out the busy state. */
	if (index == EXT_CSD_FLUSH_CACHE || (index == EXT_CSD_HS_TIMING && value >= 2)) {
		do {
			memset(&cmd, 0, sizeof(cmd));
			cmd.c_opcode = MMC_SEND_STATUS;
			if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE))
				cmd.c_arg = MMC_ARG_RCA(sf->rca);
			cmd.c_flags = SCF_CMD_AC | SCF_RSP_R1 | SCF_RSP_SPI_R2;
			if (poll)
				cmd.c_flags |= SCF_POLL;
			error = sdmmc_mmc_command(sc, &cmd);
			if (error)
				break;
			if (ISSET(MMC_R1(cmd.c_resp), MMC_R1_SWITCH_ERROR)) {
				aprint_error_dev(sc->sc_dev, "switch error\n");
				return EINVAL;
			}
			/* XXX time out */
		} while (!ISSET(MMC_R1(cmd.c_resp), MMC_R1_READY_FOR_DATA));

		if (error) {
			aprint_error_dev(sc->sc_dev,
			    "error waiting for data ready after switch command: %d\n",
			    error);
			return error;
		}
	}

	return 0;
}
1686 
1687 /*
1688  * SPI mode function
1689  */
1690 static int
1691 sdmmc_mem_spi_read_ocr(struct sdmmc_softc *sc, uint32_t hcs, uint32_t *card_ocr)
1692 {
1693 	struct sdmmc_command cmd;
1694 	int error;
1695 
1696 	memset(&cmd, 0, sizeof(cmd));
1697 	cmd.c_opcode = MMC_READ_OCR;
1698 	cmd.c_arg = hcs ? MMC_OCR_HCS : 0;
1699 	cmd.c_flags = SCF_RSP_SPI_R3;
1700 
1701 	error = sdmmc_mmc_command(sc, &cmd);
1702 	if (error == 0 && card_ocr != NULL)
1703 		*card_ocr = cmd.c_resp[1];
1704 	DPRINTF(("%s: sdmmc_mem_spi_read_ocr: error=%d, ocr=%#x\n",
1705 	    SDMMCDEVNAME(sc), error, cmd.c_resp[1]));
1706 	return error;
1707 }
1708 
1709 /*
1710  * read/write function
1711  */
1712 /* read */
1713 static int
1714 sdmmc_mem_single_read_block(struct sdmmc_function *sf, uint32_t blkno,
1715     u_char *data, size_t datalen)
1716 {
1717 	struct sdmmc_softc *sc = sf->sc;
1718 	int error = 0;
1719 	int i;
1720 
1721 	KASSERT((datalen % SDMMC_SECTOR_SIZE) == 0);
1722 	KASSERT(!ISSET(sc->sc_caps, SMC_CAPS_DMA));
1723 
1724 	for (i = 0; i < datalen / SDMMC_SECTOR_SIZE; i++) {
1725 		error = sdmmc_mem_read_block_subr(sf, sc->sc_dmap, blkno + i,
1726 		    data + i * SDMMC_SECTOR_SIZE, SDMMC_SECTOR_SIZE);
1727 		if (error)
1728 			break;
1729 	}
1730 	return error;
1731 }
1732 
1733 /*
1734  * Simulate multi-segment dma transfer.
1735  */
1736 static int
1737 sdmmc_mem_single_segment_dma_read_block(struct sdmmc_function *sf,
1738     uint32_t blkno, u_char *data, size_t datalen)
1739 {
1740 	struct sdmmc_softc *sc = sf->sc;
1741 	bool use_bbuf = false;
1742 	int error = 0;
1743 	int i;
1744 
1745 	for (i = 0; i < sc->sc_dmap->dm_nsegs; i++) {
1746 		size_t len = sc->sc_dmap->dm_segs[i].ds_len;
1747 		if ((len % SDMMC_SECTOR_SIZE) != 0) {
1748 			use_bbuf = true;
1749 			break;
1750 		}
1751 	}
1752 	if (use_bbuf) {
1753 		bus_dmamap_sync(sc->sc_dmat, sf->bbuf_dmap, 0, datalen,
1754 		    BUS_DMASYNC_PREREAD);
1755 
1756 		error = sdmmc_mem_read_block_subr(sf, sf->bbuf_dmap,
1757 		    blkno, data, datalen);
1758 		if (error) {
1759 			bus_dmamap_unload(sc->sc_dmat, sf->bbuf_dmap);
1760 			return error;
1761 		}
1762 
1763 		bus_dmamap_sync(sc->sc_dmat, sf->bbuf_dmap, 0, datalen,
1764 		    BUS_DMASYNC_POSTREAD);
1765 
1766 		/* Copy from bounce buffer */
1767 		memcpy(data, sf->bbuf, datalen);
1768 
1769 		return 0;
1770 	}
1771 
1772 	for (i = 0; i < sc->sc_dmap->dm_nsegs; i++) {
1773 		size_t len = sc->sc_dmap->dm_segs[i].ds_len;
1774 
1775 		error = bus_dmamap_load(sc->sc_dmat, sf->sseg_dmap,
1776 		    data, len, NULL, BUS_DMA_NOWAIT|BUS_DMA_READ);
1777 		if (error)
1778 			return error;
1779 
1780 		bus_dmamap_sync(sc->sc_dmat, sf->sseg_dmap, 0, len,
1781 		    BUS_DMASYNC_PREREAD);
1782 
1783 		error = sdmmc_mem_read_block_subr(sf, sf->sseg_dmap,
1784 		    blkno, data, len);
1785 		if (error) {
1786 			bus_dmamap_unload(sc->sc_dmat, sf->sseg_dmap);
1787 			return error;
1788 		}
1789 
1790 		bus_dmamap_sync(sc->sc_dmat, sf->sseg_dmap, 0, len,
1791 		    BUS_DMASYNC_POSTREAD);
1792 
1793 		bus_dmamap_unload(sc->sc_dmat, sf->sseg_dmap);
1794 
1795 		blkno += len / SDMMC_SECTOR_SIZE;
1796 		data += len;
1797 	}
1798 	return 0;
1799 }
1800 
/*
 * Low-level block read: select the card (non-SPI), issue CMD17/CMD18
 * depending on the sector count, send CMD12 (STOP_TRANSMISSION) for
 * multi-block reads on controllers without auto-stop, then poll CMD13
 * until the card reports READY_FOR_DATA (non-SPI only).  'dmap' is the
 * (already loaded and synced) DMA map used when the controller does DMA.
 */
static int
sdmmc_mem_read_block_subr(struct sdmmc_function *sf, bus_dmamap_t dmap,
    uint32_t blkno, u_char *data, size_t datalen)
{
	struct sdmmc_softc *sc = sf->sc;
	struct sdmmc_command cmd;
	int error;

	if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
		error = sdmmc_select_card(sc, sf);
		if (error)
			goto out;
	}

	memset(&cmd, 0, sizeof(cmd));
	cmd.c_data = data;
	cmd.c_datalen = datalen;
	cmd.c_blklen = SDMMC_SECTOR_SIZE;
	cmd.c_opcode = (cmd.c_datalen / cmd.c_blklen) > 1 ?
	    MMC_READ_BLOCK_MULTIPLE : MMC_READ_BLOCK_SINGLE;
	cmd.c_arg = blkno;
	/* Standard-capacity cards address by byte, not by sector. */
	if (!ISSET(sf->flags, SFF_SDHC))
		cmd.c_arg <<= SDMMC_SECTOR_SIZE_SB;
	cmd.c_flags = SCF_CMD_ADTC | SCF_CMD_READ | SCF_RSP_R1 | SCF_RSP_SPI_R1;
	if (ISSET(sf->flags, SFF_SDHC))
		cmd.c_flags |= SCF_XFER_SDHC;
	if (ISSET(sc->sc_caps, SMC_CAPS_DMA))
		cmd.c_dmamap = dmap;

	sc->sc_ev_xfer.ev_count++;

	error = sdmmc_mmc_command(sc, &cmd);
	if (error) {
		sc->sc_ev_xfer_error.ev_count++;
		goto out;
	}

	/* Bucket transfer sizes of 512B..64KB powers of two for evcnt. */
	const u_int counter = __builtin_ctz(cmd.c_datalen);
	if (counter >= 9 && counter <= 16) {
		sc->sc_ev_xfer_aligned[counter - 9].ev_count++;
	} else {
		sc->sc_ev_xfer_unaligned.ev_count++;
	}

	if (!ISSET(sc->sc_caps, SMC_CAPS_AUTO_STOP)) {
		if (cmd.c_opcode == MMC_READ_BLOCK_MULTIPLE) {
			memset(&cmd, 0, sizeof cmd);
			cmd.c_opcode = MMC_STOP_TRANSMISSION;
			cmd.c_arg = MMC_ARG_RCA(sf->rca);
			cmd.c_flags = SCF_CMD_AC | SCF_RSP_R1B | SCF_RSP_SPI_R1B;
			error = sdmmc_mmc_command(sc, &cmd);
			if (error)
				goto out;
		}
	}

	if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
		/* Wait for the card to finish before returning. */
		do {
			memset(&cmd, 0, sizeof(cmd));
			cmd.c_opcode = MMC_SEND_STATUS;
			if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE))
				cmd.c_arg = MMC_ARG_RCA(sf->rca);
			cmd.c_flags = SCF_CMD_AC | SCF_RSP_R1 | SCF_RSP_SPI_R2;
			error = sdmmc_mmc_command(sc, &cmd);
			if (error)
				break;
			/* XXX time out */
		} while (!ISSET(MMC_R1(cmd.c_resp), MMC_R1_READY_FOR_DATA));
	}

out:
	return error;
}
1874 
/*
 * Public entry point for reading 'datalen' bytes at sector 'blkno'
 * into 'data'.  Dispatches on controller capabilities: single-block
 * only, PIO, or DMA (loading sc->sc_dmap and falling back to the
 * simulated multi-segment path when the map has several segments and
 * the controller supports only one).  Serialized by sc_mtx.
 */
int
sdmmc_mem_read_block(struct sdmmc_function *sf, uint32_t blkno, u_char *data,
    size_t datalen)
{
	struct sdmmc_softc *sc = sf->sc;
	int error;

	SDMMC_LOCK(sc);
	mutex_enter(&sc->sc_mtx);

	if (ISSET(sc->sc_caps, SMC_CAPS_SINGLE_ONLY)) {
		error = sdmmc_mem_single_read_block(sf, blkno, data, datalen);
		goto out;
	}

	if (!ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
		error = sdmmc_mem_read_block_subr(sf, sc->sc_dmap, blkno, data,
		    datalen);
		goto out;
	}

	/* DMA transfer */
	error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmap, data, datalen, NULL,
	    BUS_DMA_NOWAIT|BUS_DMA_READ);
	if (error)
		goto out;

#ifdef SDMMC_DEBUG
	printf("data=%p, datalen=%zu\n", data, datalen);
	for (int i = 0; i < sc->sc_dmap->dm_nsegs; i++) {
		printf("seg#%d: addr=%#lx, size=%#lx\n", i,
		    (u_long)sc->sc_dmap->dm_segs[i].ds_addr,
		    (u_long)sc->sc_dmap->dm_segs[i].ds_len);
	}
#endif

	/* Controller takes one segment at a time; simulate multi-segment. */
	if (sc->sc_dmap->dm_nsegs > 1
	    && !ISSET(sc->sc_caps, SMC_CAPS_MULTI_SEG_DMA)) {
		error = sdmmc_mem_single_segment_dma_read_block(sf, blkno,
		    data, datalen);
		goto unload;
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap, 0, datalen,
	    BUS_DMASYNC_PREREAD);

	error = sdmmc_mem_read_block_subr(sf, sc->sc_dmap, blkno, data,
	    datalen);
	if (error)
		goto unload;

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap, 0, datalen,
	    BUS_DMASYNC_POSTREAD);
unload:
	bus_dmamap_unload(sc->sc_dmat, sc->sc_dmap);

out:
	mutex_exit(&sc->sc_mtx);
	SDMMC_UNLOCK(sc);

	return error;
}
1937 
1938 /* write */
1939 static int
1940 sdmmc_mem_single_write_block(struct sdmmc_function *sf, uint32_t blkno,
1941     u_char *data, size_t datalen)
1942 {
1943 	struct sdmmc_softc *sc = sf->sc;
1944 	int error = 0;
1945 	int i;
1946 
1947 	KASSERT((datalen % SDMMC_SECTOR_SIZE) == 0);
1948 	KASSERT(!ISSET(sc->sc_caps, SMC_CAPS_DMA));
1949 
1950 	for (i = 0; i < datalen / SDMMC_SECTOR_SIZE; i++) {
1951 		error = sdmmc_mem_write_block_subr(sf, sc->sc_dmap, blkno + i,
1952 		    data + i * SDMMC_SECTOR_SIZE, SDMMC_SECTOR_SIZE);
1953 		if (error)
1954 			break;
1955 	}
1956 	return error;
1957 }
1958 
1959 /*
1960  * Simulate multi-segment dma transfer.
1961  */
1962 static int
1963 sdmmc_mem_single_segment_dma_write_block(struct sdmmc_function *sf,
1964     uint32_t blkno, u_char *data, size_t datalen)
1965 {
1966 	struct sdmmc_softc *sc = sf->sc;
1967 	bool use_bbuf = false;
1968 	int error = 0;
1969 	int i;
1970 
1971 	for (i = 0; i < sc->sc_dmap->dm_nsegs; i++) {
1972 		size_t len = sc->sc_dmap->dm_segs[i].ds_len;
1973 		if ((len % SDMMC_SECTOR_SIZE) != 0) {
1974 			use_bbuf = true;
1975 			break;
1976 		}
1977 	}
1978 	if (use_bbuf) {
1979 		/* Copy to bounce buffer */
1980 		memcpy(sf->bbuf, data, datalen);
1981 
1982 		bus_dmamap_sync(sc->sc_dmat, sf->bbuf_dmap, 0, datalen,
1983 		    BUS_DMASYNC_PREWRITE);
1984 
1985 		error = sdmmc_mem_write_block_subr(sf, sf->bbuf_dmap,
1986 		    blkno, data, datalen);
1987 		if (error) {
1988 			bus_dmamap_unload(sc->sc_dmat, sf->bbuf_dmap);
1989 			return error;
1990 		}
1991 
1992 		bus_dmamap_sync(sc->sc_dmat, sf->bbuf_dmap, 0, datalen,
1993 		    BUS_DMASYNC_POSTWRITE);
1994 
1995 		return 0;
1996 	}
1997 
1998 	for (i = 0; i < sc->sc_dmap->dm_nsegs; i++) {
1999 		size_t len = sc->sc_dmap->dm_segs[i].ds_len;
2000 
2001 		error = bus_dmamap_load(sc->sc_dmat, sf->sseg_dmap,
2002 		    data, len, NULL, BUS_DMA_NOWAIT|BUS_DMA_WRITE);
2003 		if (error)
2004 			return error;
2005 
2006 		bus_dmamap_sync(sc->sc_dmat, sf->sseg_dmap, 0, len,
2007 		    BUS_DMASYNC_PREWRITE);
2008 
2009 		error = sdmmc_mem_write_block_subr(sf, sf->sseg_dmap,
2010 		    blkno, data, len);
2011 		if (error) {
2012 			bus_dmamap_unload(sc->sc_dmat, sf->sseg_dmap);
2013 			return error;
2014 		}
2015 
2016 		bus_dmamap_sync(sc->sc_dmat, sf->sseg_dmap, 0, len,
2017 		    BUS_DMASYNC_POSTWRITE);
2018 
2019 		bus_dmamap_unload(sc->sc_dmat, sf->sseg_dmap);
2020 
2021 		blkno += len / SDMMC_SECTOR_SIZE;
2022 		data += len;
2023 	}
2024 
2025 	return error;
2026 }
2027 
/*
 * Low-level block write: select the card (non-SPI), pre-erase hint via
 * ACMD23 for SD multi-block writes, issue CMD24/CMD25 depending on the
 * sector count, send CMD12 (STOP_TRANSMISSION) for multi-block writes
 * on controllers without auto-stop, then poll CMD13 until the card
 * reports READY_FOR_DATA (non-SPI only).  'dmap' is the (already
 * loaded and synced) DMA map used when the controller does DMA.
 */
static int
sdmmc_mem_write_block_subr(struct sdmmc_function *sf, bus_dmamap_t dmap,
    uint32_t blkno, u_char *data, size_t datalen)
{
	struct sdmmc_softc *sc = sf->sc;
	struct sdmmc_command cmd;
	int error;

	if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
		error = sdmmc_select_card(sc, sf);
		if (error)
			goto out;
	}

	const int nblk = howmany(datalen, SDMMC_SECTOR_SIZE);
	if (ISSET(sc->sc_flags, SMF_SD_MODE) && nblk > 1) {
		/* Set the number of write blocks to be pre-erased */
		memset(&cmd, 0, sizeof(cmd));
		cmd.c_opcode = SD_APP_SET_WR_BLK_ERASE_COUNT;
		cmd.c_flags = SCF_RSP_R1 | SCF_RSP_SPI_R1 | SCF_CMD_AC;
		cmd.c_arg = nblk;
		error = sdmmc_app_command(sc, sf, &cmd);
		if (error)
			goto out;
	}

	memset(&cmd, 0, sizeof(cmd));
	cmd.c_data = data;
	cmd.c_datalen = datalen;
	cmd.c_blklen = SDMMC_SECTOR_SIZE;
	cmd.c_opcode = (cmd.c_datalen / cmd.c_blklen) > 1 ?
	    MMC_WRITE_BLOCK_MULTIPLE : MMC_WRITE_BLOCK_SINGLE;
	cmd.c_arg = blkno;
	/* Standard-capacity cards address by byte, not by sector. */
	if (!ISSET(sf->flags, SFF_SDHC))
		cmd.c_arg <<= SDMMC_SECTOR_SIZE_SB;
	cmd.c_flags = SCF_CMD_ADTC | SCF_RSP_R1;
	if (ISSET(sf->flags, SFF_SDHC))
		cmd.c_flags |= SCF_XFER_SDHC;
	if (ISSET(sc->sc_caps, SMC_CAPS_DMA))
		cmd.c_dmamap = dmap;

	sc->sc_ev_xfer.ev_count++;

	error = sdmmc_mmc_command(sc, &cmd);
	if (error) {
		sc->sc_ev_xfer_error.ev_count++;
		goto out;
	}

	/* Bucket transfer sizes of 512B..64KB powers of two for evcnt. */
	const u_int counter = __builtin_ctz(cmd.c_datalen);
	if (counter >= 9 && counter <= 16) {
		sc->sc_ev_xfer_aligned[counter - 9].ev_count++;
	} else {
		sc->sc_ev_xfer_unaligned.ev_count++;
	}

	if (!ISSET(sc->sc_caps, SMC_CAPS_AUTO_STOP)) {
		if (cmd.c_opcode == MMC_WRITE_BLOCK_MULTIPLE) {
			memset(&cmd, 0, sizeof(cmd));
			cmd.c_opcode = MMC_STOP_TRANSMISSION;
			cmd.c_flags = SCF_CMD_AC | SCF_RSP_R1B | SCF_RSP_SPI_R1B;
			error = sdmmc_mmc_command(sc, &cmd);
			if (error)
				goto out;
		}
	}

	if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
		/* Wait for the card to finish programming before returning. */
		do {
			memset(&cmd, 0, sizeof(cmd));
			cmd.c_opcode = MMC_SEND_STATUS;
			if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE))
				cmd.c_arg = MMC_ARG_RCA(sf->rca);
			cmd.c_flags = SCF_CMD_AC | SCF_RSP_R1 | SCF_RSP_SPI_R2;
			error = sdmmc_mmc_command(sc, &cmd);
			if (error)
				break;
			/* XXX time out */
		} while (!ISSET(MMC_R1(cmd.c_resp), MMC_R1_READY_FOR_DATA));
	}

out:
	return error;
}
2112 
2113 int
2114 sdmmc_mem_write_block(struct sdmmc_function *sf, uint32_t blkno, u_char *data,
2115     size_t datalen)
2116 {
2117 	struct sdmmc_softc *sc = sf->sc;
2118 	int error;
2119 
2120 	SDMMC_LOCK(sc);
2121 	mutex_enter(&sc->sc_mtx);
2122 
2123 	if (sdmmc_chip_write_protect(sc->sc_sct, sc->sc_sch)) {
2124 		aprint_normal_dev(sc->sc_dev, "write-protected\n");
2125 		error = EIO;
2126 		goto out;
2127 	}
2128 
2129 	if (ISSET(sc->sc_caps, SMC_CAPS_SINGLE_ONLY)) {
2130 		error = sdmmc_mem_single_write_block(sf, blkno, data, datalen);
2131 		goto out;
2132 	}
2133 
2134 	if (!ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
2135 		error = sdmmc_mem_write_block_subr(sf, sc->sc_dmap, blkno, data,
2136 		    datalen);
2137 		goto out;
2138 	}
2139 
2140 	/* DMA transfer */
2141 	error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmap, data, datalen, NULL,
2142 	    BUS_DMA_NOWAIT|BUS_DMA_WRITE);
2143 	if (error)
2144 		goto out;
2145 
2146 #ifdef SDMMC_DEBUG
2147 	aprint_normal_dev(sc->sc_dev, "%s: data=%p, datalen=%zu\n",
2148 	    __func__, data, datalen);
2149 	for (int i = 0; i < sc->sc_dmap->dm_nsegs; i++) {
2150 		aprint_normal_dev(sc->sc_dev,
2151 		    "%s: seg#%d: addr=%#lx, size=%#lx\n", __func__, i,
2152 		    (u_long)sc->sc_dmap->dm_segs[i].ds_addr,
2153 		    (u_long)sc->sc_dmap->dm_segs[i].ds_len);
2154 	}
2155 #endif
2156 
2157 	if (sc->sc_dmap->dm_nsegs > 1
2158 	    && !ISSET(sc->sc_caps, SMC_CAPS_MULTI_SEG_DMA)) {
2159 		error = sdmmc_mem_single_segment_dma_write_block(sf, blkno,
2160 		    data, datalen);
2161 		goto unload;
2162 	}
2163 
2164 	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap, 0, datalen,
2165 	    BUS_DMASYNC_PREWRITE);
2166 
2167 	error = sdmmc_mem_write_block_subr(sf, sc->sc_dmap, blkno, data,
2168 	    datalen);
2169 	if (error)
2170 		goto unload;
2171 
2172 	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap, 0, datalen,
2173 	    BUS_DMASYNC_POSTWRITE);
2174 unload:
2175 	bus_dmamap_unload(sc->sc_dmat, sc->sc_dmap);
2176 
2177 out:
2178 	mutex_exit(&sc->sc_mtx);
2179 	SDMMC_UNLOCK(sc);
2180 
2181 	return error;
2182 }
2183 
2184 int
2185 sdmmc_mem_discard(struct sdmmc_function *sf, uint32_t sblkno, uint32_t eblkno)
2186 {
2187 	struct sdmmc_softc *sc = sf->sc;
2188 	struct sdmmc_command cmd;
2189 	int error;
2190 
2191 	if (ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE))
2192 		return ENODEV;	/* XXX not tested */
2193 
2194 	if (eblkno < sblkno)
2195 		return EINVAL;
2196 
2197 	SDMMC_LOCK(sc);
2198 	mutex_enter(&sc->sc_mtx);
2199 
2200 	/* Set the address of the first write block to be erased */
2201 	memset(&cmd, 0, sizeof(cmd));
2202 	cmd.c_opcode = ISSET(sc->sc_flags, SMF_SD_MODE) ?
2203 	    SD_ERASE_WR_BLK_START : MMC_TAG_ERASE_GROUP_START;
2204 	cmd.c_arg = sblkno;
2205 	if (!ISSET(sf->flags, SFF_SDHC))
2206 		cmd.c_arg <<= SDMMC_SECTOR_SIZE_SB;
2207 	cmd.c_flags = SCF_CMD_AC | SCF_RSP_R1;
2208 	error = sdmmc_mmc_command(sc, &cmd);
2209 	if (error)
2210 		goto out;
2211 
2212 	/* Set the address of the last write block to be erased */
2213 	memset(&cmd, 0, sizeof(cmd));
2214 	cmd.c_opcode = ISSET(sc->sc_flags, SMF_SD_MODE) ?
2215 	    SD_ERASE_WR_BLK_END : MMC_TAG_ERASE_GROUP_END;
2216 	cmd.c_arg = eblkno;
2217 	if (!ISSET(sf->flags, SFF_SDHC))
2218 		cmd.c_arg <<= SDMMC_SECTOR_SIZE_SB;
2219 	cmd.c_flags = SCF_CMD_AC | SCF_RSP_R1;
2220 	error = sdmmc_mmc_command(sc, &cmd);
2221 	if (error)
2222 		goto out;
2223 
2224 	/* Start the erase operation */
2225 	memset(&cmd, 0, sizeof(cmd));
2226 	cmd.c_opcode = MMC_ERASE;
2227 	cmd.c_flags = SCF_CMD_AC | SCF_RSP_R1B;
2228 	error = sdmmc_mmc_command(sc, &cmd);
2229 	if (error)
2230 		goto out;
2231 
2232 out:
2233 	mutex_exit(&sc->sc_mtx);
2234 	SDMMC_UNLOCK(sc);
2235 
2236 #ifdef SDMMC_DEBUG
2237 	device_printf(sc->sc_dev, "discard blk %u-%u error %d\n",
2238 	    sblkno, eblkno, error);
2239 #endif
2240 
2241 	return error;
2242 }
2243 
2244 int
2245 sdmmc_mem_flush_cache(struct sdmmc_function *sf, bool poll)
2246 {
2247 	struct sdmmc_softc *sc = sf->sc;
2248 	int error;
2249 
2250 	if (!ISSET(sf->flags, SFF_CACHE_ENABLED))
2251 		return 0;
2252 
2253 	SDMMC_LOCK(sc);
2254 	mutex_enter(&sc->sc_mtx);
2255 
2256 	error = sdmmc_mem_mmc_switch(sf,
2257 	    EXT_CSD_CMD_SET_NORMAL, EXT_CSD_FLUSH_CACHE,
2258 	    EXT_CSD_FLUSH_CACHE_FLUSH, poll);
2259 
2260 	mutex_exit(&sc->sc_mtx);
2261 	SDMMC_UNLOCK(sc);
2262 
2263 #ifdef SDMMC_DEBUG
2264 	device_printf(sc->sc_dev, "mmc flush cache error %d\n", error);
2265 #endif
2266 
2267 	return error;
2268 }
2269