xref: /netbsd-src/sys/dev/sdmmc/sdmmc_mem.c (revision d909946ca08dceb44d7d0f22ec9488679695d976)
1 /*	$NetBSD: sdmmc_mem.c,v 1.52 2016/08/11 01:33:25 nonaka Exp $	*/
2 /*	$OpenBSD: sdmmc_mem.c,v 1.10 2009/01/09 10:55:22 jsg Exp $	*/
3 
4 /*
5  * Copyright (c) 2006 Uwe Stuehler <uwe@openbsd.org>
6  *
7  * Permission to use, copy, modify, and distribute this software for any
8  * purpose with or without fee is hereby granted, provided that the above
9  * copyright notice and this permission notice appear in all copies.
10  *
11  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 /*-
21  * Copyright (C) 2007, 2008, 2009, 2010 NONAKA Kimihiro <nonaka@netbsd.org>
22  * All rights reserved.
23  *
24  * Redistribution and use in source and binary forms, with or without
25  * modification, are permitted provided that the following conditions
26  * are met:
27  * 1. Redistributions of source code must retain the above copyright
28  *    notice, this list of conditions and the following disclaimer.
29  * 2. Redistributions in binary form must reproduce the above copyright
30  *    notice, this list of conditions and the following disclaimer in the
31  *    documentation and/or other materials provided with the distribution.
32  *
33  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
34  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
35  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
36  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
37  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
38  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
39  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
40  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
41  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
42  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
43  */
44 
45 /* Routines for SD/MMC memory cards. */
46 
47 #include <sys/cdefs.h>
48 __KERNEL_RCSID(0, "$NetBSD: sdmmc_mem.c,v 1.52 2016/08/11 01:33:25 nonaka Exp $");
49 
50 #ifdef _KERNEL_OPT
51 #include "opt_sdmmc.h"
52 #endif
53 
54 #include <sys/param.h>
55 #include <sys/kernel.h>
56 #include <sys/malloc.h>
57 #include <sys/systm.h>
58 #include <sys/device.h>
59 #include <sys/bitops.h>
60 #include <sys/evcnt.h>
61 
62 #include <dev/sdmmc/sdmmcchip.h>
63 #include <dev/sdmmc/sdmmcreg.h>
64 #include <dev/sdmmc/sdmmcvar.h>
65 
66 #ifdef SDMMC_DEBUG
67 #define DPRINTF(s)	do { printf s; } while (/*CONSTCOND*/0)
68 #else
69 #define DPRINTF(s)	do {} while (/*CONSTCOND*/0)
70 #endif
71 
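/* 512-bit status block returned by SD CMD6 (SWITCH_FUNC), as 16 32-bit words. */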
72 typedef struct { uint32_t _bits[512/32]; } __packed __aligned(4) sdmmc_bitfield512_t;
73 
74 static int sdmmc_mem_sd_init(struct sdmmc_softc *, struct sdmmc_function *);
75 static int sdmmc_mem_mmc_init(struct sdmmc_softc *, struct sdmmc_function *);
76 static int sdmmc_mem_send_cid(struct sdmmc_softc *, sdmmc_response *);
77 static int sdmmc_mem_send_csd(struct sdmmc_softc *, struct sdmmc_function *,
78     sdmmc_response *);
79 static int sdmmc_mem_send_scr(struct sdmmc_softc *, struct sdmmc_function *,
80     uint32_t *scr);
81 static int sdmmc_mem_decode_scr(struct sdmmc_softc *, struct sdmmc_function *);
82 static int sdmmc_mem_send_cxd_data(struct sdmmc_softc *, int, void *, size_t);
83 static int sdmmc_set_bus_width(struct sdmmc_function *, int);
84 static int sdmmc_mem_sd_switch(struct sdmmc_function *, int, int, int, sdmmc_bitfield512_t *);
85 static int sdmmc_mem_mmc_switch(struct sdmmc_function *, uint8_t, uint8_t,
86     uint8_t);
87 static int sdmmc_mem_signal_voltage(struct sdmmc_softc *, int);
88 static int sdmmc_mem_spi_read_ocr(struct sdmmc_softc *, uint32_t, uint32_t *);
89 static int sdmmc_mem_single_read_block(struct sdmmc_function *, uint32_t,
90     u_char *, size_t);
91 static int sdmmc_mem_single_write_block(struct sdmmc_function *, uint32_t,
92     u_char *, size_t);
93 static int sdmmc_mem_single_segment_dma_read_block(struct sdmmc_function *,
94     uint32_t, u_char *, size_t);
95 static int sdmmc_mem_single_segment_dma_write_block(struct sdmmc_function *,
96     uint32_t, u_char *, size_t);
97 static int sdmmc_mem_read_block_subr(struct sdmmc_function *, bus_dmamap_t,
98     uint32_t, u_char *, size_t);
99 static int sdmmc_mem_write_block_subr(struct sdmmc_function *, bus_dmamap_t,
100     uint32_t, u_char *, size_t);
101 
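/*
 * SD CMD6 group 1 (access mode) functions: the host capability bit
 * required to use each mode and its maximum clock rate in kHz.
 */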
102 static const struct {
103 	const char *name;
104 	int v;
105 	int freq;
106 } switch_group0_functions[] = {
107 	/* Default/SDR12 */
108 	{ "Default/SDR12",	 0,			 25000 },
109 
110 	/* High-Speed/SDR25 */
111 	{ "High-Speed/SDR25",	SMC_CAPS_SD_HIGHSPEED,	 50000 },
112 
113 	/* SDR50 */
114 	{ "SDR50",		SMC_CAPS_UHS_SDR50,	100000 },
115 
116 	/* SDR104 */
117 	{ "SDR104",		SMC_CAPS_UHS_SDR104,	208000 },
118 
119 	/* DDR50 */
120 	{ "DDR50",		SMC_CAPS_UHS_DDR50,	 50000 },
121 };
122 
123 /*
124  * Initialize SD/MMC memory cards and memory in SDIO "combo" cards.
125  */
126 int
127 sdmmc_mem_enable(struct sdmmc_softc *sc)
128 {
129 	uint32_t host_ocr;
130 	uint32_t card_ocr;
131 	uint32_t new_ocr;
132 	uint32_t ocr = 0;
133 	int error;
134 
135 	SDMMC_LOCK(sc);
136 
137 	/* Set host mode to SD "combo" card or SD memory-only. */
138 	CLR(sc->sc_flags, SMF_UHS_MODE);
139 	SET(sc->sc_flags, SMF_SD_MODE|SMF_MEM_MODE);
140 
141 	if (ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE))
142 		sdmmc_spi_chip_initialize(sc->sc_spi_sct, sc->sc_sch);
143 
144 	/* Reset memory (*must* do that before CMD55 or CMD1). */
145 	sdmmc_go_idle_state(sc);
146 
147 	if (ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
148 		/* Check SD Ver.2 */
149 		error = sdmmc_mem_send_if_cond(sc, 0x1aa, &card_ocr);
150 		if (error == 0 && card_ocr == 0x1aa)
151 			SET(ocr, MMC_OCR_HCS);
152 	}
153 
154 	/*
155 	 * Read the memory OCR value by issuing CMD55 followed by
156 	 * ACMD41; only memory-capable SD cards answer this sequence.
157 	 * MMC cards will not respond to CMD55 or ACMD41, which is
158 	 * how we distinguish them from SD cards.
159 	 */
160 mmc_mode:
161 	error = sdmmc_mem_send_op_cond(sc,
162 	    ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE) ? ocr : 0, &card_ocr);
163 	if (error) {
164 		if (ISSET(sc->sc_flags, SMF_SD_MODE) &&
165 		    !ISSET(sc->sc_flags, SMF_IO_MODE)) {
166 			/* Not an SD card, switch to MMC mode. */
167 			DPRINTF(("%s: switch to MMC mode\n", SDMMCDEVNAME(sc)));
168 			CLR(sc->sc_flags, SMF_SD_MODE);
169 			goto mmc_mode;
170 		}
171 		if (!ISSET(sc->sc_flags, SMF_SD_MODE)) {
172 			DPRINTF(("%s: couldn't read memory OCR\n",
173 			    SDMMCDEVNAME(sc)));
174 			goto out;
175 		} else {
176 			/* Not a "combo" card. */
177 			CLR(sc->sc_flags, SMF_MEM_MODE);
178 			error = 0;
179 			goto out;
180 		}
181 	}
182 	if (ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
183 		/* get card OCR */
184 		error = sdmmc_mem_spi_read_ocr(sc, ocr, &card_ocr);
185 		if (error) {
186 			DPRINTF(("%s: couldn't read SPI memory OCR\n",
187 			    SDMMCDEVNAME(sc)));
188 			goto out;
189 		}
190 	}
191 
192 	/* Set the lowest voltage supported by the card and host. */
193 	host_ocr = sdmmc_chip_host_ocr(sc->sc_sct, sc->sc_sch);
194 	error = sdmmc_set_bus_power(sc, host_ocr, card_ocr);
195 	if (error) {
196 		DPRINTF(("%s: couldn't supply voltage requested by card\n",
197 		    SDMMCDEVNAME(sc)));
198 		goto out;
199 	}
200 
201 	DPRINTF(("%s: host_ocr 0x%08x\n", SDMMCDEVNAME(sc), host_ocr));
202 	DPRINTF(("%s: card_ocr 0x%08x\n", SDMMCDEVNAME(sc), card_ocr));
203 
204 	host_ocr &= card_ocr; /* only allow the common voltages */
205 	if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
206 		if (ISSET(sc->sc_flags, SMF_SD_MODE)) {
207 			/* Tell the card(s) to enter the idle state (again). */
208 			sdmmc_go_idle_state(sc);
209 			/* Check SD Ver.2 */
210 			error = sdmmc_mem_send_if_cond(sc, 0x1aa, &card_ocr);
211 			if (error == 0 && card_ocr == 0x1aa)
212 				SET(ocr, MMC_OCR_HCS);
213 
214 			if (sdmmc_chip_host_ocr(sc->sc_sct, sc->sc_sch) & MMC_OCR_S18A)
215 				SET(ocr, MMC_OCR_S18A);
216 		} else {
217 			SET(ocr, MMC_OCR_ACCESS_MODE_SECTOR);
218 		}
219 	}
220 	host_ocr |= ocr;
221 
222 	/* Send the new OCR value until all cards are ready. */
223 	error = sdmmc_mem_send_op_cond(sc, host_ocr, &new_ocr);
224 	if (error) {
225 		DPRINTF(("%s: couldn't send memory OCR\n", SDMMCDEVNAME(sc)));
226 		goto out;
227 	}
228 
229 	if (ISSET(sc->sc_flags, SMF_SD_MODE) && ISSET(new_ocr, MMC_OCR_S18A)) {
230 		/*
231 		 * Card and host support low voltage mode, begin switch
232 		 * sequence.
233 		 */
234 		struct sdmmc_command cmd;
235 		memset(&cmd, 0, sizeof(cmd));
236 		cmd.c_arg = 0;
237 		cmd.c_flags = SCF_CMD_AC | SCF_RSP_R1;
238 		cmd.c_opcode = SD_VOLTAGE_SWITCH;
239 		DPRINTF(("%s: switching card to 1.8V\n", SDMMCDEVNAME(sc)));
240 		error = sdmmc_mmc_command(sc, &cmd);
241 		if (error) {
242 			DPRINTF(("%s: voltage switch command failed\n",
243 			    SDMMCDEVNAME(sc)));
244 			goto out;
245 		}
246 
247 		error = sdmmc_mem_signal_voltage(sc, SDMMC_SIGNAL_VOLTAGE_180);
248 		if (error)
249 			goto out;
250 
251 		SET(sc->sc_flags, SMF_UHS_MODE);
252 	}
253 
254 out:
255 	SDMMC_UNLOCK(sc);
256 
257 	if (error)
258 		printf("%s: %s failed with error %d\n", SDMMCDEVNAME(sc),
259 		    __func__, error);
260 
261 	return error;
262 }
263 
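/*
 * Switch the host's signalling voltage: stop the card clock, change the
 * signal voltage, then restart the clock at the SDR12 rate (25 MHz).
 */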
264 static int
265 sdmmc_mem_signal_voltage(struct sdmmc_softc *sc, int signal_voltage)
266 {
267 	int error;
268 
269 	/*
270 	 * Stop the clock
271 	 */
272 	error = sdmmc_chip_bus_clock(sc->sc_sct, sc->sc_sch,
273 	    SDMMC_SDCLK_OFF, false);
274 	if (error)
275 		goto out;
276 
277 	delay(1000);
278 
279 	/*
280 	 * Card switch command was successful, update host controller
281 	 * signal voltage setting.
282 	 */
283 	DPRINTF(("%s: switching host to %s\n", SDMMCDEVNAME(sc),
284 	    signal_voltage == SDMMC_SIGNAL_VOLTAGE_180 ? "1.8V" : "3.3V"));
285 	error = sdmmc_chip_signal_voltage(sc->sc_sct,
286 	    sc->sc_sch, signal_voltage);
287 	if (error)
288 		goto out;
289 
290 	delay(5000);
291 
292 	/*
293 	 * Switch to SDR12 timing
294 	 */
295 	error = sdmmc_chip_bus_clock(sc->sc_sct, sc->sc_sch, 25000,
296 	    false);
297 	if (error)
298 		goto out;
299 
300 	delay(1000);
301 
302 out:
303 	return error;
304 }
305 
306 /*
307  * Read the CSD and CID from all cards and assign each card a unique
308  * relative card address (RCA).  CMD2 is ignored by SDIO-only cards.
309  */
310 void
311 sdmmc_mem_scan(struct sdmmc_softc *sc)
312 {
313 	sdmmc_response resp;
314 	struct sdmmc_function *sf;
315 	uint16_t next_rca;
316 	int error;
317 	int retry;
318 
319 	SDMMC_LOCK(sc);
320 
321 	/*
322 	 * CMD2 is a broadcast command understood by SD cards and MMC
323 	 * cards.  All cards begin to respond to the command, but back
324 	 * off if another card drives the CMD line to a different level.
325 	 * Only one card will get its entire response through.  That
326 	 * card remains silent once it has been assigned an RCA.
327 	 */
328 	for (retry = 0; retry < 100; retry++) {
329 		error = sdmmc_mem_send_cid(sc, &resp);
330 		if (error) {
331 			if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE) &&
332 			    error == ETIMEDOUT) {
333 				/* No more cards there. */
334 				break;
335 			}
336 			DPRINTF(("%s: couldn't read CID\n", SDMMCDEVNAME(sc)));
337 			break;
338 		}
339 
340 		/* In MMC mode, find the next available RCA. */
341 		next_rca = 1;
342 		if (!ISSET(sc->sc_flags, SMF_SD_MODE)) {
343 			SIMPLEQ_FOREACH(sf, &sc->sf_head, sf_list)
344 				next_rca++;
345 		}
346 
347 		/* Allocate a sdmmc_function structure. */
348 		sf = sdmmc_function_alloc(sc);
349 		sf->rca = next_rca;
350 
351 		/*
352 		 * Remember the CID returned in the CMD2 response for
353 		 * later decoding.
354 		 */
355 		memcpy(sf->raw_cid, resp, sizeof(sf->raw_cid));
356 
357 		/*
358 		 * Silence the card by assigning it a unique RCA, or
359 		 * querying it for its RCA in the case of SD.
360 		 */
361 		if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
362 			if (sdmmc_set_relative_addr(sc, sf) != 0) {
363 				aprint_error_dev(sc->sc_dev,
364 				    "couldn't set mem RCA\n");
365 				sdmmc_function_free(sf);
366 				break;
367 			}
368 		}
369 
370 		/*
371 		 * If this is a memory-only card, the card responding
372 		 * first becomes an alias for SDIO function 0.
373 		 */
374 		if (sc->sc_fn0 == NULL)
375 			sc->sc_fn0 = sf;
376 
377 		SIMPLEQ_INSERT_TAIL(&sc->sf_head, sf, sf_list);
378 
379 		/* only one function in SPI mode */
380 		if (ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE))
381 			break;
382 	}
383 
384 	if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE))
385 		/* Go to Data Transfer Mode, if possible. */
386 		sdmmc_chip_bus_rod(sc->sc_sct, sc->sc_sch, 0);
387 
388 	/*
389 	 * All cards are either inactive or awaiting further commands.
390 	 * Read the CSDs and decode the raw CID for each card.
391 	 */
392 	SIMPLEQ_FOREACH(sf, &sc->sf_head, sf_list) {
393 		error = sdmmc_mem_send_csd(sc, sf, &resp);
394 		if (error) {
395 			SET(sf->flags, SFF_ERROR);
396 			continue;
397 		}
398 
399 		if (sdmmc_decode_csd(sc, resp, sf) != 0 ||
400 		    sdmmc_decode_cid(sc, sf->raw_cid, sf) != 0) {
401 			SET(sf->flags, SFF_ERROR);
402 			continue;
403 		}
404 
405 #ifdef SDMMC_DEBUG
406 		printf("%s: CID: ", SDMMCDEVNAME(sc));
407 		sdmmc_print_cid(&sf->cid);
408 #endif
409 	}
410 
411 	SDMMC_UNLOCK(sc);
412 }
413 
414 int
415 sdmmc_decode_csd(struct sdmmc_softc *sc, sdmmc_response resp,
416     struct sdmmc_function *sf)
417 {
418 	/* TRAN_SPEED(2:0): transfer rate exponent */
419 	static const int speed_exponent[8] = {
420 		100 *    1,	/* 100 Kbits/s */
421 		  1 * 1000,	/*   1 Mbits/s */
422 		 10 * 1000,	/*  10 Mbits/s */
423 		100 * 1000,	/* 100 Mbits/s */
424 		         0,
425 		         0,
426 		         0,
427 		         0,
428 	};
429 	/* TRAN_SPEED(6:3): transfer rate mantissa */
430 	static const int speed_mantissa[16] = {
431 		0, 10, 12, 13, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60, 70, 80,
432 	};
433 	struct sdmmc_csd *csd = &sf->csd;
434 	int e, m;
435 
436 	if (ISSET(sc->sc_flags, SMF_SD_MODE)) {
437 		/*
438 		 * CSD version 1.0 corresponds to SD system
439 		 * specification version 1.0 - 1.10. (SanDisk, 3.5.3)
440 		 */
441 		csd->csdver = SD_CSD_CSDVER(resp);
442 		switch (csd->csdver) {
443 		case SD_CSD_CSDVER_2_0:
444 			DPRINTF(("%s: SD Ver.2.0\n", SDMMCDEVNAME(sc)));
445 			SET(sf->flags, SFF_SDHC);
446 			csd->capacity = SD_CSD_V2_CAPACITY(resp);
447 			csd->read_bl_len = SD_CSD_V2_BL_LEN;
448 			break;
449 
450 		case SD_CSD_CSDVER_1_0:
451 			DPRINTF(("%s: SD Ver.1.0\n", SDMMCDEVNAME(sc)));
452 			csd->capacity = SD_CSD_CAPACITY(resp);
453 			csd->read_bl_len = SD_CSD_READ_BL_LEN(resp);
454 			break;
455 
456 		default:
457 			aprint_error_dev(sc->sc_dev,
458 			    "unknown SD CSD structure version 0x%x\n",
459 			    csd->csdver);
460 			return 1;
461 		}
462 
463 		csd->mmcver = SD_CSD_MMCVER(resp);
464 		csd->write_bl_len = SD_CSD_WRITE_BL_LEN(resp);
465 		csd->r2w_factor = SD_CSD_R2W_FACTOR(resp);
466 		e = SD_CSD_SPEED_EXP(resp);
467 		m = SD_CSD_SPEED_MANT(resp);
468 		csd->tran_speed = speed_exponent[e] * speed_mantissa[m] / 10;
469 		csd->ccc = SD_CSD_CCC(resp);
470 	} else {
471 		csd->csdver = MMC_CSD_CSDVER(resp);
472 		if (csd->csdver == MMC_CSD_CSDVER_1_0) {
473 			aprint_error_dev(sc->sc_dev,
474 			    "unknown MMC CSD structure version 0x%x\n",
475 			    csd->csdver);
476 			return 1;
477 		}
478 
479 		csd->mmcver = MMC_CSD_MMCVER(resp);
480 		csd->capacity = MMC_CSD_CAPACITY(resp);
481 		csd->read_bl_len = MMC_CSD_READ_BL_LEN(resp);
482 		csd->write_bl_len = MMC_CSD_WRITE_BL_LEN(resp);
483 		csd->r2w_factor = MMC_CSD_R2W_FACTOR(resp);
484 		e = MMC_CSD_TRAN_SPEED_EXP(resp);
485 		m = MMC_CSD_TRAN_SPEED_MANT(resp);
486 		csd->tran_speed = speed_exponent[e] * speed_mantissa[m] / 10;
487 	}
488 	if ((1 << csd->read_bl_len) > SDMMC_SECTOR_SIZE)
489 		csd->capacity *= (1 << csd->read_bl_len) / SDMMC_SECTOR_SIZE;
490 
491 #ifdef SDMMC_DUMP_CSD
492 	sdmmc_print_csd(resp, csd);
493 #endif
494 
495 	return 0;
496 }
497 
498 int
499 sdmmc_decode_cid(struct sdmmc_softc *sc, sdmmc_response resp,
500     struct sdmmc_function *sf)
501 {
502 	struct sdmmc_cid *cid = &sf->cid;
503 
504 	if (ISSET(sc->sc_flags, SMF_SD_MODE)) {
505 		cid->mid = SD_CID_MID(resp);
506 		cid->oid = SD_CID_OID(resp);
507 		SD_CID_PNM_CPY(resp, cid->pnm);
508 		cid->rev = SD_CID_REV(resp);
509 		cid->psn = SD_CID_PSN(resp);
510 		cid->mdt = SD_CID_MDT(resp);
511 	} else {
512 		switch(sf->csd.mmcver) {
513 		case MMC_CSD_MMCVER_1_0:
514 		case MMC_CSD_MMCVER_1_4:
515 			cid->mid = MMC_CID_MID_V1(resp);
516 			MMC_CID_PNM_V1_CPY(resp, cid->pnm);
517 			cid->rev = MMC_CID_REV_V1(resp);
518 			cid->psn = MMC_CID_PSN_V1(resp);
519 			cid->mdt = MMC_CID_MDT_V1(resp);
520 			break;
521 		case MMC_CSD_MMCVER_2_0:
522 		case MMC_CSD_MMCVER_3_1:
523 		case MMC_CSD_MMCVER_4_0:
524 			cid->mid = MMC_CID_MID_V2(resp);
525 			cid->oid = MMC_CID_OID_V2(resp);
526 			MMC_CID_PNM_V2_CPY(resp, cid->pnm);
527 			cid->psn = MMC_CID_PSN_V2(resp);
528 			break;
529 		default:
530 			aprint_error_dev(sc->sc_dev, "unknown MMC version %d\n",
531 			    sf->csd.mmcver);
532 			return 1;
533 		}
534 	}
535 	return 0;
536 }
537 
538 void
539 sdmmc_print_cid(struct sdmmc_cid *cid)
540 {
541 
542 	printf("mid=0x%02x oid=0x%04x pnm=\"%s\" rev=0x%02x psn=0x%08x"
543 	    " mdt=%03x\n", cid->mid, cid->oid, cid->pnm, cid->rev, cid->psn,
544 	    cid->mdt);
545 }
546 
547 #ifdef SDMMC_DUMP_CSD
548 void
549 sdmmc_print_csd(sdmmc_response resp, struct sdmmc_csd *csd)
550 {
551 
552 	printf("csdver = %d\n", csd->csdver);
553 	printf("mmcver = %d\n", csd->mmcver);
554 	printf("capacity = 0x%08x\n", csd->capacity);
555 	printf("read_bl_len = %d\n", csd->read_bl_len);
556 	printf("write_bl_len = %d\n", csd->write_bl_len);
557 	printf("r2w_factor = %d\n", csd->r2w_factor);
558 	printf("tran_speed = %d\n", csd->tran_speed);
559 	printf("ccc = 0x%x\n", csd->ccc);
560 }
561 #endif
562 
563 /*
564  * Initialize an SD/MMC memory card.
565  */
566 int
567 sdmmc_mem_init(struct sdmmc_softc *sc, struct sdmmc_function *sf)
568 {
569 	int error = 0;
570 
571 	SDMMC_LOCK(sc);
572 
573 	if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
574 		error = sdmmc_select_card(sc, sf);
575 		if (error)
576 			goto out;
577 	}
578 
579 	error = sdmmc_mem_set_blocklen(sc, sf, SDMMC_SECTOR_SIZE);
580 	if (error)
581 		goto out;
582 
583 	if (ISSET(sc->sc_flags, SMF_SD_MODE))
584 		error = sdmmc_mem_sd_init(sc, sf);
585 	else
586 		error = sdmmc_mem_mmc_init(sc, sf);
587 
588 out:
589 	SDMMC_UNLOCK(sc);
590 
591 	return error;
592 }
593 
594 /*
595  * Get or set the card's memory OCR value (SD or MMC).
596  */
597 int
598 sdmmc_mem_send_op_cond(struct sdmmc_softc *sc, uint32_t ocr, uint32_t *ocrp)
599 {
600 	struct sdmmc_command cmd;
601 	int error;
602 	int retry;
603 
604 	/* Don't lock */
605 
606 	DPRINTF(("%s: sdmmc_mem_send_op_cond: ocr=%#x\n",
607 	    SDMMCDEVNAME(sc), ocr));
608 
609 	/*
610 	 * If we change the OCR value, retry the command until the OCR
611 	 * we receive in response has the power-up status ("card busy")
612 	 * bit set, meaning the card is ready for identification.
613 	 */
614 	for (retry = 0; retry < 100; retry++) {
615 		memset(&cmd, 0, sizeof(cmd));
616 		cmd.c_arg = !ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE) ?
617 		    ocr : (ocr & MMC_OCR_HCS);
618 		cmd.c_flags = SCF_CMD_BCR | SCF_RSP_R3 | SCF_RSP_SPI_R1
619 		    | SCF_TOUT_OK;
620 
621 		if (ISSET(sc->sc_flags, SMF_SD_MODE)) {
622 			cmd.c_opcode = SD_APP_OP_COND;
623 			error = sdmmc_app_command(sc, NULL, &cmd);
624 		} else {
625 			cmd.c_opcode = MMC_SEND_OP_COND;
626 			error = sdmmc_mmc_command(sc, &cmd);
627 		}
628 		if (error)
629 			break;
630 
631 		if (ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
632 			if (!ISSET(MMC_SPI_R1(cmd.c_resp), R1_SPI_IDLE))
633 				break;
634 		} else {
635 			if (ISSET(MMC_R3(cmd.c_resp), MMC_OCR_MEM_READY) ||
636 			    ocr == 0)
637 				break;
638 		}
639 
640 		error = ETIMEDOUT;
641 		sdmmc_delay(10000);
642 	}
643 	if (error == 0 &&
644 	    ocrp != NULL &&
645 	    !ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE))
646 		*ocrp = MMC_R3(cmd.c_resp);
647 	DPRINTF(("%s: sdmmc_mem_send_op_cond: error=%d, ocr=%#x\n",
648 	    SDMMCDEVNAME(sc), error, MMC_R3(cmd.c_resp)));
649 	return error;
650 }
651 
652 int
653 sdmmc_mem_send_if_cond(struct sdmmc_softc *sc, uint32_t ocr, uint32_t *ocrp)
654 {
655 	struct sdmmc_command cmd;
656 	int error;
657 
658 	/* Don't lock */
659 
660 	memset(&cmd, 0, sizeof(cmd));
661 	cmd.c_arg = ocr;
662 	cmd.c_flags = SCF_CMD_BCR | SCF_RSP_R7 | SCF_RSP_SPI_R7;
663 	cmd.c_opcode = SD_SEND_IF_COND;
664 
665 	error = sdmmc_mmc_command(sc, &cmd);
666 	if (error == 0 && ocrp != NULL) {
667 		if (ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
668 			*ocrp = MMC_SPI_R7(cmd.c_resp);
669 		} else {
670 			*ocrp = MMC_R7(cmd.c_resp);
671 		}
672 		DPRINTF(("%s: sdmmc_mem_send_if_cond: error=%d, ocr=%#x\n",
673 		    SDMMCDEVNAME(sc), error, *ocrp));
674 	}
675 	return error;
676 }
677 
678 /*
679  * Set the block length for this card (CMD16).  The read block length
680  * from the card's CSD register is reported only for debugging.
681  */
682 int
683 sdmmc_mem_set_blocklen(struct sdmmc_softc *sc, struct sdmmc_function *sf,
684    int block_len)
685 {
686 	struct sdmmc_command cmd;
687 	int error;
688 
689 	/* Don't lock */
690 
691 	memset(&cmd, 0, sizeof(cmd));
692 	cmd.c_opcode = MMC_SET_BLOCKLEN;
693 	cmd.c_arg = block_len;
694 	cmd.c_flags = SCF_CMD_AC | SCF_RSP_R1 | SCF_RSP_SPI_R1;
695 
696 	error = sdmmc_mmc_command(sc, &cmd);
697 
698 	DPRINTF(("%s: sdmmc_mem_set_blocklen: read_bl_len=%d sector_size=%d\n",
699 	    SDMMCDEVNAME(sc), 1 << sf->csd.read_bl_len, block_len));
700 
701 	return error;
702 }
703 
704 /* make 512-bit BE quantity __bitfield()-compatible */
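/* Reverse the 32-bit word order and byte-swap each word in place. */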
705 static void
706 sdmmc_be512_to_bitfield512(sdmmc_bitfield512_t *buf) {
707 	size_t i;
708 	uint32_t tmp0, tmp1;
709 	const size_t bitswords = __arraycount(buf->_bits);
710 	for (i = 0; i < bitswords/2; i++) {
711 		tmp0 = buf->_bits[i];
712 		tmp1 = buf->_bits[bitswords - 1 - i];
713 		buf->_bits[i] = be32toh(tmp1);
714 		buf->_bits[bitswords - 1 - i] = be32toh(tmp0);
715 	}
716 }
717 
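/*
 * Pick the fastest SD access mode that both the host (capability bits)
 * and the card (CMD6 group 1 support bits) can handle.
 */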
718 static int
719 sdmmc_mem_select_transfer_mode(struct sdmmc_softc *sc, int support_func)
720 {
721 	if (ISSET(sc->sc_flags, SMF_UHS_MODE)) {
722 		if (ISSET(sc->sc_caps, SMC_CAPS_UHS_SDR104) &&
723 		    ISSET(support_func, 1 << SD_ACCESS_MODE_SDR104)) {
724 			return SD_ACCESS_MODE_SDR104;
725 		}
726 		if (ISSET(sc->sc_caps, SMC_CAPS_UHS_DDR50) &&
727 		    ISSET(support_func, 1 << SD_ACCESS_MODE_DDR50)) {
728 			return SD_ACCESS_MODE_DDR50;
729 		}
730 		if (ISSET(sc->sc_caps, SMC_CAPS_UHS_SDR50) &&
731 		    ISSET(support_func, 1 << SD_ACCESS_MODE_SDR50)) {
732 			return SD_ACCESS_MODE_SDR50;
733 		}
734 	}
735 	if (ISSET(sc->sc_caps, SMC_CAPS_SD_HIGHSPEED) &&
736 	    ISSET(support_func, 1 << SD_ACCESS_MODE_SDR25)) {
737 		return SD_ACCESS_MODE_SDR25;
738 	}
739 	return SD_ACCESS_MODE_SDR12;
740 }
741 
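/*
 * Run the host controller's tuning procedure when the selected timing
 * requires it (SDR50/SDR104 for SD, HS200 for eMMC); otherwise do nothing.
 */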
742 static int
743 sdmmc_mem_execute_tuning(struct sdmmc_softc *sc, struct sdmmc_function *sf)
744 {
745 	int timing = -1;
746 
747 	if (!ISSET(sc->sc_flags, SMF_UHS_MODE))
748 		return 0;
749 
750 	if (ISSET(sc->sc_flags, SMF_SD_MODE)) {
751 		if (!ISSET(sc->sc_flags, SMF_UHS_MODE))
752 			return 0;
753 
754 		switch (sf->csd.tran_speed) {
755 		case 100000:
756 			timing = SDMMC_TIMING_UHS_SDR50;
757 			break;
758 		case 208000:
759 			timing = SDMMC_TIMING_UHS_SDR104;
760 			break;
761 		default:
762 			return 0;
763 		}
764 	} else {
765 		switch (sf->csd.tran_speed) {
766 		case 200000:
767 			timing = SDMMC_TIMING_MMC_HS200;
768 			break;
769 		default:
770 			return 0;
771 		}
772 	}
773 
774 	DPRINTF(("%s: execute tuning for timing %d\n", SDMMCDEVNAME(sc),
775 	    timing));
776 
777 	return sdmmc_chip_execute_tuning(sc->sc_sct, sc->sc_sch, timing);
778 }
779 
780 static int
781 sdmmc_mem_sd_init(struct sdmmc_softc *sc, struct sdmmc_function *sf)
782 {
783 	int support_func, best_func, bus_clock, error, i;
784 	sdmmc_bitfield512_t status; /* Switch Function Status */
785 	bool ddr = false;
786 
787 	/* change bus clock */
788 	bus_clock = min(sc->sc_busclk, sf->csd.tran_speed);
789 	error = sdmmc_chip_bus_clock(sc->sc_sct, sc->sc_sch, bus_clock, false);
790 	if (error) {
791 		aprint_error_dev(sc->sc_dev, "can't change bus clock\n");
792 		return error;
793 	}
794 
795 	error = sdmmc_mem_send_scr(sc, sf, sf->raw_scr);
796 	if (error) {
797 		aprint_error_dev(sc->sc_dev, "SD_SEND_SCR send failed.\n");
798 		return error;
799 	}
800 	error = sdmmc_mem_decode_scr(sc, sf);
801 	if (error)
802 		return error;
803 
804 	if (ISSET(sc->sc_caps, SMC_CAPS_4BIT_MODE) &&
805 	    ISSET(sf->scr.bus_width, SCR_SD_BUS_WIDTHS_4BIT)) {
806 		DPRINTF(("%s: change bus width\n", SDMMCDEVNAME(sc)));
807 		error = sdmmc_set_bus_width(sf, 4);
808 		if (error) {
809 			aprint_error_dev(sc->sc_dev,
810 			    "can't change bus width (%d bit)\n", 4);
811 			return error;
812 		}
813 		sf->width = 4;
814 	}
815 
816 	best_func = 0;
817 	if (sf->scr.sd_spec >= SCR_SD_SPEC_VER_1_10 &&
818 	    ISSET(sf->csd.ccc, SD_CSD_CCC_SWITCH)) {
819 		DPRINTF(("%s: switch func mode 0\n", SDMMCDEVNAME(sc)));
820 		error = sdmmc_mem_sd_switch(sf, 0, 1, 0, &status);
821 		if (error) {
822 			aprint_error_dev(sc->sc_dev,
823 			    "switch func mode 0 failed\n");
824 			return error;
825 		}
826 
827 		support_func = SFUNC_STATUS_GROUP(&status, 1);
828 
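		/* 0x1c: the SDR50, SDR104 and DDR50 (UHS-I) access-mode bits */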
829 		if (!ISSET(sc->sc_flags, SMF_UHS_MODE) && support_func & 0x1c) {
830 			/* XXX UHS-I card started in 1.8V mode, switch now */
831 			error = sdmmc_mem_signal_voltage(sc,
832 			    SDMMC_SIGNAL_VOLTAGE_180);
833 			if (error) {
834 				aprint_error_dev(sc->sc_dev,
835 				    "failed to recover UHS card\n");
836 				return error;
837 			}
838 			SET(sc->sc_flags, SMF_UHS_MODE);
839 		}
840 
841 		for (i = 0; i < __arraycount(switch_group0_functions); i++) {
842 			if (!(support_func & (1 << i)))
843 				continue;
844 			DPRINTF(("%s: card supports mode %s\n",
845 			    SDMMCDEVNAME(sc),
846 			    switch_group0_functions[i].name));
847 		}
848 
849 		best_func = sdmmc_mem_select_transfer_mode(sc, support_func);
850 
851 		DPRINTF(("%s: using mode %s\n", SDMMCDEVNAME(sc),
852 		    switch_group0_functions[best_func].name));
853 
854 		if (best_func != 0) {
855 			DPRINTF(("%s: switch func mode 1(func=%d)\n",
856 			    SDMMCDEVNAME(sc), best_func));
857 			error =
858 			    sdmmc_mem_sd_switch(sf, 1, 1, best_func, &status);
859 			if (error) {
860 				aprint_error_dev(sc->sc_dev,
861 				    "switch func mode 1 failed:"
862 				    " group 1 function %d (0x%02x)\n",
863 				    best_func, support_func);
864 				return error;
865 			}
866 			sf->csd.tran_speed =
867 			    switch_group0_functions[best_func].freq;
868 
869 			if (best_func == SD_ACCESS_MODE_DDR50)
870 				ddr = true;
871 
872 			/* Wait 400KHz x 8 clock (2.5us * 8 + slop) */
873 			delay(25);
874 		}
875 	}
876 
877 	/* update bus clock */
878 	if (sc->sc_busclk > sf->csd.tran_speed)
879 		sc->sc_busclk = sf->csd.tran_speed;
880 	if (sc->sc_busclk == bus_clock && sc->sc_busddr == ddr)
881 		return 0;
882 
883 	/* change bus clock */
884 	error = sdmmc_chip_bus_clock(sc->sc_sct, sc->sc_sch, sc->sc_busclk,
885 	    ddr);
886 	if (error) {
887 		aprint_error_dev(sc->sc_dev, "can't change bus clock\n");
888 		return error;
889 	}
890 
891 	sc->sc_transfer_mode = switch_group0_functions[best_func].name;
892 	sc->sc_busddr = ddr;
893 
894 	/* execute tuning (UHS) */
895 	error = sdmmc_mem_execute_tuning(sc, sf);
896 	if (error) {
897 		aprint_error_dev(sc->sc_dev, "can't execute SD tuning\n");
898 		return error;
899 	}
900 
901 	return 0;
902 }
903 
904 static int
905 sdmmc_mem_mmc_init(struct sdmmc_softc *sc, struct sdmmc_function *sf)
906 {
907 	int width, value, hs_timing, bus_clock, error;
908 	uint8_t ext_csd[512];
909 	uint32_t sectors = 0;
910 
911 	sc->sc_transfer_mode = NULL;
912 
913 	/* change bus clock */
914 	bus_clock = min(sc->sc_busclk, sf->csd.tran_speed);
915 	error = sdmmc_chip_bus_clock(sc->sc_sct, sc->sc_sch, bus_clock, false);
916 	if (error) {
917 		aprint_error_dev(sc->sc_dev, "can't change bus clock\n");
918 		return error;
919 	}
920 
921 	if (sf->csd.mmcver >= MMC_CSD_MMCVER_4_0) {
922 		error = sdmmc_mem_send_cxd_data(sc,
923 		    MMC_SEND_EXT_CSD, ext_csd, sizeof(ext_csd));
924 		if (error) {
925 			aprint_error_dev(sc->sc_dev,
926 			    "can't read EXT_CSD (error=%d)\n", error);
927 			return error;
928 		}
929 		if ((sf->csd.csdver == MMC_CSD_CSDVER_EXT_CSD) &&
930 		    (ext_csd[EXT_CSD_STRUCTURE] > EXT_CSD_STRUCTURE_VER_1_2)) {
931 			aprint_error_dev(sc->sc_dev,
932 			    "unrecognised future version (%d)\n",
933 				ext_csd[EXT_CSD_STRUCTURE]);
934 			return ENOTSUP;
935 		}
936 
937 		sc->sc_transfer_mode = NULL;
938 		if (ISSET(sc->sc_caps, SMC_CAPS_MMC_HS200) &&
939 		    ext_csd[EXT_CSD_CARD_TYPE] & EXT_CSD_CARD_TYPE_F_HS200_1_8V) {
940 			sf->csd.tran_speed = 200000;	/* 200MHz SDR */
941 			hs_timing = 2;
942 		} else if (ext_csd[EXT_CSD_CARD_TYPE] & EXT_CSD_CARD_TYPE_F_52M) {
943 			sf->csd.tran_speed = 52000;	/* 52MHz */
944 			hs_timing = 1;
945 		} else if (ext_csd[EXT_CSD_CARD_TYPE] & EXT_CSD_CARD_TYPE_F_26M) {
946 			sf->csd.tran_speed = 26000;	/* 26MHz */
947 			hs_timing = 0;
948 		} else {
949 			aprint_error_dev(sc->sc_dev,
950 			    "unknown CARD_TYPE: 0x%x\n",
951 			    ext_csd[EXT_CSD_CARD_TYPE]);
952 			return ENOTSUP;
953 		}
954 
955 		if (ISSET(sc->sc_caps, SMC_CAPS_8BIT_MODE)) {
956 			width = 8;
957 			value = EXT_CSD_BUS_WIDTH_8;
958 		} else if (ISSET(sc->sc_caps, SMC_CAPS_4BIT_MODE)) {
959 			width = 4;
960 			value = EXT_CSD_BUS_WIDTH_4;
961 		} else {
962 			width = 1;
963 			value = EXT_CSD_BUS_WIDTH_1;
964 		}
965 
966 		if (width != 1) {
967 			error = sdmmc_mem_mmc_switch(sf, EXT_CSD_CMD_SET_NORMAL,
968 			    EXT_CSD_BUS_WIDTH, value);
969 			if (error == 0)
970 				error = sdmmc_chip_bus_width(sc->sc_sct,
971 				    sc->sc_sch, width);
972 			else {
973 				DPRINTF(("%s: can't change bus width"
974 				    " (%d bit)\n", SDMMCDEVNAME(sc), width));
975 				return error;
976 			}
977 
978 			/* XXX: need bus test? (using CMD14 & CMD19) */
979 			delay(10000);
980 		}
981 		sf->width = width;
982 
983 		if (hs_timing == 1 &&
984 		    !ISSET(sc->sc_caps, SMC_CAPS_MMC_HIGHSPEED)) {
985 			hs_timing = 0;
986 		}
987 		if (hs_timing) {
988 			error = sdmmc_mem_mmc_switch(sf, EXT_CSD_CMD_SET_NORMAL,
989 			    EXT_CSD_HS_TIMING, hs_timing);
990 			if (error) {
991 				aprint_error_dev(sc->sc_dev,
992 				    "can't change high speed %d, error %d\n",
993 				    hs_timing, error);
994 				return error;
995 			}
996 		}
997 
998 		if (sc->sc_busclk > sf->csd.tran_speed)
999 			sc->sc_busclk = sf->csd.tran_speed;
1000 		if (sc->sc_busclk != bus_clock) {
1001 			error = sdmmc_chip_bus_clock(sc->sc_sct, sc->sc_sch,
1002 			    sc->sc_busclk, false);
1003 			if (error) {
1004 				aprint_error_dev(sc->sc_dev,
1005 				    "can't change bus clock\n");
1006 				return error;
1007 			}
1008 		}
1009 
1010 		if (hs_timing) {
1011 			error = sdmmc_mem_send_cxd_data(sc,
1012 			    MMC_SEND_EXT_CSD, ext_csd, sizeof(ext_csd));
1013 			if (error) {
1014 				aprint_error_dev(sc->sc_dev,
1015 				    "can't re-read EXT_CSD\n");
1016 				return error;
1017 			}
1018 			if (ext_csd[EXT_CSD_HS_TIMING] != hs_timing) {
1019 				aprint_error_dev(sc->sc_dev,
1020 				    "HS_TIMING set failed\n");
1021 				return EINVAL;
1022 			}
1023 		}
1024 
1025 		sectors = ext_csd[EXT_CSD_SEC_COUNT + 0] << 0 |
1026 		    ext_csd[EXT_CSD_SEC_COUNT + 1] << 8  |
1027 		    ext_csd[EXT_CSD_SEC_COUNT + 2] << 16 |
1028 		    ext_csd[EXT_CSD_SEC_COUNT + 3] << 24;
1029 		if (sectors > (2u * 1024 * 1024 * 1024) / 512) {
1030 			SET(sf->flags, SFF_SDHC);
1031 			sf->csd.capacity = sectors;
1032 		}
1033 
1034 		if (hs_timing == 2) {
1035 			sc->sc_transfer_mode = "HS200";
1036 
1037 			/* execute tuning (HS200) */
1038 			error = sdmmc_mem_execute_tuning(sc, sf);
1039 			if (error) {
1040 				aprint_error_dev(sc->sc_dev,
1041 				    "can't execute MMC tuning\n");
1042 				return error;
1043 			}
1044 		} else {
1045 			sc->sc_transfer_mode = NULL;
1046 		}
1047 	} else {
1048 		if (sc->sc_busclk > sf->csd.tran_speed)
1049 			sc->sc_busclk = sf->csd.tran_speed;
1050 		if (sc->sc_busclk != bus_clock) {
1051 			error = sdmmc_chip_bus_clock(sc->sc_sct, sc->sc_sch,
1052 			    sc->sc_busclk, false);
1053 			if (error) {
1054 				aprint_error_dev(sc->sc_dev,
1055 				    "can't change bus clock\n");
1056 				return error;
1057 			}
1058 		}
1059 	}
1060 
1061 	return 0;
1062 }
1063 
1064 static int
1065 sdmmc_mem_send_cid(struct sdmmc_softc *sc, sdmmc_response *resp)
1066 {
1067 	struct sdmmc_command cmd;
1068 	int error;
1069 
1070 	if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
1071 		memset(&cmd, 0, sizeof cmd);
1072 		cmd.c_opcode = MMC_ALL_SEND_CID;
1073 		cmd.c_flags = SCF_CMD_BCR | SCF_RSP_R2 | SCF_TOUT_OK;
1074 
1075 		error = sdmmc_mmc_command(sc, &cmd);
1076 	} else {
1077 		error = sdmmc_mem_send_cxd_data(sc, MMC_SEND_CID, &cmd.c_resp,
1078 		    sizeof(cmd.c_resp));
1079 	}
1080 
1081 #ifdef SDMMC_DEBUG
1082 	if (error == 0)
1083 		sdmmc_dump_data("CID", cmd.c_resp, sizeof(cmd.c_resp));
1084 #endif
1085 	if (error == 0 && resp != NULL)
1086 		memcpy(resp, &cmd.c_resp, sizeof(*resp));
1087 	return error;
1088 }
1089 
1090 static int
1091 sdmmc_mem_send_csd(struct sdmmc_softc *sc, struct sdmmc_function *sf,
1092     sdmmc_response *resp)
1093 {
1094 	struct sdmmc_command cmd;
1095 	int error;
1096 
1097 	if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
1098 		memset(&cmd, 0, sizeof cmd);
1099 		cmd.c_opcode = MMC_SEND_CSD;
1100 		cmd.c_arg = MMC_ARG_RCA(sf->rca);
1101 		cmd.c_flags = SCF_CMD_AC | SCF_RSP_R2;
1102 
1103 		error = sdmmc_mmc_command(sc, &cmd);
1104 	} else {
1105 		error = sdmmc_mem_send_cxd_data(sc, MMC_SEND_CSD, &cmd.c_resp,
1106 		    sizeof(cmd.c_resp));
1107 	}
1108 
1109 #ifdef SDMMC_DEBUG
1110 	if (error == 0)
1111 		sdmmc_dump_data("CSD", cmd.c_resp, sizeof(cmd.c_resp));
1112 #endif
1113 	if (error == 0 && resp != NULL)
1114 		memcpy(resp, &cmd.c_resp, sizeof(*resp));
1115 	return error;
1116 }
1117 
1118 static int
1119 sdmmc_mem_send_scr(struct sdmmc_softc *sc, struct sdmmc_function *sf,
1120     uint32_t *scr)
1121 {
1122 	struct sdmmc_command cmd;
1123 	bus_dma_segment_t ds[1];
1124 	void *ptr = NULL;
1125 	int datalen = 8;
1126 	int rseg;
1127 	int error = 0;
1128 
1129 	/* Don't lock */
1130 
1131 	if (ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
1132 		error = bus_dmamem_alloc(sc->sc_dmat, datalen, PAGE_SIZE, 0,
1133 		    ds, 1, &rseg, BUS_DMA_NOWAIT);
1134 		if (error)
1135 			goto out;
1136 		error = bus_dmamem_map(sc->sc_dmat, ds, 1, datalen, &ptr,
1137 		    BUS_DMA_NOWAIT);
1138 		if (error)
1139 			goto dmamem_free;
1140 		error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmap, ptr, datalen,
1141 		    NULL, BUS_DMA_NOWAIT|BUS_DMA_STREAMING|BUS_DMA_READ);
1142 		if (error)
1143 			goto dmamem_unmap;
1144 
1145 		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap, 0, datalen,
1146 		    BUS_DMASYNC_PREREAD);
1147 	} else {
1148 		ptr = malloc(datalen, M_DEVBUF, M_NOWAIT | M_ZERO);
1149 		if (ptr == NULL) {
1150 			error = ENOMEM;
			goto out;
		}
1151 	}
1152 
1153 	memset(&cmd, 0, sizeof(cmd));
1154 	cmd.c_data = ptr;
1155 	cmd.c_datalen = datalen;
1156 	cmd.c_blklen = datalen;
1157 	cmd.c_arg = 0;
1158 	cmd.c_flags = SCF_CMD_ADTC | SCF_CMD_READ | SCF_RSP_R1 | SCF_RSP_SPI_R1;
1159 	cmd.c_opcode = SD_APP_SEND_SCR;
1160 	if (ISSET(sc->sc_caps, SMC_CAPS_DMA))
1161 		cmd.c_dmamap = sc->sc_dmap;
1162 
1163 	error = sdmmc_app_command(sc, sf, &cmd);
1164 	if (error == 0) {
1165 		if (ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
1166 			bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap, 0, datalen,
1167 			    BUS_DMASYNC_POSTREAD);
1168 		}
1169 		memcpy(scr, ptr, datalen);
1170 	}
1171 
1172 out:
1173 	if (ptr != NULL) {
1174 		if (ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
1175 			bus_dmamap_unload(sc->sc_dmat, sc->sc_dmap);
1176 dmamem_unmap:
1177 			bus_dmamem_unmap(sc->sc_dmat, ptr, datalen);
1178 dmamem_free:
1179 			bus_dmamem_free(sc->sc_dmat, ds, rseg);
1180 		} else {
1181 			free(ptr, M_DEVBUF);
1182 		}
1183 	}
1184 	DPRINTF(("%s: sdmmc_mem_send_scr: error = %d\n", SDMMCDEVNAME(sc),
1185 	    error));
1186 
1187 #ifdef SDMMC_DEBUG
1188 	if (error == 0)
1189 		sdmmc_dump_data("SCR", scr, datalen);
1190 #endif
1191 	return error;
1192 }
1193 
1194 static int
1195 sdmmc_mem_decode_scr(struct sdmmc_softc *sc, struct sdmmc_function *sf)
1196 {
1197 	sdmmc_response resp;
1198 	int ver;
1199 
1200 	memset(resp, 0, sizeof(resp));
1201 	/*
1202 	 * Repack the big-endian raw SCR into resp[], shifting the 64-bit
	 * register right by 8 bits so the SCR_*() macros can be applied.
1203 	 */
1204 	resp[0] = be32toh(sf->raw_scr[1]) >> 8;		// LSW
1205 	resp[1] = be32toh(sf->raw_scr[0]);		// MSW
1206 	resp[0] |= (resp[1] & 0xff) << 24;
1207 	resp[1] >>= 8;
1208 
1209 	ver = SCR_STRUCTURE(resp);
1210 	sf->scr.sd_spec = SCR_SD_SPEC(resp);
1211 	sf->scr.bus_width = SCR_SD_BUS_WIDTHS(resp);
1212 
1213 	DPRINTF(("%s: sdmmc_mem_decode_scr: %08x%08x ver=%d, spec=%d, bus width=%d\n",
1214 	    SDMMCDEVNAME(sc), resp[1], resp[0],
1215 	    ver, sf->scr.sd_spec, sf->scr.bus_width));
1216 
1217 	if (ver != 0 && ver != 1) {
1218 		DPRINTF(("%s: unknown structure version: %d\n",
1219 		    SDMMCDEVNAME(sc), ver));
1220 		return EINVAL;
1221 	}
1222 	return 0;
1223 }
1224 
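/*
 * Read a CID, CSD or EXT_CSD register over the data lines into "data".
 * Used for MMC_SEND_EXT_CSD and for CID/CSD reads in SPI mode.
 */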
1225 static int
1226 sdmmc_mem_send_cxd_data(struct sdmmc_softc *sc, int opcode, void *data,
1227     size_t datalen)
1228 {
1229 	struct sdmmc_command cmd;
1230 	bus_dma_segment_t ds[1];
1231 	void *ptr = NULL;
1232 	int rseg;
1233 	int error = 0;
1234 
1235 	if (ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
1236 		error = bus_dmamem_alloc(sc->sc_dmat, datalen, PAGE_SIZE, 0, ds,
1237 		    1, &rseg, BUS_DMA_NOWAIT);
1238 		if (error)
1239 			goto out;
1240 		error = bus_dmamem_map(sc->sc_dmat, ds, 1, datalen, &ptr,
1241 		    BUS_DMA_NOWAIT);
1242 		if (error)
1243 			goto dmamem_free;
1244 		error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmap, ptr, datalen,
1245 		    NULL, BUS_DMA_NOWAIT|BUS_DMA_STREAMING|BUS_DMA_READ);
1246 		if (error)
1247 			goto dmamem_unmap;
1248 
1249 		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap, 0, datalen,
1250 		    BUS_DMASYNC_PREREAD);
1251 	} else {
1252 		ptr = malloc(datalen, M_DEVBUF, M_NOWAIT | M_ZERO);
1253 		if (ptr == NULL) {
1254 			error = ENOMEM;
			goto out;
		}
1255 	}
1256 
1257 	memset(&cmd, 0, sizeof(cmd));
1258 	cmd.c_data = ptr;
1259 	cmd.c_datalen = datalen;
1260 	cmd.c_blklen = datalen;
1261 	cmd.c_opcode = opcode;
1262 	cmd.c_arg = 0;
1263 	cmd.c_flags = SCF_CMD_ADTC | SCF_CMD_READ | SCF_RSP_SPI_R1;
1264 	if (opcode == MMC_SEND_EXT_CSD)
1265 		SET(cmd.c_flags, SCF_RSP_R1);
1266 	else
1267 		SET(cmd.c_flags, SCF_RSP_R2);
1268 	if (ISSET(sc->sc_caps, SMC_CAPS_DMA))
1269 		cmd.c_dmamap = sc->sc_dmap;
1270 
1271 	error = sdmmc_mmc_command(sc, &cmd);
1272 	if (error == 0) {
1273 		if (ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
1274 			bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap, 0, datalen,
1275 			    BUS_DMASYNC_POSTREAD);
1276 		}
1277 		memcpy(data, ptr, datalen);
1278 #ifdef SDMMC_DEBUG
1279 		sdmmc_dump_data("CXD", data, datalen);
1280 #endif
1281 	}
1282 
1283 out:
1284 	if (ptr != NULL) {
1285 		if (ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
1286 			bus_dmamap_unload(sc->sc_dmat, sc->sc_dmap);
1287 dmamem_unmap:
1288 			bus_dmamem_unmap(sc->sc_dmat, ptr, datalen);
1289 dmamem_free:
1290 			bus_dmamem_free(sc->sc_dmat, ds, rseg);
1291 		} else {
1292 			free(ptr, M_DEVBUF);
1293 		}
1294 	}
1295 	return error;
1296 }
1297 
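/*
 * Set the SD card's bus width with ACMD6 and reconfigure the host
 * controller to match.  Not applicable in SPI mode.
 */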
1298 static int
1299 sdmmc_set_bus_width(struct sdmmc_function *sf, int width)
1300 {
1301 	struct sdmmc_softc *sc = sf->sc;
1302 	struct sdmmc_command cmd;
1303 	int error;
1304 
1305 	if (ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE))
1306 		return ENODEV;
1307 
1308 	memset(&cmd, 0, sizeof(cmd));
1309 	cmd.c_opcode = SD_APP_SET_BUS_WIDTH;
1310 	cmd.c_flags = SCF_RSP_R1 | SCF_CMD_AC;
1311 
1312 	switch (width) {
1313 	case 1:
1314 		cmd.c_arg = SD_ARG_BUS_WIDTH_1;
1315 		break;
1316 
1317 	case 4:
1318 		cmd.c_arg = SD_ARG_BUS_WIDTH_4;
1319 		break;
1320 
1321 	default:
1322 		return EINVAL;
1323 	}
1324 
1325 	error = sdmmc_app_command(sc, sf, &cmd);
1326 	if (error == 0)
1327 		error = sdmmc_chip_bus_width(sc->sc_sct, sc->sc_sch, width);
1328 	return error;
1329 }
1330 
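/*
 * SD CMD6 (SWITCH_FUNC): mode 0 queries support, mode 1 switches the
 * given function in the given group.  The 64-byte status block is read
 * back and converted to host order on success.
 */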
1331 static int
1332 sdmmc_mem_sd_switch(struct sdmmc_function *sf, int mode, int group,
1333     int function, sdmmc_bitfield512_t *status)
1334 {
1335 	struct sdmmc_softc *sc = sf->sc;
1336 	struct sdmmc_command cmd;
1337 	bus_dma_segment_t ds[1];
1338 	void *ptr = NULL;
1339 	int gsft, rseg, error = 0;
1340 	const int statlen = 64;
1341 
1342 	if (sf->scr.sd_spec >= SCR_SD_SPEC_VER_1_10 &&
1343 	    !ISSET(sf->csd.ccc, SD_CSD_CCC_SWITCH))
1344 		return EINVAL;
1345 
1346 	if (group <= 0 || group > 6 ||
1347 	    function < 0 || function > 15)
1348 		return EINVAL;
1349 
1350 	gsft = (group - 1) << 2;
1351 
1352 	if (ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
1353 		error = bus_dmamem_alloc(sc->sc_dmat, statlen, PAGE_SIZE, 0, ds,
1354 		    1, &rseg, BUS_DMA_NOWAIT);
1355 		if (error)
1356 			goto out;
1357 		error = bus_dmamem_map(sc->sc_dmat, ds, 1, statlen, &ptr,
1358 		    BUS_DMA_NOWAIT);
1359 		if (error)
1360 			goto dmamem_free;
1361 		error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmap, ptr, statlen,
1362 		    NULL, BUS_DMA_NOWAIT|BUS_DMA_STREAMING|BUS_DMA_READ);
1363 		if (error)
1364 			goto dmamem_unmap;
1365 
1366 		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap, 0, statlen,
1367 		    BUS_DMASYNC_PREREAD);
1368 	} else {
1369 		ptr = malloc(statlen, M_DEVBUF, M_NOWAIT | M_ZERO);
1370 		if (ptr == NULL) {
1371 			error = ENOMEM;
			goto out;
		}
1372 	}
1373 
1374 	memset(&cmd, 0, sizeof(cmd));
1375 	cmd.c_data = ptr;
1376 	cmd.c_datalen = statlen;
1377 	cmd.c_blklen = statlen;
1378 	cmd.c_opcode = SD_SEND_SWITCH_FUNC;
1379 	cmd.c_arg =
1380 	    (!!mode << 31) | (function << gsft) | (0x00ffffff & ~(0xf << gsft));
1381 	cmd.c_flags = SCF_CMD_ADTC | SCF_CMD_READ | SCF_RSP_R1 | SCF_RSP_SPI_R1;
1382 	if (ISSET(sc->sc_caps, SMC_CAPS_DMA))
1383 		cmd.c_dmamap = sc->sc_dmap;
1384 
1385 	error = sdmmc_mmc_command(sc, &cmd);
1386 	if (error == 0) {
1387 		if (ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
1388 			bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap, 0, statlen,
1389 			    BUS_DMASYNC_POSTREAD);
1390 		}
1391 		memcpy(status, ptr, statlen);
1392 	}
1393 
1394 out:
1395 	if (ptr != NULL) {
1396 		if (ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
1397 			bus_dmamap_unload(sc->sc_dmat, sc->sc_dmap);
1398 dmamem_unmap:
1399 			bus_dmamem_unmap(sc->sc_dmat, ptr, statlen);
1400 dmamem_free:
1401 			bus_dmamem_free(sc->sc_dmat, ds, rseg);
1402 		} else {
1403 			free(ptr, M_DEVBUF);
1404 		}
1405 	}
1406 
1407 	if (error == 0)
1408 		sdmmc_be512_to_bitfield512(status);
1409 
1410 	return error;
1411 }
1412 
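/*
 * MMC CMD6 (SWITCH): write one EXT_CSD byte.  When enabling HS200 or
 * faster timings, poll CMD13 (SEND_STATUS) until the card reports
 * READY_FOR_DATA or a switch error.
 */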
1413 static int
1414 sdmmc_mem_mmc_switch(struct sdmmc_function *sf, uint8_t set, uint8_t index,
1415     uint8_t value)
1416 {
1417 	struct sdmmc_softc *sc = sf->sc;
1418 	struct sdmmc_command cmd;
1419 	int error;
1420 
1421 	memset(&cmd, 0, sizeof(cmd));
1422 	cmd.c_opcode = MMC_SWITCH;
1423 	cmd.c_arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
1424 	    (index << 16) | (value << 8) | set;
1425 	cmd.c_flags = SCF_RSP_SPI_R1B | SCF_RSP_R1B | SCF_CMD_AC;
1426 
1427 	error = sdmmc_mmc_command(sc, &cmd);
1428 	if (error)
1429 		return error;
1430 
1431 	if (index == EXT_CSD_HS_TIMING && value >= 2) {
1432 		do {
1433 			memset(&cmd, 0, sizeof(cmd));
1434 			cmd.c_opcode = MMC_SEND_STATUS;
1435 			if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE))
1436 				cmd.c_arg = MMC_ARG_RCA(sf->rca);
1437 			cmd.c_flags = SCF_CMD_AC | SCF_RSP_R1 | SCF_RSP_SPI_R2;
1438 			error = sdmmc_mmc_command(sc, &cmd);
1439 			if (error)
1440 				break;
1441 			if (ISSET(MMC_R1(cmd.c_resp), MMC_R1_SWITCH_ERROR)) {
1442 				aprint_error_dev(sc->sc_dev, "switch error\n");
1443 				return EINVAL;
1444 			}
1445 			/* XXX time out */
1446 		} while (!ISSET(MMC_R1(cmd.c_resp), MMC_R1_READY_FOR_DATA));
1447 
1448 		if (error) {
1449 			aprint_error_dev(sc->sc_dev,
1450 			    "error waiting for high speed switch: %d\n",
1451 			    error);
1452 			return error;
1453 		}
1454 	}
1455 
1456 	return 0;
1457 }
1458 
1459 /*
1460  * SPI mode function
1461  */
1462 static int
1463 sdmmc_mem_spi_read_ocr(struct sdmmc_softc *sc, uint32_t hcs, uint32_t *card_ocr)
1464 {
1465 	struct sdmmc_command cmd;
1466 	int error;
1467 
1468 	memset(&cmd, 0, sizeof(cmd));
1469 	cmd.c_opcode = MMC_READ_OCR;
1470 	cmd.c_arg = hcs ? MMC_OCR_HCS : 0;
1471 	cmd.c_flags = SCF_RSP_SPI_R3;
1472 
1473 	error = sdmmc_mmc_command(sc, &cmd);
1474 	if (error == 0 && card_ocr != NULL)
1475 		*card_ocr = cmd.c_resp[1];
1476 	DPRINTF(("%s: sdmmc_mem_spi_read_ocr: error=%d, ocr=%#x\n",
1477 	    SDMMCDEVNAME(sc), error, cmd.c_resp[1]));
1478 	return error;
1479 }
1480 
1481 /*
1482  * read/write function
1483  */
1484 /* read */
1485 static int
1486 sdmmc_mem_single_read_block(struct sdmmc_function *sf, uint32_t blkno,
1487     u_char *data, size_t datalen)
1488 {
1489 	struct sdmmc_softc *sc = sf->sc;
1490 	int error = 0;
1491 	int i;
1492 
1493 	KASSERT((datalen % SDMMC_SECTOR_SIZE) == 0);
1494 	KASSERT(!ISSET(sc->sc_caps, SMC_CAPS_DMA));
1495 
1496 	for (i = 0; i < datalen / SDMMC_SECTOR_SIZE; i++) {
1497 		error = sdmmc_mem_read_block_subr(sf, sc->sc_dmap, blkno + i,
1498 		    data + i * SDMMC_SECTOR_SIZE, SDMMC_SECTOR_SIZE);
1499 		if (error)
1500 			break;
1501 	}
1502 	return error;
1503 }
1504 
1505 /*
1506  * Simulate a multi-segment DMA transfer.
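 * If any DMA segment length is not a multiple of the sector size, fall
 * back to the bounce buffer (sf->bbuf); otherwise transfer the segments
 * one at a time through the single-segment map (sf->sseg_dmap).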
1507  */
1508 static int
1509 sdmmc_mem_single_segment_dma_read_block(struct sdmmc_function *sf,
1510     uint32_t blkno, u_char *data, size_t datalen)
1511 {
1512 	struct sdmmc_softc *sc = sf->sc;
1513 	bool use_bbuf = false;
1514 	int error = 0;
1515 	int i;
1516 
1517 	for (i = 0; i < sc->sc_dmap->dm_nsegs; i++) {
1518 		size_t len = sc->sc_dmap->dm_segs[i].ds_len;
1519 		if ((len % SDMMC_SECTOR_SIZE) != 0) {
1520 			use_bbuf = true;
1521 			break;
1522 		}
1523 	}
1524 	if (use_bbuf) {
1525 		bus_dmamap_sync(sc->sc_dmat, sf->bbuf_dmap, 0, datalen,
1526 		    BUS_DMASYNC_PREREAD);
1527 
1528 		error = sdmmc_mem_read_block_subr(sf, sf->bbuf_dmap,
1529 		    blkno, data, datalen);
1530 		if (error) {
1531 			bus_dmamap_unload(sc->sc_dmat, sf->bbuf_dmap);
1532 			return error;
1533 		}
1534 
1535 		bus_dmamap_sync(sc->sc_dmat, sf->bbuf_dmap, 0, datalen,
1536 		    BUS_DMASYNC_POSTREAD);
1537 
1538 		/* Copy from bounce buffer */
1539 		memcpy(data, sf->bbuf, datalen);
1540 
1541 		return 0;
1542 	}
1543 
1544 	for (i = 0; i < sc->sc_dmap->dm_nsegs; i++) {
1545 		size_t len = sc->sc_dmap->dm_segs[i].ds_len;
1546 
1547 		error = bus_dmamap_load(sc->sc_dmat, sf->sseg_dmap,
1548 		    data, len, NULL, BUS_DMA_NOWAIT|BUS_DMA_READ);
1549 		if (error)
1550 			return error;
1551 
1552 		bus_dmamap_sync(sc->sc_dmat, sf->sseg_dmap, 0, len,
1553 		    BUS_DMASYNC_PREREAD);
1554 
1555 		error = sdmmc_mem_read_block_subr(sf, sf->sseg_dmap,
1556 		    blkno, data, len);
1557 		if (error) {
1558 			bus_dmamap_unload(sc->sc_dmat, sf->sseg_dmap);
1559 			return error;
1560 		}
1561 
1562 		bus_dmamap_sync(sc->sc_dmat, sf->sseg_dmap, 0, len,
1563 		    BUS_DMASYNC_POSTREAD);
1564 
1565 		bus_dmamap_unload(sc->sc_dmat, sf->sseg_dmap);
1566 
1567 		blkno += len / SDMMC_SECTOR_SIZE;
1568 		data += len;
1569 	}
1570 	return 0;
1571 }
1572 
1573 static int
1574 sdmmc_mem_read_block_subr(struct sdmmc_function *sf, bus_dmamap_t dmap,
1575     uint32_t blkno, u_char *data, size_t datalen)
1576 {
1577 	struct sdmmc_softc *sc = sf->sc;
1578 	struct sdmmc_command cmd;
1579 	int error;
1580 
1581 	if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
1582 		error = sdmmc_select_card(sc, sf);
1583 		if (error)
1584 			goto out;
1585 	}
1586 
1587 	memset(&cmd, 0, sizeof(cmd));
1588 	cmd.c_data = data;
1589 	cmd.c_datalen = datalen;
1590 	cmd.c_blklen = SDMMC_SECTOR_SIZE;
1591 	cmd.c_opcode = (cmd.c_datalen / cmd.c_blklen) > 1 ?
1592 	    MMC_READ_BLOCK_MULTIPLE : MMC_READ_BLOCK_SINGLE;
1593 	cmd.c_arg = blkno;
1594 	if (!ISSET(sf->flags, SFF_SDHC))
1595 		cmd.c_arg <<= SDMMC_SECTOR_SIZE_SB;
1596 	cmd.c_flags = SCF_CMD_ADTC | SCF_CMD_READ | SCF_RSP_R1 | SCF_RSP_SPI_R1;
1597 	if (ISSET(sc->sc_caps, SMC_CAPS_DMA))
1598 		cmd.c_dmamap = dmap;
1599 
1600 	sc->sc_ev_xfer.ev_count++;
1601 
1602 	error = sdmmc_mmc_command(sc, &cmd);
1603 	if (error) {
1604 		sc->sc_ev_xfer_error.ev_count++;
1605 		goto out;
1606 	}
1607 
1608 	const u_int counter = __builtin_ctz(cmd.c_datalen);
1609 	if (counter >= 9 && counter <= 16) {
1610 		sc->sc_ev_xfer_aligned[counter - 9].ev_count++;
1611 	} else {
1612 		sc->sc_ev_xfer_unaligned.ev_count++;
1613 	}
1614 
1615 	if (!ISSET(sc->sc_caps, SMC_CAPS_AUTO_STOP)) {
1616 		if (cmd.c_opcode == MMC_READ_BLOCK_MULTIPLE) {
1617 			memset(&cmd, 0, sizeof cmd);
1618 			cmd.c_opcode = MMC_STOP_TRANSMISSION;
1619 			cmd.c_arg = MMC_ARG_RCA(sf->rca);
1620 			cmd.c_flags = SCF_CMD_AC | SCF_RSP_R1B | SCF_RSP_SPI_R1B;
1621 			error = sdmmc_mmc_command(sc, &cmd);
1622 			if (error)
1623 				goto out;
1624 		}
1625 	}
1626 
1627 	if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
1628 		do {
1629 			memset(&cmd, 0, sizeof(cmd));
1630 			cmd.c_opcode = MMC_SEND_STATUS;
1631 			if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE))
1632 				cmd.c_arg = MMC_ARG_RCA(sf->rca);
1633 			cmd.c_flags = SCF_CMD_AC | SCF_RSP_R1 | SCF_RSP_SPI_R2;
1634 			error = sdmmc_mmc_command(sc, &cmd);
1635 			if (error)
1636 				break;
1637 			/* XXX time out */
1638 		} while (!ISSET(MMC_R1(cmd.c_resp), MMC_R1_READY_FOR_DATA));
1639 	}
1640 
1641 out:
1642 	return error;
1643 }
1644 
1645 int
1646 sdmmc_mem_read_block(struct sdmmc_function *sf, uint32_t blkno, u_char *data,
1647     size_t datalen)
1648 {
1649 	struct sdmmc_softc *sc = sf->sc;
1650 	int error;
1651 
1652 	SDMMC_LOCK(sc);
1653 	mutex_enter(&sc->sc_mtx);
1654 
1655 	if (ISSET(sc->sc_caps, SMC_CAPS_SINGLE_ONLY)) {
1656 		error = sdmmc_mem_single_read_block(sf, blkno, data, datalen);
1657 		goto out;
1658 	}
1659 
1660 	if (!ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
1661 		error = sdmmc_mem_read_block_subr(sf, sc->sc_dmap, blkno, data,
1662 		    datalen);
1663 		goto out;
1664 	}
1665 
1666 	/* DMA transfer */
1667 	error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmap, data, datalen, NULL,
1668 	    BUS_DMA_NOWAIT|BUS_DMA_READ);
1669 	if (error)
1670 		goto out;
1671 
1672 #ifdef SDMMC_DEBUG
1673 	printf("data=%p, datalen=%zu\n", data, datalen);
1674 	for (int i = 0; i < sc->sc_dmap->dm_nsegs; i++) {
1675 		printf("seg#%d: addr=%#lx, size=%#lx\n", i,
1676 		    (u_long)sc->sc_dmap->dm_segs[i].ds_addr,
1677 		    (u_long)sc->sc_dmap->dm_segs[i].ds_len);
1678 	}
1679 #endif
1680 
1681 	if (sc->sc_dmap->dm_nsegs > 1
1682 	    && !ISSET(sc->sc_caps, SMC_CAPS_MULTI_SEG_DMA)) {
1683 		error = sdmmc_mem_single_segment_dma_read_block(sf, blkno,
1684 		    data, datalen);
1685 		goto unload;
1686 	}
1687 
1688 	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap, 0, datalen,
1689 	    BUS_DMASYNC_PREREAD);
1690 
1691 	error = sdmmc_mem_read_block_subr(sf, sc->sc_dmap, blkno, data,
1692 	    datalen);
1693 	if (error)
1694 		goto unload;
1695 
1696 	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap, 0, datalen,
1697 	    BUS_DMASYNC_POSTREAD);
1698 unload:
1699 	bus_dmamap_unload(sc->sc_dmat, sc->sc_dmap);
1700 
1701 out:
1702 	mutex_exit(&sc->sc_mtx);
1703 	SDMMC_UNLOCK(sc);
1704 
1705 	return error;
1706 }
1707 
1708 /* write */
1709 static int
1710 sdmmc_mem_single_write_block(struct sdmmc_function *sf, uint32_t blkno,
1711     u_char *data, size_t datalen)
1712 {
1713 	struct sdmmc_softc *sc = sf->sc;
1714 	int error = 0;
1715 	int i;
1716 
1717 	KASSERT((datalen % SDMMC_SECTOR_SIZE) == 0);
1718 	KASSERT(!ISSET(sc->sc_caps, SMC_CAPS_DMA));
1719 
1720 	for (i = 0; i < datalen / SDMMC_SECTOR_SIZE; i++) {
1721 		error = sdmmc_mem_write_block_subr(sf, sc->sc_dmap, blkno + i,
1722 		    data + i * SDMMC_SECTOR_SIZE, SDMMC_SECTOR_SIZE);
1723 		if (error)
1724 			break;
1725 	}
1726 	return error;
1727 }
1728 
1729 /*
1730  * Simulate a multi-segment DMA transfer.
1731  */
1732 static int
1733 sdmmc_mem_single_segment_dma_write_block(struct sdmmc_function *sf,
1734     uint32_t blkno, u_char *data, size_t datalen)
1735 {
1736 	struct sdmmc_softc *sc = sf->sc;
1737 	bool use_bbuf = false;
1738 	int error = 0;
1739 	int i;
1740 
1741 	for (i = 0; i < sc->sc_dmap->dm_nsegs; i++) {
1742 		size_t len = sc->sc_dmap->dm_segs[i].ds_len;
1743 		if ((len % SDMMC_SECTOR_SIZE) != 0) {
1744 			use_bbuf = true;
1745 			break;
1746 		}
1747 	}
1748 	if (use_bbuf) {
1749 		/* Copy to bounce buffer */
1750 		memcpy(sf->bbuf, data, datalen);
1751 
1752 		bus_dmamap_sync(sc->sc_dmat, sf->bbuf_dmap, 0, datalen,
1753 		    BUS_DMASYNC_PREWRITE);
1754 
1755 		error = sdmmc_mem_write_block_subr(sf, sf->bbuf_dmap,
1756 		    blkno, data, datalen);
1757 		if (error) {
1758 			bus_dmamap_unload(sc->sc_dmat, sf->bbuf_dmap);
1759 			return error;
1760 		}
1761 
1762 		bus_dmamap_sync(sc->sc_dmat, sf->bbuf_dmap, 0, datalen,
1763 		    BUS_DMASYNC_POSTWRITE);
1764 
1765 		return 0;
1766 	}
1767 
1768 	for (i = 0; i < sc->sc_dmap->dm_nsegs; i++) {
1769 		size_t len = sc->sc_dmap->dm_segs[i].ds_len;
1770 
1771 		error = bus_dmamap_load(sc->sc_dmat, sf->sseg_dmap,
1772 		    data, len, NULL, BUS_DMA_NOWAIT|BUS_DMA_WRITE);
1773 		if (error)
1774 			return error;
1775 
1776 		bus_dmamap_sync(sc->sc_dmat, sf->sseg_dmap, 0, len,
1777 		    BUS_DMASYNC_PREWRITE);
1778 
1779 		error = sdmmc_mem_write_block_subr(sf, sf->sseg_dmap,
1780 		    blkno, data, len);
1781 		if (error) {
1782 			bus_dmamap_unload(sc->sc_dmat, sf->sseg_dmap);
1783 			return error;
1784 		}
1785 
1786 		bus_dmamap_sync(sc->sc_dmat, sf->sseg_dmap, 0, len,
1787 		    BUS_DMASYNC_POSTWRITE);
1788 
1789 		bus_dmamap_unload(sc->sc_dmat, sf->sseg_dmap);
1790 
1791 		blkno += len / SDMMC_SECTOR_SIZE;
1792 		data += len;
1793 	}
1794 
1795 	return error;
1796 }
1797 
1798 static int
1799 sdmmc_mem_write_block_subr(struct sdmmc_function *sf, bus_dmamap_t dmap,
1800     uint32_t blkno, u_char *data, size_t datalen)
1801 {
1802 	struct sdmmc_softc *sc = sf->sc;
1803 	struct sdmmc_command cmd;
1804 	int error;
1805 
1806 	if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
1807 		error = sdmmc_select_card(sc, sf);
1808 		if (error)
1809 			goto out;
1810 	}
1811 
1812 	memset(&cmd, 0, sizeof(cmd));
1813 	cmd.c_data = data;
1814 	cmd.c_datalen = datalen;
1815 	cmd.c_blklen = SDMMC_SECTOR_SIZE;
1816 	cmd.c_opcode = (cmd.c_datalen / cmd.c_blklen) > 1 ?
1817 	    MMC_WRITE_BLOCK_MULTIPLE : MMC_WRITE_BLOCK_SINGLE;
1818 	cmd.c_arg = blkno;
1819 	if (!ISSET(sf->flags, SFF_SDHC))
1820 		cmd.c_arg <<= SDMMC_SECTOR_SIZE_SB;
1821 	cmd.c_flags = SCF_CMD_ADTC | SCF_RSP_R1;
1822 	if (ISSET(sc->sc_caps, SMC_CAPS_DMA))
1823 		cmd.c_dmamap = dmap;
1824 
1825 	sc->sc_ev_xfer.ev_count++;
1826 
1827 	error = sdmmc_mmc_command(sc, &cmd);
1828 	if (error) {
1829 		sc->sc_ev_xfer_error.ev_count++;
1830 		goto out;
1831 	}
1832 
1833 	const u_int counter = __builtin_ctz(cmd.c_datalen);
1834 	if (counter >= 9 && counter <= 16) {
1835 		sc->sc_ev_xfer_aligned[counter - 9].ev_count++;
1836 	} else {
1837 		sc->sc_ev_xfer_unaligned.ev_count++;
1838 	}
1839 
1840 	if (!ISSET(sc->sc_caps, SMC_CAPS_AUTO_STOP)) {
1841 		if (cmd.c_opcode == MMC_WRITE_BLOCK_MULTIPLE) {
1842 			memset(&cmd, 0, sizeof(cmd));
1843 			cmd.c_opcode = MMC_STOP_TRANSMISSION;
1844 			cmd.c_flags = SCF_CMD_AC | SCF_RSP_R1B | SCF_RSP_SPI_R1B;
1845 			error = sdmmc_mmc_command(sc, &cmd);
1846 			if (error)
1847 				goto out;
1848 		}
1849 	}
1850 
1851 	if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
1852 		do {
1853 			memset(&cmd, 0, sizeof(cmd));
1854 			cmd.c_opcode = MMC_SEND_STATUS;
1855 			if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE))
1856 				cmd.c_arg = MMC_ARG_RCA(sf->rca);
1857 			cmd.c_flags = SCF_CMD_AC | SCF_RSP_R1 | SCF_RSP_SPI_R2;
1858 			error = sdmmc_mmc_command(sc, &cmd);
1859 			if (error)
1860 				break;
1861 			/* XXX time out */
1862 		} while (!ISSET(MMC_R1(cmd.c_resp), MMC_R1_READY_FOR_DATA));
1863 	}
1864 
1865 out:
1866 	return error;
1867 }
1868 
1869 int
1870 sdmmc_mem_write_block(struct sdmmc_function *sf, uint32_t blkno, u_char *data,
1871     size_t datalen)
1872 {
1873 	struct sdmmc_softc *sc = sf->sc;
1874 	int error;
1875 
1876 	SDMMC_LOCK(sc);
1877 	mutex_enter(&sc->sc_mtx);
1878 
1879 	if (sdmmc_chip_write_protect(sc->sc_sct, sc->sc_sch)) {
1880 		aprint_normal_dev(sc->sc_dev, "write-protected\n");
1881 		error = EIO;
1882 		goto out;
1883 	}
1884 
1885 	if (ISSET(sc->sc_caps, SMC_CAPS_SINGLE_ONLY)) {
1886 		error = sdmmc_mem_single_write_block(sf, blkno, data, datalen);
1887 		goto out;
1888 	}
1889 
1890 	if (!ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
1891 		error = sdmmc_mem_write_block_subr(sf, sc->sc_dmap, blkno, data,
1892 		    datalen);
1893 		goto out;
1894 	}
1895 
1896 	/* DMA transfer */
1897 	error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmap, data, datalen, NULL,
1898 	    BUS_DMA_NOWAIT|BUS_DMA_WRITE);
1899 	if (error)
1900 		goto out;
1901 
1902 #ifdef SDMMC_DEBUG
1903 	aprint_normal_dev(sc->sc_dev, "%s: data=%p, datalen=%zu\n",
1904 	    __func__, data, datalen);
1905 	for (int i = 0; i < sc->sc_dmap->dm_nsegs; i++) {
1906 		aprint_normal_dev(sc->sc_dev,
1907 		    "%s: seg#%d: addr=%#lx, size=%#lx\n", __func__, i,
1908 		    (u_long)sc->sc_dmap->dm_segs[i].ds_addr,
1909 		    (u_long)sc->sc_dmap->dm_segs[i].ds_len);
1910 	}
1911 #endif
1912 
1913 	if (sc->sc_dmap->dm_nsegs > 1
1914 	    && !ISSET(sc->sc_caps, SMC_CAPS_MULTI_SEG_DMA)) {
1915 		error = sdmmc_mem_single_segment_dma_write_block(sf, blkno,
1916 		    data, datalen);
1917 		goto unload;
1918 	}
1919 
1920 	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap, 0, datalen,
1921 	    BUS_DMASYNC_PREWRITE);
1922 
1923 	error = sdmmc_mem_write_block_subr(sf, sc->sc_dmap, blkno, data,
1924 	    datalen);
1925 	if (error)
1926 		goto unload;
1927 
1928 	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap, 0, datalen,
1929 	    BUS_DMASYNC_POSTWRITE);
1930 unload:
1931 	bus_dmamap_unload(sc->sc_dmat, sc->sc_dmap);
1932 
1933 out:
1934 	mutex_exit(&sc->sc_mtx);
1935 	SDMMC_UNLOCK(sc);
1936 
1937 	return error;
1938 }
1939