/*	$NetBSD: pxa2x0_mci.c,v 1.14 2023/03/28 20:01:57 andvar Exp $	*/
/*	$OpenBSD: pxa2x0_mmc.c,v 1.5 2009/02/23 18:09:55 miod Exp $	*/

/*
 * Copyright (c) 2007 Uwe Stuehler <uwe@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*-
 * Copyright (C) 2007-2010 NONAKA Kimihiro <nonaka@netbsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * MMC/SD/SDIO controller driver for Intel PXA2xx processors
 *
 * Power management is beyond control of the processor's SD/SDIO/MMC
 * block, so this driver depends on the attachment driver to provide
 * us with some callback functions via the "tag" member in our softc.
 * Bus power management calls are then dispatched to the attachment
 * driver.
 */
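
/*
 * Illustrative sketch only (not taken from a real attachment): an
 * attachment driver is expected to fill in the callbacks that this file
 * dereferences (cookie, get_ocr, set_power, card_detect, write_protect)
 * and any capability flags (PMC_CAPS_NO_DMA, PMC_CAPS_4BIT) before
 * calling pxamci_attach_sub().  The exact tag layout is defined in
 * pxa2x0_mci.h; the mypxa_* names below are hypothetical.
 *
 *	sc->sc_tag.cookie = sc;
 *	sc->sc_tag.get_ocr = mypxa_get_ocr;
 *	sc->sc_tag.set_power = mypxa_set_power;
 *	sc->sc_tag.card_detect = mypxa_card_detect;
 *	sc->sc_tag.write_protect = mypxa_write_protect;
 *	sc->sc_caps = PMC_CAPS_4BIT;
 *	pxamci_attach_sub(self, pxa);
 *
 * A card-detect GPIO handler in the attachment should call
 * pxamci_card_detect_event() so that sdmmc(4) rediscovers the bus.
 */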

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: pxa2x0_mci.c,v 1.14 2023/03/28 20:01:57 andvar Exp $");

#include <sys/param.h>
#include <sys/device.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/bus.h>
#include <sys/mutex.h>
#include <sys/condvar.h>

#include <machine/intr.h>

#include <dev/sdmmc/sdmmcvar.h>
#include <dev/sdmmc/sdmmcchip.h>

#include <arm/xscale/pxa2x0cpu.h>
#include <arm/xscale/pxa2x0reg.h>
#include <arm/xscale/pxa2x0var.h>
#include <arm/xscale/pxa2x0_dmac.h>
#include <arm/xscale/pxa2x0_gpio.h>
#include <arm/xscale/pxa2x0_mci.h>

#ifdef PXAMCI_DEBUG
int pxamci_debug = 9;
#define DPRINTF(n,s)	do { if ((n) <= pxamci_debug) printf s; } while (0)
#else
#define DPRINTF(n,s)	do {} while (0)
#endif

#ifndef PXAMCI_DEBUG
#define STOPCLK_TIMO	2	/* sec */
#define EXECCMD_TIMO	2	/* sec */
#else
#define STOPCLK_TIMO	2	/* sec */
#define EXECCMD_TIMO	5	/* sec */
#endif

static int	pxamci_host_reset(sdmmc_chipset_handle_t);
static uint32_t	pxamci_host_ocr(sdmmc_chipset_handle_t);
static int	pxamci_host_maxblklen(sdmmc_chipset_handle_t);
static int	pxamci_card_detect(sdmmc_chipset_handle_t);
static int	pxamci_write_protect(sdmmc_chipset_handle_t);
static int	pxamci_bus_power(sdmmc_chipset_handle_t, uint32_t);
static int	pxamci_bus_clock(sdmmc_chipset_handle_t, int);
static int	pxamci_bus_width(sdmmc_chipset_handle_t, int);
static int	pxamci_bus_rod(sdmmc_chipset_handle_t, int);
static void	pxamci_exec_command(sdmmc_chipset_handle_t,
		    struct sdmmc_command *);
static void	pxamci_card_enable_intr(sdmmc_chipset_handle_t, int);
static void	pxamci_card_intr_ack(sdmmc_chipset_handle_t);

static struct sdmmc_chip_functions pxamci_chip_functions = {
	/* host controller reset */
	.host_reset = pxamci_host_reset,

	/* host controller capabilities */
	.host_ocr = pxamci_host_ocr,
	.host_maxblklen = pxamci_host_maxblklen,

	/* card detection */
	.card_detect = pxamci_card_detect,

	/* write protect */
	.write_protect = pxamci_write_protect,

	/* bus power, clock frequency, width */
	.bus_power = pxamci_bus_power,
	.bus_clock = pxamci_bus_clock,
	.bus_width = pxamci_bus_width,
	.bus_rod = pxamci_bus_rod,

	/* command execution */
	.exec_command = pxamci_exec_command,

	/* card interrupt */
	.card_enable_intr = pxamci_card_enable_intr,
	.card_intr_ack = pxamci_card_intr_ack,
};

static int	pxamci_intr(void *);
static void	pxamci_intr_cmd(struct pxamci_softc *);
static void	pxamci_intr_data(struct pxamci_softc *);
static void	pxamci_intr_done(struct pxamci_softc *);
static void	pxamci_dmac_iintr(struct dmac_xfer *, int);
static void	pxamci_dmac_ointr(struct dmac_xfer *, int);

static void	pxamci_stop_clock(struct pxamci_softc *);

#define CSR_READ_1(sc, reg) \
	bus_space_read_1((sc)->sc_iot, (sc)->sc_ioh, (reg))
#define CSR_WRITE_1(sc, reg, val) \
	bus_space_write_1((sc)->sc_iot, (sc)->sc_ioh, (reg), (val))
#define CSR_READ_4(sc, reg) \
	bus_space_read_4((sc)->sc_iot, (sc)->sc_ioh, (reg))
#define CSR_WRITE_4(sc, reg, val) \
	bus_space_write_4((sc)->sc_iot, (sc)->sc_ioh, (reg), (val))
#define CSR_SET_4(sc, reg, val) \
	CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) | (val))
#define CSR_CLR_4(sc, reg, val) \
	CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) & ~(val))

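/*
 * Buffers handed to DMA must be aligned to the 32-byte DMAC burst size
 * configured below (DMAC_BURST_SIZE_32); anything else falls back to
 * PIO in pxamci_exec_command().
 */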
#if 0 /* XXX */
#define DMA_ALIGNED(addr) \
	(((u_long)(addr) & 0x7) == 0 || !CPU_IS_PXA250)
#else
#define DMA_ALIGNED(addr) \
	(((u_long)(addr) & 0x1f) == 0)
#endif

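/*
 * sc_imask shadows the MMC_I_MASK register; a set bit masks (disables)
 * the corresponding interrupt source.
 */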
static void
pxamci_enable_intr(struct pxamci_softc *sc, uint32_t mask)
{
	int s;

	s = splsdmmc();
	sc->sc_imask &= ~mask;
	CSR_WRITE_4(sc, MMC_I_MASK, sc->sc_imask);
	splx(s);
}

static void
pxamci_disable_intr(struct pxamci_softc *sc, uint32_t mask)
{
	int s;

	s = splsdmmc();
	sc->sc_imask |= mask;
	CSR_WRITE_4(sc, MMC_I_MASK, sc->sc_imask);
	splx(s);
}

int
pxamci_attach_sub(device_t self, struct pxaip_attach_args *pxa)
{
	struct pxamci_softc *sc = device_private(self);
	struct sdmmcbus_attach_args saa;

	sc->sc_dev = self;

	aprint_normal(": MMC/SD Controller\n");
	aprint_naive("\n");

	/* Enable the clocks to the MMC controller. */
	pxa2x0_clkman_config(CKEN_MMC, 1);

	sc->sc_iot = pxa->pxa_iot;
	if (bus_space_map(sc->sc_iot, PXA2X0_MMC_BASE, PXA2X0_MMC_SIZE, 0,
	    &sc->sc_ioh)) {
		aprint_error_dev(sc->sc_dev, "couldn't map registers\n");
		goto out;
	}

	/*
	 * Establish the card detection and MMC interrupt handlers and
	 * mask all interrupts until we are prepared to handle them.
	 */
	pxamci_disable_intr(sc, MMC_I_ALL);
	sc->sc_ih = pxa2x0_intr_establish(PXA2X0_INT_MMC, IPL_SDMMC,
	    pxamci_intr, sc);
	if (sc->sc_ih == NULL) {
		aprint_error_dev(sc->sc_dev,
		    "couldn't establish MMC interrupt\n");
		goto free_map;
	}

	/*
	 * Reset the host controller and unmask normal interrupts.
	 */
	(void) pxamci_host_reset(sc);

	/* Setup bus clock */
	if (CPU_IS_PXA270) {
		sc->sc_clkmin = PXA270_MMC_CLKRT_MIN / 1000;
		sc->sc_clkmax = PXA270_MMC_CLKRT_MAX / 1000;
	} else {
		sc->sc_clkmin = PXA250_MMC_CLKRT_MIN / 1000;
		sc->sc_clkmax = PXA250_MMC_CLKRT_MAX / 1000;
	}
	sc->sc_clkbase = sc->sc_clkmin;
	pxamci_bus_clock(sc, sc->sc_clkbase);

	/* Setup max block length */
	if (CPU_IS_PXA270) {
		sc->sc_maxblklen = 2048;
	} else {
		sc->sc_maxblklen = 512;
	}

	/* Set default bus width */
	sc->sc_buswidth = 1;

	/* Set up DMA. */
	if (!ISSET(sc->sc_caps, PMC_CAPS_NO_DMA)) {
		aprint_normal_dev(sc->sc_dev, "using DMA transfer\n");

		sc->sc_rxdr.ds_addr = PXA2X0_MMC_BASE + MMC_RXFIFO;
		sc->sc_rxdr.ds_len = 1;
		sc->sc_rxdx = pxa2x0_dmac_allocate_xfer();
		if (sc->sc_rxdx == NULL) {
			aprint_error_dev(sc->sc_dev,
			    "couldn't alloc rx dma xfer\n");
			goto free_intr;
		}
		sc->sc_rxdx->dx_cookie = sc;
		sc->sc_rxdx->dx_priority = DMAC_PRIORITY_NORMAL;
		sc->sc_rxdx->dx_dev_width = DMAC_DEV_WIDTH_1;
		sc->sc_rxdx->dx_burst_size = DMAC_BURST_SIZE_32;
		sc->sc_rxdx->dx_done = pxamci_dmac_iintr;
		sc->sc_rxdx->dx_peripheral = DMAC_PERIPH_MMCRX;
		sc->sc_rxdx->dx_flow = DMAC_FLOW_CTRL_SRC;
		sc->sc_rxdx->dx_loop_notify = DMAC_DONT_LOOP;
		sc->sc_rxdx->dx_desc[DMAC_DESC_SRC].xd_addr_hold = true;
		sc->sc_rxdx->dx_desc[DMAC_DESC_SRC].xd_nsegs = 1;
		sc->sc_rxdx->dx_desc[DMAC_DESC_SRC].xd_dma_segs = &sc->sc_rxdr;
		sc->sc_rxdx->dx_desc[DMAC_DESC_DST].xd_addr_hold = false;

		sc->sc_txdr.ds_addr = PXA2X0_MMC_BASE + MMC_TXFIFO;
		sc->sc_txdr.ds_len = 1;
		sc->sc_txdx = pxa2x0_dmac_allocate_xfer();
		if (sc->sc_txdx == NULL) {
			aprint_error_dev(sc->sc_dev,
			    "couldn't alloc tx dma xfer\n");
			goto free_xfer;
		}
		sc->sc_txdx->dx_cookie = sc;
		sc->sc_txdx->dx_priority = DMAC_PRIORITY_NORMAL;
		sc->sc_txdx->dx_dev_width = DMAC_DEV_WIDTH_1;
		sc->sc_txdx->dx_burst_size = DMAC_BURST_SIZE_32;
		sc->sc_txdx->dx_done = pxamci_dmac_ointr;
		sc->sc_txdx->dx_peripheral = DMAC_PERIPH_MMCTX;
		sc->sc_txdx->dx_flow = DMAC_FLOW_CTRL_DEST;
		sc->sc_txdx->dx_loop_notify = DMAC_DONT_LOOP;
		sc->sc_txdx->dx_desc[DMAC_DESC_DST].xd_addr_hold = true;
		sc->sc_txdx->dx_desc[DMAC_DESC_DST].xd_nsegs = 1;
		sc->sc_txdx->dx_desc[DMAC_DESC_DST].xd_dma_segs = &sc->sc_txdr;
		sc->sc_txdx->dx_desc[DMAC_DESC_SRC].xd_addr_hold = false;
	}

	/*
	 * Attach the generic SD/MMC bus driver. (The bus driver must
	 * not invoke any chipset functions before it is attached.)
	 */
	memset(&saa, 0, sizeof(saa));
	saa.saa_busname = "sdmmc";
	saa.saa_sct = &pxamci_chip_functions;
	saa.saa_sch = sc;
	saa.saa_dmat = pxa->pxa_dmat;
	saa.saa_clkmin = sc->sc_clkmin;
	saa.saa_clkmax = sc->sc_clkmax;
	saa.saa_caps = 0;
	if (!ISSET(sc->sc_caps, PMC_CAPS_NO_DMA))
		SET(saa.saa_caps, SMC_CAPS_DMA | SMC_CAPS_MULTI_SEG_DMA);
	if (CPU_IS_PXA270 && ISSET(sc->sc_caps, PMC_CAPS_4BIT))
		SET(saa.saa_caps, SMC_CAPS_4BIT_MODE);

	sc->sc_sdmmc = config_found(sc->sc_dev, &saa, NULL, CFARGS_NONE);
	if (sc->sc_sdmmc == NULL) {
		aprint_error_dev(sc->sc_dev, "couldn't attach bus\n");
		goto free_xfer;
	}
	return 0;

free_xfer:
	if (!ISSET(sc->sc_caps, PMC_CAPS_NO_DMA)) {
		if (sc->sc_rxdx)
			pxa2x0_dmac_free_xfer(sc->sc_rxdx);
		if (sc->sc_txdx)
			pxa2x0_dmac_free_xfer(sc->sc_txdx);
	}
free_intr:
	pxa2x0_intr_disestablish(sc->sc_ih);
	sc->sc_ih = NULL;
free_map:
	bus_space_unmap(sc->sc_iot, sc->sc_ioh, PXA2X0_MMC_SIZE);
out:
	pxa2x0_clkman_config(CKEN_MMC, 0);
	return 1;
}

/*
 * Notify card attach/detach event.
 */
void
pxamci_card_detect_event(struct pxamci_softc *sc)
{

	sdmmc_needs_discover(sc->sc_sdmmc);
}

/*
 * Reset the host controller. Called during initialization, when
 * cards are removed, upon resume, and during error recovery.
 */
static int
pxamci_host_reset(sdmmc_chipset_handle_t sch)
{
	struct pxamci_softc *sc = (struct pxamci_softc *)sch;
	int s;

	s = splsdmmc();

	CSR_WRITE_4(sc, MMC_SPI, 0);
	CSR_WRITE_4(sc, MMC_RESTO, 0x7f);
	CSR_WRITE_4(sc, MMC_I_MASK, sc->sc_imask);

	/* Make sure to initialize the card before the next command. */
	CLR(sc->sc_flags, PMF_CARDINITED);

	splx(s);

	return 0;
}

static uint32_t
pxamci_host_ocr(sdmmc_chipset_handle_t sch)
{
	struct pxamci_softc *sc = (struct pxamci_softc *)sch;
	int rv;

	if (__predict_true(sc->sc_tag.get_ocr != NULL)) {
		rv = (*sc->sc_tag.get_ocr)(sc->sc_tag.cookie);
		return rv;
	}

	DPRINTF(0,("%s: driver lacks get_ocr() function.\n",
	    device_xname(sc->sc_dev)));
	return ENXIO;
}

static int
pxamci_host_maxblklen(sdmmc_chipset_handle_t sch)
{
	struct pxamci_softc *sc = (struct pxamci_softc *)sch;

	return sc->sc_maxblklen;
}

static int
pxamci_card_detect(sdmmc_chipset_handle_t sch)
{
	struct pxamci_softc *sc = (struct pxamci_softc *)sch;

	if (__predict_true(sc->sc_tag.card_detect != NULL)) {
		return (*sc->sc_tag.card_detect)(sc->sc_tag.cookie);
	}

	DPRINTF(0,("%s: driver lacks card_detect() function.\n",
	    device_xname(sc->sc_dev)));
	return 1;	/* always detect */
}

static int
pxamci_write_protect(sdmmc_chipset_handle_t sch)
{
	struct pxamci_softc *sc = (struct pxamci_softc *)sch;

	if (__predict_true(sc->sc_tag.write_protect != NULL)) {
		return (*sc->sc_tag.write_protect)(sc->sc_tag.cookie);
	}

	DPRINTF(0,("%s: driver lacks write_protect() function.\n",
	    device_xname(sc->sc_dev)));
	return 0;	/* not write-protected */
}

/*
 * Set or change SD bus voltage and enable or disable SD bus power.
 * Return zero on success.
 */
static int
pxamci_bus_power(sdmmc_chipset_handle_t sch, uint32_t ocr)
{
	struct pxamci_softc *sc = (struct pxamci_softc *)sch;

	/*
	 * Bus power management is beyond control of the SD/SDIO/MMC
	 * block of the PXA2xx processors, so we have to hand this
	 * task off to the attachment driver.
	 */
	if (__predict_true(sc->sc_tag.set_power != NULL)) {
		return (*sc->sc_tag.set_power)(sc->sc_tag.cookie, ocr);
	}

	DPRINTF(0,("%s: driver lacks set_power() function\n",
	    device_xname(sc->sc_dev)));
	return ENXIO;
}

/*
 * Set or change MMCLK frequency or disable the MMC clock.
 * Return zero on success.
 */
static int
pxamci_bus_clock(sdmmc_chipset_handle_t sch, int freq)
{
	struct pxamci_softc *sc = (struct pxamci_softc *)sch;
	int actfreq;
	int div;
	int rv = 0;
	int s;

	s = splsdmmc();

	/*
	 * Stop MMC clock before changing the frequency.
	 */
	pxamci_stop_clock(sc);

	/* Just stop the clock. */
	if (freq == 0)
		goto out;

	/*
	 * PXA27x Errata...
	 *
	 * <snip>
	 * E40. SDIO: SDIO Devices Not Working at 19.5 Mbps
	 *
	 * SD/SDIO controller can only support up to 9.75 Mbps data
	 * transfer rate for SDIO card.
	 * </snip>
	 *
	 * If we don't limit the frequency, CRC errors will be
	 * reported by the controller after we set the bus speed.
	 * XXX slow down incrementally.
	 */
	if (CPU_IS_PXA270) {
		if (freq > 9750) {
			freq = 9750;
		}
	}

	/*
	 * Pick the smallest divider that produces a frequency not
	 * more than `freq' KHz.
	 */
	actfreq = sc->sc_clkmax;
	for (div = 0; div < 7; actfreq /= 2, div++) {
		if (actfreq <= freq)
			break;
	}
	if (div == 7) {
		aprint_error_dev(sc->sc_dev,
		    "unsupported bus frequency of %d KHz\n", freq);
		rv = 1;
		goto out;
	}

	DPRINTF(1,("%s: freq = %d, actfreq = %d, div = %d\n",
	    device_xname(sc->sc_dev), freq, actfreq, div));

	sc->sc_clkbase = actfreq;
	sc->sc_clkrt = div;

	CSR_WRITE_4(sc, MMC_CLKRT, sc->sc_clkrt);
	CSR_WRITE_4(sc, MMC_STRPCL, STRPCL_START);

out:
	splx(s);

	return rv;
}

static int
pxamci_bus_width(sdmmc_chipset_handle_t sch, int width)
{
	struct pxamci_softc *sc = (struct pxamci_softc *)sch;
	int rv = 0;
	int s;

	s = splsdmmc();

	switch (width) {
	case 1:
		break;
	case 4:
		if (CPU_IS_PXA270)
			break;
		/*FALLTHROUGH*/
	default:
		DPRINTF(0,("%s: unsupported bus width (%d)\n",
		    device_xname(sc->sc_dev), width));
		rv = 1;
		goto out;
	}

	sc->sc_buswidth = width;

out:
	splx(s);

	return rv;
}

static int
pxamci_bus_rod(sdmmc_chipset_handle_t sch, int on)
{

	/* not supported */
	return -1;
}

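/*
 * Execute one SD/MMC command: stop MMCLK, program the command, argument
 * and (optional) data transfer registers, restart the clock, and sleep
 * until pxamci_intr() completes the command or EXECCMD_TIMO expires.
 */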
static void
pxamci_exec_command(sdmmc_chipset_handle_t sch, struct sdmmc_command *cmd)
{
	struct pxamci_softc *sc = (struct pxamci_softc *)sch;
	uint32_t cmdat;
	int error;
	int timo;
	int s;

	DPRINTF(1,("%s: start cmd %d arg=%#x data=%p dlen=%d flags=%#x\n",
	    device_xname(sc->sc_dev), cmd->c_opcode, cmd->c_arg, cmd->c_data,
	    cmd->c_datalen, cmd->c_flags));

	s = splsdmmc();

	/* Stop the bus clock (MMCLK). [15.8.3] */
	pxamci_stop_clock(sc);

	/* Set the command and argument. */
	CSR_WRITE_4(sc, MMC_CMD, cmd->c_opcode & CMD_MASK);
	CSR_WRITE_4(sc, MMC_ARGH, (cmd->c_arg >> 16) & ARGH_MASK);
	CSR_WRITE_4(sc, MMC_ARGL, cmd->c_arg & ARGL_MASK);

	/* Response type */
	if (!ISSET(cmd->c_flags, SCF_RSP_PRESENT))
		cmdat = CMDAT_RESPONSE_FORMAT_NO;
	else if (ISSET(cmd->c_flags, SCF_RSP_136))
		cmdat = CMDAT_RESPONSE_FORMAT_R2;
	else if (!ISSET(cmd->c_flags, SCF_RSP_CRC))
		cmdat = CMDAT_RESPONSE_FORMAT_R3;
	else
		cmdat = CMDAT_RESPONSE_FORMAT_R1;

	if (ISSET(cmd->c_flags, SCF_RSP_BSY))
		cmdat |= CMDAT_BUSY;
	if (!ISSET(cmd->c_flags, SCF_CMD_READ))
		cmdat |= CMDAT_WRITE;
	if (sc->sc_buswidth == 4)
		cmdat |= CMDAT_SD_4DAT;

	/* Fragment the data into proper blocks. */
	if (cmd->c_datalen > 0) {
		int blklen = MIN(cmd->c_datalen, cmd->c_blklen);
		int numblk = cmd->c_datalen / blklen;

		if (cmd->c_datalen % blklen > 0) {
			/* XXX: Split this command. (1.7.4) */
			aprint_error_dev(sc->sc_dev,
			    "data not a multiple of %u bytes\n", blklen);
			cmd->c_error = EINVAL;
			goto out;
		}

		/* Check limit imposed by block count. */
		if (numblk > NOB_MASK) {
			aprint_error_dev(sc->sc_dev, "too much data\n");
			cmd->c_error = EINVAL;
			goto out;
		}

		CSR_WRITE_4(sc, MMC_BLKLEN, blklen);
		CSR_WRITE_4(sc, MMC_NOB, numblk);
		CSR_WRITE_4(sc, MMC_RDTO, RDTO_MASK);

		cmdat |= CMDAT_DATA_EN;

		/* Set up DMA. */
		if (!ISSET(sc->sc_caps, PMC_CAPS_NO_DMA)
		    && DMA_ALIGNED(cmd->c_data)) {
			struct dmac_xfer_desc *dx_desc;

			DPRINTF(1,("%s: using DMA\n",device_xname(sc->sc_dev)));

			cmdat |= CMDAT_MMC_DMA_EN;

			if (ISSET(cmd->c_flags, SCF_CMD_READ)) {
				dx_desc = &sc->sc_rxdx->dx_desc[DMAC_DESC_DST];
				dx_desc->xd_nsegs = cmd->c_dmamap->dm_nsegs;
				dx_desc->xd_dma_segs = cmd->c_dmamap->dm_segs;
				error = pxa2x0_dmac_start_xfer(sc->sc_rxdx);
			} else {
				dx_desc = &sc->sc_txdx->dx_desc[DMAC_DESC_SRC];
				dx_desc->xd_nsegs = cmd->c_dmamap->dm_nsegs;
				dx_desc->xd_dma_segs = cmd->c_dmamap->dm_segs;
				/* workaround for erratum #91 */
				error = 0;
				if (!CPU_IS_PXA270) {
					error =
					    pxa2x0_dmac_start_xfer(sc->sc_txdx);
				}
			}
			if (error) {
				aprint_error_dev(sc->sc_dev,
				    "couldn't start dma xfer. (error=%d)\n",
				    error);
				cmd->c_error = EIO;
				goto err;
			}
		} else {
			DPRINTF(1,("%s: using PIO\n",device_xname(sc->sc_dev)));

			cmd->c_resid = cmd->c_datalen;
			cmd->c_buf = cmd->c_data;

			pxamci_enable_intr(sc, MMC_I_RXFIFO_RD_REQ
			    | MMC_I_TXFIFO_WR_REQ
			    | MMC_I_DAT_ERR);
		}
	}

	sc->sc_cmd = cmd;

	/*
	 * "After reset, the MMC card must be initialized by sending
	 * 80 clocks to it on the MMCLK signal." [15.4.3.2]
	 */
	if (!ISSET(sc->sc_flags, PMF_CARDINITED)) {
		DPRINTF(1,("%s: first command\n", device_xname(sc->sc_dev)));
		cmdat |= CMDAT_INIT;
		SET(sc->sc_flags, PMF_CARDINITED);
	}

	/* Begin the transfer and start the bus clock. */
	CSR_WRITE_4(sc, MMC_CMDAT, cmdat);
	CSR_WRITE_4(sc, MMC_CLKRT, sc->sc_clkrt);
	CSR_WRITE_4(sc, MMC_STRPCL, STRPCL_START);

	/* Wait for it to complete */
	pxamci_enable_intr(sc, MMC_I_END_CMD_RES|MMC_I_RES_ERR);
	for (timo = EXECCMD_TIMO; (sc->sc_cmd == cmd) && (timo > 0); timo--) {
		tsleep(sc, PWAIT, "mmcmd", hz);
	}

	/* If it completed in time, SCF_ITSDONE is already set. */
	if (sc->sc_cmd == cmd) {
		cmd->c_error = ETIMEDOUT;
err:
		SET(cmd->c_flags, SCF_ITSDONE);
		sc->sc_cmd = NULL;
		goto out;
	}

out:
	splx(s);

	DPRINTF(1,("%s: cmd %d done (flags=%08x error=%d)\n",
	    device_xname(sc->sc_dev), cmd->c_opcode, cmd->c_flags, cmd->c_error));
}

static void
pxamci_card_enable_intr(sdmmc_chipset_handle_t sch, int enable)
{
	struct pxamci_softc *sc = (struct pxamci_softc *)sch;

	if (enable) {
		pxamci_enable_intr(sc, MMC_I_SDIO_INT);
	} else {
		pxamci_disable_intr(sc, MMC_I_SDIO_INT);
	}
}

static void
pxamci_card_intr_ack(sdmmc_chipset_handle_t sch)
{

	/* Nothing to do */
}

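/*
 * Stop MMCLK and wait briefly for the controller to confirm that the
 * clock is really off; pxamci_intr() wakes us on MMC_I_CLK_IS_OFF.
 */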
static void
pxamci_stop_clock(struct pxamci_softc *sc)
{
	int timo = STOPCLK_TIMO;

	if (ISSET(CSR_READ_4(sc, MMC_STAT), STAT_CLK_EN)) {
		CSR_CLR_4(sc, MMC_I_MASK, MMC_I_CLK_IS_OFF);
		CSR_WRITE_4(sc, MMC_STRPCL, STRPCL_STOP);
		while (ISSET(CSR_READ_4(sc, MMC_STAT), STAT_CLK_EN)
		    && (timo-- > 0)) {
			tsleep(sc, PWAIT, "mmclk", hz);
		}
	}
	if (timo < 0)
		aprint_error_dev(sc->sc_dev, "clock stop timeout\n");
}

/*
 * SD/MMC controller interrupt handler
 */
static int
pxamci_intr(void *arg)
{
	struct pxamci_softc *sc = arg;
	int status;
#ifdef PXAMCI_DEBUG
	int ostatus;

	ostatus =
#endif
	status = CSR_READ_4(sc, MMC_I_REG) & ~CSR_READ_4(sc, MMC_I_MASK);
	DPRINTF(10,("%s: intr status = %08x\n", device_xname(sc->sc_dev),
	    status));

	/*
	 * Notify the process waiting in pxamci_stop_clock() when
	 * the clock has really stopped.
	 */
	if (ISSET(status, MMC_I_CLK_IS_OFF)) {
		DPRINTF(2,("%s: clock is now off\n", device_xname(sc->sc_dev)));
		wakeup(sc);
		pxamci_disable_intr(sc, MMC_I_CLK_IS_OFF);
		CLR(status, MMC_I_CLK_IS_OFF);
	}

	if (sc->sc_cmd == NULL)
		goto end;

	if (ISSET(status, MMC_I_RES_ERR)) {
		DPRINTF(9, ("%s: handling MMC_I_RES_ERR\n",
		    device_xname(sc->sc_dev)));
		pxamci_disable_intr(sc, MMC_I_RES_ERR);
		CLR(status, MMC_I_RES_ERR|MMC_I_END_CMD_RES);
		if (!ISSET(sc->sc_caps, PMC_CAPS_NO_DMA)
		    && (sc->sc_cmd->c_datalen > 0)
		    && DMA_ALIGNED(sc->sc_cmd->c_data)) {
			if (ISSET(sc->sc_cmd->c_flags, SCF_CMD_READ)) {
				pxa2x0_dmac_abort_xfer(sc->sc_rxdx);
			} else {
				pxa2x0_dmac_abort_xfer(sc->sc_txdx);
			}
		}
		sc->sc_cmd->c_error = ENOEXEC;
		pxamci_intr_done(sc);
		goto end;
	}

	if (ISSET(status, MMC_I_END_CMD_RES)) {
		DPRINTF(9,("%s: handling MMC_I_END_CMD_RES\n",
		    device_xname(sc->sc_dev)));
		pxamci_intr_cmd(sc);
		pxamci_disable_intr(sc, MMC_I_END_CMD_RES);
		CLR(status, MMC_I_END_CMD_RES);
		/* ignore programming done condition */
		if (ISSET(status, MMC_I_PRG_DONE)) {
			pxamci_disable_intr(sc, MMC_I_PRG_DONE);
			CLR(status, MMC_I_PRG_DONE);
		}
		if (sc->sc_cmd == NULL)
			goto end;
	}

	if (ISSET(status, MMC_I_DAT_ERR)) {
		DPRINTF(9, ("%s: handling MMC_I_DAT_ERR\n",
		    device_xname(sc->sc_dev)));
		sc->sc_cmd->c_error = EIO;
		if (!ISSET(sc->sc_caps, PMC_CAPS_NO_DMA)
		    && DMA_ALIGNED(sc->sc_cmd->c_data)) {
			if (ISSET(sc->sc_cmd->c_flags, SCF_CMD_READ)) {
				pxa2x0_dmac_abort_xfer(sc->sc_rxdx);
			} else {
				pxa2x0_dmac_abort_xfer(sc->sc_txdx);
			}
		}
		pxamci_intr_done(sc);
		pxamci_disable_intr(sc, MMC_I_DAT_ERR);
		CLR(status, MMC_I_DAT_ERR);
		/* ignore transmission done condition */
		if (ISSET(status, MMC_I_DATA_TRAN_DONE)) {
			pxamci_disable_intr(sc, MMC_I_DATA_TRAN_DONE);
			CLR(status, MMC_I_DATA_TRAN_DONE);
		}
		goto end;
	}

	if (ISSET(status, MMC_I_DATA_TRAN_DONE)) {
		DPRINTF(9,("%s: handling MMC_I_DATA_TRAN_DONE\n",
		    device_xname(sc->sc_dev)));
		pxamci_intr_done(sc);
		pxamci_disable_intr(sc, MMC_I_DATA_TRAN_DONE);
		CLR(status, MMC_I_DATA_TRAN_DONE);
	}

	if (ISSET(status, MMC_I_TXFIFO_WR_REQ|MMC_I_RXFIFO_RD_REQ)) {
		DPRINTF(10,("%s: handling MMC_I_xxFIFO_xx_REQ\n",
		    device_xname(sc->sc_dev)));
		pxamci_intr_data(sc);
		CLR(status, MMC_I_TXFIFO_WR_REQ|MMC_I_RXFIFO_RD_REQ);
	}

	if (ISSET(status, STAT_SDIO_INT)) {
		DPRINTF(9,("%s: handling STAT_SDIO_INT\n",
		    device_xname(sc->sc_dev)));
		sdmmc_card_intr(sc->sc_sdmmc);
		CLR(status, STAT_SDIO_INT);
	}

end:
	/* Avoid further unhandled interrupts. */
	if (status != 0) {
		pxamci_disable_intr(sc, status);
#ifdef PXAMCI_DEBUG
		aprint_error_dev(sc->sc_dev,
		    "unhandled interrupt 0x%x out of 0x%x\n", status, ostatus);
#endif
	}
	return 1;
}

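/*
 * Command/response phase is complete: read the response FIFO, translate
 * the status register into an error code, and either start the data
 * phase (TX DMA deferred here per erratum #91) or finish the command.
 */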
static void
pxamci_intr_cmd(struct pxamci_softc *sc)
{
	struct sdmmc_command *cmd = sc->sc_cmd;
	uint32_t status;
	int error;
	int i;

	KASSERT(sc->sc_cmd != NULL);

#define STAT_ERR	(STAT_READ_TIME_OUT \
			 | STAT_TIMEOUT_RESPONSE \
			 | STAT_CRC_WRITE_ERROR \
			 | STAT_CRC_READ_ERROR \
			 | STAT_SPI_READ_ERROR_TOKEN)

	if (ISSET(cmd->c_flags, SCF_RSP_136)) {
		for (i = 3; i >= 0; i--) {
			uint32_t h = CSR_READ_4(sc, MMC_RES) & 0xffff;
			uint32_t l = CSR_READ_4(sc, MMC_RES) & 0xffff;
			cmd->c_resp[i] = (h << 16) | l;
		}
		cmd->c_error = 0;
	} else if (ISSET(cmd->c_flags, SCF_RSP_PRESENT)) {
		/*
		 * Grrr... The processor manual is not clear about
		 * the layout of the response FIFO. It just states
		 * that the FIFO is 16 bits wide, has a depth of 8,
		 * and that the CRC is not copied into the FIFO.
		 *
		 * A 16-bit word in the FIFO is filled from highest
		 * to lowest bit as the response comes in. The two
		 * start bits and the 6 command index bits are thus
		 * stored in the upper 8 bits of the first 16-bit
		 * word that we read back from the FIFO.
		 *
		 * Since the sdmmc(4) framework expects the host
		 * controller to discard the first 8 bits of the
		 * response, what we must do is discard the upper
		 * byte of the first 16-bit word.
		 */
		uint32_t h = CSR_READ_4(sc, MMC_RES) & 0xffff;
		uint32_t m = CSR_READ_4(sc, MMC_RES) & 0xffff;
		uint32_t l = CSR_READ_4(sc, MMC_RES) & 0xffff;
		cmd->c_resp[0] = (h << 24) | (m << 8) | (l >> 8);
		for (i = 1; i < 4; i++)
			cmd->c_resp[i] = 0;
		cmd->c_error = 0;
	}

	status = CSR_READ_4(sc, MMC_STAT);

	if (!ISSET(cmd->c_flags, SCF_RSP_PRESENT))
		CLR(status, STAT_TIMEOUT_RESPONSE);

	/* XXX only for R6, not for R2 */
	if (!ISSET(cmd->c_flags, SCF_RSP_IDX))
		CLR(status, STAT_RES_CRC_ERR);

	if (ISSET(status, STAT_TIMEOUT_RESPONSE))
		cmd->c_error = ETIMEDOUT;
	else if (ISSET(status, STAT_RES_CRC_ERR)
	    && ISSET(cmd->c_flags, SCF_RSP_CRC)
	    && CPU_IS_PXA270) {
		/* workaround for erratum #42 */
		if (ISSET(cmd->c_flags, SCF_RSP_136)
		    && (cmd->c_resp[0] & 0x80000000U)) {
			DPRINTF(1,("%s: ignore CRC error\n",
			    device_xname(sc->sc_dev)));
		} else
			cmd->c_error = EIO;
	} else if (ISSET(status, STAT_ERR))
		cmd->c_error = EIO;

	if (cmd->c_error == 0 && cmd->c_datalen > 0) {
		if (!ISSET(sc->sc_caps, PMC_CAPS_NO_DMA)
		    && DMA_ALIGNED(cmd->c_data)) {
			/* workaround for erratum #91 */
			if (CPU_IS_PXA270
			    && !ISSET(cmd->c_flags, SCF_CMD_READ)) {
				error = pxa2x0_dmac_start_xfer(sc->sc_txdx);
				if (error) {
					aprint_error_dev(sc->sc_dev,
					    "couldn't start dma xfer."
					    " (error=%d)\n", error);
					cmd->c_error = EIO;
					pxamci_intr_done(sc);
					return;
				}
			}
			pxamci_enable_intr(sc,
			    MMC_I_DATA_TRAN_DONE|MMC_I_DAT_ERR);
		}
	} else {
		pxamci_intr_done(sc);
	}
}

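/*
 * PIO data phase: the FIFOs are serviced 32 bytes at a time.  A short
 * final write must be pushed out of the transmit FIFO via MMC_PRTBUF.
 */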
static void
pxamci_intr_data(struct pxamci_softc *sc)
{
	struct sdmmc_command *cmd = sc->sc_cmd;
	int intr;
	int n;

	DPRINTF(10,("%s: pxamci_intr_data: cmd = %p, resid = %d\n",
	    device_xname(sc->sc_dev), cmd, cmd->c_resid));

	n = MIN(32, cmd->c_resid);
	cmd->c_resid -= n;

	if (ISSET(cmd->c_flags, SCF_CMD_READ)) {
		intr = MMC_I_RXFIFO_RD_REQ;
		while (n-- > 0)
			*cmd->c_buf++ = CSR_READ_1(sc, MMC_RXFIFO);
	} else {
		int short_xfer = n < 32;

		intr = MMC_I_TXFIFO_WR_REQ;
		while (n-- > 0)
			CSR_WRITE_1(sc, MMC_TXFIFO, *cmd->c_buf++);
		if (short_xfer)
			CSR_WRITE_4(sc, MMC_PRTBUF, 1);
	}

	if (cmd->c_resid > 0) {
		pxamci_enable_intr(sc, intr);
	} else {
		pxamci_disable_intr(sc, intr);
		pxamci_enable_intr(sc, MMC_I_DATA_TRAN_DONE);
	}
}

/*
 * Wake up the process sleeping in pxamci_exec_command().
 */
static void
pxamci_intr_done(struct pxamci_softc *sc)
{

	DPRINTF(1,("%s: pxamci_intr_done: mmc status = %#x\n",
	    device_xname(sc->sc_dev), CSR_READ_4(sc, MMC_STAT)));

	pxamci_disable_intr(sc, MMC_I_TXFIFO_WR_REQ|MMC_I_RXFIFO_RD_REQ|
	    MMC_I_DATA_TRAN_DONE|MMC_I_END_CMD_RES|MMC_I_RES_ERR|MMC_I_DAT_ERR);
	SET(sc->sc_cmd->c_flags, SCF_ITSDONE);
	sc->sc_cmd = NULL;
	wakeup(sc);
}

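/*
 * RX DMA completion: nothing more to do on success; the command is
 * finished from pxamci_intr() on MMC_I_DATA_TRAN_DONE.
 */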
static void
pxamci_dmac_iintr(struct dmac_xfer *dx, int status)
{
	struct pxamci_softc *sc = dx->dx_cookie;

	DPRINTF(1,("%s: pxamci_dmac_iintr: status = %#x\n",
	    device_xname(sc->sc_dev), status));

	if (status) {
		aprint_error_dev(sc->sc_dev, "pxamci_dmac_iintr: "
		    "non-zero completion status %d\n", status);
	}
}

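/*
 * TX DMA completion: a transfer that is not a multiple of the 32-byte
 * burst size leaves a partial buffer in the transmit FIFO, which must
 * be flushed with MMC_PRTBUF.
 */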
static void
pxamci_dmac_ointr(struct dmac_xfer *dx, int status)
{
	struct pxamci_softc *sc = dx->dx_cookie;

	DPRINTF(1,("%s: pxamci_dmac_ointr: status = %#x\n",
	    device_xname(sc->sc_dev), status));

	if (status == 0) {
		if (sc->sc_cmd != NULL && (sc->sc_cmd->c_datalen & 31) != 0) {
			CSR_WRITE_4(sc, MMC_PRTBUF, 1);
		}
	} else {
		aprint_error_dev(sc->sc_dev, "pxamci_dmac_ointr: "
		    "non-zero completion status %d\n", status);
	}
}