/*	$Id: at91spi.c,v 1.7 2021/08/07 16:18:43 thorpej Exp $	*/
/*	$NetBSD: at91spi.c,v 1.7 2021/08/07 16:18:43 thorpej Exp $	*/

/*-
 * Copyright (c) 2007 Embedtronics Oy. All rights reserved.
 *
 * Based on arch/mips/alchemy/dev/auspi.c,
 * Copyright (c) 2006 Urbana-Champaign Independent Media Center.
 * Copyright (c) 2006 Garrett D'Amore.
 * All rights reserved.
 *
 * Portions of this code were written by Garrett D'Amore for the
 * Champaign-Urbana Community Wireless Network Project.
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials provided
 *    with the distribution.
 * 3. All advertising materials mentioning features or use of this
 *    software must display the following acknowledgements:
 *      This product includes software developed by the Urbana-Champaign
 *      Independent Media Center.
 *      This product includes software developed by Garrett D'Amore.
 * 4. Urbana-Champaign Independent Media Center's name and Garrett
 *    D'Amore's name may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE URBANA-CHAMPAIGN INDEPENDENT
 * MEDIA CENTER AND GARRETT D'AMORE ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE URBANA-CHAMPAIGN INDEPENDENT
 * MEDIA CENTER OR GARRETT D'AMORE BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: at91spi.c,v 1.7 2021/08/07 16:18:43 thorpej Exp $");

#include "locators.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/errno.h>
#include <sys/proc.h>

#include <sys/bus.h>
#include <machine/cpu.h>
#include <machine/vmparam.h>
#include <sys/inttypes.h>

#include <arm/at91/at91var.h>
#include <arm/at91/at91reg.h>
#include <arm/at91/at91spivar.h>
#include <arm/at91/at91spireg.h>

#define	at91spi_select(sc, slave)	\
	(sc)->sc_md->select_slave((sc), (slave))

#define	STATIC

//#define	AT91SPI_DEBUG	4

#ifdef AT91SPI_DEBUG
int at91spi_debug = AT91SPI_DEBUG;
#define	DPRINTFN(n,x)	do { if (at91spi_debug > (n)) printf x; } while (0)
#else
#define	DPRINTFN(n,x)	do { } while (0)
#endif

STATIC int at91spi_intr(void *);

/* SPI service routines */
STATIC int at91spi_configure(void *, int, int, int);
STATIC int at91spi_transfer(void *, struct spi_transfer *);
STATIC void at91spi_xfer(struct at91spi_softc *sc, int start);

/* internal stuff */
STATIC void at91spi_done(struct at91spi_softc *, int);
STATIC void at91spi_send(struct at91spi_softc *);
STATIC void at91spi_recv(struct at91spi_softc *);
STATIC void at91spi_sched(struct at91spi_softc *);

#define	GETREG(sc, x)		\
	bus_space_read_4(sc->sc_iot, sc->sc_ioh, x)
#define	PUTREG(sc, x, v)	\
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, x, v)

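/*
 * at91spi_attach_common:
 *
 *	Common attachment code: map the controller registers, allocate a
 *	one-page DMA bounce buffer, reset the SPI block and its PDC
 *	channels, establish the interrupt handler and attach the SPI bus.
 *	Slave selection is delegated to the machine-dependent tag (md).
 */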
void
at91spi_attach_common(device_t parent, device_t self, void *aux,
    at91spi_machdep_tag_t md)
{
	struct at91spi_softc *sc = device_private(self);
	struct at91bus_attach_args *sa = aux;
	struct spibus_attach_args sba;
	bus_dma_segment_t segs;
	int rsegs, err;

	aprint_normal(": AT91 SPI Controller\n");

	sc->sc_dev = self;
	sc->sc_iot = sa->sa_iot;
	sc->sc_pid = sa->sa_pid;
	sc->sc_dmat = sa->sa_dmat;
	sc->sc_md = md;

	if (bus_space_map(sa->sa_iot, sa->sa_addr, sa->sa_size, 0, &sc->sc_ioh))
		panic("%s: Cannot map registers", device_xname(self));

	/* we want to use dma, so allocate dma memory: */
	err = bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE, 0, PAGE_SIZE,
	    &segs, 1, &rsegs, BUS_DMA_WAITOK);
	if (err == 0) {
		err = bus_dmamem_map(sc->sc_dmat, &segs, 1, PAGE_SIZE,
		    &sc->sc_dmapage, BUS_DMA_WAITOK);
	}
	if (err == 0) {
		err = bus_dmamap_create(sc->sc_dmat, PAGE_SIZE, 1,
		    PAGE_SIZE, 0, BUS_DMA_WAITOK, &sc->sc_dmamap);
	}
	if (err == 0) {
		err = bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap,
		    sc->sc_dmapage, PAGE_SIZE, NULL, BUS_DMA_WAITOK);
	}
	if (err != 0) {
		panic("%s: Cannot get DMA memory", device_xname(sc->sc_dev));
	}
	sc->sc_dmaaddr = sc->sc_dmamap->dm_segs[0].ds_addr;

	/*
	 * Initialize SPI controller
	 */
	sc->sc_spi.sct_cookie = sc;
	sc->sc_spi.sct_configure = at91spi_configure;
	sc->sc_spi.sct_transfer = at91spi_transfer;

	//sc->sc_spi.sct_nslaves must have been initialized by machdep code
	if (!sc->sc_spi.sct_nslaves) {
		aprint_error("%s: no slaves!\n", device_xname(sc->sc_dev));
	}

	memset(&sba, 0, sizeof(sba));
	sba.sba_controller = &sc->sc_spi;

	/* initialize the queue */
	SIMPLEQ_INIT(&sc->sc_q);

	/* reset the SPI */
	at91_peripheral_clock(sc->sc_pid, 1);
	PUTREG(sc, SPI_CR, SPI_CR_SWRST);
	delay(100);

	/* be paranoid and make sure the PDC is dead */
	PUTREG(sc, SPI_PDC_BASE + PDC_PTCR, PDC_PTCR_TXTDIS | PDC_PTCR_RXTDIS);
	PUTREG(sc, SPI_PDC_BASE + PDC_RNCR, 0);
	PUTREG(sc, SPI_PDC_BASE + PDC_RCR, 0);
	PUTREG(sc, SPI_PDC_BASE + PDC_TNCR, 0);
	PUTREG(sc, SPI_PDC_BASE + PDC_TCR, 0);

	// configure SPI:
	PUTREG(sc, SPI_IDR, -1);
	PUTREG(sc, SPI_CSR(0), SPI_CSR_SCBR | SPI_CSR_BITS_8);
	PUTREG(sc, SPI_CSR(1), SPI_CSR_SCBR | SPI_CSR_BITS_8);
	PUTREG(sc, SPI_CSR(2), SPI_CSR_SCBR | SPI_CSR_BITS_8);
	PUTREG(sc, SPI_CSR(3), SPI_CSR_SCBR | SPI_CSR_BITS_8);
	PUTREG(sc, SPI_MR, SPI_MR_MODFDIS/* <- machdep? */ | SPI_MR_MSTR);

	/* enable device interrupts */
	sc->sc_ih = at91_intr_establish(sc->sc_pid, IPL_BIO, INTR_HIGH_LEVEL,
	    at91spi_intr, sc);

	/* enable SPI */
	PUTREG(sc, SPI_CR, SPI_CR_SPIEN);
	if (GETREG(sc, SPI_SR) & SPI_SR_RDRF)
		(void)GETREG(sc, SPI_RDR);

	PUTREG(sc, SPI_PDC_BASE + PDC_PTCR, PDC_PTCR_TXTEN | PDC_PTCR_RXTEN);

	/* attach slave devices */
	config_found(sc->sc_dev, &sba, spibus_print, CFARGS_NONE);
}

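/*
 * at91spi_configure:
 *
 *	Set word size, clock divider and clock phase/polarity for a slave.
 *	The resulting settings are written to chip select register 0;
 *	actual slave selection happens via the machine-dependent
 *	select_slave hook.
 */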
int
at91spi_configure(void *arg, int slave, int mode, int speed)
{
	struct at91spi_softc *sc = arg;
	uint scbr;
	uint32_t csr;

	/* setup interrupt registers */
	PUTREG(sc, SPI_IDR, -1);	/* disable interrupts for now */

	csr = GETREG(sc, SPI_CSR(0));	/* read register */
	csr &= SPI_CSR_RESERVED;	/* keep reserved bits */
	csr |= SPI_CSR_BITS_8;		/* assume 8 bit transfers */

	/*
	 * Calculate clock divider
	 */
	scbr = speed ? ((AT91_MSTCLK + speed - 1) / speed + 1) & ~1 : -1;
	if (scbr > 0xFF) {
		aprint_error("%s: speed %d not supported\n",
		    device_xname(sc->sc_dev), speed);
		return EINVAL;
	}
	csr |= scbr << SPI_CSR_SCBR_SHIFT;

	/*
	 * I'm not entirely confident that these values are correct.
	 * But at least mode 0 appears to work properly with the
	 * devices I have tested.  The documentation seems to suggest
	 * that I have the meaning of the clock delay bit inverted.
	 */
	switch (mode) {
	case SPI_MODE_0:
		csr |= SPI_CSR_NCPHA;		/* CPHA = 0, CPOL = 0 */
		break;
	case SPI_MODE_1:
		csr |= 0;			/* CPHA = 1, CPOL = 0 */
		break;
	case SPI_MODE_2:
		csr |= SPI_CSR_NCPHA		/* CPHA = 0, CPOL = 1 */
		    | SPI_CSR_CPOL;
		break;
	case SPI_MODE_3:
		csr |= SPI_CSR_CPOL;		/* CPHA = 1, CPOL = 1 */
		break;
	default:
		return EINVAL;
	}

	PUTREG(sc, SPI_CSR(0), csr);

	DPRINTFN(3, ("%s: slave %d mode %d speed %d, csr=0x%08"PRIX32"\n",
	    __FUNCTION__, slave, mode, speed, csr));

#if 0
	// wait until ready!?
	for (i = 1000000; i; i -= 10) {
		if (GETREG(sc, AUPSC_SPISTAT) & SPISTAT_DR) {
			return 0;
		}
	}

	return ETIMEDOUT;
#else
	return 0;
#endif
}

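/*
 * The single DMA page is used as a ping-pong buffer: one half is owned
 * by the PDC while the other half is being filled or drained by the CPU.
 */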
#define	HALF_BUF_SIZE	(PAGE_SIZE / 2)

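/*
 * at91spi_xfer:
 *
 *	Advance the PDC ping-pong buffers: copy out data received into the
 *	half-buffer that just completed (unless this is the start of a
 *	transfer) and hand the next half-buffer to the PDC "next"
 *	pointer/counter registers.
 */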
void
at91spi_xfer(struct at91spi_softc *sc, int start)
{
	struct spi_chunk *chunk;
	int len;
	uint32_t sr;

	DPRINTFN(3, ("%s: sc=%p start=%d\n", __FUNCTION__, sc, start));

	/* so ready to transmit more / anything received? */
	if (((sr = GETREG(sc, SPI_SR)) & (SPI_SR_ENDTX | SPI_SR_ENDRX)) !=
	    (SPI_SR_ENDTX | SPI_SR_ENDRX)) {
		/* not ready, get out */
		DPRINTFN(3, ("%s: sc=%p start=%d sr=%"PRIX32"\n",
		    __FUNCTION__, sc, start, sr));
		return;
	}

	DPRINTFN(3, ("%s: sr=%"PRIX32"\n", __FUNCTION__, sr));

	if (!start) {
		// ok, something has been transferred, synchronize..
		int offs = sc->sc_dmaoffs ^ HALF_BUF_SIZE;
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, offs, HALF_BUF_SIZE,
		    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);

		if ((chunk = sc->sc_rchunk) != NULL) {
			if ((len = chunk->chunk_rresid) > HALF_BUF_SIZE)
				len = HALF_BUF_SIZE;
			if (chunk->chunk_rptr && len > 0) {
				memcpy(chunk->chunk_rptr,
				    (const uint8_t *)sc->sc_dmapage + offs,
				    len);
				chunk->chunk_rptr += len;
			}
			if ((chunk->chunk_rresid -= len) <= 0) {
				// done with this chunk, get next
				sc->sc_rchunk = chunk->chunk_next;
			}
		}
	}

	/* start transmitting next chunk: */
	if ((chunk = sc->sc_wchunk) != NULL) {

		/* make sure we transmit just half buffer at a time */
		len = MIN(chunk->chunk_wresid, HALF_BUF_SIZE);

		// setup outgoing data
		if (chunk->chunk_wptr && len > 0) {
			memcpy((uint8_t *)sc->sc_dmapage + sc->sc_dmaoffs,
			    chunk->chunk_wptr, len);
			chunk->chunk_wptr += len;
		} else {
			memset((uint8_t *)sc->sc_dmapage + sc->sc_dmaoffs, 0, len);
		}

		/* advance to next transfer if it's time to */
		if ((chunk->chunk_wresid -= len) <= 0) {
			sc->sc_wchunk = sc->sc_wchunk->chunk_next;
		}

		/* determine which interrupt to get */
		if (sc->sc_wchunk) {
			/* just wait for next buffer to free */
			PUTREG(sc, SPI_IER, SPI_SR_ENDRX);
		} else {
			/* must wait until transfer has completed */
			PUTREG(sc, SPI_IDR, SPI_SR_ENDRX);
			PUTREG(sc, SPI_IER, SPI_SR_RXBUFF);
		}

		DPRINTFN(3, ("%s: dmaoffs=%d len=%d wchunk=%p (%p:%d) "
		    "rchunk=%p (%p:%d) mr=%"PRIX32" sr=%"PRIX32
		    " imr=%"PRIX32" csr0=%"PRIX32"\n",
		    __FUNCTION__, sc->sc_dmaoffs, len, sc->sc_wchunk,
		    sc->sc_wchunk ? sc->sc_wchunk->chunk_wptr : NULL,
		    sc->sc_wchunk ? sc->sc_wchunk->chunk_wresid : -1,
		    sc->sc_rchunk,
		    sc->sc_rchunk ? sc->sc_rchunk->chunk_rptr : NULL,
		    sc->sc_rchunk ? sc->sc_rchunk->chunk_rresid : -1,
		    GETREG(sc, SPI_MR), GETREG(sc, SPI_SR),
		    GETREG(sc, SPI_IMR), GETREG(sc, SPI_CSR(0))));

		// prepare DMA
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, sc->sc_dmaoffs, len,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);

		// and start transmitting / receiving
		PUTREG(sc, SPI_PDC_BASE + PDC_RNPR, sc->sc_dmaaddr + sc->sc_dmaoffs);
		PUTREG(sc, SPI_PDC_BASE + PDC_RNCR, len);
		PUTREG(sc, SPI_PDC_BASE + PDC_TNPR, sc->sc_dmaaddr + sc->sc_dmaoffs);
		PUTREG(sc, SPI_PDC_BASE + PDC_TNCR, len);

		// swap buffer
		sc->sc_dmaoffs ^= HALF_BUF_SIZE;

		// get out
		return;
	} else {
		DPRINTFN(3, ("%s: nothing to write anymore\n", __FUNCTION__));
		return;
	}
}

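/*
 * at91spi_sched:
 *
 *	Start the next queued transfer: dequeue it, select its slave and
 *	kick off the first chunk.  With an empty queue, disable interrupts
 *	and deselect the slave.
 */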
void
at91spi_sched(struct at91spi_softc *sc)
{
	struct spi_transfer *st;
	int err;

	while ((st = spi_transq_first(&sc->sc_q)) != NULL) {

		DPRINTFN(2, ("%s: st=%p\n", __FUNCTION__, st));

		/* remove the item */
		spi_transq_dequeue(&sc->sc_q);

		/* note that we are working on it */
		sc->sc_transfer = st;

		if ((err = at91spi_select(sc, st->st_slave)) != 0) {
			spi_done(st, err);
			continue;
		}

		/* setup chunks */
		sc->sc_rchunk = sc->sc_wchunk = st->st_chunks;

		/* now kick the master start to get the chip running */
		at91spi_xfer(sc, TRUE);

		/* enable error interrupts too: */
		PUTREG(sc, SPI_IER, SPI_SR_MODF | SPI_SR_OVRES);

		sc->sc_running = TRUE;
		return;
	}
	DPRINTFN(2, ("%s: nothing to do anymore\n", __FUNCTION__));
	PUTREG(sc, SPI_IDR, -1);	/* disable interrupts */
	at91spi_select(sc, -1);
	sc->sc_running = FALSE;
}

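/*
 * at91spi_done:
 *
 *	Complete the current transfer with the given error code, clear the
 *	chunk pointers and schedule the next queued transfer.
 */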
void
at91spi_done(struct at91spi_softc *sc, int err)
{
	struct spi_transfer *st;

	/* called from interrupt handler */
	if ((st = sc->sc_transfer) != NULL) {
		sc->sc_transfer = NULL;
		DPRINTFN(2, ("%s: st %p finished with error code %d\n",
		    __FUNCTION__, st, err));
		spi_done(st, err);
	}
	/* make sure we clear these bits out */
	sc->sc_wchunk = sc->sc_rchunk = NULL;
	at91spi_sched(sc);
}

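/*
 * at91spi_intr:
 *
 *	Interrupt handler: report mode-fault and overrun errors, feed the
 *	PDC with the next half-buffer and finish the transfer once the
 *	receive buffers have been completely filled.
 */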
int
at91spi_intr(void *arg)
{
	struct at91spi_softc *sc = arg;
	uint32_t imr, sr;
	int err = 0;

	if ((imr = GETREG(sc, SPI_IMR)) == 0) {
		/* interrupts are not enabled, get out */
		DPRINTFN(4, ("%s: interrupts are not enabled\n", __FUNCTION__));
		return 0;
	}

	sr = GETREG(sc, SPI_SR);
	if (!(sr & imr)) {
		/* interrupt did not happen, get out */
		DPRINTFN(3, ("%s: no pending interrupt, sr=%08"PRIX32" imr=%08"PRIX32"\n",
		    __FUNCTION__, sr, imr));
		return 0;
	}

	DPRINTFN(3, ("%s: sr=%08"PRIX32" imr=%08"PRIX32"\n",
	    __FUNCTION__, sr, imr));

	if (sr & imr & SPI_SR_MODF) {
		printf("%s: mode fault!\n", device_xname(sc->sc_dev));
		err = EIO;
	}

	if (sr & imr & SPI_SR_OVRES) {
		printf("%s: overrun error!\n", device_xname(sc->sc_dev));
		err = EIO;
	}
	if (err) {
		/* clear errors */
		/* complete transfer */
		at91spi_done(sc, err);
	} else {
		/* do all data exchanges */
		at91spi_xfer(sc, FALSE);

		/*
		 * if the master done bit is set, make sure we do the
		 * right processing.
		 */
		if (sr & imr & SPI_SR_RXBUFF) {
			if ((sc->sc_wchunk != NULL) ||
			    (sc->sc_rchunk != NULL)) {
				printf("%s: partial transfer?\n",
				    device_xname(sc->sc_dev));
				err = EIO;
			}
			at91spi_done(sc, err);
		}
	}

	return 1;
}

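/*
 * at91spi_transfer:
 *
 *	Enqueue a transfer and start the transfer machinery if it is idle.
 *	Runs at splbio() to stay consistent with the interrupt handler.
 */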
int
at91spi_transfer(void *arg, struct spi_transfer *st)
{
	struct at91spi_softc *sc = arg;
	int s;

	/* make sure we select the right chip */
	s = splbio();
	spi_transq_enqueue(&sc->sc_q, st);
	if (sc->sc_running == 0) {
		at91spi_sched(sc);
	}
	splx(s);
	return 0;
}