/* $NetBSD: arspi.c,v 1.15 2021/08/07 16:18:58 thorpej Exp $ */

/*-
 * Copyright (c) 2006 Urbana-Champaign Independent Media Center.
 * Copyright (c) 2006 Garrett D'Amore.
 * All rights reserved.
 *
 * Portions of this code were written by Garrett D'Amore for the
 * Champaign-Urbana Community Wireless Network Project.
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials provided
 *    with the distribution.
 * 3. All advertising materials mentioning features or use of this
 *    software must display the following acknowledgements:
 *      This product includes software developed by the Urbana-Champaign
 *      Independent Media Center.
 *	This product includes software developed by Garrett D'Amore.
 * 4. Urbana-Champaign Independent Media Center's name and Garrett
 *    D'Amore's name may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE URBANA-CHAMPAIGN INDEPENDENT
 * MEDIA CENTER AND GARRETT D'AMORE ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE URBANA-CHAMPAIGN INDEPENDENT
 * MEDIA CENTER OR GARRETT D'AMORE BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: arspi.c,v 1.15 2021/08/07 16:18:58 thorpej Exp $");

#include "locators.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/cpu.h>
#include <sys/device.h>
#include <sys/errno.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/proc.h>
#include <sys/systm.h>

#include <mips/atheros/include/ar5315reg.h>
#include <mips/atheros/include/arbusvar.h>

#include <mips/atheros/dev/arspireg.h>

#include <dev/spi/spiflash.h>
#include <dev/spi/spivar.h>

/*
 * This device is intended only to operate with specific SPI flash
 * parts, and is not a general purpose SPI host.  (Or at least if it
 * is, the Linux and eCos sources do not show how to use it as such.)
 * And the lack of documentation on the Atheros SoCs is less than helpful.
 *
 * So for now we just "emulate" enough of the host bus framework to
 * make the SPI flash drivers happy.
 */

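/*
 * Each SPI transfer is broken into arspi_job steps: a flash opcode, an
 * up to 3-byte address, and at most 4 data bytes per controller
 * operation, plus the read/write residuals still outstanding.  Larger
 * transfers are carried out as a sequence of such operations (see
 * arspi_update_job() and arspi_finish_job() below).
 */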
struct arspi_job {
	uint8_t			job_opcode;
	struct spi_chunk	*job_chunk;
	uint32_t		job_flags;
	uint32_t		job_addr;
	uint32_t		job_data;
	int			job_rxcnt;
	int			job_txcnt;
	int			job_addrcnt;
	int			job_rresid;
	int			job_wresid;
};

#define	JOB_READ		0x1	/* job reads data back */
#define	JOB_WRITE		0x2	/* job writes data out */
#define	JOB_LAST		0x4	/* final operation of the transfer */
#define	JOB_WAIT		0x8	/* job must wait for WIP bits */
#define	JOB_WREN		0x10	/* WREN needed */

struct arspi_softc {
	struct spi_controller	sc_spi;
	void			*sc_ih;
	bool			sc_interrupts;

	struct spi_transfer	*sc_transfer;
	struct spi_chunk	*sc_wchunk;	/* for partial writes */
	struct spi_transq	sc_transq;
	bus_space_tag_t		sc_st;
	bus_space_handle_t	sc_sh;
	bus_size_t		sc_size;
};

#define	STATIC

STATIC int arspi_match(device_t, cfdata_t, void *);
STATIC void arspi_attach(device_t, device_t, void *);
STATIC void arspi_interrupts(device_t);
STATIC int arspi_intr(void *);
/* SPI service routines */
STATIC int arspi_configure(void *, int, int, int);
STATIC int arspi_transfer(void *, struct spi_transfer *);
/* internal support */
STATIC void arspi_poll(struct arspi_softc *);
STATIC void arspi_done(struct arspi_softc *, int);
STATIC void arspi_sched(struct arspi_softc *);
STATIC int arspi_get_byte(struct spi_chunk **, uint8_t *);
STATIC int arspi_put_byte(struct spi_chunk **, uint8_t);
STATIC int arspi_make_job(struct spi_transfer *);
STATIC void arspi_update_job(struct spi_transfer *);
STATIC void arspi_finish_job(struct spi_transfer *);


CFATTACH_DECL_NEW(arspi, sizeof(struct arspi_softc),
    arspi_match, arspi_attach, NULL, NULL);

#define	GETREG(sc, o)		bus_space_read_4(sc->sc_st, sc->sc_sh, o)
#define	PUTREG(sc, o, v)	bus_space_write_4(sc->sc_st, sc->sc_sh, o, v)

int
arspi_match(device_t parent, cfdata_t cf, void *aux)
{
	struct arbus_attach_args *aa = aux;

	if (strcmp(aa->aa_name, cf->cf_name) != 0)
		return 0;
	return 1;
}

void
arspi_attach(device_t parent, device_t self, void *aux)
{
	struct arspi_softc *sc = device_private(self);
	struct spibus_attach_args sba;
	struct arbus_attach_args *aa = aux;

	/*
	 * Map registers.
	 */
	sc->sc_st = aa->aa_bst;
	sc->sc_size = aa->aa_size;
	if (bus_space_map(sc->sc_st, aa->aa_addr, sc->sc_size, 0,
		&sc->sc_sh) != 0) {
		printf(": unable to map registers!\n");
		return;
	}

	aprint_normal(": Atheros SPI controller\n");

	/*
	 * Initialize SPI controller.
	 */
	sc->sc_spi.sct_cookie = sc;
	sc->sc_spi.sct_configure = arspi_configure;
	sc->sc_spi.sct_transfer = arspi_transfer;
	sc->sc_spi.sct_nslaves = 1;


	/*
	 * Initialize the queue.
	 */
	spi_transq_init(&sc->sc_transq);

	/*
	 * Enable device interrupts.
	 */
	sc->sc_ih = arbus_intr_establish(aa->aa_cirq, aa->aa_mirq,
	    arspi_intr, sc);
	if (sc->sc_ih == NULL) {
		aprint_error("%s: couldn't establish interrupt\n",
		    device_xname(self));
		/* just leave it in polled mode */
	} else
		config_interrupts(self, arspi_interrupts);

	/*
	 * Initialize and attach the SPI bus.
	 */
	memset(&sba, 0, sizeof(sba));
	sba.sba_controller = &sc->sc_spi;
	config_found(self, &sba, spibus_print, CFARGS_NONE);
}

void
arspi_interrupts(device_t self)
{
	/*
	 * we never leave polling mode, because, apparently, we
	 * are missing some data about how to drive the SPI in interrupt
	 * mode.
	 */
#if 0
	struct arspi_softc *sc = device_private(self);
	int	s;

	s = splbio();
	sc->sc_interrupts = true;
	splx(s);
#endif
}

int
arspi_intr(void *arg)
{
	struct arspi_softc *sc = arg;

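	/*
	 * Even when entered as an "interrupt" handler we simply spin until
	 * the controller clears its busy bit and then complete the current
	 * job; since interrupts are never actually enabled (see
	 * arspi_interrupts()), this is also the single step that
	 * arspi_poll() repeats until the transfer finishes.
	 */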
	while (GETREG(sc, ARSPI_REG_CTL) & ARSPI_CTL_BUSY);

	arspi_done(sc, 0);

	return 1;
}

void
arspi_poll(struct arspi_softc *sc)
{

	while (sc->sc_transfer) {
		arspi_intr(sc);
	}
}

int
arspi_configure(void *cookie, int slave, int mode, int speed)
{

	/*
	 * We don't support the full SPI protocol, and hopefully the
	 * firmware has programmed a reasonable mode already.  So
	 * just a couple of quick sanity checks, then bail.
	 */
	if ((mode != 0) || (slave != 0))
		return EINVAL;

	return 0;
}

int
arspi_transfer(void *cookie, struct spi_transfer *st)
{
	struct arspi_softc *sc = cookie;
	int rv;
	int s;

	st->st_busprivate = NULL;
	if ((rv = arspi_make_job(st)) != 0) {
		if (st->st_busprivate) {
			struct arspi_job *job = st->st_busprivate;
			st->st_busprivate = NULL;
			kmem_free(job, sizeof(*job));
		}
		spi_done(st, rv);
		return rv;
	}

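	/*
	 * Queue the transfer.  Because we never switch to interrupt mode,
	 * arspi_poll() normally runs the transfer to completion before
	 * this function returns.
	 */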
	s = splbio();
	spi_transq_enqueue(&sc->sc_transq, st);
	if (sc->sc_transfer == NULL) {
		arspi_sched(sc);
		if (!sc->sc_interrupts)
			arspi_poll(sc);
	}
	splx(s);
	return 0;
}

void
arspi_sched(struct arspi_softc *sc)
{
	struct spi_transfer *st;
	struct arspi_job *job;
	uint32_t ctl, cnt;

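	/*
	 * Start at most one controller operation for the transfer at the
	 * head of the queue.  Completion is detected in arspi_intr() and
	 * arspi_done(), which calls back here to start the next step.
	 * For writes, each completed chunk sets JOB_WAIT and JOB_WREN so
	 * that an RDSR poll and a fresh WREN are issued before the next
	 * chunk.
	 */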
	for (;;) {
		if ((st = sc->sc_transfer) == NULL) {
			if ((st = spi_transq_first(&sc->sc_transq)) == NULL) {
				/* no work left to do */
				break;
			}
			spi_transq_dequeue(&sc->sc_transq);
			sc->sc_transfer = st;
		}

		arspi_update_job(st);
		job = st->st_busprivate;

		/* there shouldn't be anything running, but ensure it */
		do {
			ctl = GETREG(sc, ARSPI_REG_CTL);
		} while (ctl & ARSPI_CTL_BUSY);
		/* clear all of the tx and rx bits */
		ctl &= ~(ARSPI_CTL_TXCNT_MASK | ARSPI_CTL_RXCNT_MASK);

		if (job->job_flags & JOB_WAIT) {
			PUTREG(sc, ARSPI_REG_OPCODE, SPIFLASH_CMD_RDSR);
			/* only the opcode for tx */
			ctl |= (1 << ARSPI_CTL_TXCNT_SHIFT);
			/* and one rx byte */
			ctl |= (1 << ARSPI_CTL_RXCNT_SHIFT);
		} else if (job->job_flags & JOB_WREN) {
			PUTREG(sc, ARSPI_REG_OPCODE, SPIFLASH_CMD_WREN);
			/* just the opcode */
			ctl |= (1 << ARSPI_CTL_TXCNT_SHIFT);
			/* no rx bytes */
		} else {
			/* set the data */
			PUTREG(sc, ARSPI_REG_DATA, job->job_data);

			/* set the opcode and the address */
			PUTREG(sc, ARSPI_REG_OPCODE, job->job_opcode |
			    (job->job_addr << 8));

			/* now set txcnt */
			cnt = 1;	/* opcode */
			cnt += job->job_addrcnt + job->job_txcnt;
			ctl |= (cnt << ARSPI_CTL_TXCNT_SHIFT);

			/* now set rxcnt */
			cnt = job->job_rxcnt;
			ctl |= (cnt << ARSPI_CTL_RXCNT_SHIFT);
		}

		/* set the start bit */
		ctl |= ARSPI_CTL_START;

		PUTREG(sc, ARSPI_REG_CTL, ctl);
		break;
	}
}

void
arspi_done(struct arspi_softc *sc, int err)
{
	struct spi_transfer *st;
	struct arspi_job *job;

	if ((st = sc->sc_transfer) != NULL) {
		job = st->st_busprivate;

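		/*
		 * Peel off any preliminary steps first: a JOB_WAIT step
		 * polls the status register until the write-in-progress
		 * bit clears, and a JOB_WREN step just issues a write
		 * enable.  Only after those does the data operation itself
		 * complete.
		 */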
		if (job->job_flags & JOB_WAIT) {
			if (err == 0) {
				if ((GETREG(sc, ARSPI_REG_DATA) &
				    SPIFLASH_SR_BUSY) == 0) {
					/* intermediate wait done */
					job->job_flags &= ~JOB_WAIT;
					goto done;
				}
			}
		} else if (job->job_flags & JOB_WREN) {
			if (err == 0) {
				job->job_flags &= ~JOB_WREN;
				goto done;
			}
		} else if (err == 0) {
			/*
			 * When breaking up write jobs, we have to wait until
			 * the WIP bit is clear, and we have to separately
			 * send WREN for each chunk.  These flags facilitate
			 * that.
			 */
			if (job->job_flags & JOB_WRITE)
				job->job_flags |= (JOB_WAIT | JOB_WREN);
			job->job_data = GETREG(sc, ARSPI_REG_DATA);
			arspi_finish_job(st);
		}

		if (err || (job->job_flags & JOB_LAST)) {
			sc->sc_transfer = NULL;
			st->st_busprivate = NULL;
			spi_done(st, err);
			kmem_free(job, sizeof(*job));
		}
	}
done:
	arspi_sched(sc);
}

int
arspi_get_byte(struct spi_chunk **chunkp, uint8_t *bytep)
{
	struct spi_chunk *chunk;

	chunk = *chunkp;

	/* skip leading empty (or already consumed) chunks */
	while (chunk && chunk->chunk_wresid == 0)
		chunk = chunk->chunk_next;

	if (chunk == NULL) {
		return ENODATA;
	}

	/*
	 * chunk must be write only.  SPI flash doesn't support
	 * any full duplex operations.
	 */
	if ((chunk->chunk_rptr) || !(chunk->chunk_wptr)) {
		return EINVAL;
	}

	*bytep = *chunk->chunk_wptr;
	chunk->chunk_wptr++;
	chunk->chunk_wresid--;
	chunk->chunk_rresid--;
	/* clearing wptr and rptr makes sanity checks later easier */
	if (chunk->chunk_wresid == 0)
		chunk->chunk_wptr = NULL;
	if (chunk->chunk_rresid == 0)
		chunk->chunk_rptr = NULL;
	while (chunk && chunk->chunk_wresid == 0)
		chunk = chunk->chunk_next;

	*chunkp = chunk;
	return 0;
}

int
arspi_put_byte(struct spi_chunk **chunkp, uint8_t byte)
{
	struct spi_chunk *chunk;

	chunk = *chunkp;

	/* skip leading empty (or already consumed) chunks */
	while (chunk && chunk->chunk_rresid == 0)
		chunk = chunk->chunk_next;

	if (chunk == NULL) {
		return EOVERFLOW;
	}

	/*
	 * chunk must be read only.  SPI flash doesn't support
	 * any full duplex operations.
	 */
	if ((chunk->chunk_wptr) || !(chunk->chunk_rptr)) {
		return EINVAL;
	}

	*chunk->chunk_rptr = byte;
	chunk->chunk_rptr++;
	chunk->chunk_wresid--;	/* technically this was done at send time */
	chunk->chunk_rresid--;
	while (chunk && chunk->chunk_rresid == 0)
		chunk = chunk->chunk_next;

	*chunkp = chunk;
	return 0;
}

int
arspi_make_job(struct spi_transfer *st)
{
	struct arspi_job *job;
	struct spi_chunk *chunk;
	uint8_t byte;
	int i, rv;

	job = kmem_zalloc(sizeof (struct arspi_job), KM_SLEEP);

	st->st_busprivate = job;

	/* skip any leading empty chunks (should not be any!) */
	chunk = st->st_chunks;

	/* get transfer opcode */
	if ((rv = arspi_get_byte(&chunk, &byte)) != 0)
		return rv;

	job->job_opcode = byte;
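	/*
	 * Classify the opcode: the controller has to be told explicitly
	 * how many address/dummy bytes to shift out and how many data
	 * bytes to send or receive, so work that out per command here.
	 */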
	switch (job->job_opcode) {
	case SPIFLASH_CMD_WREN:
	case SPIFLASH_CMD_WRDI:
	case SPIFLASH_CMD_CHIPERASE:
		break;
	case SPIFLASH_CMD_RDJI:
		job->job_rxcnt = 3;
		break;
	case SPIFLASH_CMD_RDSR:
		job->job_rxcnt = 1;
		break;
	case SPIFLASH_CMD_WRSR:
		/*
		 * is this in data, or in address?  stick it in data
		 * for now.
		 */
		job->job_txcnt = 1;
		break;
	case SPIFLASH_CMD_RDID:
		job->job_addrcnt = 3;	/* 3 dummy bytes */
		job->job_rxcnt = 1;
		break;
	case SPIFLASH_CMD_ERASE:
		job->job_addrcnt = 3;
		break;
	case SPIFLASH_CMD_READ:
		job->job_addrcnt = 3;
		job->job_flags |= JOB_READ;
		break;
	case SPIFLASH_CMD_PROGRAM:
		job->job_addrcnt = 3;
		job->job_flags |= JOB_WRITE;
		break;
	case SPIFLASH_CMD_READFAST:
		/*
		 * This is a pain in the arse to support, so we will
		 * rewrite as an ordinary read.  But later, after we
		 * obtain the address.
		 */
		job->job_addrcnt = 3;	/* 3 address */
		job->job_flags |= JOB_READ;
		break;
	default:
		return EINVAL;
	}

	for (i = 0; i < job->job_addrcnt; i++) {
		if ((rv = arspi_get_byte(&chunk, &byte)) != 0)
			return rv;
		job->job_addr <<= 8;
		job->job_addr |= byte;
	}


	if (job->job_opcode == SPIFLASH_CMD_READFAST) {
		/* eat the dummy timing byte */
		if ((rv = arspi_get_byte(&chunk, &byte)) != 0)
			return rv;
		/* rewrite this as a read */
		job->job_opcode = SPIFLASH_CMD_READ;
	}

	job->job_chunk = chunk;

	/*
	 * Now quickly check a few other things.   Namely, we are not
	 * allowed to have both READ and WRITE.
	 */
	for (chunk = job->job_chunk; chunk; chunk = chunk->chunk_next) {
		if (chunk->chunk_wptr) {
			job->job_wresid += chunk->chunk_wresid;
		}
		if (chunk->chunk_rptr) {
			job->job_rresid += chunk->chunk_rresid;
		}
	}

	if (job->job_rresid && job->job_wresid) {
		return EINVAL;
	}

	return 0;
}

/*
 * NB: The Atheros SPI controller runs in little endian mode. So all
 * data accesses must be swapped appropriately.
 *
 * The controller auto-swaps read accesses done through the mapped memory
 * region, but when using SPI directly, we have to do the right thing to
 * swap to or from little endian.
 */

void
arspi_update_job(struct spi_transfer *st)
{
	struct arspi_job *job = st->st_busprivate;
	uint8_t byte;
	int i;

	if (job->job_flags & (JOB_WAIT|JOB_WREN))
		return;

	job->job_rxcnt = 0;
	job->job_txcnt = 0;
	job->job_data = 0;

	job->job_txcnt = uimin(job->job_wresid, 4);
	job->job_rxcnt = uimin(job->job_rresid, 4);

	job->job_wresid -= job->job_txcnt;
	job->job_rresid -= job->job_rxcnt;

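	/*
	 * Pack outgoing bytes LSB first into the 32-bit data register,
	 * matching the controller's little endian layout noted above.
	 */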
	for (i = 0; i < job->job_txcnt; i++) {
		arspi_get_byte(&job->job_chunk, &byte);
		job->job_data |= (byte << (i * 8));
	}

	if ((!job->job_wresid) && (!job->job_rresid)) {
		job->job_flags |= JOB_LAST;
	}
}

void
arspi_finish_job(struct spi_transfer *st)
{
	struct arspi_job *job = st->st_busprivate;
	uint8_t	byte;
	int i;

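	/*
	 * Advance the flash address past the bytes just transferred, so
	 * the next operation of a multi-step read or program continues
	 * where this one left off, then unpack any received bytes LSB
	 * first.
	 */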
	job->job_addr += job->job_rxcnt;
	job->job_addr += job->job_txcnt;
	for (i = 0; i < job->job_rxcnt; i++) {
		byte = job->job_data & 0xff;
		job->job_data >>= 8;
		arspi_put_byte(&job->job_chunk, byte);
	}
}