xref: /netbsd-src/sys/dev/spi/spi.c (revision a0698ed9d41653d7a2378819ad501a285ca0d401)
1 /* $NetBSD: spi.c,v 1.10 2019/02/23 10:43:25 mlelstv Exp $ */
2 
3 /*-
4  * Copyright (c) 2006 Urbana-Champaign Independent Media Center.
5  * Copyright (c) 2006 Garrett D'Amore.
6  * All rights reserved.
7  *
8  * Portions of this code were written by Garrett D'Amore for the
9  * Champaign-Urbana Community Wireless Network Project.
10  *
11  * Redistribution and use in source and binary forms, with or
12  * without modification, are permitted provided that the following
13  * conditions are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above
17  *    copyright notice, this list of conditions and the following
18  *    disclaimer in the documentation and/or other materials provided
19  *    with the distribution.
20  * 3. All advertising materials mentioning features or use of this
21  *    software must display the following acknowledgements:
22  *      This product includes software developed by the Urbana-Champaign
23  *      Independent Media Center.
24  *	This product includes software developed by Garrett D'Amore.
25  * 4. Urbana-Champaign Independent Media Center's name and Garrett
26  *    D'Amore's name may not be used to endorse or promote products
27  *    derived from this software without specific prior written permission.
28  *
29  * THIS SOFTWARE IS PROVIDED BY THE URBANA-CHAMPAIGN INDEPENDENT
30  * MEDIA CENTER AND GARRETT D'AMORE ``AS IS'' AND ANY EXPRESS OR
31  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
32  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
33  * ARE DISCLAIMED.  IN NO EVENT SHALL THE URBANA-CHAMPAIGN INDEPENDENT
34  * MEDIA CENTER OR GARRETT D'AMORE BE LIABLE FOR ANY DIRECT, INDIRECT,
35  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
36  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
37  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
38  * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
39  * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
40  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
41  * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
42  */
43 
44 #include <sys/cdefs.h>
45 __KERNEL_RCSID(0, "$NetBSD: spi.c,v 1.10 2019/02/23 10:43:25 mlelstv Exp $");
46 
47 #include "locators.h"
48 
49 #include <sys/param.h>
50 #include <sys/systm.h>
51 #include <sys/device.h>
52 #include <sys/conf.h>
53 #include <sys/malloc.h>
54 #include <sys/mutex.h>
55 #include <sys/condvar.h>
56 #include <sys/errno.h>
57 
58 #include <dev/spi/spivar.h>
59 #include <dev/spi/spi_io.h>
60 
61 #include "ioconf.h"
62 #include "locators.h"
63 
/*
 * Per-bus softc.  Wraps a copy of the controller tag supplied by the
 * bus backend and tracks which slave/mode/speed the hardware is
 * currently programmed for, so spi_transfer() can reconfigure lazily.
 */
struct spi_softc {
	struct spi_controller	sc_controller;	/* copy of backend's tag */
	int			sc_mode;	/* mode currently programmed (-1 = none yet) */
	int			sc_speed;	/* speed currently programmed (0 = none yet) */
	int			sc_slave;	/* slave currently selected (-1 = none yet) */
	int			sc_nslaves;	/* number of slave selects on this bus */
	struct spi_handle	*sc_slaves;	/* array of sc_nslaves handles */
	kmutex_t		sc_lock;	/* protects sc_flags / sc_cv */
	kcondvar_t		sc_cv;		/* waiters for bus ownership */
	int			sc_flags;
#define SPIC_BUSY		1		/* a transaction owns the bus */
};
76 
/* Character-device entry points (userland access is ioctl-only). */
static dev_type_open(spi_open);
static dev_type_close(spi_close);
static dev_type_ioctl(spi_ioctl);

const struct cdevsw spi_cdevsw = {
	.d_open = spi_open,
	.d_close = spi_close,
	.d_read = noread,	/* no read/write; transfers go via ioctl */
	.d_write = nowrite,
	.d_ioctl = spi_ioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = nopoll,
	.d_mmap = nommap,
	.d_kqfilter = nokqfilter,
	.d_discard = nodiscard,
	.d_flag = D_OTHER
};
95 
96 /*
97  * SPI slave device.  We have one of these per slave.
98  */
struct spi_handle {
	struct spi_softc	*sh_sc;		/* back pointer to owning bus */
	struct spi_controller	*sh_controller;	/* bus controller tag */
	int			sh_slave;	/* slave select index of this device */
	int			sh_mode;	/* mode requested via spi_configure() */
	int			sh_speed;	/* speed requested via spi_configure() */
};
106 
107 #define SPI_MAXDATA 4096
108 
109 /*
110  * API for bus drivers.
111  */
112 
113 int
114 spibus_print(void *aux, const char *pnp)
115 {
116 
117 	if (pnp != NULL)
118 		aprint_normal("spi at %s", pnp);
119 
120 	return (UNCONF);
121 }
122 
123 
124 static int
125 spi_match(device_t parent, cfdata_t cf, void *aux)
126 {
127 
128 	return 1;
129 }
130 
131 static int
132 spi_print(void *aux, const char *pnp)
133 {
134 	struct spi_attach_args *sa = aux;
135 
136 	if (sa->sa_handle->sh_slave != -1)
137 		aprint_normal(" slave %d", sa->sa_handle->sh_slave);
138 
139 	return (UNCONF);
140 }
141 
142 static int
143 spi_search(device_t parent, cfdata_t cf, const int *ldesc, void *aux)
144 {
145 	struct spi_softc *sc = device_private(parent);
146 	struct spi_attach_args sa;
147 	int addr;
148 
149 	addr = cf->cf_loc[SPICF_SLAVE];
150 	if ((addr < 0) || (addr >= sc->sc_controller.sct_nslaves)) {
151 		return -1;
152 	}
153 
154 	sa.sa_handle = &sc->sc_slaves[addr];
155 
156 	if (config_match(parent, cf, &sa) > 0)
157 		config_attach(parent, cf, &sa, spi_print);
158 
159 	return 0;
160 }
161 
162 /*
163  * API for device drivers.
164  *
165  * We provide wrapper routines to decouple the ABI for the SPI
166  * device drivers from the ABI for the SPI bus drivers.
167  */
168 static void
169 spi_attach(device_t parent, device_t self, void *aux)
170 {
171 	struct spi_softc *sc = device_private(self);
172 	struct spibus_attach_args *sba = aux;
173 	int i;
174 
175 	aprint_naive(": SPI bus\n");
176 	aprint_normal(": SPI bus\n");
177 
178 	mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_BIO);
179 	cv_init(&sc->sc_cv, "spictl");
180 
181 	sc->sc_controller = *sba->sba_controller;
182 	sc->sc_nslaves = sba->sba_controller->sct_nslaves;
183 	/* allocate slave structures */
184 	sc->sc_slaves = malloc(sizeof (struct spi_handle) * sc->sc_nslaves,
185 	    M_DEVBUF, M_WAITOK | M_ZERO);
186 
187 	sc->sc_speed = 0;
188 	sc->sc_mode = -1;
189 	sc->sc_slave = -1;
190 
191 	/*
192 	 * Initialize slave handles
193 	 */
194 	for (i = 0; i < sc->sc_nslaves; i++) {
195 		sc->sc_slaves[i].sh_slave = i;
196 		sc->sc_slaves[i].sh_sc = sc;
197 		sc->sc_slaves[i].sh_controller = &sc->sc_controller;
198 	}
199 
200 	/*
201 	 * Locate and attach child devices
202 	 */
203 	config_search_ia(spi_search, self, "spi", NULL);
204 }
205 
206 static int
207 spi_open(dev_t dev, int flag, int fmt, lwp_t *l)
208 {
209 	struct spi_softc *sc = device_lookup_private(&spi_cd, minor(dev));
210 
211 	if (sc == NULL)
212 		return ENXIO;
213 
214 	return 0;
215 }
216 
217 static int
218 spi_close(dev_t dev, int flag, int fmt, lwp_t *l)
219 {
220 
221 	return 0;
222 }
223 
224 static int
225 spi_ioctl(dev_t dev, u_long cmd, void *data, int flag, lwp_t *l)
226 {
227 	struct spi_softc *sc = device_lookup_private(&spi_cd, minor(dev));
228 	struct spi_handle *sh;
229 	spi_ioctl_configure_t *sic;
230 	spi_ioctl_transfer_t *sit;
231 	uint8_t *sbuf, *rbuf;
232 	int error;
233 
234 	if (sc == NULL)
235 		return ENXIO;
236 
237 	switch (cmd) {
238 	case SPI_IOCTL_CONFIGURE:
239 		sic = (spi_ioctl_configure_t *)data;
240 		if (sic->sic_addr < 0 || sic->sic_addr >= sc->sc_nslaves) {
241 			error = EINVAL;
242 			break;
243 		}
244 		sh = &sc->sc_slaves[sic->sic_addr];
245 		error = spi_configure(sh, sic->sic_mode, sic->sic_speed);
246 		break;
247 	case SPI_IOCTL_TRANSFER:
248 		sit = (spi_ioctl_transfer_t *)data;
249 		if (sit->sit_addr < 0 || sit->sit_addr >= sc->sc_nslaves) {
250 			error = EINVAL;
251 			break;
252 		}
253 		sh = &sc->sc_slaves[sit->sit_addr];
254 		sbuf = rbuf = NULL;
255 		error = 0;
256 		if (sit->sit_send && sit->sit_sendlen < SPI_MAXDATA) {
257 			sbuf = malloc(sit->sit_sendlen, M_DEVBUF, M_WAITOK);
258 			error = copyin(sit->sit_send, sbuf, sit->sit_sendlen);
259 		}
260 		if (sit->sit_recv && sit->sit_recvlen < SPI_MAXDATA) {
261 			rbuf = malloc(sit->sit_recvlen, M_DEVBUF, M_WAITOK);
262 		}
263 		if (error == 0) {
264 			if (sbuf && rbuf)
265 				error = spi_send_recv(sh,
266 					sit->sit_sendlen, sbuf,
267 					sit->sit_recvlen, rbuf);
268 			else if (sbuf)
269 				error = spi_send(sh,
270 					sit->sit_sendlen, sbuf);
271 			else if (rbuf)
272 				error = spi_recv(sh,
273 					sit->sit_recvlen, rbuf);
274 		}
275 		if (rbuf) {
276 			if (error == 0)
277 				error = copyout(rbuf, sit->sit_recv,
278 						sit->sit_recvlen);
279 			free(rbuf, M_DEVBUF);
280 		}
281 		if (sbuf) {
282 			free(sbuf, M_DEVBUF);
283 		}
284 		break;
285 	default:
286 		error = ENODEV;
287 		break;
288 	}
289 
290 	return error;
291 }
292 
/* Autoconfiguration glue: match/attach hooks for the spi bus device. */
CFATTACH_DECL_NEW(spi, sizeof(struct spi_softc),
    spi_match, spi_attach, NULL, NULL);
295 
296 /*
297  * Configure.  This should be the first thing that the SPI driver
298  * should do, to configure which mode (e.g. SPI_MODE_0, which is the
299  * same as National Semiconductor Microwire mode), and speed.  If the bus driver
300  * cannot run fast enough, then it should just configure the fastest
301  * mode that it can support.  If the bus driver cannot run slow
302  * enough, then the device is incompatible and an error should be
303  * returned.
304  */
int
spi_configure(struct spi_handle *sh, int mode, int speed)
{

	/*
	 * Just record the parameters on the handle; the controller
	 * itself is (re)programmed lazily by spi_transfer() whenever
	 * the handle's settings differ from what is currently active.
	 */
	sh->sh_mode = mode;
	sh->sh_speed = speed;
	return 0;
}
313 
314 /*
315  * Acquire controller
316  */
static void
spi_acquire(struct spi_handle *sh)
{
	struct spi_softc *sc = sh->sh_sc;

	/* Sleep until no other transaction owns the bus, then claim it. */
	mutex_enter(&sc->sc_lock);
	while ((sc->sc_flags & SPIC_BUSY) != 0)
		cv_wait(&sc->sc_cv, &sc->sc_lock);
	sc->sc_flags |= SPIC_BUSY;
	mutex_exit(&sc->sc_lock);
}
328 
329 /*
330  * Release controller
331  */
static void
spi_release(struct spi_handle *sh)
{
	struct spi_softc *sc = sh->sh_sc;

	/* Give up bus ownership and wake everyone blocked in spi_acquire(). */
	mutex_enter(&sc->sc_lock);
	sc->sc_flags &= ~SPIC_BUSY;
	cv_broadcast(&sc->sc_cv);
	mutex_exit(&sc->sc_lock);
}
342 
343 void
344 spi_transfer_init(struct spi_transfer *st)
345 {
346 
347 	mutex_init(&st->st_lock, MUTEX_DEFAULT, IPL_BIO);
348 	cv_init(&st->st_cv, "spixfr");
349 
350 	st->st_flags = 0;
351 	st->st_errno = 0;
352 	st->st_done = NULL;
353 	st->st_chunks = NULL;
354 	st->st_private = NULL;
355 	st->st_slave = -1;
356 }
357 
358 void
359 spi_chunk_init(struct spi_chunk *chunk, int cnt, const uint8_t *wptr,
360     uint8_t *rptr)
361 {
362 
363 	chunk->chunk_write = chunk->chunk_wptr = wptr;
364 	chunk->chunk_read = chunk->chunk_rptr = rptr;
365 	chunk->chunk_rresid = chunk->chunk_wresid = chunk->chunk_count = cnt;
366 	chunk->chunk_next = NULL;
367 }
368 
369 void
370 spi_transfer_add(struct spi_transfer *st, struct spi_chunk *chunk)
371 {
372 	struct spi_chunk **cpp;
373 
374 	/* this is an O(n) insert -- perhaps we should use a simpleq? */
375 	for (cpp = &st->st_chunks; *cpp; cpp = &(*cpp)->chunk_next);
376 	*cpp = chunk;
377 }
378 
379 int
380 spi_transfer(struct spi_handle *sh, struct spi_transfer *st)
381 {
382 	struct spi_softc	*sc = sh->sh_sc;
383 	struct spi_controller	*tag = sh->sh_controller;
384 	struct spi_chunk	*chunk;
385 	int error;
386 
387 	/*
388 	 * Initialize "resid" counters and pointers, so that callers
389 	 * and bus drivers don't have to.
390 	 */
391 	for (chunk = st->st_chunks; chunk; chunk = chunk->chunk_next) {
392 		chunk->chunk_wresid = chunk->chunk_rresid = chunk->chunk_count;
393 		chunk->chunk_wptr = chunk->chunk_write;
394 		chunk->chunk_rptr = chunk->chunk_read;
395 	}
396 
397 	/*
398 	 * Match slave and parameters to handle
399 	 */
400 	st->st_slave = sh->sh_slave;
401 
402 	/*
403 	 * Reserve controller during transaction
404  	 */
405 	spi_acquire(sh);
406 
407 	st->st_spiprivate = (void *)sh;
408 
409 	/*
410 	 * Reconfigure controller
411 	 *
412 	 * XXX backends don't configure per-slave parameters
413 	 * Whenever we switch slaves or change mode or speed, we
414 	 * need to tell the backend.
415 	 */
416 	if (sc->sc_slave != sh->sh_slave
417 	    || sc->sc_mode != sh->sh_mode
418 	    || sc->sc_speed != sh->sh_speed) {
419 		error = (*tag->sct_configure)(tag->sct_cookie,
420 				sh->sh_slave, sh->sh_mode, sh->sh_speed);
421 		if (error)
422 			return error;
423 	}
424 	sc->sc_mode = sh->sh_mode;
425 	sc->sc_speed = sh->sh_speed;
426 	sc->sc_slave = sh->sh_slave;
427 
428 	error = (*tag->sct_transfer)(tag->sct_cookie, st);
429 
430 	return error;
431 }
432 
void
spi_wait(struct spi_transfer *st)
{
	struct spi_handle *sh = st->st_spiprivate;

	/* Sleep until spi_done() marks the transfer complete. */
	mutex_enter(&st->st_lock);
	while (!(st->st_flags & SPI_F_DONE)) {
		cv_wait(&st->st_cv, &st->st_lock);
	}
	mutex_exit(&st->st_lock);
	/* Transfer finished: tear down its per-transfer sync state. */
	cv_destroy(&st->st_cv);
	mutex_destroy(&st->st_lock);

	/*
	 * End transaction: give the controller back (acquired in
	 * spi_transfer()).
	 */
	spi_release(sh);
}
451 
452 void
453 spi_done(struct spi_transfer *st, int err)
454 {
455 
456 	mutex_enter(&st->st_lock);
457 	if ((st->st_errno = err) != 0) {
458 		st->st_flags |= SPI_F_ERROR;
459 	}
460 	st->st_flags |= SPI_F_DONE;
461 	if (st->st_done != NULL) {
462 		(*st->st_done)(st);
463 	} else {
464 		cv_broadcast(&st->st_cv);
465 	}
466 	mutex_exit(&st->st_lock);
467 }
468 
469 /*
470  * Some convenience routines.  These routines block until the work
471  * is done.
472  *
473  * spi_recv - receives data from the bus
474  *
475  * spi_send - sends data to the bus
476  *
477  * spi_send_recv - sends data to the bus, and then receives.  Note that this is
478  * done synchronously, i.e. send a command and get the response.  This is
479  * not full duplex.  If you want full duplex, you can't use these convenience
480  * wrappers.
481  */
482 int
483 spi_recv(struct spi_handle *sh, int cnt, uint8_t *data)
484 {
485 	struct spi_transfer	trans;
486 	struct spi_chunk	chunk;
487 
488 	spi_transfer_init(&trans);
489 	spi_chunk_init(&chunk, cnt, NULL, data);
490 	spi_transfer_add(&trans, &chunk);
491 
492 	/* enqueue it and wait for it to complete */
493 	spi_transfer(sh, &trans);
494 	spi_wait(&trans);
495 
496 	if (trans.st_flags & SPI_F_ERROR)
497 		return trans.st_errno;
498 
499 	return 0;
500 }
501 
502 int
503 spi_send(struct spi_handle *sh, int cnt, const uint8_t *data)
504 {
505 	struct spi_transfer	trans;
506 	struct spi_chunk	chunk;
507 
508 	spi_transfer_init(&trans);
509 	spi_chunk_init(&chunk, cnt, data, NULL);
510 	spi_transfer_add(&trans, &chunk);
511 
512 	/* enqueue it and wait for it to complete */
513 	spi_transfer(sh, &trans);
514 	spi_wait(&trans);
515 
516 	if (trans.st_flags & SPI_F_ERROR)
517 		return trans.st_errno;
518 
519 	return 0;
520 }
521 
522 int
523 spi_send_recv(struct spi_handle *sh, int scnt, const uint8_t *snd,
524     int rcnt, uint8_t *rcv)
525 {
526 	struct spi_transfer	trans;
527 	struct spi_chunk	chunk1, chunk2;
528 
529 	spi_transfer_init(&trans);
530 	spi_chunk_init(&chunk1, scnt, snd, NULL);
531 	spi_chunk_init(&chunk2, rcnt, NULL, rcv);
532 	spi_transfer_add(&trans, &chunk1);
533 	spi_transfer_add(&trans, &chunk2);
534 
535 	/* enqueue it and wait for it to complete */
536 	spi_transfer(sh, &trans);
537 	spi_wait(&trans);
538 
539 	if (trans.st_flags & SPI_F_ERROR)
540 		return trans.st_errno;
541 
542 	return 0;
543 }
544 
545