/* $NetBSD: spi.c,v 1.17.2.1 2021/05/18 23:48:16 thorpej Exp $ */

/*-
 * Copyright (c) 2006 Urbana-Champaign Independent Media Center.
 * Copyright (c) 2006 Garrett D'Amore.
 * All rights reserved.
 *
 * Portions of this code were written by Garrett D'Amore for the
 * Champaign-Urbana Community Wireless Network Project.
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials provided
 *    with the distribution.
 * 3. All advertising materials mentioning features or use of this
 *    software must display the following acknowledgements:
 *      This product includes software developed by the Urbana-Champaign
 *      Independent Media Center.
 *      This product includes software developed by Garrett D'Amore.
 * 4. Urbana-Champaign Independent Media Center's name and Garrett
 *    D'Amore's name may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE URBANA-CHAMPAIGN INDEPENDENT
 * MEDIA CENTER AND GARRETT D'AMORE ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE URBANA-CHAMPAIGN INDEPENDENT
 * MEDIA CENTER OR GARRETT D'AMORE BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: spi.c,v 1.17.2.1 2021/05/18 23:48:16 thorpej Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/conf.h>
#include <sys/kmem.h>
#include <sys/mutex.h>
#include <sys/condvar.h>
#include <sys/errno.h>

#include <dev/spi/spivar.h>
#include <dev/spi/spi_io.h>

#include "ioconf.h"
#include "locators.h"

struct spi_softc {
        device_t                sc_dev;
        struct spi_controller   sc_controller;
        int                     sc_mode;
        int                     sc_speed;
        int                     sc_slave;
        int                     sc_nslaves;
        struct spi_handle       *sc_slaves;
        kmutex_t                sc_slave_state_lock;
        kmutex_t                sc_lock;
        kcondvar_t              sc_cv;
        int                     sc_flags;
#define SPIC_BUSY               1
};

static dev_type_open(spi_open);
static dev_type_close(spi_close);
static dev_type_ioctl(spi_ioctl);

const struct cdevsw spi_cdevsw = {
        .d_open = spi_open,
        .d_close = spi_close,
        .d_read = noread,
        .d_write = nowrite,
        .d_ioctl = spi_ioctl,
        .d_stop = nostop,
        .d_tty = notty,
        .d_poll = nopoll,
        .d_mmap = nommap,
        .d_kqfilter = nokqfilter,
        .d_discard = nodiscard,
        .d_flag = D_OTHER
};

/*
 * SPI slave device.  We have one of these per slave.
 */
struct spi_handle {
        struct spi_softc        *sh_sc;         /* static */
        struct spi_controller   *sh_controller; /* static */
        int                     sh_slave;       /* static */
        int                     sh_mode;        /* locked by owning child */
        int                     sh_speed;       /* locked by owning child */
        int                     sh_flags;       /* ^^ slave_state_lock ^^ */
#define SPIH_ATTACHED           __BIT(0)
#define SPIH_DIRECT             __BIT(1)
};

#define SPI_MAXDATA     4096

/*
 * API for bus drivers.
 */

int
spibus_print(void *aux, const char *pnp)
{

        if (pnp != NULL)
                aprint_normal("spi at %s", pnp);

        return (UNCONF);
}
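
/*
 * Illustrative sketch only (not part of this driver): a SPI controller
 * ("bus") driver hands its chip selects to this layer by filling in a
 * struct spi_controller and a struct spibus_attach_args, then calling
 * config_found() with spibus_print().  The mydrv_* names, MYDRV_NCS,
 * and the sc_spi softc member below are hypothetical.
 *
 *      struct spibus_attach_args sba;
 *
 *      sc->sc_spi.sct_cookie = sc;
 *      sc->sc_spi.sct_configure = mydrv_configure;
 *      sc->sc_spi.sct_transfer = mydrv_transfer;
 *      sc->sc_spi.sct_nslaves = MYDRV_NCS;
 *
 *      memset(&sba, 0, sizeof(sba));
 *      sba.sba_controller = &sc->sc_spi;
 *
 *      config_found(self, &sba, spibus_print, CFARG_EOL);
 */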


static int
spi_match(device_t parent, cfdata_t cf, void *aux)
{

        return 1;
}

static int
spi_print_direct(void *aux, const char *pnp)
{
        struct spi_attach_args *sa = aux;

        if (pnp != NULL) {
                aprint_normal("%s%s%s%s at %s slave %d",
                    sa->sa_name ? sa->sa_name : "(unknown)",
                    sa->sa_clist ? " (" : "",
                    sa->sa_clist ? sa->sa_clist : "",
                    sa->sa_clist ? ")" : "",
                    pnp, sa->sa_handle->sh_slave);
        } else {
                aprint_normal(" slave %d", sa->sa_handle->sh_slave);
        }

        return UNCONF;
}

static int
spi_print(void *aux, const char *pnp)
{
        struct spi_attach_args *sa = aux;

        aprint_normal(" slave %d", sa->sa_handle->sh_slave);

        return UNCONF;
}

/*
 * Direct and indirect configuration for SPI are similar enough that we
 * can collapse them into a single function.
 */
static void
spi_attach_child(struct spi_softc *sc, struct spi_attach_args *sa,
    int chip_select, cfdata_t cf)
{
        struct spi_handle *sh;
        device_t newdev = NULL;
        bool is_direct = cf == NULL;
        const int skip_flags = is_direct ? SPIH_ATTACHED
            : (SPIH_ATTACHED | SPIH_DIRECT);
        const int claim_flags = skip_flags ^ SPIH_DIRECT;
        int locs[SPICF_NLOCS] = { 0 };

        if (chip_select < 0 ||
            chip_select >= sc->sc_controller.sct_nslaves) {
                return;
        }

        sh = &sc->sc_slaves[chip_select];

        mutex_enter(&sc->sc_slave_state_lock);
        if (ISSET(sh->sh_flags, skip_flags)) {
                mutex_exit(&sc->sc_slave_state_lock);
                return;
        }

        /* Keep others off of this chip select. */
        SET(sh->sh_flags, claim_flags);
        mutex_exit(&sc->sc_slave_state_lock);

        locs[SPICF_SLAVE] = chip_select;
        sa->sa_handle = sh;

        if (is_direct) {
                newdev = config_found(sc->sc_dev, sa, spi_print_direct,
                    /* CFARG_SUBMATCH, config_stdsubmatch, XXX */
                    CFARG_LOCATORS, locs,
                    CFARG_DEVHANDLE, sa->sa_devhandle,
                    CFARG_EOL);
        } else {
                if (config_probe(sc->sc_dev, cf, sa)) {
                        newdev = config_attach(sc->sc_dev, cf, sa, spi_print,
                            CFARG_LOCATORS, locs,
                            CFARG_EOL);
                }
        }

        if (newdev == NULL) {
                /*
                 * Clear our claim on this chip select (yes, just
                 * the ATTACHED flag; we want to keep indirects off
                 * of chip selects for which there is a device tree
                 * node).
                 */
                mutex_enter(&sc->sc_slave_state_lock);
                CLR(sh->sh_flags, SPIH_ATTACHED);
                mutex_exit(&sc->sc_slave_state_lock);
        }
}

static int
spi_search(device_t parent, cfdata_t cf, const int *ldesc, void *aux)
{
        struct spi_softc *sc = device_private(parent);
        struct spi_attach_args sa;

        if (cf->cf_loc[SPICF_SLAVE] == SPICF_SLAVE_DEFAULT) {
                /* No wildcards for indirect on SPI. */
                return 0;
        }

        memset(&sa, 0, sizeof(sa));
        spi_attach_child(sc, &sa, cf->cf_loc[SPICF_SLAVE], cf);

        return 0;
}

static bool
spi_enumerate_devices_callback(device_t self,
    struct spi_enumerate_devices_args *args)
{
        struct spi_softc *sc = device_private(self);

        spi_attach_child(sc, args->sa, args->chip_select, NULL);

        return true;    /* keep enumerating */
}

int
spi_compatible_match(const struct spi_attach_args *sa, const cfdata_t cf,
    const struct device_compatible_entry *compats)
{
        if (sa->sa_clist != NULL) {
                return device_compatible_match_strlist(sa->sa_clist,
                    sa->sa_clist_size, compats);
        }

        /*
         * In this case, we're using indirect configuration, but SPI
         * has no real addressing system, and we've filtered out
         * wildcarded chip selects in spi_search(), so we have no
         * choice but to trust the user-specified config.
         */
        return 1;
}
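
/*
 * Illustrative sketch only: a slave ("child") device driver would
 * typically use spi_compatible_match() in its match routine, with a
 * device_compatible_entry table naming the "compatible" strings it
 * accepts.  The mydev_* names and "vendor,mydev" are hypothetical.
 *
 *      static const struct device_compatible_entry mydev_compat_data[] = {
 *              { .compat = "vendor,mydev" },
 *              DEVICE_COMPAT_EOL
 *      };
 *
 *      static int
 *      mydev_match(device_t parent, cfdata_t cf, void *aux)
 *      {
 *              struct spi_attach_args *sa = aux;
 *
 *              return spi_compatible_match(sa, cf, mydev_compat_data);
 *      }
 */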

static void
spi_attach(device_t parent, device_t self, void *aux)
{
        struct spi_softc *sc = device_private(self);
        struct spibus_attach_args *sba = aux;
        int i;

        sc->sc_dev = self;

        aprint_naive(": SPI bus\n");
        aprint_normal(": SPI bus\n");

        mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_VM);
        mutex_init(&sc->sc_slave_state_lock, MUTEX_DEFAULT, IPL_NONE);
        cv_init(&sc->sc_cv, "spictl");

        sc->sc_controller = *sba->sba_controller;
        sc->sc_nslaves = sba->sba_controller->sct_nslaves;

        /* allocate slave structures */
        sc->sc_slaves = kmem_zalloc(sizeof(*sc->sc_slaves) * sc->sc_nslaves,
            KM_SLEEP);

        sc->sc_speed = 0;
        sc->sc_mode = -1;
        sc->sc_slave = -1;

        /*
         * Initialize slave handles
         */
        for (i = 0; i < sc->sc_nslaves; i++) {
                sc->sc_slaves[i].sh_slave = i;
                sc->sc_slaves[i].sh_sc = sc;
                sc->sc_slaves[i].sh_controller = &sc->sc_controller;
        }

        /*
         * Attempt to enumerate the devices on the bus using the
         * platform device tree.
         */
        struct spi_attach_args sa = { 0 };
        struct spi_enumerate_devices_args enumargs = {
                .sa = &sa,
                .callback = spi_enumerate_devices_callback,
        };
        device_call(self, "spi-enumerate-devices", &enumargs);

        /* Then do any other devices the user may have manually wired */
        config_search(self, NULL,
            CFARG_SEARCH, spi_search,
            CFARG_EOL);
}

CFATTACH_DECL_NEW(spi, sizeof(struct spi_softc),
    spi_match, spi_attach, NULL, NULL);

static int
spi_open(dev_t dev, int flag, int fmt, lwp_t *l)
{
        struct spi_softc *sc = device_lookup_private(&spi_cd, minor(dev));

        if (sc == NULL)
                return ENXIO;

        return 0;
}

static int
spi_close(dev_t dev, int flag, int fmt, lwp_t *l)
{

        return 0;
}

static int
spi_ioctl(dev_t dev, u_long cmd, void *data, int flag, lwp_t *l)
{
        struct spi_softc *sc = device_lookup_private(&spi_cd, minor(dev));
        struct spi_handle *sh;
        spi_ioctl_configure_t *sic;
        spi_ioctl_transfer_t *sit;
        uint8_t *sbuf, *rbuf;
        int error;

        if (sc == NULL)
                return ENXIO;

        switch (cmd) {
        case SPI_IOCTL_CONFIGURE:
                sic = (spi_ioctl_configure_t *)data;
                if (sic->sic_addr < 0 || sic->sic_addr >= sc->sc_nslaves) {
                        error = EINVAL;
                        break;
                }
                sh = &sc->sc_slaves[sic->sic_addr];
                error = spi_configure(sh, sic->sic_mode, sic->sic_speed);
                break;
        case SPI_IOCTL_TRANSFER:
                sit = (spi_ioctl_transfer_t *)data;
                if (sit->sit_addr < 0 || sit->sit_addr >= sc->sc_nslaves) {
                        error = EINVAL;
                        break;
                }
                if ((sit->sit_send && sit->sit_sendlen == 0)
                    || (sit->sit_recv && sit->sit_recvlen == 0)) {
                        error = EINVAL;
                        break;
                }
                sh = &sc->sc_slaves[sit->sit_addr];
                sbuf = rbuf = NULL;
                error = 0;
                if (sit->sit_send && sit->sit_sendlen <= SPI_MAXDATA) {
                        sbuf = kmem_alloc(sit->sit_sendlen, KM_SLEEP);
                        error = copyin(sit->sit_send, sbuf, sit->sit_sendlen);
                }
                if (sit->sit_recv && sit->sit_recvlen <= SPI_MAXDATA) {
                        rbuf = kmem_alloc(sit->sit_recvlen, KM_SLEEP);
                }
                if (error == 0) {
                        if (sbuf && rbuf)
                                error = spi_send_recv(sh,
                                    sit->sit_sendlen, sbuf,
                                    sit->sit_recvlen, rbuf);
                        else if (sbuf)
                                error = spi_send(sh,
                                    sit->sit_sendlen, sbuf);
                        else if (rbuf)
                                error = spi_recv(sh,
                                    sit->sit_recvlen, rbuf);
                }
                if (rbuf) {
                        if (error == 0)
                                error = copyout(rbuf, sit->sit_recv,
                                    sit->sit_recvlen);
                        kmem_free(rbuf, sit->sit_recvlen);
                }
                if (sbuf) {
                        kmem_free(sbuf, sit->sit_sendlen);
                }
                break;
        default:
                error = ENODEV;
                break;
        }

        return error;
}
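
/*
 * Illustrative userland sketch (not kernel code): the ioctls above are
 * meant to be used on a /dev/spiN node roughly as follows.  The field
 * names come from <dev/spi/spi_io.h> as used above; the device path,
 * command byte, and buffer sizes are hypothetical, and error checking
 * plus the usual userland includes are omitted.
 *
 *      spi_ioctl_configure_t sic = { .sic_addr = 0, .sic_mode = 0,
 *          .sic_speed = 1000000 };
 *      spi_ioctl_transfer_t sit;
 *      uint8_t cmd = 0x9f, resp[3];
 *      int fd = open("/dev/spi0", O_RDWR);
 *
 *      ioctl(fd, SPI_IOCTL_CONFIGURE, &sic);
 *
 *      sit.sit_addr = 0;
 *      sit.sit_send = &cmd;
 *      sit.sit_sendlen = sizeof(cmd);
 *      sit.sit_recv = resp;
 *      sit.sit_recvlen = sizeof(resp);
 *      ioctl(fd, SPI_IOCTL_TRANSFER, &sit);
 */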

/*
 * API for device drivers.
 *
 * We provide wrapper routines to decouple the ABI for the SPI
 * device drivers from the ABI for the SPI bus drivers.
 */

/*
 * Configure.  This should be the first thing that the SPI driver
 * does, to configure which mode (e.g. SPI_MODE_0, which is the
 * same as Philips Microwire mode), and speed.  If the bus driver
 * cannot run fast enough, then it should just configure the fastest
 * mode that it can support.  If the bus driver cannot run slow
 * enough, then the device is incompatible and an error should be
 * returned.
 */
int
spi_configure(struct spi_handle *sh, int mode, int speed)
{

        sh->sh_mode = mode;
        sh->sh_speed = speed;
        return 0;
}
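
/*
 * Illustrative sketch only: a slave driver normally calls this from
 * its attach routine, before issuing any transfers, with the handle it
 * received in its struct spi_attach_args.  SPI_MODE_0 and the 1 MHz
 * clock below are just example values.
 *
 *      if (spi_configure(sa->sa_handle, SPI_MODE_0, 1000000) != 0) {
 *              aprint_error_dev(self, "failed to configure SPI slave\n");
 *              return;
 *      }
 */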

/*
 * Acquire controller
 */
static void
spi_acquire(struct spi_handle *sh)
{
        struct spi_softc *sc = sh->sh_sc;

        mutex_enter(&sc->sc_lock);
        while ((sc->sc_flags & SPIC_BUSY) != 0)
                cv_wait(&sc->sc_cv, &sc->sc_lock);
        sc->sc_flags |= SPIC_BUSY;
        mutex_exit(&sc->sc_lock);
}

/*
 * Release controller
 */
static void
spi_release(struct spi_handle *sh)
{
        struct spi_softc *sc = sh->sh_sc;

        mutex_enter(&sc->sc_lock);
        sc->sc_flags &= ~SPIC_BUSY;
        cv_broadcast(&sc->sc_cv);
        mutex_exit(&sc->sc_lock);
}

void
spi_transfer_init(struct spi_transfer *st)
{

        mutex_init(&st->st_lock, MUTEX_DEFAULT, IPL_VM);
        cv_init(&st->st_cv, "spixfr");

        st->st_flags = 0;
        st->st_errno = 0;
        st->st_done = NULL;
        st->st_chunks = NULL;
        st->st_private = NULL;
        st->st_slave = -1;
}

void
spi_chunk_init(struct spi_chunk *chunk, int cnt, const uint8_t *wptr,
    uint8_t *rptr)
{

        chunk->chunk_write = chunk->chunk_wptr = wptr;
        chunk->chunk_read = chunk->chunk_rptr = rptr;
        chunk->chunk_rresid = chunk->chunk_wresid = chunk->chunk_count = cnt;
        chunk->chunk_next = NULL;
}

void
spi_transfer_add(struct spi_transfer *st, struct spi_chunk *chunk)
{
        struct spi_chunk **cpp;

        /* this is an O(n) insert -- perhaps we should use a simpleq? */
        for (cpp = &st->st_chunks; *cpp; cpp = &(*cpp)->chunk_next);
        *cpp = chunk;
}

int
spi_transfer(struct spi_handle *sh, struct spi_transfer *st)
{
        struct spi_softc *sc = sh->sh_sc;
        struct spi_controller *tag = sh->sh_controller;
        struct spi_chunk *chunk;
        int error;

        /*
         * Initialize "resid" counters and pointers, so that callers
         * and bus drivers don't have to.
         */
        for (chunk = st->st_chunks; chunk; chunk = chunk->chunk_next) {
                chunk->chunk_wresid = chunk->chunk_rresid = chunk->chunk_count;
                chunk->chunk_wptr = chunk->chunk_write;
                chunk->chunk_rptr = chunk->chunk_read;
        }

        /*
         * Match slave and parameters to handle
         */
        st->st_slave = sh->sh_slave;

        /*
         * Reserve controller during transaction
         */
        spi_acquire(sh);

        st->st_spiprivate = (void *)sh;

        /*
         * Reconfigure controller
         *
         * XXX backends don't configure per-slave parameters
         * Whenever we switch slaves or change mode or speed, we
         * need to tell the backend.
         */
        if (sc->sc_slave != sh->sh_slave
            || sc->sc_mode != sh->sh_mode
            || sc->sc_speed != sh->sh_speed) {
                error = (*tag->sct_configure)(tag->sct_cookie,
                    sh->sh_slave, sh->sh_mode, sh->sh_speed);
                if (error) {
                        /* Don't leave the controller reserved on failure. */
                        spi_release(sh);
                        return error;
                }
        }
        sc->sc_mode = sh->sh_mode;
        sc->sc_speed = sh->sh_speed;
        sc->sc_slave = sh->sh_slave;

        error = (*tag->sct_transfer)(tag->sct_cookie, st);

        return error;
}

void
spi_wait(struct spi_transfer *st)
{
        struct spi_handle *sh = st->st_spiprivate;

        mutex_enter(&st->st_lock);
        while (!(st->st_flags & SPI_F_DONE)) {
                cv_wait(&st->st_cv, &st->st_lock);
        }
        mutex_exit(&st->st_lock);
        cv_destroy(&st->st_cv);
        mutex_destroy(&st->st_lock);

        /*
         * End transaction
         */
        spi_release(sh);
}

void
spi_done(struct spi_transfer *st, int err)
{

        mutex_enter(&st->st_lock);
        if ((st->st_errno = err) != 0) {
                st->st_flags |= SPI_F_ERROR;
        }
        st->st_flags |= SPI_F_DONE;
        if (st->st_done != NULL) {
                (*st->st_done)(st);
        } else {
                cv_broadcast(&st->st_cv);
        }
        mutex_exit(&st->st_lock);
}

/*
 * Some convenience routines.  These routines block until the work
 * is done.
 *
 * spi_recv - receives data from the bus
 *
 * spi_send - sends data to the bus
 *
 * spi_send_recv - sends data to the bus, and then receives.  Note that this
 * is done synchronously, i.e. send a command and get the response.  This is
 * not full duplex.  If you want full duplex, you can't use these convenience
 * wrappers.
 */
int
spi_recv(struct spi_handle *sh, int cnt, uint8_t *data)
{
        struct spi_transfer trans;
        struct spi_chunk chunk;

        spi_transfer_init(&trans);
        spi_chunk_init(&chunk, cnt, NULL, data);
        spi_transfer_add(&trans, &chunk);

        /* enqueue it and wait for it to complete */
        spi_transfer(sh, &trans);
        spi_wait(&trans);

        if (trans.st_flags & SPI_F_ERROR)
                return trans.st_errno;

        return 0;
}

int
spi_send(struct spi_handle *sh, int cnt, const uint8_t *data)
{
        struct spi_transfer trans;
        struct spi_chunk chunk;

        spi_transfer_init(&trans);
        spi_chunk_init(&chunk, cnt, data, NULL);
        spi_transfer_add(&trans, &chunk);

        /* enqueue it and wait for it to complete */
        spi_transfer(sh, &trans);
        spi_wait(&trans);

        if (trans.st_flags & SPI_F_ERROR)
                return trans.st_errno;

        return 0;
}

int
spi_send_recv(struct spi_handle *sh, int scnt, const uint8_t *snd,
    int rcnt, uint8_t *rcv)
{
        struct spi_transfer trans;
        struct spi_chunk chunk1, chunk2;

        spi_transfer_init(&trans);
        spi_chunk_init(&chunk1, scnt, snd, NULL);
        spi_chunk_init(&chunk2, rcnt, NULL, rcv);
        spi_transfer_add(&trans, &chunk1);
        spi_transfer_add(&trans, &chunk2);

        /* enqueue it and wait for it to complete */
        spi_transfer(sh, &trans);
        spi_wait(&trans);

        if (trans.st_flags & SPI_F_ERROR)
                return trans.st_errno;

        return 0;
}
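
/*
 * Illustrative sketch only: a typical command/response exchange with a
 * slave using the convenience wrapper above, e.g. sending a one-byte
 * command and reading back a two-byte reply via the struct spi_handle
 * obtained at attach time.  The command value and lengths are
 * hypothetical.
 *
 *      uint8_t cmd = 0x03;
 *      uint8_t reply[2];
 *      int error;
 *
 *      error = spi_send_recv(sa->sa_handle, sizeof(cmd), &cmd,
 *          sizeof(reply), reply);
 *      if (error)
 *              return error;
 */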