/* $NetBSD: spi.c,v 1.21 2022/01/19 09:30:11 martin Exp $ */

/*-
 * Copyright (c) 2006 Urbana-Champaign Independent Media Center.
 * Copyright (c) 2006 Garrett D'Amore.
 * All rights reserved.
 *
 * Portions of this code were written by Garrett D'Amore for the
 * Champaign-Urbana Community Wireless Network Project.
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials provided
 *    with the distribution.
 * 3. All advertising materials mentioning features or use of this
 *    software must display the following acknowledgements:
 *      This product includes software developed by the Urbana-Champaign
 *      Independent Media Center.
 *	This product includes software developed by Garrett D'Amore.
 * 4. Urbana-Champaign Independent Media Center's name and Garrett
 *    D'Amore's name may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE URBANA-CHAMPAIGN INDEPENDENT
 * MEDIA CENTER AND GARRETT D'AMORE ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE URBANA-CHAMPAIGN INDEPENDENT
 * MEDIA CENTER OR GARRETT D'AMORE BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: spi.c,v 1.21 2022/01/19 09:30:11 martin Exp $");

#include "locators.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/conf.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/condvar.h>
#include <sys/errno.h>

#include <dev/spi/spivar.h>
#include <dev/spi/spi_io.h>

#include "ioconf.h"
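
/*
 * Bus instance state.  There is one of these per attached SPI
 * controller.  sc_lock and sc_cv guard the SPIC_BUSY flag, which
 * serializes use of the controller across transfers, while
 * sc_dev_lock serializes the /dev/spiN ioctl path.  sc_mode,
 * sc_speed and sc_slave cache the parameters most recently
 * programmed into the backend.
 */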
struct spi_softc {
	struct spi_controller	sc_controller;
	int			sc_mode;
	int			sc_speed;
	int			sc_slave;
	int			sc_nslaves;
	struct spi_handle	*sc_slaves;
	kmutex_t		sc_lock;
	kcondvar_t		sc_cv;
	kmutex_t		sc_dev_lock;
	int			sc_flags;
#define SPIC_BUSY		1
};

static dev_type_open(spi_open);
static dev_type_close(spi_close);
static dev_type_ioctl(spi_ioctl);

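/*
 * Character device interface (/dev/spiN).  Only open, close and
 * ioctl are supported; see spi_ioctl() below.
 */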
const struct cdevsw spi_cdevsw = {
	.d_open = spi_open,
	.d_close = spi_close,
	.d_read = noread,
	.d_write = nowrite,
	.d_ioctl = spi_ioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = nopoll,
	.d_mmap = nommap,
	.d_kqfilter = nokqfilter,
	.d_discard = nodiscard,
	.d_flag = D_OTHER | D_MPSAFE
};

/*
 * SPI slave device.  We have one of these per slave.
 */
struct spi_handle {
	struct spi_softc	*sh_sc;
	struct spi_controller	*sh_controller;
	int			sh_slave;
	int			sh_mode;
	int			sh_speed;
	int			sh_flags;
#define SPIH_ATTACHED		1
};

#define SPI_MAXDATA 4096

/*
 * API for bus drivers.
 */

int
spibus_print(void *aux, const char *pnp)
{

	if (pnp != NULL)
		aprint_normal("spi at %s", pnp);

	return (UNCONF);
}

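/* The bus instance itself always matches its controller attachment. */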
static int
spi_match(device_t parent, cfdata_t cf, void *aux)
{

	return 1;
}

static int
spi_print(void *aux, const char *pnp)
{
	struct spi_attach_args *sa = aux;

	if (sa->sa_handle->sh_slave != -1)
		aprint_normal(" slave %d", sa->sa_handle->sh_slave);

	return (UNCONF);
}

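/*
 * config_search() callback: locate slaves wired up in the kernel
 * configuration file.  The "slave" locator selects the chip select;
 * handles already claimed by direct configuration are skipped.
 */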
static int
spi_search(device_t parent, cfdata_t cf, const int *ldesc, void *aux)
{
	struct spi_softc *sc = device_private(parent);
	struct spi_attach_args sa;
	int addr;

	addr = cf->cf_loc[SPICF_SLAVE];
	if ((addr < 0) || (addr >= sc->sc_controller.sct_nslaves)) {
		return -1;
	}

	memset(&sa, 0, sizeof sa);
	sa.sa_handle = &sc->sc_slaves[addr];
	if (ISSET(sa.sa_handle->sh_flags, SPIH_ATTACHED))
		return -1;

	if (config_probe(parent, cf, &sa)) {
		SET(sa.sa_handle->sh_flags, SPIH_ATTACHED);
		config_attach(parent, cf, &sa, spi_print, CFARGS_NONE);
	}

	return 0;
}

/*
 * XXX this is the same as i2c_fill_compat. It could be refactored into a
 * common fill_compat function with pointers to compat & ncompat instead
 * of attach_args as the first parameter.
 */
static void
spi_fill_compat(struct spi_attach_args *sa, const char *compat, size_t len,
	char **buffer)
{
	int count, i;
	const char *c, *start, **ptr;

	*buffer = NULL;
	for (i = count = 0, c = compat; i < len; i++, c++)
		if (*c == 0)
			count++;
	count += 2;
	ptr = malloc(sizeof(char*)*count, M_TEMP, M_WAITOK);
	if (!ptr)
		return;

	for (i = count = 0, start = c = compat; i < len; i++, c++) {
		if (*c == 0) {
			ptr[count++] = start;
			start = c + 1;
		}
	}
	if (start < compat + len) {
		/* last string not 0 terminated */
		size_t l = c - start;
		*buffer = malloc(l + 1, M_TEMP, M_WAITOK);
		memcpy(*buffer, start, l);
		(*buffer)[l] = 0;
		ptr[count++] = *buffer;
	}
	ptr[count] = NULL;

	sa->sa_compat = ptr;
	sa->sa_ncompat = count;
}

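/*
 * Attach slaves enumerated by the platform (e.g. via FDT).  Each
 * entry of the child_devices array is a dictionary that must carry
 * "slave", "cookie" and "compatible" properties.
 */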
static void
spi_direct_attach_child_devices(device_t parent, struct spi_softc *sc,
    prop_array_t child_devices)
{
	unsigned int count;
	prop_dictionary_t child;
	prop_data_t cdata;
	uint32_t slave;
	uint64_t cookie;
	struct spi_attach_args sa;
	int loc[SPICF_NLOCS];
	char *buf;
	int i;

	memset(loc, 0, sizeof loc);
	count = prop_array_count(child_devices);
	for (i = 0; i < count; i++) {
		child = prop_array_get(child_devices, i);
		if (!child)
			continue;
		if (!prop_dictionary_get_uint32(child, "slave", &slave))
			continue;
		if (slave >= sc->sc_controller.sct_nslaves)
			continue;
		if (!prop_dictionary_get_uint64(child, "cookie", &cookie))
			continue;
		if (!(cdata = prop_dictionary_get(child, "compatible")))
			continue;
		loc[SPICF_SLAVE] = slave;

		memset(&sa, 0, sizeof sa);
		sa.sa_handle = &sc->sc_slaves[slave];
		sa.sa_prop = child;
		sa.sa_cookie = cookie;
		if (ISSET(sa.sa_handle->sh_flags, SPIH_ATTACHED))
			continue;
		SET(sa.sa_handle->sh_flags, SPIH_ATTACHED);

		buf = NULL;
		spi_fill_compat(&sa,
				prop_data_value(cdata),
				prop_data_size(cdata), &buf);
		config_found(parent, &sa, spi_print,
		    CFARGS(.locators = loc));

		if (sa.sa_compat)
			free(sa.sa_compat, M_TEMP);
		if (buf)
			free(buf, M_TEMP);
	}
}

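/*
 * Helper for slave drivers: match against a driver-supplied
 * "compatible" table when the platform provided compatible strings,
 * otherwise accept the device.  A minimal sketch of a match routine
 * in a hypothetical slave driver (names are illustrative only):
 *
 *	static const struct device_compatible_entry compat_data[] = {
 *		{ .compat = "acme,spi-widget" },
 *		DEVICE_COMPAT_EOL
 *	};
 *
 *	static int
 *	foo_match(device_t parent, cfdata_t cf, void *aux)
 *	{
 *		struct spi_attach_args *sa = aux;
 *
 *		return spi_compatible_match(sa, cf, compat_data);
 *	}
 */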
int
spi_compatible_match(const struct spi_attach_args *sa, const cfdata_t cf,
		     const struct device_compatible_entry *compats)
{
	if (sa->sa_ncompat > 0)
		return device_compatible_match(sa->sa_compat, sa->sa_ncompat,
					       compats);

	return 1;
}

/*
 * API for device drivers.
 *
 * We provide wrapper routines to decouple the ABI for the SPI
 * device drivers from the ABI for the SPI bus drivers.
 */
static void
spi_attach(device_t parent, device_t self, void *aux)
{
	struct spi_softc *sc = device_private(self);
	struct spibus_attach_args *sba = aux;
	int i;

	aprint_naive(": SPI bus\n");
	aprint_normal(": SPI bus\n");

	mutex_init(&sc->sc_dev_lock, MUTEX_DEFAULT, IPL_NONE);
	mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_VM);
	cv_init(&sc->sc_cv, "spictl");

	sc->sc_controller = *sba->sba_controller;
	sc->sc_nslaves = sba->sba_controller->sct_nslaves;
	/* allocate slave structures */
	sc->sc_slaves = malloc(sizeof (struct spi_handle) * sc->sc_nslaves,
	    M_DEVBUF, M_WAITOK | M_ZERO);

	sc->sc_speed = 0;
	sc->sc_mode = -1;
	sc->sc_slave = -1;

	/*
	 * Initialize slave handles
	 */
	for (i = 0; i < sc->sc_nslaves; i++) {
		sc->sc_slaves[i].sh_slave = i;
		sc->sc_slaves[i].sh_sc = sc;
		sc->sc_slaves[i].sh_controller = &sc->sc_controller;
	}

	/* First attach devices known to be present via fdt */
	if (sba->sba_child_devices) {
		spi_direct_attach_child_devices(self, sc, sba->sba_child_devices);
	}
	/* Then do any other devices the user may have manually wired */
	config_search(self, NULL,
	    CFARGS(.search = spi_search));
}

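/*
 * Userland access through the spi(4) control device.  spi_open()
 * only checks that the unit exists; all real work is done in
 * spi_ioctl().
 */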
static int
spi_open(dev_t dev, int flag, int fmt, lwp_t *l)
{
	struct spi_softc *sc = device_lookup_private(&spi_cd, minor(dev));

	if (sc == NULL)
		return ENXIO;

	return 0;
}

static int
spi_close(dev_t dev, int flag, int fmt, lwp_t *l)
{

	return 0;
}

static int
spi_ioctl(dev_t dev, u_long cmd, void *data, int flag, lwp_t *l)
{
	struct spi_softc *sc = device_lookup_private(&spi_cd, minor(dev));
	device_t self = device_lookup(&spi_cd, minor(dev));
	struct spi_handle *sh;
	spi_ioctl_configure_t *sic;
	spi_ioctl_transfer_t *sit;
	uint8_t *sbuf, *rbuf;
	int error;

	if (sc == NULL)
		return ENXIO;

	mutex_enter(&sc->sc_dev_lock);

	switch (cmd) {
	case SPI_IOCTL_CONFIGURE:
		sic = (spi_ioctl_configure_t *)data;
		if (sic->sic_addr < 0 || sic->sic_addr >= sc->sc_nslaves) {
			error = EINVAL;
			break;
		}
		sh = &sc->sc_slaves[sic->sic_addr];
		error = spi_configure(self, sh, sic->sic_mode, sic->sic_speed);
		break;
	case SPI_IOCTL_TRANSFER:
		sit = (spi_ioctl_transfer_t *)data;
		if (sit->sit_addr < 0 || sit->sit_addr >= sc->sc_nslaves) {
			error = EINVAL;
			break;
		}
		/* Reject zero-length and oversized (> SPI_MAXDATA) requests. */
		if ((sit->sit_send && (sit->sit_sendlen == 0 ||
			    sit->sit_sendlen > SPI_MAXDATA))
		    || (sit->sit_recv && (sit->sit_recvlen == 0 ||
			    sit->sit_recvlen > SPI_MAXDATA))) {
			error = EINVAL;
			break;
		}
		sh = &sc->sc_slaves[sit->sit_addr];
		sbuf = rbuf = NULL;
		error = 0;
		if (sit->sit_send) {
			sbuf = malloc(sit->sit_sendlen, M_DEVBUF, M_WAITOK);
			error = copyin(sit->sit_send, sbuf, sit->sit_sendlen);
		}
		if (sit->sit_recv) {
			rbuf = malloc(sit->sit_recvlen, M_DEVBUF, M_WAITOK);
		}
		if (error == 0) {
			if (sbuf && rbuf)
				error = spi_send_recv(sh,
					sit->sit_sendlen, sbuf,
					sit->sit_recvlen, rbuf);
			else if (sbuf)
				error = spi_send(sh,
					sit->sit_sendlen, sbuf);
			else if (rbuf)
				error = spi_recv(sh,
					sit->sit_recvlen, rbuf);
		}
		if (rbuf) {
			if (error == 0)
				error = copyout(rbuf, sit->sit_recv,
						sit->sit_recvlen);
			free(rbuf, M_DEVBUF);
		}
		if (sbuf) {
			free(sbuf, M_DEVBUF);
		}
		break;
	default:
		error = ENODEV;
		break;
	}

	mutex_exit(&sc->sc_dev_lock);

	return error;
}

CFATTACH_DECL_NEW(spi, sizeof(struct spi_softc),
    spi_match, spi_attach, NULL, NULL);

/*
 * Configure.  This should be the first thing a slave device driver
 * does: select the mode (e.g. SPI_MODE_0, which is equivalent to
 * National Semiconductor Microwire mode) and the clock speed.  If the
 * bus driver cannot run fast enough, it should simply configure the
 * fastest speed that it can support.  If the bus driver cannot run
 * slow enough, then the device is incompatible and an error should
 * be returned.
 */
int
spi_configure(device_t dev __unused, struct spi_handle *sh, int mode, int speed)
{

	sh->sh_mode = mode;
	sh->sh_speed = speed;

	/* No need to report errors; no failures. */

	return 0;
}

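/*
 * Illustrative only: a slave driver would typically call
 * spi_configure() once from its attach routine, with the mode and
 * clock rate its hardware requires, e.g. (hypothetical driver):
 *
 *	sc->sc_sh = sa->sa_handle;
 *	if (spi_configure(self, sc->sc_sh, SPI_MODE_0, 1000000) != 0)
 *		aprint_error_dev(self, "SPI configuration failed\n");
 */
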
/*
 * Acquire controller
 */
static void
spi_acquire(struct spi_handle *sh)
{
	struct spi_softc *sc = sh->sh_sc;

	mutex_enter(&sc->sc_lock);
	while ((sc->sc_flags & SPIC_BUSY) != 0)
		cv_wait(&sc->sc_cv, &sc->sc_lock);
	sc->sc_flags |= SPIC_BUSY;
	mutex_exit(&sc->sc_lock);
}

/*
 * Release controller
 */
static void
spi_release(struct spi_handle *sh)
{
	struct spi_softc *sc = sh->sh_sc;

	mutex_enter(&sc->sc_lock);
	sc->sc_flags &= ~SPIC_BUSY;
	cv_broadcast(&sc->sc_cv);
	mutex_exit(&sc->sc_lock);
}

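/*
 * A transfer is described by a list of chunks; each chunk moves up to
 * chunk_count bytes out of chunk_write and/or into chunk_read.  The
 * caller initializes the transfer and its chunks, adds the chunks in
 * order, and hands the whole thing to spi_transfer().  The lock and
 * condition variable set up here are destroyed again by spi_wait().
 */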
void
spi_transfer_init(struct spi_transfer *st)
{

	mutex_init(&st->st_lock, MUTEX_DEFAULT, IPL_VM);
	cv_init(&st->st_cv, "spixfr");

	st->st_flags = 0;
	st->st_errno = 0;
	st->st_done = NULL;
	st->st_chunks = NULL;
	st->st_private = NULL;
	st->st_slave = -1;
}

void
spi_chunk_init(struct spi_chunk *chunk, int cnt, const uint8_t *wptr,
    uint8_t *rptr)
{

	chunk->chunk_write = chunk->chunk_wptr = wptr;
	chunk->chunk_read = chunk->chunk_rptr = rptr;
	chunk->chunk_rresid = chunk->chunk_wresid = chunk->chunk_count = cnt;
	chunk->chunk_next = NULL;
}

void
spi_transfer_add(struct spi_transfer *st, struct spi_chunk *chunk)
{
	struct spi_chunk **cpp;

	/* this is an O(n) insert -- perhaps we should use a simpleq? */
	for (cpp = &st->st_chunks; *cpp; cpp = &(*cpp)->chunk_next);
	*cpp = chunk;
}

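/*
 * Enqueue a transfer: reset the per-chunk residuals and pointers,
 * reserve the controller, reprogram the backend if the slave, mode or
 * speed differ from the previous transfer, and hand the chunk list to
 * the backend's sct_transfer hook.  Completion is reported through
 * spi_done() and collected with spi_wait().
 */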
int
spi_transfer(struct spi_handle *sh, struct spi_transfer *st)
{
	struct spi_softc	*sc = sh->sh_sc;
	struct spi_controller	*tag = sh->sh_controller;
	struct spi_chunk	*chunk;
	int error;

	/*
	 * Initialize "resid" counters and pointers, so that callers
	 * and bus drivers don't have to.
	 */
	for (chunk = st->st_chunks; chunk; chunk = chunk->chunk_next) {
		chunk->chunk_wresid = chunk->chunk_rresid = chunk->chunk_count;
		chunk->chunk_wptr = chunk->chunk_write;
		chunk->chunk_rptr = chunk->chunk_read;
	}

	/*
	 * Match slave and parameters to handle
	 */
	st->st_slave = sh->sh_slave;

	/*
	 * Reserve controller during transaction
	 */
	spi_acquire(sh);

	st->st_spiprivate = (void *)sh;

	/*
	 * Reconfigure controller
	 *
	 * XXX backends don't configure per-slave parameters
	 * Whenever we switch slaves or change mode or speed, we
	 * need to tell the backend.
	 */
	if (sc->sc_slave != sh->sh_slave
	    || sc->sc_mode != sh->sh_mode
	    || sc->sc_speed != sh->sh_speed) {
		error = (*tag->sct_configure)(tag->sct_cookie,
				sh->sh_slave, sh->sh_mode, sh->sh_speed);
		if (error) {
			/* Mark it done so spi_wait() releases the controller. */
			spi_done(st, error);
			return error;
		}
	}
	sc->sc_mode = sh->sh_mode;
	sc->sc_speed = sh->sh_speed;
	sc->sc_slave = sh->sh_slave;

	error = (*tag->sct_transfer)(tag->sct_cookie, st);

	return error;
}

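/*
 * Wait for a transfer previously submitted with spi_transfer() to
 * complete, then tear down its lock and condition variable and
 * release the controller.  Call this exactly once per transfer.
 */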
void
spi_wait(struct spi_transfer *st)
{
	struct spi_handle *sh = st->st_spiprivate;

	mutex_enter(&st->st_lock);
	while (!(st->st_flags & SPI_F_DONE)) {
		cv_wait(&st->st_cv, &st->st_lock);
	}
	mutex_exit(&st->st_lock);
	cv_destroy(&st->st_cv);
	mutex_destroy(&st->st_lock);

	/*
	 * End transaction
	 */
	spi_release(sh);
}

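/*
 * Called by bus backends (possibly from interrupt context) when a
 * transfer has finished: record the error, mark the transfer done and
 * either invoke the completion callback or wake up spi_wait().
 */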
void
spi_done(struct spi_transfer *st, int err)
{

	mutex_enter(&st->st_lock);
	if ((st->st_errno = err) != 0) {
		st->st_flags |= SPI_F_ERROR;
	}
	st->st_flags |= SPI_F_DONE;
	if (st->st_done != NULL) {
		(*st->st_done)(st);
	} else {
		cv_broadcast(&st->st_cv);
	}
	mutex_exit(&st->st_lock);
}

/*
 * Some convenience routines.  These routines block until the work
 * is done.
 *
 * spi_recv - receives data from the bus
 *
 * spi_send - sends data to the bus
 *
 * spi_send_recv - sends data to the bus, and then receives.  Note that
 * this is done sequentially, i.e. send a command and then read the
 * response.  This is not full duplex.  If you want full duplex, you
 * can't use these convenience wrappers.
 */
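/*
 * Illustrative only (hypothetical device): reading one register byte
 * with a one-byte command using the convenience wrappers could look
 * like this in a slave driver:
 *
 *	uint8_t cmd = FOO_REG_STATUS;
 *	uint8_t val;
 *	int error;
 *
 *	error = spi_send_recv(sc->sc_sh, 1, &cmd, 1, &val);
 *	if (error)
 *		return error;
 */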
int
spi_recv(struct spi_handle *sh, int cnt, uint8_t *data)
{
	struct spi_transfer	trans;
	struct spi_chunk	chunk;

	spi_transfer_init(&trans);
	spi_chunk_init(&chunk, cnt, NULL, data);
	spi_transfer_add(&trans, &chunk);

	/* enqueue it and wait for it to complete */
	spi_transfer(sh, &trans);
	spi_wait(&trans);

	if (trans.st_flags & SPI_F_ERROR)
		return trans.st_errno;

	return 0;
}

int
spi_send(struct spi_handle *sh, int cnt, const uint8_t *data)
{
	struct spi_transfer	trans;
	struct spi_chunk	chunk;

	spi_transfer_init(&trans);
	spi_chunk_init(&chunk, cnt, data, NULL);
	spi_transfer_add(&trans, &chunk);

	/* enqueue it and wait for it to complete */
	spi_transfer(sh, &trans);
	spi_wait(&trans);

	if (trans.st_flags & SPI_F_ERROR)
		return trans.st_errno;

	return 0;
}

int
spi_send_recv(struct spi_handle *sh, int scnt, const uint8_t *snd,
    int rcnt, uint8_t *rcv)
{
	struct spi_transfer	trans;
	struct spi_chunk	chunk1, chunk2;

	spi_transfer_init(&trans);
	spi_chunk_init(&chunk1, scnt, snd, NULL);
	spi_chunk_init(&chunk2, rcnt, NULL, rcv);
	spi_transfer_add(&trans, &chunk1);
	spi_transfer_add(&trans, &chunk2);

	/* enqueue it and wait for it to complete */
	spi_transfer(sh, &trans);
	spi_wait(&trans);

	if (trans.st_flags & SPI_F_ERROR)
		return trans.st_errno;

	return 0;
}