/* $NetBSD: spi.c,v 1.22 2022/01/19 12:58:06 thorpej Exp $ */

/*-
 * Copyright (c) 2006 Urbana-Champaign Independent Media Center.
 * Copyright (c) 2006 Garrett D'Amore.
 * All rights reserved.
 *
 * Portions of this code were written by Garrett D'Amore for the
 * Champaign-Urbana Community Wireless Network Project.
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials provided
 *    with the distribution.
 * 3. All advertising materials mentioning features or use of this
 *    software must display the following acknowledgements:
 *      This product includes software developed by the Urbana-Champaign
 *      Independent Media Center.
 *	This product includes software developed by Garrett D'Amore.
 * 4. Urbana-Champaign Independent Media Center's name and Garrett
 *    D'Amore's name may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE URBANA-CHAMPAIGN INDEPENDENT
 * MEDIA CENTER AND GARRETT D'AMORE ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE URBANA-CHAMPAIGN INDEPENDENT
 * MEDIA CENTER OR GARRETT D'AMORE BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: spi.c,v 1.22 2022/01/19 12:58:06 thorpej Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/conf.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/condvar.h>
#include <sys/errno.h>

#include <dev/spi/spivar.h>
#include <dev/spi/spi_io.h>

#include "ioconf.h"
#include "locators.h"

struct spi_softc {
	device_t		sc_dev;
	struct spi_controller	sc_controller;
	int			sc_mode;
	int			sc_speed;
	int			sc_slave;
	int			sc_nslaves;
	struct spi_handle	*sc_slaves;
	kmutex_t		sc_lock;
	kcondvar_t		sc_cv;
	kmutex_t		sc_dev_lock;
	int			sc_flags;
#define SPIC_BUSY		1
};

static dev_type_open(spi_open);
static dev_type_close(spi_close);
static dev_type_ioctl(spi_ioctl);

const struct cdevsw spi_cdevsw = {
	.d_open = spi_open,
	.d_close = spi_close,
	.d_read = noread,
	.d_write = nowrite,
	.d_ioctl = spi_ioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = nopoll,
	.d_mmap = nommap,
	.d_kqfilter = nokqfilter,
	.d_discard = nodiscard,
	.d_flag = D_OTHER | D_MPSAFE
};

/*
 * SPI slave device.  We have one of these per slave.
 */
struct spi_handle {
	struct spi_softc	*sh_sc;
	struct spi_controller	*sh_controller;
	int			sh_slave;
	int			sh_mode;
	int			sh_speed;
	int			sh_flags;
#define SPIH_ATTACHED		1
};

#define SPI_MAXDATA 4096

/*
 * API for bus drivers.
 */

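/*
 * As an illustration only (the "foo" names and the "spibus" interface
 * attribute are assumptions, not part of this driver), a controller
 * back-end typically fills in a struct spi_controller and hands it to
 * this layer via struct spibus_attach_args; its sct_transfer routine
 * must call spi_done() once a queued spi_transfer has completed.
 *
 *	static void
 *	foo_attach(device_t parent, device_t self, void *aux)
 *	{
 *		struct foo_softc *sc = device_private(self);
 *		struct spibus_attach_args sba;
 *
 *		sc->sc_spi.sct_cookie = sc;
 *		sc->sc_spi.sct_configure = foo_configure;
 *		sc->sc_spi.sct_transfer = foo_transfer;
 *		sc->sc_spi.sct_nslaves = 4;
 *
 *		memset(&sba, 0, sizeof(sba));
 *		sba.sba_controller = &sc->sc_spi;
 *
 *		config_found(self, &sba, spibus_print,
 *		    CFARGS(.iattr = "spibus"));
 *	}
 */
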
int
spibus_print(void *aux, const char *pnp)
{

	if (pnp != NULL)
		aprint_normal("spi at %s", pnp);

	return (UNCONF);
}

static int
spi_match(device_t parent, cfdata_t cf, void *aux)
{

	return 1;
}

static int
spi_print(void *aux, const char *pnp)
{
	struct spi_attach_args *sa = aux;

	if (sa->sa_handle->sh_slave != -1)
		aprint_normal(" slave %d", sa->sa_handle->sh_slave);

	return (UNCONF);
}

static int
spi_search(device_t parent, cfdata_t cf, const int *ldesc, void *aux)
{
	struct spi_softc *sc = device_private(parent);
	struct spi_attach_args sa;
	int addr;

	addr = cf->cf_loc[SPICF_SLAVE];
	if ((addr < 0) || (addr >= sc->sc_controller.sct_nslaves)) {
		return -1;
	}

	memset(&sa, 0, sizeof sa);
	sa.sa_handle = &sc->sc_slaves[addr];
	if (ISSET(sa.sa_handle->sh_flags, SPIH_ATTACHED))
		return -1;

	if (config_probe(parent, cf, &sa)) {
		SET(sa.sa_handle->sh_flags, SPIH_ATTACHED);
		config_attach(parent, cf, &sa, spi_print, CFARGS_NONE);
	}

	return 0;
}

/*
 * XXX this is the same as i2c_fill_compat. It could be refactored into a
 * common fill_compat function with pointers to compat & ncompat instead
 * of attach_args as the first parameter.
 */
static void
spi_fill_compat(struct spi_attach_args *sa, const char *compat, size_t len,
	char **buffer)
{
	int count, i;
	const char *c, *start, **ptr;

	*buffer = NULL;
	for (i = count = 0, c = compat; i < len; i++, c++)
		if (*c == 0)
			count++;
	count += 2;
	ptr = malloc(sizeof(char*)*count, M_TEMP, M_WAITOK);
	if (!ptr)
		return;

	for (i = count = 0, start = c = compat; i < len; i++, c++) {
		if (*c == 0) {
			ptr[count++] = start;
			start = c + 1;
		}
	}
	if (start < compat + len) {
		/* last string not 0 terminated */
		size_t l = c - start;
		*buffer = malloc(l + 1, M_TEMP, M_WAITOK);
		memcpy(*buffer, start, l);
		(*buffer)[l] = 0;
		ptr[count++] = *buffer;
	}
	ptr[count] = NULL;

	sa->sa_compat = ptr;
	sa->sa_ncompat = count;
}

static void
spi_direct_attach_child_devices(device_t parent, struct spi_softc *sc,
    prop_array_t child_devices)
{
	unsigned int count;
	prop_dictionary_t child;
	prop_data_t cdata;
	uint32_t slave;
	uint64_t cookie;
	struct spi_attach_args sa;
	int loc[SPICF_NLOCS];
	char *buf;
	unsigned int i;

	memset(loc, 0, sizeof loc);
	count = prop_array_count(child_devices);
	for (i = 0; i < count; i++) {
		child = prop_array_get(child_devices, i);
		if (!child)
			continue;
		if (!prop_dictionary_get_uint32(child, "slave", &slave))
			continue;
		if (slave >= sc->sc_controller.sct_nslaves)
			continue;
		if (!prop_dictionary_get_uint64(child, "cookie", &cookie))
			continue;
		if (!(cdata = prop_dictionary_get(child, "compatible")))
			continue;
		loc[SPICF_SLAVE] = slave;

		memset(&sa, 0, sizeof sa);
		/* Slave handles are indexed by slave number, not by
		 * position in the child device list. */
		sa.sa_handle = &sc->sc_slaves[slave];
		sa.sa_prop = child;
		sa.sa_cookie = cookie;
		if (ISSET(sa.sa_handle->sh_flags, SPIH_ATTACHED))
			continue;
		SET(sa.sa_handle->sh_flags, SPIH_ATTACHED);

		buf = NULL;
		spi_fill_compat(&sa,
				prop_data_value(cdata),
				prop_data_size(cdata), &buf);
		config_found(parent, &sa, spi_print,
		    CFARGS(.locators = loc));

		if (sa.sa_compat)
			free(sa.sa_compat, M_TEMP);
		if (buf)
			free(buf, M_TEMP);
	}
}

int
spi_compatible_match(const struct spi_attach_args *sa, const cfdata_t cf,
		     const struct device_compatible_entry *compats)
{
	if (sa->sa_ncompat > 0)
		return device_compatible_match(sa->sa_compat, sa->sa_ncompat,
					       compats);

	return 1;
}

/*
 * API for device drivers.
 *
 * We provide wrapper routines to decouple the ABI for the SPI
 * device drivers from the ABI for the SPI bus drivers.
 */
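
/*
 * As an illustration only (the "foo" names are assumptions, not part of
 * this driver), a child device driver usually matches on the compatible
 * data passed in struct spi_attach_args and keeps sa_handle around for
 * later transfers:
 *
 *	static const struct device_compatible_entry foo_compat_data[] = {
 *		{ .compat = "acme,foo-spi" },
 *		DEVICE_COMPAT_EOL
 *	};
 *
 *	static int
 *	foo_match(device_t parent, cfdata_t cf, void *aux)
 *	{
 *		struct spi_attach_args *sa = aux;
 *
 *		return spi_compatible_match(sa, cf, foo_compat_data);
 *	}
 *
 *	static void
 *	foo_attach(device_t parent, device_t self, void *aux)
 *	{
 *		struct foo_softc *sc = device_private(self);
 *		struct spi_attach_args *sa = aux;
 *
 *		sc->sc_sh = sa->sa_handle;
 *	}
 */
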
static void
spi_attach(device_t parent, device_t self, void *aux)
{
	struct spi_softc *sc = device_private(self);
	struct spibus_attach_args *sba = aux;
	int i;

	aprint_naive(": SPI bus\n");
	aprint_normal(": SPI bus\n");

	mutex_init(&sc->sc_dev_lock, MUTEX_DEFAULT, IPL_NONE);
	mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_VM);
	cv_init(&sc->sc_cv, "spictl");

	sc->sc_dev = self;
	sc->sc_controller = *sba->sba_controller;
	sc->sc_nslaves = sba->sba_controller->sct_nslaves;
	/* allocate slave structures */
	sc->sc_slaves = malloc(sizeof (struct spi_handle) * sc->sc_nslaves,
	    M_DEVBUF, M_WAITOK | M_ZERO);

	sc->sc_speed = 0;
	sc->sc_mode = -1;
	sc->sc_slave = -1;

	/*
	 * Initialize slave handles
	 */
	for (i = 0; i < sc->sc_nslaves; i++) {
		sc->sc_slaves[i].sh_slave = i;
		sc->sc_slaves[i].sh_sc = sc;
		sc->sc_slaves[i].sh_controller = &sc->sc_controller;
	}

	/* First attach devices known to be present via fdt */
	if (sba->sba_child_devices) {
		spi_direct_attach_child_devices(self, sc,
		    sba->sba_child_devices);
	}
	/* Then do any other devices the user may have manually wired */
	config_search(self, NULL,
	    CFARGS(.search = spi_search));
}

static int
spi_open(dev_t dev, int flag, int fmt, lwp_t *l)
{
	struct spi_softc *sc = device_lookup_private(&spi_cd, minor(dev));

	if (sc == NULL)
		return ENXIO;

	return 0;
}

static int
spi_close(dev_t dev, int flag, int fmt, lwp_t *l)
{

	return 0;
}

static int
spi_ioctl(dev_t dev, u_long cmd, void *data, int flag, lwp_t *l)
{
	struct spi_softc *sc = device_lookup_private(&spi_cd, minor(dev));
	struct spi_handle *sh;
	spi_ioctl_configure_t *sic;
	spi_ioctl_transfer_t *sit;
	uint8_t *sbuf, *rbuf;
	int error;

	if (sc == NULL)
		return ENXIO;

	mutex_enter(&sc->sc_dev_lock);

	switch (cmd) {
	case SPI_IOCTL_CONFIGURE:
		sic = (spi_ioctl_configure_t *)data;
		if (sic->sic_addr < 0 || sic->sic_addr >= sc->sc_nslaves) {
			error = EINVAL;
			break;
		}
		sh = &sc->sc_slaves[sic->sic_addr];
		error = spi_configure(sc->sc_dev, sh, sic->sic_mode,
		    sic->sic_speed);
		break;
	case SPI_IOCTL_TRANSFER:
		sit = (spi_ioctl_transfer_t *)data;
		if (sit->sit_addr < 0 || sit->sit_addr >= sc->sc_nslaves) {
			error = EINVAL;
			break;
		}
		if ((sit->sit_send && sit->sit_sendlen == 0)
		    || (sit->sit_recv && sit->sit_recvlen == 0)) {
			error = EINVAL;
			break;
		}
		/* Cap transfer sizes at SPI_MAXDATA. */
		if ((sit->sit_send && sit->sit_sendlen > SPI_MAXDATA)
		    || (sit->sit_recv && sit->sit_recvlen > SPI_MAXDATA)) {
			error = EINVAL;
			break;
		}
		sh = &sc->sc_slaves[sit->sit_addr];
		sbuf = rbuf = NULL;
		error = 0;
		if (sit->sit_send) {
			sbuf = malloc(sit->sit_sendlen, M_DEVBUF, M_WAITOK);
			error = copyin(sit->sit_send, sbuf, sit->sit_sendlen);
		}
		if (sit->sit_recv) {
			rbuf = malloc(sit->sit_recvlen, M_DEVBUF, M_WAITOK);
		}
		if (error == 0) {
			if (sbuf && rbuf)
				error = spi_send_recv(sh,
					sit->sit_sendlen, sbuf,
					sit->sit_recvlen, rbuf);
			else if (sbuf)
				error = spi_send(sh,
					sit->sit_sendlen, sbuf);
			else if (rbuf)
				error = spi_recv(sh,
					sit->sit_recvlen, rbuf);
		}
		if (rbuf) {
			if (error == 0)
				error = copyout(rbuf, sit->sit_recv,
						sit->sit_recvlen);
			free(rbuf, M_DEVBUF);
		}
		if (sbuf) {
			free(sbuf, M_DEVBUF);
		}
		break;
	default:
		error = ENODEV;
		break;
	}

	mutex_exit(&sc->sc_dev_lock);

	return error;
}

CFATTACH_DECL_NEW(spi, sizeof(struct spi_softc),
    spi_match, spi_attach, NULL, NULL);

/*
 * Configure.  This should be the first thing a SPI device driver does,
 * to select the clock mode (e.g. SPI_MODE_0, which corresponds to
 * National Semiconductor's Microwire format) and the clock speed.  If
 * the bus driver cannot run fast enough, it should just configure the
 * fastest speed that it can support.  If the bus driver cannot run
 * slowly enough, the device is incompatible and an error should be
 * returned.
 */
int
spi_configure(device_t dev __unused, struct spi_handle *sh, int mode, int speed)
{

	sh->sh_mode = mode;
	sh->sh_speed = speed;

	/* No need to report errors; no failures. */

	return 0;
}

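/*
 * For example (a sketch with assumed names and values, not taken from a
 * real driver), a child driver would typically call this early in its
 * attach routine, before issuing any transfers:
 *
 *	if (spi_configure(self, sa->sa_handle, SPI_MODE_3, 10000000)) {
 *		aprint_error_dev(self, "unsupported SPI mode/speed\n");
 *		return;
 *	}
 */
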
/*
 * Acquire controller
 */
static void
spi_acquire(struct spi_handle *sh)
{
	struct spi_softc *sc = sh->sh_sc;

	mutex_enter(&sc->sc_lock);
	while ((sc->sc_flags & SPIC_BUSY) != 0)
		cv_wait(&sc->sc_cv, &sc->sc_lock);
	sc->sc_flags |= SPIC_BUSY;
	mutex_exit(&sc->sc_lock);
}

/*
 * Release controller
 */
static void
spi_release(struct spi_handle *sh)
{
	struct spi_softc *sc = sh->sh_sc;

	mutex_enter(&sc->sc_lock);
	sc->sc_flags &= ~SPIC_BUSY;
	cv_broadcast(&sc->sc_cv);
	mutex_exit(&sc->sc_lock);
}

void
spi_transfer_init(struct spi_transfer *st)
{

	mutex_init(&st->st_lock, MUTEX_DEFAULT, IPL_VM);
	cv_init(&st->st_cv, "spixfr");

	st->st_flags = 0;
	st->st_errno = 0;
	st->st_done = NULL;
	st->st_chunks = NULL;
	st->st_private = NULL;
	st->st_slave = -1;
}

void
spi_chunk_init(struct spi_chunk *chunk, int cnt, const uint8_t *wptr,
    uint8_t *rptr)
{

	chunk->chunk_write = chunk->chunk_wptr = wptr;
	chunk->chunk_read = chunk->chunk_rptr = rptr;
	chunk->chunk_rresid = chunk->chunk_wresid = chunk->chunk_count = cnt;
	chunk->chunk_next = NULL;
}

void
spi_transfer_add(struct spi_transfer *st, struct spi_chunk *chunk)
{
	struct spi_chunk **cpp;

	/* this is an O(n) insert -- perhaps we should use a simpleq? */
	for (cpp = &st->st_chunks; *cpp; cpp = &(*cpp)->chunk_next);
	*cpp = chunk;
}

int
spi_transfer(struct spi_handle *sh, struct spi_transfer *st)
{
	struct spi_softc	*sc = sh->sh_sc;
	struct spi_controller	*tag = sh->sh_controller;
	struct spi_chunk	*chunk;
	int error;

	/*
	 * Initialize "resid" counters and pointers, so that callers
	 * and bus drivers don't have to.
	 */
	for (chunk = st->st_chunks; chunk; chunk = chunk->chunk_next) {
		chunk->chunk_wresid = chunk->chunk_rresid = chunk->chunk_count;
		chunk->chunk_wptr = chunk->chunk_write;
		chunk->chunk_rptr = chunk->chunk_read;
	}

	/*
	 * Match slave and parameters to handle
	 */
	st->st_slave = sh->sh_slave;

	/*
	 * Reserve controller during transaction
	 */
	spi_acquire(sh);

	st->st_spiprivate = (void *)sh;

	/*
	 * Reconfigure controller
	 *
	 * XXX backends don't configure per-slave parameters
	 * Whenever we switch slaves or change mode or speed, we
	 * need to tell the backend.
	 */
	if (sc->sc_slave != sh->sh_slave
	    || sc->sc_mode != sh->sh_mode
	    || sc->sc_speed != sh->sh_speed) {
		error = (*tag->sct_configure)(tag->sct_cookie,
				sh->sh_slave, sh->sh_mode, sh->sh_speed);
		if (error) {
			/*
			 * Complete the transfer with the error so that
			 * spi_wait() can clean it up and release the
			 * controller.
			 */
			spi_done(st, error);
			return error;
		}
	}
	sc->sc_mode = sh->sh_mode;
	sc->sc_speed = sh->sh_speed;
	sc->sc_slave = sh->sh_slave;

	error = (*tag->sct_transfer)(tag->sct_cookie, st);

	return error;
}

void
spi_wait(struct spi_transfer *st)
{
	struct spi_handle *sh = st->st_spiprivate;

	mutex_enter(&st->st_lock);
	while (!(st->st_flags & SPI_F_DONE)) {
		cv_wait(&st->st_cv, &st->st_lock);
	}
	mutex_exit(&st->st_lock);
	cv_destroy(&st->st_cv);
	mutex_destroy(&st->st_lock);

	/*
	 * End transaction
	 */
	spi_release(sh);
}

void
spi_done(struct spi_transfer *st, int err)
{

	mutex_enter(&st->st_lock);
	if ((st->st_errno = err) != 0) {
		st->st_flags |= SPI_F_ERROR;
	}
	st->st_flags |= SPI_F_DONE;
	if (st->st_done != NULL) {
		(*st->st_done)(st);
	} else {
		cv_broadcast(&st->st_cv);
	}
	mutex_exit(&st->st_lock);
}

/*
 * Some convenience routines.  These routines block until the work
 * is done.
 *
 * spi_recv - receives data from the bus
 *
 * spi_send - sends data to the bus
 *
 * spi_send_recv - sends data to the bus, and then receives.  Note that
 * this is done synchronously, i.e. send a command and then get the
 * response.  This is not full duplex.  If you want full duplex, you
 * cannot use these convenience wrappers; see the sketch below.
 */
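
/*
 * These wrappers are half-duplex by construction.  A full-duplex
 * exchange can be built directly on the chunk interface instead: a
 * single chunk with both a write and a read pointer clocks data out
 * and in simultaneously.  Sketch only; "cmd", "resp" and "len" are
 * illustrative names:
 *
 *	struct spi_transfer trans;
 *	struct spi_chunk chunk;
 *	int error = 0;
 *
 *	spi_transfer_init(&trans);
 *	spi_chunk_init(&chunk, len, cmd, resp);
 *	spi_transfer_add(&trans, &chunk);
 *	spi_transfer(sh, &trans);
 *	spi_wait(&trans);
 *	if (trans.st_flags & SPI_F_ERROR)
 *		error = trans.st_errno;
 */
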
int
spi_recv(struct spi_handle *sh, int cnt, uint8_t *data)
{
	struct spi_transfer	trans;
	struct spi_chunk	chunk;

	spi_transfer_init(&trans);
	spi_chunk_init(&chunk, cnt, NULL, data);
	spi_transfer_add(&trans, &chunk);

	/* enqueue it and wait for it to complete */
	spi_transfer(sh, &trans);
	spi_wait(&trans);

	if (trans.st_flags & SPI_F_ERROR)
		return trans.st_errno;

	return 0;
}

int
spi_send(struct spi_handle *sh, int cnt, const uint8_t *data)
{
	struct spi_transfer	trans;
	struct spi_chunk	chunk;

	spi_transfer_init(&trans);
	spi_chunk_init(&chunk, cnt, data, NULL);
	spi_transfer_add(&trans, &chunk);

	/* enqueue it and wait for it to complete */
	spi_transfer(sh, &trans);
	spi_wait(&trans);

	if (trans.st_flags & SPI_F_ERROR)
		return trans.st_errno;

	return 0;
}

int
spi_send_recv(struct spi_handle *sh, int scnt, const uint8_t *snd,
    int rcnt, uint8_t *rcv)
{
	struct spi_transfer	trans;
	struct spi_chunk	chunk1, chunk2;

	spi_transfer_init(&trans);
	spi_chunk_init(&chunk1, scnt, snd, NULL);
	spi_chunk_init(&chunk2, rcnt, NULL, rcv);
	spi_transfer_add(&trans, &chunk1);
	spi_transfer_add(&trans, &chunk2);

	/* enqueue it and wait for it to complete */
	spi_transfer(sh, &trans);
	spi_wait(&trans);

	if (trans.st_flags & SPI_F_ERROR)
		return trans.st_errno;

	return 0;
}