/* $NetBSD: spi.c,v 1.27 2025/09/10 03:16:57 thorpej Exp $ */

/*-
 * Copyright (c) 2006 Urbana-Champaign Independent Media Center.
 * Copyright (c) 2006 Garrett D'Amore.
 * All rights reserved.
 *
 * Portions of this code were written by Garrett D'Amore for the
 * Champaign-Urbana Community Wireless Network Project.
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials provided
 *    with the distribution.
 * 3. All advertising materials mentioning features or use of this
 *    software must display the following acknowledgements:
 *      This product includes software developed by the Urbana-Champaign
 *      Independent Media Center.
 *	This product includes software developed by Garrett D'Amore.
 * 4. Urbana-Champaign Independent Media Center's name and Garrett
 *    D'Amore's name may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE URBANA-CHAMPAIGN INDEPENDENT
 * MEDIA CENTER AND GARRETT D'AMORE ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE URBANA-CHAMPAIGN INDEPENDENT
 * MEDIA CENTER OR GARRETT D'AMORE BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "opt_fdt.h"		/* XXX */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: spi.c,v 1.27 2025/09/10 03:16:57 thorpej Exp $");

#include "locators.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/conf.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/condvar.h>
#include <sys/errno.h>

#include <dev/spi/spivar.h>
#include <dev/spi/spi_io.h>

#ifdef FDT
#include <dev/fdt/fdt_spi.h>	/* XXX */
#endif

#include "ioconf.h"

struct spi_softc {
	device_t		sc_dev;
	struct spi_controller	sc_controller;
	int			sc_mode;
	int			sc_speed;
	int			sc_slave;
	int			sc_nslaves;
	struct spi_handle	*sc_slaves;
	kmutex_t		sc_lock;
	kcondvar_t		sc_cv;
	kmutex_t		sc_dev_lock;
	int			sc_flags;
#define SPIC_BUSY		1
};

static dev_type_open(spi_open);
static dev_type_close(spi_close);
static dev_type_ioctl(spi_ioctl);

const struct cdevsw spi_cdevsw = {
	.d_open = spi_open,
	.d_close = spi_close,
	.d_read = noread,
	.d_write = nowrite,
	.d_ioctl = spi_ioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = nopoll,
	.d_mmap = nommap,
	.d_kqfilter = nokqfilter,
	.d_discard = nodiscard,
	.d_flag = D_OTHER | D_MPSAFE
};

/*
 * SPI slave device.  We have one of these per slave.
 */
struct spi_handle {
	struct spi_softc	*sh_sc;
	struct spi_controller	*sh_controller;
	int			sh_slave;
	int			sh_mode;
	int			sh_speed;
	int			sh_flags;
#define SPIH_ATTACHED		1
};

#define SPI_MAXDATA 4096

/*
 * API for bus drivers.
 */

int
spibus_print(void *aux, const char *pnp)
{

	if (pnp != NULL)
		aprint_normal("spi at %s", pnp);

	return (UNCONF);
}
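
/*
 * Illustrative sketch (not compiled): how a controller back-end hands its
 * bus to this layer.  The back-end describes itself in a struct
 * spi_controller (sct_cookie, sct_nslaves, sct_configure and sct_transfer
 * are the members used by this file), wraps it in a struct
 * spibus_attach_args, and attaches the spi(4) instance with config_found()
 * using spibus_print() above.  The myspi_* names below are hypothetical.
 */
#if 0
struct myspi_softc {
	device_t		sc_dev;
	struct spi_controller	sc_spi;
};

static int	myspi_configure(void *, int, int, int);
static int	myspi_transfer(void *, struct spi_transfer *);

static void
myspi_attach_bus(device_t self, struct myspi_softc *msc)
{
	struct spibus_attach_args sba;

	/* Describe this controller to the machine-independent layer. */
	msc->sc_spi.sct_cookie = msc;
	msc->sc_spi.sct_nslaves = 4;	/* number of chip selects */
	msc->sc_spi.sct_configure = myspi_configure;
	msc->sc_spi.sct_transfer = myspi_transfer;

	memset(&sba, 0, sizeof sba);
	sba.sba_controller = &msc->sc_spi;

	/* Attach the spi(4) bus instance below this controller. */
	config_found(self, &sba, spibus_print, CFARGS_NONE);
}
#endif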


static int
spi_match(device_t parent, cfdata_t cf, void *aux)
{

	return 1;
}

static int
spi_print(void *aux, const char *pnp)
{
	struct spi_attach_args *sa = aux;

	if (sa->sa_handle->sh_slave != -1)
		aprint_normal(" slave %d", sa->sa_handle->sh_slave);

	return (UNCONF);
}

static int
spi_search(device_t parent, cfdata_t cf, const int *ldesc, void *aux)
{
	struct spi_softc *sc = device_private(parent);
	struct spi_attach_args sa;
	int addr;

	addr = cf->cf_loc[SPICF_SLAVE];
	if ((addr < 0) || (addr >= sc->sc_controller.sct_nslaves)) {
		return -1;
	}

	memset(&sa, 0, sizeof sa);
	sa.sa_handle = &sc->sc_slaves[addr];
	if (ISSET(sa.sa_handle->sh_flags, SPIH_ATTACHED))
		return -1;

	if (config_probe(parent, cf, &sa)) {
		SET(sa.sa_handle->sh_flags, SPIH_ATTACHED);
		config_attach(parent, cf, &sa, spi_print, CFARGS_NONE);
	}

	return 0;
}

/*
 * XXX this is the same as i2c_fill_compat. It could be refactored into a
 * common fill_compat function with pointers to compat & ncompat instead
 * of attach_args as the first parameter.
 */
static void
spi_fill_compat(struct spi_attach_args *sa, const char *compat, size_t len,
	char **buffer)
{
	int count, i;
	const char *c, *start, **ptr;

	*buffer = NULL;
	for (i = count = 0, c = compat; i < len; i++, c++)
		if (*c == 0)
			count++;
	count += 2;
	ptr = malloc(sizeof(char*)*count, M_TEMP, M_WAITOK);
	if (!ptr)
		return;

	for (i = count = 0, start = c = compat; i < len; i++, c++) {
		if (*c == 0) {
			ptr[count++] = start;
			start = c + 1;
		}
	}
	if (start < compat + len) {
		/* last string not 0 terminated */
		size_t l = c - start;
		*buffer = malloc(l + 1, M_TEMP, M_WAITOK);
		memcpy(*buffer, start, l);
		(*buffer)[l] = 0;
		ptr[count++] = *buffer;
	}
	ptr[count] = NULL;

	sa->sa_compat = ptr;
	sa->sa_ncompat = count;
}

static void
spi_direct_attach_child_devices(device_t parent, struct spi_softc *sc,
    prop_array_t child_devices)
{
	unsigned int count;
	prop_dictionary_t child;
	prop_data_t cdata;
	uint32_t slave;
	uint64_t cookie;
	struct spi_attach_args sa;
	int loc[SPICF_NLOCS];
	char *buf;
	int i;

	memset(loc, 0, sizeof loc);
	count = prop_array_count(child_devices);
	for (i = 0; i < count; i++) {
		child = prop_array_get(child_devices, i);
		if (!child)
			continue;
		if (!prop_dictionary_get_uint32(child, "slave", &slave))
			continue;
		if (slave >= sc->sc_controller.sct_nslaves)
			continue;
		if (!prop_dictionary_get_uint64(child, "cookie", &cookie))
			continue;
		if (!(cdata = prop_dictionary_get(child, "compatible")))
			continue;
		loc[SPICF_SLAVE] = slave;

		memset(&sa, 0, sizeof sa);
		sa.sa_handle = &sc->sc_slaves[slave];
		sa.sa_prop = child;
		sa.sa_cookie = cookie;
		if (ISSET(sa.sa_handle->sh_flags, SPIH_ATTACHED))
			continue;
		SET(sa.sa_handle->sh_flags, SPIH_ATTACHED);

		buf = NULL;
		spi_fill_compat(&sa,
				prop_data_value(cdata),
				prop_data_size(cdata), &buf);
		config_found(parent, &sa, spi_print,
		    CFARGS(.locators = loc));

		if (sa.sa_compat)
			free(sa.sa_compat, M_TEMP);
		if (buf)
			free(buf, M_TEMP);
	}
}
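
/*
 * Illustrative sketch (not compiled): how a controller back-end can
 * describe directly-configured children.  Each element of the
 * sba_child_devices array is a dictionary whose "slave", "cookie" and
 * "compatible" keys are consumed above.  The compatible string and the
 * cookie value below are hypothetical.
 */
#if 0
static prop_array_t
myspi_child_devices(void)
{
	prop_array_t children = prop_array_create();
	prop_dictionary_t child = prop_dictionary_create();
	static const char compat[] = "acme,spitemp";

	prop_dictionary_set_uint32(child, "slave", 0);
	prop_dictionary_set_uint64(child, "cookie", 0);
	prop_dictionary_set_data(child, "compatible", compat, sizeof(compat));
	prop_array_add(children, child);
	prop_object_release(child);

	/* Hand the array to spi_attach() via sba.sba_child_devices. */
	return children;
}
#endif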

int
spi_compatible_match(const struct spi_attach_args *sa, const cfdata_t cf,
		     const struct device_compatible_entry *compats)
{
	if (sa->sa_ncompat > 0)
		return device_compatible_match(sa->sa_compat, sa->sa_ncompat,
					       compats);

	return 1;
}

const struct device_compatible_entry *
spi_compatible_lookup(const struct spi_attach_args *sa,
    const struct device_compatible_entry *compats)
{
	return device_compatible_lookup(sa->sa_compat, sa->sa_ncompat,
					compats);
}
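
/*
 * Illustrative sketch (not compiled): a typical SPI device driver match
 * function.  With direct configuration the attach args carry compatible
 * strings and spi_compatible_match() checks them against the driver's
 * table; with manual wiring in the kernel config sa_ncompat is 0 and the
 * match is accepted unconditionally.  The "acme,spitemp" string and the
 * spitemp_* names are hypothetical.
 */
#if 0
static const struct device_compatible_entry spitemp_compat_data[] = {
	{ .compat = "acme,spitemp" },
	DEVICE_COMPAT_EOL
};

static int
spitemp_match(device_t parent, cfdata_t cf, void *aux)
{
	struct spi_attach_args *sa = aux;

	return spi_compatible_match(sa, cf, spitemp_compat_data);
}
#endif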

/*
 * API for device drivers.
 *
 * We provide wrapper routines to decouple the ABI for the SPI
 * device drivers from the ABI for the SPI bus drivers.
 */
static void
spi_attach(device_t parent, device_t self, void *aux)
{
	struct spi_softc *sc = device_private(self);
	struct spibus_attach_args *sba = aux;
	int i;

	aprint_naive(": SPI bus\n");
	aprint_normal(": SPI bus\n");

	mutex_init(&sc->sc_dev_lock, MUTEX_DEFAULT, IPL_NONE);
	mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_VM);
	cv_init(&sc->sc_cv, "spictl");

	sc->sc_dev = self;
	sc->sc_controller = *sba->sba_controller;	/* XXX copied??? */
	sc->sc_nslaves = sba->sba_controller->sct_nslaves;
	/* allocate slave structures */
	sc->sc_slaves = malloc(sizeof (struct spi_handle) * sc->sc_nslaves,
	    M_DEVBUF, M_WAITOK | M_ZERO);

	sc->sc_speed = 0;
	sc->sc_mode = -1;
	sc->sc_slave = -1;

	/*
	 * Initialize slave handles
	 */
	for (i = 0; i < sc->sc_nslaves; i++) {
		sc->sc_slaves[i].sh_slave = i;
		sc->sc_slaves[i].sh_sc = sc;
		sc->sc_slaves[i].sh_controller = &sc->sc_controller;
	}

#ifdef FDT			/* XXX */
	fdtbus_register_spi_controller(self, &sc->sc_controller);
#endif /* FDT */

	/* First attach devices known to be present via fdt */
	if (sba->sba_child_devices) {
		spi_direct_attach_child_devices(self, sc, sba->sba_child_devices);
	}
	/* Then do any other devices the user may have manually wired */
	config_search(self, NULL,
	    CFARGS(.search = spi_search));
}

static int
spi_open(dev_t dev, int flag, int fmt, lwp_t *l)
{
	struct spi_softc *sc = device_lookup_private(&spi_cd, minor(dev));

	if (sc == NULL)
		return ENXIO;

	return 0;
}

static int
spi_close(dev_t dev, int flag, int fmt, lwp_t *l)
{

	return 0;
}

static int
spi_ioctl(dev_t dev, u_long cmd, void *data, int flag, lwp_t *l)
{
	struct spi_softc *sc = device_lookup_private(&spi_cd, minor(dev));
	struct spi_handle *sh;
	spi_ioctl_configure_t *sic;
	spi_ioctl_transfer_t *sit;
	uint8_t *sbuf, *rbuf;
	int error;

	if (sc == NULL)
		return ENXIO;

	mutex_enter(&sc->sc_dev_lock);

	switch (cmd) {
	case SPI_IOCTL_CONFIGURE:
		sic = (spi_ioctl_configure_t *)data;
		if (sic->sic_addr < 0 || sic->sic_addr >= sc->sc_nslaves) {
			error = EINVAL;
			break;
		}
		sh = &sc->sc_slaves[sic->sic_addr];
		error = spi_configure(sc->sc_dev, sh, sic->sic_mode,
		    sic->sic_speed);
		break;
	case SPI_IOCTL_TRANSFER:
		sit = (spi_ioctl_transfer_t *)data;
		if (sit->sit_addr < 0 || sit->sit_addr >= sc->sc_nslaves) {
			error = EINVAL;
			break;
		}
		if ((sit->sit_send && sit->sit_sendlen == 0)
		    || (sit->sit_recv && sit->sit_recvlen == 0)) {
			error = EINVAL;
			break;
		}
		sh = &sc->sc_slaves[sit->sit_addr];
		sbuf = rbuf = NULL;
		error = 0;
		if (sit->sit_send && sit->sit_sendlen <= SPI_MAXDATA) {
			sbuf = malloc(sit->sit_sendlen, M_DEVBUF, M_WAITOK);
			error = copyin(sit->sit_send, sbuf, sit->sit_sendlen);
		}
		if (sit->sit_recv && sit->sit_recvlen <= SPI_MAXDATA) {
			rbuf = malloc(sit->sit_recvlen, M_DEVBUF, M_WAITOK);
		}
		if (error == 0) {
			if (sbuf && rbuf)
				error = spi_send_recv(sh,
					sit->sit_sendlen, sbuf,
					sit->sit_recvlen, rbuf);
			else if (sbuf)
				error = spi_send(sh,
					sit->sit_sendlen, sbuf);
			else if (rbuf)
				error = spi_recv(sh,
					sit->sit_recvlen, rbuf);
		}
		if (rbuf) {
			if (error == 0)
				error = copyout(rbuf, sit->sit_recv,
						sit->sit_recvlen);
			free(rbuf, M_DEVBUF);
		}
		if (sbuf) {
			free(sbuf, M_DEVBUF);
		}
		break;
	default:
		error = ENODEV;
		break;
	}

	mutex_exit(&sc->sc_dev_lock);

	return error;
}
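
/*
 * Illustrative sketch (not compiled): driving the character device from
 * userland.  The ioctl names and structure members match the ones handled
 * by spi_ioctl() above and are declared in <dev/spi/spi_io.h>; the device
 * path, slave address, mode, speed and command byte are hypothetical.
 */
#if 0
#include <sys/ioctl.h>
#include <dev/spi/spi_io.h>
#include <err.h>
#include <fcntl.h>
#include <stdint.h>
#include <string.h>

int
main(void)
{
	spi_ioctl_configure_t sic;
	spi_ioctl_transfer_t sit;
	uint8_t cmd = 0x9f, resp[3];
	int fd;

	if ((fd = open("/dev/spi0", O_RDWR)) == -1)
		err(1, "open");

	/* Select mode and clock speed for slave 0. */
	memset(&sic, 0, sizeof sic);
	sic.sic_addr = 0;
	sic.sic_mode = 0;
	sic.sic_speed = 500000;
	if (ioctl(fd, SPI_IOCTL_CONFIGURE, &sic) == -1)
		err(1, "SPI_IOCTL_CONFIGURE");

	/* Send one command byte, then read back a three byte response. */
	memset(&sit, 0, sizeof sit);
	sit.sit_addr = 0;
	sit.sit_send = &cmd;
	sit.sit_sendlen = sizeof cmd;
	sit.sit_recv = resp;
	sit.sit_recvlen = sizeof resp;
	if (ioctl(fd, SPI_IOCTL_TRANSFER, &sit) == -1)
		err(1, "SPI_IOCTL_TRANSFER");

	return 0;
}
#endif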

CFATTACH_DECL_NEW(spi, sizeof(struct spi_softc),
    spi_match, spi_attach, NULL, NULL);

/*
 * Configure.  This should be the first thing an SPI device driver
 * does: it selects the mode (e.g. SPI_MODE_0, which is equivalent to
 * National Semiconductor's Microwire mode) and the clock speed.  If
 * the bus driver cannot run fast enough, it should just configure the
 * fastest speed that it can support.  If the bus driver cannot run
 * slow enough, then the device is incompatible and an error should be
 * returned.
 */
int
spi_configure(device_t dev __unused, struct spi_handle *sh, int mode, int speed)
{

	sh->sh_mode = mode;
	sh->sh_speed = speed;

	/* No need to report errors; no failures. */

	return 0;
}
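
/*
 * Illustrative sketch (not compiled): a device driver configuring its
 * slave from attach and then doing a simple command/response exchange
 * with the convenience wrappers defined later in this file.  The mode,
 * clock speed, command byte and spitemp_* names are hypothetical.
 */
#if 0
static void
spitemp_attach(device_t parent, device_t self, void *aux)
{
	struct spi_attach_args *sa = aux;
	struct spi_handle *sh = sa->sa_handle;
	uint8_t cmd = 0x0f, val;

	aprint_normal(": hypothetical SPI sensor\n");

	/* Pick the mode and clock rate this part needs. */
	if (spi_configure(self, sh, SPI_MODE_0, 500000) != 0) {
		aprint_error_dev(self, "unable to configure bus\n");
		return;
	}

	/* Send a command byte, then read one byte of response. */
	if (spi_send_recv(sh, sizeof cmd, &cmd, sizeof val, &val) != 0) {
		aprint_error_dev(self, "no response from device\n");
		return;
	}
	aprint_normal_dev(self, "register 0x%02x = 0x%02x\n", cmd, val);
}
#endif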

/*
 * Acquire controller
 */
static void
spi_acquire(struct spi_handle *sh)
{
	struct spi_softc *sc = sh->sh_sc;

	mutex_enter(&sc->sc_lock);
	while ((sc->sc_flags & SPIC_BUSY) != 0)
		cv_wait(&sc->sc_cv, &sc->sc_lock);
	sc->sc_flags |= SPIC_BUSY;
	mutex_exit(&sc->sc_lock);
}

/*
 * Release controller
 */
static void
spi_release(struct spi_handle *sh)
{
	struct spi_softc *sc = sh->sh_sc;

	mutex_enter(&sc->sc_lock);
	sc->sc_flags &= ~SPIC_BUSY;
	cv_broadcast(&sc->sc_cv);
	mutex_exit(&sc->sc_lock);
}

void
spi_transfer_init(struct spi_transfer *st)
{

	mutex_init(&st->st_lock, MUTEX_DEFAULT, IPL_VM);
	cv_init(&st->st_cv, "spixfr");

	st->st_flags = 0;
	st->st_errno = 0;
	st->st_done = NULL;
	st->st_chunks = NULL;
	st->st_private = NULL;
	st->st_slave = -1;
}

void
spi_chunk_init(struct spi_chunk *chunk, int cnt, const uint8_t *wptr,
    uint8_t *rptr)
{

	chunk->chunk_write = chunk->chunk_wptr = wptr;
	chunk->chunk_read = chunk->chunk_rptr = rptr;
	chunk->chunk_rresid = chunk->chunk_wresid = chunk->chunk_count = cnt;
	chunk->chunk_next = NULL;
}

void
spi_transfer_add(struct spi_transfer *st, struct spi_chunk *chunk)
{
	struct spi_chunk **cpp;

	/* this is an O(n) insert -- perhaps we should use a simpleq? */
	for (cpp = &st->st_chunks; *cpp; cpp = &(*cpp)->chunk_next);
	*cpp = chunk;
}

int
spi_transfer(struct spi_handle *sh, struct spi_transfer *st)
{
	struct spi_softc	*sc = sh->sh_sc;
	struct spi_controller	*tag = sh->sh_controller;
	struct spi_chunk	*chunk;
	int error;

	/*
	 * Initialize "resid" counters and pointers, so that callers
	 * and bus drivers don't have to.
	 */
	for (chunk = st->st_chunks; chunk; chunk = chunk->chunk_next) {
		chunk->chunk_wresid = chunk->chunk_rresid = chunk->chunk_count;
		chunk->chunk_wptr = chunk->chunk_write;
		chunk->chunk_rptr = chunk->chunk_read;
	}

	/*
	 * Match slave and parameters to handle
	 */
	st->st_slave = sh->sh_slave;

	/*
	 * Reserve controller during transaction
	 */
	spi_acquire(sh);

	st->st_spiprivate = (void *)sh;

	/*
	 * Reconfigure controller
	 *
	 * XXX backends don't configure per-slave parameters
	 * Whenever we switch slaves or change mode or speed, we
	 * need to tell the backend.
	 */
	if (sc->sc_slave != sh->sh_slave
	    || sc->sc_mode != sh->sh_mode
	    || sc->sc_speed != sh->sh_speed) {
		error = (*tag->sct_configure)(tag->sct_cookie,
				sh->sh_slave, sh->sh_mode, sh->sh_speed);
		if (error) {
			/* don't leave the controller marked busy */
			spi_release(sh);
			return error;
		}
	}
	sc->sc_mode = sh->sh_mode;
	sc->sc_speed = sh->sh_speed;
	sc->sc_slave = sh->sh_slave;

	error = (*tag->sct_transfer)(tag->sct_cookie, st);

	return error;
}

void
spi_wait(struct spi_transfer *st)
{
	struct spi_handle *sh = st->st_spiprivate;

	mutex_enter(&st->st_lock);
	while (!(st->st_flags & SPI_F_DONE)) {
		cv_wait(&st->st_cv, &st->st_lock);
	}
	mutex_exit(&st->st_lock);
	cv_destroy(&st->st_cv);
	mutex_destroy(&st->st_lock);

	/*
	 * End transaction
	 */
	spi_release(sh);
}

void
spi_done(struct spi_transfer *st, int err)
{

	mutex_enter(&st->st_lock);
	if ((st->st_errno = err) != 0) {
		st->st_flags |= SPI_F_ERROR;
	}
	st->st_flags |= SPI_F_DONE;
	if (st->st_done != NULL) {
		(*st->st_done)(st);
	} else {
		cv_broadcast(&st->st_cv);
	}
	mutex_exit(&st->st_lock);
}

/*
 * Some convenience routines.  These routines block until the work
 * is done.
 *
 * spi_recv - receives data from the bus
 *
 * spi_send - sends data to the bus
 *
 * spi_send_recv - sends data to the bus, and then receives.  Note that this
 * is done synchronously, i.e. send a command and get the response.  This is
 * not full duplex.  If you want full duplex, you can't use these convenience
 * wrappers; see the sketch at the end of this file for building such a
 * transfer by hand.
 */
int
spi_recv(struct spi_handle *sh, int cnt, uint8_t *data)
{
	struct spi_transfer	trans;
	struct spi_chunk	chunk;

	spi_transfer_init(&trans);
	spi_chunk_init(&chunk, cnt, NULL, data);
	spi_transfer_add(&trans, &chunk);

	/* enqueue it and wait for it to complete */
	spi_transfer(sh, &trans);
	spi_wait(&trans);

	if (trans.st_flags & SPI_F_ERROR)
		return trans.st_errno;

	return 0;
}

int
spi_send(struct spi_handle *sh, int cnt, const uint8_t *data)
{
	struct spi_transfer	trans;
	struct spi_chunk	chunk;

	spi_transfer_init(&trans);
	spi_chunk_init(&chunk, cnt, data, NULL);
	spi_transfer_add(&trans, &chunk);

	/* enqueue it and wait for it to complete */
	spi_transfer(sh, &trans);
	spi_wait(&trans);

	if (trans.st_flags & SPI_F_ERROR)
		return trans.st_errno;

	return 0;
}

int
spi_send_recv(struct spi_handle *sh, int scnt, const uint8_t *snd,
    int rcnt, uint8_t *rcv)
{
	struct spi_transfer	trans;
	struct spi_chunk	chunk1, chunk2;

	spi_transfer_init(&trans);
	spi_chunk_init(&chunk1, scnt, snd, NULL);
	spi_chunk_init(&chunk2, rcnt, NULL, rcv);
	spi_transfer_add(&trans, &chunk1);
	spi_transfer_add(&trans, &chunk2);

	/* enqueue it and wait for it to complete */
	spi_transfer(sh, &trans);
	spi_wait(&trans);

	if (trans.st_flags & SPI_F_ERROR)
		return trans.st_errno;

	return 0;
}
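
/*
 * Illustrative sketch (not compiled): a full-duplex exchange, which the
 * convenience wrappers above cannot express.  A single chunk that carries
 * both a write and a read buffer shifts data out and captures the data
 * clocked back in at the same time.  The buffer contents are hypothetical.
 */
#if 0
static int
spi_full_duplex_example(struct spi_handle *sh)
{
	struct spi_transfer	trans;
	struct spi_chunk	chunk;
	uint8_t			out[4] = { 0x01, 0x02, 0x03, 0x04 };
	uint8_t			in[4];

	spi_transfer_init(&trans);
	/* Same count, both buffers: transmit and receive simultaneously. */
	spi_chunk_init(&chunk, sizeof out, out, in);
	spi_transfer_add(&trans, &chunk);

	/* enqueue it and wait for it to complete */
	spi_transfer(sh, &trans);
	spi_wait(&trans);

	if (trans.st_flags & SPI_F_ERROR)
		return trans.st_errno;

	return 0;
}
#endif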