      1 /* $NetBSD: arspi.c,v 1.14.2.1 2021/05/18 23:30:56 thorpej Exp $ */
      2 
      3 /*-
      4  * Copyright (c) 2006 Urbana-Champaign Independent Media Center.
      5  * Copyright (c) 2006 Garrett D'Amore.
      6  * All rights reserved.
      7  *
      8  * Portions of this code were written by Garrett D'Amore for the
      9  * Champaign-Urbana Community Wireless Network Project.
     10  *
     11  * Redistribution and use in source and binary forms, with or
     12  * without modification, are permitted provided that the following
     13  * conditions are met:
     14  * 1. Redistributions of source code must retain the above copyright
     15  *    notice, this list of conditions and the following disclaimer.
     16  * 2. Redistributions in binary form must reproduce the above
     17  *    copyright notice, this list of conditions and the following
     18  *    disclaimer in the documentation and/or other materials provided
     19  *    with the distribution.
     20  * 3. All advertising materials mentioning features or use of this
     21  *    software must display the following acknowledgements:
     22  *      This product includes software developed by the Urbana-Champaign
     23  *      Independent Media Center.
     24  *	This product includes software developed by Garrett D'Amore.
     25  * 4. Urbana-Champaign Independent Media Center's name and Garrett
     26  *    D'Amore's name may not be used to endorse or promote products
     27  *    derived from this software without specific prior written permission.
     28  *
     29  * THIS SOFTWARE IS PROVIDED BY THE URBANA-CHAMPAIGN INDEPENDENT
     30  * MEDIA CENTER AND GARRETT D'AMORE ``AS IS'' AND ANY EXPRESS OR
     31  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
     32  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     33  * ARE DISCLAIMED.  IN NO EVENT SHALL THE URBANA-CHAMPAIGN INDEPENDENT
     34  * MEDIA CENTER OR GARRETT D'AMORE BE LIABLE FOR ANY DIRECT, INDIRECT,
     35  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
     36  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
     37  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
     38  * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
     39  * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     40  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
     41  * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
     42  */
     43 
     44 #include <sys/cdefs.h>
     45 __KERNEL_RCSID(0, "$NetBSD: arspi.c,v 1.14.2.1 2021/05/18 23:30:56 thorpej Exp $");
     46 
     47 #include "locators.h"
     48 
     49 #include <sys/param.h>
     50 #include <sys/bus.h>
     51 #include <sys/cpu.h>
     52 #include <sys/device.h>
     53 #include <sys/errno.h>
     54 #include <sys/kernel.h>
     55 #include <sys/kmem.h>
     56 #include <sys/proc.h>
     57 #include <sys/systm.h>
     58 
     59 #include <mips/atheros/include/ar5315reg.h>
     60 #include <mips/atheros/include/arbusvar.h>
     61 
     62 #include <mips/atheros/dev/arspireg.h>
     63 
     64 #include <dev/spi/spiflash.h>
     65 #include <dev/spi/spivar.h>
     66 
     67 /*
     68  * This device is intended only to operate with specific SPI flash
     69  * parts, and is not a general purpose SPI host.  (Or at least if it
     70  * is, the Linux and eCos sources do not show how to use it as such.)
      71  * And the lack of documentation on the Atheros SoCs is less than helpful.
     72  *
     73  * So for now we just "emulate" enough of the host bus framework to
     74  * make the SPI flash drivers happy.
     75  */
     76 
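         /*
          * A transfer from the spiflash driver arrives as a chain of chunks:
          * the first write chunk carries the flash opcode and, for most
          * commands, a 3-byte address; any remaining chunks carry the data
          * to be programmed or the buffer to read into.  arspi_make_job()
          * parses that header into an arspi_job, and arspi_sched() and
          * arspi_done() then move the payload through the controller's
          * 4-byte data register, one controller operation at a time.
          */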
     77 struct arspi_job {
     78 	uint8_t			job_opcode;
     79 	struct spi_chunk	*job_chunk;
     80 	uint32_t		job_flags;
     81 	uint32_t		job_addr;
     82 	uint32_t		job_data;
     83 	int			job_rxcnt;
     84 	int			job_txcnt;
     85 	int			job_addrcnt;
     86 	int			job_rresid;
     87 	int			job_wresid;
     88 };
     89 
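         /*
          * JOB_WAIT and JOB_WREN mark extra controller operations interleaved
          * with a write job: between successive chunks the status register is
          * polled until the WIP bit clears (JOB_WAIT) and a fresh WREN is
          * issued (JOB_WREN).  JOB_LAST marks the final operation of a
          * transfer, after which the transfer is completed with spi_done().
          */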
     90 #define	JOB_READ		0x1
     91 #define	JOB_WRITE		0x2
     92 #define	JOB_LAST		0x4
     93 #define	JOB_WAIT		0x8	/* job must wait for WIP bits */
     94 #define	JOB_WREN		0x10	/* WREN needed */
     95 
     96 struct arspi_softc {
     97 	struct spi_controller	sc_spi;
     98 	void			*sc_ih;
     99 	bool			sc_interrupts;
    100 
    101 	struct spi_transfer	*sc_transfer;
    102 	struct spi_chunk	*sc_wchunk;	/* for partial writes */
    103 	struct spi_transq	sc_transq;
    104 	bus_space_tag_t		sc_st;
    105 	bus_space_handle_t	sc_sh;
    106 	bus_size_t		sc_size;
    107 };
    108 
    109 #define	STATIC
    110 
    111 STATIC int arspi_match(device_t, cfdata_t, void *);
    112 STATIC void arspi_attach(device_t, device_t, void *);
    113 STATIC void arspi_interrupts(device_t);
    114 STATIC int arspi_intr(void *);
    115 /* SPI service routines */
    116 STATIC int arspi_configure(void *, int, int, int);
    117 STATIC int arspi_transfer(void *, struct spi_transfer *);
    118 /* internal support */
    119 STATIC void arspi_poll(struct arspi_softc *);
    120 STATIC void arspi_done(struct arspi_softc *, int);
    121 STATIC void arspi_sched(struct arspi_softc *);
    122 STATIC int arspi_get_byte(struct spi_chunk **, uint8_t *);
    123 STATIC int arspi_put_byte(struct spi_chunk **, uint8_t);
    124 STATIC int arspi_make_job(struct spi_transfer *);
    125 STATIC void arspi_update_job(struct spi_transfer *);
    126 STATIC void arspi_finish_job(struct spi_transfer *);
    127 
    128 
    129 CFATTACH_DECL_NEW(arspi, sizeof(struct arspi_softc),
    130     arspi_match, arspi_attach, NULL, NULL);
    131 
    132 #define	GETREG(sc, o)		bus_space_read_4(sc->sc_st, sc->sc_sh, o)
    133 #define	PUTREG(sc, o, v)	bus_space_write_4(sc->sc_st, sc->sc_sh, o, v)
    134 
    135 int
    136 arspi_match(device_t parent, cfdata_t cf, void *aux)
    137 {
    138 	struct arbus_attach_args *aa = aux;
    139 
    140 	if (strcmp(aa->aa_name, cf->cf_name) != 0)
    141 		return 0;
    142 	return 1;
    143 }
    144 
    145 void
    146 arspi_attach(device_t parent, device_t self, void *aux)
    147 {
    148 	struct arspi_softc *sc = device_private(self);
    149 	struct arbus_attach_args *aa = aux;
    150 
    151 	/*
    152 	 * Map registers.
    153 	 */
    154 	sc->sc_st = aa->aa_bst;
    155 	sc->sc_size = aa->aa_size;
    156 	if (bus_space_map(sc->sc_st, aa->aa_addr, sc->sc_size, 0,
    157 		&sc->sc_sh) != 0) {
    158 		printf(": unable to map registers!\n");
    159 		return;
    160 	}
    161 
    162 	aprint_normal(": Atheros SPI controller\n");
    163 
    164 	/*
    165 	 * Initialize SPI controller.
    166 	 */
    167 	sc->sc_spi.sct_cookie = sc;
    168 	sc->sc_spi.sct_configure = arspi_configure;
    169 	sc->sc_spi.sct_transfer = arspi_transfer;
    170 	sc->sc_spi.sct_nslaves = 1;
    171 
    172 
    173 	/*
    174 	 * Initialize the queue.
    175 	 */
    176 	spi_transq_init(&sc->sc_transq);
    177 
    178 	/*
    179 	 * Enable device interrupts.
    180 	 */
    181 	sc->sc_ih = arbus_intr_establish(aa->aa_cirq, aa->aa_mirq,
    182 	    arspi_intr, sc);
    183 	if (sc->sc_ih == NULL) {
    184 		aprint_error("%s: couldn't establish interrupt\n",
    185 		    device_xname(self));
    186 		/* just leave it in polled mode */
    187 	} else
    188 		config_interrupts(self, arspi_interrupts);
    189 
    190 	/*
     191 	 * Initialize and attach the SPI bus.
    192 	 */
    193 	struct spibus_attach_args sba = {
    194 		.sba_controller = &sc->sc_spi,
    195 	};
    196 	config_found(self, &sba, spibus_print,
    197 	    CFARG_DEVHANDLE, device_handle(self),
    198 	    CFARG_EOL);
    199 }
    200 
    201 void
    202 arspi_interrupts(device_t self)
    203 {
    204 	/*
    205 	 * we never leave polling mode, because, apparently, we
    206 	 * are missing some data about how to drive the SPI in interrupt
    207 	 * mode.
    208 	 */
    209 #if 0
    210 	struct arspi_softc *sc = device_private(self);
    211 	int	s;
    212 
    213 	s = splbio();
    214 	sc->sc_interrupts = true;
    215 	splx(s);
    216 #endif
    217 }
    218 
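         /*
          * arspi_intr() doubles as the polling loop body: it busy-waits until
          * the controller clears ARSPI_CTL_BUSY, then advances the current
          * transfer via arspi_done().
          */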
    219 int
    220 arspi_intr(void *arg)
    221 {
    222 	struct arspi_softc *sc = arg;
    223 
    224 	while (GETREG(sc, ARSPI_REG_CTL) & ARSPI_CTL_BUSY);
    225 
    226 	arspi_done(sc, 0);
    227 
    228 	return 1;
    229 }
    230 
    231 void
    232 arspi_poll(struct arspi_softc *sc)
    233 {
    234 
    235 	while (sc->sc_transfer) {
    236 		arspi_intr(sc);
    237 	}
    238 }
    239 
    240 int
    241 arspi_configure(void *cookie, int slave, int mode, int speed)
    242 {
    243 
    244 	/*
    245 	 * We don't support the full SPI protocol, and hopefully the
    246 	 * firmware has programmed a reasonable mode already.  So
    247 	 * just a couple of quick sanity checks, then bail.
    248 	 */
    249 	if ((mode != 0) || (slave != 0))
    250 		return EINVAL;
    251 
    252 	return 0;
    253 }
    254 
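         /*
          * Queue a transfer.  If nothing else is in progress it is started
          * immediately; in polled mode we then spin in arspi_poll() until the
          * whole queue has drained before returning.
          */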
    255 int
    256 arspi_transfer(void *cookie, struct spi_transfer *st)
    257 {
    258 	struct arspi_softc *sc = cookie;
    259 	int rv;
    260 	int s;
    261 
    262 	st->st_busprivate = NULL;
    263 	if ((rv = arspi_make_job(st)) != 0) {
    264 		if (st->st_busprivate) {
    265 			struct arspi_job *job = st->st_busprivate;
    266 			st->st_busprivate = NULL;
    267 			kmem_free(job, sizeof(*job));
    268 		}
    269 		spi_done(st, rv);
    270 		return rv;
    271 	}
    272 
    273 	s = splbio();
    274 	spi_transq_enqueue(&sc->sc_transq, st);
    275 	if (sc->sc_transfer == NULL) {
    276 		arspi_sched(sc);
    277 		if (!sc->sc_interrupts)
    278 			arspi_poll(sc);
    279 	}
    280 	splx(s);
    281 	return 0;
    282 }
    283 
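         /*
          * Program the controller for the next operation of the current (or
          * next queued) transfer.  Only one operation is started per call;
          * arspi_done() runs when it finishes and calls back here for the
          * rest.  The opcode register takes the flash opcode in its low byte
          * with the (up to 3-byte) address packed above it, and the
          * TXCNT/RXCNT fields of the control register count the opcode,
          * address and data bytes to shift out and in.
          */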
    284 void
    285 arspi_sched(struct arspi_softc *sc)
    286 {
    287 	struct spi_transfer *st;
    288 	struct arspi_job *job;
    289 	uint32_t ctl, cnt;
    290 
    291 	for (;;) {
    292 		if ((st = sc->sc_transfer) == NULL) {
    293 			if ((st = spi_transq_first(&sc->sc_transq)) == NULL) {
    294 				/* no work left to do */
    295 				break;
    296 			}
    297 			spi_transq_dequeue(&sc->sc_transq);
    298 			sc->sc_transfer = st;
    299 		}
    300 
    301 		arspi_update_job(st);
    302 		job = st->st_busprivate;
    303 
    304 		/* there shouldn't be anything running, but ensure it */
    305 		do {
    306 			ctl = GETREG(sc, ARSPI_REG_CTL);
     307 		} while (ctl & ARSPI_CTL_BUSY);
    308 		/* clear all of the tx and rx bits */
    309 		ctl &= ~(ARSPI_CTL_TXCNT_MASK | ARSPI_CTL_RXCNT_MASK);
    310 
    311 		if (job->job_flags & JOB_WAIT) {
    312 			PUTREG(sc, ARSPI_REG_OPCODE, SPIFLASH_CMD_RDSR);
    313 			/* only the opcode for tx */
    314 			ctl |= (1 << ARSPI_CTL_TXCNT_SHIFT);
    315 			/* and one rx byte */
    316 			ctl |= (1 << ARSPI_CTL_RXCNT_SHIFT);
    317 		} else if (job->job_flags & JOB_WREN) {
    318 			PUTREG(sc, ARSPI_REG_OPCODE, SPIFLASH_CMD_WREN);
    319 			/* just the opcode */
    320 			ctl |= (1 << ARSPI_CTL_TXCNT_SHIFT);
    321 			/* no rx bytes */
    322 		} else {
    323 			/* set the data */
    324 			PUTREG(sc, ARSPI_REG_DATA, job->job_data);
    325 
    326 			/* set the opcode and the address */
    327 			PUTREG(sc, ARSPI_REG_OPCODE, job->job_opcode |
    328 			    (job->job_addr << 8));
    329 
    330 			/* now set txcnt */
    331 			cnt = 1;	/* opcode */
    332 			cnt += job->job_addrcnt + job->job_txcnt;
    333 			ctl |= (cnt << ARSPI_CTL_TXCNT_SHIFT);
    334 
    335 			/* now set rxcnt */
    336 			cnt = job->job_rxcnt;
    337 			ctl |= (cnt << ARSPI_CTL_RXCNT_SHIFT);
    338 		}
    339 
    340 		/* set the start bit */
    341 		ctl |= ARSPI_CTL_START;
    342 
    343 		PUTREG(sc, ARSPI_REG_CTL, ctl);
    344 		break;
    345 	}
    346 }
    347 
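         /*
          * Handle completion of a controller operation.  For write jobs this
          * also steps through the intermediate RDSR-poll and WREN operations;
          * once the last operation of a transfer finishes, the transfer is
          * completed with spi_done() and the next one is scheduled.
          */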
    348 void
    349 arspi_done(struct arspi_softc *sc, int err)
    350 {
    351 	struct spi_transfer *st;
    352 	struct arspi_job *job;
    353 
    354 	if ((st = sc->sc_transfer) != NULL) {
    355 		job = st->st_busprivate;
    356 
    357 		if (job->job_flags & JOB_WAIT) {
    358 			if (err == 0) {
    359 				if ((GETREG(sc, ARSPI_REG_DATA) &
    360 				    SPIFLASH_SR_BUSY) == 0) {
    361 					/* intermediate wait done */
    362 					job->job_flags &= ~JOB_WAIT;
    363 					goto done;
    364 				}
    365 			}
    366 		} else if (job->job_flags & JOB_WREN) {
    367 			if (err == 0) {
    368 				job->job_flags &= ~JOB_WREN;
    369 				goto done;
    370 			}
    371 		} else if (err == 0) {
    372 			/*
    373 			 * When breaking up write jobs, we have to wait until
    374 			 * the WIP bit is clear, and we have to separately
    375 			 * send WREN for each chunk.  These flags facilitate
    376 			 * that.
    377 			 */
    378 			if (job->job_flags & JOB_WRITE)
    379 				job->job_flags |= (JOB_WAIT | JOB_WREN);
    380 			job->job_data = GETREG(sc, ARSPI_REG_DATA);
    381 			arspi_finish_job(st);
    382 		}
    383 
    384 		if (err || (job->job_flags & JOB_LAST)) {
    385 			sc->sc_transfer = NULL;
    386 			st->st_busprivate = NULL;
    387 			spi_done(st, err);
    388 			kmem_free(job, sizeof(*job));
    389 		}
    390 	}
    391 done:
    392 	arspi_sched(sc);
    393 }
    394 
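         /*
          * arspi_get_byte()/arspi_put_byte() walk the transfer's chunk list
          * one byte at a time: write chunks supply bytes to transmit, read
          * chunks receive bytes coming back.  A chunk with both directions
          * set is rejected, since SPI flash commands are strictly half
          * duplex.
          */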
    395 int
    396 arspi_get_byte(struct spi_chunk **chunkp, uint8_t *bytep)
    397 {
    398 	struct spi_chunk *chunk;
    399 
    400 	chunk = *chunkp;
    401 
    402 	/* skip leading empty (or already consumed) chunks */
    403 	while (chunk && chunk->chunk_wresid == 0)
    404 		chunk = chunk->chunk_next;
    405 
    406 	if (chunk == NULL) {
    407 		return ENODATA;
    408 	}
    409 
    410 	/*
    411 	 * chunk must be write only.  SPI flash doesn't support
    412 	 * any full duplex operations.
    413 	 */
    414 	if ((chunk->chunk_rptr) || !(chunk->chunk_wptr)) {
    415 		return EINVAL;
    416 	}
    417 
    418 	*bytep = *chunk->chunk_wptr;
    419 	chunk->chunk_wptr++;
    420 	chunk->chunk_wresid--;
    421 	chunk->chunk_rresid--;
    422 	/* clearing wptr and rptr makes sanity checks later easier */
    423 	if (chunk->chunk_wresid == 0)
    424 		chunk->chunk_wptr = NULL;
    425 	if (chunk->chunk_rresid == 0)
    426 		chunk->chunk_rptr = NULL;
    427 	while (chunk && chunk->chunk_wresid == 0)
    428 		chunk = chunk->chunk_next;
    429 
    430 	*chunkp = chunk;
    431 	return 0;
    432 }
    433 
    434 int
    435 arspi_put_byte(struct spi_chunk **chunkp, uint8_t byte)
    436 {
    437 	struct spi_chunk *chunk;
    438 
    439 	chunk = *chunkp;
    440 
    441 	/* skip leading empty (or already consumed) chunks */
    442 	while (chunk && chunk->chunk_rresid == 0)
    443 		chunk = chunk->chunk_next;
    444 
    445 	if (chunk == NULL) {
    446 		return EOVERFLOW;
    447 	}
    448 
    449 	/*
    450 	 * chunk must be read only.  SPI flash doesn't support
    451 	 * any full duplex operations.
    452 	 */
    453 	if ((chunk->chunk_wptr) || !(chunk->chunk_rptr)) {
    454 		return EINVAL;
    455 	}
    456 
    457 	*chunk->chunk_rptr = byte;
    458 	chunk->chunk_rptr++;
    459 	chunk->chunk_wresid--;	/* technically this was done at send time */
    460 	chunk->chunk_rresid--;
    461 	while (chunk && chunk->chunk_rresid == 0)
    462 		chunk = chunk->chunk_next;
    463 
    464 	*chunkp = chunk;
    465 	return 0;
    466 }
    467 
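         /*
          * Parse the opcode and address bytes at the head of the transfer
          * into an arspi_job, record how many address/tx/rx bytes the command
          * needs, and total up the remaining read and write residuals.
          * READFAST is rewritten as an ordinary READ once its dummy byte has
          * been consumed.
          */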
    468 int
    469 arspi_make_job(struct spi_transfer *st)
    470 {
    471 	struct arspi_job *job;
    472 	struct spi_chunk *chunk;
    473 	uint8_t byte;
    474 	int i, rv;
    475 
    476 	job = kmem_zalloc(sizeof (struct arspi_job), KM_SLEEP);
    477 
    478 	st->st_busprivate = job;
    479 
    480 	/* skip any leading empty chunks (should not be any!) */
    481 	chunk = st->st_chunks;
    482 
    483 	/* get transfer opcode */
    484 	if ((rv = arspi_get_byte(&chunk, &byte)) != 0)
    485 		return rv;
    486 
    487 	job->job_opcode = byte;
    488 	switch (job->job_opcode) {
    489 	case SPIFLASH_CMD_WREN:
    490 	case SPIFLASH_CMD_WRDI:
    491 	case SPIFLASH_CMD_CHIPERASE:
    492 		break;
    493 	case SPIFLASH_CMD_RDJI:
    494 		job->job_rxcnt = 3;
    495 		break;
    496 	case SPIFLASH_CMD_RDSR:
    497 		job->job_rxcnt = 1;
    498 		break;
    499 	case SPIFLASH_CMD_WRSR:
    500 		/*
    501 		 * is this in data, or in address?  stick it in data
    502 		 * for now.
    503 		 */
    504 		job->job_txcnt = 1;
    505 		break;
    506 	case SPIFLASH_CMD_RDID:
    507 		job->job_addrcnt = 3;	/* 3 dummy bytes */
    508 		job->job_rxcnt = 1;
    509 		break;
    510 	case SPIFLASH_CMD_ERASE:
    511 		job->job_addrcnt = 3;
    512 		break;
    513 	case SPIFLASH_CMD_READ:
    514 		job->job_addrcnt = 3;
    515 		job->job_flags |= JOB_READ;
    516 		break;
    517 	case SPIFLASH_CMD_PROGRAM:
    518 		job->job_addrcnt = 3;
    519 		job->job_flags |= JOB_WRITE;
    520 		break;
    521 	case SPIFLASH_CMD_READFAST:
    522 		/*
    523 		 * This is a pain in the arse to support, so we will
    524 		 * rewrite as an ordinary read.  But later, after we
    525 		 * obtain the address.
    526 		 */
     527 		job->job_addrcnt = 3;	/* 3 address bytes */
    528 		job->job_flags |= JOB_READ;
    529 		break;
    530 	default:
    531 		return EINVAL;
    532 	}
    533 
    534 	for (i = 0; i < job->job_addrcnt; i++) {
    535 		if ((rv = arspi_get_byte(&chunk, &byte)) != 0)
    536 			return rv;
    537 		job->job_addr <<= 8;
    538 		job->job_addr |= byte;
    539 	}
    540 
    541 
    542 	if (job->job_opcode == SPIFLASH_CMD_READFAST) {
    543 		/* eat the dummy timing byte */
    544 		if ((rv = arspi_get_byte(&chunk, &byte)) != 0)
    545 			return rv;
    546 		/* rewrite this as a read */
    547 		job->job_opcode = SPIFLASH_CMD_READ;
    548 	}
    549 
    550 	job->job_chunk = chunk;
    551 
    552 	/*
     553 	 * Now quickly check a few other things.  Namely, we are not
    554 	 * allowed to have both READ and WRITE.
    555 	 */
    556 	for (chunk = job->job_chunk; chunk; chunk = chunk->chunk_next) {
    557 		if (chunk->chunk_wptr) {
    558 			job->job_wresid += chunk->chunk_wresid;
    559 		}
    560 		if (chunk->chunk_rptr) {
    561 			job->job_rresid += chunk->chunk_rresid;
    562 		}
    563 	}
    564 
    565 	if (job->job_rresid && job->job_wresid) {
    566 		return EINVAL;
    567 	}
    568 
    569 	return 0;
    570 }
    571 
    572 /*
    573  * NB: The Atheros SPI controller runs in little endian mode. So all
    574  * data accesses must be swapped appropriately.
    575  *
    576  * The controller auto-swaps read accesses done through the mapped memory
    577  * region, but when using SPI directly, we have to do the right thing to
    578  * swap to or from little endian.
    579  */
    580 
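         /*
          * Set up the next (up to 4-byte) data phase of a job.  TX bytes are
          * packed into job_data least significant byte first, so for example
          * a 4-byte program chunk { 0xaa, 0xbb, 0xcc, 0xdd } is loaded into
          * the data register as 0xddccbbaa.  JOB_LAST is set once both
          * residuals reach zero.
          */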
    581 void
    582 arspi_update_job(struct spi_transfer *st)
    583 {
    584 	struct arspi_job *job = st->st_busprivate;
    585 	uint8_t byte;
    586 	int i;
    587 
    588 	if (job->job_flags & (JOB_WAIT|JOB_WREN))
    589 		return;
    590 
    591 	job->job_rxcnt = 0;
    592 	job->job_txcnt = 0;
    593 	job->job_data = 0;
    594 
    595 	job->job_txcnt = uimin(job->job_wresid, 4);
    596 	job->job_rxcnt = uimin(job->job_rresid, 4);
    597 
    598 	job->job_wresid -= job->job_txcnt;
    599 	job->job_rresid -= job->job_rxcnt;
    600 
    601 	for (i = 0; i < job->job_txcnt; i++) {
    602 		arspi_get_byte(&job->job_chunk, &byte);
    603 		job->job_data |= (byte << (i * 8));
    604 	}
    605 
    606 	if ((!job->job_wresid) && (!job->job_rresid)) {
    607 		job->job_flags |= JOB_LAST;
    608 	}
    609 }
    610 
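         /*
          * Complete the data phase that just finished: advance the flash
          * address by the number of bytes moved and unpack any received
          * bytes from job_data (least significant byte first) into the read
          * chunks.
          */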
    611 void
    612 arspi_finish_job(struct spi_transfer *st)
    613 {
    614 	struct arspi_job *job = st->st_busprivate;
    615 	uint8_t	byte;
    616 	int i;
    617 
    618 	job->job_addr += job->job_rxcnt;
    619 	job->job_addr += job->job_txcnt;
    620 	for (i = 0; i < job->job_rxcnt; i++) {
    621 		byte = job->job_data & 0xff;
    622 		job->job_data >>= 8;
    623 		arspi_put_byte(&job->job_chunk, byte);
    624 	}
    625 }
    626 
    627