arspi.c revision 1.15.2.1
/* $NetBSD: arspi.c,v 1.15.2.1 2021/08/09 00:30:08 thorpej Exp $ */

/*-
 * Copyright (c) 2006 Urbana-Champaign Independent Media Center.
 * Copyright (c) 2006 Garrett D'Amore.
 * All rights reserved.
 *
 * Portions of this code were written by Garrett D'Amore for the
 * Champaign-Urbana Community Wireless Network Project.
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials provided
 *    with the distribution.
 * 3. All advertising materials mentioning features or use of this
 *    software must display the following acknowledgements:
 *      This product includes software developed by the Urbana-Champaign
 *      Independent Media Center.
 *	This product includes software developed by Garrett D'Amore.
 * 4. Urbana-Champaign Independent Media Center's name and Garrett
 *    D'Amore's name may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE URBANA-CHAMPAIGN INDEPENDENT
 * MEDIA CENTER AND GARRETT D'AMORE ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE URBANA-CHAMPAIGN INDEPENDENT
 * MEDIA CENTER OR GARRETT D'AMORE BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: arspi.c,v 1.15.2.1 2021/08/09 00:30:08 thorpej Exp $");

#include "locators.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/cpu.h>
#include <sys/device.h>
#include <sys/errno.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/proc.h>
#include <sys/systm.h>

#include <mips/atheros/include/ar5315reg.h>
#include <mips/atheros/include/arbusvar.h>

#include <mips/atheros/dev/arspireg.h>

#include <dev/spi/spiflash.h>
#include <dev/spi/spivar.h>

/*
 * This device is intended only to operate with specific SPI flash
 * parts, and is not a general purpose SPI host.  (Or at least if it
 * is, the Linux and eCos sources do not show how to use it as such.)
 * And the lack of documentation on the Atheros SoCs is less than helpful.
 *
 * So for now we just "emulate" enough of the host bus framework to
 * make the SPI flash drivers happy.
 */
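
/*
 * Rough flow of the code below (derived from this file, not from any
 * hardware documentation):
 *
 *	arspi_transfer()	parse opcode/address via arspi_make_job()
 *				and queue the transfer
 *	arspi_sched()		program the opcode/data/control registers
 *				for the current job and set ARSPI_CTL_START
 *	arspi_intr()		wait for ARSPI_CTL_BUSY to clear, then call
 *				arspi_done(), which completes the job or
 *				schedules the next 4-byte piece
 */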

struct arspi_job {
	uint8_t			job_opcode;
	struct spi_chunk	*job_chunk;
	uint32_t		job_flags;
	uint32_t		job_addr;
	uint32_t		job_data;
	int			job_rxcnt;
	int			job_txcnt;
	int			job_addrcnt;
	int			job_rresid;
	int			job_wresid;
};

#define	JOB_READ		0x1
#define	JOB_WRITE		0x2
#define	JOB_LAST		0x4
#define	JOB_WAIT		0x8	/* job must wait for WIP bits */
#define	JOB_WREN		0x10	/* WREN needed */
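
/*
 * A PROGRAM transfer larger than the 4-byte data register is split into
 * several jobs.  Illustrative example: programming 8 data bytes runs as
 *
 *	PROGRAM addr, bytes 0-3
 *	RDSR (repeated until the WIP bit clears)	-> JOB_WAIT
 *	WREN						-> JOB_WREN
 *	PROGRAM addr+4, bytes 4-7			-> JOB_LAST
 *
 * JOB_WAIT and JOB_WREN mark those intermediate steps; JOB_LAST marks the
 * piece that completes the transfer (see arspi_done()).
 */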

struct arspi_softc {
	struct spi_controller	sc_spi;
	void			*sc_ih;
	bool			sc_interrupts;

	struct spi_transfer	*sc_transfer;
	struct spi_chunk	*sc_wchunk;	/* for partial writes */
	struct spi_transq	sc_transq;
	bus_space_tag_t		sc_st;
	bus_space_handle_t	sc_sh;
	bus_size_t		sc_size;
};
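
/*
 * sc_transfer is the transfer currently being executed; anything else
 * waits on sc_transq.  sc_wchunk is declared for partial writes but does
 * not appear to be used below; partial writes are instead tracked through
 * the per-transfer arspi_job.
 */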

#define	STATIC

STATIC int arspi_match(device_t, cfdata_t, void *);
STATIC void arspi_attach(device_t, device_t, void *);
STATIC void arspi_interrupts(device_t);
STATIC int arspi_intr(void *);
/* SPI service routines */
STATIC int arspi_configure(void *, int, int, int);
STATIC int arspi_transfer(void *, struct spi_transfer *);
/* internal support */
STATIC void arspi_poll(struct arspi_softc *);
STATIC void arspi_done(struct arspi_softc *, int);
STATIC void arspi_sched(struct arspi_softc *);
STATIC int arspi_get_byte(struct spi_chunk **, uint8_t *);
STATIC int arspi_put_byte(struct spi_chunk **, uint8_t);
STATIC int arspi_make_job(struct spi_transfer *);
STATIC void arspi_update_job(struct spi_transfer *);
STATIC void arspi_finish_job(struct spi_transfer *);

CFATTACH_DECL_NEW(arspi, sizeof(struct arspi_softc),
    arspi_match, arspi_attach, NULL, NULL);

#define	GETREG(sc, o)		bus_space_read_4(sc->sc_st, sc->sc_sh, o)
#define	PUTREG(sc, o, v)	bus_space_write_4(sc->sc_st, sc->sc_sh, o, v)

int
arspi_match(device_t parent, cfdata_t cf, void *aux)
{
	struct arbus_attach_args *aa = aux;

	if (strcmp(aa->aa_name, cf->cf_name) != 0)
		return 0;
	return 1;
}

void
arspi_attach(device_t parent, device_t self, void *aux)
{
	struct arspi_softc *sc = device_private(self);
	struct arbus_attach_args *aa = aux;

	/*
	 * Map registers.
	 */
	sc->sc_st = aa->aa_bst;
	sc->sc_size = aa->aa_size;
	if (bus_space_map(sc->sc_st, aa->aa_addr, sc->sc_size, 0,
		&sc->sc_sh) != 0) {
		printf(": unable to map registers!\n");
		return;
	}

	aprint_normal(": Atheros SPI controller\n");

	/*
	 * Initialize SPI controller.
	 */
	sc->sc_spi.sct_cookie = sc;
	sc->sc_spi.sct_configure = arspi_configure;
	sc->sc_spi.sct_transfer = arspi_transfer;
	sc->sc_spi.sct_nslaves = 1;

	/*
	 * Initialize the queue.
	 */
	spi_transq_init(&sc->sc_transq);

	/*
	 * Enable device interrupts.
	 */
	sc->sc_ih = arbus_intr_establish(aa->aa_cirq, aa->aa_mirq,
	    arspi_intr, sc);
	if (sc->sc_ih == NULL) {
		aprint_error("%s: couldn't establish interrupt\n",
		    device_xname(self));
		/* just leave it in polled mode */
	} else
		config_interrupts(self, arspi_interrupts);

	/*
	 * Initialize and attach the SPI bus.
	 */
	struct spibus_attach_args sba = {
		.sba_controller = &sc->sc_spi,
	};
	config_found(self, &sba, spibus_print,
	    CFARGS(.devhandle = device_handle(self)));
}

void
arspi_interrupts(device_t self)
{
	/*
	 * We never leave polling mode because we are apparently missing
	 * the information needed to drive the SPI controller in interrupt
	 * mode.
	 */
#if 0
	struct arspi_softc *sc = device_private(self);
	int	s;

	s = splbio();
	sc->sc_interrupts = true;
	splx(s);
#endif
}
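
/*
 * Because sc_interrupts never becomes true, every transfer completes
 * synchronously: arspi_transfer() calls arspi_poll(), which keeps
 * invoking arspi_intr() until sc_transfer goes to NULL.
 */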

int
arspi_intr(void *arg)
{
	struct arspi_softc *sc = arg;

	/* in polled mode, spin until the controller is no longer busy */
	while (GETREG(sc, ARSPI_REG_CTL) & ARSPI_CTL_BUSY)
		continue;

	arspi_done(sc, 0);

	return 1;
}

void
arspi_poll(struct arspi_softc *sc)
{

	while (sc->sc_transfer) {
		arspi_intr(sc);
	}
}

int
arspi_configure(void *cookie, int slave, int mode, int speed)
{

	/*
	 * We don't support the full SPI protocol, and hopefully the
	 * firmware has programmed a reasonable mode already.  So
	 * just a couple of quick sanity checks, then bail.
	 */
	if ((mode != 0) || (slave != 0))
		return EINVAL;

	return 0;
}
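
/*
 * In other words, only slave 0 in SPI mode 0 is accepted, and the clock
 * configuration left behind by the firmware is used as-is (the speed
 * argument is ignored).
 */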

int
arspi_transfer(void *cookie, struct spi_transfer *st)
{
	struct arspi_softc *sc = cookie;
	int rv;
	int s;

	st->st_busprivate = NULL;
	if ((rv = arspi_make_job(st)) != 0) {
		if (st->st_busprivate) {
			struct arspi_job *job = st->st_busprivate;
			st->st_busprivate = NULL;
			kmem_free(job, sizeof(*job));
		}
		spi_done(st, rv);
		return rv;
	}

	s = splbio();
	spi_transq_enqueue(&sc->sc_transq, st);
	if (sc->sc_transfer == NULL) {
		arspi_sched(sc);
		if (!sc->sc_interrupts)
			arspi_poll(sc);
	}
	splx(s);
	return 0;
}
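
/*
 * Illustrative example (an assumption about the caller, not taken from
 * documentation): a 16-byte READ from the spiflash driver typically
 * arrives as a transfer with two chunks,
 *
 *	chunk 0: write-only, 4 bytes: opcode + 3 address bytes
 *	chunk 1: read-only, 16 bytes: the caller's data buffer
 *
 * arspi_make_job() consumes the opcode and address from the first chunk;
 * the data bytes are then moved four at a time by arspi_update_job() and
 * arspi_finish_job().
 */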

void
arspi_sched(struct arspi_softc *sc)
{
	struct spi_transfer *st;
	struct arspi_job *job;
	uint32_t ctl, cnt;

	for (;;) {
		if ((st = sc->sc_transfer) == NULL) {
			if ((st = spi_transq_first(&sc->sc_transq)) == NULL) {
				/* no work left to do */
				break;
			}
			spi_transq_dequeue(&sc->sc_transq);
			sc->sc_transfer = st;
		}

		arspi_update_job(st);
		job = st->st_busprivate;

		/* there shouldn't be anything running, but ensure it */
		do {
			ctl = GETREG(sc, ARSPI_REG_CTL);
		} while (ctl & ARSPI_CTL_BUSY);
		/* clear all of the tx and rx bits */
		ctl &= ~(ARSPI_CTL_TXCNT_MASK | ARSPI_CTL_RXCNT_MASK);

		if (job->job_flags & JOB_WAIT) {
			PUTREG(sc, ARSPI_REG_OPCODE, SPIFLASH_CMD_RDSR);
			/* only the opcode for tx */
			ctl |= (1 << ARSPI_CTL_TXCNT_SHIFT);
			/* and one rx byte */
			ctl |= (1 << ARSPI_CTL_RXCNT_SHIFT);
		} else if (job->job_flags & JOB_WREN) {
			PUTREG(sc, ARSPI_REG_OPCODE, SPIFLASH_CMD_WREN);
			/* just the opcode */
			ctl |= (1 << ARSPI_CTL_TXCNT_SHIFT);
			/* no rx bytes */
		} else {
			/* set the data */
			PUTREG(sc, ARSPI_REG_DATA, job->job_data);

			/* set the opcode and the address */
			PUTREG(sc, ARSPI_REG_OPCODE, job->job_opcode |
			    (job->job_addr << 8));

			/* now set txcnt */
			cnt = 1;	/* opcode */
			cnt += job->job_addrcnt + job->job_txcnt;
			ctl |= (cnt << ARSPI_CTL_TXCNT_SHIFT);

			/* now set rxcnt */
			cnt = job->job_rxcnt;
			ctl |= (cnt << ARSPI_CTL_RXCNT_SHIFT);
		}

		/* set the start bit */
		ctl |= ARSPI_CTL_START;

		PUTREG(sc, ARSPI_REG_CTL, ctl);
		break;
	}
}
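
/*
 * Register use above (inferred from this driver, not from a datasheet):
 * the opcode register holds the command byte in its low byte with up to
 * three address bytes above it, the data register holds up to four data
 * bytes, and the control register packs the transmit count (opcode +
 * address + data bytes) and receive count next to the BUSY and START
 * bits.
 */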

void
arspi_done(struct arspi_softc *sc, int err)
{
	struct spi_transfer *st;
	struct arspi_job *job;

	if ((st = sc->sc_transfer) != NULL) {
		job = st->st_busprivate;

		if (job->job_flags & JOB_WAIT) {
			if (err == 0) {
				if ((GETREG(sc, ARSPI_REG_DATA) &
				    SPIFLASH_SR_BUSY) == 0) {
					/* intermediate wait done */
					job->job_flags &= ~JOB_WAIT;
					goto done;
				}
			}
		} else if (job->job_flags & JOB_WREN) {
			if (err == 0) {
				job->job_flags &= ~JOB_WREN;
				goto done;
			}
		} else if (err == 0) {
			/*
			 * When breaking up write jobs, we have to wait until
			 * the WIP bit is clear, and we have to separately
			 * send WREN for each chunk.  These flags facilitate
			 * that.
			 */
			if (job->job_flags & JOB_WRITE)
				job->job_flags |= (JOB_WAIT | JOB_WREN);
			job->job_data = GETREG(sc, ARSPI_REG_DATA);
			arspi_finish_job(st);
		}

		if (err || (job->job_flags & JOB_LAST)) {
			sc->sc_transfer = NULL;
			st->st_busprivate = NULL;
			spi_done(st, err);
			kmem_free(job, sizeof(*job));
		}
	}
done:
	arspi_sched(sc);
}
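
/*
 * The code above amounts to a small per-job state machine: JOB_WAIT
 * (poll RDSR until the WIP bit clears), then JOB_WREN (issue WREN), then
 * the next 4-byte data piece.  Only when JOB_LAST is set, or on error,
 * is the transfer handed back through spi_done().
 */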

int
arspi_get_byte(struct spi_chunk **chunkp, uint8_t *bytep)
{
	struct spi_chunk *chunk;

	chunk = *chunkp;

	/* skip leading empty (or already consumed) chunks */
	while (chunk && chunk->chunk_wresid == 0)
		chunk = chunk->chunk_next;

	if (chunk == NULL) {
		return ENODATA;
	}

	/*
	 * chunk must be write only.  SPI flash doesn't support
	 * any full duplex operations.
	 */
	if ((chunk->chunk_rptr) || !(chunk->chunk_wptr)) {
		return EINVAL;
	}

	*bytep = *chunk->chunk_wptr;
	chunk->chunk_wptr++;
	chunk->chunk_wresid--;
	chunk->chunk_rresid--;
	/* clearing wptr and rptr makes sanity checks later easier */
	if (chunk->chunk_wresid == 0)
		chunk->chunk_wptr = NULL;
	if (chunk->chunk_rresid == 0)
		chunk->chunk_rptr = NULL;
	while (chunk && chunk->chunk_wresid == 0)
		chunk = chunk->chunk_next;

	*chunkp = chunk;
	return 0;
}

int
arspi_put_byte(struct spi_chunk **chunkp, uint8_t byte)
{
	struct spi_chunk *chunk;

	chunk = *chunkp;

	/* skip leading empty (or already consumed) chunks */
	while (chunk && chunk->chunk_rresid == 0)
		chunk = chunk->chunk_next;

	if (chunk == NULL) {
		return EOVERFLOW;
	}

	/*
	 * chunk must be read only.  SPI flash doesn't support
	 * any full duplex operations.
	 */
	if ((chunk->chunk_wptr) || !(chunk->chunk_rptr)) {
		return EINVAL;
	}

	*chunk->chunk_rptr = byte;
	chunk->chunk_rptr++;
	chunk->chunk_wresid--;	/* technically this was done at send time */
	chunk->chunk_rresid--;
	while (chunk && chunk->chunk_rresid == 0)
		chunk = chunk->chunk_next;

	*chunkp = chunk;
	return 0;
}

int
arspi_make_job(struct spi_transfer *st)
{
	struct arspi_job *job;
	struct spi_chunk *chunk;
	uint8_t byte;
	int i, rv;

	job = kmem_zalloc(sizeof (struct arspi_job), KM_SLEEP);

	st->st_busprivate = job;

	/* skip any leading empty chunks (should not be any!) */
	chunk = st->st_chunks;

	/* get transfer opcode */
	if ((rv = arspi_get_byte(&chunk, &byte)) != 0)
		return rv;

	job->job_opcode = byte;
	switch (job->job_opcode) {
	case SPIFLASH_CMD_WREN:
	case SPIFLASH_CMD_WRDI:
	case SPIFLASH_CMD_CHIPERASE:
		break;
	case SPIFLASH_CMD_RDJI:
		job->job_rxcnt = 3;
		break;
	case SPIFLASH_CMD_RDSR:
		job->job_rxcnt = 1;
		break;
	case SPIFLASH_CMD_WRSR:
		/*
		 * is this in data, or in address?  stick it in data
		 * for now.
		 */
		job->job_txcnt = 1;
		break;
	case SPIFLASH_CMD_RDID:
		job->job_addrcnt = 3;	/* 3 dummy bytes */
		job->job_rxcnt = 1;
		break;
	case SPIFLASH_CMD_ERASE:
		job->job_addrcnt = 3;
		break;
	case SPIFLASH_CMD_READ:
		job->job_addrcnt = 3;
		job->job_flags |= JOB_READ;
		break;
	case SPIFLASH_CMD_PROGRAM:
		job->job_addrcnt = 3;
		job->job_flags |= JOB_WRITE;
		break;
	case SPIFLASH_CMD_READFAST:
		/*
		 * This is a pain in the arse to support, so we will
		 * rewrite as an ordinary read.  But later, after we
		 * obtain the address.
		 */
		job->job_addrcnt = 3;	/* 3 address bytes */
		job->job_flags |= JOB_READ;
		break;
	default:
		return EINVAL;
	}

	for (i = 0; i < job->job_addrcnt; i++) {
		if ((rv = arspi_get_byte(&chunk, &byte)) != 0)
			return rv;
		job->job_addr <<= 8;
		job->job_addr |= byte;
	}

	if (job->job_opcode == SPIFLASH_CMD_READFAST) {
		/* eat the dummy timing byte */
		if ((rv = arspi_get_byte(&chunk, &byte)) != 0)
			return rv;
		/* rewrite this as a read */
		job->job_opcode = SPIFLASH_CMD_READ;
	}

	job->job_chunk = chunk;

	/*
	 * Now quickly check a few other things.  Namely, we are not
	 * allowed to have both READ and WRITE.
	 */
	for (chunk = job->job_chunk; chunk; chunk = chunk->chunk_next) {
		if (chunk->chunk_wptr) {
			job->job_wresid += chunk->chunk_wresid;
		}
		if (chunk->chunk_rptr) {
			job->job_rresid += chunk->chunk_rresid;
		}
	}

	if (job->job_rresid && job->job_wresid) {
		return EINVAL;
	}

	return 0;
}
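
/*
 * Example (illustrative): a READ of flash address 0x012345 arrives as the
 * command bytes { SPIFLASH_CMD_READ, 0x01, 0x23, 0x45 }, so job_opcode is
 * the READ opcode and the address loop above assembles job_addr =
 * 0x012345, most significant address byte first.
 */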

/*
 * NB: The Atheros SPI controller runs in little endian mode. So all
 * data accesses must be swapped appropriately.
 *
 * The controller auto-swaps read accesses done through the mapped memory
 * region, but when using SPI directly, we have to do the right thing to
 * swap to or from little endian.
 */

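/*
 * Concretely, as implemented below: the first byte taken from the write
 * chunk is placed in bits 7:0 of job_data, the next in bits 15:8, and so
 * on, which matches the little-endian byte order the controller expects
 * (per the note above).  arspi_finish_job() unpacks received data the
 * same way, least significant byte first.
 */
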
void
arspi_update_job(struct spi_transfer *st)
{
	struct arspi_job *job = st->st_busprivate;
	uint8_t byte;
	int i;

	if (job->job_flags & (JOB_WAIT|JOB_WREN))
		return;

	job->job_rxcnt = 0;
	job->job_txcnt = 0;
	job->job_data = 0;

	job->job_txcnt = uimin(job->job_wresid, 4);
	job->job_rxcnt = uimin(job->job_rresid, 4);

	job->job_wresid -= job->job_txcnt;
	job->job_rresid -= job->job_rxcnt;

	for (i = 0; i < job->job_txcnt; i++) {
		arspi_get_byte(&job->job_chunk, &byte);
		job->job_data |= (byte << (i * 8));
	}

	if ((!job->job_wresid) && (!job->job_rresid)) {
		job->job_flags |= JOB_LAST;
	}
}

void
arspi_finish_job(struct spi_transfer *st)
{
	struct arspi_job *job = st->st_busprivate;
	uint8_t	byte;
	int i;

	job->job_addr += job->job_rxcnt;
	job->job_addr += job->job_txcnt;
	for (i = 0; i < job->job_rxcnt; i++) {
		byte = job->job_data & 0xff;
		job->job_data >>= 8;
		arspi_put_byte(&job->job_chunk, byte);
	}
}