/* $NetBSD: arspi.c,v 1.8 2011/07/07 05:06:44 matt Exp $ */

/*-
 * Copyright (c) 2006 Urbana-Champaign Independent Media Center.
 * Copyright (c) 2006 Garrett D'Amore.
 * All rights reserved.
 *
 * Portions of this code were written by Garrett D'Amore for the
 * Champaign-Urbana Community Wireless Network Project.
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials provided
 *    with the distribution.
 * 3. All advertising materials mentioning features or use of this
 *    software must display the following acknowledgements:
 *      This product includes software developed by the Urbana-Champaign
 *      Independent Media Center.
 *	This product includes software developed by Garrett D'Amore.
 * 4. Urbana-Champaign Independent Media Center's name and Garrett
 *    D'Amore's name may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE URBANA-CHAMPAIGN INDEPENDENT
 * MEDIA CENTER AND GARRETT D'AMORE ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE URBANA-CHAMPAIGN INDEPENDENT
 * MEDIA CENTER OR GARRETT D'AMORE BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: arspi.c,v 1.8 2011/07/07 05:06:44 matt Exp $");

#include "locators.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/errno.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/queue.h>

#include <sys/bus.h>
#include <machine/cpu.h>

#include <mips/atheros/include/ar5315reg.h>
#include <mips/atheros/include/arbusvar.h>

#include <mips/atheros/dev/arspireg.h>

#include <dev/spi/spiflash.h>
#include <dev/spi/spivar.h>

/*
 * This device is intended only to operate with specific SPI flash
 * parts, and is not a general purpose SPI host.  (Or at least if it
 * is, the Linux and eCos sources do not show how to use it as such.)
 * The lack of documentation on the Atheros SoCs is less than helpful.
 *
 * So for now we just "emulate" enough of the host bus framework to
 * make the SPI flash drivers happy.
 */

struct arspi_job {
	uint8_t			job_opcode;
	struct spi_chunk	*job_chunk;
	uint32_t		job_flags;
	uint32_t		job_addr;
	uint32_t		job_data;
	int			job_rxcnt;
	int			job_txcnt;
	int			job_addrcnt;
	int			job_rresid;
	int			job_wresid;
};

#define	JOB_READ		0x1
#define	JOB_WRITE		0x2
#define	JOB_LAST		0x4
#define	JOB_WAIT		0x8	/* job must wait for WIP bits */
#define	JOB_WREN		0x10	/* WREN needed */

struct arspi_softc {
	struct device		sc_dev;
	struct spi_controller	sc_spi;
	void			*sc_ih;
	bool			sc_interrupts;

	struct spi_transfer	*sc_transfer;
	struct spi_chunk	*sc_wchunk;	/* for partial writes */
	struct spi_transq	sc_transq;
	bus_space_tag_t		sc_st;
	bus_space_handle_t	sc_sh;
	bus_size_t		sc_size;
};

#define	STATIC

STATIC int arspi_match(struct device *, struct cfdata *, void *);
STATIC void arspi_attach(struct device *, struct device *, void *);
STATIC void arspi_interrupts(struct device *);
STATIC int arspi_intr(void *);
/* SPI service routines */
STATIC int arspi_configure(void *, int, int, int);
STATIC int arspi_transfer(void *, struct spi_transfer *);
/* internal support */
STATIC void arspi_poll(struct arspi_softc *);
STATIC void arspi_done(struct arspi_softc *, int);
STATIC void arspi_sched(struct arspi_softc *);
STATIC int arspi_get_byte(struct spi_chunk **, uint8_t *);
STATIC int arspi_put_byte(struct spi_chunk **, uint8_t);
STATIC int arspi_make_job(struct spi_transfer *);
STATIC void arspi_update_job(struct spi_transfer *);
STATIC void arspi_finish_job(struct spi_transfer *);


CFATTACH_DECL(arspi, sizeof(struct arspi_softc),
    arspi_match, arspi_attach, NULL, NULL);

#define	GETREG(sc, o)		bus_space_read_4(sc->sc_st, sc->sc_sh, o)
#define	PUTREG(sc, o, v)	bus_space_write_4(sc->sc_st, sc->sc_sh, o, v)

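/*
 * Autoconfiguration glue: match on the device name supplied by the
 * arbus attach arguments.
 */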
int
arspi_match(struct device *parent, struct cfdata *cf, void *aux)
{
	struct arbus_attach_args *aa = aux;

	if (strcmp(aa->aa_name, cf->cf_name) != 0)
		return 0;
	return 1;
}

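/*
 * Map the controller registers, register our SPI controller hooks,
 * set up the transfer queue, try to establish the interrupt, and
 * attach the SPI bus beneath us.
 */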
void
arspi_attach(struct device *parent, struct device *self, void *aux)
{
	struct arspi_softc *sc = device_private(self);
	struct spibus_attach_args sba;
	struct arbus_attach_args *aa = aux;

	/*
	 * Map registers.
	 */
	sc->sc_st = aa->aa_bst;
	sc->sc_size = aa->aa_size;
	if (bus_space_map(sc->sc_st, aa->aa_addr, sc->sc_size, 0,
		&sc->sc_sh) != 0) {
		printf(": unable to map registers!\n");
		return;
	}

	aprint_normal(": Atheros SPI controller\n");

	/*
	 * Initialize SPI controller.
	 */
	sc->sc_spi.sct_cookie = sc;
	sc->sc_spi.sct_configure = arspi_configure;
	sc->sc_spi.sct_transfer = arspi_transfer;
	sc->sc_spi.sct_nslaves = 1;

	/*
	 * Initialize the queue.
	 */
	spi_transq_init(&sc->sc_transq);

	/*
	 * Enable device interrupts.
	 */
	sc->sc_ih = arbus_intr_establish(aa->aa_cirq, aa->aa_mirq,
	    arspi_intr, sc);
	if (sc->sc_ih == NULL) {
		aprint_error("%s: couldn't establish interrupt\n",
		    device_xname(self));
		/* just leave it in polled mode */
	} else
		config_interrupts(self, arspi_interrupts);

	/*
	 * Initialize and attach the SPI bus.
	 */
	sba.sba_controller = &sc->sc_spi;
	(void) config_found_ia(&sc->sc_dev, "spibus", &sba, spibus_print);
}

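/*
 * Deferred (post-autoconfiguration) interrupt hookup.  Deliberately
 * left disabled; see the comment below.
 */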
void
arspi_interrupts(struct device *self)
{
	/*
	 * We never leave polling mode, because, apparently, we are
	 * missing some information about how to drive the SPI
	 * controller in interrupt mode.
	 */
#if 0
	struct arspi_softc *sc = device_private(self);
	int	s;

	s = splbio();
	sc->sc_interrupts = true;
	splx(s);
#endif
}

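/*
 * Interrupt (and polled-mode) handler: spin until the controller is
 * no longer busy, then complete the current job.
 */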
int
arspi_intr(void *arg)
{
	struct arspi_softc *sc = arg;

	while (GETREG(sc, ARSPI_REG_CTL) & ARSPI_CTL_BUSY);

	arspi_done(sc, 0);

	return 1;
}

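/*
 * Polled-mode loop: keep servicing the controller until the current
 * transfer has been retired.
 */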
void
arspi_poll(struct arspi_softc *sc)
{

	while (sc->sc_transfer) {
		arspi_intr(sc);
	}
}

int
arspi_configure(void *cookie, int slave, int mode, int speed)
{

	/*
	 * We don't support the full SPI protocol, and hopefully the
	 * firmware has programmed a reasonable mode already.  So
	 * just a couple of quick sanity checks, then bail.
	 */
	if ((mode != 0) || (slave != 0))
		return EINVAL;

	return 0;
}

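/*
 * Queue a transfer.  The transfer is first translated into an
 * arspi_job; if nothing is currently in progress, kick off the work
 * immediately (and poll for completion when interrupts are not in
 * use).
 */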
int
arspi_transfer(void *cookie, struct spi_transfer *st)
{
	struct arspi_softc *sc = cookie;
	int rv;
	int s;

	st->st_busprivate = NULL;
	if ((rv = arspi_make_job(st)) != 0) {
		if (st->st_busprivate) {
			free(st->st_busprivate, M_DEVBUF);
			st->st_busprivate = NULL;
		}
		spi_done(st, rv);
		return rv;
	}

	s = splbio();
	spi_transq_enqueue(&sc->sc_transq, st);
	if (sc->sc_transfer == NULL) {
		arspi_sched(sc);
		if (!sc->sc_interrupts)
			arspi_poll(sc);
	}
	splx(s);
	return 0;
}

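/*
 * Start the next piece of work: dequeue a transfer if necessary,
 * refresh its job state, and program the opcode, address, data and
 * transmit/receive counts into the controller before setting the
 * start bit.  RDSR (wait) and WREN cycles are issued first when the
 * job requires them.
 */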
void
arspi_sched(struct arspi_softc *sc)
{
	struct spi_transfer *st;
	struct arspi_job *job;
	uint32_t ctl, cnt;

	for (;;) {
		if ((st = sc->sc_transfer) == NULL) {
			if ((st = spi_transq_first(&sc->sc_transq)) == NULL) {
				/* no work left to do */
				break;
			}
			spi_transq_dequeue(&sc->sc_transq);
			sc->sc_transfer = st;
		}

		arspi_update_job(st);
		job = st->st_busprivate;

		/* there shouldn't be anything running, but ensure it */
		do {
			ctl = GETREG(sc, ARSPI_REG_CTL);
		} while (ctl & ARSPI_CTL_BUSY);
		/* clear all of the tx and rx bits */
		ctl &= ~(ARSPI_CTL_TXCNT_MASK | ARSPI_CTL_RXCNT_MASK);

		if (job->job_flags & JOB_WAIT) {
			PUTREG(sc, ARSPI_REG_OPCODE, SPIFLASH_CMD_RDSR);
			/* only the opcode for tx */
			ctl |= (1 << ARSPI_CTL_TXCNT_SHIFT);
			/* and one rx byte */
			ctl |= (1 << ARSPI_CTL_RXCNT_SHIFT);
		} else if (job->job_flags & JOB_WREN) {
			PUTREG(sc, ARSPI_REG_OPCODE, SPIFLASH_CMD_WREN);
			/* just the opcode */
			ctl |= (1 << ARSPI_CTL_TXCNT_SHIFT);
			/* no rx bytes */
		} else {
			/* set the data */
			PUTREG(sc, ARSPI_REG_DATA, job->job_data);

			/* set the opcode and the address */
			PUTREG(sc, ARSPI_REG_OPCODE, job->job_opcode |
			    (job->job_addr << 8));

			/* now set txcnt */
			cnt = 1;	/* opcode */
			cnt += job->job_addrcnt + job->job_txcnt;
			ctl |= (cnt << ARSPI_CTL_TXCNT_SHIFT);

			/* now set rxcnt */
			cnt = job->job_rxcnt;
			ctl |= (cnt << ARSPI_CTL_RXCNT_SHIFT);
		}

		/* set the start bit */
		ctl |= ARSPI_CTL_START;

		PUTREG(sc, ARSPI_REG_CTL, ctl);
		break;
	}
}

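/*
 * Completion handler: consume the result of the cycle that just
 * finished (WIP poll, WREN, or data phase), retire the transfer once
 * its last segment is done or an error occurred, and schedule more
 * work.
 */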
void
arspi_done(struct arspi_softc *sc, int err)
{
	struct spi_transfer *st;
	struct arspi_job *job;

	if ((st = sc->sc_transfer) != NULL) {
		job = st->st_busprivate;

		if (job->job_flags & JOB_WAIT) {
			if (err == 0) {
				if ((GETREG(sc, ARSPI_REG_DATA) &
				    SPIFLASH_SR_BUSY) == 0) {
					/* intermediate wait done */
					job->job_flags &= ~JOB_WAIT;
					goto done;
				}
			}
		} else if (job->job_flags & JOB_WREN) {
			if (err == 0) {
				job->job_flags &= ~JOB_WREN;
				goto done;
			}
		} else if (err == 0) {
			/*
			 * When breaking up write jobs, we have to wait until
			 * the WIP bit is clear, and we have to separately
			 * send WREN for each chunk.  These flags facilitate
			 * that.
			 */
			if (job->job_flags & JOB_WRITE)
				job->job_flags |= (JOB_WAIT | JOB_WREN);
			job->job_data = GETREG(sc, ARSPI_REG_DATA);
			arspi_finish_job(st);
		}

		if (err || (job->job_flags & JOB_LAST)) {
			sc->sc_transfer = NULL;
			st->st_busprivate = NULL;
			spi_done(st, err);
			free(job, M_DEVBUF);
		}
	}
done:
	arspi_sched(sc);
}

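/*
 * Fetch the next byte to transmit from the chunk list, advancing the
 * caller's chunk pointer past fully consumed chunks.
 */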
int
arspi_get_byte(struct spi_chunk **chunkp, uint8_t *bytep)
{
	struct spi_chunk *chunk;

	chunk = *chunkp;

	/* skip leading empty (or already consumed) chunks */
	while (chunk && chunk->chunk_wresid == 0)
		chunk = chunk->chunk_next;

	if (chunk == NULL) {
		return ENODATA;
	}

	/*
	 * chunk must be write only.  SPI flash doesn't support
	 * any full duplex operations.
	 */
	if ((chunk->chunk_rptr) || !(chunk->chunk_wptr)) {
		return EINVAL;
	}

	*bytep = *chunk->chunk_wptr;
	chunk->chunk_wptr++;
	chunk->chunk_wresid--;
	chunk->chunk_rresid--;
	/* clearing wptr and rptr makes sanity checks later easier */
	if (chunk->chunk_wresid == 0)
		chunk->chunk_wptr = NULL;
	if (chunk->chunk_rresid == 0)
		chunk->chunk_rptr = NULL;
	while (chunk && chunk->chunk_wresid == 0)
		chunk = chunk->chunk_next;

	*chunkp = chunk;
	return 0;
}

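/*
 * Store a received byte into the chunk list, advancing the caller's
 * chunk pointer past fully filled chunks.
 */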
int
arspi_put_byte(struct spi_chunk **chunkp, uint8_t byte)
{
	struct spi_chunk *chunk;

	chunk = *chunkp;

	/* skip leading empty (or already consumed) chunks */
	while (chunk && chunk->chunk_rresid == 0)
		chunk = chunk->chunk_next;

	if (chunk == NULL) {
		return EOVERFLOW;
	}

	/*
	 * chunk must be read only.  SPI flash doesn't support
	 * any full duplex operations.
	 */
	if ((chunk->chunk_wptr) || !(chunk->chunk_rptr)) {
		return EINVAL;
	}

	*chunk->chunk_rptr = byte;
	chunk->chunk_rptr++;
	chunk->chunk_wresid--;	/* technically this was done at send time */
	chunk->chunk_rresid--;
	while (chunk && chunk->chunk_rresid == 0)
		chunk = chunk->chunk_next;

	*chunkp = chunk;
	return 0;
}

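/*
 * Translate a generic SPI transfer into an arspi_job: pull the flash
 * opcode and any address bytes out of the leading chunks, classify
 * the command, and record how much data remains to be written or
 * read.  Full-duplex transfers are rejected.
 */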
int
arspi_make_job(struct spi_transfer *st)
{
	struct arspi_job *job;
	struct spi_chunk *chunk;
	uint8_t byte;
	int i, rv;

	job = malloc(sizeof (struct arspi_job), M_DEVBUF, M_ZERO);
	if (job == NULL) {
		return ENOMEM;
	}

	st->st_busprivate = job;

	/* skip any leading empty chunks (should not be any!) */
	chunk = st->st_chunks;

	/* get transfer opcode */
	if ((rv = arspi_get_byte(&chunk, &byte)) != 0)
		return rv;

	job->job_opcode = byte;
	switch (job->job_opcode) {
	case SPIFLASH_CMD_WREN:
	case SPIFLASH_CMD_WRDI:
	case SPIFLASH_CMD_CHIPERASE:
		break;
	case SPIFLASH_CMD_RDJI:
		job->job_rxcnt = 3;
		break;
	case SPIFLASH_CMD_RDSR:
		job->job_rxcnt = 1;
		break;
	case SPIFLASH_CMD_WRSR:
		/*
		 * is this in data, or in address?  stick it in data
		 * for now.
		 */
		job->job_txcnt = 1;
		break;
	case SPIFLASH_CMD_RDID:
		job->job_addrcnt = 3;	/* 3 dummy bytes */
		job->job_rxcnt = 1;
		break;
	case SPIFLASH_CMD_ERASE:
		job->job_addrcnt = 3;
		break;
	case SPIFLASH_CMD_READ:
		job->job_addrcnt = 3;
		job->job_flags |= JOB_READ;
		break;
	case SPIFLASH_CMD_PROGRAM:
		job->job_addrcnt = 3;
		job->job_flags |= JOB_WRITE;
		break;
	case SPIFLASH_CMD_READFAST:
		/*
		 * This is a pain in the arse to support, so we will
		 * rewrite as an ordinary read.  But later, after we
		 * obtain the address.
		 */
		job->job_addrcnt = 3;	/* 3 address */
		job->job_flags |= JOB_READ;
		break;
	default:
		return EINVAL;
	}

	for (i = 0; i < job->job_addrcnt; i++) {
		if ((rv = arspi_get_byte(&chunk, &byte)) != 0)
			return rv;
		job->job_addr <<= 8;
		job->job_addr |= byte;
	}

	if (job->job_opcode == SPIFLASH_CMD_READFAST) {
		/* eat the dummy timing byte */
		if ((rv = arspi_get_byte(&chunk, &byte)) != 0)
			return rv;
		/* rewrite this as a read */
		job->job_opcode = SPIFLASH_CMD_READ;
	}

	job->job_chunk = chunk;

	/*
	 * Now quickly check a few other things.  Namely, we are not
	 * allowed to have both READ and WRITE.
	 */
	for (chunk = job->job_chunk; chunk; chunk = chunk->chunk_next) {
		if (chunk->chunk_wptr) {
			job->job_wresid += chunk->chunk_wresid;
		}
		if (chunk->chunk_rptr) {
			job->job_rresid += chunk->chunk_rresid;
		}
	}

	if (job->job_rresid && job->job_wresid) {
		return EINVAL;
	}

	return 0;
}

/*
 * NB: The Atheros SPI controller runs in little endian mode. So all
 * data accesses must be swapped appropriately.
 *
 * The controller auto-swaps read accesses done through the mapped memory
 * region, but when using SPI directly, we have to do the right thing to
 * swap to or from little endian.
 */

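/*
 * Refresh the job for the next cycle: move up to four bytes of
 * outgoing data into the data register image and note how much of
 * the transfer will remain afterwards.
 */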
void
arspi_update_job(struct spi_transfer *st)
{
	struct arspi_job *job = st->st_busprivate;
	uint8_t byte;
	int i;

	if (job->job_flags & (JOB_WAIT|JOB_WREN))
		return;

	job->job_rxcnt = 0;
	job->job_txcnt = 0;
	job->job_data = 0;

	job->job_txcnt = min(job->job_wresid, 4);
	job->job_rxcnt = min(job->job_rresid, 4);

	job->job_wresid -= job->job_txcnt;
	job->job_rresid -= job->job_rxcnt;

	for (i = 0; i < job->job_txcnt; i++) {
		arspi_get_byte(&job->job_chunk, &byte);
		job->job_data |= (byte << (i * 8));
	}

	if ((!job->job_wresid) && (!job->job_rresid)) {
		job->job_flags |= JOB_LAST;
	}
}

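/*
 * Harvest the results of the previous cycle: advance the flash
 * address past the bytes just transferred and copy any received data
 * bytes back into the caller's chunks.
 */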
void
arspi_finish_job(struct spi_transfer *st)
{
	struct arspi_job *job = st->st_busprivate;
	uint8_t	byte;
	int i;

	job->job_addr += job->job_rxcnt;
	job->job_addr += job->job_txcnt;
	for (i = 0; i < job->job_rxcnt; i++) {
		byte = job->job_data & 0xff;
		job->job_data >>= 8;
		arspi_put_byte(&job->job_chunk, byte);
	}
}