1 1.1 rkujawa /******************************************************************************* 2 1.1 rkujawa Copyright (C) Marvell International Ltd. and its affiliates 3 1.1 rkujawa 4 1.1 rkujawa Developed by Semihalf 5 1.1 rkujawa 6 1.1 rkujawa ******************************************************************************** 7 1.1 rkujawa Marvell BSD License 8 1.1 rkujawa 9 1.1 rkujawa If you received this File from Marvell, you may opt to use, redistribute and/or 10 1.1 rkujawa modify this File under the following licensing terms. 11 1.1 rkujawa Redistribution and use in source and binary forms, with or without modification, 12 1.1 rkujawa are permitted provided that the following conditions are met: 13 1.1 rkujawa 14 1.1 rkujawa * Redistributions of source code must retain the above copyright notice, 15 1.1 rkujawa this list of conditions and the following disclaimer. 16 1.1 rkujawa 17 1.1 rkujawa * Redistributions in binary form must reproduce the above copyright 18 1.1 rkujawa notice, this list of conditions and the following disclaimer in the 19 1.1 rkujawa documentation and/or other materials provided with the distribution. 20 1.1 rkujawa 21 1.1 rkujawa * Neither the name of Marvell nor the names of its contributors may be 22 1.1 rkujawa used to endorse or promote products derived from this software without 23 1.1 rkujawa specific prior written permission. 24 1.1 rkujawa 25 1.1 rkujawa THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND 26 1.1 rkujawa ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 27 1.1 rkujawa WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 28 1.1 rkujawa DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/

/*
 * Transfer mechanism extracted from arspi.c corresponding with the lines
 * 254-262 in this file.
 */

#include <sys/param.h>
#include <sys/device.h>

#include <dev/spi/spivar.h>

#include <dev/marvell/mvspireg.h>
#include <dev/marvell/marvellvar.h>

#include "locators.h"

/*
 * Core clock (TCLK) frequency in Hz, defined by the Marvell platform
 * code; used below to derive the SPI baud-rate prescaler.
 */
extern uint32_t mvTclk;

/*
 * Per-instance driver state (softc).
 */
struct mvspi_softc {
	struct spi_controller sc_spi;	/* glue to the MI spi(4) framework */
	void *sc_ih;			/* interrupt handle */
	bool sc_interrupts;		/* interrupt-driven operation flag */

	struct spi_transfer *sc_transfer;	/* transfer currently in progress */
	struct spi_chunk *sc_wchunk;	/* For partial writes */
	struct spi_transq sc_transq;	/* queue of pending transfers */
	bus_space_tag_t sc_st;		/* register space tag */
	bus_space_handle_t sc_sh;	/* register space handle */
	bus_size_t sc_size;		/* size of the register window */
};

int mvspi_match(struct device *, struct cfdata *, void *);
void mvspi_attach(struct device *, struct device *, void *);
/* SPI service routines */
int mvspi_configure(void *, int, int, int);
72 1.1 rkujawa int mvspi_transfer(void *, struct spi_transfer *); 73 1.1 rkujawa /* Internal support */ 74 1.1 rkujawa void mvspi_sched(struct mvspi_softc *); 75 1.1 rkujawa void mvspi_assert(struct mvspi_softc *sc); 76 1.1 rkujawa void mvspi_deassert(struct mvspi_softc *sc); 77 1.1 rkujawa 78 1.1 rkujawa #define GETREG(sc, x) \ 79 1.1 rkujawa bus_space_read_4(sc->sc_st, sc->sc_sh, x) 80 1.1 rkujawa #define PUTREG(sc, x, v) \ 81 1.1 rkujawa bus_space_write_4(sc->sc_st, sc->sc_sh, x, v) 82 1.1 rkujawa 83 1.1 rkujawa /* Attach structure */ 84 1.1 rkujawa CFATTACH_DECL_NEW(mvspi_mbus, sizeof(struct mvspi_softc), 85 1.1 rkujawa mvspi_match, mvspi_attach, NULL, NULL); 86 1.1 rkujawa 87 1.1 rkujawa int 88 1.1 rkujawa mvspi_match(struct device *parent, struct cfdata *cf, void *aux) 89 1.1 rkujawa { 90 1.1 rkujawa struct marvell_attach_args *mva = aux; 91 1.1 rkujawa 92 1.1 rkujawa if (strcmp(mva->mva_name, cf->cf_name) != 0) 93 1.1 rkujawa return 0; 94 1.1 rkujawa if (mva->mva_offset == MVA_OFFSET_DEFAULT || 95 1.1 rkujawa mva->mva_irq == MVA_IRQ_DEFAULT) 96 1.1 rkujawa return 0; 97 1.1 rkujawa 98 1.1 rkujawa mva->mva_size = MVSPI_SIZE; 99 1.1 rkujawa return 1; 100 1.1 rkujawa } 101 1.1 rkujawa 102 1.1 rkujawa void 103 1.1 rkujawa mvspi_attach(struct device *parent, struct device *self, void *aux) 104 1.1 rkujawa { 105 1.1 rkujawa struct mvspi_softc *sc = device_private(self); 106 1.1 rkujawa struct marvell_attach_args *mva = aux; 107 1.1 rkujawa int ctl; 108 1.1 rkujawa 109 1.1 rkujawa aprint_normal(": Marvell SPI controller\n"); 110 1.1 rkujawa 111 1.1 rkujawa /* 112 1.1 rkujawa * Map registers. 
113 1.1 rkujawa */ 114 1.1 rkujawa sc->sc_st = mva->mva_iot; 115 1.1 rkujawa sc->sc_size = mva->mva_size; 116 1.1 rkujawa 117 1.1 rkujawa if (bus_space_subregion(sc->sc_st, mva->mva_ioh, mva->mva_offset, 118 1.1 rkujawa mva->mva_size, &sc->sc_sh)) { 119 1.1 rkujawa aprint_error_dev(self, "Cannot map registers\n"); 120 1.1 rkujawa return; 121 1.1 rkujawa } 122 1.1 rkujawa 123 1.1 rkujawa /* 124 1.1 rkujawa * Initialize hardware. 125 1.1 rkujawa */ 126 1.1 rkujawa ctl = GETREG(sc, MVSPI_INTCONF_REG); 127 1.1 rkujawa 128 1.1 rkujawa ctl &= MVSPI_DIRHS_MASK; 129 1.1 rkujawa ctl &= MVSPI_1BYTE_MASK; 130 1.1 rkujawa 131 1.4 christos PUTREG(sc, MVSPI_INTCONF_REG, ctl); 132 1.1 rkujawa 133 1.1 rkujawa /* 134 1.1 rkujawa * Initialize SPI controller. 135 1.1 rkujawa */ 136 1.1 rkujawa sc->sc_spi.sct_cookie = sc; 137 1.1 rkujawa sc->sc_spi.sct_configure = mvspi_configure; 138 1.1 rkujawa sc->sc_spi.sct_transfer = mvspi_transfer; 139 1.1 rkujawa sc->sc_spi.sct_nslaves = 1; 140 1.1 rkujawa 141 1.1 rkujawa /* 142 1.1 rkujawa * Initialize the queue. 143 1.1 rkujawa */ 144 1.1 rkujawa spi_transq_init(&sc->sc_transq); 145 1.1 rkujawa 146 1.1 rkujawa /* 147 1.1 rkujawa * Initialize and attach bus attach. 
148 1.1 rkujawa */ 149 1.10 thorpej spibus_attach(self, &sc->sc_spi); 150 1.1 rkujawa } 151 1.1 rkujawa 152 1.1 rkujawa int 153 1.1 rkujawa mvspi_configure(void *cookie, int slave, int mode, int speed) 154 1.1 rkujawa { 155 1.1 rkujawa struct mvspi_softc *sc = cookie; 156 1.1 rkujawa uint32_t ctl = 0, spr, sppr; 157 1.1 rkujawa uint32_t divider; 158 1.1 rkujawa uint32_t best_spr = 0, best_sppr = 0; 159 1.1 rkujawa uint32_t best_sppr0, best_spprhi; 160 1.1 rkujawa uint8_t exact_match = 0; 161 1.1 rkujawa uint32_t min_baud_offset = 0xFFFFFFFF; 162 1.1 rkujawa 163 1.1 rkujawa if (slave < 0 || slave > 7) 164 1.1 rkujawa return EINVAL; 165 1.1 rkujawa 166 1.1 rkujawa switch(mode) { 167 1.1 rkujawa case SPI_MODE_0: 168 1.1 rkujawa ctl &= ~(MVSPI_CPOL_MASK); 169 1.1 rkujawa /* In boards documentation, CPHA is inverted */ 170 1.1 rkujawa ctl &= MVSPI_CPHA_MASK; 171 1.1 rkujawa break; 172 1.1 rkujawa case SPI_MODE_1: 173 1.1 rkujawa ctl |= MVSPI_CPOL_MASK; 174 1.1 rkujawa ctl &= MVSPI_CPHA_MASK; 175 1.1 rkujawa break; 176 1.1 rkujawa case SPI_MODE_2: 177 1.1 rkujawa ctl &= ~(MVSPI_CPOL_MASK); 178 1.1 rkujawa ctl |= ~(MVSPI_CPHA_MASK); 179 1.1 rkujawa break; 180 1.1 rkujawa case SPI_MODE_3: 181 1.1 rkujawa ctl |= MVSPI_CPOL_MASK; 182 1.1 rkujawa ctl |= ~(MVSPI_CPHA_MASK); 183 1.1 rkujawa break; 184 1.1 rkujawa default: 185 1.1 rkujawa return EINVAL; 186 1.1 rkujawa } 187 1.1 rkujawa 188 1.1 rkujawa /* Find the best prescale configuration - less or equal: 189 1.9 andvar * SPI actual frequency = core_clk / (SPR * (2 ^ SPPR)) 190 1.1 rkujawa * Try to find the minimal SPR and SPPR values that offer 191 1.1 rkujawa * the best prescale config. 
192 1.1 rkujawa * 193 1.1 rkujawa */ 194 1.1 rkujawa for (spr = 1; spr <= MVSPI_SPR_MAXVALUE; spr++) { 195 1.1 rkujawa for (sppr = 0; sppr <= MVSPI_SPPR_MAXVALUE; sppr++) { 196 1.1 rkujawa divider = spr * (1 << sppr); 197 1.1 rkujawa /* Check for higher - irrelevant */ 198 1.1 rkujawa if ((mvTclk / divider) > speed) 199 1.1 rkujawa continue; 200 1.1 rkujawa 201 1.1 rkujawa /* Check for exact fit */ 202 1.1 rkujawa if ((mvTclk / divider) == speed) { 203 1.1 rkujawa best_spr = spr; 204 1.1 rkujawa best_sppr = sppr; 205 1.1 rkujawa exact_match = 1; 206 1.1 rkujawa break; 207 1.1 rkujawa } 208 1.1 rkujawa 209 1.1 rkujawa /* Check if this is better than the previous one */ 210 1.1 rkujawa if ((speed - (mvTclk / divider)) < min_baud_offset) { 211 1.1 rkujawa min_baud_offset = (speed - (mvTclk / divider)); 212 1.1 rkujawa best_spr = spr; 213 1.1 rkujawa best_sppr = sppr; 214 1.1 rkujawa } 215 1.1 rkujawa } 216 1.1 rkujawa 217 1.1 rkujawa if (exact_match == 1) 218 1.1 rkujawa break; 219 1.1 rkujawa } 220 1.1 rkujawa 221 1.1 rkujawa if (best_spr == 0) { 222 1.1 rkujawa printf("%s ERROR: SPI baud rate prescale error!\n", __func__); 223 1.1 rkujawa return -1; 224 1.1 rkujawa } 225 1.1 rkujawa 226 1.1 rkujawa ctl &= ~(MVSPI_SPR_MASK); 227 1.1 rkujawa ctl &= ~(MVSPI_SPPR_MASK); 228 1.1 rkujawa ctl |= best_spr; 229 1.1 rkujawa 230 1.1 rkujawa best_spprhi = best_sppr & MVSPI_SPPRHI_MASK; 231 1.1 rkujawa best_spprhi = best_spprhi << 5; 232 1.1 rkujawa 233 1.1 rkujawa ctl |= best_spprhi; 234 1.1 rkujawa 235 1.1 rkujawa best_sppr0 = best_sppr & MVSPI_SPPR0_MASK; 236 1.1 rkujawa best_sppr0 = best_sppr0 << 4; 237 1.1 rkujawa 238 1.1 rkujawa ctl |= best_sppr0; 239 1.1 rkujawa 240 1.1 rkujawa PUTREG(sc, MVSPI_INTCONF_REG, ctl); 241 1.1 rkujawa 242 1.1 rkujawa return 0; 243 1.1 rkujawa } 244 1.1 rkujawa 245 1.1 rkujawa int 246 1.1 rkujawa mvspi_transfer(void *cookie, struct spi_transfer *st) 247 1.1 rkujawa { 248 1.1 rkujawa struct mvspi_softc *sc = cookie; 249 1.2 khorben int s; 250 1.1 
rkujawa 251 1.1 rkujawa s = splbio(); 252 1.1 rkujawa spi_transq_enqueue(&sc->sc_transq, st); 253 1.1 rkujawa if (sc->sc_transfer == NULL) { 254 1.1 rkujawa mvspi_sched(sc); 255 1.1 rkujawa } 256 1.1 rkujawa splx(s); 257 1.1 rkujawa return 0; 258 1.1 rkujawa } 259 1.1 rkujawa 260 1.1 rkujawa void 261 1.1 rkujawa mvspi_assert(struct mvspi_softc *sc) 262 1.1 rkujawa { 263 1.1 rkujawa int ctl; 264 1.1 rkujawa 265 1.3 christos if (sc->sc_transfer->st_slave < 0 || sc->sc_transfer->st_slave > 7) { 266 1.1 rkujawa printf("%s ERROR: Slave number %d not valid!\n", __func__, sc->sc_transfer->st_slave); 267 1.1 rkujawa return; 268 1.1 rkujawa } else 269 1.1 rkujawa /* Enable appropriate CSn according to its slave number */ 270 1.1 rkujawa PUTREG(sc, MVSPI_CTRL_REG, (sc->sc_transfer->st_slave << 2)); 271 1.1 rkujawa 272 1.1 rkujawa /* Enable CSnAct */ 273 1.1 rkujawa ctl = GETREG(sc, MVSPI_CTRL_REG); 274 1.1 rkujawa ctl |= MVSPI_CSNACT_MASK; 275 1.1 rkujawa PUTREG(sc, MVSPI_CTRL_REG, ctl); 276 1.1 rkujawa } 277 1.1 rkujawa 278 1.1 rkujawa void 279 1.1 rkujawa mvspi_deassert(struct mvspi_softc *sc) 280 1.1 rkujawa { 281 1.1 rkujawa int ctl = GETREG(sc, MVSPI_CTRL_REG); 282 1.1 rkujawa ctl &= ~(MVSPI_CSNACT_MASK); 283 1.1 rkujawa PUTREG(sc, MVSPI_CTRL_REG, ctl); 284 1.1 rkujawa } 285 1.1 rkujawa 286 1.1 rkujawa void 287 1.1 rkujawa mvspi_sched(struct mvspi_softc *sc) 288 1.1 rkujawa { 289 1.1 rkujawa struct spi_transfer *st; 290 1.1 rkujawa struct spi_chunk *chunk; 291 1.1 rkujawa int i, j, ctl; 292 1.1 rkujawa uint8_t byte; 293 1.1 rkujawa int ready = FALSE; 294 1.1 rkujawa 295 1.1 rkujawa for (;;) { 296 1.1 rkujawa if ((st = sc->sc_transfer) == NULL) { 297 1.1 rkujawa if ((st = spi_transq_first(&sc->sc_transq)) == NULL) { 298 1.1 rkujawa /* No work left to do */ 299 1.1 rkujawa break; 300 1.1 rkujawa } 301 1.1 rkujawa spi_transq_dequeue(&sc->sc_transq); 302 1.1 rkujawa sc->sc_transfer = st; 303 1.1 rkujawa } 304 1.1 rkujawa 305 1.1 rkujawa chunk = st->st_chunks; 306 1.1 
rkujawa 307 1.1 rkujawa mvspi_assert(sc); 308 1.1 rkujawa 309 1.1 rkujawa do { 310 1.1 rkujawa for (i = chunk->chunk_wresid; i > 0; i--) { 311 1.1 rkujawa /* First clear the ready bit */ 312 1.1 rkujawa ctl = GETREG(sc, MVSPI_CTRL_REG); 313 1.1 rkujawa ctl &= ~(MVSPI_CR_SMEMRDY); 314 1.1 rkujawa PUTREG(sc, MVSPI_CTRL_REG, ctl); 315 1.1 rkujawa 316 1.1 rkujawa if (chunk->chunk_wptr){ 317 1.1 rkujawa byte = *chunk->chunk_wptr; 318 1.1 rkujawa chunk->chunk_wptr++; 319 1.1 rkujawa } else 320 1.1 rkujawa byte = MVSPI_DUMMY_BYTE; 321 1.1 rkujawa 322 1.1 rkujawa /* Transmit data */ 323 1.1 rkujawa PUTREG(sc, MVSPI_DATAOUT_REG, byte); 324 1.1 rkujawa 325 1.1 rkujawa /* Wait with timeout for memory ready */ 326 1.1 rkujawa for (j = 0; j < MVSPI_WAIT_RDY_MAX_LOOP; j++) { 327 1.1 rkujawa if (GETREG(sc, MVSPI_CTRL_REG) & 328 1.1 rkujawa MVSPI_CR_SMEMRDY) { 329 1.1 rkujawa ready = TRUE; 330 1.1 rkujawa break; 331 1.1 rkujawa } 332 1.1 rkujawa 333 1.1 rkujawa } 334 1.1 rkujawa 335 1.1 rkujawa if (!ready) { 336 1.1 rkujawa mvspi_deassert(sc); 337 1.1 rkujawa spi_done(st, EBUSY); 338 1.1 rkujawa return; 339 1.1 rkujawa } 340 1.1 rkujawa 341 1.1 rkujawa /* Check that the RX data is needed */ 342 1.1 rkujawa if (chunk->chunk_rptr) { 343 1.1 rkujawa *chunk->chunk_rptr = 344 1.1 rkujawa GETREG(sc, MVSPI_DATAIN_REG); 345 1.1 rkujawa chunk->chunk_rptr++; 346 1.1 rkujawa 347 1.1 rkujawa } 348 1.1 rkujawa 349 1.1 rkujawa } 350 1.1 rkujawa 351 1.1 rkujawa chunk = chunk->chunk_next; 352 1.1 rkujawa 353 1.1 rkujawa } while (chunk != NULL); 354 1.1 rkujawa 355 1.1 rkujawa mvspi_deassert(sc); 356 1.1 rkujawa 357 1.1 rkujawa spi_done(st, 0); 358 1.1 rkujawa sc->sc_transfer = NULL; 359 1.1 rkujawa 360 1.1 rkujawa 361 1.1 rkujawa break; 362 1.1 rkujawa } 363 1.1 rkujawa } 364