/* $NetBSD: tegra_apbdma.c,v 1.7 2019/10/13 06:11:31 skrll Exp $ */

/*-
 * Copyright (c) 2017 Jared D. McNeill <jmcneill@invisible.ca>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: tegra_apbdma.c,v 1.7 2019/10/13 06:11:31 skrll Exp $");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/device.h>
#include <sys/intr.h>
#include <sys/systm.h>
#include <sys/kernel.h>

#include <arm/nvidia/tegra_reg.h>
#include <arm/nvidia/tegra_apbdmareg.h>
#include <arm/nvidia/tegra_var.h>

#include <dev/fdt/fdtvar.h>

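/*
 * NVIDIA Tegra APB DMA controller (Tegra124/Tegra210).
 *
 * The controller provides 32 channels that move data between the AHB
 * (memory) side and the APB (peripheral) side.  The driver registers
 * itself as an FDT DMA controller; clients acquire a channel through the
 * fdtbus DMA interface with a one-cell specifier selecting the APB
 * peripheral request line.
 */
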
#define	TEGRA_APBDMA_NCHAN	32

static void *	tegra_apbdma_acquire(device_t, const void *, size_t,
				     void (*)(void *), void *);
static void	tegra_apbdma_release(device_t, void *);
static int	tegra_apbdma_transfer(device_t, void *,
				      struct fdtbus_dma_req *);
static void	tegra_apbdma_halt(device_t, void *);

static const struct fdtbus_dma_controller_func tegra_apbdma_funcs = {
	.acquire = tegra_apbdma_acquire,
	.release = tegra_apbdma_release,
	.transfer = tegra_apbdma_transfer,
	.halt = tegra_apbdma_halt
};

static int	tegra_apbdma_match(device_t, cfdata_t, void *);
static void	tegra_apbdma_attach(device_t, device_t, void *);

static int	tegra_apbdma_intr(void *);

struct tegra_apbdma_softc;

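/*
 * Per-channel software state.  A channel is considered in use once an
 * interrupt handler has been established for it (ch_ih != NULL).  ch_req
 * holds the APB request select taken from the FDT specifier, and
 * ch_cb/ch_cbarg are the client's completion callback and argument,
 * invoked from the channel interrupt handler.
 */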
struct tegra_apbdma_chan {
	struct tegra_apbdma_softc *ch_sc;
	u_int			ch_n;
	void			*ch_ih;
	void			(*ch_cb)(void *);
	void			*ch_cbarg;
	u_int			ch_req;
};

struct tegra_apbdma_softc {
	device_t		sc_dev;
	bus_space_tag_t		sc_bst;
	bus_space_handle_t	sc_bsh;
	int			sc_phandle;

	struct tegra_apbdma_chan sc_chan[TEGRA_APBDMA_NCHAN];
};

CFATTACH_DECL_NEW(tegra_apbdma, sizeof(struct tegra_apbdma_softc),
	tegra_apbdma_match, tegra_apbdma_attach, NULL, NULL);

#define	APBDMA_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_bst, (sc)->sc_bsh, (reg))
#define	APBDMA_WRITE(sc, reg, val)					\
	bus_space_write_4((sc)->sc_bst, (sc)->sc_bsh, (reg), (val))

static int
tegra_apbdma_match(device_t parent, cfdata_t cf, void *aux)
{
	const char * const compatible[] = {
		"nvidia,tegra210-apbdma",
		"nvidia,tegra124-apbdma",
		NULL
	};
	struct fdt_attach_args * const faa = aux;

	return of_match_compatible(faa->faa_phandle, compatible);
}

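/*
 * Attach: map the controller registers, reset the block with its clock
 * enabled, stop every channel and mask all channel interrupts, set the
 * global enable bit, and register as an FDT DMA controller.
 */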
static void
tegra_apbdma_attach(device_t parent, device_t self, void *aux)
{
	struct tegra_apbdma_softc *sc = device_private(self);
	struct fdt_attach_args * const faa = aux;
	const int phandle = faa->faa_phandle;
	struct fdtbus_reset *rst;
	struct clk *clk;
	bus_addr_t addr;
	bus_size_t size;
	int error;
	u_int n;

	if (fdtbus_get_reg(phandle, 0, &addr, &size) != 0) {
		aprint_error(": couldn't get registers\n");
		return;
	}

	clk = fdtbus_clock_get_index(phandle, 0);
	if (clk == NULL) {
		aprint_error(": couldn't get clock\n");
		return;
	}
	rst = fdtbus_reset_get(phandle, "dma");
	if (rst == NULL) {
		aprint_error(": couldn't get reset dma\n");
		return;
	}

	fdtbus_reset_assert(rst);
	error = clk_enable(clk);
	if (error) {
		aprint_error(": couldn't enable clock dma: %d\n", error);
		return;
	}
	fdtbus_reset_deassert(rst);

	sc->sc_dev = self;
	sc->sc_bst = faa->faa_bst;
	sc->sc_phandle = phandle;
	error = bus_space_map(sc->sc_bst, addr, size, 0, &sc->sc_bsh);
	if (error) {
		aprint_error(": couldn't map %#" PRIxBUSADDR ": %d\n", addr, error);
		return;
	}
	for (n = 0; n < TEGRA_APBDMA_NCHAN; n++) {
		sc->sc_chan[n].ch_sc = sc;
		sc->sc_chan[n].ch_n = n;
	}

	aprint_naive("\n");
	aprint_normal(": APBDMA\n");

	/* Stop all channels */
	for (n = 0; n < TEGRA_APBDMA_NCHAN; n++)
		APBDMA_WRITE(sc, APBDMACHAN_CSR_REG(n), 0);

	/* Mask interrupts */
	APBDMA_WRITE(sc, APBDMA_IRQ_MASK_REG, 0);

	/* Global enable */
	APBDMA_WRITE(sc, APBDMA_COMMAND_REG, APBDMA_COMMAND_GEN);

	fdtbus_register_dma_controller(self, phandle, &tegra_apbdma_funcs);
}

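/*
 * Per-channel interrupt handler.  Writing the just-read status value back
 * to the STA register clears the EOC (transfer complete) status, after
 * which the client's completion callback is invoked.
 */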
static int
tegra_apbdma_intr(void *priv)
{
	struct tegra_apbdma_chan *ch = priv;
	struct tegra_apbdma_softc *sc = ch->ch_sc;
	const u_int n = ch->ch_n;
	uint32_t sta;

	sta = APBDMA_READ(sc, APBDMACHAN_STA_REG(n));
	APBDMA_WRITE(sc, APBDMACHAN_STA_REG(n), sta);	/* clear EOC */

	ch->ch_cb(ch->ch_cbarg);

	return 1;
}

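/*
 * Acquire a channel.  The FDT specifier is a single 32-bit big-endian cell
 * holding the APB peripheral request select.  The first idle channel (one
 * with no interrupt handler established) is claimed, its per-channel
 * interrupt is established, and the channel's interrupt is unmasked in the
 * controller.
 */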
static void *
tegra_apbdma_acquire(device_t dev, const void *data, size_t len,
    void (*cb)(void *), void *cbarg)
{
	struct tegra_apbdma_softc *sc = device_private(dev);
	struct tegra_apbdma_chan *ch;
	u_int n;
	char intrstr[128];

	if (len != 4)
		return NULL;

	const u_int req = be32dec(data);
	if (req > __SHIFTOUT_MASK(APBDMACHAN_CSR_REQ_SEL))
		return NULL;

	for (n = 0; n < TEGRA_APBDMA_NCHAN; n++) {
		ch = &sc->sc_chan[n];
		if (ch->ch_ih == NULL)
			break;
	}
	if (n >= TEGRA_APBDMA_NCHAN) {
		aprint_error_dev(dev, "no free DMA channel\n");
		return NULL;
	}

	if (!fdtbus_intr_str(sc->sc_phandle, n, intrstr, sizeof(intrstr))) {
		aprint_error_dev(dev, "failed to decode interrupt %u\n", n);
		return NULL;
	}

	ch->ch_ih = fdtbus_intr_establish(sc->sc_phandle, n, IPL_VM,
	    FDT_INTR_MPSAFE, tegra_apbdma_intr, ch);
	if (ch->ch_ih == NULL) {
		aprint_error_dev(dev, "failed to establish interrupt on %s\n",
		    intrstr);
		return NULL;
	}
	aprint_normal_dev(dev, "interrupting on %s (channel %u)\n", intrstr, n);

	ch->ch_cb = cb;
	ch->ch_cbarg = cbarg;
	ch->ch_req = req;

	/* Unmask interrupts for this channel */
	APBDMA_WRITE(sc, APBDMA_IRQ_MASK_SET_REG, __BIT(n));

	return ch;
}
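
/*
 * Release a channel: stop it, mask its interrupt in the controller, and
 * tear down the interrupt handler so the channel can be claimed by a
 * later acquire.
 */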
static void
tegra_apbdma_release(device_t dev, void *priv)
{
	struct tegra_apbdma_softc *sc = device_private(dev);
	struct tegra_apbdma_chan *ch = priv;
	const u_int n = ch->ch_n;

	KASSERT(ch->ch_ih != NULL);

	/* Halt the channel */
	APBDMA_WRITE(sc, APBDMACHAN_CSR_REG(n), 0);

	/* Mask interrupts for this channel */
	APBDMA_WRITE(sc, APBDMA_IRQ_MASK_CLR_REG, __BIT(n));

	fdtbus_intr_disestablish(sc->sc_phandle, ch->ch_ih);

	/* Mark the channel free for a later acquire */
	ch->ch_ih = NULL;
	ch->ch_cb = NULL;
	ch->ch_cbarg = NULL;
}

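/*
 * Program and start a single transfer.  Only one segment is supported (no
 * scatter-gather), and both addresses and the length must be 32-bit
 * aligned.  The memory-side (AHB) and device-side (APB) options from the
 * request are translated into the AHB_SEQ/APB_SEQ register fields, and the
 * channel is started by writing CSR with the enable bit set.
 */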
static int
tegra_apbdma_transfer(device_t dev, void *priv, struct fdtbus_dma_req *req)
{
	struct tegra_apbdma_softc *sc = device_private(dev);
	struct tegra_apbdma_chan *ch = priv;
	const u_int n = ch->ch_n;
	uint32_t csr = 0;
	uint32_t csre = 0;
	uint32_t ahb_seq = 0;
	uint32_t apb_seq = 0;

	/* Scatter-gather not supported */
	if (req->dreq_nsegs != 1)
		return EINVAL;

	/* Addresses must be aligned to 32-bits */
	if ((req->dreq_segs[0].ds_addr & 3) != 0 ||
	    (req->dreq_dev_phys & 3) != 0)
		return EINVAL;

	/* Length must be a multiple of 32-bits */
	if ((req->dreq_segs[0].ds_len & 3) != 0)
		return EINVAL;

	csr |= __SHIFTIN(ch->ch_req, APBDMACHAN_CSR_REQ_SEL);

	/*
	 * Set DMA transfer direction.
	 * APBDMACHAN_CSR_DIR=0 means "APB read to AHB write", and
	 * APBDMACHAN_CSR_DIR=1 means "AHB read to APB write".
	 */
	if (req->dreq_dir == FDT_DMA_WRITE)
		csr |= APBDMACHAN_CSR_DIR;

	/*
	 * Generate interrupt when DMA block transfer completes.
	 */
	if (req->dreq_block_irq)
		csr |= APBDMACHAN_CSR_IE_EOC;

	/*
	 * Single or multiple block transfer
	 */
	if (!req->dreq_block_multi)
		csr |= APBDMACHAN_CSR_ONCE;

	/*
	 * Flow control enable
	 */
	if (req->dreq_flow)
		csr |= APBDMACHAN_CSR_FLOW;

	/*
	 * Route interrupt to CPU. 1 = CPU, 0 = COP
	 */
	ahb_seq |= APBDMACHAN_AHB_SEQ_INTR_ENB;

	/*
	 * AHB is a 32-bit bus.
	 */
	if (req->dreq_mem_opt.opt_bus_width != 32)
		return EINVAL;
	ahb_seq |= __SHIFTIN(APBDMACHAN_AHB_SEQ_BUS_WIDTH_32,
			     APBDMACHAN_AHB_SEQ_BUS_WIDTH);

	/*
	 * AHB data swap.
	 */
	if (req->dreq_mem_opt.opt_swap)
		ahb_seq |= APBDMACHAN_AHB_SEQ_DATA_SWAP;

	/*
	 * AHB burst size.
	 */
	switch (req->dreq_mem_opt.opt_burst_len) {
	case 32:
		ahb_seq |= __SHIFTIN(APBDMACHAN_AHB_SEQ_BURST_1,
				     APBDMACHAN_AHB_SEQ_BURST);
		break;
	case 128:
		ahb_seq |= __SHIFTIN(APBDMACHAN_AHB_SEQ_BURST_4,
				     APBDMACHAN_AHB_SEQ_BURST);
		break;
	case 256:
		ahb_seq |= __SHIFTIN(APBDMACHAN_AHB_SEQ_BURST_8,
				     APBDMACHAN_AHB_SEQ_BURST);
		break;
	default:
		return EINVAL;
	}

	/*
	 * 2X double buffering mode. Only supported in run-multiple mode
	 * with no-wrap operations.
	 */
	if (req->dreq_mem_opt.opt_dblbuf) {
		if (req->dreq_mem_opt.opt_wrap_len != 0)
			return EINVAL;
		if (!req->dreq_block_multi)
			return EINVAL;
		ahb_seq |= APBDMACHAN_AHB_SEQ_DBL_BUF;
	}

	/*
	 * AHB address wrap.
	 */
	switch (req->dreq_mem_opt.opt_wrap_len) {
	case 0:
		ahb_seq |= __SHIFTIN(APBDMACHAN_AHB_SEQ_WRAP_NO_WRAP,
				     APBDMACHAN_AHB_SEQ_WRAP);
		break;
	case 128:
		ahb_seq |= __SHIFTIN(APBDMACHAN_AHB_SEQ_WRAP_32,
				     APBDMACHAN_AHB_SEQ_WRAP);
		break;
	case 256:
		ahb_seq |= __SHIFTIN(APBDMACHAN_AHB_SEQ_WRAP_64,
				     APBDMACHAN_AHB_SEQ_WRAP);
		break;
	case 512:
		ahb_seq |= __SHIFTIN(APBDMACHAN_AHB_SEQ_WRAP_128,
				     APBDMACHAN_AHB_SEQ_WRAP);
		break;
	case 1024:
		ahb_seq |= __SHIFTIN(APBDMACHAN_AHB_SEQ_WRAP_256,
				     APBDMACHAN_AHB_SEQ_WRAP);
		break;
	case 2048:
		ahb_seq |= __SHIFTIN(APBDMACHAN_AHB_SEQ_WRAP_512,
				     APBDMACHAN_AHB_SEQ_WRAP);
		break;
	case 4096:
		ahb_seq |= __SHIFTIN(APBDMACHAN_AHB_SEQ_WRAP_1024,
				     APBDMACHAN_AHB_SEQ_WRAP);
		break;
	case 8192:
		ahb_seq |= __SHIFTIN(APBDMACHAN_AHB_SEQ_WRAP_2048,
				     APBDMACHAN_AHB_SEQ_WRAP);
		break;
	default:
		return EINVAL;
	}

	/*
	 * APB bus width.
	 */
	switch (req->dreq_dev_opt.opt_bus_width) {
	case 8:
		apb_seq |= __SHIFTIN(APBDMACHAN_APB_SEQ_BUS_WIDTH_8,
				     APBDMACHAN_APB_SEQ_BUS_WIDTH);
		break;
	case 16:
		apb_seq |= __SHIFTIN(APBDMACHAN_APB_SEQ_BUS_WIDTH_16,
				     APBDMACHAN_APB_SEQ_BUS_WIDTH);
		break;
	case 32:
		apb_seq |= __SHIFTIN(APBDMACHAN_APB_SEQ_BUS_WIDTH_32,
				     APBDMACHAN_APB_SEQ_BUS_WIDTH);
		break;
	default:
		return EINVAL;
	}

	/*
	 * APB data swap.
	 */
	if (req->dreq_dev_opt.opt_swap)
		apb_seq |= APBDMACHAN_APB_SEQ_DATA_SWAP;

	/*
	 * APB address wrap-around window.
	 */
	switch (req->dreq_dev_opt.opt_wrap_len) {
	case 0:
		apb_seq |= __SHIFTIN(APBDMACHAN_APB_SEQ_WRAP_NO_WRAP,
				     APBDMACHAN_APB_SEQ_WRAP);
		break;
	case 4:
		apb_seq |= __SHIFTIN(APBDMACHAN_APB_SEQ_WRAP_1,
				     APBDMACHAN_APB_SEQ_WRAP);
		break;
	case 8:
		apb_seq |= __SHIFTIN(APBDMACHAN_APB_SEQ_WRAP_2,
				     APBDMACHAN_APB_SEQ_WRAP);
		break;
	case 16:
		apb_seq |= __SHIFTIN(APBDMACHAN_APB_SEQ_WRAP_4,
				     APBDMACHAN_APB_SEQ_WRAP);
		break;
	case 32:
		apb_seq |= __SHIFTIN(APBDMACHAN_APB_SEQ_WRAP_8,
				     APBDMACHAN_APB_SEQ_WRAP);
		break;
	case 64:
		apb_seq |= __SHIFTIN(APBDMACHAN_APB_SEQ_WRAP_16,
				     APBDMACHAN_APB_SEQ_WRAP);
		break;
	case 128:
		apb_seq |= __SHIFTIN(APBDMACHAN_APB_SEQ_WRAP_32,
				     APBDMACHAN_APB_SEQ_WRAP);
		break;
	case 256:
		apb_seq |= __SHIFTIN(APBDMACHAN_APB_SEQ_WRAP_64,
				     APBDMACHAN_APB_SEQ_WRAP);
		break;
	default:
		return EINVAL;
	}

	/*
	 * Program all channel registers before setting the channel enable bit.
	 */
	APBDMA_WRITE(sc, APBDMACHAN_AHB_PTR_REG(n), req->dreq_segs[0].ds_addr);
	APBDMA_WRITE(sc, APBDMACHAN_APB_PTR_REG(n), req->dreq_dev_phys);
	APBDMA_WRITE(sc, APBDMACHAN_AHB_SEQ_REG(n), ahb_seq);
	APBDMA_WRITE(sc, APBDMACHAN_APB_SEQ_REG(n), apb_seq);
	APBDMA_WRITE(sc, APBDMACHAN_WCOUNT_REG(n), req->dreq_segs[0].ds_len);
	APBDMA_WRITE(sc, APBDMACHAN_CSRE_REG(n), csre);
	APBDMA_WRITE(sc, APBDMACHAN_CSR_REG(n), csr | APBDMACHAN_CSR_ENB);

	return 0;
}

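/*
 * Halt a channel by clearing the enable bit in its CSR; the other channel
 * registers are left untouched.
 */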
static void
tegra_apbdma_halt(device_t dev, void *priv)
{
	struct tegra_apbdma_softc *sc = device_private(dev);
	struct tegra_apbdma_chan *ch = priv;
	const u_int n = ch->ch_n;
	uint32_t v;

	v = APBDMA_READ(sc, APBDMACHAN_CSR_REG(n));
	v &= ~APBDMACHAN_CSR_ENB;
	APBDMA_WRITE(sc, APBDMACHAN_CSR_REG(n), v);
}