sun4i_dma.c revision 1.5 1 /* $NetBSD: sun4i_dma.c,v 1.5 2021/01/18 02:35:49 thorpej Exp $ */
2
3 /*-
4 * Copyright (c) 2017 Jared McNeill <jmcneill (at) invisible.ca>
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
21 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
22 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
23 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
24 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE.
27 */
28
29 #include "opt_ddb.h"
30
31 #include <sys/cdefs.h>
32 __KERNEL_RCSID(0, "$NetBSD: sun4i_dma.c,v 1.5 2021/01/18 02:35:49 thorpej Exp $");
33
34 #include <sys/param.h>
35 #include <sys/bus.h>
36 #include <sys/device.h>
37 #include <sys/intr.h>
38 #include <sys/systm.h>
39 #include <sys/mutex.h>
40 #include <sys/bitops.h>
41 #include <sys/kmem.h>
42
43 #include <dev/fdt/fdtvar.h>
44
/*
 * The A10 DMA controller provides two classes of channels, eight of
 * each: "normal" (NDMA) and "dedicated" (DDMA).
 */
#define DMA_MAX_TYPES		2
#define DMA_TYPE_NORMAL		0
#define DMA_TYPE_DEDICATED	1
#define DMA_MAX_CHANNELS	8
#define DMA_MAX_DRQS		32

/* DRQ (DMA request) port number used for system memory (SDRAM). */
#define DRQ_TYPE_SDRAM		0x16

/*
 * Global interrupt registers.  Each channel owns a pair of status bits;
 * the odd bit of each pair is the end-of-transfer interrupt, so the
 * mask of all end bits is 0xaaaaaaaa.
 */
#define DMA_IRQ_EN_REG		0x00
#define DMA_IRQ_PEND_STAS_REG	0x04
#define DMA_IRQ_PEND_STAS_END_MASK 0xaaaaaaaa

/*
 * Normal DMA (NDMA) per-channel registers, 0x20 apart starting at 0x100.
 * Note that each DST_* control field sits exactly 16 bits above its
 * SRC_* counterpart; the transfer code relies on this.
 */
#define NDMA_CTRL_REG(n)	(0x100 + (n) * 0x20)
#define NDMA_CTRL_LOAD		__BIT(31)
#define NDMA_CTRL_CONTI_EN	__BIT(30)
#define NDMA_CTRL_WAIT_STATE	__BITS(29,27)
#define NDMA_CTRL_DST_DATA_WIDTH __BITS(26,25)
#define NDMA_CTRL_DST_BST_LEN	__BITS(24,23)
#define NDMA_CTRL_DST_ADDR_TYPE	__BIT(21)
#define NDMA_CTRL_DST_DRQ_TYPE	__BITS(20,16)
#define NDMA_CTRL_BC_MODE_SEL	__BIT(15)
#define NDMA_CTRL_SRC_DATA_WIDTH __BITS(10,9)
#define NDMA_CTRL_SRC_BST_LEN	__BITS(8,7)
#define NDMA_CTRL_SRC_ADDR_TYPE	__BIT(5)
#define NDMA_CTRL_SRC_DRQ_TYPE	__BITS(4,0)
#define NDMA_SRC_ADDR_REG(n)	(0x100 + (n) * 0x20 + 0x4)
#define NDMA_DEST_ADDR_REG(n)	(0x100 + (n) * 0x20 + 0x8)
#define NDMA_BC_REG(n)		(0x100 + (n) * 0x20 + 0xc)

/*
 * Dedicated DMA (DDMA) per-channel registers, 0x20 apart starting at
 * 0x300.  Same 16-bit SRC/DST field offset property as NDMA above.
 */
#define DDMA_CTRL_REG(n)	(0x300 + (n) * 0x20)
#define DDMA_CTRL_LOAD		__BIT(31)
#define DDMA_CTRL_BSY_STA	__BIT(30)
#define DDMA_CTRL_CONTI_EN	__BIT(29)
#define DDMA_CTRL_DST_DATA_WIDTH __BITS(26,25)
#define DDMA_CTRL_DST_BST_LEN	__BITS(24,23)
#define DDMA_CTRL_DST_ADDR_MODE	__BITS(22,21)
#define DDMA_CTRL_DST_DRQ_TYPE	__BITS(20,16)
#define DDMA_CTRL_BC_MODE_SEL	__BIT(15)
#define DDMA_CTRL_SRC_DATA_WIDTH __BITS(10,9)
#define DDMA_CTRL_SRC_BST_LEN	__BITS(8,7)
#define DDMA_CTRL_SRC_ADDR_MODE	__BITS(6,5)
#define DDMA_CTRL_SRC_DRQ_TYPE	__BITS(4,0)
#define DDMA_SRC_ADDR_REG(n)	(0x300 + (n) * 0x20 + 0x4)
#define DDMA_DEST_ADDR_REG(n)	(0x300 + (n) * 0x20 + 0x8)
#define DDMA_BC_REG(n)		(0x300 + (n) * 0x20 + 0xc)
#define DDMA_PARA_REG(n)	(0x300 + (n) * 0x20 + 0x18)
#define DDMA_PARA_DST_DATA_BLK_SIZE __BITS(31,24)
#define DDMA_PARA_DST_WAIT_CLK_CYC __BITS(23,16)
#define DDMA_PARA_SRC_DATA_BLK_SIZE __BITS(15,8)
#define DDMA_PARA_SRC_WAIT_CLK_CYC __BITS(7,0)
/* Fixed DDMA timing: 1-unit data blocks, 2 wait cycles on each side. */
#define DDMA_PARA_VALUE \
	(__SHIFTIN(1, DDMA_PARA_DST_DATA_BLK_SIZE) | \
	 __SHIFTIN(1, DDMA_PARA_SRC_DATA_BLK_SIZE) | \
	 __SHIFTIN(2, DDMA_PARA_DST_WAIT_CLK_CYC) | \
	 __SHIFTIN(2, DDMA_PARA_SRC_WAIT_CLK_CYC))
98
/* Device tree "compatible" strings accepted by this driver. */
static const struct device_compatible_entry compat_data[] = {
	{ .compat = "allwinner,sun4i-a10-dma" },

	{ 0 }
};
104
/*
 * Per-channel state.  A channel is free when ch_callback is NULL;
 * allocation is serialized by the softc's sc_lock.
 */
struct sun4idma_channel {
	uint8_t ch_type;		/* DMA_TYPE_NORMAL or DMA_TYPE_DEDICATED */
	uint8_t ch_index;		/* channel number within its type */
	uint32_t ch_irqmask;		/* this channel's end-of-transfer IRQ bit */
	void (*ch_callback)(void *);	/* completion callback; NULL = free */
	void *ch_callbackarg;		/* argument passed to ch_callback */
	u_int ch_drq;			/* peripheral DRQ port for this client */
};
113
/* Per-device software state. */
struct sun4idma_softc {
	device_t sc_dev;
	bus_space_tag_t sc_bst;
	bus_space_handle_t sc_bsh;
	bus_dma_tag_t sc_dmat;
	int sc_phandle;			/* device tree node handle */
	void *sc_ih;			/* established interrupt handle */

	kmutex_t sc_lock;		/* protects channel alloc/free and IRQ_EN */

	/* Channel table, indexed by [type][channel]. */
	struct sun4idma_channel sc_chan[DMA_MAX_TYPES][DMA_MAX_CHANNELS];
};

/* 32-bit register accessors. */
#define DMA_READ(sc, reg) \
	bus_space_read_4((sc)->sc_bst, (sc)->sc_bsh, (reg))
#define DMA_WRITE(sc, reg, val) \
	bus_space_write_4((sc)->sc_bst, (sc)->sc_bsh, (reg), (val))
131
132 static void *
133 sun4idma_acquire(device_t dev, const void *data, size_t len,
134 void (*cb)(void *), void *cbarg)
135 {
136 struct sun4idma_softc *sc = device_private(dev);
137 struct sun4idma_channel *ch = NULL;
138 const uint32_t *specifier = data;
139 uint32_t irqen;
140 uint8_t index;
141
142 if (len != 8)
143 return NULL;
144
145 const u_int type = be32toh(specifier[0]);
146 const u_int drq = be32toh(specifier[1]);
147
148 if (type >= DMA_MAX_TYPES || drq >= DMA_MAX_DRQS)
149 return NULL;
150
151 mutex_enter(&sc->sc_lock);
152
153 for (index = 0; index < DMA_MAX_CHANNELS; index++) {
154 if (sc->sc_chan[type][index].ch_callback == NULL) {
155 ch = &sc->sc_chan[type][index];
156 ch->ch_callback = cb;
157 ch->ch_callbackarg = cbarg;
158 ch->ch_drq = drq;
159
160 irqen = DMA_READ(sc, DMA_IRQ_EN_REG);
161 irqen |= ch->ch_irqmask;
162 DMA_WRITE(sc, DMA_IRQ_EN_REG, irqen);
163
164 break;
165 }
166 }
167
168 mutex_exit(&sc->sc_lock);
169
170 return ch;
171 }
172
173 static void
174 sun4idma_release(device_t dev, void *priv)
175 {
176 struct sun4idma_softc *sc = device_private(dev);
177 struct sun4idma_channel *ch = priv;
178 uint32_t irqen;
179
180 mutex_enter(&sc->sc_lock);
181
182 irqen = DMA_READ(sc, DMA_IRQ_EN_REG);
183 irqen &= ~ch->ch_irqmask;
184 DMA_WRITE(sc, DMA_IRQ_EN_REG, irqen);
185
186 ch->ch_callback = NULL;
187 ch->ch_callbackarg = NULL;
188
189 mutex_exit(&sc->sc_lock);
190 }
191
192 static int
193 sun4idma_transfer_ndma(struct sun4idma_softc *sc, struct sun4idma_channel *ch,
194 struct fdtbus_dma_req *req)
195 {
196 uint32_t cfg, mem_cfg, dev_cfg, src, dst;
197 uint32_t mem_width, dev_width, mem_burst, dev_burst;
198
199 mem_width = req->dreq_mem_opt.opt_bus_width >> 4;
200 dev_width = req->dreq_dev_opt.opt_bus_width >> 4;
201 mem_burst = req->dreq_mem_opt.opt_burst_len == 1 ? 0 :
202 (req->dreq_mem_opt.opt_burst_len >> 3) + 1;
203 dev_burst = req->dreq_dev_opt.opt_burst_len == 1 ? 0 :
204 (req->dreq_dev_opt.opt_burst_len >> 3) + 1;
205
206 mem_cfg = __SHIFTIN(mem_width, NDMA_CTRL_SRC_DATA_WIDTH) |
207 __SHIFTIN(mem_burst, NDMA_CTRL_SRC_BST_LEN) |
208 __SHIFTIN(DRQ_TYPE_SDRAM, NDMA_CTRL_SRC_DRQ_TYPE);
209 dev_cfg = __SHIFTIN(dev_width, NDMA_CTRL_SRC_DATA_WIDTH) |
210 __SHIFTIN(dev_burst, NDMA_CTRL_SRC_BST_LEN) |
211 __SHIFTIN(ch->ch_drq, NDMA_CTRL_SRC_DRQ_TYPE) |
212 NDMA_CTRL_SRC_ADDR_TYPE;
213
214 if (req->dreq_dir == FDT_DMA_READ) {
215 src = req->dreq_dev_phys;
216 dst = req->dreq_segs[0].ds_addr;
217 cfg = mem_cfg << 16 | dev_cfg;
218 } else {
219 src = req->dreq_segs[0].ds_addr;
220 dst = req->dreq_dev_phys;
221 cfg = dev_cfg << 16 | mem_cfg;
222 }
223
224 DMA_WRITE(sc, NDMA_SRC_ADDR_REG(ch->ch_index), src);
225 DMA_WRITE(sc, NDMA_DEST_ADDR_REG(ch->ch_index), dst);
226 DMA_WRITE(sc, NDMA_BC_REG(ch->ch_index), req->dreq_segs[0].ds_len);
227 DMA_WRITE(sc, NDMA_CTRL_REG(ch->ch_index), cfg | NDMA_CTRL_LOAD);
228
229 return 0;
230 }
231
232 static int
233 sun4idma_transfer_ddma(struct sun4idma_softc *sc, struct sun4idma_channel *ch,
234 struct fdtbus_dma_req *req)
235 {
236 uint32_t cfg, mem_cfg, dev_cfg, src, dst;
237 uint32_t mem_width, dev_width, mem_burst, dev_burst;
238
239 mem_width = req->dreq_mem_opt.opt_bus_width >> 4;
240 dev_width = req->dreq_dev_opt.opt_bus_width >> 4;
241 mem_burst = req->dreq_mem_opt.opt_burst_len == 1 ? 0 :
242 (req->dreq_mem_opt.opt_burst_len >> 3) + 1;
243 dev_burst = req->dreq_dev_opt.opt_burst_len == 1 ? 0 :
244 (req->dreq_dev_opt.opt_burst_len >> 3) + 1;
245
246 mem_cfg = __SHIFTIN(mem_width, DDMA_CTRL_SRC_DATA_WIDTH) |
247 __SHIFTIN(mem_burst, DDMA_CTRL_SRC_BST_LEN) |
248 __SHIFTIN(DRQ_TYPE_SDRAM, DDMA_CTRL_SRC_DRQ_TYPE) |
249 __SHIFTIN(0, DDMA_CTRL_SRC_ADDR_MODE);
250 dev_cfg = __SHIFTIN(dev_width, DDMA_CTRL_SRC_DATA_WIDTH) |
251 __SHIFTIN(dev_burst, DDMA_CTRL_SRC_BST_LEN) |
252 __SHIFTIN(ch->ch_drq, DDMA_CTRL_SRC_DRQ_TYPE) |
253 __SHIFTIN(1, DDMA_CTRL_SRC_ADDR_MODE);
254
255 if (req->dreq_dir == FDT_DMA_READ) {
256 src = req->dreq_dev_phys;
257 dst = req->dreq_segs[0].ds_addr;
258 cfg = mem_cfg << 16 | dev_cfg;
259 } else {
260 src = req->dreq_segs[0].ds_addr;
261 dst = req->dreq_dev_phys;
262 cfg = dev_cfg << 16 | mem_cfg;
263 }
264
265 DMA_WRITE(sc, DDMA_SRC_ADDR_REG(ch->ch_index), src);
266 DMA_WRITE(sc, DDMA_DEST_ADDR_REG(ch->ch_index), dst);
267 DMA_WRITE(sc, DDMA_BC_REG(ch->ch_index), req->dreq_segs[0].ds_len);
268 DMA_WRITE(sc, DDMA_PARA_REG(ch->ch_index), DDMA_PARA_VALUE);
269 DMA_WRITE(sc, DDMA_CTRL_REG(ch->ch_index), cfg | DDMA_CTRL_LOAD);
270
271 return 0;
272 }
273
274 static int
275 sun4idma_transfer(device_t dev, void *priv, struct fdtbus_dma_req *req)
276 {
277 struct sun4idma_softc *sc = device_private(dev);
278 struct sun4idma_channel *ch = priv;
279
280 if (req->dreq_nsegs != 1)
281 return EINVAL;
282
283 if (ch->ch_type == DMA_TYPE_NORMAL)
284 return sun4idma_transfer_ndma(sc, ch, req);
285 else
286 return sun4idma_transfer_ddma(sc, ch, req);
287 }
288
289 static void
290 sun4idma_halt(device_t dev, void *priv)
291 {
292 struct sun4idma_softc *sc = device_private(dev);
293 struct sun4idma_channel *ch = priv;
294 uint32_t val;
295
296 if (ch->ch_type == DMA_TYPE_NORMAL) {
297 val = DMA_READ(sc, NDMA_CTRL_REG(ch->ch_index));
298 val &= ~NDMA_CTRL_LOAD;
299 DMA_WRITE(sc, NDMA_CTRL_REG(ch->ch_index), val);
300 } else {
301 val = DMA_READ(sc, DDMA_CTRL_REG(ch->ch_index));
302 val &= ~DDMA_CTRL_LOAD;
303 DMA_WRITE(sc, DDMA_CTRL_REG(ch->ch_index), val);
304 }
305 }
306
/* fdtbus DMA controller method table. */
static const struct fdtbus_dma_controller_func sun4idma_funcs = {
	.acquire = sun4idma_acquire,
	.release = sun4idma_release,
	.transfer = sun4idma_transfer,
	.halt = sun4idma_halt
};
313
314 static int
315 sun4idma_intr(void *priv)
316 {
317 struct sun4idma_softc *sc = priv;
318 uint32_t pend, mask, bit;
319 uint8_t type, index;
320
321 pend = DMA_READ(sc, DMA_IRQ_PEND_STAS_REG);
322 if (pend == 0)
323 return 0;
324
325 DMA_WRITE(sc, DMA_IRQ_PEND_STAS_REG, pend);
326
327 pend &= DMA_IRQ_PEND_STAS_END_MASK;
328
329 while ((bit = ffs32(pend)) != 0) {
330 mask = __BIT(bit - 1);
331 pend &= ~mask;
332 type = ((bit - 1) / 2) / 8;
333 index = ((bit - 1) / 2) % 8;
334
335 if (sc->sc_chan[type][index].ch_callback == NULL)
336 continue;
337 sc->sc_chan[type][index].ch_callback(
338 sc->sc_chan[type][index].ch_callbackarg);
339 }
340
341 return 1;
342 }
343
/*
 * sun4idma_match --
 *	Autoconf match: accept FDT nodes whose "compatible" property
 *	appears in compat_data.
 */
static int
sun4idma_match(device_t parent, cfdata_t cf, void *aux)
{
	struct fdt_attach_args * const faa = aux;

	return of_match_compat_data(faa->faa_phandle, compat_data);
}
351
352 static void
353 sun4idma_attach(device_t parent, device_t self, void *aux)
354 {
355 struct sun4idma_softc * const sc = device_private(self);
356 struct fdt_attach_args * const faa = aux;
357 const int phandle = faa->faa_phandle;
358 struct clk *clk;
359 char intrstr[128];
360 bus_addr_t addr;
361 bus_size_t size;
362 u_int index, type;
363
364 if (fdtbus_get_reg(phandle, 0, &addr, &size) != 0) {
365 aprint_error(": couldn't get registers\n");
366 return;
367 }
368
369 if ((clk = fdtbus_clock_get_index(phandle, 0)) == NULL ||
370 clk_enable(clk) != 0) {
371 aprint_error(": couldn't enable clock\n");
372 return;
373 }
374
375 sc->sc_dev = self;
376 sc->sc_phandle = phandle;
377 sc->sc_dmat = faa->faa_dmat;
378 sc->sc_bst = faa->faa_bst;
379 if (bus_space_map(sc->sc_bst, addr, size, 0, &sc->sc_bsh) != 0) {
380 aprint_error(": couldn't map registers\n");
381 return;
382 }
383 mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_SCHED);
384
385 if (!fdtbus_intr_str(phandle, 0, intrstr, sizeof(intrstr))) {
386 aprint_error(": failed to decode interrupt\n");
387 return;
388 }
389
390 aprint_naive("\n");
391 aprint_normal(": DMA controller\n");
392
393 DMA_WRITE(sc, DMA_IRQ_EN_REG, 0);
394 DMA_WRITE(sc, DMA_IRQ_PEND_STAS_REG, ~0);
395
396 for (type = 0; type < DMA_MAX_TYPES; type++) {
397 for (index = 0; index < DMA_MAX_CHANNELS; index++) {
398 struct sun4idma_channel *ch = &sc->sc_chan[type][index];
399 ch->ch_type = type;
400 ch->ch_index = index;
401 ch->ch_irqmask = __BIT((type * 16) + (index * 2) + 1);
402 ch->ch_callback = NULL;
403 ch->ch_callbackarg = NULL;
404
405 if (type == DMA_TYPE_NORMAL)
406 DMA_WRITE(sc, NDMA_CTRL_REG(index), 0);
407 else
408 DMA_WRITE(sc, DDMA_CTRL_REG(index), 0);
409 }
410 }
411
412 sc->sc_ih = fdtbus_intr_establish_xname(phandle, 0, IPL_SCHED,
413 FDT_INTR_MPSAFE, sun4idma_intr, sc, device_xname(sc->sc_dev));
414 if (sc->sc_ih == NULL) {
415 aprint_error_dev(sc->sc_dev,
416 "couldn't establish interrupt on %s\n", intrstr);
417 return;
418 }
419 aprint_normal_dev(sc->sc_dev, "interrupting on %s\n", intrstr);
420
421 fdtbus_register_dma_controller(self, phandle, &sun4idma_funcs);
422 }
423
/* Autoconfiguration glue: no detach support. */
CFATTACH_DECL_NEW(sun4i_dma, sizeof(struct sun4idma_softc),
    sun4idma_match, sun4idma_attach, NULL, NULL);
426