/*	$NetBSD: pxa2x0_dmac.c,v 1.7 2011/06/09 17:29:42 nonaka Exp $	*/

/*
 * Copyright (c) 2003, 2005 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Steve C. Woodford for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed for the NetBSD Project by
 *      Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "opt_pxa2x0_dmac.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/queue.h>

#include <uvm/uvm_param.h>	/* For PAGE_SIZE */

#include <machine/intr.h>
#include <machine/bus.h>

#include <dev/dmover/dmovervar.h>

#include <arm/xscale/pxa2x0reg.h>
#include <arm/xscale/pxa2x0var.h>
#include <arm/xscale/pxa2x0cpu.h>

#include <arm/xscale/pxa2x0_dmac.h>

#include "locators.h"

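/*
 * With PXA2X0_DMAC_FIXED_PRIORITY defined, all requests are treated as a
 * single priority class.  Otherwise, requests are queued at one of three
 * priorities, matching the DMAC's high/medium/low channel groups.
 */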
#undef DMAC_N_PRIORITIES
#ifndef PXA2X0_DMAC_FIXED_PRIORITY
#define	DMAC_N_PRIORITIES 3
#define	DMAC_PRI(p)	(p)
#else
#define	DMAC_N_PRIORITIES 1
#define	DMAC_PRI(p)	(0)
#endif

struct dmac_desc {
	SLIST_ENTRY(dmac_desc) d_link;
	struct pxa2x0_dma_desc *d_desc;
	paddr_t d_desc_pa;
};

/*
 * This is used to maintain state for an in-progress transfer.
 * It tracks the current DMA segment, and offset within the segment
 * in the case where we had to split a request into several DMA
 * operations due to a shortage of DMAC descriptors.
 */
struct dmac_desc_segs {
	bus_dma_segment_t *ds_curseg;	/* Current segment */
	u_int ds_nsegs;			/* Remaining segments */
	bus_size_t ds_offset;		/* Offset within current seg */
};

SIMPLEQ_HEAD(dmac_xfer_state_head, dmac_xfer_state);

struct dmac_xfer_state {
	struct dmac_xfer dxs_xfer;
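	/* Shorthand aliases for the public fields of the embedded dmac_xfer */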
#define	dxs_cookie	dxs_xfer.dx_cookie
#define	dxs_done	dxs_xfer.dx_done
#define	dxs_priority	dxs_xfer.dx_priority
#define	dxs_peripheral	dxs_xfer.dx_peripheral
#define	dxs_flow	dxs_xfer.dx_flow
#define	dxs_dev_width	dxs_xfer.dx_dev_width
#define	dxs_burst_size	dxs_xfer.dx_burst_size
#define	dxs_loop_notify	dxs_xfer.dx_loop_notify
#define	dxs_desc	dxs_xfer.dx_desc
	SIMPLEQ_ENTRY(dmac_xfer_state) dxs_link;
	SLIST_HEAD(, dmac_desc) dxs_descs;
	struct dmac_xfer_state_head *dxs_queue;
	u_int dxs_channel;
#define	DMAC_NO_CHANNEL	(~0)
	u_int32_t dxs_dcmd;
	struct dmac_desc_segs dxs_segs[2];
	bool dxs_misaligned_flag;
};


#if (PXA2X0_DMAC_DMOVER_CONCURRENCY > 0)
/*
 * This structure is used to maintain state for the dmover(9) backend
 * part of the driver. We can have a number of concurrent dmover
 * requests in progress at any given time. The exact number is given
 * by the PXA2X0_DMAC_DMOVER_CONCURRENCY compile-time constant. One of
 * these structures is allocated for each concurrent request.
 */
struct dmac_dmover_state {
	LIST_ENTRY(dmac_dmover_state) ds_link;	/* List of idle dmover chans */
	struct pxadmac_softc *ds_sc;		/* Uplink to pxadmac softc */
	struct dmover_request *ds_current;	/* Current dmover request */
	struct dmac_xfer_state ds_xfer;
	bus_dmamap_t ds_src_dmap;
	bus_dmamap_t ds_dst_dmap;
/*
 * There is no inherent size limit in the DMA engine.
 * The following limit is somewhat arbitrary.
 */
#define	DMAC_DMOVER_MAX_XFER	(8*1024*1024)
#if 0
/* This would require 16KB * 2 just for segments... */
#define	DMAC_DMOVER_NSEGS	((DMAC_DMOVER_MAX_XFER / PAGE_SIZE) + 1)
#else
#define	DMAC_DMOVER_NSEGS	512	/* XXX: Only enough for 2MB */
#endif
	bus_dma_segment_t ds_zero_seg;	/* Used for zero-fill ops */
	void *ds_zero_va;
	bus_dma_segment_t ds_fill_seg;	/* Used for fill8 ops */
	void *ds_fill_va;

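	/* Shorthand aliases into the embedded transfer's src/dst descriptors */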
#define	ds_src_addr_hold	ds_xfer.dxs_desc[DMAC_DESC_SRC].xd_addr_hold
#define	ds_dst_addr_hold	ds_xfer.dxs_desc[DMAC_DESC_DST].xd_addr_hold
#define	ds_src_burst		ds_xfer.dxs_desc[DMAC_DESC_SRC].xd_burst_size
#define	ds_dst_burst		ds_xfer.dxs_desc[DMAC_DESC_DST].xd_burst_size
#define	ds_src_dma_segs		ds_xfer.dxs_desc[DMAC_DESC_SRC].xd_dma_segs
#define	ds_dst_dma_segs		ds_xfer.dxs_desc[DMAC_DESC_DST].xd_dma_segs
#define	ds_src_nsegs		ds_xfer.dxs_desc[DMAC_DESC_SRC].xd_nsegs
#define	ds_dst_nsegs		ds_xfer.dxs_desc[DMAC_DESC_DST].xd_nsegs
};

/*
 * Overall dmover(9) backend state
 */
struct dmac_dmover {
	struct dmover_backend dd_backend;
	int dd_busy;
	LIST_HEAD(, dmac_dmover_state) dd_free;
	struct dmac_dmover_state dd_state[PXA2X0_DMAC_DMOVER_CONCURRENCY];
};
#endif

struct pxadmac_softc {
	device_t sc_dev;
	bus_space_tag_t sc_bust;
	bus_dma_tag_t sc_dmat;
	bus_space_handle_t sc_bush;
	void *sc_irqcookie;

	/*
	 * Queue of pending requests, per priority
	 */
	struct dmac_xfer_state_head sc_queue[DMAC_N_PRIORITIES];

	/*
	 * Queue of pending requests, per peripheral
	 */
	struct {
		struct dmac_xfer_state_head sp_queue;
		u_int sp_busy;
	} sc_periph[DMAC_N_PERIPH];

	/*
	 * Active requests, per channel.
	 */
	struct dmac_xfer_state *sc_active[DMAC_N_CHANNELS];

	/*
	 * Channel Priority Allocation
	 */
	struct {
		u_int8_t p_first;
		u_int8_t p_pri[DMAC_N_CHANNELS];
	} sc_prio[DMAC_N_PRIORITIES];
#define	DMAC_PRIO_END	0xffu	/* List terminator; must fit in u_int8_t */
	u_int8_t sc_channel_priority[DMAC_N_CHANNELS];

	/*
	 * DMA descriptor management
	 */
	bus_dmamap_t sc_desc_map;
	bus_dma_segment_t sc_segs;
#define	DMAC_N_DESCS	((PAGE_SIZE * 2) / sizeof(struct pxa2x0_dma_desc))
#define	DMAC_DESCS_SIZE	(DMAC_N_DESCS * sizeof(struct pxa2x0_dma_desc))
	struct dmac_desc sc_all_descs[DMAC_N_DESCS];
	u_int sc_free_descs;
	SLIST_HEAD(, dmac_desc) sc_descs;

#if (PXA2X0_DMAC_DMOVER_CONCURRENCY > 0)
	/*
	 * dmover(9) backend state
	 */
	struct dmac_dmover sc_dmover;
#endif
};

static int	pxadmac_match(device_t, cfdata_t, void *);
static void	pxadmac_attach(device_t, device_t, void *);

CFATTACH_DECL_NEW(pxadmac, sizeof(struct pxadmac_softc),
    pxadmac_match, pxadmac_attach, NULL, NULL);

static struct pxadmac_softc *pxadmac_sc;

static void	dmac_start(struct pxadmac_softc *, dmac_priority_t);
static int	dmac_continue_xfer(struct pxadmac_softc *, struct dmac_xfer_state *);
static u_int	dmac_channel_intr(struct pxadmac_softc *, u_int);
static int	dmac_intr(void *);

#if (PXA2X0_DMAC_DMOVER_CONCURRENCY > 0)
static void	dmac_dmover_attach(struct pxadmac_softc *);
static void	dmac_dmover_process(struct dmover_backend *);
static void	dmac_dmover_run(struct dmover_backend *);
static void	dmac_dmover_done(struct dmac_xfer *, int);
#endif

static inline u_int32_t
dmac_reg_read(struct pxadmac_softc *sc, int reg)
{

	return (bus_space_read_4(sc->sc_bust, sc->sc_bush, reg));
}

static inline void
dmac_reg_write(struct pxadmac_softc *sc, int reg, u_int32_t val)
{

	bus_space_write_4(sc->sc_bust, sc->sc_bush, reg, val);
}

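/*
 * Free channels of each priority are kept on a simple list: p_first holds
 * the first free channel number (or DMAC_PRIO_END when the list is empty)
 * and p_pri[c] links each free channel to the next one.
 */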
static inline int
dmac_allocate_channel(struct pxadmac_softc *sc, dmac_priority_t priority,
    u_int *chanp)
{
	u_int channel;

	KDASSERT((u_int)priority < DMAC_N_PRIORITIES);

	if ((channel = sc->sc_prio[priority].p_first) == DMAC_PRIO_END)
		return (-1);
	sc->sc_prio[priority].p_first = sc->sc_prio[priority].p_pri[channel];

	*chanp = channel;
	return (0);
}

static inline void
dmac_free_channel(struct pxadmac_softc *sc, dmac_priority_t priority,
    u_int channel)
{

	KDASSERT((u_int)priority < DMAC_N_PRIORITIES);

	sc->sc_prio[priority].p_pri[channel] = sc->sc_prio[priority].p_first;
	sc->sc_prio[priority].p_first = channel;
}

static int
pxadmac_match(device_t parent, cfdata_t cf, void *aux)
{
	struct pxaip_attach_args *pxa = aux;

	if (pxadmac_sc || pxa->pxa_addr != PXA2X0_DMAC_BASE ||
	    pxa->pxa_intr != PXA2X0_INT_DMA)
		return (0);

	pxa->pxa_size = PXA2X0_DMAC_SIZE;

	return (1);
}

static void
pxadmac_attach(device_t parent, device_t self, void *aux)
{
	struct pxadmac_softc *sc = device_private(self);
	struct pxaip_attach_args *pxa = aux;
	struct pxa2x0_dma_desc *dd;
	int i, nsegs;

	sc->sc_dev = self;
	sc->sc_bust = pxa->pxa_iot;
	sc->sc_dmat = pxa->pxa_dmat;

	aprint_normal(": DMA Controller\n");

	if (bus_space_map(sc->sc_bust, pxa->pxa_addr, pxa->pxa_size, 0,
	    &sc->sc_bush)) {
		aprint_error_dev(self, "Can't map registers!\n");
		return;
	}

	pxadmac_sc = sc;

	/*
	 * Make sure the DMAC is quiescent
	 */
	for (i = 0; i < DMAC_N_CHANNELS; i++) {
		dmac_reg_write(sc, DMAC_DCSR(i), 0);
		dmac_reg_write(sc, DMAC_DRCMR(i), 0);
		sc->sc_active[i] = NULL;
	}
	dmac_reg_write(sc, DMAC_DINT,
	    dmac_reg_read(sc, DMAC_DINT) & DMAC_DINT_MASK);

	/*
	 * Initialise the request queues
	 */
	for (i = 0; i < DMAC_N_PRIORITIES; i++)
		SIMPLEQ_INIT(&sc->sc_queue[i]);

	/*
	 * Initialise the per-peripheral pending request queues
	 */
	for (i = 0; i < DMAC_N_PERIPH; i++) {
		sc->sc_periph[i].sp_busy = 0;
		SIMPLEQ_INIT(&sc->sc_periph[i].sp_queue);
	}

	/*
	 * Initialise the channel priority metadata
	 */
	memset(sc->sc_prio, DMAC_PRIO_END, sizeof(sc->sc_prio));
	for (i = 0; i < DMAC_N_CHANNELS; i++) {
#if (DMAC_N_PRIORITIES > 1)
		if (i <= 3)
			dmac_free_channel(sc, DMAC_PRIORITY_HIGH, i);
		else
		if (i <= 7)
			dmac_free_channel(sc, DMAC_PRIORITY_MED, i);
		else
			dmac_free_channel(sc, DMAC_PRIORITY_LOW, i);
#else
		dmac_free_channel(sc, DMAC_PRIORITY_NORMAL, i);
#endif
	}

	/*
	 * Initialise DMA descriptors and associated metadata
	 */
	if (bus_dmamem_alloc(sc->sc_dmat, DMAC_DESCS_SIZE, DMAC_DESCS_SIZE, 0,
	    &sc->sc_segs, 1, &nsegs, BUS_DMA_NOWAIT))
		panic("dmac_pxaip_attach: bus_dmamem_alloc failed");

	if (bus_dmamem_map(sc->sc_dmat, &sc->sc_segs, 1, DMAC_DESCS_SIZE,
	    (void *)&dd, BUS_DMA_COHERENT|BUS_DMA_NOCACHE))
		panic("dmac_pxaip_attach: bus_dmamem_map failed");

	if (bus_dmamap_create(sc->sc_dmat, DMAC_DESCS_SIZE, 1,
	    DMAC_DESCS_SIZE, 0, BUS_DMA_NOWAIT, &sc->sc_desc_map))
		panic("dmac_pxaip_attach: bus_dmamap_create failed");

	if (bus_dmamap_load(sc->sc_dmat, sc->sc_desc_map, (void *)dd,
	    DMAC_DESCS_SIZE, NULL, BUS_DMA_NOWAIT))
		panic("dmac_pxaip_attach: bus_dmamap_load failed");

	SLIST_INIT(&sc->sc_descs);
	sc->sc_free_descs = DMAC_N_DESCS;
	for (i = 0; i < DMAC_N_DESCS; i++, dd++) {
		SLIST_INSERT_HEAD(&sc->sc_descs, &sc->sc_all_descs[i], d_link);
		sc->sc_all_descs[i].d_desc = dd;
		sc->sc_all_descs[i].d_desc_pa =
		    sc->sc_segs.ds_addr + (sizeof(struct pxa2x0_dma_desc) * i);
	}

	sc->sc_irqcookie = pxa2x0_intr_establish(pxa->pxa_intr, IPL_BIO,
	    dmac_intr, sc);
	KASSERT(sc->sc_irqcookie != NULL);

#if (PXA2X0_DMAC_DMOVER_CONCURRENCY > 0)
	dmac_dmover_attach(sc);
#endif
}

#if (PXA2X0_DMAC_DMOVER_CONCURRENCY > 0)
/*
 * We support the following dmover(9) operations
 */
static const struct dmover_algdesc dmac_dmover_algdescs[] = {
	{DMOVER_FUNC_ZERO, NULL, 0},	/* Zero-fill */
	{DMOVER_FUNC_FILL8, NULL, 0},	/* Fill with 8-bit immediate value */
	{DMOVER_FUNC_COPY, NULL, 1}	/* Copy */
};
#define	DMAC_DMOVER_ALGDESC_COUNT \
	(sizeof(dmac_dmover_algdescs) / sizeof(dmac_dmover_algdescs[0]))

static void
dmac_dmover_attach(struct pxadmac_softc *sc)
{
	struct dmac_dmover *dd = &sc->sc_dmover;
	struct dmac_dmover_state *ds;
	int i, dummy;

	/*
	 * Describe ourselves to the dmover(9) code
	 */
	dd->dd_backend.dmb_name = "pxadmac";
	dd->dd_backend.dmb_speed = 100*1024*1024;	/* XXX */
	dd->dd_backend.dmb_cookie = sc;
	dd->dd_backend.dmb_algdescs = dmac_dmover_algdescs;
	dd->dd_backend.dmb_nalgdescs = DMAC_DMOVER_ALGDESC_COUNT;
	dd->dd_backend.dmb_process = dmac_dmover_process;
	dd->dd_busy = 0;
	LIST_INIT(&dd->dd_free);

	for (i = 0; i < PXA2X0_DMAC_DMOVER_CONCURRENCY; i++) {
		ds = &dd->dd_state[i];
		ds->ds_sc = sc;
		ds->ds_current = NULL;
		ds->ds_xfer.dxs_cookie = ds;
		ds->ds_xfer.dxs_done = dmac_dmover_done;
		ds->ds_xfer.dxs_priority = DMAC_PRIORITY_NORMAL;
		ds->ds_xfer.dxs_peripheral = DMAC_PERIPH_NONE;
		ds->ds_xfer.dxs_flow = DMAC_FLOW_CTRL_NONE;
		ds->ds_xfer.dxs_dev_width = DMAC_DEV_WIDTH_DEFAULT;
		ds->ds_xfer.dxs_burst_size = DMAC_BURST_SIZE_8;	/* XXX */
		ds->ds_xfer.dxs_loop_notify = DMAC_DONT_LOOP;
		ds->ds_src_addr_hold = false;
		ds->ds_dst_addr_hold = false;
		ds->ds_src_nsegs = 0;
		ds->ds_dst_nsegs = 0;
		LIST_INSERT_HEAD(&dd->dd_free, ds, ds_link);

		/*
		 * Create dma maps for both source and destination buffers.
		 */
		if (bus_dmamap_create(sc->sc_dmat, DMAC_DMOVER_MAX_XFER,
		    DMAC_DMOVER_NSEGS, DMAC_DMOVER_MAX_XFER,
		    0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &ds->ds_src_dmap) ||
		    bus_dmamap_create(sc->sc_dmat, DMAC_DMOVER_MAX_XFER,
		    DMAC_DMOVER_NSEGS, DMAC_DMOVER_MAX_XFER,
		    0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &ds->ds_dst_dmap)) {
			panic("dmac_dmover_attach: bus_dmamap_create failed");
		}

		/*
		 * Allocate some dma memory to be used as source buffers
		 * for the zero-fill and fill-8 operations. We only need
		 * small buffers here, since we set up the DMAC source
		 * descriptor with 'ds_addr_hold' set to true.
		 */
		if (bus_dmamem_alloc(sc->sc_dmat,
		    arm_pdcache_line_size, arm_pdcache_line_size, 0,
		    &ds->ds_zero_seg, 1, &dummy, BUS_DMA_NOWAIT) ||
		    bus_dmamem_alloc(sc->sc_dmat,
		    arm_pdcache_line_size, arm_pdcache_line_size, 0,
		    &ds->ds_fill_seg, 1, &dummy, BUS_DMA_NOWAIT)) {
			panic("dmac_dmover_attach: bus_dmamem_alloc failed");
		}

		if (bus_dmamem_map(sc->sc_dmat, &ds->ds_zero_seg, 1,
		    arm_pdcache_line_size, &ds->ds_zero_va,
		    BUS_DMA_NOWAIT) ||
		    bus_dmamem_map(sc->sc_dmat, &ds->ds_fill_seg, 1,
		    arm_pdcache_line_size, &ds->ds_fill_va,
		    BUS_DMA_NOWAIT)) {
			panic("dmac_dmover_attach: bus_dmamem_map failed");
		}

		/*
		 * Make sure the zero-fill source buffer really is zero filled
		 */
		memset(ds->ds_zero_va, 0, arm_pdcache_line_size);
	}

	dmover_backend_register(&sc->sc_dmover.dd_backend);
}

static void
dmac_dmover_process(struct dmover_backend *dmb)
{
	struct pxadmac_softc *sc = dmb->dmb_cookie;
	int s = splbio();

	/*
	 * If the backend is currently idle, go process the queue.
	 */
	if (sc->sc_dmover.dd_busy == 0)
		dmac_dmover_run(&sc->sc_dmover.dd_backend);
	splx(s);
}

static void
dmac_dmover_run(struct dmover_backend *dmb)
{
	struct dmover_request *dreq;
	struct pxadmac_softc *sc;
	struct dmac_dmover *dd;
	struct dmac_dmover_state *ds;
	size_t len_src, len_dst;
	int rv;

	sc = dmb->dmb_cookie;
	dd = &sc->sc_dmover;
	sc->sc_dmover.dd_busy = 1;

	/*
	 * As long as we can queue up dmover requests...
	 */
	while ((dreq = TAILQ_FIRST(&dmb->dmb_pendreqs)) != NULL &&
	    (ds = LIST_FIRST(&dd->dd_free)) != NULL) {
		/*
		 * Pull the request off the queue, mark it 'running',
		 * and make it 'current'.
		 */
		dmover_backend_remque(dmb, dreq);
		dreq->dreq_flags |= DMOVER_REQ_RUNNING;
		LIST_REMOVE(ds, ds_link);
		ds->ds_current = dreq;

		switch (dreq->dreq_outbuf_type) {
		case DMOVER_BUF_LINEAR:
			len_dst = dreq->dreq_outbuf.dmbuf_linear.l_len;
			break;
		case DMOVER_BUF_UIO:
			len_dst = dreq->dreq_outbuf.dmbuf_uio->uio_resid;
			break;
		default:
			goto error;
		}

		/*
		 * Fix up the appropriate DMA 'source' buffer
		 */
		if (dreq->dreq_assignment->das_algdesc->dad_ninputs) {
			struct uio *uio;
			/*
			 * This is a 'copy' operation.
			 * Load up the specified source buffer
			 */
			switch (dreq->dreq_inbuf_type) {
			case DMOVER_BUF_LINEAR:
				len_src = dreq->dreq_inbuf[0].dmbuf_linear.l_len;
				if (len_src != len_dst)
					goto error;
				if (bus_dmamap_load(sc->sc_dmat, ds->ds_src_dmap,
				    dreq->dreq_inbuf[0].dmbuf_linear.l_addr,
				    len_src, NULL,
				    BUS_DMA_NOWAIT | BUS_DMA_STREAMING |
				    BUS_DMA_READ))
					goto error;
				break;

			case DMOVER_BUF_UIO:
				uio = dreq->dreq_inbuf[0].dmbuf_uio;
				len_src = uio->uio_resid;
				if (uio->uio_rw != UIO_WRITE ||
				    len_src != len_dst)
					goto error;
				if (bus_dmamap_load_uio(sc->sc_dmat,
				    ds->ds_src_dmap, uio,
				    BUS_DMA_NOWAIT | BUS_DMA_STREAMING |
				    BUS_DMA_READ))
					goto error;
				break;

			default:
				goto error;
			}

			ds->ds_src_addr_hold = false;
		} else
		if (dreq->dreq_assignment->das_algdesc->dad_name ==
		    DMOVER_FUNC_ZERO) {
			/*
			 * Zero-fill operation.
			 * Simply load up the pre-zeroed source buffer
			 */
			if (bus_dmamap_load(sc->sc_dmat, ds->ds_src_dmap,
			    ds->ds_zero_va, arm_pdcache_line_size, NULL,
			    BUS_DMA_NOWAIT | BUS_DMA_STREAMING | BUS_DMA_READ))
				goto error;

			ds->ds_src_addr_hold = true;
		} else
		if (dreq->dreq_assignment->das_algdesc->dad_name ==
		    DMOVER_FUNC_FILL8) {
			/*
			 * Fill-8 operation.
			 * Initialise our fill-8 buffer, and load it up.
			 *
			 * XXX: Experiment with exactly how much of the
			 * source buffer needs to be filled. Particularly WRT
			 * burst size (which is hardcoded to 8 for dmover).
			 */
			memset(ds->ds_fill_va, dreq->dreq_immediate[0],
			    arm_pdcache_line_size);

			if (bus_dmamap_load(sc->sc_dmat, ds->ds_src_dmap,
			    ds->ds_fill_va, arm_pdcache_line_size, NULL,
			    BUS_DMA_NOWAIT | BUS_DMA_STREAMING | BUS_DMA_READ))
				goto error;

			ds->ds_src_addr_hold = true;
		} else {
			goto error;
		}

		/*
		 * Now do the same for the destination buffer
		 */
		switch (dreq->dreq_outbuf_type) {
		case DMOVER_BUF_LINEAR:
			if (bus_dmamap_load(sc->sc_dmat, ds->ds_dst_dmap,
			    dreq->dreq_outbuf.dmbuf_linear.l_addr,
			    len_dst, NULL,
			    BUS_DMA_NOWAIT | BUS_DMA_STREAMING | BUS_DMA_WRITE))
				goto error_unload_src;
			break;

		case DMOVER_BUF_UIO:
			if (dreq->dreq_outbuf.dmbuf_uio->uio_rw != UIO_READ)
				goto error_unload_src;
			if (bus_dmamap_load_uio(sc->sc_dmat, ds->ds_dst_dmap,
			    dreq->dreq_outbuf.dmbuf_uio,
			    BUS_DMA_NOWAIT | BUS_DMA_STREAMING | BUS_DMA_WRITE))
				goto error_unload_src;
			break;

		default:
		error_unload_src:
			bus_dmamap_unload(sc->sc_dmat, ds->ds_src_dmap);
		error:
			dreq->dreq_error = EINVAL;
			dreq->dreq_flags |= DMOVER_REQ_ERROR;
			ds->ds_current = NULL;
			LIST_INSERT_HEAD(&dd->dd_free, ds, ds_link);
			dmover_done(dreq);
			continue;
		}

		/*
		 * The last step before shipping the request off to the
		 * DMAC driver is to sync the dma maps.
		 */
		bus_dmamap_sync(sc->sc_dmat, ds->ds_src_dmap, 0,
		    ds->ds_src_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);
		ds->ds_src_dma_segs = ds->ds_src_dmap->dm_segs;
		ds->ds_src_nsegs = ds->ds_src_dmap->dm_nsegs;

		bus_dmamap_sync(sc->sc_dmat, ds->ds_dst_dmap, 0,
		    ds->ds_dst_dmap->dm_mapsize, BUS_DMASYNC_PREREAD);
		ds->ds_dst_dma_segs = ds->ds_dst_dmap->dm_segs;
		ds->ds_dst_nsegs = ds->ds_dst_dmap->dm_nsegs;

		/*
		 * Hand the request over to the dmac section of the driver.
		 */
		if ((rv = pxa2x0_dmac_start_xfer(&ds->ds_xfer.dxs_xfer)) != 0) {
			bus_dmamap_unload(sc->sc_dmat, ds->ds_src_dmap);
			bus_dmamap_unload(sc->sc_dmat, ds->ds_dst_dmap);
			dreq->dreq_error = rv;
			dreq->dreq_flags |= DMOVER_REQ_ERROR;
			ds->ds_current = NULL;
			LIST_INSERT_HEAD(&dd->dd_free, ds, ds_link);
			dmover_done(dreq);
		}
	}

	/* All done */
	sc->sc_dmover.dd_busy = 0;
}

static void
dmac_dmover_done(struct dmac_xfer *dx, int error)
{
	struct dmac_dmover_state *ds = dx->dx_cookie;
	struct pxadmac_softc *sc = ds->ds_sc;
	struct dmover_request *dreq = ds->ds_current;

	/*
	 * A dmover(9) request has just completed.
	 */

	KDASSERT(dreq != NULL);

	/*
	 * Sync and unload the DMA maps
	 */
	bus_dmamap_sync(sc->sc_dmat, ds->ds_src_dmap, 0,
	    ds->ds_src_dmap->dm_mapsize, BUS_DMASYNC_POSTREAD);
	bus_dmamap_sync(sc->sc_dmat, ds->ds_dst_dmap, 0,
	    ds->ds_dst_dmap->dm_mapsize, BUS_DMASYNC_POSTWRITE);

	bus_dmamap_unload(sc->sc_dmat, ds->ds_src_dmap);
	bus_dmamap_unload(sc->sc_dmat, ds->ds_dst_dmap);

	ds->ds_current = NULL;
	LIST_INSERT_HEAD(&sc->sc_dmover.dd_free, ds, ds_link);

	/*
	 * Record the completion status of the transfer
	 */
	if (error) {
		dreq->dreq_error = error;
		dreq->dreq_flags |= DMOVER_REQ_ERROR;
	} else {
		if (dreq->dreq_outbuf_type == DMOVER_BUF_UIO)
			dreq->dreq_outbuf.dmbuf_uio->uio_resid = 0;
		if (dreq->dreq_assignment->das_algdesc->dad_ninputs &&
		    dreq->dreq_inbuf_type == DMOVER_BUF_UIO)
			dreq->dreq_inbuf[0].dmbuf_uio->uio_resid = 0;
	}

	/*
	 * Done!
	 */
	dmover_done(dreq);

	/*
	 * See if we can start some more dmover(9) requests.
	 *
	 * Note: We're already at splbio() here.
	 */
	if (sc->sc_dmover.dd_busy == 0)
		dmac_dmover_run(&sc->sc_dmover.dd_backend);
}
#endif

struct dmac_xfer *
pxa2x0_dmac_allocate_xfer(int flags)
{
	struct dmac_xfer_state *dxs;

	dxs = malloc(sizeof(struct dmac_xfer_state), M_DEVBUF, flags);

	return ((struct dmac_xfer *)dxs);
}

void
pxa2x0_dmac_free_xfer(struct dmac_xfer *dx)
{

	/*
	 * XXX: Should verify the DMAC is not actively using this
	 * structure before freeing...
	 */
	free(dx, M_DEVBUF);
}

static inline int
dmac_validate_desc(struct dmac_xfer_desc *xd, size_t *psize,
    bool *misaligned_flag)
{
	size_t size;
	int i;

	/*
	 * Make sure the transfer parameters are acceptable.
	 */

	if (xd->xd_addr_hold &&
	    (xd->xd_nsegs != 1 || xd->xd_dma_segs[0].ds_len == 0))
		return (EINVAL);

	for (i = 0, size = 0; i < xd->xd_nsegs; i++) {
		if (xd->xd_dma_segs[i].ds_addr & 0x7) {
			if (!CPU_IS_PXA270)
				return (EFAULT);
			*misaligned_flag = true;
		}
		size += xd->xd_dma_segs[i].ds_len;
	}

	*psize = size;
	return (0);
}

static inline int
dmac_init_desc(struct dmac_desc_segs *ds, struct dmac_xfer_desc *xd,
    size_t *psize, bool *misaligned_flag)
{
	int err;

	if ((err = dmac_validate_desc(xd, psize, misaligned_flag)))
		return (err);

	ds->ds_curseg = xd->xd_dma_segs;
	ds->ds_nsegs = xd->xd_nsegs;
	ds->ds_offset = 0;
	return (0);
}

int
pxa2x0_dmac_start_xfer(struct dmac_xfer *dx)
{
	struct pxadmac_softc *sc = pxadmac_sc;
	struct dmac_xfer_state *dxs = (struct dmac_xfer_state *)dx;
	struct dmac_xfer_desc *src, *dst;
	size_t size;
	int err, s;

	if (dxs->dxs_peripheral != DMAC_PERIPH_NONE &&
	    dxs->dxs_peripheral >= DMAC_N_PERIPH)
		return (EINVAL);

	src = &dxs->dxs_desc[DMAC_DESC_SRC];
	dst = &dxs->dxs_desc[DMAC_DESC_DST];

	dxs->dxs_misaligned_flag = false;

	if ((err = dmac_init_desc(&dxs->dxs_segs[DMAC_DESC_SRC], src, &size,
	    &dxs->dxs_misaligned_flag)))
		return (err);
	if (src->xd_addr_hold == false &&
	    dxs->dxs_loop_notify != DMAC_DONT_LOOP &&
	    (size % dxs->dxs_loop_notify) != 0)
		return (EINVAL);

	if ((err = dmac_init_desc(&dxs->dxs_segs[DMAC_DESC_DST], dst, &size,
	    &dxs->dxs_misaligned_flag)))
		return (err);
	if (dst->xd_addr_hold == false &&
	    dxs->dxs_loop_notify != DMAC_DONT_LOOP &&
	    (size % dxs->dxs_loop_notify) != 0)
		return (EINVAL);

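	/*
	 * Set up the transfer state and pre-compute the DCMD bits shared by
	 * every descriptor of this transfer: device width, burst size, flow
	 * control and the address-increment flags.
	 */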
	SLIST_INIT(&dxs->dxs_descs);
	dxs->dxs_channel = DMAC_NO_CHANNEL;
	dxs->dxs_dcmd = (((u_int32_t)dxs->dxs_dev_width) << DCMD_WIDTH_SHIFT) |
	    (((u_int32_t)dxs->dxs_burst_size) << DCMD_SIZE_SHIFT);

	switch (dxs->dxs_flow) {
	case DMAC_FLOW_CTRL_NONE:
		break;
	case DMAC_FLOW_CTRL_SRC:
		dxs->dxs_dcmd |= DCMD_FLOWSRC;
		break;
	case DMAC_FLOW_CTRL_DEST:
		dxs->dxs_dcmd |= DCMD_FLOWTRG;
		break;
	}

	if (src->xd_addr_hold == false)
		dxs->dxs_dcmd |= DCMD_INCSRCADDR;
	if (dst->xd_addr_hold == false)
		dxs->dxs_dcmd |= DCMD_INCTRGADDR;

	s = splbio();
	if (dxs->dxs_peripheral == DMAC_PERIPH_NONE ||
	    sc->sc_periph[dxs->dxs_peripheral].sp_busy == 0) {
		dxs->dxs_queue = &sc->sc_queue[DMAC_PRI(dxs->dxs_priority)];
		SIMPLEQ_INSERT_TAIL(dxs->dxs_queue, dxs, dxs_link);
		if (dxs->dxs_peripheral != DMAC_PERIPH_NONE)
			sc->sc_periph[dxs->dxs_peripheral].sp_busy++;
		dmac_start(sc, DMAC_PRI(dxs->dxs_priority));
	} else {
		dxs->dxs_queue = &sc->sc_periph[dxs->dxs_peripheral].sp_queue;
		SIMPLEQ_INSERT_TAIL(dxs->dxs_queue, dxs, dxs_link);
		sc->sc_periph[dxs->dxs_peripheral].sp_busy++;
	}
	splx(s);

	return (0);
}

void
pxa2x0_dmac_abort_xfer(struct dmac_xfer *dx)
{
	struct pxadmac_softc *sc = pxadmac_sc;
	struct dmac_xfer_state *ndxs, *dxs = (struct dmac_xfer_state *)dx;
	struct dmac_desc *desc, *ndesc;
	struct dmac_xfer_state_head *queue;
	u_int32_t rv;
	int s, timeout, need_start = 0;

	s = splbio();

	queue = dxs->dxs_queue;

	if (dxs->dxs_channel == DMAC_NO_CHANNEL) {
		/*
		 * The request has not yet started, or it has already
		 * completed. If the request is not on a queue, just
		 * return.
		 */
		if (queue == NULL) {
			splx(s);
			return;
		}

		dxs->dxs_queue = NULL;
		SIMPLEQ_REMOVE(queue, dxs, dmac_xfer_state, dxs_link);
	} else {
		/*
		 * The request is in progress. This is a bit trickier.
		 */
		dmac_reg_write(sc, DMAC_DCSR(dxs->dxs_channel), 0);

		for (timeout = 5000; timeout; timeout--) {
			rv = dmac_reg_read(sc, DMAC_DCSR(dxs->dxs_channel));
			if (rv & DCSR_STOPSTATE)
				break;
			delay(1);
		}

		if ((rv & DCSR_STOPSTATE) == 0)
			panic(
			   "pxa2x0_dmac_abort_xfer: channel %d failed to abort",
			    dxs->dxs_channel);

		/*
		 * Free resources allocated to the request
		 */
		for (desc = SLIST_FIRST(&dxs->dxs_descs); desc; desc = ndesc) {
			ndesc = SLIST_NEXT(desc, d_link);
			SLIST_INSERT_HEAD(&sc->sc_descs, desc, d_link);
			sc->sc_free_descs++;
		}

		sc->sc_active[dxs->dxs_channel] = NULL;
		dmac_free_channel(sc, DMAC_PRI(dxs->dxs_priority),
		    dxs->dxs_channel);

		if (dxs->dxs_peripheral != DMAC_PERIPH_NONE)
			dmac_reg_write(sc, DMAC_DRCMR(dxs->dxs_peripheral), 0);

		need_start = 1;
		dxs->dxs_queue = NULL;
	}

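	/*
	 * If no peripheral is involved, this was the peripheral's only
	 * outstanding request, or the aborted request was still waiting
	 * on the per-peripheral queue, there is nothing more to hand over.
	 */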
	if (dxs->dxs_peripheral == DMAC_PERIPH_NONE ||
	    sc->sc_periph[dxs->dxs_peripheral].sp_busy-- == 1 ||
	    queue == &sc->sc_periph[dxs->dxs_peripheral].sp_queue)
		goto out;

	/*
	 * We've just removed the current item for this
	 * peripheral, and there is at least one more
	 * pending item waiting. Make it current.
	 */
	ndxs = SIMPLEQ_FIRST(&sc->sc_periph[dxs->dxs_peripheral].sp_queue);
	dxs = ndxs;
	KDASSERT(dxs != NULL);
	SIMPLEQ_REMOVE_HEAD(&sc->sc_periph[dxs->dxs_peripheral].sp_queue,
	    dxs_link);

	dxs->dxs_queue = &sc->sc_queue[DMAC_PRI(dxs->dxs_priority)];
	SIMPLEQ_INSERT_TAIL(dxs->dxs_queue, dxs, dxs_link);
	need_start = 1;

	/*
	 * Try to start any pending requests with the same
	 * priority.
	 */
 out:
	if (need_start)
		dmac_start(sc, DMAC_PRI(dxs->dxs_priority));
	splx(s);
}

static void
dmac_start(struct pxadmac_softc *sc, dmac_priority_t priority)
{
	struct dmac_xfer_state *dxs;
	u_int channel;

	while (sc->sc_free_descs &&
	    (dxs = SIMPLEQ_FIRST(&sc->sc_queue[priority])) != NULL &&
	    dmac_allocate_channel(sc, priority, &channel) == 0) {
		/*
		 * Yay, got some descriptors, a transfer request, and
		 * an available DMA channel.
		 */
		KDASSERT(sc->sc_active[channel] == NULL);
		SIMPLEQ_REMOVE_HEAD(&sc->sc_queue[priority], dxs_link);

		/* set DMA alignment register */
		if (CPU_IS_PXA270) {
			uint32_t dalgn;

			dalgn = dmac_reg_read(sc, DMAC_DALGN);
			dalgn &= ~(1U << channel);
			if (dxs->dxs_misaligned_flag)
				dalgn |= (1U << channel);
			dmac_reg_write(sc, DMAC_DALGN, dalgn);
		}

		dxs->dxs_channel = channel;
		sc->sc_active[channel] = dxs;
		(void) dmac_continue_xfer(sc, dxs);
		/*
		 * XXX: Deal with descriptor allocation failure for loops
		 */
	}
}

static int
dmac_continue_xfer(struct pxadmac_softc *sc, struct dmac_xfer_state *dxs)
{
	struct dmac_desc *desc, *prev_desc;
	struct pxa2x0_dma_desc *dd;
	struct dmac_desc_segs *src_ds, *dst_ds;
	struct dmac_xfer_desc *src_xd, *dst_xd;
	bus_dma_segment_t *src_seg, *dst_seg;
	bus_addr_t src_mem_addr, dst_mem_addr;
	bus_size_t src_size, dst_size, this_size;

	desc = NULL;
	prev_desc = NULL;
	dd = NULL;
	src_ds = &dxs->dxs_segs[DMAC_DESC_SRC];
	dst_ds = &dxs->dxs_segs[DMAC_DESC_DST];
	src_xd = &dxs->dxs_desc[DMAC_DESC_SRC];
	dst_xd = &dxs->dxs_desc[DMAC_DESC_DST];
	SLIST_INIT(&dxs->dxs_descs);

	/*
	 * As long as the source/destination buffers have DMA segments,
	 * and we have free descriptors, build a DMA chain.
	 */
	while (src_ds->ds_nsegs && dst_ds->ds_nsegs && sc->sc_free_descs) {
		src_seg = src_ds->ds_curseg;
		src_mem_addr = src_seg->ds_addr + src_ds->ds_offset;
		if (src_xd->xd_addr_hold == false &&
		    dxs->dxs_loop_notify != DMAC_DONT_LOOP)
			src_size = dxs->dxs_loop_notify;
		else
			src_size = src_seg->ds_len - src_ds->ds_offset;

		dst_seg = dst_ds->ds_curseg;
		dst_mem_addr = dst_seg->ds_addr + dst_ds->ds_offset;
		if (dst_xd->xd_addr_hold == false &&
		    dxs->dxs_loop_notify != DMAC_DONT_LOOP)
			dst_size = dxs->dxs_loop_notify;
		else
			dst_size = dst_seg->ds_len - dst_ds->ds_offset;

		/*
		 * We may need to split a source or destination segment
		 * across two or more DMAC descriptors.
		 */
		while (src_size && dst_size &&
		    (desc = SLIST_FIRST(&sc->sc_descs)) != NULL) {
			SLIST_REMOVE_HEAD(&sc->sc_descs, d_link);
			sc->sc_free_descs--;

			/*
			 * Decide how much data we're going to transfer
			 * using this DMAC descriptor.
			 */
			if (src_xd->xd_addr_hold)
				this_size = dst_size;
			else
			if (dst_xd->xd_addr_hold)
				this_size = src_size;
			else
				this_size = min(dst_size, src_size);

			/*
			 * But clamp the transfer size to the DMAC
			 * descriptor's maximum.
			 */
			this_size = min(this_size, DCMD_LENGTH_MASK & ~0x1f);

			/*
			 * Fill in the DMAC descriptor
			 */
			dd = desc->d_desc;
			dd->dd_dsadr = src_mem_addr;
			dd->dd_dtadr = dst_mem_addr;
			dd->dd_dcmd = dxs->dxs_dcmd | this_size;

			/*
			 * Link it into the chain
			 */
			if (prev_desc) {
				SLIST_INSERT_AFTER(prev_desc, desc, d_link);
				prev_desc->d_desc->dd_ddadr = desc->d_desc_pa;
			} else {
				SLIST_INSERT_HEAD(&dxs->dxs_descs, desc,
				    d_link);
			}
			prev_desc = desc;

			/*
			 * Update the source/destination pointers
			 */
			if (src_xd->xd_addr_hold == false) {
				src_size -= this_size;
				src_ds->ds_offset += this_size;
				if (src_ds->ds_offset == src_seg->ds_len) {
					KDASSERT(src_size == 0);
					src_ds->ds_curseg = ++src_seg;
					src_ds->ds_offset = 0;
					src_ds->ds_nsegs--;
				} else
					src_mem_addr += this_size;
			}

			if (dst_xd->xd_addr_hold == false) {
				dst_size -= this_size;
				dst_ds->ds_offset += this_size;
				if (dst_ds->ds_offset == dst_seg->ds_len) {
					KDASSERT(dst_size == 0);
					dst_ds->ds_curseg = ++dst_seg;
					dst_ds->ds_offset = 0;
					dst_ds->ds_nsegs--;
				} else
					dst_mem_addr += this_size;
			}
		}

		if (dxs->dxs_loop_notify != DMAC_DONT_LOOP) {
			/*
			 * We must be able to allocate descriptors for the
			 * entire loop. Otherwise, return them to the pool
			 * and bail.
			 */
			if (desc == NULL) {
				struct dmac_desc *ndesc;
				for (desc = SLIST_FIRST(&dxs->dxs_descs);
				    desc; desc = ndesc) {
					ndesc = SLIST_NEXT(desc, d_link);
					SLIST_INSERT_HEAD(&sc->sc_descs, desc,
					    d_link);
					sc->sc_free_descs++;
				}

				return (0);
			}

			KASSERT(dd != NULL);
			dd->dd_dcmd |= DCMD_ENDIRQEN;
		}
	}

	/*
	 * Did we manage to build a chain?
	 * If not, just return.
	 */
	if (dd == NULL)
		return (0);

	if (dxs->dxs_loop_notify == DMAC_DONT_LOOP) {
		dd->dd_dcmd |= DCMD_ENDIRQEN;
		dd->dd_ddadr = DMAC_DESC_LAST;
	} else
		dd->dd_ddadr = SLIST_FIRST(&dxs->dxs_descs)->d_desc_pa;

	if (dxs->dxs_peripheral != DMAC_PERIPH_NONE) {
		dmac_reg_write(sc, DMAC_DRCMR(dxs->dxs_peripheral),
		    dxs->dxs_channel | DRCMR_MAPVLD);
	}
	dmac_reg_write(sc, DMAC_DDADR(dxs->dxs_channel),
	    SLIST_FIRST(&dxs->dxs_descs)->d_desc_pa);
	dmac_reg_write(sc, DMAC_DCSR(dxs->dxs_channel),
	    DCSR_ENDINTR | DCSR_RUN);

	return (1);
}

static u_int
dmac_channel_intr(struct pxadmac_softc *sc, u_int channel)
{
	struct dmac_xfer_state *dxs;
	struct dmac_desc *desc, *ndesc;
	u_int32_t dcsr;
	u_int rv = 0;

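	/*
	 * Read and acknowledge the channel's status bits.  If the channel
	 * has reached the stopped state, also clear its RUN bit.
	 */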
	dcsr = dmac_reg_read(sc, DMAC_DCSR(channel));
	dmac_reg_write(sc, DMAC_DCSR(channel), dcsr);
	if (dmac_reg_read(sc, DMAC_DCSR(channel)) & DCSR_STOPSTATE)
		dmac_reg_write(sc, DMAC_DCSR(channel), dcsr & ~DCSR_RUN);

	if ((dxs = sc->sc_active[channel]) == NULL) {
		aprint_error_dev(sc->sc_dev,
		    "Stray DMAC interrupt for unallocated channel %d\n",
		    channel);
		return (0);
	}

	/*
	 * Clear down the interrupt in the DMA Interrupt Register
	 */
	dmac_reg_write(sc, DMAC_DINT, (1u << channel));

	/*
	 * If this is a looping request, invoke the 'done' callback and
	 * return immediately.
	 */
	if (dxs->dxs_loop_notify != DMAC_DONT_LOOP &&
	    (dcsr & DCSR_BUSERRINTR) == 0) {
		(dxs->dxs_done)(&dxs->dxs_xfer, 0);
		return (0);
	}

	/*
	 * Free the descriptors allocated to the completed transfer
	 *
	 * XXX: If there is more data to transfer in this request,
	 * we could simply reuse some or all of the descriptors
	 * already allocated for the transfer which just completed.
	 */
	for (desc = SLIST_FIRST(&dxs->dxs_descs); desc; desc = ndesc) {
		ndesc = SLIST_NEXT(desc, d_link);
		SLIST_INSERT_HEAD(&sc->sc_descs, desc, d_link);
		sc->sc_free_descs++;
	}

	if ((dcsr & DCSR_BUSERRINTR) || dmac_continue_xfer(sc, dxs) == 0) {
		/*
		 * The transfer completed (possibly due to an error),
		 * -OR- we were unable to continue any remaining
		 * segment of the transfer due to a lack of descriptors.
		 *
		 * In either case, we have to free up DMAC resources
		 * allocated to the request.
		 */
		sc->sc_active[channel] = NULL;
		dmac_free_channel(sc, DMAC_PRI(dxs->dxs_priority), channel);
		dxs->dxs_channel = DMAC_NO_CHANNEL;
		if (dxs->dxs_peripheral != DMAC_PERIPH_NONE)
			dmac_reg_write(sc, DMAC_DRCMR(dxs->dxs_peripheral), 0);

		if (dxs->dxs_segs[DMAC_DESC_SRC].ds_nsegs == 0 ||
		    dxs->dxs_segs[DMAC_DESC_DST].ds_nsegs == 0 ||
		    (dcsr & DCSR_BUSERRINTR)) {

			/*
			 * The transfer is complete.
			 */
			dxs->dxs_queue = NULL;
			rv = 1u << DMAC_PRI(dxs->dxs_priority);

			if (dxs->dxs_peripheral != DMAC_PERIPH_NONE &&
			    --sc->sc_periph[dxs->dxs_peripheral].sp_busy != 0) {
				struct dmac_xfer_state *ndxs;
				/*
				 * We've just removed the current item for this
				 * peripheral, and there is at least one more
				 * pending item waiting. Make it current.
				 */
				ndxs = SIMPLEQ_FIRST(
				    &sc->sc_periph[dxs->dxs_peripheral].sp_queue);
				KDASSERT(ndxs != NULL);
				SIMPLEQ_REMOVE_HEAD(
				    &sc->sc_periph[dxs->dxs_peripheral].sp_queue,
				    dxs_link);

				ndxs->dxs_queue =
				    &sc->sc_queue[DMAC_PRI(dxs->dxs_priority)];
				SIMPLEQ_INSERT_TAIL(ndxs->dxs_queue, ndxs,
				    dxs_link);
			}

			(dxs->dxs_done)(&dxs->dxs_xfer,
			    (dcsr & DCSR_BUSERRINTR) ? EFAULT : 0);
		} else {
			/*
			 * The request is not yet complete, but we were unable
			 * to make any headway at this time because there are
			 * no free descriptors. Put the request back at the
			 * head of the appropriate priority queue. It'll be
			 * dealt with as other in-progress transfers complete.
			 */
			SIMPLEQ_INSERT_HEAD(
			    &sc->sc_queue[DMAC_PRI(dxs->dxs_priority)], dxs,
			    dxs_link);
		}
	}

	return (rv);
}

static int
dmac_intr(void *arg)
{
	struct pxadmac_softc *sc = arg;
	u_int32_t rv, mask;
	u_int chan, pri;

	rv = dmac_reg_read(sc, DMAC_DINT);
	if ((rv & DMAC_DINT_MASK) == 0)
		return (0);

	/*
	 * Deal with completed transfers
	 */
	for (chan = 0, mask = 1u, pri = 0;
	    chan < DMAC_N_CHANNELS; chan++, mask <<= 1) {
		if (rv & mask)
			pri |= dmac_channel_intr(sc, chan);
	}

	/*
	 * Now try to start any queued transfers
	 */
#if (DMAC_N_PRIORITIES > 1)
	if (pri & (1u << DMAC_PRIORITY_HIGH))
		dmac_start(sc, DMAC_PRIORITY_HIGH);
	if (pri & (1u << DMAC_PRIORITY_MED))
		dmac_start(sc, DMAC_PRIORITY_MED);
	if (pri & (1u << DMAC_PRIORITY_LOW))
		dmac_start(sc, DMAC_PRIORITY_LOW);
#else
	if (pri)
		dmac_start(sc, DMAC_PRIORITY_NORMAL);
#endif

	return (1);
}