/*	$NetBSD: pxa2x0_dmac.c,v 1.3 2005/12/24 20:06:52 perry Exp $	*/

/*
 * Copyright (c) 2003, 2005 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Steve C. Woodford for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed for the NetBSD Project by
 *      Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "opt_pxa2x0_dmac.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/queue.h>

#include <uvm/uvm_param.h>	/* For PAGE_SIZE */

#include <machine/intr.h>
#include <machine/bus.h>

#include <dev/dmover/dmovervar.h>

#include <arm/xscale/pxa2x0reg.h>
#include <arm/xscale/pxa2x0var.h>

#include <arm/xscale/pxa2x0_dmac.h>

#include "locators.h"

#undef DMAC_N_PRIORITIES
#ifndef PXA2X0_DMAC_FIXED_PRIORITY
#define	DMAC_N_PRIORITIES	3
#define	DMAC_PRI(p)		(p)
#else
#define	DMAC_N_PRIORITIES	1
#define	DMAC_PRI(p)		(0)
#endif

struct dmac_desc {
	SLIST_ENTRY(dmac_desc) d_link;
	struct pxa2x0_dma_desc *d_desc;
	paddr_t d_desc_pa;
};

/*
 * This is used to maintain state for an in-progress transfer.
 * It tracks the current DMA segment, and offset within the segment
 * in the case where we had to split a request into several DMA
 * operations due to a shortage of DMAC descriptors.
 */
struct dmac_desc_segs {
	bus_dma_segment_t *ds_curseg;	/* Current segment */
	u_int ds_nsegs;			/* Remaining segments */
	bus_size_t ds_offset;		/* Offset within current seg */
};

SIMPLEQ_HEAD(dmac_xfer_state_head, dmac_xfer_state);

struct dmac_xfer_state {
	struct dmac_xfer dxs_xfer;
#define	dxs_cookie	dxs_xfer.dx_cookie
#define	dxs_done	dxs_xfer.dx_done
#define	dxs_priority	dxs_xfer.dx_priority
#define	dxs_peripheral	dxs_xfer.dx_peripheral
#define	dxs_flow	dxs_xfer.dx_flow
#define	dxs_dev_width	dxs_xfer.dx_dev_width
#define	dxs_burst_size	dxs_xfer.dx_burst_size
#define	dxs_loop_notify	dxs_xfer.dx_loop_notify
#define	dxs_desc	dxs_xfer.dx_desc
	SIMPLEQ_ENTRY(dmac_xfer_state) dxs_link;
	SLIST_HEAD(, dmac_desc) dxs_descs;
	struct dmac_xfer_state_head *dxs_queue;
	u_int dxs_channel;
#define	DMAC_NO_CHANNEL	(~0)
	u_int32_t dxs_dcmd;
	struct dmac_desc_segs dxs_segs[2];
};


#if (PXA2X0_DMAC_DMOVER_CONCURRENCY > 0)
/*
 * This structure is used to maintain state for the dmover(9) backend
 * part of the driver. We can have a number of concurrent dmover
 * requests in progress at any given time. The exact number is given
 * by the PXA2X0_DMAC_DMOVER_CONCURRENCY compile-time constant. One of
 * these structures is allocated for each concurrent request.
 */
struct dmac_dmover_state {
	LIST_ENTRY(dmac_dmover_state) ds_link;	/* List of idle dmover chans */
	struct pxadmac_softc *ds_sc;		/* Uplink to pxadmac softc */
	struct dmover_request *ds_current;	/* Current dmover request */
	struct dmac_xfer_state ds_xfer;
	bus_dmamap_t ds_src_dmap;
	bus_dmamap_t ds_dst_dmap;
/*
 * There is no inherent size limit in the DMA engine.
 * The following limit is somewhat arbitrary.
 */
#define	DMAC_DMOVER_MAX_XFER	(8*1024*1024)
#if 0
/* This would require 16KB * 2 just for segments... */
#define	DMAC_DMOVER_NSEGS	((DMAC_DMOVER_MAX_XFER / PAGE_SIZE) + 1)
#else
#define	DMAC_DMOVER_NSEGS	512	/* XXX: Only enough for 2MB */
#endif
	bus_dma_segment_t ds_zero_seg;	/* Used for zero-fill ops */
	caddr_t ds_zero_va;
	bus_dma_segment_t ds_fill_seg;	/* Used for fill8 ops */
	caddr_t ds_fill_va;

#define	ds_src_addr_hold	ds_xfer.dxs_desc[DMAC_DESC_SRC].xd_addr_hold
#define	ds_dst_addr_hold	ds_xfer.dxs_desc[DMAC_DESC_DST].xd_addr_hold
#define	ds_src_burst		ds_xfer.dxs_desc[DMAC_DESC_SRC].xd_burst_size
#define	ds_dst_burst		ds_xfer.dxs_desc[DMAC_DESC_DST].xd_burst_size
#define	ds_src_dma_segs		ds_xfer.dxs_desc[DMAC_DESC_SRC].xd_dma_segs
#define	ds_dst_dma_segs		ds_xfer.dxs_desc[DMAC_DESC_DST].xd_dma_segs
#define	ds_src_nsegs		ds_xfer.dxs_desc[DMAC_DESC_SRC].xd_nsegs
#define	ds_dst_nsegs		ds_xfer.dxs_desc[DMAC_DESC_DST].xd_nsegs
};

/*
 * Overall dmover(9) backend state
 */
struct dmac_dmover {
	struct dmover_backend dd_backend;
	int dd_busy;
	LIST_HEAD(, dmac_dmover_state) dd_free;
	struct dmac_dmover_state dd_state[PXA2X0_DMAC_DMOVER_CONCURRENCY];
};
#endif

struct pxadmac_softc {
	struct device sc_dev;
	bus_space_tag_t sc_bust;
	bus_dma_tag_t sc_dmat;
	bus_space_handle_t sc_bush;
	void *sc_irqcookie;

	/*
	 * Queue of pending requests, per priority
	 */
	struct dmac_xfer_state_head sc_queue[DMAC_N_PRIORITIES];

	/*
	 * Queue of pending requests, per peripheral
	 */
	struct {
		struct dmac_xfer_state_head sp_queue;
		u_int sp_busy;
	} sc_periph[DMAC_N_PERIPH];

	/*
	 * Active requests, per channel.
	 */
	struct dmac_xfer_state *sc_active[DMAC_N_CHANNELS];

	/*
	 * Channel Priority Allocation
	 */
	struct {
		u_int8_t p_first;
		u_int8_t p_pri[DMAC_N_CHANNELS];
	} sc_prio[DMAC_N_PRIORITIES];
#define	DMAC_PRIO_END	(~0)
	u_int8_t sc_channel_priority[DMAC_N_CHANNELS];

	/*
	 * DMA descriptor management
	 */
	bus_dmamap_t sc_desc_map;
	bus_dma_segment_t sc_segs;
#define	DMAC_N_DESCS	((PAGE_SIZE * 2) / sizeof(struct pxa2x0_dma_desc))
#define	DMAC_DESCS_SIZE	(DMAC_N_DESCS * sizeof(struct pxa2x0_dma_desc))
	struct dmac_desc sc_all_descs[DMAC_N_DESCS];
	u_int sc_free_descs;
	SLIST_HEAD(, dmac_desc) sc_descs;

#if (PXA2X0_DMAC_DMOVER_CONCURRENCY > 0)
	/*
	 * dmover(9) backend state
	 */
	struct dmac_dmover sc_dmover;
#endif
};

static int	pxadmac_match(struct device *, struct cfdata *, void *);
static void	pxadmac_attach(struct device *, struct device *, void *);

CFATTACH_DECL(pxadmac, sizeof(struct pxadmac_softc),
    pxadmac_match, pxadmac_attach, NULL, NULL);

static struct pxadmac_softc *pxadmac_sc;

static void dmac_start(struct pxadmac_softc *, dmac_priority_t);
static int dmac_continue_xfer(struct pxadmac_softc *, struct dmac_xfer_state *);
static u_int dmac_channel_intr(struct pxadmac_softc *, u_int);
static int dmac_intr(void *);

#if (PXA2X0_DMAC_DMOVER_CONCURRENCY > 0)
static void dmac_dmover_attach(struct pxadmac_softc *);
static void dmac_dmover_process(struct dmover_backend *);
static void dmac_dmover_run(struct dmover_backend *);
static void dmac_dmover_done(struct dmac_xfer *, int);
#endif

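/*
 * Register access helpers: thin wrappers around bus_space(9) accesses
 * to the DMAC register window mapped at attach time.
 */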
static inline u_int32_t
dmac_reg_read(struct pxadmac_softc *sc, int reg)
{

	return (bus_space_read_4(sc->sc_bust, sc->sc_bush, reg));
}

static inline void
dmac_reg_write(struct pxadmac_softc *sc, int reg, u_int32_t val)
{

	bus_space_write_4(sc->sc_bust, sc->sc_bush, reg, val);
}

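/*
 * Pop a free DMA channel from the free list for the given priority.
 * Returns 0 and the channel number via 'chanp', or -1 if no channel
 * of that priority is currently available.
 */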
static inline int
dmac_allocate_channel(struct pxadmac_softc *sc, dmac_priority_t priority,
    u_int *chanp)
{
	u_int channel;

	KDASSERT((u_int)priority < DMAC_N_PRIORITIES);

	if ((channel = sc->sc_prio[priority].p_first) == DMAC_PRIO_END)
		return (-1);
	sc->sc_prio[priority].p_first = sc->sc_prio[priority].p_pri[channel];

	*chanp = channel;
	return (0);
}

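/*
 * Return a DMA channel to the head of its priority's free list.
 */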
static inline void
dmac_free_channel(struct pxadmac_softc *sc, dmac_priority_t priority,
    u_int channel)
{

	KDASSERT((u_int)priority < DMAC_N_PRIORITIES);

	sc->sc_prio[priority].p_pri[channel] = sc->sc_prio[priority].p_first;
	sc->sc_prio[priority].p_first = channel;
}

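/*
 * autoconf(9) match: accept only the single DMA controller instance at
 * the fixed base address and interrupt on the pxaip bus.
 */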
static int
pxadmac_match(struct device *parent, struct cfdata *cf, void *aux)
{
	struct pxaip_attach_args *pxa = aux;

	if (pxadmac_sc || pxa->pxa_addr != PXA2X0_DMAC_BASE ||
	    pxa->pxa_intr != PXA2X0_INT_DMA)
		return (0);

	pxa->pxa_size = PXA2X0_DMAC_SIZE;

	return (1);
}

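/*
 * autoconf(9) attach: map the DMAC registers, quiesce the controller,
 * initialise the request queues, channel priority lists and descriptor
 * pool, hook the DMA interrupt and, if configured, register the
 * dmover(9) backend.
 */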
static void
pxadmac_attach(struct device *parent, struct device *self, void *aux)
{
	struct pxadmac_softc *sc = (struct pxadmac_softc *)self;
	struct pxaip_attach_args *pxa = aux;
	struct pxa2x0_dma_desc *dd;
	int i, nsegs;

	sc->sc_bust = pxa->pxa_iot;
	sc->sc_dmat = pxa->pxa_dmat;

	aprint_normal(": DMA Controller\n");

	if (bus_space_map(sc->sc_bust, pxa->pxa_addr, pxa->pxa_size, 0,
	    &sc->sc_bush)) {
		aprint_error("%s: Can't map registers!\n", sc->sc_dev.dv_xname);
		return;
	}

	pxadmac_sc = sc;

	/*
	 * Make sure the DMAC is quiescent
	 */
	for (i = 0; i < DMAC_N_CHANNELS; i++) {
		dmac_reg_write(sc, DMAC_DCSR(i), 0);
		dmac_reg_write(sc, DMAC_DRCMR(i), 0);
		sc->sc_active[i] = NULL;
	}
	dmac_reg_write(sc, DMAC_DINT,
	    dmac_reg_read(sc, DMAC_DINT) & DMAC_DINT_MASK);

	/*
	 * Initialise the request queues
	 */
	for (i = 0; i < DMAC_N_PRIORITIES; i++)
		SIMPLEQ_INIT(&sc->sc_queue[i]);

	/*
	 * Initialise the per-peripheral request queues
	 */
	for (i = 0; i < DMAC_N_PERIPH; i++) {
		sc->sc_periph[i].sp_busy = 0;
		SIMPLEQ_INIT(&sc->sc_periph[i].sp_queue);
	}

	/*
	 * Initialise the channel priority metadata
	 */
	memset(sc->sc_prio, DMAC_PRIO_END, sizeof(sc->sc_prio));
	for (i = 0; i < DMAC_N_CHANNELS; i++) {
#if (DMAC_N_PRIORITIES > 1)
		if (i <= 3)
			dmac_free_channel(sc, DMAC_PRIORITY_HIGH, i);
		else
		if (i <= 7)
			dmac_free_channel(sc, DMAC_PRIORITY_MED, i);
		else
			dmac_free_channel(sc, DMAC_PRIORITY_LOW, i);
#else
		dmac_free_channel(sc, DMAC_PRIORITY_NORMAL, i);
#endif
	}

	/*
	 * Initialise DMA descriptors and associated metadata
	 */
	if (bus_dmamem_alloc(sc->sc_dmat, DMAC_DESCS_SIZE, DMAC_DESCS_SIZE, 0,
	    &sc->sc_segs, 1, &nsegs, BUS_DMA_NOWAIT))
		panic("dmac_pxaip_attach: bus_dmamem_alloc failed");

	if (bus_dmamem_map(sc->sc_dmat, &sc->sc_segs, 1, DMAC_DESCS_SIZE,
	    (void *)&dd, BUS_DMA_COHERENT|BUS_DMA_NOCACHE))
		panic("dmac_pxaip_attach: bus_dmamem_map failed");

	if (bus_dmamap_create(sc->sc_dmat, DMAC_DESCS_SIZE, 1,
	    DMAC_DESCS_SIZE, 0, BUS_DMA_NOWAIT, &sc->sc_desc_map))
		panic("dmac_pxaip_attach: bus_dmamap_create failed");

	if (bus_dmamap_load(sc->sc_dmat, sc->sc_desc_map, (void *)dd,
	    DMAC_DESCS_SIZE, NULL, BUS_DMA_NOWAIT))
		panic("dmac_pxaip_attach: bus_dmamap_load failed");

	SLIST_INIT(&sc->sc_descs);
	sc->sc_free_descs = DMAC_N_DESCS;
	for (i = 0; i < DMAC_N_DESCS; i++, dd++) {
		SLIST_INSERT_HEAD(&sc->sc_descs, &sc->sc_all_descs[i], d_link);
		sc->sc_all_descs[i].d_desc = dd;
		sc->sc_all_descs[i].d_desc_pa =
		    sc->sc_segs.ds_addr + (sizeof(struct pxa2x0_dma_desc) * i);
	}

	sc->sc_irqcookie = pxa2x0_intr_establish(pxa->pxa_intr, IPL_BIO,
	    dmac_intr, sc);
	KASSERT(sc->sc_irqcookie != NULL);

#if (PXA2X0_DMAC_DMOVER_CONCURRENCY > 0)
	dmac_dmover_attach(sc);
#endif
}

#if (PXA2X0_DMAC_DMOVER_CONCURRENCY > 0)
/*
 * We support the following dmover(9) operations
 */
static const struct dmover_algdesc dmac_dmover_algdescs[] = {
	{DMOVER_FUNC_ZERO, NULL, 0},	/* Zero-fill */
	{DMOVER_FUNC_FILL8, NULL, 0},	/* Fill with 8-bit immediate value */
	{DMOVER_FUNC_COPY, NULL, 1}	/* Copy */
};
#define	DMAC_DMOVER_ALGDESC_COUNT \
	(sizeof(dmac_dmover_algdescs) / sizeof(dmac_dmover_algdescs[0]))

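/*
 * Set up the dmover(9) backend: describe the backend to dmover, then
 * initialise the per-request state, create the source/destination DMA
 * maps, and allocate the small source buffers used by the zero-fill
 * and fill-8 operations.
 */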
static void
dmac_dmover_attach(struct pxadmac_softc *sc)
{
	struct dmac_dmover *dd = &sc->sc_dmover;
	struct dmac_dmover_state *ds;
	int i, dummy;

	/*
	 * Describe ourselves to the dmover(9) code
	 */
	dd->dd_backend.dmb_name = "pxadmac";
	dd->dd_backend.dmb_speed = 100*1024*1024;	/* XXX */
	dd->dd_backend.dmb_cookie = sc;
	dd->dd_backend.dmb_algdescs = dmac_dmover_algdescs;
	dd->dd_backend.dmb_nalgdescs = DMAC_DMOVER_ALGDESC_COUNT;
	dd->dd_backend.dmb_process = dmac_dmover_process;
	dd->dd_busy = 0;
	LIST_INIT(&dd->dd_free);

	for (i = 0; i < PXA2X0_DMAC_DMOVER_CONCURRENCY; i++) {
		ds = &dd->dd_state[i];
		ds->ds_sc = sc;
		ds->ds_current = NULL;
		ds->ds_xfer.dxs_cookie = ds;
		ds->ds_xfer.dxs_done = dmac_dmover_done;
		ds->ds_xfer.dxs_priority = DMAC_PRIORITY_NORMAL;
		ds->ds_xfer.dxs_peripheral = DMAC_PERIPH_NONE;
		ds->ds_xfer.dxs_flow = DMAC_FLOW_CTRL_NONE;
		ds->ds_xfer.dxs_dev_width = DMAC_DEV_WIDTH_DEFAULT;
		ds->ds_xfer.dxs_burst_size = DMAC_BURST_SIZE_8;	/* XXX */
		ds->ds_xfer.dxs_loop_notify = DMAC_DONT_LOOP;
		ds->ds_src_addr_hold = FALSE;
		ds->ds_dst_addr_hold = FALSE;
		ds->ds_src_nsegs = 0;
		ds->ds_dst_nsegs = 0;
		LIST_INSERT_HEAD(&dd->dd_free, ds, ds_link);

		/*
		 * Create dma maps for both source and destination buffers.
		 */
		if (bus_dmamap_create(sc->sc_dmat, DMAC_DMOVER_MAX_XFER,
		    DMAC_DMOVER_NSEGS, DMAC_DMOVER_MAX_XFER,
		    0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &ds->ds_src_dmap) ||
		    bus_dmamap_create(sc->sc_dmat, DMAC_DMOVER_MAX_XFER,
		    DMAC_DMOVER_NSEGS, DMAC_DMOVER_MAX_XFER,
		    0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &ds->ds_dst_dmap)) {
			panic("dmac_dmover_attach: bus_dmamap_create failed");
		}

		/*
		 * Allocate some dma memory to be used as source buffers
		 * for the zero-fill and fill-8 operations. We only need
		 * small buffers here, since we set up the DMAC source
		 * descriptor with 'ds_addr_hold' set to TRUE.
		 */
		if (bus_dmamem_alloc(sc->sc_dmat,
		    arm_pdcache_line_size, arm_pdcache_line_size, 0,
		    &ds->ds_zero_seg, 1, &dummy, BUS_DMA_NOWAIT) ||
		    bus_dmamem_alloc(sc->sc_dmat,
		    arm_pdcache_line_size, arm_pdcache_line_size, 0,
		    &ds->ds_fill_seg, 1, &dummy, BUS_DMA_NOWAIT)) {
			panic("dmac_dmover_attach: bus_dmamem_alloc failed");
		}

		if (bus_dmamem_map(sc->sc_dmat, &ds->ds_zero_seg, 1,
		    arm_pdcache_line_size, &ds->ds_zero_va,
		    BUS_DMA_NOWAIT) ||
		    bus_dmamem_map(sc->sc_dmat, &ds->ds_fill_seg, 1,
		    arm_pdcache_line_size, &ds->ds_fill_va,
		    BUS_DMA_NOWAIT)) {
			panic("dmac_dmover_attach: bus_dmamem_map failed");
		}

		/*
		 * Make sure the zero-fill source buffer really is zero filled
		 */
		memset(ds->ds_zero_va, 0, arm_pdcache_line_size);
	}

	dmover_backend_register(&sc->sc_dmover.dd_backend);
}

static void
dmac_dmover_process(struct dmover_backend *dmb)
{
	struct pxadmac_softc *sc = dmb->dmb_cookie;
	int s = splbio();

	/*
	 * If the backend is currently idle, go process the queue.
	 */
	if (sc->sc_dmover.dd_busy == 0)
		dmac_dmover_run(&sc->sc_dmover.dd_backend);
	splx(s);
}

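/*
 * Drain the dmover(9) pending-request queue for as long as idle
 * per-request state is available: load DMA maps for the source and
 * destination buffers and hand each request to the DMAC proper via
 * pxa2x0_dmac_start_xfer(). Called at splbio().
 */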
static void
dmac_dmover_run(struct dmover_backend *dmb)
{
	struct dmover_request *dreq;
	struct pxadmac_softc *sc;
	struct dmac_dmover *dd;
	struct dmac_dmover_state *ds;
	size_t len_src, len_dst;
	int rv;

	sc = dmb->dmb_cookie;
	dd = &sc->sc_dmover;
	sc->sc_dmover.dd_busy = 1;

	/*
	 * As long as we can queue up dmover requests...
	 */
	while ((dreq = TAILQ_FIRST(&dmb->dmb_pendreqs)) != NULL &&
	    (ds = LIST_FIRST(&dd->dd_free)) != NULL) {
		/*
		 * Pull the request off the queue, mark it 'running',
		 * and make it 'current'.
		 */
		dmover_backend_remque(dmb, dreq);
		dreq->dreq_flags |= DMOVER_REQ_RUNNING;
		LIST_REMOVE(ds, ds_link);
		ds->ds_current = dreq;

		switch (dreq->dreq_outbuf_type) {
		case DMOVER_BUF_LINEAR:
			len_dst = dreq->dreq_outbuf.dmbuf_linear.l_len;
			break;
		case DMOVER_BUF_UIO:
			len_dst = dreq->dreq_outbuf.dmbuf_uio->uio_resid;
			break;
		default:
			goto error;
		}

		/*
		 * Fix up the appropriate DMA 'source' buffer
		 */
		if (dreq->dreq_assignment->das_algdesc->dad_ninputs) {
			struct uio *uio;
			/*
			 * This is a 'copy' operation.
			 * Load up the specified source buffer
			 */
			switch (dreq->dreq_inbuf_type) {
			case DMOVER_BUF_LINEAR:
				len_src = dreq->dreq_inbuf[0].dmbuf_linear.l_len;
				if (len_src != len_dst)
					goto error;
				if (bus_dmamap_load(sc->sc_dmat, ds->ds_src_dmap,
				    dreq->dreq_inbuf[0].dmbuf_linear.l_addr,
				    len_src, NULL,
				    BUS_DMA_NOWAIT | BUS_DMA_STREAMING |
				    BUS_DMA_READ))
					goto error;
				break;

			case DMOVER_BUF_UIO:
				uio = dreq->dreq_inbuf[0].dmbuf_uio;
				len_src = uio->uio_resid;
				if (uio->uio_rw != UIO_WRITE ||
				    len_src != len_dst)
					goto error;
				if (bus_dmamap_load_uio(sc->sc_dmat,
				    ds->ds_src_dmap, uio,
				    BUS_DMA_NOWAIT | BUS_DMA_STREAMING |
				    BUS_DMA_READ))
					goto error;
				break;

			default:
				goto error;
			}

			ds->ds_src_addr_hold = FALSE;
		} else
		if (dreq->dreq_assignment->das_algdesc->dad_name ==
		    DMOVER_FUNC_ZERO) {
			/*
			 * Zero-fill operation.
			 * Simply load up the pre-zeroed source buffer
			 */
			if (bus_dmamap_load(sc->sc_dmat, ds->ds_src_dmap,
			    ds->ds_zero_va, arm_pdcache_line_size, NULL,
			    BUS_DMA_NOWAIT | BUS_DMA_STREAMING | BUS_DMA_READ))
				goto error;

			ds->ds_src_addr_hold = TRUE;
		} else
		if (dreq->dreq_assignment->das_algdesc->dad_name ==
		    DMOVER_FUNC_FILL8) {
			/*
			 * Fill-8 operation.
			 * Initialise our fill-8 buffer, and load it up.
			 *
			 * XXX: Experiment with exactly how much of the
			 * source buffer needs to be filled. Particularly WRT
			 * burst size (which is hardcoded to 8 for dmover).
			 */
			memset(ds->ds_fill_va, dreq->dreq_immediate[0],
			    arm_pdcache_line_size);

			if (bus_dmamap_load(sc->sc_dmat, ds->ds_src_dmap,
			    ds->ds_fill_va, arm_pdcache_line_size, NULL,
			    BUS_DMA_NOWAIT | BUS_DMA_STREAMING | BUS_DMA_READ))
				goto error;

			ds->ds_src_addr_hold = TRUE;
		} else {
			goto error;
		}

		/*
		 * Now do the same for the destination buffer
		 */
		switch (dreq->dreq_outbuf_type) {
		case DMOVER_BUF_LINEAR:
			if (bus_dmamap_load(sc->sc_dmat, ds->ds_dst_dmap,
			    dreq->dreq_outbuf.dmbuf_linear.l_addr,
			    len_dst, NULL,
			    BUS_DMA_NOWAIT | BUS_DMA_STREAMING | BUS_DMA_WRITE))
				goto error_unload_src;
			break;

		case DMOVER_BUF_UIO:
			if (dreq->dreq_outbuf.dmbuf_uio->uio_rw != UIO_READ)
				goto error_unload_src;
			if (bus_dmamap_load_uio(sc->sc_dmat, ds->ds_dst_dmap,
			    dreq->dreq_outbuf.dmbuf_uio,
			    BUS_DMA_NOWAIT | BUS_DMA_STREAMING | BUS_DMA_WRITE))
				goto error_unload_src;
			break;

		default:
		error_unload_src:
			bus_dmamap_unload(sc->sc_dmat, ds->ds_src_dmap);
		error:
			dreq->dreq_error = EINVAL;
			dreq->dreq_flags |= DMOVER_REQ_ERROR;
			ds->ds_current = NULL;
			LIST_INSERT_HEAD(&dd->dd_free, ds, ds_link);
			dmover_done(dreq);
			continue;
		}

		/*
		 * The last step before shipping the request off to the
		 * DMAC driver is to sync the dma maps.
		 */
		bus_dmamap_sync(sc->sc_dmat, ds->ds_src_dmap, 0,
		    ds->ds_src_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);
		ds->ds_src_dma_segs = ds->ds_src_dmap->dm_segs;
		ds->ds_src_nsegs = ds->ds_src_dmap->dm_nsegs;

		bus_dmamap_sync(sc->sc_dmat, ds->ds_dst_dmap, 0,
		    ds->ds_dst_dmap->dm_mapsize, BUS_DMASYNC_PREREAD);
		ds->ds_dst_dma_segs = ds->ds_dst_dmap->dm_segs;
		ds->ds_dst_nsegs = ds->ds_dst_dmap->dm_nsegs;

		/*
		 * Hand the request over to the dmac section of the driver.
		 */
		if ((rv = pxa2x0_dmac_start_xfer(&ds->ds_xfer.dxs_xfer)) != 0) {
			bus_dmamap_unload(sc->sc_dmat, ds->ds_src_dmap);
			bus_dmamap_unload(sc->sc_dmat, ds->ds_dst_dmap);
			dreq->dreq_error = rv;
			dreq->dreq_flags |= DMOVER_REQ_ERROR;
			ds->ds_current = NULL;
			LIST_INSERT_HEAD(&dd->dd_free, ds, ds_link);
			dmover_done(dreq);
		}
	}

	/* All done */
	sc->sc_dmover.dd_busy = 0;
}

static void
dmac_dmover_done(struct dmac_xfer *dx, int error)
{
	struct dmac_dmover_state *ds = dx->dx_cookie;
	struct pxadmac_softc *sc = ds->ds_sc;
	struct dmover_request *dreq = ds->ds_current;

	/*
	 * A dmover(9) request has just completed.
	 */

	KDASSERT(dreq != NULL);

	/*
	 * Sync and unload the DMA maps
	 */
	bus_dmamap_sync(sc->sc_dmat, ds->ds_src_dmap, 0,
	    ds->ds_src_dmap->dm_mapsize, BUS_DMASYNC_POSTREAD);
	bus_dmamap_sync(sc->sc_dmat, ds->ds_dst_dmap, 0,
	    ds->ds_dst_dmap->dm_mapsize, BUS_DMASYNC_POSTWRITE);

	bus_dmamap_unload(sc->sc_dmat, ds->ds_src_dmap);
	bus_dmamap_unload(sc->sc_dmat, ds->ds_dst_dmap);

	ds->ds_current = NULL;
	LIST_INSERT_HEAD(&sc->sc_dmover.dd_free, ds, ds_link);

	/*
	 * Record the completion status of the transfer
	 */
	if (error) {
		dreq->dreq_error = error;
		dreq->dreq_flags |= DMOVER_REQ_ERROR;
	} else {
		if (dreq->dreq_outbuf_type == DMOVER_BUF_UIO)
			dreq->dreq_outbuf.dmbuf_uio->uio_resid = 0;
		if (dreq->dreq_assignment->das_algdesc->dad_ninputs &&
		    dreq->dreq_inbuf_type == DMOVER_BUF_UIO)
			dreq->dreq_inbuf[0].dmbuf_uio->uio_resid = 0;
	}

	/*
	 * Done!
	 */
	dmover_done(dreq);

	/*
	 * See if we can start some more dmover(9) requests.
	 *
	 * Note: We're already at splbio() here.
	 */
	if (sc->sc_dmover.dd_busy == 0)
		dmac_dmover_run(&sc->sc_dmover.dd_backend);
}
#endif

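/*
 * Allocate a transfer descriptor for a client of the DMAC.
 * 'flags' is passed straight to malloc(9), so with M_NOWAIT this can
 * return NULL.
 */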
struct dmac_xfer *
pxa2x0_dmac_allocate_xfer(int flags)
{
	struct dmac_xfer_state *dxs;

	dxs = malloc(sizeof(struct dmac_xfer_state), M_DEVBUF, flags);

	return ((struct dmac_xfer *)dxs);
}

void
pxa2x0_dmac_free_xfer(struct dmac_xfer *dx)
{

	/*
	 * XXX: Should verify the DMAC is not actively using this
	 * structure before freeing...
	 */
	free(dx, M_DEVBUF);
}

static inline int
dmac_validate_desc(struct dmac_xfer_desc *xd, size_t *psize)
{
	size_t size;
	int i;

	/*
	 * Make sure the transfer parameters are acceptable.
	 */

	if (xd->xd_addr_hold &&
	    (xd->xd_nsegs != 1 || xd->xd_dma_segs[0].ds_len == 0))
		return (EINVAL);

	for (i = 0, size = 0; i < xd->xd_nsegs; i++) {
		if (xd->xd_dma_segs[i].ds_addr & 0x7)
			return (EFAULT);
		size += xd->xd_dma_segs[i].ds_len;
	}

	*psize = size;
	return (0);
}

static inline int
dmac_init_desc(struct dmac_desc_segs *ds, struct dmac_xfer_desc *xd,
    size_t *psize)
{
	int err;

	if ((err = dmac_validate_desc(xd, psize)))
		return (err);

	ds->ds_curseg = xd->xd_dma_segs;
	ds->ds_nsegs = xd->xd_nsegs;
	ds->ds_offset = 0;
	return (0);
}

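/*
 * Validate and enqueue a transfer whose embedded struct dmac_xfer has
 * been filled in by the caller. The request is queued per priority, or
 * behind the current request for its peripheral if that peripheral is
 * already busy, and the priority queue is then kicked via dmac_start().
 *
 * A purely illustrative (hypothetical) memory-to-memory caller is
 * sketched below; 'sc_rxmap' and 'mydev_done' are driver-supplied names,
 * and the dx_* fields are those aliased by the dxs_* defines above:
 *
 *	struct dmac_xfer *dx = pxa2x0_dmac_allocate_xfer(M_NOWAIT);
 *	if (dx == NULL)
 *		return (ENOMEM);
 *	dx->dx_cookie = sc;
 *	dx->dx_done = mydev_done;
 *	dx->dx_priority = DMAC_PRIORITY_NORMAL;
 *	dx->dx_peripheral = DMAC_PERIPH_NONE;
 *	dx->dx_flow = DMAC_FLOW_CTRL_NONE;
 *	dx->dx_dev_width = DMAC_DEV_WIDTH_DEFAULT;
 *	dx->dx_burst_size = DMAC_BURST_SIZE_8;
 *	dx->dx_loop_notify = DMAC_DONT_LOOP;
 *	dx->dx_desc[DMAC_DESC_SRC].xd_addr_hold = FALSE;
 *	dx->dx_desc[DMAC_DESC_SRC].xd_nsegs = sc->sc_rxmap->dm_nsegs;
 *	dx->dx_desc[DMAC_DESC_SRC].xd_dma_segs = sc->sc_rxmap->dm_segs;
 *	(fill in DMAC_DESC_DST similarly, then)
 *	error = pxa2x0_dmac_start_xfer(dx);
 */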
int
pxa2x0_dmac_start_xfer(struct dmac_xfer *dx)
{
	struct pxadmac_softc *sc = pxadmac_sc;
	struct dmac_xfer_state *dxs = (struct dmac_xfer_state *)dx;
	struct dmac_xfer_desc *src, *dst;
	size_t size;
	int err, s;

	if (dxs->dxs_peripheral != DMAC_PERIPH_NONE &&
	    dxs->dxs_peripheral >= DMAC_N_PERIPH)
		return (EINVAL);

	src = &dxs->dxs_desc[DMAC_DESC_SRC];
	dst = &dxs->dxs_desc[DMAC_DESC_DST];

	if ((err = dmac_init_desc(&dxs->dxs_segs[DMAC_DESC_SRC], src, &size)))
		return (err);
	if (src->xd_addr_hold == FALSE &&
	    dxs->dxs_loop_notify != DMAC_DONT_LOOP &&
	    (size % dxs->dxs_loop_notify) != 0)
		return (EINVAL);

	if ((err = dmac_init_desc(&dxs->dxs_segs[DMAC_DESC_DST], dst, &size)))
		return (err);
	if (dst->xd_addr_hold == FALSE &&
	    dxs->dxs_loop_notify != DMAC_DONT_LOOP &&
	    (size % dxs->dxs_loop_notify) != 0)
		return (EINVAL);

	SLIST_INIT(&dxs->dxs_descs);
	dxs->dxs_channel = DMAC_NO_CHANNEL;
	dxs->dxs_dcmd = (((u_int32_t)dxs->dxs_dev_width) << DCMD_WIDTH_SHIFT) |
	    (((u_int32_t)dxs->dxs_burst_size) << DCMD_SIZE_SHIFT);

	switch (dxs->dxs_flow) {
	case DMAC_FLOW_CTRL_NONE:
		break;
	case DMAC_FLOW_CTRL_SRC:
		dxs->dxs_dcmd |= DCMD_FLOWSRC;
		break;
	case DMAC_FLOW_CTRL_DEST:
		dxs->dxs_dcmd |= DCMD_FLOWTRG;
		break;
	}

	if (src->xd_addr_hold == FALSE)
		dxs->dxs_dcmd |= DCMD_INCSRCADDR;
	if (dst->xd_addr_hold == FALSE)
		dxs->dxs_dcmd |= DCMD_INCTRGADDR;

	s = splbio();
	if (dxs->dxs_peripheral == DMAC_PERIPH_NONE ||
	    sc->sc_periph[dxs->dxs_peripheral].sp_busy == 0) {
		dxs->dxs_queue = &sc->sc_queue[DMAC_PRI(dxs->dxs_priority)];
		SIMPLEQ_INSERT_TAIL(dxs->dxs_queue, dxs, dxs_link);
		if (dxs->dxs_peripheral != DMAC_PERIPH_NONE)
			sc->sc_periph[dxs->dxs_peripheral].sp_busy++;
		dmac_start(sc, DMAC_PRI(dxs->dxs_priority));
	} else {
		dxs->dxs_queue = &sc->sc_periph[dxs->dxs_peripheral].sp_queue;
		SIMPLEQ_INSERT_TAIL(dxs->dxs_queue, dxs, dxs_link);
		sc->sc_periph[dxs->dxs_peripheral].sp_busy++;
	}
	splx(s);

	return (0);
}

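/*
 * Abort a transfer. A request which has not yet been given a channel is
 * simply removed from whichever queue it is on. Otherwise the channel is
 * stopped, its descriptors and the channel itself are returned to the
 * free pools, and any request queued behind it on the same peripheral is
 * promoted onto the appropriate priority queue.
 */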
void
pxa2x0_dmac_abort_xfer(struct dmac_xfer *dx)
{
	struct pxadmac_softc *sc = pxadmac_sc;
	struct dmac_xfer_state *ndxs, *dxs = (struct dmac_xfer_state *)dx;
	struct dmac_desc *desc, *ndesc;
	struct dmac_xfer_state_head *queue;
	u_int32_t rv;
	int s, timeout, need_start = 0;

	s = splbio();

	queue = dxs->dxs_queue;

	if (dxs->dxs_channel == DMAC_NO_CHANNEL) {
		/*
		 * The request has not yet started, or it has already
		 * completed. If the request is not on a queue, just
		 * return.
		 */
		if (queue == NULL) {
			splx(s);
			return;
		}

		dxs->dxs_queue = NULL;
		SIMPLEQ_REMOVE(queue, dxs, dmac_xfer_state, dxs_link);
	} else {
		/*
		 * The request is in progress. This is a bit trickier.
		 */
		dmac_reg_write(sc, DMAC_DCSR(dxs->dxs_channel), 0);

		for (timeout = 5000; timeout; timeout--) {
			rv = dmac_reg_read(sc, DMAC_DCSR(dxs->dxs_channel));
			if (rv & DCSR_STOPSTATE)
				break;
			delay(1);
		}

		if ((rv & DCSR_STOPSTATE) == 0)
			panic(
			    "pxa2x0_dmac_abort_xfer: channel %d failed to abort",
			    dxs->dxs_channel);

		/*
		 * Free resources allocated to the request
		 */
		for (desc = SLIST_FIRST(&dxs->dxs_descs); desc; desc = ndesc) {
			ndesc = SLIST_NEXT(desc, d_link);
			SLIST_INSERT_HEAD(&sc->sc_descs, desc, d_link);
			sc->sc_free_descs++;
		}

		sc->sc_active[dxs->dxs_channel] = NULL;
		dmac_free_channel(sc, DMAC_PRI(dxs->dxs_priority),
		    dxs->dxs_channel);

		if (dxs->dxs_peripheral != DMAC_PERIPH_NONE)
			dmac_reg_write(sc, DMAC_DRCMR(dxs->dxs_peripheral), 0);

		need_start = 1;
		dxs->dxs_queue = NULL;
	}

	if (dxs->dxs_peripheral == DMAC_PERIPH_NONE ||
	    sc->sc_periph[dxs->dxs_peripheral].sp_busy-- == 1 ||
	    queue == &sc->sc_periph[dxs->dxs_peripheral].sp_queue)
		goto out;

	/*
	 * We've just removed the current item for this
	 * peripheral, and there is at least one more
	 * pending item waiting. Make it current.
	 */
	ndxs = SIMPLEQ_FIRST(&sc->sc_periph[dxs->dxs_peripheral].sp_queue);
	dxs = ndxs;
	KDASSERT(dxs != NULL);
	SIMPLEQ_REMOVE_HEAD(&sc->sc_periph[dxs->dxs_peripheral].sp_queue,
	    dxs_link);

	dxs->dxs_queue = &sc->sc_queue[DMAC_PRI(dxs->dxs_priority)];
	SIMPLEQ_INSERT_TAIL(dxs->dxs_queue, dxs, dxs_link);
	need_start = 1;

	/*
	 * Try to start any pending requests with the same
	 * priority.
	 */
 out:
	if (need_start)
		dmac_start(sc, DMAC_PRI(dxs->dxs_priority));
	splx(s);
}

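/*
 * Start as many queued transfers of the given priority as the free
 * descriptor pool and idle channels allow.
 */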
static void
dmac_start(struct pxadmac_softc *sc, dmac_priority_t priority)
{
	struct dmac_xfer_state *dxs;
	u_int channel;

	while (sc->sc_free_descs &&
	    (dxs = SIMPLEQ_FIRST(&sc->sc_queue[priority])) != NULL &&
	    dmac_allocate_channel(sc, priority, &channel) == 0) {
		/*
		 * Yay, got some descriptors, a transfer request, and
		 * an available DMA channel.
		 */
		KDASSERT(sc->sc_active[channel] == NULL);
		SIMPLEQ_REMOVE_HEAD(&sc->sc_queue[priority], dxs_link);

		dxs->dxs_channel = channel;
		sc->sc_active[channel] = dxs;
		(void) dmac_continue_xfer(sc, dxs);
		/*
		 * XXX: Deal with descriptor allocation failure for loops
		 */
	}
}

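/*
 * Build a chain of DMAC descriptors covering as much of the remaining
 * source/destination segments as the free descriptor pool allows, then
 * point the channel at the chain and set it running. Returns non-zero
 * if a chain was started, zero if nothing could be done (the transfer
 * is finished, or no descriptors were available).
 */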
static int
dmac_continue_xfer(struct pxadmac_softc *sc, struct dmac_xfer_state *dxs)
{
	struct dmac_desc *desc, *prev_desc;
	struct pxa2x0_dma_desc *dd;
	struct dmac_desc_segs *src_ds, *dst_ds;
	struct dmac_xfer_desc *src_xd, *dst_xd;
	bus_dma_segment_t *src_seg, *dst_seg;
	bus_addr_t src_mem_addr, dst_mem_addr;
	bus_size_t src_size, dst_size, this_size;

	desc = NULL;
	prev_desc = NULL;
	dd = NULL;
	src_ds = &dxs->dxs_segs[DMAC_DESC_SRC];
	dst_ds = &dxs->dxs_segs[DMAC_DESC_DST];
	src_xd = &dxs->dxs_desc[DMAC_DESC_SRC];
	dst_xd = &dxs->dxs_desc[DMAC_DESC_DST];
	SLIST_INIT(&dxs->dxs_descs);

	/*
	 * As long as the source/destination buffers have DMA segments,
	 * and we have free descriptors, build a DMA chain.
	 */
	while (src_ds->ds_nsegs && dst_ds->ds_nsegs && sc->sc_free_descs) {
		src_seg = src_ds->ds_curseg;
		src_mem_addr = src_seg->ds_addr + src_ds->ds_offset;
		if (src_xd->xd_addr_hold == FALSE &&
		    dxs->dxs_loop_notify != DMAC_DONT_LOOP)
			src_size = dxs->dxs_loop_notify;
		else
			src_size = src_seg->ds_len - src_ds->ds_offset;

		dst_seg = dst_ds->ds_curseg;
		dst_mem_addr = dst_seg->ds_addr + dst_ds->ds_offset;
		if (dst_xd->xd_addr_hold == FALSE &&
		    dxs->dxs_loop_notify != DMAC_DONT_LOOP)
			dst_size = dxs->dxs_loop_notify;
		else
			dst_size = dst_seg->ds_len - dst_ds->ds_offset;

		/*
		 * We may need to split a source or destination segment
		 * across two or more DMAC descriptors.
		 */
		while (src_size && dst_size &&
		    (desc = SLIST_FIRST(&sc->sc_descs)) != NULL) {
			SLIST_REMOVE_HEAD(&sc->sc_descs, d_link);
			sc->sc_free_descs--;

			/*
			 * Decide how much data we're going to transfer
			 * using this DMAC descriptor.
			 */
			if (src_xd->xd_addr_hold)
				this_size = dst_size;
			else
			if (dst_xd->xd_addr_hold)
				this_size = src_size;
			else
				this_size = min(dst_size, src_size);

			/*
			 * But clamp the transfer size to the DMAC
			 * descriptor's maximum.
			 */
			this_size = min(this_size, DCMD_LENGTH_MASK & ~0x1f);

			/*
			 * Fill in the DMAC descriptor
			 */
			dd = desc->d_desc;
			dd->dd_dsadr = src_mem_addr;
			dd->dd_dtadr = dst_mem_addr;
			dd->dd_dcmd = dxs->dxs_dcmd | this_size;

			/*
			 * Link it into the chain
			 */
			if (prev_desc) {
				SLIST_INSERT_AFTER(prev_desc, desc, d_link);
				prev_desc->d_desc->dd_ddadr = desc->d_desc_pa;
			} else {
				SLIST_INSERT_HEAD(&dxs->dxs_descs, desc,
				    d_link);
			}
			prev_desc = desc;

			/*
			 * Update the source/destination pointers
			 */
			if (src_xd->xd_addr_hold == FALSE) {
				src_size -= this_size;
				src_ds->ds_offset += this_size;
				if (src_ds->ds_offset == src_seg->ds_len) {
					KDASSERT(src_size == 0);
					src_ds->ds_curseg = ++src_seg;
					src_ds->ds_offset = 0;
					src_ds->ds_nsegs--;
				} else
					src_mem_addr += this_size;
			}

			if (dst_xd->xd_addr_hold == FALSE) {
				dst_size -= this_size;
				dst_ds->ds_offset += this_size;
				if (dst_ds->ds_offset == dst_seg->ds_len) {
					KDASSERT(dst_size == 0);
					dst_ds->ds_curseg = ++dst_seg;
					dst_ds->ds_offset = 0;
					dst_ds->ds_nsegs--;
				} else
					dst_mem_addr += this_size;
			}
		}

		if (dxs->dxs_loop_notify != DMAC_DONT_LOOP) {
			/*
			 * We must be able to allocate descriptors for the
			 * entire loop. Otherwise, return them to the pool
			 * and bail.
			 */
			if (desc == NULL) {
				struct dmac_desc *ndesc;
				for (desc = SLIST_FIRST(&dxs->dxs_descs);
				    desc; desc = ndesc) {
					ndesc = SLIST_NEXT(desc, d_link);
					SLIST_INSERT_HEAD(&sc->sc_descs, desc,
					    d_link);
					sc->sc_free_descs++;
				}

				return (0);
			}

			KASSERT(dd != NULL);
			dd->dd_dcmd |= DCMD_ENDIRQEN;
		}
	}

	/*
	 * Did we manage to build a chain?
	 * If not, just return.
	 */
	if (dd == NULL)
		return (0);

	if (dxs->dxs_loop_notify == DMAC_DONT_LOOP) {
		dd->dd_dcmd |= DCMD_ENDIRQEN;
		dd->dd_ddadr = DMAC_DESC_LAST;
	} else
		dd->dd_ddadr = SLIST_FIRST(&dxs->dxs_descs)->d_desc_pa;

	if (dxs->dxs_peripheral != DMAC_PERIPH_NONE) {
		dmac_reg_write(sc, DMAC_DRCMR(dxs->dxs_peripheral),
		    dxs->dxs_channel | DRCMR_MAPVLD);
	}
	dmac_reg_write(sc, DMAC_DDADR(dxs->dxs_channel),
	    SLIST_FIRST(&dxs->dxs_descs)->d_desc_pa);
	dmac_reg_write(sc, DMAC_DCSR(dxs->dxs_channel),
	    DCSR_ENDINTR | DCSR_RUN);

	return (1);
}

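/*
 * Handle a completion (or bus error) interrupt on one channel: notify
 * looping transfers directly, otherwise recycle the channel's
 * descriptors and either continue the transfer or tear it down and
 * invoke the 'done' callback. Returns a bit mask of priorities whose
 * queues should be re-examined by dmac_intr().
 */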
static u_int
dmac_channel_intr(struct pxadmac_softc *sc, u_int channel)
{
	struct dmac_xfer_state *dxs;
	struct dmac_desc *desc, *ndesc;
	u_int32_t dcsr;
	u_int rv = 0;

	dcsr = dmac_reg_read(sc, DMAC_DCSR(channel));
	dmac_reg_write(sc, DMAC_DCSR(channel), dcsr);
	if (dmac_reg_read(sc, DMAC_DCSR(channel)) & DCSR_STOPSTATE)
		dmac_reg_write(sc, DMAC_DCSR(channel), dcsr & ~DCSR_RUN);

	if ((dxs = sc->sc_active[channel]) == NULL) {
		printf("%s: Stray DMAC interrupt for unallocated channel %d\n",
		    sc->sc_dev.dv_xname, channel);
		return (0);
	}

	/*
	 * Clear down the interrupt in the DMA Interrupt Register
	 */
	dmac_reg_write(sc, DMAC_DINT, (1u << channel));

	/*
	 * If this is a looping request, invoke the 'done' callback and
	 * return immediately.
	 */
	if (dxs->dxs_loop_notify != DMAC_DONT_LOOP &&
	    (dcsr & DCSR_BUSERRINTR) == 0) {
		(dxs->dxs_done)(&dxs->dxs_xfer, 0);
		return (0);
	}

	/*
	 * Free the descriptors allocated to the completed transfer
	 *
	 * XXX: If there is more data to transfer in this request,
	 * we could simply reuse some or all of the descriptors
	 * already allocated for the transfer which just completed.
	 */
	for (desc = SLIST_FIRST(&dxs->dxs_descs); desc; desc = ndesc) {
		ndesc = SLIST_NEXT(desc, d_link);
		SLIST_INSERT_HEAD(&sc->sc_descs, desc, d_link);
		sc->sc_free_descs++;
	}

	if ((dcsr & DCSR_BUSERRINTR) || dmac_continue_xfer(sc, dxs) == 0) {
		/*
		 * The transfer completed (possibly due to an error),
		 * -OR- we were unable to continue any remaining
		 * segment of the transfer due to a lack of descriptors.
		 *
		 * In either case, we have to free up DMAC resources
		 * allocated to the request.
		 */
		sc->sc_active[channel] = NULL;
		dmac_free_channel(sc, DMAC_PRI(dxs->dxs_priority), channel);
		dxs->dxs_channel = DMAC_NO_CHANNEL;
		if (dxs->dxs_peripheral != DMAC_PERIPH_NONE)
			dmac_reg_write(sc, DMAC_DRCMR(dxs->dxs_peripheral), 0);

		if (dxs->dxs_segs[DMAC_DESC_SRC].ds_nsegs == 0 ||
		    dxs->dxs_segs[DMAC_DESC_DST].ds_nsegs == 0 ||
		    (dcsr & DCSR_BUSERRINTR)) {

			/*
			 * The transfer is complete.
			 */
			dxs->dxs_queue = NULL;
			rv = 1u << DMAC_PRI(dxs->dxs_priority);

			if (dxs->dxs_peripheral != DMAC_PERIPH_NONE &&
			    --sc->sc_periph[dxs->dxs_peripheral].sp_busy != 0) {
				struct dmac_xfer_state *ndxs;
				/*
				 * We've just removed the current item for this
				 * peripheral, and there is at least one more
				 * pending item waiting. Make it current.
				 */
				ndxs = SIMPLEQ_FIRST(
				    &sc->sc_periph[dxs->dxs_peripheral].sp_queue);
				KDASSERT(ndxs != NULL);
				SIMPLEQ_REMOVE_HEAD(
				    &sc->sc_periph[dxs->dxs_peripheral].sp_queue,
				    dxs_link);

				ndxs->dxs_queue =
				    &sc->sc_queue[DMAC_PRI(dxs->dxs_priority)];
				SIMPLEQ_INSERT_TAIL(ndxs->dxs_queue, ndxs,
				    dxs_link);
			}

			(dxs->dxs_done)(&dxs->dxs_xfer,
			    (dcsr & DCSR_BUSERRINTR) ? EFAULT : 0);
		} else {
			/*
			 * The request is not yet complete, but we were unable
			 * to make any headway at this time because there are
			 * no free descriptors. Put the request back at the
			 * head of the appropriate priority queue. It'll be
			 * dealt with as other in-progress transfers complete.
			 */
			SIMPLEQ_INSERT_HEAD(
			    &sc->sc_queue[DMAC_PRI(dxs->dxs_priority)], dxs,
			    dxs_link);
		}
	}

	return (rv);
}

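/*
 * DMAC interrupt handler: service every channel flagged in DINT, then
 * try to start queued transfers at any priority which just completed
 * a transfer.
 */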
static int
dmac_intr(void *arg)
{
	struct pxadmac_softc *sc = arg;
	u_int32_t rv, mask;
	u_int chan, pri;

	rv = dmac_reg_read(sc, DMAC_DINT);
	if ((rv & DMAC_DINT_MASK) == 0)
		return (0);

	/*
	 * Deal with completed transfers
	 */
	for (chan = 0, mask = 1u, pri = 0;
	    chan < DMAC_N_CHANNELS; chan++, mask <<= 1) {
		if (rv & mask)
			pri |= dmac_channel_intr(sc, chan);
	}

	/*
	 * Now try to start any queued transfers
	 */
#if (DMAC_N_PRIORITIES > 1)
	if (pri & (1u << DMAC_PRIORITY_HIGH))
		dmac_start(sc, DMAC_PRIORITY_HIGH);
	if (pri & (1u << DMAC_PRIORITY_MED))
		dmac_start(sc, DMAC_PRIORITY_MED);
	if (pri & (1u << DMAC_PRIORITY_LOW))
		dmac_start(sc, DMAC_PRIORITY_LOW);
#else
	if (pri)
		dmac_start(sc, DMAC_PRIORITY_NORMAL);
#endif

	return (1);
}