/* $NetBSD: pxa2x0_dmac.c,v 1.8.4.1 2011/11/20 13:47:07 jmcneill Exp $ */

/*
 * Copyright (c) 2003, 2005 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Steve C. Woodford for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed for the NetBSD Project by
 *      Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "opt_pxa2x0_dmac.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/queue.h>

#include <uvm/uvm_param.h>	/* For PAGE_SIZE */

#include <machine/intr.h>
#include <sys/bus.h>

#include <dev/dmover/dmovervar.h>

#include <arm/xscale/pxa2x0reg.h>
#include <arm/xscale/pxa2x0var.h>
#include <arm/xscale/pxa2x0cpu.h>

#include <arm/xscale/pxa2x0_dmac.h>

#include "locators.h"

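/*
 * Unless the kernel is built with PXA2X0_DMAC_FIXED_PRIORITY, requests
 * are scheduled on three software priority levels. With the option
 * defined, everything collapses onto a single queue and channel pool
 * and prioritisation is left to the hardware's fixed channel ordering.
 */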
#undef DMAC_N_PRIORITIES
#ifndef PXA2X0_DMAC_FIXED_PRIORITY
#define DMAC_N_PRIORITIES 3
#define DMAC_PRI(p) (p)
#else
#define DMAC_N_PRIORITIES 1
#define DMAC_PRI(p) (0)
#endif

struct dmac_desc {
	SLIST_ENTRY(dmac_desc) d_link;
	struct pxa2x0_dma_desc *d_desc;
	paddr_t d_desc_pa;
};

/*
 * This is used to maintain state for an in-progress transfer.
 * It tracks the current DMA segment, and offset within the segment
 * in the case where we had to split a request into several DMA
 * operations due to a shortage of DMAC descriptors.
 */
struct dmac_desc_segs {
	bus_dma_segment_t *ds_curseg;	/* Current segment */
	u_int ds_nsegs;			/* Remaining segments */
	bus_size_t ds_offset;		/* Offset within current seg */
};

SIMPLEQ_HEAD(dmac_xfer_state_head, dmac_xfer_state);

struct dmac_xfer_state {
	struct dmac_xfer dxs_xfer;
#define dxs_cookie dxs_xfer.dx_cookie
#define dxs_done dxs_xfer.dx_done
#define dxs_priority dxs_xfer.dx_priority
#define dxs_peripheral dxs_xfer.dx_peripheral
#define dxs_flow dxs_xfer.dx_flow
#define dxs_dev_width dxs_xfer.dx_dev_width
#define dxs_burst_size dxs_xfer.dx_burst_size
#define dxs_loop_notify dxs_xfer.dx_loop_notify
#define dxs_desc dxs_xfer.dx_desc
	SIMPLEQ_ENTRY(dmac_xfer_state) dxs_link;
	SLIST_HEAD(, dmac_desc) dxs_descs;
	struct dmac_xfer_state_head *dxs_queue;
	u_int dxs_channel;
#define DMAC_NO_CHANNEL (~0)
	u_int32_t dxs_dcmd;
	struct dmac_desc_segs dxs_segs[2];
	bool dxs_misaligned_flag;
};


#if (PXA2X0_DMAC_DMOVER_CONCURRENCY > 0)
/*
 * This structure is used to maintain state for the dmover(9) backend
 * part of the driver. We can have a number of concurrent dmover
 * requests in progress at any given time. The exact number is given
 * by the PXA2X0_DMAC_DMOVER_CONCURRENCY compile-time constant. One of
 * these structures is allocated for each concurrent request.
 */
struct dmac_dmover_state {
	LIST_ENTRY(dmac_dmover_state) ds_link;	/* List of idle dmover chans */
	struct pxadmac_softc *ds_sc;		/* Uplink to pxadmac softc */
	struct dmover_request *ds_current;	/* Current dmover request */
	struct dmac_xfer_state ds_xfer;
	bus_dmamap_t ds_src_dmap;
	bus_dmamap_t ds_dst_dmap;
/*
 * There is no inherent size limit in the DMA engine.
 * The following limit is somewhat arbitrary.
 */
#define DMAC_DMOVER_MAX_XFER (8*1024*1024)
#if 0
/* This would require 16KB * 2 just for segments... */
#define DMAC_DMOVER_NSEGS ((DMAC_DMOVER_MAX_XFER / PAGE_SIZE) + 1)
#else
#define DMAC_DMOVER_NSEGS 512	/* XXX: Only enough for 2MB */
#endif
	bus_dma_segment_t ds_zero_seg;	/* Used for zero-fill ops */
	void *ds_zero_va;
	bus_dma_segment_t ds_fill_seg;	/* Used for fill8 ops */
	void *ds_fill_va;

#define ds_src_addr_hold ds_xfer.dxs_desc[DMAC_DESC_SRC].xd_addr_hold
#define ds_dst_addr_hold ds_xfer.dxs_desc[DMAC_DESC_DST].xd_addr_hold
#define ds_src_burst ds_xfer.dxs_desc[DMAC_DESC_SRC].xd_burst_size
#define ds_dst_burst ds_xfer.dxs_desc[DMAC_DESC_DST].xd_burst_size
#define ds_src_dma_segs ds_xfer.dxs_desc[DMAC_DESC_SRC].xd_dma_segs
#define ds_dst_dma_segs ds_xfer.dxs_desc[DMAC_DESC_DST].xd_dma_segs
#define ds_src_nsegs ds_xfer.dxs_desc[DMAC_DESC_SRC].xd_nsegs
#define ds_dst_nsegs ds_xfer.dxs_desc[DMAC_DESC_DST].xd_nsegs
};

/*
 * Overall dmover(9) backend state
 */
struct dmac_dmover {
	struct dmover_backend dd_backend;
	int dd_busy;
	LIST_HEAD(, dmac_dmover_state) dd_free;
	struct dmac_dmover_state dd_state[PXA2X0_DMAC_DMOVER_CONCURRENCY];
};
#endif

struct pxadmac_softc {
	device_t sc_dev;
	bus_space_tag_t sc_bust;
	bus_dma_tag_t sc_dmat;
	bus_space_handle_t sc_bush;
	void *sc_irqcookie;

	/*
	 * Queue of pending requests, per priority
	 */
	struct dmac_xfer_state_head sc_queue[DMAC_N_PRIORITIES];

	/*
	 * Queue of pending requests, per peripheral
	 */
	struct {
		struct dmac_xfer_state_head sp_queue;
		u_int sp_busy;
	} sc_periph[DMAC_N_PERIPH];

	/*
	 * Active requests, per channel.
	 */
	struct dmac_xfer_state *sc_active[DMAC_N_CHANNELS];

	/*
	 * Channel Priority Allocation
	 */
	struct {
		u_int8_t p_first;
		u_int8_t p_pri[DMAC_N_CHANNELS];
	} sc_prio[DMAC_N_PRIORITIES];
#define DMAC_PRIO_END ((u_int8_t)~0)
	u_int8_t sc_channel_priority[DMAC_N_CHANNELS];

	/*
	 * DMA descriptor management
	 */
	bus_dmamap_t sc_desc_map;
	bus_dma_segment_t sc_segs;
#define DMAC_N_DESCS ((PAGE_SIZE * 2) / sizeof(struct pxa2x0_dma_desc))
#define DMAC_DESCS_SIZE (DMAC_N_DESCS * sizeof(struct pxa2x0_dma_desc))
	struct dmac_desc sc_all_descs[DMAC_N_DESCS];
	u_int sc_free_descs;
	SLIST_HEAD(, dmac_desc) sc_descs;

#if (PXA2X0_DMAC_DMOVER_CONCURRENCY > 0)
	/*
	 * dmover(9) backend state
	 */
	struct dmac_dmover sc_dmover;
#endif
};

static int pxadmac_match(device_t, cfdata_t, void *);
static void pxadmac_attach(device_t, device_t, void *);

CFATTACH_DECL_NEW(pxadmac, sizeof(struct pxadmac_softc),
    pxadmac_match, pxadmac_attach, NULL, NULL);

static struct pxadmac_softc *pxadmac_sc;

static void dmac_start(struct pxadmac_softc *, dmac_priority_t);
static int dmac_continue_xfer(struct pxadmac_softc *, struct dmac_xfer_state *);
static u_int dmac_channel_intr(struct pxadmac_softc *, u_int);
static int dmac_intr(void *);

#if (PXA2X0_DMAC_DMOVER_CONCURRENCY > 0)
static void dmac_dmover_attach(struct pxadmac_softc *);
static void dmac_dmover_process(struct dmover_backend *);
static void dmac_dmover_run(struct dmover_backend *);
static void dmac_dmover_done(struct dmac_xfer *, int);
#endif

static inline u_int32_t
dmac_reg_read(struct pxadmac_softc *sc, int reg)
{

	return (bus_space_read_4(sc->sc_bust, sc->sc_bush, reg));
}

static inline void
dmac_reg_write(struct pxadmac_softc *sc, int reg, u_int32_t val)
{

	bus_space_write_4(sc->sc_bust, sc->sc_bush, reg, val);
}

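/*
 * Channel allocation. Each sc_prio[] entry threads a free list through
 * its p_pri[] array: p_first holds the first free channel at that
 * priority (DMAC_PRIO_END when the list is empty) and p_pri[channel]
 * holds the next free channel after 'channel'.
 */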
static inline int
dmac_allocate_channel(struct pxadmac_softc *sc, dmac_priority_t priority,
    u_int *chanp)
{
	u_int channel;

	KDASSERT((u_int)priority < DMAC_N_PRIORITIES);

	if ((channel = sc->sc_prio[priority].p_first) == DMAC_PRIO_END)
		return (-1);
	sc->sc_prio[priority].p_first = sc->sc_prio[priority].p_pri[channel];

	*chanp = channel;
	return (0);
}

static inline void
dmac_free_channel(struct pxadmac_softc *sc, dmac_priority_t priority,
    u_int channel)
{

	KDASSERT((u_int)priority < DMAC_N_PRIORITIES);

	sc->sc_prio[priority].p_pri[channel] = sc->sc_prio[priority].p_first;
	sc->sc_prio[priority].p_first = channel;
}

static int
pxadmac_match(device_t parent, cfdata_t cf, void *aux)
{
	struct pxaip_attach_args *pxa = aux;

	if (pxadmac_sc || pxa->pxa_addr != PXA2X0_DMAC_BASE ||
	    pxa->pxa_intr != PXA2X0_INT_DMA)
		return (0);

	pxa->pxa_size = PXA2X0_DMAC_SIZE;

	return (1);
}

static void
pxadmac_attach(device_t parent, device_t self, void *aux)
{
	struct pxadmac_softc *sc = device_private(self);
	struct pxaip_attach_args *pxa = aux;
	struct pxa2x0_dma_desc *dd;
	int i, nsegs;

	sc->sc_dev = self;
	sc->sc_bust = pxa->pxa_iot;
	sc->sc_dmat = pxa->pxa_dmat;

	aprint_normal(": DMA Controller\n");

	if (bus_space_map(sc->sc_bust, pxa->pxa_addr, pxa->pxa_size, 0,
	    &sc->sc_bush)) {
		aprint_error_dev(self, "Can't map registers!\n");
		return;
	}

	pxadmac_sc = sc;

	/*
	 * Make sure the DMAC is quiescent
	 */
	for (i = 0; i < DMAC_N_CHANNELS; i++) {
		dmac_reg_write(sc, DMAC_DCSR(i), 0);
		dmac_reg_write(sc, DMAC_DRCMR(i), 0);
		sc->sc_active[i] = NULL;
	}
	dmac_reg_write(sc, DMAC_DINT,
	    dmac_reg_read(sc, DMAC_DINT) & DMAC_DINT_MASK);

	/*
	 * Initialise the request queues
	 */
	for (i = 0; i < DMAC_N_PRIORITIES; i++)
		SIMPLEQ_INIT(&sc->sc_queue[i]);

	/*
	 * Initialise the per-peripheral request queues
	 */
	for (i = 0; i < DMAC_N_PERIPH; i++) {
		sc->sc_periph[i].sp_busy = 0;
		SIMPLEQ_INIT(&sc->sc_periph[i].sp_queue);
	}

	/*
	 * Initialise the channel priority metadata
	 */
	memset(sc->sc_prio, DMAC_PRIO_END, sizeof(sc->sc_prio));
	for (i = 0; i < DMAC_N_CHANNELS; i++) {
#if (DMAC_N_PRIORITIES > 1)
		if (i <= 3)
			dmac_free_channel(sc, DMAC_PRIORITY_HIGH, i);
		else
		if (i <= 7)
			dmac_free_channel(sc, DMAC_PRIORITY_MED, i);
		else
			dmac_free_channel(sc, DMAC_PRIORITY_LOW, i);
#else
		dmac_free_channel(sc, DMAC_PRIORITY_NORMAL, i);
#endif
	}

	/*
	 * Initialise DMA descriptors and associated metadata
	 */
	if (bus_dmamem_alloc(sc->sc_dmat, DMAC_DESCS_SIZE, DMAC_DESCS_SIZE, 0,
	    &sc->sc_segs, 1, &nsegs, BUS_DMA_NOWAIT))
		panic("pxadmac_attach: bus_dmamem_alloc failed");

	if (bus_dmamem_map(sc->sc_dmat, &sc->sc_segs, 1, DMAC_DESCS_SIZE,
	    (void *)&dd, BUS_DMA_COHERENT|BUS_DMA_NOCACHE))
		panic("pxadmac_attach: bus_dmamem_map failed");

	if (bus_dmamap_create(sc->sc_dmat, DMAC_DESCS_SIZE, 1,
	    DMAC_DESCS_SIZE, 0, BUS_DMA_NOWAIT, &sc->sc_desc_map))
		panic("pxadmac_attach: bus_dmamap_create failed");

	if (bus_dmamap_load(sc->sc_dmat, sc->sc_desc_map, (void *)dd,
	    DMAC_DESCS_SIZE, NULL, BUS_DMA_NOWAIT))
		panic("pxadmac_attach: bus_dmamap_load failed");

	SLIST_INIT(&sc->sc_descs);
	sc->sc_free_descs = DMAC_N_DESCS;
	for (i = 0; i < DMAC_N_DESCS; i++, dd++) {
		SLIST_INSERT_HEAD(&sc->sc_descs, &sc->sc_all_descs[i], d_link);
		sc->sc_all_descs[i].d_desc = dd;
		sc->sc_all_descs[i].d_desc_pa =
		    sc->sc_segs.ds_addr + (sizeof(struct pxa2x0_dma_desc) * i);
	}

	sc->sc_irqcookie = pxa2x0_intr_establish(pxa->pxa_intr, IPL_BIO,
	    dmac_intr, sc);
	KASSERT(sc->sc_irqcookie != NULL);

#if (PXA2X0_DMAC_DMOVER_CONCURRENCY > 0)
	dmac_dmover_attach(sc);
#endif
}

#if (PXA2X0_DMAC_DMOVER_CONCURRENCY > 0)
/*
 * We support the following dmover(9) operations
 */
static const struct dmover_algdesc dmac_dmover_algdescs[] = {
	{DMOVER_FUNC_ZERO, NULL, 0},	/* Zero-fill */
	{DMOVER_FUNC_FILL8, NULL, 0},	/* Fill with 8-bit immediate value */
	{DMOVER_FUNC_COPY, NULL, 1}	/* Copy */
};
#define DMAC_DMOVER_ALGDESC_COUNT \
	(sizeof(dmac_dmover_algdescs) / sizeof(dmac_dmover_algdescs[0]))

static void
dmac_dmover_attach(struct pxadmac_softc *sc)
{
	struct dmac_dmover *dd = &sc->sc_dmover;
	struct dmac_dmover_state *ds;
	int i, dummy;

	/*
	 * Describe ourselves to the dmover(9) code
	 */
	dd->dd_backend.dmb_name = "pxadmac";
	dd->dd_backend.dmb_speed = 100*1024*1024;	/* XXX */
	dd->dd_backend.dmb_cookie = sc;
	dd->dd_backend.dmb_algdescs = dmac_dmover_algdescs;
	dd->dd_backend.dmb_nalgdescs = DMAC_DMOVER_ALGDESC_COUNT;
	dd->dd_backend.dmb_process = dmac_dmover_process;
	dd->dd_busy = 0;
	LIST_INIT(&dd->dd_free);

	for (i = 0; i < PXA2X0_DMAC_DMOVER_CONCURRENCY; i++) {
		ds = &dd->dd_state[i];
		ds->ds_sc = sc;
		ds->ds_current = NULL;
		ds->ds_xfer.dxs_cookie = ds;
		ds->ds_xfer.dxs_done = dmac_dmover_done;
		ds->ds_xfer.dxs_priority = DMAC_PRIORITY_NORMAL;
		ds->ds_xfer.dxs_peripheral = DMAC_PERIPH_NONE;
		ds->ds_xfer.dxs_flow = DMAC_FLOW_CTRL_NONE;
		ds->ds_xfer.dxs_dev_width = DMAC_DEV_WIDTH_DEFAULT;
		ds->ds_xfer.dxs_burst_size = DMAC_BURST_SIZE_8;	/* XXX */
		ds->ds_xfer.dxs_loop_notify = DMAC_DONT_LOOP;
		ds->ds_src_addr_hold = false;
		ds->ds_dst_addr_hold = false;
		ds->ds_src_nsegs = 0;
		ds->ds_dst_nsegs = 0;
		LIST_INSERT_HEAD(&dd->dd_free, ds, ds_link);

		/*
		 * Create dma maps for both source and destination buffers.
		 */
		if (bus_dmamap_create(sc->sc_dmat, DMAC_DMOVER_MAX_XFER,
		    DMAC_DMOVER_NSEGS, DMAC_DMOVER_MAX_XFER,
		    0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &ds->ds_src_dmap) ||
		    bus_dmamap_create(sc->sc_dmat, DMAC_DMOVER_MAX_XFER,
		    DMAC_DMOVER_NSEGS, DMAC_DMOVER_MAX_XFER,
		    0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &ds->ds_dst_dmap)) {
			panic("dmac_dmover_attach: bus_dmamap_create failed");
		}

		/*
		 * Allocate some dma memory to be used as source buffers
		 * for the zero-fill and fill-8 operations. We only need
		 * small buffers here, since we set up the DMAC source
		 * descriptor with 'ds_addr_hold' set to true.
		 */
		if (bus_dmamem_alloc(sc->sc_dmat,
		    arm_pdcache_line_size, arm_pdcache_line_size, 0,
		    &ds->ds_zero_seg, 1, &dummy, BUS_DMA_NOWAIT) ||
		    bus_dmamem_alloc(sc->sc_dmat,
		    arm_pdcache_line_size, arm_pdcache_line_size, 0,
		    &ds->ds_fill_seg, 1, &dummy, BUS_DMA_NOWAIT)) {
			panic("dmac_dmover_attach: bus_dmamem_alloc failed");
		}

		if (bus_dmamem_map(sc->sc_dmat, &ds->ds_zero_seg, 1,
		    arm_pdcache_line_size, &ds->ds_zero_va,
		    BUS_DMA_NOWAIT) ||
		    bus_dmamem_map(sc->sc_dmat, &ds->ds_fill_seg, 1,
		    arm_pdcache_line_size, &ds->ds_fill_va,
		    BUS_DMA_NOWAIT)) {
			panic("dmac_dmover_attach: bus_dmamem_map failed");
		}

		/*
		 * Make sure the zero-fill source buffer really is zero filled
		 */
		memset(ds->ds_zero_va, 0, arm_pdcache_line_size);
	}

	dmover_backend_register(&sc->sc_dmover.dd_backend);
}

static void
dmac_dmover_process(struct dmover_backend *dmb)
{
	struct pxadmac_softc *sc = dmb->dmb_cookie;
	int s = splbio();

	/*
	 * If the backend is currently idle, go process the queue.
	 */
	if (sc->sc_dmover.dd_busy == 0)
		dmac_dmover_run(&sc->sc_dmover.dd_backend);
	splx(s);
}

static void
dmac_dmover_run(struct dmover_backend *dmb)
{
	struct dmover_request *dreq;
	struct pxadmac_softc *sc;
	struct dmac_dmover *dd;
	struct dmac_dmover_state *ds;
	size_t len_src, len_dst;
	int rv;

	sc = dmb->dmb_cookie;
	dd = &sc->sc_dmover;
	sc->sc_dmover.dd_busy = 1;

	/*
	 * As long as we can queue up dmover requests...
	 */
	while ((dreq = TAILQ_FIRST(&dmb->dmb_pendreqs)) != NULL &&
	    (ds = LIST_FIRST(&dd->dd_free)) != NULL) {
		/*
		 * Pull the request off the queue, mark it 'running',
		 * and make it 'current'.
		 */
		dmover_backend_remque(dmb, dreq);
		dreq->dreq_flags |= DMOVER_REQ_RUNNING;
		LIST_REMOVE(ds, ds_link);
		ds->ds_current = dreq;

		switch (dreq->dreq_outbuf_type) {
		case DMOVER_BUF_LINEAR:
			len_dst = dreq->dreq_outbuf.dmbuf_linear.l_len;
			break;
		case DMOVER_BUF_UIO:
			len_dst = dreq->dreq_outbuf.dmbuf_uio->uio_resid;
			break;
		default:
			goto error;
		}

		/*
		 * Fix up the appropriate DMA 'source' buffer
		 */
		if (dreq->dreq_assignment->das_algdesc->dad_ninputs) {
			struct uio *uio;
			/*
			 * This is a 'copy' operation.
			 * Load up the specified source buffer
			 */
			switch (dreq->dreq_inbuf_type) {
			case DMOVER_BUF_LINEAR:
				len_src= dreq->dreq_inbuf[0].dmbuf_linear.l_len;
				if (len_src != len_dst)
					goto error;
				if (bus_dmamap_load(sc->sc_dmat,ds->ds_src_dmap,
				    dreq->dreq_inbuf[0].dmbuf_linear.l_addr,
				    len_src, NULL,
				    BUS_DMA_NOWAIT | BUS_DMA_STREAMING |
				    BUS_DMA_READ))
					goto error;
				break;

			case DMOVER_BUF_UIO:
				uio = dreq->dreq_inbuf[0].dmbuf_uio;
				len_src = uio->uio_resid;
				if (uio->uio_rw != UIO_WRITE ||
				    len_src != len_dst)
					goto error;
				if (bus_dmamap_load_uio(sc->sc_dmat,
				    ds->ds_src_dmap, uio,
				    BUS_DMA_NOWAIT | BUS_DMA_STREAMING |
				    BUS_DMA_READ))
					goto error;
				break;

			default:
				goto error;
			}

			ds->ds_src_addr_hold = false;
		} else
		if (dreq->dreq_assignment->das_algdesc->dad_name ==
		    DMOVER_FUNC_ZERO) {
			/*
			 * Zero-fill operation.
			 * Simply load up the pre-zeroed source buffer
			 */
			if (bus_dmamap_load(sc->sc_dmat, ds->ds_src_dmap,
			    ds->ds_zero_va, arm_pdcache_line_size, NULL,
			    BUS_DMA_NOWAIT | BUS_DMA_STREAMING | BUS_DMA_READ))
				goto error;

			ds->ds_src_addr_hold = true;
		} else
		if (dreq->dreq_assignment->das_algdesc->dad_name ==
		    DMOVER_FUNC_FILL8) {
			/*
			 * Fill-8 operation.
			 * Initialise our fill-8 buffer, and load it up.
			 *
			 * XXX: Experiment with exactly how much of the
			 * source buffer needs to be filled. Particularly WRT
			 * burst size (which is hardcoded to 8 for dmover).
			 */
			memset(ds->ds_fill_va, dreq->dreq_immediate[0],
			    arm_pdcache_line_size);

			if (bus_dmamap_load(sc->sc_dmat, ds->ds_src_dmap,
			    ds->ds_fill_va, arm_pdcache_line_size, NULL,
			    BUS_DMA_NOWAIT | BUS_DMA_STREAMING | BUS_DMA_READ))
				goto error;

			ds->ds_src_addr_hold = true;
		} else {
			goto error;
		}

		/*
		 * Now do the same for the destination buffer
		 */
		switch (dreq->dreq_outbuf_type) {
		case DMOVER_BUF_LINEAR:
			if (bus_dmamap_load(sc->sc_dmat, ds->ds_dst_dmap,
			    dreq->dreq_outbuf.dmbuf_linear.l_addr,
			    len_dst, NULL,
			    BUS_DMA_NOWAIT | BUS_DMA_STREAMING | BUS_DMA_WRITE))
				goto error_unload_src;
			break;

		case DMOVER_BUF_UIO:
			if (dreq->dreq_outbuf.dmbuf_uio->uio_rw != UIO_READ)
				goto error_unload_src;
			if (bus_dmamap_load_uio(sc->sc_dmat, ds->ds_dst_dmap,
			    dreq->dreq_outbuf.dmbuf_uio,
			    BUS_DMA_NOWAIT | BUS_DMA_STREAMING | BUS_DMA_WRITE))
				goto error_unload_src;
			break;

		default:
		error_unload_src:
			bus_dmamap_unload(sc->sc_dmat, ds->ds_src_dmap);
		error:
			dreq->dreq_error = EINVAL;
			dreq->dreq_flags |= DMOVER_REQ_ERROR;
			ds->ds_current = NULL;
			LIST_INSERT_HEAD(&dd->dd_free, ds, ds_link);
			dmover_done(dreq);
			continue;
		}

		/*
		 * The last step before shipping the request off to the
		 * DMAC driver is to sync the dma maps.
		 */
		bus_dmamap_sync(sc->sc_dmat, ds->ds_src_dmap, 0,
		    ds->ds_src_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);
		ds->ds_src_dma_segs = ds->ds_src_dmap->dm_segs;
		ds->ds_src_nsegs = ds->ds_src_dmap->dm_nsegs;

		bus_dmamap_sync(sc->sc_dmat, ds->ds_dst_dmap, 0,
		    ds->ds_dst_dmap->dm_mapsize, BUS_DMASYNC_PREREAD);
		ds->ds_dst_dma_segs = ds->ds_dst_dmap->dm_segs;
		ds->ds_dst_nsegs = ds->ds_dst_dmap->dm_nsegs;

		/*
		 * Hand the request over to the dmac section of the driver.
		 */
		if ((rv = pxa2x0_dmac_start_xfer(&ds->ds_xfer.dxs_xfer)) != 0) {
			bus_dmamap_unload(sc->sc_dmat, ds->ds_src_dmap);
			bus_dmamap_unload(sc->sc_dmat, ds->ds_dst_dmap);
			dreq->dreq_error = rv;
			dreq->dreq_flags |= DMOVER_REQ_ERROR;
			ds->ds_current = NULL;
			LIST_INSERT_HEAD(&dd->dd_free, ds, ds_link);
			dmover_done(dreq);
		}
	}

	/* All done */
	sc->sc_dmover.dd_busy = 0;
}

static void
dmac_dmover_done(struct dmac_xfer *dx, int error)
{
	struct dmac_dmover_state *ds = dx->dx_cookie;
	struct pxadmac_softc *sc = ds->ds_sc;
	struct dmover_request *dreq = ds->ds_current;

	/*
	 * A dmover(9) request has just completed.
	 */

	KDASSERT(dreq != NULL);

	/*
	 * Sync and unload the DMA maps
	 */
	bus_dmamap_sync(sc->sc_dmat, ds->ds_src_dmap, 0,
	    ds->ds_src_dmap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_sync(sc->sc_dmat, ds->ds_dst_dmap, 0,
	    ds->ds_dst_dmap->dm_mapsize, BUS_DMASYNC_POSTREAD);

	bus_dmamap_unload(sc->sc_dmat, ds->ds_src_dmap);
	bus_dmamap_unload(sc->sc_dmat, ds->ds_dst_dmap);

	ds->ds_current = NULL;
	LIST_INSERT_HEAD(&sc->sc_dmover.dd_free, ds, ds_link);

	/*
	 * Record the completion status of the transfer
	 */
	if (error) {
		dreq->dreq_error = error;
		dreq->dreq_flags |= DMOVER_REQ_ERROR;
	} else {
		if (dreq->dreq_outbuf_type == DMOVER_BUF_UIO)
			dreq->dreq_outbuf.dmbuf_uio->uio_resid = 0;
		if (dreq->dreq_assignment->das_algdesc->dad_ninputs &&
		    dreq->dreq_inbuf_type == DMOVER_BUF_UIO)
			dreq->dreq_inbuf[0].dmbuf_uio->uio_resid = 0;
	}

	/*
	 * Done!
	 */
	dmover_done(dreq);

	/*
	 * See if we can start some more dmover(9) requests.
	 *
	 * Note: We're already at splbio() here.
	 */
	if (sc->sc_dmover.dd_busy == 0)
		dmac_dmover_run(&sc->sc_dmover.dd_backend);
}
#endif

struct dmac_xfer *
pxa2x0_dmac_allocate_xfer(void)
{
	struct dmac_xfer_state *dxs;

	dxs = kmem_alloc(sizeof(*dxs), KM_SLEEP);

	return ((struct dmac_xfer *)dxs);
}

void
pxa2x0_dmac_free_xfer(struct dmac_xfer *dx)
{
	struct dmac_xfer_state *dxs = (struct dmac_xfer_state *)dx;

	/*
	 * XXX: Should verify the DMAC is not actively using this
	 * structure before freeing...
	 */
	kmem_free(dxs, sizeof(*dxs));
}

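/*
 * Illustrative sketch (not compiled): one way a client driver might use
 * the transfer interface exported above. The "mydev" softc, its
 * destination segment and the mydev_dma_done() callback are
 * hypothetical; a real caller supplies its own bus_dma segments and
 * completion handler.
 */
#if 0
static void
mydev_start_dma(struct mydev_softc *msc, bus_dma_segment_t *segs, u_int nsegs)
{
	struct dmac_xfer *dx;

	dx = pxa2x0_dmac_allocate_xfer();

	dx->dx_cookie = msc;
	dx->dx_done = mydev_dma_done;		/* Called on completion */
	dx->dx_priority = DMAC_PRIORITY_NORMAL;
	dx->dx_peripheral = DMAC_PERIPH_NONE;	/* Memory-to-memory */
	dx->dx_flow = DMAC_FLOW_CTRL_NONE;
	dx->dx_dev_width = DMAC_DEV_WIDTH_DEFAULT;
	dx->dx_burst_size = DMAC_BURST_SIZE_8;
	dx->dx_loop_notify = DMAC_DONT_LOOP;

	/* Source: the caller's scatter/gather list */
	dx->dx_desc[DMAC_DESC_SRC].xd_addr_hold = false;
	dx->dx_desc[DMAC_DESC_SRC].xd_nsegs = nsegs;
	dx->dx_desc[DMAC_DESC_SRC].xd_dma_segs = segs;

	/* Destination: a single, already-loaded segment (hypothetical) */
	dx->dx_desc[DMAC_DESC_DST].xd_addr_hold = false;
	dx->dx_desc[DMAC_DESC_DST].xd_nsegs = 1;
	dx->dx_desc[DMAC_DESC_DST].xd_dma_segs = &msc->sc_dst_seg;

	if (pxa2x0_dmac_start_xfer(dx) != 0)
		pxa2x0_dmac_free_xfer(dx);
}
#endif
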
static inline int
dmac_validate_desc(struct dmac_xfer_desc *xd, size_t *psize,
    bool *misaligned_flag)
{
	size_t size;
	int i;

	/*
	 * Make sure the transfer parameters are acceptable.
	 */

	if (xd->xd_addr_hold &&
	    (xd->xd_nsegs != 1 || xd->xd_dma_segs[0].ds_len == 0))
		return (EINVAL);

	for (i = 0, size = 0; i < xd->xd_nsegs; i++) {
		if (xd->xd_dma_segs[i].ds_addr & 0x7) {
			if (!CPU_IS_PXA270)
				return (EFAULT);
			*misaligned_flag = true;
		}
		size += xd->xd_dma_segs[i].ds_len;
	}

	*psize = size;
	return (0);
}

static inline int
dmac_init_desc(struct dmac_desc_segs *ds, struct dmac_xfer_desc *xd,
    size_t *psize, bool *misaligned_flag)
{
	int err;

	if ((err = dmac_validate_desc(xd, psize, misaligned_flag)))
		return (err);

	ds->ds_curseg = xd->xd_dma_segs;
	ds->ds_nsegs = xd->xd_nsegs;
	ds->ds_offset = 0;
	return (0);
}

int
pxa2x0_dmac_start_xfer(struct dmac_xfer *dx)
{
	struct pxadmac_softc *sc = pxadmac_sc;
	struct dmac_xfer_state *dxs = (struct dmac_xfer_state *)dx;
	struct dmac_xfer_desc *src, *dst;
	size_t size;
	int err, s;

	if (dxs->dxs_peripheral != DMAC_PERIPH_NONE &&
	    dxs->dxs_peripheral >= DMAC_N_PERIPH)
		return (EINVAL);

	src = &dxs->dxs_desc[DMAC_DESC_SRC];
	dst = &dxs->dxs_desc[DMAC_DESC_DST];

	dxs->dxs_misaligned_flag = false;

	if ((err = dmac_init_desc(&dxs->dxs_segs[DMAC_DESC_SRC], src, &size,
	    &dxs->dxs_misaligned_flag)))
		return (err);
	if (src->xd_addr_hold == false &&
	    dxs->dxs_loop_notify != DMAC_DONT_LOOP &&
	    (size % dxs->dxs_loop_notify) != 0)
		return (EINVAL);

	if ((err = dmac_init_desc(&dxs->dxs_segs[DMAC_DESC_DST], dst, &size,
	    &dxs->dxs_misaligned_flag)))
		return (err);
	if (dst->xd_addr_hold == false &&
	    dxs->dxs_loop_notify != DMAC_DONT_LOOP &&
	    (size % dxs->dxs_loop_notify) != 0)
		return (EINVAL);

	SLIST_INIT(&dxs->dxs_descs);
	dxs->dxs_channel = DMAC_NO_CHANNEL;
	dxs->dxs_dcmd = (((u_int32_t)dxs->dxs_dev_width) << DCMD_WIDTH_SHIFT) |
	    (((u_int32_t)dxs->dxs_burst_size) << DCMD_SIZE_SHIFT);

	switch (dxs->dxs_flow) {
	case DMAC_FLOW_CTRL_NONE:
		break;
	case DMAC_FLOW_CTRL_SRC:
		dxs->dxs_dcmd |= DCMD_FLOWSRC;
		break;
	case DMAC_FLOW_CTRL_DEST:
		dxs->dxs_dcmd |= DCMD_FLOWTRG;
		break;
	}

	if (src->xd_addr_hold == false)
		dxs->dxs_dcmd |= DCMD_INCSRCADDR;
	if (dst->xd_addr_hold == false)
		dxs->dxs_dcmd |= DCMD_INCTRGADDR;

	s = splbio();
	if (dxs->dxs_peripheral == DMAC_PERIPH_NONE ||
	    sc->sc_periph[dxs->dxs_peripheral].sp_busy == 0) {
		dxs->dxs_queue = &sc->sc_queue[DMAC_PRI(dxs->dxs_priority)];
		SIMPLEQ_INSERT_TAIL(dxs->dxs_queue, dxs, dxs_link);
		if (dxs->dxs_peripheral != DMAC_PERIPH_NONE)
			sc->sc_periph[dxs->dxs_peripheral].sp_busy++;
		dmac_start(sc, DMAC_PRI(dxs->dxs_priority));
	} else {
		dxs->dxs_queue = &sc->sc_periph[dxs->dxs_peripheral].sp_queue;
		SIMPLEQ_INSERT_TAIL(dxs->dxs_queue, dxs, dxs_link);
		sc->sc_periph[dxs->dxs_peripheral].sp_busy++;
	}
	splx(s);

	return (0);
}

void
pxa2x0_dmac_abort_xfer(struct dmac_xfer *dx)
{
	struct pxadmac_softc *sc = pxadmac_sc;
	struct dmac_xfer_state *ndxs, *dxs = (struct dmac_xfer_state *)dx;
	struct dmac_desc *desc, *ndesc;
	struct dmac_xfer_state_head *queue;
	u_int32_t rv;
	int s, timeout, need_start = 0;

	s = splbio();

	queue = dxs->dxs_queue;

	if (dxs->dxs_channel == DMAC_NO_CHANNEL) {
		/*
		 * The request has not yet started, or it has already
		 * completed. If the request is not on a queue, just
		 * return.
		 */
		if (queue == NULL) {
			splx(s);
			return;
		}

		dxs->dxs_queue = NULL;
		SIMPLEQ_REMOVE(queue, dxs, dmac_xfer_state, dxs_link);
	} else {
		/*
		 * The request is in progress. This is a bit trickier.
		 */
		dmac_reg_write(sc, DMAC_DCSR(dxs->dxs_channel), 0);

		for (timeout = 5000; timeout; timeout--) {
			rv = dmac_reg_read(sc, DMAC_DCSR(dxs->dxs_channel));
			if (rv & DCSR_STOPSTATE)
				break;
			delay(1);
		}

		if ((rv & DCSR_STOPSTATE) == 0)
			panic(
			   "pxa2x0_dmac_abort_xfer: channel %d failed to abort",
			    dxs->dxs_channel);

		/*
		 * Free resources allocated to the request
		 */
		for (desc = SLIST_FIRST(&dxs->dxs_descs); desc; desc = ndesc) {
			ndesc = SLIST_NEXT(desc, d_link);
			SLIST_INSERT_HEAD(&sc->sc_descs, desc, d_link);
			sc->sc_free_descs++;
		}

		sc->sc_active[dxs->dxs_channel] = NULL;
		dmac_free_channel(sc, DMAC_PRI(dxs->dxs_priority),
		    dxs->dxs_channel);

		if (dxs->dxs_peripheral != DMAC_PERIPH_NONE)
			dmac_reg_write(sc, DMAC_DRCMR(dxs->dxs_peripheral), 0);

		need_start = 1;
		dxs->dxs_queue = NULL;
	}

	if (dxs->dxs_peripheral == DMAC_PERIPH_NONE ||
	    sc->sc_periph[dxs->dxs_peripheral].sp_busy-- == 1 ||
	    queue == &sc->sc_periph[dxs->dxs_peripheral].sp_queue)
		goto out;

	/*
	 * We've just removed the current item for this
	 * peripheral, and there is at least one more
	 * pending item waiting. Make it current.
	 */
	ndxs = SIMPLEQ_FIRST(&sc->sc_periph[dxs->dxs_peripheral].sp_queue);
	dxs = ndxs;
	KDASSERT(dxs != NULL);
	SIMPLEQ_REMOVE_HEAD(&sc->sc_periph[dxs->dxs_peripheral].sp_queue,
	    dxs_link);

	dxs->dxs_queue = &sc->sc_queue[DMAC_PRI(dxs->dxs_priority)];
	SIMPLEQ_INSERT_TAIL(dxs->dxs_queue, dxs, dxs_link);
	need_start = 1;

	/*
	 * Try to start any pending requests with the same
	 * priority.
	 */
 out:
	if (need_start)
		dmac_start(sc, DMAC_PRI(dxs->dxs_priority));
	splx(s);
}

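/*
 * Start as many queued transfers at this priority level as free
 * descriptors and available DMA channels allow.
 */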
static void
dmac_start(struct pxadmac_softc *sc, dmac_priority_t priority)
{
	struct dmac_xfer_state *dxs;
	u_int channel;

	while (sc->sc_free_descs &&
	    (dxs = SIMPLEQ_FIRST(&sc->sc_queue[priority])) != NULL &&
	    dmac_allocate_channel(sc, priority, &channel) == 0) {
		/*
		 * Yay, got some descriptors, a transfer request, and
		 * an available DMA channel.
		 */
		KDASSERT(sc->sc_active[channel] == NULL);
		SIMPLEQ_REMOVE_HEAD(&sc->sc_queue[priority], dxs_link);

		/* set DMA alignment register */
		if (CPU_IS_PXA270) {
			uint32_t dalgn;

			dalgn = dmac_reg_read(sc, DMAC_DALGN);
			dalgn &= ~(1U << channel);
			if (dxs->dxs_misaligned_flag)
				dalgn |= (1U << channel);
			dmac_reg_write(sc, DMAC_DALGN, dalgn);
		}

		dxs->dxs_channel = channel;
		sc->sc_active[channel] = dxs;
		(void) dmac_continue_xfer(sc, dxs);
		/*
		 * XXX: Deal with descriptor allocation failure for loops
		 */
	}
}

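/*
 * Build and start a DMAC descriptor chain covering as much of the
 * remaining transfer as free descriptors allow. Returns non-zero if
 * the channel was started, zero if nothing could be queued.
 */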
static int
dmac_continue_xfer(struct pxadmac_softc *sc, struct dmac_xfer_state *dxs)
{
	struct dmac_desc *desc, *prev_desc;
	struct pxa2x0_dma_desc *dd;
	struct dmac_desc_segs *src_ds, *dst_ds;
	struct dmac_xfer_desc *src_xd, *dst_xd;
	bus_dma_segment_t *src_seg, *dst_seg;
	bus_addr_t src_mem_addr, dst_mem_addr;
	bus_size_t src_size, dst_size, this_size;

	desc = NULL;
	prev_desc = NULL;
	dd = NULL;
	src_ds = &dxs->dxs_segs[DMAC_DESC_SRC];
	dst_ds = &dxs->dxs_segs[DMAC_DESC_DST];
	src_xd = &dxs->dxs_desc[DMAC_DESC_SRC];
	dst_xd = &dxs->dxs_desc[DMAC_DESC_DST];
	SLIST_INIT(&dxs->dxs_descs);

	/*
	 * As long as the source/destination buffers have DMA segments,
	 * and we have free descriptors, build a DMA chain.
	 */
	while (src_ds->ds_nsegs && dst_ds->ds_nsegs && sc->sc_free_descs) {
		src_seg = src_ds->ds_curseg;
		src_mem_addr = src_seg->ds_addr + src_ds->ds_offset;
		if (src_xd->xd_addr_hold == false &&
		    dxs->dxs_loop_notify != DMAC_DONT_LOOP)
			src_size = dxs->dxs_loop_notify;
		else
			src_size = src_seg->ds_len - src_ds->ds_offset;

		dst_seg = dst_ds->ds_curseg;
		dst_mem_addr = dst_seg->ds_addr + dst_ds->ds_offset;
		if (dst_xd->xd_addr_hold == false &&
		    dxs->dxs_loop_notify != DMAC_DONT_LOOP)
			dst_size = dxs->dxs_loop_notify;
		else
			dst_size = dst_seg->ds_len - dst_ds->ds_offset;

		/*
		 * We may need to split a source or destination segment
		 * across two or more DMAC descriptors.
		 */
		while (src_size && dst_size &&
		    (desc = SLIST_FIRST(&sc->sc_descs)) != NULL) {
			SLIST_REMOVE_HEAD(&sc->sc_descs, d_link);
			sc->sc_free_descs--;

			/*
			 * Decide how much data we're going to transfer
			 * using this DMAC descriptor.
			 */
			if (src_xd->xd_addr_hold)
				this_size = dst_size;
			else
			if (dst_xd->xd_addr_hold)
				this_size = src_size;
			else
				this_size = min(dst_size, src_size);

			/*
			 * But clamp the transfer size to the DMAC
			 * descriptor's maximum.
			 */
			this_size = min(this_size, DCMD_LENGTH_MASK & ~0x1f);

			/*
			 * Fill in the DMAC descriptor
			 */
			dd = desc->d_desc;
			dd->dd_dsadr = src_mem_addr;
			dd->dd_dtadr = dst_mem_addr;
			dd->dd_dcmd = dxs->dxs_dcmd | this_size;

			/*
			 * Link it into the chain
			 */
			if (prev_desc) {
				SLIST_INSERT_AFTER(prev_desc, desc, d_link);
				prev_desc->d_desc->dd_ddadr = desc->d_desc_pa;
			} else {
				SLIST_INSERT_HEAD(&dxs->dxs_descs, desc,
				    d_link);
			}
			prev_desc = desc;

			/*
			 * Update the source/destination pointers
			 */
			if (src_xd->xd_addr_hold == false) {
				src_size -= this_size;
				src_ds->ds_offset += this_size;
				if (src_ds->ds_offset == src_seg->ds_len) {
					KDASSERT(src_size == 0);
					src_ds->ds_curseg = ++src_seg;
					src_ds->ds_offset = 0;
					src_ds->ds_nsegs--;
				} else
					src_mem_addr += this_size;
			}

			if (dst_xd->xd_addr_hold == false) {
				dst_size -= this_size;
				dst_ds->ds_offset += this_size;
				if (dst_ds->ds_offset == dst_seg->ds_len) {
					KDASSERT(dst_size == 0);
					dst_ds->ds_curseg = ++dst_seg;
					dst_ds->ds_offset = 0;
					dst_ds->ds_nsegs--;
				} else
					dst_mem_addr += this_size;
			}
		}

		if (dxs->dxs_loop_notify != DMAC_DONT_LOOP) {
			/*
			 * We must be able to allocate descriptors for the
			 * entire loop. Otherwise, return them to the pool
			 * and bail.
			 */
			if (desc == NULL) {
				struct dmac_desc *ndesc;
				for (desc = SLIST_FIRST(&dxs->dxs_descs);
				    desc; desc = ndesc) {
					ndesc = SLIST_NEXT(desc, d_link);
					SLIST_INSERT_HEAD(&sc->sc_descs, desc,
					    d_link);
					sc->sc_free_descs++;
				}

				return (0);
			}

			KASSERT(dd != NULL);
			dd->dd_dcmd |= DCMD_ENDIRQEN;
		}
	}

	/*
	 * Did we manage to build a chain?
	 * If not, just return.
	 */
	if (dd == NULL)
		return (0);

	if (dxs->dxs_loop_notify == DMAC_DONT_LOOP) {
		dd->dd_dcmd |= DCMD_ENDIRQEN;
		dd->dd_ddadr = DMAC_DESC_LAST;
	} else
		dd->dd_ddadr = SLIST_FIRST(&dxs->dxs_descs)->d_desc_pa;

	if (dxs->dxs_peripheral != DMAC_PERIPH_NONE) {
		dmac_reg_write(sc, DMAC_DRCMR(dxs->dxs_peripheral),
		    dxs->dxs_channel | DRCMR_MAPVLD);
	}
	dmac_reg_write(sc, DMAC_DDADR(dxs->dxs_channel),
	    SLIST_FIRST(&dxs->dxs_descs)->d_desc_pa);
	dmac_reg_write(sc, DMAC_DCSR(dxs->dxs_channel),
	    DCSR_ENDINTR | DCSR_RUN);

	return (1);
}

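/*
 * Per-channel interrupt handling: acknowledge the channel status,
 * complete or continue the active transfer, and return a bitmask of
 * priority levels whose queues may now be able to make progress.
 */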
static u_int
dmac_channel_intr(struct pxadmac_softc *sc, u_int channel)
{
	struct dmac_xfer_state *dxs;
	struct dmac_desc *desc, *ndesc;
	u_int32_t dcsr;
	u_int rv = 0;

	dcsr = dmac_reg_read(sc, DMAC_DCSR(channel));
	dmac_reg_write(sc, DMAC_DCSR(channel), dcsr);
	if (dmac_reg_read(sc, DMAC_DCSR(channel)) & DCSR_STOPSTATE)
		dmac_reg_write(sc, DMAC_DCSR(channel), dcsr & ~DCSR_RUN);

	if ((dxs = sc->sc_active[channel]) == NULL) {
		aprint_error_dev(sc->sc_dev,
		    "Stray DMAC interrupt for unallocated channel %d\n",
		    channel);
		return (0);
	}

	/*
	 * Clear down the interrupt in the DMA Interrupt Register
	 */
	dmac_reg_write(sc, DMAC_DINT, (1u << channel));

	/*
	 * If this is a looping request, invoke the 'done' callback and
	 * return immediately.
	 */
	if (dxs->dxs_loop_notify != DMAC_DONT_LOOP &&
	    (dcsr & DCSR_BUSERRINTR) == 0) {
		(dxs->dxs_done)(&dxs->dxs_xfer, 0);
		return (0);
	}

	/*
	 * Free the descriptors allocated to the completed transfer
	 *
	 * XXX: If there is more data to transfer in this request,
	 * we could simply reuse some or all of the descriptors
	 * already allocated for the transfer which just completed.
	 */
	for (desc = SLIST_FIRST(&dxs->dxs_descs); desc; desc = ndesc) {
		ndesc = SLIST_NEXT(desc, d_link);
		SLIST_INSERT_HEAD(&sc->sc_descs, desc, d_link);
		sc->sc_free_descs++;
	}

	if ((dcsr & DCSR_BUSERRINTR) || dmac_continue_xfer(sc, dxs) == 0) {
		/*
		 * The transfer completed (possibly due to an error),
		 * -OR- we were unable to continue any remaining
		 * segment of the transfer due to a lack of descriptors.
		 *
		 * In either case, we have to free up DMAC resources
		 * allocated to the request.
		 */
		sc->sc_active[channel] = NULL;
		dmac_free_channel(sc, DMAC_PRI(dxs->dxs_priority), channel);
		dxs->dxs_channel = DMAC_NO_CHANNEL;
		if (dxs->dxs_peripheral != DMAC_PERIPH_NONE)
			dmac_reg_write(sc, DMAC_DRCMR(dxs->dxs_peripheral), 0);

		if (dxs->dxs_segs[DMAC_DESC_SRC].ds_nsegs == 0 ||
		    dxs->dxs_segs[DMAC_DESC_DST].ds_nsegs == 0 ||
		    (dcsr & DCSR_BUSERRINTR)) {

			/*
			 * The transfer is complete.
			 */
			dxs->dxs_queue = NULL;
			rv = 1u << DMAC_PRI(dxs->dxs_priority);

			if (dxs->dxs_peripheral != DMAC_PERIPH_NONE &&
			    --sc->sc_periph[dxs->dxs_peripheral].sp_busy != 0) {
				struct dmac_xfer_state *ndxs;
				/*
				 * We've just removed the current item for this
				 * peripheral, and there is at least one more
				 * pending item waiting. Make it current.
				 */
				ndxs = SIMPLEQ_FIRST(
				    &sc->sc_periph[dxs->dxs_peripheral].sp_queue);
				KDASSERT(ndxs != NULL);
				SIMPLEQ_REMOVE_HEAD(
				    &sc->sc_periph[dxs->dxs_peripheral].sp_queue,
				    dxs_link);

				ndxs->dxs_queue =
				    &sc->sc_queue[DMAC_PRI(dxs->dxs_priority)];
				SIMPLEQ_INSERT_TAIL(ndxs->dxs_queue, ndxs,
				    dxs_link);
			}

			(dxs->dxs_done)(&dxs->dxs_xfer,
			    (dcsr & DCSR_BUSERRINTR) ? EFAULT : 0);
		} else {
			/*
			 * The request is not yet complete, but we were unable
			 * to make any headway at this time because there are
			 * no free descriptors. Put the request back at the
			 * head of the appropriate priority queue. It'll be
			 * dealt with as other in-progress transfers complete.
			 */
			SIMPLEQ_INSERT_HEAD(
			    &sc->sc_queue[DMAC_PRI(dxs->dxs_priority)], dxs,
			    dxs_link);
		}
	}

	return (rv);
}

static int
dmac_intr(void *arg)
{
	struct pxadmac_softc *sc = arg;
	u_int32_t rv, mask;
	u_int chan, pri;

	rv = dmac_reg_read(sc, DMAC_DINT);
	if ((rv & DMAC_DINT_MASK) == 0)
		return (0);

	/*
	 * Deal with completed transfers
	 */
	for (chan = 0, mask = 1u, pri = 0;
	    chan < DMAC_N_CHANNELS; chan++, mask <<= 1) {
		if (rv & mask)
			pri |= dmac_channel_intr(sc, chan);
	}

	/*
	 * Now try to start any queued transfers
	 */
#if (DMAC_N_PRIORITIES > 1)
	if (pri & (1u << DMAC_PRIORITY_HIGH))
		dmac_start(sc, DMAC_PRIORITY_HIGH);
	if (pri & (1u << DMAC_PRIORITY_MED))
		dmac_start(sc, DMAC_PRIORITY_MED);
	if (pri & (1u << DMAC_PRIORITY_LOW))
		dmac_start(sc, DMAC_PRIORITY_LOW);
#else
	if (pri)
		dmac_start(sc, DMAC_PRIORITY_NORMAL);
#endif

	return (1);
}