/*	$NetBSD: nextdma.c,v 1.21.4.1 2001/06/16 20:30:47 he Exp $	*/
2 1.1 dbj /*
3 1.1 dbj * Copyright (c) 1998 Darrin B. Jewell
4 1.1 dbj * All rights reserved.
5 1.1 dbj *
6 1.1 dbj * Redistribution and use in source and binary forms, with or without
7 1.1 dbj * modification, are permitted provided that the following conditions
8 1.1 dbj * are met:
9 1.1 dbj * 1. Redistributions of source code must retain the above copyright
10 1.1 dbj * notice, this list of conditions and the following disclaimer.
11 1.1 dbj * 2. Redistributions in binary form must reproduce the above copyright
12 1.1 dbj * notice, this list of conditions and the following disclaimer in the
13 1.1 dbj * documentation and/or other materials provided with the distribution.
14 1.1 dbj * 3. All advertising materials mentioning features or use of this software
15 1.1 dbj * must display the following acknowledgement:
16 1.1 dbj * This product includes software developed by Darrin B. Jewell
17 1.1 dbj * 4. The name of the author may not be used to endorse or promote products
18 1.1 dbj * derived from this software without specific prior written permission
19 1.1 dbj *
20 1.1 dbj * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
21 1.1 dbj * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
22 1.1 dbj * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
23 1.1 dbj * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
24 1.1 dbj * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
25 1.1 dbj * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
26 1.1 dbj * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
27 1.1 dbj * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
28 1.1 dbj * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
29 1.1 dbj * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 1.1 dbj */
31 1.1 dbj
32 1.1 dbj #include <sys/param.h>
33 1.1 dbj #include <sys/systm.h>
34 1.1 dbj #include <sys/mbuf.h>
35 1.1 dbj #include <sys/syslog.h>
36 1.1 dbj #include <sys/socket.h>
37 1.1 dbj #include <sys/device.h>
38 1.1 dbj #include <sys/malloc.h>
39 1.1 dbj #include <sys/ioctl.h>
40 1.1 dbj #include <sys/errno.h>
41 1.1 dbj
42 1.1 dbj #include <machine/autoconf.h>
43 1.1 dbj #include <machine/cpu.h>
44 1.1 dbj #include <machine/intr.h>
45 1.5 dbj
46 1.5 dbj #include <m68k/cacheops.h>
47 1.1 dbj
48 1.1 dbj #include <next68k/next68k/isr.h>
49 1.1 dbj
50 1.16 dbj #define _NEXT68K_BUS_DMA_PRIVATE
51 1.1 dbj #include <machine/bus.h>
52 1.1 dbj
53 1.1 dbj #include "nextdmareg.h"
54 1.1 dbj #include "nextdmavar.h"
55 1.1 dbj
#if 1
#define ND_DEBUG
#endif

#if defined(ND_DEBUG)
int nextdma_debug = 0;
/*
 * Debug printf, enabled at runtime via nextdma_debug.
 * Wrapped in do { } while (0) so that DPRINTF(...) is a single
 * statement: the old bare "if (nextdma_debug) printf x;" form
 * silently captured a following "else" (or made one a syntax
 * error) when used in an unbraced if/else.
 */
#define DPRINTF(x) do { if (nextdma_debug) printf x; } while (0)
#else
#define DPRINTF(x) do { } while (0)
#endif
66 1.1 dbj
/* Internal helpers (old-style __P prototypes; definitions below,
 * except next_dmamap_sync/next_dma_continue which live elsewhere). */
void next_dmamap_sync __P((bus_dma_tag_t, bus_dmamap_t, bus_addr_t,
		bus_size_t, int));
int next_dma_continue __P((struct nextdma_config *));
void next_dma_rotate __P((struct nextdma_config *));

void next_dma_setup_cont_regs __P((struct nextdma_config *));
void next_dma_setup_curr_regs __P((struct nextdma_config *));
void next_dma_finish_xfer __P((struct nextdma_config *));
75 1.1 dbj
76 1.1 dbj void
77 1.1 dbj nextdma_config(nd)
78 1.1 dbj struct nextdma_config *nd;
79 1.1 dbj {
80 1.1 dbj /* Initialize the dma_tag. As a hack, we currently
81 1.1 dbj * put the dma tag in the structure itself. It shouldn't be there.
82 1.1 dbj */
83 1.1 dbj
84 1.1 dbj {
85 1.1 dbj bus_dma_tag_t t;
86 1.1 dbj t = &nd->_nd_dmat;
87 1.1 dbj t->_cookie = nd;
88 1.1 dbj t->_dmamap_create = _bus_dmamap_create;
89 1.1 dbj t->_dmamap_destroy = _bus_dmamap_destroy;
90 1.1 dbj t->_dmamap_load = _bus_dmamap_load_direct;
91 1.1 dbj t->_dmamap_load_mbuf = _bus_dmamap_load_mbuf_direct;
92 1.1 dbj t->_dmamap_load_uio = _bus_dmamap_load_uio_direct;
93 1.1 dbj t->_dmamap_load_raw = _bus_dmamap_load_raw_direct;
94 1.1 dbj t->_dmamap_unload = _bus_dmamap_unload;
95 1.16 dbj t->_dmamap_sync = _bus_dmamap_sync;
96 1.1 dbj
97 1.1 dbj t->_dmamem_alloc = _bus_dmamem_alloc;
98 1.1 dbj t->_dmamem_free = _bus_dmamem_free;
99 1.1 dbj t->_dmamem_map = _bus_dmamem_map;
100 1.1 dbj t->_dmamem_unmap = _bus_dmamem_unmap;
101 1.1 dbj t->_dmamem_mmap = _bus_dmamem_mmap;
102 1.1 dbj
103 1.1 dbj nd->nd_dmat = t;
104 1.1 dbj }
105 1.1 dbj
106 1.1 dbj nextdma_init(nd);
107 1.1 dbj
108 1.14 dbj isrlink_autovec(nextdma_intr, nd, NEXT_I_IPL(nd->nd_intr), 10);
109 1.14 dbj INTR_ENABLE(nd->nd_intr);
110 1.1 dbj }
111 1.1 dbj
/*
 * Put the channel into a known idle state: clear the software map
 * bookkeeping, hard-reset the controller, and program both register
 * sets with placeholder values.  Safe to call again from
 * nextdma_reset().
 */
void
nextdma_init(nd)
	struct nextdma_config *nd;
{
	DPRINTF(("DMA init ipl (%ld) intr(0x%b)\n",
			NEXT_I_IPL(nd->nd_intr), NEXT_I_BIT(nd->nd_intr),NEXT_INTR_BITS));

	/* Drop any current and pending dma maps. */
	nd->_nd_map = NULL;
	nd->_nd_idx = 0;
	nd->_nd_map_cont = NULL;
	nd->_nd_idx_cont = 0;

	/* Clear the csr first, then issue reset + buffer init. */
	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR, 0);
	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
			DMACSR_RESET | DMACSR_INITBUF);

	next_dma_setup_curr_regs(nd);
	next_dma_setup_cont_regs(nd);

#if defined(DIAGNOSTIC)
	{
		u_long state;
		state = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_CSR);

#if 1
		/* mourning (a 25Mhz 68040 mono slab) appears to set BUSEXC
		 * milo (a 25Mhz 68040 mono cube) didn't have this problem
		 * Darrin B. Jewell <jewell (at) mit.edu>  Mon May 25 07:53:05 1998
		 */
		state &= (DMACSR_COMPLETE | DMACSR_SUPDATE | DMACSR_ENABLE);
#else
		state &= (DMACSR_BUSEXC | DMACSR_COMPLETE |
				DMACSR_SUPDATE | DMACSR_ENABLE);
#endif
		/* Any of these bits still set means the reset didn't take. */
		if (state) {
			next_dma_print(nd);
			panic("DMA did not reset");
		}
	}
#endif
}
153 1.1 dbj
154 1.4 dbj
/*
 * Abort whatever the channel is doing and reinitialize it.
 * Runs at spldma to keep the dma interrupt handler out while the
 * state is torn down.
 */
void
nextdma_reset(nd)
	struct nextdma_config *nd;
{
	int s;
	s = spldma();

	DPRINTF(("DMA reset\n"));

#if (defined(ND_DEBUG))
	if (nextdma_debug) next_dma_print(nd);
#endif

	/* @@@ clean up dma maps */

	nextdma_init(nd);
	splx(s);
}
173 1.1 dbj
174 1.1 dbj /****************************************************************/
175 1.1 dbj
176 1.1 dbj
/* Call the completed and continue callbacks to try to fill
 * in the dma continue buffers.
 * Promotes the continue map/segment to current, then -- note the
 * side-effecting "++" in the condition below -- advances the continue
 * segment index, and when that walks off the end of the continue map
 * (or there is no continue map) asks the client for a fresh one via
 * the continue callback.
 */
void
next_dma_rotate(nd)
	struct nextdma_config *nd;
{

	DPRINTF(("DMA next_dma_rotate()\n"));

	/* Rotate the continue map into the current map */
	nd->_nd_map = nd->_nd_map_cont;
	nd->_nd_idx = nd->_nd_idx_cont;

	if ((!nd->_nd_map_cont) ||
			((nd->_nd_map_cont) &&
					(++nd->_nd_idx_cont >= nd->_nd_map_cont->dm_nsegs))) {
		if (nd->nd_continue_cb) {
			nd->_nd_map_cont = (*nd->nd_continue_cb)(nd->nd_cb_arg);
		} else {
			nd->_nd_map_cont = 0;
		}
		nd->_nd_idx_cont = 0;
	}

#ifdef DIAGNOSTIC
	/* Poison the transfer length so a read before
	 * next_dma_finish_xfer() fills it in is recognizable.
	 */
	if (nd->_nd_map) {
		nd->_nd_map->dm_segs[nd->_nd_idx].ds_xfer_len = 0x1234beef;
	}
#endif

#if defined(DIAGNOSTIC) && 0
	/* Disabled: verify dma alignment constraints on the new continue segment. */
	if (nd->_nd_map_cont) {
		if (!DMA_BEGINALIGNED(nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_addr)) {
			next_dma_print(nd);
			panic("DMA request unaligned at start\n");
		}
		if (!DMA_ENDALIGNED(nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_addr +
				nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_len)) {
			next_dma_print(nd);
			panic("DMA request unaligned at end\n");
		}
	}
#endif

}
223 1.1 dbj
/*
 * Program the continue (start/stop) register set from the continue
 * map's current segment; 0xdeadbeef placeholders when there is no
 * continue map.  The saved registers are written with the same values
 * so a later readback in next_dma_finish_xfer() is meaningful.
 */
void
next_dma_setup_cont_regs(nd)
	struct nextdma_config *nd;
{
	bus_addr_t dd_start;
	bus_addr_t dd_stop;
	bus_addr_t dd_saved_start;
	bus_addr_t dd_saved_stop;

	DPRINTF(("DMA next_dma_setup_regs()\n"));

	if (nd->_nd_map_cont) {
		dd_start = nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_addr;
		dd_stop = (nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_addr +
				nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_len);

		if (nd->nd_intr == NEXT_I_ENETX_DMA) {
			dd_stop |= 0x80000000;		/* Ethernet transmit needs secret magic */
			dd_stop += 15;
		}
	} else {
		dd_start = 0xdeadbeef;
		dd_stop = 0xdeadbeef;
	}

	dd_saved_start = dd_start;
	dd_saved_stop = dd_stop;

	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_START, dd_start);
	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_STOP, dd_stop);
	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_START, dd_saved_start);
	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_STOP, dd_saved_stop);

#ifdef DIAGNOSTIC
	/* Read back each register to catch a wedged or absent device. */
	if ((bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_START) != dd_start) ||
			(bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_STOP) != dd_stop) ||
			(bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_START) != dd_saved_start) ||
			(bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_STOP) != dd_saved_stop)) {
		next_dma_print(nd);
		panic("DMA failure writing to continue regs");
	}
#endif
}
267 1.1 dbj
/*
 * Program the current (next/limit) register set from the current
 * map's current segment; 0xdeadbeef placeholders when there is no
 * current map.  The Ethernet transmit channel takes its "next"
 * address through DD_NEXT_INITBUF instead of DD_NEXT.
 */
void
next_dma_setup_curr_regs(nd)
	struct nextdma_config *nd;
{
	bus_addr_t dd_next;
	bus_addr_t dd_limit;
	bus_addr_t dd_saved_next;
	bus_addr_t dd_saved_limit;

	DPRINTF(("DMA next_dma_setup_curr_regs()\n"));


	if (nd->_nd_map) {
		dd_next = nd->_nd_map->dm_segs[nd->_nd_idx].ds_addr;
		dd_limit = (nd->_nd_map->dm_segs[nd->_nd_idx].ds_addr +
				nd->_nd_map->dm_segs[nd->_nd_idx].ds_len);
		if (nd->nd_intr == NEXT_I_ENETX_DMA) {
			dd_limit |= 0x80000000;		/* Ethernet transmit needs secret magic */
			dd_limit += 15;
		}
	} else {
		dd_next = 0xdeadbeef;
		dd_limit = 0xdeadbeef;
	}

	dd_saved_next = dd_next;
	dd_saved_limit = dd_limit;

	if (nd->nd_intr == NEXT_I_ENETX_DMA) {
		bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_NEXT_INITBUF, dd_next);
	} else {
		bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_NEXT, dd_next);
	}
	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_LIMIT, dd_limit);
	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_NEXT, dd_saved_next);
	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_LIMIT, dd_saved_limit);

#ifdef DIAGNOSTIC
	/* Read back each register to catch a wedged or absent device. */
	if ((bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_NEXT_INITBUF) != dd_next) ||
			(bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_NEXT) != dd_next) ||
			(bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_LIMIT) != dd_limit) ||
			(bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_NEXT) != dd_saved_next) ||
			(bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_LIMIT) != dd_saved_limit)) {
		next_dma_print(nd);
		panic("DMA failure writing to current regs");
	}
#endif
}
316 1.1 dbj
317 1.1 dbj
/* This routine is used for debugging: dump the channel's hardware
 * registers and the software map bookkeeping to the console.
 * Called from the DIAGNOSTIC panics throughout this file.
 */
void
next_dma_print(nd)
	struct nextdma_config *nd;
{
	u_long dd_csr;
	u_long dd_next;
	u_long dd_next_initbuf;
	u_long dd_limit;
	u_long dd_start;
	u_long dd_stop;
	u_long dd_saved_next;
	u_long dd_saved_limit;
	u_long dd_saved_start;
	u_long dd_saved_stop;

	/* Read all of the registers before we print anything out,
	 * in case something changes
	 */
	dd_csr = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_CSR);
	dd_next = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_NEXT);
	dd_next_initbuf = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_NEXT_INITBUF);
	dd_limit = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_LIMIT);
	dd_start = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_START);
	dd_stop = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_STOP);
	dd_saved_next = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_NEXT);
	dd_saved_limit = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_LIMIT);
	dd_saved_start = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_START);
	dd_saved_stop = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_STOP);

	printf("NDMAP: *intrstat = 0x%b\n",
			(*(volatile u_long *)IIOV(NEXT_P_INTRSTAT)),NEXT_INTR_BITS);
	printf("NDMAP: *intrmask = 0x%b\n",
			(*(volatile u_long *)IIOV(NEXT_P_INTRMASK)),NEXT_INTR_BITS);

	/* NDMAP is Next DMA Print (really!) */

	if (nd->_nd_map) {
		printf("NDMAP: nd->_nd_map->dm_mapsize = %d\n",
				nd->_nd_map->dm_mapsize);
		printf("NDMAP: nd->_nd_map->dm_nsegs = %d\n",
				nd->_nd_map->dm_nsegs);
		printf("NDMAP: nd->_nd_map->dm_segs[%d].ds_addr = 0x%08lx\n",
				nd->_nd_idx,nd->_nd_map->dm_segs[nd->_nd_idx].ds_addr);
		printf("NDMAP: nd->_nd_map->dm_segs[%d].ds_len = %d\n",
				nd->_nd_idx,nd->_nd_map->dm_segs[nd->_nd_idx].ds_len);
		printf("NDMAP: nd->_nd_map->dm_segs[%d].ds_xfer_len = %d\n",
				nd->_nd_idx,nd->_nd_map->dm_segs[nd->_nd_idx].ds_xfer_len);
	} else {
		printf("NDMAP: nd->_nd_map = NULL\n");
	}
	if (nd->_nd_map_cont) {
		printf("NDMAP: nd->_nd_map_cont->dm_mapsize = %d\n",
				nd->_nd_map_cont->dm_mapsize);
		printf("NDMAP: nd->_nd_map_cont->dm_nsegs = %d\n",
				nd->_nd_map_cont->dm_nsegs);
		printf("NDMAP: nd->_nd_map_cont->dm_segs[%d].ds_addr = 0x%08lx\n",
				nd->_nd_idx_cont,nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_addr);
		printf("NDMAP: nd->_nd_map_cont->dm_segs[%d].ds_len = %d\n",
				nd->_nd_idx_cont,nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_len);
		printf("NDMAP: nd->_nd_map_cont->dm_segs[%d].ds_xfer_len = %d\n",
				nd->_nd_idx_cont,nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_xfer_len);
	} else {
		printf("NDMAP: nd->_nd_map_cont = NULL\n");
	}

	printf("NDMAP: dd->dd_csr          = 0x%b\n",   dd_csr,   DMACSR_BITS);
	printf("NDMAP: dd->dd_saved_next   = 0x%08x\n", dd_saved_next);
	printf("NDMAP: dd->dd_saved_limit  = 0x%08x\n", dd_saved_limit);
	printf("NDMAP: dd->dd_saved_start  = 0x%08x\n", dd_saved_start);
	printf("NDMAP: dd->dd_saved_stop   = 0x%08x\n", dd_saved_stop);
	printf("NDMAP: dd->dd_next         = 0x%08x\n", dd_next);
	printf("NDMAP: dd->dd_next_initbuf = 0x%08x\n", dd_next_initbuf);
	printf("NDMAP: dd->dd_limit        = 0x%08x\n", dd_limit);
	printf("NDMAP: dd->dd_start        = 0x%08x\n", dd_start);
	printf("NDMAP: dd->dd_stop         = 0x%08x\n", dd_stop);

	printf("NDMAP: interrupt ipl (%ld) intr(0x%b)\n",
			NEXT_I_IPL(nd->nd_intr), NEXT_I_BIT(nd->nd_intr),NEXT_INTR_BITS);
}
399 1.1 dbj
400 1.1 dbj /****************************************************************/
/*
 * Account for a just-completed segment: compute how many bytes the
 * hardware actually moved (limit register minus segment start),
 * record it in ds_xfer_len, and fire the completed callback when
 * this was the map's last segment.  Clears the current map.
 */
void
next_dma_finish_xfer(nd)
	struct nextdma_config *nd;
{
	bus_addr_t onext;
	bus_addr_t olimit;
	bus_addr_t slimit;

	onext = nd->_nd_map->dm_segs[nd->_nd_idx].ds_addr;
	olimit = onext + nd->_nd_map->dm_segs[nd->_nd_idx].ds_len;

	/* On the final segment with nothing queued behind it the live
	 * limit register holds the stopping point; otherwise the
	 * hardware has already rolled over and the value we want is in
	 * the saved-limit register.
	 */
	if ((nd->_nd_map_cont == NULL) && (nd->_nd_idx+1 == nd->_nd_map->dm_nsegs)) {
		slimit = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_LIMIT);
	} else {
		slimit = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_LIMIT);
	}

	/* Undo the Ethernet-transmit magic applied in setup_*_regs. */
	if (nd->nd_intr == NEXT_I_ENETX_DMA) {
		slimit &= ~0x80000000;
		slimit -= 15;
	}

#ifdef DIAGNOSTIC
	/* The stopping point must lie within the segment. */
	if ((slimit < onext) || (slimit > olimit)) {
		next_dma_print(nd);
		panic("DMA: Unexpected registers in finish_xfer\n");
	}
#endif

	nd->_nd_map->dm_segs[nd->_nd_idx].ds_xfer_len = slimit-onext;

	/* If we've reached the end of the current map, then inform
	 * that we've completed that map.
	 */
	if (nd->_nd_map && ((nd->_nd_idx+1) == nd->_nd_map->dm_nsegs)) {
		if (nd->nd_completed_cb)
			(*nd->nd_completed_cb)(nd->_nd_map, nd->nd_cb_arg);
	}
	nd->_nd_map = 0;
	nd->_nd_idx = 0;
}
442 1.20 dbj
443 1.1 dbj
/*
 * Autovectored interrupt handler.  Accounts for the finished
 * segment, then either shuts the channel down (nothing left to do)
 * or rotates in the next map and re-enables the hardware.
 * Returns 1 if the interrupt was ours, 0 otherwise.
 */
int
nextdma_intr(arg)
	void *arg;
{
	/* @@@ This is bogus, we can't be certain of arg's type
	 * unless the interrupt is for us.  For now we successfully
	 * cheat because DMA interrupts are the only things invoked
	 * at this interrupt level.
	 */
	struct nextdma_config *nd = arg;

	if (!INTR_OCCURRED(nd->nd_intr)) return 0;
	/* Handle dma interrupts */

	DPRINTF(("DMA interrupt ipl (%ld) intr(0x%b)\n",
			NEXT_I_IPL(nd->nd_intr), NEXT_I_BIT(nd->nd_intr),NEXT_INTR_BITS));

#ifdef DIAGNOSTIC
	if (!nd->_nd_map) {
		next_dma_print(nd);
		panic("DMA missing current map in interrupt!\n");
	}
#endif

	{
		int state = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_CSR);

#ifdef DIAGNOSTIC
		/* We expect COMPLETE set and SUPDATE already consumed. */
		if ((!(state & DMACSR_COMPLETE)) || (state & DMACSR_SUPDATE)) {
			next_dma_print(nd);
			panic("DMA Unexpected dma state in interrupt (0x%b)",state,DMACSR_BITS);
		}
#endif

		/* Record the transfer length of the finished segment. */
		next_dma_finish_xfer(nd);

		/* Check to see if we are expecting dma to shut down */
		if ((nd->_nd_map == NULL) && (nd->_nd_map_cont == NULL)) {

#ifdef DIAGNOSTIC
			if (state & DMACSR_ENABLE) {
				next_dma_print(nd);
				panic("DMA: unexpected DMA state at shutdown (0x%b)\n",
						state,DMACSR_BITS);
			}
#endif
			bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
					DMACSR_CLRCOMPLETE | DMACSR_RESET);

			DPRINTF(("DMA: a normal and expected shutdown occurred\n"));
			if (nd->nd_shutdown_cb) (*nd->nd_shutdown_cb)(nd->nd_cb_arg);

			return(1);
		}

		/* More work pending: promote the continue map to current and
		 * reprogram the continue registers for what follows it.
		 */
		next_dma_rotate(nd);
		next_dma_setup_cont_regs(nd);

		{
			u_long dmadir;		/* DMACSR_SETREAD or DMACSR_SETWRITE */

			/* Preserve the transfer direction the channel was using. */
			if (state & DMACSR_READ) {
				dmadir = DMACSR_SETREAD;
			} else {
				dmadir = DMACSR_SETWRITE;
			}

			/* we used to SETENABLE here only
			   conditionally, but we got burned
			   because DMA sometimes would shut
			   down between when we checked and
			   when we acted upon it.  CL19991211 */
			if ((nd->_nd_map_cont == NULL) && (nd->_nd_idx+1 == nd->_nd_map->dm_nsegs)) {
				/* Last segment, nothing queued: no SUPDATE chaining. */
				bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
						DMACSR_CLRCOMPLETE | dmadir | DMACSR_SETENABLE);
			} else {
				bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
						DMACSR_CLRCOMPLETE | dmadir | DMACSR_SETSUPDATE | DMACSR_SETENABLE);
			}

		}

	}

	DPRINTF(("DMA exiting interrupt ipl (%ld) intr(0x%b)\n",
			NEXT_I_IPL(nd->nd_intr), NEXT_I_BIT(nd->nd_intr),NEXT_INTR_BITS));

	return(1);
}
533 1.1 dbj
534 1.1 dbj /*
535 1.1 dbj * Check to see if dma has finished for a channel */
536 1.1 dbj int
537 1.1 dbj nextdma_finished(nd)
538 1.1 dbj struct nextdma_config *nd;
539 1.1 dbj {
540 1.1 dbj int r;
541 1.1 dbj int s;
542 1.1 dbj s = spldma(); /* @@@ should this be splimp()? */
543 1.1 dbj r = (nd->_nd_map == NULL) && (nd->_nd_map_cont == NULL);
544 1.1 dbj splx(s);
545 1.1 dbj return(r);
546 1.1 dbj }
547 1.1 dbj
/*
 * Kick off a new dma transfer in the given direction.  The channel
 * must be idle (see nextdma_finished); the client's continue
 * callback supplies the maps.  Preloads both the current and
 * continue maps, resets the controller, programs both register
 * sets, and finally enables the channel.
 */
void
nextdma_start(nd, dmadir)
	struct nextdma_config *nd;
	u_long dmadir;		/* DMACSR_SETREAD or DMACSR_SETWRITE */
{

#ifdef DIAGNOSTIC
	if (!nextdma_finished(nd)) {
		panic("DMA trying to start before previous finished on intr(0x%b)\n",
				NEXT_I_BIT(nd->nd_intr),NEXT_INTR_BITS);
	}
#endif

	DPRINTF(("DMA start (%ld) intr(0x%b)\n",
			NEXT_I_IPL(nd->nd_intr), NEXT_I_BIT(nd->nd_intr),NEXT_INTR_BITS));

#ifdef DIAGNOSTIC
	if (nd->_nd_map) {
		next_dma_print(nd);
		panic("DMA: nextdma_start() with non null map\n");
	}
	if (nd->_nd_map_cont) {
		next_dma_print(nd);
		panic("DMA: nextdma_start() with non null continue map\n");
	}
#endif

#ifdef DIAGNOSTIC
	if ((dmadir != DMACSR_SETREAD) && (dmadir != DMACSR_SETWRITE)) {
		panic("DMA: nextdma_start(), dmadir arg must be DMACSR_SETREAD or DMACSR_SETWRITE\n");
	}
#endif

	/* preload both the current and the continue maps */
	next_dma_rotate(nd);

#ifdef DIAGNOSTIC
	if (!nd->_nd_map_cont) {
		panic("No map available in nextdma_start()");
	}
#endif

	/* Second rotate promotes the first map to current and pulls in
	 * a continue map behind it (if the client has more).
	 */
	next_dma_rotate(nd);

	DPRINTF(("DMA initiating DMA %s of %d segments on intr(0x%b)\n",
			(dmadir == DMACSR_SETREAD ? "read" : "write"), nd->_nd_map->dm_nsegs,
			NEXT_I_BIT(nd->nd_intr),NEXT_INTR_BITS));

	/* Clear the csr, then reset with buffer init and the direction. */
	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR, 0);
	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
			DMACSR_INITBUF | DMACSR_RESET | dmadir);

	next_dma_setup_curr_regs(nd);
	next_dma_setup_cont_regs(nd);

#if (defined(ND_DEBUG))
	if (nextdma_debug) next_dma_print(nd);
#endif

	/* Single-segment, nothing queued: enable without SUPDATE chaining. */
	if ((nd->_nd_map_cont == NULL) && (nd->_nd_idx+1 == nd->_nd_map->dm_nsegs)) {
		bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
				DMACSR_SETENABLE | dmadir);
	} else {
		bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
				DMACSR_SETSUPDATE | DMACSR_SETENABLE | dmadir);
	}
}
615