/*	$NetBSD: nextdma.c,v 1.5 1998/11/10 22:45:44 dbj Exp $	*/

/*
 * Copyright (c) 1998 Darrin B. Jewell
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Darrin B. Jewell
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/syslog.h>
#include <sys/socket.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/ioctl.h>
#include <sys/errno.h>

#include <machine/autoconf.h>
#include <machine/cpu.h>
#include <machine/intr.h>

#include <m68k/cacheops.h>

#include <next68k/next68k/isr.h>

#define _GENERIC_BUS_DMA_PRIVATE
#include <machine/bus.h>

#include "nextdmareg.h"
#include "nextdmavar.h"

#if 0
#define ND_DEBUG
#endif

#if defined(ND_DEBUG)
#define DPRINTF(x) printf x
#else
#define DPRINTF(x)
#endif

/* @@@ for debugging */
struct nextdma_config *debugernd;
struct nextdma_config *debugexnd;

int nextdma_intr __P((void *));
void next_dmamap_sync __P((bus_dma_tag_t, bus_dmamap_t, bus_addr_t,
		bus_size_t, int));
int next_dma_continue __P((struct nextdma_config *));
void next_dma_rotate __P((struct nextdma_config *));

void next_dma_setup_cont_regs __P((struct nextdma_config *));
void next_dma_setup_curr_regs __P((struct nextdma_config *));

void next_dma_print __P((struct nextdma_config *));

void
nextdma_config(nd)
	struct nextdma_config *nd;
{
	/* Initialize the dma_tag. As a hack, we currently
	 * put the dma tag in the structure itself. It shouldn't be there.
	 */

	{
		bus_dma_tag_t t;
		t = &nd->_nd_dmat;
		t->_cookie = nd;
		t->_get_tag = NULL;	/* lose */
		t->_dmamap_create = _bus_dmamap_create;
		t->_dmamap_destroy = _bus_dmamap_destroy;
		t->_dmamap_load = _bus_dmamap_load_direct;
		t->_dmamap_load_mbuf = _bus_dmamap_load_mbuf_direct;
		t->_dmamap_load_uio = _bus_dmamap_load_uio_direct;
		t->_dmamap_load_raw = _bus_dmamap_load_raw_direct;
		t->_dmamap_unload = _bus_dmamap_unload;
		t->_dmamap_sync = next_dmamap_sync;

		t->_dmamem_alloc = _bus_dmamem_alloc;
		t->_dmamem_free = _bus_dmamem_free;
		t->_dmamem_map = _bus_dmamem_map;
		t->_dmamem_unmap = _bus_dmamem_unmap;
		t->_dmamem_mmap = _bus_dmamem_mmap;

		nd->nd_dmat = t;
	}

	/* @@@ for debugging */
	if (nd->nd_intr == NEXT_I_ENETR_DMA) {
		debugernd = nd;
	}
	if (nd->nd_intr == NEXT_I_ENETX_DMA) {
		debugexnd = nd;
	}

	nextdma_init(nd);

	isrlink_autovec(nextdma_intr, nd, NEXT_I_IPL(nd->nd_intr), 10);
	INTR_ENABLE(nd->nd_intr);
}
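
/*
 * Rough usage sketch (added commentary, not part of the original source):
 * a client driver fills in a struct nextdma_config and then calls
 * nextdma_config().  Only the fields and callback shapes this file
 * actually references are shown; the real declarations live in
 * nextdmavar.h, and the sc_dma_* names below are hypothetical driver
 * callbacks used purely for illustration.
 *
 *	nd->nd_bst = ...;		bus space tag for the channel registers
 *	nd->nd_bsh = ...;		bus space handle for the channel registers
 *	nd->nd_intr = NEXT_I_ENETX_DMA;	which DMA interrupt this channel raises
 *	nd->nd_chaining_flag = 1;	nonzero to chain buffers with SUPDATE
 *	nd->nd_continue_cb = sc_dma_continue;	bus_dmamap_t (*)(void *): hands back the next map
 *	nd->nd_completed_cb = sc_dma_completed;	void (*)(bus_dmamap_t, void *): a map finished
 *	nd->nd_shutdown_cb = sc_dma_shutdown;	void (*)(void *): the channel went idle
 *	nd->nd_cb_arg = sc;		opaque argument passed to all three callbacks
 *	nextdma_config(nd);		hooks up the interrupt and resets the channel
 */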

void
nextdma_init(nd)
	struct nextdma_config *nd;
{
	DPRINTF(("DMA init ipl (%ld) intr(0x%b)\n",
			NEXT_I_IPL(nd->nd_intr), NEXT_I_BIT(nd->nd_intr), NEXT_INTR_BITS));

	/* @@@ should probably check and free these maps */
	nd->_nd_map = NULL;
	nd->_nd_idx = 0;
	nd->_nd_map_cont = NULL;
	nd->_nd_idx_cont = 0;

	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR, 0);
	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
			DMACSR_INITBUF | DMACSR_CLRCOMPLETE | DMACSR_RESET);

	next_dma_setup_curr_regs(nd);
	next_dma_setup_cont_regs(nd);

#if 0 && defined(DIAGNOSTIC)
	/* Today, my computer (mourning) appears to fail this test.
	 * Yesterday, another NeXT (milo) didn't have this problem.
	 * Darrin B. Jewell <jewell (at) mit.edu>  Mon May 25 07:53:05 1998
	 */
	{
		u_long state;
		state = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_CSR);
		state = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_CSR);
		state &= (DMACSR_BUSEXC | DMACSR_COMPLETE |
				DMACSR_SUPDATE | DMACSR_ENABLE);

		if (state) {
			next_dma_print(nd);
			panic("DMA did not reset\n");
		}
	}
#endif
}


void
nextdma_reset(nd)
	struct nextdma_config *nd;
{
	int s;
	s = spldma();			/* @@@ should this be splimp()? */
	nextdma_init(nd);
	splx(s);
}

/****************************************************************/

/* If the NeXT had multiple busses, this should probably
 * go elsewhere, but it is here anyway.
 */
void
next_dmamap_sync(t, map, offset, len, ops)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	bus_addr_t offset;
	bus_size_t len;
	int ops;
{
	/* Flush/purge the cache.
	 * Assumes pointers are aligned.
	 * @@@ should probably be fixed to use offset and len args.
	 * Should also optimize this to work on pages for larger regions?
	 */
	if (ops & BUS_DMASYNC_PREWRITE) {
		int i;
		for (i = 0; i < map->dm_nsegs; i++) {
			bus_addr_t p = map->dm_segs[i].ds_addr;
			bus_addr_t e = p + map->dm_segs[i].ds_len;
			while (p < e) {
				DCFL(p);	/* flush */
				p += 16;	/* cache line length */
			}
		}
	}

	if (ops & BUS_DMASYNC_POSTREAD) {
		int i;
		for (i = 0; i < map->dm_nsegs; i++) {
			bus_addr_t p = map->dm_segs[i].ds_addr;
			bus_addr_t e = p + map->dm_segs[i].ds_len;
			while (p < e) {
				DCPL(p);	/* purge */
				p += 16;	/* cache line length */
			}
		}
	}
}

/****************************************************************/
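
/*
 * Note on the register model as this driver uses it (added commentary,
 * inferred from the code below rather than from hardware documentation):
 * the segment currently being transferred is programmed into
 * DD_NEXT/DD_LIMIT (DD_NEXT_INITBUF in the ethernet-transmit case), while
 * the segment queued to follow it goes into DD_START/DD_STOP.  Setting
 * DMACSR_SETSUPDATE appears to ask the channel to load the queued
 * start/stop pair when the current buffer completes, which is how
 * next_dma_rotate() keeps a chained transfer running.  _nd_map/_nd_idx
 * track the current dmamap and segment index; _nd_map_cont/_nd_idx_cont
 * track the queued one.
 */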


/* Call the completed and continue callbacks to try to fill
 * in the dma continue buffers.
 */
void
next_dma_rotate(nd)
	struct nextdma_config *nd;
{

	DPRINTF(("DMA next_dma_rotate()\n"));

	/* If we've reached the end of the current map, then inform
	 * that we've completed that map.
	 */
	if (nd->_nd_map && ((nd->_nd_idx+1) == nd->_nd_map->dm_nsegs)) {
		if (nd->nd_completed_cb)
			(*nd->nd_completed_cb)(nd->_nd_map, nd->nd_cb_arg);
	}

	/* Rotate the continue map into the current map */
	nd->_nd_map = nd->_nd_map_cont;
	nd->_nd_idx = nd->_nd_idx_cont;

	if ((!nd->_nd_map_cont) ||
	    ((nd->_nd_map_cont) &&
	     (++nd->_nd_idx_cont >= nd->_nd_map_cont->dm_nsegs))) {
		if (nd->nd_continue_cb) {
			nd->_nd_map_cont = (*nd->nd_continue_cb)(nd->nd_cb_arg);
		} else {
			nd->_nd_map_cont = 0;
		}
		nd->_nd_idx_cont = 0;
	}
}

void
next_dma_setup_cont_regs(nd)
	struct nextdma_config *nd;
{
	DPRINTF(("DMA next_dma_setup_cont_regs()\n"));

	if (nd->_nd_map_cont) {

		if (nd->nd_intr == NEXT_I_ENETX_DMA) {
			/* Ethernet transmit needs secret magic */

			bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_START,
					nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_addr);
			bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_STOP,
					((nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_addr +
					  nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_len)
					 + 0x0) | 0x80000000);
		} else {
			bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_START,
					nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_addr);
			bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_STOP,
					nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_addr +
					nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_len);
		}

	} else {

		bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_START, 0);
		bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_STOP, 0);
	}

	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_START,
			bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_START));
	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_STOP,
			bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_STOP));

}

void
next_dma_setup_curr_regs(nd)
	struct nextdma_config *nd;
{
	DPRINTF(("DMA next_dma_setup_curr_regs()\n"));

	if (nd->nd_intr == NEXT_I_ENETX_DMA) {
		/* Ethernet transmit needs secret magic */

		if (nd->_nd_map) {

			bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_NEXT_INITBUF,
					nd->_nd_map->dm_segs[nd->_nd_idx].ds_addr);
			bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_LIMIT,
					((nd->_nd_map->dm_segs[nd->_nd_idx].ds_addr +
					  nd->_nd_map->dm_segs[nd->_nd_idx].ds_len)
					 + 0x0) | 0x80000000);
		} else {
			bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_NEXT_INITBUF, 0);
			bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_LIMIT, 0);

		}

		bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_NEXT,
				bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_NEXT_INITBUF));
		bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_LIMIT,
				bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_LIMIT));

	} else {

		if (nd->_nd_map) {

			bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_NEXT,
					nd->_nd_map->dm_segs[nd->_nd_idx].ds_addr);
			bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_LIMIT,
					nd->_nd_map->dm_segs[nd->_nd_idx].ds_addr +
					nd->_nd_map->dm_segs[nd->_nd_idx].ds_len);
		} else {
			bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_NEXT, 0);
			bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_LIMIT, 0);

		}

		bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_NEXT,
				bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_NEXT));
		bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_LIMIT,
				bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_LIMIT));

	}

}


/* This routine is used for debugging */

void
next_dma_print(nd)
	struct nextdma_config *nd;
{
	u_long dd_csr;
	u_long dd_next;
	u_long dd_next_initbuf;
	u_long dd_limit;
	u_long dd_start;
	u_long dd_stop;
	u_long dd_saved_next;
	u_long dd_saved_limit;
	u_long dd_saved_start;
	u_long dd_saved_stop;

	/* Read all of the registers before we print anything out,
	 * in case something changes.
	 */
	dd_csr          = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_CSR);
	dd_next         = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_NEXT);
	dd_next_initbuf = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_NEXT_INITBUF);
	dd_limit        = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_LIMIT);
	dd_start        = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_START);
	dd_stop         = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_STOP);
	dd_saved_next   = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_NEXT);
	dd_saved_limit  = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_LIMIT);
	dd_saved_start  = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_START);
	dd_saved_stop   = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_STOP);

	if (nd->_nd_map) {
		printf("NDMAP: nd->_nd_map->dm_segs[%d].ds_addr = 0x%08lx\n",
				nd->_nd_idx, nd->_nd_map->dm_segs[nd->_nd_idx].ds_addr);
		printf("NDMAP: nd->_nd_map->dm_segs[%d].ds_len = %d\n",
				nd->_nd_idx, nd->_nd_map->dm_segs[nd->_nd_idx].ds_len);
	} else {
		printf("NDMAP: nd->_nd_map = NULL\n");
	}
	if (nd->_nd_map_cont) {
		printf("NDMAP: nd->_nd_map_cont->dm_segs[%d].ds_addr = 0x%08lx\n",
				nd->_nd_idx_cont, nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_addr);
		printf("NDMAP: nd->_nd_map_cont->dm_segs[%d].ds_len = %d\n",
				nd->_nd_idx_cont, nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_len);
	} else {
		printf("NDMAP: nd->_nd_map_cont = NULL\n");
	}

	printf("NDMAP: dd->dd_csr          = 0x%b\n",   dd_csr, DMACSR_BITS);
	printf("NDMAP: dd->dd_saved_next   = 0x%08x\n", dd_saved_next);
	printf("NDMAP: dd->dd_saved_limit  = 0x%08x\n", dd_saved_limit);
	printf("NDMAP: dd->dd_saved_start  = 0x%08x\n", dd_saved_start);
	printf("NDMAP: dd->dd_saved_stop   = 0x%08x\n", dd_saved_stop);
	printf("NDMAP: dd->dd_next         = 0x%08x\n", dd_next);
	printf("NDMAP: dd->dd_next_initbuf = 0x%08x\n", dd_next_initbuf);
	printf("NDMAP: dd->dd_limit        = 0x%08x\n", dd_limit);
	printf("NDMAP: dd->dd_start        = 0x%08x\n", dd_start);
	printf("NDMAP: dd->dd_stop         = 0x%08x\n", dd_stop);

	printf("NDMAP: interrupt ipl (%ld) intr(0x%b)\n",
			NEXT_I_IPL(nd->nd_intr), NEXT_I_BIT(nd->nd_intr), NEXT_INTR_BITS);
}

/****************************************************************/

int
nextdma_intr(arg)
	void *arg;
{
	struct nextdma_config *nd = arg;

	/* @@@ This is bogus, we can't be certain of arg's type
	 * unless the interrupt is for us.
	 */

	if (!INTR_OCCURRED(nd->nd_intr)) return 0;
	/* Handle dma interrupts */
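
	/*
	 * Summary of the flow below (added commentary, inferred from the
	 * code): once we know the interrupt is ours, read and mask the CSR,
	 * sanity-check that COMPLETE is set, and trim the just-finished
	 * segment's ds_len to what the hardware reports in DD_SAVED_LIMIT.
	 * If ENABLE has dropped, the channel has run dry: fire the completed
	 * callback for the last map and either restart from the continue
	 * map or reset the channel and call the shutdown callback.  If
	 * ENABLE is still set, rotate the maps, queue the next continue
	 * segment, and acknowledge with CLRCOMPLETE (plus SETSUPDATE when
	 * more data is queued).
	 */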

#ifdef DIAGNOSTIC
	if (nd->nd_intr == NEXT_I_ENETR_DMA) {
		if (debugernd != nd) {
			panic("DMA incorrect handling of rx nd->nd_intr");
		}
	}
	if (nd->nd_intr == NEXT_I_ENETX_DMA) {
		if (debugexnd != nd) {
			panic("DMA incorrect handling of tx nd->nd_intr");
		}
	}
#endif

	DPRINTF(("DMA interrupt ipl (%ld) intr(0x%b)\n",
			NEXT_I_IPL(nd->nd_intr), NEXT_I_BIT(nd->nd_intr), NEXT_INTR_BITS));

	{
		int state = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_CSR);

		state &= (DMACSR_BUSEXC | DMACSR_COMPLETE |
				DMACSR_SUPDATE | DMACSR_ENABLE);

		if (state & DMACSR_BUSEXC) {
#if 0	/* This bit seems to get set periodically and I don't know why */
			next_dma_print(nd);
			panic("Bus exception in DMA ipl (%ld) intr(0x%b)\n",
					NEXT_I_IPL(nd->nd_intr), NEXT_I_BIT(nd->nd_intr), NEXT_INTR_BITS);
#endif
		}

#ifdef DIAGNOSTIC
		if (!(state & DMACSR_COMPLETE)) {
			next_dma_print(nd);
#if 0	/* Every once in a while this bit doesn't seem to get set,
	 * and I don't know why. Let's try treating it as a spurious
	 * interrupt, i.e. report it and ignore the interrupt.
	 */
			printf("DEBUG: state = 0x%b\n", state, DMACSR_BITS);
			panic("DMA ipl (%ld) intr(0x%b), DMACSR_COMPLETE not set in intr\n",
					NEXT_I_IPL(nd->nd_intr), NEXT_I_BIT(nd->nd_intr), NEXT_INTR_BITS);
#else
			printf("DMA ipl (%ld) intr(0x%b), DMACSR_COMPLETE not set in intr\n",
					NEXT_I_IPL(nd->nd_intr), NEXT_I_BIT(nd->nd_intr), NEXT_INTR_BITS);
			return(1);
#endif
		}
#endif

		/* Set the length of the segment to match the actual length.
		 * @@@ Is it okay to resize dma segments here?
		 * I should probably ask Jason about this.
		 */
		if (nd->_nd_map) {

			bus_addr_t next;
			bus_addr_t limit;

#if 0
			if (state & DMACSR_ENABLE) {
				next = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_NEXT);
			} else {
				next = nd->_nd_map->dm_segs[nd->_nd_idx].ds_addr;
			}
#else
			next = nd->_nd_map->dm_segs[nd->_nd_idx].ds_addr;
#endif
			limit = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_LIMIT);

			if (nd->nd_intr == NEXT_I_ENETX_DMA) {
				limit &= ~0x80000000;
			}

#ifdef DIAGNOSTIC
			if (next != nd->_nd_map->dm_segs[nd->_nd_idx].ds_addr) {
				next_dma_print(nd);
				printf("DEBUG: state = 0x%b\n", state, DMACSR_BITS);

				panic("DMA ipl (%ld) intr(0x%b), unexpected completed address\n",
						NEXT_I_IPL(nd->nd_intr), NEXT_I_BIT(nd->nd_intr), NEXT_INTR_BITS);
			}
#endif

			/* @@@ I observed a case where DMACSR_ENABLE wasn't set and
			 * DD_SAVED_LIMIT didn't contain the expected limit value. This
			 * should be tested, fixed, and removed.
			 */

			if (((limit-next) > nd->_nd_map->dm_segs[nd->_nd_idx].ds_len)
			    || (limit-next < 0)) {
#if 0
				next_dma_print(nd);
				printf("DEBUG: state = 0x%b\n", state, DMACSR_BITS);
				panic("DMA packlen: next = 0x%08x limit = 0x%08x\n", next, limit);
#else
				DPRINTF(("DMA packlen: next = 0x%08x limit = 0x%08x", next, limit));
#endif

			} else {
				nd->_nd_map->dm_segs[nd->_nd_idx].ds_len = limit - next;
			}
		}


		if ((state & DMACSR_ENABLE) == 0) {

			/* Non-chaining interrupts shut down immediately */
			if (!nd->nd_chaining_flag) {
				nd->_nd_map = nd->_nd_map_cont;
				nd->_nd_idx = nd->_nd_idx_cont;
				nd->_nd_map_cont = 0;
				nd->_nd_idx_cont = 0;
			}

			/* Call the completed callback for the last packet */
			if (nd->_nd_map && ((nd->_nd_idx+1) == nd->_nd_map->dm_nsegs)) {
				if (nd->nd_completed_cb)
					(*nd->nd_completed_cb)(nd->_nd_map, nd->nd_cb_arg);
			}
			nd->_nd_map = 0;
			nd->_nd_idx = 0;

			if (nd->_nd_map_cont) {
				DPRINTF(("DMA ipl (%ld) intr(0x%b), restarting\n",
						NEXT_I_IPL(nd->nd_intr), NEXT_I_BIT(nd->nd_intr), NEXT_INTR_BITS));

				bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
						DMACSR_SETSUPDATE | DMACSR_SETENABLE);

			} else {
				bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
						DMACSR_CLRCOMPLETE | DMACSR_RESET);
				DPRINTF(("DMA: enable not set w/o continue map, shutting down dma\n"));
				if (nd->nd_shutdown_cb) (*nd->nd_shutdown_cb)(nd->nd_cb_arg);
			}

		} else {
			next_dma_rotate(nd);
			next_dma_setup_cont_regs(nd);

			if (nd->_nd_map_cont) {
				bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
						DMACSR_SETSUPDATE | DMACSR_CLRCOMPLETE);
			} else {
				bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
						DMACSR_CLRCOMPLETE);
			}

		}

	}

	DPRINTF(("DMA exiting interrupt ipl (%ld) intr(0x%b)\n",
			NEXT_I_IPL(nd->nd_intr), NEXT_I_BIT(nd->nd_intr), NEXT_INTR_BITS));

	return(1);
}

/*
 * Check to see if dma has finished for a channel.
 */
int
nextdma_finished(nd)
	struct nextdma_config *nd;
{
	int r;
	int s;
	s = spldma();			/* @@@ should this be splimp()? */
	r = (nd->_nd_map == NULL) && (nd->_nd_map_cont == NULL);
	splx(s);
	return(r);
}

void
nextdma_start(nd, dmadir)
	struct nextdma_config *nd;
	u_long dmadir;			/* DMACSR_READ or DMACSR_WRITE */
{

#ifdef DIAGNOSTIC
	if (!nextdma_finished(nd)) {
		panic("DMA trying to start before previous finished on intr(0x%b)\n",
				NEXT_I_BIT(nd->nd_intr), NEXT_INTR_BITS);
	}
#endif


	DPRINTF(("DMA start (%ld) intr(0x%b)\n",
			NEXT_I_IPL(nd->nd_intr), NEXT_I_BIT(nd->nd_intr), NEXT_INTR_BITS));

#ifdef DIAGNOSTIC
	if (nd->_nd_map) {
		next_dma_print(nd);
		panic("DMA: nextdma_start() with non null map\n");
	}
	if (nd->_nd_map_cont) {
		next_dma_print(nd);
		panic("DMA: nextdma_start() with non null continue map\n");
	}
#endif

	next_dma_rotate(nd);

#ifdef DIAGNOSTIC
	if (!nd->_nd_map_cont) {
		panic("No map available in nextdma_start()");
	}
	if (!DMA_BEGINALIGNED(nd->_nd_map_cont->dm_segs[nd->_nd_idx].ds_addr)) {
		panic("unaligned begin dma at start\n");
	}
	if (!DMA_ENDALIGNED(nd->_nd_map_cont->dm_segs[nd->_nd_idx].ds_addr +
			nd->_nd_map_cont->dm_segs[nd->_nd_idx].ds_len)) {
		panic("unaligned end dma at start\n");
	}
#endif

	DPRINTF(("DMA initiating DMA %s of %d segments on intr(0x%b)\n",
			(dmadir == DMACSR_READ ? "read" : "write"), nd->_nd_map_cont->dm_nsegs,
			NEXT_I_BIT(nd->nd_intr), NEXT_INTR_BITS));

	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR, 0);
	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
			DMACSR_INITBUF | DMACSR_RESET | dmadir);

	next_dma_setup_cont_regs(nd);

	/* When starting DMA, we must put the continue map
	 * into the current register. We reset the nd->_nd_map
	 * pointer here to avoid duplicated completed callbacks
	 * for the first buffer.
	 */
	nd->_nd_map = nd->_nd_map_cont;
	nd->_nd_idx = nd->_nd_idx_cont;
	next_dma_setup_curr_regs(nd);
	nd->_nd_map = 0;
	nd->_nd_idx = 0;


#if (defined(ND_DEBUG))
	next_dma_print(nd);
#endif

	if (nd->nd_chaining_flag) {
		bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
				DMACSR_SETSUPDATE | DMACSR_SETENABLE);
	} else {
		bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
				DMACSR_SETENABLE);
	}

}
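
/*
 * Rough sketch of how a transfer is driven (added commentary, not from the
 * original source): a client driver loads a dmamap through nd->nd_dmat,
 * syncs it, arranges for nd_continue_cb to hand that map back, and then
 * kicks the channel.  Whether DMACSR_READ or DMACSR_WRITE matches a given
 * device's notion of direction is up to the caller; the comment on the
 * dmadir argument above only says it is one of the two.
 *
 *	bus_dmamap_load(nd->nd_dmat, map, buf, len, NULL, BUS_DMA_NOWAIT);
 *	bus_dmamap_sync(nd->nd_dmat, map, 0, len, BUS_DMASYNC_PREWRITE);
 *	nextdma_start(nd, DMACSR_WRITE);
 *
 * From then on the interrupt handler above pulls further maps through
 * nd_continue_cb, reports each finished map via nd_completed_cb, and
 * finally calls nd_shutdown_cb once both the current and continue maps
 * are exhausted (which is also the condition nextdma_finished() tests).
 */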