/*	$NetBSD: if_xe.c,v 1.13 2003/05/03 18:10:55 wiz Exp $	*/
/*
 * Copyright (c) 1998 Darrin B. Jewell
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Darrin B. Jewell
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "opt_inet.h"
#include "bpfilter.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>		/* malloc(9), M_DEVBUF */
#include <sys/mbuf.h>
#include <sys/syslog.h>
#include <sys/socket.h>
#include <sys/device.h>

#include <net/if.h>
#include <net/if_ether.h>
#include <net/if_media.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/if_inarp.h>
#endif

#include <machine/autoconf.h>
#include <machine/cpu.h>
#include <machine/intr.h>
#include <machine/bus.h>

#include <next68k/next68k/isr.h>

#include <next68k/dev/mb8795reg.h>
#include <next68k/dev/mb8795var.h>

#include <next68k/dev/bmapreg.h>
#include <next68k/dev/intiovar.h>
#include <next68k/dev/nextdmareg.h>
#include <next68k/dev/nextdmavar.h>

#include <next68k/dev/if_xevar.h>
#include <next68k/dev/if_xereg.h>

#ifdef DEBUG
#define XE_DEBUG
#endif

#ifdef XE_DEBUG
int xe_debug = 0;
#define DPRINTF(x) do { if (xe_debug) printf x; } while (0)
extern char *ndtracep;
extern char ndtrace[];
extern int ndtraceshow;
#define NDTRACEIF(x) if (10 && ndtracep < (ndtrace + 8192)) do {x;} while (0)
#else
#define DPRINTF(x)
#define NDTRACEIF(x)
#endif
#define PRINTF(x) printf x

extern int turbo;

int	xe_match __P((struct device *, struct cfdata *, void *));
void	xe_attach __P((struct device *, struct device *, void *));
int	xe_tint __P((void *));
int	xe_rint __P((void *));

struct mbuf *xe_dma_rxmap_load __P((struct mb8795_softc *, bus_dmamap_t));

bus_dmamap_t xe_dma_rx_continue __P((void *));
void	xe_dma_rx_completed __P((bus_dmamap_t, void *));
bus_dmamap_t xe_dma_tx_continue __P((void *));
void	xe_dma_tx_completed __P((bus_dmamap_t, void *));
void	xe_dma_rx_shutdown __P((void *));
void	xe_dma_tx_shutdown __P((void *));

static void findchannel_defer __P((struct device *));

CFATTACH_DECL(xe, sizeof(struct xe_softc),
    xe_match, xe_attach, NULL, NULL);

static int xe_dma_medias[] = {
	IFM_ETHER|IFM_AUTO,
	IFM_ETHER|IFM_10_T,
	IFM_ETHER|IFM_10_2,
};
static int nxe_dma_medias = (sizeof(xe_dma_medias)/sizeof(xe_dma_medias[0]));

static int attached = 0;

/*
 * Functions and the switch for the MI code.
 */
u_char	xe_read_reg __P((struct mb8795_softc *, int));
void	xe_write_reg __P((struct mb8795_softc *, int, u_char));
void	xe_dma_reset __P((struct mb8795_softc *));
void	xe_dma_rx_setup __P((struct mb8795_softc *));
void	xe_dma_rx_go __P((struct mb8795_softc *));
struct mbuf *xe_dma_rx_mbuf __P((struct mb8795_softc *));
void	xe_dma_tx_setup __P((struct mb8795_softc *));
void	xe_dma_tx_go __P((struct mb8795_softc *));
int	xe_dma_tx_mbuf __P((struct mb8795_softc *, struct mbuf *));
int	xe_dma_tx_isactive __P((struct mb8795_softc *));
#if 0
int	xe_dma_setup __P((struct mb8795_softc *, caddr_t *,
		size_t *, int, size_t *));
void	xe_dma_go __P((struct mb8795_softc *));
void	xe_dma_stop __P((struct mb8795_softc *));
int	xe_dma_isactive __P((struct mb8795_softc *));
#endif

struct mb8795_glue xe_glue = {
	xe_read_reg,
	xe_write_reg,
	xe_dma_reset,
	xe_dma_rx_setup,
	xe_dma_rx_go,
	xe_dma_rx_mbuf,
	xe_dma_tx_setup,
	xe_dma_tx_go,
	xe_dma_tx_mbuf,
	xe_dma_tx_isactive,
#if 0
	xe_dma_setup,
	xe_dma_go,
	xe_dma_stop,
	xe_dma_isactive,
	NULL,		/* gl_clear_latched_intr */
#endif
};
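
/*
 * A sketch of how this switch is consumed (an inference, not from the
 * original author): the MI mb8795 code reaches the chip and the DMA
 * engine only through these hooks; e.g. the MB_READ_REG()/MB_WRITE_REG()
 * macros used later in this file presumably expand to something like
 *
 *	(*(sc)->sc_glue->gl_read_reg)((sc), (reg))
 *	(*(sc)->sc_glue->gl_write_reg)((sc), (reg), (val))
 *
 * The gl_* member names here are assumptions; the authoritative
 * definitions live in mb8795var.h.
 */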

int
xe_match(parent, match, aux)
	struct device *parent;
	struct cfdata *match;
	void *aux;
{
	struct intio_attach_args *ia = (struct intio_attach_args *)aux;

	if (attached)
		return (0);

	ia->ia_addr = (void *)NEXT_P_ENET;

	return (1);
}

static void
findchannel_defer(self)
	struct device *self;
{
	struct xe_softc *xsc = (struct xe_softc *)self;
	struct mb8795_softc *sc = &xsc->sc_mb8795;
	int i, error;

	if (!xsc->sc_txdma) {
		xsc->sc_txdma = nextdma_findchannel("enetx");
		if (xsc->sc_txdma == NULL)
			panic("%s: can't find enetx DMA channel",
			    sc->sc_dev.dv_xname);
	}
	if (!xsc->sc_rxdma) {
		xsc->sc_rxdma = nextdma_findchannel("enetr");
		if (xsc->sc_rxdma == NULL)
			panic("%s: can't find enetr DMA channel",
			    sc->sc_dev.dv_xname);
	}
	printf("%s: using DMA channels %s %s\n", sc->sc_dev.dv_xname,
	    xsc->sc_txdma->sc_dev.dv_xname, xsc->sc_rxdma->sc_dev.dv_xname);

	nextdma_setconf(xsc->sc_rxdma, continue_cb, xe_dma_rx_continue);
	nextdma_setconf(xsc->sc_rxdma, completed_cb, xe_dma_rx_completed);
	nextdma_setconf(xsc->sc_rxdma, shutdown_cb, xe_dma_rx_shutdown);
	nextdma_setconf(xsc->sc_rxdma, cb_arg, sc);

	nextdma_setconf(xsc->sc_txdma, continue_cb, xe_dma_tx_continue);
	nextdma_setconf(xsc->sc_txdma, completed_cb, xe_dma_tx_completed);
	nextdma_setconf(xsc->sc_txdma, shutdown_cb, xe_dma_tx_shutdown);
	nextdma_setconf(xsc->sc_txdma, cb_arg, sc);

	/* Initialize the DMA maps */
	error = bus_dmamap_create(xsc->sc_txdma->sc_dmat, MCLBYTES,
	    (MCLBYTES/MSIZE), MCLBYTES, 0, BUS_DMA_ALLOCNOW,
	    &xsc->sc_tx_dmamap);
	if (error) {
		panic("%s: can't create tx DMA map, error = %d",
		    sc->sc_dev.dv_xname, error);
	}

	for (i = 0; i < MB8795_NRXBUFS; i++) {
		error = bus_dmamap_create(xsc->sc_rxdma->sc_dmat, MCLBYTES,
		    (MCLBYTES/MSIZE), MCLBYTES, 0, BUS_DMA_ALLOCNOW,
		    &xsc->sc_rx_dmamap[i]);
		if (error) {
			panic("%s: can't create rx DMA map, error = %d",
			    sc->sc_dev.dv_xname, error);
		}
		xsc->sc_rx_mb_head[i] = NULL;
	}
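
	/*
	 * The receive side is a ring of MB8795_NRXBUFS dmamaps walked
	 * by three indices (summarizing the code below): loaded_idx is
	 * the last map handed to the DMA engine (xe_dma_rx_continue),
	 * completed_idx is the last map the engine finished filling
	 * (xe_dma_rx_completed), and handled_idx is the last map
	 * drained into a packet (xe_dma_rx_mbuf).  The ring is "empty"
	 * when handled_idx == completed_idx.
	 */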
	xsc->sc_rx_loaded_idx = 0;
	xsc->sc_rx_completed_idx = 0;
	xsc->sc_rx_handled_idx = 0;

	/*
	 * @@@ More NeXT hacks: the 2000 below covers at least a
	 * 1500-byte MTU + headers + DMA_BEGINALIGNMENT + DMA_ENDALIGNMENT.
	 */
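	/*
	 * (Rough arithmetic: a maximal frame is 1500 bytes of payload
	 * + 14 bytes of Ethernet header + 4 bytes of CRC = 1518, so
	 * 2000 leaves ample slack for the REALIGN_DMABUF() rounding
	 * used below, provided the alignments are small powers of two.)
	 */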
	xsc->sc_txbuf = malloc(2000, M_DEVBUF, M_NOWAIT);
	if (!xsc->sc_txbuf)
		panic("%s: can't malloc tx DMA buffer", sc->sc_dev.dv_xname);

	xsc->sc_tx_mb_head = NULL;
	xsc->sc_tx_loaded = 0;

	mb8795_config(sc, xe_dma_medias, nxe_dma_medias, xe_dma_medias[0]);

	isrlink_autovec(xe_tint, sc, NEXT_I_IPL(NEXT_I_ENETX), 1, NULL);
	INTR_ENABLE(NEXT_I_ENETX);
	isrlink_autovec(xe_rint, sc, NEXT_I_IPL(NEXT_I_ENETR), 1, NULL);
	INTR_ENABLE(NEXT_I_ENETR);
}

void
xe_attach(parent, self, aux)
	struct device *parent, *self;
	void *aux;
{
	struct intio_attach_args *ia = (struct intio_attach_args *)aux;
	struct xe_softc *xsc = (struct xe_softc *)self;
	struct mb8795_softc *sc = &xsc->sc_mb8795;

	DPRINTF(("%s: xe_attach()\n", sc->sc_dev.dv_xname));

	{
		/* kludge from machdep.c:next68k_bootargs() */
		extern u_char rom_enetaddr[6];
		int i;

		for (i = 0; i < 6; i++)
			sc->sc_enaddr[i] = rom_enetaddr[i];
	}

	printf("\n%s: MAC address %02x:%02x:%02x:%02x:%02x:%02x\n",
	    sc->sc_dev.dv_xname,
	    sc->sc_enaddr[0], sc->sc_enaddr[1], sc->sc_enaddr[2],
	    sc->sc_enaddr[3], sc->sc_enaddr[4], sc->sc_enaddr[5]);

	xsc->sc_bst = ia->ia_bst;
	if (bus_space_map(xsc->sc_bst, NEXT_P_ENET,
	    XE_DEVICE_SIZE, 0, &xsc->sc_bsh)) {
		panic("\n%s: can't map mb8795 registers",
		    sc->sc_dev.dv_xname);
	}

	sc->sc_bmap_bst = ia->ia_bst;
	if (bus_space_map(sc->sc_bmap_bst, NEXT_P_BMAP,
	    BMAP_SIZE, 0, &sc->sc_bmap_bsh)) {
		panic("\n%s: can't map bmap registers",
		    sc->sc_dev.dv_xname);
	}

	/*
	 * Set up glue for MI code.
	 */
	sc->sc_glue = &xe_glue;

	xsc->sc_txdma = nextdma_findchannel("enetx");
	xsc->sc_rxdma = nextdma_findchannel("enetr");
	if (xsc->sc_rxdma && xsc->sc_txdma) {
		findchannel_defer(self);
	} else {
		config_defer(self, findchannel_defer);
	}

	attached = 1;
}

int
xe_tint(arg)
	void *arg;
{
	if (!INTR_OCCURRED(NEXT_I_ENETX))
		return (0);
	mb8795_tint((struct mb8795_softc *)arg);
	return (1);
}

int
xe_rint(arg)
	void *arg;
{
	if (!INTR_OCCURRED(NEXT_I_ENETR))
		return (0);
	mb8795_rint((struct mb8795_softc *)arg);
	return (1);
}
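
/*
 * Note: both handlers are registered with isrlink_autovec() in
 * findchannel_defer() above; on a shared autovectored interrupt level,
 * returning 0 ("not ours") lets the dispatcher offer the interrupt to
 * other handlers chained on the same level, while returning 1 claims it.
 */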

/*
 * Glue functions.
 */

u_char
xe_read_reg(sc, reg)
	struct mb8795_softc *sc;
	int reg;
{
	struct xe_softc *xsc = (struct xe_softc *)sc;

	return (bus_space_read_1(xsc->sc_bst, xsc->sc_bsh, reg));
}

void
xe_write_reg(sc, reg, val)
	struct mb8795_softc *sc;
	int reg;
	u_char val;
{
	struct xe_softc *xsc = (struct xe_softc *)sc;

	bus_space_write_1(xsc->sc_bst, xsc->sc_bsh, reg, val);
}

void
xe_dma_reset(sc)
	struct mb8795_softc *sc;
{
	struct xe_softc *xsc = (struct xe_softc *)sc;
	int i;

	DPRINTF(("xe DMA reset\n"));

	nextdma_reset(xsc->sc_rxdma);
	nextdma_reset(xsc->sc_txdma);

	if (xsc->sc_tx_loaded) {
		bus_dmamap_sync(xsc->sc_txdma->sc_dmat, xsc->sc_tx_dmamap,
		    0, xsc->sc_tx_dmamap->dm_mapsize,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(xsc->sc_txdma->sc_dmat, xsc->sc_tx_dmamap);
		xsc->sc_tx_loaded = 0;
	}
	if (xsc->sc_tx_mb_head) {
		m_freem(xsc->sc_tx_mb_head);
		xsc->sc_tx_mb_head = NULL;
	}

	for (i = 0; i < MB8795_NRXBUFS; i++) {
		if (xsc->sc_rx_mb_head[i]) {
			bus_dmamap_unload(xsc->sc_rxdma->sc_dmat,
			    xsc->sc_rx_dmamap[i]);
			m_freem(xsc->sc_rx_mb_head[i]);
			xsc->sc_rx_mb_head[i] = NULL;
		}
	}
}

void
xe_dma_rx_setup(sc)
	struct mb8795_softc *sc;
{
	struct xe_softc *xsc = (struct xe_softc *)sc;
	int i;

	DPRINTF(("xe DMA rx setup\n"));

	for (i = 0; i < MB8795_NRXBUFS; i++) {
		xsc->sc_rx_mb_head[i] =
		    xe_dma_rxmap_load(sc, xsc->sc_rx_dmamap[i]);
	}
	xsc->sc_rx_loaded_idx = 0;
	xsc->sc_rx_completed_idx = 0;
	xsc->sc_rx_handled_idx = 0;

	nextdma_init(xsc->sc_rxdma);
}

void
xe_dma_rx_go(sc)
	struct mb8795_softc *sc;
{
	struct xe_softc *xsc = (struct xe_softc *)sc;

	DPRINTF(("xe DMA rx go\n"));

	nextdma_start(xsc->sc_rxdma, DMACSR_SETREAD);
}

struct mbuf *
xe_dma_rx_mbuf(sc)
	struct mb8795_softc *sc;
{
	struct xe_softc *xsc = (struct xe_softc *)sc;
	bus_dmamap_t map;
	struct mbuf *m;

	m = NULL;
	if (xsc->sc_rx_handled_idx != xsc->sc_rx_completed_idx) {
		xsc->sc_rx_handled_idx++;
		xsc->sc_rx_handled_idx %= MB8795_NRXBUFS;

		map = xsc->sc_rx_dmamap[xsc->sc_rx_handled_idx];
		m = xsc->sc_rx_mb_head[xsc->sc_rx_handled_idx];

		m->m_len = map->dm_xfer_len;

		bus_dmamap_sync(xsc->sc_rxdma->sc_dmat, map,
		    0, map->dm_mapsize, BUS_DMASYNC_POSTREAD);

		bus_dmamap_unload(xsc->sc_rxdma->sc_dmat, map);

		/* Install a fresh mbuf for the next packet. */

		xsc->sc_rx_mb_head[xsc->sc_rx_handled_idx] =
		    xe_dma_rxmap_load(sc, map);

		/*
		 * Punt runt packets; DMA restarts create 0-length
		 * packets, for example.
		 */
		if (m->m_len < ETHER_MIN_LEN) {
			m_freem(m);
			m = NULL;
		}
	}
	return (m);
}

void
xe_dma_tx_setup(sc)
	struct mb8795_softc *sc;
{
	struct xe_softc *xsc = (struct xe_softc *)sc;

	DPRINTF(("xe DMA tx setup\n"));

	nextdma_init(xsc->sc_txdma);
}

void
xe_dma_tx_go(sc)
	struct mb8795_softc *sc;
{
	struct xe_softc *xsc = (struct xe_softc *)sc;

	DPRINTF(("xe DMA tx go\n"));

	nextdma_start(xsc->sc_txdma, DMACSR_SETWRITE);
}

int
xe_dma_tx_mbuf(sc, m)
	struct mb8795_softc *sc;
	struct mbuf *m;
{
	struct xe_softc *xsc = (struct xe_softc *)sc;
	int error;

	xsc->sc_tx_mb_head = m;

/*
 * The following is a NeXT-specific hack that should probably be moved
 * out of MI code.  This macro assumes it can move the start pointer
 * forward as needed in the buffer.  Perhaps it should zero the extra
 * buffer space, too.
 */
#define REALIGN_DMABUF(s,l) \
	{ (s) = ((u_char *)(((unsigned)(s)+DMA_BEGINALIGNMENT-1) \
			&~(DMA_BEGINALIGNMENT-1))); \
	(l) = ((u_char *)(((unsigned)((s)+(l))+DMA_ENDALIGNMENT-1) \
				&~(DMA_ENDALIGNMENT-1)))-(s);}
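
/*
 * A worked example of the macro, assuming hypothetical values
 * DMA_BEGINALIGNMENT = 4 and DMA_ENDALIGNMENT = 16: starting from
 * s = 0x1002, l = 100, the first statement rounds s up to 0x1004; the
 * second rounds s + l = 0x1068 up to 0x1070 and sets l = 0x1070 -
 * 0x1004 = 108.  As the comment above warns, s may move forward past
 * the first bytes of the caller's buffer.
 */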

#if 0
	error = bus_dmamap_load_mbuf(xsc->sc_txdma->sc_dmat,
	    xsc->sc_tx_dmamap, xsc->sc_tx_mb_head, BUS_DMA_NOWAIT);
#else
	{
		u_char *buf = xsc->sc_txbuf;
		int buflen = 0;

		buflen = m->m_pkthdr.len;

		{
			u_char *p = buf;
			for (m = xsc->sc_tx_mb_head; m; m = m->m_next) {
				if (m->m_len == 0)
					continue;
				bcopy(mtod(m, u_char *), p, m->m_len);
				p += m->m_len;
			}
			/* Fix runt packets */
			if (buflen < ETHER_MIN_LEN - ETHER_CRC_LEN) {
				memset(p, 0,
				    ETHER_MIN_LEN - ETHER_CRC_LEN - buflen);
				buflen = ETHER_MIN_LEN - ETHER_CRC_LEN;
			}
		}

		error = bus_dmamap_load(xsc->sc_txdma->sc_dmat,
		    xsc->sc_tx_dmamap, buf, buflen, NULL, BUS_DMA_NOWAIT);
	}
#endif
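
	/*
	 * Design note (inferred): the disabled bus_dmamap_load_mbuf()
	 * path above would DMA directly out of the mbuf chain, but the
	 * individual fragments are not guaranteed to satisfy the NeXT
	 * DMA begin/end alignment, so the chain is instead flattened
	 * into the pre-sized sc_txbuf bounce buffer and loaded as one
	 * contiguous segment.
	 */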
	if (error) {
		printf("%s: can't load mbuf chain, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		m_freem(xsc->sc_tx_mb_head);
		xsc->sc_tx_mb_head = NULL;
		return (error);
	}

#ifdef DIAGNOSTIC
	if (xsc->sc_tx_loaded != 0) {
		panic("%s: xsc->sc_tx_loaded is %d", sc->sc_dev.dv_xname,
		    xsc->sc_tx_loaded);
	}
#endif

	bus_dmamap_sync(xsc->sc_txdma->sc_dmat, xsc->sc_tx_dmamap, 0,
	    xsc->sc_tx_dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE);

	return (0);
}

int
xe_dma_tx_isactive(sc)
	struct mb8795_softc *sc;
{
	struct xe_softc *xsc = (struct xe_softc *)sc;

	return (xsc->sc_tx_loaded != 0);
}

/****************************************************************/

void
xe_dma_tx_completed(map, arg)
	bus_dmamap_t map;
	void *arg;
{
	struct mb8795_softc *sc = arg;
	struct xe_softc *xsc = (struct xe_softc *)sc;

	DPRINTF(("%s: xe_dma_tx_completed()\n", sc->sc_dev.dv_xname));

#ifdef DIAGNOSTIC
	if (!xsc->sc_tx_loaded) {
		panic("%s: tx completed never loaded", sc->sc_dev.dv_xname);
	}
	if (map != xsc->sc_tx_dmamap) {
		panic("%s: unexpected tx completed map", sc->sc_dev.dv_xname);
	}
#endif
}

void
xe_dma_tx_shutdown(arg)
	void *arg;
{
	struct mb8795_softc *sc = arg;
	struct xe_softc *xsc = (struct xe_softc *)sc;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	DPRINTF(("%s: xe_dma_tx_shutdown()\n", sc->sc_dev.dv_xname));

#ifdef DIAGNOSTIC
	if (!xsc->sc_tx_loaded) {
		panic("%s: tx shutdown never loaded", sc->sc_dev.dv_xname);
	}
#endif

	if (turbo)
		MB_WRITE_REG(sc, MB8795_TXMODE, MB8795_TXMODE_TURBO1);
	if (xsc->sc_tx_loaded) {
		bus_dmamap_sync(xsc->sc_txdma->sc_dmat, xsc->sc_tx_dmamap,
		    0, xsc->sc_tx_dmamap->dm_mapsize,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(xsc->sc_txdma->sc_dmat, xsc->sc_tx_dmamap);
		m_freem(xsc->sc_tx_mb_head);
		xsc->sc_tx_mb_head = NULL;

		xsc->sc_tx_loaded--;
	}

#ifdef DIAGNOSTIC
	if (xsc->sc_tx_loaded != 0) {
		panic("%s: sc->sc_tx_loaded is %d", sc->sc_dev.dv_xname,
		    xsc->sc_tx_loaded);
	}
#endif

	ifp->if_timer = 0;

#if 1
	if ((ifp->if_flags & IFF_RUNNING) && !IF_IS_EMPTY(&sc->sc_tx_snd)) {
		void mb8795_start_dma __P((struct mb8795_softc *)); /* XXXX */

		mb8795_start_dma(sc);
	}
#endif

#if 0
	/* Enable ready interrupt */
	MB_WRITE_REG(sc, MB8795_TXMASK,
	    MB_READ_REG(sc, MB8795_TXMASK)
	    | MB8795_TXMASK_TXRXIE/* READYIE */);
#endif
}

void
xe_dma_rx_completed(map, arg)
	bus_dmamap_t map;
	void *arg;
{
	struct mb8795_softc *sc = arg;
	struct xe_softc *xsc = (struct xe_softc *)sc;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	if (ifp->if_flags & IFF_RUNNING) {
		xsc->sc_rx_completed_idx++;
		xsc->sc_rx_completed_idx %= MB8795_NRXBUFS;

		DPRINTF(("%s: xe_dma_rx_completed(), "
		    "sc->sc_rx_completed_idx = %d\n",
		    sc->sc_dev.dv_xname, xsc->sc_rx_completed_idx));

#ifdef DIAGNOSTIC
		if (map != xsc->sc_rx_dmamap[xsc->sc_rx_completed_idx]) {
			panic("%s: Unexpected rx dmamap completed",
			    sc->sc_dev.dv_xname);
		}
#endif
	}
#ifdef DIAGNOSTIC
	else
		DPRINTF(("%s: Unexpected rx dmamap completed "
		    "while if not running\n",
		    sc->sc_dev.dv_xname));
#endif
}

void
xe_dma_rx_shutdown(arg)
	void *arg;
{
	struct mb8795_softc *sc = arg;
	struct xe_softc *xsc = (struct xe_softc *)sc;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	if (ifp->if_flags & IFF_RUNNING) {
		DPRINTF(("%s: xe_dma_rx_shutdown(), restarting.\n",
		    sc->sc_dev.dv_xname));

		nextdma_start(xsc->sc_rxdma, DMACSR_SETREAD);
		if (turbo)
			MB_WRITE_REG(sc, MB8795_RXMODE,
			    MB8795_RXMODE_TEST | MB8795_RXMODE_MULTICAST);
	}
#ifdef DIAGNOSTIC
	else
		DPRINTF(("%s: Unexpected rx DMA shutdown "
		    "while if not running\n",
		    sc->sc_dev.dv_xname));
#endif
}

/*
 * Load a dmamap with a freshly allocated mbuf.
 */
struct mbuf *
xe_dma_rxmap_load(sc, map)
	struct mb8795_softc *sc;
	bus_dmamap_t map;
{
	struct xe_softc *xsc = (struct xe_softc *)sc;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct mbuf *m;
	int error;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m) {
		MCLGET(m, M_DONTWAIT);
		if ((m->m_flags & M_EXT) == 0) {
			m_freem(m);
			m = NULL;
		} else {
			m->m_len = MCLBYTES;
		}
	}
	if (!m) {
		/*
		 * @@@ Handle this gracefully by reusing a scratch
		 * buffer or something.
		 */
		panic("Unable to get memory for incoming ethernet");
	}

	/*
	 * Align the buffer; @@@ NeXT-specific.  Perhaps we should be
	 * using M_ALIGN here instead?  First shrink the length to
	 * leave a little room to align with.
	 */
	{
		u_char *buf = m->m_data;
		int buflen = m->m_len;

		buflen -= DMA_ENDALIGNMENT + DMA_BEGINALIGNMENT;
		REALIGN_DMABUF(buf, buflen);
		m->m_data = buf;
		m->m_len = buflen;
	}

	m->m_pkthdr.rcvif = ifp;
	m->m_pkthdr.len = m->m_len;

	error = bus_dmamap_load_mbuf(xsc->sc_rxdma->sc_dmat,
	    map, m, BUS_DMA_NOWAIT);

	bus_dmamap_sync(xsc->sc_rxdma->sc_dmat, map, 0,
	    map->dm_mapsize, BUS_DMASYNC_PREREAD);

	if (error) {
		DPRINTF(("DEBUG: m->m_data = %p, m->m_len = %d\n",
		    m->m_data, m->m_len));
		DPRINTF(("DEBUG: MCLBYTES = %d, map->_dm_size = %ld\n",
		    MCLBYTES, map->_dm_size));

		panic("%s: can't load rx mbuf chain, error = %d",
		    sc->sc_dev.dv_xname, error);
		/* NOTREACHED */
		m_freem(m);
		m = NULL;
	}

	return (m);
}

bus_dmamap_t
xe_dma_rx_continue(arg)
	void *arg;
{
	struct mb8795_softc *sc = arg;
	struct xe_softc *xsc = (struct xe_softc *)sc;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	bus_dmamap_t map = NULL;

	if (ifp->if_flags & IFF_RUNNING) {
		if (((xsc->sc_rx_loaded_idx+1) % MB8795_NRXBUFS)
		    == xsc->sc_rx_handled_idx) {
			/* Make space for one packet by dropping one. */
			struct mbuf *m;

			m = xe_dma_rx_mbuf(sc);
			if (m)
				m_freem(m);
#ifdef DIAGNOSTIC
			DPRINTF(("%s: out of receive DMA buffers\n",
			    sc->sc_dev.dv_xname));
#endif
		}
		xsc->sc_rx_loaded_idx++;
		xsc->sc_rx_loaded_idx %= MB8795_NRXBUFS;
		map = xsc->sc_rx_dmamap[xsc->sc_rx_loaded_idx];

		DPRINTF(("%s: xe_dma_rx_continue() "
		    "xsc->sc_rx_loaded_idx = %d\n",
		    sc->sc_dev.dv_xname, xsc->sc_rx_loaded_idx));
	}
#ifdef DIAGNOSTIC
	else
		panic("%s: Unexpected rx DMA continue while if not running",
		    sc->sc_dev.dv_xname);
#endif

	return (map);
}

bus_dmamap_t
xe_dma_tx_continue(arg)
	void *arg;
{
	struct mb8795_softc *sc = arg;
	struct xe_softc *xsc = (struct xe_softc *)sc;
	bus_dmamap_t map;

	DPRINTF(("%s: xe_dma_tx_continue()\n", sc->sc_dev.dv_xname));

	if (xsc->sc_tx_loaded) {
		map = NULL;
	} else {
		map = xsc->sc_tx_dmamap;
		xsc->sc_tx_loaded++;
	}

#ifdef DIAGNOSTIC
	if (xsc->sc_tx_loaded != 1) {
		panic("%s: sc->sc_tx_loaded is %d", sc->sc_dev.dv_xname,
		    xsc->sc_tx_loaded);
	}
#endif

	return (map);
}