/*	$NetBSD: if_xe.c,v 1.14 2003/07/15 02:59:31 lukem Exp $	*/
/*
 * Copyright (c) 1998 Darrin B. Jewell
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Darrin B. Jewell
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_xe.c,v 1.14 2003/07/15 02:59:31 lukem Exp $");

#include "opt_inet.h"
#include "bpfilter.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/syslog.h>
#include <sys/socket.h>
#include <sys/device.h>

#include <net/if.h>
#include <net/if_ether.h>
#include <net/if_media.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/if_inarp.h>
#endif

#include <machine/autoconf.h>
#include <machine/cpu.h>
#include <machine/intr.h>
#include <machine/bus.h>

#include <next68k/next68k/isr.h>

#include <next68k/dev/mb8795reg.h>
#include <next68k/dev/mb8795var.h>

#include <next68k/dev/bmapreg.h>
#include <next68k/dev/intiovar.h>
#include <next68k/dev/nextdmareg.h>
#include <next68k/dev/nextdmavar.h>

#include <next68k/dev/if_xevar.h>
#include <next68k/dev/if_xereg.h>

#ifdef DEBUG
#define XE_DEBUG
#endif

#ifdef XE_DEBUG
int xe_debug = 0;
/* Wrap in do { } while (0) so DPRINTF() behaves as a single statement. */
#define DPRINTF(x) do { if (xe_debug) printf x; } while (0)
extern char *ndtracep;
extern char ndtrace[];
extern int ndtraceshow;
#define NDTRACEIF(x) if (10 && ndtracep < (ndtrace + 8192)) do {x;} while (0)
#else
#define DPRINTF(x)
#define NDTRACEIF(x)
#endif
#define PRINTF(x) printf x

extern int turbo;

int	xe_match __P((struct device *, struct cfdata *, void *));
void	xe_attach __P((struct device *, struct device *, void *));
int	xe_tint __P((void *));
int	xe_rint __P((void *));

struct mbuf *xe_dma_rxmap_load __P((struct mb8795_softc *,
		bus_dmamap_t map));

bus_dmamap_t xe_dma_rx_continue __P((void *));
void	xe_dma_rx_completed __P((bus_dmamap_t, void *));
bus_dmamap_t xe_dma_tx_continue __P((void *));
void	xe_dma_tx_completed __P((bus_dmamap_t, void *));
void	xe_dma_rx_shutdown __P((void *));
void	xe_dma_tx_shutdown __P((void *));

static void	findchannel_defer __P((struct device *));

CFATTACH_DECL(xe, sizeof(struct xe_softc),
    xe_match, xe_attach, NULL, NULL);

static int xe_dma_medias[] = {
	IFM_ETHER|IFM_AUTO,
	IFM_ETHER|IFM_10_T,
	IFM_ETHER|IFM_10_2,
};
static int nxe_dma_medias = (sizeof(xe_dma_medias)/sizeof(xe_dma_medias[0]));

static int attached = 0;

/*
 * Functions and the switch for the MI code.
 */
u_char	xe_read_reg __P((struct mb8795_softc *, int));
void	xe_write_reg __P((struct mb8795_softc *, int, u_char));
void	xe_dma_reset __P((struct mb8795_softc *));
void	xe_dma_rx_setup __P((struct mb8795_softc *));
void	xe_dma_rx_go __P((struct mb8795_softc *));
struct mbuf *xe_dma_rx_mbuf __P((struct mb8795_softc *));
void	xe_dma_tx_setup __P((struct mb8795_softc *));
void	xe_dma_tx_go __P((struct mb8795_softc *));
int	xe_dma_tx_mbuf __P((struct mb8795_softc *, struct mbuf *));
int	xe_dma_tx_isactive __P((struct mb8795_softc *));
#if 0
int	xe_dma_setup __P((struct mb8795_softc *, caddr_t *,
		size_t *, int, size_t *));
void	xe_dma_go __P((struct mb8795_softc *));
void	xe_dma_stop __P((struct mb8795_softc *));
int	xe_dma_isactive __P((struct mb8795_softc *));
#endif

struct mb8795_glue xe_glue = {
	xe_read_reg,
	xe_write_reg,
	xe_dma_reset,
	xe_dma_rx_setup,
	xe_dma_rx_go,
	xe_dma_rx_mbuf,
	xe_dma_tx_setup,
	xe_dma_tx_go,
	xe_dma_tx_mbuf,
	xe_dma_tx_isactive,
#if 0
	xe_dma_setup,
	xe_dma_go,
	xe_dma_stop,
	xe_dma_isactive,
	NULL,		/* gl_clear_latched_intr */
#endif
};

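/*
 * Autoconfiguration match.  There is only one on-board MB8795, so once an
 * instance has attached we refuse to match again; otherwise we claim the
 * fixed address NEXT_P_ENET unconditionally.
 */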
int
xe_match(parent, match, aux)
	struct device *parent;
	struct cfdata *match;
	void *aux;
{
	struct intio_attach_args *ia = (struct intio_attach_args *)aux;

	if (attached)
		return (0);

	ia->ia_addr = (void *)NEXT_P_ENET;

	return (1);
}

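/*
 * The "enetx" and "enetr" DMA channels may attach after this device, so
 * the bulk of the attachment lives here and is deferred via config_defer()
 * until both channels can be found.  This sets up the DMA callbacks,
 * creates the tx/rx DMA maps, allocates the tx bounce buffer, and hooks up
 * the interrupt handlers.
 */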
static void
findchannel_defer(self)
	struct device *self;
{
	struct xe_softc *xsc = (struct xe_softc *)self;
	struct mb8795_softc *sc = &xsc->sc_mb8795;
	int i, error;

	if (!xsc->sc_txdma) {
		xsc->sc_txdma = nextdma_findchannel("enetx");
		if (xsc->sc_txdma == NULL)
			panic("%s: can't find enetx DMA channel",
			    sc->sc_dev.dv_xname);
	}
	if (!xsc->sc_rxdma) {
		xsc->sc_rxdma = nextdma_findchannel("enetr");
		if (xsc->sc_rxdma == NULL)
			panic("%s: can't find enetr DMA channel",
			    sc->sc_dev.dv_xname);
	}
	printf("%s: using DMA channels %s %s\n", sc->sc_dev.dv_xname,
	    xsc->sc_txdma->sc_dev.dv_xname, xsc->sc_rxdma->sc_dev.dv_xname);

	nextdma_setconf(xsc->sc_rxdma, continue_cb, xe_dma_rx_continue);
	nextdma_setconf(xsc->sc_rxdma, completed_cb, xe_dma_rx_completed);
	nextdma_setconf(xsc->sc_rxdma, shutdown_cb, xe_dma_rx_shutdown);
	nextdma_setconf(xsc->sc_rxdma, cb_arg, sc);

	nextdma_setconf(xsc->sc_txdma, continue_cb, xe_dma_tx_continue);
	nextdma_setconf(xsc->sc_txdma, completed_cb, xe_dma_tx_completed);
	nextdma_setconf(xsc->sc_txdma, shutdown_cb, xe_dma_tx_shutdown);
	nextdma_setconf(xsc->sc_txdma, cb_arg, sc);

	/* Initialize the DMA maps. */
	error = bus_dmamap_create(xsc->sc_txdma->sc_dmat, MCLBYTES,
	    (MCLBYTES/MSIZE), MCLBYTES, 0, BUS_DMA_ALLOCNOW,
	    &xsc->sc_tx_dmamap);
	if (error) {
		panic("%s: can't create tx DMA map, error = %d",
		    sc->sc_dev.dv_xname, error);
	}

	for (i = 0; i < MB8795_NRXBUFS; i++) {
		error = bus_dmamap_create(xsc->sc_rxdma->sc_dmat, MCLBYTES,
		    (MCLBYTES/MSIZE), MCLBYTES, 0, BUS_DMA_ALLOCNOW,
		    &xsc->sc_rx_dmamap[i]);
		if (error) {
			panic("%s: can't create rx DMA map, error = %d",
			    sc->sc_dev.dv_xname, error);
		}
		xsc->sc_rx_mb_head[i] = NULL;
	}
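	/*
	 * The receive ring is tracked with three indices: sc_rx_loaded_idx
	 * is the last map handed to the DMA engine, sc_rx_completed_idx the
	 * last map the engine finished filling, and sc_rx_handled_idx the
	 * last map drained by xe_dma_rx_mbuf().  All three chase each other
	 * modulo MB8795_NRXBUFS.
	 */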
	xsc->sc_rx_loaded_idx = 0;
	xsc->sc_rx_completed_idx = 0;
	xsc->sc_rx_handled_idx = 0;

	/*
	 * @@@ More NeXT hacks: 2000 bytes covers at least a 1500-byte MTU
	 * plus headers plus DMA_BEGINALIGNMENT + DMA_ENDALIGNMENT slop.
	 */
	xsc->sc_txbuf = malloc(2000, M_DEVBUF, M_NOWAIT);
	if (!xsc->sc_txbuf)
		panic("%s: can't malloc tx DMA buffer", sc->sc_dev.dv_xname);

	xsc->sc_tx_mb_head = NULL;
	xsc->sc_tx_loaded = 0;

	mb8795_config(sc, xe_dma_medias, nxe_dma_medias, xe_dma_medias[0]);

	isrlink_autovec(xe_tint, sc, NEXT_I_IPL(NEXT_I_ENETX), 1, NULL);
	INTR_ENABLE(NEXT_I_ENETX);
	isrlink_autovec(xe_rint, sc, NEXT_I_IPL(NEXT_I_ENETR), 1, NULL);
	INTR_ENABLE(NEXT_I_ENETR);
}

void
xe_attach(parent, self, aux)
	struct device *parent, *self;
	void *aux;
{
	struct intio_attach_args *ia = (struct intio_attach_args *)aux;
	struct xe_softc *xsc = (struct xe_softc *)self;
	struct mb8795_softc *sc = &xsc->sc_mb8795;

	DPRINTF(("%s: xe_attach()\n", sc->sc_dev.dv_xname));

	{
		/* Kludge from machdep.c:next68k_bootargs(). */
		extern u_char rom_enetaddr[6];
		int i;

		for (i = 0; i < 6; i++)
			sc->sc_enaddr[i] = rom_enetaddr[i];
	}

	printf("\n%s: MAC address %02x:%02x:%02x:%02x:%02x:%02x\n",
	    sc->sc_dev.dv_xname,
	    sc->sc_enaddr[0], sc->sc_enaddr[1], sc->sc_enaddr[2],
	    sc->sc_enaddr[3], sc->sc_enaddr[4], sc->sc_enaddr[5]);

	xsc->sc_bst = ia->ia_bst;
	if (bus_space_map(xsc->sc_bst, NEXT_P_ENET,
	    XE_DEVICE_SIZE, 0, &xsc->sc_bsh)) {
		panic("\n%s: can't map mb8795 registers",
		    sc->sc_dev.dv_xname);
	}

	sc->sc_bmap_bst = ia->ia_bst;
	if (bus_space_map(sc->sc_bmap_bst, NEXT_P_BMAP,
	    BMAP_SIZE, 0, &sc->sc_bmap_bsh)) {
		panic("\n%s: can't map bmap registers",
		    sc->sc_dev.dv_xname);
	}

	/*
	 * Set up glue for MI code.
	 */
	sc->sc_glue = &xe_glue;

	xsc->sc_txdma = nextdma_findchannel("enetx");
	xsc->sc_rxdma = nextdma_findchannel("enetr");
	if (xsc->sc_rxdma && xsc->sc_txdma) {
		findchannel_defer(self);
	} else {
		config_defer(self, findchannel_defer);
	}

	attached = 1;
}

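/*
 * Autovectored interrupt handlers.  Each handler first checks whether its
 * interrupt is actually pending before calling into the MI MB8795 code,
 * since autovectored handlers may be invoked for interrupts they do not
 * own.
 */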
int
xe_tint(arg)
	void *arg;
{
	if (!INTR_OCCURRED(NEXT_I_ENETX))
		return (0);
	mb8795_tint((struct mb8795_softc *)arg);
	return (1);
}

int
xe_rint(arg)
	void *arg;
{
	if (!INTR_OCCURRED(NEXT_I_ENETR))
		return (0);
	mb8795_rint((struct mb8795_softc *)arg);
	return (1);
}

/*
 * Glue functions.
 */

u_char
xe_read_reg(sc, reg)
	struct mb8795_softc *sc;
	int reg;
{
	struct xe_softc *xsc = (struct xe_softc *)sc;

	return (bus_space_read_1(xsc->sc_bst, xsc->sc_bsh, reg));
}

void
xe_write_reg(sc, reg, val)
	struct mb8795_softc *sc;
	int reg;
	u_char val;
{
	struct xe_softc *xsc = (struct xe_softc *)sc;

	bus_space_write_1(xsc->sc_bst, xsc->sc_bsh, reg, val);
}

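/*
 * Reset both DMA channels and throw away in-flight state: sync and unload
 * the tx map if a transmit was loaded, free the pending tx mbuf chain, and
 * unload and free every receive buffer in the ring.
 */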
void
xe_dma_reset(sc)
	struct mb8795_softc *sc;
{
	struct xe_softc *xsc = (struct xe_softc *)sc;
	int i;

	DPRINTF(("xe DMA reset\n"));

	nextdma_reset(xsc->sc_rxdma);
	nextdma_reset(xsc->sc_txdma);

	if (xsc->sc_tx_loaded) {
		bus_dmamap_sync(xsc->sc_txdma->sc_dmat, xsc->sc_tx_dmamap,
		    0, xsc->sc_tx_dmamap->dm_mapsize,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(xsc->sc_txdma->sc_dmat, xsc->sc_tx_dmamap);
		xsc->sc_tx_loaded = 0;
	}
	if (xsc->sc_tx_mb_head) {
		m_freem(xsc->sc_tx_mb_head);
		xsc->sc_tx_mb_head = NULL;
	}

	for (i = 0; i < MB8795_NRXBUFS; i++) {
		if (xsc->sc_rx_mb_head[i]) {
			bus_dmamap_unload(xsc->sc_rxdma->sc_dmat,
			    xsc->sc_rx_dmamap[i]);
			m_freem(xsc->sc_rx_mb_head[i]);
			xsc->sc_rx_mb_head[i] = NULL;
		}
	}
}

void
xe_dma_rx_setup(sc)
	struct mb8795_softc *sc;
{
	struct xe_softc *xsc = (struct xe_softc *)sc;
	int i;

	DPRINTF(("xe DMA rx setup\n"));

	for (i = 0; i < MB8795_NRXBUFS; i++) {
		xsc->sc_rx_mb_head[i] =
		    xe_dma_rxmap_load(sc, xsc->sc_rx_dmamap[i]);
	}
	xsc->sc_rx_loaded_idx = 0;
	xsc->sc_rx_completed_idx = 0;
	xsc->sc_rx_handled_idx = 0;

	nextdma_init(xsc->sc_rxdma);
}

void
xe_dma_rx_go(sc)
	struct mb8795_softc *sc;
{
	struct xe_softc *xsc = (struct xe_softc *)sc;

	DPRINTF(("xe DMA rx go\n"));

	nextdma_start(xsc->sc_rxdma, DMACSR_SETREAD);
}

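/*
 * Hand the next completed receive buffer to the MI code.  Advances
 * sc_rx_handled_idx, unloads the map, and immediately reloads the slot
 * with a fresh mbuf so the DMA ring stays fully populated.
 */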
struct mbuf *
xe_dma_rx_mbuf(sc)
	struct mb8795_softc *sc;
{
	struct xe_softc *xsc = (struct xe_softc *)sc;
	bus_dmamap_t map;
	struct mbuf *m;

	m = NULL;
	if (xsc->sc_rx_handled_idx != xsc->sc_rx_completed_idx) {
		xsc->sc_rx_handled_idx++;
		xsc->sc_rx_handled_idx %= MB8795_NRXBUFS;

		map = xsc->sc_rx_dmamap[xsc->sc_rx_handled_idx];
		m = xsc->sc_rx_mb_head[xsc->sc_rx_handled_idx];

		m->m_len = map->dm_xfer_len;

		bus_dmamap_sync(xsc->sc_rxdma->sc_dmat, map,
		    0, map->dm_mapsize, BUS_DMASYNC_POSTREAD);

		bus_dmamap_unload(xsc->sc_rxdma->sc_dmat, map);

		/* Install a fresh mbuf for the next packet. */

		xsc->sc_rx_mb_head[xsc->sc_rx_handled_idx] =
		    xe_dma_rxmap_load(sc, map);

		/*
		 * Punt runt packets; DMA restarts create zero-length
		 * packets, for example.
		 */
		if (m->m_len < ETHER_MIN_LEN) {
			m_freem(m);
			m = NULL;
		}
	}
	return (m);
}

void
xe_dma_tx_setup(sc)
	struct mb8795_softc *sc;
{
	struct xe_softc *xsc = (struct xe_softc *)sc;

	DPRINTF(("xe DMA tx setup\n"));

	nextdma_init(xsc->sc_txdma);
}

void
xe_dma_tx_go(sc)
	struct mb8795_softc *sc;
{
	struct xe_softc *xsc = (struct xe_softc *)sc;

	DPRINTF(("xe DMA tx go\n"));

	nextdma_start(xsc->sc_txdma, DMACSR_SETWRITE);
}

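/*
 * Queue an mbuf chain for transmit.  Rather than DMA directly out of the
 * chain, the packet is flattened into the pre-allocated, DMA-aligned
 * sc_txbuf bounce buffer and padded out to the minimum Ethernet frame
 * length before the map is loaded.
 */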
int
xe_dma_tx_mbuf(sc, m)
	struct mb8795_softc *sc;
	struct mbuf *m;
{
	struct xe_softc *xsc = (struct xe_softc *)sc;
	int error;

	xsc->sc_tx_mb_head = m;

/*
 * The following is a NeXT-specific hack that should probably be moved out
 * of MI code.  This macro assumes it can move the start pointer forward as
 * needed in the buffer; perhaps it should zero the extra space.
 */
#define REALIGN_DMABUF(s, l) \
	{ (s) = ((u_char *)(((unsigned)(s)+DMA_BEGINALIGNMENT-1) \
			&~(DMA_BEGINALIGNMENT-1))); \
	(l) = ((u_char *)(((unsigned)((s)+(l))+DMA_ENDALIGNMENT-1) \
				&~(DMA_ENDALIGNMENT-1)))-(s);}
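/*
 * Illustrative arithmetic only (the real values come from nextdmareg.h):
 * assuming DMA_BEGINALIGNMENT = 4 and DMA_ENDALIGNMENT = 16, a buffer at
 * s = 0x1002 with l = 50 becomes s = 0x1004 (rounded up to the next 4-byte
 * boundary), and the end 0x1004 + 50 = 0x1036 is rounded up to 0x1040,
 * giving l = 0x3c (60).  Both the start and the end can move forward past
 * the caller's original extent, which is why callers must leave slack in
 * the buffer.
 */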

#if 0
	error = bus_dmamap_load_mbuf(xsc->sc_txdma->sc_dmat,
	    xsc->sc_tx_dmamap, xsc->sc_tx_mb_head, BUS_DMA_NOWAIT);
#else
	{
		u_char *buf = xsc->sc_txbuf;
		int buflen = 0;

		buflen = m->m_pkthdr.len;

		{
			u_char *p = buf;

			for (m = xsc->sc_tx_mb_head; m; m = m->m_next) {
				if (m->m_len == 0)
					continue;
				bcopy(mtod(m, u_char *), p, m->m_len);
				p += m->m_len;
			}
			/* Fix runt packets. */
			if (buflen < ETHER_MIN_LEN - ETHER_CRC_LEN) {
				memset(p, 0,
				    ETHER_MIN_LEN - ETHER_CRC_LEN - buflen);
				buflen = ETHER_MIN_LEN - ETHER_CRC_LEN;
			}
		}

		error = bus_dmamap_load(xsc->sc_txdma->sc_dmat,
		    xsc->sc_tx_dmamap, buf, buflen, NULL, BUS_DMA_NOWAIT);
	}
#endif
	if (error) {
		printf("%s: can't load mbuf chain, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		m_freem(xsc->sc_tx_mb_head);
		xsc->sc_tx_mb_head = NULL;
		return (error);
	}

#ifdef DIAGNOSTIC
	if (xsc->sc_tx_loaded != 0) {
		panic("%s: xsc->sc_tx_loaded is %d", sc->sc_dev.dv_xname,
		    xsc->sc_tx_loaded);
	}
#endif

	bus_dmamap_sync(xsc->sc_txdma->sc_dmat, xsc->sc_tx_dmamap, 0,
	    xsc->sc_tx_dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE);

	return (0);
}

int
xe_dma_tx_isactive(sc)
	struct mb8795_softc *sc;
{
	struct xe_softc *xsc = (struct xe_softc *)sc;

	return (xsc->sc_tx_loaded != 0);
}

/****************************************************************/

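/*
 * DMA callbacks, as configured in findchannel_defer(): the nextdma engine
 * calls the "completed" callback when a map finishes, the "shutdown"
 * callback when the channel goes idle, and the "continue" callback when it
 * wants the next map to chain.
 */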
void
xe_dma_tx_completed(map, arg)
	bus_dmamap_t map;
	void *arg;
{
	struct mb8795_softc *sc = arg;
	struct xe_softc *xsc = (struct xe_softc *)sc;

	DPRINTF(("%s: xe_dma_tx_completed()\n", sc->sc_dev.dv_xname));

#ifdef DIAGNOSTIC
	if (!xsc->sc_tx_loaded) {
		panic("%s: tx completed never loaded", sc->sc_dev.dv_xname);
	}
	if (map != xsc->sc_tx_dmamap) {
		panic("%s: unexpected tx completed map", sc->sc_dev.dv_xname);
	}
#endif
}

void
xe_dma_tx_shutdown(arg)
	void *arg;
{
	struct mb8795_softc *sc = arg;
	struct xe_softc *xsc = (struct xe_softc *)sc;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	DPRINTF(("%s: xe_dma_tx_shutdown()\n", sc->sc_dev.dv_xname));

#ifdef DIAGNOSTIC
	if (!xsc->sc_tx_loaded) {
		panic("%s: tx shutdown never loaded", sc->sc_dev.dv_xname);
	}
#endif

	if (turbo)
		MB_WRITE_REG(sc, MB8795_TXMODE, MB8795_TXMODE_TURBO1);
	if (xsc->sc_tx_loaded) {
		bus_dmamap_sync(xsc->sc_txdma->sc_dmat, xsc->sc_tx_dmamap,
		    0, xsc->sc_tx_dmamap->dm_mapsize,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(xsc->sc_txdma->sc_dmat, xsc->sc_tx_dmamap);
		m_freem(xsc->sc_tx_mb_head);
		xsc->sc_tx_mb_head = NULL;

		xsc->sc_tx_loaded--;
	}

#ifdef DIAGNOSTIC
	if (xsc->sc_tx_loaded != 0) {
		panic("%s: sc->sc_tx_loaded is %d", sc->sc_dev.dv_xname,
		    xsc->sc_tx_loaded);
	}
#endif

	ifp->if_timer = 0;

#if 1
	if ((ifp->if_flags & IFF_RUNNING) && !IF_IS_EMPTY(&sc->sc_tx_snd)) {
		void mb8795_start_dma __P((struct mb8795_softc *)); /* XXXX */
		mb8795_start_dma(sc);
	}
#endif

#if 0
	/* Enable ready interrupt. */
	MB_WRITE_REG(sc, MB8795_TXMASK,
	    MB_READ_REG(sc, MB8795_TXMASK)
	    | MB8795_TXMASK_TXRXIE/* READYIE */);
#endif
}

void
xe_dma_rx_completed(map, arg)
	bus_dmamap_t map;
	void *arg;
{
	struct mb8795_softc *sc = arg;
	struct xe_softc *xsc = (struct xe_softc *)sc;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	if (ifp->if_flags & IFF_RUNNING) {
		xsc->sc_rx_completed_idx++;
		xsc->sc_rx_completed_idx %= MB8795_NRXBUFS;

		DPRINTF(("%s: xe_dma_rx_completed(), sc->sc_rx_completed_idx = %d\n",
		    sc->sc_dev.dv_xname, xsc->sc_rx_completed_idx));

#ifdef DIAGNOSTIC
		if (map != xsc->sc_rx_dmamap[xsc->sc_rx_completed_idx]) {
			panic("%s: unexpected rx dmamap completed",
			    sc->sc_dev.dv_xname);
		}
#endif
	}
#ifdef DIAGNOSTIC
	else
		DPRINTF(("%s: unexpected rx dmamap completed while if not running\n",
		    sc->sc_dev.dv_xname));
#endif
}

void
xe_dma_rx_shutdown(arg)
	void *arg;
{
	struct mb8795_softc *sc = arg;
	struct xe_softc *xsc = (struct xe_softc *)sc;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	if (ifp->if_flags & IFF_RUNNING) {
		DPRINTF(("%s: xe_dma_rx_shutdown(), restarting.\n",
		    sc->sc_dev.dv_xname));

		nextdma_start(xsc->sc_rxdma, DMACSR_SETREAD);
		if (turbo)
			MB_WRITE_REG(sc, MB8795_RXMODE,
			    MB8795_RXMODE_TEST | MB8795_RXMODE_MULTICAST);
	}
#ifdef DIAGNOSTIC
	else
		DPRINTF(("%s: unexpected rx DMA shutdown while if not running\n",
		    sc->sc_dev.dv_xname));
#endif
}

/*
 * Load a DMA map with a freshly allocated mbuf cluster.
 */
struct mbuf *
xe_dma_rxmap_load(sc, map)
	struct mb8795_softc *sc;
	bus_dmamap_t map;
{
	struct xe_softc *xsc = (struct xe_softc *)sc;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct mbuf *m;
	int error;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m) {
		MCLGET(m, M_DONTWAIT);
		if ((m->m_flags & M_EXT) == 0) {
			m_freem(m);
			m = NULL;
		} else {
			m->m_len = MCLBYTES;
		}
	}
	if (!m) {
		/*
		 * @@@ Handle this gracefully by reusing a scratch buffer
		 * or something.
		 */
		panic("Unable to get memory for incoming ethernet");
	}

	/*
	 * Align the buffer; @@@ NeXT-specific.  Perhaps we should be using
	 * M_ALIGN here instead?  First give ourselves a little room to
	 * align with.
	 */
	{
		u_char *buf = m->m_data;
		int buflen = m->m_len;

		buflen -= DMA_ENDALIGNMENT + DMA_BEGINALIGNMENT;
		REALIGN_DMABUF(buf, buflen);
		m->m_data = buf;
		m->m_len = buflen;
	}

	m->m_pkthdr.rcvif = ifp;
	m->m_pkthdr.len = m->m_len;

	error = bus_dmamap_load_mbuf(xsc->sc_rxdma->sc_dmat,
	    map, m, BUS_DMA_NOWAIT);

	bus_dmamap_sync(xsc->sc_rxdma->sc_dmat, map, 0,
	    map->dm_mapsize, BUS_DMASYNC_PREREAD);

	if (error) {
		DPRINTF(("DEBUG: m->m_data = %p, m->m_len = %d\n",
		    m->m_data, m->m_len));
		DPRINTF(("DEBUG: MCLBYTES = %d, map->_dm_size = %ld\n",
		    MCLBYTES, map->_dm_size));

		panic("%s: can't load rx mbuf chain, error = %d",
		    sc->sc_dev.dv_xname, error);
		/* NOTREACHED */
		m_freem(m);
		m = NULL;
	}

	return (m);
}

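/*
 * Called by the DMA engine for the next receive map to chain.  If the ring
 * is full (loading the next slot would collide with the oldest unhandled
 * one), the oldest completed packet is dropped to make room.
 */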
bus_dmamap_t
xe_dma_rx_continue(arg)
	void *arg;
{
	struct mb8795_softc *sc = arg;
	struct xe_softc *xsc = (struct xe_softc *)sc;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	bus_dmamap_t map = NULL;

	if (ifp->if_flags & IFF_RUNNING) {
		if (((xsc->sc_rx_loaded_idx+1)%MB8795_NRXBUFS) ==
		    xsc->sc_rx_handled_idx) {
			/* Make space for one packet by dropping one. */
			struct mbuf *m;

			m = xe_dma_rx_mbuf(sc);
			if (m)
				m_freem(m);
#ifdef DIAGNOSTIC
			DPRINTF(("%s: out of receive DMA buffers\n",
			    sc->sc_dev.dv_xname));
#endif
		}
		xsc->sc_rx_loaded_idx++;
		xsc->sc_rx_loaded_idx %= MB8795_NRXBUFS;
		map = xsc->sc_rx_dmamap[xsc->sc_rx_loaded_idx];

		DPRINTF(("%s: xe_dma_rx_continue() xsc->sc_rx_loaded_idx = %d\n",
		    sc->sc_dev.dv_xname, xsc->sc_rx_loaded_idx));
	}
#ifdef DIAGNOSTIC
	else
		panic("%s: unexpected rx DMA continue while if not running",
		    sc->sc_dev.dv_xname);
#endif

	return (map);
}

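/*
 * Called by the DMA engine for the next transmit map.  Only a single
 * transmit buffer is used, so the map is handed out exactly once per
 * packet; the second request returns NULL to terminate the chain.
 */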
bus_dmamap_t
xe_dma_tx_continue(arg)
	void *arg;
{
	struct mb8795_softc *sc = arg;
	struct xe_softc *xsc = (struct xe_softc *)sc;
	bus_dmamap_t map;

	DPRINTF(("%s: xe_dma_tx_continue()\n", sc->sc_dev.dv_xname));

	if (xsc->sc_tx_loaded) {
		map = NULL;
	} else {
		map = xsc->sc_tx_dmamap;
		xsc->sc_tx_loaded++;
	}

#ifdef DIAGNOSTIC
	if (xsc->sc_tx_loaded != 1) {
		panic("%s: sc->sc_tx_loaded is %d", sc->sc_dev.dv_xname,
		    xsc->sc_tx_loaded);
	}
#endif

	return (map);
}