/*	$NetBSD: if_xe.c,v 1.22 2012/10/27 17:18:05 chs Exp $	*/
/*
 * Copyright (c) 1998 Darrin B. Jewell
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_xe.c,v 1.22 2012/10/27 17:18:05 chs Exp $");

#include "opt_inet.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/syslog.h>
#include <sys/socket.h>
#include <sys/device.h>

#include <net/if.h>
#include <net/if_ether.h>
#include <net/if_media.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/if_inarp.h>
#endif

#include <machine/autoconf.h>
#include <machine/cpu.h>
#include <machine/intr.h>
#include <machine/bus.h>

#include <next68k/next68k/isr.h>

#include <next68k/dev/mb8795reg.h>
#include <next68k/dev/mb8795var.h>

#include <next68k/dev/bmapreg.h>
#include <next68k/dev/intiovar.h>
#include <next68k/dev/nextdmareg.h>
#include <next68k/dev/nextdmavar.h>

#include <next68k/dev/if_xevar.h>
#include <next68k/dev/if_xereg.h>

#ifdef DEBUG
#define XE_DEBUG
#endif

#ifdef XE_DEBUG
int xe_debug = 0;
#define DPRINTF(x)	do { if (xe_debug) printf x; } while (0)
extern char *ndtracep;
extern char ndtrace[];
extern int ndtraceshow;
/* The leading constant is a quick compile-time switch for the trace hook. */
#define NDTRACEIF(x)	do { if (10 && ndtracep < (ndtrace + 8192)) { x; } } while (0)
#else
#define DPRINTF(x)
#define NDTRACEIF(x)
#endif
#define PRINTF(x)	printf x

extern int turbo;

int xe_match(device_t, cfdata_t, void *);
void xe_attach(device_t, device_t, void *);
int xe_tint(void *);
int xe_rint(void *);

struct mbuf *xe_dma_rxmap_load(struct mb8795_softc *, bus_dmamap_t);

bus_dmamap_t xe_dma_rx_continue(void *);
void xe_dma_rx_completed(bus_dmamap_t, void *);
bus_dmamap_t xe_dma_tx_continue(void *);
void xe_dma_tx_completed(bus_dmamap_t, void *);
void xe_dma_rx_shutdown(void *);
void xe_dma_tx_shutdown(void *);
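
/*
 * nextdma callback protocol, as used below: the engine calls continue_cb
 * to fetch the next dmamap to transfer (NULL means none is available),
 * calls completed_cb once per map when that map's transfer finishes,
 * and calls shutdown_cb when the channel stops.
 */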

static void findchannel_defer(device_t);

CFATTACH_DECL_NEW(xe, sizeof(struct xe_softc),
    xe_match, xe_attach, NULL, NULL);

static int xe_dma_medias[] = {
	IFM_ETHER|IFM_AUTO,
	IFM_ETHER|IFM_10_T,
	IFM_ETHER|IFM_10_2,
};
static int nxe_dma_medias = (sizeof(xe_dma_medias)/sizeof(xe_dma_medias[0]));

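/*
 * The NeXT hardware has a single on-board MB8795; once an instance has
 * attached, xe_match() refuses any further matches.
 */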
static int attached = 0;

/*
 * Functions and the switch for the MI code.
 */
u_char xe_read_reg(struct mb8795_softc *, int);
void xe_write_reg(struct mb8795_softc *, int, u_char);
void xe_dma_reset(struct mb8795_softc *);
void xe_dma_rx_setup(struct mb8795_softc *);
void xe_dma_rx_go(struct mb8795_softc *);
struct mbuf *xe_dma_rx_mbuf(struct mb8795_softc *);
void xe_dma_tx_setup(struct mb8795_softc *);
void xe_dma_tx_go(struct mb8795_softc *);
int xe_dma_tx_mbuf(struct mb8795_softc *, struct mbuf *);
int xe_dma_tx_isactive(struct mb8795_softc *);

struct mb8795_glue xe_glue = {
	xe_read_reg,
	xe_write_reg,
	xe_dma_reset,
	xe_dma_rx_setup,
	xe_dma_rx_go,
	xe_dma_rx_mbuf,
	xe_dma_tx_setup,
	xe_dma_tx_go,
	xe_dma_tx_mbuf,
	xe_dma_tx_isactive,
};

int
xe_match(device_t parent, cfdata_t match, void *aux)
{
	struct intio_attach_args *ia = (struct intio_attach_args *)aux;

	if (attached)
		return (0);

	ia->ia_addr = (void *)NEXT_P_ENET;

	return (1);
}

static void
findchannel_defer(device_t self)
{
	struct xe_softc *xsc = device_private(self);
	struct mb8795_softc *sc = &xsc->sc_mb8795;
	int i, error;

	if (!xsc->sc_txdma) {
		xsc->sc_txdma = nextdma_findchannel("enetx");
		if (xsc->sc_txdma == NULL)
			panic("%s: can't find enetx DMA channel",
			    device_xname(sc->sc_dev));
	}
	if (!xsc->sc_rxdma) {
		xsc->sc_rxdma = nextdma_findchannel("enetr");
		if (xsc->sc_rxdma == NULL)
			panic("%s: can't find enetr DMA channel",
			    device_xname(sc->sc_dev));
	}
	aprint_normal_dev(sc->sc_dev, "using DMA channels %s %s\n",
	    device_xname(xsc->sc_txdma->sc_dev),
	    device_xname(xsc->sc_rxdma->sc_dev));

	nextdma_setconf(xsc->sc_rxdma, continue_cb, xe_dma_rx_continue);
	nextdma_setconf(xsc->sc_rxdma, completed_cb, xe_dma_rx_completed);
	nextdma_setconf(xsc->sc_rxdma, shutdown_cb, xe_dma_rx_shutdown);
	nextdma_setconf(xsc->sc_rxdma, cb_arg, sc);

	nextdma_setconf(xsc->sc_txdma, continue_cb, xe_dma_tx_continue);
	nextdma_setconf(xsc->sc_txdma, completed_cb, xe_dma_tx_completed);
	nextdma_setconf(xsc->sc_txdma, shutdown_cb, xe_dma_tx_shutdown);
	nextdma_setconf(xsc->sc_txdma, cb_arg, sc);

	/* Initialize the DMA maps. */
	error = bus_dmamap_create(xsc->sc_txdma->sc_dmat, MCLBYTES,
	    (MCLBYTES/MSIZE), MCLBYTES, 0, BUS_DMA_ALLOCNOW,
	    &xsc->sc_tx_dmamap);
	if (error) {
		aprint_error_dev(sc->sc_dev,
		    "can't create tx DMA map, error = %d\n", error);
	}

	for (i = 0; i < MB8795_NRXBUFS; i++) {
		error = bus_dmamap_create(xsc->sc_rxdma->sc_dmat, MCLBYTES,
		    (MCLBYTES/MSIZE), MCLBYTES, 0, BUS_DMA_ALLOCNOW,
		    &xsc->sc_rx_dmamap[i]);
		if (error) {
			panic("%s: can't create rx DMA map, error = %d",
			    device_xname(sc->sc_dev), error);
		}
		xsc->sc_rx_mb_head[i] = NULL;
	}
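
	/*
	 * Ring indices for the receive buffers: sc_rx_loaded_idx is the
	 * last map handed to the DMA engine (advanced in
	 * xe_dma_rx_continue()), sc_rx_completed_idx is the last map the
	 * engine has filled (advanced in xe_dma_rx_completed()), and
	 * sc_rx_handled_idx is the last buffer passed up to the stack
	 * (advanced in xe_dma_rx_mbuf()).  All advance modulo
	 * MB8795_NRXBUFS.
	 */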
	xsc->sc_rx_loaded_idx = 0;
	xsc->sc_rx_completed_idx = 0;
	xsc->sc_rx_handled_idx = 0;

	/*
	 * @@@ More NeXT hacks: 2000 bytes covers the largest frame (1500
	 * bytes of payload plus the 14-byte Ethernet header and 4-byte
	 * CRC) with room left over for DMA_BEGINALIGNMENT and
	 * DMA_ENDALIGNMENT rounding.
	 */
	xsc->sc_txbuf = malloc(2000, M_DEVBUF, M_NOWAIT);
	if (!xsc->sc_txbuf)
		panic("%s: can't malloc tx DMA buffer",
		    device_xname(sc->sc_dev));

	xsc->sc_tx_mb_head = NULL;
	xsc->sc_tx_loaded = 0;

	mb8795_config(sc, xe_dma_medias, nxe_dma_medias, xe_dma_medias[0]);

	isrlink_autovec(xe_tint, sc, NEXT_I_IPL(NEXT_I_ENETX), 1, NULL);
	INTR_ENABLE(NEXT_I_ENETX);
	isrlink_autovec(xe_rint, sc, NEXT_I_IPL(NEXT_I_ENETR), 1, NULL);
	INTR_ENABLE(NEXT_I_ENETR);
}

void
xe_attach(device_t parent, device_t self, void *aux)
{
	struct intio_attach_args *ia = (struct intio_attach_args *)aux;
	struct xe_softc *xsc = device_private(self);
	struct mb8795_softc *sc = &xsc->sc_mb8795;

	sc->sc_dev = self;
	DPRINTF(("%s: xe_attach()\n", device_xname(self)));

	{
		/* kludge from machdep.c:next68k_bootargs() */
		extern u_char rom_enetaddr[6];
		int i;

		for (i = 0; i < 6; i++) {
			sc->sc_enaddr[i] = rom_enetaddr[i];
		}
	}

	printf("\n%s: MAC address %02x:%02x:%02x:%02x:%02x:%02x\n",
	    device_xname(self),
	    sc->sc_enaddr[0], sc->sc_enaddr[1], sc->sc_enaddr[2],
	    sc->sc_enaddr[3], sc->sc_enaddr[4], sc->sc_enaddr[5]);

	xsc->sc_bst = ia->ia_bst;
	if (bus_space_map(xsc->sc_bst, NEXT_P_ENET,
	    XE_DEVICE_SIZE, 0, &xsc->sc_bsh)) {
		panic("\n%s: can't map mb8795 registers",
		    device_xname(self));
	}

	sc->sc_bmap_bst = ia->ia_bst;
	if (bus_space_map(sc->sc_bmap_bst, NEXT_P_BMAP,
	    BMAP_SIZE, 0, &sc->sc_bmap_bsh)) {
		panic("\n%s: can't map bmap registers",
		    device_xname(self));
	}

	/*
	 * Set up glue for MI code.
	 */
	sc->sc_glue = &xe_glue;

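	/*
	 * The enetx/enetr DMA channels may not have attached yet.  If
	 * either is missing, defer the remainder of the setup with
	 * config_defer() until autoconfiguration has finished.
	 */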
	xsc->sc_txdma = nextdma_findchannel("enetx");
	xsc->sc_rxdma = nextdma_findchannel("enetr");
	if (xsc->sc_rxdma && xsc->sc_txdma) {
		findchannel_defer(self);
	} else {
		config_defer(self, findchannel_defer);
	}

	attached = 1;
}

int
xe_tint(void *arg)
{
	if (!INTR_OCCURRED(NEXT_I_ENETX))
		return (0);
	mb8795_tint((struct mb8795_softc *)arg);
	return (1);
}

int
xe_rint(void *arg)
{
	if (!INTR_OCCURRED(NEXT_I_ENETR))
		return (0);
	mb8795_rint((struct mb8795_softc *)arg);
	return (1);
}

/*
 * Glue functions.
 */

u_char
xe_read_reg(struct mb8795_softc *sc, int reg)
{
	struct xe_softc *xsc = (struct xe_softc *)sc;

	return (bus_space_read_1(xsc->sc_bst, xsc->sc_bsh, reg));
}

void
xe_write_reg(struct mb8795_softc *sc, int reg, u_char val)
{
	struct xe_softc *xsc = (struct xe_softc *)sc;

	bus_space_write_1(xsc->sc_bst, xsc->sc_bsh, reg, val);
}

void
xe_dma_reset(struct mb8795_softc *sc)
{
	struct xe_softc *xsc = (struct xe_softc *)sc;
	int i;

	DPRINTF(("xe DMA reset\n"));

	nextdma_reset(xsc->sc_rxdma);
	nextdma_reset(xsc->sc_txdma);

	if (xsc->sc_tx_loaded) {
		bus_dmamap_sync(xsc->sc_txdma->sc_dmat, xsc->sc_tx_dmamap,
		    0, xsc->sc_tx_dmamap->dm_mapsize,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(xsc->sc_txdma->sc_dmat, xsc->sc_tx_dmamap);
		xsc->sc_tx_loaded = 0;
	}
	if (xsc->sc_tx_mb_head) {
		m_freem(xsc->sc_tx_mb_head);
		xsc->sc_tx_mb_head = NULL;
	}

	for (i = 0; i < MB8795_NRXBUFS; i++) {
		if (xsc->sc_rx_mb_head[i]) {
			bus_dmamap_unload(xsc->sc_rxdma->sc_dmat,
			    xsc->sc_rx_dmamap[i]);
			m_freem(xsc->sc_rx_mb_head[i]);
			xsc->sc_rx_mb_head[i] = NULL;
		}
	}
}

void
xe_dma_rx_setup(struct mb8795_softc *sc)
{
	struct xe_softc *xsc = (struct xe_softc *)sc;
	int i;

	DPRINTF(("xe DMA rx setup\n"));

	for (i = 0; i < MB8795_NRXBUFS; i++) {
		xsc->sc_rx_mb_head[i] =
		    xe_dma_rxmap_load(sc, xsc->sc_rx_dmamap[i]);
	}
	xsc->sc_rx_loaded_idx = 0;
	xsc->sc_rx_completed_idx = 0;
	xsc->sc_rx_handled_idx = 0;

	nextdma_init(xsc->sc_rxdma);
}

void
xe_dma_rx_go(struct mb8795_softc *sc)
{
	struct xe_softc *xsc = (struct xe_softc *)sc;

	DPRINTF(("xe DMA rx go\n"));

	nextdma_start(xsc->sc_rxdma, DMACSR_SETREAD);
}

struct mbuf *
xe_dma_rx_mbuf(struct mb8795_softc *sc)
{
	struct xe_softc *xsc = (struct xe_softc *)sc;
	bus_dmamap_t map;
	struct mbuf *m;

	m = NULL;
	if (xsc->sc_rx_handled_idx != xsc->sc_rx_completed_idx) {
		xsc->sc_rx_handled_idx++;
		xsc->sc_rx_handled_idx %= MB8795_NRXBUFS;

		map = xsc->sc_rx_dmamap[xsc->sc_rx_handled_idx];
		m = xsc->sc_rx_mb_head[xsc->sc_rx_handled_idx];

		m->m_len = map->dm_xfer_len;

		bus_dmamap_sync(xsc->sc_rxdma->sc_dmat, map,
		    0, map->dm_mapsize, BUS_DMASYNC_POSTREAD);

		bus_dmamap_unload(xsc->sc_rxdma->sc_dmat, map);

		/* Install a fresh mbuf for the next packet. */
		xsc->sc_rx_mb_head[xsc->sc_rx_handled_idx] =
		    xe_dma_rxmap_load(sc, map);

		/*
		 * Punt runt packets: DMA restarts, for example, produce
		 * zero-length packets.
		 */
		if (m->m_len < ETHER_MIN_LEN) {
			m_freem(m);
			m = NULL;
		}
	}
	return (m);
}

void
xe_dma_tx_setup(struct mb8795_softc *sc)
{
	struct xe_softc *xsc = (struct xe_softc *)sc;

	DPRINTF(("xe DMA tx setup\n"));

	nextdma_init(xsc->sc_txdma);
}

void
xe_dma_tx_go(struct mb8795_softc *sc)
{
	struct xe_softc *xsc = (struct xe_softc *)sc;

	DPRINTF(("xe DMA tx go\n"));

	nextdma_start(xsc->sc_txdma, DMACSR_SETWRITE);
}

int
xe_dma_tx_mbuf(struct mb8795_softc *sc, struct mbuf *m)
{
	struct xe_softc *xsc = (struct xe_softc *)sc;
	int error;

	xsc->sc_tx_mb_head = m;

/*
 * The following is a NeXT-specific hack that should probably be moved
 * out of MI code.  The macro assumes it may move the buffer start
 * forward and extend its length as needed; perhaps it should zero the
 * extra bytes as well.
 */
#define REALIGN_DMABUF(s,l) \
	{ (s) = ((u_char *)(((unsigned)(s)+DMA_BEGINALIGNMENT-1) \
	    &~(DMA_BEGINALIGNMENT-1))); \
	(l) = ((u_char *)(((unsigned)((s)+(l))+DMA_ENDALIGNMENT-1) \
	    &~(DMA_ENDALIGNMENT-1)))-(s);}
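
/*
 * For illustration (the real values come from nextdmareg.h): with an
 * alignment of 16, a buffer starting at 0x1003 with length 60 is moved
 * to start at 0x1010, and the end 0x1010 + 60 = 0x104c is rounded up
 * to 0x1050, giving a new length of 0x40 (64) bytes.
 */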

#if 0
	error = bus_dmamap_load_mbuf(xsc->sc_txdma->sc_dmat,
	    xsc->sc_tx_dmamap, xsc->sc_tx_mb_head, BUS_DMA_NOWAIT);
#else
	/*
	 * Copy the mbuf chain into the pre-aligned contiguous buffer,
	 * since arbitrary mbuf data rarely satisfies the DMA engine's
	 * begin/end alignment constraints.
	 */
	{
		u_char *buf = xsc->sc_txbuf;
		int buflen = 0;

		buflen = m->m_pkthdr.len;

		{
			u_char *p = buf;

			for (m = xsc->sc_tx_mb_head; m; m = m->m_next) {
				if (m->m_len == 0)
					continue;
				memcpy(p, mtod(m, u_char *), m->m_len);
				p += m->m_len;
			}
			/* Pad runt packets out to the minimum frame size. */
			if (buflen < ETHER_MIN_LEN - ETHER_CRC_LEN) {
				memset(p, 0,
				    ETHER_MIN_LEN - ETHER_CRC_LEN - buflen);
				buflen = ETHER_MIN_LEN - ETHER_CRC_LEN;
			}
		}

		error = bus_dmamap_load(xsc->sc_txdma->sc_dmat,
		    xsc->sc_tx_dmamap, buf, buflen, NULL, BUS_DMA_NOWAIT);
	}
#endif
	if (error) {
		aprint_error_dev(sc->sc_dev,
		    "can't load tx buffer, error = %d\n", error);
		m_freem(xsc->sc_tx_mb_head);
		xsc->sc_tx_mb_head = NULL;
		return (error);
	}

#ifdef DIAGNOSTIC
	if (xsc->sc_tx_loaded != 0) {
		panic("%s: sc_tx_loaded is %d", device_xname(sc->sc_dev),
		    xsc->sc_tx_loaded);
	}
#endif

	bus_dmamap_sync(xsc->sc_txdma->sc_dmat, xsc->sc_tx_dmamap, 0,
	    xsc->sc_tx_dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE);

	return (0);
}
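
/*
 * Note that sc_tx_loaded doubles as a flag and a sanity counter: it is
 * raised by xe_dma_tx_continue() when the map is handed to the DMA
 * engine, and dropped again by xe_dma_tx_shutdown() (or xe_dma_reset())
 * after the POSTWRITE sync and unload.
 */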

int
xe_dma_tx_isactive(struct mb8795_softc *sc)
{
	struct xe_softc *xsc = (struct xe_softc *)sc;

	return (xsc->sc_tx_loaded != 0);
}

/****************************************************************/

void
xe_dma_tx_completed(bus_dmamap_t map, void *arg)
{
#if defined (XE_DEBUG) || defined (DIAGNOSTIC)
	struct mb8795_softc *sc = arg;
#endif
#ifdef DIAGNOSTIC
	struct xe_softc *xsc = (struct xe_softc *)sc;
#endif

	DPRINTF(("%s: xe_dma_tx_completed()\n", device_xname(sc->sc_dev)));

#ifdef DIAGNOSTIC
	if (!xsc->sc_tx_loaded) {
		panic("%s: tx completed never loaded",
		    device_xname(sc->sc_dev));
	}
	if (map != xsc->sc_tx_dmamap) {
		panic("%s: unexpected tx completed map",
		    device_xname(sc->sc_dev));
	}
#endif
}

void
xe_dma_tx_shutdown(void *arg)
{
	struct mb8795_softc *sc = arg;
	struct xe_softc *xsc = (struct xe_softc *)sc;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	DPRINTF(("%s: xe_dma_tx_shutdown()\n", device_xname(sc->sc_dev)));

#ifdef DIAGNOSTIC
	if (!xsc->sc_tx_loaded) {
		panic("%s: tx shutdown never loaded",
		    device_xname(sc->sc_dev));
	}
#endif

	if (turbo)
		MB_WRITE_REG(sc, MB8795_TXMODE, MB8795_TXMODE_TURBO1);
	if (xsc->sc_tx_loaded) {
		bus_dmamap_sync(xsc->sc_txdma->sc_dmat, xsc->sc_tx_dmamap,
		    0, xsc->sc_tx_dmamap->dm_mapsize,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(xsc->sc_txdma->sc_dmat, xsc->sc_tx_dmamap);
		m_freem(xsc->sc_tx_mb_head);
		xsc->sc_tx_mb_head = NULL;

		xsc->sc_tx_loaded--;
	}

#ifdef DIAGNOSTIC
	if (xsc->sc_tx_loaded != 0) {
		panic("%s: sc_tx_loaded is %d", device_xname(sc->sc_dev),
		    xsc->sc_tx_loaded);
	}
#endif

	ifp->if_timer = 0;

#if 1
	if ((ifp->if_flags & IFF_RUNNING) && !IF_IS_EMPTY(&sc->sc_tx_snd)) {
		void mb8795_start_dma(struct mb8795_softc *); /* XXXX */

		mb8795_start_dma(sc);
	}
#endif

#if 0
	/* Enable ready interrupt */
	MB_WRITE_REG(sc, MB8795_TXMASK,
	    MB_READ_REG(sc, MB8795_TXMASK)
	    | MB8795_TXMASK_TXRXIE/* READYIE */);
#endif
}

void
xe_dma_rx_completed(bus_dmamap_t map, void *arg)
{
	struct mb8795_softc *sc = arg;
	struct xe_softc *xsc = (struct xe_softc *)sc;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	if (ifp->if_flags & IFF_RUNNING) {
		xsc->sc_rx_completed_idx++;
		xsc->sc_rx_completed_idx %= MB8795_NRXBUFS;

		DPRINTF(("%s: xe_dma_rx_completed(), "
		    "sc_rx_completed_idx = %d\n",
		    device_xname(sc->sc_dev), xsc->sc_rx_completed_idx));

#ifdef DIAGNOSTIC
		if (map != xsc->sc_rx_dmamap[xsc->sc_rx_completed_idx]) {
			panic("%s: unexpected rx dmamap completed",
			    device_xname(sc->sc_dev));
		}
#endif
	}
#ifdef DIAGNOSTIC
	else
		DPRINTF(("%s: unexpected rx dmamap completed while if not "
		    "running\n", device_xname(sc->sc_dev)));
#endif
}

void
xe_dma_rx_shutdown(void *arg)
{
	struct mb8795_softc *sc = arg;
	struct xe_softc *xsc = (struct xe_softc *)sc;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	if (ifp->if_flags & IFF_RUNNING) {
		DPRINTF(("%s: xe_dma_rx_shutdown(), restarting.\n",
		    device_xname(sc->sc_dev)));

		nextdma_start(xsc->sc_rxdma, DMACSR_SETREAD);
		if (turbo)
			MB_WRITE_REG(sc, MB8795_RXMODE,
			    MB8795_RXMODE_TEST | MB8795_RXMODE_MULTICAST);
	}
#ifdef DIAGNOSTIC
	else
		DPRINTF(("%s: unexpected rx DMA shutdown while if not "
		    "running\n", device_xname(sc->sc_dev)));
#endif
}

/*
 * Load a dmamap with a freshly allocated mbuf cluster.
 */
struct mbuf *
xe_dma_rxmap_load(struct mb8795_softc *sc, bus_dmamap_t map)
{
	struct xe_softc *xsc = (struct xe_softc *)sc;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct mbuf *m;
	int error;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m) {
		MCLGET(m, M_DONTWAIT);
		if ((m->m_flags & M_EXT) == 0) {
			m_freem(m);
			m = NULL;
		} else {
			m->m_len = MCLBYTES;
		}
	}
	if (!m) {
		/*
		 * @@@ Handle this gracefully by reusing a scratch buffer
		 * or something.
		 */
		panic("Unable to get memory for incoming ethernet");
	}

	/*
	 * Align the buffer; @@@ NeXT-specific, perhaps M_ALIGN should be
	 * used here instead.  Shrink the length first so there is room
	 * to move the start forward.
	 */
	{
		u_char *buf = m->m_data;
		int buflen = m->m_len;

		buflen -= DMA_ENDALIGNMENT + DMA_BEGINALIGNMENT;
		REALIGN_DMABUF(buf, buflen);
		m->m_data = buf;
		m->m_len = buflen;
	}

	m->m_pkthdr.rcvif = ifp;
	m->m_pkthdr.len = m->m_len;

	error = bus_dmamap_load_mbuf(xsc->sc_rxdma->sc_dmat,
	    map, m, BUS_DMA_NOWAIT);

	bus_dmamap_sync(xsc->sc_rxdma->sc_dmat, map, 0,
	    map->dm_mapsize, BUS_DMASYNC_PREREAD);

	if (error) {
		DPRINTF(("DEBUG: m->m_data = %p, m->m_len = %d\n",
		    m->m_data, m->m_len));
		DPRINTF(("DEBUG: MCLBYTES = %d, map->_dm_size = %ld\n",
		    MCLBYTES, map->_dm_size));

		panic("%s: can't load rx mbuf chain, error = %d",
		    device_xname(sc->sc_dev), error);
		/* NOTREACHED */
	}

	return (m);
}

bus_dmamap_t
xe_dma_rx_continue(void *arg)
{
	struct mb8795_softc *sc = arg;
	struct xe_softc *xsc = (struct xe_softc *)sc;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	bus_dmamap_t map = NULL;

	if (ifp->if_flags & IFF_RUNNING) {
		if (((xsc->sc_rx_loaded_idx + 1) % MB8795_NRXBUFS) ==
		    xsc->sc_rx_handled_idx) {
			/*
			 * Make space by dropping the oldest completed
			 * but unhandled packet.
			 */
			struct mbuf *m;

			m = xe_dma_rx_mbuf(sc);
			if (m)
				m_freem(m);
#ifdef DIAGNOSTIC
			DPRINTF(("%s: out of receive DMA buffers\n",
			    device_xname(sc->sc_dev)));
#endif
		}
		xsc->sc_rx_loaded_idx++;
		xsc->sc_rx_loaded_idx %= MB8795_NRXBUFS;
		map = xsc->sc_rx_dmamap[xsc->sc_rx_loaded_idx];

		DPRINTF(("%s: xe_dma_rx_continue() sc_rx_loaded_idx = %d\n",
		    device_xname(sc->sc_dev), xsc->sc_rx_loaded_idx));
	}
#ifdef DIAGNOSTIC
	else
		panic("%s: unexpected rx DMA continue while if not running",
		    device_xname(sc->sc_dev));
#endif

	return (map);
}

bus_dmamap_t
xe_dma_tx_continue(void *arg)
{
	struct mb8795_softc *sc = arg;
	struct xe_softc *xsc = (struct xe_softc *)sc;
	bus_dmamap_t map;

	DPRINTF(("%s: xe_dma_tx_continue()\n", device_xname(sc->sc_dev)));

	if (xsc->sc_tx_loaded) {
		/* Only one tx map exists; nothing more to hand out. */
		map = NULL;
	} else {
		map = xsc->sc_tx_dmamap;
		xsc->sc_tx_loaded++;
	}

#ifdef DIAGNOSTIC
	if (xsc->sc_tx_loaded != 1) {
		panic("%s: sc_tx_loaded is %d", device_xname(sc->sc_dev),
		    xsc->sc_tx_loaded);
	}
#endif

	return (map);
}