/*	$NetBSD: qe.c,v 1.25 2002/09/27 02:24:33 thorpej Exp $	*/

/*-
 * Copyright (c) 1999 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Paul Kranenburg.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1998 Jason L. Wright.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the authors may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Driver for the SBus qec+qe QuadEthernet board.
 *
 * This driver was written using the AMD MACE Am79C940 documentation, some
 * ideas gleaned from the S/Linux driver for this card, Solaris header files,
 * and a loan of a card from Paul Southworth of the Internet Engineering
 * Group (www.ieng.com).
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: qe.c,v 1.25 2002/09/27 02:24:33 thorpej Exp $");

#define QEDEBUG

#include "opt_ddb.h"
#include "opt_inet.h"
#include "opt_ccitt.h"
#include "opt_llc.h"
#include "opt_ns.h"
#include "bpfilter.h"
#include "rnd.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/errno.h>
#include <sys/ioctl.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/syslog.h>
#include <sys/device.h>
#include <sys/malloc.h>
#if NRND > 0
#include <sys/rnd.h>
#endif

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_types.h>
#include <net/netisr.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/if_inarp.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#endif

#ifdef NS
#include <netns/ns.h>
#include <netns/ns_if.h>
#endif

#if NBPFILTER > 0
#include <net/bpf.h>
#include <net/bpfdesc.h>
#endif

#include <machine/bus.h>
#include <machine/intr.h>
#include <machine/autoconf.h>

#include <dev/sbus/sbusvar.h>
#include <dev/sbus/qecreg.h>
#include <dev/sbus/qecvar.h>
#include <dev/sbus/qereg.h>

struct qe_softc {
	struct device	sc_dev;		/* base device */
	struct sbusdev	sc_sd;		/* sbus device */
	bus_space_tag_t	sc_bustag;	/* bus & dma tags */
	bus_dma_tag_t	sc_dmatag;
	bus_dmamap_t	sc_dmamap;
	struct ethercom	sc_ethercom;
	struct ifmedia	sc_ifmedia;	/* interface media */

	struct qec_softc *sc_qec;	/* QEC parent */

	bus_space_handle_t sc_qr;	/* QEC registers */
	bus_space_handle_t sc_mr;	/* MACE registers */
	bus_space_handle_t sc_cr;	/* channel registers */

	int	sc_channel;		/* channel number */
	u_int	sc_rev;			/* board revision */

	int	sc_burst;

	struct qec_ring	sc_rb;		/* Packet Ring Buffer */

	/* MAC address */
	u_int8_t sc_enaddr[6];

#ifdef QEDEBUG
	int	sc_debug;
#endif
};

int	qematch __P((struct device *, struct cfdata *, void *));
void	qeattach __P((struct device *, struct device *, void *));

void	qeinit __P((struct qe_softc *));
void	qestart __P((struct ifnet *));
void	qestop __P((struct qe_softc *));
void	qewatchdog __P((struct ifnet *));
int	qeioctl __P((struct ifnet *, u_long, caddr_t));
void	qereset __P((struct qe_softc *));

int	qeintr __P((void *));
int	qe_eint __P((struct qe_softc *, u_int32_t));
int	qe_rint __P((struct qe_softc *));
int	qe_tint __P((struct qe_softc *));
void	qe_mcreset __P((struct qe_softc *));

static int	qe_put __P((struct qe_softc *, int, struct mbuf *));
static void	qe_read __P((struct qe_softc *, int, int));
static struct mbuf	*qe_get __P((struct qe_softc *, int, int));

/* ifmedia callbacks */
void	qe_ifmedia_sts __P((struct ifnet *, struct ifmediareq *));
int	qe_ifmedia_upd __P((struct ifnet *));

struct cfattach qe_ca = {
	sizeof(struct qe_softc), qematch, qeattach
};

int
qematch(parent, cf, aux)
	struct device *parent;
	struct cfdata *cf;
	void *aux;
{
	struct sbus_attach_args *sa = aux;

	return (strcmp(cf->cf_name, sa->sa_name) == 0);
}

void
qeattach(parent, self, aux)
	struct device *parent, *self;
	void *aux;
{
	struct sbus_attach_args *sa = aux;
	struct qec_softc *qec = (struct qec_softc *)parent;
	struct qe_softc *sc = (struct qe_softc *)self;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	int node = sa->sa_node;
	bus_dma_tag_t dmatag = sa->sa_dmatag;
	bus_dma_segment_t seg;
	bus_size_t size;
	int rseg, error;
	extern void myetheraddr __P((u_char *));

	if (sa->sa_nreg < 2) {
		printf("%s: only %d register sets\n",
			self->dv_xname, sa->sa_nreg);
		return;
	}

	if (bus_space_map(sa->sa_bustag,
			  (bus_addr_t)BUS_ADDR(
				sa->sa_reg[0].oa_space,
				sa->sa_reg[0].oa_base),
			  (bus_size_t)sa->sa_reg[0].oa_size,
			  0, &sc->sc_cr) != 0) {
		printf("%s: cannot map registers\n", self->dv_xname);
		return;
	}

	if (bus_space_map(sa->sa_bustag,
			  (bus_addr_t)BUS_ADDR(
				sa->sa_reg[1].oa_space,
				sa->sa_reg[1].oa_base),
			  (bus_size_t)sa->sa_reg[1].oa_size,
			  0, &sc->sc_mr) != 0) {
		printf("%s: cannot map registers\n", self->dv_xname);
		return;
	}

	sc->sc_rev = PROM_getpropint(node, "mace-version", -1);
	printf(" rev %x", sc->sc_rev);

	sc->sc_bustag = sa->sa_bustag;
	sc->sc_dmatag = sa->sa_dmatag;
	sc->sc_qec = qec;
	sc->sc_qr = qec->sc_regs;

	sc->sc_channel = PROM_getpropint(node, "channel#", -1);
	sc->sc_burst = qec->sc_burst;

	qestop(sc);

	/* Note: no interrupt level passed */
	(void)bus_intr_establish(sa->sa_bustag, 0, IPL_NET, 0, qeintr, sc);
	myetheraddr(sc->sc_enaddr);

	/*
	 * Allocate descriptor ring and buffers.
	 */

	/* for now, allocate as many bufs as there are ring descriptors */
	sc->sc_rb.rb_ntbuf = QEC_XD_RING_MAXSIZE;
	sc->sc_rb.rb_nrbuf = QEC_XD_RING_MAXSIZE;

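	/*
	 * Compute the size of a single DMA area holding both descriptor
	 * rings and all transmit and receive packet buffers; it is
	 * carved up by qec_meminit() when the interface is initialized.
	 */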
	size =	QEC_XD_RING_MAXSIZE * sizeof(struct qec_xd) +
		QEC_XD_RING_MAXSIZE * sizeof(struct qec_xd) +
		sc->sc_rb.rb_ntbuf * QE_PKT_BUF_SZ +
		sc->sc_rb.rb_nrbuf * QE_PKT_BUF_SZ;

	/* Get a DMA handle */
	if ((error = bus_dmamap_create(dmatag, size, 1, size, 0,
				       BUS_DMA_NOWAIT, &sc->sc_dmamap)) != 0) {
		printf("%s: DMA map create error %d\n", self->dv_xname, error);
		return;
	}

	/* Allocate DMA buffer */
	if ((error = bus_dmamem_alloc(dmatag, size, 0, 0,
				      &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
		printf("%s: DMA buffer alloc error %d\n",
			self->dv_xname, error);
		return;
	}

	/* Map DMA buffer in CPU addressable space */
	if ((error = bus_dmamem_map(dmatag, &seg, rseg, size,
				    &sc->sc_rb.rb_membase,
				    BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
		printf("%s: DMA buffer map error %d\n",
			self->dv_xname, error);
		bus_dmamem_free(dmatag, &seg, rseg);
		return;
	}

	/* Load the buffer */
	if ((error = bus_dmamap_load(dmatag, sc->sc_dmamap,
				     sc->sc_rb.rb_membase, size, NULL,
				     BUS_DMA_NOWAIT)) != 0) {
		printf("%s: DMA buffer map load error %d\n",
			self->dv_xname, error);
		bus_dmamem_unmap(dmatag, sc->sc_rb.rb_membase, size);
		bus_dmamem_free(dmatag, &seg, rseg);
		return;
	}
	sc->sc_rb.rb_dmabase = sc->sc_dmamap->dm_segs[0].ds_addr;

	/* Initialize media properties */
	ifmedia_init(&sc->sc_ifmedia, 0, qe_ifmedia_upd, qe_ifmedia_sts);
	ifmedia_add(&sc->sc_ifmedia,
		    IFM_MAKEWORD(IFM_ETHER,IFM_10_T,0,0),
		    0, NULL);
	ifmedia_add(&sc->sc_ifmedia,
		    IFM_MAKEWORD(IFM_ETHER,IFM_10_5,0,0),
		    0, NULL);
	ifmedia_add(&sc->sc_ifmedia,
		    IFM_MAKEWORD(IFM_ETHER,IFM_AUTO,0,0),
		    0, NULL);
	ifmedia_set(&sc->sc_ifmedia, IFM_ETHER|IFM_AUTO);

	bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_start = qestart;
	ifp->if_ioctl = qeioctl;
	ifp->if_watchdog = qewatchdog;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_NOTRAILERS |
	    IFF_MULTICAST;
	IFQ_SET_READY(&ifp->if_snd);

	/* Attach the interface. */
	if_attach(ifp);
	ether_ifattach(ifp, sc->sc_enaddr);

	printf(" address %s\n", ether_sprintf(sc->sc_enaddr));
}

/*
 * Pull data off an interface.
 * Len is the length of data, with local net header stripped.
 * We copy the data into mbufs.  When full cluster sized units are present,
 * we copy into clusters.
 */
static __inline__ struct mbuf *
qe_get(sc, idx, totlen)
	struct qe_softc *sc;
	int idx, totlen;
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct mbuf *m;
	struct mbuf *top, **mp;
	int len, pad, boff = 0;
	caddr_t bp;

	bp = sc->sc_rb.rb_rxbuf + (idx % sc->sc_rb.rb_nrbuf) * QE_PKT_BUF_SZ;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (NULL);
	m->m_pkthdr.rcvif = ifp;
	m->m_pkthdr.len = totlen;
	pad = ALIGN(sizeof(struct ether_header)) - sizeof(struct ether_header);
	m->m_data += pad;
	len = MHLEN - pad;
	top = NULL;
	mp = &top;

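	/*
	 * Copy the packet out of the receive buffer into a chain of
	 * mbufs, switching to clusters for the follow-on mbufs whenever
	 * at least MINCLSIZE bytes are still left to copy.
	 */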
	while (totlen > 0) {
		if (top) {
			MGET(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				m_freem(top);
				return (NULL);
			}
			len = MLEN;
		}
		if (top && totlen >= MINCLSIZE) {
			MCLGET(m, M_DONTWAIT);
			if (m->m_flags & M_EXT)
				len = MCLBYTES;
		}
		m->m_len = len = min(totlen, len);
		bcopy(bp + boff, mtod(m, caddr_t), len);
		boff += len;
		totlen -= len;
		*mp = m;
		mp = &m->m_next;
	}

	return (top);
}

/*
 * Routine to copy from mbuf chain to transmit buffer in
 * network buffer memory.
 */
__inline__ int
qe_put(sc, idx, m)
	struct qe_softc *sc;
	int idx;
	struct mbuf *m;
{
	struct mbuf *n;
	int len, tlen = 0, boff = 0;
	caddr_t bp;

	bp = sc->sc_rb.rb_txbuf + (idx % sc->sc_rb.rb_ntbuf) * QE_PKT_BUF_SZ;

	for (; m; m = n) {
		len = m->m_len;
		if (len == 0) {
			MFREE(m, n);
			continue;
		}
		bcopy(mtod(m, caddr_t), bp+boff, len);
		boff += len;
		tlen += len;
		MFREE(m, n);
	}
	return (tlen);
}

/*
 * Pass a packet to the higher levels.
 */
__inline__ void
qe_read(sc, idx, len)
	struct qe_softc *sc;
	int idx, len;
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct mbuf *m;

	if (len <= sizeof(struct ether_header) ||
	    len > ETHERMTU + sizeof(struct ether_header)) {

		printf("%s: invalid packet size %d; dropping\n",
			ifp->if_xname, len);

		ifp->if_ierrors++;
		return;
	}

	/*
	 * Pull packet off interface.
	 */
	m = qe_get(sc, idx, len);
	if (m == NULL) {
		ifp->if_ierrors++;
		return;
	}
	ifp->if_ipackets++;

#if NBPFILTER > 0
	/*
	 * Check if there's a BPF listener on this interface.
	 * If so, hand off the raw packet to BPF.
	 */
	if (ifp->if_bpf)
		bpf_mtap(ifp->if_bpf, m);
#endif
	/* Pass the packet up. */
	(*ifp->if_input)(ifp, m);
}

/*
 * Start output on interface.
 * We make two assumptions here:
 *  1) that the current priority is set to splnet _before_ this code
 *     is called *and* is returned to the appropriate priority after
 *     return
 *  2) that the IFF_OACTIVE flag is checked before this code is called
 *     (i.e. that the output part of the interface is idle)
 */
void
qestart(ifp)
	struct ifnet *ifp;
{
	struct qe_softc *sc = (struct qe_softc *)ifp->if_softc;
	struct qec_xd *txd = sc->sc_rb.rb_txd;
	struct mbuf *m;
	unsigned int bix, len;
	unsigned int ntbuf = sc->sc_rb.rb_ntbuf;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	bix = sc->sc_rb.rb_tdhead;

	for (;;) {
		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (m == 0)
			break;

#if NBPFILTER > 0
		/*
		 * If BPF is listening on this interface, let it see the
		 * packet before we commit it to the wire.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif

		/*
		 * Copy the mbuf chain into the transmit buffer.
		 */
		len = qe_put(sc, bix, m);

		/*
		 * Initialize the transmit descriptor: hand it to the QEC
		 * (QEC_XD_OWN) as a complete single-descriptor packet
		 * (SOP|EOP), then kick the channel to start transmission.
		 */
		txd[bix].xd_flags = QEC_XD_OWN | QEC_XD_SOP | QEC_XD_EOP |
				    (len & QEC_XD_LENGTH);
		bus_space_write_4(sc->sc_bustag, sc->sc_cr, QE_CRI_CTRL,
				  QE_CR_CTRL_TWAKEUP);

		if (++bix == QEC_XD_RING_MAXSIZE)
			bix = 0;

		if (++sc->sc_rb.rb_td_nbusy == ntbuf) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}
	}

	sc->sc_rb.rb_tdhead = bix;
}

void
qestop(sc)
	struct qe_softc *sc;
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mr = sc->sc_mr;
	bus_space_handle_t cr = sc->sc_cr;
	int n;

#if defined(SUN4U) || defined(__GNUC__)
	(void)&t;
#endif
	/* Stop the MACE: issue a software reset and wait for it to clear */
	bus_space_write_1(t, mr, QE_MRI_BIUCC, QE_MR_BIUCC_SWRST);
	for (n = 200; n > 0; n--) {
		if ((bus_space_read_1(t, mr, QE_MRI_BIUCC) &
			QE_MR_BIUCC_SWRST) == 0)
			break;
		DELAY(20);
	}

	/* then reset */
	bus_space_write_4(t, cr, QE_CRI_CTRL, QE_CR_CTRL_RESET);
	for (n = 200; n > 0; n--) {
		if ((bus_space_read_4(t, cr, QE_CRI_CTRL) &
			QE_CR_CTRL_RESET) == 0)
			break;
		DELAY(20);
	}
}

/*
 * Reset interface.
 */
void
qereset(sc)
	struct qe_softc *sc;
{
	int s;

	s = splnet();
	qestop(sc);
	qeinit(sc);
	splx(s);
}

void
qewatchdog(ifp)
	struct ifnet *ifp;
{
	struct qe_softc *sc = ifp->if_softc;

	log(LOG_ERR, "%s: device timeout\n", sc->sc_dev.dv_xname);
	ifp->if_oerrors++;

	qereset(sc);
}

/*
 * Interrupt dispatch.
 */
int
qeintr(arg)
	void *arg;
{
	struct qe_softc *sc = (struct qe_softc *)arg;
	bus_space_tag_t t = sc->sc_bustag;
	u_int32_t qecstat, qestat;
	int r = 0;

#if defined(SUN4U) || defined(__GNUC__)
	(void)&t;
#endif
	/* Read QEC status and channel status */
	qecstat = bus_space_read_4(t, sc->sc_qr, QEC_QRI_STAT);
#ifdef QEDEBUG
	if (sc->sc_debug) {
		printf("qe%d: intr: qecstat=%x\n", sc->sc_channel, qecstat);
	}
#endif

	/* Filter out status for this channel */
	qecstat = qecstat >> (4 * sc->sc_channel);
	if ((qecstat & 0xf) == 0)
		return (r);

	qestat = bus_space_read_4(t, sc->sc_cr, QE_CRI_STAT);

#ifdef QEDEBUG
	if (sc->sc_debug) {
		char bits[64]; int i;
		bus_space_tag_t t = sc->sc_bustag;
		bus_space_handle_t mr = sc->sc_mr;

		printf("qe%d: intr: qestat=%s\n", sc->sc_channel,
			bitmask_snprintf(qestat, QE_CR_STAT_BITS, bits, sizeof(bits)));

		printf("MACE registers:\n");
		for (i = 0 ; i < 32; i++) {
			printf(" m[%d]=%x,", i, bus_space_read_1(t, mr, i));
			if (((i+1) & 7) == 0)
				printf("\n");
		}
	}
#endif

	if (qestat & QE_CR_STAT_ALLERRORS) {
#ifdef QEDEBUG
		if (sc->sc_debug) {
			char bits[64];
			printf("qe%d: eint: qestat=%s\n", sc->sc_channel,
				bitmask_snprintf(qestat, QE_CR_STAT_BITS, bits,
				sizeof(bits)));
		}
#endif
		r |= qe_eint(sc, qestat);
		if (r == -1)
			return (1);
	}

	if (qestat & QE_CR_STAT_TXIRQ)
		r |= qe_tint(sc);

	if (qestat & QE_CR_STAT_RXIRQ)
		r |= qe_rint(sc);

	return (r);
}

/*
 * Transmit interrupt.
 */
int
qe_tint(sc)
	struct qe_softc *sc;
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	unsigned int bix, txflags;

	bix = sc->sc_rb.rb_tdtail;

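	/*
	 * Reclaim descriptors for packets the hardware has finished
	 * transmitting, stopping at the first descriptor it still owns.
	 */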
	for (;;) {
		if (sc->sc_rb.rb_td_nbusy <= 0)
			break;

		txflags = sc->sc_rb.rb_txd[bix].xd_flags;

		if (txflags & QEC_XD_OWN)
			break;

		ifp->if_flags &= ~IFF_OACTIVE;
		ifp->if_opackets++;

		if (++bix == QEC_XD_RING_MAXSIZE)
			bix = 0;

		--sc->sc_rb.rb_td_nbusy;
	}

	sc->sc_rb.rb_tdtail = bix;

	qestart(ifp);

	if (sc->sc_rb.rb_td_nbusy == 0)
		ifp->if_timer = 0;

	return (1);
}

/*
 * Receive interrupt.
 */
int
qe_rint(sc)
	struct qe_softc *sc;
{
	struct qec_xd *xd = sc->sc_rb.rb_rxd;
	unsigned int bix, len;
	unsigned int nrbuf = sc->sc_rb.rb_nrbuf;
#ifdef QEDEBUG
	int npackets = 0;
#endif

	bix = sc->sc_rb.rb_rdtail;

	/*
	 * Process all buffers with valid data.
	 */
	for (;;) {
		len = xd[bix].xd_flags;
		if (len & QEC_XD_OWN)
			break;

#ifdef QEDEBUG
		npackets++;
#endif

		len &= QEC_XD_LENGTH;
		len -= 4;
		qe_read(sc, bix, len);

		/*
		 * Give the descriptor nrbuf slots ahead back to the
		 * hardware with a full-size buffer length and the OWN
		 * bit set.
		 */
		xd[(bix+nrbuf) % QEC_XD_RING_MAXSIZE].xd_flags =
			QEC_XD_OWN | (QE_PKT_BUF_SZ & QEC_XD_LENGTH);

		if (++bix == QEC_XD_RING_MAXSIZE)
			bix = 0;
	}
#ifdef QEDEBUG
	if (npackets == 0 && sc->sc_debug)
		printf("%s: rint: no packets; rb index %d; status 0x%x\n",
			sc->sc_dev.dv_xname, bix, len);
#endif

	sc->sc_rb.rb_rdtail = bix;

	return (1);
}

/*
 * Error interrupt.
 */
int
qe_eint(sc, why)
	struct qe_softc *sc;
	u_int32_t why;
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	int r = 0, rst = 0;

	if (why & QE_CR_STAT_EDEFER) {
		printf("%s: excessive tx defers.\n", sc->sc_dev.dv_xname);
		r |= 1;
		ifp->if_oerrors++;
	}

	if (why & QE_CR_STAT_CLOSS) {
		printf("%s: no carrier, link down?\n", sc->sc_dev.dv_xname);
		ifp->if_oerrors++;
		r |= 1;
	}

	if (why & QE_CR_STAT_ERETRIES) {
		printf("%s: excessive tx retries\n", sc->sc_dev.dv_xname);
		ifp->if_oerrors++;
		r |= 1;
		rst = 1;
	}


	if (why & QE_CR_STAT_LCOLL) {
		printf("%s: late tx collision\n", sc->sc_dev.dv_xname);
		ifp->if_oerrors++;
		r |= 1;
		rst = 1;
	}

	if (why & QE_CR_STAT_FUFLOW) {
		printf("%s: tx fifo underflow\n", sc->sc_dev.dv_xname);
		ifp->if_oerrors++;
		r |= 1;
		rst = 1;
	}

	if (why & QE_CR_STAT_JERROR) {
		printf("%s: jabber seen\n", sc->sc_dev.dv_xname);
		r |= 1;
	}

	if (why & QE_CR_STAT_BERROR) {
		printf("%s: babble seen\n", sc->sc_dev.dv_xname);
		r |= 1;
	}

	if (why & QE_CR_STAT_TCCOFLOW) {
		ifp->if_collisions += 256;
		ifp->if_oerrors += 256;
		r |= 1;
	}

	if (why & QE_CR_STAT_TXDERROR) {
		printf("%s: tx descriptor is bad\n", sc->sc_dev.dv_xname);
		rst = 1;
		r |= 1;
	}

	if (why & QE_CR_STAT_TXLERR) {
		printf("%s: tx late error\n", sc->sc_dev.dv_xname);
		ifp->if_oerrors++;
		rst = 1;
		r |= 1;
	}

	if (why & QE_CR_STAT_TXPERR) {
		printf("%s: tx dma parity error\n", sc->sc_dev.dv_xname);
		ifp->if_oerrors++;
		rst = 1;
		r |= 1;
	}

	if (why & QE_CR_STAT_TXSERR) {
		printf("%s: tx dma sbus error ack\n", sc->sc_dev.dv_xname);
		ifp->if_oerrors++;
		rst = 1;
		r |= 1;
	}

	if (why & QE_CR_STAT_RCCOFLOW) {
		ifp->if_collisions += 256;
		ifp->if_ierrors += 256;
		r |= 1;
	}

	if (why & QE_CR_STAT_RUOFLOW) {
		ifp->if_ierrors += 256;
		r |= 1;
	}

	if (why & QE_CR_STAT_MCOFLOW) {
		ifp->if_ierrors += 256;
		r |= 1;
	}

	if (why & QE_CR_STAT_RXFOFLOW) {
		printf("%s: rx fifo overflow\n", sc->sc_dev.dv_xname);
		ifp->if_ierrors++;
		r |= 1;
	}

	if (why & QE_CR_STAT_RLCOLL) {
		printf("%s: rx late collision\n", sc->sc_dev.dv_xname);
		ifp->if_ierrors++;
		ifp->if_collisions++;
		r |= 1;
	}

	if (why & QE_CR_STAT_FCOFLOW) {
		ifp->if_ierrors += 256;
		r |= 1;
	}

	if (why & QE_CR_STAT_CECOFLOW) {
		ifp->if_ierrors += 256;
		r |= 1;
	}

	if (why & QE_CR_STAT_RXDROP) {
		printf("%s: rx packet dropped\n", sc->sc_dev.dv_xname);
		ifp->if_ierrors++;
		r |= 1;
	}

	if (why & QE_CR_STAT_RXSMALL) {
		printf("%s: rx buffer too small\n", sc->sc_dev.dv_xname);
		ifp->if_ierrors++;
		r |= 1;
		rst = 1;
	}

	if (why & QE_CR_STAT_RXLERR) {
		printf("%s: rx late error\n", sc->sc_dev.dv_xname);
		ifp->if_ierrors++;
		r |= 1;
		rst = 1;
	}

	if (why & QE_CR_STAT_RXPERR) {
		printf("%s: rx dma parity error\n", sc->sc_dev.dv_xname);
		ifp->if_ierrors++;
		r |= 1;
		rst = 1;
	}

	if (why & QE_CR_STAT_RXSERR) {
		printf("%s: rx dma sbus error ack\n", sc->sc_dev.dv_xname);
		ifp->if_ierrors++;
		r |= 1;
		rst = 1;
	}

	if (r == 0)
		printf("%s: unexpected interrupt error: %08x\n",
			sc->sc_dev.dv_xname, why);

	if (rst) {
		printf("%s: resetting...\n", sc->sc_dev.dv_xname);
		qereset(sc);
		return (-1);
	}

	return (r);
}

int
qeioctl(ifp, cmd, data)
	struct ifnet *ifp;
	u_long cmd;
	caddr_t data;
{
	struct qe_softc *sc = ifp->if_softc;
	struct ifaddr *ifa = (struct ifaddr *)data;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			qeinit(sc);
			arp_ifinit(ifp, ifa);
			break;
#endif /* INET */
#ifdef NS
		case AF_NS:
		    {
			struct ns_addr *ina = &IA_SNS(ifa)->sns_addr;

			if (ns_nullhost(*ina))
				ina->x_host =
				    *(union ns_host *)LLADDR(ifp->if_sadl);
			else
				bcopy(ina->x_host.c_host, LLADDR(ifp->if_sadl),
				    sizeof(sc->sc_enaddr));
			/* Set new address. */
			qeinit(sc);
			break;
		    }
#endif /* NS */
		default:
			qeinit(sc);
			break;
		}
		break;

	case SIOCSIFFLAGS:
		if ((ifp->if_flags & IFF_UP) == 0 &&
		    (ifp->if_flags & IFF_RUNNING) != 0) {
			/*
			 * If interface is marked down and it is running, then
			 * stop it.
			 */
			qestop(sc);
			ifp->if_flags &= ~IFF_RUNNING;

		} else if ((ifp->if_flags & IFF_UP) != 0 &&
			   (ifp->if_flags & IFF_RUNNING) == 0) {
			/*
			 * If interface is marked up and it is stopped, then
			 * start it.
			 */
			qeinit(sc);

		} else {
			/*
			 * Reset the interface to pick up changes in any other
			 * flags that affect hardware registers.
			 */
			qestop(sc);
			qeinit(sc);
		}
#ifdef QEDEBUG
		sc->sc_debug = (ifp->if_flags & IFF_DEBUG) != 0 ? 1 : 0;
#endif
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		error = (cmd == SIOCADDMULTI) ?
			ether_addmulti(ifr, &sc->sc_ethercom):
			ether_delmulti(ifr, &sc->sc_ethercom);

		if (error == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			qe_mcreset(sc);
			error = 0;
		}
		break;

	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_ifmedia, cmd);
		break;

	default:
		error = EINVAL;
		break;
	}

	splx(s);
	return (error);
}


void
qeinit(sc)
	struct qe_softc *sc;
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t cr = sc->sc_cr;
	bus_space_handle_t mr = sc->sc_mr;
	struct qec_softc *qec = sc->sc_qec;
	u_int32_t qecaddr;
	u_int8_t *ea;
	int s;

#if defined(SUN4U) || defined(__GNUC__)
	(void)&t;
#endif
	s = splnet();

	qestop(sc);

	/*
	 * Allocate descriptor ring and buffers
	 */
	qec_meminit(&sc->sc_rb, QE_PKT_BUF_SZ);

	/* Channel registers: */
	bus_space_write_4(t, cr, QE_CRI_RXDS, (u_int32_t)sc->sc_rb.rb_rxddma);
	bus_space_write_4(t, cr, QE_CRI_TXDS, (u_int32_t)sc->sc_rb.rb_txddma);

	bus_space_write_4(t, cr, QE_CRI_RIMASK, 0);
	bus_space_write_4(t, cr, QE_CRI_TIMASK, 0);
	bus_space_write_4(t, cr, QE_CRI_QMASK, 0);
	bus_space_write_4(t, cr, QE_CRI_MMASK, QE_CR_MMASK_RXCOLL);
	bus_space_write_4(t, cr, QE_CRI_CCNT, 0);
	bus_space_write_4(t, cr, QE_CRI_PIPG, 0);

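	/*
	 * Each channel gets its own slice of the QEC's local buffer
	 * memory: the receive buffer starts at channel# * msize and
	 * the transmit buffer follows sc_rsize bytes after it.
	 */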
	qecaddr = sc->sc_channel * qec->sc_msize;
	bus_space_write_4(t, cr, QE_CRI_RXWBUF, qecaddr);
	bus_space_write_4(t, cr, QE_CRI_RXRBUF, qecaddr);
	bus_space_write_4(t, cr, QE_CRI_TXWBUF, qecaddr + qec->sc_rsize);
	bus_space_write_4(t, cr, QE_CRI_TXRBUF, qecaddr + qec->sc_rsize);

	/* MACE registers: */
	bus_space_write_1(t, mr, QE_MRI_PHYCC, QE_MR_PHYCC_ASEL);
	bus_space_write_1(t, mr, QE_MRI_XMTFC, QE_MR_XMTFC_APADXMT);
	bus_space_write_1(t, mr, QE_MRI_RCVFC, 0);

	/*
	 * Mask MACE's receive interrupt, since we're being notified
	 * by the QEC after DMA completes.
	 */
	bus_space_write_1(t, mr, QE_MRI_IMR,
			  QE_MR_IMR_CERRM | QE_MR_IMR_RCVINTM);

	bus_space_write_1(t, mr, QE_MRI_BIUCC,
			  QE_MR_BIUCC_BSWAP | QE_MR_BIUCC_64TS);

	bus_space_write_1(t, mr, QE_MRI_FIFOFC,
			  QE_MR_FIFOCC_TXF16 | QE_MR_FIFOCC_RXF32 |
			  QE_MR_FIFOCC_RFWU | QE_MR_FIFOCC_TFWU);

	bus_space_write_1(t, mr, QE_MRI_PLSCC, QE_MR_PLSCC_TP);

	/*
	 * Station address
	 */
	ea = sc->sc_enaddr;
	bus_space_write_1(t, mr, QE_MRI_IAC,
			  QE_MR_IAC_ADDRCHG | QE_MR_IAC_PHYADDR);
	bus_space_write_multi_1(t, mr, QE_MRI_PADR, ea, 6);

	/* Apply media settings */
	qe_ifmedia_upd(ifp);

	/*
	 * Clear Logical address filter
	 */
	bus_space_write_1(t, mr, QE_MRI_IAC,
			  QE_MR_IAC_ADDRCHG | QE_MR_IAC_LOGADDR);
	bus_space_set_multi_1(t, mr, QE_MRI_LADRF, 0, 8);
	bus_space_write_1(t, mr, QE_MRI_IAC, 0);

	/* Clear missed packet count (register cleared on read) */
	(void)bus_space_read_1(t, mr, QE_MRI_MPC);

#if 0
	/* test register: */
	bus_space_write_1(t, mr, QE_MRI_UTR, 0);
#endif

	/* Reset multicast filter */
	qe_mcreset(sc);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;
	splx(s);
}

/*
 * Reset multicast filter.
 */
void
qe_mcreset(sc)
	struct qe_softc *sc;
{
	struct ethercom *ec = &sc->sc_ethercom;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mr = sc->sc_mr;
	struct ether_multi *enm;
	struct ether_multistep step;
	u_int32_t crc;
	u_int16_t hash[4];
	u_int8_t octet, maccc, *ladrp = (u_int8_t *)&hash[0];
	int i, j;

#if defined(SUN4U) || defined(__GNUC__)
	(void)&t;
#endif

	/* We also enable transmitter & receiver here */
	maccc = QE_MR_MACCC_ENXMT | QE_MR_MACCC_ENRCV;

	if (ifp->if_flags & IFF_PROMISC) {
		maccc |= QE_MR_MACCC_PROM;
		bus_space_write_1(t, mr, QE_MRI_MACCC, maccc);
		return;
	}

	if (ifp->if_flags & IFF_ALLMULTI) {
		bus_space_write_1(t, mr, QE_MRI_IAC,
				  QE_MR_IAC_ADDRCHG | QE_MR_IAC_LOGADDR);
		bus_space_set_multi_1(t, mr, QE_MRI_LADRF, 0xff, 8);
		bus_space_write_1(t, mr, QE_MRI_IAC, 0);
		bus_space_write_1(t, mr, QE_MRI_MACCC, maccc);
		return;
	}

	hash[3] = hash[2] = hash[1] = hash[0] = 0;

	ETHER_FIRST_MULTI(step, ec, enm);
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
			   ETHER_ADDR_LEN) != 0) {
			/*
			 * We must listen to a range of multicast
			 * addresses. For now, just accept all
			 * multicasts, rather than trying to set only
			 * those filter bits needed to match the range.
			 * (At this time, the only use of address
			 * ranges is for IP multicast routing, for
			 * which the range is big enough to require
			 * all bits set.)
			 */
			bus_space_write_1(t, mr, QE_MRI_IAC,
				QE_MR_IAC_ADDRCHG | QE_MR_IAC_LOGADDR);
			bus_space_set_multi_1(t, mr, QE_MRI_LADRF, 0xff, 8);
			bus_space_write_1(t, mr, QE_MRI_IAC, 0);
			ifp->if_flags |= IFF_ALLMULTI;
			/*
			 * The filter is already set to all ones; enable
			 * the MAC and return so the hash computed below
			 * does not overwrite it.
			 */
			bus_space_write_1(t, mr, QE_MRI_MACCC, maccc);
			return;
		}

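		/*
		 * Compute the little-endian CRC-32 of the multicast
		 * address; its 6 most significant bits select one of
		 * the 64 positions in the logical address filter.
		 */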
		crc = 0xffffffff;

		for (i = 0; i < ETHER_ADDR_LEN; i++) {
			octet = enm->enm_addrlo[i];

			for (j = 0; j < 8; j++) {
				if ((crc & 1) ^ (octet & 1)) {
					crc >>= 1;
					crc ^= MC_POLY_LE;
				}
				else
					crc >>= 1;
				octet >>= 1;
			}
		}

		crc >>= 26;
		hash[crc >> 4] |= 1 << (crc & 0xf);
		ETHER_NEXT_MULTI(step, enm);
	}

	bus_space_write_1(t, mr, QE_MRI_IAC,
			  QE_MR_IAC_ADDRCHG | QE_MR_IAC_LOGADDR);
	bus_space_write_multi_1(t, mr, QE_MRI_LADRF, ladrp, 8);
	bus_space_write_1(t, mr, QE_MRI_IAC, 0);
	bus_space_write_1(t, mr, QE_MRI_MACCC, maccc);
}

/*
 * Get current media settings.
 */
void
qe_ifmedia_sts(ifp, ifmr)
	struct ifnet *ifp;
	struct ifmediareq *ifmr;
{
	struct qe_softc *sc = ifp->if_softc;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mr = sc->sc_mr;
	u_int8_t v;

#if defined(SUN4U) || defined(__GNUC__)
	(void)&t;
#endif
	v = bus_space_read_1(t, mr, QE_MRI_PLSCC);

	switch (v & QE_MR_PLSCC_PORTMASK) {
	case QE_MR_PLSCC_TP:
		ifmr->ifm_active = IFM_ETHER | IFM_10_T;
		break;
	case QE_MR_PLSCC_AUI:
		ifmr->ifm_active = IFM_ETHER | IFM_10_5;
		break;
	case QE_MR_PLSCC_GPSI:
	case QE_MR_PLSCC_DAI:
		/* GPSI and DAI ports are not represented in our media list */
		break;
	}

	v = bus_space_read_1(t, mr, QE_MRI_PHYCC);
	ifmr->ifm_status |= IFM_AVALID;
	if ((v & QE_MR_PHYCC_LNKFL) != 0)
		ifmr->ifm_status &= ~IFM_ACTIVE;
	else
		ifmr->ifm_status |= IFM_ACTIVE;

}

/*
 * Set media options.
 */
int
qe_ifmedia_upd(ifp)
	struct ifnet *ifp;
{
	struct qe_softc *sc = ifp->if_softc;
	struct ifmedia *ifm = &sc->sc_ifmedia;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mr = sc->sc_mr;
	int newmedia = ifm->ifm_media;
	u_int8_t plscc, phycc;

#if defined(SUN4U) || defined(__GNUC__)
	(void)&t;
#endif
	if (IFM_TYPE(newmedia) != IFM_ETHER)
		return (EINVAL);

	plscc = bus_space_read_1(t, mr, QE_MRI_PLSCC) & ~QE_MR_PLSCC_PORTMASK;
	phycc = bus_space_read_1(t, mr, QE_MRI_PHYCC) & ~QE_MR_PHYCC_ASEL;

	if (IFM_SUBTYPE(newmedia) == IFM_AUTO)
		phycc |= QE_MR_PHYCC_ASEL;
	else if (IFM_SUBTYPE(newmedia) == IFM_10_T)
		plscc |= QE_MR_PLSCC_TP;
	else if (IFM_SUBTYPE(newmedia) == IFM_10_5)
		plscc |= QE_MR_PLSCC_AUI;

	bus_space_write_1(t, mr, QE_MRI_PLSCC, plscc);
	bus_space_write_1(t, mr, QE_MRI_PHYCC, phycc);

	return (0);
}