qe.c revision 1.1 1 /* $NetBSD: qe.c,v 1.1 1999/01/17 20:44:18 pk Exp $ */
2
3 /*-
4 * Copyright (c) 1999 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Paul Kranenburg.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement:
20 * This product includes software developed by the NetBSD
21 * Foundation, Inc. and its contributors.
22 * 4. Neither the name of The NetBSD Foundation nor the names of its
23 * contributors may be used to endorse or promote products derived
24 * from this software without specific prior written permission.
25 *
26 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
27 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
30 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36 * POSSIBILITY OF SUCH DAMAGE.
37 */
38
39 /*
40 * Copyright (c) 1998 Jason L. Wright.
41 * All rights reserved.
42 *
43 * Redistribution and use in source and binary forms, with or without
44 * modification, are permitted provided that the following conditions
45 * are met:
46 * 1. Redistributions of source code must retain the above copyright
47 * notice, this list of conditions and the following disclaimer.
48 * 2. Redistributions in binary form must reproduce the above copyright
49 * notice, this list of conditions and the following disclaimer in the
50 * documentation and/or other materials provided with the distribution.
51 * 3. The name of the authors may not be used to endorse or promote products
52 * derived from this software without specific prior written permission.
53 *
54 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
55 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
56 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
57 * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
58 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
59 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
60 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
61 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
62 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
63 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
64 */
65
66 /*
67 * Driver for the SBus qec+qe QuadEthernet board.
68 *
69 * This driver was written using the AMD MACE Am79C940 documentation, some
70 * ideas gleaned from the S/Linux driver for this card, Solaris header files,
71 * and a loan of a card from Paul Southworth of the Internet Engineering
72 * Group (www.ieng.com).
73 */
74
75 #include "opt_ddb.h"
76 #include "opt_inet.h"
77 #include "opt_ccitt.h"
78 #include "opt_llc.h"
79 #include "opt_ns.h"
80 #include "bpfilter.h"
81 #include "rnd.h"
82
83 #include <sys/param.h>
84 #include <sys/systm.h>
85 #include <sys/kernel.h>
86 #include <sys/errno.h>
87 #include <sys/ioctl.h>
88 #include <sys/mbuf.h>
89 #include <sys/socket.h>
90 #include <sys/syslog.h>
91 #include <sys/device.h>
92 #include <sys/malloc.h>
93 #if NRND > 0
94 #include <sys/rnd.h>
95 #endif
96
97 #include <net/if.h>
98 #include <net/if_dl.h>
99 #include <net/if_types.h>
100 #include <net/netisr.h>
101 #include <net/if_media.h>
102 #include <net/if_ether.h>
103
104 #ifdef INET
105 #include <netinet/in.h>
106 #include <netinet/if_inarp.h>
107 #include <netinet/in_systm.h>
108 #include <netinet/in_var.h>
109 #include <netinet/ip.h>
110 #endif
111
112 #if NBPFILTER > 0
113 #include <net/bpf.h>
114 #include <net/bpfdesc.h>
115 #endif
116
117 #include <machine/autoconf.h>
118 #include <machine/cpu.h>
119
120 #include <sparc/dev/sbusvar.h>
121
122 #include <dev/sbus/qecreg.h>
123 #include <dev/sbus/qecvar.h>
124 #include <dev/sbus/qereg.h>
125
/*
 * Per-channel software state for one qe Ethernet channel hanging
 * off a qec controller.
 */
struct qe_softc {
	struct device sc_dev;		/* base device */
	struct sbusdev sc_sd;		/* sbus device */
	bus_space_tag_t sc_bustag;	/* bus & dma tags */
	bus_dma_tag_t sc_dmatag;
	struct ethercom sc_ethercom;	/* common ethernet state (incl. if) */
	struct ifmedia sc_ifmedia;	/* interface media */

	struct qec_softc *sc_qec;	/* QEC parent */

	bus_space_handle_t sc_qr;	/* QEC registers */
	bus_space_handle_t sc_mr;	/* MACE registers */
	bus_space_handle_t sc_cr;	/* channel registers */

	int sc_channel;			/* channel number */
	u_int sc_rev;			/* board revision */

	int sc_promisc;			/* cached PROMISC|ALLMULTI if_flags */
	int sc_burst;			/* DMA burst size, copied from parent */

	struct qec_ring sc_rb;		/* Packet Ring Buffer */

	/* MAC address */
	u_int8_t sc_enaddr[6];
};
151
152 int qematch __P((struct device *, struct cfdata *, void *));
153 void qeattach __P((struct device *, struct device *, void *));
154
155 void qeinit __P((struct qe_softc *));
156 void qestart __P((struct ifnet *));
157 void qestop __P((struct qe_softc *));
158 void qewatchdog __P((struct ifnet *));
159 int qeioctl __P((struct ifnet *, u_long, caddr_t));
160 void qereset __P((struct qe_softc *));
161
162 int qeintr __P((void *));
163 int qe_eint __P((struct qe_softc *, u_int32_t));
164 int qe_rint __P((struct qe_softc *));
165 int qe_tint __P((struct qe_softc *));
166 void qe_mcreset __P((struct qe_softc *));
167
168 static int qe_put __P((struct qe_softc *, int, struct mbuf *));
169 static void qe_read __P((struct qe_softc *, int, int));
170 static struct mbuf *qe_get __P((struct qe_softc *, int, int));
171
172 /* ifmedia callbacks */
173 void qe_ifmedia_sts __P((struct ifnet *, struct ifmediareq *));
174 int qe_ifmedia_upd __P((struct ifnet *));
175
/* Autoconfiguration glue: softc size, match and attach entry points. */
struct cfattach qe_ca = {
	sizeof(struct qe_softc), qematch, qeattach
};
179
180 int
181 qematch(parent, cf, aux)
182 struct device *parent;
183 struct cfdata *cf;
184 void *aux;
185 {
186 struct sbus_attach_args *sa = aux;
187
188 return (strcmp(cf->cf_driver->cd_name, sa->sa_name) == 0);
189 }
190
191 void
192 qeattach(parent, self, aux)
193 struct device *parent, *self;
194 void *aux;
195 {
196 struct sbus_attach_args *sa = aux;
197 struct qec_softc *qec = (struct qec_softc *)parent;
198 struct qe_softc *sc = (struct qe_softc *)self;
199 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
200 int node = sa->sa_node;
201 bus_dma_segment_t seg;
202 bus_size_t size;
203 int rseg, error;
204 extern void myetheraddr __P((u_char *));
205
206 if (sa->sa_nreg < 2) {
207 printf("%s: only %d register sets\n",
208 self->dv_xname, sa->sa_nreg);
209 return;
210 }
211
212 if (bus_space_map2(sa->sa_bustag,
213 (bus_type_t)sa->sa_reg[0].sbr_slot,
214 (bus_addr_t)sa->sa_reg[0].sbr_offset,
215 (bus_size_t)sa->sa_reg[0].sbr_size,
216 BUS_SPACE_MAP_LINEAR, 0, &sc->sc_cr) != 0) {
217 printf("%s: cannot map registers\n", self->dv_xname);
218 return;
219 }
220
221 if (bus_space_map2(sa->sa_bustag,
222 (bus_type_t)sa->sa_reg[1].sbr_slot,
223 (bus_addr_t)sa->sa_reg[1].sbr_offset,
224 (bus_size_t)sa->sa_reg[1].sbr_size,
225 BUS_SPACE_MAP_LINEAR, 0, &sc->sc_mr) != 0) {
226 printf("%s: cannot map registers\n", self->dv_xname);
227 return;
228 }
229
230 sc->sc_rev = getpropint(node, "mace-version", -1);
231 printf(" rev %x", sc->sc_rev);
232
233 sc->sc_qec = qec;
234 sc->sc_qr = qec->sc_regs;
235
236 sc->sc_channel = getpropint(node, "channel#", -1);
237 sc->sc_burst = qec->sc_burst;
238
239 qestop(sc);
240
241 /* Note: no interrupt level passed */
242 (void)bus_intr_establish(sa->sa_bustag, 0, 0, qeintr, sc);
243 myetheraddr(sc->sc_enaddr);
244
245 /*
246 * Allocate descriptor ring and buffers.
247 */
248
249 /* for now, allocate as many bufs as there are ring descriptors */
250 sc->sc_rb.rb_ntbuf = QEC_XD_RING_MAXSIZE;
251 sc->sc_rb.rb_nrbuf = QEC_XD_RING_MAXSIZE;
252
253 size = QEC_XD_RING_MAXSIZE * sizeof(struct qec_xd) +
254 QEC_XD_RING_MAXSIZE * sizeof(struct qec_xd) +
255 sc->sc_rb.rb_ntbuf * QE_PKT_BUF_SZ +
256 sc->sc_rb.rb_nrbuf * QE_PKT_BUF_SZ;
257 if ((error = bus_dmamem_alloc(sa->sa_dmatag, size,
258 NBPG, 0,
259 &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
260 printf("%s: DMA buffer alloc error %d\n",
261 self->dv_xname, error);
262 return;
263 }
264 sc->sc_rb.rb_dmabase = seg.ds_addr;
265
266 if ((error = bus_dmamem_map(sa->sa_dmatag, &seg, rseg, size,
267 &sc->sc_rb.rb_membase,
268 BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
269 printf("%s: DMA buffer map error %d\n",
270 self->dv_xname, error);
271 bus_dmamem_free(sa->sa_dmatag, &seg, rseg);
272 return;
273 }
274
275 /* Initialize media properties */
276 ifmedia_init(&sc->sc_ifmedia, 0, qe_ifmedia_upd, qe_ifmedia_sts);
277 ifmedia_add(&sc->sc_ifmedia,
278 IFM_MAKEWORD(IFM_ETHER,IFM_10_T,0,0),
279 0, NULL);
280 ifmedia_add(&sc->sc_ifmedia,
281 IFM_MAKEWORD(IFM_ETHER,IFM_10_5,0,0),
282 0, NULL);
283 ifmedia_add(&sc->sc_ifmedia,
284 IFM_MAKEWORD(IFM_ETHER,IFM_AUTO,0,0),
285 0, NULL);
286 ifmedia_set(&sc->sc_ifmedia, IFM_ETHER|IFM_AUTO);
287
288 bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);
289 ifp->if_softc = sc;
290 ifp->if_start = qestart;
291 ifp->if_ioctl = qeioctl;
292 ifp->if_watchdog = qewatchdog;
293 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_NOTRAILERS |
294 IFF_MULTICAST;
295
296 /* Attach the interface. */
297 if_attach(ifp);
298 ether_ifattach(ifp, sc->sc_enaddr);
299
300 printf(" address %s\n", ether_sprintf(sc->sc_enaddr));
301
302 #if NBPFILTER > 0
303 bpfattach(&ifp->if_bpf, ifp, DLT_EN10MB,
304 sizeof(struct ether_header));
305 #endif
306 }
307
/*
 * Pull data off an interface.
 * Len is the length of data, with local net header stripped.
 * We copy the data into mbufs. When full cluster sized units are present,
 * we copy into clusters.
 */
static __inline__ struct mbuf *
qe_get(sc, idx, totlen)
	struct qe_softc *sc;
	int idx, totlen;
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct mbuf *m;
	struct mbuf *top, **mp;
	int len, pad, boff = 0;
	caddr_t bp;

	/* Source: the receive buffer this descriptor index maps to */
	bp = sc->sc_rb.rb_rxbuf + (idx % sc->sc_rb.rb_nrbuf) * QE_PKT_BUF_SZ;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (NULL);
	m->m_pkthdr.rcvif = ifp;
	m->m_pkthdr.len = totlen;
	/*
	 * Offset the payload so the IP header ends up aligned once the
	 * ethernet header has been stripped.
	 */
	pad = ALIGN(sizeof(struct ether_header)) - sizeof(struct ether_header);
	m->m_data += pad;
	len = MHLEN - pad;
	top = NULL;
	mp = &top;

	while (totlen > 0) {
		/* First mbuf is the header allocated above; get more after */
		if (top) {
			MGET(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				m_freem(top);
				return (NULL);
			}
			len = MLEN;
		}
		/* Use a cluster when enough data remains to justify one */
		if (top && totlen >= MINCLSIZE) {
			MCLGET(m, M_DONTWAIT);
			if (m->m_flags & M_EXT)
				len = MCLBYTES;
		}
		m->m_len = len = min(totlen, len);
		bcopy(bp + boff, mtod(m, caddr_t), len);
		boff += len;
		totlen -= len;
		*mp = m;
		mp = &m->m_next;
	}

	return (top);
}
362
363 /*
364 * Routine to copy from mbuf chain to transmit buffer in
365 * network buffer memory.
366 */
367 __inline__ int
368 qe_put(sc, idx, m)
369 struct qe_softc *sc;
370 int idx;
371 struct mbuf *m;
372 {
373 struct mbuf *n;
374 int len, tlen = 0, boff = 0;
375 caddr_t bp;
376
377 bp = sc->sc_rb.rb_txbuf + (idx % sc->sc_rb.rb_ntbuf) * QE_PKT_BUF_SZ;
378
379 for (; m; m = n) {
380 len = m->m_len;
381 if (len == 0) {
382 MFREE(m, n);
383 continue;
384 }
385 bcopy(mtod(m, caddr_t), bp+boff, len);
386 boff += len;
387 tlen += len;
388 MFREE(m, n);
389 }
390 return (tlen);
391 }
392
/*
 * Pass a received packet at ring slot `idx' (payload length `len',
 * CRC already stripped by the caller) up to the higher levels.
 */
__inline__ void
qe_read(sc, idx, len)
	struct qe_softc *sc;
	int idx, len;
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ether_header *eh;
	struct mbuf *m;

	/* Sanity-check the hardware-reported length before copying */
	if (len <= sizeof(struct ether_header) ||
	    len > ETHERMTU + sizeof(struct ether_header)) {

		printf("%s: invalid packet size %d; dropping\n",
			ifp->if_xname, len);

		ifp->if_ierrors++;
		return;
	}

	/*
	 * Pull packet off interface.
	 */
	m = qe_get(sc, idx, len);
	if (m == NULL) {
		/* out of mbufs: drop the packet */
		ifp->if_ierrors++;
		return;
	}
	ifp->if_ipackets++;

	/* We assume that the header fit entirely in one mbuf. */
	eh = mtod(m, struct ether_header *);

#if NBPFILTER > 0
	/*
	 * Check if there's a BPF listener on this interface.
	 * If so, hand off the raw packet to BPF.
	 */
	if (ifp->if_bpf)
		bpf_mtap(ifp->if_bpf, m);
#endif
	/* Pass the packet up, with the ether header sort-of removed. */
	m_adj(m, sizeof(struct ether_header));
	ether_input(ifp, eh, m);
}
440
441 /*
442 * Start output on interface.
443 * We make two assumptions here:
444 * 1) that the current priority is set to splnet _before_ this code
445 * is called *and* is returned to the appropriate priority after
446 * return
447 * 2) that the IFF_OACTIVE flag is checked before this code is called
448 * (i.e. that the output part of the interface is idle)
449 */
450 void
451 qestart(ifp)
452 struct ifnet *ifp;
453 {
454 struct qe_softc *sc = (struct qe_softc *)ifp->if_softc;
455 struct qec_xd *txd = sc->sc_rb.rb_txd;
456 struct mbuf *m;
457 unsigned int bix, len;
458 unsigned int ntbuf = sc->sc_rb.rb_ntbuf;
459
460 if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
461 return;
462
463 bix = sc->sc_rb.rb_tdhead;
464
465 for (;;) {
466 IF_DEQUEUE(&ifp->if_snd, m);
467 if (m == 0)
468 break;
469
470 #if NBPFILTER > 0
471 /*
472 * If BPF is listening on this interface, let it see the
473 * packet before we commit it to the wire.
474 */
475 if (ifp->if_bpf)
476 bpf_mtap(ifp->if_bpf, m);
477 #endif
478
479 /*
480 * Copy the mbuf chain into the transmit buffer.
481 */
482 len = qe_put(sc, bix, m);
483
484 /*
485 * Initialize transmit registers and start transmission
486 */
487 txd[bix].xd_flags = QEC_XD_OWN | QEC_XD_SOP | QEC_XD_EOP |
488 (len & QEC_XD_LENGTH);
489 bus_space_write_4(sc->sc_bustag, sc->sc_cr, QE_CRI_CTRL,
490 QE_CR_CTRL_TWAKEUP);
491
492 if (++bix == QEC_XD_RING_MAXSIZE)
493 bix = 0;
494
495 if (++sc->sc_rb.rb_td_nbusy == ntbuf) {
496 ifp->if_flags |= IFF_OACTIVE;
497 break;
498 }
499 }
500
501 sc->sc_rb.rb_tdhead = bix;
502 }
503
504 void
505 qestop(sc)
506 struct qe_softc *sc;
507 {
508 bus_space_tag_t t = sc->sc_bustag;
509 bus_space_handle_t mr = sc->sc_mr;
510 bus_space_handle_t cr = sc->sc_cr;
511 int n;
512
513 /* Stop the schwurst */
514 bus_space_write_1(t, mr, QE_MRI_BIUCC, QE_MR_BIUCC_SWRST);
515 for (n = 200; n > 0; n--) {
516 if ((bus_space_read_1(t, mr, QE_MRI_BIUCC) &
517 QE_MR_BIUCC_SWRST) == 0)
518 break;
519 DELAY(20);
520 }
521
522 /* then reset */
523 bus_space_write_4(t, cr, QE_CRI_CTRL, QE_CR_CTRL_RESET);
524 for (n = 200; n > 0; n--) {
525 if ((bus_space_read_4(t, cr, QE_CRI_CTRL) &
526 QE_CR_CTRL_RESET) == 0)
527 break;
528 DELAY(20);
529 }
530 }
531
/*
 * Reset interface: stop the hardware and bring it back up, with
 * network interrupts blocked for the duration.
 */
void
qereset(sc)
	struct qe_softc *sc;
{
	int s = splnet();

	qestop(sc);
	qeinit(sc);
	splx(s);
}
546
547 void
548 qewatchdog(ifp)
549 struct ifnet *ifp;
550 {
551 struct qe_softc *sc = ifp->if_softc;
552
553 log(LOG_ERR, "%s: device timeout\n", sc->sc_dev.dv_xname);
554 ++sc->sc_ethercom.ec_if.if_oerrors;
555
556 qereset(sc);
557 }
558
/*
 * Interrupt dispatch: read the shared QEC status word, extract this
 * channel's nibble, and fan out to the error/transmit/receive handlers.
 * Returns non-zero if the interrupt was ours.
 */
int
qeintr(arg)
	void *arg;
{
	struct qe_softc *sc = (struct qe_softc *)arg;
	bus_space_tag_t t = sc->sc_bustag;
	u_int32_t qecstat, qestat;
	int r = 0;

	/* Read QEC status and channel status */
	qecstat = bus_space_read_4(t, sc->sc_qr, QEC_QRI_STAT);

	/* Filter out status for this channel (4 status bits per channel) */
	qecstat = qecstat >> (4 * sc->sc_channel);
	if ((qecstat & 0xf) == 0)
		return (r);

	qestat = bus_space_read_4(t, sc->sc_cr, QE_CRI_STAT);

	if (qestat & QE_CR_STAT_ALLERRORS) {
		r |= qe_eint(sc, qestat);
		/* -1 means qe_eint() reset the chip; don't touch it further */
		if (r == -1)
			return (1);
	}

	if (qestat & QE_CR_STAT_TXIRQ)
		r |= qe_tint(sc);

	if (qestat & QE_CR_STAT_RXIRQ)
		r |= qe_rint(sc);

	return (r);
}
595
/*
 * Transmit interrupt: reclaim descriptors the hardware has finished
 * with, restart output, and disarm the watchdog when the ring drains.
 */
int
qe_tint(sc)
	struct qe_softc *sc;
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	unsigned int bix, txflags;

	bix = sc->sc_rb.rb_tdtail;

	for (;;) {
		if (sc->sc_rb.rb_td_nbusy <= 0)
			break;

		txflags = sc->sc_rb.rb_txd[bix].xd_flags;

		/* Still owned by the hardware: stop reclaiming here */
		if (txflags & QEC_XD_OWN)
			break;

		/* At least one slot is free again; allow qestart() to run */
		ifp->if_flags &= ~IFF_OACTIVE;
		ifp->if_opackets++;

		if (++bix == QEC_XD_RING_MAXSIZE)
			bix = 0;

		--sc->sc_rb.rb_td_nbusy;
	}

	sc->sc_rb.rb_tdtail = bix;

	/* Kick the transmitter in case packets queued up meanwhile */
	qestart(ifp);

	if (sc->sc_rb.rb_td_nbusy == 0)
		ifp->if_timer = 0;

	return (1);
}
635
/*
 * Receive interrupt: drain all completed receive descriptors, pass
 * each packet up, and hand the buffers back to the hardware.
 */
int
qe_rint(sc)
	struct qe_softc *sc;
{
	struct qec_xd *xd = sc->sc_rb.rb_rxd;
	unsigned int bix, len;
	unsigned int nrbuf = sc->sc_rb.rb_nrbuf;

	bix = sc->sc_rb.rb_rdtail;

	/*
	 * Process all buffers with valid data.
	 */
	for (;;) {
		len = xd[bix].xd_flags;
		if (len & QEC_XD_OWN)
			break;

		len &= QEC_XD_LENGTH;
		/* strip the trailing FCS/CRC; qe_read() range-checks len */
		len -= 4;
		qe_read(sc, bix, len);

		/*
		 * Give the descriptor `nrbuf' slots ahead (the one whose
		 * buffer this slot will reuse) back to the hardware.
		 */
		xd[(bix+nrbuf) % QEC_XD_RING_MAXSIZE].xd_flags =
			QEC_XD_OWN | (QE_PKT_BUF_SZ & QEC_XD_LENGTH);

		if (++bix == QEC_XD_RING_MAXSIZE)
			bix = 0;
	}

	sc->sc_rb.rb_rdtail = bix;

	return (1);
}
673
/*
 * Error interrupt: decode the channel status word bit by bit, log and
 * count each condition.  Returns 1 if any known error bit was handled,
 * 0 if the status was unexpected, or -1 if the condition forced a
 * chip reset (caller must not touch the hardware afterwards).
 */
int
qe_eint(sc, why)
	struct qe_softc *sc;
	u_int32_t why;
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	/* r: at least one bit recognized; rst: a reset is required */
	int r = 0, rst = 0;

	/* --- transmit-side conditions --- */
	if (why & QE_CR_STAT_EDEFER) {
		printf("%s: excessive tx defers.\n", sc->sc_dev.dv_xname);
		r |= 1;
		ifp->if_oerrors++;
	}

	if (why & QE_CR_STAT_CLOSS) {
		printf("%s: no carrier, link down?\n", sc->sc_dev.dv_xname);
		ifp->if_oerrors++;
		r |= 1;
	}

	if (why & QE_CR_STAT_ERETRIES) {
		printf("%s: excessive tx retries\n", sc->sc_dev.dv_xname);
		ifp->if_oerrors++;
		r |= 1;
		rst = 1;
	}


	if (why & QE_CR_STAT_LCOLL) {
		printf("%s: late tx transmission\n", sc->sc_dev.dv_xname);
		ifp->if_oerrors++;
		r |= 1;
		rst = 1;
	}

	if (why & QE_CR_STAT_FUFLOW) {
		printf("%s: tx fifo underflow\n", sc->sc_dev.dv_xname);
		ifp->if_oerrors++;
		r |= 1;
		rst = 1;
	}

	if (why & QE_CR_STAT_JERROR) {
		printf("%s: jabber seen\n", sc->sc_dev.dv_xname);
		r |= 1;
	}

	if (why & QE_CR_STAT_BERROR) {
		printf("%s: babble seen\n", sc->sc_dev.dv_xname);
		r |= 1;
	}

	/* 8-bit hardware collision counter overflowed: add its modulus */
	if (why & QE_CR_STAT_TCCOFLOW) {
		ifp->if_collisions += 256;
		ifp->if_oerrors += 256;
		r |= 1;
	}

	if (why & QE_CR_STAT_TXDERROR) {
		printf("%s: tx descriptor is bad\n", sc->sc_dev.dv_xname);
		rst = 1;
		r |= 1;
	}

	if (why & QE_CR_STAT_TXLERR) {
		printf("%s: tx late error\n", sc->sc_dev.dv_xname);
		ifp->if_oerrors++;
		rst = 1;
		r |= 1;
	}

	if (why & QE_CR_STAT_TXPERR) {
		printf("%s: tx dma parity error\n", sc->sc_dev.dv_xname);
		ifp->if_oerrors++;
		rst = 1;
		r |= 1;
	}

	if (why & QE_CR_STAT_TXSERR) {
		printf("%s: tx dma sbus error ack\n", sc->sc_dev.dv_xname);
		ifp->if_oerrors++;
		rst = 1;
		r |= 1;
	}

	/* --- receive-side conditions --- */
	if (why & QE_CR_STAT_RCCOFLOW) {
		ifp->if_collisions += 256;
		ifp->if_ierrors += 256;
		r |= 1;
	}

	if (why & QE_CR_STAT_RUOFLOW) {
		ifp->if_ierrors += 256;
		r |= 1;
	}

	if (why & QE_CR_STAT_MCOFLOW) {
		ifp->if_ierrors += 256;
		r |= 1;
	}

	if (why & QE_CR_STAT_RXFOFLOW) {
		printf("%s: rx fifo overflow\n", sc->sc_dev.dv_xname);
		ifp->if_ierrors++;
		r |= 1;
	}

	if (why & QE_CR_STAT_RLCOLL) {
		printf("%s: rx late collision\n", sc->sc_dev.dv_xname);
		ifp->if_ierrors++;
		ifp->if_collisions++;
		r |= 1;
	}

	if (why & QE_CR_STAT_FCOFLOW) {
		ifp->if_ierrors += 256;
		r |= 1;
	}

	if (why & QE_CR_STAT_CECOFLOW) {
		ifp->if_ierrors += 256;
		r |= 1;
	}

	if (why & QE_CR_STAT_RXDROP) {
		printf("%s: rx packet dropped\n", sc->sc_dev.dv_xname);
		ifp->if_ierrors++;
		r |= 1;
	}

	if (why & QE_CR_STAT_RXSMALL) {
		printf("%s: rx buffer too small\n", sc->sc_dev.dv_xname);
		ifp->if_ierrors++;
		r |= 1;
		rst = 1;
	}

	if (why & QE_CR_STAT_RXLERR) {
		printf("%s: rx late error\n", sc->sc_dev.dv_xname);
		ifp->if_ierrors++;
		r |= 1;
		rst = 1;
	}

	if (why & QE_CR_STAT_RXPERR) {
		printf("%s: rx dma parity error\n", sc->sc_dev.dv_xname);
		ifp->if_ierrors++;
		r |= 1;
		rst = 1;
	}

	if (why & QE_CR_STAT_RXSERR) {
		printf("%s: rx dma sbus error ack\n", sc->sc_dev.dv_xname);
		ifp->if_ierrors++;
		r |= 1;
		rst = 1;
	}

	if (r == 0)
		printf("%s: unexpected interrupt error: %08x\n",
			sc->sc_dev.dv_xname, why);

	if (rst) {
		printf("%s: resetting...\n", sc->sc_dev.dv_xname);
		qereset(sc);
		return (-1);
	}

	return (r);
}
847
/*
 * Process an ioctl request: address assignment, flag changes,
 * multicast list maintenance and media selection.
 */
int
qeioctl(ifp, cmd, data)
	struct ifnet *ifp;
	u_long cmd;
	caddr_t data;
{
	struct qe_softc *sc = ifp->if_softc;
	struct ifaddr *ifa = (struct ifaddr *)data;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			qeinit(sc);
			arp_ifinit(ifp, ifa);
			break;
#endif /* INET */
#ifdef NS
		/* XXX - This code is probably wrong. */
		case AF_NS:
		    {
			struct ns_addr *ina = &IA_SNS(ifa)->sns_addr;

			if (ns_nullhost(*ina))
				ina->x_host = *(union ns_host *)
					(sc->sc_ethercom.ac_enaddr);
			else
				bcopy(ina->x_host.c_host,
				      sc->sc_enaddr, sizeof(sc->sc_enaddr));
			/* Set new address. */
			qeinit(sc);
			break;
		    }
#endif /* NS */
		default:
			qeinit(sc);
			break;
		}
		break;

	case SIOCSIFFLAGS:
		/* Remember the promiscuity-related flags for later */
		sc->sc_promisc = ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI);
		if ((ifp->if_flags & IFF_UP) == 0 &&
		    (ifp->if_flags & IFF_RUNNING) != 0) {
			/*
			 * If interface is marked down and it is running, then
			 * stop it.
			 */
			qestop(sc);
			ifp->if_flags &= ~IFF_RUNNING;
		} else if ((ifp->if_flags & IFF_UP) != 0 &&
			   (ifp->if_flags & IFF_RUNNING) == 0) {
			/*
			 * If interface is marked up and it is stopped, then
			 * start it.
			 */
			qeinit(sc);
		} else {
			/*
			 * Reset the interface to pick up changes in any other
			 * flags that affect hardware registers.
			 */
			qestop(sc);
			qeinit(sc);
		}
#ifdef QEDEBUG
		/* NOTE(review): sc_debug is not declared in the softc
		 * shown above; only compiles with QEDEBUG if added. */
		if (ifp->if_flags & IFF_DEBUG)
			sc->sc_debug = 1;
		else
			sc->sc_debug = 0;
#endif
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		error = (cmd == SIOCADDMULTI) ?
			ether_addmulti(ifr, &sc->sc_ethercom):
			ether_delmulti(ifr, &sc->sc_ethercom);

		if (error == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			qe_mcreset(sc);
			error = 0;
		}
		break;

	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_ifmedia, cmd);
		break;

	default:
		error = EINVAL;
		break;
	}

	splx(s);
	return (error);
}
956
957
/*
 * (Re)initialize the channel: set up the descriptor rings, program
 * the QEC channel registers and the MACE, load the station address,
 * apply media settings and enable the transmitter/receiver.
 */
void
qeinit(sc)
	struct qe_softc *sc;
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t cr = sc->sc_cr;
	bus_space_handle_t mr = sc->sc_mr;
	struct qec_softc *qec = sc->sc_qec;
	u_int32_t qecaddr;
	u_int8_t *ea;
	int i, s;

	/* NOTE(review): qereset() uses splnet(); splimp() here looks
	 * inconsistent — confirm the intended protection level. */
	s = splimp();
	qestop(sc);

	/*
	 * Allocate descriptor ring and buffers
	 */
	qec_meminit(&sc->sc_rb, QE_PKT_BUF_SZ);

	/* Channel registers: */
	bus_space_write_4(t, cr, QE_CRI_RXDS, (u_int32_t)sc->sc_rb.rb_rxddma);
	bus_space_write_4(t, cr, QE_CRI_TXDS, (u_int32_t)sc->sc_rb.rb_txddma);

	bus_space_write_4(t, cr, QE_CRI_RIMASK, 0);
	bus_space_write_4(t, cr, QE_CRI_TIMASK, 0);
	bus_space_write_4(t, cr, QE_CRI_QMASK, 0);
	bus_space_write_4(t, cr, QE_CRI_MMASK, QE_CR_MMASK_RXCOLL);
	bus_space_write_4(t, cr, QE_CRI_CCNT, 0);
	bus_space_write_4(t, cr, QE_CRI_PIPG, 0);

	/* This channel's slice of the QEC local-memory buffer */
	qecaddr = sc->sc_channel * qec->sc_msize;
	bus_space_write_4(t, cr, QE_CRI_RXWBUF, qecaddr);
	bus_space_write_4(t, cr, QE_CRI_RXRBUF, qecaddr);
	bus_space_write_4(t, cr, QE_CRI_TXWBUF, qecaddr + qec->sc_rsize);
	bus_space_write_4(t, cr, QE_CRI_TXRBUF, qecaddr + qec->sc_rsize);

	/* MACE registers: */
	bus_space_write_1(t, mr, QE_MRI_PHYCC, QE_MR_PHYCC_ASEL);
	bus_space_write_1(t, mr, QE_MRI_XMTFC, QE_MR_XMTFC_APADXMT);
	bus_space_write_1(t, mr, QE_MRI_RCVFC, 0);
	/* Mask all interrupt sources except collision and receive */
	bus_space_write_1(t, mr, QE_MRI_IMR,
			  QE_MR_IMR_CERRM | QE_MR_IMR_RCVINTM);
	bus_space_write_1(t, mr, QE_MRI_BIUCC,
			  QE_MR_BIUCC_BSWAP | QE_MR_BIUCC_64TS);

	bus_space_write_1(t, mr, QE_MRI_FIFOFC,
			  QE_MR_FIFOCC_TXF16 | QE_MR_FIFOCC_RXF32 |
			  QE_MR_FIFOCC_RFWU | QE_MR_FIFOCC_TFWU);

	bus_space_write_1(t, mr, QE_MRI_PLSCC, QE_MR_PLSCC_TP);

	/*
	 * Station address
	 */
	ea = sc->sc_enaddr;
	bus_space_write_1(t, mr, QE_MRI_IAC,
			  QE_MR_IAC_ADDRCHG | QE_MR_IAC_PHYADDR);
	bus_space_write_1(t, mr, QE_MRI_PADR, ea[0]);
	bus_space_write_1(t, mr, QE_MRI_PADR, ea[1]);
	bus_space_write_1(t, mr, QE_MRI_PADR, ea[2]);
	bus_space_write_1(t, mr, QE_MRI_PADR, ea[3]);
	bus_space_write_1(t, mr, QE_MRI_PADR, ea[4]);
	bus_space_write_1(t, mr, QE_MRI_PADR, ea[5]);

	/* Apply media settings */
	qe_ifmedia_upd(ifp);

	/*
	 * Logical address filter (cleared here; qe_mcreset() programs it)
	 */
	bus_space_write_1(t, mr, QE_MRI_IAC,
			  QE_MR_IAC_ADDRCHG | QE_MR_IAC_LOGADDR);
	for (i = 0; i < 8; i++)
		bus_space_write_1(t, mr, QE_MRI_LADRF, 0);
	bus_space_write_1(t, mr, QE_MRI_IAC, 0);

	/* Clear missed packet count (register cleared on read) */
	(void)bus_space_read_1(t, mr, QE_MRI_MPC);

	/* Enable transmitter & receiver */
	bus_space_write_1(t, mr, QE_MRI_MACCC,
			  QE_MR_MACCC_ENXMT | QE_MR_MACCC_ENRCV |
			  ((ifp->if_flags&IFF_PROMISC) ? QE_MR_MACCC_PROM : 0));

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;
	splx(s);
}
1048
/*
 * Reset multicast filter: program the MACE logical address filter
 * from the interface's multicast list (all-ones for ALLMULTI, hashed
 * CRC bits otherwise), then re-enable the transmitter and receiver.
 */
void
qe_mcreset(sc)
	struct qe_softc *sc;
{
	struct ethercom *ec = &sc->sc_ethercom;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mr = sc->sc_mr;
	struct ether_multi *enm;
	struct ether_multistep step;
	u_int32_t crc;
	u_int16_t hash[4];		/* 64-bit logical address filter */
	u_int8_t octet, maccc = 0, *ladrp = (u_int8_t *)&hash[0];
	int i, j;

	if (ifp->if_flags & IFF_ALLMULTI) {
		/* Accept all multicast: set every filter bit */
		bus_space_write_1(t, mr, QE_MRI_IAC,
				  QE_MR_IAC_ADDRCHG | QE_MR_IAC_LOGADDR);
		for (i = 0; i < 8; i++)
			bus_space_write_1(t, mr, QE_MRI_LADRF, 0xff);
		bus_space_write_1(t, mr, QE_MRI_IAC, 0);
	} else if (ifp->if_flags & IFF_PROMISC) {
		maccc |= QE_MR_MACCC_PROM;
	} else {
		hash[3] = hash[2] = hash[1] = hash[0] = 0;

		ETHER_FIRST_MULTI(step, ec, enm);
		while (enm != NULL) {
			if (bcmp(enm->enm_addrlo, enm->enm_addrhi,
				 ETHER_ADDR_LEN) != 0) {
				/*
				 * We must listen to a range of multicast
				 * addresses. For now, just accept all
				 * multicasts, rather than trying to set only
				 * those filter bits needed to match the range.
				 * (At this time, the only use of address
				 * ranges is for IP multicast routing, for
				 * which the range is big enough to require
				 * all bits set.)
				 */
				bus_space_write_1(t, mr, QE_MRI_IAC,
					QE_MR_IAC_ADDRCHG | QE_MR_IAC_LOGADDR);
				for (i = 0; i < 8; i++)
					bus_space_write_1(t, mr, QE_MRI_LADRF,
							  0xff);
				bus_space_write_1(t, mr, QE_MRI_IAC, 0);
				ifp->if_flags |= IFF_ALLMULTI;
				break;
			}

			/* Bit-serial CRC-32 (little-endian) of the address */
			crc = 0xffffffff;

			for (i = 0; i < ETHER_ADDR_LEN; i++) {
				octet = enm->enm_addrlo[i];

				for (j = 0; j < 8; j++) {
					if ((crc & 1) ^ (octet & 1)) {
						crc >>= 1;
						crc ^= MC_POLY_LE;
					}
					else
						crc >>= 1;
					octet >>= 1;
				}
			}

			/* Top 6 CRC bits index one of 64 filter bits */
			crc >>= 26;
			hash[crc >> 4] |= 1 << (crc & 0xf);
			ETHER_NEXT_MULTI(step, enm);
		}

		bus_space_write_1(t, mr, QE_MRI_IAC,
				  QE_MR_IAC_ADDRCHG | QE_MR_IAC_LOGADDR);
		for (i = 0; i < 8; i++)
			bus_space_write_1(t, mr, QE_MRI_LADRF, ladrp[i]);
		bus_space_write_1(t, mr, QE_MRI_IAC, 0);
	}

	bus_space_write_1(t, mr, QE_MRI_MACCC,
			  maccc | QE_MR_MACCC_ENXMT | QE_MR_MACCC_ENRCV);
}
1133
1134 /*
1135 * Get current media settings.
1136 */
1137 void
1138 qe_ifmedia_sts(ifp, ifmr)
1139 struct ifnet *ifp;
1140 struct ifmediareq *ifmr;
1141 {
1142 struct qe_softc *sc = ifp->if_softc;
1143 bus_space_tag_t t = sc->sc_bustag;
1144 bus_space_handle_t mr = sc->sc_mr;
1145 u_int8_t v;
1146
1147 v = bus_space_read_1(t, mr, QE_MRI_PLSCC);
1148
1149 switch (bus_space_read_1(t, mr, QE_MRI_PLSCC) & QE_MR_PLSCC_PORTMASK) {
1150 case QE_MR_PLSCC_TP:
1151 ifmr->ifm_active = IFM_ETHER | IFM_10_T;
1152 break;
1153 case QE_MR_PLSCC_AUI:
1154 ifmr->ifm_active = IFM_ETHER | IFM_10_5;
1155 break;
1156 case QE_MR_PLSCC_GPSI:
1157 case QE_MR_PLSCC_DAI:
1158 /* ... */
1159 break;
1160 }
1161
1162 v = bus_space_read_1(t, mr, QE_MRI_PHYCC);
1163 ifmr->ifm_status |= IFM_AVALID;
1164 if ((v & QE_MR_PHYCC_LNKFL) != 0)
1165 ifmr->ifm_status &= ~IFM_ACTIVE;
1166 else
1167 ifmr->ifm_status |= IFM_ACTIVE;
1168
1169 }
1170
1171 /*
1172 * Set media options.
1173 */
1174 int
1175 qe_ifmedia_upd(ifp)
1176 struct ifnet *ifp;
1177 {
1178 struct qe_softc *sc = ifp->if_softc;
1179 struct ifmedia *ifm = &sc->sc_ifmedia;
1180 bus_space_tag_t t = sc->sc_bustag;
1181 bus_space_handle_t mr = sc->sc_mr;
1182 int newmedia = ifm->ifm_media;
1183 u_int8_t plscc, phycc;
1184
1185 if (IFM_TYPE(newmedia) != IFM_ETHER)
1186 return (EINVAL);
1187
1188 plscc = bus_space_read_1(t, mr, QE_MRI_PLSCC) & ~QE_MR_PLSCC_PORTMASK;
1189 phycc = bus_space_read_1(t, mr, QE_MRI_PHYCC) & ~QE_MR_PHYCC_ASEL;
1190
1191 if (IFM_SUBTYPE(newmedia) == IFM_AUTO)
1192 phycc |= QE_MR_PHYCC_ASEL;
1193 else if (IFM_SUBTYPE(newmedia) == IFM_10_T)
1194 plscc |= QE_MR_PLSCC_TP;
1195 else if (IFM_SUBTYPE(newmedia) == IFM_10_5)
1196 plscc |= QE_MR_PLSCC_AUI;
1197
1198 bus_space_write_1(t, mr, QE_MRI_PLSCC, plscc);
1199 bus_space_write_1(t, mr, QE_MRI_PHYCC, phycc);
1200
1201 return (0);
1202 }
1203