1 /* $NetBSD: tulip.c,v 1.2 1999/09/01 05:07:03 thorpej Exp $ */
2
3 /*-
4 * Copyright (c) 1998, 1999 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
9 * NASA Ames Research Center.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 * 3. All advertising materials mentioning features or use of this software
20 * must display the following acknowledgement:
21 * This product includes software developed by the NetBSD
22 * Foundation, Inc. and its contributors.
23 * 4. Neither the name of The NetBSD Foundation nor the names of its
24 * contributors may be used to endorse or promote products derived
25 * from this software without specific prior written permission.
26 *
27 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
28 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
29 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
31 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
34 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
35 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
36 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37 * POSSIBILITY OF SUCH DAMAGE.
38 */
39
40 /*
41 * Device driver for the Digital Semiconductor ``Tulip'' (21x4x)
42 * Ethernet controller family, and a variety of clone chips.
43 */
44
45 #include "opt_inet.h"
46 #include "opt_ns.h"
47 #include "bpfilter.h"
48
49 #include <sys/param.h>
50 #include <sys/systm.h>
51 #include <sys/mbuf.h>
52 #include <sys/malloc.h>
53 #include <sys/kernel.h>
54 #include <sys/socket.h>
55 #include <sys/ioctl.h>
56 #include <sys/errno.h>
57 #include <sys/device.h>
58
59 #include <vm/vm.h> /* for PAGE_SIZE */
60
61 #include <net/if.h>
62 #include <net/if_dl.h>
63 #include <net/if_media.h>
64 #include <net/if_ether.h>
65
66 #if NBPFILTER > 0
67 #include <net/bpf.h>
68 #endif
69
70 #ifdef INET
71 #include <netinet/in.h>
72 #include <netinet/if_inarp.h>
73 #endif
74
75 #ifdef NS
76 #include <netns/ns.h>
77 #include <netns/ns_if.h>
78 #endif
79
80 #include <machine/bus.h>
81 #include <machine/intr.h>
82
83 #include <dev/mii/mii.h>
84 #include <dev/mii/miivar.h>
85
86 #include <dev/ic/tulipreg.h>
87 #include <dev/ic/tulipvar.h>
88
89 /*
90 * The following tables compute the transmit threshold mode. We start
91 * at index 0. Whenever we get a transmit underrun, we increment our
92 * index, stopping when we reach the NULL terminator.
93 *
94 * Note: Store and forward mode is only available on the 100mbps chips
95 * (21140 and higher).
96 */
97 const struct tulip_txthresh_tab tlp_10_txthresh_tab[] = {
98 { OPMODE_TR_72, "72 bytes" },
99 { OPMODE_TR_96, "96 bytes" },
100 { OPMODE_TR_128, "128 bytes" },
101 { OPMODE_TR_160, "160 bytes" },
102 { 0, NULL },
103 };
104
105 const struct tulip_txthresh_tab tlp_10_100_txthresh_tab[] = {
106 { OPMODE_TR_72, "72/128 bytes" },
107 { OPMODE_TR_96, "96/256 bytes" },
108 { OPMODE_TR_128, "128/512 bytes" },
109 { OPMODE_TR_160, "160/1024 bytes" },
110 { OPMODE_SF, "store and forward mode" },
111 { 0, NULL },
112 };
113
114 void tlp_start __P((struct ifnet *));
115 void tlp_watchdog __P((struct ifnet *));
116 int tlp_ioctl __P((struct ifnet *, u_long, caddr_t));
117
118 void tlp_shutdown __P((void *));
119
120 void tlp_reset __P((struct tulip_softc *));
121 int tlp_init __P((struct tulip_softc *));
122 void tlp_rxdrain __P((struct tulip_softc *));
123 void tlp_stop __P((struct tulip_softc *, int));
124 int tlp_add_rxbuf __P((struct tulip_softc *, int));
125 void tlp_idle __P((struct tulip_softc *, u_int32_t));
126 void tlp_srom_idle __P((struct tulip_softc *));
127
128 void tlp_filter_setup __P((struct tulip_softc *));
129 void tlp_winb_filter_setup __P((struct tulip_softc *));
130
131 void tlp_rxintr __P((struct tulip_softc *));
132 void tlp_txintr __P((struct tulip_softc *));
133
134 void tlp_mii_tick __P((void *));
135 void tlp_mii_statchg __P((struct device *));
136
137 void tlp_mii_getmedia __P((struct tulip_softc *, struct ifmediareq *));
138 int tlp_mii_setmedia __P((struct tulip_softc *));
139
140 void tlp_sio_mii_sync __P((struct tulip_softc *));
141 void tlp_sio_mii_sendbits __P((struct tulip_softc *, u_int32_t, int));
142 int tlp_sio_mii_readreg __P((struct device *, int, int));
143 void tlp_sio_mii_writereg __P((struct device *, int, int, int));
144
145 int tlp_pnic_mii_readreg __P((struct device *, int, int));
146 void tlp_pnic_mii_writereg __P((struct device *, int, int, int));
147
148 u_int32_t tlp_crc32 __P((const u_int8_t *, size_t));
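/* The multicast hash is the low-order bits of the Ethernet CRC of the address. */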
149 #define tlp_mchash(addr) (tlp_crc32((addr), ETHER_ADDR_LEN) & \
150 (TULIP_MCHASHSIZE - 1))
151
152 #ifdef TLP_DEBUG
153 #define DPRINTF(x) printf x
154 #else
155 #define DPRINTF(x) /* nothing */
156 #endif
157
158 /*
159 * tlp_attach:
160 *
161 * Attach a Tulip interface to the system.
162 */
163 void
164 tlp_attach(sc, name, enaddr)
165 struct tulip_softc *sc;
166 const char *name;
167 const u_int8_t *enaddr;
168 {
169 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
170 int i, rseg, error;
171 bus_dma_segment_t seg;
172
173 /*
174 * NOTE: WE EXPECT THE FRONT-END TO INITIALIZE sc_regshift!
175 */
176
177 /*
178 * Setup the transmit threshold table.
179 */
180 switch (sc->sc_chip) {
181 case TULIP_CHIP_DE425:
182 case TULIP_CHIP_21040:
183 case TULIP_CHIP_21041:
184 sc->sc_txth = tlp_10_txthresh_tab;
185 break;
186
187 default:
188 sc->sc_txth = tlp_10_100_txthresh_tab;
189 break;
190 }
191
192 /*
193 * Setup the filter setup function.
194 */
195 switch (sc->sc_chip) {
196 case TULIP_CHIP_WB89C840F:
197 sc->sc_filter_setup = tlp_winb_filter_setup;
198 break;
199
200 default:
201 sc->sc_filter_setup = tlp_filter_setup;
202 break;
203 }
204
205 /*
206 * Set up various chip-specific quirks.
207 */
208 switch (sc->sc_chip) {
209 case TULIP_CHIP_WB89C840F:
210 sc->sc_flags |= TULIPF_IC_FS;
211 break;
212
213 default:
214 /* Nothing. */
215 }
216
217 SIMPLEQ_INIT(&sc->sc_txfreeq);
218 SIMPLEQ_INIT(&sc->sc_txdirtyq);
219
220 /*
221 * Allocate the control data structures, and create and load the
222 * DMA map for it.
223 */
224 if ((error = bus_dmamem_alloc(sc->sc_dmat,
225 sizeof(struct tulip_control_data), PAGE_SIZE, 0, &seg, 1, &rseg,
226 0)) != 0) {
227 printf("%s: unable to allocate control data, error = %d\n",
228 sc->sc_dev.dv_xname, error);
229 goto fail_0;
230 }
231
232 if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
233 sizeof(struct tulip_control_data), (caddr_t *)&sc->sc_control_data,
234 BUS_DMA_COHERENT)) != 0) {
235 printf("%s: unable to map control data, error = %d\n",
236 sc->sc_dev.dv_xname, error);
237 goto fail_1;
238 }
239
240 if ((error = bus_dmamap_create(sc->sc_dmat,
241 sizeof(struct tulip_control_data), 1,
242 sizeof(struct tulip_control_data), 0, 0, &sc->sc_cddmamap)) != 0) {
243 printf("%s: unable to create control data DMA map, "
244 "error = %d\n", sc->sc_dev.dv_xname, error);
245 goto fail_2;
246 }
247
248 if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
249 sc->sc_control_data, sizeof(struct tulip_control_data), NULL,
250 0)) != 0) {
251 printf("%s: unable to load control data DMA map, error = %d\n",
252 sc->sc_dev.dv_xname, error);
253 goto fail_3;
254 }
255
256 /*
257 * Create the transmit buffer DMA maps.
258 */
259 for (i = 0; i < TULIP_TXQUEUELEN; i++) {
260 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
261 TULIP_NTXSEGS, MCLBYTES, 0, 0,
262 &sc->sc_txsoft[i].txs_dmamap)) != 0) {
263 printf("%s: unable to create tx DMA map %d, "
264 "error = %d\n", sc->sc_dev.dv_xname, i, error);
265 goto fail_4;
266 }
267 }
268
269 /*
270 * Create the receive buffer DMA maps.
271 */
272 for (i = 0; i < TULIP_NRXDESC; i++) {
273 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
274 MCLBYTES, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
275 printf("%s: unable to create rx DMA map %d, "
276 "error = %d\n", sc->sc_dev.dv_xname, i, error);
277 goto fail_5;
278 }
279 sc->sc_rxsoft[i].rxs_mbuf = NULL;
280 }
281
282 /*
283 * Reset the chip to a known state.
284 */
285 tlp_reset(sc);
286
287 /* Announce ourselves. */
288 printf("%s: %s%sEthernet address %s\n", sc->sc_dev.dv_xname,
289 name != NULL ? name : "", name != NULL ? ", " : "",
290 ether_sprintf(enaddr));
291
292 /*
293 * Initialize our media structures. This may probe the MII, if
294 * present.
295 */
296 (*sc->sc_mediasw->tmsw_init)(sc);
297
298 ifp = &sc->sc_ethercom.ec_if;
299 strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
300 ifp->if_softc = sc;
301 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
302 ifp->if_ioctl = tlp_ioctl;
303 ifp->if_start = tlp_start;
304 ifp->if_watchdog = tlp_watchdog;
305
306 /*
307 * Attach the interface.
308 */
309 if_attach(ifp);
310 ether_ifattach(ifp, enaddr);
311 #if NBPFILTER > 0
312 bpfattach(&sc->sc_ethercom.ec_if.if_bpf, ifp, DLT_EN10MB,
313 sizeof(struct ether_header));
314 #endif
315
316 /*
317 * Make sure the interface is shutdown during reboot.
318 */
319 sc->sc_sdhook = shutdownhook_establish(tlp_shutdown, sc);
320 if (sc->sc_sdhook == NULL)
321 printf("%s: WARNING: unable to establish shutdown hook\n",
322 sc->sc_dev.dv_xname);
323 return;
324
325 /*
326 * Free any resources we've allocated during the failed attach
327 * attempt. Do this in reverse order and fall through.
328 */
329 fail_5:
330 for (i = 0; i < TULIP_NRXDESC; i++) {
331 if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
332 bus_dmamap_destroy(sc->sc_dmat,
333 sc->sc_rxsoft[i].rxs_dmamap);
334 }
335 fail_4:
336 for (i = 0; i < TULIP_TXQUEUELEN; i++) {
337 if (sc->sc_txsoft[i].txs_dmamap != NULL)
338 bus_dmamap_destroy(sc->sc_dmat,
339 sc->sc_txsoft[i].txs_dmamap);
340 }
341 bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
342 fail_3:
343 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
344 fail_2:
345 bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_control_data,
346 sizeof(struct tulip_control_data));
347 fail_1:
348 bus_dmamem_free(sc->sc_dmat, &seg, rseg);
349 fail_0:
350 return;
351 }
352
353 /*
354 * tlp_shutdown:
355 *
356 * Make sure the interface is stopped at reboot time.
357 */
358 void
359 tlp_shutdown(arg)
360 void *arg;
361 {
362 struct tulip_softc *sc = arg;
363
364 tlp_stop(sc, 1);
365 }
366
367 /*
368 * tlp_start: [ifnet interface function]
369 *
370 * Start packet transmission on the interface.
371 */
372 void
373 tlp_start(ifp)
374 struct ifnet *ifp;
375 {
376 struct tulip_softc *sc = ifp->if_softc;
377 struct mbuf *m0, *m;
378 struct tulip_txsoft *txs, *last_txs;
379 bus_dmamap_t dmamap;
380 int error, firsttx, nexttx, lasttx, ofree, seg;
381
382 DPRINTF(("%s: tlp_start: sc_flags 0x%08x, if_flags 0x%08x\n",
383 sc->sc_dev.dv_xname, sc->sc_flags, ifp->if_flags));
384
385 /*
386 * If we want a filter setup, it means no more descriptors were
387 * available for the setup routine. Let it get a chance to wedge
388 * itself into the ring.
389 */
390 if (sc->sc_flags & TULIPF_WANT_SETUP)
391 ifp->if_flags |= IFF_OACTIVE;
392
393 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
394 return;
395
396 /*
397 * Remember the previous number of free descriptors and
398 * the first descriptor we'll use.
399 */
400 ofree = sc->sc_txfree;
401 firsttx = sc->sc_txnext;
402
403 DPRINTF(("%s: tlp_start: txfree %d, txnext %d\n",
404 sc->sc_dev.dv_xname, ofree, firsttx));
405
406 /*
407 * Loop through the send queue, setting up transmit descriptors
408 * until we drain the queue, or use up all available transmit
409 * descriptors.
410 */
411 while ((txs = SIMPLEQ_FIRST(&sc->sc_txfreeq)) != NULL &&
412 sc->sc_txfree != 0) {
413 /*
414 * Grab a packet off the queue.
415 */
416 IF_DEQUEUE(&ifp->if_snd, m0);
417 if (m0 == NULL)
418 break;
419
420 dmamap = txs->txs_dmamap;
421
422 /*
423 * Load the DMA map. If this fails, the packet either
424 * didn't fit in the allotted number of segments, or we were
425 * short on resources. In this case, we'll copy and try
426 * again.
427 */
428 if (bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
429 BUS_DMA_NOWAIT) != 0) {
430 MGETHDR(m, M_DONTWAIT, MT_DATA);
431 if (m == NULL) {
432 printf("%s: unable to allocate Tx mbuf\n",
433 sc->sc_dev.dv_xname);
434 IF_PREPEND(&ifp->if_snd, m0);
435 break;
436 }
437 if (m0->m_pkthdr.len > MHLEN) {
438 MCLGET(m, M_DONTWAIT);
439 if ((m->m_flags & M_EXT) == 0) {
440 printf("%s: unable to allocate Tx "
441 "cluster\n", sc->sc_dev.dv_xname);
442 m_freem(m);
443 IF_PREPEND(&ifp->if_snd, m0);
444 break;
445 }
446 }
447 m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, caddr_t));
448 m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
449 m_freem(m0);
450 m0 = m;
451 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
452 m0, BUS_DMA_NOWAIT);
453 if (error) {
454 printf("%s: unable to load Tx buffer, "
455 "error = %d\n", sc->sc_dev.dv_xname, error);
456 IF_PREPEND(&ifp->if_snd, m0);
457 break;
458 }
459 }
460
461 /*
462 * Ensure we have enough descriptors free to describe
463 * the packet.
464 */
465 if (dmamap->dm_nsegs > sc->sc_txfree) {
466 /*
467 * Not enough free descriptors to transmit this
468 * packet. We haven't committed to anything yet,
469 * so just unload the DMA map, put the packet
470 * back on the queue, and punt. Notify the upper
471 * layer that there are no more slots left.
472 *
473 * XXX We could allocate an mbuf and copy, but
474 * XXX is it worth it?
475 */
476 ifp->if_flags |= IFF_OACTIVE;
477 bus_dmamap_unload(sc->sc_dmat, dmamap);
478 IF_PREPEND(&ifp->if_snd, m0);
479 break;
480 }
481
482 /*
483 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
484 */
485
486 /* Sync the DMA map. */
487 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
488 BUS_DMASYNC_PREWRITE);
489
490 /*
491 * Initialize the transmit descriptors.
492 */
493 for (nexttx = sc->sc_txnext, seg = 0;
494 seg < dmamap->dm_nsegs;
495 seg++, nexttx = TULIP_NEXTTX(nexttx)) {
496 /*
497 * If this is the first descriptor we're
498 * enqueueing, don't set the OWN bit just
499 * yet. That could cause a race condition.
500 * We'll do it below.
501 */
502 sc->sc_txdescs[nexttx].td_status =
503 (nexttx == firsttx) ? 0 : TDSTAT_OWN;
504 sc->sc_txdescs[nexttx].td_bufaddr1 =
505 dmamap->dm_segs[seg].ds_addr;
506 sc->sc_txdescs[nexttx].td_ctl =
507 (dmamap->dm_segs[seg].ds_len << TDCTL_SIZE1_SHIFT) |
508 TDCTL_CH;
509 lasttx = nexttx;
510 }
511
512 /* Set `first segment' and `last segment' appropriately. */
513 sc->sc_txdescs[sc->sc_txnext].td_ctl |= TDCTL_Tx_FS;
514 sc->sc_txdescs[lasttx].td_ctl |= TDCTL_Tx_LS;
515
516 #ifdef TLP_DEBUG
517 printf(" txsoft %p transmit chain:\n", txs);
518 for (seg = sc->sc_txnext;; seg = TULIP_NEXTTX(seg)) {
519 printf(" descriptor %d:\n", seg);
520 printf(" td_status: 0x%08x\n",
521 sc->sc_txdescs[seg].td_status);
522 printf(" td_ctl: 0x%08x\n",
523 sc->sc_txdescs[seg].td_ctl);
524 printf(" td_bufaddr1: 0x%08x\n",
525 sc->sc_txdescs[seg].td_bufaddr1);
526 printf(" td_bufaddr2: 0x%08x\n",
527 sc->sc_txdescs[seg].td_bufaddr2);
528 if (seg == lasttx)
529 break;
530 }
531 #endif
532
533 /* Sync the descriptors we're using. */
534 TULIP_CDTXSYNC(sc, sc->sc_txnext, dmamap->dm_nsegs,
535 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
536
537 /*
538 * Store a pointer to the packet so we can free it later,
539 * and remember what txdirty will be once the packet is
540 * done.
541 */
542 txs->txs_mbuf = m0;
543 txs->txs_firstdesc = sc->sc_txnext;
544 txs->txs_lastdesc = lasttx;
545
546 /* Advance the tx pointer. */
547 sc->sc_txfree -= dmamap->dm_nsegs;
548 sc->sc_txnext = nexttx;
549
550 SIMPLEQ_REMOVE_HEAD(&sc->sc_txfreeq, txs, txs_q);
551 SIMPLEQ_INSERT_TAIL(&sc->sc_txdirtyq, txs, txs_q);
552
553 last_txs = txs;
554
555 #if NBPFILTER > 0
556 /*
557 * Pass the packet to any BPF listeners.
558 */
559 if (ifp->if_bpf)
560 bpf_mtap(ifp->if_bpf, m0);
561 #endif /* NBPFILTER > 0 */
562 }
563
564 if (txs == NULL || sc->sc_txfree == 0) {
565 /* No more slots left; notify upper layer. */
566 ifp->if_flags |= IFF_OACTIVE;
567 }
568
569 if (sc->sc_txfree != ofree) {
570 DPRINTF(("%s: packets enqueued, IC on %d, OWN on %d\n",
571 sc->sc_dev.dv_xname, lasttx, firsttx));
572 /*
573 * Cause a transmit interrupt to happen on the
574 * last packet we enqueued.
575 */
576 sc->sc_txdescs[lasttx].td_ctl |= TDCTL_Tx_IC;
577 TULIP_CDTXSYNC(sc, lasttx, 1,
578 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
579
580 /*
581 * Some clone chips want IC on the *first* segment in
582 * the packet. Appease them.
583 */
584 if ((sc->sc_flags & TULIPF_IC_FS) != 0 &&
585 last_txs->txs_firstdesc != lasttx) {
586 sc->sc_txdescs[last_txs->txs_firstdesc].td_ctl |=
587 TDCTL_Tx_IC;
588 TULIP_CDTXSYNC(sc, last_txs->txs_firstdesc, 1,
589 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
590 }
591
592 /*
593 * The entire packet chain is set up. Give the
594 * first descriptor to the chip now.
595 */
596 sc->sc_txdescs[firsttx].td_status |= TDSTAT_OWN;
597 TULIP_CDTXSYNC(sc, firsttx, 1,
598 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
599
600 /* Wake up the transmitter. */
601 /* XXX USE AUTOPOLLING? */
602 TULIP_WRITE(sc, CSR_TXPOLL, TXPOLL_TPD);
603
604 /* Set a watchdog timer in case the chip flakes out. */
605 ifp->if_timer = 5;
606 }
607 }
608
609 /*
610 * tlp_watchdog: [ifnet interface function]
611 *
612 * Watchdog timer handler.
613 */
614 void
615 tlp_watchdog(ifp)
616 struct ifnet *ifp;
617 {
618 struct tulip_softc *sc = ifp->if_softc;
619
620 printf("%s: device timeout\n", sc->sc_dev.dv_xname);
621 ifp->if_oerrors++;
622 (void) tlp_init(sc);
623
624 /* Try to get more packets going. */
625 tlp_start(ifp);
626 }
627
628 /*
629 * tlp_ioctl: [ifnet interface function]
630 *
631 * Handle control requests from the operator.
632 */
633 int
634 tlp_ioctl(ifp, cmd, data)
635 struct ifnet *ifp;
636 u_long cmd;
637 caddr_t data;
638 {
639 struct tulip_softc *sc = ifp->if_softc;
640 struct ifreq *ifr = (struct ifreq *)data;
641 struct ifaddr *ifa = (struct ifaddr *)data;
642 int s, error = 0;
643
644 s = splnet();
645
646 switch (cmd) {
647 case SIOCSIFADDR:
648 ifp->if_flags |= IFF_UP;
649
650 switch (ifa->ifa_addr->sa_family) {
651 #ifdef INET
652 case AF_INET:
653 if ((error = tlp_init(sc)) != 0)
654 break;
655 arp_ifinit(ifp, ifa);
656 break;
657 #endif /* INET */
658 #ifdef NS
659 case AF_NS:
660 {
661 struct ns_addr *ina = &IA_SNS(ifa)->sns_addr;
662
663 if (ns_nullhost(*ina))
664 ina->x_host = *(union ns_host *)
665 LLADDR(ifp->if_sadl);
666 else
667 bcopy(ina->x_host.c_host, LLADDR(ifp->if_sadl),
668 ifp->if_addrlen);
669 /* Set new address. */
670 error = tlp_init(sc);
671 break;
672 }
673 #endif /* NS */
674 default:
675 error = tlp_init(sc);
676 break;
677 }
678 break;
679
680 case SIOCSIFMTU:
681 if (ifr->ifr_mtu > ETHERMTU)
682 error = EINVAL;
683 else
684 ifp->if_mtu = ifr->ifr_mtu;
685 break;
686
687 case SIOCSIFFLAGS:
688 if ((ifp->if_flags & IFF_UP) == 0 &&
689 (ifp->if_flags & IFF_RUNNING) != 0) {
690 /*
691 * If interface is marked down and it is running, then
692 * stop it.
693 */
694 tlp_stop(sc, 1);
695 } else if ((ifp->if_flags & IFF_UP) != 0 &&
696 (ifp->if_flags & IFF_RUNNING) == 0) {
697 /*
698 * If interface is marked up and it is stopped, then
699 * start it.
700 */
701 error = tlp_init(sc);
702 } else if ((ifp->if_flags & IFF_UP) != 0) {
703 /*
704 * Reset the interface to pick up changes in any other
705 * flags that affect the hardware state.
706 */
707 error = tlp_init(sc);
708 }
709 break;
710
711 case SIOCADDMULTI:
712 case SIOCDELMULTI:
713 error = (cmd == SIOCADDMULTI) ?
714 ether_addmulti(ifr, &sc->sc_ethercom) :
715 ether_delmulti(ifr, &sc->sc_ethercom);
716
717 if (error == ENETRESET) {
718 /*
719 * Multicast list has changed. Set the filter
720 * accordingly.
721 */
722 (*sc->sc_filter_setup)(sc);
723 error = 0;
724 }
725 break;
726
727 case SIOCSIFMEDIA:
728 case SIOCGIFMEDIA:
729 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
730 break;
731
732 default:
733 error = EINVAL;
734 break;
735 }
736
737 /* Try to get more packets going. */
738 tlp_start(ifp);
739
740 splx(s);
741 return (error);
742 }
743
744 /*
745 * tlp_intr:
746 *
747 * Interrupt service routine.
748 */
749 int
750 tlp_intr(arg)
751 void *arg;
752 {
753 struct tulip_softc *sc = arg;
754 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
755 u_int32_t status;
756 int handled = 0, txthresh;
757
758 DPRINTF(("%s: tlp_intr\n", sc->sc_dev.dv_xname));
759
760 for (;;) {
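/*
 * Read and acknowledge the interrupt status; status bits are
 * cleared by writing them back as ones.
 */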
761 status = TULIP_READ(sc, CSR_STATUS);
762 if (status)
763 TULIP_WRITE(sc, CSR_STATUS, status);
764
765 if ((status & sc->sc_inten) == 0)
766 break;
767
768 handled = 1;
769
770 if (status & (STATUS_RI|STATUS_RU|STATUS_RWT)) {
771 /* Grab any new packets. */
772 tlp_rxintr(sc);
773
774 if (status & STATUS_RWT)
775 printf("%s: receive watchdog timeout\n",
776 sc->sc_dev.dv_xname);
777
778 if (status & STATUS_RU) {
779 printf("%s: receive ring overrun\n",
780 sc->sc_dev.dv_xname);
781 /* Get the receive process going again. */
782 tlp_idle(sc, OPMODE_SR);
783 TULIP_WRITE(sc, CSR_RXLIST,
784 TULIP_CDRXADDR(sc, sc->sc_rxptr));
785 TULIP_WRITE(sc, CSR_OPMODE, sc->sc_opmode);
786 TULIP_WRITE(sc, CSR_RXPOLL, RXPOLL_RPD);
787 break;
788 }
789 }
790
791 if (status & (STATUS_TI|STATUS_UNF|STATUS_TJT)) {
792 /* Sweep up transmit descriptors. */
793 tlp_txintr(sc);
794
795 if (status & STATUS_TJT)
796 printf("%s: transmit jabber timeout\n",
797 sc->sc_dev.dv_xname);
798
799 if (status & STATUS_UNF) {
800 /*
801 * Increase our transmit threshold if
802 * another is available.
803 */
804 txthresh = sc->sc_txthresh + 1;
805 if (sc->sc_txth[txthresh].txth_name != NULL) {
806 /* Idle the transmit process. */
807 tlp_idle(sc, OPMODE_ST);
808
809 sc->sc_txthresh = txthresh;
810 sc->sc_opmode &= ~(OPMODE_TR|OPMODE_SF);
811 sc->sc_opmode |=
812 sc->sc_txth[txthresh].txth_opmode;
813 printf("%s: transmit underrun; new "
814 "threshold: %s\n",
815 sc->sc_dev.dv_xname,
816 sc->sc_txth[txthresh].txth_name);
817
818 /*
819 * Set the new threshold and restart
820 * the transmit process.
821 */
822 TULIP_WRITE(sc, CSR_OPMODE,
823 sc->sc_opmode);
824 }
825 /*
826 * XXX Log every Nth underrun from
827 * XXX now on?
828 */
829 }
830 }
831
832 if (status & (STATUS_TPS|STATUS_RPS)) {
833 if (status & STATUS_TPS)
834 printf("%s: transmit process stopped\n",
835 sc->sc_dev.dv_xname);
836 if (status & STATUS_RPS)
837 printf("%s: receive process stopped\n",
838 sc->sc_dev.dv_xname);
839 (void) tlp_init(sc);
840 break;
841 }
842
843 if (status & STATUS_SE) {
844 const char *str;
845 switch (status & STATUS_EB) {
846 case STATUS_EB_PARITY:
847 str = "parity error";
848 break;
849
850 case STATUS_EB_MABT:
851 str = "master abort";
852 break;
853
854 case STATUS_EB_TABT:
855 str = "target abort";
856 break;
857
858 default:
859 str = "unknown error";
860 break;
861 }
862 printf("%s: fatal system error: %s\n",
863 sc->sc_dev.dv_xname, str);
864 (void) tlp_init(sc);
865 break;
866 }
867
868 /*
869 * Not handled:
870 *
871 * Transmit buffer unavailable -- normal
872 * condition, nothing to do, really.
873 *
874 * General purpose timer expired -- we don't
875 * use the general purpose timer.
876 *
877 * Early receive interrupt -- not available on
878 * all chips, we just use RI. We also only
879 * use single-segment receive DMA, so this
880 * is mostly useless.
881 */
882 }
883
884 /* Try to get more packets going. */
885 tlp_start(ifp);
886
887 return (handled);
888 }
889
890 /*
891 * tlp_rxintr:
892 *
893 * Helper; handle receive interrupts.
894 */
895 void
896 tlp_rxintr(sc)
897 struct tulip_softc *sc;
898 {
899 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
900 struct ether_header *eh;
901 struct tulip_rxsoft *rxs;
902 struct mbuf *m;
903 u_int32_t rxstat;
904 int i, len;
905
906 for (i = sc->sc_rxptr;; i = TULIP_NEXTRX(i)) {
907 rxs = &sc->sc_rxsoft[i];
908
909 TULIP_CDRXSYNC(sc, i,
910 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
911
912 rxstat = sc->sc_rxdescs[i].td_status;
913
914 if (rxstat & TDSTAT_OWN) {
915 /*
916 * We have processed all of the receive buffers.
917 */
918 break;
919 }
920
921 /*
922 * Make sure the packet fit in one buffer. This should
923 * always be the case. But the Lite-On PNIC, rev 33
924 * has an awful receive engine bug, which may require
925 * a very icky work-around.
926 */
927 if ((rxstat & (TDSTAT_Rx_FS|TDSTAT_Rx_LS)) !=
928 (TDSTAT_Rx_FS|TDSTAT_Rx_LS)) {
929 printf("%s: incoming packet spilled, resetting\n",
930 sc->sc_dev.dv_xname);
931 (void) tlp_init(sc);
932 return;
933 }
934
935 /*
936 * If any collisions were seen on the wire, count one.
937 */
938 if (rxstat & TDSTAT_Rx_CS)
939 ifp->if_collisions++;
940
941 /*
942 * If an error occurred, update stats, clear the status
943 * word, and leave the packet buffer in place. It will
944 * simply be reused the next time the ring comes around.
945 */
946 if (rxstat & TDSTAT_ES) {
947 #define PRINTERR(bit, str) \
948 if (rxstat & (bit)) \
949 printf("%s: receive error: %s\n", \
950 sc->sc_dev.dv_xname, str)
951 ifp->if_ierrors++;
952 PRINTERR(TDSTAT_Rx_DE, "descriptor error");
953 PRINTERR(TDSTAT_Rx_RF, "runt frame");
954 PRINTERR(TDSTAT_Rx_TL, "frame too long");
955 PRINTERR(TDSTAT_Rx_RE, "MII error");
956 PRINTERR(TDSTAT_Rx_DB, "dribbling bit");
957 PRINTERR(TDSTAT_Rx_CE, "CRC error");
958 #undef PRINTERR
959 TULIP_INIT_RXDESC(sc, i);
960 continue;
961 }
962
963 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
964 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
965
966 /*
967 * No errors; receive the packet. Note the Tulip
968 * includes the CRC with every packet; trim it.
969 */
970 len = TDSTAT_Rx_LENGTH(rxstat) - ETHER_CRC_LEN;
971
972 #ifdef __NO_STRICT_ALIGNMENT
973 /*
974 * Allocate a new mbuf cluster. If that fails, we are
975 * out of memory, and must drop the packet and recycle
976 * the buffer that's already attached to this descriptor.
977 */
978 m = rxs->rxs_mbuf;
979 if (tlp_add_rxbuf(sc, i) != 0) {
980 ifp->if_ierrors++;
981 TULIP_INIT_RXDESC(sc, i);
982 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
983 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
984 continue;
985 }
986 #else
987 /*
988 * The Tulip's receive buffers must be 4-byte aligned.
989 * But this means that the data after the Ethernet header
990 * is misaligned. We must allocate a new buffer and
991 * copy the data, shifted forward 2 bytes.
992 */
993 MGETHDR(m, M_DONTWAIT, MT_DATA);
994 if (m == NULL) {
995 dropit:
996 ifp->if_ierrors++;
997 TULIP_INIT_RXDESC(sc, i);
998 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
999 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
1000 continue;
1001 }
1002 if (len > (MHLEN - 2)) {
1003 MCLGET(m, M_DONTWAIT);
1004 if ((m->m_flags & M_EXT) == 0) {
1005 m_freem(m);
1006 goto dropit;
1007 }
1008 }
1009 m->m_data += 2;
1010
1011 /*
1012 * Note that we use clusters for incoming frames, so the
1013 * buffer is virtually contiguous.
1014 */
1015 memcpy(mtod(m, caddr_t), mtod(rxs->rxs_mbuf, caddr_t), len);
1016
1017 /* Allow the receive descriptor to continue using its mbuf. */
1018 TULIP_INIT_RXDESC(sc, i);
1019 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
1020 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
1021 #endif /* __NO_STRICT_ALIGNMENT */
1022
1023 ifp->if_ipackets++;
1024 eh = mtod(m, struct ether_header *);
1025 m->m_pkthdr.rcvif = ifp;
1026 m->m_pkthdr.len = m->m_len = len;
1027
1028 #if NBPFILTER > 0
1029 /*
1030 * Pass this up to any BPF listeners, but only
1031 * pass it up the stack if it's for us.
1032 */
1033 if (ifp->if_bpf)
1034 bpf_mtap(ifp->if_bpf, m);
1035 #endif /* NBPFILTER > 0 */
1036
1037 /*
1038 * This test is outside the NBPFILTER block because
1039 * on the 21140 we have to use Hash-Only mode due to
1040 * a bug in the filter logic.
1041 */
1042 if ((ifp->if_flags & IFF_PROMISC) != 0 ||
1043 sc->sc_filtmode == TDCTL_Tx_FT_HASHONLY) {
1044 if (memcmp(LLADDR(ifp->if_sadl), eh->ether_dhost,
1045 ETHER_ADDR_LEN) != 0 &&
1046 ETHER_IS_MULTICAST(eh->ether_dhost) == 0) {
1047 m_freem(m);
1048 continue;
1049 }
1050 }
1051
1052 /* Pass it on. */
1053 (*ifp->if_input)(ifp, m);
1054 }
1055
1056 /* Update the receive pointer. */
1057 sc->sc_rxptr = i;
1058 }
1059
1060 /*
1061 * tlp_txintr:
1062 *
1063 * Helper; handle transmit interrupts.
1064 */
1065 void
1066 tlp_txintr(sc)
1067 struct tulip_softc *sc;
1068 {
1069 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1070 struct tulip_txsoft *txs;
1071 u_int32_t txstat;
1072
1073 DPRINTF(("%s: tlp_txintr: sc_flags 0x%08x\n",
1074 sc->sc_dev.dv_xname, sc->sc_flags));
1075
1076 ifp->if_flags &= ~IFF_OACTIVE;
1077
1078 /*
1079 * Go through our Tx list and free mbufs for those
1080 * frames that have been transmitted.
1081 */
1082 while ((txs = SIMPLEQ_FIRST(&sc->sc_txdirtyq)) != NULL) {
1083 TULIP_CDTXSYNC(sc, txs->txs_firstdesc,
1084 txs->txs_dmamap->dm_nsegs,
1085 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1086
1087 #ifdef TLP_DEBUG
1088 { int i;
1089 printf(" txsoft %p transmit chain:\n", txs);
1090 for (i = txs->txs_firstdesc;; i = TULIP_NEXTTX(i)) {
1091 printf(" descriptor %d:\n", i);
1092 printf(" td_status: 0x%08x\n",
1093 sc->sc_txdescs[i].td_status);
1094 printf(" td_ctl: 0x%08x\n",
1095 sc->sc_txdescs[i].td_ctl);
1096 printf(" td_bufaddr1: 0x%08x\n",
1097 sc->sc_txdescs[i].td_bufaddr1);
1098 printf(" td_bufaddr2: 0x%08x\n",
1099 sc->sc_txdescs[i].td_bufaddr2);
1100 if (i == txs->txs_lastdesc)
1101 break;
1102 }}
1103 #endif
1104
1105 txstat = sc->sc_txdescs[txs->txs_firstdesc].td_status;
1106 if (txstat & TDSTAT_OWN)
1107 break;
1108
1109 SIMPLEQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs, txs_q);
1110
1111 sc->sc_txfree += txs->txs_dmamap->dm_nsegs;
1112
1113 bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
1114 0, txs->txs_dmamap->dm_mapsize,
1115 BUS_DMASYNC_POSTWRITE);
1116 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
1117 m_freem(txs->txs_mbuf);
1118 txs->txs_mbuf = NULL;
1119
1120 SIMPLEQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
1121
1122 /*
1123 * Check for errors and collisions.
1124 */
1125 if (txstat & TDSTAT_ES) {
1126 ifp->if_oerrors++;
1127 if (txstat & TDSTAT_Tx_EC)
1128 ifp->if_collisions += 16;
1129 if (txstat & TDSTAT_Tx_LC)
1130 ifp->if_collisions++;
1131 } else {
1132 /* Packet was transmitted successfully. */
1133 ifp->if_opackets++;
1134 ifp->if_collisions += TDSTAT_Tx_COLLISIONS(txstat);
1135 }
1136 }
1137
1138 /*
1139 * If there are no more pending transmissions, cancel the watchdog
1140 * timer.
1141 */
1142 if (txs == NULL)
1143 ifp->if_timer = 0;
1144
1145 /*
1146 * If we have a receive filter setup pending, do it now.
1147 */
1148 if (sc->sc_flags & TULIPF_WANT_SETUP)
1149 (*sc->sc_filter_setup)(sc);
1150 }
1151
1152 /*
1153 * tlp_reset:
1154 *
1155 * Perform a soft reset on the Tulip.
1156 */
1157 void
1158 tlp_reset(sc)
1159 struct tulip_softc *sc;
1160 {
1161 int i;
1162
1163 TULIP_WRITE(sc, CSR_BUSMODE, BUSMODE_SWR);
1164
1165 for (i = 0; i < 1000; i++) {
1166 if (TULIP_ISSET(sc, CSR_BUSMODE, BUSMODE_SWR) == 0)
1167 break;
1168 delay(10);
1169 }
1170
1171 if (TULIP_ISSET(sc, CSR_BUSMODE, BUSMODE_SWR))
1172 printf("%s: reset failed to complete\n", sc->sc_dev.dv_xname);
1173
1174 delay(1000);
1175 }
1176
1177 /*
1178 * tlp_init:
1179 *
1180 * Initialize the interface. Must be called at splnet().
1181 */
1182 int
1183 tlp_init(sc)
1184 struct tulip_softc *sc;
1185 {
1186 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1187 struct tulip_txsoft *txs;
1188 struct tulip_rxsoft *rxs;
1189 int i, error = 0;
1190
1191 /*
1192 * Cancel any pending I/O.
1193 */
1194 tlp_stop(sc, 0);
1195
1196 /*
1197 * Reset the Tulip to a known state.
1198 */
1199 tlp_reset(sc);
1200
1201 /*
1202 * Initialize the BUSMODE register.
1203 *
1204 * XXX What about read-multiple/read-line/write-line on
1205 * XXX the 21140 and up?
1206 */
1207 sc->sc_busmode = BUSMODE_BAR | BUSMODE_PBL_DEFAULT;
1208 switch (sc->sc_cacheline) {
1209 default:
1210 /*
1211 * Note: We must *always* set these bits; a cache
1212 * alignment of 0 is RESERVED.
1213 */
1214 case 8:
1215 sc->sc_busmode |= BUSMODE_CAL_8LW;
1216 break;
1217 case 16:
1218 sc->sc_busmode |= BUSMODE_CAL_16LW;
1219 break;
1220 case 32:
1221 sc->sc_busmode |= BUSMODE_CAL_32LW;
1222 break;
1223 }
1224 switch (sc->sc_chip) {
1225 case TULIP_CHIP_82C168:
1226 case TULIP_CHIP_82C169:
1227 sc->sc_busmode |= BUSMODE_PNIC_MBO;
1228 break;
1229 default:
1230 /* Nothing. */
1231 break;
1232 }
1233 #if BYTE_ORDER == BIG_ENDIAN
1234 /*
1235 * XXX There are reports that this doesn't work properly
1236 * in the old Tulip driver, but BUSMODE_DBO does. However,
1237 * BUSMODE_DBO is not available on the 21040, and requires
1238 * us to byte-swap the setup packet. What to do?
1239 */
1240 sc->sc_busmode |= BUSMODE_BLE;
1241 #endif
1242 TULIP_WRITE(sc, CSR_BUSMODE, sc->sc_busmode);
1243
1244 /*
1245 * Initialize the OPMODE register. We don't write it until
1246 * we're ready to begin the transmit and receive processes.
1247 *
1248 * Media-related OPMODE bits are set in the media callbacks
1249 * for each specific chip/board.
1250 */
1251 sc->sc_opmode = OPMODE_SR | OPMODE_ST |
1252 sc->sc_txth[sc->sc_txthresh].txth_opmode;
1253 switch (sc->sc_chip) {
1254 case TULIP_CHIP_21140:
1255 case TULIP_CHIP_21140A:
1256 case TULIP_CHIP_21142:
1257 case TULIP_CHIP_21143:
1258 sc->sc_opmode |= OPMODE_MBO;
1259 break;
1260
1261 default:
1262 /* Nothing. */
1263 }
1264
1265 if (sc->sc_flags & TULIPF_HAS_MII) {
1266 /* Enable the MII port. */
1267 sc->sc_opmode |= OPMODE_PS;
1268
1269 switch (sc->sc_chip) {
1270 case TULIP_CHIP_82C168:
1271 case TULIP_CHIP_82C169:
1272 TULIP_WRITE(sc, CSR_PNIC_ENDEC, PNIC_ENDEC_JABBERDIS);
1273 break;
1274
1275 default:
1276 /* Nothing. */
1277 }
1278 }
1279
1280 /*
1281 * Magical mystery initialization on the Macronix chips.
1282 * The MX98713 uses its own magic value, the rest share
1283 * a common one.
1284 */
1285 switch (sc->sc_chip) {
1286 case TULIP_CHIP_MX98713:
1287 TULIP_WRITE(sc, CSR_PMAC_TOR, PMAC_TOR_98713);
1288 break;
1289
1290 case TULIP_CHIP_MX98713A:
1291 case TULIP_CHIP_MX98715:
1292 case TULIP_CHIP_MX98725:
1293 TULIP_WRITE(sc, CSR_PMAC_TOR, PMAC_TOR_98715);
1294 break;
1295
1296 default:
1297 /* Nothing. */
1298 }
1299
1300 /*
1301 * Initialize the transmit descriptor ring.
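 * Descriptors are chained (TDCTL_CH): td_bufaddr2 of each descriptor
 * points at the next one, so the ring wraps back on itself.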
1302 */
1303 memset(sc->sc_txdescs, 0, sizeof(sc->sc_txdescs));
1304 for (i = 0; i < TULIP_NTXDESC; i++) {
1305 sc->sc_txdescs[i].td_ctl = TDCTL_CH;
1306 sc->sc_txdescs[i].td_bufaddr2 =
1307 TULIP_CDTXADDR(sc, TULIP_NEXTTX(i));
1308 }
1309 TULIP_CDTXSYNC(sc, 0, TULIP_NTXDESC,
1310 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1311 sc->sc_txfree = TULIP_NTXDESC;
1312 sc->sc_txnext = 0;
1313
1314 /*
1315 * Initialize the transmit job descriptors.
1316 */
1317 SIMPLEQ_INIT(&sc->sc_txfreeq);
1318 SIMPLEQ_INIT(&sc->sc_txdirtyq);
1319 for (i = 0; i < TULIP_TXQUEUELEN; i++) {
1320 txs = &sc->sc_txsoft[i];
1321 txs->txs_mbuf = NULL;
1322 SIMPLEQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
1323 }
1324
1325 /*
1326 * Initialize the receive descriptor and receive job
1327 * descriptor rings.
1328 */
1329 for (i = 0; i < TULIP_NRXDESC; i++) {
1330 rxs = &sc->sc_rxsoft[i];
1331 if (rxs->rxs_mbuf == NULL) {
1332 if ((error = tlp_add_rxbuf(sc, i)) != 0) {
1333 printf("%s: unable to allocate or map rx "
1334 "buffer %d, error = %d\n",
1335 sc->sc_dev.dv_xname, i, error);
1336 /*
1337 * XXX Should attempt to run with fewer receive
1338 * XXX buffers instead of just failing.
1339 */
1340 tlp_rxdrain(sc);
1341 goto out;
1342 }
1343 }
1344 }
1345 sc->sc_rxptr = 0;
1346
1347 /*
1348 * Initialize the interrupt mask and enable interrupts.
1349 */
1350 /* normal interrupts */
1351 sc->sc_inten = STATUS_TI | STATUS_TU | STATUS_RI | STATUS_NIS;
1352 /* abnormal interrupts */
1353 sc->sc_inten |= STATUS_TPS | STATUS_TJT | STATUS_UNF |
1354 STATUS_RU | STATUS_RPS | STATUS_RWT | STATUS_SE | STATUS_AIS;
1355
1356 TULIP_WRITE(sc, CSR_INTEN, sc->sc_inten);
1357 TULIP_WRITE(sc, CSR_STATUS, 0xffffffff);
1358
1359 /*
1360 * Give the transmit and receive rings to the Tulip.
1361 */
1362 TULIP_WRITE(sc, CSR_TXLIST, TULIP_CDTXADDR(sc, sc->sc_txnext));
1363 TULIP_WRITE(sc, CSR_RXLIST, TULIP_CDRXADDR(sc, sc->sc_rxptr));
1364
1365 /*
1366 * On chips that do this differently, set the station address.
1367 */
1368 switch (sc->sc_chip) {
1369 case TULIP_CHIP_WB89C840F:
1370 /* XXX Do this with stream writes? */
1371 for (i = 0; i < ETHER_ADDR_LEN; i++) {
1372 bus_space_write_1(sc->sc_st, sc->sc_sh,
1373 (CSR_WINB_NODE0 >> sc->sc_regshift) + i,
1374 LLADDR(ifp->if_sadl)[i]);
1375 }
1376 break;
1377
1378 default:
1379 /* Nothing. */
1380 }
1381
1382 /*
1383 * Set the receive filter. This will start the transmit and
1384 * receive processes.
1385 */
1386 (*sc->sc_filter_setup)(sc);
1387
1388 /*
1389 * Start the receive process.
1390 */
1391 TULIP_WRITE(sc, CSR_RXPOLL, RXPOLL_RPD);
1392
1393 if (sc->sc_flags & TULIPF_HAS_MII) {
1394 /* Start the one second clock. */
1395 timeout(tlp_mii_tick, sc, hz);
1396 }
1397
1398 /*
1399 * Note that the interface is now running.
1400 */
1401 ifp->if_flags |= IFF_RUNNING;
1402 ifp->if_flags &= ~IFF_OACTIVE;
1403
1404 /*
1405 * Set the media. We must do this after the transmit process is
1406 * running, since we may actually have to transmit packets on
1407 * our board to test link integrity.
1408 */
1409 (void) (*sc->sc_mediasw->tmsw_set)(sc);
1410
1411 out:
1412 if (error)
1413 printf("%s: interface not running\n", sc->sc_dev.dv_xname);
1414 return (error);
1415 }
1416
1417 /*
1418 * tlp_rxdrain:
1419 *
1420 * Drain the receive queue.
1421 */
1422 void
1423 tlp_rxdrain(sc)
1424 struct tulip_softc *sc;
1425 {
1426 struct tulip_rxsoft *rxs;
1427 int i;
1428
1429 for (i = 0; i < TULIP_NRXDESC; i++) {
1430 rxs = &sc->sc_rxsoft[i];
1431 if (rxs->rxs_mbuf != NULL) {
1432 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
1433 m_freem(rxs->rxs_mbuf);
1434 rxs->rxs_mbuf = NULL;
1435 }
1436 }
1437 }
1438
1439 /*
1440 * tlp_stop:
1441 *
1442 * Stop transmission on the interface.
1443 */
1444 void
1445 tlp_stop(sc, drain)
1446 struct tulip_softc *sc;
1447 int drain;
1448 {
1449 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1450 struct tulip_txsoft *txs;
1451
1452 if (sc->sc_flags & TULIPF_HAS_MII) {
1453 /* Stop the one second clock. */
1454 untimeout(tlp_mii_tick, sc);
1455 }
1456
1457 /* Disable interrupts. */
1458 TULIP_WRITE(sc, CSR_INTEN, 0);
1459
1460 /* Stop the transmit and receive processes. */
1461 TULIP_WRITE(sc, CSR_OPMODE, 0);
1462 TULIP_WRITE(sc, CSR_RXLIST, 0);
1463 TULIP_WRITE(sc, CSR_TXLIST, 0);
1464
1465 /*
1466 * Release any queued transmit buffers.
1467 */
1468 while ((txs = SIMPLEQ_FIRST(&sc->sc_txdirtyq)) != NULL) {
1469 SIMPLEQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs, txs_q);
1470 if (txs->txs_mbuf != NULL) {
1471 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
1472 m_freem(txs->txs_mbuf);
1473 txs->txs_mbuf = NULL;
1474 }
1475 SIMPLEQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
1476 }
1477
1478 if (drain) {
1479 /*
1480 * Release the receive buffers.
1481 */
1482 tlp_rxdrain(sc);
1483 }
1484
1485 sc->sc_flags &= ~TULIPF_WANT_SETUP;
1486
1487 /*
1488 * Mark the interface down and cancel the watchdog timer.
1489 */
1490 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1491 ifp->if_timer = 0;
1492 }
1493
1494 #define SROM_EMIT(sc, x) \
1495 do { \
1496 TULIP_WRITE((sc), CSR_MIIROM, (x)); \
1497 delay(1); \
1498 } while (0)
1499
1500 /*
1501 * tlp_srom_idle:
1502 *
1503 * Put the SROM in idle state.
1504 */
1505 void
1506 tlp_srom_idle(sc)
1507 struct tulip_softc *sc;
1508 {
1509 u_int32_t miirom;
1510 int i;
1511
1512 miirom = MIIROM_SR;
1513 SROM_EMIT(sc, miirom);
1514
1515 miirom |= MIIROM_RD;
1516 SROM_EMIT(sc, miirom);
1517
1518 miirom |= MIIROM_SROMCS;
1519 SROM_EMIT(sc, miirom);
1520
1521 SROM_EMIT(sc, miirom|MIIROM_SROMSK);
1522
1523 /* Strobe the clock 25 times. */
1524 for (i = 0; i < 25; i++) {
1525 SROM_EMIT(sc, miirom);
1526 SROM_EMIT(sc, miirom|MIIROM_SROMSK);
1527 }
1528
1529 SROM_EMIT(sc, miirom);
1530
1531 miirom &= ~MIIROM_SROMCS;
1532 SROM_EMIT(sc, miirom);
1533
1534 SROM_EMIT(sc, 0);
1535 }
1536
1537 /*
1538 * tlp_read_srom:
1539 *
1540 * Read the Tulip SROM.
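 * (The SROM is a serial EEPROM, bit-banged through the MII/ROM CSR:
 * shift out a READ opcode and word address, then clock in 16 data
 * bits per word.)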
1541 */
1542 void
1543 tlp_read_srom(sc, word, wordcnt, data)
1544 struct tulip_softc *sc;
1545 int word, wordcnt;
1546 u_int16_t *data;
1547 {
1548 u_int32_t miirom;
1549 int i, x;
1550
1551 tlp_srom_idle(sc);
1552
1553 /* Select the SROM. */
1554 miirom = MIIROM_SR;
1555 SROM_EMIT(sc, miirom);
1556
1557 miirom |= MIIROM_RD;
1558 SROM_EMIT(sc, miirom);
1559
1560 for (i = 0; i < wordcnt; i++) {
1561 /* Send CHIP SELECT for one clock tick. */
1562 miirom |= MIIROM_SROMCS;
1563 SROM_EMIT(sc, miirom);
1564
1565 /* Shift in the READ opcode. */
1566 for (x = 3; x > 0; x--) {
1567 if (TULIP_SROM_OPC_READ & (1 << (x - 1)))
1568 miirom |= MIIROM_SROMDI;
1569 else
1570 miirom &= ~MIIROM_SROMDI;
1571 SROM_EMIT(sc, miirom);
1572 SROM_EMIT(sc, miirom|MIIROM_SROMSK);
1573 SROM_EMIT(sc, miirom);
1574 }
1575
1576 /* Shift in address. */
1577 for (x = 6; x > 0; x--) {
1578 if ((word + i) & (1 << (x - 1)))
1579 miirom |= MIIROM_SROMDI;
1580 else
1581 miirom &= ~MIIROM_SROMDI;
1582 SROM_EMIT(sc, miirom);
1583 SROM_EMIT(sc, miirom|MIIROM_SROMSK);
1584 SROM_EMIT(sc, miirom);
1585 }
1586
1587 /* Shift out data. */
1588 miirom &= ~MIIROM_SROMDI;
1589 data[i] = 0;
1590 for (x = 16; x > 0; x--) {
1591 SROM_EMIT(sc, miirom|MIIROM_SROMSK);
1592 if (TULIP_ISSET(sc, CSR_MIIROM, MIIROM_SROMDO))
1593 data[i] |= (1 << (x - 1));
1594 SROM_EMIT(sc, miirom);
1595 }
1596
1597 /* Clear CHIP SELECT. */
1598 miirom &= ~MIIROM_SROMCS;
1599 SROM_EMIT(sc, miirom);
1600 }
1601
1602 /* Deselect the SROM. */
1603 SROM_EMIT(sc, 0);
1604
1605 /* ...and idle it. */
1606 tlp_srom_idle(sc);
1607 }
1608
1609 #undef SROM_EMIT
1610
1611 /*
1612 * tlp_add_rxbuf:
1613 *
1614 * Add a receive buffer to the indicated descriptor.
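 * Allocates an mbuf cluster, loads it into the descriptor's DMA map,
 * and re-initializes the receive descriptor.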
1615 */
1616 int
1617 tlp_add_rxbuf(sc, idx)
1618 struct tulip_softc *sc;
1619 int idx;
1620 {
1621 struct tulip_rxsoft *rxs = &sc->sc_rxsoft[idx];
1622 struct mbuf *m;
1623 int error;
1624
1625 MGETHDR(m, M_DONTWAIT, MT_DATA);
1626 if (m == NULL)
1627 return (ENOBUFS);
1628
1629 MCLGET(m, M_DONTWAIT);
1630 if ((m->m_flags & M_EXT) == 0) {
1631 m_freem(m);
1632 return (ENOBUFS);
1633 }
1634
1635 if (rxs->rxs_mbuf != NULL)
1636 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
1637
1638 rxs->rxs_mbuf = m;
1639
1640 error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap,
1641 m->m_ext.ext_buf, m->m_ext.ext_size, NULL, BUS_DMA_NOWAIT);
1642 if (error) {
1643 printf("%s: can't load rx DMA map %d, error = %d\n",
1644 sc->sc_dev.dv_xname, idx, error);
1645 panic("tlp_add_rxbuf"); /* XXX */
1646 }
1647
1648 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
1649 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
1650
1651 TULIP_INIT_RXDESC(sc, idx);
1652
1653 return (0);
1654 }
1655
1656 /*
1657 * tlp_crc32:
1658 *
1659 * Compute the 32-bit CRC of the provided buffer.
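 * (Table-driven, four bits at a time, using the reflected polynomial;
 * note the result is not post-inverted.)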
1660 */
1661 u_int32_t
1662 tlp_crc32(buf, len)
1663 const u_int8_t *buf;
1664 size_t len;
1665 {
1666 static const u_int32_t crctab[] = {
1667 0x00000000, 0x1db71064, 0x3b6e20c8, 0x26d930ac,
1668 0x76dc4190, 0x6b6b51f4, 0x4db26158, 0x5005713c,
1669 0xedb88320, 0xf00f9344, 0xd6d6a3e8, 0xcb61b38c,
1670 0x9b64c2b0, 0x86d3d2d4, 0xa00ae278, 0xbdbdf21c
1671 };
1672 u_int32_t crc;
1673 int i;
1674
1675 crc = 0xffffffff;
1676 for (i = 0; i < len; i++) {
1677 crc ^= buf[i];
1678 crc = (crc >> 4) ^ crctab[crc & 0xf];
1679 crc = (crc >> 4) ^ crctab[crc & 0xf];
1680 }
1681 return (crc);
1682 }
1683
1684 /*
1685 * tlp_srom_crcok:
1686 *
1687 * Check the CRC of the Tulip SROM.
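 * (The last two bytes of the SROM hold a little-endian 16-bit CRC of
 * the first 126 bytes.)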
1688 */
1689 int
1690 tlp_srom_crcok(romdata)
1691 u_int8_t *romdata;
1692 {
1693 u_int32_t crc;
1694
1695 crc = tlp_crc32(romdata, 126);
1696 if ((crc ^ 0xffff) == (romdata[126] | (romdata[127] << 8)))
1697 return (1);
1698 return (0);
1699 }
1700
1701 /*
1702 * tlp_filter_setup:
1703 *
1704 * Set the Tulip's receive filter.
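 * The filter is programmed by queueing a special setup frame on the
 * transmit ring, using perfect, hash-perfect, or hash-only mode.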
1705 */
1706 void
1707 tlp_filter_setup(sc)
1708 struct tulip_softc *sc;
1709 {
1710 struct ethercom *ec = &sc->sc_ethercom;
1711 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1712 struct ether_multi *enm;
1713 struct ether_multistep step;
1714 __volatile u_int32_t *sp;
1715 u_int8_t enaddr[ETHER_ADDR_LEN];
1716 u_int32_t hash;
1717 int cnt;
1718
1719 DPRINTF(("%s: tlp_filter_setup: sc_flags 0x%08x\n",
1720 sc->sc_dev.dv_xname, sc->sc_flags));
1721
1722 memcpy(enaddr, LLADDR(ifp->if_sadl), ETHER_ADDR_LEN);
1723
1724 /*
1725 * If there are transmissions pending, wait until they have
1726 * completed.
1727 */
1728 if (SIMPLEQ_FIRST(&sc->sc_txdirtyq) != NULL) {
1729 sc->sc_flags |= TULIPF_WANT_SETUP;
1730 DPRINTF(("%s: tlp_filter_setup: deferring\n",
1731 sc->sc_dev.dv_xname));
1732 return;
1733 }
1734 sc->sc_flags &= ~TULIPF_WANT_SETUP;
1735
1736 /*
1737 * If we're running, idle the transmit and receive engines. If
1738 * we're NOT running, we're being called from tlp_init(), and our
1739 * writing OPMODE will start the transmit and receive processes
1740 * in motion.
1741 */
1742 if (ifp->if_flags & IFF_RUNNING)
1743 tlp_idle(sc, OPMODE_ST|OPMODE_SR);
1744
1745 sc->sc_opmode &= ~(OPMODE_PR|OPMODE_PM);
1746
1747 if (ifp->if_flags & IFF_PROMISC) {
1748 sc->sc_opmode |= OPMODE_PR;
1749 goto allmulti;
1750 }
1751
1752 /*
1753 * Try Perfect filtering first.
1754 */
1755
1756 sc->sc_filtmode = TDCTL_Tx_FT_PERFECT;
1757 sp = TULIP_CDSP(sc);
1758 memset(TULIP_CDSP(sc), 0, TULIP_SETUP_PACKET_LEN);
1759 cnt = 0;
1760 ETHER_FIRST_MULTI(step, ec, enm);
1761 while (enm != NULL) {
1762 if (bcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
1763 /*
1764 * We must listen to a range of multicast addresses.
1765 * For now, just accept all multicasts, rather than
1766 * trying to set only those filter bits needed to match
1767 * the range. (At this time, the only use of address
1768 * ranges is for IP multicast routing, for which the
1769 * range is big enough to require all bits set.)
1770 */
1771 goto allmulti;
1772 }
1773 if (cnt == (TULIP_MAXADDRS - 2)) {
1774 /*
1775 * We already have our multicast limit (still need
1776 * our station address and broadcast). Go to
1777 * Hash-Perfect mode.
1778 */
1779 goto hashperfect;
1780 }
1781 *sp++ = ((u_int16_t *) enm->enm_addrlo)[0];
1782 *sp++ = ((u_int16_t *) enm->enm_addrlo)[1];
1783 *sp++ = ((u_int16_t *) enm->enm_addrlo)[2];
cnt++;	/* count this perfect filter entry */
1784 ETHER_NEXT_MULTI(step, enm);
1785 }
1786
1787 if (ifp->if_flags & IFF_BROADCAST) {
1788 /* ...and the broadcast address. */
1789 cnt++;
1790 *sp++ = 0xffff;
1791 *sp++ = 0xffff;
1792 *sp++ = 0xffff;
1793 }
1794
1795 /* Pad the rest with our station address. */
1796 for (; cnt < TULIP_MAXADDRS; cnt++) {
1797 *sp++ = ((u_int16_t *) enaddr)[0];
1798 *sp++ = ((u_int16_t *) enaddr)[1];
1799 *sp++ = ((u_int16_t *) enaddr)[2];
1800 }
1801 ifp->if_flags &= ~IFF_ALLMULTI;
1802 goto setit;
1803
1804 hashperfect:
1805 /*
1806 * Try Hash-Perfect mode.
1807 */
1808
1809 /*
1810 * Some 21140 chips have broken Hash-Perfect modes. On these
1811 * chips, we simply use Hash-Only mode, and put our station
1812 * address into the filter.
1813 */
1814 if (sc->sc_chip == TULIP_CHIP_21140)
1815 sc->sc_filtmode = TDCTL_Tx_FT_HASHONLY;
1816 else
1817 sc->sc_filtmode = TDCTL_Tx_FT_HASH;
1818 sp = TULIP_CDSP(sc);
1819 memset(TULIP_CDSP(sc), 0, TULIP_SETUP_PACKET_LEN);
1820 ETHER_FIRST_MULTI(step, ec, enm);
1821 while (enm != NULL) {
1822 if (bcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
1823 /*
1824 * We must listen to a range of multicast addresses.
1825 * For now, just accept all multicasts, rather than
1826 * trying to set only those filter bits needed to match
1827 * the range. (At this time, the only use of address
1828 * ranges is for IP multicast routing, for which the
1829 * range is big enough to require all bits set.)
1830 */
1831 goto allmulti;
1832 }
1833 hash = tlp_mchash(enm->enm_addrlo);
1834 sp[hash >> 4] |= 1 << (hash & 0xf);
1835 ETHER_NEXT_MULTI(step, enm);
1836 }
1837
1838 if (ifp->if_flags & IFF_BROADCAST) {
1839 /* ...and the broadcast address. */
1840 hash = tlp_mchash(etherbroadcastaddr);
1841 sp[hash >> 4] |= 1 << (hash & 0xf);
1842 }
1843
1844 if (sc->sc_filtmode == TDCTL_Tx_FT_HASHONLY) {
1845 /* ...and our station address. */
1846 hash = tlp_mchash(enaddr);
1847 sp[hash >> 4] |= 1 << (hash & 0xf);
1848 } else {
1849 /*
1850 * Hash-Perfect mode; put our station address after
1851 * the hash table.
1852 */
1853 sp[39] = ((u_int16_t *) enaddr)[0];
1854 sp[40] = ((u_int16_t *) enaddr)[1];
1855 sp[41] = ((u_int16_t *) enaddr)[2];
1856 }
1857 ifp->if_flags &= ~IFF_ALLMULTI;
1858 goto setit;
1859
1860 allmulti:
1861 /*
1862 * Use Perfect filter mode. First address is the broadcast address,
1863 * and pad the rest with our station address. We'll set Pass-all-
1864 * multicast in OPMODE below.
1865 */
1866 sc->sc_filtmode = TDCTL_Tx_FT_PERFECT;
1867 sp = TULIP_CDSP(sc);
1868 memset(TULIP_CDSP(sc), 0, TULIP_SETUP_PACKET_LEN);
1869 cnt = 0;
1870 if (ifp->if_flags & IFF_BROADCAST) {
1871 cnt++;
1872 *sp++ = 0xffff;
1873 *sp++ = 0xffff;
1874 *sp++ = 0xffff;
1875 }
1876 for (; cnt < TULIP_MAXADDRS; cnt++) {
1877 *sp++ = ((u_int16_t *) enaddr)[0];
1878 *sp++ = ((u_int16_t *) enaddr)[1];
1879 *sp++ = ((u_int16_t *) enaddr)[2];
1880 }
1881 ifp->if_flags |= IFF_ALLMULTI;
1882
1883 setit:
1884 if (ifp->if_flags & IFF_ALLMULTI)
1885 sc->sc_opmode |= OPMODE_PM;
1886
1887 /* Sync the setup packet buffer. */
1888 TULIP_CDSPSYNC(sc, BUS_DMASYNC_PREWRITE);
1889
1890 /*
1891 * Fill in the setup packet descriptor.
1892 */
1893 sc->sc_setup_desc.td_bufaddr1 = TULIP_CDSPADDR(sc);
1894 sc->sc_setup_desc.td_bufaddr2 = TULIP_CDTXADDR(sc, sc->sc_txnext);
1895 sc->sc_setup_desc.td_ctl =
1896 (TULIP_SETUP_PACKET_LEN << TDCTL_SIZE1_SHIFT) |
1897 sc->sc_filtmode | TDCTL_Tx_SET | TDCTL_Tx_FS | TDCTL_Tx_LS |
1898 TDCTL_CH;
1899 sc->sc_setup_desc.td_status = TDSTAT_OWN;
1900 TULIP_CDSDSYNC(sc, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1901
1902 /*
1903 * Write the address of the setup descriptor. This also has
1904 * the side effect of giving the transmit ring to the chip,
1905 * since the setup descriptor points to the next available
1906 * descriptor in the ring.
1907 */
1908 TULIP_WRITE(sc, CSR_TXLIST, TULIP_CDSDADDR(sc));
1909
1910 /*
1911 * Set the OPMODE register. This will also resume the
1912 * transmit process we idled above.
1913 */
1914 TULIP_WRITE(sc, CSR_OPMODE, sc->sc_opmode);
1915
1916 /*
1917 * Kick the transmitter; this will cause the Tulip to
1918 * read the setup descriptor.
1919 */
1920 /* XXX USE AUTOPOLLING? */
1921 TULIP_WRITE(sc, CSR_TXPOLL, TXPOLL_TPD);
1922
1923 /*
1924 * Now wait for the OWN bit to clear.
1925 */
1926 for (cnt = 0; cnt < 1000; cnt++) {
1927 TULIP_CDSDSYNC(sc, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1928 if ((sc->sc_setup_desc.td_status & TDSTAT_OWN) == 0)
1929 break;
1930 delay(10);
1931 }
1932 if (sc->sc_setup_desc.td_status & TDSTAT_OWN)
1933 printf("%s: filter setup failed to complete\n",
1934 sc->sc_dev.dv_xname);
1935 DPRINTF(("%s: tlp_filter_setup: returning\n", sc->sc_dev.dv_xname));
1936 }
1937
1938 /*
1939 * tlp_winb_filter_setup:
1940 *
1941 * Set the Winbond 89C840F's receive filter.
1942 */
1943 void
1944 tlp_winb_filter_setup(sc)
1945 struct tulip_softc *sc;
1946 {
1947 struct ethercom *ec = &sc->sc_ethercom;
1948 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1949 struct ether_multi *enm;
1950 struct ether_multistep step;
1951 u_int32_t hash, mchash[2];
1952
1953 DPRINTF(("%s: tlp_winb_filter_setup: sc_flags 0x%08x\n",
1954 sc->sc_dev.dv_xname, sc->sc_flags));
1955
1956 sc->sc_opmode &= ~(OPMODE_PR|OPMODE_PM);
1957
1958 if (ifp->if_flags & IFF_PROMISC) {
1959 sc->sc_opmode |= OPMODE_PR;
1960 goto allmulti;
1961 }
1962
1963 mchash[0] = mchash[1] = 0;
1964
1965 ETHER_FIRST_MULTI(step, ec, enm);
1966 while (enm != NULL) {
1967 if (bcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
1968 /*
1969 * We must listen to a range of multicast addresses.
1970 * For now, just accept all multicasts, rather than
1971 * trying to set only those filter bits needed to match
1972 * the range. (At this time, the only use of address
1973 * ranges is for IP multicast routing, for which the
1974 * range is big enough to require all bits set.)
1975 */
1976 goto allmulti;
1977 }
1978
1979 /*
1980 * According to the FreeBSD `wb' driver, yes, you
1981 * really do invert the hash.
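 * The 6-bit result indexes a 64-bit table split across the two
 * multicast address registers (MAR0/MAR1).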
1982 */
1983 hash = (~(tlp_crc32(enm->enm_addrlo, ETHER_ADDR_LEN) >> 26))
1984 & 0x3f;
1985 mchash[hash >> 5] |= 1 << (hash & 0x1f);
1986 ETHER_NEXT_MULTI(step, enm);
1987 }
1988 ifp->if_flags &= ~IFF_ALLMULTI;
1989 goto setit;
1990
1991 allmulti:
1992 ifp->if_flags |= IFF_ALLMULTI;
1993 mchash[0] = mchash[1] = 0xffffffff;
1994
1995 setit:
1996 if (ifp->if_flags & IFF_ALLMULTI)
1997 sc->sc_opmode |= OPMODE_PM;
1998
1999 TULIP_WRITE(sc, CSR_WINB_MAR0, mchash[0]);
2000 TULIP_WRITE(sc, CSR_WINB_MAR1, mchash[1]);
2001 TULIP_WRITE(sc, CSR_OPMODE, sc->sc_opmode);
2002 DPRINTF(("%s: tlp_winb_filter_setup: returning\n",
2003 sc->sc_dev.dv_xname));
2004 }
2005
2006 /*
2007 * tlp_idle:
2008 *
2009 * Cause the transmit and/or receive processes to go idle.
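 * Clears the requested OPMODE bits, then waits for the corresponding
 * `process stopped' status bits, reporting the DMA state if the
 * engine fails to idle.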
2010 */
2011 void
2012 tlp_idle(sc, bits)
2013 struct tulip_softc *sc;
2014 u_int32_t bits;
2015 {
2016 static const char *tx_state_names[] = {
2017 "STOPPED",
2018 "RUNNING - FETCH",
2019 "RUNNING - WAIT",
2020 "RUNNING - READING",
2021 "-- RESERVED --",
2022 "RUNNING - SETUP",
2023 "SUSPENDED",
2024 "RUNNING - CLOSE",
2025 };
2026 static const char *rx_state_names[] = {
2027 "STOPPED",
2028 "RUNNING - FETCH",
2029 "RUNNING - CHECK",
2030 "RUNNING - WAIT",
2031 "SUSPENDED",
2032 "RUNNING - CLOSE",
2033 "RUNNING - FLUSH",
2034 "RUNNING - QUEUE",
2035 };
2036 u_int32_t csr, ackmask = 0;
2037 int i;
2038
2039 if (bits & OPMODE_ST)
2040 ackmask |= STATUS_TPS;
2041
2042 if (bits & OPMODE_SR)
2043 ackmask |= STATUS_RPS;
2044
2045 TULIP_WRITE(sc, CSR_OPMODE, sc->sc_opmode & ~bits);
2046
2047 for (i = 0; i < 1000; i++) {
2048 if (TULIP_ISSET(sc, CSR_STATUS, ackmask) == ackmask)
2049 break;
2050 delay(10);
2051 }
2052
2053 csr = TULIP_READ(sc, CSR_STATUS);
2054 if ((csr & ackmask) != ackmask) {
2055 if ((bits & OPMODE_ST) != 0 && (csr & STATUS_TPS) == 0 &&
2056 (csr & STATUS_TS) != STATUS_TS_STOPPED)
2057 printf("%s: transmit process failed to idle: "
2058 "state %s\n", sc->sc_dev.dv_xname,
2059 tx_state_names[(csr & STATUS_TS) >> 20]);
2060 if ((bits & OPMODE_SR) != 0 && (csr & STATUS_RPS) == 0 &&
2061 (csr & STATUS_RS) != STATUS_RS_STOPPED)
2062 printf("%s: receive process failed to idle: "
2063 "state %s\n", sc->sc_dev.dv_xname,
2064 rx_state_names[(csr & STATUS_RS) >> 17]);
2065 }
2066 TULIP_WRITE(sc, CSR_STATUS, ackmask);
2067 }
2068
2069 /*****************************************************************************
2070 * Generic media support functions.
2071 *****************************************************************************/
2072
2073 /*
2074 * tlp_mediastatus: [ifmedia interface function]
2075 *
2076 * Query the current media.
2077 */
2078 void
2079 tlp_mediastatus(ifp, ifmr)
2080 struct ifnet *ifp;
2081 struct ifmediareq *ifmr;
2082 {
2083 struct tulip_softc *sc = ifp->if_softc;
2084
2085 (*sc->sc_mediasw->tmsw_get)(sc, ifmr);
2086 }
2087
2088 /*
2089 * tlp_mediachange: [ifmedia interface function]
2090 *
2091 * Update the current media.
2092 */
2093 int
2094 tlp_mediachange(ifp)
2095 struct ifnet *ifp;
2096 {
2097 struct tulip_softc *sc = ifp->if_softc;
2098
2099 return ((*sc->sc_mediasw->tmsw_set)(sc));
2100 }
2101
2102 /*****************************************************************************
2103 * Support functions for MII-attached media.
2104 *****************************************************************************/
2105
2106 /*
2107 * tlp_mii_tick:
2108 *
2109 * One second timer, used to tick the MII.
2110 */
2111 void
2112 tlp_mii_tick(arg)
2113 void *arg;
2114 {
2115 struct tulip_softc *sc = arg;
2116 int s;
2117
2118 s = splnet();
2119 mii_tick(&sc->sc_mii);
2120 splx(s);
2121
2122 timeout(tlp_mii_tick, sc, hz);
2123 }
2124
2125 /*
2126 * tlp_mii_statchg: [mii interface function]
2127 *
2128 * Callback from PHY when media changes.
2129 */
2130 void
2131 tlp_mii_statchg(self)
2132 struct device *self;
2133 {
2134 struct tulip_softc *sc = (struct tulip_softc *)self;
2135
2136 /* Idle the transmit and receive processes. */
2137 tlp_idle(sc, OPMODE_ST|OPMODE_SR);
2138
2139 /*
2140 * XXX What about Heartbeat Disable? Is it magically frobbed
2141 * XXX by the PHY? I hope so...
2142 */
2143
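	/*
	 * OPMODE_TTM reportedly selects the 10Mb/s interpretation of the
	 * transmit threshold, so set it only for 10BASE-T; OPMODE_FD
	 * enables full-duplex operation.
	 */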
2144 if (IFM_SUBTYPE(sc->sc_mii.mii_media_active) == IFM_10_T)
2145 sc->sc_opmode |= OPMODE_TTM;
2146 else
2147 sc->sc_opmode &= ~OPMODE_TTM;
2148
2149 if (sc->sc_mii.mii_media_active & IFM_FDX)
2150 sc->sc_opmode |= OPMODE_FD;
2151 else
2152 sc->sc_opmode &= ~OPMODE_FD;
2153
2154 /*
2155 * Write new OPMODE bits. This also restarts the transmit
2156 * and receive processes.
2157 */
2158 TULIP_WRITE(sc, CSR_OPMODE, sc->sc_opmode);
2159
2160 /* XXX Update ifp->if_baudrate */
2161 }
2162
2163 /*
2164 * tlp_mii_getmedia:
2165 *
2166 * Callback from ifmedia to request current media status.
2167 */
2168 void
2169 tlp_mii_getmedia(sc, ifmr)
2170 struct tulip_softc *sc;
2171 struct ifmediareq *ifmr;
2172 {
2173
2174 mii_pollstat(&sc->sc_mii);
2175 ifmr->ifm_status = sc->sc_mii.mii_media_status;
2176 ifmr->ifm_active = sc->sc_mii.mii_media_active;
2177 }
2178
2179 /*
2180 * tlp_mii_setmedia:
2181 *
2182  * Callback from ifmedia to request a new media setting.
2183 */
2184 int
2185 tlp_mii_setmedia(sc)
2186 struct tulip_softc *sc;
2187 {
2188 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2189
2190 if (ifp->if_flags & IFF_UP)
2191 mii_mediachg(&sc->sc_mii);
2192 return (0);
2193 }
2194
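/*
 * MII management frames are bit-banged through the MII/ROM CSR.  Each
 * MII_EMIT writes the data/clock/direction bits and pauses briefly,
 * which keeps the resulting MDC rate comfortably below the management
 * interface's maximum (nominally 2.5MHz).
 */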
2195 #define MII_EMIT(sc, x) \
2196 do { \
2197 TULIP_WRITE((sc), CSR_MIIROM, (x)); \
2198 delay(1); \
2199 } while (0)
2200
2201 /*
2202 * tlp_sio_mii_sync:
2203 *
2204 * Synchronize the SIO-attached MII.
2205 */
2206 void
2207 tlp_sio_mii_sync(sc)
2208 struct tulip_softc *sc;
2209 {
2210 u_int32_t miirom;
2211 int i;
2212
2213 miirom = MIIROM_MIIDIR|MIIROM_MDO;
2214
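	/*
	 * Hold MDIO high and issue 32 clock cycles -- the preamble that
	 * (re)synchronizes the PHY's management interface.
	 */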
2215 MII_EMIT(sc, miirom);
2216 for (i = 0; i < 32; i++) {
2217 MII_EMIT(sc, miirom | MIIROM_MDC);
2218 MII_EMIT(sc, miirom);
2219 }
2220 }
2221
2222 /*
2223 * tlp_sio_mii_sendbits:
2224 *
2225 * Send a series of bits out the SIO to the MII.
2226 */
2227 void
2228 tlp_sio_mii_sendbits(sc, data, nbits)
2229 struct tulip_softc *sc;
2230 u_int32_t data;
2231 int nbits;
2232 {
2233 u_int32_t miirom, i;
2234
2235 miirom = MIIROM_MIIDIR;
2236 MII_EMIT(sc, miirom);
2237
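	/*
	 * Shift the bits out MSB first, clocking each one onto MDIO
	 * with a low-high-low pulse on MDC.
	 */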
2238 for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
2239 if (data & i)
2240 miirom |= MIIROM_MDO;
2241 else
2242 miirom &= ~MIIROM_MDO;
2243 MII_EMIT(sc, miirom);
2244 MII_EMIT(sc, miirom|MIIROM_MDC);
2245 MII_EMIT(sc, miirom);
2246 }
2247 }
2248
2249 /*
2250 * tlp_sio_mii_readreg:
2251 *
2252 * Read a PHY register via SIO-attached MII.
2253 */
2254 int
2255 tlp_sio_mii_readreg(self, phy, reg)
2256 struct device *self;
2257 int phy, reg;
2258 {
2259 struct tulip_softc *sc = (void *) self;
2260 int val = 0, err = 0, i;
2261
2262 tlp_sio_mii_sync(sc);
2263
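	/*
	 * Clause 22 management read frame (after the preamble), as this
	 * driver emits it: start delimiter, read opcode, 5-bit PHY
	 * address, 5-bit register address, then a turnaround during
	 * which the PHY takes over the data line and shifts out 16 data
	 * bits, MSB first:
	 *
	 *	<01><10><PPPPP><RRRRR><Z0><DDDDDDDDDDDDDDDD>
	 */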
2264 tlp_sio_mii_sendbits(sc, MII_COMMAND_START, 2);
2265 tlp_sio_mii_sendbits(sc, MII_COMMAND_READ, 2);
2266 tlp_sio_mii_sendbits(sc, phy, 5);
2267 tlp_sio_mii_sendbits(sc, reg, 5);
2268
2269 MII_EMIT(sc, MIIROM_MIIDIR);
2270 MII_EMIT(sc, MIIROM_MIIDIR|MIIROM_MDC);
2271
2272 MII_EMIT(sc, 0);
2273 MII_EMIT(sc, MIIROM_MDC);
2274
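	/*
	 * Sample MDI during the turnaround; if the PHY failed to pull
	 * the line low, assume nothing answered and return 0 below.
	 */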
2275 err = TULIP_ISSET(sc, CSR_MIIROM, MIIROM_MDI);
2276
2277 MII_EMIT(sc, 0);
2278 MII_EMIT(sc, MIIROM_MDC);
2279
2280 for (i = 0; i < 16; i++) {
2281 val <<= 1;
2282 MII_EMIT(sc, 0);
2283 if (err == 0 && TULIP_ISSET(sc, CSR_MIIROM, MIIROM_MDI))
2284 val |= 1;
2285 MII_EMIT(sc, MIIROM_MDC);
2286 }
2287
2288 MII_EMIT(sc, 0);
2289
2290 return (err ? 0 : val);
2291 }
2292
2293 /*
2294 * tlp_sio_mii_writereg:
2295 *
2296 * Write a PHY register via SIO-attached MII.
2297 */
2298 void
2299 tlp_sio_mii_writereg(self, phy, reg, val)
2300 struct device *self;
2301 int phy, reg, val;
2302 {
2303 struct tulip_softc *sc = (void *) self;
2304
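	/*
	 * Clause 22 management write frame: start delimiter, write
	 * opcode, PHY and register addresses, a station-driven <10>
	 * turnaround (MII_COMMAND_ACK here), then the 16 data bits.
	 */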
2305 tlp_sio_mii_sync(sc);
2306
2307 tlp_sio_mii_sendbits(sc, MII_COMMAND_START, 2);
2308 tlp_sio_mii_sendbits(sc, MII_COMMAND_WRITE, 2);
2309 tlp_sio_mii_sendbits(sc, phy, 5);
2310 tlp_sio_mii_sendbits(sc, reg, 5);
2311 tlp_sio_mii_sendbits(sc, MII_COMMAND_ACK, 2);
2312 tlp_sio_mii_sendbits(sc, val, 16);
2313
2314 MII_EMIT(sc, 0);
2315 }
2316
2317 #undef MII_EMIT
2318
2319 /*
2320 * tlp_pnic_mii_readreg:
2321 *
2322 * Read a PHY register on the Lite-On PNIC.
2323 */
2324 int
2325 tlp_pnic_mii_readreg(self, phy, reg)
2326 struct device *self;
2327 int phy, reg;
2328 {
2329 struct tulip_softc *sc = (void *) self;
2330 u_int32_t val;
2331 int i;
2332
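	/*
	 * The PNIC has a single MII command/status register: write the
	 * read command, then poll (up to roughly 10ms) for the BUSY bit
	 * to clear.  An all-ones data field is taken to mean that no
	 * PHY responded.
	 */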
2333 TULIP_WRITE(sc, CSR_PNIC_MII,
2334 PNIC_MII_READ | (phy << PNIC_MII_PHYSHIFT) |
2335 (reg << PNIC_MII_REGSHIFT));
2336
2337 for (i = 0; i < 1000; i++) {
2338 delay(10);
2339 val = TULIP_READ(sc, CSR_PNIC_MII);
2340 if ((val & PNIC_MII_BUSY) == 0) {
2341 if ((val & PNIC_MII_DATA) == PNIC_MII_DATA)
2342 return (0);
2343 else
2344 return (val & PNIC_MII_DATA);
2345 }
2346 }
2347 printf("%s: MII read timed out\n", sc->sc_dev.dv_xname);
2348 return (0);
2349 }
2350
2351 /*
2352 * tlp_pnic_mii_writereg:
2353 *
2354 * Write a PHY register on the Lite-On PNIC.
2355 */
2356 void
2357 tlp_pnic_mii_writereg(self, phy, reg, val)
2358 struct device *self;
2359 int phy, reg, val;
2360 {
2361 struct tulip_softc *sc = (void *) self;
2362 int i;
2363
2364 TULIP_WRITE(sc, CSR_PNIC_MII,
2365 PNIC_MII_WRITE | (phy << PNIC_MII_PHYSHIFT) |
2366 (reg << PNIC_MII_REGSHIFT) | val);
2367
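	/* Wait (up to roughly 10ms) for the chip to finish the write. */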
2368 for (i = 0; i < 1000; i++) {
2369 delay(10);
2370 if (TULIP_ISSET(sc, CSR_PNIC_MII, PNIC_MII_BUSY) == 0)
2371 return;
2372 }
2373 printf("%s: MII write timed out\n", sc->sc_dev.dv_xname);
2374 }
2375
2376 /*****************************************************************************
2377  * Chip/board-specific media switches.  The switches collected here are
2378  * those potentially common to multiple front-ends.
2379 *****************************************************************************/
2380
2381 /*
2382 * MII-on-SIO media switch. Handles only MII attached to the SIO.
2383 */
2384 void tlp_sio_mii_tmsw_init __P((struct tulip_softc *));
2385
2386 const struct tulip_mediasw tlp_sio_mii_mediasw = {
2387 tlp_sio_mii_tmsw_init, tlp_mii_getmedia, tlp_mii_setmedia
2388 };
2389
2390 void
2391 tlp_sio_mii_tmsw_init(sc)
2392 struct tulip_softc *sc;
2393 {
2394 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2395
2396 sc->sc_mii.mii_ifp = ifp;
2397 sc->sc_mii.mii_readreg = tlp_sio_mii_readreg;
2398 sc->sc_mii.mii_writereg = tlp_sio_mii_writereg;
2399 sc->sc_mii.mii_statchg = tlp_mii_statchg;
2400 ifmedia_init(&sc->sc_mii.mii_media, 0, tlp_mediachange,
2401 tlp_mediastatus);
2402 mii_phy_probe(&sc->sc_dev, &sc->sc_mii, 0xffffffff);
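	/*
	 * mii_phy_probe() above looks for PHYs (the 0xffffffff argument
	 * presumably leaves the reported capability mask unrestricted).
	 * If none answered, advertise only a ``none'' media; otherwise
	 * default to autonegotiation.
	 */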
2403 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
2404 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
2405 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
2406 } else {
2407 sc->sc_flags |= TULIPF_HAS_MII;
2408 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
2409 }
2410 }
2411
2412 /*
2413 * Lite-On PNIC media switch. Must handle MII or internal NWAY.
2414 */
2415 void tlp_pnic_tmsw_init __P((struct tulip_softc *));
2416 void tlp_pnic_tmsw_get __P((struct tulip_softc *, struct ifmediareq *));
2417 int tlp_pnic_tmsw_set __P((struct tulip_softc *));
2418
2419 const struct tulip_mediasw tlp_pnic_mediasw = {
2420 tlp_pnic_tmsw_init, tlp_pnic_tmsw_get, tlp_pnic_tmsw_set
2421 };
2422
2423 void
2424 tlp_pnic_tmsw_init(sc)
2425 struct tulip_softc *sc;
2426 {
2427 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2428
2429 sc->sc_mii.mii_ifp = ifp;
2430 sc->sc_mii.mii_readreg = tlp_pnic_mii_readreg;
2431 sc->sc_mii.mii_writereg = tlp_pnic_mii_writereg;
2432 sc->sc_mii.mii_statchg = tlp_mii_statchg;
2433 ifmedia_init(&sc->sc_mii.mii_media, 0, tlp_mediachange,
2434 tlp_mediastatus);
2435 mii_phy_probe(&sc->sc_dev, &sc->sc_mii, 0xffffffff);
2436 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
2437 /* XXX USE INTERNAL NWAY! */
2438 printf("%s: no support for PNIC NWAY yet\n",
2439 sc->sc_dev.dv_xname);
2440 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
2441 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
2442 } else {
2443 sc->sc_flags |= TULIPF_HAS_MII;
2444 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
2445 }
2446 }
2447
2448 void
2449 tlp_pnic_tmsw_get(sc, ifmr)
2450 struct tulip_softc *sc;
2451 struct ifmediareq *ifmr;
2452 {
2453
2454 if (sc->sc_flags & TULIPF_HAS_MII)
2455 tlp_mii_getmedia(sc, ifmr);
2456 else {
2457 /* XXX CHECK INTERNAL NWAY! */
2458 ifmr->ifm_status = 0;
2459 ifmr->ifm_active = IFM_ETHER|IFM_NONE;
2460 }
2461 }
2462
2463 int
2464 tlp_pnic_tmsw_set(sc)
2465 struct tulip_softc *sc;
2466 {
2467
2468 if (sc->sc_flags & TULIPF_HAS_MII)
2469 return (tlp_mii_setmedia(sc));
2470
2471 /* XXX USE INTERNAL NWAY! */
2472 return (EIO);
2473 }
2474