aic6915.c revision 1.20 1 /* $NetBSD: aic6915.c,v 1.20 2008/01/19 22:10:16 dyoung Exp $ */
2
3 /*-
4 * Copyright (c) 2001 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Jason R. Thorpe.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement:
20 * This product includes software developed by the NetBSD
21 * Foundation, Inc. and its contributors.
22 * 4. Neither the name of The NetBSD Foundation nor the names of its
23 * contributors may be used to endorse or promote products derived
24 * from this software without specific prior written permission.
25 *
26 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
27 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
30 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36 * POSSIBILITY OF SUCH DAMAGE.
37 */
38
39 /*
40 * Device driver for the Adaptec AIC-6915 (``Starfire'')
41 * 10/100 Ethernet controller.
42 */
43
44 #include <sys/cdefs.h>
45 __KERNEL_RCSID(0, "$NetBSD: aic6915.c,v 1.20 2008/01/19 22:10:16 dyoung Exp $");
46
47 #include "bpfilter.h"
48
49 #include <sys/param.h>
50 #include <sys/systm.h>
51 #include <sys/callout.h>
52 #include <sys/mbuf.h>
53 #include <sys/malloc.h>
54 #include <sys/kernel.h>
55 #include <sys/socket.h>
56 #include <sys/ioctl.h>
57 #include <sys/errno.h>
58 #include <sys/device.h>
59
60 #include <uvm/uvm_extern.h>
61
62 #include <net/if.h>
63 #include <net/if_dl.h>
64 #include <net/if_media.h>
65 #include <net/if_ether.h>
66
67 #if NBPFILTER > 0
68 #include <net/bpf.h>
69 #endif
70
71 #include <sys/bus.h>
72 #include <sys/intr.h>
73
74 #include <dev/mii/miivar.h>
75
76 #include <dev/ic/aic6915reg.h>
77 #include <dev/ic/aic6915var.h>
78
/* ifnet interface functions. */
static void sf_start(struct ifnet *);
static void sf_watchdog(struct ifnet *);
static int sf_ioctl(struct ifnet *, u_long, void *);
static int sf_init(struct ifnet *);
static void sf_stop(struct ifnet *, int);

/* Shutdown hook, registered at attach time. */
static void sf_shutdown(void *);

/* Interrupt service helpers. */
static void sf_txintr(struct sf_softc *);
static void sf_rxintr(struct sf_softc *);
static void sf_stats_update(struct sf_softc *);

/* Hardware reset, buffer management, EEPROM, and filter helpers. */
static void sf_reset(struct sf_softc *);
static void sf_macreset(struct sf_softc *);
static void sf_rxdrain(struct sf_softc *);
static int sf_add_rxbuf(struct sf_softc *, int);
static uint8_t sf_read_eeprom(struct sf_softc *, int);
static void sf_set_filter(struct sf_softc *);

/* MII bit-bang access and status-change callbacks. */
static int sf_mii_read(struct device *, int, int);
static void sf_mii_write(struct device *, int, int, int);
static void sf_mii_statchg(struct device *);

/* One-second timer (MII tick + statistics harvest). */
static void sf_tick(void *);
103
/*
 * Access the chip's functional registers through sc_sh_func.  This
 * handle is either the whole base handle (I/O mapped) or a subregion
 * carved out at attach time (memory mapped), so no indirect-access
 * dance is needed here.
 */
#define sf_funcreg_read(sc, reg) \
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh_func, (reg))
#define sf_funcreg_write(sc, reg, val) \
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh_func, (reg), (val))
108
109 static inline uint32_t
110 sf_reg_read(struct sf_softc *sc, bus_addr_t reg)
111 {
112
113 if (__predict_false(sc->sc_iomapped)) {
114 bus_space_write_4(sc->sc_st, sc->sc_sh, SF_IndirectIoAccess,
115 reg);
116 return (bus_space_read_4(sc->sc_st, sc->sc_sh,
117 SF_IndirectIoDataPort));
118 }
119
120 return (bus_space_read_4(sc->sc_st, sc->sc_sh, reg));
121 }
122
123 static inline void
124 sf_reg_write(struct sf_softc *sc, bus_addr_t reg, uint32_t val)
125 {
126
127 if (__predict_false(sc->sc_iomapped)) {
128 bus_space_write_4(sc->sc_st, sc->sc_sh, SF_IndirectIoAccess,
129 reg);
130 bus_space_write_4(sc->sc_st, sc->sc_sh, SF_IndirectIoDataPort,
131 val);
132 return;
133 }
134
135 bus_space_write_4(sc->sc_st, sc->sc_sh, reg, val);
136 }
137
/*
 * Access the chip's general registers, which live SF_GENREG_OFFSET
 * past the base of the register space.  These go through
 * sf_reg_read/sf_reg_write so the indirect path is used when the
 * device is I/O mapped.
 */
#define sf_genreg_read(sc, reg) \
	sf_reg_read((sc), (reg) + SF_GENREG_OFFSET)
#define sf_genreg_write(sc, reg, val) \
	sf_reg_write((sc), (reg) + SF_GENREG_OFFSET, (val))
142
/*
 * sf_attach:
 *
 *	Attach a Starfire interface to the system.  Called by the bus
 *	front-end once sc_st/sc_sh/sc_dmat/sc_iomapped have been set
 *	up.  Allocates DMA-able control data and per-packet DMA maps,
 *	reads the station address from the EEPROM, probes the MII,
 *	and attaches the ifnet.  On failure, resources already
 *	allocated are released via the fail_* labels below.
 */
void
sf_attach(struct sf_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	int i, rseg, error;
	bus_dma_segment_t seg;
	u_int8_t enaddr[ETHER_ADDR_LEN];

	callout_init(&sc->sc_tick_callout, 0);

	/*
	 * If we're I/O mapped, the functional register handle is
	 * the same as the base handle. If we're memory mapped,
	 * carve off a chunk of the register space for the functional
	 * registers, to save on arithmetic later.
	 */
	if (sc->sc_iomapped)
		sc->sc_sh_func = sc->sc_sh;
	else {
		if ((error = bus_space_subregion(sc->sc_st, sc->sc_sh,
		    SF_GENREG_OFFSET, SF_FUNCREG_SIZE, &sc->sc_sh_func)) != 0) {
			printf("%s: unable to sub-region functional "
			    "registers, error = %d\n", sc->sc_dev.dv_xname,
			    error);
			return;
		}
	}

	/*
	 * Initialize the transmit threshold for this interface. The
	 * manual describes the default as 4 * 16 bytes. We start out
	 * at 10 * 16 bytes, to avoid a bunch of initial underruns on
	 * several platforms.
	 */
	sc->sc_txthresh = 10;

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat,
	    sizeof(struct sf_control_data), PAGE_SIZE, 0, &seg, 1, &rseg,
	    BUS_DMA_NOWAIT)) != 0) {
		printf("%s: unable to allocate control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	    sizeof(struct sf_control_data), (void **)&sc->sc_control_data,
	    BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
		printf("%s: unable to map control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat,
	    sizeof(struct sf_control_data), 1,
	    sizeof(struct sf_control_data), 0, BUS_DMA_NOWAIT,
	    &sc->sc_cddmamap)) != 0) {
		printf("%s: unable to create control data DMA map, "
		    "error = %d\n", sc->sc_dev.dv_xname, error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct sf_control_data), NULL,
	    BUS_DMA_NOWAIT)) != 0) {
		printf("%s: unable to load control data DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_3;
	}

	/*
	 * Create the transmit buffer DMA maps.
	 */
	for (i = 0; i < SF_NTXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    SF_NTXFRAGS, MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &sc->sc_txsoft[i].ds_dmamap)) != 0) {
			printf("%s: unable to create tx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_4;
		}
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < SF_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &sc->sc_rxsoft[i].ds_dmamap)) != 0) {
			printf("%s: unable to create rx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_5;
		}
	}

	/*
	 * Reset the chip to a known state.
	 */
	sf_reset(sc);

	/*
	 * Read the Ethernet address from the EEPROM.
	 * The bytes are stored in reverse order starting at offset 15,
	 * so walk the EEPROM from the high offset down.
	 */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		enaddr[i] = sf_read_eeprom(sc, (15 + (ETHER_ADDR_LEN - 1)) - i);

	printf("%s: Ethernet address %s\n", sc->sc_dev.dv_xname,
	    ether_sprintf(enaddr));

	if (sf_funcreg_read(sc, SF_PciDeviceConfig) & PDC_System64)
		printf("%s: 64-bit PCI slot detected\n", sc->sc_dev.dv_xname);

	/*
	 * Initialize our media structures and probe the MII.
	 */
	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = sf_mii_read;
	sc->sc_mii.mii_writereg = sf_mii_write;
	sc->sc_mii.mii_statchg = sf_mii_statchg;
	sc->sc_ethercom.ec_mii = &sc->sc_mii;
	ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, ether_mediachange,
	    ether_mediastatus);
	mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);
	/* If no PHY was found, fall back to a manual "none" medium. */
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);

	strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = sf_ioctl;
	ifp->if_start = sf_start;
	ifp->if_watchdog = sf_watchdog;
	ifp->if_init = sf_init;
	ifp->if_stop = sf_stop;
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, enaddr);

	/*
	 * Make sure the interface is shutdown during reboot.
	 */
	sc->sc_sdhook = shutdownhook_establish(sf_shutdown, sc);
	if (sc->sc_sdhook == NULL)
		printf("%s: WARNING: unable to establish shutdown hook\n",
		    sc->sc_dev.dv_xname);
	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt. Do this in reverse order and fall through.
	 */
 fail_5:
	for (i = 0; i < SF_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].ds_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_rxsoft[i].ds_dmamap);
	}
 fail_4:
	for (i = 0; i < SF_NTXDESC; i++) {
		if (sc->sc_txsoft[i].ds_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_txsoft[i].ds_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (void *) sc->sc_control_data,
	    sizeof(struct sf_control_data));
 fail_1:
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
 fail_0:
	return;
}
334
335 /*
336 * sf_shutdown:
337 *
338 * Shutdown hook -- make sure the interface is stopped at reboot.
339 */
340 static void
341 sf_shutdown(void *arg)
342 {
343 struct sf_softc *sc = arg;
344
345 sf_stop(&sc->sc_ethercom.ec_if, 1);
346 }
347
/*
 * sf_start: [ifnet interface function]
 *
 *	Start packet transmission on the interface.  Dequeues packets
 *	from the send queue, loads them into transmit descriptors,
 *	and hands the new producer index to the chip.  Packets that
 *	cannot be DMA-mapped directly are copied into a fresh mbuf
 *	(cluster) first.  Must be called at splnet().
 */
static void
sf_start(struct ifnet *ifp)
{
	struct sf_softc *sc = ifp->if_softc;
	struct mbuf *m0, *m;
	struct sf_txdesc0 *txd;
	struct sf_descsoft *ds;
	bus_dmamap_t dmamap;
	int error, producer, last = -1, opending, seg;

	/*
	 * Remember the previous number of pending transmits.
	 */
	opending = sc->sc_txpending;

	/*
	 * Find out where we're sitting.
	 */
	producer = SF_TXDINDEX_TO_HOST(
	    TDQPI_HiPrTxProducerIndex_get(
	    sf_funcreg_read(sc, SF_TxDescQueueProducerIndex)));

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors. Leave a blank one at the end for sanity's sake.
	 */
	while (sc->sc_txpending < (SF_NTXDESC - 1)) {
		/*
		 * Grab a packet off the queue.  POLL (not DEQUEUE) so
		 * the packet stays queued if we bail out below.
		 */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;
		m = NULL;

		/*
		 * Get the transmit descriptor.
		 */
		txd = &sc->sc_txdescs[producer];
		ds = &sc->sc_txsoft[producer];
		dmamap = ds->ds_dmamap;

		/*
		 * Load the DMA map. If this fails, the packet either
		 * didn't fit in the allotted number of frags, or we were
		 * short on resources. In this case, we'll copy and try
		 * again.
		 */
		if (bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_WRITE|BUS_DMA_NOWAIT) != 0) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				printf("%s: unable to allocate Tx mbuf\n",
				    sc->sc_dev.dv_xname);
				break;
			}
			/* Need a cluster if the packet won't fit in MHLEN. */
			if (m0->m_pkthdr.len > MHLEN) {
				MCLGET(m, M_DONTWAIT);
				if ((m->m_flags & M_EXT) == 0) {
					printf("%s: unable to allocate Tx "
					    "cluster\n", sc->sc_dev.dv_xname);
					m_freem(m);
					break;
				}
			}
			/* Flatten the chain into the new buffer. */
			m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, void *));
			m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
			error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
			    m, BUS_DMA_WRITE|BUS_DMA_NOWAIT);
			if (error) {
				printf("%s: unable to load Tx buffer, "
				    "error = %d\n", sc->sc_dev.dv_xname, error);
				break;
			}
		}

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */
		IFQ_DEQUEUE(&ifp->if_snd, m0);
		/* If we copied, transmit the copy and free the original. */
		if (m != NULL) {
			m_freem(m0);
			m0 = m;
		}

		/* Initialize the descriptor. */
		txd->td_word0 =
		    htole32(TD_W0_ID | TD_W0_CRCEN | m0->m_pkthdr.len);
		if (producer == (SF_NTXDESC - 1))
			txd->td_word0 |= TD_W0_END;
		txd->td_word1 = htole32(dmamap->dm_nsegs);
		/* Fill in one fragment entry per DMA segment. */
		for (seg = 0; seg < dmamap->dm_nsegs; seg++) {
			txd->td_frags[seg].fr_addr =
			    htole32(dmamap->dm_segs[seg].ds_addr);
			txd->td_frags[seg].fr_len =
			    htole32(dmamap->dm_segs[seg].ds_len);
		}

		/* Sync the descriptor and the DMA map. */
		SF_CDTXDSYNC(sc, producer, BUS_DMASYNC_PREWRITE);
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		/*
		 * Store a pointer to the packet so we can free it later.
		 */
		ds->ds_mbuf = m0;

		/* Advance the Tx pointer. */
		sc->sc_txpending++;
		last = producer;
		producer = SF_NEXTTX(producer);

#if NBPFILTER > 0
		/*
		 * Pass the packet to any BPF listeners.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m0);
#endif
	}

	if (sc->sc_txpending == (SF_NTXDESC - 1)) {
		/* No more slots left; notify upper layer. */
		ifp->if_flags |= IFF_OACTIVE;
	}

	if (sc->sc_txpending != opending) {
		KASSERT(last != -1);
		/*
		 * We enqueued packets. Cause a transmit interrupt to
		 * happen on the last packet we enqueued, and give the
		 * new descriptors to the chip by writing the new
		 * producer index.
		 */
		sc->sc_txdescs[last].td_word0 |= TD_W0_INTR;
		SF_CDTXDSYNC(sc, last, BUS_DMASYNC_PREWRITE);

		sf_funcreg_write(sc, SF_TxDescQueueProducerIndex,
		    TDQPI_HiPrTxProducerIndex(SF_TXDINDEX_TO_CHIP(producer)));

		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}
499
500 /*
501 * sf_watchdog: [ifnet interface function]
502 *
503 * Watchdog timer handler.
504 */
505 static void
506 sf_watchdog(struct ifnet *ifp)
507 {
508 struct sf_softc *sc = ifp->if_softc;
509
510 printf("%s: device timeout\n", sc->sc_dev.dv_xname);
511 ifp->if_oerrors++;
512
513 (void) sf_init(ifp);
514
515 /* Try to get more packets going. */
516 sf_start(ifp);
517 }
518
519 /*
520 * sf_ioctl: [ifnet interface function]
521 *
522 * Handle control requests from the operator.
523 */
524 static int
525 sf_ioctl(struct ifnet *ifp, u_long cmd, void *data)
526 {
527 struct sf_softc *sc = ifp->if_softc;
528 int s, error;
529
530 s = splnet();
531
532 error = ether_ioctl(ifp, cmd, data);
533 if (error == ENETRESET) {
534 /*
535 * Multicast list has changed; set the hardware filter
536 * accordingly.
537 */
538 if (ifp->if_flags & IFF_RUNNING)
539 sf_set_filter(sc);
540 error = 0;
541 }
542
543 /* Try to get more packets going. */
544 sf_start(ifp);
545
546 splx(s);
547 return (error);
548 }
549
/*
 * sf_intr:
 *
 *	Interrupt service routine.  Loops reading the interrupt status
 *	register (the read clears the latched conditions) and dispatches
 *	to the Rx/Tx helpers and abnormal-event handling until no more
 *	work is indicated.  Returns non-zero if the interrupt was ours.
 */
int
sf_intr(void *arg)
{
	struct sf_softc *sc = arg;
	uint32_t isr;
	int handled = 0, wantinit = 0;

	for (;;) {
		/* Reading clears all interrupts we're interested in. */
		isr = sf_funcreg_read(sc, SF_InterruptStatus);
		/*
		 * NOTE(review): IS_PCIPadInt appears to be the "interrupt
		 * pending" summary bit here -- confirm against the
		 * AIC-6915 register definitions.
		 */
		if ((isr & IS_PCIPadInt) == 0)
			break;

		handled = 1;

		/* Handle receive interrupts. */
		if (isr & IS_RxQ1DoneInt)
			sf_rxintr(sc);

		/* Handle transmit completion interrupts. */
		if (isr & (IS_TxDmaDoneInt|IS_TxQueueDoneInt))
			sf_txintr(sc);

		/* Handle abnormal interrupts. */
		if (isr & IS_AbnormalInterrupt) {
			/* Statistics counters wrapped; harvest them. */
			if (isr & IS_StatisticWrapInt)
				sf_stats_update(sc);

			/* DMA errors: schedule a full reinitialization. */
			if (isr & IS_DmaErrInt) {
				wantinit = 1;
				printf("%s: WARNING: DMA error\n",
				    sc->sc_dev.dv_xname);
			}

			/*
			 * Transmit FIFO underruns: bump the transmit
			 * threshold (units of 16 bytes, capped at 0xff)
			 * and reprogram both threshold registers.
			 */
			if (isr & IS_TxDataLowInt) {
				if (sc->sc_txthresh < 0xff)
					sc->sc_txthresh++;
				printf("%s: transmit FIFO underrun, new "
				    "threshold: %d bytes\n",
				    sc->sc_dev.dv_xname,
				    sc->sc_txthresh * 16);
				sf_funcreg_write(sc, SF_TransmitFrameCSR,
				    sc->sc_TransmitFrameCSR |
				    TFCSR_TransmitThreshold(sc->sc_txthresh));
				sf_funcreg_write(sc, SF_TxDescQueueCtrl,
				    sc->sc_TxDescQueueCtrl |
				    TDQC_TxHighPriorityFifoThreshold(
				    sc->sc_txthresh));
			}
		}
	}

	if (handled) {
		/* Reset the interface, if necessary. */
		if (wantinit)
			sf_init(&sc->sc_ethercom.ec_if);

		/* Try and get more packets going. */
		sf_start(&sc->sc_ethercom.ec_if);
	}

	return (handled);
}
621
/*
 * sf_txintr:
 *
 *	Helper -- handle transmit completion interrupts.  Walks the Tx
 *	completion ring from the consumer index to the chip's producer
 *	index, releasing the mbufs and DMA maps of completed packets,
 *	then writes the new consumer index back and re-checks for
 *	completions that arrived meanwhile (the try_again loop).
 */
static void
sf_txintr(struct sf_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct sf_descsoft *ds;
	uint32_t cqci, tcd;
	int consumer, producer, txidx;

 try_again:
	cqci = sf_funcreg_read(sc, SF_CompletionQueueConsumerIndex);

	consumer = CQCI_TxCompletionConsumerIndex_get(cqci);
	producer = CQPI_TxCompletionProducerIndex_get(
	    sf_funcreg_read(sc, SF_CompletionQueueProducerIndex));

	/* Nothing (more) to do. */
	if (consumer == producer)
		return;

	/* Descriptors were freed; the interface is no longer choked. */
	ifp->if_flags &= ~IFF_OACTIVE;

	while (consumer != producer) {
		SF_CDTXCSYNC(sc, consumer, BUS_DMASYNC_POSTREAD);
		tcd = le32toh(sc->sc_txcomp[consumer].tcd_word0);

		txidx = SF_TCD_INDEX_TO_HOST(TCD_INDEX(tcd));
#ifdef DIAGNOSTIC
		if ((tcd & TCD_PR) == 0)
			printf("%s: Tx queue mismatch, index %d\n",
			    sc->sc_dev.dv_xname, txidx);
#endif
		/*
		 * NOTE: stats are updated later. We're just
		 * releasing packets that have been DMA'd to
		 * the chip.
		 */
		ds = &sc->sc_txsoft[txidx];
		SF_CDTXDSYNC(sc, txidx, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap,
		    0, ds->ds_dmamap->dm_mapsize,
		    BUS_DMASYNC_POSTWRITE);
		m_freem(ds->ds_mbuf);
		ds->ds_mbuf = NULL;

		consumer = SF_NEXTTCD(consumer);
		sc->sc_txpending--;
	}

	/* XXXJRT -- should be KDASSERT() */
	KASSERT(sc->sc_txpending >= 0);

	/* If all packets are done, cancel the watchdog timer. */
	if (sc->sc_txpending == 0)
		ifp->if_timer = 0;

	/* Update the consumer index. */
	sf_funcreg_write(sc, SF_CompletionQueueConsumerIndex,
	    (cqci & ~CQCI_TxCompletionConsumerIndex(0x7ff)) |
	    CQCI_TxCompletionConsumerIndex(consumer));

	/* Double check for new completions. */
	goto try_again;
}
689
/*
 * sf_rxintr:
 *
 *	Helper -- handle receive interrupts.  Walks the Rx completion
 *	ring from the consumer index to the chip's producer index,
 *	passing good packets up the stack and recycling buffers for
 *	bad ones, then updates the chip's consumer/buffer-producer
 *	pointers and re-checks for new completions (try_again loop).
 */
static void
sf_rxintr(struct sf_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct sf_descsoft *ds;
	struct sf_rcd_full *rcd;
	struct mbuf *m;
	uint32_t cqci, word0;
	int consumer, producer, bufproducer, rxidx, len;

 try_again:
	cqci = sf_funcreg_read(sc, SF_CompletionQueueConsumerIndex);

	consumer = CQCI_RxCompletionQ1ConsumerIndex_get(cqci);
	producer = CQPI_RxCompletionQ1ProducerIndex_get(
	    sf_funcreg_read(sc, SF_CompletionQueueProducerIndex));
	bufproducer = RXQ1P_RxDescQ1Producer_get(
	    sf_funcreg_read(sc, SF_RxDescQueue1Ptrs));

	/* Nothing (more) to do. */
	if (consumer == producer)
		return;

	while (consumer != producer) {
		rcd = &sc->sc_rxcomp[consumer];
		/* Post-sync to read, then pre-sync to hand back to chip. */
		SF_CDRXCSYNC(sc, consumer,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
		SF_CDRXCSYNC(sc, consumer,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		word0 = le32toh(rcd->rcd_word0);
		rxidx = RCD_W0_EndIndex(word0);

		ds = &sc->sc_rxsoft[rxidx];

		consumer = SF_NEXTRCD(consumer);
		bufproducer = SF_NEXTRX(bufproducer);

		/* Receive error: recycle the buffer and move on. */
		if ((word0 & RCD_W0_OK) == 0) {
			SF_INIT_RXDESC(sc, rxidx);
			continue;
		}

		bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
		    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

		/*
		 * No errors; receive the packet. Note that we have
		 * configured the Starfire to NOT transfer the CRC
		 * with the packet.
		 */
		len = RCD_W0_Length(word0);

#ifdef __NO_STRICT_ALIGNMENT
		/*
		 * Allocate a new mbuf cluster. If that fails, we are
		 * out of memory, and must drop the packet and recycle
		 * the buffer that's already attached to this descriptor.
		 */
		m = ds->ds_mbuf;
		if (sf_add_rxbuf(sc, rxidx) != 0) {
			ifp->if_ierrors++;
			SF_INIT_RXDESC(sc, rxidx);
			bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
			    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
			continue;
		}
#else
		/*
		 * The Starfire's receive buffer must be 4-byte aligned.
		 * But this means that the data after the Ethernet header
		 * is misaligned. We must allocate a new buffer and
		 * copy the data, shifted forward 2 bytes.
		 */
		MGETHDR(m, M_DONTWAIT, MT_DATA);
		if (m == NULL) {
 dropit:
			ifp->if_ierrors++;
			SF_INIT_RXDESC(sc, rxidx);
			bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
			    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
			continue;
		}
		if (len > (MHLEN - 2)) {
			MCLGET(m, M_DONTWAIT);
			if ((m->m_flags & M_EXT) == 0) {
				m_freem(m);
				goto dropit;
			}
		}
		/* Shift by 2 so the payload after the header is aligned. */
		m->m_data += 2;

		/*
		 * Note that we use cluster for incoming frames, so the
		 * buffer is virtually contiguous.
		 */
		memcpy(mtod(m, void *), mtod(ds->ds_mbuf, void *), len);

		/* Allow the receive descriptor to continue using its mbuf. */
		SF_INIT_RXDESC(sc, rxidx);
		bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
		    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
#endif /* __NO_STRICT_ALIGNMENT */

		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = len;

#if NBPFILTER > 0
		/*
		 * Pass this up to any BPF listeners.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif /* NBPFILTER > 0 */

		/* Pass it on. */
		(*ifp->if_input)(ifp, m);
	}

	/* Update the chip's pointers. */
	sf_funcreg_write(sc, SF_CompletionQueueConsumerIndex,
	    (cqci & ~CQCI_RxCompletionQ1ConsumerIndex(0x7ff)) |
	    CQCI_RxCompletionQ1ConsumerIndex(consumer));
	sf_funcreg_write(sc, SF_RxDescQueue1Ptrs,
	    RXQ1P_RxDescQ1Producer(bufproducer));

	/* Double-check for any new completions. */
	goto try_again;
}
823
824 /*
825 * sf_tick:
826 *
827 * One second timer, used to tick the MII and update stats.
828 */
829 static void
830 sf_tick(void *arg)
831 {
832 struct sf_softc *sc = arg;
833 int s;
834
835 s = splnet();
836 mii_tick(&sc->sc_mii);
837 sf_stats_update(sc);
838 splx(s);
839
840 callout_reset(&sc->sc_tick_callout, hz, sf_tick, sc);
841 }
842
/*
 * sf_stats_update:
 *
 *	Read the statistics counters from the chip, clearing each one
 *	as it is read, and fold them into the interface counters.
 */
static void
sf_stats_update(struct sf_softc *sc)
{
	struct sf_stats stats;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint32_t *p;
	u_int i;

	/*
	 * Walk `stats' as a flat array of uint32_t, starting at
	 * TransmitOKFrames.  NOTE(review): this assumes struct sf_stats
	 * consists solely of uint32_t members with TransmitOKFrames
	 * first and no padding -- verify against aic6915var.h if the
	 * structure ever changes.
	 */
	p = &stats.TransmitOKFrames;
	for (i = 0; i < (sizeof(stats) / sizeof(uint32_t)); i++) {
		*p++ = sf_genreg_read(sc,
		    SF_STATS_BASE + (i * sizeof(uint32_t)));
		/* Clear the on-chip counter so deltas accumulate. */
		sf_genreg_write(sc, SF_STATS_BASE + (i * sizeof(uint32_t)), 0);
	}

	ifp->if_opackets += stats.TransmitOKFrames;

	ifp->if_collisions += stats.SingleCollisionFrames +
	    stats.MultipleCollisionFrames;

	ifp->if_oerrors += stats.TransmitAbortDueToExcessiveCollisions +
	    stats.TransmitAbortDueToExcessingDeferral +
	    stats.FramesLostDueToInternalTransmitErrors;

	ifp->if_ipackets += stats.ReceiveOKFrames;

	ifp->if_ierrors += stats.ReceiveCRCErrors + stats.AlignmentErrors +
	    stats.ReceiveFramesTooLong + stats.ReceiveFramesTooShort +
	    stats.ReceiveFramesJabbersError +
	    stats.FramesLostDueToInternalReceiveErrors;
}
879
880 /*
881 * sf_reset:
882 *
883 * Perform a soft reset on the Starfire.
884 */
885 static void
886 sf_reset(struct sf_softc *sc)
887 {
888 int i;
889
890 sf_funcreg_write(sc, SF_GeneralEthernetCtrl, 0);
891
892 sf_macreset(sc);
893
894 sf_funcreg_write(sc, SF_PciDeviceConfig, PDC_SoftReset);
895 for (i = 0; i < 1000; i++) {
896 delay(10);
897 if ((sf_funcreg_read(sc, SF_PciDeviceConfig) &
898 PDC_SoftReset) == 0)
899 break;
900 }
901
902 if (i == 1000) {
903 printf("%s: reset failed to complete\n", sc->sc_dev.dv_xname);
904 sf_funcreg_write(sc, SF_PciDeviceConfig, 0);
905 }
906
907 delay(1000);
908 }
909
910 /*
911 * sf_macreset:
912 *
913 * Reset the MAC portion of the Starfire.
914 */
915 static void
916 sf_macreset(struct sf_softc *sc)
917 {
918
919 sf_genreg_write(sc, SF_MacConfig1, sc->sc_MacConfig1 | MC1_SoftRst);
920 delay(1000);
921 sf_genreg_write(sc, SF_MacConfig1, sc->sc_MacConfig1);
922 }
923
/*
 * sf_init: [ifnet interface function]
 *
 *	Initialize the interface. Must be called at splnet().
 *	Resets the chip, programs all descriptor/completion rings and
 *	thresholds, sets the receive filter and media, enables
 *	interrupts, and starts the transmit/receive engines and the
 *	one-second tick.  Returns 0 on success, errno on failure.
 */
static int
sf_init(struct ifnet *ifp)
{
	struct sf_softc *sc = ifp->if_softc;
	struct sf_descsoft *ds;
	int error = 0;
	u_int i;

	/*
	 * Cancel any pending I/O.
	 */
	sf_stop(ifp, 0);

	/*
	 * Reset the Starfire to a known state.
	 */
	sf_reset(sc);

	/* Clear the stat counters. */
	for (i = 0; i < sizeof(struct sf_stats); i += sizeof(uint32_t))
		sf_genreg_write(sc, SF_STATS_BASE + i, 0);

	/*
	 * Initialize the transmit descriptor ring.  Only the
	 * high-priority queue is used; the low-priority queue base
	 * is zeroed out.
	 */
	memset(sc->sc_txdescs, 0, sizeof(sc->sc_txdescs));
	sf_funcreg_write(sc, SF_TxDescQueueHighAddr, 0);
	sf_funcreg_write(sc, SF_HiPrTxDescQueueBaseAddr, SF_CDTXDADDR(sc, 0));
	sf_funcreg_write(sc, SF_LoPrTxDescQueueBaseAddr, 0);

	/*
	 * Initialize the transmit completion ring.
	 */
	for (i = 0; i < SF_NTCD; i++) {
		sc->sc_txcomp[i].tcd_word0 = TCD_DMA_ID;
		SF_CDTXCSYNC(sc, i, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	}
	sf_funcreg_write(sc, SF_CompletionQueueHighAddr, 0);
	sf_funcreg_write(sc, SF_TxCompletionQueueCtrl, SF_CDTXCADDR(sc, 0));

	/*
	 * Initialize the receive descriptor ring, allocating cluster
	 * buffers for any slots that do not already have one.
	 */
	for (i = 0; i < SF_NRXDESC; i++) {
		ds = &sc->sc_rxsoft[i];
		if (ds->ds_mbuf == NULL) {
			if ((error = sf_add_rxbuf(sc, i)) != 0) {
				printf("%s: unable to allocate or map rx "
				    "buffer %d, error = %d\n",
				    sc->sc_dev.dv_xname, i, error);
				/*
				 * XXX Should attempt to run with fewer receive
				 * XXX buffers instead of just failing.
				 */
				sf_rxdrain(sc);
				goto out;
			}
		} else
			SF_INIT_RXDESC(sc, i);
	}
	sf_funcreg_write(sc, SF_RxDescQueueHighAddress, 0);
	sf_funcreg_write(sc, SF_RxDescQueue1LowAddress, SF_CDRXDADDR(sc, 0));
	sf_funcreg_write(sc, SF_RxDescQueue2LowAddress, 0);

	/*
	 * Initialize the receive completion ring.  Queue 2 is unused.
	 */
	for (i = 0; i < SF_NRCD; i++) {
		sc->sc_rxcomp[i].rcd_word0 = RCD_W0_ID;
		sc->sc_rxcomp[i].rcd_word1 = 0;
		sc->sc_rxcomp[i].rcd_word2 = 0;
		sc->sc_rxcomp[i].rcd_timestamp = 0;
		SF_CDRXCSYNC(sc, i, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	}
	sf_funcreg_write(sc, SF_RxCompletionQueue1Ctrl, SF_CDRXCADDR(sc, 0) |
	    RCQ1C_RxCompletionQ1Type(3));
	sf_funcreg_write(sc, SF_RxCompletionQueue2Ctrl, 0);

	/*
	 * Initialize the Tx CSR.
	 */
	sc->sc_TransmitFrameCSR = 0;
	sf_funcreg_write(sc, SF_TransmitFrameCSR,
	    sc->sc_TransmitFrameCSR |
	    TFCSR_TransmitThreshold(sc->sc_txthresh));

	/*
	 * Initialize the Tx descriptor control register.
	 */
	sc->sc_TxDescQueueCtrl = TDQC_SkipLength(0) |
	    TDQC_TxDmaBurstSize(4) |	/* default */
	    TDQC_MinFrameSpacing(3) |	/* 128 bytes */
	    TDQC_TxDescType(0);
	sf_funcreg_write(sc, SF_TxDescQueueCtrl,
	    sc->sc_TxDescQueueCtrl |
	    TDQC_TxHighPriorityFifoThreshold(sc->sc_txthresh));

	/*
	 * Initialize the Rx descriptor control registers.
	 */
	sf_funcreg_write(sc, SF_RxDescQueue1Ctrl,
	    RDQ1C_RxQ1BufferLength(MCLBYTES) |
	    RDQ1C_RxDescSpacing(0));
	sf_funcreg_write(sc, SF_RxDescQueue2Ctrl, 0);

	/*
	 * Initialize the Tx descriptor producer indices.
	 */
	sf_funcreg_write(sc, SF_TxDescQueueProducerIndex,
	    TDQPI_HiPrTxProducerIndex(0) |
	    TDQPI_LoPrTxProducerIndex(0));

	/*
	 * Initialize the Rx descriptor producer indices.
	 */
	sf_funcreg_write(sc, SF_RxDescQueue1Ptrs,
	    RXQ1P_RxDescQ1Producer(SF_NRXDESC - 1));
	sf_funcreg_write(sc, SF_RxDescQueue2Ptrs,
	    RXQ2P_RxDescQ2Producer(0));

	/*
	 * Initialize the Tx and Rx completion queue consumer indices.
	 */
	sf_funcreg_write(sc, SF_CompletionQueueConsumerIndex,
	    CQCI_TxCompletionConsumerIndex(0) |
	    CQCI_RxCompletionQ1ConsumerIndex(0));
	sf_funcreg_write(sc, SF_RxHiPrCompletionPtrs, 0);

	/*
	 * Initialize the Rx DMA control register.
	 */
	sf_funcreg_write(sc, SF_RxDmaCtrl,
	    RDC_RxHighPriorityThreshold(6) |	/* default */
	    RDC_RxBurstSize(4));		/* default */

	/*
	 * Set the receive filter.
	 */
	sc->sc_RxAddressFilteringCtl = 0;
	sf_set_filter(sc);

	/*
	 * Set MacConfig1. When we set the media, MacConfig1 will
	 * actually be written and the MAC part reset.
	 */
	sc->sc_MacConfig1 = MC1_PadEn;

	/*
	 * Set the media.
	 */
	if ((error = ether_mediachange(ifp)) != 0)
		goto out;

	/*
	 * Initialize the interrupt register.
	 */
	sc->sc_InterruptEn = IS_PCIPadInt | IS_RxQ1DoneInt |
	    IS_TxQueueDoneInt | IS_TxDmaDoneInt | IS_DmaErrInt |
	    IS_StatisticWrapInt;
	sf_funcreg_write(sc, SF_InterruptEn, sc->sc_InterruptEn);

	sf_funcreg_write(sc, SF_PciDeviceConfig, PDC_IntEnable |
	    PDC_PCIMstDmaEn | (1 << PDC_FifoThreshold_SHIFT));

	/*
	 * Start the transmit and receive processes.
	 */
	sf_funcreg_write(sc, SF_GeneralEthernetCtrl,
	    GEC_TxDmaEn|GEC_RxDmaEn|GEC_TransmitEn|GEC_ReceiveEn);

	/* Start the one second clock. */
	callout_reset(&sc->sc_tick_callout, hz, sf_tick, sc);

	/*
	 * Note that the interface is now running.
	 */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

 out:
	if (error) {
		ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
		ifp->if_timer = 0;
		printf("%s: interface not running\n", sc->sc_dev.dv_xname);
	}
	return (error);
}
1116
1117 /*
1118 * sf_rxdrain:
1119 *
1120 * Drain the receive queue.
1121 */
1122 static void
1123 sf_rxdrain(struct sf_softc *sc)
1124 {
1125 struct sf_descsoft *ds;
1126 int i;
1127
1128 for (i = 0; i < SF_NRXDESC; i++) {
1129 ds = &sc->sc_rxsoft[i];
1130 if (ds->ds_mbuf != NULL) {
1131 bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
1132 m_freem(ds->ds_mbuf);
1133 ds->ds_mbuf = NULL;
1134 }
1135 }
1136 }
1137
1138 /*
1139 * sf_stop: [ifnet interface function]
1140 *
1141 * Stop transmission on the interface.
1142 */
1143 static void
1144 sf_stop(struct ifnet *ifp, int disable)
1145 {
1146 struct sf_softc *sc = ifp->if_softc;
1147 struct sf_descsoft *ds;
1148 int i;
1149
1150 /* Stop the one second clock. */
1151 callout_stop(&sc->sc_tick_callout);
1152
1153 /* Down the MII. */
1154 mii_down(&sc->sc_mii);
1155
1156 /* Disable interrupts. */
1157 sf_funcreg_write(sc, SF_InterruptEn, 0);
1158
1159 /* Stop the transmit and receive processes. */
1160 sf_funcreg_write(sc, SF_GeneralEthernetCtrl, 0);
1161
1162 /*
1163 * Release any queued transmit buffers.
1164 */
1165 for (i = 0; i < SF_NTXDESC; i++) {
1166 ds = &sc->sc_txsoft[i];
1167 if (ds->ds_mbuf != NULL) {
1168 bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
1169 m_freem(ds->ds_mbuf);
1170 ds->ds_mbuf = NULL;
1171 }
1172 }
1173
1174 if (disable)
1175 sf_rxdrain(sc);
1176
1177 /*
1178 * Mark the interface down and cancel the watchdog timer.
1179 */
1180 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1181 ifp->if_timer = 0;
1182 }
1183
1184 /*
1185 * sf_read_eeprom:
1186 *
1187 * Read from the Starfire EEPROM.
1188 */
1189 static uint8_t
1190 sf_read_eeprom(struct sf_softc *sc, int offset)
1191 {
1192 uint32_t reg;
1193
1194 reg = sf_genreg_read(sc, SF_EEPROM_BASE + (offset & ~3));
1195
1196 return ((reg >> (8 * (offset & 3))) & 0xff);
1197 }
1198
1199 /*
1200 * sf_add_rxbuf:
1201 *
1202 * Add a receive buffer to the indicated descriptor.
1203 */
1204 static int
1205 sf_add_rxbuf(struct sf_softc *sc, int idx)
1206 {
1207 struct sf_descsoft *ds = &sc->sc_rxsoft[idx];
1208 struct mbuf *m;
1209 int error;
1210
1211 MGETHDR(m, M_DONTWAIT, MT_DATA);
1212 if (m == NULL)
1213 return (ENOBUFS);
1214
1215 MCLGET(m, M_DONTWAIT);
1216 if ((m->m_flags & M_EXT) == 0) {
1217 m_freem(m);
1218 return (ENOBUFS);
1219 }
1220
1221 if (ds->ds_mbuf != NULL)
1222 bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
1223
1224 ds->ds_mbuf = m;
1225
1226 error = bus_dmamap_load(sc->sc_dmat, ds->ds_dmamap,
1227 m->m_ext.ext_buf, m->m_ext.ext_size, NULL,
1228 BUS_DMA_READ|BUS_DMA_NOWAIT);
1229 if (error) {
1230 printf("%s: can't load rx DMA map %d, error = %d\n",
1231 sc->sc_dev.dv_xname, idx, error);
1232 panic("sf_add_rxbuf"); /* XXX */
1233 }
1234
1235 bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
1236 ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
1237
1238 SF_INIT_RXDESC(sc, idx);
1239
1240 return (0);
1241 }
1242
1243 static void
1244 sf_set_filter_perfect(struct sf_softc *sc, int slot, const uint8_t *enaddr)
1245 {
1246 uint32_t reg0, reg1, reg2;
1247
1248 reg0 = enaddr[5] | (enaddr[4] << 8);
1249 reg1 = enaddr[3] | (enaddr[2] << 8);
1250 reg2 = enaddr[1] | (enaddr[0] << 8);
1251
1252 sf_genreg_write(sc, SF_PERFECT_BASE + (slot * 0x10) + 0, reg0);
1253 sf_genreg_write(sc, SF_PERFECT_BASE + (slot * 0x10) + 4, reg1);
1254 sf_genreg_write(sc, SF_PERFECT_BASE + (slot * 0x10) + 8, reg2);
1255 }
1256
1257 static void
1258 sf_set_filter_hash(struct sf_softc *sc, uint8_t *enaddr)
1259 {
1260 uint32_t hash, slot, reg;
1261
1262 hash = ether_crc32_be(enaddr, ETHER_ADDR_LEN) >> 23;
1263 slot = hash >> 4;
1264
1265 reg = sf_genreg_read(sc, SF_HASH_BASE + (slot * 0x10));
1266 reg |= 1 << (hash & 0xf);
1267 sf_genreg_write(sc, SF_HASH_BASE + (slot * 0x10), reg);
1268 }
1269
1270 /*
1271 * sf_set_filter:
1272 *
1273 * Set the Starfire receive filter.
1274 */
1275 static void
1276 sf_set_filter(struct sf_softc *sc)
1277 {
1278 struct ethercom *ec = &sc->sc_ethercom;
1279 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1280 struct ether_multi *enm;
1281 struct ether_multistep step;
1282 int i;
1283
1284 /* Start by clearing the perfect and hash tables. */
1285 for (i = 0; i < SF_PERFECT_SIZE; i += sizeof(uint32_t))
1286 sf_genreg_write(sc, SF_PERFECT_BASE + i, 0);
1287
1288 for (i = 0; i < SF_HASH_SIZE; i += sizeof(uint32_t))
1289 sf_genreg_write(sc, SF_HASH_BASE + i, 0);
1290
1291 /*
1292 * Clear the perfect and hash mode bits.
1293 */
1294 sc->sc_RxAddressFilteringCtl &=
1295 ~(RAFC_PerfectFilteringMode(3) | RAFC_HashFilteringMode(3));
1296
1297 if (ifp->if_flags & IFF_BROADCAST)
1298 sc->sc_RxAddressFilteringCtl |= RAFC_PassBroadcast;
1299 else
1300 sc->sc_RxAddressFilteringCtl &= ~RAFC_PassBroadcast;
1301
1302 if (ifp->if_flags & IFF_PROMISC) {
1303 sc->sc_RxAddressFilteringCtl |= RAFC_PromiscuousMode;
1304 goto allmulti;
1305 } else
1306 sc->sc_RxAddressFilteringCtl &= ~RAFC_PromiscuousMode;
1307
1308 /*
1309 * Set normal perfect filtering mode.
1310 */
1311 sc->sc_RxAddressFilteringCtl |= RAFC_PerfectFilteringMode(1);
1312
1313 /*
1314 * First, write the station address to the perfect filter
1315 * table.
1316 */
1317 sf_set_filter_perfect(sc, 0, CLLADDR(ifp->if_sadl));
1318
1319 /*
1320 * Now set the hash bits for each multicast address in our
1321 * list.
1322 */
1323 ETHER_FIRST_MULTI(step, ec, enm);
1324 if (enm == NULL)
1325 goto done;
1326 while (enm != NULL) {
1327 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
1328 /*
1329 * We must listen to a range of multicast addresses.
1330 * For now, just accept all multicasts, rather than
1331 * trying to set only those filter bits needed to match
1332 * the range. (At this time, the only use of address
1333 * ranges is for IP multicast routing, for which the
1334 * range is big enough to require all bits set.)
1335 */
1336 goto allmulti;
1337 }
1338 sf_set_filter_hash(sc, enm->enm_addrlo);
1339 ETHER_NEXT_MULTI(step, enm);
1340 }
1341
1342 /*
1343 * Set "hash only multicast dest, match regardless of VLAN ID".
1344 */
1345 sc->sc_RxAddressFilteringCtl |= RAFC_HashFilteringMode(2);
1346 goto done;
1347
1348 allmulti:
1349 /*
1350 * XXX RAFC_PassMulticast is sub-optimal if using VLAN mode.
1351 */
1352 sc->sc_RxAddressFilteringCtl |= RAFC_PassMulticast;
1353 ifp->if_flags |= IFF_ALLMULTI;
1354
1355 done:
1356 sf_funcreg_write(sc, SF_RxAddressFilteringCtl,
1357 sc->sc_RxAddressFilteringCtl);
1358 }
1359
1360 /*
1361 * sf_mii_read: [mii interface function]
1362 *
1363 * Read from the MII.
1364 */
1365 static int
1366 sf_mii_read(struct device *self, int phy, int reg)
1367 {
1368 struct sf_softc *sc = (void *) self;
1369 uint32_t v;
1370 int i;
1371
1372 for (i = 0; i < 1000; i++) {
1373 v = sf_genreg_read(sc, SF_MII_PHY_REG(phy, reg));
1374 if (v & MiiDataValid)
1375 break;
1376 delay(1);
1377 }
1378
1379 if ((v & MiiDataValid) == 0)
1380 return (0);
1381
1382 if (MiiRegDataPort(v) == 0xffff)
1383 return (0);
1384
1385 return (MiiRegDataPort(v));
1386 }
1387
1388 /*
1389 * sf_mii_write: [mii interface function]
1390 *
1391 * Write to the MII.
1392 */
1393 static void
1394 sf_mii_write(struct device *self, int phy, int reg, int val)
1395 {
1396 struct sf_softc *sc = (void *) self;
1397 int i;
1398
1399 sf_genreg_write(sc, SF_MII_PHY_REG(phy, reg), val);
1400
1401 for (i = 0; i < 1000; i++) {
1402 if ((sf_genreg_read(sc, SF_MII_PHY_REG(phy, reg)) &
1403 MiiBusy) == 0)
1404 return;
1405 delay(1);
1406 }
1407
1408 printf("%s: MII write timed out\n", sc->sc_dev.dv_xname);
1409 }
1410
1411 /*
1412 * sf_mii_statchg: [mii interface function]
1413 *
1414 * Callback from the PHY when the media changes.
1415 */
1416 static void
1417 sf_mii_statchg(struct device *self)
1418 {
1419 struct sf_softc *sc = (void *) self;
1420 uint32_t ipg;
1421
1422 if (sc->sc_mii.mii_media_active & IFM_FDX) {
1423 sc->sc_MacConfig1 |= MC1_FullDuplex;
1424 ipg = 0x15;
1425 } else {
1426 sc->sc_MacConfig1 &= ~MC1_FullDuplex;
1427 ipg = 0x11;
1428 }
1429
1430 sf_genreg_write(sc, SF_MacConfig1, sc->sc_MacConfig1);
1431 sf_macreset(sc);
1432
1433 sf_genreg_write(sc, SF_BkToBkIPG, ipg);
1434 }
1435