/*	$NetBSD: aic6915.c,v 1.36 2018/06/26 06:48:00 msaitoh Exp $	*/

/*-
 * Copyright (c) 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Device driver for the Adaptec AIC-6915 (``Starfire'')
 * 10/100 Ethernet controller.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: aic6915.c,v 1.36 2018/06/26 06:48:00 msaitoh Exp $");


#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <sys/bus.h>
#include <sys/intr.h>

#include <dev/mii/miivar.h>

#include <dev/ic/aic6915reg.h>
#include <dev/ic/aic6915var.h>

static void	sf_start(struct ifnet *);
static void	sf_watchdog(struct ifnet *);
static int	sf_ioctl(struct ifnet *, u_long, void *);
static int	sf_init(struct ifnet *);
static void	sf_stop(struct ifnet *, int);

static bool	sf_shutdown(device_t, int);

static void	sf_txintr(struct sf_softc *);
static void	sf_rxintr(struct sf_softc *);
static void	sf_stats_update(struct sf_softc *);

static void	sf_reset(struct sf_softc *);
static void	sf_macreset(struct sf_softc *);
static void	sf_rxdrain(struct sf_softc *);
static int	sf_add_rxbuf(struct sf_softc *, int);
static uint8_t	sf_read_eeprom(struct sf_softc *, int);
static void	sf_set_filter(struct sf_softc *);

static int	sf_mii_read(device_t, int, int);
static void	sf_mii_write(device_t, int, int, int);
static void	sf_mii_statchg(struct ifnet *);

static void	sf_tick(void *);

#define	sf_funcreg_read(sc, reg)					\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh_func, (reg))
#define	sf_funcreg_write(sc, reg, val)					\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh_func, (reg), (val))

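/*
 * sf_reg_read:
 *
 *	Read a chip register.  When the chip is I/O mapped, registers
 *	are reached indirectly: the target offset is first written to
 *	SF_IndirectIoAccess and the value is then read back through
 *	SF_IndirectIoDataPort.
 */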
static inline uint32_t
sf_reg_read(struct sf_softc *sc, bus_addr_t reg)
{

	if (__predict_false(sc->sc_iomapped)) {
		bus_space_write_4(sc->sc_st, sc->sc_sh, SF_IndirectIoAccess,
		    reg);
		return (bus_space_read_4(sc->sc_st, sc->sc_sh,
		    SF_IndirectIoDataPort));
	}

	return (bus_space_read_4(sc->sc_st, sc->sc_sh, reg));
}

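/*
 * sf_reg_write:
 *
 *	Write a chip register, using the same indirect access dance
 *	as sf_reg_read() when the chip is I/O mapped.
 */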
static inline void
sf_reg_write(struct sf_softc *sc, bus_addr_t reg, uint32_t val)
{

	if (__predict_false(sc->sc_iomapped)) {
		bus_space_write_4(sc->sc_st, sc->sc_sh, SF_IndirectIoAccess,
		    reg);
		bus_space_write_4(sc->sc_st, sc->sc_sh, SF_IndirectIoDataPort,
		    val);
		return;
	}

	bus_space_write_4(sc->sc_st, sc->sc_sh, reg, val);
}

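/*
 * The general registers sit SF_GENREG_OFFSET bytes into the chip's
 * register space; these helpers let callers name general registers
 * by their natural offsets.
 */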
#define	sf_genreg_read(sc, reg)						\
	sf_reg_read((sc), (reg) + SF_GENREG_OFFSET)
#define	sf_genreg_write(sc, reg, val)					\
	sf_reg_write((sc), (reg) + SF_GENREG_OFFSET, (val))

/*
 * sf_attach:
 *
 *	Attach a Starfire interface to the system.
 */
void
sf_attach(struct sf_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	int i, rseg, error;
	bus_dma_segment_t seg;
	u_int8_t enaddr[ETHER_ADDR_LEN];

	callout_init(&sc->sc_tick_callout, 0);

	/*
	 * If we're I/O mapped, the functional register handle is
	 * the same as the base handle.  If we're memory mapped,
	 * carve off a chunk of the register space for the functional
	 * registers, to save on arithmetic later.
	 */
	if (sc->sc_iomapped)
		sc->sc_sh_func = sc->sc_sh;
	else {
		if ((error = bus_space_subregion(sc->sc_st, sc->sc_sh,
		    SF_GENREG_OFFSET, SF_FUNCREG_SIZE, &sc->sc_sh_func)) != 0) {
			aprint_error_dev(sc->sc_dev, "unable to sub-region "
			    "functional registers, error = %d\n", error);
			return;
		}
	}

	/*
	 * Initialize the transmit threshold for this interface.  The
	 * manual describes the default as 4 * 16 bytes.  We start out
	 * at 10 * 16 bytes, to avoid a bunch of initial underruns on
	 * several platforms.
	 */
	sc->sc_txthresh = 10;

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat,
	    sizeof(struct sf_control_data), PAGE_SIZE, 0, &seg, 1, &rseg,
	    BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to allocate control data, error = %d\n", error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	    sizeof(struct sf_control_data), (void **)&sc->sc_control_data,
	    BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to map control data, error = %d\n", error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat,
	    sizeof(struct sf_control_data), 1,
	    sizeof(struct sf_control_data), 0, BUS_DMA_NOWAIT,
	    &sc->sc_cddmamap)) != 0) {
		aprint_error_dev(sc->sc_dev, "unable to create control data "
		    "DMA map, error = %d\n", error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct sf_control_data), NULL,
	    BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(sc->sc_dev, "unable to load control data "
		    "DMA map, error = %d\n", error);
		goto fail_3;
	}

	/*
	 * Create the transmit buffer DMA maps.
	 */
	for (i = 0; i < SF_NTXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    SF_NTXFRAGS, MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &sc->sc_txsoft[i].ds_dmamap)) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "unable to create tx DMA map %d, error = %d\n", i,
			    error);
			goto fail_4;
		}
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < SF_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &sc->sc_rxsoft[i].ds_dmamap)) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "unable to create rx DMA map %d, error = %d\n", i,
			    error);
			goto fail_5;
		}
	}

	/*
	 * Reset the chip to a known state.
	 */
	sf_reset(sc);

	/*
	 * Read the Ethernet address from the EEPROM.
	 */
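	/* The address is stored byte-reversed, starting at EEPROM offset 15. */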
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		enaddr[i] = sf_read_eeprom(sc, (15 + (ETHER_ADDR_LEN - 1)) - i);

	printf("%s: Ethernet address %s\n", device_xname(sc->sc_dev),
	    ether_sprintf(enaddr));

	if (sf_funcreg_read(sc, SF_PciDeviceConfig) & PDC_System64)
		printf("%s: 64-bit PCI slot detected\n",
		    device_xname(sc->sc_dev));

	/*
	 * Initialize our media structures and probe the MII.
	 */
	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = sf_mii_read;
	sc->sc_mii.mii_writereg = sf_mii_write;
	sc->sc_mii.mii_statchg = sf_mii_statchg;
	sc->sc_ethercom.ec_mii = &sc->sc_mii;
	ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, ether_mediachange,
	    ether_mediastatus);
	mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);

	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = sf_ioctl;
	ifp->if_start = sf_start;
	ifp->if_watchdog = sf_watchdog;
	ifp->if_init = sf_init;
	ifp->if_stop = sf_stop;
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	if_deferred_start_init(ifp, NULL);
	ether_ifattach(ifp, enaddr);

	/*
	 * Make sure the interface is shutdown during reboot.
	 */
	if (pmf_device_register1(sc->sc_dev, NULL, NULL, sf_shutdown))
		pmf_class_network_register(sc->sc_dev, ifp);
	else
		aprint_error_dev(sc->sc_dev,
		    "couldn't establish power handler\n");
	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
 fail_5:
	for (i = 0; i < SF_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].ds_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_rxsoft[i].ds_dmamap);
	}
 fail_4:
	for (i = 0; i < SF_NTXDESC; i++) {
		if (sc->sc_txsoft[i].ds_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_txsoft[i].ds_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (void *) sc->sc_control_data,
	    sizeof(struct sf_control_data));
 fail_1:
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
 fail_0:
	return;
}

/*
 * sf_shutdown:
 *
 *	Shutdown hook -- make sure the interface is stopped at reboot.
 */
static bool
sf_shutdown(device_t self, int howto)
{
	struct sf_softc *sc;

	sc = device_private(self);
	sf_stop(&sc->sc_ethercom.ec_if, 1);

	return true;
}

/*
 * sf_start:		[ifnet interface function]
 *
 *	Start packet transmission on the interface.
 */
static void
sf_start(struct ifnet *ifp)
{
	struct sf_softc *sc = ifp->if_softc;
	struct mbuf *m0, *m;
	struct sf_txdesc0 *txd;
	struct sf_descsoft *ds;
	bus_dmamap_t dmamap;
	int error, producer, last = -1, opending, seg;

	/*
	 * Remember the previous number of pending transmits.
	 */
	opending = sc->sc_txpending;

	/*
	 * Find out where we're sitting.
	 */
	producer = SF_TXDINDEX_TO_HOST(
	    TDQPI_HiPrTxProducerIndex_get(
	    sf_funcreg_read(sc, SF_TxDescQueueProducerIndex)));

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.  Leave a blank one at the end for sanity's sake.
	 */
	while (sc->sc_txpending < (SF_NTXDESC - 1)) {
		/*
		 * Grab a packet off the queue.
		 */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;
		m = NULL;

		/*
		 * Get the transmit descriptor.
		 */
		txd = &sc->sc_txdescs[producer];
		ds = &sc->sc_txsoft[producer];
		dmamap = ds->ds_dmamap;

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the allotted number of frags, or we were
		 * short on resources.  In this case, we'll copy and try
		 * again.
		 */
		if (bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_WRITE|BUS_DMA_NOWAIT) != 0) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				aprint_error_dev(sc->sc_dev,
				    "unable to allocate Tx mbuf\n");
				break;
			}
			if (m0->m_pkthdr.len > MHLEN) {
				MCLGET(m, M_DONTWAIT);
				if ((m->m_flags & M_EXT) == 0) {
					aprint_error_dev(sc->sc_dev,
					    "unable to allocate Tx cluster\n");
					m_freem(m);
					break;
				}
			}
			m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, void *));
			m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
			error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
			    m, BUS_DMA_WRITE|BUS_DMA_NOWAIT);
			if (error) {
				aprint_error_dev(sc->sc_dev,
				    "unable to load Tx buffer, error = %d\n",
				    error);
				break;
			}
		}

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */
		IFQ_DEQUEUE(&ifp->if_snd, m0);
		if (m != NULL) {
			m_freem(m0);
			m0 = m;
		}

		/* Initialize the descriptor. */
		txd->td_word0 =
		    htole32(TD_W0_ID | TD_W0_CRCEN | m0->m_pkthdr.len);
		if (producer == (SF_NTXDESC - 1))
			txd->td_word0 |= htole32(TD_W0_END);
		txd->td_word1 = htole32(dmamap->dm_nsegs);
		for (seg = 0; seg < dmamap->dm_nsegs; seg++) {
			txd->td_frags[seg].fr_addr =
			    htole32(dmamap->dm_segs[seg].ds_addr);
			txd->td_frags[seg].fr_len =
			    htole32(dmamap->dm_segs[seg].ds_len);
		}

		/* Sync the descriptor and the DMA map. */
		SF_CDTXDSYNC(sc, producer, BUS_DMASYNC_PREWRITE);
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		/*
		 * Store a pointer to the packet so we can free it later.
		 */
		ds->ds_mbuf = m0;

		/* Advance the Tx pointer. */
		sc->sc_txpending++;
		last = producer;
		producer = SF_NEXTTX(producer);

		/*
		 * Pass the packet to any BPF listeners.
		 */
		bpf_mtap(ifp, m0, BPF_D_OUT);
	}

	if (sc->sc_txpending == (SF_NTXDESC - 1)) {
		/* No more slots left; notify upper layer. */
		ifp->if_flags |= IFF_OACTIVE;
	}

	if (sc->sc_txpending != opending) {
		KASSERT(last != -1);
		/*
		 * We enqueued packets.  Cause a transmit interrupt to
		 * happen on the last packet we enqueued, and give the
		 * new descriptors to the chip by writing the new
		 * producer index.
		 */
		sc->sc_txdescs[last].td_word0 |= htole32(TD_W0_INTR);
		SF_CDTXDSYNC(sc, last, BUS_DMASYNC_PREWRITE);

		sf_funcreg_write(sc, SF_TxDescQueueProducerIndex,
		    TDQPI_HiPrTxProducerIndex(SF_TXDINDEX_TO_CHIP(producer)));

		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}

/*
 * sf_watchdog:		[ifnet interface function]
 *
 *	Watchdog timer handler.
 */
static void
sf_watchdog(struct ifnet *ifp)
{
	struct sf_softc *sc = ifp->if_softc;

	printf("%s: device timeout\n", device_xname(sc->sc_dev));
	ifp->if_oerrors++;

	(void) sf_init(ifp);

	/* Try to get more packets going. */
	sf_start(ifp);
}

/*
 * sf_ioctl:		[ifnet interface function]
 *
 *	Handle control requests from the operator.
 */
static int
sf_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct sf_softc *sc = ifp->if_softc;
	int s, error;

	s = splnet();

	error = ether_ioctl(ifp, cmd, data);
	if (error == ENETRESET) {
		/*
		 * Multicast list has changed; set the hardware filter
		 * accordingly.
		 */
		if (ifp->if_flags & IFF_RUNNING)
			sf_set_filter(sc);
		error = 0;
	}

	/* Try to get more packets going. */
	sf_start(ifp);

	splx(s);
	return (error);
}

/*
 * sf_intr:
 *
 *	Interrupt service routine.
 */
int
sf_intr(void *arg)
{
	struct sf_softc *sc = arg;
	uint32_t isr;
	int handled = 0, wantinit = 0;

	for (;;) {
		/*
		 * Reading clears all interrupts we're interested in.
		 * The driver treats IS_PCIPadInt as the summary
		 * "interrupt pending" bit: once it reads clear, we're
		 * done.
		 */
		isr = sf_funcreg_read(sc, SF_InterruptStatus);
		if ((isr & IS_PCIPadInt) == 0)
			break;

		handled = 1;

		/* Handle receive interrupts. */
		if (isr & IS_RxQ1DoneInt)
			sf_rxintr(sc);

		/* Handle transmit completion interrupts. */
		if (isr & (IS_TxDmaDoneInt|IS_TxQueueDoneInt))
			sf_txintr(sc);

		/* Handle abnormal interrupts. */
		if (isr & IS_AbnormalInterrupt) {
			/* Statistics. */
			if (isr & IS_StatisticWrapInt)
				sf_stats_update(sc);

			/* DMA errors. */
			if (isr & IS_DmaErrInt) {
				wantinit = 1;
				aprint_error_dev(sc->sc_dev,
				    "WARNING: DMA error\n");
			}

			/* Transmit FIFO underruns. */
			if (isr & IS_TxDataLowInt) {
				if (sc->sc_txthresh < 0xff)
					sc->sc_txthresh++;
				printf("%s: transmit FIFO underrun, new "
				    "threshold: %d bytes\n",
				    device_xname(sc->sc_dev),
				    sc->sc_txthresh * 16);
				sf_funcreg_write(sc, SF_TransmitFrameCSR,
				    sc->sc_TransmitFrameCSR |
				    TFCSR_TransmitThreshold(sc->sc_txthresh));
				sf_funcreg_write(sc, SF_TxDescQueueCtrl,
				    sc->sc_TxDescQueueCtrl |
				    TDQC_TxHighPriorityFifoThreshold(
				    sc->sc_txthresh));
			}
		}
	}

	if (handled) {
		/* Reset the interface, if necessary. */
		if (wantinit)
			sf_init(&sc->sc_ethercom.ec_if);

		/* Try and get more packets going. */
		if_schedule_deferred_start(&sc->sc_ethercom.ec_if);
	}

	return (handled);
}

/*
 * sf_txintr:
 *
 *	Helper -- handle transmit completion interrupts.
 */
static void
sf_txintr(struct sf_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct sf_descsoft *ds;
	uint32_t cqci, tcd;
	int consumer, producer, txidx;

 try_again:
	cqci = sf_funcreg_read(sc, SF_CompletionQueueConsumerIndex);

	consumer = CQCI_TxCompletionConsumerIndex_get(cqci);
	producer = CQPI_TxCompletionProducerIndex_get(
	    sf_funcreg_read(sc, SF_CompletionQueueProducerIndex));

	if (consumer == producer)
		return;

	ifp->if_flags &= ~IFF_OACTIVE;

	while (consumer != producer) {
		SF_CDTXCSYNC(sc, consumer, BUS_DMASYNC_POSTREAD);
		tcd = le32toh(sc->sc_txcomp[consumer].tcd_word0);

		txidx = SF_TCD_INDEX_TO_HOST(TCD_INDEX(tcd));
#ifdef DIAGNOSTIC
		if ((tcd & TCD_PR) == 0)
			aprint_error_dev(sc->sc_dev,
			    "Tx queue mismatch, index %d\n", txidx);
#endif
		/*
		 * NOTE: stats are updated later.  We're just
		 * releasing packets that have been DMA'd to
		 * the chip.
		 */
		ds = &sc->sc_txsoft[txidx];
		SF_CDTXDSYNC(sc, txidx, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap,
		    0, ds->ds_dmamap->dm_mapsize,
		    BUS_DMASYNC_POSTWRITE);
		m_freem(ds->ds_mbuf);
		ds->ds_mbuf = NULL;

		consumer = SF_NEXTTCD(consumer);
		sc->sc_txpending--;
	}

	/* XXXJRT -- should be KDASSERT() */
	KASSERT(sc->sc_txpending >= 0);

	/* If all packets are done, cancel the watchdog timer. */
	if (sc->sc_txpending == 0)
		ifp->if_timer = 0;

	/* Update the consumer index. */
	sf_funcreg_write(sc, SF_CompletionQueueConsumerIndex,
	    (cqci & ~CQCI_TxCompletionConsumerIndex(0x7ff)) |
	    CQCI_TxCompletionConsumerIndex(consumer));

	/* Double check for new completions. */
	goto try_again;
}

/*
 * sf_rxintr:
 *
 *	Helper -- handle receive interrupts.
 */
static void
sf_rxintr(struct sf_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct sf_descsoft *ds;
	struct sf_rcd_full *rcd;
	struct mbuf *m;
	uint32_t cqci, word0;
	int consumer, producer, bufproducer, rxidx, len;

 try_again:
	cqci = sf_funcreg_read(sc, SF_CompletionQueueConsumerIndex);

	consumer = CQCI_RxCompletionQ1ConsumerIndex_get(cqci);
	producer = CQPI_RxCompletionQ1ProducerIndex_get(
	    sf_funcreg_read(sc, SF_CompletionQueueProducerIndex));
	bufproducer = RXQ1P_RxDescQ1Producer_get(
	    sf_funcreg_read(sc, SF_RxDescQueue1Ptrs));

	if (consumer == producer)
		return;

	while (consumer != producer) {
		rcd = &sc->sc_rxcomp[consumer];
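		/*
		 * Sync the completion entry to see the chip's update,
		 * then immediately sync it back for reuse.
		 */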
		SF_CDRXCSYNC(sc, consumer,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
		SF_CDRXCSYNC(sc, consumer,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		word0 = le32toh(rcd->rcd_word0);
		rxidx = RCD_W0_EndIndex(word0);

		ds = &sc->sc_rxsoft[rxidx];

		consumer = SF_NEXTRCD(consumer);
		bufproducer = SF_NEXTRX(bufproducer);

		if ((word0 & RCD_W0_OK) == 0) {
			SF_INIT_RXDESC(sc, rxidx);
			continue;
		}

		bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
		    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

		/*
		 * No errors; receive the packet.  Note that we have
		 * configured the Starfire to NOT transfer the CRC
		 * with the packet.
		 */
		len = RCD_W0_Length(word0);

#ifdef __NO_STRICT_ALIGNMENT
		/*
		 * Allocate a new mbuf cluster.  If that fails, we are
		 * out of memory, and must drop the packet and recycle
		 * the buffer that's already attached to this descriptor.
		 */
		m = ds->ds_mbuf;
		if (sf_add_rxbuf(sc, rxidx) != 0) {
			ifp->if_ierrors++;
			SF_INIT_RXDESC(sc, rxidx);
			bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
			    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
			continue;
		}
#else
		/*
		 * The Starfire's receive buffer must be 4-byte aligned.
		 * But this means that the data after the Ethernet header
		 * is misaligned.  We must allocate a new buffer and
		 * copy the data, shifted forward 2 bytes.
		 */
		MGETHDR(m, M_DONTWAIT, MT_DATA);
		if (m == NULL) {
 dropit:
			ifp->if_ierrors++;
			SF_INIT_RXDESC(sc, rxidx);
			bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
			    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
			continue;
		}
		if (len > (MHLEN - 2)) {
			MCLGET(m, M_DONTWAIT);
			if ((m->m_flags & M_EXT) == 0) {
				m_freem(m);
				goto dropit;
			}
		}
		m->m_data += 2;

		/*
		 * Note that we use a cluster for incoming frames, so
		 * the buffer is virtually contiguous.
		 */
		memcpy(mtod(m, void *), mtod(ds->ds_mbuf, void *), len);

		/* Allow the receive descriptor to continue using its mbuf. */
		SF_INIT_RXDESC(sc, rxidx);
		bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
		    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
#endif /* __NO_STRICT_ALIGNMENT */

		m_set_rcvif(m, ifp);
		m->m_pkthdr.len = m->m_len = len;

		/* Pass it on. */
		if_percpuq_enqueue(ifp->if_percpuq, m);
	}

	/* Update the chip's pointers. */
	sf_funcreg_write(sc, SF_CompletionQueueConsumerIndex,
	    (cqci & ~CQCI_RxCompletionQ1ConsumerIndex(0x7ff)) |
	    CQCI_RxCompletionQ1ConsumerIndex(consumer));
	sf_funcreg_write(sc, SF_RxDescQueue1Ptrs,
	    RXQ1P_RxDescQ1Producer(bufproducer));

	/* Double-check for any new completions. */
	goto try_again;
}

/*
 * sf_tick:
 *
 *	One second timer, used to tick the MII and update stats.
 */
static void
sf_tick(void *arg)
{
	struct sf_softc *sc = arg;
	int s;

	s = splnet();
	mii_tick(&sc->sc_mii);
	sf_stats_update(sc);
	splx(s);

	callout_reset(&sc->sc_tick_callout, hz, sf_tick, sc);
}

/*
 * sf_stats_update:
 *
 *	Read the statistics counters.
 */
static void
sf_stats_update(struct sf_softc *sc)
{
	struct sf_stats stats;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint32_t *p;
	u_int i;

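	/*
	 * Copy out the statistics block, zeroing each counter as we go
	 * so the next pass reads deltas.  This walk assumes that
	 * struct sf_stats is laid out as consecutive uint32_t fields
	 * mirroring the chip's statistics registers.
	 */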
	p = &stats.TransmitOKFrames;
	for (i = 0; i < (sizeof(stats) / sizeof(uint32_t)); i++) {
		*p++ = sf_genreg_read(sc,
		    SF_STATS_BASE + (i * sizeof(uint32_t)));
		sf_genreg_write(sc, SF_STATS_BASE + (i * sizeof(uint32_t)), 0);
	}

	ifp->if_opackets += stats.TransmitOKFrames;

	ifp->if_collisions += stats.SingleCollisionFrames +
	    stats.MultipleCollisionFrames;

	ifp->if_oerrors += stats.TransmitAbortDueToExcessiveCollisions +
	    stats.TransmitAbortDueToExcessingDeferral +
	    stats.FramesLostDueToInternalTransmitErrors;

	ifp->if_ipackets += stats.ReceiveOKFrames;

	ifp->if_ierrors += stats.ReceiveCRCErrors + stats.AlignmentErrors +
	    stats.ReceiveFramesTooLong + stats.ReceiveFramesTooShort +
	    stats.ReceiveFramesJabbersError +
	    stats.FramesLostDueToInternalReceiveErrors;
}

/*
 * sf_reset:
 *
 *	Perform a soft reset on the Starfire.
 */
static void
sf_reset(struct sf_softc *sc)
{
	int i;

	sf_funcreg_write(sc, SF_GeneralEthernetCtrl, 0);

	sf_macreset(sc);

	sf_funcreg_write(sc, SF_PciDeviceConfig, PDC_SoftReset);
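	/* The SoftReset bit self-clears; poll up to 10ms for it. */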
	for (i = 0; i < 1000; i++) {
		delay(10);
		if ((sf_funcreg_read(sc, SF_PciDeviceConfig) &
		    PDC_SoftReset) == 0)
			break;
	}

	if (i == 1000) {
		aprint_error_dev(sc->sc_dev, "reset failed to complete\n");
		sf_funcreg_write(sc, SF_PciDeviceConfig, 0);
	}

	delay(1000);
}

/*
 * sf_macreset:
 *
 *	Reset the MAC portion of the Starfire.
 */
static void
sf_macreset(struct sf_softc *sc)
{

	sf_genreg_write(sc, SF_MacConfig1, sc->sc_MacConfig1 | MC1_SoftRst);
	delay(1000);
	sf_genreg_write(sc, SF_MacConfig1, sc->sc_MacConfig1);
}

/*
 * sf_init:		[ifnet interface function]
 *
 *	Initialize the interface.  Must be called at splnet().
 */
static int
sf_init(struct ifnet *ifp)
{
	struct sf_softc *sc = ifp->if_softc;
	struct sf_descsoft *ds;
	int error = 0;
	u_int i;

	/*
	 * Cancel any pending I/O.
	 */
	sf_stop(ifp, 0);

	/*
	 * Reset the Starfire to a known state.
	 */
	sf_reset(sc);

	/* Clear the stat counters. */
	for (i = 0; i < sizeof(struct sf_stats); i += sizeof(uint32_t))
		sf_genreg_write(sc, SF_STATS_BASE + i, 0);

	/*
	 * Initialize the transmit descriptor ring.
	 */
	memset(sc->sc_txdescs, 0, sizeof(sc->sc_txdescs));
	sf_funcreg_write(sc, SF_TxDescQueueHighAddr, 0);
	sf_funcreg_write(sc, SF_HiPrTxDescQueueBaseAddr, SF_CDTXDADDR(sc, 0));
	sf_funcreg_write(sc, SF_LoPrTxDescQueueBaseAddr, 0);

	/*
	 * Initialize the transmit completion ring.
	 */
	for (i = 0; i < SF_NTCD; i++) {
		sc->sc_txcomp[i].tcd_word0 = htole32(TCD_DMA_ID);
		SF_CDTXCSYNC(sc, i, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	}
	sf_funcreg_write(sc, SF_CompletionQueueHighAddr, 0);
	sf_funcreg_write(sc, SF_TxCompletionQueueCtrl, SF_CDTXCADDR(sc, 0));

	/*
	 * Initialize the receive descriptor ring.
	 */
	for (i = 0; i < SF_NRXDESC; i++) {
		ds = &sc->sc_rxsoft[i];
		if (ds->ds_mbuf == NULL) {
			if ((error = sf_add_rxbuf(sc, i)) != 0) {
				aprint_error_dev(sc->sc_dev,
				    "unable to allocate or map rx buffer %d, "
				    "error = %d\n", i, error);
				/*
				 * XXX Should attempt to run with fewer receive
				 * XXX buffers instead of just failing.
				 */
				sf_rxdrain(sc);
				goto out;
			}
		} else
			SF_INIT_RXDESC(sc, i);
	}
	sf_funcreg_write(sc, SF_RxDescQueueHighAddress, 0);
	sf_funcreg_write(sc, SF_RxDescQueue1LowAddress, SF_CDRXDADDR(sc, 0));
	sf_funcreg_write(sc, SF_RxDescQueue2LowAddress, 0);

	/*
	 * Initialize the receive completion ring.
	 */
	for (i = 0; i < SF_NRCD; i++) {
		sc->sc_rxcomp[i].rcd_word0 = htole32(RCD_W0_ID);
		sc->sc_rxcomp[i].rcd_word1 = 0;
		sc->sc_rxcomp[i].rcd_word2 = 0;
		sc->sc_rxcomp[i].rcd_timestamp = 0;
		SF_CDRXCSYNC(sc, i, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	}
	sf_funcreg_write(sc, SF_RxCompletionQueue1Ctrl, SF_CDRXCADDR(sc, 0) |
	    RCQ1C_RxCompletionQ1Type(3));	/* full descriptors; cf. struct sf_rcd_full */
	sf_funcreg_write(sc, SF_RxCompletionQueue2Ctrl, 0);

	/*
	 * Initialize the Tx CSR.
	 */
	sc->sc_TransmitFrameCSR = 0;
	sf_funcreg_write(sc, SF_TransmitFrameCSR,
	    sc->sc_TransmitFrameCSR |
	    TFCSR_TransmitThreshold(sc->sc_txthresh));

	/*
	 * Initialize the Tx descriptor control register.
	 */
	sc->sc_TxDescQueueCtrl = TDQC_SkipLength(0) |
	    TDQC_TxDmaBurstSize(4) |	/* default */
	    TDQC_MinFrameSpacing(3) |	/* 128 bytes */
	    TDQC_TxDescType(0);
	sf_funcreg_write(sc, SF_TxDescQueueCtrl,
	    sc->sc_TxDescQueueCtrl |
	    TDQC_TxHighPriorityFifoThreshold(sc->sc_txthresh));

	/*
	 * Initialize the Rx descriptor control registers.
	 */
	sf_funcreg_write(sc, SF_RxDescQueue1Ctrl,
	    RDQ1C_RxQ1BufferLength(MCLBYTES) |
	    RDQ1C_RxDescSpacing(0));
	sf_funcreg_write(sc, SF_RxDescQueue2Ctrl, 0);

	/*
	 * Initialize the Tx descriptor producer indices.
	 */
	sf_funcreg_write(sc, SF_TxDescQueueProducerIndex,
	    TDQPI_HiPrTxProducerIndex(0) |
	    TDQPI_LoPrTxProducerIndex(0));

	/*
	 * Initialize the Rx descriptor producer indices.
	 */
	sf_funcreg_write(sc, SF_RxDescQueue1Ptrs,
	    RXQ1P_RxDescQ1Producer(SF_NRXDESC - 1));
	sf_funcreg_write(sc, SF_RxDescQueue2Ptrs,
	    RXQ2P_RxDescQ2Producer(0));

	/*
	 * Initialize the Tx and Rx completion queue consumer indices.
	 */
	sf_funcreg_write(sc, SF_CompletionQueueConsumerIndex,
	    CQCI_TxCompletionConsumerIndex(0) |
	    CQCI_RxCompletionQ1ConsumerIndex(0));
	sf_funcreg_write(sc, SF_RxHiPrCompletionPtrs, 0);

	/*
	 * Initialize the Rx DMA control register.
	 */
	sf_funcreg_write(sc, SF_RxDmaCtrl,
	    RDC_RxHighPriorityThreshold(6) |	/* default */
	    RDC_RxBurstSize(4));		/* default */

	/*
	 * Set the receive filter.
	 */
	sc->sc_RxAddressFilteringCtl = 0;
	sf_set_filter(sc);

	/*
	 * Set MacConfig1.  When we set the media, MacConfig1 will
	 * actually be written and the MAC part reset.
	 */
	sc->sc_MacConfig1 = MC1_PadEn;

	/*
	 * Set the media.
	 */
	if ((error = ether_mediachange(ifp)) != 0)
		goto out;

	/*
	 * Initialize the interrupt register.
	 */
	sc->sc_InterruptEn = IS_PCIPadInt | IS_RxQ1DoneInt |
	    IS_TxQueueDoneInt | IS_TxDmaDoneInt | IS_DmaErrInt |
	    IS_StatisticWrapInt;
	sf_funcreg_write(sc, SF_InterruptEn, sc->sc_InterruptEn);

	sf_funcreg_write(sc, SF_PciDeviceConfig, PDC_IntEnable |
	    PDC_PCIMstDmaEn | (1 << PDC_FifoThreshold_SHIFT));

	/*
	 * Start the transmit and receive processes.
	 */
	sf_funcreg_write(sc, SF_GeneralEthernetCtrl,
	    GEC_TxDmaEn|GEC_RxDmaEn|GEC_TransmitEn|GEC_ReceiveEn);

	/* Start the one second clock. */
	callout_reset(&sc->sc_tick_callout, hz, sf_tick, sc);

	/*
	 * Note that the interface is now running.
	 */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

 out:
	if (error) {
		ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
		ifp->if_timer = 0;
		printf("%s: interface not running\n", device_xname(sc->sc_dev));
	}
	return (error);
}

/*
 * sf_rxdrain:
 *
 *	Drain the receive queue.
 */
static void
sf_rxdrain(struct sf_softc *sc)
{
	struct sf_descsoft *ds;
	int i;

	for (i = 0; i < SF_NRXDESC; i++) {
		ds = &sc->sc_rxsoft[i];
		if (ds->ds_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
			m_freem(ds->ds_mbuf);
			ds->ds_mbuf = NULL;
		}
	}
}

/*
 * sf_stop:		[ifnet interface function]
 *
 *	Stop transmission on the interface.
 */
static void
sf_stop(struct ifnet *ifp, int disable)
{
	struct sf_softc *sc = ifp->if_softc;
	struct sf_descsoft *ds;
	int i;

	/* Stop the one second clock. */
	callout_stop(&sc->sc_tick_callout);

	/* Down the MII. */
	mii_down(&sc->sc_mii);

	/* Disable interrupts. */
	sf_funcreg_write(sc, SF_InterruptEn, 0);

	/* Stop the transmit and receive processes. */
	sf_funcreg_write(sc, SF_GeneralEthernetCtrl, 0);

	/*
	 * Release any queued transmit buffers.
	 */
	for (i = 0; i < SF_NTXDESC; i++) {
		ds = &sc->sc_txsoft[i];
		if (ds->ds_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
			m_freem(ds->ds_mbuf);
			ds->ds_mbuf = NULL;
		}
	}

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;

	if (disable)
		sf_rxdrain(sc);
}

/*
 * sf_read_eeprom:
 *
 *	Read from the Starfire EEPROM.
 */
static uint8_t
sf_read_eeprom(struct sf_softc *sc, int offset)
{
	uint32_t reg;

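	/* Each register in the EEPROM window exposes four bytes. */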
	reg = sf_genreg_read(sc, SF_EEPROM_BASE + (offset & ~3));

	return ((reg >> (8 * (offset & 3))) & 0xff);
}

/*
 * sf_add_rxbuf:
 *
 *	Add a receive buffer to the indicated descriptor.
 */
static int
sf_add_rxbuf(struct sf_softc *sc, int idx)
{
	struct sf_descsoft *ds = &sc->sc_rxsoft[idx];
	struct mbuf *m;
	int error;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);

	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return (ENOBUFS);
	}

	if (ds->ds_mbuf != NULL)
		bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);

	ds->ds_mbuf = m;

	error = bus_dmamap_load(sc->sc_dmat, ds->ds_dmamap,
	    m->m_ext.ext_buf, m->m_ext.ext_size, NULL,
	    BUS_DMA_READ|BUS_DMA_NOWAIT);
	if (error) {
		aprint_error_dev(sc->sc_dev,
		    "can't load rx DMA map %d, error = %d\n", idx, error);
		panic("sf_add_rxbuf");	/* XXX */
	}

	bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
	    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);

	SF_INIT_RXDESC(sc, idx);

	return (0);
}

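/*
 * sf_set_filter_perfect:
 *
 *	Write an Ethernet address into a slot in the perfect filter table.
 */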
static void
sf_set_filter_perfect(struct sf_softc *sc, int slot, const uint8_t *enaddr)
{
	uint32_t reg0, reg1, reg2;

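	/*
	 * Each slot is 0x10 bytes wide; the address is written 16 bits
	 * at a time, low-order bytes first.
	 */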
	reg0 = enaddr[5] | (enaddr[4] << 8);
	reg1 = enaddr[3] | (enaddr[2] << 8);
	reg2 = enaddr[1] | (enaddr[0] << 8);

	sf_genreg_write(sc, SF_PERFECT_BASE + (slot * 0x10) + 0, reg0);
	sf_genreg_write(sc, SF_PERFECT_BASE + (slot * 0x10) + 4, reg1);
	sf_genreg_write(sc, SF_PERFECT_BASE + (slot * 0x10) + 8, reg2);
}

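/*
 * sf_set_filter_hash:
 *
 *	Set the hash table bit corresponding to an Ethernet address.
 */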
static void
sf_set_filter_hash(struct sf_softc *sc, uint8_t *enaddr)
{
	uint32_t hash, slot, reg;

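	/*
	 * The top 9 bits of the big-endian CRC pick the filter bit:
	 * the upper 5 bits select one of 32 table words, the low 4
	 * bits the bit within that word.
	 */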
	hash = ether_crc32_be(enaddr, ETHER_ADDR_LEN) >> 23;
	slot = hash >> 4;

	reg = sf_genreg_read(sc, SF_HASH_BASE + (slot * 0x10));
	reg |= 1 << (hash & 0xf);
	sf_genreg_write(sc, SF_HASH_BASE + (slot * 0x10), reg);
}

/*
 * sf_set_filter:
 *
 *	Set the Starfire receive filter.
 */
static void
sf_set_filter(struct sf_softc *sc)
{
	struct ethercom *ec = &sc->sc_ethercom;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	int i;

	/* Start by clearing the perfect and hash tables. */
	for (i = 0; i < SF_PERFECT_SIZE; i += sizeof(uint32_t))
		sf_genreg_write(sc, SF_PERFECT_BASE + i, 0);

	for (i = 0; i < SF_HASH_SIZE; i += sizeof(uint32_t))
		sf_genreg_write(sc, SF_HASH_BASE + i, 0);

	/*
	 * Clear the perfect and hash mode bits.
	 */
	sc->sc_RxAddressFilteringCtl &=
	    ~(RAFC_PerfectFilteringMode(3) | RAFC_HashFilteringMode(3));

	if (ifp->if_flags & IFF_BROADCAST)
		sc->sc_RxAddressFilteringCtl |= RAFC_PassBroadcast;
	else
		sc->sc_RxAddressFilteringCtl &= ~RAFC_PassBroadcast;

	if (ifp->if_flags & IFF_PROMISC) {
		sc->sc_RxAddressFilteringCtl |= RAFC_PromiscuousMode;
		goto allmulti;
	} else
		sc->sc_RxAddressFilteringCtl &= ~RAFC_PromiscuousMode;

	/*
	 * Set normal perfect filtering mode.
	 */
	sc->sc_RxAddressFilteringCtl |= RAFC_PerfectFilteringMode(1);

	/*
	 * First, write the station address to the perfect filter
	 * table.
	 */
	sf_set_filter_perfect(sc, 0, CLLADDR(ifp->if_sadl));

	/*
	 * Now set the hash bits for each multicast address in our
	 * list.
	 */
	ETHER_FIRST_MULTI(step, ec, enm);
	if (enm == NULL)
		goto done;
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			/*
			 * We must listen to a range of multicast addresses.
			 * For now, just accept all multicasts, rather than
			 * trying to set only those filter bits needed to match
			 * the range.  (At this time, the only use of address
			 * ranges is for IP multicast routing, for which the
			 * range is big enough to require all bits set.)
			 */
			goto allmulti;
		}
		sf_set_filter_hash(sc, enm->enm_addrlo);
		ETHER_NEXT_MULTI(step, enm);
	}

	/*
	 * Set "hash only multicast dest, match regardless of VLAN ID".
	 */
	sc->sc_RxAddressFilteringCtl |= RAFC_HashFilteringMode(2);
	goto done;

 allmulti:
	/*
	 * XXX RAFC_PassMulticast is sub-optimal if using VLAN mode.
	 */
	sc->sc_RxAddressFilteringCtl |= RAFC_PassMulticast;
	ifp->if_flags |= IFF_ALLMULTI;

 done:
	sf_funcreg_write(sc, SF_RxAddressFilteringCtl,
	    sc->sc_RxAddressFilteringCtl);
}

/*
 * sf_mii_read:		[mii interface function]
 *
 *	Read from the MII.
 */
static int
sf_mii_read(device_t self, int phy, int reg)
{
	struct sf_softc *sc = device_private(self);
	uint32_t v;
	int i;

	for (i = 0; i < 1000; i++) {
		v = sf_genreg_read(sc, SF_MII_PHY_REG(phy, reg));
		if (v & MiiDataValid)
			break;
		delay(1);
	}

	if ((v & MiiDataValid) == 0)
		return (0);

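	/* An all-ones value usually means no PHY answered at this address. */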
	if (MiiRegDataPort(v) == 0xffff)
		return (0);

	return (MiiRegDataPort(v));
}

/*
 * sf_mii_write:	[mii interface function]
 *
 *	Write to the MII.
 */
static void
sf_mii_write(device_t self, int phy, int reg, int val)
{
	struct sf_softc *sc = device_private(self);
	int i;

	sf_genreg_write(sc, SF_MII_PHY_REG(phy, reg), val);

	for (i = 0; i < 1000; i++) {
		if ((sf_genreg_read(sc, SF_MII_PHY_REG(phy, reg)) &
		    MiiBusy) == 0)
			return;
		delay(1);
	}

	printf("%s: MII write timed out\n", device_xname(sc->sc_dev));
}

/*
 * sf_mii_statchg:	[mii interface function]
 *
 *	Callback from the PHY when the media changes.
 */
static void
sf_mii_statchg(struct ifnet *ifp)
{
	struct sf_softc *sc = ifp->if_softc;
	uint32_t ipg;

	if (sc->sc_mii.mii_media_active & IFM_FDX) {
		sc->sc_MacConfig1 |= MC1_FullDuplex;
		ipg = 0x15;
	} else {
		sc->sc_MacConfig1 &= ~MC1_FullDuplex;
		ipg = 0x11;
	}

	sf_genreg_write(sc, SF_MacConfig1, sc->sc_MacConfig1);
	sf_macreset(sc);

	sf_genreg_write(sc, SF_BkToBkIPG, ipg);
}
1420