/*	$NetBSD: aic6915.c,v 1.41 2020/01/29 14:09:58 thorpej Exp $	*/

/*-
 * Copyright (c) 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Device driver for the Adaptec AIC-6915 (``Starfire'')
 * 10/100 Ethernet controller.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: aic6915.c,v 1.41 2020/01/29 14:09:58 thorpej Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <sys/bus.h>
#include <sys/intr.h>

#include <dev/mii/miivar.h>

#include <dev/ic/aic6915reg.h>
#include <dev/ic/aic6915var.h>

static void	sf_start(struct ifnet *);
static void	sf_watchdog(struct ifnet *);
static int	sf_ioctl(struct ifnet *, u_long, void *);
static int	sf_init(struct ifnet *);
static void	sf_stop(struct ifnet *, int);

static bool	sf_shutdown(device_t, int);

static void	sf_txintr(struct sf_softc *);
static void	sf_rxintr(struct sf_softc *);
static void	sf_stats_update(struct sf_softc *);

static void	sf_reset(struct sf_softc *);
static void	sf_macreset(struct sf_softc *);
static void	sf_rxdrain(struct sf_softc *);
static int	sf_add_rxbuf(struct sf_softc *, int);
static uint8_t	sf_read_eeprom(struct sf_softc *, int);
static void	sf_set_filter(struct sf_softc *);

static int	sf_mii_read(device_t, int, int, uint16_t *);
static int	sf_mii_write(device_t, int, int, uint16_t);
static void	sf_mii_statchg(struct ifnet *);

static void	sf_tick(void *);

#define	sf_funcreg_read(sc, reg)					\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh_func, (reg))
#define	sf_funcreg_write(sc, reg, val)					\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh_func, (reg), (val))

static inline uint32_t
sf_reg_read(struct sf_softc *sc, bus_addr_t reg)
{

	if (__predict_false(sc->sc_iomapped)) {
		bus_space_write_4(sc->sc_st, sc->sc_sh, SF_IndirectIoAccess,
		    reg);
		return (bus_space_read_4(sc->sc_st, sc->sc_sh,
		    SF_IndirectIoDataPort));
	}

	return (bus_space_read_4(sc->sc_st, sc->sc_sh, reg));
}

static inline void
sf_reg_write(struct sf_softc *sc, bus_addr_t reg, uint32_t val)
{

	if (__predict_false(sc->sc_iomapped)) {
		bus_space_write_4(sc->sc_st, sc->sc_sh, SF_IndirectIoAccess,
		    reg);
		bus_space_write_4(sc->sc_st, sc->sc_sh, SF_IndirectIoDataPort,
		    val);
		return;
	}

	bus_space_write_4(sc->sc_st, sc->sc_sh, reg, val);
}

#define	sf_genreg_read(sc, reg)						\
	sf_reg_read((sc), (reg) + SF_GENREG_OFFSET)
#define	sf_genreg_write(sc, reg, val)					\
	sf_reg_write((sc), (reg) + SF_GENREG_OFFSET, (val))
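
/*
 * Register access notes:
 *
 *	sf_funcreg_*() access the functional registers through
 *	sc_sh_func, which is either the base handle (I/O-mapped) or a
 *	subregion carved out at attach time (memory-mapped).
 *
 *	sf_reg_*() additionally handle the I/O-mapped case, where most
 *	of the register space is only reachable indirectly: the target
 *	offset is written to SF_IndirectIoAccess and the data is then
 *	transferred through SF_IndirectIoDataPort.
 *
 *	sf_genreg_*() address the general register block, which lives
 *	at SF_GENREG_OFFSET within the chip's register space.
 */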

/*
 * sf_attach:
 *
 *	Attach a Starfire interface to the system.
 */
void
sf_attach(struct sf_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct mii_data * const mii = &sc->sc_mii;
	int i, rseg, error;
	bus_dma_segment_t seg;
	uint8_t enaddr[ETHER_ADDR_LEN];

	callout_init(&sc->sc_tick_callout, 0);

	/*
	 * If we're I/O mapped, the functional register handle is
	 * the same as the base handle.  If we're memory mapped,
	 * carve off a chunk of the register space for the functional
	 * registers, to save on arithmetic later.
	 */
	if (sc->sc_iomapped)
		sc->sc_sh_func = sc->sc_sh;
	else {
		if ((error = bus_space_subregion(sc->sc_st, sc->sc_sh,
		    SF_GENREG_OFFSET, SF_FUNCREG_SIZE, &sc->sc_sh_func)) != 0) {
			aprint_error_dev(sc->sc_dev, "unable to sub-region "
			    "functional registers, error = %d\n", error);
			return;
		}
	}

	/*
	 * Initialize the transmit threshold for this interface.  The
	 * manual describes the default as 4 * 16 bytes.  We start out
	 * at 10 * 16 bytes, to avoid a bunch of initial underruns on
	 * several platforms.
	 */
	sc->sc_txthresh = 10;

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat,
	    sizeof(struct sf_control_data), PAGE_SIZE, 0, &seg, 1, &rseg,
	    BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to allocate control data, error = %d\n", error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	    sizeof(struct sf_control_data), (void **)&sc->sc_control_data,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to map control data, error = %d\n", error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat,
	    sizeof(struct sf_control_data), 1,
	    sizeof(struct sf_control_data), 0, BUS_DMA_NOWAIT,
	    &sc->sc_cddmamap)) != 0) {
		aprint_error_dev(sc->sc_dev, "unable to create control data "
		    "DMA map, error = %d\n", error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct sf_control_data), NULL,
	    BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(sc->sc_dev, "unable to load control data "
		    "DMA map, error = %d\n", error);
		goto fail_3;
	}

	/*
	 * Create the transmit buffer DMA maps.
	 */
	for (i = 0; i < SF_NTXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    SF_NTXFRAGS, MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &sc->sc_txsoft[i].ds_dmamap)) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "unable to create tx DMA map %d, error = %d\n", i,
			    error);
			goto fail_4;
		}
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < SF_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &sc->sc_rxsoft[i].ds_dmamap)) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "unable to create rx DMA map %d, error = %d\n", i,
			    error);
			goto fail_5;
		}
	}

	/*
	 * Reset the chip to a known state.
	 */
	sf_reset(sc);

	/*
	 * Read the Ethernet address from the EEPROM.
	 */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		enaddr[i] = sf_read_eeprom(sc, (15 + (ETHER_ADDR_LEN - 1)) - i);

	printf("%s: Ethernet address %s\n", device_xname(sc->sc_dev),
	    ether_sprintf(enaddr));

	if (sf_funcreg_read(sc, SF_PciDeviceConfig) & PDC_System64)
		printf("%s: 64-bit PCI slot detected\n",
		    device_xname(sc->sc_dev));

	/*
	 * Initialize our media structures and probe the MII.
	 */
	mii->mii_ifp = ifp;
	mii->mii_readreg = sf_mii_read;
	mii->mii_writereg = sf_mii_write;
	mii->mii_statchg = sf_mii_statchg;
	sc->sc_ethercom.ec_mii = mii;
	ifmedia_init(&mii->mii_media, IFM_IMASK, ether_mediachange,
	    ether_mediastatus);
	mii_attach(sc->sc_dev, mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);
	if (LIST_FIRST(&mii->mii_phys) == NULL) {
		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
	} else
		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);

	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = sf_ioctl;
	ifp->if_start = sf_start;
	ifp->if_watchdog = sf_watchdog;
	ifp->if_init = sf_init;
	ifp->if_stop = sf_stop;
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	if_deferred_start_init(ifp, NULL);
	ether_ifattach(ifp, enaddr);

	/*
	 * Make sure the interface is shutdown during reboot.
	 */
	if (pmf_device_register1(sc->sc_dev, NULL, NULL, sf_shutdown))
		pmf_class_network_register(sc->sc_dev, ifp);
	else
		aprint_error_dev(sc->sc_dev,
		    "couldn't establish power handler\n");
	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
 fail_5:
	for (i = 0; i < SF_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].ds_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_rxsoft[i].ds_dmamap);
	}
 fail_4:
	for (i = 0; i < SF_NTXDESC; i++) {
		if (sc->sc_txsoft[i].ds_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_txsoft[i].ds_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (void *) sc->sc_control_data,
	    sizeof(struct sf_control_data));
 fail_1:
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
 fail_0:
	return;
}

/*
 * sf_shutdown:
 *
 *	Shutdown hook -- make sure the interface is stopped at reboot.
 */
static bool
sf_shutdown(device_t self, int howto)
{
	struct sf_softc *sc;

	sc = device_private(self);
	sf_stop(&sc->sc_ethercom.ec_if, 1);

	return true;
}

/*
 * sf_start:		[ifnet interface function]
 *
 *	Start packet transmission on the interface.
 */
static void
sf_start(struct ifnet *ifp)
{
	struct sf_softc *sc = ifp->if_softc;
	struct mbuf *m0, *m;
	struct sf_txdesc0 *txd;
	struct sf_descsoft *ds;
	bus_dmamap_t dmamap;
	int error, producer, last = -1, opending, seg;

	/*
	 * Remember the previous number of pending transmits.
	 */
	opending = sc->sc_txpending;

	/*
	 * Find out where we're sitting.
	 */
	producer = SF_TXDINDEX_TO_HOST(
	    TDQPI_HiPrTxProducerIndex_get(
	    sf_funcreg_read(sc, SF_TxDescQueueProducerIndex)));
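
	/*
	 * Note that the chip holds the authoritative Tx producer index;
	 * we read it back and convert it with SF_TXDINDEX_TO_HOST()
	 * rather than caching a copy between calls.
	 */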

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.  Leave a blank one at the end for sanity's sake.
	 */
	while (sc->sc_txpending < (SF_NTXDESC - 1)) {
		/*
		 * Grab a packet off the queue.
		 */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;
		m = NULL;

		/*
		 * Get the transmit descriptor.
		 */
		txd = &sc->sc_txdescs[producer];
		ds = &sc->sc_txsoft[producer];
		dmamap = ds->ds_dmamap;

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the allotted number of frags, or we were
		 * short on resources.  In this case, we'll copy and try
		 * again.
		 */
		if (bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_WRITE | BUS_DMA_NOWAIT) != 0) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				aprint_error_dev(sc->sc_dev,
				    "unable to allocate Tx mbuf\n");
				break;
			}
			if (m0->m_pkthdr.len > MHLEN) {
				MCLGET(m, M_DONTWAIT);
				if ((m->m_flags & M_EXT) == 0) {
					aprint_error_dev(sc->sc_dev,
					    "unable to allocate Tx cluster\n");
					m_freem(m);
					break;
				}
			}
			m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, void *));
			m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
			error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
			    m, BUS_DMA_WRITE | BUS_DMA_NOWAIT);
			if (error) {
				aprint_error_dev(sc->sc_dev,
				    "unable to load Tx buffer, error = %d\n",
				    error);
				break;
			}
		}

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */
		IFQ_DEQUEUE(&ifp->if_snd, m0);
		if (m != NULL) {
			m_freem(m0);
			m0 = m;
		}

		/*
		 * Initialize the descriptor: a type-0 Tx descriptor
		 * carries the ID, the CRC-enable bit and the packet
		 * length in word 0, the fragment count in word 1, and
		 * the scatter/gather list after that.
		 */
		txd->td_word0 =
		    htole32(TD_W0_ID | TD_W0_CRCEN | m0->m_pkthdr.len);
		if (producer == (SF_NTXDESC - 1))
			txd->td_word0 |= TD_W0_END;
		txd->td_word1 = htole32(dmamap->dm_nsegs);
		for (seg = 0; seg < dmamap->dm_nsegs; seg++) {
			txd->td_frags[seg].fr_addr =
			    htole32(dmamap->dm_segs[seg].ds_addr);
			txd->td_frags[seg].fr_len =
			    htole32(dmamap->dm_segs[seg].ds_len);
		}

		/* Sync the descriptor and the DMA map. */
		SF_CDTXDSYNC(sc, producer, BUS_DMASYNC_PREWRITE);
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		/*
		 * Store a pointer to the packet so we can free it later.
		 */
		ds->ds_mbuf = m0;

		/* Advance the Tx pointer. */
		sc->sc_txpending++;
		last = producer;
		producer = SF_NEXTTX(producer);

		/*
		 * Pass the packet to any BPF listeners.
		 */
		bpf_mtap(ifp, m0, BPF_D_OUT);
	}

	if (sc->sc_txpending == (SF_NTXDESC - 1)) {
		/* No more slots left; notify upper layer. */
		ifp->if_flags |= IFF_OACTIVE;
	}

	if (sc->sc_txpending != opending) {
		KASSERT(last != -1);
		/*
		 * We enqueued packets.  Cause a transmit interrupt to
		 * happen on the last packet we enqueued, and give the
		 * new descriptors to the chip by writing the new
		 * producer index.
		 */
		sc->sc_txdescs[last].td_word0 |= TD_W0_INTR;
		SF_CDTXDSYNC(sc, last, BUS_DMASYNC_PREWRITE);

		sf_funcreg_write(sc, SF_TxDescQueueProducerIndex,
		    TDQPI_HiPrTxProducerIndex(SF_TXDINDEX_TO_CHIP(producer)));

		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}
493
494 /*
495 * sf_watchdog: [ifnet interface function]
496 *
497 * Watchdog timer handler.
498 */
499 static void
500 sf_watchdog(struct ifnet *ifp)
501 {
502 struct sf_softc *sc = ifp->if_softc;
503
504 printf("%s: device timeout\n", device_xname(sc->sc_dev));
505 if_statinc(ifp, if_oerrors);
506
507 (void) sf_init(ifp);
508
509 /* Try to get more packets going. */
510 sf_start(ifp);
511 }
512
513 /*
514 * sf_ioctl: [ifnet interface function]
515 *
516 * Handle control requests from the operator.
517 */
518 static int
519 sf_ioctl(struct ifnet *ifp, u_long cmd, void *data)
520 {
521 struct sf_softc *sc = ifp->if_softc;
522 int s, error;
523
524 s = splnet();
525
526 error = ether_ioctl(ifp, cmd, data);
527 if (error == ENETRESET) {
528 /*
529 * Multicast list has changed; set the hardware filter
530 * accordingly.
531 */
532 if (ifp->if_flags & IFF_RUNNING)
533 sf_set_filter(sc);
534 error = 0;
535 }
536
537 /* Try to get more packets going. */
538 sf_start(ifp);
539
540 splx(s);
541 return (error);
542 }
543
544 /*
545 * sf_intr:
546 *
547 * Interrupt service routine.
548 */
549 int
550 sf_intr(void *arg)
551 {
552 struct sf_softc *sc = arg;
553 uint32_t isr;
554 int handled = 0, wantinit = 0;
555
556 for (;;) {
557 /* Reading clears all interrupts we're interested in. */
558 isr = sf_funcreg_read(sc, SF_InterruptStatus);
559 if ((isr & IS_PCIPadInt) == 0)
560 break;
561
562 handled = 1;
563
564 /* Handle receive interrupts. */
565 if (isr & IS_RxQ1DoneInt)
566 sf_rxintr(sc);
567
568 /* Handle transmit completion interrupts. */
569 if (isr & (IS_TxDmaDoneInt | IS_TxQueueDoneInt))
570 sf_txintr(sc);
571
572 /* Handle abnormal interrupts. */
573 if (isr & IS_AbnormalInterrupt) {
574 /* Statistics. */
575 if (isr & IS_StatisticWrapInt)
576 sf_stats_update(sc);
577
578 /* DMA errors. */
579 if (isr & IS_DmaErrInt) {
580 wantinit = 1;
581 aprint_error_dev(sc->sc_dev,
582 "WARNING: DMA error\n");
583 }
584
585 /* Transmit FIFO underruns. */
586 if (isr & IS_TxDataLowInt) {
587 if (sc->sc_txthresh < 0xff)
588 sc->sc_txthresh++;
589 printf("%s: transmit FIFO underrun, new "
590 "threshold: %d bytes\n",
591 device_xname(sc->sc_dev),
592 sc->sc_txthresh * 16);
593 sf_funcreg_write(sc, SF_TransmitFrameCSR,
594 sc->sc_TransmitFrameCSR |
595 TFCSR_TransmitThreshold(sc->sc_txthresh));
596 sf_funcreg_write(sc, SF_TxDescQueueCtrl,
597 sc->sc_TxDescQueueCtrl |
598 TDQC_TxHighPriorityFifoThreshold(
599 sc->sc_txthresh));
600 }
601 }
602 }
603
604 if (handled) {
605 /* Reset the interface, if necessary. */
606 if (wantinit)
607 sf_init(&sc->sc_ethercom.ec_if);
608
609 /* Try and get more packets going. */
610 if_schedule_deferred_start(&sc->sc_ethercom.ec_if);
611 }
612
613 return (handled);
614 }
615
616 /*
617 * sf_txintr:
618 *
619 * Helper -- handle transmit completion interrupts.
620 */
621 static void
622 sf_txintr(struct sf_softc *sc)
623 {
624 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
625 struct sf_descsoft *ds;
626 uint32_t cqci, tcd;
627 int consumer, producer, txidx;
628
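	/*
	 * Process completions until the ring drains.  After updating
	 * the consumer index below, we branch back here and re-read
	 * the producer index to pick up any completions the chip
	 * posted in the meantime.
	 */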
 try_again:
	cqci = sf_funcreg_read(sc, SF_CompletionQueueConsumerIndex);

	consumer = CQCI_TxCompletionConsumerIndex_get(cqci);
	producer = CQPI_TxCompletionProducerIndex_get(
	    sf_funcreg_read(sc, SF_CompletionQueueProducerIndex));

	if (consumer == producer)
		return;

	ifp->if_flags &= ~IFF_OACTIVE;

	while (consumer != producer) {
		SF_CDTXCSYNC(sc, consumer, BUS_DMASYNC_POSTREAD);
		tcd = le32toh(sc->sc_txcomp[consumer].tcd_word0);

		txidx = SF_TCD_INDEX_TO_HOST(TCD_INDEX(tcd));
#ifdef DIAGNOSTIC
		if ((tcd & TCD_PR) == 0)
			aprint_error_dev(sc->sc_dev,
			    "Tx queue mismatch, index %d\n", txidx);
#endif
		/*
		 * NOTE: stats are updated later.  We're just
		 * releasing packets that have been DMA'd to
		 * the chip.
		 */
		ds = &sc->sc_txsoft[txidx];
		SF_CDTXDSYNC(sc, txidx, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap,
		    0, ds->ds_dmamap->dm_mapsize,
		    BUS_DMASYNC_POSTWRITE);
		m_freem(ds->ds_mbuf);
		ds->ds_mbuf = NULL;

		consumer = SF_NEXTTCD(consumer);
		sc->sc_txpending--;
	}

	/* XXXJRT -- should be KDASSERT() */
	KASSERT(sc->sc_txpending >= 0);

	/* If all packets are done, cancel the watchdog timer. */
	if (sc->sc_txpending == 0)
		ifp->if_timer = 0;

	/* Update the consumer index. */
	sf_funcreg_write(sc, SF_CompletionQueueConsumerIndex,
	    (cqci & ~CQCI_TxCompletionConsumerIndex(0x7ff)) |
	     CQCI_TxCompletionConsumerIndex(consumer));

	/* Double check for new completions. */
	goto try_again;
}

/*
 * sf_rxintr:
 *
 *	Helper -- handle receive interrupts.
 */
static void
sf_rxintr(struct sf_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct sf_descsoft *ds;
	struct sf_rcd_full *rcd;
	struct mbuf *m;
	uint32_t cqci, word0;
	int consumer, producer, bufproducer, rxidx, len;

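	/*
	 * As with Tx completions, drain the ring and then re-check the
	 * producer index, so completions that arrive while we work are
	 * handled before we return.
	 */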
 try_again:
	cqci = sf_funcreg_read(sc, SF_CompletionQueueConsumerIndex);

	consumer = CQCI_RxCompletionQ1ConsumerIndex_get(cqci);
	producer = CQPI_RxCompletionQ1ProducerIndex_get(
	    sf_funcreg_read(sc, SF_CompletionQueueProducerIndex));
	bufproducer = RXQ1P_RxDescQ1Producer_get(
	    sf_funcreg_read(sc, SF_RxDescQueue1Ptrs));

	if (consumer == producer)
		return;

	while (consumer != producer) {
		rcd = &sc->sc_rxcomp[consumer];
		SF_CDRXCSYNC(sc, consumer,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		SF_CDRXCSYNC(sc, consumer,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		word0 = le32toh(rcd->rcd_word0);
		rxidx = RCD_W0_EndIndex(word0);

		ds = &sc->sc_rxsoft[rxidx];

		consumer = SF_NEXTRCD(consumer);
		bufproducer = SF_NEXTRX(bufproducer);

		if ((word0 & RCD_W0_OK) == 0) {
			SF_INIT_RXDESC(sc, rxidx);
			continue;
		}

		bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
		    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

		/*
		 * No errors; receive the packet.  Note that we have
		 * configured the Starfire to NOT transfer the CRC
		 * with the packet.
		 */
		len = RCD_W0_Length(word0);

#ifdef __NO_STRICT_ALIGNMENT
		/*
		 * Allocate a new mbuf cluster.  If that fails, we are
		 * out of memory, and must drop the packet and recycle
		 * the buffer that's already attached to this descriptor.
		 */
		m = ds->ds_mbuf;
		if (sf_add_rxbuf(sc, rxidx) != 0) {
			if_statinc(ifp, if_ierrors);
			SF_INIT_RXDESC(sc, rxidx);
			bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
			    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
			continue;
		}
#else
		/*
		 * The Starfire's receive buffer must be 4-byte aligned.
		 * But this means that the data after the Ethernet header
		 * is misaligned.  We must allocate a new buffer and
		 * copy the data, shifted forward 2 bytes.
		 */
		MGETHDR(m, M_DONTWAIT, MT_DATA);
		if (m == NULL) {
 dropit:
			if_statinc(ifp, if_ierrors);
			SF_INIT_RXDESC(sc, rxidx);
			bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
			    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
			continue;
		}
		if (len > (MHLEN - 2)) {
			MCLGET(m, M_DONTWAIT);
			if ((m->m_flags & M_EXT) == 0) {
				m_freem(m);
				goto dropit;
			}
		}
		m->m_data += 2;

		/*
		 * Note that we use clusters for incoming frames, so the
		 * buffer is virtually contiguous.
		 */
		memcpy(mtod(m, void *), mtod(ds->ds_mbuf, void *), len);

		/* Allow the receive descriptor to continue using its mbuf. */
		SF_INIT_RXDESC(sc, rxidx);
		bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
		    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
#endif /* __NO_STRICT_ALIGNMENT */

		m_set_rcvif(m, ifp);
		m->m_pkthdr.len = m->m_len = len;

		/* Pass it on. */
		if_percpuq_enqueue(ifp->if_percpuq, m);
	}

	/* Update the chip's pointers. */
	sf_funcreg_write(sc, SF_CompletionQueueConsumerIndex,
	    (cqci & ~CQCI_RxCompletionQ1ConsumerIndex(0x7ff)) |
	     CQCI_RxCompletionQ1ConsumerIndex(consumer));
	sf_funcreg_write(sc, SF_RxDescQueue1Ptrs,
	    RXQ1P_RxDescQ1Producer(bufproducer));

	/* Double-check for any new completions. */
	goto try_again;
}

/*
 * sf_tick:
 *
 *	One second timer, used to tick the MII and update stats.
 */
static void
sf_tick(void *arg)
{
	struct sf_softc *sc = arg;
	int s;

	s = splnet();
	mii_tick(&sc->sc_mii);
	sf_stats_update(sc);
	splx(s);

	callout_reset(&sc->sc_tick_callout, hz, sf_tick, sc);
}

/*
 * sf_stats_update:
 *
 *	Read the statistics counters.
 */
static void
sf_stats_update(struct sf_softc *sc)
{
	struct sf_stats stats;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint32_t *p;
	u_int i;

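	/*
	 * The statistics counters are laid out as consecutive 32-bit
	 * registers starting at SF_STATS_BASE; struct sf_stats mirrors
	 * that layout, beginning with TransmitOKFrames.  Each counter
	 * is cleared after it is read.
	 */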
	p = &stats.TransmitOKFrames;
	for (i = 0; i < (sizeof(stats) / sizeof(uint32_t)); i++) {
		*p++ = sf_genreg_read(sc,
		    SF_STATS_BASE + (i * sizeof(uint32_t)));
		sf_genreg_write(sc, SF_STATS_BASE + (i * sizeof(uint32_t)), 0);
	}

	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);

	if_statadd_ref(nsr, if_opackets, stats.TransmitOKFrames);

	if_statadd_ref(nsr, if_collisions,
	    stats.SingleCollisionFrames +
	    stats.MultipleCollisionFrames);

	if_statadd_ref(nsr, if_oerrors,
	    stats.TransmitAbortDueToExcessiveCollisions +
	    stats.TransmitAbortDueToExcessingDeferral +
	    stats.FramesLostDueToInternalTransmitErrors);

	if_statadd_ref(nsr, if_ierrors,
	    stats.ReceiveCRCErrors + stats.AlignmentErrors +
	    stats.ReceiveFramesTooLong + stats.ReceiveFramesTooShort +
	    stats.ReceiveFramesJabbersError +
	    stats.FramesLostDueToInternalReceiveErrors);

	IF_STAT_PUTREF(ifp);
}

/*
 * sf_reset:
 *
 *	Perform a soft reset on the Starfire.
 */
static void
sf_reset(struct sf_softc *sc)
{
	int i;

	sf_funcreg_write(sc, SF_GeneralEthernetCtrl, 0);

	sf_macreset(sc);

	sf_funcreg_write(sc, SF_PciDeviceConfig, PDC_SoftReset);
	for (i = 0; i < 1000; i++) {
		delay(10);
		if ((sf_funcreg_read(sc, SF_PciDeviceConfig) &
		     PDC_SoftReset) == 0)
			break;
	}

	if (i == 1000) {
		aprint_error_dev(sc->sc_dev, "reset failed to complete\n");
		sf_funcreg_write(sc, SF_PciDeviceConfig, 0);
	}

	delay(1000);
}

/*
 * sf_macreset:
 *
 *	Reset the MAC portion of the Starfire.
 */
static void
sf_macreset(struct sf_softc *sc)
{

	sf_genreg_write(sc, SF_MacConfig1, sc->sc_MacConfig1 | MC1_SoftRst);
	delay(1000);
	sf_genreg_write(sc, SF_MacConfig1, sc->sc_MacConfig1);
}

/*
 * sf_init:		[ifnet interface function]
 *
 *	Initialize the interface.  Must be called at splnet().
 */
static int
sf_init(struct ifnet *ifp)
{
	struct sf_softc *sc = ifp->if_softc;
	struct sf_descsoft *ds;
	int error = 0;
	u_int i;

	/*
	 * Cancel any pending I/O.
	 */
	sf_stop(ifp, 0);

	/*
	 * Reset the Starfire to a known state.
	 */
	sf_reset(sc);

	/* Clear the stat counters. */
	for (i = 0; i < sizeof(struct sf_stats); i += sizeof(uint32_t))
		sf_genreg_write(sc, SF_STATS_BASE + i, 0);

	/*
	 * Initialize the transmit descriptor ring.
	 */
	memset(sc->sc_txdescs, 0, sizeof(sc->sc_txdescs));
	sf_funcreg_write(sc, SF_TxDescQueueHighAddr, 0);
	sf_funcreg_write(sc, SF_HiPrTxDescQueueBaseAddr, SF_CDTXDADDR(sc, 0));
	sf_funcreg_write(sc, SF_LoPrTxDescQueueBaseAddr, 0);

	/*
	 * Initialize the transmit completion ring.
	 */
	for (i = 0; i < SF_NTCD; i++) {
		sc->sc_txcomp[i].tcd_word0 = TCD_DMA_ID;
		SF_CDTXCSYNC(sc, i,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}
	sf_funcreg_write(sc, SF_CompletionQueueHighAddr, 0);
	sf_funcreg_write(sc, SF_TxCompletionQueueCtrl, SF_CDTXCADDR(sc, 0));

	/*
	 * Initialize the receive descriptor ring.
	 */
	for (i = 0; i < SF_NRXDESC; i++) {
		ds = &sc->sc_rxsoft[i];
		if (ds->ds_mbuf == NULL) {
			if ((error = sf_add_rxbuf(sc, i)) != 0) {
				aprint_error_dev(sc->sc_dev,
				    "unable to allocate or map rx buffer %d, "
				    "error = %d\n", i, error);
				/*
				 * XXX Should attempt to run with fewer receive
				 * XXX buffers instead of just failing.
				 */
				sf_rxdrain(sc);
				goto out;
			}
		} else
			SF_INIT_RXDESC(sc, i);
	}
	sf_funcreg_write(sc, SF_RxDescQueueHighAddress, 0);
	sf_funcreg_write(sc, SF_RxDescQueue1LowAddress, SF_CDRXDADDR(sc, 0));
	sf_funcreg_write(sc, SF_RxDescQueue2LowAddress, 0);

	/*
	 * Initialize the receive completion ring.
	 */
	for (i = 0; i < SF_NRCD; i++) {
		sc->sc_rxcomp[i].rcd_word0 = RCD_W0_ID;
		sc->sc_rxcomp[i].rcd_word1 = 0;
		sc->sc_rxcomp[i].rcd_word2 = 0;
		sc->sc_rxcomp[i].rcd_timestamp = 0;
		SF_CDRXCSYNC(sc, i,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}
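	/*
	 * Completion queue type 3 selects the "full" completion
	 * descriptor format, matching the struct sf_rcd_full entries
	 * initialized above.
	 */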
	sf_funcreg_write(sc, SF_RxCompletionQueue1Ctrl, SF_CDRXCADDR(sc, 0) |
	    RCQ1C_RxCompletionQ1Type(3));
	sf_funcreg_write(sc, SF_RxCompletionQueue2Ctrl, 0);

	/*
	 * Initialize the Tx CSR.
	 */
	sc->sc_TransmitFrameCSR = 0;
	sf_funcreg_write(sc, SF_TransmitFrameCSR,
	    sc->sc_TransmitFrameCSR |
	    TFCSR_TransmitThreshold(sc->sc_txthresh));

	/*
	 * Initialize the Tx descriptor control register.
	 */
	sc->sc_TxDescQueueCtrl = TDQC_SkipLength(0) |
	    TDQC_TxDmaBurstSize(4) |	/* default */
	    TDQC_MinFrameSpacing(3) |	/* 128 bytes */
	    TDQC_TxDescType(0);
	sf_funcreg_write(sc, SF_TxDescQueueCtrl,
	    sc->sc_TxDescQueueCtrl |
	    TDQC_TxHighPriorityFifoThreshold(sc->sc_txthresh));

	/*
	 * Initialize the Rx descriptor control registers.
	 */
	sf_funcreg_write(sc, SF_RxDescQueue1Ctrl,
	    RDQ1C_RxQ1BufferLength(MCLBYTES) |
	    RDQ1C_RxDescSpacing(0));
	sf_funcreg_write(sc, SF_RxDescQueue2Ctrl, 0);

	/*
	 * Initialize the Tx descriptor producer indices.
	 */
	sf_funcreg_write(sc, SF_TxDescQueueProducerIndex,
	    TDQPI_HiPrTxProducerIndex(0) |
	    TDQPI_LoPrTxProducerIndex(0));

	/*
	 * Initialize the Rx descriptor producer indices.
	 */
	sf_funcreg_write(sc, SF_RxDescQueue1Ptrs,
	    RXQ1P_RxDescQ1Producer(SF_NRXDESC - 1));
	sf_funcreg_write(sc, SF_RxDescQueue2Ptrs,
	    RXQ2P_RxDescQ2Producer(0));

	/*
	 * Initialize the Tx and Rx completion queue consumer indices.
	 */
	sf_funcreg_write(sc, SF_CompletionQueueConsumerIndex,
	    CQCI_TxCompletionConsumerIndex(0) |
	    CQCI_RxCompletionQ1ConsumerIndex(0));
	sf_funcreg_write(sc, SF_RxHiPrCompletionPtrs, 0);

	/*
	 * Initialize the Rx DMA control register.
	 */
	sf_funcreg_write(sc, SF_RxDmaCtrl,
	    RDC_RxHighPriorityThreshold(6) |	/* default */
	    RDC_RxBurstSize(4));		/* default */

	/*
	 * Set the receive filter.
	 */
	sc->sc_RxAddressFilteringCtl = 0;
	sf_set_filter(sc);

	/*
	 * Set MacConfig1.  When we set the media, MacConfig1 will
	 * actually be written and the MAC part reset.
	 */
	sc->sc_MacConfig1 = MC1_PadEn;

	/*
	 * Set the media.
	 */
	if ((error = ether_mediachange(ifp)) != 0)
		goto out;

	/*
	 * Initialize the interrupt register.
	 */
	sc->sc_InterruptEn = IS_PCIPadInt | IS_RxQ1DoneInt |
	    IS_TxQueueDoneInt | IS_TxDmaDoneInt | IS_DmaErrInt |
	    IS_StatisticWrapInt;
	sf_funcreg_write(sc, SF_InterruptEn, sc->sc_InterruptEn);

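	/*
	 * Enable PCI interrupts and bus-master DMA, and program the
	 * PCI FIFO threshold.
	 */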
	sf_funcreg_write(sc, SF_PciDeviceConfig, PDC_IntEnable |
	    PDC_PCIMstDmaEn | (1 << PDC_FifoThreshold_SHIFT));

	/*
	 * Start the transmit and receive processes.
	 */
	sf_funcreg_write(sc, SF_GeneralEthernetCtrl,
	    GEC_TxDmaEn | GEC_RxDmaEn | GEC_TransmitEn | GEC_ReceiveEn);

	/* Start the one-second clock. */
	callout_reset(&sc->sc_tick_callout, hz, sf_tick, sc);

	/*
	 * Note that the interface is now running.
	 */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

 out:
	if (error) {
		ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
		ifp->if_timer = 0;
		printf("%s: interface not running\n", device_xname(sc->sc_dev));
	}
	return (error);
}

/*
 * sf_rxdrain:
 *
 *	Drain the receive queue.
 */
static void
sf_rxdrain(struct sf_softc *sc)
{
	struct sf_descsoft *ds;
	int i;

	for (i = 0; i < SF_NRXDESC; i++) {
		ds = &sc->sc_rxsoft[i];
		if (ds->ds_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
			m_freem(ds->ds_mbuf);
			ds->ds_mbuf = NULL;
		}
	}
}

/*
 * sf_stop:		[ifnet interface function]
 *
 *	Stop transmission on the interface.
 */
static void
sf_stop(struct ifnet *ifp, int disable)
{
	struct sf_softc *sc = ifp->if_softc;
	struct sf_descsoft *ds;
	int i;

	/* Stop the one-second clock. */
	callout_stop(&sc->sc_tick_callout);

	/* Down the MII. */
	mii_down(&sc->sc_mii);

	/* Disable interrupts. */
	sf_funcreg_write(sc, SF_InterruptEn, 0);

	/* Stop the transmit and receive processes. */
	sf_funcreg_write(sc, SF_GeneralEthernetCtrl, 0);

	/*
	 * Release any queued transmit buffers.
	 */
	for (i = 0; i < SF_NTXDESC; i++) {
		ds = &sc->sc_txsoft[i];
		if (ds->ds_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
			m_freem(ds->ds_mbuf);
			ds->ds_mbuf = NULL;
		}
	}

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;

	if (disable)
		sf_rxdrain(sc);
}

/*
 * sf_read_eeprom:
 *
 *	Read from the Starfire EEPROM.
 */
static uint8_t
sf_read_eeprom(struct sf_softc *sc, int offset)
{
	uint32_t reg;

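	/*
	 * The EEPROM is presented as 32-bit words in register space,
	 * each packing four bytes; fetch the aligned word and extract
	 * the byte we want.
	 */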
	reg = sf_genreg_read(sc, SF_EEPROM_BASE + (offset & ~3));

	return ((reg >> (8 * (offset & 3))) & 0xff);
}

/*
 * sf_add_rxbuf:
 *
 *	Add a receive buffer to the indicated descriptor.
 */
static int
sf_add_rxbuf(struct sf_softc *sc, int idx)
{
	struct sf_descsoft *ds = &sc->sc_rxsoft[idx];
	struct mbuf *m;
	int error;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);

	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return (ENOBUFS);
	}

	if (ds->ds_mbuf != NULL)
		bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);

	ds->ds_mbuf = m;

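	/*
	 * Load the entire cluster (ext_buf/ext_size) rather than just
	 * the mbuf data area; SF_INIT_RXDESC() programs the descriptor
	 * with the resulting bus address.
	 */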
	error = bus_dmamap_load(sc->sc_dmat, ds->ds_dmamap,
	    m->m_ext.ext_buf, m->m_ext.ext_size, NULL,
	    BUS_DMA_READ | BUS_DMA_NOWAIT);
	if (error) {
		aprint_error_dev(sc->sc_dev,
		    "can't load rx DMA map %d, error = %d\n", idx, error);
		panic("sf_add_rxbuf");		/* XXX */
	}

	bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
	    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);

	SF_INIT_RXDESC(sc, idx);

	return (0);
}

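/*
 * sf_set_filter_perfect:
 *
 *	Write an Ethernet address into a slot of the perfect filter
 *	table.  Each slot occupies 16 bytes of register space and holds
 *	the address as three 16-bit words: octets 5/4 first, then 3/2,
 *	then 1/0.
 */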
static void
sf_set_filter_perfect(struct sf_softc *sc, int slot, const uint8_t *enaddr)
{
	uint32_t reg0, reg1, reg2;

	reg0 = enaddr[5] | (enaddr[4] << 8);
	reg1 = enaddr[3] | (enaddr[2] << 8);
	reg2 = enaddr[1] | (enaddr[0] << 8);

	sf_genreg_write(sc, SF_PERFECT_BASE + (slot * 0x10) + 0, reg0);
	sf_genreg_write(sc, SF_PERFECT_BASE + (slot * 0x10) + 4, reg1);
	sf_genreg_write(sc, SF_PERFECT_BASE + (slot * 0x10) + 8, reg2);
}

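/*
 * sf_set_filter_hash:
 *
 *	Set the bit in the 512-bit multicast hash filter that
 *	corresponds to the given address: the top 9 bits of the
 *	big-endian CRC pick one of 32 16-bit hash words (hash >> 4)
 *	and a bit within that word (hash & 0xf).
 */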
static void
sf_set_filter_hash(struct sf_softc *sc, uint8_t *enaddr)
{
	uint32_t hash, slot, reg;

	hash = ether_crc32_be(enaddr, ETHER_ADDR_LEN) >> 23;
	slot = hash >> 4;

	reg = sf_genreg_read(sc, SF_HASH_BASE + (slot * 0x10));
	reg |= 1 << (hash & 0xf);
	sf_genreg_write(sc, SF_HASH_BASE + (slot * 0x10), reg);
}

/*
 * sf_set_filter:
 *
 *	Set the Starfire receive filter.
 */
static void
sf_set_filter(struct sf_softc *sc)
{
	struct ethercom *ec = &sc->sc_ethercom;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	int i;

	/* Start by clearing the perfect and hash tables. */
	for (i = 0; i < SF_PERFECT_SIZE; i += sizeof(uint32_t))
		sf_genreg_write(sc, SF_PERFECT_BASE + i, 0);

	for (i = 0; i < SF_HASH_SIZE; i += sizeof(uint32_t))
		sf_genreg_write(sc, SF_HASH_BASE + i, 0);

	/*
	 * Clear the perfect and hash mode bits.
	 */
	sc->sc_RxAddressFilteringCtl &=
	    ~(RAFC_PerfectFilteringMode(3) | RAFC_HashFilteringMode(3));

	if (ifp->if_flags & IFF_BROADCAST)
		sc->sc_RxAddressFilteringCtl |= RAFC_PassBroadcast;
	else
		sc->sc_RxAddressFilteringCtl &= ~RAFC_PassBroadcast;

	if (ifp->if_flags & IFF_PROMISC) {
		sc->sc_RxAddressFilteringCtl |= RAFC_PromiscuousMode;
		goto allmulti;
	} else
		sc->sc_RxAddressFilteringCtl &= ~RAFC_PromiscuousMode;

	/*
	 * Set normal perfect filtering mode.
	 */
	sc->sc_RxAddressFilteringCtl |= RAFC_PerfectFilteringMode(1);

	/*
	 * First, write the station address to the perfect filter
	 * table.
	 */
	sf_set_filter_perfect(sc, 0, CLLADDR(ifp->if_sadl));

	/*
	 * Now set the hash bits for each multicast address in our
	 * list.
	 */
	ETHER_LOCK(ec);
	ETHER_FIRST_MULTI(step, ec, enm);
	if (enm == NULL) {
		ETHER_UNLOCK(ec);
		goto done;
	}
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			/*
			 * We must listen to a range of multicast addresses.
			 * For now, just accept all multicasts, rather than
			 * trying to set only those filter bits needed to match
			 * the range.  (At this time, the only use of address
			 * ranges is for IP multicast routing, for which the
			 * range is big enough to require all bits set.)
			 */
			ETHER_UNLOCK(ec);
			goto allmulti;
		}
		sf_set_filter_hash(sc, enm->enm_addrlo);
		ETHER_NEXT_MULTI(step, enm);
	}
	ETHER_UNLOCK(ec);

	/*
	 * Set "hash only multicast dest, match regardless of VLAN ID".
	 */
	sc->sc_RxAddressFilteringCtl |= RAFC_HashFilteringMode(2);
	goto done;

 allmulti:
	/*
	 * XXX RAFC_PassMulticast is sub-optimal if using VLAN mode.
	 */
	sc->sc_RxAddressFilteringCtl |= RAFC_PassMulticast;
	ifp->if_flags |= IFF_ALLMULTI;

 done:
	sf_funcreg_write(sc, SF_RxAddressFilteringCtl,
	    sc->sc_RxAddressFilteringCtl);
}

/*
 * sf_mii_read:		[mii interface function]
 *
 *	Read from the MII.
 */
static int
sf_mii_read(device_t self, int phy, int reg, uint16_t *data)
{
	struct sf_softc *sc = device_private(self);
	uint32_t v;
	int i;

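	/* Poll for up to ~1ms (1000 * 1us) for the read data to be valid. */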
	for (i = 0; i < 1000; i++) {
		v = sf_genreg_read(sc, SF_MII_PHY_REG(phy, reg));
		if (v & MiiDataValid)
			break;
		delay(1);
	}

	if ((v & MiiDataValid) == 0)
		return -1;

	if (MiiRegDataPort(v) == 0xffff)
		return -1;

	*data = MiiRegDataPort(v);
	return 0;
}

/*
 * sf_mii_write:	[mii interface function]
 *
 *	Write to the MII.
 */
static int
sf_mii_write(device_t self, int phy, int reg, uint16_t val)
{
	struct sf_softc *sc = device_private(self);
	int i;

	sf_genreg_write(sc, SF_MII_PHY_REG(phy, reg), val);

	for (i = 0; i < 1000; i++) {
		if ((sf_genreg_read(sc, SF_MII_PHY_REG(phy, reg)) &
		     MiiBusy) == 0)
			return 0;
		delay(1);
	}

	printf("%s: MII write timed out\n", device_xname(sc->sc_dev));
	return ETIMEDOUT;
}

/*
 * sf_mii_statchg:	[mii interface function]
 *
 *	Callback from the PHY when the media changes.
 */
static void
sf_mii_statchg(struct ifnet *ifp)
{
	struct sf_softc *sc = ifp->if_softc;
	uint32_t ipg;

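	/*
	 * Select full- or half-duplex operation, along with the matching
	 * back-to-back inter-packet gap value.
	 */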
	if (sc->sc_mii.mii_media_active & IFM_FDX) {
		sc->sc_MacConfig1 |= MC1_FullDuplex;
		ipg = 0x15;
	} else {
		sc->sc_MacConfig1 &= ~MC1_FullDuplex;
		ipg = 0x11;
	}

	sf_genreg_write(sc, SF_MacConfig1, sc->sc_MacConfig1);
	sf_macreset(sc);

	sf_genreg_write(sc, SF_BkToBkIPG, ipg);
}