/*	$NetBSD: aic6915.c,v 1.31 2016/02/09 08:32:10 ozaki-r Exp $	*/
2
3 /*-
4 * Copyright (c) 2001 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Jason R. Thorpe.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 */
31
32 /*
33 * Device driver for the Adaptec AIC-6915 (``Starfire'')
34 * 10/100 Ethernet controller.
35 */
36
37 #include <sys/cdefs.h>
38 __KERNEL_RCSID(0, "$NetBSD: aic6915.c,v 1.31 2016/02/09 08:32:10 ozaki-r Exp $");
39
40
41 #include <sys/param.h>
42 #include <sys/systm.h>
43 #include <sys/callout.h>
44 #include <sys/mbuf.h>
45 #include <sys/malloc.h>
46 #include <sys/kernel.h>
47 #include <sys/socket.h>
48 #include <sys/ioctl.h>
49 #include <sys/errno.h>
50 #include <sys/device.h>
51
52 #include <net/if.h>
53 #include <net/if_dl.h>
54 #include <net/if_media.h>
55 #include <net/if_ether.h>
56
57 #include <net/bpf.h>
58
59 #include <sys/bus.h>
60 #include <sys/intr.h>
61
62 #include <dev/mii/miivar.h>
63
64 #include <dev/ic/aic6915reg.h>
65 #include <dev/ic/aic6915var.h>
66
67 static void sf_start(struct ifnet *);
68 static void sf_watchdog(struct ifnet *);
69 static int sf_ioctl(struct ifnet *, u_long, void *);
70 static int sf_init(struct ifnet *);
71 static void sf_stop(struct ifnet *, int);
72
73 static bool sf_shutdown(device_t, int);
74
75 static void sf_txintr(struct sf_softc *);
76 static void sf_rxintr(struct sf_softc *);
77 static void sf_stats_update(struct sf_softc *);
78
79 static void sf_reset(struct sf_softc *);
80 static void sf_macreset(struct sf_softc *);
81 static void sf_rxdrain(struct sf_softc *);
82 static int sf_add_rxbuf(struct sf_softc *, int);
83 static uint8_t sf_read_eeprom(struct sf_softc *, int);
84 static void sf_set_filter(struct sf_softc *);
85
86 static int sf_mii_read(device_t, int, int);
87 static void sf_mii_write(device_t, int, int, int);
88 static void sf_mii_statchg(struct ifnet *);
89
90 static void sf_tick(void *);
91
/*
 * Functional registers are reached through the sc_sh_func sub-region
 * handle set up in sf_attach() (equal to the base handle when the
 * device is I/O mapped).
 */
#define	sf_funcreg_read(sc, reg)					\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh_func, (reg))
#define	sf_funcreg_write(sc, reg, val)					\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh_func, (reg), (val))
96
97 static inline uint32_t
98 sf_reg_read(struct sf_softc *sc, bus_addr_t reg)
99 {
100
101 if (__predict_false(sc->sc_iomapped)) {
102 bus_space_write_4(sc->sc_st, sc->sc_sh, SF_IndirectIoAccess,
103 reg);
104 return (bus_space_read_4(sc->sc_st, sc->sc_sh,
105 SF_IndirectIoDataPort));
106 }
107
108 return (bus_space_read_4(sc->sc_st, sc->sc_sh, reg));
109 }
110
111 static inline void
112 sf_reg_write(struct sf_softc *sc, bus_addr_t reg, uint32_t val)
113 {
114
115 if (__predict_false(sc->sc_iomapped)) {
116 bus_space_write_4(sc->sc_st, sc->sc_sh, SF_IndirectIoAccess,
117 reg);
118 bus_space_write_4(sc->sc_st, sc->sc_sh, SF_IndirectIoDataPort,
119 val);
120 return;
121 }
122
123 bus_space_write_4(sc->sc_st, sc->sc_sh, reg, val);
124 }
125
/*
 * General registers live SF_GENREG_OFFSET bytes into the base handle
 * and go through sf_reg_read()/sf_reg_write() so that indirect I/O
 * access works as well.
 */
#define	sf_genreg_read(sc, reg)						\
	sf_reg_read((sc), (reg) + SF_GENREG_OFFSET)
#define	sf_genreg_write(sc, reg, val)					\
	sf_reg_write((sc), (reg) + SF_GENREG_OFFSET, (val))
130
/*
 * sf_attach:
 *
 *	Attach a Starfire interface to the system.
 */
void
sf_attach(struct sf_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	int i, rseg, error;
	bus_dma_segment_t seg;
	u_int8_t enaddr[ETHER_ADDR_LEN];

	/* One-second tick; armed by sf_init(), stopped by sf_stop(). */
	callout_init(&sc->sc_tick_callout, 0);

	/*
	 * If we're I/O mapped, the functional register handle is
	 * the same as the base handle.  If we're memory mapped,
	 * carve off a chunk of the register space for the functional
	 * registers, to save on arithmetic later.
	 */
	if (sc->sc_iomapped)
		sc->sc_sh_func = sc->sc_sh;
	else {
		if ((error = bus_space_subregion(sc->sc_st, sc->sc_sh,
		    SF_GENREG_OFFSET, SF_FUNCREG_SIZE, &sc->sc_sh_func)) != 0) {
			aprint_error_dev(sc->sc_dev, "unable to sub-region functional "
			    "registers, error = %d\n",
			    error);
			return;
		}
	}

	/*
	 * Initialize the transmit threshold for this interface.  The
	 * manual describes the default as 4 * 16 bytes.  We start out
	 * at 10 * 16 bytes, to avoid a bunch of initial underruns on
	 * several platforms.
	 */
	sc->sc_txthresh = 10;

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat,
	    sizeof(struct sf_control_data), PAGE_SIZE, 0, &seg, 1, &rseg,
	    BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(sc->sc_dev, "unable to allocate control data, error = %d\n",
		    error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	    sizeof(struct sf_control_data), (void **)&sc->sc_control_data,
	    BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
		aprint_error_dev(sc->sc_dev, "unable to map control data, error = %d\n",
		    error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat,
	    sizeof(struct sf_control_data), 1,
	    sizeof(struct sf_control_data), 0, BUS_DMA_NOWAIT,
	    &sc->sc_cddmamap)) != 0) {
		aprint_error_dev(sc->sc_dev, "unable to create control data DMA map, "
		    "error = %d\n", error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct sf_control_data), NULL,
	    BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(sc->sc_dev, "unable to load control data DMA map, error = %d\n",
		    error);
		goto fail_3;
	}

	/*
	 * Create the transmit buffer DMA maps.
	 */
	for (i = 0; i < SF_NTXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    SF_NTXFRAGS, MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &sc->sc_txsoft[i].ds_dmamap)) != 0) {
			aprint_error_dev(sc->sc_dev, "unable to create tx DMA map %d, "
			    "error = %d\n", i, error);
			goto fail_4;
		}
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < SF_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &sc->sc_rxsoft[i].ds_dmamap)) != 0) {
			aprint_error_dev(sc->sc_dev, "unable to create rx DMA map %d, "
			    "error = %d\n", i, error);
			goto fail_5;
		}
	}

	/*
	 * Reset the chip to a known state.
	 */
	sf_reset(sc);

	/*
	 * Read the Ethernet address from the EEPROM.
	 * NOTE(review): the station address appears to live at EEPROM
	 * offsets 15..15+5 in reverse byte order -- confirm against
	 * the AIC-6915 data sheet.
	 */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		enaddr[i] = sf_read_eeprom(sc, (15 + (ETHER_ADDR_LEN - 1)) - i);

	printf("%s: Ethernet address %s\n", device_xname(sc->sc_dev),
	    ether_sprintf(enaddr));

	if (sf_funcreg_read(sc, SF_PciDeviceConfig) & PDC_System64)
		printf("%s: 64-bit PCI slot detected\n", device_xname(sc->sc_dev));

	/*
	 * Initialize our media structures and probe the MII.
	 */
	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = sf_mii_read;
	sc->sc_mii.mii_writereg = sf_mii_write;
	sc->sc_mii.mii_statchg = sf_mii_statchg;
	sc->sc_ethercom.ec_mii = &sc->sc_mii;
	ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, ether_mediachange,
	    ether_mediastatus);
	mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);
	/* If no PHY was found, fall back to a manual "none" medium. */
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);

	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = sf_ioctl;
	ifp->if_start = sf_start;
	ifp->if_watchdog = sf_watchdog;
	ifp->if_init = sf_init;
	ifp->if_stop = sf_stop;
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, enaddr);

	/*
	 * Make sure the interface is shutdown during reboot.
	 */
	if (pmf_device_register1(sc->sc_dev, NULL, NULL, sf_shutdown))
		pmf_class_network_register(sc->sc_dev, ifp);
	else
		aprint_error_dev(sc->sc_dev,
		    "couldn't establish power handler\n");
	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
 fail_5:
	for (i = 0; i < SF_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].ds_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_rxsoft[i].ds_dmamap);
	}
 fail_4:
	for (i = 0; i < SF_NTXDESC; i++) {
		if (sc->sc_txsoft[i].ds_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_txsoft[i].ds_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (void *) sc->sc_control_data,
	    sizeof(struct sf_control_data));
 fail_1:
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
 fail_0:
	return;
}
323
324 /*
325 * sf_shutdown:
326 *
327 * Shutdown hook -- make sure the interface is stopped at reboot.
328 */
329 static bool
330 sf_shutdown(device_t self, int howto)
331 {
332 struct sf_softc *sc;
333
334 sc = device_private(self);
335 sf_stop(&sc->sc_ethercom.ec_if, 1);
336
337 return true;
338 }
339
340 /*
341 * sf_start: [ifnet interface function]
342 *
343 * Start packet transmission on the interface.
344 */
345 static void
346 sf_start(struct ifnet *ifp)
347 {
348 struct sf_softc *sc = ifp->if_softc;
349 struct mbuf *m0, *m;
350 struct sf_txdesc0 *txd;
351 struct sf_descsoft *ds;
352 bus_dmamap_t dmamap;
353 int error, producer, last = -1, opending, seg;
354
355 /*
356 * Remember the previous number of pending transmits.
357 */
358 opending = sc->sc_txpending;
359
360 /*
361 * Find out where we're sitting.
362 */
363 producer = SF_TXDINDEX_TO_HOST(
364 TDQPI_HiPrTxProducerIndex_get(
365 sf_funcreg_read(sc, SF_TxDescQueueProducerIndex)));
366
367 /*
368 * Loop through the send queue, setting up transmit descriptors
369 * until we drain the queue, or use up all available transmit
370 * descriptors. Leave a blank one at the end for sanity's sake.
371 */
372 while (sc->sc_txpending < (SF_NTXDESC - 1)) {
373 /*
374 * Grab a packet off the queue.
375 */
376 IFQ_POLL(&ifp->if_snd, m0);
377 if (m0 == NULL)
378 break;
379 m = NULL;
380
381 /*
382 * Get the transmit descriptor.
383 */
384 txd = &sc->sc_txdescs[producer];
385 ds = &sc->sc_txsoft[producer];
386 dmamap = ds->ds_dmamap;
387
388 /*
389 * Load the DMA map. If this fails, the packet either
390 * didn't fit in the allotted number of frags, or we were
391 * short on resources. In this case, we'll copy and try
392 * again.
393 */
394 if (bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
395 BUS_DMA_WRITE|BUS_DMA_NOWAIT) != 0) {
396 MGETHDR(m, M_DONTWAIT, MT_DATA);
397 if (m == NULL) {
398 aprint_error_dev(sc->sc_dev, "unable to allocate Tx mbuf\n");
399 break;
400 }
401 if (m0->m_pkthdr.len > MHLEN) {
402 MCLGET(m, M_DONTWAIT);
403 if ((m->m_flags & M_EXT) == 0) {
404 aprint_error_dev(sc->sc_dev, "unable to allocate Tx "
405 "cluster\n");
406 m_freem(m);
407 break;
408 }
409 }
410 m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, void *));
411 m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
412 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
413 m, BUS_DMA_WRITE|BUS_DMA_NOWAIT);
414 if (error) {
415 aprint_error_dev(sc->sc_dev, "unable to load Tx buffer, "
416 "error = %d\n", error);
417 break;
418 }
419 }
420
421 /*
422 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
423 */
424 IFQ_DEQUEUE(&ifp->if_snd, m0);
425 if (m != NULL) {
426 m_freem(m0);
427 m0 = m;
428 }
429
430 /* Initialize the descriptor. */
431 txd->td_word0 =
432 htole32(TD_W0_ID | TD_W0_CRCEN | m0->m_pkthdr.len);
433 if (producer == (SF_NTXDESC - 1))
434 txd->td_word0 |= TD_W0_END;
435 txd->td_word1 = htole32(dmamap->dm_nsegs);
436 for (seg = 0; seg < dmamap->dm_nsegs; seg++) {
437 txd->td_frags[seg].fr_addr =
438 htole32(dmamap->dm_segs[seg].ds_addr);
439 txd->td_frags[seg].fr_len =
440 htole32(dmamap->dm_segs[seg].ds_len);
441 }
442
443 /* Sync the descriptor and the DMA map. */
444 SF_CDTXDSYNC(sc, producer, BUS_DMASYNC_PREWRITE);
445 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
446 BUS_DMASYNC_PREWRITE);
447
448 /*
449 * Store a pointer to the packet so we can free it later.
450 */
451 ds->ds_mbuf = m0;
452
453 /* Advance the Tx pointer. */
454 sc->sc_txpending++;
455 last = producer;
456 producer = SF_NEXTTX(producer);
457
458 /*
459 * Pass the packet to any BPF listeners.
460 */
461 bpf_mtap(ifp, m0);
462 }
463
464 if (sc->sc_txpending == (SF_NTXDESC - 1)) {
465 /* No more slots left; notify upper layer. */
466 ifp->if_flags |= IFF_OACTIVE;
467 }
468
469 if (sc->sc_txpending != opending) {
470 KASSERT(last != -1);
471 /*
472 * We enqueued packets. Cause a transmit interrupt to
473 * happen on the last packet we enqueued, and give the
474 * new descriptors to the chip by writing the new
475 * producer index.
476 */
477 sc->sc_txdescs[last].td_word0 |= TD_W0_INTR;
478 SF_CDTXDSYNC(sc, last, BUS_DMASYNC_PREWRITE);
479
480 sf_funcreg_write(sc, SF_TxDescQueueProducerIndex,
481 TDQPI_HiPrTxProducerIndex(SF_TXDINDEX_TO_CHIP(producer)));
482
483 /* Set a watchdog timer in case the chip flakes out. */
484 ifp->if_timer = 5;
485 }
486 }
487
488 /*
489 * sf_watchdog: [ifnet interface function]
490 *
491 * Watchdog timer handler.
492 */
493 static void
494 sf_watchdog(struct ifnet *ifp)
495 {
496 struct sf_softc *sc = ifp->if_softc;
497
498 printf("%s: device timeout\n", device_xname(sc->sc_dev));
499 ifp->if_oerrors++;
500
501 (void) sf_init(ifp);
502
503 /* Try to get more packets going. */
504 sf_start(ifp);
505 }
506
507 /*
508 * sf_ioctl: [ifnet interface function]
509 *
510 * Handle control requests from the operator.
511 */
512 static int
513 sf_ioctl(struct ifnet *ifp, u_long cmd, void *data)
514 {
515 struct sf_softc *sc = ifp->if_softc;
516 int s, error;
517
518 s = splnet();
519
520 error = ether_ioctl(ifp, cmd, data);
521 if (error == ENETRESET) {
522 /*
523 * Multicast list has changed; set the hardware filter
524 * accordingly.
525 */
526 if (ifp->if_flags & IFF_RUNNING)
527 sf_set_filter(sc);
528 error = 0;
529 }
530
531 /* Try to get more packets going. */
532 sf_start(ifp);
533
534 splx(s);
535 return (error);
536 }
537
/*
 * sf_intr:
 *
 *	Interrupt service routine.
 */
int
sf_intr(void *arg)
{
	struct sf_softc *sc = arg;
	uint32_t isr;
	int handled = 0, wantinit = 0;

	for (;;) {
		/* Reading clears all interrupts we're interested in. */
		isr = sf_funcreg_read(sc, SF_InterruptStatus);
		/*
		 * NOTE(review): IS_PCIPadInt appears to act as the
		 * "interrupt pending" summary bit here; when clear,
		 * there is nothing left to service -- confirm against
		 * the AIC-6915 manual.
		 */
		if ((isr & IS_PCIPadInt) == 0)
			break;

		handled = 1;

		/* Handle receive interrupts. */
		if (isr & IS_RxQ1DoneInt)
			sf_rxintr(sc);

		/* Handle transmit completion interrupts. */
		if (isr & (IS_TxDmaDoneInt|IS_TxQueueDoneInt))
			sf_txintr(sc);

		/* Handle abnormal interrupts. */
		if (isr & IS_AbnormalInterrupt) {
			/* Statistics counters wrapped; harvest them. */
			if (isr & IS_StatisticWrapInt)
				sf_stats_update(sc);

			/* DMA errors; schedule a full reinit below. */
			if (isr & IS_DmaErrInt) {
				wantinit = 1;
				aprint_error_dev(sc->sc_dev, "WARNING: DMA error\n");
			}

			/*
			 * Transmit FIFO underruns: raise the transmit
			 * threshold (units of 16 bytes, capped at 0xff)
			 * and program it into both the frame CSR and
			 * the Tx descriptor queue control register.
			 */
			if (isr & IS_TxDataLowInt) {
				if (sc->sc_txthresh < 0xff)
					sc->sc_txthresh++;
				printf("%s: transmit FIFO underrun, new "
				    "threshold: %d bytes\n",
				    device_xname(sc->sc_dev),
				    sc->sc_txthresh * 16);
				sf_funcreg_write(sc, SF_TransmitFrameCSR,
				    sc->sc_TransmitFrameCSR |
				    TFCSR_TransmitThreshold(sc->sc_txthresh));
				sf_funcreg_write(sc, SF_TxDescQueueCtrl,
				    sc->sc_TxDescQueueCtrl |
				    TDQC_TxHighPriorityFifoThreshold(
				    sc->sc_txthresh));
			}
		}
	}

	if (handled) {
		/* Reset the interface, if necessary. */
		if (wantinit)
			sf_init(&sc->sc_ethercom.ec_if);

		/* Try and get more packets going. */
		sf_start(&sc->sc_ethercom.ec_if);
	}

	return (handled);
}
608
/*
 * sf_txintr:
 *
 *	Helper -- handle transmit completion interrupts.
 */
static void
sf_txintr(struct sf_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct sf_descsoft *ds;
	uint32_t cqci, tcd;
	int consumer, producer, txidx;

 try_again:
	cqci = sf_funcreg_read(sc, SF_CompletionQueueConsumerIndex);

	/*
	 * The chip produces completion entries; we consume them.
	 * Equal indices mean the completion queue is empty.
	 */
	consumer = CQCI_TxCompletionConsumerIndex_get(cqci);
	producer = CQPI_TxCompletionProducerIndex_get(
	    sf_funcreg_read(sc, SF_CompletionQueueProducerIndex));

	if (consumer == producer)
		return;

	/* At least one descriptor has freed up; Tx may proceed. */
	ifp->if_flags &= ~IFF_OACTIVE;

	while (consumer != producer) {
		SF_CDTXCSYNC(sc, consumer, BUS_DMASYNC_POSTREAD);
		tcd = le32toh(sc->sc_txcomp[consumer].tcd_word0);

		txidx = SF_TCD_INDEX_TO_HOST(TCD_INDEX(tcd));
#ifdef DIAGNOSTIC
		if ((tcd & TCD_PR) == 0)
			aprint_error_dev(sc->sc_dev, "Tx queue mismatch, index %d\n",
			    txidx);
#endif
		/*
		 * NOTE: stats are updated later.  We're just
		 * releasing packets that have been DMA'd to
		 * the chip.
		 */
		ds = &sc->sc_txsoft[txidx];
		SF_CDTXDSYNC(sc, txidx, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap,
		    0, ds->ds_dmamap->dm_mapsize,
		    BUS_DMASYNC_POSTWRITE);
		m_freem(ds->ds_mbuf);
		ds->ds_mbuf = NULL;

		consumer = SF_NEXTTCD(consumer);
		sc->sc_txpending--;
	}

	/* XXXJRT -- should be KDASSERT() */
	KASSERT(sc->sc_txpending >= 0);

	/* If all packets are done, cancel the watchdog timer. */
	if (sc->sc_txpending == 0)
		ifp->if_timer = 0;

	/* Update the consumer index. */
	sf_funcreg_write(sc, SF_CompletionQueueConsumerIndex,
	    (cqci & ~CQCI_TxCompletionConsumerIndex(0x7ff)) |
	     CQCI_TxCompletionConsumerIndex(consumer));

	/*
	 * Double check for new completions; the chip may have produced
	 * more entries while we were draining the queue.
	 */
	goto try_again;
}
676
/*
 * sf_rxintr:
 *
 *	Helper -- handle receive interrupts.
 */
static void
sf_rxintr(struct sf_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct sf_descsoft *ds;
	struct sf_rcd_full *rcd;
	struct mbuf *m;
	uint32_t cqci, word0;
	int consumer, producer, bufproducer, rxidx, len;

 try_again:
	cqci = sf_funcreg_read(sc, SF_CompletionQueueConsumerIndex);

	/*
	 * The chip produces Rx completion entries; we consume them.
	 * bufproducer tracks the Rx buffer descriptor ring, which we
	 * advance as each buffer is consumed/recycled.
	 */
	consumer = CQCI_RxCompletionQ1ConsumerIndex_get(cqci);
	producer = CQPI_RxCompletionQ1ProducerIndex_get(
	    sf_funcreg_read(sc, SF_CompletionQueueProducerIndex));
	bufproducer = RXQ1P_RxDescQ1Producer_get(
	    sf_funcreg_read(sc, SF_RxDescQueue1Ptrs));

	if (consumer == producer)
		return;

	while (consumer != producer) {
		rcd = &sc->sc_rxcomp[consumer];
		SF_CDRXCSYNC(sc, consumer,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
		SF_CDRXCSYNC(sc, consumer,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		word0 = le32toh(rcd->rcd_word0);
		rxidx = RCD_W0_EndIndex(word0);

		ds = &sc->sc_rxsoft[rxidx];

		consumer = SF_NEXTRCD(consumer);
		bufproducer = SF_NEXTRX(bufproducer);

		/* On any receive error, just recycle the buffer. */
		if ((word0 & RCD_W0_OK) == 0) {
			SF_INIT_RXDESC(sc, rxidx);
			continue;
		}

		bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
		    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

		/*
		 * No errors; receive the packet.  Note that we have
		 * configured the Starfire to NOT transfer the CRC
		 * with the packet.
		 */
		len = RCD_W0_Length(word0);

#ifdef __NO_STRICT_ALIGNMENT
		/*
		 * Allocate a new mbuf cluster.  If that fails, we are
		 * out of memory, and must drop the packet and recycle
		 * the buffer that's already attached to this descriptor.
		 */
		m = ds->ds_mbuf;
		if (sf_add_rxbuf(sc, rxidx) != 0) {
			ifp->if_ierrors++;
			SF_INIT_RXDESC(sc, rxidx);
			bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
			    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
			continue;
		}
#else
		/*
		 * The Starfire's receive buffer must be 4-byte aligned.
		 * But this means that the data after the Ethernet header
		 * is misaligned.  We must allocate a new buffer and
		 * copy the data, shifted forward 2 bytes.
		 */
		MGETHDR(m, M_DONTWAIT, MT_DATA);
		if (m == NULL) {
 dropit:
			ifp->if_ierrors++;
			SF_INIT_RXDESC(sc, rxidx);
			bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
			    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
			continue;
		}
		if (len > (MHLEN - 2)) {
			MCLGET(m, M_DONTWAIT);
			if ((m->m_flags & M_EXT) == 0) {
				m_freem(m);
				goto dropit;
			}
		}
		/* Shift by 2 so the payload ends up 4-byte aligned. */
		m->m_data += 2;

		/*
		 * Note that we use cluster for incoming frames, so the
		 * buffer is virtually contiguous.
		 */
		memcpy(mtod(m, void *), mtod(ds->ds_mbuf, void *), len);

		/* Allow the receive descriptor to continue using its mbuf. */
		SF_INIT_RXDESC(sc, rxidx);
		bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
		    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
#endif /* __NO_STRICT_ALIGNMENT */

		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = len;

		/*
		 * Pass this up to any BPF listeners.
		 */
		bpf_mtap(ifp, m);

		/* Pass it on. */
		if_percpuq_enqueue(ifp->if_percpuq, m);
	}

	/* Update the chip's pointers. */
	sf_funcreg_write(sc, SF_CompletionQueueConsumerIndex,
	    (cqci & ~CQCI_RxCompletionQ1ConsumerIndex(0x7ff)) |
	     CQCI_RxCompletionQ1ConsumerIndex(consumer));
	sf_funcreg_write(sc, SF_RxDescQueue1Ptrs,
	    RXQ1P_RxDescQ1Producer(bufproducer));

	/* Double-check for any new completions. */
	goto try_again;
}
807
/*
 * sf_tick:
 *
 *	One second timer, used to tick the MII and update stats.
 */
static void
sf_tick(void *arg)
{
	struct sf_softc *sc = arg;
	int s;

	/* The MII tick and statistics harvest both run at splnet. */
	s = splnet();
	mii_tick(&sc->sc_mii);
	sf_stats_update(sc);
	splx(s);

	/* Re-arm ourselves for one second from now. */
	callout_reset(&sc->sc_tick_callout, hz, sf_tick, sc);
}
826
/*
 * sf_stats_update:
 *
 *	Read the statistics counters.
 */
static void
sf_stats_update(struct sf_softc *sc)
{
	struct sf_stats stats;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint32_t *p;
	u_int i;

	/*
	 * Copy the entire statistics block out of chip register space,
	 * clearing each counter as it is read.  This walks struct
	 * sf_stats as a flat array of uint32_t, so it relies on the
	 * struct containing only uint32_t members laid out in register
	 * order, starting with TransmitOKFrames.
	 */
	p = &stats.TransmitOKFrames;
	for (i = 0; i < (sizeof(stats) / sizeof(uint32_t)); i++) {
		*p++ = sf_genreg_read(sc,
		    SF_STATS_BASE + (i * sizeof(uint32_t)));
		sf_genreg_write(sc, SF_STATS_BASE + (i * sizeof(uint32_t)), 0);
	}

	/* Fold the interesting counters into the interface stats. */
	ifp->if_opackets += stats.TransmitOKFrames;

	ifp->if_collisions += stats.SingleCollisionFrames +
	    stats.MultipleCollisionFrames;

	ifp->if_oerrors += stats.TransmitAbortDueToExcessiveCollisions +
	    stats.TransmitAbortDueToExcessingDeferral +
	    stats.FramesLostDueToInternalTransmitErrors;

	ifp->if_ipackets += stats.ReceiveOKFrames;

	ifp->if_ierrors += stats.ReceiveCRCErrors + stats.AlignmentErrors +
	    stats.ReceiveFramesTooLong + stats.ReceiveFramesTooShort +
	    stats.ReceiveFramesJabbersError +
	    stats.FramesLostDueToInternalReceiveErrors;
}
863
864 /*
865 * sf_reset:
866 *
867 * Perform a soft reset on the Starfire.
868 */
869 static void
870 sf_reset(struct sf_softc *sc)
871 {
872 int i;
873
874 sf_funcreg_write(sc, SF_GeneralEthernetCtrl, 0);
875
876 sf_macreset(sc);
877
878 sf_funcreg_write(sc, SF_PciDeviceConfig, PDC_SoftReset);
879 for (i = 0; i < 1000; i++) {
880 delay(10);
881 if ((sf_funcreg_read(sc, SF_PciDeviceConfig) &
882 PDC_SoftReset) == 0)
883 break;
884 }
885
886 if (i == 1000) {
887 aprint_error_dev(sc->sc_dev, "reset failed to complete\n");
888 sf_funcreg_write(sc, SF_PciDeviceConfig, 0);
889 }
890
891 delay(1000);
892 }
893
/*
 * sf_macreset:
 *
 *	Reset the MAC portion of the Starfire.
 */
static void
sf_macreset(struct sf_softc *sc)
{

	/* Pulse the MAC soft-reset bit, holding it for 1ms. */
	sf_genreg_write(sc, SF_MacConfig1, sc->sc_MacConfig1 | MC1_SoftRst);
	delay(1000);
	sf_genreg_write(sc, SF_MacConfig1, sc->sc_MacConfig1);
}
907
/*
 * sf_init:		[ifnet interface function]
 *
 *	Initialize the interface.  Must be called at splnet().
 */
static int
sf_init(struct ifnet *ifp)
{
	struct sf_softc *sc = ifp->if_softc;
	struct sf_descsoft *ds;
	int error = 0;
	u_int i;

	/*
	 * Cancel any pending I/O.
	 */
	sf_stop(ifp, 0);

	/*
	 * Reset the Starfire to a known state.
	 */
	sf_reset(sc);

	/* Clear the stat counters. */
	for (i = 0; i < sizeof(struct sf_stats); i += sizeof(uint32_t))
		sf_genreg_write(sc, SF_STATS_BASE + i, 0);

	/*
	 * Initialize the transmit descriptor ring.
	 */
	memset(sc->sc_txdescs, 0, sizeof(sc->sc_txdescs));
	sf_funcreg_write(sc, SF_TxDescQueueHighAddr, 0);
	sf_funcreg_write(sc, SF_HiPrTxDescQueueBaseAddr, SF_CDTXDADDR(sc, 0));
	sf_funcreg_write(sc, SF_LoPrTxDescQueueBaseAddr, 0);

	/*
	 * Initialize the transmit completion ring.
	 * NOTE(review): tcd_word0 is stored in host byte order here but
	 * read back with le32toh() in sf_txintr(); on a big-endian host
	 * the initial TCD_DMA_ID value would not round-trip -- confirm
	 * whether the chip always rewrites an entry before we consume it.
	 */
	for (i = 0; i < SF_NTCD; i++) {
		sc->sc_txcomp[i].tcd_word0 = TCD_DMA_ID;
		SF_CDTXCSYNC(sc, i, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	}
	sf_funcreg_write(sc, SF_CompletionQueueHighAddr, 0);
	sf_funcreg_write(sc, SF_TxCompletionQueueCtrl, SF_CDTXCADDR(sc, 0));

	/*
	 * Initialize the receive descriptor ring, allocating buffers
	 * for any slots that do not already have one.
	 */
	for (i = 0; i < SF_NRXDESC; i++) {
		ds = &sc->sc_rxsoft[i];
		if (ds->ds_mbuf == NULL) {
			if ((error = sf_add_rxbuf(sc, i)) != 0) {
				aprint_error_dev(sc->sc_dev, "unable to allocate or map rx "
				    "buffer %d, error = %d\n",
				    i, error);
				/*
				 * XXX Should attempt to run with fewer receive
				 * XXX buffers instead of just failing.
				 */
				sf_rxdrain(sc);
				goto out;
			}
		} else
			SF_INIT_RXDESC(sc, i);
	}
	sf_funcreg_write(sc, SF_RxDescQueueHighAddress, 0);
	sf_funcreg_write(sc, SF_RxDescQueue1LowAddress, SF_CDRXDADDR(sc, 0));
	sf_funcreg_write(sc, SF_RxDescQueue2LowAddress, 0);

	/*
	 * Initialize the receive completion ring.
	 */
	for (i = 0; i < SF_NRCD; i++) {
		sc->sc_rxcomp[i].rcd_word0 = RCD_W0_ID;
		sc->sc_rxcomp[i].rcd_word1 = 0;
		sc->sc_rxcomp[i].rcd_word2 = 0;
		sc->sc_rxcomp[i].rcd_timestamp = 0;
		SF_CDRXCSYNC(sc, i, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	}
	sf_funcreg_write(sc, SF_RxCompletionQueue1Ctrl, SF_CDRXCADDR(sc, 0) |
	    RCQ1C_RxCompletionQ1Type(3));
	sf_funcreg_write(sc, SF_RxCompletionQueue2Ctrl, 0);

	/*
	 * Initialize the Tx CSR.
	 */
	sc->sc_TransmitFrameCSR = 0;
	sf_funcreg_write(sc, SF_TransmitFrameCSR,
	    sc->sc_TransmitFrameCSR |
	    TFCSR_TransmitThreshold(sc->sc_txthresh));

	/*
	 * Initialize the Tx descriptor control register.
	 */
	sc->sc_TxDescQueueCtrl = TDQC_SkipLength(0) |
	    TDQC_TxDmaBurstSize(4) |	/* default */
	    TDQC_MinFrameSpacing(3) |	/* 128 bytes */
	    TDQC_TxDescType(0);
	sf_funcreg_write(sc, SF_TxDescQueueCtrl,
	    sc->sc_TxDescQueueCtrl |
	    TDQC_TxHighPriorityFifoThreshold(sc->sc_txthresh));

	/*
	 * Initialize the Rx descriptor control registers.
	 */
	sf_funcreg_write(sc, SF_RxDescQueue1Ctrl,
	    RDQ1C_RxQ1BufferLength(MCLBYTES) |
	    RDQ1C_RxDescSpacing(0));
	sf_funcreg_write(sc, SF_RxDescQueue2Ctrl, 0);

	/*
	 * Initialize the Tx descriptor producer indices.
	 */
	sf_funcreg_write(sc, SF_TxDescQueueProducerIndex,
	    TDQPI_HiPrTxProducerIndex(0) |
	    TDQPI_LoPrTxProducerIndex(0));

	/*
	 * Initialize the Rx descriptor producer indices.
	 */
	sf_funcreg_write(sc, SF_RxDescQueue1Ptrs,
	    RXQ1P_RxDescQ1Producer(SF_NRXDESC - 1));
	sf_funcreg_write(sc, SF_RxDescQueue2Ptrs,
	    RXQ2P_RxDescQ2Producer(0));

	/*
	 * Initialize the Tx and Rx completion queue consumer indices.
	 */
	sf_funcreg_write(sc, SF_CompletionQueueConsumerIndex,
	    CQCI_TxCompletionConsumerIndex(0) |
	    CQCI_RxCompletionQ1ConsumerIndex(0));
	sf_funcreg_write(sc, SF_RxHiPrCompletionPtrs, 0);

	/*
	 * Initialize the Rx DMA control register.
	 */
	sf_funcreg_write(sc, SF_RxDmaCtrl,
	    RDC_RxHighPriorityThreshold(6) |	/* default */
	    RDC_RxBurstSize(4));		/* default */

	/*
	 * Set the receive filter.
	 */
	sc->sc_RxAddressFilteringCtl = 0;
	sf_set_filter(sc);

	/*
	 * Set MacConfig1.  When we set the media, MacConfig1 will
	 * actually be written and the MAC part reset.
	 */
	sc->sc_MacConfig1 = MC1_PadEn;

	/*
	 * Set the media.
	 */
	if ((error = ether_mediachange(ifp)) != 0)
		goto out;

	/*
	 * Initialize the interrupt register.
	 */
	sc->sc_InterruptEn = IS_PCIPadInt | IS_RxQ1DoneInt |
	    IS_TxQueueDoneInt | IS_TxDmaDoneInt | IS_DmaErrInt |
	    IS_StatisticWrapInt;
	sf_funcreg_write(sc, SF_InterruptEn, sc->sc_InterruptEn);

	/* Enable interrupts and bus mastering at the PCI level. */
	sf_funcreg_write(sc, SF_PciDeviceConfig, PDC_IntEnable |
	    PDC_PCIMstDmaEn | (1 << PDC_FifoThreshold_SHIFT));

	/*
	 * Start the transmit and receive processes.
	 */
	sf_funcreg_write(sc, SF_GeneralEthernetCtrl,
	    GEC_TxDmaEn|GEC_RxDmaEn|GEC_TransmitEn|GEC_ReceiveEn);

	/* Start the one second clock. */
	callout_reset(&sc->sc_tick_callout, hz, sf_tick, sc);

	/*
	 * Note that the interface is now running.
	 */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

 out:
	if (error) {
		ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
		ifp->if_timer = 0;
		printf("%s: interface not running\n", device_xname(sc->sc_dev));
	}
	return (error);
}
1100
1101 /*
1102 * sf_rxdrain:
1103 *
1104 * Drain the receive queue.
1105 */
1106 static void
1107 sf_rxdrain(struct sf_softc *sc)
1108 {
1109 struct sf_descsoft *ds;
1110 int i;
1111
1112 for (i = 0; i < SF_NRXDESC; i++) {
1113 ds = &sc->sc_rxsoft[i];
1114 if (ds->ds_mbuf != NULL) {
1115 bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
1116 m_freem(ds->ds_mbuf);
1117 ds->ds_mbuf = NULL;
1118 }
1119 }
1120 }
1121
1122 /*
1123 * sf_stop: [ifnet interface function]
1124 *
1125 * Stop transmission on the interface.
1126 */
1127 static void
1128 sf_stop(struct ifnet *ifp, int disable)
1129 {
1130 struct sf_softc *sc = ifp->if_softc;
1131 struct sf_descsoft *ds;
1132 int i;
1133
1134 /* Stop the one second clock. */
1135 callout_stop(&sc->sc_tick_callout);
1136
1137 /* Down the MII. */
1138 mii_down(&sc->sc_mii);
1139
1140 /* Disable interrupts. */
1141 sf_funcreg_write(sc, SF_InterruptEn, 0);
1142
1143 /* Stop the transmit and receive processes. */
1144 sf_funcreg_write(sc, SF_GeneralEthernetCtrl, 0);
1145
1146 /*
1147 * Release any queued transmit buffers.
1148 */
1149 for (i = 0; i < SF_NTXDESC; i++) {
1150 ds = &sc->sc_txsoft[i];
1151 if (ds->ds_mbuf != NULL) {
1152 bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
1153 m_freem(ds->ds_mbuf);
1154 ds->ds_mbuf = NULL;
1155 }
1156 }
1157
1158 /*
1159 * Mark the interface down and cancel the watchdog timer.
1160 */
1161 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1162 ifp->if_timer = 0;
1163
1164 if (disable)
1165 sf_rxdrain(sc);
1166 }
1167
1168 /*
1169 * sf_read_eeprom:
1170 *
1171 * Read from the Starfire EEPROM.
1172 */
1173 static uint8_t
1174 sf_read_eeprom(struct sf_softc *sc, int offset)
1175 {
1176 uint32_t reg;
1177
1178 reg = sf_genreg_read(sc, SF_EEPROM_BASE + (offset & ~3));
1179
1180 return ((reg >> (8 * (offset & 3))) & 0xff);
1181 }
1182
1183 /*
1184 * sf_add_rxbuf:
1185 *
1186 * Add a receive buffer to the indicated descriptor.
1187 */
1188 static int
1189 sf_add_rxbuf(struct sf_softc *sc, int idx)
1190 {
1191 struct sf_descsoft *ds = &sc->sc_rxsoft[idx];
1192 struct mbuf *m;
1193 int error;
1194
1195 MGETHDR(m, M_DONTWAIT, MT_DATA);
1196 if (m == NULL)
1197 return (ENOBUFS);
1198
1199 MCLGET(m, M_DONTWAIT);
1200 if ((m->m_flags & M_EXT) == 0) {
1201 m_freem(m);
1202 return (ENOBUFS);
1203 }
1204
1205 if (ds->ds_mbuf != NULL)
1206 bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
1207
1208 ds->ds_mbuf = m;
1209
1210 error = bus_dmamap_load(sc->sc_dmat, ds->ds_dmamap,
1211 m->m_ext.ext_buf, m->m_ext.ext_size, NULL,
1212 BUS_DMA_READ|BUS_DMA_NOWAIT);
1213 if (error) {
1214 aprint_error_dev(sc->sc_dev, "can't load rx DMA map %d, error = %d\n",
1215 idx, error);
1216 panic("sf_add_rxbuf"); /* XXX */
1217 }
1218
1219 bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
1220 ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
1221
1222 SF_INIT_RXDESC(sc, idx);
1223
1224 return (0);
1225 }
1226
1227 static void
1228 sf_set_filter_perfect(struct sf_softc *sc, int slot, const uint8_t *enaddr)
1229 {
1230 uint32_t reg0, reg1, reg2;
1231
1232 reg0 = enaddr[5] | (enaddr[4] << 8);
1233 reg1 = enaddr[3] | (enaddr[2] << 8);
1234 reg2 = enaddr[1] | (enaddr[0] << 8);
1235
1236 sf_genreg_write(sc, SF_PERFECT_BASE + (slot * 0x10) + 0, reg0);
1237 sf_genreg_write(sc, SF_PERFECT_BASE + (slot * 0x10) + 4, reg1);
1238 sf_genreg_write(sc, SF_PERFECT_BASE + (slot * 0x10) + 8, reg2);
1239 }
1240
1241 static void
1242 sf_set_filter_hash(struct sf_softc *sc, uint8_t *enaddr)
1243 {
1244 uint32_t hash, slot, reg;
1245
1246 hash = ether_crc32_be(enaddr, ETHER_ADDR_LEN) >> 23;
1247 slot = hash >> 4;
1248
1249 reg = sf_genreg_read(sc, SF_HASH_BASE + (slot * 0x10));
1250 reg |= 1 << (hash & 0xf);
1251 sf_genreg_write(sc, SF_HASH_BASE + (slot * 0x10), reg);
1252 }
1253
1254 /*
1255 * sf_set_filter:
1256 *
1257 * Set the Starfire receive filter.
1258 */
1259 static void
1260 sf_set_filter(struct sf_softc *sc)
1261 {
1262 struct ethercom *ec = &sc->sc_ethercom;
1263 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1264 struct ether_multi *enm;
1265 struct ether_multistep step;
1266 int i;
1267
1268 /* Start by clearing the perfect and hash tables. */
1269 for (i = 0; i < SF_PERFECT_SIZE; i += sizeof(uint32_t))
1270 sf_genreg_write(sc, SF_PERFECT_BASE + i, 0);
1271
1272 for (i = 0; i < SF_HASH_SIZE; i += sizeof(uint32_t))
1273 sf_genreg_write(sc, SF_HASH_BASE + i, 0);
1274
1275 /*
1276 * Clear the perfect and hash mode bits.
1277 */
1278 sc->sc_RxAddressFilteringCtl &=
1279 ~(RAFC_PerfectFilteringMode(3) | RAFC_HashFilteringMode(3));
1280
1281 if (ifp->if_flags & IFF_BROADCAST)
1282 sc->sc_RxAddressFilteringCtl |= RAFC_PassBroadcast;
1283 else
1284 sc->sc_RxAddressFilteringCtl &= ~RAFC_PassBroadcast;
1285
1286 if (ifp->if_flags & IFF_PROMISC) {
1287 sc->sc_RxAddressFilteringCtl |= RAFC_PromiscuousMode;
1288 goto allmulti;
1289 } else
1290 sc->sc_RxAddressFilteringCtl &= ~RAFC_PromiscuousMode;
1291
1292 /*
1293 * Set normal perfect filtering mode.
1294 */
1295 sc->sc_RxAddressFilteringCtl |= RAFC_PerfectFilteringMode(1);
1296
1297 /*
1298 * First, write the station address to the perfect filter
1299 * table.
1300 */
1301 sf_set_filter_perfect(sc, 0, CLLADDR(ifp->if_sadl));
1302
1303 /*
1304 * Now set the hash bits for each multicast address in our
1305 * list.
1306 */
1307 ETHER_FIRST_MULTI(step, ec, enm);
1308 if (enm == NULL)
1309 goto done;
1310 while (enm != NULL) {
1311 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
1312 /*
1313 * We must listen to a range of multicast addresses.
1314 * For now, just accept all multicasts, rather than
1315 * trying to set only those filter bits needed to match
1316 * the range. (At this time, the only use of address
1317 * ranges is for IP multicast routing, for which the
1318 * range is big enough to require all bits set.)
1319 */
1320 goto allmulti;
1321 }
1322 sf_set_filter_hash(sc, enm->enm_addrlo);
1323 ETHER_NEXT_MULTI(step, enm);
1324 }
1325
1326 /*
1327 * Set "hash only multicast dest, match regardless of VLAN ID".
1328 */
1329 sc->sc_RxAddressFilteringCtl |= RAFC_HashFilteringMode(2);
1330 goto done;
1331
1332 allmulti:
1333 /*
1334 * XXX RAFC_PassMulticast is sub-optimal if using VLAN mode.
1335 */
1336 sc->sc_RxAddressFilteringCtl |= RAFC_PassMulticast;
1337 ifp->if_flags |= IFF_ALLMULTI;
1338
1339 done:
1340 sf_funcreg_write(sc, SF_RxAddressFilteringCtl,
1341 sc->sc_RxAddressFilteringCtl);
1342 }
1343
1344 /*
1345 * sf_mii_read: [mii interface function]
1346 *
1347 * Read from the MII.
1348 */
1349 static int
1350 sf_mii_read(device_t self, int phy, int reg)
1351 {
1352 struct sf_softc *sc = device_private(self);
1353 uint32_t v;
1354 int i;
1355
1356 for (i = 0; i < 1000; i++) {
1357 v = sf_genreg_read(sc, SF_MII_PHY_REG(phy, reg));
1358 if (v & MiiDataValid)
1359 break;
1360 delay(1);
1361 }
1362
1363 if ((v & MiiDataValid) == 0)
1364 return (0);
1365
1366 if (MiiRegDataPort(v) == 0xffff)
1367 return (0);
1368
1369 return (MiiRegDataPort(v));
1370 }
1371
1372 /*
1373 * sf_mii_write: [mii interface function]
1374 *
1375 * Write to the MII.
1376 */
1377 static void
1378 sf_mii_write(device_t self, int phy, int reg, int val)
1379 {
1380 struct sf_softc *sc = device_private(self);
1381 int i;
1382
1383 sf_genreg_write(sc, SF_MII_PHY_REG(phy, reg), val);
1384
1385 for (i = 0; i < 1000; i++) {
1386 if ((sf_genreg_read(sc, SF_MII_PHY_REG(phy, reg)) &
1387 MiiBusy) == 0)
1388 return;
1389 delay(1);
1390 }
1391
1392 printf("%s: MII write timed out\n", device_xname(sc->sc_dev));
1393 }
1394
1395 /*
1396 * sf_mii_statchg: [mii interface function]
1397 *
1398 * Callback from the PHY when the media changes.
1399 */
1400 static void
1401 sf_mii_statchg(struct ifnet *ifp)
1402 {
1403 struct sf_softc *sc = ifp->if_softc;
1404 uint32_t ipg;
1405
1406 if (sc->sc_mii.mii_media_active & IFM_FDX) {
1407 sc->sc_MacConfig1 |= MC1_FullDuplex;
1408 ipg = 0x15;
1409 } else {
1410 sc->sc_MacConfig1 &= ~MC1_FullDuplex;
1411 ipg = 0x11;
1412 }
1413
1414 sf_genreg_write(sc, SF_MacConfig1, sc->sc_MacConfig1);
1415 sf_macreset(sc);
1416
1417 sf_genreg_write(sc, SF_BkToBkIPG, ipg);
1418 }
1419