/*	$NetBSD: aic6915.c,v 1.43 2020/03/12 03:01:46 thorpej Exp $	*/

/*-
 * Copyright (c) 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Device driver for the Adaptec AIC-6915 (``Starfire'')
 * 10/100 Ethernet controller.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: aic6915.c,v 1.43 2020/03/12 03:01:46 thorpej Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <sys/bus.h>
#include <sys/intr.h>

#include <dev/mii/miivar.h>

#include <dev/ic/aic6915reg.h>
#include <dev/ic/aic6915var.h>

static void	sf_start(struct ifnet *);
static void	sf_watchdog(struct ifnet *);
static int	sf_ioctl(struct ifnet *, u_long, void *);
static int	sf_init(struct ifnet *);
static void	sf_stop(struct ifnet *, int);

static bool	sf_shutdown(device_t, int);

static void	sf_txintr(struct sf_softc *);
static void	sf_rxintr(struct sf_softc *);
static void	sf_stats_update(struct sf_softc *);

static void	sf_reset(struct sf_softc *);
static void	sf_macreset(struct sf_softc *);
static void	sf_rxdrain(struct sf_softc *);
static int	sf_add_rxbuf(struct sf_softc *, int);
static uint8_t	sf_read_eeprom(struct sf_softc *, int);
static void	sf_set_filter(struct sf_softc *);

static int	sf_mii_read(device_t, int, int, uint16_t *);
static int	sf_mii_write(device_t, int, int, uint16_t);
static void	sf_mii_statchg(struct ifnet *);

static void	sf_tick(void *);

#define	sf_funcreg_read(sc, reg)					\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh_func, (reg))
#define	sf_funcreg_write(sc, reg, val)					\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh_func, (reg), (val))

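/*
 * The chip can also be attached through a small I/O-space BAR.  In
 * that case (sc_iomapped), most registers are not directly
 * addressable: presumably the I/O window is too small to cover the
 * full register space, so each access first writes the target offset
 * to SF_IndirectIoAccess and then moves the data through
 * SF_IndirectIoDataPort.
 */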
static inline uint32_t
sf_reg_read(struct sf_softc *sc, bus_addr_t reg)
{

	if (__predict_false(sc->sc_iomapped)) {
		bus_space_write_4(sc->sc_st, sc->sc_sh, SF_IndirectIoAccess,
		    reg);
		return (bus_space_read_4(sc->sc_st, sc->sc_sh,
		    SF_IndirectIoDataPort));
	}

	return (bus_space_read_4(sc->sc_st, sc->sc_sh, reg));
}

static inline void
sf_reg_write(struct sf_softc *sc, bus_addr_t reg, uint32_t val)
{

	if (__predict_false(sc->sc_iomapped)) {
		bus_space_write_4(sc->sc_st, sc->sc_sh, SF_IndirectIoAccess,
		    reg);
		bus_space_write_4(sc->sc_st, sc->sc_sh, SF_IndirectIoDataPort,
		    val);
		return;
	}

	bus_space_write_4(sc->sc_st, sc->sc_sh, reg, val);
}

#define	sf_genreg_read(sc, reg)						\
	sf_reg_read((sc), (reg) + SF_GENREG_OFFSET)
#define	sf_genreg_write(sc, reg, val)					\
	sf_reg_write((sc), (reg) + SF_GENREG_OFFSET, (val))

/*
 * sf_attach:
 *
 *	Attach a Starfire interface to the system.
 */
void
sf_attach(struct sf_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct mii_data * const mii = &sc->sc_mii;
	int i, rseg, error;
	bus_dma_segment_t seg;
	uint8_t enaddr[ETHER_ADDR_LEN];

	callout_init(&sc->sc_tick_callout, 0);
	callout_setfunc(&sc->sc_tick_callout, sf_tick, sc);

	/*
	 * If we're I/O mapped, the functional register handle is
	 * the same as the base handle. If we're memory mapped,
	 * carve off a chunk of the register space for the functional
	 * registers, to save on arithmetic later.
	 */
	if (sc->sc_iomapped)
		sc->sc_sh_func = sc->sc_sh;
	else {
		if ((error = bus_space_subregion(sc->sc_st, sc->sc_sh,
		    SF_GENREG_OFFSET, SF_FUNCREG_SIZE, &sc->sc_sh_func)) != 0) {
			aprint_error_dev(sc->sc_dev, "unable to sub-region "
			    "functional registers, error = %d\n", error);
			return;
		}
	}

	/*
	 * Initialize the transmit threshold for this interface. The
	 * manual describes the default as 4 * 16 bytes. We start out
	 * at 10 * 16 bytes, to avoid a bunch of initial underruns on
	 * several platforms.
	 */
	sc->sc_txthresh = 10;

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat,
	    sizeof(struct sf_control_data), PAGE_SIZE, 0, &seg, 1, &rseg,
	    BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to allocate control data, error = %d\n", error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	    sizeof(struct sf_control_data), (void **)&sc->sc_control_data,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to map control data, error = %d\n", error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat,
	    sizeof(struct sf_control_data), 1,
	    sizeof(struct sf_control_data), 0, BUS_DMA_NOWAIT,
	    &sc->sc_cddmamap)) != 0) {
		aprint_error_dev(sc->sc_dev, "unable to create control data "
		    "DMA map, error = %d\n", error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct sf_control_data), NULL,
	    BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(sc->sc_dev, "unable to load control data "
		    "DMA map, error = %d\n", error);
		goto fail_3;
	}

	/*
	 * Create the transmit buffer DMA maps.
	 */
	for (i = 0; i < SF_NTXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    SF_NTXFRAGS, MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &sc->sc_txsoft[i].ds_dmamap)) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "unable to create tx DMA map %d, error = %d\n", i,
			    error);
			goto fail_4;
		}
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < SF_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &sc->sc_rxsoft[i].ds_dmamap)) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "unable to create rx DMA map %d, error = %d\n", i,
			    error);
			goto fail_5;
		}
	}

	/*
	 * Reset the chip to a known state.
	 */
	sf_reset(sc);

	/*
	 * Read the Ethernet address from the EEPROM.
	 */
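	/*
	 * (The address is stored byte-reversed starting at EEPROM
	 * offset 15: enaddr[0] comes from offset 20, enaddr[5] from
	 * offset 15, hence the (15 + (ETHER_ADDR_LEN - 1)) - i below.)
	 */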
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		enaddr[i] = sf_read_eeprom(sc, (15 + (ETHER_ADDR_LEN - 1)) - i);

	printf("%s: Ethernet address %s\n", device_xname(sc->sc_dev),
	    ether_sprintf(enaddr));

	if (sf_funcreg_read(sc, SF_PciDeviceConfig) & PDC_System64)
		printf("%s: 64-bit PCI slot detected\n",
		    device_xname(sc->sc_dev));

	/*
	 * Initialize our media structures and probe the MII.
	 */
	mii->mii_ifp = ifp;
	mii->mii_readreg = sf_mii_read;
	mii->mii_writereg = sf_mii_write;
	mii->mii_statchg = sf_mii_statchg;
	sc->sc_ethercom.ec_mii = mii;
	ifmedia_init(&mii->mii_media, IFM_IMASK, ether_mediachange,
	    ether_mediastatus);
	mii_attach(sc->sc_dev, mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);
	if (LIST_FIRST(&mii->mii_phys) == NULL) {
		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
	} else
		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);

	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = sf_ioctl;
	ifp->if_start = sf_start;
	ifp->if_watchdog = sf_watchdog;
	ifp->if_init = sf_init;
	ifp->if_stop = sf_stop;
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	if_deferred_start_init(ifp, NULL);
	ether_ifattach(ifp, enaddr);

	/*
	 * Make sure the interface is shutdown during reboot.
	 */
	if (pmf_device_register1(sc->sc_dev, NULL, NULL, sf_shutdown))
		pmf_class_network_register(sc->sc_dev, ifp);
	else
		aprint_error_dev(sc->sc_dev,
		    "couldn't establish power handler\n");
	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
 fail_5:
	for (i = 0; i < SF_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].ds_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_rxsoft[i].ds_dmamap);
	}
 fail_4:
	for (i = 0; i < SF_NTXDESC; i++) {
		if (sc->sc_txsoft[i].ds_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_txsoft[i].ds_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (void *) sc->sc_control_data,
	    sizeof(struct sf_control_data));
 fail_1:
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
 fail_0:
	return;
}

/*
 * sf_shutdown:
 *
 *	Shutdown hook -- make sure the interface is stopped at reboot.
 */
static bool
sf_shutdown(device_t self, int howto)
{
	struct sf_softc *sc;

	sc = device_private(self);
	sf_stop(&sc->sc_ethercom.ec_if, 1);

	return true;
}

/*
 * sf_start:		[ifnet interface function]
 *
 *	Start packet transmission on the interface.
 */
static void
sf_start(struct ifnet *ifp)
{
	struct sf_softc *sc = ifp->if_softc;
	struct mbuf *m0, *m;
	struct sf_txdesc0 *txd;
	struct sf_descsoft *ds;
	bus_dmamap_t dmamap;
	int error, producer, last = -1, opending, seg;

	/*
	 * Remember the previous number of pending transmits.
	 */
	opending = sc->sc_txpending;

	/*
	 * Find out where we're sitting.
	 */
	producer = SF_TXDINDEX_TO_HOST(
	    TDQPI_HiPrTxProducerIndex_get(
	    sf_funcreg_read(sc, SF_TxDescQueueProducerIndex)));
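	/*
	 * (The chip's producer index register is authoritative;
	 * SF_TXDINDEX_TO_HOST converts its encoding of the index into
	 * an index into our descriptor array.)
	 */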

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors. Leave a blank one at the end for sanity's sake.
	 */
	while (sc->sc_txpending < (SF_NTXDESC - 1)) {
		/*
		 * Grab a packet off the queue.
		 */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;
		m = NULL;

		/*
		 * Get the transmit descriptor.
		 */
		txd = &sc->sc_txdescs[producer];
		ds = &sc->sc_txsoft[producer];
		dmamap = ds->ds_dmamap;

		/*
		 * Load the DMA map. If this fails, the packet either
		 * didn't fit in the allotted number of frags, or we were
		 * short on resources. In this case, we'll copy and try
		 * again.
		 */
		if (bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_WRITE | BUS_DMA_NOWAIT) != 0) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				aprint_error_dev(sc->sc_dev,
				    "unable to allocate Tx mbuf\n");
				break;
			}
			MCLAIM(m, &sc->sc_ethercom.ec_tx_mowner);
			if (m0->m_pkthdr.len > MHLEN) {
				MCLGET(m, M_DONTWAIT);
				if ((m->m_flags & M_EXT) == 0) {
					aprint_error_dev(sc->sc_dev,
					    "unable to allocate Tx cluster\n");
					m_freem(m);
					break;
				}
			}
			m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, void *));
			m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
			error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
			    m, BUS_DMA_WRITE | BUS_DMA_NOWAIT);
			if (error) {
				aprint_error_dev(sc->sc_dev,
				    "unable to load Tx buffer, error = %d\n",
				    error);
				break;
			}
		}

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */
		IFQ_DEQUEUE(&ifp->if_snd, m0);
		if (m != NULL) {
			m_freem(m0);
			m0 = m;
		}

		/* Initialize the descriptor. */
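		/*
		 * Type-0 descriptor layout, as used here: word0 holds
		 * the descriptor ID, the CRC-enable bit, and the frame
		 * length (TD_W0_END additionally marks the end of the
		 * ring); word1 holds the fragment count; the fragment
		 * array holds a DMA address/length pair per segment.
		 */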
		txd->td_word0 =
		    htole32(TD_W0_ID | TD_W0_CRCEN | m0->m_pkthdr.len);
		if (producer == (SF_NTXDESC - 1))
			txd->td_word0 |= htole32(TD_W0_END);
		txd->td_word1 = htole32(dmamap->dm_nsegs);
		for (seg = 0; seg < dmamap->dm_nsegs; seg++) {
			txd->td_frags[seg].fr_addr =
			    htole32(dmamap->dm_segs[seg].ds_addr);
			txd->td_frags[seg].fr_len =
			    htole32(dmamap->dm_segs[seg].ds_len);
		}

		/* Sync the descriptor and the DMA map. */
		SF_CDTXDSYNC(sc, producer, BUS_DMASYNC_PREWRITE);
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		/*
		 * Store a pointer to the packet so we can free it later.
		 */
		ds->ds_mbuf = m0;

		/* Advance the Tx pointer. */
		sc->sc_txpending++;
		last = producer;
		producer = SF_NEXTTX(producer);

		/*
		 * Pass the packet to any BPF listeners.
		 */
		bpf_mtap(ifp, m0, BPF_D_OUT);
	}

	if (sc->sc_txpending == (SF_NTXDESC - 1)) {
		/* No more slots left; notify upper layer. */
		ifp->if_flags |= IFF_OACTIVE;
	}

	if (sc->sc_txpending != opending) {
		KASSERT(last != -1);
		/*
		 * We enqueued packets. Cause a transmit interrupt to
		 * happen on the last packet we enqueued, and give the
		 * new descriptors to the chip by writing the new
		 * producer index.
		 */
		sc->sc_txdescs[last].td_word0 |= htole32(TD_W0_INTR);
		SF_CDTXDSYNC(sc, last, BUS_DMASYNC_PREWRITE);

		sf_funcreg_write(sc, SF_TxDescQueueProducerIndex,
		    TDQPI_HiPrTxProducerIndex(SF_TXDINDEX_TO_CHIP(producer)));

		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}

/*
 * sf_watchdog:		[ifnet interface function]
 *
 *	Watchdog timer handler.
 */
static void
sf_watchdog(struct ifnet *ifp)
{
	struct sf_softc *sc = ifp->if_softc;

	printf("%s: device timeout\n", device_xname(sc->sc_dev));
	if_statinc(ifp, if_oerrors);

	(void) sf_init(ifp);

	/* Try to get more packets going. */
	sf_start(ifp);
}

/*
 * sf_ioctl:		[ifnet interface function]
 *
 *	Handle control requests from the operator.
 */
static int
sf_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct sf_softc *sc = ifp->if_softc;
	int s, error;

	s = splnet();

	error = ether_ioctl(ifp, cmd, data);
	if (error == ENETRESET) {
		/*
		 * Multicast list has changed; set the hardware filter
		 * accordingly.
		 */
		if (ifp->if_flags & IFF_RUNNING)
			sf_set_filter(sc);
		error = 0;
	}

	/* Try to get more packets going. */
	sf_start(ifp);

	splx(s);
	return (error);
}

/*
 * sf_intr:
 *
 *	Interrupt service routine.
 */
int
sf_intr(void *arg)
{
	struct sf_softc *sc = arg;
	uint32_t isr;
	int handled = 0, wantinit = 0;

	for (;;) {
		/* Reading clears all interrupts we're interested in. */
		isr = sf_funcreg_read(sc, SF_InterruptStatus);
		if ((isr & IS_PCIPadInt) == 0)
			break;

		handled = 1;

		/* Handle receive interrupts. */
		if (isr & IS_RxQ1DoneInt)
			sf_rxintr(sc);

		/* Handle transmit completion interrupts. */
		if (isr & (IS_TxDmaDoneInt | IS_TxQueueDoneInt))
			sf_txintr(sc);

		/* Handle abnormal interrupts. */
		if (isr & IS_AbnormalInterrupt) {
			/* Statistics. */
			if (isr & IS_StatisticWrapInt)
				sf_stats_update(sc);

			/* DMA errors. */
			if (isr & IS_DmaErrInt) {
				wantinit = 1;
				aprint_error_dev(sc->sc_dev,
				    "WARNING: DMA error\n");
			}

			/* Transmit FIFO underruns. */
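			/*
			 * (The threshold registers count 16-byte
			 * units, so each underrun raises the threshold
			 * by 16 bytes, saturating at 0xff units.)
			 */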
			if (isr & IS_TxDataLowInt) {
				if (sc->sc_txthresh < 0xff)
					sc->sc_txthresh++;
				printf("%s: transmit FIFO underrun, new "
				    "threshold: %d bytes\n",
				    device_xname(sc->sc_dev),
				    sc->sc_txthresh * 16);
				sf_funcreg_write(sc, SF_TransmitFrameCSR,
				    sc->sc_TransmitFrameCSR |
				    TFCSR_TransmitThreshold(sc->sc_txthresh));
				sf_funcreg_write(sc, SF_TxDescQueueCtrl,
				    sc->sc_TxDescQueueCtrl |
				    TDQC_TxHighPriorityFifoThreshold(
				    sc->sc_txthresh));
			}
		}
	}

	if (handled) {
		/* Reset the interface, if necessary. */
		if (wantinit)
			sf_init(&sc->sc_ethercom.ec_if);

		/* Try and get more packets going. */
		if_schedule_deferred_start(&sc->sc_ethercom.ec_if);
	}

	return (handled);
}

/*
 * sf_txintr:
 *
 *	Helper -- handle transmit completion interrupts.
 */
static void
sf_txintr(struct sf_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct sf_descsoft *ds;
	uint32_t cqci, tcd;
	int consumer, producer, txidx;

 try_again:
	cqci = sf_funcreg_read(sc, SF_CompletionQueueConsumerIndex);

	consumer = CQCI_TxCompletionConsumerIndex_get(cqci);
	producer = CQPI_TxCompletionProducerIndex_get(
	    sf_funcreg_read(sc, SF_CompletionQueueProducerIndex));

	if (consumer == producer)
		return;

	ifp->if_flags &= ~IFF_OACTIVE;

	while (consumer != producer) {
		SF_CDTXCSYNC(sc, consumer, BUS_DMASYNC_POSTREAD);
		tcd = le32toh(sc->sc_txcomp[consumer].tcd_word0);

		txidx = SF_TCD_INDEX_TO_HOST(TCD_INDEX(tcd));
#ifdef DIAGNOSTIC
		if ((tcd & TCD_PR) == 0)
			aprint_error_dev(sc->sc_dev,
			    "Tx queue mismatch, index %d\n", txidx);
#endif
		/*
		 * NOTE: stats are updated later. We're just
		 * releasing packets that have been DMA'd to
		 * the chip.
		 */
		ds = &sc->sc_txsoft[txidx];
		SF_CDTXDSYNC(sc, txidx, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap,
		    0, ds->ds_dmamap->dm_mapsize,
		    BUS_DMASYNC_POSTWRITE);
		m_freem(ds->ds_mbuf);
		ds->ds_mbuf = NULL;

		consumer = SF_NEXTTCD(consumer);
		sc->sc_txpending--;
	}

	/* XXXJRT -- should be KDASSERT() */
	KASSERT(sc->sc_txpending >= 0);

	/* If all packets are done, cancel the watchdog timer. */
	if (sc->sc_txpending == 0)
		ifp->if_timer = 0;

	/* Update the consumer index. */
	sf_funcreg_write(sc, SF_CompletionQueueConsumerIndex,
	    (cqci & ~CQCI_TxCompletionConsumerIndex(0x7ff)) |
	     CQCI_TxCompletionConsumerIndex(consumer));

	/* Double check for new completions. */
	goto try_again;
}

/*
 * sf_rxintr:
 *
 *	Helper -- handle receive interrupts.
 */
static void
sf_rxintr(struct sf_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct sf_descsoft *ds;
	struct sf_rcd_full *rcd;
	struct mbuf *m;
	uint32_t cqci, word0;
	int consumer, producer, bufproducer, rxidx, len;

 try_again:
	cqci = sf_funcreg_read(sc, SF_CompletionQueueConsumerIndex);

	consumer = CQCI_RxCompletionQ1ConsumerIndex_get(cqci);
	producer = CQPI_RxCompletionQ1ProducerIndex_get(
	    sf_funcreg_read(sc, SF_CompletionQueueProducerIndex));
	bufproducer = RXQ1P_RxDescQ1Producer_get(
	    sf_funcreg_read(sc, SF_RxDescQueue1Ptrs));

	if (consumer == producer)
		return;

	while (consumer != producer) {
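		/*
		 * Sync the completion descriptor so we can read it,
		 * then immediately hand it back to the chip; we only
		 * need a snapshot of word0.
		 */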
		rcd = &sc->sc_rxcomp[consumer];
		SF_CDRXCSYNC(sc, consumer,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		SF_CDRXCSYNC(sc, consumer,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		word0 = le32toh(rcd->rcd_word0);
		rxidx = RCD_W0_EndIndex(word0);

		ds = &sc->sc_rxsoft[rxidx];

		consumer = SF_NEXTRCD(consumer);
		bufproducer = SF_NEXTRX(bufproducer);

		if ((word0 & RCD_W0_OK) == 0) {
			SF_INIT_RXDESC(sc, rxidx);
			continue;
		}

		bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
		    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

		/*
		 * No errors; receive the packet. Note that we have
		 * configured the Starfire to NOT transfer the CRC
		 * with the packet.
		 */
		len = RCD_W0_Length(word0);

#ifdef __NO_STRICT_ALIGNMENT
		/*
		 * Allocate a new mbuf cluster. If that fails, we are
		 * out of memory, and must drop the packet and recycle
		 * the buffer that's already attached to this descriptor.
		 */
		m = ds->ds_mbuf;
		if (sf_add_rxbuf(sc, rxidx) != 0) {
			if_statinc(ifp, if_ierrors);
			SF_INIT_RXDESC(sc, rxidx);
			bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
			    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
			continue;
		}
#else
		/*
		 * The Starfire's receive buffer must be 4-byte aligned.
		 * But this means that the data after the Ethernet header
		 * is misaligned. We must allocate a new buffer and
		 * copy the data, shifted forward 2 bytes.
		 */
		MGETHDR(m, M_DONTWAIT, MT_DATA);
		if (m == NULL) {
 dropit:
			if_statinc(ifp, if_ierrors);
			SF_INIT_RXDESC(sc, rxidx);
			bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
			    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
			continue;
		}
		MCLAIM(m, &sc->sc_ethercom.ec_rx_mowner);
		if (len > (MHLEN - 2)) {
			MCLGET(m, M_DONTWAIT);
			if ((m->m_flags & M_EXT) == 0) {
				m_freem(m);
				goto dropit;
			}
		}
		m->m_data += 2;

		/*
		 * Note that we use clusters for incoming frames, so the
		 * buffer is virtually contiguous.
		 */
		memcpy(mtod(m, void *), mtod(ds->ds_mbuf, void *), len);

		/* Allow the receive descriptor to continue using its mbuf. */
		SF_INIT_RXDESC(sc, rxidx);
		bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
		    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
#endif /* __NO_STRICT_ALIGNMENT */

		m_set_rcvif(m, ifp);
		m->m_pkthdr.len = m->m_len = len;

		/* Pass it on. */
		if_percpuq_enqueue(ifp->if_percpuq, m);
	}

	/* Update the chip's pointers. */
	sf_funcreg_write(sc, SF_CompletionQueueConsumerIndex,
	    (cqci & ~CQCI_RxCompletionQ1ConsumerIndex(0x7ff)) |
	     CQCI_RxCompletionQ1ConsumerIndex(consumer));
	sf_funcreg_write(sc, SF_RxDescQueue1Ptrs,
	    RXQ1P_RxDescQ1Producer(bufproducer));

	/* Double-check for any new completions. */
	goto try_again;
}

/*
 * sf_tick:
 *
 *	One second timer, used to tick the MII and update stats.
 */
static void
sf_tick(void *arg)
{
	struct sf_softc *sc = arg;
	int s;

	s = splnet();
	mii_tick(&sc->sc_mii);
	sf_stats_update(sc);
	splx(s);

	callout_schedule(&sc->sc_tick_callout, hz);
}

/*
 * sf_stats_update:
 *
 *	Read the statistics counters.
 */
static void
sf_stats_update(struct sf_softc *sc)
{
	struct sf_stats stats;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint32_t *p;
	u_int i;

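	/*
	 * Walk the chip's statistics registers, treating struct
	 * sf_stats as a flat array of uint32_t counters laid out in
	 * register order.  Each counter is cleared after it is read.
	 */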
	p = &stats.TransmitOKFrames;
	for (i = 0; i < (sizeof(stats) / sizeof(uint32_t)); i++) {
		*p++ = sf_genreg_read(sc,
		    SF_STATS_BASE + (i * sizeof(uint32_t)));
		sf_genreg_write(sc, SF_STATS_BASE + (i * sizeof(uint32_t)), 0);
	}

	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);

	if_statadd_ref(nsr, if_opackets, stats.TransmitOKFrames);

	if_statadd_ref(nsr, if_collisions,
	    stats.SingleCollisionFrames +
	    stats.MultipleCollisionFrames);

	if_statadd_ref(nsr, if_oerrors,
	    stats.TransmitAbortDueToExcessiveCollisions +
	    stats.TransmitAbortDueToExcessingDeferral +
	    stats.FramesLostDueToInternalTransmitErrors);

	if_statadd_ref(nsr, if_ierrors,
	    stats.ReceiveCRCErrors + stats.AlignmentErrors +
	    stats.ReceiveFramesTooLong + stats.ReceiveFramesTooShort +
	    stats.ReceiveFramesJabbersError +
	    stats.FramesLostDueToInternalReceiveErrors);

	IF_STAT_PUTREF(ifp);
}

/*
 * sf_reset:
 *
 *	Perform a soft reset on the Starfire.
 */
static void
sf_reset(struct sf_softc *sc)
{
	int i;

	sf_funcreg_write(sc, SF_GeneralEthernetCtrl, 0);

	sf_macreset(sc);

	sf_funcreg_write(sc, SF_PciDeviceConfig, PDC_SoftReset);
	for (i = 0; i < 1000; i++) {
		delay(10);
		if ((sf_funcreg_read(sc, SF_PciDeviceConfig) &
		     PDC_SoftReset) == 0)
			break;
	}

	if (i == 1000) {
		aprint_error_dev(sc->sc_dev, "reset failed to complete\n");
		sf_funcreg_write(sc, SF_PciDeviceConfig, 0);
	}

	delay(1000);
}

/*
 * sf_macreset:
 *
 *	Reset the MAC portion of the Starfire.
 */
static void
sf_macreset(struct sf_softc *sc)
{

	sf_genreg_write(sc, SF_MacConfig1, sc->sc_MacConfig1 | MC1_SoftRst);
	delay(1000);
	sf_genreg_write(sc, SF_MacConfig1, sc->sc_MacConfig1);
}

/*
 * sf_init:		[ifnet interface function]
 *
 *	Initialize the interface.  Must be called at splnet().
 */
static int
sf_init(struct ifnet *ifp)
{
	struct sf_softc *sc = ifp->if_softc;
	struct sf_descsoft *ds;
	int error = 0;
	u_int i;

	/*
	 * Cancel any pending I/O.
	 */
	sf_stop(ifp, 0);

	/*
	 * Reset the Starfire to a known state.
	 */
	sf_reset(sc);

	/* Clear the stat counters. */
	for (i = 0; i < sizeof(struct sf_stats); i += sizeof(uint32_t))
		sf_genreg_write(sc, SF_STATS_BASE + i, 0);

	/*
	 * Initialize the transmit descriptor ring.
	 */
	memset(sc->sc_txdescs, 0, sizeof(sc->sc_txdescs));
	sf_funcreg_write(sc, SF_TxDescQueueHighAddr, 0);
	sf_funcreg_write(sc, SF_HiPrTxDescQueueBaseAddr, SF_CDTXDADDR(sc, 0));
	sf_funcreg_write(sc, SF_LoPrTxDescQueueBaseAddr, 0);

	/*
	 * Initialize the transmit completion ring.
	 */
	for (i = 0; i < SF_NTCD; i++) {
		sc->sc_txcomp[i].tcd_word0 = TCD_DMA_ID;
		SF_CDTXCSYNC(sc, i, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}
	sf_funcreg_write(sc, SF_CompletionQueueHighAddr, 0);
	sf_funcreg_write(sc, SF_TxCompletionQueueCtrl, SF_CDTXCADDR(sc, 0));

	/*
	 * Initialize the receive descriptor ring.
	 */
	for (i = 0; i < SF_NRXDESC; i++) {
		ds = &sc->sc_rxsoft[i];
		if (ds->ds_mbuf == NULL) {
			if ((error = sf_add_rxbuf(sc, i)) != 0) {
				aprint_error_dev(sc->sc_dev,
				    "unable to allocate or map rx buffer %d, "
				    "error = %d\n", i, error);
				/*
				 * XXX Should attempt to run with fewer receive
				 * XXX buffers instead of just failing.
				 */
				sf_rxdrain(sc);
				goto out;
			}
		} else
			SF_INIT_RXDESC(sc, i);
	}
	sf_funcreg_write(sc, SF_RxDescQueueHighAddress, 0);
	sf_funcreg_write(sc, SF_RxDescQueue1LowAddress, SF_CDRXDADDR(sc, 0));
	sf_funcreg_write(sc, SF_RxDescQueue2LowAddress, 0);

	/*
	 * Initialize the receive completion ring.
	 */
	for (i = 0; i < SF_NRCD; i++) {
		sc->sc_rxcomp[i].rcd_word0 = RCD_W0_ID;
		sc->sc_rxcomp[i].rcd_word1 = 0;
		sc->sc_rxcomp[i].rcd_word2 = 0;
		sc->sc_rxcomp[i].rcd_timestamp = 0;
		SF_CDRXCSYNC(sc, i, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}
	sf_funcreg_write(sc, SF_RxCompletionQueue1Ctrl, SF_CDRXCADDR(sc, 0) |
	    RCQ1C_RxCompletionQ1Type(3));
	sf_funcreg_write(sc, SF_RxCompletionQueue2Ctrl, 0);

	/*
	 * Initialize the Tx CSR.
	 */
	sc->sc_TransmitFrameCSR = 0;
	sf_funcreg_write(sc, SF_TransmitFrameCSR,
	    sc->sc_TransmitFrameCSR |
	    TFCSR_TransmitThreshold(sc->sc_txthresh));

	/*
	 * Initialize the Tx descriptor control register.
	 */
	sc->sc_TxDescQueueCtrl = TDQC_SkipLength(0) |
	    TDQC_TxDmaBurstSize(4) |	/* default */
	    TDQC_MinFrameSpacing(3) |	/* 128 bytes */
	    TDQC_TxDescType(0);
	sf_funcreg_write(sc, SF_TxDescQueueCtrl,
	    sc->sc_TxDescQueueCtrl |
	    TDQC_TxHighPriorityFifoThreshold(sc->sc_txthresh));

	/*
	 * Initialize the Rx descriptor control registers.
	 */
	sf_funcreg_write(sc, SF_RxDescQueue1Ctrl,
	    RDQ1C_RxQ1BufferLength(MCLBYTES) |
	    RDQ1C_RxDescSpacing(0));
	sf_funcreg_write(sc, SF_RxDescQueue2Ctrl, 0);

	/*
	 * Initialize the Tx descriptor producer indices.
	 */
	sf_funcreg_write(sc, SF_TxDescQueueProducerIndex,
	    TDQPI_HiPrTxProducerIndex(0) |
	    TDQPI_LoPrTxProducerIndex(0));

	/*
	 * Initialize the Rx descriptor producer indices.
	 */
	sf_funcreg_write(sc, SF_RxDescQueue1Ptrs,
	    RXQ1P_RxDescQ1Producer(SF_NRXDESC - 1));
	sf_funcreg_write(sc, SF_RxDescQueue2Ptrs,
	    RXQ2P_RxDescQ2Producer(0));

	/*
	 * Initialize the Tx and Rx completion queue consumer indices.
	 */
	sf_funcreg_write(sc, SF_CompletionQueueConsumerIndex,
	    CQCI_TxCompletionConsumerIndex(0) |
	    CQCI_RxCompletionQ1ConsumerIndex(0));
	sf_funcreg_write(sc, SF_RxHiPrCompletionPtrs, 0);

	/*
	 * Initialize the Rx DMA control register.
	 */
	sf_funcreg_write(sc, SF_RxDmaCtrl,
	    RDC_RxHighPriorityThreshold(6) |	/* default */
	    RDC_RxBurstSize(4));		/* default */

	/*
	 * Set the receive filter.
	 */
	sc->sc_RxAddressFilteringCtl = 0;
	sf_set_filter(sc);

	/*
	 * Set MacConfig1.  When we set the media, MacConfig1 will
	 * actually be written and the MAC part reset.
	 */
	sc->sc_MacConfig1 = MC1_PadEn;

	/*
	 * Set the media.
	 */
	if ((error = ether_mediachange(ifp)) != 0)
		goto out;

	/*
	 * Initialize the interrupt register.
	 */
	sc->sc_InterruptEn = IS_PCIPadInt | IS_RxQ1DoneInt |
	    IS_TxQueueDoneInt | IS_TxDmaDoneInt | IS_DmaErrInt |
	    IS_StatisticWrapInt;
	sf_funcreg_write(sc, SF_InterruptEn, sc->sc_InterruptEn);

	sf_funcreg_write(sc, SF_PciDeviceConfig, PDC_IntEnable |
	    PDC_PCIMstDmaEn | (1 << PDC_FifoThreshold_SHIFT));

	/*
	 * Start the transmit and receive processes.
	 */
	sf_funcreg_write(sc, SF_GeneralEthernetCtrl,
	    GEC_TxDmaEn | GEC_RxDmaEn | GEC_TransmitEn | GEC_ReceiveEn);

	/* Start the one-second clock. */
	callout_schedule(&sc->sc_tick_callout, hz);

	/*
	 * Note that the interface is now running.
	 */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

 out:
	if (error) {
		ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
		ifp->if_timer = 0;
		printf("%s: interface not running\n", device_xname(sc->sc_dev));
	}
	return (error);
}

/*
 * sf_rxdrain:
 *
 *	Drain the receive queue.
 */
static void
sf_rxdrain(struct sf_softc *sc)
{
	struct sf_descsoft *ds;
	int i;

	for (i = 0; i < SF_NRXDESC; i++) {
		ds = &sc->sc_rxsoft[i];
		if (ds->ds_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
			m_freem(ds->ds_mbuf);
			ds->ds_mbuf = NULL;
		}
	}
}

/*
 * sf_stop:		[ifnet interface function]
 *
 *	Stop transmission on the interface.
 */
static void
sf_stop(struct ifnet *ifp, int disable)
{
	struct sf_softc *sc = ifp->if_softc;
	struct sf_descsoft *ds;
	int i;

	/* Stop the one second clock. */
	callout_stop(&sc->sc_tick_callout);

	/* Down the MII. */
	mii_down(&sc->sc_mii);

	/* Disable interrupts. */
	sf_funcreg_write(sc, SF_InterruptEn, 0);

	/* Stop the transmit and receive processes. */
	sf_funcreg_write(sc, SF_GeneralEthernetCtrl, 0);

	/*
	 * Release any queued transmit buffers.
	 */
	for (i = 0; i < SF_NTXDESC; i++) {
		ds = &sc->sc_txsoft[i];
		if (ds->ds_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
			m_freem(ds->ds_mbuf);
			ds->ds_mbuf = NULL;
		}
	}

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;

	if (disable)
		sf_rxdrain(sc);
}

/*
 * sf_read_eeprom:
 *
 *	Read from the Starfire EEPROM.
 */
static uint8_t
sf_read_eeprom(struct sf_softc *sc, int offset)
{
	uint32_t reg;

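	/*
	 * The EEPROM contents are visible as 32-bit words in register
	 * space; fetch the aligned word and extract the addressed byte.
	 */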
	reg = sf_genreg_read(sc, SF_EEPROM_BASE + (offset & ~3));

	return ((reg >> (8 * (offset & 3))) & 0xff);
}

/*
 * sf_add_rxbuf:
 *
 *	Add a receive buffer to the indicated descriptor.
 */
static int
sf_add_rxbuf(struct sf_softc *sc, int idx)
{
	struct sf_descsoft *ds = &sc->sc_rxsoft[idx];
	struct mbuf *m;
	int error;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);
	MCLAIM(m, &sc->sc_ethercom.ec_rx_mowner);

	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return (ENOBUFS);
	}

	if (ds->ds_mbuf != NULL)
		bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);

	ds->ds_mbuf = m;

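	/*
	 * Map the entire cluster; the chip's Rx buffer length is
	 * programmed to MCLBYTES in sf_init().
	 */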
	error = bus_dmamap_load(sc->sc_dmat, ds->ds_dmamap,
	    m->m_ext.ext_buf, m->m_ext.ext_size, NULL,
	    BUS_DMA_READ | BUS_DMA_NOWAIT);
	if (error) {
		aprint_error_dev(sc->sc_dev,
		    "can't load rx DMA map %d, error = %d\n", idx, error);
		panic("sf_add_rxbuf");	/* XXX */
	}

	bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
	    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);

	SF_INIT_RXDESC(sc, idx);

	return (0);
}

static void
sf_set_filter_perfect(struct sf_softc *sc, int slot, const uint8_t *enaddr)
{
	uint32_t reg0, reg1, reg2;

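	/*
	 * Each perfect-filter slot is three 16-bit values spaced 0x10
	 * apart in register space, holding the address with the last
	 * octet in the low byte of the first word.
	 */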
	reg0 = enaddr[5] | (enaddr[4] << 8);
	reg1 = enaddr[3] | (enaddr[2] << 8);
	reg2 = enaddr[1] | (enaddr[0] << 8);

	sf_genreg_write(sc, SF_PERFECT_BASE + (slot * 0x10) + 0, reg0);
	sf_genreg_write(sc, SF_PERFECT_BASE + (slot * 0x10) + 4, reg1);
	sf_genreg_write(sc, SF_PERFECT_BASE + (slot * 0x10) + 8, reg2);
}

static void
sf_set_filter_hash(struct sf_softc *sc, uint8_t *enaddr)
{
	uint32_t hash, slot, reg;

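	/*
	 * The top 9 bits of the big-endian CRC index a 512-bit hash
	 * table: the upper 5 bits select one of 32 entries (spaced
	 * 0x10 apart), the low 4 bits select a bit within the entry.
	 */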
	hash = ether_crc32_be(enaddr, ETHER_ADDR_LEN) >> 23;
	slot = hash >> 4;

	reg = sf_genreg_read(sc, SF_HASH_BASE + (slot * 0x10));
	reg |= 1 << (hash & 0xf);
	sf_genreg_write(sc, SF_HASH_BASE + (slot * 0x10), reg);
}

/*
 * sf_set_filter:
 *
 *	Set the Starfire receive filter.
 */
static void
sf_set_filter(struct sf_softc *sc)
{
	struct ethercom *ec = &sc->sc_ethercom;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	int i;

	/* Start by clearing the perfect and hash tables. */
	for (i = 0; i < SF_PERFECT_SIZE; i += sizeof(uint32_t))
		sf_genreg_write(sc, SF_PERFECT_BASE + i, 0);

	for (i = 0; i < SF_HASH_SIZE; i += sizeof(uint32_t))
		sf_genreg_write(sc, SF_HASH_BASE + i, 0);

	/*
	 * Clear the perfect and hash mode bits.
	 */
	sc->sc_RxAddressFilteringCtl &=
	    ~(RAFC_PerfectFilteringMode(3) | RAFC_HashFilteringMode(3));

	if (ifp->if_flags & IFF_BROADCAST)
		sc->sc_RxAddressFilteringCtl |= RAFC_PassBroadcast;
	else
		sc->sc_RxAddressFilteringCtl &= ~RAFC_PassBroadcast;

	if (ifp->if_flags & IFF_PROMISC) {
		sc->sc_RxAddressFilteringCtl |= RAFC_PromiscuousMode;
		goto allmulti;
	} else
		sc->sc_RxAddressFilteringCtl &= ~RAFC_PromiscuousMode;

	/*
	 * Set normal perfect filtering mode.
	 */
	sc->sc_RxAddressFilteringCtl |= RAFC_PerfectFilteringMode(1);

	/*
	 * First, write the station address to the perfect filter
	 * table.
	 */
	sf_set_filter_perfect(sc, 0, CLLADDR(ifp->if_sadl));

	/*
	 * Now set the hash bits for each multicast address in our
	 * list.
	 */
	ETHER_LOCK(ec);
	ETHER_FIRST_MULTI(step, ec, enm);
	if (enm == NULL) {
		ETHER_UNLOCK(ec);
		goto done;
	}
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			/*
			 * We must listen to a range of multicast addresses.
			 * For now, just accept all multicasts, rather than
			 * trying to set only those filter bits needed to match
			 * the range. (At this time, the only use of address
			 * ranges is for IP multicast routing, for which the
			 * range is big enough to require all bits set.)
			 */
			ETHER_UNLOCK(ec);
			goto allmulti;
		}
		sf_set_filter_hash(sc, enm->enm_addrlo);
		ETHER_NEXT_MULTI(step, enm);
	}
	ETHER_UNLOCK(ec);

	/*
	 * Set "hash only multicast dest, match regardless of VLAN ID".
	 */
	sc->sc_RxAddressFilteringCtl |= RAFC_HashFilteringMode(2);
	goto done;

 allmulti:
	/*
	 * XXX RAFC_PassMulticast is sub-optimal if using VLAN mode.
	 */
	sc->sc_RxAddressFilteringCtl |= RAFC_PassMulticast;
	ifp->if_flags |= IFF_ALLMULTI;

 done:
	sf_funcreg_write(sc, SF_RxAddressFilteringCtl,
	    sc->sc_RxAddressFilteringCtl);
}

/*
 * sf_mii_read:		[mii interface function]
 *
 *	Read from the MII.
 */
static int
sf_mii_read(device_t self, int phy, int reg, uint16_t *data)
{
	struct sf_softc *sc = device_private(self);
	uint32_t v;
	int i;

	for (i = 0; i < 1000; i++) {
		v = sf_genreg_read(sc, SF_MII_PHY_REG(phy, reg));
		if (v & MiiDataValid)
			break;
		delay(1);
	}

	if ((v & MiiDataValid) == 0)
		return ETIMEDOUT;

	if (MiiRegDataPort(v) == 0xffff)
		return -1;

	*data = MiiRegDataPort(v);
	return 0;
}

/*
 * sf_mii_write:	[mii interface function]
 *
 *	Write to the MII.
 */
static int
sf_mii_write(device_t self, int phy, int reg, uint16_t val)
{
	struct sf_softc *sc = device_private(self);
	int i;

	sf_genreg_write(sc, SF_MII_PHY_REG(phy, reg), val);

	for (i = 0; i < 1000; i++) {
		if ((sf_genreg_read(sc, SF_MII_PHY_REG(phy, reg)) &
		     MiiBusy) == 0)
			return 0;
		delay(1);
	}

	printf("%s: MII write timed out\n", device_xname(sc->sc_dev));
	return ETIMEDOUT;
}

/*
 * sf_mii_statchg:	[mii interface function]
 *
 *	Callback from the PHY when the media changes.
 */
static void
sf_mii_statchg(struct ifnet *ifp)
{
	struct sf_softc *sc = ifp->if_softc;
	uint32_t ipg;

	if (sc->sc_mii.mii_media_active & IFM_FDX) {
		sc->sc_MacConfig1 |= MC1_FullDuplex;
		ipg = 0x15;
	} else {
		sc->sc_MacConfig1 &= ~MC1_FullDuplex;
		ipg = 0x11;
	}

	sf_genreg_write(sc, SF_MacConfig1, sc->sc_MacConfig1);
	sf_macreset(sc);

	sf_genreg_write(sc, SF_BkToBkIPG, ipg);
}