/*	$NetBSD: aic6915.c,v 1.22 2008/04/08 12:07:25 cegger Exp $	*/

/*-
 * Copyright (c) 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Device driver for the Adaptec AIC-6915 (``Starfire'')
 * 10/100 Ethernet controller.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: aic6915.c,v 1.22 2008/04/08 12:07:25 cegger Exp $");

#include "bpfilter.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>

#include <uvm/uvm_extern.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <sys/bus.h>
#include <sys/intr.h>

#include <dev/mii/miivar.h>

#include <dev/ic/aic6915reg.h>
#include <dev/ic/aic6915var.h>

static void	sf_start(struct ifnet *);
static void	sf_watchdog(struct ifnet *);
static int	sf_ioctl(struct ifnet *, u_long, void *);
static int	sf_init(struct ifnet *);
static void	sf_stop(struct ifnet *, int);

static void	sf_shutdown(void *);

static void	sf_txintr(struct sf_softc *);
static void	sf_rxintr(struct sf_softc *);
static void	sf_stats_update(struct sf_softc *);

static void	sf_reset(struct sf_softc *);
static void	sf_macreset(struct sf_softc *);
static void	sf_rxdrain(struct sf_softc *);
static int	sf_add_rxbuf(struct sf_softc *, int);
static uint8_t	sf_read_eeprom(struct sf_softc *, int);
static void	sf_set_filter(struct sf_softc *);

static int	sf_mii_read(struct device *, int, int);
static void	sf_mii_write(struct device *, int, int, int);
static void	sf_mii_statchg(struct device *);

static void	sf_tick(void *);

#define	sf_funcreg_read(sc, reg)					\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh_func, (reg))
#define	sf_funcreg_write(sc, reg, val)					\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh_func, (reg), (val))

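/*
 * When the chip is I/O mapped, only a small window of the register
 * space is directly visible, so registers are reached indirectly:
 * the register offset is written to SF_IndirectIoAccess, and the
 * data is then read or written through SF_IndirectIoDataPort.
 */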
static inline uint32_t
sf_reg_read(struct sf_softc *sc, bus_addr_t reg)
{

	if (__predict_false(sc->sc_iomapped)) {
		bus_space_write_4(sc->sc_st, sc->sc_sh, SF_IndirectIoAccess,
		    reg);
		return (bus_space_read_4(sc->sc_st, sc->sc_sh,
		    SF_IndirectIoDataPort));
	}

	return (bus_space_read_4(sc->sc_st, sc->sc_sh, reg));
}

static inline void
sf_reg_write(struct sf_softc *sc, bus_addr_t reg, uint32_t val)
{

	if (__predict_false(sc->sc_iomapped)) {
		bus_space_write_4(sc->sc_st, sc->sc_sh, SF_IndirectIoAccess,
		    reg);
		bus_space_write_4(sc->sc_st, sc->sc_sh, SF_IndirectIoDataPort,
		    val);
		return;
	}

	bus_space_write_4(sc->sc_st, sc->sc_sh, reg, val);
}

#define	sf_genreg_read(sc, reg)						\
	sf_reg_read((sc), (reg) + SF_GENREG_OFFSET)
#define	sf_genreg_write(sc, reg, val)					\
	sf_reg_write((sc), (reg) + SF_GENREG_OFFSET, (val))

/*
 * sf_attach:
 *
 *	Attach a Starfire interface to the system.
 */
void
sf_attach(struct sf_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	int i, rseg, error;
	bus_dma_segment_t seg;
	u_int8_t enaddr[ETHER_ADDR_LEN];

	callout_init(&sc->sc_tick_callout, 0);

	/*
	 * If we're I/O mapped, the functional register handle is
	 * the same as the base handle.  If we're memory mapped,
	 * carve off a chunk of the register space for the functional
	 * registers, to save on arithmetic later.
	 */
	if (sc->sc_iomapped)
		sc->sc_sh_func = sc->sc_sh;
	else {
		if ((error = bus_space_subregion(sc->sc_st, sc->sc_sh,
		    SF_GENREG_OFFSET, SF_FUNCREG_SIZE, &sc->sc_sh_func)) != 0) {
			aprint_error_dev(&sc->sc_dev, "unable to sub-region functional "
			    "registers, error = %d\n",
			    error);
			return;
		}
	}

	/*
	 * Initialize the transmit threshold for this interface.  The
	 * manual describes the default as 4 * 16 bytes.  We start out
	 * at 10 * 16 bytes, to avoid a bunch of initial underruns on
	 * several platforms.
	 */
	sc->sc_txthresh = 10;

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat,
	    sizeof(struct sf_control_data), PAGE_SIZE, 0, &seg, 1, &rseg,
	    BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(&sc->sc_dev, "unable to allocate control data, error = %d\n",
		    error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	    sizeof(struct sf_control_data), (void **)&sc->sc_control_data,
	    BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
		aprint_error_dev(&sc->sc_dev, "unable to map control data, error = %d\n",
		    error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat,
	    sizeof(struct sf_control_data), 1,
	    sizeof(struct sf_control_data), 0, BUS_DMA_NOWAIT,
	    &sc->sc_cddmamap)) != 0) {
		aprint_error_dev(&sc->sc_dev, "unable to create control data DMA map, "
		    "error = %d\n", error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct sf_control_data), NULL,
	    BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(&sc->sc_dev, "unable to load control data DMA map, error = %d\n",
		    error);
		goto fail_3;
	}

	/*
	 * Create the transmit buffer DMA maps.
	 */
	for (i = 0; i < SF_NTXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    SF_NTXFRAGS, MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &sc->sc_txsoft[i].ds_dmamap)) != 0) {
			aprint_error_dev(&sc->sc_dev, "unable to create tx DMA map %d, "
			    "error = %d\n", i, error);
			goto fail_4;
		}
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < SF_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &sc->sc_rxsoft[i].ds_dmamap)) != 0) {
			aprint_error_dev(&sc->sc_dev, "unable to create rx DMA map %d, "
			    "error = %d\n", i, error);
			goto fail_5;
		}
	}

	/*
	 * Reset the chip to a known state.
	 */
	sf_reset(sc);

	/*
	 * Read the Ethernet address from the EEPROM.
	 */
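	/*
	 * The address lives at EEPROM offsets 15 through 20 with the
	 * octets stored in reverse order, so index downward from the
	 * last byte.
	 */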
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		enaddr[i] = sf_read_eeprom(sc, (15 + (ETHER_ADDR_LEN - 1)) - i);

	printf("%s: Ethernet address %s\n", device_xname(&sc->sc_dev),
	    ether_sprintf(enaddr));

	if (sf_funcreg_read(sc, SF_PciDeviceConfig) & PDC_System64)
		printf("%s: 64-bit PCI slot detected\n", device_xname(&sc->sc_dev));

	/*
	 * Initialize our media structures and probe the MII.
	 */
	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = sf_mii_read;
	sc->sc_mii.mii_writereg = sf_mii_write;
	sc->sc_mii.mii_statchg = sf_mii_statchg;
	sc->sc_ethercom.ec_mii = &sc->sc_mii;
	ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, ether_mediachange,
	    ether_mediastatus);
	mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);

	strlcpy(ifp->if_xname, device_xname(&sc->sc_dev), IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = sf_ioctl;
	ifp->if_start = sf_start;
	ifp->if_watchdog = sf_watchdog;
	ifp->if_init = sf_init;
	ifp->if_stop = sf_stop;
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, enaddr);

	/*
	 * Make sure the interface is shutdown during reboot.
	 */
	sc->sc_sdhook = shutdownhook_establish(sf_shutdown, sc);
	if (sc->sc_sdhook == NULL)
		aprint_error_dev(&sc->sc_dev, "WARNING: unable to establish shutdown hook\n");
	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
 fail_5:
	for (i = 0; i < SF_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].ds_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_rxsoft[i].ds_dmamap);
	}
 fail_4:
	for (i = 0; i < SF_NTXDESC; i++) {
		if (sc->sc_txsoft[i].ds_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_txsoft[i].ds_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (void *) sc->sc_control_data,
	    sizeof(struct sf_control_data));
 fail_1:
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
 fail_0:
	return;
}

/*
 * sf_shutdown:
 *
 *	Shutdown hook -- make sure the interface is stopped at reboot.
 */
static void
sf_shutdown(void *arg)
{
	struct sf_softc *sc = arg;

	sf_stop(&sc->sc_ethercom.ec_if, 1);
}

/*
 * sf_start:		[ifnet interface function]
 *
 *	Start packet transmission on the interface.
 */
static void
sf_start(struct ifnet *ifp)
{
	struct sf_softc *sc = ifp->if_softc;
	struct mbuf *m0, *m;
	struct sf_txdesc0 *txd;
	struct sf_descsoft *ds;
	bus_dmamap_t dmamap;
	int error, producer, last = -1, opending, seg;

	/*
	 * Remember the previous number of pending transmits.
	 */
	opending = sc->sc_txpending;

	/*
	 * Find out where we're sitting.
	 */
	producer = SF_TXDINDEX_TO_HOST(
	    TDQPI_HiPrTxProducerIndex_get(
	    sf_funcreg_read(sc, SF_TxDescQueueProducerIndex)));

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.  Leave a blank one at the end for sanity's sake.
	 */
	while (sc->sc_txpending < (SF_NTXDESC - 1)) {
		/*
		 * Grab a packet off the queue.
		 */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;
		m = NULL;

		/*
		 * Get the transmit descriptor.
		 */
		txd = &sc->sc_txdescs[producer];
		ds = &sc->sc_txsoft[producer];
		dmamap = ds->ds_dmamap;

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the allotted number of frags, or we were
		 * short on resources.  In this case, we'll copy and try
		 * again.
		 */
		if (bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_WRITE|BUS_DMA_NOWAIT) != 0) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				aprint_error_dev(&sc->sc_dev, "unable to allocate Tx mbuf\n");
				break;
			}
			if (m0->m_pkthdr.len > MHLEN) {
				MCLGET(m, M_DONTWAIT);
				if ((m->m_flags & M_EXT) == 0) {
					aprint_error_dev(&sc->sc_dev, "unable to allocate Tx "
					    "cluster\n");
					m_freem(m);
					break;
				}
			}
			m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, void *));
			m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
			error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
			    m, BUS_DMA_WRITE|BUS_DMA_NOWAIT);
			if (error) {
				aprint_error_dev(&sc->sc_dev, "unable to load Tx buffer, "
				    "error = %d\n", error);
				break;
			}
		}

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */
		IFQ_DEQUEUE(&ifp->if_snd, m0);
		if (m != NULL) {
			m_freem(m0);
			m0 = m;
		}

		/* Initialize the descriptor. */
		txd->td_word0 =
		    htole32(TD_W0_ID | TD_W0_CRCEN | m0->m_pkthdr.len);
		if (producer == (SF_NTXDESC - 1))
			txd->td_word0 |= htole32(TD_W0_END);
		txd->td_word1 = htole32(dmamap->dm_nsegs);
		for (seg = 0; seg < dmamap->dm_nsegs; seg++) {
			txd->td_frags[seg].fr_addr =
			    htole32(dmamap->dm_segs[seg].ds_addr);
			txd->td_frags[seg].fr_len =
			    htole32(dmamap->dm_segs[seg].ds_len);
		}

		/* Sync the descriptor and the DMA map. */
		SF_CDTXDSYNC(sc, producer, BUS_DMASYNC_PREWRITE);
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		/*
		 * Store a pointer to the packet so we can free it later.
		 */
		ds->ds_mbuf = m0;

		/* Advance the Tx pointer. */
		sc->sc_txpending++;
		last = producer;
		producer = SF_NEXTTX(producer);

#if NBPFILTER > 0
		/*
		 * Pass the packet to any BPF listeners.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m0);
#endif
	}

	if (sc->sc_txpending == (SF_NTXDESC - 1)) {
		/* No more slots left; notify upper layer. */
		ifp->if_flags |= IFF_OACTIVE;
	}

	if (sc->sc_txpending != opending) {
		KASSERT(last != -1);
		/*
		 * We enqueued packets.  Cause a transmit interrupt to
		 * happen on the last packet we enqueued, and give the
		 * new descriptors to the chip by writing the new
		 * producer index.
		 */
		sc->sc_txdescs[last].td_word0 |= htole32(TD_W0_INTR);
		SF_CDTXDSYNC(sc, last, BUS_DMASYNC_PREWRITE);

		sf_funcreg_write(sc, SF_TxDescQueueProducerIndex,
		    TDQPI_HiPrTxProducerIndex(SF_TXDINDEX_TO_CHIP(producer)));

		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}

/*
 * sf_watchdog:		[ifnet interface function]
 *
 *	Watchdog timer handler.
 */
static void
sf_watchdog(struct ifnet *ifp)
{
	struct sf_softc *sc = ifp->if_softc;

	printf("%s: device timeout\n", device_xname(&sc->sc_dev));
	ifp->if_oerrors++;

	(void) sf_init(ifp);

	/* Try to get more packets going. */
	sf_start(ifp);
}

/*
 * sf_ioctl:		[ifnet interface function]
 *
 *	Handle control requests from the operator.
 */
static int
sf_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct sf_softc *sc = ifp->if_softc;
	int s, error;

	s = splnet();

	error = ether_ioctl(ifp, cmd, data);
	if (error == ENETRESET) {
		/*
		 * Multicast list has changed; set the hardware filter
		 * accordingly.
		 */
		if (ifp->if_flags & IFF_RUNNING)
			sf_set_filter(sc);
		error = 0;
	}

	/* Try to get more packets going. */
	sf_start(ifp);

	splx(s);
	return (error);
}

/*
 * sf_intr:
 *
 *	Interrupt service routine.
 */
int
sf_intr(void *arg)
{
	struct sf_softc *sc = arg;
	uint32_t isr;
	int handled = 0, wantinit = 0;

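	/*
	 * The loop below treats IS_PCIPadInt as the interrupt-summary
	 * bit: keep servicing sources until a read of the status
	 * register shows it clear, i.e. nothing further is pending.
	 */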
	for (;;) {
		/* Reading clears all interrupts we're interested in. */
		isr = sf_funcreg_read(sc, SF_InterruptStatus);
		if ((isr & IS_PCIPadInt) == 0)
			break;

		handled = 1;

		/* Handle receive interrupts. */
		if (isr & IS_RxQ1DoneInt)
			sf_rxintr(sc);

		/* Handle transmit completion interrupts. */
		if (isr & (IS_TxDmaDoneInt|IS_TxQueueDoneInt))
			sf_txintr(sc);

		/* Handle abnormal interrupts. */
		if (isr & IS_AbnormalInterrupt) {
			/* Statistics. */
			if (isr & IS_StatisticWrapInt)
				sf_stats_update(sc);

			/* DMA errors. */
			if (isr & IS_DmaErrInt) {
				wantinit = 1;
				aprint_error_dev(&sc->sc_dev, "WARNING: DMA error\n");
			}

			/* Transmit FIFO underruns. */
			if (isr & IS_TxDataLowInt) {
				if (sc->sc_txthresh < 0xff)
					sc->sc_txthresh++;
				printf("%s: transmit FIFO underrun, new "
				    "threshold: %d bytes\n",
				    device_xname(&sc->sc_dev),
				    sc->sc_txthresh * 16);
				sf_funcreg_write(sc, SF_TransmitFrameCSR,
				    sc->sc_TransmitFrameCSR |
				    TFCSR_TransmitThreshold(sc->sc_txthresh));
				sf_funcreg_write(sc, SF_TxDescQueueCtrl,
				    sc->sc_TxDescQueueCtrl |
				    TDQC_TxHighPriorityFifoThreshold(
				    sc->sc_txthresh));
			}
		}
	}

	if (handled) {
		/* Reset the interface, if necessary. */
		if (wantinit)
			sf_init(&sc->sc_ethercom.ec_if);

		/* Try and get more packets going. */
		sf_start(&sc->sc_ethercom.ec_if);
	}

	return (handled);
}

/*
 * sf_txintr:
 *
 *	Helper -- handle transmit completion interrupts.
 */
static void
sf_txintr(struct sf_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct sf_descsoft *ds;
	uint32_t cqci, tcd;
	int consumer, producer, txidx;

 try_again:
	cqci = sf_funcreg_read(sc, SF_CompletionQueueConsumerIndex);

	consumer = CQCI_TxCompletionConsumerIndex_get(cqci);
	producer = CQPI_TxCompletionProducerIndex_get(
	    sf_funcreg_read(sc, SF_CompletionQueueProducerIndex));

	if (consumer == producer)
		return;

	ifp->if_flags &= ~IFF_OACTIVE;

	while (consumer != producer) {
		SF_CDTXCSYNC(sc, consumer, BUS_DMASYNC_POSTREAD);
		tcd = le32toh(sc->sc_txcomp[consumer].tcd_word0);

		txidx = SF_TCD_INDEX_TO_HOST(TCD_INDEX(tcd));
#ifdef DIAGNOSTIC
		if ((tcd & TCD_PR) == 0)
			aprint_error_dev(&sc->sc_dev, "Tx queue mismatch, index %d\n",
			    txidx);
#endif
		/*
		 * NOTE: stats are updated later.  We're just
		 * releasing packets that have been DMA'd to
		 * the chip.
		 */
		ds = &sc->sc_txsoft[txidx];
		SF_CDTXDSYNC(sc, txidx, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap,
		    0, ds->ds_dmamap->dm_mapsize,
		    BUS_DMASYNC_POSTWRITE);
		m_freem(ds->ds_mbuf);
		ds->ds_mbuf = NULL;

		consumer = SF_NEXTTCD(consumer);
		sc->sc_txpending--;
	}

	/* XXXJRT -- should be KDASSERT() */
	KASSERT(sc->sc_txpending >= 0);

	/* If all packets are done, cancel the watchdog timer. */
	if (sc->sc_txpending == 0)
		ifp->if_timer = 0;

	/* Update the consumer index. */
	sf_funcreg_write(sc, SF_CompletionQueueConsumerIndex,
	    (cqci & ~CQCI_TxCompletionConsumerIndex(0x7ff)) |
	     CQCI_TxCompletionConsumerIndex(consumer));

	/* Double check for new completions. */
	goto try_again;
}

/*
 * sf_rxintr:
 *
 *	Helper -- handle receive interrupts.
 */
static void
sf_rxintr(struct sf_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct sf_descsoft *ds;
	struct sf_rcd_full *rcd;
	struct mbuf *m;
	uint32_t cqci, word0;
	int consumer, producer, bufproducer, rxidx, len;

 try_again:
	cqci = sf_funcreg_read(sc, SF_CompletionQueueConsumerIndex);

	consumer = CQCI_RxCompletionQ1ConsumerIndex_get(cqci);
	producer = CQPI_RxCompletionQ1ProducerIndex_get(
	    sf_funcreg_read(sc, SF_CompletionQueueProducerIndex));
	bufproducer = RXQ1P_RxDescQ1Producer_get(
	    sf_funcreg_read(sc, SF_RxDescQueue1Ptrs));

	if (consumer == producer)
		return;

	while (consumer != producer) {
		rcd = &sc->sc_rxcomp[consumer];
		SF_CDRXCSYNC(sc, consumer,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
		SF_CDRXCSYNC(sc, consumer,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		word0 = le32toh(rcd->rcd_word0);
		rxidx = RCD_W0_EndIndex(word0);

		ds = &sc->sc_rxsoft[rxidx];

		consumer = SF_NEXTRCD(consumer);
		bufproducer = SF_NEXTRX(bufproducer);

		if ((word0 & RCD_W0_OK) == 0) {
			SF_INIT_RXDESC(sc, rxidx);
			continue;
		}

		bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
		    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

		/*
		 * No errors; receive the packet.  Note that we have
		 * configured the Starfire to NOT transfer the CRC
		 * with the packet.
		 */
		len = RCD_W0_Length(word0);

#ifdef __NO_STRICT_ALIGNMENT
		/*
		 * Allocate a new mbuf cluster.  If that fails, we are
		 * out of memory, and must drop the packet and recycle
		 * the buffer that's already attached to this descriptor.
		 */
		m = ds->ds_mbuf;
		if (sf_add_rxbuf(sc, rxidx) != 0) {
			ifp->if_ierrors++;
			SF_INIT_RXDESC(sc, rxidx);
			bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
			    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
			continue;
		}
#else
		/*
		 * The Starfire's receive buffer must be 4-byte aligned.
		 * But this means that the data after the Ethernet header
		 * is misaligned.  We must allocate a new buffer and
		 * copy the data, shifted forward 2 bytes.
		 */
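		/*
		 * (A 2-byte shift puts the payload after the 14-byte
		 * Ethernet header back on a 4-byte boundary.)
		 */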
		MGETHDR(m, M_DONTWAIT, MT_DATA);
		if (m == NULL) {
 dropit:
			ifp->if_ierrors++;
			SF_INIT_RXDESC(sc, rxidx);
			bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
			    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
			continue;
		}
		if (len > (MHLEN - 2)) {
			MCLGET(m, M_DONTWAIT);
			if ((m->m_flags & M_EXT) == 0) {
				m_freem(m);
				goto dropit;
			}
		}
		m->m_data += 2;

		/*
		 * Note that we use clusters for incoming frames, so the
		 * buffer is virtually contiguous.
		 */
		memcpy(mtod(m, void *), mtod(ds->ds_mbuf, void *), len);

		/* Allow the receive descriptor to continue using its mbuf. */
		SF_INIT_RXDESC(sc, rxidx);
		bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
		    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
#endif /* __NO_STRICT_ALIGNMENT */

		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = len;

#if NBPFILTER > 0
		/*
		 * Pass this up to any BPF listeners.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif /* NBPFILTER > 0 */

		/* Pass it on. */
		(*ifp->if_input)(ifp, m);
	}

	/* Update the chip's pointers. */
	sf_funcreg_write(sc, SF_CompletionQueueConsumerIndex,
	    (cqci & ~CQCI_RxCompletionQ1ConsumerIndex(0x7ff)) |
	     CQCI_RxCompletionQ1ConsumerIndex(consumer));
	sf_funcreg_write(sc, SF_RxDescQueue1Ptrs,
	    RXQ1P_RxDescQ1Producer(bufproducer));

	/* Double-check for any new completions. */
	goto try_again;
}

/*
 * sf_tick:
 *
 *	One second timer, used to tick the MII and update stats.
 */
static void
sf_tick(void *arg)
{
	struct sf_softc *sc = arg;
	int s;

	s = splnet();
	mii_tick(&sc->sc_mii);
	sf_stats_update(sc);
	splx(s);

	callout_reset(&sc->sc_tick_callout, hz, sf_tick, sc);
}

/*
 * sf_stats_update:
 *
 *	Read the statistics counters.
 */
static void
sf_stats_update(struct sf_softc *sc)
{
	struct sf_stats stats;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint32_t *p;
	u_int i;

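	/*
	 * struct sf_stats mirrors the chip's statistics counters as
	 * consecutive 32-bit words; read each counter and clear it
	 * behind us.
	 */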
	p = &stats.TransmitOKFrames;
	for (i = 0; i < (sizeof(stats) / sizeof(uint32_t)); i++) {
		*p++ = sf_genreg_read(sc,
		    SF_STATS_BASE + (i * sizeof(uint32_t)));
		sf_genreg_write(sc, SF_STATS_BASE + (i * sizeof(uint32_t)), 0);
	}

	ifp->if_opackets += stats.TransmitOKFrames;

	ifp->if_collisions += stats.SingleCollisionFrames +
	    stats.MultipleCollisionFrames;

	ifp->if_oerrors += stats.TransmitAbortDueToExcessiveCollisions +
	    stats.TransmitAbortDueToExcessingDeferral +
	    stats.FramesLostDueToInternalTransmitErrors;

	ifp->if_ipackets += stats.ReceiveOKFrames;

	ifp->if_ierrors += stats.ReceiveCRCErrors + stats.AlignmentErrors +
	    stats.ReceiveFramesTooLong + stats.ReceiveFramesTooShort +
	    stats.ReceiveFramesJabbersError +
	    stats.FramesLostDueToInternalReceiveErrors;
}

/*
 * sf_reset:
 *
 *	Perform a soft reset on the Starfire.
 */
static void
sf_reset(struct sf_softc *sc)
{
	int i;

	sf_funcreg_write(sc, SF_GeneralEthernetCtrl, 0);

	sf_macreset(sc);

	sf_funcreg_write(sc, SF_PciDeviceConfig, PDC_SoftReset);
	for (i = 0; i < 1000; i++) {
		delay(10);
		if ((sf_funcreg_read(sc, SF_PciDeviceConfig) &
		     PDC_SoftReset) == 0)
			break;
	}

	if (i == 1000) {
		aprint_error_dev(&sc->sc_dev, "reset failed to complete\n");
		sf_funcreg_write(sc, SF_PciDeviceConfig, 0);
	}

	delay(1000);
}

/*
 * sf_macreset:
 *
 *	Reset the MAC portion of the Starfire.
 */
static void
sf_macreset(struct sf_softc *sc)
{

	sf_genreg_write(sc, SF_MacConfig1, sc->sc_MacConfig1 | MC1_SoftRst);
	delay(1000);
	sf_genreg_write(sc, SF_MacConfig1, sc->sc_MacConfig1);
}

/*
 * sf_init:		[ifnet interface function]
 *
 *	Initialize the interface.  Must be called at splnet().
 */
static int
sf_init(struct ifnet *ifp)
{
	struct sf_softc *sc = ifp->if_softc;
	struct sf_descsoft *ds;
	int error = 0;
	u_int i;

	/*
	 * Cancel any pending I/O.
	 */
	sf_stop(ifp, 0);

	/*
	 * Reset the Starfire to a known state.
	 */
	sf_reset(sc);

	/* Clear the stat counters. */
	for (i = 0; i < sizeof(struct sf_stats); i += sizeof(uint32_t))
		sf_genreg_write(sc, SF_STATS_BASE + i, 0);

	/*
	 * Initialize the transmit descriptor ring.
	 */
	memset(sc->sc_txdescs, 0, sizeof(sc->sc_txdescs));
	sf_funcreg_write(sc, SF_TxDescQueueHighAddr, 0);
	sf_funcreg_write(sc, SF_HiPrTxDescQueueBaseAddr, SF_CDTXDADDR(sc, 0));
	sf_funcreg_write(sc, SF_LoPrTxDescQueueBaseAddr, 0);

	/*
	 * Initialize the transmit completion ring.
	 */
	for (i = 0; i < SF_NTCD; i++) {
		sc->sc_txcomp[i].tcd_word0 = TCD_DMA_ID;
		SF_CDTXCSYNC(sc, i, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	}
	sf_funcreg_write(sc, SF_CompletionQueueHighAddr, 0);
	sf_funcreg_write(sc, SF_TxCompletionQueueCtrl, SF_CDTXCADDR(sc, 0));

	/*
	 * Initialize the receive descriptor ring.
	 */
	for (i = 0; i < SF_NRXDESC; i++) {
		ds = &sc->sc_rxsoft[i];
		if (ds->ds_mbuf == NULL) {
			if ((error = sf_add_rxbuf(sc, i)) != 0) {
				aprint_error_dev(&sc->sc_dev, "unable to allocate or map rx "
				    "buffer %d, error = %d\n",
				    i, error);
				/*
				 * XXX Should attempt to run with fewer receive
				 * XXX buffers instead of just failing.
				 */
				sf_rxdrain(sc);
				goto out;
			}
		} else
			SF_INIT_RXDESC(sc, i);
	}
	sf_funcreg_write(sc, SF_RxDescQueueHighAddress, 0);
	sf_funcreg_write(sc, SF_RxDescQueue1LowAddress, SF_CDRXDADDR(sc, 0));
	sf_funcreg_write(sc, SF_RxDescQueue2LowAddress, 0);

	/*
	 * Initialize the receive completion ring.
	 */
	for (i = 0; i < SF_NRCD; i++) {
		sc->sc_rxcomp[i].rcd_word0 = RCD_W0_ID;
		sc->sc_rxcomp[i].rcd_word1 = 0;
		sc->sc_rxcomp[i].rcd_word2 = 0;
		sc->sc_rxcomp[i].rcd_timestamp = 0;
		SF_CDRXCSYNC(sc, i, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	}
	sf_funcreg_write(sc, SF_RxCompletionQueue1Ctrl, SF_CDRXCADDR(sc, 0) |
	    RCQ1C_RxCompletionQ1Type(3));
	sf_funcreg_write(sc, SF_RxCompletionQueue2Ctrl, 0);

	/*
	 * Initialize the Tx CSR.
	 */
	sc->sc_TransmitFrameCSR = 0;
	sf_funcreg_write(sc, SF_TransmitFrameCSR,
	    sc->sc_TransmitFrameCSR |
	    TFCSR_TransmitThreshold(sc->sc_txthresh));

	/*
	 * Initialize the Tx descriptor control register.
	 */
	sc->sc_TxDescQueueCtrl = TDQC_SkipLength(0) |
	    TDQC_TxDmaBurstSize(4) |	/* default */
	    TDQC_MinFrameSpacing(3) |	/* 128 bytes */
	    TDQC_TxDescType(0);
	sf_funcreg_write(sc, SF_TxDescQueueCtrl,
	    sc->sc_TxDescQueueCtrl |
	    TDQC_TxHighPriorityFifoThreshold(sc->sc_txthresh));

	/*
	 * Initialize the Rx descriptor control registers.
	 */
	sf_funcreg_write(sc, SF_RxDescQueue1Ctrl,
	    RDQ1C_RxQ1BufferLength(MCLBYTES) |
	    RDQ1C_RxDescSpacing(0));
	sf_funcreg_write(sc, SF_RxDescQueue2Ctrl, 0);

	/*
	 * Initialize the Tx descriptor producer indices.
	 */
	sf_funcreg_write(sc, SF_TxDescQueueProducerIndex,
	    TDQPI_HiPrTxProducerIndex(0) |
	    TDQPI_LoPrTxProducerIndex(0));

	/*
	 * Initialize the Rx descriptor producer indices.
	 */
	sf_funcreg_write(sc, SF_RxDescQueue1Ptrs,
	    RXQ1P_RxDescQ1Producer(SF_NRXDESC - 1));
	sf_funcreg_write(sc, SF_RxDescQueue2Ptrs,
	    RXQ2P_RxDescQ2Producer(0));

	/*
	 * Initialize the Tx and Rx completion queue consumer indices.
	 */
	sf_funcreg_write(sc, SF_CompletionQueueConsumerIndex,
	    CQCI_TxCompletionConsumerIndex(0) |
	    CQCI_RxCompletionQ1ConsumerIndex(0));
	sf_funcreg_write(sc, SF_RxHiPrCompletionPtrs, 0);

	/*
	 * Initialize the Rx DMA control register.
	 */
	sf_funcreg_write(sc, SF_RxDmaCtrl,
	    RDC_RxHighPriorityThreshold(6) |	/* default */
	    RDC_RxBurstSize(4));		/* default */

	/*
	 * Set the receive filter.
	 */
	sc->sc_RxAddressFilteringCtl = 0;
	sf_set_filter(sc);

	/*
	 * Set MacConfig1.  When we set the media, MacConfig1 will
	 * actually be written and the MAC part reset.
	 */
	sc->sc_MacConfig1 = MC1_PadEn;

	/*
	 * Set the media.
	 */
	if ((error = ether_mediachange(ifp)) != 0)
		goto out;

	/*
	 * Initialize the interrupt register.
	 */
	sc->sc_InterruptEn = IS_PCIPadInt | IS_RxQ1DoneInt |
	    IS_TxQueueDoneInt | IS_TxDmaDoneInt | IS_DmaErrInt |
	    IS_StatisticWrapInt;
	sf_funcreg_write(sc, SF_InterruptEn, sc->sc_InterruptEn);

	sf_funcreg_write(sc, SF_PciDeviceConfig, PDC_IntEnable |
	    PDC_PCIMstDmaEn | (1 << PDC_FifoThreshold_SHIFT));

	/*
	 * Start the transmit and receive processes.
	 */
	sf_funcreg_write(sc, SF_GeneralEthernetCtrl,
	    GEC_TxDmaEn|GEC_RxDmaEn|GEC_TransmitEn|GEC_ReceiveEn);

	/* Start the one second clock. */
	callout_reset(&sc->sc_tick_callout, hz, sf_tick, sc);

	/*
	 * Note that the interface is now running.
	 */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

 out:
	if (error) {
		ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
		ifp->if_timer = 0;
		printf("%s: interface not running\n", device_xname(&sc->sc_dev));
	}
	return (error);
}

/*
 * sf_rxdrain:
 *
 *	Drain the receive queue.
 */
static void
sf_rxdrain(struct sf_softc *sc)
{
	struct sf_descsoft *ds;
	int i;

	for (i = 0; i < SF_NRXDESC; i++) {
		ds = &sc->sc_rxsoft[i];
		if (ds->ds_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
			m_freem(ds->ds_mbuf);
			ds->ds_mbuf = NULL;
		}
	}
}

/*
 * sf_stop:		[ifnet interface function]
 *
 *	Stop transmission on the interface.
 */
static void
sf_stop(struct ifnet *ifp, int disable)
{
	struct sf_softc *sc = ifp->if_softc;
	struct sf_descsoft *ds;
	int i;

	/* Stop the one second clock. */
	callout_stop(&sc->sc_tick_callout);

	/* Down the MII. */
	mii_down(&sc->sc_mii);

	/* Disable interrupts. */
	sf_funcreg_write(sc, SF_InterruptEn, 0);

	/* Stop the transmit and receive processes. */
	sf_funcreg_write(sc, SF_GeneralEthernetCtrl, 0);

	/*
	 * Release any queued transmit buffers.
	 */
	for (i = 0; i < SF_NTXDESC; i++) {
		ds = &sc->sc_txsoft[i];
		if (ds->ds_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
			m_freem(ds->ds_mbuf);
			ds->ds_mbuf = NULL;
		}
	}

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;

	if (disable)
		sf_rxdrain(sc);
}

/*
 * sf_read_eeprom:
 *
 *	Read from the Starfire EEPROM.
 */
static uint8_t
sf_read_eeprom(struct sf_softc *sc, int offset)
{
	uint32_t reg;

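	/*
	 * The EEPROM is presented as 32-bit words; fetch the word
	 * containing the requested offset and extract the byte.
	 */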
	reg = sf_genreg_read(sc, SF_EEPROM_BASE + (offset & ~3));

	return ((reg >> (8 * (offset & 3))) & 0xff);
}

/*
 * sf_add_rxbuf:
 *
 *	Add a receive buffer to the indicated descriptor.
 */
static int
sf_add_rxbuf(struct sf_softc *sc, int idx)
{
	struct sf_descsoft *ds = &sc->sc_rxsoft[idx];
	struct mbuf *m;
	int error;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);

	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return (ENOBUFS);
	}

	if (ds->ds_mbuf != NULL)
		bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);

	ds->ds_mbuf = m;

	error = bus_dmamap_load(sc->sc_dmat, ds->ds_dmamap,
	    m->m_ext.ext_buf, m->m_ext.ext_size, NULL,
	    BUS_DMA_READ|BUS_DMA_NOWAIT);
	if (error) {
		aprint_error_dev(&sc->sc_dev, "can't load rx DMA map %d, error = %d\n",
		    idx, error);
		panic("sf_add_rxbuf");	/* XXX */
	}

	bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
	    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);

	SF_INIT_RXDESC(sc, idx);

	return (0);
}

static void
sf_set_filter_perfect(struct sf_softc *sc, int slot, const uint8_t *enaddr)
{
	uint32_t reg0, reg1, reg2;

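	/*
	 * Each perfect filter slot is 16 bytes wide and takes the
	 * address as three 16-bit words, least significant word first.
	 */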
	reg0 = enaddr[5] | (enaddr[4] << 8);
	reg1 = enaddr[3] | (enaddr[2] << 8);
	reg2 = enaddr[1] | (enaddr[0] << 8);

	sf_genreg_write(sc, SF_PERFECT_BASE + (slot * 0x10) + 0, reg0);
	sf_genreg_write(sc, SF_PERFECT_BASE + (slot * 0x10) + 4, reg1);
	sf_genreg_write(sc, SF_PERFECT_BASE + (slot * 0x10) + 8, reg2);
}

static void
sf_set_filter_hash(struct sf_softc *sc, uint8_t *enaddr)
{
	uint32_t hash, slot, reg;

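	/*
	 * The hash is the top 9 bits of the big-endian CRC of the
	 * address: the upper 5 bits select one of 32 hash table words
	 * (on a 16-byte stride), the lower 4 bits the bit within it.
	 */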
	hash = ether_crc32_be(enaddr, ETHER_ADDR_LEN) >> 23;
	slot = hash >> 4;

	reg = sf_genreg_read(sc, SF_HASH_BASE + (slot * 0x10));
	reg |= 1 << (hash & 0xf);
	sf_genreg_write(sc, SF_HASH_BASE + (slot * 0x10), reg);
}

/*
 * sf_set_filter:
 *
 *	Set the Starfire receive filter.
 */
static void
sf_set_filter(struct sf_softc *sc)
{
	struct ethercom *ec = &sc->sc_ethercom;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	int i;

	/* Start by clearing the perfect and hash tables. */
	for (i = 0; i < SF_PERFECT_SIZE; i += sizeof(uint32_t))
		sf_genreg_write(sc, SF_PERFECT_BASE + i, 0);

	for (i = 0; i < SF_HASH_SIZE; i += sizeof(uint32_t))
		sf_genreg_write(sc, SF_HASH_BASE + i, 0);

	/*
	 * Clear the perfect and hash mode bits.
	 */
	sc->sc_RxAddressFilteringCtl &=
	    ~(RAFC_PerfectFilteringMode(3) | RAFC_HashFilteringMode(3));

	if (ifp->if_flags & IFF_BROADCAST)
		sc->sc_RxAddressFilteringCtl |= RAFC_PassBroadcast;
	else
		sc->sc_RxAddressFilteringCtl &= ~RAFC_PassBroadcast;

	if (ifp->if_flags & IFF_PROMISC) {
		sc->sc_RxAddressFilteringCtl |= RAFC_PromiscuousMode;
		goto allmulti;
	} else
		sc->sc_RxAddressFilteringCtl &= ~RAFC_PromiscuousMode;

	/*
	 * Set normal perfect filtering mode.
	 */
	sc->sc_RxAddressFilteringCtl |= RAFC_PerfectFilteringMode(1);

	/*
	 * First, write the station address to the perfect filter
	 * table.
	 */
	sf_set_filter_perfect(sc, 0, CLLADDR(ifp->if_sadl));

	/*
	 * Now set the hash bits for each multicast address in our
	 * list.
	 */
	ETHER_FIRST_MULTI(step, ec, enm);
	if (enm == NULL)
		goto done;
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			/*
			 * We must listen to a range of multicast addresses.
			 * For now, just accept all multicasts, rather than
			 * trying to set only those filter bits needed to match
			 * the range.  (At this time, the only use of address
			 * ranges is for IP multicast routing, for which the
			 * range is big enough to require all bits set.)
			 */
			goto allmulti;
		}
		sf_set_filter_hash(sc, enm->enm_addrlo);
		ETHER_NEXT_MULTI(step, enm);
	}

	/*
	 * Set "hash only multicast dest, match regardless of VLAN ID".
	 */
	sc->sc_RxAddressFilteringCtl |= RAFC_HashFilteringMode(2);
	goto done;

 allmulti:
	/*
	 * XXX RAFC_PassMulticast is sub-optimal if using VLAN mode.
	 */
	sc->sc_RxAddressFilteringCtl |= RAFC_PassMulticast;
	ifp->if_flags |= IFF_ALLMULTI;

 done:
	sf_funcreg_write(sc, SF_RxAddressFilteringCtl,
	    sc->sc_RxAddressFilteringCtl);
}

/*
 * sf_mii_read:		[mii interface function]
 *
 *	Read from the MII.
 */
static int
sf_mii_read(struct device *self, int phy, int reg)
{
	struct sf_softc *sc = (void *) self;
	uint32_t v;
	int i;

	for (i = 0; i < 1000; i++) {
		v = sf_genreg_read(sc, SF_MII_PHY_REG(phy, reg));
		if (v & MiiDataValid)
			break;
		delay(1);
	}

	if ((v & MiiDataValid) == 0)
		return (0);

	if (MiiRegDataPort(v) == 0xffff)
		return (0);

	return (MiiRegDataPort(v));
}

/*
 * sf_mii_write:	[mii interface function]
 *
 *	Write to the MII.
 */
static void
sf_mii_write(struct device *self, int phy, int reg, int val)
{
	struct sf_softc *sc = (void *) self;
	int i;

	sf_genreg_write(sc, SF_MII_PHY_REG(phy, reg), val);

	for (i = 0; i < 1000; i++) {
		if ((sf_genreg_read(sc, SF_MII_PHY_REG(phy, reg)) &
		     MiiBusy) == 0)
			return;
		delay(1);
	}

	printf("%s: MII write timed out\n", device_xname(&sc->sc_dev));
}

/*
 * sf_mii_statchg:	[mii interface function]
 *
 *	Callback from the PHY when the media changes.
 */
static void
sf_mii_statchg(struct device *self)
{
	struct sf_softc *sc = (void *) self;
	uint32_t ipg;

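	/*
	 * Select the MAC duplex bit and the matching back-to-back
	 * inter-packet gap for the new media setting.
	 */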
	if (sc->sc_mii.mii_media_active & IFM_FDX) {
		sc->sc_MacConfig1 |= MC1_FullDuplex;
		ipg = 0x15;
	} else {
		sc->sc_MacConfig1 &= ~MC1_FullDuplex;
		ipg = 0x11;
	}

	sf_genreg_write(sc, SF_MacConfig1, sc->sc_MacConfig1);
	sf_macreset(sc);

	sf_genreg_write(sc, SF_BkToBkIPG, ipg);
}