/*	$NetBSD: aic6915.c,v 1.26 2010/01/19 22:06:24 pooka Exp $	*/

/*-
 * Copyright (c) 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Device driver for the Adaptec AIC-6915 (``Starfire'')
 * 10/100 Ethernet controller.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: aic6915.c,v 1.26 2010/01/19 22:06:24 pooka Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>

#include <uvm/uvm_extern.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <sys/bus.h>
#include <sys/intr.h>

#include <dev/mii/miivar.h>

#include <dev/ic/aic6915reg.h>
#include <dev/ic/aic6915var.h>

static void	sf_start(struct ifnet *);
static void	sf_watchdog(struct ifnet *);
static int	sf_ioctl(struct ifnet *, u_long, void *);
static int	sf_init(struct ifnet *);
static void	sf_stop(struct ifnet *, int);

static bool	sf_shutdown(device_t, int);

static void	sf_txintr(struct sf_softc *);
static void	sf_rxintr(struct sf_softc *);
static void	sf_stats_update(struct sf_softc *);

static void	sf_reset(struct sf_softc *);
static void	sf_macreset(struct sf_softc *);
static void	sf_rxdrain(struct sf_softc *);
static int	sf_add_rxbuf(struct sf_softc *, int);
static uint8_t	sf_read_eeprom(struct sf_softc *, int);
static void	sf_set_filter(struct sf_softc *);

static int	sf_mii_read(device_t, int, int);
static void	sf_mii_write(device_t, int, int, int);
static void	sf_mii_statchg(device_t);

static void	sf_tick(void *);

#define sf_funcreg_read(sc, reg) \
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh_func, (reg))
#define sf_funcreg_write(sc, reg, val) \
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh_func, (reg), (val))

static inline uint32_t
sf_reg_read(struct sf_softc *sc, bus_addr_t reg)
{

	if (__predict_false(sc->sc_iomapped)) {
		bus_space_write_4(sc->sc_st, sc->sc_sh, SF_IndirectIoAccess,
		    reg);
		return (bus_space_read_4(sc->sc_st, sc->sc_sh,
		    SF_IndirectIoDataPort));
	}

	return (bus_space_read_4(sc->sc_st, sc->sc_sh, reg));
}

static inline void
sf_reg_write(struct sf_softc *sc, bus_addr_t reg, uint32_t val)
{

	if (__predict_false(sc->sc_iomapped)) {
		bus_space_write_4(sc->sc_st, sc->sc_sh, SF_IndirectIoAccess,
		    reg);
		bus_space_write_4(sc->sc_st, sc->sc_sh, SF_IndirectIoDataPort,
		    val);
		return;
	}

	bus_space_write_4(sc->sc_st, sc->sc_sh, reg, val);
}

#define sf_genreg_read(sc, reg) \
	sf_reg_read((sc), (reg) + SF_GENREG_OFFSET)
#define sf_genreg_write(sc, reg, val) \
	sf_reg_write((sc), (reg) + SF_GENREG_OFFSET, (val))
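
/*
 * Illustrative sketch, not driver code: through sf_genreg_read() a
 * general register costs one bus_space_read_4() when the chip is
 * memory mapped, but two bus accesses (an address write through
 * SF_IndirectIoAccess, then a data read) when it is I/O mapped.  A
 * hypothetical caller with `sc' in scope is insulated from the
 * difference:
 */
#if 0
	uint32_t mc1;

	mc1 = sf_genreg_read(sc, SF_MacConfig1);  /* 1 or 2 bus accesses */
	sf_genreg_write(sc, SF_MacConfig1, mc1 | MC1_SoftRst);
#endif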

/*
 * sf_attach:
 *
 *	Attach a Starfire interface to the system.
 */
void
sf_attach(struct sf_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	int i, rseg, error;
	bus_dma_segment_t seg;
	u_int8_t enaddr[ETHER_ADDR_LEN];

	callout_init(&sc->sc_tick_callout, 0);

	/*
	 * If we're I/O mapped, the functional register handle is
	 * the same as the base handle.  If we're memory mapped,
	 * carve off a chunk of the register space for the functional
	 * registers, to save on arithmetic later.
	 */
	if (sc->sc_iomapped)
		sc->sc_sh_func = sc->sc_sh;
	else {
		if ((error = bus_space_subregion(sc->sc_st, sc->sc_sh,
		    SF_GENREG_OFFSET, SF_FUNCREG_SIZE, &sc->sc_sh_func)) != 0) {
			aprint_error_dev(&sc->sc_dev, "unable to sub-region functional "
			    "registers, error = %d\n",
			    error);
			return;
		}
	}

	/*
	 * Initialize the transmit threshold for this interface.  The
	 * manual describes the default as 4 * 16 bytes.  We start out
	 * at 10 * 16 bytes, to avoid a bunch of initial underruns on
	 * several platforms.
	 */
	sc->sc_txthresh = 10;
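
	/*
	 * (sc_txthresh is in units of 16 bytes, so 10 means a 160-byte
	 * threshold; sf_intr() raises it on each transmit FIFO underrun,
	 * up to a maximum of 0xff, i.e. 4080 bytes.)
	 */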

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat,
	    sizeof(struct sf_control_data), PAGE_SIZE, 0, &seg, 1, &rseg,
	    BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(&sc->sc_dev, "unable to allocate control data, error = %d\n",
		    error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	    sizeof(struct sf_control_data), (void **)&sc->sc_control_data,
	    BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
		aprint_error_dev(&sc->sc_dev, "unable to map control data, error = %d\n",
		    error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat,
	    sizeof(struct sf_control_data), 1,
	    sizeof(struct sf_control_data), 0, BUS_DMA_NOWAIT,
	    &sc->sc_cddmamap)) != 0) {
		aprint_error_dev(&sc->sc_dev, "unable to create control data DMA map, "
		    "error = %d\n", error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct sf_control_data), NULL,
	    BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(&sc->sc_dev, "unable to load control data DMA map, error = %d\n",
		    error);
		goto fail_3;
	}

	/*
	 * Create the transmit buffer DMA maps.
	 */
	for (i = 0; i < SF_NTXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    SF_NTXFRAGS, MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &sc->sc_txsoft[i].ds_dmamap)) != 0) {
			aprint_error_dev(&sc->sc_dev, "unable to create tx DMA map %d, "
			    "error = %d\n", i, error);
			goto fail_4;
		}
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < SF_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &sc->sc_rxsoft[i].ds_dmamap)) != 0) {
			aprint_error_dev(&sc->sc_dev, "unable to create rx DMA map %d, "
			    "error = %d\n", i, error);
			goto fail_5;
		}
	}

	/*
	 * Reset the chip to a known state.
	 */
	sf_reset(sc);

	/*
	 * Read the Ethernet address from the EEPROM.
	 */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		enaddr[i] = sf_read_eeprom(sc, (15 + (ETHER_ADDR_LEN - 1)) - i);
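	/*
	 * (The EEPROM stores the station address byte-reversed starting
	 * at offset 15, so the loop above fills enaddr[0] from offset 20
	 * down to enaddr[5] from offset 15.)
	 */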

	printf("%s: Ethernet address %s\n", device_xname(&sc->sc_dev),
	    ether_sprintf(enaddr));

	if (sf_funcreg_read(sc, SF_PciDeviceConfig) & PDC_System64)
		printf("%s: 64-bit PCI slot detected\n", device_xname(&sc->sc_dev));

	/*
	 * Initialize our media structures and probe the MII.
	 */
	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = sf_mii_read;
	sc->sc_mii.mii_writereg = sf_mii_write;
	sc->sc_mii.mii_statchg = sf_mii_statchg;
	sc->sc_ethercom.ec_mii = &sc->sc_mii;
	ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, ether_mediachange,
	    ether_mediastatus);
	mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);

	strlcpy(ifp->if_xname, device_xname(&sc->sc_dev), IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = sf_ioctl;
	ifp->if_start = sf_start;
	ifp->if_watchdog = sf_watchdog;
	ifp->if_init = sf_init;
	ifp->if_stop = sf_stop;
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, enaddr);

	/*
	 * Make sure the interface is shutdown during reboot.
	 */
	if (pmf_device_register1(&sc->sc_dev, NULL, NULL, sf_shutdown))
		pmf_class_network_register(&sc->sc_dev, ifp);
	else
		aprint_error_dev(&sc->sc_dev,
		    "couldn't establish power handler\n");
	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
 fail_5:
	for (i = 0; i < SF_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].ds_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_rxsoft[i].ds_dmamap);
	}
 fail_4:
	for (i = 0; i < SF_NTXDESC; i++) {
		if (sc->sc_txsoft[i].ds_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_txsoft[i].ds_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (void *) sc->sc_control_data,
	    sizeof(struct sf_control_data));
 fail_1:
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
 fail_0:
	return;
}

/*
 * sf_shutdown:
 *
 *	Shutdown hook -- make sure the interface is stopped at reboot.
 */
static bool
sf_shutdown(device_t self, int howto)
{
	struct sf_softc *sc;

	sc = device_private(self);
	sf_stop(&sc->sc_ethercom.ec_if, 1);

	return true;
}

/*
 * sf_start:		[ifnet interface function]
 *
 *	Start packet transmission on the interface.
 */
static void
sf_start(struct ifnet *ifp)
{
	struct sf_softc *sc = ifp->if_softc;
	struct mbuf *m0, *m;
	struct sf_txdesc0 *txd;
	struct sf_descsoft *ds;
	bus_dmamap_t dmamap;
	int error, producer, last = -1, opending, seg;

	/*
	 * Remember the previous number of pending transmits.
	 */
	opending = sc->sc_txpending;

	/*
	 * Find out where we're sitting.
	 */
	producer = SF_TXDINDEX_TO_HOST(
	    TDQPI_HiPrTxProducerIndex_get(
	    sf_funcreg_read(sc, SF_TxDescQueueProducerIndex)));

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.  Leave a blank one at the end for sanity's sake.
	 */
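	/*
	 * (Keeping one descriptor unused is the usual ring idiom: with
	 * at most SF_NTXDESC - 1 entries ever in flight, a full ring can
	 * never be mistaken for an empty one.)
	 */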
	while (sc->sc_txpending < (SF_NTXDESC - 1)) {
		/*
		 * Grab a packet off the queue.
		 */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;
		m = NULL;

		/*
		 * Get the transmit descriptor.
		 */
		txd = &sc->sc_txdescs[producer];
		ds = &sc->sc_txsoft[producer];
		dmamap = ds->ds_dmamap;

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the allotted number of frags, or we were
		 * short on resources.  In this case, we'll copy and try
		 * again.
		 */
		if (bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_WRITE|BUS_DMA_NOWAIT) != 0) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				aprint_error_dev(&sc->sc_dev, "unable to allocate Tx mbuf\n");
				break;
			}
			if (m0->m_pkthdr.len > MHLEN) {
				MCLGET(m, M_DONTWAIT);
				if ((m->m_flags & M_EXT) == 0) {
					aprint_error_dev(&sc->sc_dev, "unable to allocate Tx "
					    "cluster\n");
					m_freem(m);
					break;
				}
			}
			m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, void *));
			m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
			error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
			    m, BUS_DMA_WRITE|BUS_DMA_NOWAIT);
			if (error) {
				aprint_error_dev(&sc->sc_dev, "unable to load Tx buffer, "
				    "error = %d\n", error);
				break;
			}
		}

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */
		IFQ_DEQUEUE(&ifp->if_snd, m0);
		if (m != NULL) {
			m_freem(m0);
			m0 = m;
		}

		/* Initialize the descriptor. */
		txd->td_word0 =
		    htole32(TD_W0_ID | TD_W0_CRCEN | m0->m_pkthdr.len);
		if (producer == (SF_NTXDESC - 1))
			txd->td_word0 |= htole32(TD_W0_END);
		txd->td_word1 = htole32(dmamap->dm_nsegs);
		for (seg = 0; seg < dmamap->dm_nsegs; seg++) {
			txd->td_frags[seg].fr_addr =
			    htole32(dmamap->dm_segs[seg].ds_addr);
			txd->td_frags[seg].fr_len =
			    htole32(dmamap->dm_segs[seg].ds_len);
		}

		/* Sync the descriptor and the DMA map. */
		SF_CDTXDSYNC(sc, producer, BUS_DMASYNC_PREWRITE);
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		/*
		 * Store a pointer to the packet so we can free it later.
		 */
		ds->ds_mbuf = m0;

		/* Advance the Tx pointer. */
		sc->sc_txpending++;
		last = producer;
		producer = SF_NEXTTX(producer);

		/*
		 * Pass the packet to any BPF listeners.
		 */
		if (ifp->if_bpf)
			bpf_ops->bpf_mtap(ifp->if_bpf, m0);
	}

	if (sc->sc_txpending == (SF_NTXDESC - 1)) {
		/* No more slots left; notify upper layer. */
		ifp->if_flags |= IFF_OACTIVE;
	}

	if (sc->sc_txpending != opending) {
		KASSERT(last != -1);
		/*
		 * We enqueued packets.  Cause a transmit interrupt to
		 * happen on the last packet we enqueued, and give the
		 * new descriptors to the chip by writing the new
		 * producer index.
		 */
		sc->sc_txdescs[last].td_word0 |= htole32(TD_W0_INTR);
		SF_CDTXDSYNC(sc, last, BUS_DMASYNC_PREWRITE);

		sf_funcreg_write(sc, SF_TxDescQueueProducerIndex,
		    TDQPI_HiPrTxProducerIndex(SF_TXDINDEX_TO_CHIP(producer)));

		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}

/*
 * sf_watchdog:		[ifnet interface function]
 *
 *	Watchdog timer handler.
 */
static void
sf_watchdog(struct ifnet *ifp)
{
	struct sf_softc *sc = ifp->if_softc;

	printf("%s: device timeout\n", device_xname(&sc->sc_dev));
	ifp->if_oerrors++;

	(void) sf_init(ifp);

	/* Try to get more packets going. */
	sf_start(ifp);
}

/*
 * sf_ioctl:		[ifnet interface function]
 *
 *	Handle control requests from the operator.
 */
static int
sf_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct sf_softc *sc = ifp->if_softc;
	int s, error;

	s = splnet();

	error = ether_ioctl(ifp, cmd, data);
	if (error == ENETRESET) {
		/*
		 * Multicast list has changed; set the hardware filter
		 * accordingly.
		 */
		if (ifp->if_flags & IFF_RUNNING)
			sf_set_filter(sc);
		error = 0;
	}

	/* Try to get more packets going. */
	sf_start(ifp);

	splx(s);
	return (error);
}

/*
 * sf_intr:
 *
 *	Interrupt service routine.
 */
int
sf_intr(void *arg)
{
	struct sf_softc *sc = arg;
	uint32_t isr;
	int handled = 0, wantinit = 0;

	for (;;) {
		/* Reading clears all interrupts we're interested in. */
		isr = sf_funcreg_read(sc, SF_InterruptStatus);
		if ((isr & IS_PCIPadInt) == 0)
			break;

		handled = 1;

		/* Handle receive interrupts. */
		if (isr & IS_RxQ1DoneInt)
			sf_rxintr(sc);

		/* Handle transmit completion interrupts. */
		if (isr & (IS_TxDmaDoneInt|IS_TxQueueDoneInt))
			sf_txintr(sc);

		/* Handle abnormal interrupts. */
		if (isr & IS_AbnormalInterrupt) {
			/* Statistics. */
			if (isr & IS_StatisticWrapInt)
				sf_stats_update(sc);

			/* DMA errors. */
			if (isr & IS_DmaErrInt) {
				wantinit = 1;
				aprint_error_dev(&sc->sc_dev, "WARNING: DMA error\n");
			}

			/* Transmit FIFO underruns. */
			if (isr & IS_TxDataLowInt) {
				if (sc->sc_txthresh < 0xff)
					sc->sc_txthresh++;
				printf("%s: transmit FIFO underrun, new "
				    "threshold: %d bytes\n",
				    device_xname(&sc->sc_dev),
				    sc->sc_txthresh * 16);
				sf_funcreg_write(sc, SF_TransmitFrameCSR,
				    sc->sc_TransmitFrameCSR |
				    TFCSR_TransmitThreshold(sc->sc_txthresh));
				sf_funcreg_write(sc, SF_TxDescQueueCtrl,
				    sc->sc_TxDescQueueCtrl |
				    TDQC_TxHighPriorityFifoThreshold(
				    sc->sc_txthresh));
			}
		}
	}

	if (handled) {
		/* Reset the interface, if necessary. */
		if (wantinit)
			sf_init(&sc->sc_ethercom.ec_if);

		/* Try to get more packets going. */
		sf_start(&sc->sc_ethercom.ec_if);
	}

	return (handled);
}

/*
 * sf_txintr:
 *
 *	Helper -- handle transmit completion interrupts.
 */
static void
sf_txintr(struct sf_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct sf_descsoft *ds;
	uint32_t cqci, tcd;
	int consumer, producer, txidx;

 try_again:
	cqci = sf_funcreg_read(sc, SF_CompletionQueueConsumerIndex);

	consumer = CQCI_TxCompletionConsumerIndex_get(cqci);
	producer = CQPI_TxCompletionProducerIndex_get(
	    sf_funcreg_read(sc, SF_CompletionQueueProducerIndex));

	if (consumer == producer)
		return;

	ifp->if_flags &= ~IFF_OACTIVE;

	while (consumer != producer) {
		SF_CDTXCSYNC(sc, consumer, BUS_DMASYNC_POSTREAD);
		tcd = le32toh(sc->sc_txcomp[consumer].tcd_word0);

		txidx = SF_TCD_INDEX_TO_HOST(TCD_INDEX(tcd));
#ifdef DIAGNOSTIC
		if ((tcd & TCD_PR) == 0)
			aprint_error_dev(&sc->sc_dev, "Tx queue mismatch, index %d\n",
			    txidx);
#endif
		/*
		 * NOTE: stats are updated later.  We're just
		 * releasing packets that have been DMA'd to
		 * the chip.
		 */
		ds = &sc->sc_txsoft[txidx];
		SF_CDTXDSYNC(sc, txidx, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap,
		    0, ds->ds_dmamap->dm_mapsize,
		    BUS_DMASYNC_POSTWRITE);
		m_freem(ds->ds_mbuf);
		ds->ds_mbuf = NULL;

		consumer = SF_NEXTTCD(consumer);
		sc->sc_txpending--;
	}

	/* XXXJRT -- should be KDASSERT() */
	KASSERT(sc->sc_txpending >= 0);

	/* If all packets are done, cancel the watchdog timer. */
	if (sc->sc_txpending == 0)
		ifp->if_timer = 0;

	/* Update the consumer index. */
	sf_funcreg_write(sc, SF_CompletionQueueConsumerIndex,
	    (cqci & ~CQCI_TxCompletionConsumerIndex(0x7ff)) |
	    CQCI_TxCompletionConsumerIndex(consumer));

	/* Double check for new completions. */
	goto try_again;
}

/*
 * sf_rxintr:
 *
 *	Helper -- handle receive interrupts.
 */
static void
sf_rxintr(struct sf_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct sf_descsoft *ds;
	struct sf_rcd_full *rcd;
	struct mbuf *m;
	uint32_t cqci, word0;
	int consumer, producer, bufproducer, rxidx, len;

 try_again:
	cqci = sf_funcreg_read(sc, SF_CompletionQueueConsumerIndex);

	consumer = CQCI_RxCompletionQ1ConsumerIndex_get(cqci);
	producer = CQPI_RxCompletionQ1ProducerIndex_get(
	    sf_funcreg_read(sc, SF_CompletionQueueProducerIndex));
	bufproducer = RXQ1P_RxDescQ1Producer_get(
	    sf_funcreg_read(sc, SF_RxDescQueue1Ptrs));

	if (consumer == producer)
		return;

	while (consumer != producer) {
		rcd = &sc->sc_rxcomp[consumer];
		SF_CDRXCSYNC(sc, consumer,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
		SF_CDRXCSYNC(sc, consumer,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		word0 = le32toh(rcd->rcd_word0);
		rxidx = RCD_W0_EndIndex(word0);

		ds = &sc->sc_rxsoft[rxidx];

		consumer = SF_NEXTRCD(consumer);
		bufproducer = SF_NEXTRX(bufproducer);

		if ((word0 & RCD_W0_OK) == 0) {
			SF_INIT_RXDESC(sc, rxidx);
			continue;
		}

		bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
		    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

		/*
		 * No errors; receive the packet.  Note that we have
		 * configured the Starfire to NOT transfer the CRC
		 * with the packet.
		 */
		len = RCD_W0_Length(word0);

#ifdef __NO_STRICT_ALIGNMENT
		/*
		 * Allocate a new mbuf cluster.  If that fails, we are
		 * out of memory, and must drop the packet and recycle
		 * the buffer that's already attached to this descriptor.
		 */
		m = ds->ds_mbuf;
		if (sf_add_rxbuf(sc, rxidx) != 0) {
			ifp->if_ierrors++;
			SF_INIT_RXDESC(sc, rxidx);
			bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
			    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
			continue;
		}
#else
		/*
		 * The Starfire's receive buffer must be 4-byte aligned.
		 * But this means that the data after the Ethernet header
		 * is misaligned.  We must allocate a new buffer and
		 * copy the data, shifted forward 2 bytes.
		 */
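		/*
		 * (Concretely: the Ethernet header is ETHER_HDR_LEN (14)
		 * bytes, so the payload of a 4-byte-aligned receive
		 * buffer starts on a 2-byte boundary.  Bumping m_data by
		 * 2 below puts the IP header back on a 4-byte boundary,
		 * at the cost of a copy.)
		 */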
		MGETHDR(m, M_DONTWAIT, MT_DATA);
		if (m == NULL) {
 dropit:
			ifp->if_ierrors++;
			SF_INIT_RXDESC(sc, rxidx);
			bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
			    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
			continue;
		}
		if (len > (MHLEN - 2)) {
			MCLGET(m, M_DONTWAIT);
			if ((m->m_flags & M_EXT) == 0) {
				m_freem(m);
				goto dropit;
			}
		}
		m->m_data += 2;

		/*
		 * Note that we use a cluster for incoming frames, so the
		 * buffer is virtually contiguous.
		 */
		memcpy(mtod(m, void *), mtod(ds->ds_mbuf, void *), len);

		/* Allow the receive descriptor to continue using its mbuf. */
		SF_INIT_RXDESC(sc, rxidx);
		bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
		    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
#endif /* __NO_STRICT_ALIGNMENT */

		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = len;

		/*
		 * Pass this up to any BPF listeners.
		 */
		if (ifp->if_bpf)
			bpf_ops->bpf_mtap(ifp->if_bpf, m);

		/* Pass it on. */
		(*ifp->if_input)(ifp, m);
	}

	/* Update the chip's pointers. */
	sf_funcreg_write(sc, SF_CompletionQueueConsumerIndex,
	    (cqci & ~CQCI_RxCompletionQ1ConsumerIndex(0x7ff)) |
	    CQCI_RxCompletionQ1ConsumerIndex(consumer));
	sf_funcreg_write(sc, SF_RxDescQueue1Ptrs,
	    RXQ1P_RxDescQ1Producer(bufproducer));

	/* Double-check for any new completions. */
	goto try_again;
}

/*
 * sf_tick:
 *
 *	One second timer, used to tick the MII and update stats.
 */
static void
sf_tick(void *arg)
{
	struct sf_softc *sc = arg;
	int s;

	s = splnet();
	mii_tick(&sc->sc_mii);
	sf_stats_update(sc);
	splx(s);

	callout_reset(&sc->sc_tick_callout, hz, sf_tick, sc);
}

/*
 * sf_stats_update:
 *
 *	Read the statistics counters.
 */
static void
sf_stats_update(struct sf_softc *sc)
{
	struct sf_stats stats;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint32_t *p;
	u_int i;

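	/*
	 * The loop below walks the chip's statistics registers in order,
	 * copying each 32-bit counter into the corresponding field of
	 * struct sf_stats and writing 0 back to clear it.  This relies
	 * on struct sf_stats exactly mirroring the register layout at
	 * SF_STATS_BASE, starting with TransmitOKFrames.
	 */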
	p = &stats.TransmitOKFrames;
	for (i = 0; i < (sizeof(stats) / sizeof(uint32_t)); i++) {
		*p++ = sf_genreg_read(sc,
		    SF_STATS_BASE + (i * sizeof(uint32_t)));
		sf_genreg_write(sc, SF_STATS_BASE + (i * sizeof(uint32_t)), 0);
	}

	ifp->if_opackets += stats.TransmitOKFrames;

	ifp->if_collisions += stats.SingleCollisionFrames +
	    stats.MultipleCollisionFrames;

	ifp->if_oerrors += stats.TransmitAbortDueToExcessiveCollisions +
	    stats.TransmitAbortDueToExcessingDeferral +
	    stats.FramesLostDueToInternalTransmitErrors;

	ifp->if_ipackets += stats.ReceiveOKFrames;

	ifp->if_ierrors += stats.ReceiveCRCErrors + stats.AlignmentErrors +
	    stats.ReceiveFramesTooLong + stats.ReceiveFramesTooShort +
	    stats.ReceiveFramesJabbersError +
	    stats.FramesLostDueToInternalReceiveErrors;
}

/*
 * sf_reset:
 *
 *	Perform a soft reset on the Starfire.
 */
static void
sf_reset(struct sf_softc *sc)
{
	int i;

	sf_funcreg_write(sc, SF_GeneralEthernetCtrl, 0);

	sf_macreset(sc);

	sf_funcreg_write(sc, SF_PciDeviceConfig, PDC_SoftReset);
	for (i = 0; i < 1000; i++) {
		delay(10);
		if ((sf_funcreg_read(sc, SF_PciDeviceConfig) &
		    PDC_SoftReset) == 0)
			break;
	}

	if (i == 1000) {
		aprint_error_dev(&sc->sc_dev, "reset failed to complete\n");
		sf_funcreg_write(sc, SF_PciDeviceConfig, 0);
	}

	delay(1000);
}

/*
 * sf_macreset:
 *
 *	Reset the MAC portion of the Starfire.
 */
static void
sf_macreset(struct sf_softc *sc)
{

	sf_genreg_write(sc, SF_MacConfig1, sc->sc_MacConfig1 | MC1_SoftRst);
	delay(1000);
	sf_genreg_write(sc, SF_MacConfig1, sc->sc_MacConfig1);
}

/*
 * sf_init:		[ifnet interface function]
 *
 *	Initialize the interface.  Must be called at splnet().
 */
static int
sf_init(struct ifnet *ifp)
{
	struct sf_softc *sc = ifp->if_softc;
	struct sf_descsoft *ds;
	int error = 0;
	u_int i;

	/*
	 * Cancel any pending I/O.
	 */
	sf_stop(ifp, 0);

	/*
	 * Reset the Starfire to a known state.
	 */
	sf_reset(sc);

	/* Clear the stat counters. */
	for (i = 0; i < sizeof(struct sf_stats); i += sizeof(uint32_t))
		sf_genreg_write(sc, SF_STATS_BASE + i, 0);

	/*
	 * Initialize the transmit descriptor ring.
	 */
	memset(sc->sc_txdescs, 0, sizeof(sc->sc_txdescs));
	sf_funcreg_write(sc, SF_TxDescQueueHighAddr, 0);
	sf_funcreg_write(sc, SF_HiPrTxDescQueueBaseAddr, SF_CDTXDADDR(sc, 0));
	sf_funcreg_write(sc, SF_LoPrTxDescQueueBaseAddr, 0);

	/*
	 * Initialize the transmit completion ring.
	 */
	for (i = 0; i < SF_NTCD; i++) {
		sc->sc_txcomp[i].tcd_word0 = htole32(TCD_DMA_ID);
		SF_CDTXCSYNC(sc, i, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	}
	sf_funcreg_write(sc, SF_CompletionQueueHighAddr, 0);
	sf_funcreg_write(sc, SF_TxCompletionQueueCtrl, SF_CDTXCADDR(sc, 0));

	/*
	 * Initialize the receive descriptor ring.
	 */
	for (i = 0; i < SF_NRXDESC; i++) {
		ds = &sc->sc_rxsoft[i];
		if (ds->ds_mbuf == NULL) {
			if ((error = sf_add_rxbuf(sc, i)) != 0) {
				aprint_error_dev(&sc->sc_dev, "unable to allocate or map rx "
				    "buffer %d, error = %d\n",
				    i, error);
				/*
				 * XXX Should attempt to run with fewer receive
				 * XXX buffers instead of just failing.
				 */
				sf_rxdrain(sc);
				goto out;
			}
		} else
			SF_INIT_RXDESC(sc, i);
	}
	sf_funcreg_write(sc, SF_RxDescQueueHighAddress, 0);
	sf_funcreg_write(sc, SF_RxDescQueue1LowAddress, SF_CDRXDADDR(sc, 0));
	sf_funcreg_write(sc, SF_RxDescQueue2LowAddress, 0);

	/*
	 * Initialize the receive completion ring.
	 */
	for (i = 0; i < SF_NRCD; i++) {
		sc->sc_rxcomp[i].rcd_word0 = htole32(RCD_W0_ID);
		sc->sc_rxcomp[i].rcd_word1 = 0;
		sc->sc_rxcomp[i].rcd_word2 = 0;
		sc->sc_rxcomp[i].rcd_timestamp = 0;
		SF_CDRXCSYNC(sc, i, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	}
	sf_funcreg_write(sc, SF_RxCompletionQueue1Ctrl, SF_CDRXCADDR(sc, 0) |
	    RCQ1C_RxCompletionQ1Type(3));
	sf_funcreg_write(sc, SF_RxCompletionQueue2Ctrl, 0);

	/*
	 * Initialize the Tx CSR.
	 */
	sc->sc_TransmitFrameCSR = 0;
	sf_funcreg_write(sc, SF_TransmitFrameCSR,
	    sc->sc_TransmitFrameCSR |
	    TFCSR_TransmitThreshold(sc->sc_txthresh));

	/*
	 * Initialize the Tx descriptor control register.
	 */
	sc->sc_TxDescQueueCtrl = TDQC_SkipLength(0) |
	    TDQC_TxDmaBurstSize(4) |	/* default */
	    TDQC_MinFrameSpacing(3) |	/* 128 bytes */
	    TDQC_TxDescType(0);
	sf_funcreg_write(sc, SF_TxDescQueueCtrl,
	    sc->sc_TxDescQueueCtrl |
	    TDQC_TxHighPriorityFifoThreshold(sc->sc_txthresh));

	/*
	 * Initialize the Rx descriptor control registers.
	 */
	sf_funcreg_write(sc, SF_RxDescQueue1Ctrl,
	    RDQ1C_RxQ1BufferLength(MCLBYTES) |
	    RDQ1C_RxDescSpacing(0));
	sf_funcreg_write(sc, SF_RxDescQueue2Ctrl, 0);

	/*
	 * Initialize the Tx descriptor producer indices.
	 */
	sf_funcreg_write(sc, SF_TxDescQueueProducerIndex,
	    TDQPI_HiPrTxProducerIndex(0) |
	    TDQPI_LoPrTxProducerIndex(0));

	/*
	 * Initialize the Rx descriptor producer indices.
	 */
	sf_funcreg_write(sc, SF_RxDescQueue1Ptrs,
	    RXQ1P_RxDescQ1Producer(SF_NRXDESC - 1));
	sf_funcreg_write(sc, SF_RxDescQueue2Ptrs,
	    RXQ2P_RxDescQ2Producer(0));

	/*
	 * Initialize the Tx and Rx completion queue consumer indices.
	 */
	sf_funcreg_write(sc, SF_CompletionQueueConsumerIndex,
	    CQCI_TxCompletionConsumerIndex(0) |
	    CQCI_RxCompletionQ1ConsumerIndex(0));
	sf_funcreg_write(sc, SF_RxHiPrCompletionPtrs, 0);

	/*
	 * Initialize the Rx DMA control register.
	 */
	sf_funcreg_write(sc, SF_RxDmaCtrl,
	    RDC_RxHighPriorityThreshold(6) |	/* default */
	    RDC_RxBurstSize(4));		/* default */

	/*
	 * Set the receive filter.
	 */
	sc->sc_RxAddressFilteringCtl = 0;
	sf_set_filter(sc);

	/*
	 * Set MacConfig1.  When we set the media, MacConfig1 will
	 * actually be written and the MAC part reset.
	 */
	sc->sc_MacConfig1 = MC1_PadEn;

	/*
	 * Set the media.
	 */
	if ((error = ether_mediachange(ifp)) != 0)
		goto out;

	/*
	 * Initialize the interrupt register.
	 */
	sc->sc_InterruptEn = IS_PCIPadInt | IS_RxQ1DoneInt |
	    IS_TxQueueDoneInt | IS_TxDmaDoneInt | IS_DmaErrInt |
	    IS_StatisticWrapInt;
	sf_funcreg_write(sc, SF_InterruptEn, sc->sc_InterruptEn);

	sf_funcreg_write(sc, SF_PciDeviceConfig, PDC_IntEnable |
	    PDC_PCIMstDmaEn | (1 << PDC_FifoThreshold_SHIFT));

	/*
	 * Start the transmit and receive processes.
	 */
	sf_funcreg_write(sc, SF_GeneralEthernetCtrl,
	    GEC_TxDmaEn|GEC_RxDmaEn|GEC_TransmitEn|GEC_ReceiveEn);

	/* Start the one second clock. */
	callout_reset(&sc->sc_tick_callout, hz, sf_tick, sc);

	/*
	 * Note that the interface is now running.
	 */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

 out:
	if (error) {
		ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
		ifp->if_timer = 0;
		printf("%s: interface not running\n", device_xname(&sc->sc_dev));
	}
	return (error);
}

/*
 * sf_rxdrain:
 *
 *	Drain the receive queue.
 */
static void
sf_rxdrain(struct sf_softc *sc)
{
	struct sf_descsoft *ds;
	int i;

	for (i = 0; i < SF_NRXDESC; i++) {
		ds = &sc->sc_rxsoft[i];
		if (ds->ds_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
			m_freem(ds->ds_mbuf);
			ds->ds_mbuf = NULL;
		}
	}
}

/*
 * sf_stop:		[ifnet interface function]
 *
 *	Stop transmission on the interface.
 */
static void
sf_stop(struct ifnet *ifp, int disable)
{
	struct sf_softc *sc = ifp->if_softc;
	struct sf_descsoft *ds;
	int i;

	/* Stop the one second clock. */
	callout_stop(&sc->sc_tick_callout);

	/* Down the MII. */
	mii_down(&sc->sc_mii);

	/* Disable interrupts. */
	sf_funcreg_write(sc, SF_InterruptEn, 0);

	/* Stop the transmit and receive processes. */
	sf_funcreg_write(sc, SF_GeneralEthernetCtrl, 0);

	/*
	 * Release any queued transmit buffers.
	 */
	for (i = 0; i < SF_NTXDESC; i++) {
		ds = &sc->sc_txsoft[i];
		if (ds->ds_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
			m_freem(ds->ds_mbuf);
			ds->ds_mbuf = NULL;
		}
	}

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;

	if (disable)
		sf_rxdrain(sc);
}

/*
 * sf_read_eeprom:
 *
 *	Read from the Starfire EEPROM.
 */
static uint8_t
sf_read_eeprom(struct sf_softc *sc, int offset)
{
	uint32_t reg;

	reg = sf_genreg_read(sc, SF_EEPROM_BASE + (offset & ~3));

	return ((reg >> (8 * (offset & 3))) & 0xff);
}
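
/*
 * Worked example: the EEPROM is presented as 32-bit words, so a byte
 * at offset 17 comes from the word at SF_EEPROM_BASE + 16, shifted
 * right by 8 * (17 & 3) == 8 bits and masked down to 8 bits.
 */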

/*
 * sf_add_rxbuf:
 *
 *	Add a receive buffer to the indicated descriptor.
 */
static int
sf_add_rxbuf(struct sf_softc *sc, int idx)
{
	struct sf_descsoft *ds = &sc->sc_rxsoft[idx];
	struct mbuf *m;
	int error;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);

	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return (ENOBUFS);
	}

	if (ds->ds_mbuf != NULL)
		bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);

	ds->ds_mbuf = m;

	error = bus_dmamap_load(sc->sc_dmat, ds->ds_dmamap,
	    m->m_ext.ext_buf, m->m_ext.ext_size, NULL,
	    BUS_DMA_READ|BUS_DMA_NOWAIT);
	if (error) {
		aprint_error_dev(&sc->sc_dev, "can't load rx DMA map %d, error = %d\n",
		    idx, error);
		panic("sf_add_rxbuf");	/* XXX */
	}

	bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
	    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);

	SF_INIT_RXDESC(sc, idx);

	return (0);
}

static void
sf_set_filter_perfect(struct sf_softc *sc, int slot, const uint8_t *enaddr)
{
	uint32_t reg0, reg1, reg2;

	reg0 = enaddr[5] | (enaddr[4] << 8);
	reg1 = enaddr[3] | (enaddr[2] << 8);
	reg2 = enaddr[1] | (enaddr[0] << 8);

	sf_genreg_write(sc, SF_PERFECT_BASE + (slot * 0x10) + 0, reg0);
	sf_genreg_write(sc, SF_PERFECT_BASE + (slot * 0x10) + 4, reg1);
	sf_genreg_write(sc, SF_PERFECT_BASE + (slot * 0x10) + 8, reg2);
}
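
/*
 * Each perfect-filter slot is 16 bytes of register space holding the
 * address as three 16-bit values, low octets first.  For example, for
 * a hypothetical address 00:a0:d2:12:34:56 the three writes above
 * would be 0x3456, 0xd212 and 0x00a0.
 */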

static void
sf_set_filter_hash(struct sf_softc *sc, uint8_t *enaddr)
{
	uint32_t hash, slot, reg;

	hash = ether_crc32_be(enaddr, ETHER_ADDR_LEN) >> 23;
	slot = hash >> 4;

	reg = sf_genreg_read(sc, SF_HASH_BASE + (slot * 0x10));
	reg |= 1 << (hash & 0xf);
	sf_genreg_write(sc, SF_HASH_BASE + (slot * 0x10), reg);
}
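
/*
 * The hash is the top 9 bits of the big-endian CRC-32 of the address:
 * the upper 5 bits (hash >> 4) select one of 32 table entries spaced
 * 0x10 apart, and the low 4 bits select a bit within that entry, for
 * 512 hash buckets in all.
 */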

/*
 * sf_set_filter:
 *
 *	Set the Starfire receive filter.
 */
static void
sf_set_filter(struct sf_softc *sc)
{
	struct ethercom *ec = &sc->sc_ethercom;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	int i;

	/* Start by clearing the perfect and hash tables. */
	for (i = 0; i < SF_PERFECT_SIZE; i += sizeof(uint32_t))
		sf_genreg_write(sc, SF_PERFECT_BASE + i, 0);

	for (i = 0; i < SF_HASH_SIZE; i += sizeof(uint32_t))
		sf_genreg_write(sc, SF_HASH_BASE + i, 0);

	/*
	 * Clear the perfect and hash mode bits.
	 */
	sc->sc_RxAddressFilteringCtl &=
	    ~(RAFC_PerfectFilteringMode(3) | RAFC_HashFilteringMode(3));

	if (ifp->if_flags & IFF_BROADCAST)
		sc->sc_RxAddressFilteringCtl |= RAFC_PassBroadcast;
	else
		sc->sc_RxAddressFilteringCtl &= ~RAFC_PassBroadcast;

	if (ifp->if_flags & IFF_PROMISC) {
		sc->sc_RxAddressFilteringCtl |= RAFC_PromiscuousMode;
		goto allmulti;
	} else
		sc->sc_RxAddressFilteringCtl &= ~RAFC_PromiscuousMode;

	/*
	 * Set normal perfect filtering mode.
	 */
	sc->sc_RxAddressFilteringCtl |= RAFC_PerfectFilteringMode(1);

	/*
	 * First, write the station address to the perfect filter
	 * table.
	 */
	sf_set_filter_perfect(sc, 0, CLLADDR(ifp->if_sadl));

	/*
	 * Now set the hash bits for each multicast address in our
	 * list.
	 */
	ETHER_FIRST_MULTI(step, ec, enm);
	if (enm == NULL)
		goto done;
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			/*
			 * We must listen to a range of multicast addresses.
			 * For now, just accept all multicasts, rather than
			 * trying to set only those filter bits needed to match
			 * the range.  (At this time, the only use of address
			 * ranges is for IP multicast routing, for which the
			 * range is big enough to require all bits set.)
			 */
			goto allmulti;
		}
		sf_set_filter_hash(sc, enm->enm_addrlo);
		ETHER_NEXT_MULTI(step, enm);
	}

	/*
	 * Set "hash only multicast dest, match regardless of VLAN ID".
	 */
	sc->sc_RxAddressFilteringCtl |= RAFC_HashFilteringMode(2);
	goto done;

 allmulti:
	/*
	 * XXX RAFC_PassMulticast is sub-optimal if using VLAN mode.
	 */
	sc->sc_RxAddressFilteringCtl |= RAFC_PassMulticast;
	ifp->if_flags |= IFF_ALLMULTI;

 done:
	sf_funcreg_write(sc, SF_RxAddressFilteringCtl,
	    sc->sc_RxAddressFilteringCtl);
}

/*
 * sf_mii_read:		[mii interface function]
 *
 *	Read from the MII.
 */
static int
sf_mii_read(device_t self, int phy, int reg)
{
	struct sf_softc *sc = (void *) self;
	uint32_t v;
	int i;

	for (i = 0; i < 1000; i++) {
		v = sf_genreg_read(sc, SF_MII_PHY_REG(phy, reg));
		if (v & MiiDataValid)
			break;
		delay(1);
	}

	if ((v & MiiDataValid) == 0)
		return (0);

	if (MiiRegDataPort(v) == 0xffff)
		return (0);

	return (MiiRegDataPort(v));
}
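
/*
 * (On the 0xffff check above: an idle MII management bus floats high,
 * so a register that reads back as all ones almost always means no PHY
 * answered at that address; it is reported as 0 rather than as data.)
 */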

/*
 * sf_mii_write:	[mii interface function]
 *
 *	Write to the MII.
 */
static void
sf_mii_write(device_t self, int phy, int reg, int val)
{
	struct sf_softc *sc = (void *) self;
	int i;

	sf_genreg_write(sc, SF_MII_PHY_REG(phy, reg), val);

	for (i = 0; i < 1000; i++) {
		if ((sf_genreg_read(sc, SF_MII_PHY_REG(phy, reg)) &
		    MiiBusy) == 0)
			return;
		delay(1);
	}

	printf("%s: MII write timed out\n", device_xname(&sc->sc_dev));
}

/*
 * sf_mii_statchg:	[mii interface function]
 *
 *	Callback from the PHY when the media changes.
 */
static void
sf_mii_statchg(device_t self)
{
	struct sf_softc *sc = (void *) self;
	uint32_t ipg;

	if (sc->sc_mii.mii_media_active & IFM_FDX) {
		sc->sc_MacConfig1 |= MC1_FullDuplex;
		ipg = 0x15;
	} else {
		sc->sc_MacConfig1 &= ~MC1_FullDuplex;
		ipg = 0x11;
	}

	sf_genreg_write(sc, SF_MacConfig1, sc->sc_MacConfig1);
	sf_macreset(sc);

	sf_genreg_write(sc, SF_BkToBkIPG, ipg);
}
