/*	$NetBSD: aic6915.c,v 1.1 2001/06/18 22:05:35 thorpej Exp $	*/

/*-
 * Copyright (c) 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Device driver for the Adaptec AIC-6915 (``Starfire'')
 * 10/100 Ethernet controller.
 */

#include "bpfilter.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>

#include <uvm/uvm_extern.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <machine/bus.h>
#include <machine/intr.h>

#include <dev/mii/miivar.h>

#include <dev/ic/aic6915reg.h>
#include <dev/ic/aic6915var.h>

void	sf_start(struct ifnet *);
void	sf_watchdog(struct ifnet *);
int	sf_ioctl(struct ifnet *, u_long, caddr_t);
int	sf_init(struct ifnet *);
void	sf_stop(struct ifnet *, int);

void	sf_shutdown(void *);

void	sf_txintr(struct sf_softc *);
void	sf_rxintr(struct sf_softc *);
void	sf_stats_update(struct sf_softc *);

void	sf_reset(struct sf_softc *);
void	sf_macreset(struct sf_softc *);
void	sf_rxdrain(struct sf_softc *);
int	sf_add_rxbuf(struct sf_softc *, int);
uint8_t	sf_read_eeprom(struct sf_softc *, int);
void	sf_set_filter(struct sf_softc *);

int	sf_mii_read(struct device *, int, int);
void	sf_mii_write(struct device *, int, int, int);
void	sf_mii_statchg(struct device *);

void	sf_tick(void *);

int	sf_mediachange(struct ifnet *);
void	sf_mediastatus(struct ifnet *, struct ifmediareq *);

int	sf_copy_small = 0;

#define	sf_funcreg_read(sc, reg)					\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh_func, (reg))
#define	sf_funcreg_write(sc, reg, val)					\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh_func, (reg), (val))

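/*
 * sf_reg_read:
 *
 *	Read a chip register.  If the chip is I/O mapped, the register
 *	is accessed indirectly: its offset is written to
 *	SF_IndirectIoAccess and the value is transferred through
 *	SF_IndirectIoDataPort.
 */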
static __inline uint32_t
sf_reg_read(struct sf_softc *sc, bus_addr_t reg)
{

	if (__predict_false(sc->sc_iomapped)) {
		bus_space_write_4(sc->sc_st, sc->sc_sh, SF_IndirectIoAccess,
		    reg);
		return (bus_space_read_4(sc->sc_st, sc->sc_sh,
		    SF_IndirectIoDataPort));
	}

	return (bus_space_read_4(sc->sc_st, sc->sc_sh, reg));
}

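/*
 * sf_reg_write:
 *
 *	Write a chip register, using the same indirect access method
 *	as sf_reg_read when the chip is I/O mapped.
 */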
static __inline void
sf_reg_write(struct sf_softc *sc, bus_addr_t reg, uint32_t val)
{

	if (__predict_false(sc->sc_iomapped)) {
		bus_space_write_4(sc->sc_st, sc->sc_sh, SF_IndirectIoAccess,
		    reg);
		bus_space_write_4(sc->sc_st, sc->sc_sh, SF_IndirectIoDataPort,
		    val);
		return;
	}

	bus_space_write_4(sc->sc_st, sc->sc_sh, reg, val);
}

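/*
 * The general registers (MAC, EEPROM, MII, statistics, and filter
 * tables) live above SF_GENREG_OFFSET in the register space; these
 * macros apply that bias so callers can use offsets relative to the
 * general register block.
 */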
#define	sf_genreg_read(sc, reg)						\
	sf_reg_read((sc), (reg) + SF_GENREG_OFFSET)
#define	sf_genreg_write(sc, reg, val)					\
	sf_reg_write((sc), (reg) + SF_GENREG_OFFSET, (val))

/*
 * sf_attach:
 *
 *	Attach a Starfire interface to the system.
 */
void
sf_attach(struct sf_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	int i, rseg, error;
	bus_dma_segment_t seg;
	u_int8_t enaddr[ETHER_ADDR_LEN];

	callout_init(&sc->sc_tick_callout);

	/*
	 * If we're I/O mapped, the functional register handle is
	 * the same as the base handle.  If we're memory mapped,
	 * carve off a chunk of the register space for the functional
	 * registers, to save on arithmetic later.
	 */
	if (sc->sc_iomapped)
		sc->sc_sh_func = sc->sc_sh;
	else {
		if ((error = bus_space_subregion(sc->sc_st, sc->sc_sh,
		    SF_GENREG_OFFSET, SF_FUNCREG_SIZE, &sc->sc_sh_func)) != 0) {
			printf("%s: unable to sub-region functional "
			    "registers, error = %d\n", sc->sc_dev.dv_xname,
			    error);
			return;
		}
	}

	/*
	 * Initialize the transmit threshold for this interface.  The
	 * manual describes the default as 4 * 16 bytes.  We start out
	 * at 10 * 16 bytes, to avoid a bunch of initial underruns on
	 * several platforms.
	 */
	sc->sc_txthresh = 10;

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat,
	    sizeof(struct sf_control_data), PAGE_SIZE, 0, &seg, 1, &rseg,
	    BUS_DMA_NOWAIT)) != 0) {
		printf("%s: unable to allocate control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	    sizeof(struct sf_control_data), (caddr_t *)&sc->sc_control_data,
	    BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
		printf("%s: unable to map control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat,
	    sizeof(struct sf_control_data), 1,
	    sizeof(struct sf_control_data), 0, BUS_DMA_NOWAIT,
	    &sc->sc_cddmamap)) != 0) {
		printf("%s: unable to create control data DMA map, "
		    "error = %d\n", sc->sc_dev.dv_xname, error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct sf_control_data), NULL,
	    BUS_DMA_NOWAIT)) != 0) {
		printf("%s: unable to load control data DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_3;
	}

	/*
	 * Create the transmit buffer DMA maps.
	 */
	for (i = 0; i < SF_NTXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    SF_NTXFRAGS, MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &sc->sc_txsoft[i].ds_dmamap)) != 0) {
			printf("%s: unable to create tx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_4;
		}
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < SF_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &sc->sc_rxsoft[i].ds_dmamap)) != 0) {
			printf("%s: unable to create rx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_5;
		}
	}

	/*
	 * Reset the chip to a known state.
	 */
	sf_reset(sc);

	/*
	 * Read the Ethernet address from the EEPROM.  It is stored
	 * in reverse byte order at EEPROM offsets 15 through 20.
	 */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		enaddr[i] = sf_read_eeprom(sc, (15 + (ETHER_ADDR_LEN - 1)) - i);

	printf("%s: Ethernet address %s\n", sc->sc_dev.dv_xname,
	    ether_sprintf(enaddr));

	if (sf_funcreg_read(sc, SF_PciDeviceConfig) & PDC_System64)
		printf("%s: 64-bit PCI slot detected\n", sc->sc_dev.dv_xname);

	/*
	 * Initialize our media structures and probe the MII.
	 */
	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = sf_mii_read;
	sc->sc_mii.mii_writereg = sf_mii_write;
	sc->sc_mii.mii_statchg = sf_mii_statchg;
	ifmedia_init(&sc->sc_mii.mii_media, 0, sf_mediachange,
	    sf_mediastatus);
	mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);

	strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = sf_ioctl;
	ifp->if_start = sf_start;
	ifp->if_watchdog = sf_watchdog;
	ifp->if_init = sf_init;
	ifp->if_stop = sf_stop;
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, enaddr);

	/*
	 * Make sure the interface is shutdown during reboot.
	 */
	sc->sc_sdhook = shutdownhook_establish(sf_shutdown, sc);
	if (sc->sc_sdhook == NULL)
		printf("%s: WARNING: unable to establish shutdown hook\n",
		    sc->sc_dev.dv_xname);
	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
 fail_5:
	for (i = 0; i < SF_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].ds_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_rxsoft[i].ds_dmamap);
	}
 fail_4:
	for (i = 0; i < SF_NTXDESC; i++) {
		if (sc->sc_txsoft[i].ds_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_txsoft[i].ds_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (caddr_t) sc->sc_control_data,
	    sizeof(struct sf_control_data));
 fail_1:
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
 fail_0:
	return;
}

/*
 * sf_shutdown:
 *
 *	Shutdown hook -- make sure the interface is stopped at reboot.
 */
void
sf_shutdown(void *arg)
{
	struct sf_softc *sc = arg;

	sf_stop(&sc->sc_ethercom.ec_if, 1);
}

/*
 * sf_start:		[ifnet interface function]
 *
 *	Start packet transmission on the interface.
 */
void
sf_start(struct ifnet *ifp)
{
	struct sf_softc *sc = ifp->if_softc;
	struct mbuf *m0, *m;
	struct sf_txdesc0 *txd;
	struct sf_descsoft *ds;
	bus_dmamap_t dmamap;
	int error, producer, last, opending, seg;

	/*
	 * Remember the previous number of pending transmits.
	 */
	opending = sc->sc_txpending;

	/*
	 * Find out where we're sitting.
	 */
	producer = SF_TXDINDEX_TO_HOST(
	    TDQPI_HiPrTxProducerIndex_get(
	    sf_funcreg_read(sc, SF_TxDescQueueProducerIndex)));

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.  Leave a blank one at the end for sanity's sake.
	 */
	while (sc->sc_txpending < (SF_NTXDESC - 1)) {
		/*
		 * Grab a packet off the queue.
		 */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;
		m = NULL;

		/*
		 * Get the transmit descriptor.
		 */
		txd = &sc->sc_txdescs[producer];
		ds = &sc->sc_txsoft[producer];
		dmamap = ds->ds_dmamap;

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the allotted number of frags, or we were
		 * short on resources.  In this case, we'll copy and try
		 * again.
		 */
		if (bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_NOWAIT) != 0) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				printf("%s: unable to allocate Tx mbuf\n",
				    sc->sc_dev.dv_xname);
				break;
			}
			if (m0->m_pkthdr.len > MHLEN) {
				MCLGET(m, M_DONTWAIT);
				if ((m->m_flags & M_EXT) == 0) {
					printf("%s: unable to allocate Tx "
					    "cluster\n", sc->sc_dev.dv_xname);
					m_freem(m);
					break;
				}
			}
			m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, caddr_t));
			m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
			error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
			    m, BUS_DMA_NOWAIT);
			if (error) {
				printf("%s: unable to load Tx buffer, "
				    "error = %d\n", sc->sc_dev.dv_xname, error);
				break;
			}
		}

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */
		IFQ_DEQUEUE(&ifp->if_snd, m0);
		if (m != NULL) {
			m_freem(m0);
			m0 = m;
		}

		/* Initialize the descriptor. */
		txd->td_word0 =
		    htole32(TD_W0_ID | TD_W0_CRCEN | m0->m_pkthdr.len);
		if (producer == (SF_NTXDESC - 1))
			txd->td_word0 |= htole32(TD_W0_END);
		txd->td_word1 = htole32(dmamap->dm_nsegs);
		for (seg = 0; seg < dmamap->dm_nsegs; seg++) {
			txd->td_frags[seg].fr_addr =
			    htole32(dmamap->dm_segs[seg].ds_addr);
			txd->td_frags[seg].fr_len =
			    htole32(dmamap->dm_segs[seg].ds_len);
		}

		/* Sync the descriptor and the DMA map. */
		SF_CDTXDSYNC(sc, producer, BUS_DMASYNC_PREWRITE);
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		/*
		 * Store a pointer to the packet so we can free it later.
		 */
		ds->ds_mbuf = m0;

		/* Advance the Tx pointer. */
		sc->sc_txpending++;
		last = producer;
		producer = SF_NEXTTX(producer);

#if NBPFILTER > 0
		/*
		 * Pass the packet to any BPF listeners.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m0);
#endif
	}

	if (sc->sc_txpending == (SF_NTXDESC - 1)) {
		/* No more slots left; notify upper layer. */
		ifp->if_flags |= IFF_OACTIVE;
	}

	if (sc->sc_txpending != opending) {
		/*
		 * We enqueued packets.  Cause a transmit interrupt to
		 * happen on the last packet we enqueued, and give the
		 * new descriptors to the chip by writing the new
		 * producer index.
		 */
		sc->sc_txdescs[last].td_word0 |= htole32(TD_W0_INTR);
		SF_CDTXDSYNC(sc, last, BUS_DMASYNC_PREWRITE);

		sf_funcreg_write(sc, SF_TxDescQueueProducerIndex,
		    TDQPI_HiPrTxProducerIndex(SF_TXDINDEX_TO_CHIP(producer)));

		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}

/*
 * sf_watchdog:		[ifnet interface function]
 *
 *	Watchdog timer handler.
 */
void
sf_watchdog(struct ifnet *ifp)
{
	struct sf_softc *sc = ifp->if_softc;

	printf("%s: device timeout\n", sc->sc_dev.dv_xname);
	ifp->if_oerrors++;

	(void) sf_init(ifp);

	/* Try to get more packets going. */
	sf_start(ifp);
}

/*
 * sf_ioctl:		[ifnet interface function]
 *
 *	Handle control requests from the operator.
 */
int
sf_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct sf_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
	int s, error;

	s = splnet();

	switch (cmd) {
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
		if (error == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			sf_set_filter(sc);
			error = 0;
		}
		break;
	}

	/* Try to get more packets going. */
	sf_start(ifp);

	splx(s);
	return (error);
}

/*
 * sf_intr:
 *
 *	Interrupt service routine.
 */
int
sf_intr(void *arg)
{
	struct sf_softc *sc = arg;
	uint32_t isr;
	int handled = 0, wantinit = 0;

	for (;;) {
		/* Reading clears all interrupts we're interested in. */
		isr = sf_funcreg_read(sc, SF_InterruptStatus);
		if ((isr & IS_PCIPadInt) == 0)
			break;

		handled = 1;

		/* Handle receive interrupts. */
		if (isr & IS_RxQ1DoneInt)
			sf_rxintr(sc);

		/* Handle transmit completion interrupts. */
		if (isr & (IS_TxDmaDoneInt|IS_TxQueueDoneInt))
			sf_txintr(sc);

		/* Handle abnormal interrupts. */
		if (isr & IS_AbnormalInterrupt) {
			/* Statistics. */
			if (isr & IS_StatisticWrapInt)
				sf_stats_update(sc);

			/* DMA errors. */
			if (isr & IS_DmaErrInt) {
				wantinit = 1;
				printf("%s: WARNING: DMA error\n",
				    sc->sc_dev.dv_xname);
			}

			/* Transmit FIFO underruns. */
			if (isr & IS_TxDataLowInt) {
				if (sc->sc_txthresh < 0xff)
					sc->sc_txthresh++;
				printf("%s: transmit FIFO underrun, new "
				    "threshold: %d bytes\n",
				    sc->sc_dev.dv_xname,
				    sc->sc_txthresh * 16);
				sf_funcreg_write(sc, SF_TransmitFrameCSR,
				    sc->sc_TransmitFrameCSR |
				    TFCSR_TransmitThreshold(sc->sc_txthresh));
				sf_funcreg_write(sc, SF_TxDescQueueCtrl,
				    sc->sc_TxDescQueueCtrl |
				    TDQC_TxHighPriorityFifoThreshold(
				    sc->sc_txthresh));
			}
		}
	}

	if (handled) {
		/* Reset the interface, if necessary. */
		if (wantinit)
			sf_init(&sc->sc_ethercom.ec_if);

		/* Try and get more packets going. */
		sf_start(&sc->sc_ethercom.ec_if);
	}

	return (handled);
}

/*
 * sf_txintr:
 *
 *	Helper -- handle transmit completion interrupts.
 */
void
sf_txintr(struct sf_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct sf_descsoft *ds;
	uint32_t cqci, tcd;
	int consumer, producer, txidx;

 try_again:
	cqci = sf_funcreg_read(sc, SF_CompletionQueueConsumerIndex);

	consumer = CQCI_TxCompletionConsumerIndex_get(cqci);
	producer = CQPI_TxCompletionProducerIndex_get(
	    sf_funcreg_read(sc, SF_CompletionQueueProducerIndex));

	if (consumer == producer)
		return;

	ifp->if_flags &= ~IFF_OACTIVE;

	while (consumer != producer) {
		SF_CDTXCSYNC(sc, consumer, BUS_DMASYNC_POSTREAD);
		tcd = le32toh(sc->sc_txcomp[consumer].tcd_word0);

		txidx = SF_TCD_INDEX_TO_HOST(TCD_INDEX(tcd));
#ifdef DIAGNOSTIC
		if ((tcd & TCD_PR) == 0)
			printf("%s: Tx queue mismatch, index %d\n",
			    sc->sc_dev.dv_xname, txidx);
#endif
		/*
		 * NOTE: stats are updated later.  We're just
		 * releasing packets that have been DMA'd to
		 * the chip.
		 */
		ds = &sc->sc_txsoft[txidx];
		SF_CDTXDSYNC(sc, txidx, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap,
		    0, ds->ds_dmamap->dm_mapsize,
		    BUS_DMASYNC_POSTWRITE);
		m_freem(ds->ds_mbuf);
		ds->ds_mbuf = NULL;

		consumer = SF_NEXTTCD(consumer);
		sc->sc_txpending--;
	}

	/* XXXJRT -- should be KDASSERT() */
	KASSERT(sc->sc_txpending >= 0);

	/* If all packets are done, cancel the watchdog timer. */
	if (sc->sc_txpending == 0)
		ifp->if_timer = 0;

	/* Update the consumer index. */
	sf_funcreg_write(sc, SF_CompletionQueueConsumerIndex,
	    (cqci & ~CQCI_TxCompletionConsumerIndex(0x7ff)) |
	    CQCI_TxCompletionConsumerIndex(consumer));

	/* Double check for new completions. */
	goto try_again;
}

/*
 * sf_rxintr:
 *
 *	Helper -- handle receive interrupts.
 */
void
sf_rxintr(struct sf_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct sf_descsoft *ds;
	struct sf_rcd_full *rcd;
	struct mbuf *m;
	uint32_t cqci, word0;
	int consumer, producer, bufproducer, rxidx, len;

 try_again:
	cqci = sf_funcreg_read(sc, SF_CompletionQueueConsumerIndex);

	consumer = CQCI_RxCompletionQ1ConsumerIndex_get(cqci);
	producer = CQPI_RxCompletionQ1ProducerIndex_get(
	    sf_funcreg_read(sc, SF_CompletionQueueProducerIndex));
	bufproducer = RXQ1P_RxDescQ1Producer_get(
	    sf_funcreg_read(sc, SF_RxDescQueue1Ptrs));

	if (consumer == producer)
		return;

	while (consumer != producer) {
		rcd = &sc->sc_rxcomp[consumer];
		SF_CDRXCSYNC(sc, consumer,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
		SF_CDRXCSYNC(sc, consumer,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		word0 = le32toh(rcd->rcd_word0);
		rxidx = RCD_W0_EndIndex(word0);

		ds = &sc->sc_rxsoft[rxidx];

		consumer = SF_NEXTRCD(consumer);
		bufproducer = SF_NEXTRX(bufproducer);

		if ((word0 & RCD_W0_OK) == 0) {
			SF_INIT_RXDESC(sc, rxidx);
			continue;
		}

		bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
		    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

		/*
		 * No errors; receive the packet.  Note that we have
		 * configured the Starfire to NOT transfer the CRC
		 * with the packet.
		 */
		len = RCD_W0_Length(word0);

#ifdef __NO_STRICT_ALIGNMENT
		/*
		 * Allocate a new mbuf cluster.  If that fails, we are
		 * out of memory, and must drop the packet and recycle
		 * the buffer that's already attached to this descriptor.
		 */
		m = ds->ds_mbuf;
		if (sf_add_rxbuf(sc, rxidx) != 0) {
			ifp->if_ierrors++;
			SF_INIT_RXDESC(sc, rxidx);
			bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
			    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
			continue;
		}
#else
		/*
		 * The Starfire's receive buffer must be 4-byte aligned.
		 * But this means that the data after the Ethernet header
		 * is misaligned.  We must allocate a new buffer and
		 * copy the data, shifted forward 2 bytes.
		 */
		MGETHDR(m, M_DONTWAIT, MT_DATA);
		if (m == NULL) {
 dropit:
			ifp->if_ierrors++;
			SF_INIT_RXDESC(sc, rxidx);
			bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
			    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
			continue;
		}
		if (len > (MHLEN - 2)) {
			MCLGET(m, M_DONTWAIT);
			if ((m->m_flags & M_EXT) == 0) {
				m_freem(m);
				goto dropit;
			}
		}
		m->m_data += 2;

		/*
		 * Note that we use a single cluster for each incoming
		 * frame, so the buffer is virtually contiguous.
		 */
		memcpy(mtod(m, caddr_t), mtod(ds->ds_mbuf, caddr_t), len);

		/* Allow the receive descriptor to continue using its mbuf. */
		SF_INIT_RXDESC(sc, rxidx);
		bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
		    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
#endif /* __NO_STRICT_ALIGNMENT */

		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = len;

#if NBPFILTER > 0
		/*
		 * Pass this up to any BPF listeners.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif /* NBPFILTER > 0 */

		/* Pass it on. */
		(*ifp->if_input)(ifp, m);
	}

	/* Update the chip's pointers. */
	sf_funcreg_write(sc, SF_CompletionQueueConsumerIndex,
	    (cqci & ~CQCI_RxCompletionQ1ConsumerIndex(0x7ff)) |
	    CQCI_RxCompletionQ1ConsumerIndex(consumer));
	sf_funcreg_write(sc, SF_RxDescQueue1Ptrs,
	    RXQ1P_RxDescQ1Producer(bufproducer));

	/* Double-check for any new completions. */
	goto try_again;
}

/*
 * sf_tick:
 *
 *	One second timer, used to tick the MII and update stats.
 */
void
sf_tick(void *arg)
{
	struct sf_softc *sc = arg;
	int s;

	s = splnet();
	mii_tick(&sc->sc_mii);
	sf_stats_update(sc);
	splx(s);

	callout_reset(&sc->sc_tick_callout, hz, sf_tick, sc);
}

/*
 * sf_stats_update:
 *
 *	Read the statistics counters.
 */
void
sf_stats_update(struct sf_softc *sc)
{
	struct sf_stats stats;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint32_t *p;
	int i;

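	/*
	 * Walk the statistics block, copying each counter out and
	 * clearing it; this relies on struct sf_stats being laid out
	 * as a packed sequence of uint32_t matching the chip's
	 * counters.
	 */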
	p = &stats.TransmitOKFrames;
	for (i = 0; i < (sizeof(stats) / sizeof(uint32_t)); i++) {
		*p++ = sf_genreg_read(sc,
		    SF_STATS_BASE + (i * sizeof(uint32_t)));
		sf_genreg_write(sc, SF_STATS_BASE + (i * sizeof(uint32_t)), 0);
	}

	ifp->if_opackets += stats.TransmitOKFrames;

	ifp->if_collisions += stats.SingleCollisionFrames +
	    stats.MultipleCollisionFrames;

	ifp->if_oerrors += stats.TransmitAbortDueToExcessiveCollisions +
	    stats.TransmitAbortDueToExcessingDeferral +
	    stats.FramesLostDueToInternalTransmitErrors;

	ifp->if_ipackets += stats.ReceiveOKFrames;

	ifp->if_ierrors += stats.ReceiveCRCErrors + stats.AlignmentErrors +
	    stats.ReceiveFramesTooLong + stats.ReceiveFramesTooShort +
	    stats.ReceiveFramesJabbersError +
	    stats.FramesLostDueToInternalReceiveErrors;
}

/*
 * sf_reset:
 *
 *	Perform a soft reset on the Starfire.
 */
void
sf_reset(struct sf_softc *sc)
{
	int i;

	sf_funcreg_write(sc, SF_GeneralEthernetCtrl, 0);

	sf_macreset(sc);

	sf_funcreg_write(sc, SF_PciDeviceConfig, PDC_SoftReset);
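	/* Wait up to 10ms (1000 * 10us) for the soft reset to complete. */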
	for (i = 0; i < 1000; i++) {
		delay(10);
		if ((sf_funcreg_read(sc, SF_PciDeviceConfig) &
		     PDC_SoftReset) == 0)
			break;
	}

	if (i == 1000) {
		printf("%s: reset failed to complete\n", sc->sc_dev.dv_xname);
		sf_funcreg_write(sc, SF_PciDeviceConfig, 0);
	}

	delay(1000);
}

/*
 * sf_macreset:
 *
 *	Reset the MAC portion of the Starfire.
 */
void
sf_macreset(struct sf_softc *sc)
{

	sf_genreg_write(sc, SF_MacConfig1, sc->sc_MacConfig1 | MC1_SoftRst);
	delay(1000);
	sf_genreg_write(sc, SF_MacConfig1, sc->sc_MacConfig1);
}

/*
 * sf_init:		[ifnet interface function]
 *
 *	Initialize the interface.  Must be called at splnet().
 */
int
sf_init(struct ifnet *ifp)
{
	struct sf_softc *sc = ifp->if_softc;
	struct sf_descsoft *ds;
	int i, error = 0;

	/*
	 * Cancel any pending I/O.
	 */
	sf_stop(ifp, 0);

	/*
	 * Reset the Starfire to a known state.
	 */
	sf_reset(sc);

	/* Clear the stat counters. */
	for (i = 0; i < sizeof(struct sf_stats); i += sizeof(uint32_t))
		sf_genreg_write(sc, SF_STATS_BASE + i, 0);

	/*
	 * Initialize the transmit descriptor ring.
	 */
	memset(sc->sc_txdescs, 0, sizeof(sc->sc_txdescs));
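	/*
	 * Only the high-priority Tx descriptor queue is used; the
	 * low-priority queue base address is left at 0.
	 */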
	sf_funcreg_write(sc, SF_TxDescQueueHighAddr, 0);
	sf_funcreg_write(sc, SF_HiPrTxDescQueueBaseAddr, SF_CDTXDADDR(sc, 0));
	sf_funcreg_write(sc, SF_LoPrTxDescQueueBaseAddr, 0);

	/*
	 * Initialize the transmit completion ring.
	 */
	for (i = 0; i < SF_NTCD; i++) {
		sc->sc_txcomp[i].tcd_word0 = TCD_DMA_ID;
		SF_CDTXCSYNC(sc, i, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	}
	sf_funcreg_write(sc, SF_CompletionQueueHighAddr, 0);
	sf_funcreg_write(sc, SF_TxCompletionQueueCtrl, SF_CDTXCADDR(sc, 0));

	/*
	 * Initialize the receive descriptor ring.
	 */
	for (i = 0; i < SF_NRXDESC; i++) {
		ds = &sc->sc_rxsoft[i];
		if (ds->ds_mbuf == NULL) {
			if ((error = sf_add_rxbuf(sc, i)) != 0) {
				printf("%s: unable to allocate or map rx "
				    "buffer %d, error = %d\n",
				    sc->sc_dev.dv_xname, i, error);
				/*
				 * XXX Should attempt to run with fewer receive
				 * XXX buffers instead of just failing.
				 */
				sf_rxdrain(sc);
				goto out;
			}
		}
	}
	sf_funcreg_write(sc, SF_RxDescQueueHighAddress, 0);
	sf_funcreg_write(sc, SF_RxDescQueue1LowAddress, SF_CDRXDADDR(sc, 0));
	sf_funcreg_write(sc, SF_RxDescQueue2LowAddress, 0);

	/*
	 * Initialize the receive completion ring.
	 */
	for (i = 0; i < SF_NRCD; i++) {
		sc->sc_rxcomp[i].rcd_word0 = RCD_W0_ID;
		sc->sc_rxcomp[i].rcd_word1 = 0;
		sc->sc_rxcomp[i].rcd_word2 = 0;
		sc->sc_rxcomp[i].rcd_timestamp = 0;
		SF_CDRXCSYNC(sc, i, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	}
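	/*
	 * Completion queue type 3 selects the completion descriptor
	 * layout that struct sf_rcd_full describes.
	 */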
	sf_funcreg_write(sc, SF_RxCompletionQueue1Ctrl, SF_CDRXCADDR(sc, 0) |
	    RCQ1C_RxCompletionQ1Type(3));
	sf_funcreg_write(sc, SF_RxCompletionQueue2Ctrl, 0);

	/*
	 * Initialize the Tx CSR.
	 */
	sc->sc_TransmitFrameCSR = 0;
	sf_funcreg_write(sc, SF_TransmitFrameCSR,
	    sc->sc_TransmitFrameCSR |
	    TFCSR_TransmitThreshold(sc->sc_txthresh));

	/*
	 * Initialize the Tx descriptor control register.
	 */
	sc->sc_TxDescQueueCtrl = TDQC_SkipLength(0) |
	    TDQC_TxDmaBurstSize(4) |	/* default */
	    TDQC_MinFrameSpacing(2) |	/* 64 bytes */
	    TDQC_TxDescType(0);
	sf_funcreg_write(sc, SF_TxDescQueueCtrl,
	    sc->sc_TxDescQueueCtrl |
	    TDQC_TxHighPriorityFifoThreshold(sc->sc_txthresh));

	/*
	 * Initialize the Rx descriptor control registers.
	 */
	sf_funcreg_write(sc, SF_RxDescQueue1Ctrl,
	    RDQ1C_RxQ1BufferLength(MCLBYTES) |
	    RDQ1C_RxDescSpacing(0));
	sf_funcreg_write(sc, SF_RxDescQueue2Ctrl, 0);

	/*
	 * Initialize the Tx descriptor producer indices.
	 */
	sf_funcreg_write(sc, SF_TxDescQueueProducerIndex,
	    TDQPI_HiPrTxProducerIndex(0) |
	    TDQPI_LoPrTxProducerIndex(0));

	/*
	 * Initialize the Rx descriptor producer indices.
	 */
	sf_funcreg_write(sc, SF_RxDescQueue1Ptrs,
	    RXQ1P_RxDescQ1Producer(SF_NRXDESC - 1));
	sf_funcreg_write(sc, SF_RxDescQueue2Ptrs,
	    RXQ2P_RxDescQ2Producer(0));

	/*
	 * Initialize the Tx and Rx completion queue consumer indices.
	 */
	sf_funcreg_write(sc, SF_CompletionQueueConsumerIndex,
	    CQCI_TxCompletionConsumerIndex(0) |
	    CQCI_RxCompletionQ1ConsumerIndex(0));
	sf_funcreg_write(sc, SF_RxHiPrCompletionPtrs, 0);

	/*
	 * Initialize the Rx DMA control register.
	 */
	sf_funcreg_write(sc, SF_RxDmaCtrl,
	    RDC_RxHighPriorityThreshold(6) |	/* default */
	    RDC_RxBurstSize(4));		/* default */

	/*
	 * Set the receive filter.
	 */
	sc->sc_RxAddressFilteringCtl = 0;
	sf_set_filter(sc);

	/*
	 * Set MacConfig1.  When we set the media, MacConfig1 will
	 * actually be written and the MAC part reset.
	 */
	sc->sc_MacConfig1 = MC1_PadEn;

	/*
	 * Set the media.
	 */
	mii_mediachg(&sc->sc_mii);

	/*
	 * Initialize the interrupt register.
	 */
	sc->sc_InterruptEn = IS_PCIPadInt | IS_RxQ1DoneInt |
	    IS_TxQueueDoneInt | IS_TxDmaDoneInt | IS_DmaErrInt |
	    IS_StatisticWrapInt;
	sf_funcreg_write(sc, SF_InterruptEn, sc->sc_InterruptEn);

	sf_funcreg_write(sc, SF_PciDeviceConfig, PDC_IntEnable |
	    PDC_PCIMstDmaEn | (1 << PDC_FifoThreshold_SHIFT));

	/*
	 * Start the transmit and receive processes.
	 */
	sf_funcreg_write(sc, SF_GeneralEthernetCtrl,
	    GEC_TxDmaEn|GEC_RxDmaEn|GEC_TransmitEn|GEC_ReceiveEn);

	/* Start the one second clock. */
	callout_reset(&sc->sc_tick_callout, hz, sf_tick, sc);

	/*
	 * Note that the interface is now running.
	 */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

 out:
	if (error) {
		ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
		ifp->if_timer = 0;
		printf("%s: interface not running\n", sc->sc_dev.dv_xname);
	}
	return (error);
}

/*
 * sf_rxdrain:
 *
 *	Drain the receive queue.
 */
void
sf_rxdrain(struct sf_softc *sc)
{
	struct sf_descsoft *ds;
	int i;

	for (i = 0; i < SF_NRXDESC; i++) {
		ds = &sc->sc_rxsoft[i];
		if (ds->ds_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
			m_freem(ds->ds_mbuf);
			ds->ds_mbuf = NULL;
		}
	}
}

/*
 * sf_stop:		[ifnet interface function]
 *
 *	Stop transmission on the interface.
 */
void
sf_stop(struct ifnet *ifp, int disable)
{
	struct sf_softc *sc = ifp->if_softc;
	struct sf_descsoft *ds;
	int i;

	/* Stop the one second clock. */
	callout_stop(&sc->sc_tick_callout);

	/* Down the MII. */
	mii_down(&sc->sc_mii);

	/* Disable interrupts. */
	sf_funcreg_write(sc, SF_InterruptEn, 0);

	/* Stop the transmit and receive processes. */
	sf_funcreg_write(sc, SF_GeneralEthernetCtrl, 0);

	/*
	 * Release any queued transmit buffers.
	 */
	for (i = 0; i < SF_NTXDESC; i++) {
		ds = &sc->sc_txsoft[i];
		if (ds->ds_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
			m_freem(ds->ds_mbuf);
			ds->ds_mbuf = NULL;
		}
	}

	if (disable)
		sf_rxdrain(sc);

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;
}

/*
 * sf_read_eeprom:
 *
 *	Read from the Starfire EEPROM.
 */
uint8_t
sf_read_eeprom(struct sf_softc *sc, int offset)
{
	uint32_t reg;

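	/*
	 * The EEPROM contents are visible through 32-bit registers;
	 * read the word containing the byte and shift it out.
	 */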
	reg = sf_genreg_read(sc, SF_EEPROM_BASE + (offset & ~3));

	return ((reg >> (8 * (offset & 3))) & 0xff);
}

/*
 * sf_add_rxbuf:
 *
 *	Add a receive buffer to the indicated descriptor.
 */
int
sf_add_rxbuf(struct sf_softc *sc, int idx)
{
	struct sf_descsoft *ds = &sc->sc_rxsoft[idx];
	struct mbuf *m;
	int error;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);

	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return (ENOBUFS);
	}

	if (ds->ds_mbuf != NULL)
		bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);

	ds->ds_mbuf = m;

	error = bus_dmamap_load(sc->sc_dmat, ds->ds_dmamap,
	    m->m_ext.ext_buf, m->m_ext.ext_size, NULL, BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: can't load rx DMA map %d, error = %d\n",
		    sc->sc_dev.dv_xname, idx, error);
		panic("sf_add_rxbuf");	/* XXX */
	}

	bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
	    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);

	SF_INIT_RXDESC(sc, idx);

	return (0);
}

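/*
 * sf_set_filter_perfect:
 *
 *	Program one entry of the perfect address filter table.  Each
 *	entry is three 16-bit registers at offsets 0, 4, and 8 within
 *	a 16-byte slot, holding the station address in reverse byte
 *	order.
 */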
static void
sf_set_filter_perfect(struct sf_softc *sc, int slot, uint8_t *enaddr)
{
	uint32_t reg0, reg1, reg2;

	reg0 = enaddr[5] | (enaddr[4] << 8);
	reg1 = enaddr[3] | (enaddr[2] << 8);
	reg2 = enaddr[1] | (enaddr[0] << 8);

	sf_genreg_write(sc, SF_PERFECT_BASE + (slot * 0x10) + 0, reg0);
	sf_genreg_write(sc, SF_PERFECT_BASE + (slot * 0x10) + 4, reg1);
	sf_genreg_write(sc, SF_PERFECT_BASE + (slot * 0x10) + 8, reg2);
}

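/*
 * sf_set_filter_hash:
 *
 *	Set a bit in the 512-bit multicast hash table.  The upper 9
 *	bits of the big-endian CRC of the address select the bit: the
 *	top 5 bits pick one of 32 16-bit hash registers (spaced 0x10
 *	apart) and the remaining 4 bits pick the bit within that
 *	register.
 */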
static void
sf_set_filter_hash(struct sf_softc *sc, uint8_t *enaddr)
{
	uint32_t hash, slot, reg;

	hash = ether_crc32_be(enaddr, ETHER_ADDR_LEN) >> 23;
	slot = hash >> 4;

	reg = sf_genreg_read(sc, SF_HASH_BASE + (slot * 0x10));
	reg |= 1 << (hash & 0xf);
	sf_genreg_write(sc, SF_HASH_BASE + (slot * 0x10), reg);
}

/*
 * sf_set_filter:
 *
 *	Set the Starfire receive filter.
 */
void
sf_set_filter(struct sf_softc *sc)
{
	struct ethercom *ec = &sc->sc_ethercom;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	int i;

	/* Start by clearing the perfect and hash tables. */
	for (i = 0; i < SF_PERFECT_SIZE; i += sizeof(uint32_t))
		sf_genreg_write(sc, SF_PERFECT_BASE + i, 0);

	for (i = 0; i < SF_HASH_SIZE; i += sizeof(uint32_t))
		sf_genreg_write(sc, SF_HASH_BASE + i, 0);

	/*
	 * Clear the perfect and hash mode bits.
	 */
	sc->sc_RxAddressFilteringCtl &=
	    ~(RAFC_PerfectFilteringMode(3) | RAFC_HashFilteringMode(3));

	if (ifp->if_flags & IFF_BROADCAST)
		sc->sc_RxAddressFilteringCtl |= RAFC_PassBroadcast;
	else
		sc->sc_RxAddressFilteringCtl &= ~RAFC_PassBroadcast;

	if (ifp->if_flags & IFF_PROMISC) {
		sc->sc_RxAddressFilteringCtl |= RAFC_PromiscuousMode;
		goto allmulti;
	} else
		sc->sc_RxAddressFilteringCtl &= ~RAFC_PromiscuousMode;

	/*
	 * Set normal perfect filtering mode.
	 */
	sc->sc_RxAddressFilteringCtl |= RAFC_PerfectFilteringMode(1);

	/*
	 * First, write the station address to the perfect filter
	 * table.
	 */
	sf_set_filter_perfect(sc, 0, LLADDR(ifp->if_sadl));

	/*
	 * Now set the hash bits for each multicast address in our
	 * list.
	 */
	ETHER_FIRST_MULTI(step, ec, enm);
	if (enm == NULL)
		goto done;
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			/*
			 * We must listen to a range of multicast addresses.
			 * For now, just accept all multicasts, rather than
			 * trying to set only those filter bits needed to match
			 * the range.  (At this time, the only use of address
			 * ranges is for IP multicast routing, for which the
			 * range is big enough to require all bits set.)
			 */
			goto allmulti;
		}
		sf_set_filter_hash(sc, enm->enm_addrlo);
		ETHER_NEXT_MULTI(step, enm);
	}

	/*
	 * Set "hash only multicast dest, match regardless of VLAN ID".
	 */
	sc->sc_RxAddressFilteringCtl |= RAFC_HashFilteringMode(2);
	goto done;

 allmulti:
	/*
	 * XXX RAFC_PassMulticast is sub-optimal if using VLAN mode.
	 */
	sc->sc_RxAddressFilteringCtl |= RAFC_PassMulticast;
	ifp->if_flags |= IFF_ALLMULTI;

 done:
	sf_funcreg_write(sc, SF_RxAddressFilteringCtl,
	    sc->sc_RxAddressFilteringCtl);
}

/*
 * sf_mii_read:		[mii interface function]
 *
 *	Read from the MII.
 */
int
sf_mii_read(struct device *self, int phy, int reg)
{
	struct sf_softc *sc = (void *) self;
	uint32_t v;
	int i;

	for (i = 0; i < 1000; i++) {
		v = sf_genreg_read(sc, SF_MII_PHY_REG(phy, reg));
		if (v & MiiDataValid)
			break;
		delay(1);
	}

	if ((v & MiiDataValid) == 0)
		return (0);

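	/* A data value of all ones usually indicates an absent PHY. */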
	if (MiiRegDataPort(v) == 0xffff)
		return (0);

	return (MiiRegDataPort(v));
}

/*
 * sf_mii_write:	[mii interface function]
 *
 *	Write to the MII.
 */
void
sf_mii_write(struct device *self, int phy, int reg, int val)
{
	struct sf_softc *sc = (void *) self;
	int i;

	sf_genreg_write(sc, SF_MII_PHY_REG(phy, reg), val);

	for (i = 0; i < 1000; i++) {
		if ((sf_genreg_read(sc, SF_MII_PHY_REG(phy, reg)) &
		     MiiBusy) == 0)
			return;
		delay(1);
	}

	printf("%s: MII write timed out\n", sc->sc_dev.dv_xname);
}

/*
 * sf_mii_statchg:	[mii interface function]
 *
 *	Callback from the PHY when the media changes.
 */
void
sf_mii_statchg(struct device *self)
{
	struct sf_softc *sc = (void *) self;
	uint32_t ipg;

	if (sc->sc_mii.mii_media_active & IFM_FDX) {
		sc->sc_MacConfig1 |= MC1_FullDuplex;
		ipg = 0x15;
	} else {
		sc->sc_MacConfig1 &= ~MC1_FullDuplex;
		ipg = 0x11;
	}

	sf_genreg_write(sc, SF_MacConfig1, sc->sc_MacConfig1);
	sf_macreset(sc);

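	/* Program the back-to-back inter-packet gap for the new duplex. */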
	sf_genreg_write(sc, SF_BkToBkIPG, ipg);
}

/*
 * sf_mediastatus:	[ifmedia interface function]
 *
 *	Callback from ifmedia to request current media status.
 */
void
sf_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct sf_softc *sc = ifp->if_softc;

	mii_pollstat(&sc->sc_mii);
	ifmr->ifm_status = sc->sc_mii.mii_media_status;
	ifmr->ifm_active = sc->sc_mii.mii_media_active;
}

/*
 * sf_mediachange:	[ifmedia interface function]
 *
 *	Callback from ifmedia to request a new media setting.
 */
int
sf_mediachange(struct ifnet *ifp)
{
	struct sf_softc *sc = ifp->if_softc;

	if (ifp->if_flags & IFF_UP)
		mii_mediachg(&sc->sc_mii);
	return (0);
}