/*	$NetBSD: aic6915.c,v 1.6 2002/05/03 00:04:07 thorpej Exp $	*/

/*-
 * Copyright (c) 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Device driver for the Adaptec AIC-6915 (``Starfire'')
 * 10/100 Ethernet controller.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: aic6915.c,v 1.6 2002/05/03 00:04:07 thorpej Exp $");

#include "bpfilter.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>

#include <uvm/uvm_extern.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <machine/bus.h>
#include <machine/intr.h>

#include <dev/mii/miivar.h>

#include <dev/ic/aic6915reg.h>
#include <dev/ic/aic6915var.h>

void	sf_start(struct ifnet *);
void	sf_watchdog(struct ifnet *);
int	sf_ioctl(struct ifnet *, u_long, caddr_t);
int	sf_init(struct ifnet *);
void	sf_stop(struct ifnet *, int);

void	sf_shutdown(void *);

void	sf_txintr(struct sf_softc *);
void	sf_rxintr(struct sf_softc *);
void	sf_stats_update(struct sf_softc *);

void	sf_reset(struct sf_softc *);
void	sf_macreset(struct sf_softc *);
void	sf_rxdrain(struct sf_softc *);
int	sf_add_rxbuf(struct sf_softc *, int);
uint8_t	sf_read_eeprom(struct sf_softc *, int);
void	sf_set_filter(struct sf_softc *);

int	sf_mii_read(struct device *, int, int);
void	sf_mii_write(struct device *, int, int, int);
void	sf_mii_statchg(struct device *);

void	sf_tick(void *);

int	sf_mediachange(struct ifnet *);
void	sf_mediastatus(struct ifnet *, struct ifmediareq *);

int	sf_copy_small = 0;

#define	sf_funcreg_read(sc, reg)					\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh_func, (reg))
#define	sf_funcreg_write(sc, reg, val)					\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh_func, (reg), (val))

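/*
 * When the chip is mapped through PCI I/O space, only part of its
 * register file is directly addressable: the functional registers
 * are reached through the normal handle, while the general registers
 * must be accessed indirectly by writing the target offset to
 * SF_IndirectIoAccess and moving the data through
 * SF_IndirectIoDataPort.  The accessors below hide that difference.
 */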
static __inline uint32_t
sf_reg_read(struct sf_softc *sc, bus_addr_t reg)
{

	if (__predict_false(sc->sc_iomapped)) {
		bus_space_write_4(sc->sc_st, sc->sc_sh, SF_IndirectIoAccess,
		    reg);
		return (bus_space_read_4(sc->sc_st, sc->sc_sh,
		    SF_IndirectIoDataPort));
	}

	return (bus_space_read_4(sc->sc_st, sc->sc_sh, reg));
}

static __inline void
sf_reg_write(struct sf_softc *sc, bus_addr_t reg, uint32_t val)
{

	if (__predict_false(sc->sc_iomapped)) {
		bus_space_write_4(sc->sc_st, sc->sc_sh, SF_IndirectIoAccess,
		    reg);
		bus_space_write_4(sc->sc_st, sc->sc_sh, SF_IndirectIoDataPort,
		    val);
		return;
	}

	bus_space_write_4(sc->sc_st, sc->sc_sh, reg, val);
}

#define	sf_genreg_read(sc, reg)						\
	sf_reg_read((sc), (reg) + SF_GENREG_OFFSET)
#define	sf_genreg_write(sc, reg, val)					\
	sf_reg_write((sc), (reg) + SF_GENREG_OFFSET, (val))

/*
 * sf_attach:
 *
 *	Attach a Starfire interface to the system.
 */
void
sf_attach(struct sf_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	int i, rseg, error;
	bus_dma_segment_t seg;
	u_int8_t enaddr[ETHER_ADDR_LEN];

	callout_init(&sc->sc_tick_callout);

	/*
	 * If we're I/O mapped, the functional register handle is
	 * the same as the base handle.  If we're memory mapped,
	 * carve off a chunk of the register space for the functional
	 * registers, to save on arithmetic later.
	 */
	if (sc->sc_iomapped)
		sc->sc_sh_func = sc->sc_sh;
	else {
		if ((error = bus_space_subregion(sc->sc_st, sc->sc_sh,
		    SF_GENREG_OFFSET, SF_FUNCREG_SIZE, &sc->sc_sh_func)) != 0) {
			printf("%s: unable to sub-region functional "
			    "registers, error = %d\n", sc->sc_dev.dv_xname,
			    error);
			return;
		}
	}

	/*
	 * Initialize the transmit threshold for this interface.  The
	 * manual describes the default as 4 * 16 bytes.  We start out
	 * at 10 * 16 bytes, to avoid a bunch of initial underruns on
	 * several platforms.
	 */
	sc->sc_txthresh = 10;

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat,
	    sizeof(struct sf_control_data), PAGE_SIZE, 0, &seg, 1, &rseg,
	    BUS_DMA_NOWAIT)) != 0) {
		printf("%s: unable to allocate control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	    sizeof(struct sf_control_data), (caddr_t *)&sc->sc_control_data,
	    BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
		printf("%s: unable to map control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat,
	    sizeof(struct sf_control_data), 1,
	    sizeof(struct sf_control_data), 0, BUS_DMA_NOWAIT,
	    &sc->sc_cddmamap)) != 0) {
		printf("%s: unable to create control data DMA map, "
		    "error = %d\n", sc->sc_dev.dv_xname, error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct sf_control_data), NULL,
	    BUS_DMA_NOWAIT)) != 0) {
		printf("%s: unable to load control data DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_3;
	}

	/*
	 * Create the transmit buffer DMA maps.
	 */
	for (i = 0; i < SF_NTXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    SF_NTXFRAGS, MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &sc->sc_txsoft[i].ds_dmamap)) != 0) {
			printf("%s: unable to create tx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_4;
		}
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < SF_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &sc->sc_rxsoft[i].ds_dmamap)) != 0) {
			printf("%s: unable to create rx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_5;
		}
	}

	/*
	 * Reset the chip to a known state.
	 */
	sf_reset(sc);

	/*
	 * Read the Ethernet address from the EEPROM.
	 */
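	/*
	 * The address bytes are stored in reverse order, starting at
	 * EEPROM offset 15, so walk the offsets from high to low.
	 */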
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		enaddr[i] = sf_read_eeprom(sc, (15 + (ETHER_ADDR_LEN - 1)) - i);

	printf("%s: Ethernet address %s\n", sc->sc_dev.dv_xname,
	    ether_sprintf(enaddr));

	if (sf_funcreg_read(sc, SF_PciDeviceConfig) & PDC_System64)
		printf("%s: 64-bit PCI slot detected\n", sc->sc_dev.dv_xname);

	/*
	 * Initialize our media structures and probe the MII.
	 */
	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = sf_mii_read;
	sc->sc_mii.mii_writereg = sf_mii_write;
	sc->sc_mii.mii_statchg = sf_mii_statchg;
	ifmedia_init(&sc->sc_mii.mii_media, 0, sf_mediachange,
	    sf_mediastatus);
	mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);

	strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = sf_ioctl;
	ifp->if_start = sf_start;
	ifp->if_watchdog = sf_watchdog;
	ifp->if_init = sf_init;
	ifp->if_stop = sf_stop;
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, enaddr);

	/*
	 * Make sure the interface is shutdown during reboot.
	 */
	sc->sc_sdhook = shutdownhook_establish(sf_shutdown, sc);
	if (sc->sc_sdhook == NULL)
		printf("%s: WARNING: unable to establish shutdown hook\n",
		    sc->sc_dev.dv_xname);
	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
 fail_5:
	for (i = 0; i < SF_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].ds_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_rxsoft[i].ds_dmamap);
	}
 fail_4:
	for (i = 0; i < SF_NTXDESC; i++) {
		if (sc->sc_txsoft[i].ds_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_txsoft[i].ds_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (caddr_t) sc->sc_control_data,
	    sizeof(struct sf_control_data));
 fail_1:
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
 fail_0:
	return;
}

/*
 * sf_shutdown:
 *
 *	Shutdown hook -- make sure the interface is stopped at reboot.
 */
void
sf_shutdown(void *arg)
{
	struct sf_softc *sc = arg;

	sf_stop(&sc->sc_ethercom.ec_if, 1);
}

/*
 * sf_start:		[ifnet interface function]
 *
 *	Start packet transmission on the interface.
 */
void
sf_start(struct ifnet *ifp)
{
	struct sf_softc *sc = ifp->if_softc;
	struct mbuf *m0, *m;
	struct sf_txdesc0 *txd;
	struct sf_descsoft *ds;
	bus_dmamap_t dmamap;
	int error, producer, last, opending, seg;

	/*
	 * Remember the previous number of pending transmits.
	 */
	opending = sc->sc_txpending;

	/*
	 * Find out where we're sitting.
	 */
	producer = SF_TXDINDEX_TO_HOST(
	    TDQPI_HiPrTxProducerIndex_get(
	    sf_funcreg_read(sc, SF_TxDescQueueProducerIndex)));

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.  Leave a blank one at the end for sanity's sake.
	 */
	while (sc->sc_txpending < (SF_NTXDESC - 1)) {
		/*
		 * Grab a packet off the queue.
		 */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;
		m = NULL;

		/*
		 * Get the transmit descriptor.
		 */
		txd = &sc->sc_txdescs[producer];
		ds = &sc->sc_txsoft[producer];
		dmamap = ds->ds_dmamap;

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the allotted number of frags, or we were
		 * short on resources.  In this case, we'll copy and try
		 * again.
		 */
		if (bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_WRITE|BUS_DMA_NOWAIT) != 0) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				printf("%s: unable to allocate Tx mbuf\n",
				    sc->sc_dev.dv_xname);
				break;
			}
			if (m0->m_pkthdr.len > MHLEN) {
				MCLGET(m, M_DONTWAIT);
				if ((m->m_flags & M_EXT) == 0) {
					printf("%s: unable to allocate Tx "
					    "cluster\n", sc->sc_dev.dv_xname);
					m_freem(m);
					break;
				}
			}
			m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, caddr_t));
			m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
			error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
			    m, BUS_DMA_WRITE|BUS_DMA_NOWAIT);
			if (error) {
				printf("%s: unable to load Tx buffer, "
				    "error = %d\n", sc->sc_dev.dv_xname, error);
				break;
			}
		}

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */
		IFQ_DEQUEUE(&ifp->if_snd, m0);
		if (m != NULL) {
			m_freem(m0);
			m0 = m;
		}

		/* Initialize the descriptor. */
		txd->td_word0 =
		    htole32(TD_W0_ID | TD_W0_CRCEN | m0->m_pkthdr.len);
		if (producer == (SF_NTXDESC - 1))
			txd->td_word0 |= htole32(TD_W0_END);
		txd->td_word1 = htole32(dmamap->dm_nsegs);
		for (seg = 0; seg < dmamap->dm_nsegs; seg++) {
			txd->td_frags[seg].fr_addr =
			    htole32(dmamap->dm_segs[seg].ds_addr);
			txd->td_frags[seg].fr_len =
			    htole32(dmamap->dm_segs[seg].ds_len);
		}

		/* Sync the descriptor and the DMA map. */
		SF_CDTXDSYNC(sc, producer, BUS_DMASYNC_PREWRITE);
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		/*
		 * Store a pointer to the packet so we can free it later.
		 */
		ds->ds_mbuf = m0;

		/* Advance the Tx pointer. */
		sc->sc_txpending++;
		last = producer;
		producer = SF_NEXTTX(producer);

#if NBPFILTER > 0
		/*
		 * Pass the packet to any BPF listeners.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m0);
#endif
	}

	if (sc->sc_txpending == (SF_NTXDESC - 1)) {
		/* No more slots left; notify upper layer. */
		ifp->if_flags |= IFF_OACTIVE;
	}

	if (sc->sc_txpending != opending) {
		/*
		 * We enqueued packets.  Cause a transmit interrupt to
		 * happen on the last packet we enqueued, and give the
		 * new descriptors to the chip by writing the new
		 * producer index.
		 */
		sc->sc_txdescs[last].td_word0 |= htole32(TD_W0_INTR);
		SF_CDTXDSYNC(sc, last, BUS_DMASYNC_PREWRITE);

		sf_funcreg_write(sc, SF_TxDescQueueProducerIndex,
		    TDQPI_HiPrTxProducerIndex(SF_TXDINDEX_TO_CHIP(producer)));

		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}

/*
 * sf_watchdog:		[ifnet interface function]
 *
 *	Watchdog timer handler.
 */
void
sf_watchdog(struct ifnet *ifp)
{
	struct sf_softc *sc = ifp->if_softc;

	printf("%s: device timeout\n", sc->sc_dev.dv_xname);
	ifp->if_oerrors++;

	(void) sf_init(ifp);

	/* Try to get more packets going. */
	sf_start(ifp);
}

/*
 * sf_ioctl:		[ifnet interface function]
 *
 *	Handle control requests from the operator.
 */
int
sf_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct sf_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
	int s, error;

	s = splnet();

	switch (cmd) {
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
		if (error == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			sf_set_filter(sc);
			error = 0;
		}
		break;
	}

	/* Try to get more packets going. */
	sf_start(ifp);

	splx(s);
	return (error);
}

/*
 * sf_intr:
 *
 *	Interrupt service routine.
 */
int
sf_intr(void *arg)
{
	struct sf_softc *sc = arg;
	uint32_t isr;
	int handled = 0, wantinit = 0;

	for (;;) {
		/* Reading clears all interrupts we're interested in. */
		isr = sf_funcreg_read(sc, SF_InterruptStatus);
		if ((isr & IS_PCIPadInt) == 0)
			break;

		handled = 1;

		/* Handle receive interrupts. */
		if (isr & IS_RxQ1DoneInt)
			sf_rxintr(sc);

		/* Handle transmit completion interrupts. */
		if (isr & (IS_TxDmaDoneInt|IS_TxQueueDoneInt))
			sf_txintr(sc);

		/* Handle abnormal interrupts. */
		if (isr & IS_AbnormalInterrupt) {
			/* Statistics. */
			if (isr & IS_StatisticWrapInt)
				sf_stats_update(sc);

			/* DMA errors. */
			if (isr & IS_DmaErrInt) {
				wantinit = 1;
				printf("%s: WARNING: DMA error\n",
				    sc->sc_dev.dv_xname);
			}

			/* Transmit FIFO underruns. */
			if (isr & IS_TxDataLowInt) {
				if (sc->sc_txthresh < 0xff)
					sc->sc_txthresh++;
				printf("%s: transmit FIFO underrun, new "
				    "threshold: %d bytes\n",
				    sc->sc_dev.dv_xname,
				    sc->sc_txthresh * 16);
				sf_funcreg_write(sc, SF_TransmitFrameCSR,
				    sc->sc_TransmitFrameCSR |
				    TFCSR_TransmitThreshold(sc->sc_txthresh));
				sf_funcreg_write(sc, SF_TxDescQueueCtrl,
				    sc->sc_TxDescQueueCtrl |
				    TDQC_TxHighPriorityFifoThreshold(
				    sc->sc_txthresh));
			}
		}
	}

	if (handled) {
		/* Reset the interface, if necessary. */
		if (wantinit)
			sf_init(&sc->sc_ethercom.ec_if);

		/* Try and get more packets going. */
		sf_start(&sc->sc_ethercom.ec_if);
	}

	return (handled);
}

/*
 * sf_txintr:
 *
 *	Helper -- handle transmit completion interrupts.
 */
void
sf_txintr(struct sf_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct sf_descsoft *ds;
	uint32_t cqci, tcd;
	int consumer, producer, txidx;

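	/*
	 * Drain the Tx completion ring: entries between our consumer
	 * index and the chip's producer index describe descriptors the
	 * chip is done with.  New completions may post while we work,
	 * so loop back and re-check before returning.
	 */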
 try_again:
	cqci = sf_funcreg_read(sc, SF_CompletionQueueConsumerIndex);

	consumer = CQCI_TxCompletionConsumerIndex_get(cqci);
	producer = CQPI_TxCompletionProducerIndex_get(
	    sf_funcreg_read(sc, SF_CompletionQueueProducerIndex));

	if (consumer == producer)
		return;

	ifp->if_flags &= ~IFF_OACTIVE;

	while (consumer != producer) {
		SF_CDTXCSYNC(sc, consumer, BUS_DMASYNC_POSTREAD);
		tcd = le32toh(sc->sc_txcomp[consumer].tcd_word0);

		txidx = SF_TCD_INDEX_TO_HOST(TCD_INDEX(tcd));
#ifdef DIAGNOSTIC
		if ((tcd & TCD_PR) == 0)
			printf("%s: Tx queue mismatch, index %d\n",
			    sc->sc_dev.dv_xname, txidx);
#endif
		/*
		 * NOTE: stats are updated later.  We're just
		 * releasing packets that have been DMA'd to
		 * the chip.
		 */
		ds = &sc->sc_txsoft[txidx];
		SF_CDTXDSYNC(sc, txidx, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap,
		    0, ds->ds_dmamap->dm_mapsize,
		    BUS_DMASYNC_POSTWRITE);
		m_freem(ds->ds_mbuf);
		ds->ds_mbuf = NULL;

		consumer = SF_NEXTTCD(consumer);
		sc->sc_txpending--;
	}

	/* XXXJRT -- should be KDASSERT() */
	KASSERT(sc->sc_txpending >= 0);

	/* If all packets are done, cancel the watchdog timer. */
	if (sc->sc_txpending == 0)
		ifp->if_timer = 0;

	/* Update the consumer index. */
	sf_funcreg_write(sc, SF_CompletionQueueConsumerIndex,
	    (cqci & ~CQCI_TxCompletionConsumerIndex(0x7ff)) |
	     CQCI_TxCompletionConsumerIndex(consumer));

	/* Double check for new completions. */
	goto try_again;
}

/*
 * sf_rxintr:
 *
 *	Helper -- handle receive interrupts.
 */
void
sf_rxintr(struct sf_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct sf_descsoft *ds;
	struct sf_rcd_full *rcd;
	struct mbuf *m;
	uint32_t cqci, word0;
	int consumer, producer, bufproducer, rxidx, len;

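	/*
	 * Three indices are in play here: our consumer index and the
	 * chip's producer index for the Rx completion ring, plus the
	 * Rx buffer producer index, which tells the chip which receive
	 * descriptors it may fill.
	 */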
 try_again:
	cqci = sf_funcreg_read(sc, SF_CompletionQueueConsumerIndex);

	consumer = CQCI_RxCompletionQ1ConsumerIndex_get(cqci);
	producer = CQPI_RxCompletionQ1ProducerIndex_get(
	    sf_funcreg_read(sc, SF_CompletionQueueProducerIndex));
	bufproducer = RXQ1P_RxDescQ1Producer_get(
	    sf_funcreg_read(sc, SF_RxDescQueue1Ptrs));

	if (consumer == producer)
		return;

	while (consumer != producer) {
		rcd = &sc->sc_rxcomp[consumer];
		SF_CDRXCSYNC(sc, consumer,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
		SF_CDRXCSYNC(sc, consumer,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		word0 = le32toh(rcd->rcd_word0);
		rxidx = RCD_W0_EndIndex(word0);

		ds = &sc->sc_rxsoft[rxidx];

		consumer = SF_NEXTRCD(consumer);
		bufproducer = SF_NEXTRX(bufproducer);

		if ((word0 & RCD_W0_OK) == 0) {
			SF_INIT_RXDESC(sc, rxidx);
			continue;
		}

		bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
		    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

		/*
		 * No errors; receive the packet.  Note that we have
		 * configured the Starfire to NOT transfer the CRC
		 * with the packet.
		 */
		len = RCD_W0_Length(word0);

#ifdef __NO_STRICT_ALIGNMENT
		/*
		 * Allocate a new mbuf cluster.  If that fails, we are
		 * out of memory, and must drop the packet and recycle
		 * the buffer that's already attached to this descriptor.
		 */
		m = ds->ds_mbuf;
		if (sf_add_rxbuf(sc, rxidx) != 0) {
			ifp->if_ierrors++;
			SF_INIT_RXDESC(sc, rxidx);
			bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
			    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
			continue;
		}
#else
		/*
		 * The Starfire's receive buffer must be 4-byte aligned.
		 * But this means that the data after the Ethernet header
		 * is misaligned.  We must allocate a new buffer and
		 * copy the data, shifted forward 2 bytes.
		 */
		MGETHDR(m, M_DONTWAIT, MT_DATA);
		if (m == NULL) {
 dropit:
			ifp->if_ierrors++;
			SF_INIT_RXDESC(sc, rxidx);
			bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
			    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
			continue;
		}
		if (len > (MHLEN - 2)) {
			MCLGET(m, M_DONTWAIT);
			if ((m->m_flags & M_EXT) == 0) {
				m_freem(m);
				goto dropit;
			}
		}
		m->m_data += 2;

		/*
		 * Note that we use a cluster for incoming frames, so the
		 * buffer is virtually contiguous.
		 */
		memcpy(mtod(m, caddr_t), mtod(ds->ds_mbuf, caddr_t), len);

		/* Allow the receive descriptor to continue using its mbuf. */
		SF_INIT_RXDESC(sc, rxidx);
		bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
		    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
#endif /* __NO_STRICT_ALIGNMENT */

		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = len;

#if NBPFILTER > 0
		/*
		 * Pass this up to any BPF listeners.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif /* NBPFILTER > 0 */

		/* Pass it on. */
		(*ifp->if_input)(ifp, m);
	}

	/* Update the chip's pointers. */
	sf_funcreg_write(sc, SF_CompletionQueueConsumerIndex,
	    (cqci & ~CQCI_RxCompletionQ1ConsumerIndex(0x7ff)) |
	     CQCI_RxCompletionQ1ConsumerIndex(consumer));
	sf_funcreg_write(sc, SF_RxDescQueue1Ptrs,
	    RXQ1P_RxDescQ1Producer(bufproducer));

	/* Double-check for any new completions. */
	goto try_again;
}

/*
 * sf_tick:
 *
 *	One second timer, used to tick the MII and update stats.
 */
void
sf_tick(void *arg)
{
	struct sf_softc *sc = arg;
	int s;

	s = splnet();
	mii_tick(&sc->sc_mii);
	sf_stats_update(sc);
	splx(s);

	callout_reset(&sc->sc_tick_callout, hz, sf_tick, sc);
}

/*
 * sf_stats_update:
 *
 *	Read the statistics counters.
 */
void
sf_stats_update(struct sf_softc *sc)
{
	struct sf_stats stats;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint32_t *p;
	int i;

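	/*
	 * The hardware counters are laid out contiguously as 32-bit
	 * words matching struct sf_stats; read each one and clear it
	 * so the next pass starts from zero.
	 */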
	p = &stats.TransmitOKFrames;
	for (i = 0; i < (sizeof(stats) / sizeof(uint32_t)); i++) {
		*p++ = sf_genreg_read(sc,
		    SF_STATS_BASE + (i * sizeof(uint32_t)));
		sf_genreg_write(sc, SF_STATS_BASE + (i * sizeof(uint32_t)), 0);
	}

	ifp->if_opackets += stats.TransmitOKFrames;

	ifp->if_collisions += stats.SingleCollisionFrames +
	    stats.MultipleCollisionFrames;

	ifp->if_oerrors += stats.TransmitAbortDueToExcessiveCollisions +
	    stats.TransmitAbortDueToExcessingDeferral +
	    stats.FramesLostDueToInternalTransmitErrors;

	ifp->if_ipackets += stats.ReceiveOKFrames;

	ifp->if_ierrors += stats.ReceiveCRCErrors + stats.AlignmentErrors +
	    stats.ReceiveFramesTooLong + stats.ReceiveFramesTooShort +
	    stats.ReceiveFramesJabbersError +
	    stats.FramesLostDueToInternalReceiveErrors;
}

/*
 * sf_reset:
 *
 *	Perform a soft reset on the Starfire.
 */
void
sf_reset(struct sf_softc *sc)
{
	int i;

	sf_funcreg_write(sc, SF_GeneralEthernetCtrl, 0);

	sf_macreset(sc);

	sf_funcreg_write(sc, SF_PciDeviceConfig, PDC_SoftReset);
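	/* Poll for up to 10ms (1000 * 10us) for the reset to complete. */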
	for (i = 0; i < 1000; i++) {
		delay(10);
		if ((sf_funcreg_read(sc, SF_PciDeviceConfig) &
		     PDC_SoftReset) == 0)
			break;
	}

	if (i == 1000) {
		printf("%s: reset failed to complete\n", sc->sc_dev.dv_xname);
		sf_funcreg_write(sc, SF_PciDeviceConfig, 0);
	}

	delay(1000);
}

/*
 * sf_macreset:
 *
 *	Reset the MAC portion of the Starfire.
 */
void
sf_macreset(struct sf_softc *sc)
{

	sf_genreg_write(sc, SF_MacConfig1, sc->sc_MacConfig1 | MC1_SoftRst);
	delay(1000);
	sf_genreg_write(sc, SF_MacConfig1, sc->sc_MacConfig1);
}

/*
 * sf_init:		[ifnet interface function]
 *
 *	Initialize the interface.  Must be called at splnet().
 */
int
sf_init(struct ifnet *ifp)
{
	struct sf_softc *sc = ifp->if_softc;
	struct sf_descsoft *ds;
	int i, error = 0;

	/*
	 * Cancel any pending I/O.
	 */
	sf_stop(ifp, 0);

	/*
	 * Reset the Starfire to a known state.
	 */
	sf_reset(sc);

	/* Clear the stat counters. */
	for (i = 0; i < sizeof(struct sf_stats); i += sizeof(uint32_t))
		sf_genreg_write(sc, SF_STATS_BASE + i, 0);

	/*
	 * Initialize the transmit descriptor ring.
	 */
	memset(sc->sc_txdescs, 0, sizeof(sc->sc_txdescs));
	sf_funcreg_write(sc, SF_TxDescQueueHighAddr, 0);
	sf_funcreg_write(sc, SF_HiPrTxDescQueueBaseAddr, SF_CDTXDADDR(sc, 0));
	sf_funcreg_write(sc, SF_LoPrTxDescQueueBaseAddr, 0);

	/*
	 * Initialize the transmit completion ring.
	 */
	for (i = 0; i < SF_NTCD; i++) {
		sc->sc_txcomp[i].tcd_word0 = htole32(TCD_DMA_ID);
		SF_CDTXCSYNC(sc, i, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	}
	sf_funcreg_write(sc, SF_CompletionQueueHighAddr, 0);
	sf_funcreg_write(sc, SF_TxCompletionQueueCtrl, SF_CDTXCADDR(sc, 0));

	/*
	 * Initialize the receive descriptor ring.
	 */
	for (i = 0; i < SF_NRXDESC; i++) {
		ds = &sc->sc_rxsoft[i];
		if (ds->ds_mbuf == NULL) {
			if ((error = sf_add_rxbuf(sc, i)) != 0) {
				printf("%s: unable to allocate or map rx "
				    "buffer %d, error = %d\n",
				    sc->sc_dev.dv_xname, i, error);
				/*
				 * XXX Should attempt to run with fewer receive
				 * XXX buffers instead of just failing.
				 */
				sf_rxdrain(sc);
				goto out;
			}
		} else
			SF_INIT_RXDESC(sc, i);
	}
	sf_funcreg_write(sc, SF_RxDescQueueHighAddress, 0);
	sf_funcreg_write(sc, SF_RxDescQueue1LowAddress, SF_CDRXDADDR(sc, 0));
	sf_funcreg_write(sc, SF_RxDescQueue2LowAddress, 0);

	/*
	 * Initialize the receive completion ring.
	 */
	for (i = 0; i < SF_NRCD; i++) {
		sc->sc_rxcomp[i].rcd_word0 = htole32(RCD_W0_ID);
		sc->sc_rxcomp[i].rcd_word1 = 0;
		sc->sc_rxcomp[i].rcd_word2 = 0;
		sc->sc_rxcomp[i].rcd_timestamp = 0;
		SF_CDRXCSYNC(sc, i, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	}
	sf_funcreg_write(sc, SF_RxCompletionQueue1Ctrl, SF_CDRXCADDR(sc, 0) |
	    RCQ1C_RxCompletionQ1Type(3));
	sf_funcreg_write(sc, SF_RxCompletionQueue2Ctrl, 0);

	/*
	 * Initialize the Tx CSR.
	 */
	sc->sc_TransmitFrameCSR = 0;
	sf_funcreg_write(sc, SF_TransmitFrameCSR,
	    sc->sc_TransmitFrameCSR |
	    TFCSR_TransmitThreshold(sc->sc_txthresh));

	/*
	 * Initialize the Tx descriptor control register.
	 */
	sc->sc_TxDescQueueCtrl = TDQC_SkipLength(0) |
	    TDQC_TxDmaBurstSize(4) |	/* default */
	    TDQC_MinFrameSpacing(3) |	/* 128 bytes */
	    TDQC_TxDescType(0);
	sf_funcreg_write(sc, SF_TxDescQueueCtrl,
	    sc->sc_TxDescQueueCtrl |
	    TDQC_TxHighPriorityFifoThreshold(sc->sc_txthresh));

	/*
	 * Initialize the Rx descriptor control registers.
	 */
	sf_funcreg_write(sc, SF_RxDescQueue1Ctrl,
	    RDQ1C_RxQ1BufferLength(MCLBYTES) |
	    RDQ1C_RxDescSpacing(0));
	sf_funcreg_write(sc, SF_RxDescQueue2Ctrl, 0);

	/*
	 * Initialize the Tx descriptor producer indices.
	 */
	sf_funcreg_write(sc, SF_TxDescQueueProducerIndex,
	    TDQPI_HiPrTxProducerIndex(0) |
	    TDQPI_LoPrTxProducerIndex(0));

	/*
	 * Initialize the Rx descriptor producer indices.
	 */
	sf_funcreg_write(sc, SF_RxDescQueue1Ptrs,
	    RXQ1P_RxDescQ1Producer(SF_NRXDESC - 1));
	sf_funcreg_write(sc, SF_RxDescQueue2Ptrs,
	    RXQ2P_RxDescQ2Producer(0));

	/*
	 * Initialize the Tx and Rx completion queue consumer indices.
	 */
	sf_funcreg_write(sc, SF_CompletionQueueConsumerIndex,
	    CQCI_TxCompletionConsumerIndex(0) |
	    CQCI_RxCompletionQ1ConsumerIndex(0));
	sf_funcreg_write(sc, SF_RxHiPrCompletionPtrs, 0);

	/*
	 * Initialize the Rx DMA control register.
	 */
	sf_funcreg_write(sc, SF_RxDmaCtrl,
	    RDC_RxHighPriorityThreshold(6) |	/* default */
	    RDC_RxBurstSize(4));		/* default */

	/*
	 * Set the receive filter.
	 */
	sc->sc_RxAddressFilteringCtl = 0;
	sf_set_filter(sc);

	/*
	 * Set MacConfig1.  When we set the media, MacConfig1 will
	 * actually be written and the MAC part reset.
	 */
	sc->sc_MacConfig1 = MC1_PadEn;

	/*
	 * Set the media.
	 */
	mii_mediachg(&sc->sc_mii);

	/*
	 * Initialize the interrupt register.
	 */
	sc->sc_InterruptEn = IS_PCIPadInt | IS_RxQ1DoneInt |
	    IS_TxQueueDoneInt | IS_TxDmaDoneInt | IS_DmaErrInt |
	    IS_StatisticWrapInt;
	sf_funcreg_write(sc, SF_InterruptEn, sc->sc_InterruptEn);

	sf_funcreg_write(sc, SF_PciDeviceConfig, PDC_IntEnable |
	    PDC_PCIMstDmaEn | (1 << PDC_FifoThreshold_SHIFT));

	/*
	 * Start the transmit and receive processes.
	 */
	sf_funcreg_write(sc, SF_GeneralEthernetCtrl,
	    GEC_TxDmaEn|GEC_RxDmaEn|GEC_TransmitEn|GEC_ReceiveEn);

	/* Start the one second clock. */
	callout_reset(&sc->sc_tick_callout, hz, sf_tick, sc);

	/*
	 * Note that the interface is now running.
	 */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

 out:
	if (error) {
		ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
		ifp->if_timer = 0;
		printf("%s: interface not running\n", sc->sc_dev.dv_xname);
	}
	return (error);
}

/*
 * sf_rxdrain:
 *
 *	Drain the receive queue.
 */
void
sf_rxdrain(struct sf_softc *sc)
{
	struct sf_descsoft *ds;
	int i;

	for (i = 0; i < SF_NRXDESC; i++) {
		ds = &sc->sc_rxsoft[i];
		if (ds->ds_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
			m_freem(ds->ds_mbuf);
			ds->ds_mbuf = NULL;
		}
	}
}

/*
 * sf_stop:		[ifnet interface function]
 *
 *	Stop transmission on the interface.
 */
void
sf_stop(struct ifnet *ifp, int disable)
{
	struct sf_softc *sc = ifp->if_softc;
	struct sf_descsoft *ds;
	int i;

	/* Stop the one second clock. */
	callout_stop(&sc->sc_tick_callout);

	/* Down the MII. */
	mii_down(&sc->sc_mii);

	/* Disable interrupts. */
	sf_funcreg_write(sc, SF_InterruptEn, 0);

	/* Stop the transmit and receive processes. */
	sf_funcreg_write(sc, SF_GeneralEthernetCtrl, 0);

	/*
	 * Release any queued transmit buffers.
	 */
	for (i = 0; i < SF_NTXDESC; i++) {
		ds = &sc->sc_txsoft[i];
		if (ds->ds_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
			m_freem(ds->ds_mbuf);
			ds->ds_mbuf = NULL;
		}
	}

	if (disable)
		sf_rxdrain(sc);

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;
}

/*
 * sf_read_eeprom:
 *
 *	Read from the Starfire EEPROM.
 */
uint8_t
sf_read_eeprom(struct sf_softc *sc, int offset)
{
	uint32_t reg;

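	/*
	 * The EEPROM contents appear as an array of 32-bit words;
	 * fetch the word containing the requested byte, then shift
	 * the byte out.
	 */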
	reg = sf_genreg_read(sc, SF_EEPROM_BASE + (offset & ~3));

	return ((reg >> (8 * (offset & 3))) & 0xff);
}

/*
 * sf_add_rxbuf:
 *
 *	Add a receive buffer to the indicated descriptor.
 */
int
sf_add_rxbuf(struct sf_softc *sc, int idx)
{
	struct sf_descsoft *ds = &sc->sc_rxsoft[idx];
	struct mbuf *m;
	int error;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);

	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return (ENOBUFS);
	}

	if (ds->ds_mbuf != NULL)
		bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);

	ds->ds_mbuf = m;

	error = bus_dmamap_load(sc->sc_dmat, ds->ds_dmamap,
	    m->m_ext.ext_buf, m->m_ext.ext_size, NULL,
	    BUS_DMA_READ|BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: can't load rx DMA map %d, error = %d\n",
		    sc->sc_dev.dv_xname, idx, error);
		panic("sf_add_rxbuf");	/* XXX */
	}

	bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
	    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);

	SF_INIT_RXDESC(sc, idx);

	return (0);
}

static void
sf_set_filter_perfect(struct sf_softc *sc, int slot, uint8_t *enaddr)
{
	uint32_t reg0, reg1, reg2;

	reg0 = enaddr[5] | (enaddr[4] << 8);
	reg1 = enaddr[3] | (enaddr[2] << 8);
	reg2 = enaddr[1] | (enaddr[0] << 8);

	sf_genreg_write(sc, SF_PERFECT_BASE + (slot * 0x10) + 0, reg0);
	sf_genreg_write(sc, SF_PERFECT_BASE + (slot * 0x10) + 4, reg1);
	sf_genreg_write(sc, SF_PERFECT_BASE + (slot * 0x10) + 8, reg2);
}

static void
sf_set_filter_hash(struct sf_softc *sc, uint8_t *enaddr)
{
	uint32_t hash, slot, reg;

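	/*
	 * Use the top 9 bits of the big-endian CRC of the address:
	 * the upper 5 bits select one of 32 hash table slots, the
	 * lower 4 bits a bit within that slot.
	 */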
	hash = ether_crc32_be(enaddr, ETHER_ADDR_LEN) >> 23;
	slot = hash >> 4;

	reg = sf_genreg_read(sc, SF_HASH_BASE + (slot * 0x10));
	reg |= 1 << (hash & 0xf);
	sf_genreg_write(sc, SF_HASH_BASE + (slot * 0x10), reg);
}

/*
 * sf_set_filter:
 *
 *	Set the Starfire receive filter.
 */
void
sf_set_filter(struct sf_softc *sc)
{
	struct ethercom *ec = &sc->sc_ethercom;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	int i;

	/* Start by clearing the perfect and hash tables. */
	for (i = 0; i < SF_PERFECT_SIZE; i += sizeof(uint32_t))
		sf_genreg_write(sc, SF_PERFECT_BASE + i, 0);

	for (i = 0; i < SF_HASH_SIZE; i += sizeof(uint32_t))
		sf_genreg_write(sc, SF_HASH_BASE + i, 0);

	/*
	 * Clear the perfect and hash mode bits.
	 */
	sc->sc_RxAddressFilteringCtl &=
	    ~(RAFC_PerfectFilteringMode(3) | RAFC_HashFilteringMode(3));

	if (ifp->if_flags & IFF_BROADCAST)
		sc->sc_RxAddressFilteringCtl |= RAFC_PassBroadcast;
	else
		sc->sc_RxAddressFilteringCtl &= ~RAFC_PassBroadcast;

	if (ifp->if_flags & IFF_PROMISC) {
		sc->sc_RxAddressFilteringCtl |= RAFC_PromiscuousMode;
		goto allmulti;
	} else
		sc->sc_RxAddressFilteringCtl &= ~RAFC_PromiscuousMode;

	/*
	 * Set normal perfect filtering mode.
	 */
	sc->sc_RxAddressFilteringCtl |= RAFC_PerfectFilteringMode(1);

	/*
	 * First, write the station address to the perfect filter
	 * table.
	 */
	sf_set_filter_perfect(sc, 0, LLADDR(ifp->if_sadl));

	/*
	 * Now set the hash bits for each multicast address in our
	 * list.
	 */
	ETHER_FIRST_MULTI(step, ec, enm);
	if (enm == NULL)
		goto done;
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			/*
			 * We must listen to a range of multicast addresses.
			 * For now, just accept all multicasts, rather than
			 * trying to set only those filter bits needed to match
			 * the range.  (At this time, the only use of address
			 * ranges is for IP multicast routing, for which the
			 * range is big enough to require all bits set.)
			 */
			goto allmulti;
		}
		sf_set_filter_hash(sc, enm->enm_addrlo);
		ETHER_NEXT_MULTI(step, enm);
	}

	/*
	 * Set "hash only multicast dest, match regardless of VLAN ID".
	 */
	sc->sc_RxAddressFilteringCtl |= RAFC_HashFilteringMode(2);
	goto done;

 allmulti:
	/*
	 * XXX RAFC_PassMulticast is sub-optimal if using VLAN mode.
	 */
	sc->sc_RxAddressFilteringCtl |= RAFC_PassMulticast;
	ifp->if_flags |= IFF_ALLMULTI;

 done:
	sf_funcreg_write(sc, SF_RxAddressFilteringCtl,
	    sc->sc_RxAddressFilteringCtl);
}

/*
 * sf_mii_read:		[mii interface function]
 *
 *	Read from the MII.
 */
int
sf_mii_read(struct device *self, int phy, int reg)
{
	struct sf_softc *sc = (void *) self;
	uint32_t v;
	int i;

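	/* Poll for up to ~1ms (1000 * 1us) for valid data to appear. */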
	for (i = 0; i < 1000; i++) {
		v = sf_genreg_read(sc, SF_MII_PHY_REG(phy, reg));
		if (v & MiiDataValid)
			break;
		delay(1);
	}

	if ((v & MiiDataValid) == 0)
		return (0);

	if (MiiRegDataPort(v) == 0xffff)
		return (0);

	return (MiiRegDataPort(v));
}

/*
 * sf_mii_write:	[mii interface function]
 *
 *	Write to the MII.
 */
void
sf_mii_write(struct device *self, int phy, int reg, int val)
{
	struct sf_softc *sc = (void *) self;
	int i;

	sf_genreg_write(sc, SF_MII_PHY_REG(phy, reg), val);

	for (i = 0; i < 1000; i++) {
		if ((sf_genreg_read(sc, SF_MII_PHY_REG(phy, reg)) &
		     MiiBusy) == 0)
			return;
		delay(1);
	}

	printf("%s: MII write timed out\n", sc->sc_dev.dv_xname);
}

/*
 * sf_mii_statchg:	[mii interface function]
 *
 *	Callback from the PHY when the media changes.
 */
void
sf_mii_statchg(struct device *self)
{
	struct sf_softc *sc = (void *) self;
	uint32_t ipg;

	if (sc->sc_mii.mii_media_active & IFM_FDX) {
		sc->sc_MacConfig1 |= MC1_FullDuplex;
		ipg = 0x15;
	} else {
		sc->sc_MacConfig1 &= ~MC1_FullDuplex;
		ipg = 0x11;
	}

	sf_genreg_write(sc, SF_MacConfig1, sc->sc_MacConfig1);
	sf_macreset(sc);

	sf_genreg_write(sc, SF_BkToBkIPG, ipg);
}

/*
 * sf_mediastatus:	[ifmedia interface function]
 *
 *	Callback from ifmedia to request current media status.
 */
void
sf_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct sf_softc *sc = ifp->if_softc;

	mii_pollstat(&sc->sc_mii);
	ifmr->ifm_status = sc->sc_mii.mii_media_status;
	ifmr->ifm_active = sc->sc_mii.mii_media_active;
}

/*
 * sf_mediachange:	[ifmedia interface function]
 *
 *	Callback from ifmedia to request new media setting.
 */
int
sf_mediachange(struct ifnet *ifp)
{
	struct sf_softc *sc = ifp->if_softc;

	if (ifp->if_flags & IFF_UP)
		mii_mediachg(&sc->sc_mii);
	return (0);
}