1 /* $NetBSD: if_sip.c,v 1.78.2.6 2005/02/04 11:46:38 skrll Exp $ */
2
3 /*-
4 * Copyright (c) 2001, 2002 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Jason R. Thorpe.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement:
20 * This product includes software developed by the NetBSD
21 * Foundation, Inc. and its contributors.
22 * 4. Neither the name of The NetBSD Foundation nor the names of its
23 * contributors may be used to endorse or promote products derived
24 * from this software without specific prior written permission.
25 *
26 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
27 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
30 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36 * POSSIBILITY OF SUCH DAMAGE.
37 */
38
39 /*-
40 * Copyright (c) 1999 Network Computer, Inc.
41 * All rights reserved.
42 *
43 * Redistribution and use in source and binary forms, with or without
44 * modification, are permitted provided that the following conditions
45 * are met:
46 * 1. Redistributions of source code must retain the above copyright
47 * notice, this list of conditions and the following disclaimer.
48 * 2. Redistributions in binary form must reproduce the above copyright
49 * notice, this list of conditions and the following disclaimer in the
50 * documentation and/or other materials provided with the distribution.
51 * 3. Neither the name of Network Computer, Inc. nor the names of its
52 * contributors may be used to endorse or promote products derived
53 * from this software without specific prior written permission.
54 *
55 * THIS SOFTWARE IS PROVIDED BY NETWORK COMPUTER, INC. AND CONTRIBUTORS
56 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
57 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
58 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
59 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
60 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
61 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
62 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
63 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
64 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
65 * POSSIBILITY OF SUCH DAMAGE.
66 */
67
68 /*
69 * Device driver for the Silicon Integrated Systems SiS 900,
70 * SiS 7016 10/100, National Semiconductor DP83815 10/100, and
71 * National Semiconductor DP83820 10/100/1000 PCI Ethernet
72 * controllers.
73 *
74 * Originally written to support the SiS 900 by Jason R. Thorpe for
75 * Network Computer, Inc.
76 *
77 * TODO:
78 *
79 * - Reduce the Rx interrupt load.
80 */
81
82 #include <sys/cdefs.h>
83 __KERNEL_RCSID(0, "$NetBSD: if_sip.c,v 1.78.2.6 2005/02/04 11:46:38 skrll Exp $");
84
85 #include "bpfilter.h"
86 #include "rnd.h"
87
88 #include <sys/param.h>
89 #include <sys/systm.h>
90 #include <sys/callout.h>
91 #include <sys/mbuf.h>
92 #include <sys/malloc.h>
93 #include <sys/kernel.h>
94 #include <sys/socket.h>
95 #include <sys/ioctl.h>
96 #include <sys/errno.h>
97 #include <sys/device.h>
98 #include <sys/queue.h>
99
100 #include <uvm/uvm_extern.h> /* for PAGE_SIZE */
101
102 #if NRND > 0
103 #include <sys/rnd.h>
104 #endif
105
106 #include <net/if.h>
107 #include <net/if_dl.h>
108 #include <net/if_media.h>
109 #include <net/if_ether.h>
110
111 #if NBPFILTER > 0
112 #include <net/bpf.h>
113 #endif
114
115 #include <machine/bus.h>
116 #include <machine/intr.h>
117 #include <machine/endian.h>
118
119 #include <dev/mii/mii.h>
120 #include <dev/mii/miivar.h>
121 #include <dev/mii/mii_bitbang.h>
122
123 #include <dev/pci/pcireg.h>
124 #include <dev/pci/pcivar.h>
125 #include <dev/pci/pcidevs.h>
126
127 #include <dev/pci/if_sipreg.h>
128
129 #ifdef DP83820 /* DP83820 Gigabit Ethernet */
130 #define SIP_DECL(x) __CONCAT(gsip_,x)
131 #else /* SiS900 and DP83815 */
132 #define SIP_DECL(x) __CONCAT(sip_,x)
133 #endif
134
135 #define SIP_STR(x) __STRING(SIP_DECL(x))
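/*
 * Annotation (not part of the original source): SIP_DECL() is what lets
 * this single file build as either the "sip" or the "gsip" driver.  For
 * example, with DP83820 defined,
 *
 *	static void SIP_DECL(start)(struct ifnet *);
 *
 * declares gsip_start(); without DP83820 the same line declares
 * sip_start().
 */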
136
137 /*
138 * Transmit descriptor list size. This is arbitrary, but allocate
139 * enough descriptors for 256 pending transmissions, reserving 8
140 * descriptors per packet (16 per packet on the DP83820, for jumbo frames).
141 *
142 * This MUST work out to a power of 2.
143 */
144 #ifdef DP83820
145 #define SIP_NTXSEGS 64
146 #define SIP_NTXSEGS_ALLOC 16
147 #else
148 #define SIP_NTXSEGS 16
149 #define SIP_NTXSEGS_ALLOC 8
150 #endif
151
152 #define SIP_TXQUEUELEN 256
153 #define SIP_NTXDESC (SIP_TXQUEUELEN * SIP_NTXSEGS_ALLOC)
154 #define SIP_NTXDESC_MASK (SIP_NTXDESC - 1)
155 #define SIP_NEXTTX(x) (((x) + 1) & SIP_NTXDESC_MASK)
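/*
 * Worked example (annotation, not original code): with the values above,
 * SIP_NTXDESC is 256 * 8 == 2048 descriptors (256 * 16 == 4096 on the
 * DP83820).  Both are powers of 2, which is why SIP_NEXTTX() can advance
 * and wrap a ring index with a simple mask rather than a modulo:
 *
 *	nexttx = (nexttx + 1) & SIP_NTXDESC_MASK;
 */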
156
157 #if defined(DP83820)
158 #define TX_DMAMAP_SIZE ETHER_MAX_LEN_JUMBO
159 #else
160 #define TX_DMAMAP_SIZE MCLBYTES
161 #endif
162
163 /*
164 * Receive descriptor list size. We have one Rx buffer per incoming
165 * packet, so this logic is a little simpler.
166 *
167 * Actually, on the DP83820, we allow the packet to consume more than
168 * one buffer, in order to support jumbo Ethernet frames. In that
169 * case, a packet may consume up to 5 buffers (assuming a 2048 byte
170 * mbuf cluster).  256 receive buffers hold only 51 maximum-size packets,
171 * so we'd better be quick about handling receive interrupts.
172 */
173 #if defined(DP83820)
174 #define SIP_NRXDESC 256
175 #else
176 #define SIP_NRXDESC 128
177 #endif /* DP83820 */
178 #define SIP_NRXDESC_MASK (SIP_NRXDESC - 1)
179 #define SIP_NEXTRX(x) (((x) + 1) & SIP_NRXDESC_MASK)
180
181 /*
182 * Control structures are DMA'd to the SiS900 chip. We allocate them in
183 * a single clump that maps to a single DMA segment to make several things
184 * easier.
185 */
186 struct sip_control_data {
187 /*
188 * The transmit descriptors.
189 */
190 struct sip_desc scd_txdescs[SIP_NTXDESC];
191
192 /*
193 * The receive descriptors.
194 */
195 struct sip_desc scd_rxdescs[SIP_NRXDESC];
196 };
197
198 #define SIP_CDOFF(x) offsetof(struct sip_control_data, x)
199 #define SIP_CDTXOFF(x) SIP_CDOFF(scd_txdescs[(x)])
200 #define SIP_CDRXOFF(x) SIP_CDOFF(scd_rxdescs[(x)])
201
202 /*
203 * Software state for transmit jobs.
204 */
205 struct sip_txsoft {
206 struct mbuf *txs_mbuf; /* head of our mbuf chain */
207 bus_dmamap_t txs_dmamap; /* our DMA map */
208 int txs_firstdesc; /* first descriptor in packet */
209 int txs_lastdesc; /* last descriptor in packet */
210 SIMPLEQ_ENTRY(sip_txsoft) txs_q;
211 };
212
213 SIMPLEQ_HEAD(sip_txsq, sip_txsoft);
214
215 /*
216 * Software state for receive jobs.
217 */
218 struct sip_rxsoft {
219 struct mbuf *rxs_mbuf; /* head of our mbuf chain */
220 bus_dmamap_t rxs_dmamap; /* our DMA map */
221 };
222
223 /*
224 * Software state per device.
225 */
226 struct sip_softc {
227 struct device sc_dev; /* generic device information */
228 bus_space_tag_t sc_st; /* bus space tag */
229 bus_space_handle_t sc_sh; /* bus space handle */
230 bus_dma_tag_t sc_dmat; /* bus DMA tag */
231 struct ethercom sc_ethercom; /* ethernet common data */
232 void *sc_sdhook; /* shutdown hook */
233
234 const struct sip_product *sc_model; /* which model are we? */
235 int sc_rev; /* chip revision */
236
237 void *sc_ih; /* interrupt cookie */
238
239 struct mii_data sc_mii; /* MII/media information */
240
241 struct callout sc_tick_ch; /* tick callout */
242
243 bus_dmamap_t sc_cddmamap; /* control data DMA map */
244 #define sc_cddma sc_cddmamap->dm_segs[0].ds_addr
245
246 /*
247 * Software state for transmit and receive descriptors.
248 */
249 struct sip_txsoft sc_txsoft[SIP_TXQUEUELEN];
250 struct sip_rxsoft sc_rxsoft[SIP_NRXDESC];
251
252 /*
253 * Control data structures.
254 */
255 struct sip_control_data *sc_control_data;
256 #define sc_txdescs sc_control_data->scd_txdescs
257 #define sc_rxdescs sc_control_data->scd_rxdescs
258
259 #ifdef SIP_EVENT_COUNTERS
260 /*
261 * Event counters.
262 */
263 struct evcnt sc_ev_txsstall; /* Tx stalled due to no txs */
264 struct evcnt sc_ev_txdstall; /* Tx stalled due to no txd */
265 struct evcnt sc_ev_txforceintr; /* Tx interrupts forced */
266 struct evcnt sc_ev_txdintr; /* Tx descriptor interrupts */
267 struct evcnt sc_ev_txiintr; /* Tx idle interrupts */
268 struct evcnt sc_ev_rxintr; /* Rx interrupts */
269 struct evcnt sc_ev_hiberr; /* HIBERR interrupts */
270 struct evcnt sc_ev_rxpause; /* PAUSE received */
271 #ifdef DP83820
272 struct evcnt sc_ev_txpause; /* PAUSE transmitted */
273 struct evcnt sc_ev_rxipsum; /* IP checksums checked in-bound */
274 struct evcnt sc_ev_rxtcpsum; /* TCP checksums checked in-bound */
275 struct evcnt sc_ev_rxudpsum; /* UDP checksums checked in-bound */
276 struct evcnt sc_ev_txipsum; /* IP checksums comp. out-bound */
277 struct evcnt sc_ev_txtcpsum; /* TCP checksums comp. out-bound */
278 struct evcnt sc_ev_txudpsum; /* UDP checksums comp. out-bound */
279 #endif /* DP83820 */
280 #endif /* SIP_EVENT_COUNTERS */
281
282 u_int32_t sc_txcfg; /* prototype TXCFG register */
283 u_int32_t sc_rxcfg; /* prototype RXCFG register */
284 u_int32_t sc_imr; /* prototype IMR register */
285 u_int32_t sc_rfcr; /* prototype RFCR register */
286
287 u_int32_t sc_cfg; /* prototype CFG register */
288
289 #ifdef DP83820
290 u_int32_t sc_gpior; /* prototype GPIOR register */
291 #endif /* DP83820 */
292
293 u_int32_t sc_tx_fill_thresh; /* transmit fill threshold */
294 u_int32_t sc_tx_drain_thresh; /* transmit drain threshold */
295
296 u_int32_t sc_rx_drain_thresh; /* receive drain threshold */
297
298 int sc_flowflags; /* 802.3x flow control flags */
299 #ifdef DP83820
300 int sc_rx_flow_thresh; /* Rx FIFO threshold for flow control */
301 #else
302 int sc_paused; /* paused indication */
303 #endif
304
305 int sc_txfree; /* number of free Tx descriptors */
306 int sc_txnext; /* next ready Tx descriptor */
307 int sc_txwin; /* Tx descriptors since last intr */
308
309 struct sip_txsq sc_txfreeq; /* free Tx descsofts */
310 struct sip_txsq sc_txdirtyq; /* dirty Tx descsofts */
311
312 int sc_rxptr; /* next ready Rx descriptor/descsoft */
313 #if defined(DP83820)
314 int sc_rxdiscard;
315 int sc_rxlen;
316 struct mbuf *sc_rxhead;
317 struct mbuf *sc_rxtail;
318 struct mbuf **sc_rxtailp;
319 #endif /* DP83820 */
320
321 #if NRND > 0
322 rndsource_element_t rnd_source; /* random source */
323 #endif
324 };
325
326 #ifdef DP83820
327 #define SIP_RXCHAIN_RESET(sc) \
328 do { \
329 (sc)->sc_rxtailp = &(sc)->sc_rxhead; \
330 *(sc)->sc_rxtailp = NULL; \
331 (sc)->sc_rxlen = 0; \
332 } while (/*CONSTCOND*/0)
333
334 #define SIP_RXCHAIN_LINK(sc, m) \
335 do { \
336 *(sc)->sc_rxtailp = (sc)->sc_rxtail = (m); \
337 (sc)->sc_rxtailp = &(m)->m_next; \
338 } while (/*CONSTCOND*/0)
339 #endif /* DP83820 */
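/*
 * Annotation (not in the original file): sc_rxtailp always points at the
 * m_next field that should receive the next Rx fragment, so building a
 * multi-buffer jumbo packet needs no list walking:
 *
 *	SIP_RXCHAIN_RESET(sc);
 *	SIP_RXCHAIN_LINK(sc, m1);	sc_rxhead is now m1
 *	SIP_RXCHAIN_LINK(sc, m2);	m1->m_next is now m2
 *
 * sip_rxintr() later terminates the chain with *sc_rxtailp = NULL before
 * handing the head mbuf up the stack.
 */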
340
341 #ifdef SIP_EVENT_COUNTERS
342 #define SIP_EVCNT_INCR(ev) (ev)->ev_count++
343 #else
344 #define SIP_EVCNT_INCR(ev) /* nothing */
345 #endif
346
347 #define SIP_CDTXADDR(sc, x) ((sc)->sc_cddma + SIP_CDTXOFF((x)))
348 #define SIP_CDRXADDR(sc, x) ((sc)->sc_cddma + SIP_CDRXOFF((x)))
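/*
 * Illustrative sketch (annotation, compiled out): a hypothetical helper,
 * not part of the driver, showing how a descriptor index becomes the
 * physical address programmed into the chip -- the DMA base address of
 * the control-data clump plus the descriptor's offset within
 * struct sip_control_data.
 */
#if 0	/* example only */
static __inline bus_addr_t
sip_example_txdescaddr(struct sip_softc *sc, int x)
{
	return (sc->sc_cddma + SIP_CDTXOFF(x));	/* == SIP_CDTXADDR(sc, x) */
}
#endif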
349
350 #define SIP_CDTXSYNC(sc, x, n, ops) \
351 do { \
352 int __x, __n; \
353 \
354 __x = (x); \
355 __n = (n); \
356 \
357 /* If it will wrap around, sync to the end of the ring. */ \
358 if ((__x + __n) > SIP_NTXDESC) { \
359 bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \
360 SIP_CDTXOFF(__x), sizeof(struct sip_desc) * \
361 (SIP_NTXDESC - __x), (ops)); \
362 __n -= (SIP_NTXDESC - __x); \
363 __x = 0; \
364 } \
365 \
366 /* Now sync whatever is left. */ \
367 bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \
368 SIP_CDTXOFF(__x), sizeof(struct sip_desc) * __n, (ops)); \
369 } while (0)
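/*
 * Worked example (annotation): SIP_CDTXSYNC(sc, SIP_NTXDESC - 2, 4, ops)
 * crosses the end of the ring, so the macro issues two bus_dmamap_sync()
 * calls -- one covering the last two descriptors, and a second covering
 * descriptors 0 and 1 after the wrap.
 */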
370
371 #define SIP_CDRXSYNC(sc, x, ops) \
372 bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \
373 SIP_CDRXOFF((x)), sizeof(struct sip_desc), (ops))
374
375 #ifdef DP83820
376 #define SIP_INIT_RXDESC_EXTSTS __sipd->sipd_extsts = 0;
377 #define SIP_RXBUF_LEN (MCLBYTES - 8)
378 #else
379 #define SIP_INIT_RXDESC_EXTSTS /* nothing */
380 #define SIP_RXBUF_LEN (MCLBYTES - 1) /* field width */
381 #endif
382 #define SIP_INIT_RXDESC(sc, x) \
383 do { \
384 struct sip_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)]; \
385 struct sip_desc *__sipd = &(sc)->sc_rxdescs[(x)]; \
386 \
387 __sipd->sipd_link = \
388 htole32(SIP_CDRXADDR((sc), SIP_NEXTRX((x)))); \
389 __sipd->sipd_bufptr = \
390 htole32(__rxs->rxs_dmamap->dm_segs[0].ds_addr); \
391 __sipd->sipd_cmdsts = htole32(CMDSTS_INTR | \
392 (SIP_RXBUF_LEN & CMDSTS_SIZE_MASK)); \
393 SIP_INIT_RXDESC_EXTSTS \
394 SIP_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
395 } while (0)
396
397 #define SIP_CHIP_VERS(sc, v, p, r) \
398 ((sc)->sc_model->sip_vendor == (v) && \
399 (sc)->sc_model->sip_product == (p) && \
400 (sc)->sc_rev == (r))
401
402 #define SIP_CHIP_MODEL(sc, v, p) \
403 ((sc)->sc_model->sip_vendor == (v) && \
404 (sc)->sc_model->sip_product == (p))
405
406 #if !defined(DP83820)
407 #define SIP_SIS900_REV(sc, rev) \
408 SIP_CHIP_VERS((sc), PCI_VENDOR_SIS, PCI_PRODUCT_SIS_900, (rev))
409 #endif
410
411 #define SIP_TIMEOUT 1000
412
413 static void SIP_DECL(start)(struct ifnet *);
414 static void SIP_DECL(watchdog)(struct ifnet *);
415 static int SIP_DECL(ioctl)(struct ifnet *, u_long, caddr_t);
416 static int SIP_DECL(init)(struct ifnet *);
417 static void SIP_DECL(stop)(struct ifnet *, int);
418
419 static void SIP_DECL(shutdown)(void *);
420
421 static void SIP_DECL(reset)(struct sip_softc *);
422 static void SIP_DECL(rxdrain)(struct sip_softc *);
423 static int SIP_DECL(add_rxbuf)(struct sip_softc *, int);
424 static void SIP_DECL(read_eeprom)(struct sip_softc *, int, int,
425 u_int16_t *);
426 static void SIP_DECL(tick)(void *);
427
428 #if !defined(DP83820)
429 static void SIP_DECL(sis900_set_filter)(struct sip_softc *);
430 #endif /* ! DP83820 */
431 static void SIP_DECL(dp83815_set_filter)(struct sip_softc *);
432
433 #if defined(DP83820)
434 static void SIP_DECL(dp83820_read_macaddr)(struct sip_softc *,
435 const struct pci_attach_args *, u_int8_t *);
436 #else
437 static void SIP_DECL(sis900_eeprom_delay)(struct sip_softc *sc);
438 static void SIP_DECL(sis900_read_macaddr)(struct sip_softc *,
439 const struct pci_attach_args *, u_int8_t *);
440 static void SIP_DECL(dp83815_read_macaddr)(struct sip_softc *,
441 const struct pci_attach_args *, u_int8_t *);
442 #endif /* DP83820 */
443
444 static int SIP_DECL(intr)(void *);
445 static void SIP_DECL(txintr)(struct sip_softc *);
446 static void SIP_DECL(rxintr)(struct sip_softc *);
447
448 #if defined(DP83820)
449 static int SIP_DECL(dp83820_mii_readreg)(struct device *, int, int);
450 static void SIP_DECL(dp83820_mii_writereg)(struct device *, int, int, int);
451 static void SIP_DECL(dp83820_mii_statchg)(struct device *);
452 #else
453 static int SIP_DECL(sis900_mii_readreg)(struct device *, int, int);
454 static void SIP_DECL(sis900_mii_writereg)(struct device *, int, int, int);
455 static void SIP_DECL(sis900_mii_statchg)(struct device *);
456
457 static int SIP_DECL(dp83815_mii_readreg)(struct device *, int, int);
458 static void SIP_DECL(dp83815_mii_writereg)(struct device *, int, int, int);
459 static void SIP_DECL(dp83815_mii_statchg)(struct device *);
460 #endif /* DP83820 */
461
462 static int SIP_DECL(mediachange)(struct ifnet *);
463 static void SIP_DECL(mediastatus)(struct ifnet *, struct ifmediareq *);
464
465 static int SIP_DECL(match)(struct device *, struct cfdata *, void *);
466 static void SIP_DECL(attach)(struct device *, struct device *, void *);
467
468 int SIP_DECL(copy_small) = 0;
469
470 #ifdef DP83820
471 CFATTACH_DECL(gsip, sizeof(struct sip_softc),
472 gsip_match, gsip_attach, NULL, NULL);
473 #else
474 CFATTACH_DECL(sip, sizeof(struct sip_softc),
475 sip_match, sip_attach, NULL, NULL);
476 #endif
477
478 /*
479 * Descriptions of the variants of the SiS900.
480 */
481 struct sip_variant {
482 int (*sipv_mii_readreg)(struct device *, int, int);
483 void (*sipv_mii_writereg)(struct device *, int, int, int);
484 void (*sipv_mii_statchg)(struct device *);
485 void (*sipv_set_filter)(struct sip_softc *);
486 void (*sipv_read_macaddr)(struct sip_softc *,
487 const struct pci_attach_args *, u_int8_t *);
488 };
489
490 static u_int32_t SIP_DECL(mii_bitbang_read)(struct device *);
491 static void SIP_DECL(mii_bitbang_write)(struct device *, u_int32_t);
492
493 static const struct mii_bitbang_ops SIP_DECL(mii_bitbang_ops) = {
494 SIP_DECL(mii_bitbang_read),
495 SIP_DECL(mii_bitbang_write),
496 {
497 EROMAR_MDIO, /* MII_BIT_MDO */
498 EROMAR_MDIO, /* MII_BIT_MDI */
499 EROMAR_MDC, /* MII_BIT_MDC */
500 EROMAR_MDDIR, /* MII_BIT_DIR_HOST_PHY */
501 0, /* MII_BIT_DIR_PHY_HOST */
502 }
503 };
504
505 #if defined(DP83820)
506 static const struct sip_variant SIP_DECL(variant_dp83820) = {
507 SIP_DECL(dp83820_mii_readreg),
508 SIP_DECL(dp83820_mii_writereg),
509 SIP_DECL(dp83820_mii_statchg),
510 SIP_DECL(dp83815_set_filter),
511 SIP_DECL(dp83820_read_macaddr),
512 };
513 #else
514 static const struct sip_variant SIP_DECL(variant_sis900) = {
515 SIP_DECL(sis900_mii_readreg),
516 SIP_DECL(sis900_mii_writereg),
517 SIP_DECL(sis900_mii_statchg),
518 SIP_DECL(sis900_set_filter),
519 SIP_DECL(sis900_read_macaddr),
520 };
521
522 static const struct sip_variant SIP_DECL(variant_dp83815) = {
523 SIP_DECL(dp83815_mii_readreg),
524 SIP_DECL(dp83815_mii_writereg),
525 SIP_DECL(dp83815_mii_statchg),
526 SIP_DECL(dp83815_set_filter),
527 SIP_DECL(dp83815_read_macaddr),
528 };
529 #endif /* DP83820 */
530
531 /*
532 * Devices supported by this driver.
533 */
534 static const struct sip_product {
535 pci_vendor_id_t sip_vendor;
536 pci_product_id_t sip_product;
537 const char *sip_name;
538 const struct sip_variant *sip_variant;
539 } SIP_DECL(products)[] = {
540 #if defined(DP83820)
541 { PCI_VENDOR_NS, PCI_PRODUCT_NS_DP83820,
542 "NatSemi DP83820 Gigabit Ethernet",
543 &SIP_DECL(variant_dp83820) },
544 #else
545 { PCI_VENDOR_SIS, PCI_PRODUCT_SIS_900,
546 "SiS 900 10/100 Ethernet",
547 &SIP_DECL(variant_sis900) },
548 { PCI_VENDOR_SIS, PCI_PRODUCT_SIS_7016,
549 "SiS 7016 10/100 Ethernet",
550 &SIP_DECL(variant_sis900) },
551
552 { PCI_VENDOR_NS, PCI_PRODUCT_NS_DP83815,
553 "NatSemi DP83815 10/100 Ethernet",
554 &SIP_DECL(variant_dp83815) },
555 #endif /* DP83820 */
556
557 { 0, 0,
558 NULL,
559 NULL },
560 };
561
562 static const struct sip_product *
563 SIP_DECL(lookup)(const struct pci_attach_args *pa)
564 {
565 const struct sip_product *sip;
566
567 for (sip = SIP_DECL(products); sip->sip_name != NULL; sip++) {
568 if (PCI_VENDOR(pa->pa_id) == sip->sip_vendor &&
569 PCI_PRODUCT(pa->pa_id) == sip->sip_product)
570 return (sip);
571 }
572 return (NULL);
573 }
574
575 #ifdef DP83820
576 /*
577 * I really hate stupid hardware vendors. There's a bit in the EEPROM
578 * which indicates if the card can do 64-bit data transfers. Unfortunately,
579 * several vendors of 32-bit cards fail to clear this bit in the EEPROM,
580 * which means we try to use 64-bit data transfers on those cards if we
581 * happen to be plugged into a 32-bit slot.
582 *
583 * What we do is use this table of cards known to be 64-bit cards. If
584 * you have a 64-bit card whose subsystem ID is not listed in this table,
585 * send the output of "pcictl dump ..." of the device to me so that your
586 * card will use the 64-bit data path when plugged into a 64-bit slot.
587 *
588 * -- Jason R. Thorpe <thorpej (at) NetBSD.org>
589 * June 30, 2002
590 */
591 static int
592 SIP_DECL(check_64bit)(const struct pci_attach_args *pa)
593 {
594 static const struct {
595 pci_vendor_id_t c64_vendor;
596 pci_product_id_t c64_product;
597 } card64[] = {
598 /* Asante GigaNIX */
599 { 0x128a, 0x0002 },
600
601 /* Accton EN1407-T, Planex GN-1000TE */
602 { 0x1113, 0x1407 },
603
604 /* Netgear GA-621 */
605 { 0x1385, 0x621a },
606
607 /* SMC EZ Card */
608 { 0x10b8, 0x9462 },
609
610 { 0, 0}
611 };
612 pcireg_t subsys;
613 int i;
614
615 subsys = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
616
617 for (i = 0; card64[i].c64_vendor != 0; i++) {
618 if (PCI_VENDOR(subsys) == card64[i].c64_vendor &&
619 PCI_PRODUCT(subsys) == card64[i].c64_product)
620 return (1);
621 }
622
623 return (0);
624 }
625 #endif /* DP83820 */
626
627 static int
628 SIP_DECL(match)(struct device *parent, struct cfdata *cf, void *aux)
629 {
630 struct pci_attach_args *pa = aux;
631
632 if (SIP_DECL(lookup)(pa) != NULL)
633 return (1);
634
635 return (0);
636 }
637
638 static void
639 SIP_DECL(attach)(struct device *parent, struct device *self, void *aux)
640 {
641 struct sip_softc *sc = (struct sip_softc *) self;
642 struct pci_attach_args *pa = aux;
643 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
644 pci_chipset_tag_t pc = pa->pa_pc;
645 pci_intr_handle_t ih;
646 const char *intrstr = NULL;
647 bus_space_tag_t iot, memt;
648 bus_space_handle_t ioh, memh;
649 bus_dma_segment_t seg;
650 int ioh_valid, memh_valid;
651 int i, rseg, error;
652 const struct sip_product *sip;
653 pcireg_t pmode;
654 u_int8_t enaddr[ETHER_ADDR_LEN];
655 int pmreg;
656 #ifdef DP83820
657 pcireg_t memtype;
658 u_int32_t reg;
659 #endif /* DP83820 */
660
661 callout_init(&sc->sc_tick_ch);
662
663 sip = SIP_DECL(lookup)(pa);
664 if (sip == NULL) {
665 printf("\n");
666 panic(SIP_STR(attach) ": impossible");
667 }
668 sc->sc_rev = PCI_REVISION(pa->pa_class);
669
670 printf(": %s, rev %#02x\n", sip->sip_name, sc->sc_rev);
671
672 sc->sc_model = sip;
673
674 /*
675 * XXX Work-around broken PXE firmware on some boards.
676 *
677 * The DP83815 shares an address decoder with the MEM BAR
678 * and the ROM BAR. Make sure the ROM BAR is disabled,
679 * so that memory mapped access works.
680 */
681 pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_MAPREG_ROM,
682 pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_MAPREG_ROM) &
683 ~PCI_MAPREG_ROM_ENABLE);
684
685 /*
686 * Map the device.
687 */
688 ioh_valid = (pci_mapreg_map(pa, SIP_PCI_CFGIOA,
689 PCI_MAPREG_TYPE_IO, 0,
690 &iot, &ioh, NULL, NULL) == 0);
691 #ifdef DP83820
692 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, SIP_PCI_CFGMA);
693 switch (memtype) {
694 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
695 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
696 memh_valid = (pci_mapreg_map(pa, SIP_PCI_CFGMA,
697 memtype, 0, &memt, &memh, NULL, NULL) == 0);
698 break;
699 default:
700 memh_valid = 0;
701 }
702 #else
703 memh_valid = (pci_mapreg_map(pa, SIP_PCI_CFGMA,
704 PCI_MAPREG_TYPE_MEM|PCI_MAPREG_MEM_TYPE_32BIT, 0,
705 &memt, &memh, NULL, NULL) == 0);
706 #endif /* DP83820 */
707
708 if (memh_valid) {
709 sc->sc_st = memt;
710 sc->sc_sh = memh;
711 } else if (ioh_valid) {
712 sc->sc_st = iot;
713 sc->sc_sh = ioh;
714 } else {
715 printf("%s: unable to map device registers\n",
716 sc->sc_dev.dv_xname);
717 return;
718 }
719
720 sc->sc_dmat = pa->pa_dmat;
721
722 /*
723 * Make sure bus mastering is enabled. Also make sure
724 * Write/Invalidate is enabled if we're allowed to use it.
725 */
726 pmreg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
727 if (pa->pa_flags & PCI_FLAGS_MWI_OKAY)
728 pmreg |= PCI_COMMAND_INVALIDATE_ENABLE;
729 pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
730 pmreg | PCI_COMMAND_MASTER_ENABLE);
731
732 /* Get it out of power save mode if needed. */
733 if (pci_get_capability(pc, pa->pa_tag, PCI_CAP_PWRMGMT, &pmreg, 0)) {
734 pmode = pci_conf_read(pc, pa->pa_tag, pmreg + PCI_PMCSR) &
735 PCI_PMCSR_STATE_MASK;
736 if (pmode == PCI_PMCSR_STATE_D3) {
737 /*
738 * The card has lost all configuration data in
739 * this state, so punt.
740 */
741 printf("%s: unable to wake up from power state D3\n",
742 sc->sc_dev.dv_xname);
743 return;
744 }
745 if (pmode != PCI_PMCSR_STATE_D0) {
746 printf("%s: waking up from power state D%d\n",
747 sc->sc_dev.dv_xname, pmode);
748 pci_conf_write(pc, pa->pa_tag, pmreg + PCI_PMCSR,
749 PCI_PMCSR_STATE_D0);
750 }
751 }
752
753 /*
754 * Map and establish our interrupt.
755 */
756 if (pci_intr_map(pa, &ih)) {
757 printf("%s: unable to map interrupt\n", sc->sc_dev.dv_xname);
758 return;
759 }
760 intrstr = pci_intr_string(pc, ih);
761 sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, SIP_DECL(intr), sc);
762 if (sc->sc_ih == NULL) {
763 printf("%s: unable to establish interrupt",
764 sc->sc_dev.dv_xname);
765 if (intrstr != NULL)
766 printf(" at %s", intrstr);
767 printf("\n");
768 return;
769 }
770 printf("%s: interrupting at %s\n", sc->sc_dev.dv_xname, intrstr);
771
772 SIMPLEQ_INIT(&sc->sc_txfreeq);
773 SIMPLEQ_INIT(&sc->sc_txdirtyq);
774
775 /*
776 * Allocate the control data structures, and create and load the
777 * DMA map for it.
778 */
779 if ((error = bus_dmamem_alloc(sc->sc_dmat,
780 sizeof(struct sip_control_data), PAGE_SIZE, 0, &seg, 1, &rseg,
781 0)) != 0) {
782 printf("%s: unable to allocate control data, error = %d\n",
783 sc->sc_dev.dv_xname, error);
784 goto fail_0;
785 }
786
787 if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
788 sizeof(struct sip_control_data), (caddr_t *)&sc->sc_control_data,
789 BUS_DMA_COHERENT)) != 0) {
790 printf("%s: unable to map control data, error = %d\n",
791 sc->sc_dev.dv_xname, error);
792 goto fail_1;
793 }
794
795 if ((error = bus_dmamap_create(sc->sc_dmat,
796 sizeof(struct sip_control_data), 1,
797 sizeof(struct sip_control_data), 0, 0, &sc->sc_cddmamap)) != 0) {
798 printf("%s: unable to create control data DMA map, "
799 "error = %d\n", sc->sc_dev.dv_xname, error);
800 goto fail_2;
801 }
802
803 if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
804 sc->sc_control_data, sizeof(struct sip_control_data), NULL,
805 0)) != 0) {
806 printf("%s: unable to load control data DMA map, error = %d\n",
807 sc->sc_dev.dv_xname, error);
808 goto fail_3;
809 }
810
811 /*
812 * Create the transmit buffer DMA maps.
813 */
814 for (i = 0; i < SIP_TXQUEUELEN; i++) {
815 if ((error = bus_dmamap_create(sc->sc_dmat, TX_DMAMAP_SIZE,
816 SIP_NTXSEGS, MCLBYTES, 0, 0,
817 &sc->sc_txsoft[i].txs_dmamap)) != 0) {
818 printf("%s: unable to create tx DMA map %d, "
819 "error = %d\n", sc->sc_dev.dv_xname, i, error);
820 goto fail_4;
821 }
822 }
823
824 /*
825 * Create the receive buffer DMA maps.
826 */
827 for (i = 0; i < SIP_NRXDESC; i++) {
828 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
829 MCLBYTES, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
830 printf("%s: unable to create rx DMA map %d, "
831 "error = %d\n", sc->sc_dev.dv_xname, i, error);
832 goto fail_5;
833 }
834 sc->sc_rxsoft[i].rxs_mbuf = NULL;
835 }
836
837 /*
838 * Reset the chip to a known state.
839 */
840 SIP_DECL(reset)(sc);
841
842 /*
843 * Read the Ethernet address from the EEPROM. This might
844 * also fetch other stuff from the EEPROM and stash it
845 * in the softc.
846 */
847 sc->sc_cfg = 0;
848 #if !defined(DP83820)
849 if (SIP_SIS900_REV(sc,SIS_REV_635) ||
850 SIP_SIS900_REV(sc,SIS_REV_900B))
851 sc->sc_cfg |= (CFG_PESEL | CFG_RNDCNT);
852
853 if (SIP_SIS900_REV(sc,SIS_REV_635) ||
854 SIP_SIS900_REV(sc,SIS_REV_960) ||
855 SIP_SIS900_REV(sc,SIS_REV_900B))
856 sc->sc_cfg |= (bus_space_read_4(sc->sc_st, sc->sc_sh,
857 SIP_CFG) & CFG_EDBMASTEN);
858 #endif
859
860 (*sip->sip_variant->sipv_read_macaddr)(sc, pa, enaddr);
861
862 printf("%s: Ethernet address %s\n", sc->sc_dev.dv_xname,
863 ether_sprintf(enaddr));
864
865 /*
866 * Initialize the configuration register: aggressive PCI
867 * bus request algorithm, default backoff, default OW timer,
868 * default parity error detection.
869 *
870 * NOTE: "Big endian mode" is useless on the SiS900 and
871 * friends -- it affects packet data, not descriptors.
872 */
873 #ifdef DP83820
874 /*
875 * Cause the chip to load configuration data from the EEPROM.
876 */
877 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_PTSCR, PTSCR_EELOAD_EN);
878 for (i = 0; i < 10000; i++) {
879 delay(10);
880 if ((bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_PTSCR) &
881 PTSCR_EELOAD_EN) == 0)
882 break;
883 }
884 if (bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_PTSCR) &
885 PTSCR_EELOAD_EN) {
886 printf("%s: timeout loading configuration from EEPROM\n",
887 sc->sc_dev.dv_xname);
888 return;
889 }
890
891 sc->sc_gpior = bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_GPIOR);
892
893 reg = bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_CFG);
894 if (reg & CFG_PCI64_DET) {
895 printf("%s: 64-bit PCI slot detected", sc->sc_dev.dv_xname);
896 /*
897 * Check to see if this card is 64-bit. If so, enable 64-bit
898 * data transfers.
899 *
900 * We can't use the DATA64_EN bit in the EEPROM, because
901 * vendors of 32-bit cards fail to clear that bit in many
902 * cases (yet the card still detects that it's in a 64-bit
903 * slot; go figure).
904 */
905 if (SIP_DECL(check_64bit)(pa)) {
906 sc->sc_cfg |= CFG_DATA64_EN;
907 printf(", using 64-bit data transfers");
908 }
909 printf("\n");
910 }
911
912 /*
913 * XXX Need some PCI flags indicating support for
914 * XXX 64-bit addressing.
915 */
916 #if 0
917 if (reg & CFG_M64ADDR)
918 sc->sc_cfg |= CFG_M64ADDR;
919 if (reg & CFG_T64ADDR)
920 sc->sc_cfg |= CFG_T64ADDR;
921 #endif
922
923 if (reg & (CFG_TBI_EN|CFG_EXT_125)) {
924 const char *sep = "";
925 printf("%s: using ", sc->sc_dev.dv_xname);
926 if (reg & CFG_EXT_125) {
927 sc->sc_cfg |= CFG_EXT_125;
928 printf("%s125MHz clock", sep);
929 sep = ", ";
930 }
931 if (reg & CFG_TBI_EN) {
932 sc->sc_cfg |= CFG_TBI_EN;
933 printf("%sten-bit interface", sep);
934 sep = ", ";
935 }
936 printf("\n");
937 }
938 if ((pa->pa_flags & PCI_FLAGS_MRM_OKAY) == 0 ||
939 (reg & CFG_MRM_DIS) != 0)
940 sc->sc_cfg |= CFG_MRM_DIS;
941 if ((pa->pa_flags & PCI_FLAGS_MWI_OKAY) == 0 ||
942 (reg & CFG_MWI_DIS) != 0)
943 sc->sc_cfg |= CFG_MWI_DIS;
944
945 /*
946 * Use the extended descriptor format on the DP83820. This
947 * gives us an interface to VLAN tagging and IPv4/TCP/UDP
948 * checksumming.
949 */
950 sc->sc_cfg |= CFG_EXTSTS_EN;
951 #endif /* DP83820 */
952
953 /*
954 * Initialize our media structures and probe the MII.
955 */
956 sc->sc_mii.mii_ifp = ifp;
957 sc->sc_mii.mii_readreg = sip->sip_variant->sipv_mii_readreg;
958 sc->sc_mii.mii_writereg = sip->sip_variant->sipv_mii_writereg;
959 sc->sc_mii.mii_statchg = sip->sip_variant->sipv_mii_statchg;
960 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, SIP_DECL(mediachange),
961 SIP_DECL(mediastatus));
962
963 /*
964 * XXX We cannot handle flow control on the DP83815.
965 */
966 if (SIP_CHIP_MODEL(sc, PCI_VENDOR_NS, PCI_PRODUCT_NS_DP83815))
967 mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
968 MII_OFFSET_ANY, 0);
969 else
970 mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
971 MII_OFFSET_ANY, MIIF_DOPAUSE);
972 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
973 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
974 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
975 } else
976 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
977
978 ifp = &sc->sc_ethercom.ec_if;
979 strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
980 ifp->if_softc = sc;
981 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
982 ifp->if_ioctl = SIP_DECL(ioctl);
983 ifp->if_start = SIP_DECL(start);
984 ifp->if_watchdog = SIP_DECL(watchdog);
985 ifp->if_init = SIP_DECL(init);
986 ifp->if_stop = SIP_DECL(stop);
987 IFQ_SET_READY(&ifp->if_snd);
988
989 /*
990 * We can support 802.1Q VLAN-sized frames.
991 */
992 sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU;
993
994 #ifdef DP83820
995 /*
996 * And the DP83820 can do VLAN tagging in hardware, and
997 * support the jumbo Ethernet MTU.
998 */
999 sc->sc_ethercom.ec_capabilities |=
1000 ETHERCAP_VLAN_HWTAGGING | ETHERCAP_JUMBO_MTU;
1001
1002 /*
1003 * The DP83820 can do IPv4, TCPv4, and UDPv4 checksums
1004 * in hardware.
1005 */
1006 ifp->if_capabilities |= IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 |
1007 IFCAP_CSUM_UDPv4;
1008 #endif /* DP83820 */
1009
1010 /*
1011 * Attach the interface.
1012 */
1013 if_attach(ifp);
1014 ether_ifattach(ifp, enaddr);
1015 #if NRND > 0
1016 rnd_attach_source(&sc->rnd_source, sc->sc_dev.dv_xname,
1017 RND_TYPE_NET, 0);
1018 #endif
1019
1020 /*
1021 * The number of bytes that must be available in
1022 * the Tx FIFO before the bus master can DMA more
1023 * data into the FIFO.
1024 */
1025 sc->sc_tx_fill_thresh = 64 / 32;
1026
1027 /*
1028 * Start at a drain threshold of 512 bytes. We will
1029 * increase it if a DMA underrun occurs.
1030 *
1031 * XXX The minimum value of this variable should be
1032 * tuned. We may be able to improve performance
1033 * by starting with a lower value. That, however,
1034 * may trash the first few outgoing packets if the
1035 * PCI bus is saturated.
1036 */
1037 #ifdef DP83820
1038 sc->sc_tx_drain_thresh = 6400 / 32; /* from FreeBSD nge(4) */
1039 #else
1040 sc->sc_tx_drain_thresh = 1504 / 32;
1041 #endif
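	/*
	 * Annotation: both thresholds are programmed in units of 32 bytes,
	 * which is why the byte counts above are divided by 32 -- e.g.
	 * 64 / 32 == 2 units for the fill threshold and 1504 / 32 == 47
	 * units for the non-DP83820 drain threshold.
	 */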
1042
1043 /*
1044 * Initialize the Rx FIFO drain threshold.
1045 *
1046 * This is in units of 8 bytes.
1047 *
1048 * We should never set this value lower than 2; 14 bytes are
1049 * required to filter the packet.
1050 */
1051 sc->sc_rx_drain_thresh = 128 / 8;
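	/*
	 * Annotation: 128 / 8 == 16 units of 8 bytes, i.e. 128 bytes of
	 * received data must accumulate before the FIFO is drained to
	 * memory -- well above the 2-unit (16-byte) minimum noted above,
	 * which is needed to cover the 14-byte Ethernet header used for
	 * receive filtering.
	 */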
1052
1053 #ifdef SIP_EVENT_COUNTERS
1054 /*
1055 * Attach event counters.
1056 */
1057 evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
1058 NULL, sc->sc_dev.dv_xname, "txsstall");
1059 evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
1060 NULL, sc->sc_dev.dv_xname, "txdstall");
1061 evcnt_attach_dynamic(&sc->sc_ev_txforceintr, EVCNT_TYPE_INTR,
1062 NULL, sc->sc_dev.dv_xname, "txforceintr");
1063 evcnt_attach_dynamic(&sc->sc_ev_txdintr, EVCNT_TYPE_INTR,
1064 NULL, sc->sc_dev.dv_xname, "txdintr");
1065 evcnt_attach_dynamic(&sc->sc_ev_txiintr, EVCNT_TYPE_INTR,
1066 NULL, sc->sc_dev.dv_xname, "txiintr");
1067 evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
1068 NULL, sc->sc_dev.dv_xname, "rxintr");
1069 evcnt_attach_dynamic(&sc->sc_ev_hiberr, EVCNT_TYPE_INTR,
1070 NULL, sc->sc_dev.dv_xname, "hiberr");
1071 #ifndef DP83820
1072 evcnt_attach_dynamic(&sc->sc_ev_rxpause, EVCNT_TYPE_INTR,
1073 NULL, sc->sc_dev.dv_xname, "rxpause");
1074 #endif /* !DP83820 */
1075 #ifdef DP83820
1076 evcnt_attach_dynamic(&sc->sc_ev_rxpause, EVCNT_TYPE_MISC,
1077 NULL, sc->sc_dev.dv_xname, "rxpause");
1078 evcnt_attach_dynamic(&sc->sc_ev_txpause, EVCNT_TYPE_MISC,
1079 NULL, sc->sc_dev.dv_xname, "txpause");
1080 evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
1081 NULL, sc->sc_dev.dv_xname, "rxipsum");
1082 evcnt_attach_dynamic(&sc->sc_ev_rxtcpsum, EVCNT_TYPE_MISC,
1083 NULL, sc->sc_dev.dv_xname, "rxtcpsum");
1084 evcnt_attach_dynamic(&sc->sc_ev_rxudpsum, EVCNT_TYPE_MISC,
1085 NULL, sc->sc_dev.dv_xname, "rxudpsum");
1086 evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
1087 NULL, sc->sc_dev.dv_xname, "txipsum");
1088 evcnt_attach_dynamic(&sc->sc_ev_txtcpsum, EVCNT_TYPE_MISC,
1089 NULL, sc->sc_dev.dv_xname, "txtcpsum");
1090 evcnt_attach_dynamic(&sc->sc_ev_txudpsum, EVCNT_TYPE_MISC,
1091 NULL, sc->sc_dev.dv_xname, "txudpsum");
1092 #endif /* DP83820 */
1093 #endif /* SIP_EVENT_COUNTERS */
1094
1095 /*
1096 * Make sure the interface is shutdown during reboot.
1097 */
1098 sc->sc_sdhook = shutdownhook_establish(SIP_DECL(shutdown), sc);
1099 if (sc->sc_sdhook == NULL)
1100 printf("%s: WARNING: unable to establish shutdown hook\n",
1101 sc->sc_dev.dv_xname);
1102 return;
1103
1104 /*
1105 * Free any resources we've allocated during the failed attach
1106 * attempt. Do this in reverse order and fall through.
1107 */
1108 fail_5:
1109 for (i = 0; i < SIP_NRXDESC; i++) {
1110 if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
1111 bus_dmamap_destroy(sc->sc_dmat,
1112 sc->sc_rxsoft[i].rxs_dmamap);
1113 }
1114 fail_4:
1115 for (i = 0; i < SIP_TXQUEUELEN; i++) {
1116 if (sc->sc_txsoft[i].txs_dmamap != NULL)
1117 bus_dmamap_destroy(sc->sc_dmat,
1118 sc->sc_txsoft[i].txs_dmamap);
1119 }
1120 bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
1121 fail_3:
1122 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
1123 fail_2:
1124 bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_control_data,
1125 sizeof(struct sip_control_data));
1126 fail_1:
1127 bus_dmamem_free(sc->sc_dmat, &seg, rseg);
1128 fail_0:
1129 return;
1130 }
1131
1132 /*
1133 * sip_shutdown:
1134 *
1135 * Make sure the interface is stopped at reboot time.
1136 */
1137 static void
1138 SIP_DECL(shutdown)(void *arg)
1139 {
1140 struct sip_softc *sc = arg;
1141
1142 SIP_DECL(stop)(&sc->sc_ethercom.ec_if, 1);
1143 }
1144
1145 /*
1146 * sip_start: [ifnet interface function]
1147 *
1148 * Start packet transmission on the interface.
1149 */
1150 static void
1151 SIP_DECL(start)(struct ifnet *ifp)
1152 {
1153 struct sip_softc *sc = ifp->if_softc;
1154 struct mbuf *m0;
1155 #ifndef DP83820
1156 struct mbuf *m;
1157 #endif
1158 struct sip_txsoft *txs;
1159 bus_dmamap_t dmamap;
1160 int error, nexttx, lasttx, seg;
1161 int ofree = sc->sc_txfree;
1162 #if 0
1163 int firsttx = sc->sc_txnext;
1164 #endif
1165 #ifdef DP83820
1166 struct m_tag *mtag;
1167 u_int32_t extsts;
1168 #endif
1169
1170 #ifndef DP83820
1171 /*
1172 * If we've been told to pause, don't transmit any more packets.
1173 */
1174 if (sc->sc_paused)
1175 ifp->if_flags |= IFF_OACTIVE;
1176 #endif
1177
1178 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
1179 return;
1180
1181 /*
1182 * Loop through the send queue, setting up transmit descriptors
1183 * until we drain the queue, or use up all available transmit
1184 * descriptors.
1185 */
1186 for (;;) {
1187 /* Get a work queue entry. */
1188 if ((txs = SIMPLEQ_FIRST(&sc->sc_txfreeq)) == NULL) {
1189 SIP_EVCNT_INCR(&sc->sc_ev_txsstall);
1190 break;
1191 }
1192
1193 /*
1194 * Grab a packet off the queue.
1195 */
1196 IFQ_POLL(&ifp->if_snd, m0);
1197 if (m0 == NULL)
1198 break;
1199 #ifndef DP83820
1200 m = NULL;
1201 #endif
1202
1203 dmamap = txs->txs_dmamap;
1204
1205 #ifdef DP83820
1206 /*
1207 * Load the DMA map. If this fails, the packet either
1208 * didn't fit in the allotted number of segments, or we
1209 * were short on resources. For the too-many-segments
1210 * case, we simply report an error and drop the packet,
1211 * since we can't sanely copy a jumbo packet to a single
1212 * buffer.
1213 */
1214 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
1215 BUS_DMA_WRITE|BUS_DMA_NOWAIT);
1216 if (error) {
1217 if (error == EFBIG) {
1218 printf("%s: Tx packet consumes too many "
1219 "DMA segments, dropping...\n",
1220 sc->sc_dev.dv_xname);
1221 IFQ_DEQUEUE(&ifp->if_snd, m0);
1222 m_freem(m0);
1223 continue;
1224 }
1225 /*
1226 * Short on resources, just stop for now.
1227 */
1228 break;
1229 }
1230 #else /* DP83820 */
1231 /*
1232 * Load the DMA map. If this fails, the packet either
1233 * didn't fit in the allotted number of segments, or we
1234 * were short on resources. In this case, we'll copy
1235 * and try again.
1236 */
1237 if (bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
1238 BUS_DMA_WRITE|BUS_DMA_NOWAIT) != 0) {
1239 MGETHDR(m, M_DONTWAIT, MT_DATA);
1240 if (m == NULL) {
1241 printf("%s: unable to allocate Tx mbuf\n",
1242 sc->sc_dev.dv_xname);
1243 break;
1244 }
1245 if (m0->m_pkthdr.len > MHLEN) {
1246 MCLGET(m, M_DONTWAIT);
1247 if ((m->m_flags & M_EXT) == 0) {
1248 printf("%s: unable to allocate Tx "
1249 "cluster\n", sc->sc_dev.dv_xname);
1250 m_freem(m);
1251 break;
1252 }
1253 }
1254 m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, caddr_t));
1255 m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
1256 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
1257 m, BUS_DMA_WRITE|BUS_DMA_NOWAIT);
1258 if (error) {
1259 printf("%s: unable to load Tx buffer, "
1260 "error = %d\n", sc->sc_dev.dv_xname, error);
1261 break;
1262 }
1263 }
1264 #endif /* DP83820 */
1265
1266 /*
1267 * Ensure we have enough descriptors free to describe
1268 * the packet. Note, we always reserve one descriptor
1269 * at the end of the ring as a termination point, to
1270 * prevent wrap-around.
1271 */
1272 if (dmamap->dm_nsegs > (sc->sc_txfree - 1)) {
1273 /*
1274 * Not enough free descriptors to transmit this
1275 * packet. We haven't committed anything yet,
1276 * so just unload the DMA map, put the packet
1277 * back on the queue, and punt. Notify the upper
1278 * layer that there are no more slots left.
1279 *
1280 * XXX We could allocate an mbuf and copy, but
1281 * XXX is it worth it?
1282 */
1283 ifp->if_flags |= IFF_OACTIVE;
1284 bus_dmamap_unload(sc->sc_dmat, dmamap);
1285 #ifndef DP83820
1286 if (m != NULL)
1287 m_freem(m);
1288 #endif
1289 SIP_EVCNT_INCR(&sc->sc_ev_txdstall);
1290 break;
1291 }
1292
1293 IFQ_DEQUEUE(&ifp->if_snd, m0);
1294 #ifndef DP83820
1295 if (m != NULL) {
1296 m_freem(m0);
1297 m0 = m;
1298 }
1299 #endif
1300
1301 /*
1302 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
1303 */
1304
1305 /* Sync the DMA map. */
1306 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
1307 BUS_DMASYNC_PREWRITE);
1308
1309 /*
1310 * Initialize the transmit descriptors.
1311 */
1312 for (nexttx = lasttx = sc->sc_txnext, seg = 0;
1313 seg < dmamap->dm_nsegs;
1314 seg++, nexttx = SIP_NEXTTX(nexttx)) {
1315 /*
1316 * If this is the first descriptor we're
1317 * enqueueing, don't set the OWN bit just
1318 * yet. That could cause a race condition.
1319 * We'll do it below.
1320 */
1321 sc->sc_txdescs[nexttx].sipd_bufptr =
1322 htole32(dmamap->dm_segs[seg].ds_addr);
1323 sc->sc_txdescs[nexttx].sipd_cmdsts =
1324 htole32((nexttx == sc->sc_txnext ? 0 : CMDSTS_OWN) |
1325 CMDSTS_MORE | dmamap->dm_segs[seg].ds_len);
1326 #ifdef DP83820
1327 sc->sc_txdescs[nexttx].sipd_extsts = 0;
1328 #endif /* DP83820 */
1329 lasttx = nexttx;
1330 }
1331
1332 /* Clear the MORE bit on the last segment. */
1333 sc->sc_txdescs[lasttx].sipd_cmdsts &= htole32(~CMDSTS_MORE);
1334
1335 /*
1336 * If enough descriptors have been queued since the last Tx
1337 * interrupt, force an interrupt on this packet.
1338 */
1339 if (++sc->sc_txwin >= (SIP_TXQUEUELEN * 2 / 3)) {
1340 SIP_EVCNT_INCR(&sc->sc_ev_txforceintr);
1341 sc->sc_txdescs[lasttx].sipd_cmdsts |=
1342 htole32(CMDSTS_INTR);
1343 sc->sc_txwin = 0;
1344 }
1345
1346 #ifdef DP83820
1347 /*
1348 * If VLANs are enabled and the packet has a VLAN tag, set
1349 * up the descriptor to encapsulate the packet for us.
1350 *
1351 * This apparently has to be on the last descriptor of
1352 * the packet.
1353 */
1354 if (sc->sc_ethercom.ec_nvlans != 0 &&
1355 (mtag = m_tag_find(m0, PACKET_TAG_VLAN, NULL)) != NULL) {
1356 sc->sc_txdescs[lasttx].sipd_extsts |=
1357 htole32(EXTSTS_VPKT |
1358 (*(u_int *)(mtag + 1) & EXTSTS_VTCI));
1359 }
1360
1361 /*
1362 * If the upper-layer has requested IPv4/TCPv4/UDPv4
1363 * checksumming, set up the descriptor to do this work
1364 * for us.
1365 *
1366 * This apparently has to be on the first descriptor of
1367 * the packet.
1368 *
1369 * Byte-swap constants so the compiler can optimize.
1370 */
1371 extsts = 0;
1372 if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
1373 KDASSERT(ifp->if_capenable & IFCAP_CSUM_IPv4);
1374 SIP_EVCNT_INCR(&sc->sc_ev_txipsum);
1375 extsts |= htole32(EXTSTS_IPPKT);
1376 }
1377 if (m0->m_pkthdr.csum_flags & M_CSUM_TCPv4) {
1378 KDASSERT(ifp->if_capenable & IFCAP_CSUM_TCPv4);
1379 SIP_EVCNT_INCR(&sc->sc_ev_txtcpsum);
1380 extsts |= htole32(EXTSTS_TCPPKT);
1381 } else if (m0->m_pkthdr.csum_flags & M_CSUM_UDPv4) {
1382 KDASSERT(ifp->if_capenable & IFCAP_CSUM_UDPv4);
1383 SIP_EVCNT_INCR(&sc->sc_ev_txudpsum);
1384 extsts |= htole32(EXTSTS_UDPPKT);
1385 }
1386 sc->sc_txdescs[sc->sc_txnext].sipd_extsts |= extsts;
1387 #endif /* DP83820 */
1388
1389 /* Sync the descriptors we're using. */
1390 SIP_CDTXSYNC(sc, sc->sc_txnext, dmamap->dm_nsegs,
1391 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1392
1393 /*
1394 * The entire packet is set up.  Give the first descriptor
1395 * to the chip now.
1396 */
1397 sc->sc_txdescs[sc->sc_txnext].sipd_cmdsts |=
1398 htole32(CMDSTS_OWN);
1399 SIP_CDTXSYNC(sc, sc->sc_txnext, 1,
1400 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1401
1402 /*
1403 * Store a pointer to the packet so we can free it later,
1404 * and remember what txdirty will be once the packet is
1405 * done.
1406 */
1407 txs->txs_mbuf = m0;
1408 txs->txs_firstdesc = sc->sc_txnext;
1409 txs->txs_lastdesc = lasttx;
1410
1411 /* Advance the tx pointer. */
1412 sc->sc_txfree -= dmamap->dm_nsegs;
1413 sc->sc_txnext = nexttx;
1414
1415 SIMPLEQ_REMOVE_HEAD(&sc->sc_txfreeq, txs_q);
1416 SIMPLEQ_INSERT_TAIL(&sc->sc_txdirtyq, txs, txs_q);
1417
1418 #if NBPFILTER > 0
1419 /*
1420 * Pass the packet to any BPF listeners.
1421 */
1422 if (ifp->if_bpf)
1423 bpf_mtap(ifp->if_bpf, m0);
1424 #endif /* NBPFILTER > 0 */
1425 }
1426
1427 if (txs == NULL || sc->sc_txfree == 0) {
1428 /* No more slots left; notify upper layer. */
1429 ifp->if_flags |= IFF_OACTIVE;
1430 }
1431
1432 if (sc->sc_txfree != ofree) {
1433 /*
1434 * Start the transmit process. Note, the manual says
1435 * that if there are no pending transmissions in the
1436 * chip's internal queue (indicated by TXE being clear),
1437 * then the driver software must set the TXDP to the
1438 * first descriptor to be transmitted. However, if we
1439 * do this, it causes serious performance degradation on
1440 * the DP83820 under load; not setting TXDP doesn't seem
1441 * to adversely affect the SiS 900 or DP83815.
1442 *
1443 * Well, I guess it wouldn't be the first time a manual
1444 * has lied -- and they could be speaking of the NULL-
1445 * terminated descriptor list case, rather than OWN-
1446 * terminated rings.
1447 */
1448 #if 0
1449 if ((bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_CR) &
1450 CR_TXE) == 0) {
1451 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_TXDP,
1452 SIP_CDTXADDR(sc, firsttx));
1453 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_CR, CR_TXE);
1454 }
1455 #else
1456 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_CR, CR_TXE);
1457 #endif
1458
1459 /* Set a watchdog timer in case the chip flakes out. */
1460 #ifdef DP83820
1461 /* Gigabit autonegotiation takes 5 seconds. */
1462 ifp->if_timer = 10;
1463 #else
1464 ifp->if_timer = 5;
1465 #endif
1466 }
1467 }
1468
1469 /*
1470 * sip_watchdog: [ifnet interface function]
1471 *
1472 * Watchdog timer handler.
1473 */
1474 static void
1475 SIP_DECL(watchdog)(struct ifnet *ifp)
1476 {
1477 struct sip_softc *sc = ifp->if_softc;
1478
1479 /*
1480 * The chip seems to ignore the CMDSTS_INTR bit sometimes!
1481 * If we get a timeout, try and sweep up transmit descriptors.
1482 * If we manage to sweep them all up, ignore the lack of
1483 * interrupt.
1484 */
1485 SIP_DECL(txintr)(sc);
1486
1487 if (sc->sc_txfree != SIP_NTXDESC) {
1488 printf("%s: device timeout\n", sc->sc_dev.dv_xname);
1489 ifp->if_oerrors++;
1490
1491 /* Reset the interface. */
1492 (void) SIP_DECL(init)(ifp);
1493 } else if (ifp->if_flags & IFF_DEBUG)
1494 printf("%s: recovered from device timeout\n",
1495 sc->sc_dev.dv_xname);
1496
1497 /* Try to get more packets going. */
1498 SIP_DECL(start)(ifp);
1499 }
1500
1501 /*
1502 * sip_ioctl: [ifnet interface function]
1503 *
1504 * Handle control requests from the operator.
1505 */
1506 static int
1507 SIP_DECL(ioctl)(struct ifnet *ifp, u_long cmd, caddr_t data)
1508 {
1509 struct sip_softc *sc = ifp->if_softc;
1510 struct ifreq *ifr = (struct ifreq *)data;
1511 int s, error;
1512
1513 s = splnet();
1514
1515 switch (cmd) {
1516 case SIOCSIFMEDIA:
1517 /* Flow control requires full-duplex mode. */
1518 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
1519 (ifr->ifr_media & IFM_FDX) == 0)
1520 ifr->ifr_media &= ~IFM_ETH_FMASK;
1521 #ifdef DP83820
1522 if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
1523 if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
1524 /* We can do both TXPAUSE and RXPAUSE. */
1525 ifr->ifr_media |=
1526 IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
1527 }
1528 sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
1529 }
1530 #else
1531 /* XXX */
1532 if (SIP_CHIP_MODEL(sc, PCI_VENDOR_NS, PCI_PRODUCT_NS_DP83815))
1533 ifr->ifr_media &= ~IFM_ETH_FMASK;
1534
1535 if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
1536 if (ifr->ifr_media & IFM_FLOW) {
1537 /*
1538 * Both TXPAUSE and RXPAUSE must be set.
1539 * (SiS900 and DP83815 don't have PAUSE_ASYM
1540 * feature.)
1541 *
1542 * XXX Can SiS900 and DP83815 send PAUSE?
1543 */
1544 ifr->ifr_media |=
1545 IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
1546 }
1547 sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
1548 }
1549 #endif
1550 /* FALLTHROUGH */
1551 case SIOCGIFMEDIA:
1552 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
1553 break;
1554
1555 default:
1556 error = ether_ioctl(ifp, cmd, data);
1557 if (error == ENETRESET) {
1558 /*
1559 * Multicast list has changed; set the hardware filter
1560 * accordingly.
1561 */
1562 if (ifp->if_flags & IFF_RUNNING)
1563 (*sc->sc_model->sip_variant->sipv_set_filter)(sc);
1564 error = 0;
1565 }
1566 break;
1567 }
1568
1569 /* Try to get more packets going. */
1570 SIP_DECL(start)(ifp);
1571
1572 splx(s);
1573 return (error);
1574 }
1575
1576 /*
1577 * sip_intr:
1578 *
1579 * Interrupt service routine.
1580 */
1581 static int
1582 SIP_DECL(intr)(void *arg)
1583 {
1584 struct sip_softc *sc = arg;
1585 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1586 u_int32_t isr;
1587 int handled = 0;
1588
1589 /* Disable interrupts. */
1590 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_IER, 0);
1591
1592 for (;;) {
1593 /* Reading clears interrupt. */
1594 isr = bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_ISR);
1595 if ((isr & sc->sc_imr) == 0)
1596 break;
1597
1598 #if NRND > 0
1599 if (RND_ENABLED(&sc->rnd_source))
1600 rnd_add_uint32(&sc->rnd_source, isr);
1601 #endif
1602
1603 handled = 1;
1604
1605 if (isr & (ISR_RXORN|ISR_RXIDLE|ISR_RXDESC)) {
1606 SIP_EVCNT_INCR(&sc->sc_ev_rxintr);
1607
1608 /* Grab any new packets. */
1609 SIP_DECL(rxintr)(sc);
1610
1611 if (isr & ISR_RXORN) {
1612 printf("%s: receive FIFO overrun\n",
1613 sc->sc_dev.dv_xname);
1614
1615 /* XXX adjust rx_drain_thresh? */
1616 }
1617
1618 if (isr & ISR_RXIDLE) {
1619 printf("%s: receive ring overrun\n",
1620 sc->sc_dev.dv_xname);
1621
1622 /* Get the receive process going again. */
1623 bus_space_write_4(sc->sc_st, sc->sc_sh,
1624 SIP_RXDP, SIP_CDRXADDR(sc, sc->sc_rxptr));
1625 bus_space_write_4(sc->sc_st, sc->sc_sh,
1626 SIP_CR, CR_RXE);
1627 }
1628 }
1629
1630 if (isr & (ISR_TXURN|ISR_TXDESC|ISR_TXIDLE)) {
1631 #ifdef SIP_EVENT_COUNTERS
1632 if (isr & ISR_TXDESC)
1633 SIP_EVCNT_INCR(&sc->sc_ev_txdintr);
1634 else if (isr & ISR_TXIDLE)
1635 SIP_EVCNT_INCR(&sc->sc_ev_txiintr);
1636 #endif
1637
1638 /* Sweep up transmit descriptors. */
1639 SIP_DECL(txintr)(sc);
1640
1641 if (isr & ISR_TXURN) {
1642 u_int32_t thresh;
1643
1644 printf("%s: transmit FIFO underrun",
1645 sc->sc_dev.dv_xname);
1646
1647 thresh = sc->sc_tx_drain_thresh + 1;
1648 if (thresh <= TXCFG_DRTH &&
1649 (thresh * 32) <= (SIP_TXFIFO_SIZE -
1650 (sc->sc_tx_fill_thresh * 32))) {
1651 printf("; increasing Tx drain "
1652 "threshold to %u bytes\n",
1653 thresh * 32);
1654 sc->sc_tx_drain_thresh = thresh;
1655 (void) SIP_DECL(init)(ifp);
1656 } else {
1657 (void) SIP_DECL(init)(ifp);
1658 printf("\n");
1659 }
1660 }
1661 }
1662
1663 #if !defined(DP83820)
1664 if (sc->sc_imr & (ISR_PAUSE_END|ISR_PAUSE_ST)) {
1665 if (isr & ISR_PAUSE_ST) {
1666 sc->sc_paused = 1;
1667 SIP_EVCNT_INCR(&sc->sc_ev_rxpause);
1668 ifp->if_flags |= IFF_OACTIVE;
1669 }
1670 if (isr & ISR_PAUSE_END) {
1671 sc->sc_paused = 0;
1672 ifp->if_flags &= ~IFF_OACTIVE;
1673 }
1674 }
1675 #endif /* ! DP83820 */
1676
1677 if (isr & ISR_HIBERR) {
1678 int want_init = 0;
1679
1680 SIP_EVCNT_INCR(&sc->sc_ev_hiberr);
1681
1682 #define PRINTERR(bit, str) \
1683 do { \
1684 if ((isr & (bit)) != 0) { \
1685 if ((ifp->if_flags & IFF_DEBUG) != 0) \
1686 printf("%s: %s\n", \
1687 sc->sc_dev.dv_xname, str); \
1688 want_init = 1; \
1689 } \
1690 } while (/*CONSTCOND*/0)
1691
1692 PRINTERR(ISR_DPERR, "parity error");
1693 PRINTERR(ISR_SSERR, "system error");
1694 PRINTERR(ISR_RMABT, "master abort");
1695 PRINTERR(ISR_RTABT, "target abort");
1696 PRINTERR(ISR_RXSOVR, "receive status FIFO overrun");
1697 /*
1698 * Ignore:
1699 * Tx reset complete
1700 * Rx reset complete
1701 */
1702 if (want_init)
1703 (void) SIP_DECL(init)(ifp);
1704 #undef PRINTERR
1705 }
1706 }
1707
1708 /* Re-enable interrupts. */
1709 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_IER, IER_IE);
1710
1711 /* Try to get more packets going. */
1712 SIP_DECL(start)(ifp);
1713
1714 return (handled);
1715 }
1716
1717 /*
1718 * sip_txintr:
1719 *
1720 * Helper; handle transmit interrupts.
1721 */
1722 static void
1723 SIP_DECL(txintr)(struct sip_softc *sc)
1724 {
1725 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1726 struct sip_txsoft *txs;
1727 u_int32_t cmdsts;
1728
1729 #ifndef DP83820
1730 if (sc->sc_paused == 0)
1731 #endif
1732 ifp->if_flags &= ~IFF_OACTIVE;
1733
1734 /*
1735 * Go through our Tx list and free mbufs for those
1736 * frames which have been transmitted.
1737 */
1738 while ((txs = SIMPLEQ_FIRST(&sc->sc_txdirtyq)) != NULL) {
1739 SIP_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_dmamap->dm_nsegs,
1740 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1741
1742 cmdsts = le32toh(sc->sc_txdescs[txs->txs_lastdesc].sipd_cmdsts);
1743 if (cmdsts & CMDSTS_OWN)
1744 break;
1745
1746 SIMPLEQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q);
1747
1748 sc->sc_txfree += txs->txs_dmamap->dm_nsegs;
1749
1750 bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
1751 0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1752 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
1753 m_freem(txs->txs_mbuf);
1754 txs->txs_mbuf = NULL;
1755
1756 SIMPLEQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
1757
1758 /*
1759 * Check for errors and collisions.
1760 */
1761 if (cmdsts &
1762 (CMDSTS_Tx_TXA|CMDSTS_Tx_TFU|CMDSTS_Tx_ED|CMDSTS_Tx_EC)) {
1763 ifp->if_oerrors++;
1764 if (cmdsts & CMDSTS_Tx_EC)
1765 ifp->if_collisions += 16;
1766 if (ifp->if_flags & IFF_DEBUG) {
1767 if (cmdsts & CMDSTS_Tx_ED)
1768 printf("%s: excessive deferral\n",
1769 sc->sc_dev.dv_xname);
1770 if (cmdsts & CMDSTS_Tx_EC)
1771 printf("%s: excessive collisions\n",
1772 sc->sc_dev.dv_xname);
1773 }
1774 } else {
1775 /* Packet was transmitted successfully. */
1776 ifp->if_opackets++;
1777 ifp->if_collisions += CMDSTS_COLLISIONS(cmdsts);
1778 }
1779 }
1780
1781 /*
1782 * If there are no more pending transmissions, cancel the watchdog
1783 * timer.
1784 */
1785 if (txs == NULL) {
1786 ifp->if_timer = 0;
1787 sc->sc_txwin = 0;
1788 }
1789 }
1790
1791 #if defined(DP83820)
1792 /*
1793 * sip_rxintr:
1794 *
1795 * Helper; handle receive interrupts.
1796 */
1797 static void
1798 SIP_DECL(rxintr)(struct sip_softc *sc)
1799 {
1800 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1801 struct sip_rxsoft *rxs;
1802 struct mbuf *m;
1803 u_int32_t cmdsts, extsts;
1804 int i, len;
1805
1806 for (i = sc->sc_rxptr;; i = SIP_NEXTRX(i)) {
1807 rxs = &sc->sc_rxsoft[i];
1808
1809 SIP_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1810
1811 cmdsts = le32toh(sc->sc_rxdescs[i].sipd_cmdsts);
1812 extsts = le32toh(sc->sc_rxdescs[i].sipd_extsts);
1813 len = CMDSTS_SIZE(cmdsts);
1814
1815 /*
1816 * NOTE: OWN is set if owned by _consumer_. We're the
1817 * consumer of the receive ring, so if the bit is clear,
1818 * we have processed all of the packets.
1819 */
1820 if ((cmdsts & CMDSTS_OWN) == 0) {
1821 /*
1822 * We have processed all of the receive buffers.
1823 */
1824 break;
1825 }
1826
1827 if (__predict_false(sc->sc_rxdiscard)) {
1828 SIP_INIT_RXDESC(sc, i);
1829 if ((cmdsts & CMDSTS_MORE) == 0) {
1830 /* Reset our state. */
1831 sc->sc_rxdiscard = 0;
1832 }
1833 continue;
1834 }
1835
1836 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
1837 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
1838
1839 m = rxs->rxs_mbuf;
1840
1841 /*
1842 * Add a new receive buffer to the ring.
1843 */
1844 if (SIP_DECL(add_rxbuf)(sc, i) != 0) {
1845 /*
1846 * Failed, throw away what we've done so
1847 * far, and discard the rest of the packet.
1848 */
1849 ifp->if_ierrors++;
1850 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
1851 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
1852 SIP_INIT_RXDESC(sc, i);
1853 if (cmdsts & CMDSTS_MORE)
1854 sc->sc_rxdiscard = 1;
1855 if (sc->sc_rxhead != NULL)
1856 m_freem(sc->sc_rxhead);
1857 SIP_RXCHAIN_RESET(sc);
1858 continue;
1859 }
1860
1861 SIP_RXCHAIN_LINK(sc, m);
1862
1863 m->m_len = len;
1864
1865 /*
1866 * If this is not the end of the packet, keep
1867 * looking.
1868 */
1869 if (cmdsts & CMDSTS_MORE) {
1870 sc->sc_rxlen += len;
1871 continue;
1872 }
1873
1874 /*
1875 * Okay, we have the entire packet now. The chip includes
1876 * the FCS, so we need to trim it.
1877 */
1878 m->m_len -= ETHER_CRC_LEN;
1879
1880 *sc->sc_rxtailp = NULL;
1881 m = sc->sc_rxhead;
1882 len = m->m_len + sc->sc_rxlen;
1883
1884 SIP_RXCHAIN_RESET(sc);
1885
1886 /*
1887 * If an error occurred, update stats and drop the packet.
1888 */
1889 if (cmdsts & (CMDSTS_Rx_RXA|CMDSTS_Rx_RUNT|
1890 CMDSTS_Rx_ISE|CMDSTS_Rx_CRCE|CMDSTS_Rx_FAE)) {
1891 ifp->if_ierrors++;
1892 if ((cmdsts & CMDSTS_Rx_RXA) != 0 &&
1893 (cmdsts & CMDSTS_Rx_RXO) == 0) {
1894 /* Receive overrun handled elsewhere. */
1895 printf("%s: receive descriptor error\n",
1896 sc->sc_dev.dv_xname);
1897 }
1898 #define PRINTERR(bit, str) \
1899 if ((ifp->if_flags & IFF_DEBUG) != 0 && \
1900 (cmdsts & (bit)) != 0) \
1901 printf("%s: %s\n", sc->sc_dev.dv_xname, str)
1902 PRINTERR(CMDSTS_Rx_RUNT, "runt packet");
1903 PRINTERR(CMDSTS_Rx_ISE, "invalid symbol error");
1904 PRINTERR(CMDSTS_Rx_CRCE, "CRC error");
1905 PRINTERR(CMDSTS_Rx_FAE, "frame alignment error");
1906 #undef PRINTERR
1907 m_freem(m);
1908 continue;
1909 }
1910
1911 /*
1912 * If the packet is small enough to fit in a
1913 * single header mbuf, allocate one and copy
1914 * the data into it. This greatly reduces
1915 * memory consumption when we receive lots
1916 * of small packets.
1917 */
1918 if (SIP_DECL(copy_small) != 0 && len <= (MHLEN - 2)) {
1919 struct mbuf *nm;
1920 MGETHDR(nm, M_DONTWAIT, MT_DATA);
1921 if (nm == NULL) {
1922 ifp->if_ierrors++;
1923 m_freem(m);
1924 continue;
1925 }
1926 nm->m_data += 2;
1927 nm->m_pkthdr.len = nm->m_len = len;
1928 m_copydata(m, 0, len, mtod(nm, caddr_t));
1929 m_freem(m);
1930 m = nm;
1931 }
1932 #ifndef __NO_STRICT_ALIGNMENT
1933 else {
1934 /*
1935 * The DP83820's receive buffers must be 4-byte
1936 * aligned. But this means that the data after
1937 * the Ethernet header is misaligned. To compensate,
1938 * we have artificially shortened the buffer size
1939 * in the descriptor, and we do an overlapping copy
1940 * of the data two bytes further in (in the first
1941 * buffer of the chain only).
1942 */
1943 memmove(mtod(m, caddr_t) + 2, mtod(m, caddr_t),
1944 m->m_len);
1945 m->m_data += 2;
1946 }
1947 #endif /* ! __NO_STRICT_ALIGNMENT */
1948
1949 /*
1950 * If VLANs are enabled, VLAN packets have been unwrapped
1951 * for us. Associate the tag with the packet.
1952 */
1953 if (sc->sc_ethercom.ec_nvlans != 0 &&
1954 (extsts & EXTSTS_VPKT) != 0) {
1955 struct m_tag *vtag;
1956
1957 vtag = m_tag_get(PACKET_TAG_VLAN, sizeof(u_int),
1958 M_NOWAIT);
1959 if (vtag == NULL) {
1960 ifp->if_ierrors++;
1961 printf("%s: unable to allocate VLAN tag\n",
1962 sc->sc_dev.dv_xname);
1963 m_freem(m);
1964 continue;
1965 }
1966
1967 *(u_int *)(vtag + 1) = ntohs(extsts & EXTSTS_VTCI);
1968 }
1969
1970 /*
1971 * Set the incoming checksum information for the
1972 * packet.
1973 */
1974 if ((extsts & EXTSTS_IPPKT) != 0) {
1975 SIP_EVCNT_INCR(&sc->sc_ev_rxipsum);
1976 m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
1977 if (extsts & EXTSTS_Rx_IPERR)
1978 m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
1979 if (extsts & EXTSTS_TCPPKT) {
1980 SIP_EVCNT_INCR(&sc->sc_ev_rxtcpsum);
1981 m->m_pkthdr.csum_flags |= M_CSUM_TCPv4;
1982 if (extsts & EXTSTS_Rx_TCPERR)
1983 m->m_pkthdr.csum_flags |=
1984 M_CSUM_TCP_UDP_BAD;
1985 } else if (extsts & EXTSTS_UDPPKT) {
1986 SIP_EVCNT_INCR(&sc->sc_ev_rxudpsum);
1987 m->m_pkthdr.csum_flags |= M_CSUM_UDPv4;
1988 if (extsts & EXTSTS_Rx_UDPERR)
1989 m->m_pkthdr.csum_flags |=
1990 M_CSUM_TCP_UDP_BAD;
1991 }
1992 }
1993
1994 ifp->if_ipackets++;
1995 m->m_pkthdr.rcvif = ifp;
1996 m->m_pkthdr.len = len;
1997
1998 #if NBPFILTER > 0
1999 /*
2000 * Pass this up to any BPF listeners, but only
2001 	 * pass it up the stack if it's for us.
2002 */
2003 if (ifp->if_bpf)
2004 bpf_mtap(ifp->if_bpf, m);
2005 #endif /* NBPFILTER > 0 */
2006
2007 /* Pass it on. */
2008 (*ifp->if_input)(ifp, m);
2009 }
2010
2011 /* Update the receive pointer. */
2012 sc->sc_rxptr = i;
2013 }
2014 #else /* ! DP83820 */
2015 /*
2016 * sip_rxintr:
2017 *
2018 * Helper; handle receive interrupts.
2019 */
2020 static void
2021 SIP_DECL(rxintr)(struct sip_softc *sc)
2022 {
2023 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2024 struct sip_rxsoft *rxs;
2025 struct mbuf *m;
2026 u_int32_t cmdsts;
2027 int i, len;
2028
2029 for (i = sc->sc_rxptr;; i = SIP_NEXTRX(i)) {
2030 rxs = &sc->sc_rxsoft[i];
2031
2032 SIP_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2033
2034 cmdsts = le32toh(sc->sc_rxdescs[i].sipd_cmdsts);
2035
2036 /*
2037 * NOTE: OWN is set if owned by _consumer_. We're the
2038 * consumer of the receive ring, so if the bit is clear,
2039 * we have processed all of the packets.
2040 */
2041 if ((cmdsts & CMDSTS_OWN) == 0) {
2042 /*
2043 * We have processed all of the receive buffers.
2044 */
2045 break;
2046 }
2047
2048 /*
2049 * If any collisions were seen on the wire, count one.
2050 */
2051 if (cmdsts & CMDSTS_Rx_COL)
2052 ifp->if_collisions++;
2053
2054 /*
2055 * If an error occurred, update stats, clear the status
2056 * word, and leave the packet buffer in place. It will
2057 * simply be reused the next time the ring comes around.
2058 */
2059 if (cmdsts & (CMDSTS_Rx_RXA|CMDSTS_Rx_RUNT|
2060 CMDSTS_Rx_ISE|CMDSTS_Rx_CRCE|CMDSTS_Rx_FAE)) {
2061 ifp->if_ierrors++;
2062 if ((cmdsts & CMDSTS_Rx_RXA) != 0 &&
2063 (cmdsts & CMDSTS_Rx_RXO) == 0) {
2064 /* Receive overrun handled elsewhere. */
2065 printf("%s: receive descriptor error\n",
2066 sc->sc_dev.dv_xname);
2067 }
2068 #define PRINTERR(bit, str) \
2069 if ((ifp->if_flags & IFF_DEBUG) != 0 && \
2070 (cmdsts & (bit)) != 0) \
2071 printf("%s: %s\n", sc->sc_dev.dv_xname, str)
2072 PRINTERR(CMDSTS_Rx_RUNT, "runt packet");
2073 PRINTERR(CMDSTS_Rx_ISE, "invalid symbol error");
2074 PRINTERR(CMDSTS_Rx_CRCE, "CRC error");
2075 PRINTERR(CMDSTS_Rx_FAE, "frame alignment error");
2076 #undef PRINTERR
2077 SIP_INIT_RXDESC(sc, i);
2078 continue;
2079 }
2080
2081 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
2082 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
2083
2084 /*
2085 * No errors; receive the packet. Note, the SiS 900
2086 * includes the CRC with every packet.
2087 */
2088 len = CMDSTS_SIZE(cmdsts) - ETHER_CRC_LEN;
2089
2090 #ifdef __NO_STRICT_ALIGNMENT
2091 /*
2092 * If the packet is small enough to fit in a
2093 * single header mbuf, allocate one and copy
2094 * the data into it. This greatly reduces
2095 * memory consumption when we receive lots
2096 * of small packets.
2097 *
2098 * Otherwise, we add a new buffer to the receive
2099 * chain. If this fails, we drop the packet and
2100 * recycle the old buffer.
2101 */
2102 if (SIP_DECL(copy_small) != 0 && len <= MHLEN) {
2103 MGETHDR(m, M_DONTWAIT, MT_DATA);
2104 if (m == NULL)
2105 goto dropit;
2106 memcpy(mtod(m, caddr_t),
2107 mtod(rxs->rxs_mbuf, caddr_t), len);
2108 SIP_INIT_RXDESC(sc, i);
2109 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
2110 rxs->rxs_dmamap->dm_mapsize,
2111 BUS_DMASYNC_PREREAD);
2112 } else {
2113 m = rxs->rxs_mbuf;
2114 if (SIP_DECL(add_rxbuf)(sc, i) != 0) {
2115 dropit:
2116 ifp->if_ierrors++;
2117 SIP_INIT_RXDESC(sc, i);
2118 bus_dmamap_sync(sc->sc_dmat,
2119 rxs->rxs_dmamap, 0,
2120 rxs->rxs_dmamap->dm_mapsize,
2121 BUS_DMASYNC_PREREAD);
2122 continue;
2123 }
2124 }
2125 #else
2126 /*
2127 * The SiS 900's receive buffers must be 4-byte aligned.
2128 * But this means that the data after the Ethernet header
2129 * is misaligned. We must allocate a new buffer and
2130 * copy the data, shifted forward 2 bytes.
2131 */
2132 MGETHDR(m, M_DONTWAIT, MT_DATA);
2133 if (m == NULL) {
2134 dropit:
2135 ifp->if_ierrors++;
2136 SIP_INIT_RXDESC(sc, i);
2137 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
2138 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
2139 continue;
2140 }
2141 if (len > (MHLEN - 2)) {
2142 MCLGET(m, M_DONTWAIT);
2143 if ((m->m_flags & M_EXT) == 0) {
2144 m_freem(m);
2145 goto dropit;
2146 }
2147 }
2148 m->m_data += 2;
2149
2150 /*
2151 * Note that we use clusters for incoming frames, so the
2152 * buffer is virtually contiguous.
2153 */
2154 memcpy(mtod(m, caddr_t), mtod(rxs->rxs_mbuf, caddr_t), len);
2155
2156 /* Allow the receive descriptor to continue using its mbuf. */
2157 SIP_INIT_RXDESC(sc, i);
2158 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
2159 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
2160 #endif /* __NO_STRICT_ALIGNMENT */
2161
2162 ifp->if_ipackets++;
2163 m->m_pkthdr.rcvif = ifp;
2164 m->m_pkthdr.len = m->m_len = len;
2165
2166 #if NBPFILTER > 0
2167 /*
2168 * Pass this up to any BPF listeners, but only
2169 	 * pass it up the stack if it's for us.
2170 */
2171 if (ifp->if_bpf)
2172 bpf_mtap(ifp->if_bpf, m);
2173 #endif /* NBPFILTER > 0 */
2174
2175 /* Pass it on. */
2176 (*ifp->if_input)(ifp, m);
2177 }
2178
2179 /* Update the receive pointer. */
2180 sc->sc_rxptr = i;
2181 }
2182 #endif /* DP83820 */
2183
2184 /*
2185 * sip_tick:
2186 *
2187 * One second timer, used to tick the MII.
2188 */
2189 static void
2190 SIP_DECL(tick)(void *arg)
2191 {
2192 struct sip_softc *sc = arg;
2193 int s;
2194
2195 s = splnet();
2196 #ifdef DP83820
2197 #ifdef SIP_EVENT_COUNTERS
2198 /* Read PAUSE related counts from MIB registers. */
2199 sc->sc_ev_rxpause.ev_count +=
2200 bus_space_read_4(sc->sc_st, sc->sc_sh,
2201 SIP_NS_MIB(MIB_RXPauseFrames)) & 0xffff;
2202 sc->sc_ev_txpause.ev_count +=
2203 bus_space_read_4(sc->sc_st, sc->sc_sh,
2204 SIP_NS_MIB(MIB_TXPauseFrames)) & 0xffff;
2205 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_NS_MIBC, MIBC_ACLR);
2206 #endif /* SIP_EVENT_COUNTERS */
2207 #endif /* DP83820 */
2208 mii_tick(&sc->sc_mii);
2209 splx(s);
2210
2211 callout_reset(&sc->sc_tick_ch, hz, SIP_DECL(tick), sc);
2212 }
2213
2214 /*
2215 * sip_reset:
2216 *
2217  *	Perform a soft reset on the chip.
2218 */
2219 static void
2220 SIP_DECL(reset)(struct sip_softc *sc)
2221 {
2222 bus_space_tag_t st = sc->sc_st;
2223 bus_space_handle_t sh = sc->sc_sh;
2224 int i;
2225
2226 bus_space_write_4(st, sh, SIP_IER, 0);
2227 bus_space_write_4(st, sh, SIP_IMR, 0);
2228 bus_space_write_4(st, sh, SIP_RFCR, 0);
2229 bus_space_write_4(st, sh, SIP_CR, CR_RST);
2230
2231 for (i = 0; i < SIP_TIMEOUT; i++) {
2232 if ((bus_space_read_4(st, sh, SIP_CR) & CR_RST) == 0)
2233 break;
2234 delay(2);
2235 }
2236
2237 if (i == SIP_TIMEOUT)
2238 printf("%s: reset failed to complete\n", sc->sc_dev.dv_xname);
2239
2240 delay(1000);
2241
2242 #ifdef DP83820
2243 /*
2244 * Set the general purpose I/O bits. Do it here in case we
2245 * need to have GPIO set up to talk to the media interface.
2246 */
2247 bus_space_write_4(st, sh, SIP_GPIOR, sc->sc_gpior);
2248 delay(1000);
2249 #endif /* DP83820 */
2250 }
2251
2252 /*
2253 * sip_init: [ ifnet interface function ]
2254 *
2255 * Initialize the interface. Must be called at splnet().
2256 */
2257 static int
2258 SIP_DECL(init)(struct ifnet *ifp)
2259 {
2260 struct sip_softc *sc = ifp->if_softc;
2261 bus_space_tag_t st = sc->sc_st;
2262 bus_space_handle_t sh = sc->sc_sh;
2263 struct sip_txsoft *txs;
2264 struct sip_rxsoft *rxs;
2265 struct sip_desc *sipd;
2266 #if defined(DP83820)
2267 u_int32_t reg;
2268 #endif
2269 int i, error = 0;
2270
2271 /*
2272 * Cancel any pending I/O.
2273 */
2274 SIP_DECL(stop)(ifp, 0);
2275
2276 /*
2277 * Reset the chip to a known state.
2278 */
2279 SIP_DECL(reset)(sc);
2280
2281 #if !defined(DP83820)
2282 if (SIP_CHIP_MODEL(sc, PCI_VENDOR_NS, PCI_PRODUCT_NS_DP83815)) {
2283 /*
2284 * DP83815 manual, page 78:
2285 * 4.4 Recommended Registers Configuration
2286 * For optimum performance of the DP83815, version noted
2287 * as DP83815CVNG (SRR = 203h), the listed register
2288 * modifications must be followed in sequence...
2289 *
2290 * It's not clear if this should be 302h or 203h because that
2291 * chip name is listed as SRR 302h in the description of the
2292 * SRR register. However, my revision 302h DP83815 on the
2293 * Netgear FA311 purchased in 02/2001 needs these settings
2294 * to avoid tons of errors in AcceptPerfectMatch (non-
2295 * IFF_PROMISC) mode. I do not know if other revisions need
2296 * this set or not. [briggs -- 09 March 2001]
2297 *
2298 * Note that only the low-order 12 bits of 0xe4 are documented
2299 * and that this sets reserved bits in that register.
2300 */
2301 bus_space_write_4(st, sh, 0x00cc, 0x0001);
2302
2303 bus_space_write_4(st, sh, 0x00e4, 0x189C);
2304 bus_space_write_4(st, sh, 0x00fc, 0x0000);
2305 bus_space_write_4(st, sh, 0x00f4, 0x5040);
2306 bus_space_write_4(st, sh, 0x00f8, 0x008c);
2307
2308 bus_space_write_4(st, sh, 0x00cc, 0x0000);
2309 }
2310 #endif /* ! DP83820 */
2311
2312 /*
2313 * Initialize the transmit descriptor ring.
2314 */
2315 for (i = 0; i < SIP_NTXDESC; i++) {
2316 sipd = &sc->sc_txdescs[i];
2317 memset(sipd, 0, sizeof(struct sip_desc));
2318 sipd->sipd_link = htole32(SIP_CDTXADDR(sc, SIP_NEXTTX(i)));
2319 }
2320 SIP_CDTXSYNC(sc, 0, SIP_NTXDESC,
2321 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
2322 sc->sc_txfree = SIP_NTXDESC;
2323 sc->sc_txnext = 0;
2324 sc->sc_txwin = 0;
2325
2326 /*
2327 * Initialize the transmit job descriptors.
2328 */
2329 SIMPLEQ_INIT(&sc->sc_txfreeq);
2330 SIMPLEQ_INIT(&sc->sc_txdirtyq);
2331 for (i = 0; i < SIP_TXQUEUELEN; i++) {
2332 txs = &sc->sc_txsoft[i];
2333 txs->txs_mbuf = NULL;
2334 SIMPLEQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
2335 }
2336
2337 /*
2338 * Initialize the receive descriptor and receive job
2339 * descriptor rings.
2340 */
2341 for (i = 0; i < SIP_NRXDESC; i++) {
2342 rxs = &sc->sc_rxsoft[i];
2343 if (rxs->rxs_mbuf == NULL) {
2344 if ((error = SIP_DECL(add_rxbuf)(sc, i)) != 0) {
2345 printf("%s: unable to allocate or map rx "
2346 "buffer %d, error = %d\n",
2347 sc->sc_dev.dv_xname, i, error);
2348 /*
2349 * XXX Should attempt to run with fewer receive
2350 * XXX buffers instead of just failing.
2351 */
2352 SIP_DECL(rxdrain)(sc);
2353 goto out;
2354 }
2355 } else
2356 SIP_INIT_RXDESC(sc, i);
2357 }
2358 sc->sc_rxptr = 0;
2359 #ifdef DP83820
2360 sc->sc_rxdiscard = 0;
2361 SIP_RXCHAIN_RESET(sc);
2362 #endif /* DP83820 */
2363
2364 /*
2365 * Set the configuration register; it's already initialized
2366 * in sip_attach().
2367 */
2368 bus_space_write_4(st, sh, SIP_CFG, sc->sc_cfg);
2369
2370 /*
2371 * Initialize the prototype TXCFG register.
2372 */
2373 #if defined(DP83820)
2374 sc->sc_txcfg = TXCFG_MXDMA_512;
2375 sc->sc_rxcfg = RXCFG_MXDMA_512;
2376 #else
2377 if ((SIP_SIS900_REV(sc, SIS_REV_635) ||
2378 SIP_SIS900_REV(sc, SIS_REV_960) ||
2379 SIP_SIS900_REV(sc, SIS_REV_900B)) &&
2380 (sc->sc_cfg & CFG_EDBMASTEN)) {
2381 sc->sc_txcfg = TXCFG_MXDMA_64;
2382 sc->sc_rxcfg = RXCFG_MXDMA_64;
2383 } else {
2384 sc->sc_txcfg = TXCFG_MXDMA_512;
2385 sc->sc_rxcfg = RXCFG_MXDMA_512;
2386 }
2387 #endif /* DP83820 */
2388
2389 sc->sc_txcfg |= TXCFG_ATP |
2390 (sc->sc_tx_fill_thresh << TXCFG_FLTH_SHIFT) |
2391 sc->sc_tx_drain_thresh;
2392 bus_space_write_4(st, sh, SIP_TXCFG, sc->sc_txcfg);
2393
2394 /*
2395 * Initialize the receive drain threshold if we have never
2396 * done so.
2397 */
2398 if (sc->sc_rx_drain_thresh == 0) {
2399 /*
2400 * XXX This value should be tuned. This is set to the
2401 * maximum of 248 bytes, and we may be able to improve
2402 * performance by decreasing it (although we should never
2403 * set this value lower than 2; 14 bytes are required to
2404 * filter the packet).
2405 */
2406 sc->sc_rx_drain_thresh = RXCFG_DRTH >> RXCFG_DRTH_SHIFT;
2407 }
2408
2409 /*
2410 * Initialize the prototype RXCFG register.
2411 */
2412 sc->sc_rxcfg |= (sc->sc_rx_drain_thresh << RXCFG_DRTH_SHIFT);
2413 #ifdef DP83820
2414 /*
2415 * Accept long packets (including FCS) so we can handle
2416 * 802.1q-tagged frames and jumbo frames properly.
2417 */
2418 if (ifp->if_mtu > ETHERMTU ||
2419 (sc->sc_ethercom.ec_capenable & ETHERCAP_VLAN_MTU))
2420 sc->sc_rxcfg |= RXCFG_ALP;
2421
2422 /*
2423 * Checksum offloading is disabled if the user selects an MTU
2424 	 * larger than 8109. (FreeBSD says 8152, but there is empirical
2425 * evidence that >8109 does not work on some boards, such as the
2426 * Planex GN-1000TE).
2427 */
2428 if (ifp->if_mtu > 8109 &&
2429 (ifp->if_capenable &
2430 (IFCAP_CSUM_IPv4|IFCAP_CSUM_TCPv4|IFCAP_CSUM_UDPv4))) {
2431 printf("%s: Checksum offloading does not work if MTU > 8109 - "
2432 "disabled.\n", sc->sc_dev.dv_xname);
2433 ifp->if_capenable &= ~(IFCAP_CSUM_IPv4|IFCAP_CSUM_TCPv4|
2434 IFCAP_CSUM_UDPv4);
2435 ifp->if_csum_flags_tx = 0;
2436 ifp->if_csum_flags_rx = 0;
2437 }
2438 #else
2439 /*
2440 * Accept packets >1518 bytes (including FCS) so we can handle
2441 * 802.1q-tagged frames properly.
2442 */
2443 if (sc->sc_ethercom.ec_capenable & ETHERCAP_VLAN_MTU)
2444 sc->sc_rxcfg |= RXCFG_ALP;
2445 #endif
2446 bus_space_write_4(st, sh, SIP_RXCFG, sc->sc_rxcfg);
2447
2448 #ifdef DP83820
2449 /*
2450 * Initialize the VLAN/IP receive control register.
2451 * We enable checksum computation on all incoming
2452 * packets, and do not reject packets w/ bad checksums.
2453 */
2454 reg = 0;
2455 if (ifp->if_capenable &
2456 (IFCAP_CSUM_IPv4|IFCAP_CSUM_TCPv4|IFCAP_CSUM_UDPv4))
2457 reg |= VRCR_IPEN;
2458 if (sc->sc_ethercom.ec_nvlans != 0)
2459 reg |= VRCR_VTDEN|VRCR_VTREN;
2460 bus_space_write_4(st, sh, SIP_VRCR, reg);
2461
2462 /*
2463 * Initialize the VLAN/IP transmit control register.
2464 * We enable outgoing checksum computation on a
2465 * per-packet basis.
2466 */
2467 reg = 0;
2468 if (ifp->if_capenable &
2469 (IFCAP_CSUM_IPv4|IFCAP_CSUM_TCPv4|IFCAP_CSUM_UDPv4))
2470 reg |= VTCR_PPCHK;
2471 if (sc->sc_ethercom.ec_nvlans != 0)
2472 reg |= VTCR_VPPTI;
2473 bus_space_write_4(st, sh, SIP_VTCR, reg);
2474
2475 /*
2476 * If we're using VLANs, initialize the VLAN data register.
2477 * To understand why we bswap the VLAN Ethertype, see section
2478 * 4.2.36 of the DP83820 manual.
2479 */
2480 if (sc->sc_ethercom.ec_nvlans != 0)
2481 bus_space_write_4(st, sh, SIP_VDR, bswap16(ETHERTYPE_VLAN));
2482 #endif /* DP83820 */
2483
2484 /*
2485 * Give the transmit and receive rings to the chip.
2486 */
2487 bus_space_write_4(st, sh, SIP_TXDP, SIP_CDTXADDR(sc, sc->sc_txnext));
2488 bus_space_write_4(st, sh, SIP_RXDP, SIP_CDRXADDR(sc, sc->sc_rxptr));
2489
2490 /*
2491 * Initialize the interrupt mask.
2492 */
2493 sc->sc_imr = ISR_DPERR|ISR_SSERR|ISR_RMABT|ISR_RTABT|ISR_RXSOVR|
2494 ISR_TXURN|ISR_TXDESC|ISR_TXIDLE|ISR_RXORN|ISR_RXIDLE|ISR_RXDESC;
2495 bus_space_write_4(st, sh, SIP_IMR, sc->sc_imr);
2496
2497 /* Set up the receive filter. */
2498 (*sc->sc_model->sip_variant->sipv_set_filter)(sc);
2499
2500 #ifdef DP83820
2501 /*
2502 * Tune sc_rx_flow_thresh.
2503 * XXX "More than 8KB" is too short for jumbo frames.
2504 * XXX TODO: Threshold value should be user-settable.
2505 */
2506 sc->sc_rx_flow_thresh = (PCR_PS_STHI_8 | PCR_PS_STLO_4 |
2507 PCR_PS_FFHI_8 | PCR_PS_FFLO_4 |
2508 (PCR_PAUSE_CNT & PCR_PAUSE_CNT_MASK));
2509 #endif
2510
2511 /*
2512 * Set the current media. Do this after initializing the prototype
2513 * IMR, since sip_mii_statchg() modifies the IMR for 802.3x flow
2514 * control.
2515 */
2516 mii_mediachg(&sc->sc_mii);
2517
2518 #ifdef DP83820
2519 /*
2520 * Set the interrupt hold-off timer to 100us.
2521 */
2522 bus_space_write_4(st, sh, SIP_IHR, 0x01);
2523 #endif
2524
2525 /*
2526 * Enable interrupts.
2527 */
2528 bus_space_write_4(st, sh, SIP_IER, IER_IE);
2529
2530 /*
2531 * Start the transmit and receive processes.
2532 */
2533 bus_space_write_4(st, sh, SIP_CR, CR_RXE | CR_TXE);
2534
2535 /*
2536 * Start the one second MII clock.
2537 */
2538 callout_reset(&sc->sc_tick_ch, hz, SIP_DECL(tick), sc);
2539
2540 /*
2541 * ...all done!
2542 */
2543 ifp->if_flags |= IFF_RUNNING;
2544 ifp->if_flags &= ~IFF_OACTIVE;
2545
2546 out:
2547 if (error)
2548 printf("%s: interface not running\n", sc->sc_dev.dv_xname);
2549 return (error);
2550 }
2551
2552 /*
2553  * sip_rxdrain:
2554 *
2555 * Drain the receive queue.
2556 */
2557 static void
2558 SIP_DECL(rxdrain)(struct sip_softc *sc)
2559 {
2560 struct sip_rxsoft *rxs;
2561 int i;
2562
2563 for (i = 0; i < SIP_NRXDESC; i++) {
2564 rxs = &sc->sc_rxsoft[i];
2565 if (rxs->rxs_mbuf != NULL) {
2566 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
2567 m_freem(rxs->rxs_mbuf);
2568 rxs->rxs_mbuf = NULL;
2569 }
2570 }
2571 }
2572
2573 /*
2574 * sip_stop: [ ifnet interface function ]
2575 *
2576 * Stop transmission on the interface.
2577 */
2578 static void
2579 SIP_DECL(stop)(struct ifnet *ifp, int disable)
2580 {
2581 struct sip_softc *sc = ifp->if_softc;
2582 bus_space_tag_t st = sc->sc_st;
2583 bus_space_handle_t sh = sc->sc_sh;
2584 struct sip_txsoft *txs;
2585 u_int32_t cmdsts = 0; /* DEBUG */
2586
2587 /*
2588 * Stop the one second clock.
2589 */
2590 callout_stop(&sc->sc_tick_ch);
2591
2592 /* Down the MII. */
2593 mii_down(&sc->sc_mii);
2594
2595 /*
2596 * Disable interrupts.
2597 */
2598 bus_space_write_4(st, sh, SIP_IER, 0);
2599
2600 /*
2601 * Stop receiver and transmitter.
2602 */
2603 bus_space_write_4(st, sh, SIP_CR, CR_RXD | CR_TXD);
2604
2605 /*
2606 * Release any queued transmit buffers.
2607 */
2608 while ((txs = SIMPLEQ_FIRST(&sc->sc_txdirtyq)) != NULL) {
2609 if ((ifp->if_flags & IFF_DEBUG) != 0 &&
2610 SIMPLEQ_NEXT(txs, txs_q) == NULL &&
2611 (le32toh(sc->sc_txdescs[txs->txs_lastdesc].sipd_cmdsts) &
2612 CMDSTS_INTR) == 0)
2613 printf("%s: sip_stop: last descriptor does not "
2614 "have INTR bit set\n", sc->sc_dev.dv_xname);
2615 SIMPLEQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q);
2616 #ifdef DIAGNOSTIC
2617 if (txs->txs_mbuf == NULL) {
2618 printf("%s: dirty txsoft with no mbuf chain\n",
2619 sc->sc_dev.dv_xname);
2620 panic("sip_stop");
2621 }
2622 #endif
2623 cmdsts |= /* DEBUG */
2624 le32toh(sc->sc_txdescs[txs->txs_lastdesc].sipd_cmdsts);
2625 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
2626 m_freem(txs->txs_mbuf);
2627 txs->txs_mbuf = NULL;
2628 SIMPLEQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
2629 }
2630
2631 if (disable)
2632 SIP_DECL(rxdrain)(sc);
2633
2634 /*
2635 * Mark the interface down and cancel the watchdog timer.
2636 */
2637 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
2638 ifp->if_timer = 0;
2639
2640 if ((ifp->if_flags & IFF_DEBUG) != 0 &&
2641 (cmdsts & CMDSTS_INTR) == 0 && sc->sc_txfree != SIP_NTXDESC)
2642 printf("%s: sip_stop: no INTR bits set in dirty tx "
2643 "descriptors\n", sc->sc_dev.dv_xname);
2644 }
2645
2646 /*
2647 * sip_read_eeprom:
2648 *
2649 * Read data from the serial EEPROM.
2650 */
2651 static void
2652 SIP_DECL(read_eeprom)(struct sip_softc *sc, int word, int wordcnt,
2653 u_int16_t *data)
2654 {
2655 bus_space_tag_t st = sc->sc_st;
2656 bus_space_handle_t sh = sc->sc_sh;
2657 u_int16_t reg;
2658 int i, x;
2659
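	/*
	 * Each word is read by bit-banging the three-wire EEPROM
	 * interface through SIP_EROMAR: assert chip select, clock out
	 * the 3-bit READ opcode and the 6-bit word address on EEDI
	 * (MSB first), then clock in 16 data bits on EEDO, MSB first.
	 */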
2660 for (i = 0; i < wordcnt; i++) {
2661 /* Send CHIP SELECT. */
2662 reg = EROMAR_EECS;
2663 bus_space_write_4(st, sh, SIP_EROMAR, reg);
2664
2665 /* Shift in the READ opcode. */
2666 for (x = 3; x > 0; x--) {
2667 if (SIP_EEPROM_OPC_READ & (1 << (x - 1)))
2668 reg |= EROMAR_EEDI;
2669 else
2670 reg &= ~EROMAR_EEDI;
2671 bus_space_write_4(st, sh, SIP_EROMAR, reg);
2672 bus_space_write_4(st, sh, SIP_EROMAR,
2673 reg | EROMAR_EESK);
2674 delay(4);
2675 bus_space_write_4(st, sh, SIP_EROMAR, reg);
2676 delay(4);
2677 }
2678
2679 /* Shift in address. */
2680 for (x = 6; x > 0; x--) {
2681 if ((word + i) & (1 << (x - 1)))
2682 reg |= EROMAR_EEDI;
2683 else
2684 reg &= ~EROMAR_EEDI;
2685 bus_space_write_4(st, sh, SIP_EROMAR, reg);
2686 bus_space_write_4(st, sh, SIP_EROMAR,
2687 reg | EROMAR_EESK);
2688 delay(4);
2689 bus_space_write_4(st, sh, SIP_EROMAR, reg);
2690 delay(4);
2691 }
2692
2693 /* Shift out data. */
2694 reg = EROMAR_EECS;
2695 data[i] = 0;
2696 for (x = 16; x > 0; x--) {
2697 bus_space_write_4(st, sh, SIP_EROMAR,
2698 reg | EROMAR_EESK);
2699 delay(4);
2700 if (bus_space_read_4(st, sh, SIP_EROMAR) & EROMAR_EEDO)
2701 data[i] |= (1 << (x - 1));
2702 bus_space_write_4(st, sh, SIP_EROMAR, reg);
2703 delay(4);
2704 }
2705
2706 /* Clear CHIP SELECT. */
2707 bus_space_write_4(st, sh, SIP_EROMAR, 0);
2708 delay(4);
2709 }
2710 }
2711
2712 /*
2713 * sip_add_rxbuf:
2714 *
2715 * Add a receive buffer to the indicated descriptor.
2716 */
2717 static int
2718 SIP_DECL(add_rxbuf)(struct sip_softc *sc, int idx)
2719 {
2720 struct sip_rxsoft *rxs = &sc->sc_rxsoft[idx];
2721 struct mbuf *m;
2722 int error;
2723
2724 MGETHDR(m, M_DONTWAIT, MT_DATA);
2725 if (m == NULL)
2726 return (ENOBUFS);
2727
2728 MCLGET(m, M_DONTWAIT);
2729 if ((m->m_flags & M_EXT) == 0) {
2730 m_freem(m);
2731 return (ENOBUFS);
2732 }
2733
2734 #if defined(DP83820)
2735 m->m_len = SIP_RXBUF_LEN;
2736 #endif /* DP83820 */
2737
2738 if (rxs->rxs_mbuf != NULL)
2739 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
2740
2741 rxs->rxs_mbuf = m;
2742
2743 error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap,
2744 m->m_ext.ext_buf, m->m_ext.ext_size, NULL,
2745 BUS_DMA_READ|BUS_DMA_NOWAIT);
2746 if (error) {
2747 printf("%s: can't load rx DMA map %d, error = %d\n",
2748 sc->sc_dev.dv_xname, idx, error);
2749 panic("sip_add_rxbuf"); /* XXX */
2750 }
2751
2752 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
2753 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
2754
2755 SIP_INIT_RXDESC(sc, idx);
2756
2757 return (0);
2758 }
2759
2760 #if !defined(DP83820)
2761 /*
2762 * sip_sis900_set_filter:
2763 *
2764 * Set up the receive filter.
2765 */
2766 static void
2767 SIP_DECL(sis900_set_filter)(struct sip_softc *sc)
2768 {
2769 bus_space_tag_t st = sc->sc_st;
2770 bus_space_handle_t sh = sc->sc_sh;
2771 struct ethercom *ec = &sc->sc_ethercom;
2772 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2773 struct ether_multi *enm;
2774 u_int8_t *cp;
2775 struct ether_multistep step;
2776 u_int32_t crc, mchash[16];
2777
2778 /*
2779 * Initialize the prototype RFCR.
2780 */
2781 sc->sc_rfcr = RFCR_RFEN;
2782 if (ifp->if_flags & IFF_BROADCAST)
2783 sc->sc_rfcr |= RFCR_AAB;
2784 if (ifp->if_flags & IFF_PROMISC) {
2785 sc->sc_rfcr |= RFCR_AAP;
2786 goto allmulti;
2787 }
2788
2789 /*
2790 * Set up the multicast address filter by passing all multicast
2791 * addresses through a CRC generator, and then using the high-order
2792 	 * 7 bits (8 on the 635/960/900B revisions) as an index into the
2793 	 * 128-bit (or 256-bit) multicast hash table; only the lower 16 bits
2794 	 * of each 32-bit hash register are valid.  The high-order bits of
2795 	 * the index select the register, the low-order four the bit.
2796 */
2797
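	/*
	 * For example, with a (hypothetical) CRC of 0xb6c4xxxx, a
	 * 635/960/900B part uses crc >> 24 == 0xb6, which lands in
	 * mchash[0xb] bit 6 (register MC11); older parts use
	 * crc >> 25 == 0x5b, which lands in mchash[5] bit 11.
	 */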
2798 memset(mchash, 0, sizeof(mchash));
2799
2800 /*
2801 	 * The SiS 900 (at least the SiS 963) requires the PAUSE frame address
2802 	 * (01:80:c2:00:00:01) to be entered into the multicast hash filter.
2803 */
2804 crc = 0x0ed423f9;
2805
2806 if (SIP_SIS900_REV(sc, SIS_REV_635) ||
2807 SIP_SIS900_REV(sc, SIS_REV_960) ||
2808 SIP_SIS900_REV(sc, SIS_REV_900B)) {
2809 /* Just want the 8 most significant bits. */
2810 crc >>= 24;
2811 } else {
2812 /* Just want the 7 most significant bits. */
2813 crc >>= 25;
2814 }
2815
2816 /* Set the corresponding bit in the hash table. */
2817 mchash[crc >> 4] |= 1 << (crc & 0xf);
2818
2819 ETHER_FIRST_MULTI(step, ec, enm);
2820 while (enm != NULL) {
2821 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
2822 /*
2823 * We must listen to a range of multicast addresses.
2824 * For now, just accept all multicasts, rather than
2825 * trying to set only those filter bits needed to match
2826 * the range. (At this time, the only use of address
2827 * ranges is for IP multicast routing, for which the
2828 * range is big enough to require all bits set.)
2829 */
2830 goto allmulti;
2831 }
2832
2833 crc = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN);
2834
2835 if (SIP_SIS900_REV(sc, SIS_REV_635) ||
2836 SIP_SIS900_REV(sc, SIS_REV_960) ||
2837 SIP_SIS900_REV(sc, SIS_REV_900B)) {
2838 /* Just want the 8 most significant bits. */
2839 crc >>= 24;
2840 } else {
2841 /* Just want the 7 most significant bits. */
2842 crc >>= 25;
2843 }
2844
2845 /* Set the corresponding bit in the hash table. */
2846 mchash[crc >> 4] |= 1 << (crc & 0xf);
2847
2848 ETHER_NEXT_MULTI(step, enm);
2849 }
2850
2851 ifp->if_flags &= ~IFF_ALLMULTI;
2852 goto setit;
2853
2854 allmulti:
2855 ifp->if_flags |= IFF_ALLMULTI;
2856 sc->sc_rfcr |= RFCR_AAM;
2857
2858 setit:
2859 #define FILTER_EMIT(addr, data) \
2860 bus_space_write_4(st, sh, SIP_RFCR, (addr)); \
2861 delay(1); \
2862 bus_space_write_4(st, sh, SIP_RFDR, (data)); \
2863 delay(1)
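	/*
	 * FILTER_EMIT writes one 16-bit word of receive filter memory:
	 * the RFCR write selects the filter address (with RFCR_RFEN
	 * clear, so the filter is disabled while being programmed) and
	 * the RFDR write supplies the data.
	 */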
2864
2865 /*
2866 * Disable receive filter, and program the node address.
2867 */
2868 cp = LLADDR(ifp->if_sadl);
2869 FILTER_EMIT(RFCR_RFADDR_NODE0, (cp[1] << 8) | cp[0]);
2870 FILTER_EMIT(RFCR_RFADDR_NODE2, (cp[3] << 8) | cp[2]);
2871 FILTER_EMIT(RFCR_RFADDR_NODE4, (cp[5] << 8) | cp[4]);
2872
2873 if ((ifp->if_flags & IFF_ALLMULTI) == 0) {
2874 /*
2875 * Program the multicast hash table.
2876 */
2877 FILTER_EMIT(RFCR_RFADDR_MC0, mchash[0]);
2878 FILTER_EMIT(RFCR_RFADDR_MC1, mchash[1]);
2879 FILTER_EMIT(RFCR_RFADDR_MC2, mchash[2]);
2880 FILTER_EMIT(RFCR_RFADDR_MC3, mchash[3]);
2881 FILTER_EMIT(RFCR_RFADDR_MC4, mchash[4]);
2882 FILTER_EMIT(RFCR_RFADDR_MC5, mchash[5]);
2883 FILTER_EMIT(RFCR_RFADDR_MC6, mchash[6]);
2884 FILTER_EMIT(RFCR_RFADDR_MC7, mchash[7]);
2885 if (SIP_SIS900_REV(sc, SIS_REV_635) ||
2886 SIP_SIS900_REV(sc, SIS_REV_960) ||
2887 SIP_SIS900_REV(sc, SIS_REV_900B)) {
2888 FILTER_EMIT(RFCR_RFADDR_MC8, mchash[8]);
2889 FILTER_EMIT(RFCR_RFADDR_MC9, mchash[9]);
2890 FILTER_EMIT(RFCR_RFADDR_MC10, mchash[10]);
2891 FILTER_EMIT(RFCR_RFADDR_MC11, mchash[11]);
2892 FILTER_EMIT(RFCR_RFADDR_MC12, mchash[12]);
2893 FILTER_EMIT(RFCR_RFADDR_MC13, mchash[13]);
2894 FILTER_EMIT(RFCR_RFADDR_MC14, mchash[14]);
2895 FILTER_EMIT(RFCR_RFADDR_MC15, mchash[15]);
2896 }
2897 }
2898 #undef FILTER_EMIT
2899
2900 /*
2901 	 * Re-enable the receive filter.
2902 */
2903 bus_space_write_4(st, sh, SIP_RFCR, sc->sc_rfcr);
2904 }
2905 #endif /* ! DP83820 */
2906
2907 /*
2908 * sip_dp83815_set_filter:
2909 *
2910 * Set up the receive filter.
2911 */
2912 static void
2913 SIP_DECL(dp83815_set_filter)(struct sip_softc *sc)
2914 {
2915 bus_space_tag_t st = sc->sc_st;
2916 bus_space_handle_t sh = sc->sc_sh;
2917 struct ethercom *ec = &sc->sc_ethercom;
2918 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2919 struct ether_multi *enm;
2920 u_int8_t *cp;
2921 struct ether_multistep step;
2922 u_int32_t crc, hash, slot, bit;
2923 #ifdef DP83820
2924 #define MCHASH_NWORDS 128
2925 #else
2926 #define MCHASH_NWORDS 32
2927 #endif /* DP83820 */
2928 u_int16_t mchash[MCHASH_NWORDS];
2929 int i;
2930
2931 /*
2932 * Initialize the prototype RFCR.
2933 * Enable the receive filter, and accept on
2934 * Perfect (destination address) Match
2935 * If IFF_BROADCAST, also accept all broadcast packets.
2936 * If IFF_PROMISC, accept all unicast packets (and later, set
2937 * IFF_ALLMULTI and accept all multicast, too).
2938 */
2939 sc->sc_rfcr = RFCR_RFEN | RFCR_APM;
2940 if (ifp->if_flags & IFF_BROADCAST)
2941 sc->sc_rfcr |= RFCR_AAB;
2942 if (ifp->if_flags & IFF_PROMISC) {
2943 sc->sc_rfcr |= RFCR_AAP;
2944 goto allmulti;
2945 }
2946
2947 #ifdef DP83820
2948 /*
2949 * Set up the DP83820 multicast address filter by passing all multicast
2950 * addresses through a CRC generator, and then using the high-order
2951 * 11 bits as an index into the 2048 bit multicast hash table. The
2952 * high-order 7 bits select the slot, while the low-order 4 bits
2953 * select the bit within the slot. Note that only the low 16-bits
2954 * of each filter word are used, and there are 128 filter words.
2955 */
2956 #else
2957 /*
2958 * Set up the DP83815 multicast address filter by passing all multicast
2959 * addresses through a CRC generator, and then using the high-order
2960 * 9 bits as an index into the 512 bit multicast hash table. The
2961 * high-order 5 bits select the slot, while the low-order 4 bits
2962 * select the bit within the slot. Note that only the low 16-bits
2963 * of each filter word are used, and there are 32 filter words.
2964 */
2965 #endif /* DP83820 */
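	/*
	 * For example, with a (hypothetical) CRC of 0xb6c4xxxx, the
	 * DP83815 uses crc >> 23 == 0x16d, i.e. slot 0x16, bit 0xd,
	 * while the DP83820 uses crc >> 21 == 0x5b6, i.e. slot 0x5b,
	 * bit 6.
	 */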
2966
2967 memset(mchash, 0, sizeof(mchash));
2968
2969 ifp->if_flags &= ~IFF_ALLMULTI;
2970 ETHER_FIRST_MULTI(step, ec, enm);
2971 if (enm == NULL)
2972 goto setit;
2973 while (enm != NULL) {
2974 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
2975 /*
2976 * We must listen to a range of multicast addresses.
2977 * For now, just accept all multicasts, rather than
2978 * trying to set only those filter bits needed to match
2979 * the range. (At this time, the only use of address
2980 * ranges is for IP multicast routing, for which the
2981 * range is big enough to require all bits set.)
2982 */
2983 goto allmulti;
2984 }
2985
2986 crc = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN);
2987
2988 #ifdef DP83820
2989 /* Just want the 11 most significant bits. */
2990 hash = crc >> 21;
2991 #else
2992 /* Just want the 9 most significant bits. */
2993 hash = crc >> 23;
2994 #endif /* DP83820 */
2995
2996 slot = hash >> 4;
2997 bit = hash & 0xf;
2998
2999 /* Set the corresponding bit in the hash table. */
3000 mchash[slot] |= 1 << bit;
3001
3002 ETHER_NEXT_MULTI(step, enm);
3003 }
3004 sc->sc_rfcr |= RFCR_MHEN;
3005 goto setit;
3006
3007 allmulti:
3008 ifp->if_flags |= IFF_ALLMULTI;
3009 sc->sc_rfcr |= RFCR_AAM;
3010
3011 setit:
3012 #define FILTER_EMIT(addr, data) \
3013 bus_space_write_4(st, sh, SIP_RFCR, (addr)); \
3014 delay(1); \
3015 bus_space_write_4(st, sh, SIP_RFDR, (data)); \
3016 delay(1)
3017
3018 /*
3019 * Disable receive filter, and program the node address.
3020 */
3021 cp = LLADDR(ifp->if_sadl);
3022 FILTER_EMIT(RFCR_NS_RFADDR_PMATCH0, (cp[1] << 8) | cp[0]);
3023 FILTER_EMIT(RFCR_NS_RFADDR_PMATCH2, (cp[3] << 8) | cp[2]);
3024 FILTER_EMIT(RFCR_NS_RFADDR_PMATCH4, (cp[5] << 8) | cp[4]);
3025
3026 if ((ifp->if_flags & IFF_ALLMULTI) == 0) {
3027 /*
3028 * Program the multicast hash table.
3029 */
3030 for (i = 0; i < MCHASH_NWORDS; i++) {
3031 FILTER_EMIT(RFCR_NS_RFADDR_FILTMEM + (i * 2),
3032 mchash[i]);
3033 }
3034 }
3035 #undef FILTER_EMIT
3036 #undef MCHASH_NWORDS
3037
3038 /*
3039 	 * Re-enable the receive filter.
3040 */
3041 bus_space_write_4(st, sh, SIP_RFCR, sc->sc_rfcr);
3042 }
3043
3044 #if defined(DP83820)
3045 /*
3046 * sip_dp83820_mii_readreg: [mii interface function]
3047 *
3048 * Read a PHY register on the MII of the DP83820.
3049 */
3050 static int
3051 SIP_DECL(dp83820_mii_readreg)(struct device *self, int phy, int reg)
3052 {
3053 struct sip_softc *sc = (void *) self;
3054
3055 if (sc->sc_cfg & CFG_TBI_EN) {
3056 bus_addr_t tbireg;
3057 int rv;
3058
3059 if (phy != 0)
3060 return (0);
3061
3062 switch (reg) {
3063 case MII_BMCR: tbireg = SIP_TBICR; break;
3064 case MII_BMSR: tbireg = SIP_TBISR; break;
3065 case MII_ANAR: tbireg = SIP_TANAR; break;
3066 case MII_ANLPAR: tbireg = SIP_TANLPAR; break;
3067 case MII_ANER: tbireg = SIP_TANER; break;
3068 case MII_EXTSR:
3069 /*
3070 * Don't even bother reading the TESR register.
3071 * The manual documents that the device has
3072 * 1000baseX full/half capability, but the
3073 			 * register itself seems to read back 0 on some
3074 * boards. Just hard-code the result.
3075 */
3076 return (EXTSR_1000XFDX|EXTSR_1000XHDX);
3077
3078 default:
3079 return (0);
3080 }
3081
3082 rv = bus_space_read_4(sc->sc_st, sc->sc_sh, tbireg) & 0xffff;
3083 if (tbireg == SIP_TBISR) {
3084 /* LINK and ACOMP are switched! */
3085 int val = rv;
3086
3087 rv = 0;
3088 if (val & TBISR_MR_LINK_STATUS)
3089 rv |= BMSR_LINK;
3090 if (val & TBISR_MR_AN_COMPLETE)
3091 rv |= BMSR_ACOMP;
3092
3093 /*
3094 * The manual claims this register reads back 0
3095 * on hard and soft reset. But we want to let
3096 * the gentbi driver know that we support auto-
3097 			 * negotiation (and extended status), so hard-code
3098 			 * these bits in the result.
3099 */
3100 rv |= BMSR_ANEG | BMSR_EXTSTAT;
3101 }
3102
3103 return (rv);
3104 }
3105
3106 return (mii_bitbang_readreg(self, &SIP_DECL(mii_bitbang_ops),
3107 phy, reg));
3108 }
3109
3110 /*
3111 * sip_dp83820_mii_writereg: [mii interface function]
3112 *
3113 * Write a PHY register on the MII of the DP83820.
3114 */
3115 static void
3116 SIP_DECL(dp83820_mii_writereg)(struct device *self, int phy, int reg, int val)
3117 {
3118 struct sip_softc *sc = (void *) self;
3119
3120 if (sc->sc_cfg & CFG_TBI_EN) {
3121 bus_addr_t tbireg;
3122
3123 if (phy != 0)
3124 return;
3125
3126 switch (reg) {
3127 case MII_BMCR: tbireg = SIP_TBICR; break;
3128 case MII_ANAR: tbireg = SIP_TANAR; break;
3129 case MII_ANLPAR: tbireg = SIP_TANLPAR; break;
3130 default:
3131 return;
3132 }
3133
3134 bus_space_write_4(sc->sc_st, sc->sc_sh, tbireg, val);
3135 return;
3136 }
3137
3138 mii_bitbang_writereg(self, &SIP_DECL(mii_bitbang_ops),
3139 phy, reg, val);
3140 }
3141
3142 /*
3143 * sip_dp83820_mii_statchg: [mii interface function]
3144 *
3145 * Callback from MII layer when media changes.
3146 */
3147 static void
3148 SIP_DECL(dp83820_mii_statchg)(struct device *self)
3149 {
3150 struct sip_softc *sc = (struct sip_softc *) self;
3151 struct mii_data *mii = &sc->sc_mii;
3152 u_int32_t cfg, pcr;
3153
3154 /*
3155 * Get flow control negotiation result.
3156 */
3157 if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
3158 (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
3159 sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
3160 mii->mii_media_active &= ~IFM_ETH_FMASK;
3161 }
3162
3163 /*
3164 * Update TXCFG for full-duplex operation.
3165 */
3166 if ((mii->mii_media_active & IFM_FDX) != 0)
3167 sc->sc_txcfg |= (TXCFG_CSI | TXCFG_HBI);
3168 else
3169 sc->sc_txcfg &= ~(TXCFG_CSI | TXCFG_HBI);
3170
3171 /*
3172 * Update RXCFG for full-duplex or loopback.
3173 */
3174 if ((mii->mii_media_active & IFM_FDX) != 0 ||
3175 IFM_SUBTYPE(mii->mii_media_active) == IFM_LOOP)
3176 sc->sc_rxcfg |= RXCFG_ATX;
3177 else
3178 sc->sc_rxcfg &= ~RXCFG_ATX;
3179
3180 /*
3181 * Update CFG for MII/GMII.
3182 */
3183 if (sc->sc_ethercom.ec_if.if_baudrate == IF_Mbps(1000))
3184 cfg = sc->sc_cfg | CFG_MODE_1000;
3185 else
3186 cfg = sc->sc_cfg;
3187
3188 /*
3189 * 802.3x flow control.
3190 */
3191 pcr = 0;
3192 if (sc->sc_flowflags & IFM_FLOW) {
3193 if (sc->sc_flowflags & IFM_ETH_TXPAUSE)
3194 pcr |= sc->sc_rx_flow_thresh;
3195 if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
3196 pcr |= PCR_PSEN | PCR_PS_MCAST;
3197 }
3198
3199 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_CFG, cfg);
3200 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_TXCFG, sc->sc_txcfg);
3201 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_RXCFG, sc->sc_rxcfg);
3202 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_NS_PCR, pcr);
3203 }
3204 #endif /* DP83820 */
3205
3206 /*
3207 * sip_mii_bitbang_read: [mii bit-bang interface function]
3208 *
3209 * Read the MII serial port for the MII bit-bang module.
3210 */
3211 static u_int32_t
3212 SIP_DECL(mii_bitbang_read)(struct device *self)
3213 {
3214 struct sip_softc *sc = (void *) self;
3215
3216 return (bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_EROMAR));
3217 }
3218
3219 /*
3220  * sip_mii_bitbang_write: [mii bit-bang interface function]
3221 *
3222 * Write the MII serial port for the MII bit-bang module.
3223 */
3224 static void
3225 SIP_DECL(mii_bitbang_write)(struct device *self, u_int32_t val)
3226 {
3227 struct sip_softc *sc = (void *) self;
3228
3229 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_EROMAR, val);
3230 }
3231
3232 #ifndef DP83820
3233 /*
3234 * sip_sis900_mii_readreg: [mii interface function]
3235 *
3236 * Read a PHY register on the MII.
3237 */
3238 static int
3239 SIP_DECL(sis900_mii_readreg)(struct device *self, int phy, int reg)
3240 {
3241 struct sip_softc *sc = (struct sip_softc *) self;
3242 u_int32_t enphy;
3243
3244 /*
3245 * The PHY of recent SiS chipsets is accessed through bitbang
3246 * operations.
3247 */
3248 if (sc->sc_model->sip_product == PCI_PRODUCT_SIS_900)
3249 return (mii_bitbang_readreg(self, &SIP_DECL(mii_bitbang_ops),
3250 phy, reg));
3251
3252 #ifndef SIS900_MII_RESTRICT
3253 /*
3254 * The SiS 900 has only an internal PHY on the MII. Only allow
3255 * MII address 0.
3256 */
3257 if (sc->sc_model->sip_product == PCI_PRODUCT_SIS_900 && phy != 0)
3258 return (0);
3259 #endif
3260
3261 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_ENPHY,
3262 (phy << ENPHY_PHYADDR_SHIFT) | (reg << ENPHY_REGADDR_SHIFT) |
3263 ENPHY_RWCMD | ENPHY_ACCESS);
3264 do {
3265 enphy = bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_ENPHY);
3266 } while (enphy & ENPHY_ACCESS);
3267 return ((enphy & ENPHY_PHYDATA) >> ENPHY_DATA_SHIFT);
3268 }
3269
3270 /*
3271 * sip_sis900_mii_writereg: [mii interface function]
3272 *
3273 * Write a PHY register on the MII.
3274 */
3275 static void
3276 SIP_DECL(sis900_mii_writereg)(struct device *self, int phy, int reg, int val)
3277 {
3278 struct sip_softc *sc = (struct sip_softc *) self;
3279 u_int32_t enphy;
3280
3281 if (sc->sc_model->sip_product == PCI_PRODUCT_SIS_900) {
3282 mii_bitbang_writereg(self, &SIP_DECL(mii_bitbang_ops),
3283 phy, reg, val);
3284 return;
3285 }
3286
3287 #ifndef SIS900_MII_RESTRICT
3288 /*
3289 * The SiS 900 has only an internal PHY on the MII. Only allow
3290 * MII address 0.
3291 */
3292 if (sc->sc_model->sip_product == PCI_PRODUCT_SIS_900 && phy != 0)
3293 return;
3294 #endif
3295
3296 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_ENPHY,
3297 (val << ENPHY_DATA_SHIFT) | (phy << ENPHY_PHYADDR_SHIFT) |
3298 (reg << ENPHY_REGADDR_SHIFT) | ENPHY_ACCESS);
3299 do {
3300 enphy = bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_ENPHY);
3301 } while (enphy & ENPHY_ACCESS);
3302 }
3303
3304 /*
3305 * sip_sis900_mii_statchg: [mii interface function]
3306 *
3307 * Callback from MII layer when media changes.
3308 */
3309 static void
3310 SIP_DECL(sis900_mii_statchg)(struct device *self)
3311 {
3312 struct sip_softc *sc = (struct sip_softc *) self;
3313 struct mii_data *mii = &sc->sc_mii;
3314 u_int32_t flowctl;
3315
3316 /*
3317 * Get flow control negotiation result.
3318 */
3319 if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
3320 (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
3321 sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
3322 mii->mii_media_active &= ~IFM_ETH_FMASK;
3323 }
3324
3325 /*
3326 * Update TXCFG for full-duplex operation.
3327 */
3328 if ((mii->mii_media_active & IFM_FDX) != 0)
3329 sc->sc_txcfg |= (TXCFG_CSI | TXCFG_HBI);
3330 else
3331 sc->sc_txcfg &= ~(TXCFG_CSI | TXCFG_HBI);
3332
3333 /*
3334 * Update RXCFG for full-duplex or loopback.
3335 */
3336 if ((mii->mii_media_active & IFM_FDX) != 0 ||
3337 IFM_SUBTYPE(mii->mii_media_active) == IFM_LOOP)
3338 sc->sc_rxcfg |= RXCFG_ATX;
3339 else
3340 sc->sc_rxcfg &= ~RXCFG_ATX;
3341
3342 /*
3343 * Update IMR for use of 802.3x flow control.
3344 */
3345 if (sc->sc_flowflags & IFM_FLOW) {
3346 sc->sc_imr |= (ISR_PAUSE_END|ISR_PAUSE_ST);
3347 flowctl = FLOWCTL_FLOWEN;
3348 } else {
3349 sc->sc_imr &= ~(ISR_PAUSE_END|ISR_PAUSE_ST);
3350 flowctl = 0;
3351 }
3352
3353 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_TXCFG, sc->sc_txcfg);
3354 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_RXCFG, sc->sc_rxcfg);
3355 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_IMR, sc->sc_imr);
3356 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_FLOWCTL, flowctl);
3357 }
3358
3359 /*
3360 * sip_dp83815_mii_readreg: [mii interface function]
3361 *
3362 * Read a PHY register on the MII.
3363 */
3364 static int
3365 SIP_DECL(dp83815_mii_readreg)(struct device *self, int phy, int reg)
3366 {
3367 struct sip_softc *sc = (struct sip_softc *) self;
3368 u_int32_t val;
3369
3370 /*
3371 * The DP83815 only has an internal PHY. Only allow
3372 * MII address 0.
3373 */
3374 if (phy != 0)
3375 return (0);
3376
3377 /*
3378 * Apparently, after a reset, the DP83815 can take a while
3379 * to respond. During this recovery period, the BMSR returns
3380 * a value of 0. Catch this -- it's not supposed to happen
3381 * (the BMSR has some hardcoded-to-1 bits), and wait for the
3382 * PHY to come back to life.
3383 *
3384 * This works out because the BMSR is the first register
3385 * read during the PHY probe process.
3386 */
3387 do {
3388 val = bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_NS_PHY(reg));
3389 } while (reg == MII_BMSR && val == 0);
3390
3391 return (val & 0xffff);
3392 }
3393
3394 /*
3395 * sip_dp83815_mii_writereg: [mii interface function]
3396 *
3397 * Write a PHY register to the MII.
3398 */
3399 static void
3400 SIP_DECL(dp83815_mii_writereg)(struct device *self, int phy, int reg, int val)
3401 {
3402 struct sip_softc *sc = (struct sip_softc *) self;
3403
3404 /*
3405 * The DP83815 only has an internal PHY. Only allow
3406 * MII address 0.
3407 */
3408 if (phy != 0)
3409 return;
3410
3411 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_NS_PHY(reg), val);
3412 }
3413
3414 /*
3415 * sip_dp83815_mii_statchg: [mii interface function]
3416 *
3417 * Callback from MII layer when media changes.
3418 */
3419 static void
3420 SIP_DECL(dp83815_mii_statchg)(struct device *self)
3421 {
3422 struct sip_softc *sc = (struct sip_softc *) self;
3423
3424 /*
3425 * Update TXCFG for full-duplex operation.
3426 */
3427 if ((sc->sc_mii.mii_media_active & IFM_FDX) != 0)
3428 sc->sc_txcfg |= (TXCFG_CSI | TXCFG_HBI);
3429 else
3430 sc->sc_txcfg &= ~(TXCFG_CSI | TXCFG_HBI);
3431
3432 /*
3433 * Update RXCFG for full-duplex or loopback.
3434 */
3435 if ((sc->sc_mii.mii_media_active & IFM_FDX) != 0 ||
3436 IFM_SUBTYPE(sc->sc_mii.mii_media_active) == IFM_LOOP)
3437 sc->sc_rxcfg |= RXCFG_ATX;
3438 else
3439 sc->sc_rxcfg &= ~RXCFG_ATX;
3440
3441 /*
3442 * XXX 802.3x flow control.
3443 */
3444
3445 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_TXCFG, sc->sc_txcfg);
3446 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_RXCFG, sc->sc_rxcfg);
3447
3448 /*
3449 * Some DP83815s experience problems when used with short
3450 * (< 30m/100ft) Ethernet cables in 100BaseTX mode. This
3451 * sequence adjusts the DSP's signal attenuation to fix the
3452 * problem.
3453 */
3454 if (IFM_SUBTYPE(sc->sc_mii.mii_media_active) == IFM_100_TX) {
3455 uint32_t reg;
3456
3457 bus_space_write_4(sc->sc_st, sc->sc_sh, 0x00cc, 0x0001);
3458
3459 reg = bus_space_read_4(sc->sc_st, sc->sc_sh, 0x00f4);
3460 reg &= 0x0fff;
3461 bus_space_write_4(sc->sc_st, sc->sc_sh, 0x00f4, reg | 0x1000);
3462 delay(100);
3463 reg = bus_space_read_4(sc->sc_st, sc->sc_sh, 0x00fc);
3464 reg &= 0x00ff;
3465 if ((reg & 0x0080) == 0 || (reg >= 0x00d8)) {
3466 bus_space_write_4(sc->sc_st, sc->sc_sh, 0x00fc,
3467 0x00e8);
3468 reg = bus_space_read_4(sc->sc_st, sc->sc_sh, 0x00f4);
3469 bus_space_write_4(sc->sc_st, sc->sc_sh, 0x00f4,
3470 reg | 0x20);
3471 }
3472
3473 bus_space_write_4(sc->sc_st, sc->sc_sh, 0x00cc, 0);
3474 }
3475 }
3476 #endif /* ! DP83820 */
3477
3478 #if defined(DP83820)
3479 static void
3480 SIP_DECL(dp83820_read_macaddr)(struct sip_softc *sc,
3481 const struct pci_attach_args *pa, u_int8_t *enaddr)
3482 {
3483 u_int16_t eeprom_data[SIP_DP83820_EEPROM_LENGTH / 2];
3484 u_int8_t cksum, *e, match;
3485 int i;
3486
3487 /*
3488 * EEPROM data format for the DP83820 can be found in
3489 * the DP83820 manual, section 4.2.4.
3490 */
3491
3492 SIP_DECL(read_eeprom)(sc, 0,
3493 sizeof(eeprom_data) / sizeof(eeprom_data[0]), eeprom_data);
3494
3495 match = eeprom_data[SIP_DP83820_EEPROM_CHECKSUM / 2] >> 8;
3496 match = ~(match - 1);
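	/*
	 * ~(match - 1) is two's-complement negation, so the comparison
	 * below effectively checks that 0x55 plus the EEPROM data bytes
	 * plus the stored checksum byte sum to zero modulo 256.
	 */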
3497
3498 cksum = 0x55;
3499 e = (u_int8_t *) eeprom_data;
3500 for (i = 0; i < SIP_DP83820_EEPROM_CHECKSUM; i++)
3501 cksum += *e++;
3502
3503 if (cksum != match)
3504 		printf("%s: Checksum (%x) mismatch (%x)\n",
3505 sc->sc_dev.dv_xname, cksum, match);
3506
3507 enaddr[0] = eeprom_data[SIP_DP83820_EEPROM_PMATCH2 / 2] & 0xff;
3508 enaddr[1] = eeprom_data[SIP_DP83820_EEPROM_PMATCH2 / 2] >> 8;
3509 enaddr[2] = eeprom_data[SIP_DP83820_EEPROM_PMATCH1 / 2] & 0xff;
3510 enaddr[3] = eeprom_data[SIP_DP83820_EEPROM_PMATCH1 / 2] >> 8;
3511 enaddr[4] = eeprom_data[SIP_DP83820_EEPROM_PMATCH0 / 2] & 0xff;
3512 enaddr[5] = eeprom_data[SIP_DP83820_EEPROM_PMATCH0 / 2] >> 8;
3513 }
3514 #else /* ! DP83820 */
3515 static void
3516 SIP_DECL(sis900_eeprom_delay)(struct sip_softc *sc)
3517 {
3518 int i;
3519
3520 /*
3521 * FreeBSD goes from (300/33)+1 [10] to 0. There must be
3522 * a reason, but I don't know it.
3523 */
3524 for (i = 0; i < 10; i++)
3525 bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_CR);
3526 }
3527
3528 static void
3529 SIP_DECL(sis900_read_macaddr)(struct sip_softc *sc,
3530 const struct pci_attach_args *pa, u_int8_t *enaddr)
3531 {
3532 u_int16_t myea[ETHER_ADDR_LEN / 2];
3533
3534 switch (sc->sc_rev) {
3535 case SIS_REV_630S:
3536 case SIS_REV_630E:
3537 case SIS_REV_630EA1:
3538 case SIS_REV_630ET:
3539 case SIS_REV_635:
3540 /*
3541 * The MAC address for the on-board Ethernet of
3542 * the SiS 630 chipset is in the NVRAM. Kick
3543 * the chip into re-loading it from NVRAM, and
3544 * read the MAC address out of the filter registers.
3545 */
3546 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_CR, CR_RLD);
3547
3548 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_RFCR,
3549 RFCR_RFADDR_NODE0);
3550 myea[0] = bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_RFDR) &
3551 0xffff;
3552
3553 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_RFCR,
3554 RFCR_RFADDR_NODE2);
3555 myea[1] = bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_RFDR) &
3556 0xffff;
3557
3558 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_RFCR,
3559 RFCR_RFADDR_NODE4);
3560 myea[2] = bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_RFDR) &
3561 0xffff;
3562 break;
3563
3564 case SIS_REV_960:
3565 {
3566 #define SIS_SET_EROMAR(x,y) bus_space_write_4(x->sc_st, x->sc_sh, SIP_EROMAR, \
3567 bus_space_read_4(x->sc_st, x->sc_sh, SIP_EROMAR) | (y))
3568
3569 #define SIS_CLR_EROMAR(x,y) bus_space_write_4(x->sc_st, x->sc_sh, SIP_EROMAR, \
3570 bus_space_read_4(x->sc_st, x->sc_sh, SIP_EROMAR) & ~(y))
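		/*
		 * SIS_SET_EROMAR/SIS_CLR_EROMAR are read-modify-write
		 * helpers that set or clear bits in the EEPROM access
		 * register (SIP_EROMAR).
		 */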
3571
3572 int waittime, i;
3573
3574 		/* The EEPROM is shared between a 1394 controller and
3575 		 * the NIC; each time the NIC wants to access it, it must
3576 		 * first request it by setting EROMAR_REQ.
3577 */
3578 SIS_SET_EROMAR(sc, EROMAR_REQ);
3579
3580 for (waittime = 0; waittime < 1000; waittime++) { /* 1 ms max */
3581 /* Force EEPROM to idle state. */
3582
3583 /*
3584 * XXX-cube This is ugly. I'll look for docs about it.
3585 */
3586 SIS_SET_EROMAR(sc, EROMAR_EECS);
3587 SIP_DECL(sis900_eeprom_delay)(sc);
3588 for (i = 0; i <= 25; i++) { /* Yes, 26 times. */
3589 SIS_SET_EROMAR(sc, EROMAR_EESK);
3590 SIP_DECL(sis900_eeprom_delay)(sc);
3591 SIS_CLR_EROMAR(sc, EROMAR_EESK);
3592 SIP_DECL(sis900_eeprom_delay)(sc);
3593 }
3594 SIS_CLR_EROMAR(sc, EROMAR_EECS);
3595 SIP_DECL(sis900_eeprom_delay)(sc);
3596 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_EROMAR, 0);
3597
3598 if (bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_EROMAR) & EROMAR_GNT) {
3599 SIP_DECL(read_eeprom)(sc, SIP_EEPROM_ETHERNET_ID0 >> 1,
3600 sizeof(myea) / sizeof(myea[0]), myea);
3601 break;
3602 }
3603 DELAY(1);
3604 }
3605
3606 /*
3607 		 * Set the EEPROM clock (EROMAR_EESK) high, so that another
3608 		 * master can operate on the I2C bus.
3609 */
3610 SIS_SET_EROMAR(sc, EROMAR_EESK);
3611
3612 		/* Relinquish EEPROM access by the NIC. */
3613 SIS_SET_EROMAR(sc, EROMAR_DONE);
3614 } break;
3615
3616 default:
3617 SIP_DECL(read_eeprom)(sc, SIP_EEPROM_ETHERNET_ID0 >> 1,
3618 sizeof(myea) / sizeof(myea[0]), myea);
3619 }
3620
3621 enaddr[0] = myea[0] & 0xff;
3622 enaddr[1] = myea[0] >> 8;
3623 enaddr[2] = myea[1] & 0xff;
3624 enaddr[3] = myea[1] >> 8;
3625 enaddr[4] = myea[2] & 0xff;
3626 enaddr[5] = myea[2] >> 8;
3627 }
3628
3629 /* Table and macro to bit-reverse an octet. */
3630 static const u_int8_t bbr4[] = {0,8,4,12,2,10,6,14,1,9,5,13,3,11,7,15};
3631 #define bbr(v) ((bbr4[(v)&0xf] << 4) | bbr4[((v)>>4) & 0xf])
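/*
 * For example, bbr(0x2a): bbr4[0xa] == 5 and bbr4[0x2] == 4, so the
 * result is (5 << 4) | 4 == 0x54, i.e. 0x2a with its bits mirrored.
 */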
3632
3633 static void
3634 SIP_DECL(dp83815_read_macaddr)(struct sip_softc *sc,
3635 const struct pci_attach_args *pa, u_int8_t *enaddr)
3636 {
3637 u_int16_t eeprom_data[SIP_DP83815_EEPROM_LENGTH / 2], *ea;
3638 u_int8_t cksum, *e, match;
3639 int i;
3640
3641 SIP_DECL(read_eeprom)(sc, 0, sizeof(eeprom_data) /
3642 sizeof(eeprom_data[0]), eeprom_data);
3643
3644 match = eeprom_data[SIP_DP83815_EEPROM_CHECKSUM/2] >> 8;
3645 match = ~(match - 1);
3646
3647 cksum = 0x55;
3648 e = (u_int8_t *) eeprom_data;
3649 	for (i = 0; i < SIP_DP83815_EEPROM_CHECKSUM; i++) {
3650 cksum += *e++;
3651 }
3652 if (cksum != match) {
3653 		printf("%s: Checksum (%x) mismatch (%x)\n",
3654 sc->sc_dev.dv_xname, cksum, match);
3655 }
3656
3657 /*
3658 * Unrolled because it makes slightly more sense this way.
3659 * The DP83815 stores the MAC address in bit 0 of word 6
3660 * through bit 15 of word 8.
3661 */
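	/*
	 * Each output byte straddles a 16-bit word boundary: bit 0 of
	 * the current word provides the byte's bit 7, and bits 15-9 of
	 * the next word provide bits 6-0 (bits 8-1 of that word form
	 * the following byte).
	 */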
3662 ea = &eeprom_data[6];
3663 enaddr[0] = ((*ea & 0x1) << 7);
3664 ea++;
3665 enaddr[0] |= ((*ea & 0xFE00) >> 9);
3666 enaddr[1] = ((*ea & 0x1FE) >> 1);
3667 enaddr[2] = ((*ea & 0x1) << 7);
3668 ea++;
3669 enaddr[2] |= ((*ea & 0xFE00) >> 9);
3670 enaddr[3] = ((*ea & 0x1FE) >> 1);
3671 enaddr[4] = ((*ea & 0x1) << 7);
3672 ea++;
3673 enaddr[4] |= ((*ea & 0xFE00) >> 9);
3674 enaddr[5] = ((*ea & 0x1FE) >> 1);
3675
3676 /*
3677 * In case that's not weird enough, we also need to reverse
3678 * the bits in each byte. This all actually makes more sense
3679 * if you think about the EEPROM storage as an array of bits
3680 * being shifted into bytes, but that's not how we're looking
3681 * at it here...
3682 */
3683 	for (i = 0; i < 6; i++)
3684 enaddr[i] = bbr(enaddr[i]);
3685 }
3686 #endif /* DP83820 */
3687
3688 /*
3689 * sip_mediastatus: [ifmedia interface function]
3690 *
3691 * Get the current interface media status.
3692 */
3693 static void
3694 SIP_DECL(mediastatus)(struct ifnet *ifp, struct ifmediareq *ifmr)
3695 {
3696 struct sip_softc *sc = ifp->if_softc;
3697
3698 mii_pollstat(&sc->sc_mii);
3699 ifmr->ifm_status = sc->sc_mii.mii_media_status;
3700 ifmr->ifm_active = (sc->sc_mii.mii_media_active & ~IFM_ETH_FMASK) |
3701 sc->sc_flowflags;
3702 }
3703
3704 /*
3705 * sip_mediachange: [ifmedia interface function]
3706 *
3707 * Set hardware to newly-selected media.
3708 */
3709 static int
3710 SIP_DECL(mediachange)(struct ifnet *ifp)
3711 {
3712 struct sip_softc *sc = ifp->if_softc;
3713
3714 if (ifp->if_flags & IFF_UP)
3715 mii_mediachg(&sc->sc_mii);
3716 return (0);
3717 }
3718