      1 /*	$NetBSD: if_sip.c,v 1.88 2004/04/11 16:57:44 thorpej Exp $	*/
2
3 /*-
4 * Copyright (c) 2001, 2002 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Jason R. Thorpe.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement:
20 * This product includes software developed by the NetBSD
21 * Foundation, Inc. and its contributors.
22 * 4. Neither the name of The NetBSD Foundation nor the names of its
23 * contributors may be used to endorse or promote products derived
24 * from this software without specific prior written permission.
25 *
26 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
27 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
30 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36 * POSSIBILITY OF SUCH DAMAGE.
37 */
38
39 /*-
40 * Copyright (c) 1999 Network Computer, Inc.
41 * All rights reserved.
42 *
43 * Redistribution and use in source and binary forms, with or without
44 * modification, are permitted provided that the following conditions
45 * are met:
46 * 1. Redistributions of source code must retain the above copyright
47 * notice, this list of conditions and the following disclaimer.
48 * 2. Redistributions in binary form must reproduce the above copyright
49 * notice, this list of conditions and the following disclaimer in the
50 * documentation and/or other materials provided with the distribution.
51 * 3. Neither the name of Network Computer, Inc. nor the names of its
52 * contributors may be used to endorse or promote products derived
53 * from this software without specific prior written permission.
54 *
55 * THIS SOFTWARE IS PROVIDED BY NETWORK COMPUTER, INC. AND CONTRIBUTORS
56 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
57 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
58 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
59 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
60 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
61 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
62 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
63 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
64 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
65 * POSSIBILITY OF SUCH DAMAGE.
66 */
67
68 /*
69 * Device driver for the Silicon Integrated Systems SiS 900,
70 * SiS 7016 10/100, National Semiconductor DP83815 10/100, and
71 * National Semiconductor DP83820 10/100/1000 PCI Ethernet
72 * controllers.
73 *
74 * Originally written to support the SiS 900 by Jason R. Thorpe for
75 * Network Computer, Inc.
76 *
77 * TODO:
78 *
79 * - Reduce the Rx interrupt load.
80 */
81
82 #include <sys/cdefs.h>
83 __KERNEL_RCSID(0, "$NetBSD: if_sip.c,v 1.88 2004/04/11 16:57:44 thorpej Exp $");
84
85 #include "bpfilter.h"
86 #include "rnd.h"
87
88 #include <sys/param.h>
89 #include <sys/systm.h>
90 #include <sys/callout.h>
91 #include <sys/mbuf.h>
92 #include <sys/malloc.h>
93 #include <sys/kernel.h>
94 #include <sys/socket.h>
95 #include <sys/ioctl.h>
96 #include <sys/errno.h>
97 #include <sys/device.h>
98 #include <sys/queue.h>
99
100 #include <uvm/uvm_extern.h> /* for PAGE_SIZE */
101
102 #if NRND > 0
103 #include <sys/rnd.h>
104 #endif
105
106 #include <net/if.h>
107 #include <net/if_dl.h>
108 #include <net/if_media.h>
109 #include <net/if_ether.h>
110
111 #if NBPFILTER > 0
112 #include <net/bpf.h>
113 #endif
114
115 #include <machine/bus.h>
116 #include <machine/intr.h>
117 #include <machine/endian.h>
118
119 #include <dev/mii/mii.h>
120 #include <dev/mii/miivar.h>
121 #include <dev/mii/mii_bitbang.h>
122
123 #include <dev/pci/pcireg.h>
124 #include <dev/pci/pcivar.h>
125 #include <dev/pci/pcidevs.h>
126
127 #include <dev/pci/if_sipreg.h>
128
129 #ifdef DP83820 /* DP83820 Gigabit Ethernet */
130 #define SIP_DECL(x) __CONCAT(gsip_,x)
131 #else /* SiS900 and DP83815 */
132 #define SIP_DECL(x) __CONCAT(sip_,x)
133 #endif
134
135 #define SIP_STR(x) __STRING(SIP_DECL(x))
136
137 /*
    138  * Transmit descriptor list size.  This is arbitrary, but allocate
    139  * SIP_NTXSEGS_ALLOC descriptors for each of SIP_TXQUEUELEN pending
    140  * transmissions (a single packet may use up to SIP_NTXSEGS segments).
141 *
142 * This MUST work out to a power of 2.
143 */
144 #ifdef DP83820
145 #define SIP_NTXSEGS 64
146 #define SIP_NTXSEGS_ALLOC 16
147 #else
148 #define SIP_NTXSEGS 16
149 #define SIP_NTXSEGS_ALLOC 8
150 #endif
151
152 #define SIP_TXQUEUELEN 256
153 #define SIP_NTXDESC (SIP_TXQUEUELEN * SIP_NTXSEGS_ALLOC)
154 #define SIP_NTXDESC_MASK (SIP_NTXDESC - 1)
155 #define SIP_NEXTTX(x) (((x) + 1) & SIP_NTXDESC_MASK)
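/*
 * Note: because SIP_NTXDESC works out to a power of 2, SIP_NEXTTX() can
 * wrap the ring index with a simple mask; e.g. with SIP_NTXSEGS_ALLOC == 8,
 * SIP_NTXDESC is 256 * 8 == 2048, and SIP_NEXTTX(2047) == 0.
 */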
156
157 #if defined(DP83820)
158 #define TX_DMAMAP_SIZE ETHER_MAX_LEN_JUMBO
159 #else
160 #define TX_DMAMAP_SIZE MCLBYTES
161 #endif
162
163 /*
164 * Receive descriptor list size. We have one Rx buffer per incoming
165 * packet, so this logic is a little simpler.
166 *
167 * Actually, on the DP83820, we allow the packet to consume more than
168 * one buffer, in order to support jumbo Ethernet frames. In that
169 * case, a packet may consume up to 5 buffers (assuming a 2048 byte
    170  * mbuf cluster).  256 receive buffers covers only 51 maximum-size packets,
171 * so we'd better be quick about handling receive interrupts.
172 */
173 #if defined(DP83820)
174 #define SIP_NRXDESC 256
175 #else
176 #define SIP_NRXDESC 128
177 #endif /* DP83820 */
178 #define SIP_NRXDESC_MASK (SIP_NRXDESC - 1)
179 #define SIP_NEXTRX(x) (((x) + 1) & SIP_NRXDESC_MASK)
180
181 /*
182 * Control structures are DMA'd to the SiS900 chip. We allocate them in
183 * a single clump that maps to a single DMA segment to make several things
184 * easier.
185 */
186 struct sip_control_data {
187 /*
188 * The transmit descriptors.
189 */
190 struct sip_desc scd_txdescs[SIP_NTXDESC];
191
192 /*
193 * The receive descriptors.
194 */
195 struct sip_desc scd_rxdescs[SIP_NRXDESC];
196 };
197
198 #define SIP_CDOFF(x) offsetof(struct sip_control_data, x)
199 #define SIP_CDTXOFF(x) SIP_CDOFF(scd_txdescs[(x)])
200 #define SIP_CDRXOFF(x) SIP_CDOFF(scd_rxdescs[(x)])
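/*
 * For example, SIP_CDTXOFF(0) is 0 and SIP_CDRXOFF(0) is
 * SIP_NTXDESC * sizeof(struct sip_desc), since the Rx descriptors follow
 * the Tx descriptors within the control-data clump.  These byte offsets
 * are what SIP_CDTXSYNC()/SIP_CDRXSYNC() hand to bus_dmamap_sync() for
 * partial syncs of the clump.
 */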
201
202 /*
203 * Software state for transmit jobs.
204 */
205 struct sip_txsoft {
206 struct mbuf *txs_mbuf; /* head of our mbuf chain */
207 bus_dmamap_t txs_dmamap; /* our DMA map */
208 int txs_firstdesc; /* first descriptor in packet */
209 int txs_lastdesc; /* last descriptor in packet */
210 SIMPLEQ_ENTRY(sip_txsoft) txs_q;
211 };
212
213 SIMPLEQ_HEAD(sip_txsq, sip_txsoft);
214
215 /*
216 * Software state for receive jobs.
217 */
218 struct sip_rxsoft {
219 struct mbuf *rxs_mbuf; /* head of our mbuf chain */
220 bus_dmamap_t rxs_dmamap; /* our DMA map */
221 };
222
223 /*
224 * Software state per device.
225 */
226 struct sip_softc {
227 struct device sc_dev; /* generic device information */
228 bus_space_tag_t sc_st; /* bus space tag */
229 bus_space_handle_t sc_sh; /* bus space handle */
230 bus_dma_tag_t sc_dmat; /* bus DMA tag */
231 struct ethercom sc_ethercom; /* ethernet common data */
232 void *sc_sdhook; /* shutdown hook */
233
234 const struct sip_product *sc_model; /* which model are we? */
235 int sc_rev; /* chip revision */
236
237 void *sc_ih; /* interrupt cookie */
238
239 struct mii_data sc_mii; /* MII/media information */
240
241 struct callout sc_tick_ch; /* tick callout */
242
243 bus_dmamap_t sc_cddmamap; /* control data DMA map */
244 #define sc_cddma sc_cddmamap->dm_segs[0].ds_addr
245
246 /*
247 * Software state for transmit and receive descriptors.
248 */
249 struct sip_txsoft sc_txsoft[SIP_TXQUEUELEN];
250 struct sip_rxsoft sc_rxsoft[SIP_NRXDESC];
251
252 /*
253 * Control data structures.
254 */
255 struct sip_control_data *sc_control_data;
256 #define sc_txdescs sc_control_data->scd_txdescs
257 #define sc_rxdescs sc_control_data->scd_rxdescs
258
259 #ifdef SIP_EVENT_COUNTERS
260 /*
261 * Event counters.
262 */
263 struct evcnt sc_ev_txsstall; /* Tx stalled due to no txs */
264 struct evcnt sc_ev_txdstall; /* Tx stalled due to no txd */
265 struct evcnt sc_ev_txforceintr; /* Tx interrupts forced */
266 struct evcnt sc_ev_txdintr; /* Tx descriptor interrupts */
267 struct evcnt sc_ev_txiintr; /* Tx idle interrupts */
268 struct evcnt sc_ev_rxintr; /* Rx interrupts */
269 struct evcnt sc_ev_hiberr; /* HIBERR interrupts */
270 #ifdef DP83820
271 struct evcnt sc_ev_rxipsum; /* IP checksums checked in-bound */
272 struct evcnt sc_ev_rxtcpsum; /* TCP checksums checked in-bound */
    273 	struct evcnt sc_ev_rxudpsum;	/* UDP checksums checked in-bound */
274 struct evcnt sc_ev_txipsum; /* IP checksums comp. out-bound */
275 struct evcnt sc_ev_txtcpsum; /* TCP checksums comp. out-bound */
276 struct evcnt sc_ev_txudpsum; /* UDP checksums comp. out-bound */
277 #endif /* DP83820 */
278 #endif /* SIP_EVENT_COUNTERS */
279
280 u_int32_t sc_txcfg; /* prototype TXCFG register */
281 u_int32_t sc_rxcfg; /* prototype RXCFG register */
282 u_int32_t sc_imr; /* prototype IMR register */
283 u_int32_t sc_rfcr; /* prototype RFCR register */
284
285 u_int32_t sc_cfg; /* prototype CFG register */
286
287 #ifdef DP83820
288 u_int32_t sc_gpior; /* prototype GPIOR register */
289 #endif /* DP83820 */
290
291 u_int32_t sc_tx_fill_thresh; /* transmit fill threshold */
292 u_int32_t sc_tx_drain_thresh; /* transmit drain threshold */
293
294 u_int32_t sc_rx_drain_thresh; /* receive drain threshold */
295
296 int sc_flags; /* misc. flags; see below */
297
298 int sc_txfree; /* number of free Tx descriptors */
299 int sc_txnext; /* next ready Tx descriptor */
300 int sc_txwin; /* Tx descriptors since last intr */
301
302 struct sip_txsq sc_txfreeq; /* free Tx descsofts */
303 struct sip_txsq sc_txdirtyq; /* dirty Tx descsofts */
304
305 int sc_rxptr; /* next ready Rx descriptor/descsoft */
306 #if defined(DP83820)
307 int sc_rxdiscard;
308 int sc_rxlen;
309 struct mbuf *sc_rxhead;
310 struct mbuf *sc_rxtail;
311 struct mbuf **sc_rxtailp;
312 #endif /* DP83820 */
313
314 #if NRND > 0
315 rndsource_element_t rnd_source; /* random source */
316 #endif
317 };
318
319 /* sc_flags */
320 #define SIPF_PAUSED 0x00000001 /* paused (802.3x flow control) */
321
322 #ifdef DP83820
323 #define SIP_RXCHAIN_RESET(sc) \
324 do { \
325 (sc)->sc_rxtailp = &(sc)->sc_rxhead; \
326 *(sc)->sc_rxtailp = NULL; \
327 (sc)->sc_rxlen = 0; \
328 } while (/*CONSTCOND*/0)
329
330 #define SIP_RXCHAIN_LINK(sc, m) \
331 do { \
332 *(sc)->sc_rxtailp = (sc)->sc_rxtail = (m); \
333 (sc)->sc_rxtailp = &(m)->m_next; \
334 } while (/*CONSTCOND*/0)
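/*
 * sc_rxtailp always points at the location that should receive the next
 * mbuf pointer (&sc_rxhead for an empty chain, or &m_next of the current
 * tail), so SIP_RXCHAIN_LINK() appends to the chain in constant time.
 */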
335 #endif /* DP83820 */
336
337 #ifdef SIP_EVENT_COUNTERS
338 #define SIP_EVCNT_INCR(ev) (ev)->ev_count++
339 #else
340 #define SIP_EVCNT_INCR(ev) /* nothing */
341 #endif
342
343 #define SIP_CDTXADDR(sc, x) ((sc)->sc_cddma + SIP_CDTXOFF((x)))
344 #define SIP_CDRXADDR(sc, x) ((sc)->sc_cddma + SIP_CDRXOFF((x)))
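/*
 * SIP_CDTXADDR()/SIP_CDRXADDR() yield the DMA (bus) address of a given
 * descriptor within the control-data clump; these are the values written
 * into the descriptor link fields and into the chip's TXDP/RXDP head
 * pointers.
 */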
345
346 #define SIP_CDTXSYNC(sc, x, n, ops) \
347 do { \
348 int __x, __n; \
349 \
350 __x = (x); \
351 __n = (n); \
352 \
353 /* If it will wrap around, sync to the end of the ring. */ \
354 if ((__x + __n) > SIP_NTXDESC) { \
355 bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \
356 SIP_CDTXOFF(__x), sizeof(struct sip_desc) * \
357 (SIP_NTXDESC - __x), (ops)); \
358 __n -= (SIP_NTXDESC - __x); \
359 __x = 0; \
360 } \
361 \
362 /* Now sync whatever is left. */ \
363 bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \
364 SIP_CDTXOFF(__x), sizeof(struct sip_desc) * __n, (ops)); \
365 } while (0)
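/*
 * For example, SIP_CDTXSYNC(sc, SIP_NTXDESC - 2, 4, ops) issues two
 * bus_dmamap_sync() calls: one covering the last two descriptors of the
 * ring, and one covering the first two, since the requested range wraps.
 */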
366
367 #define SIP_CDRXSYNC(sc, x, ops) \
368 bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \
369 SIP_CDRXOFF((x)), sizeof(struct sip_desc), (ops))
370
371 #ifdef DP83820
372 #define SIP_INIT_RXDESC_EXTSTS __sipd->sipd_extsts = 0;
373 #define SIP_RXBUF_LEN (MCLBYTES - 8)
374 #else
375 #define SIP_INIT_RXDESC_EXTSTS /* nothing */
376 #define SIP_RXBUF_LEN (MCLBYTES - 1) /* field width */
377 #endif
378 #define SIP_INIT_RXDESC(sc, x) \
379 do { \
380 struct sip_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)]; \
381 struct sip_desc *__sipd = &(sc)->sc_rxdescs[(x)]; \
382 \
383 __sipd->sipd_link = \
384 htole32(SIP_CDRXADDR((sc), SIP_NEXTRX((x)))); \
385 __sipd->sipd_bufptr = \
386 htole32(__rxs->rxs_dmamap->dm_segs[0].ds_addr); \
387 __sipd->sipd_cmdsts = htole32(CMDSTS_INTR | \
388 (SIP_RXBUF_LEN & CMDSTS_SIZE_MASK)); \
389 SIP_INIT_RXDESC_EXTSTS \
390 SIP_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
391 } while (0)
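/*
 * SIP_INIT_RXDESC() re-arms one Rx descriptor: it links it to the next
 * ring entry, points it at the mbuf cluster mapped by rxs_dmamap, and
 * resets cmdsts to the buffer size (plus CMDSTS_INTR) before syncing the
 * descriptor back out for the chip.
 */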
392
393 #define SIP_CHIP_VERS(sc, v, p, r) \
394 ((sc)->sc_model->sip_vendor == (v) && \
395 (sc)->sc_model->sip_product == (p) && \
396 (sc)->sc_rev == (r))
397
398 #define SIP_CHIP_MODEL(sc, v, p) \
399 ((sc)->sc_model->sip_vendor == (v) && \
400 (sc)->sc_model->sip_product == (p))
401
402 #if !defined(DP83820)
403 #define SIP_SIS900_REV(sc, rev) \
404 SIP_CHIP_VERS((sc), PCI_VENDOR_SIS, PCI_PRODUCT_SIS_900, (rev))
405 #endif
406
407 #define SIP_TIMEOUT 1000
408
409 void SIP_DECL(start)(struct ifnet *);
410 void SIP_DECL(watchdog)(struct ifnet *);
411 int SIP_DECL(ioctl)(struct ifnet *, u_long, caddr_t);
412 int SIP_DECL(init)(struct ifnet *);
413 void SIP_DECL(stop)(struct ifnet *, int);
414
415 void SIP_DECL(shutdown)(void *);
416
417 void SIP_DECL(reset)(struct sip_softc *);
418 void SIP_DECL(rxdrain)(struct sip_softc *);
419 int SIP_DECL(add_rxbuf)(struct sip_softc *, int);
420 void SIP_DECL(read_eeprom)(struct sip_softc *, int, int, u_int16_t *);
421 void SIP_DECL(tick)(void *);
422
423 #if !defined(DP83820)
424 void SIP_DECL(sis900_set_filter)(struct sip_softc *);
425 #endif /* ! DP83820 */
426 void SIP_DECL(dp83815_set_filter)(struct sip_softc *);
427
428 #if defined(DP83820)
429 void SIP_DECL(dp83820_read_macaddr)(struct sip_softc *,
430 const struct pci_attach_args *, u_int8_t *);
431 #else
432 static void SIP_DECL(sis900_eeprom_delay)(struct sip_softc *sc);
433 void SIP_DECL(sis900_read_macaddr)(struct sip_softc *,
434 const struct pci_attach_args *, u_int8_t *);
435 void SIP_DECL(dp83815_read_macaddr)(struct sip_softc *,
436 const struct pci_attach_args *, u_int8_t *);
437 #endif /* DP83820 */
438
439 int SIP_DECL(intr)(void *);
440 void SIP_DECL(txintr)(struct sip_softc *);
441 void SIP_DECL(rxintr)(struct sip_softc *);
442
443 #if defined(DP83820)
444 int SIP_DECL(dp83820_mii_readreg)(struct device *, int, int);
445 void SIP_DECL(dp83820_mii_writereg)(struct device *, int, int, int);
446 void SIP_DECL(dp83820_mii_statchg)(struct device *);
447 #else
448 int SIP_DECL(sis900_mii_readreg)(struct device *, int, int);
449 void SIP_DECL(sis900_mii_writereg)(struct device *, int, int, int);
450 void SIP_DECL(sis900_mii_statchg)(struct device *);
451
452 int SIP_DECL(dp83815_mii_readreg)(struct device *, int, int);
453 void SIP_DECL(dp83815_mii_writereg)(struct device *, int, int, int);
454 void SIP_DECL(dp83815_mii_statchg)(struct device *);
455 #endif /* DP83820 */
456
457 int SIP_DECL(mediachange)(struct ifnet *);
458 void SIP_DECL(mediastatus)(struct ifnet *, struct ifmediareq *);
459
460 int SIP_DECL(match)(struct device *, struct cfdata *, void *);
461 void SIP_DECL(attach)(struct device *, struct device *, void *);
462
463 int SIP_DECL(copy_small) = 0;
464
465 #ifdef DP83820
466 CFATTACH_DECL(gsip, sizeof(struct sip_softc),
467 gsip_match, gsip_attach, NULL, NULL);
468 #else
469 CFATTACH_DECL(sip, sizeof(struct sip_softc),
470 sip_match, sip_attach, NULL, NULL);
471 #endif
472
473 /*
474 * Descriptions of the variants of the SiS900.
475 */
476 struct sip_variant {
477 int (*sipv_mii_readreg)(struct device *, int, int);
478 void (*sipv_mii_writereg)(struct device *, int, int, int);
479 void (*sipv_mii_statchg)(struct device *);
480 void (*sipv_set_filter)(struct sip_softc *);
481 void (*sipv_read_macaddr)(struct sip_softc *,
482 const struct pci_attach_args *, u_int8_t *);
483 };
484
485 u_int32_t SIP_DECL(mii_bitbang_read)(struct device *);
486 void SIP_DECL(mii_bitbang_write)(struct device *, u_int32_t);
487
488 const struct mii_bitbang_ops SIP_DECL(mii_bitbang_ops) = {
489 SIP_DECL(mii_bitbang_read),
490 SIP_DECL(mii_bitbang_write),
491 {
492 EROMAR_MDIO, /* MII_BIT_MDO */
493 EROMAR_MDIO, /* MII_BIT_MDI */
494 EROMAR_MDC, /* MII_BIT_MDC */
495 EROMAR_MDDIR, /* MII_BIT_DIR_HOST_PHY */
496 0, /* MII_BIT_DIR_PHY_HOST */
497 }
498 };
499
500 #if defined(DP83820)
501 const struct sip_variant SIP_DECL(variant_dp83820) = {
502 SIP_DECL(dp83820_mii_readreg),
503 SIP_DECL(dp83820_mii_writereg),
504 SIP_DECL(dp83820_mii_statchg),
505 SIP_DECL(dp83815_set_filter),
506 SIP_DECL(dp83820_read_macaddr),
507 };
508 #else
509 const struct sip_variant SIP_DECL(variant_sis900) = {
510 SIP_DECL(sis900_mii_readreg),
511 SIP_DECL(sis900_mii_writereg),
512 SIP_DECL(sis900_mii_statchg),
513 SIP_DECL(sis900_set_filter),
514 SIP_DECL(sis900_read_macaddr),
515 };
516
517 const struct sip_variant SIP_DECL(variant_dp83815) = {
518 SIP_DECL(dp83815_mii_readreg),
519 SIP_DECL(dp83815_mii_writereg),
520 SIP_DECL(dp83815_mii_statchg),
521 SIP_DECL(dp83815_set_filter),
522 SIP_DECL(dp83815_read_macaddr),
523 };
524 #endif /* DP83820 */
525
526 /*
527 * Devices supported by this driver.
528 */
529 const struct sip_product {
530 pci_vendor_id_t sip_vendor;
531 pci_product_id_t sip_product;
532 const char *sip_name;
533 const struct sip_variant *sip_variant;
534 } SIP_DECL(products)[] = {
535 #if defined(DP83820)
536 { PCI_VENDOR_NS, PCI_PRODUCT_NS_DP83820,
537 "NatSemi DP83820 Gigabit Ethernet",
538 &SIP_DECL(variant_dp83820) },
539 #else
540 { PCI_VENDOR_SIS, PCI_PRODUCT_SIS_900,
541 "SiS 900 10/100 Ethernet",
542 &SIP_DECL(variant_sis900) },
543 { PCI_VENDOR_SIS, PCI_PRODUCT_SIS_7016,
544 "SiS 7016 10/100 Ethernet",
545 &SIP_DECL(variant_sis900) },
546
547 { PCI_VENDOR_NS, PCI_PRODUCT_NS_DP83815,
548 "NatSemi DP83815 10/100 Ethernet",
549 &SIP_DECL(variant_dp83815) },
550 #endif /* DP83820 */
551
552 { 0, 0,
553 NULL,
554 NULL },
555 };
556
557 static const struct sip_product *
558 SIP_DECL(lookup)(const struct pci_attach_args *pa)
559 {
560 const struct sip_product *sip;
561
562 for (sip = SIP_DECL(products); sip->sip_name != NULL; sip++) {
563 if (PCI_VENDOR(pa->pa_id) == sip->sip_vendor &&
564 PCI_PRODUCT(pa->pa_id) == sip->sip_product)
565 return (sip);
566 }
567 return (NULL);
568 }
569
570 #ifdef DP83820
571 /*
572 * I really hate stupid hardware vendors. There's a bit in the EEPROM
573 * which indicates if the card can do 64-bit data transfers. Unfortunately,
574 * several vendors of 32-bit cards fail to clear this bit in the EEPROM,
575 * which means we try to use 64-bit data transfers on those cards if we
    576  * happen to be plugged into a 64-bit slot.
577 *
578 * What we do is use this table of cards known to be 64-bit cards. If
    579  * you have a 64-bit card whose subsystem ID is not listed in this table,
580 * send the output of "pcictl dump ..." of the device to me so that your
581 * card will use the 64-bit data path when plugged into a 64-bit slot.
582 *
583 * -- Jason R. Thorpe <thorpej (at) NetBSD.org>
584 * June 30, 2002
585 */
586 static int
587 SIP_DECL(check_64bit)(const struct pci_attach_args *pa)
588 {
589 static const struct {
590 pci_vendor_id_t c64_vendor;
591 pci_product_id_t c64_product;
592 } card64[] = {
593 /* Asante GigaNIX */
594 { 0x128a, 0x0002 },
595
596 /* Accton EN1407-T, Planex GN-1000TE */
597 { 0x1113, 0x1407 },
598
599 /* Netgear GA-621 */
600 { 0x1385, 0x621a },
601
602 /* SMC EZ Card */
603 { 0x10b8, 0x9462 },
604
605 { 0, 0}
606 };
607 pcireg_t subsys;
608 int i;
609
610 subsys = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
611
612 for (i = 0; card64[i].c64_vendor != 0; i++) {
613 if (PCI_VENDOR(subsys) == card64[i].c64_vendor &&
614 PCI_PRODUCT(subsys) == card64[i].c64_product)
615 return (1);
616 }
617
618 return (0);
619 }
620 #endif /* DP83820 */
621
622 int
623 SIP_DECL(match)(struct device *parent, struct cfdata *cf, void *aux)
624 {
625 struct pci_attach_args *pa = aux;
626
627 if (SIP_DECL(lookup)(pa) != NULL)
628 return (1);
629
630 return (0);
631 }
632
633 void
634 SIP_DECL(attach)(struct device *parent, struct device *self, void *aux)
635 {
636 struct sip_softc *sc = (struct sip_softc *) self;
637 struct pci_attach_args *pa = aux;
638 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
639 pci_chipset_tag_t pc = pa->pa_pc;
640 pci_intr_handle_t ih;
641 const char *intrstr = NULL;
642 bus_space_tag_t iot, memt;
643 bus_space_handle_t ioh, memh;
644 bus_dma_segment_t seg;
645 int ioh_valid, memh_valid;
646 int i, rseg, error;
647 const struct sip_product *sip;
648 pcireg_t pmode;
649 u_int8_t enaddr[ETHER_ADDR_LEN];
650 int pmreg;
651 #ifdef DP83820
652 pcireg_t memtype;
653 u_int32_t reg;
654 #endif /* DP83820 */
655
656 callout_init(&sc->sc_tick_ch);
657
658 sip = SIP_DECL(lookup)(pa);
659 if (sip == NULL) {
660 printf("\n");
661 panic(SIP_STR(attach) ": impossible");
662 }
663 sc->sc_rev = PCI_REVISION(pa->pa_class);
664
665 printf(": %s, rev %#02x\n", sip->sip_name, sc->sc_rev);
666
667 sc->sc_model = sip;
668
669 /*
670 * XXX Work-around broken PXE firmware on some boards.
671 *
672 * The DP83815 shares an address decoder with the MEM BAR
673 * and the ROM BAR. Make sure the ROM BAR is disabled,
674 * so that memory mapped access works.
675 */
676 pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_MAPREG_ROM,
677 pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_MAPREG_ROM) &
678 ~PCI_MAPREG_ROM_ENABLE);
679
680 /*
681 * Map the device.
682 */
683 ioh_valid = (pci_mapreg_map(pa, SIP_PCI_CFGIOA,
684 PCI_MAPREG_TYPE_IO, 0,
685 &iot, &ioh, NULL, NULL) == 0);
686 #ifdef DP83820
687 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, SIP_PCI_CFGMA);
688 switch (memtype) {
689 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
690 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
691 memh_valid = (pci_mapreg_map(pa, SIP_PCI_CFGMA,
692 memtype, 0, &memt, &memh, NULL, NULL) == 0);
693 break;
694 default:
695 memh_valid = 0;
696 }
697 #else
698 memh_valid = (pci_mapreg_map(pa, SIP_PCI_CFGMA,
699 PCI_MAPREG_TYPE_MEM|PCI_MAPREG_MEM_TYPE_32BIT, 0,
700 &memt, &memh, NULL, NULL) == 0);
701 #endif /* DP83820 */
702
703 if (memh_valid) {
704 sc->sc_st = memt;
705 sc->sc_sh = memh;
706 } else if (ioh_valid) {
707 sc->sc_st = iot;
708 sc->sc_sh = ioh;
709 } else {
710 printf("%s: unable to map device registers\n",
711 sc->sc_dev.dv_xname);
712 return;
713 }
714
715 sc->sc_dmat = pa->pa_dmat;
716
717 /*
718 * Make sure bus mastering is enabled. Also make sure
719 * Write/Invalidate is enabled if we're allowed to use it.
720 */
721 pmreg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
722 if (pa->pa_flags & PCI_FLAGS_MWI_OKAY)
723 pmreg |= PCI_COMMAND_INVALIDATE_ENABLE;
724 pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
725 pmreg | PCI_COMMAND_MASTER_ENABLE);
726
727 /* Get it out of power save mode if needed. */
728 if (pci_get_capability(pc, pa->pa_tag, PCI_CAP_PWRMGMT, &pmreg, 0)) {
729 pmode = pci_conf_read(pc, pa->pa_tag, pmreg + PCI_PMCSR) &
730 PCI_PMCSR_STATE_MASK;
731 if (pmode == PCI_PMCSR_STATE_D3) {
732 /*
733 * The card has lost all configuration data in
734 * this state, so punt.
735 */
736 printf("%s: unable to wake up from power state D3\n",
737 sc->sc_dev.dv_xname);
738 return;
739 }
740 if (pmode != PCI_PMCSR_STATE_D0) {
741 printf("%s: waking up from power state D%d\n",
742 sc->sc_dev.dv_xname, pmode);
743 pci_conf_write(pc, pa->pa_tag, pmreg + PCI_PMCSR,
744 PCI_PMCSR_STATE_D0);
745 }
746 }
747
748 /*
749 * Map and establish our interrupt.
750 */
751 if (pci_intr_map(pa, &ih)) {
752 printf("%s: unable to map interrupt\n", sc->sc_dev.dv_xname);
753 return;
754 }
755 intrstr = pci_intr_string(pc, ih);
756 sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, SIP_DECL(intr), sc);
757 if (sc->sc_ih == NULL) {
758 printf("%s: unable to establish interrupt",
759 sc->sc_dev.dv_xname);
760 if (intrstr != NULL)
761 printf(" at %s", intrstr);
762 printf("\n");
763 return;
764 }
765 printf("%s: interrupting at %s\n", sc->sc_dev.dv_xname, intrstr);
766
767 SIMPLEQ_INIT(&sc->sc_txfreeq);
768 SIMPLEQ_INIT(&sc->sc_txdirtyq);
769
770 /*
771 * Allocate the control data structures, and create and load the
772 * DMA map for it.
773 */
774 if ((error = bus_dmamem_alloc(sc->sc_dmat,
775 sizeof(struct sip_control_data), PAGE_SIZE, 0, &seg, 1, &rseg,
776 0)) != 0) {
777 printf("%s: unable to allocate control data, error = %d\n",
778 sc->sc_dev.dv_xname, error);
779 goto fail_0;
780 }
781
782 if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
783 sizeof(struct sip_control_data), (caddr_t *)&sc->sc_control_data,
784 BUS_DMA_COHERENT)) != 0) {
785 printf("%s: unable to map control data, error = %d\n",
786 sc->sc_dev.dv_xname, error);
787 goto fail_1;
788 }
789
790 if ((error = bus_dmamap_create(sc->sc_dmat,
791 sizeof(struct sip_control_data), 1,
792 sizeof(struct sip_control_data), 0, 0, &sc->sc_cddmamap)) != 0) {
793 printf("%s: unable to create control data DMA map, "
794 "error = %d\n", sc->sc_dev.dv_xname, error);
795 goto fail_2;
796 }
797
798 if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
799 sc->sc_control_data, sizeof(struct sip_control_data), NULL,
800 0)) != 0) {
801 printf("%s: unable to load control data DMA map, error = %d\n",
802 sc->sc_dev.dv_xname, error);
803 goto fail_3;
804 }
805
806 /*
807 * Create the transmit buffer DMA maps.
808 */
809 for (i = 0; i < SIP_TXQUEUELEN; i++) {
810 if ((error = bus_dmamap_create(sc->sc_dmat, TX_DMAMAP_SIZE,
811 SIP_NTXSEGS, MCLBYTES, 0, 0,
812 &sc->sc_txsoft[i].txs_dmamap)) != 0) {
813 printf("%s: unable to create tx DMA map %d, "
814 "error = %d\n", sc->sc_dev.dv_xname, i, error);
815 goto fail_4;
816 }
817 }
818
819 /*
820 * Create the receive buffer DMA maps.
821 */
822 for (i = 0; i < SIP_NRXDESC; i++) {
823 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
824 MCLBYTES, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
825 printf("%s: unable to create rx DMA map %d, "
826 "error = %d\n", sc->sc_dev.dv_xname, i, error);
827 goto fail_5;
828 }
829 sc->sc_rxsoft[i].rxs_mbuf = NULL;
830 }
831
832 /*
833 * Reset the chip to a known state.
834 */
835 SIP_DECL(reset)(sc);
836
837 /*
838 * Read the Ethernet address from the EEPROM. This might
839 * also fetch other stuff from the EEPROM and stash it
840 * in the softc.
841 */
842 sc->sc_cfg = 0;
843 #if !defined(DP83820)
844 if (SIP_SIS900_REV(sc,SIS_REV_635) ||
845 SIP_SIS900_REV(sc,SIS_REV_900B))
846 sc->sc_cfg |= (CFG_PESEL | CFG_RNDCNT);
847 #endif
848
849 (*sip->sip_variant->sipv_read_macaddr)(sc, pa, enaddr);
850
851 printf("%s: Ethernet address %s\n", sc->sc_dev.dv_xname,
852 ether_sprintf(enaddr));
853
854 /*
855 * Initialize the configuration register: aggressive PCI
856 * bus request algorithm, default backoff, default OW timer,
857 * default parity error detection.
858 *
859 * NOTE: "Big endian mode" is useless on the SiS900 and
860 * friends -- it affects packet data, not descriptors.
861 */
862 #ifdef DP83820
863 /*
864 * Cause the chip to load configuration data from the EEPROM.
865 */
866 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_PTSCR, PTSCR_EELOAD_EN);
867 for (i = 0; i < 10000; i++) {
868 delay(10);
869 if ((bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_PTSCR) &
870 PTSCR_EELOAD_EN) == 0)
871 break;
872 }
873 if (bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_PTSCR) &
874 PTSCR_EELOAD_EN) {
875 printf("%s: timeout loading configuration from EEPROM\n",
876 sc->sc_dev.dv_xname);
877 return;
878 }
879
880 sc->sc_gpior = bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_GPIOR);
881
882 reg = bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_CFG);
883 if (reg & CFG_PCI64_DET) {
884 printf("%s: 64-bit PCI slot detected", sc->sc_dev.dv_xname);
885 /*
886 * Check to see if this card is 64-bit. If so, enable 64-bit
887 * data transfers.
888 *
889 * We can't use the DATA64_EN bit in the EEPROM, because
890 * vendors of 32-bit cards fail to clear that bit in many
891 * cases (yet the card still detects that it's in a 64-bit
892 * slot; go figure).
893 */
894 if (SIP_DECL(check_64bit)(pa)) {
895 sc->sc_cfg |= CFG_DATA64_EN;
896 printf(", using 64-bit data transfers");
897 }
898 printf("\n");
899 }
900
901 /*
902 * XXX Need some PCI flags indicating support for
903 * XXX 64-bit addressing.
904 */
905 #if 0
906 if (reg & CFG_M64ADDR)
907 sc->sc_cfg |= CFG_M64ADDR;
908 if (reg & CFG_T64ADDR)
909 sc->sc_cfg |= CFG_T64ADDR;
910 #endif
911
912 if (reg & (CFG_TBI_EN|CFG_EXT_125)) {
913 const char *sep = "";
914 printf("%s: using ", sc->sc_dev.dv_xname);
915 if (reg & CFG_EXT_125) {
916 sc->sc_cfg |= CFG_EXT_125;
917 printf("%s125MHz clock", sep);
918 sep = ", ";
919 }
920 if (reg & CFG_TBI_EN) {
921 sc->sc_cfg |= CFG_TBI_EN;
922 printf("%sten-bit interface", sep);
923 sep = ", ";
924 }
925 printf("\n");
926 }
927 if ((pa->pa_flags & PCI_FLAGS_MRM_OKAY) == 0 ||
928 (reg & CFG_MRM_DIS) != 0)
929 sc->sc_cfg |= CFG_MRM_DIS;
930 if ((pa->pa_flags & PCI_FLAGS_MWI_OKAY) == 0 ||
931 (reg & CFG_MWI_DIS) != 0)
932 sc->sc_cfg |= CFG_MWI_DIS;
933
934 /*
935 * Use the extended descriptor format on the DP83820. This
936 * gives us an interface to VLAN tagging and IPv4/TCP/UDP
937 * checksumming.
938 */
939 sc->sc_cfg |= CFG_EXTSTS_EN;
940 #endif /* DP83820 */
941
942 /*
943 * Initialize our media structures and probe the MII.
944 */
945 sc->sc_mii.mii_ifp = ifp;
946 sc->sc_mii.mii_readreg = sip->sip_variant->sipv_mii_readreg;
947 sc->sc_mii.mii_writereg = sip->sip_variant->sipv_mii_writereg;
948 sc->sc_mii.mii_statchg = sip->sip_variant->sipv_mii_statchg;
949 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, SIP_DECL(mediachange),
950 SIP_DECL(mediastatus));
951
952 mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
953 MII_OFFSET_ANY, 0);
954 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
955 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
956 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
957 } else
958 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
959
960 ifp = &sc->sc_ethercom.ec_if;
961 strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
962 ifp->if_softc = sc;
963 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
964 ifp->if_ioctl = SIP_DECL(ioctl);
965 ifp->if_start = SIP_DECL(start);
966 ifp->if_watchdog = SIP_DECL(watchdog);
967 ifp->if_init = SIP_DECL(init);
968 ifp->if_stop = SIP_DECL(stop);
969 IFQ_SET_READY(&ifp->if_snd);
970
971 /*
972 * We can support 802.1Q VLAN-sized frames.
973 */
974 sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU;
975
976 #ifdef DP83820
977 /*
978 * And the DP83820 can do VLAN tagging in hardware, and
979 * support the jumbo Ethernet MTU.
980 */
981 sc->sc_ethercom.ec_capabilities |=
982 ETHERCAP_VLAN_HWTAGGING | ETHERCAP_JUMBO_MTU;
983
984 /*
985 * The DP83820 can do IPv4, TCPv4, and UDPv4 checksums
986 * in hardware.
987 */
988 ifp->if_capabilities |= IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 |
989 IFCAP_CSUM_UDPv4;
990 #endif /* DP83820 */
991
992 /*
993 * Attach the interface.
994 */
995 if_attach(ifp);
996 ether_ifattach(ifp, enaddr);
997 #if NRND > 0
998 rnd_attach_source(&sc->rnd_source, sc->sc_dev.dv_xname,
999 RND_TYPE_NET, 0);
1000 #endif
1001
1002 /*
1003 * The number of bytes that must be available in
1004 * the Tx FIFO before the bus master can DMA more
1005 * data into the FIFO.
1006 */
1007 sc->sc_tx_fill_thresh = 64 / 32;
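	/*
	 * The FIFO threshold fields are encoded in units of 32 bytes
	 * (note the "thresh * 32" reporting in the Tx underrun handler
	 * below), so 64 / 32 == 2 here encodes a 64-byte fill threshold.
	 */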
1008
1009 /*
   1010 	 * Start at a drain threshold of 1504 bytes (6400 bytes on the
   1011 	 * DP83820).  We will increase it if a DMA underrun occurs.
1012 *
1013 * XXX The minimum value of this variable should be
1014 * tuned. We may be able to improve performance
1015 * by starting with a lower value. That, however,
1016 * may trash the first few outgoing packets if the
1017 * PCI bus is saturated.
1018 */
1019 #ifdef DP83820
1020 sc->sc_tx_drain_thresh = 6400 / 32; /* from FreeBSD nge(4) */
1021 #else
1022 sc->sc_tx_drain_thresh = 1504 / 32;
1023 #endif
1024
1025 /*
1026 * Initialize the Rx FIFO drain threshold.
1027 *
1028 * This is in units of 8 bytes.
1029 *
1030 * We should never set this value lower than 2; 14 bytes are
1031 * required to filter the packet.
1032 */
1033 sc->sc_rx_drain_thresh = 128 / 8;
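	/*
	 * 128 / 8 == 16 units here encodes a 128-byte Rx drain threshold,
	 * comfortably above the 2-unit (16-byte) minimum needed to cover
	 * the 14-byte Ethernet header used for filtering.
	 */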
1034
1035 #ifdef SIP_EVENT_COUNTERS
1036 /*
1037 * Attach event counters.
1038 */
1039 evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
1040 NULL, sc->sc_dev.dv_xname, "txsstall");
1041 evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
1042 NULL, sc->sc_dev.dv_xname, "txdstall");
1043 evcnt_attach_dynamic(&sc->sc_ev_txforceintr, EVCNT_TYPE_INTR,
1044 NULL, sc->sc_dev.dv_xname, "txforceintr");
1045 evcnt_attach_dynamic(&sc->sc_ev_txdintr, EVCNT_TYPE_INTR,
1046 NULL, sc->sc_dev.dv_xname, "txdintr");
1047 evcnt_attach_dynamic(&sc->sc_ev_txiintr, EVCNT_TYPE_INTR,
1048 NULL, sc->sc_dev.dv_xname, "txiintr");
1049 evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
1050 NULL, sc->sc_dev.dv_xname, "rxintr");
1051 evcnt_attach_dynamic(&sc->sc_ev_hiberr, EVCNT_TYPE_INTR,
1052 NULL, sc->sc_dev.dv_xname, "hiberr");
1053 #ifdef DP83820
1054 evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
1055 NULL, sc->sc_dev.dv_xname, "rxipsum");
1056 evcnt_attach_dynamic(&sc->sc_ev_rxtcpsum, EVCNT_TYPE_MISC,
1057 NULL, sc->sc_dev.dv_xname, "rxtcpsum");
1058 evcnt_attach_dynamic(&sc->sc_ev_rxudpsum, EVCNT_TYPE_MISC,
1059 NULL, sc->sc_dev.dv_xname, "rxudpsum");
1060 evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
1061 NULL, sc->sc_dev.dv_xname, "txipsum");
1062 evcnt_attach_dynamic(&sc->sc_ev_txtcpsum, EVCNT_TYPE_MISC,
1063 NULL, sc->sc_dev.dv_xname, "txtcpsum");
1064 evcnt_attach_dynamic(&sc->sc_ev_txudpsum, EVCNT_TYPE_MISC,
1065 NULL, sc->sc_dev.dv_xname, "txudpsum");
1066 #endif /* DP83820 */
1067 #endif /* SIP_EVENT_COUNTERS */
1068
1069 /*
1070 * Make sure the interface is shutdown during reboot.
1071 */
1072 sc->sc_sdhook = shutdownhook_establish(SIP_DECL(shutdown), sc);
1073 if (sc->sc_sdhook == NULL)
1074 printf("%s: WARNING: unable to establish shutdown hook\n",
1075 sc->sc_dev.dv_xname);
1076 return;
1077
1078 /*
1079 * Free any resources we've allocated during the failed attach
1080 * attempt. Do this in reverse order and fall through.
1081 */
1082 fail_5:
1083 for (i = 0; i < SIP_NRXDESC; i++) {
1084 if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
1085 bus_dmamap_destroy(sc->sc_dmat,
1086 sc->sc_rxsoft[i].rxs_dmamap);
1087 }
1088 fail_4:
1089 for (i = 0; i < SIP_TXQUEUELEN; i++) {
1090 if (sc->sc_txsoft[i].txs_dmamap != NULL)
1091 bus_dmamap_destroy(sc->sc_dmat,
1092 sc->sc_txsoft[i].txs_dmamap);
1093 }
1094 bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
1095 fail_3:
1096 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
1097 fail_2:
1098 bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_control_data,
1099 sizeof(struct sip_control_data));
1100 fail_1:
1101 bus_dmamem_free(sc->sc_dmat, &seg, rseg);
1102 fail_0:
1103 return;
1104 }
1105
1106 /*
1107 * sip_shutdown:
1108 *
1109 * Make sure the interface is stopped at reboot time.
1110 */
1111 void
1112 SIP_DECL(shutdown)(void *arg)
1113 {
1114 struct sip_softc *sc = arg;
1115
1116 SIP_DECL(stop)(&sc->sc_ethercom.ec_if, 1);
1117 }
1118
1119 /*
1120 * sip_start: [ifnet interface function]
1121 *
1122 * Start packet transmission on the interface.
1123 */
1124 void
1125 SIP_DECL(start)(struct ifnet *ifp)
1126 {
1127 struct sip_softc *sc = ifp->if_softc;
1128 struct mbuf *m0;
1129 #ifndef DP83820
1130 struct mbuf *m;
1131 #endif
1132 struct sip_txsoft *txs;
1133 bus_dmamap_t dmamap;
1134 int error, nexttx, lasttx, seg;
1135 int ofree = sc->sc_txfree;
1136 #if 0
1137 int firsttx = sc->sc_txnext;
1138 #endif
1139 #ifdef DP83820
1140 struct m_tag *mtag;
1141 u_int32_t extsts;
1142 #endif
1143
1144 #ifndef DP83820
1145 /*
1146 * If we've been told to pause, don't transmit any more packets.
1147 */
1148 if (sc->sc_flags & SIPF_PAUSED)
1149 ifp->if_flags |= IFF_OACTIVE;
1150 #endif
1151
1152 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
1153 return;
1154
1155 /*
1156 * Loop through the send queue, setting up transmit descriptors
1157 * until we drain the queue, or use up all available transmit
1158 * descriptors.
1159 */
1160 for (;;) {
1161 /* Get a work queue entry. */
1162 if ((txs = SIMPLEQ_FIRST(&sc->sc_txfreeq)) == NULL) {
1163 SIP_EVCNT_INCR(&sc->sc_ev_txsstall);
1164 break;
1165 }
1166
1167 /*
1168 * Grab a packet off the queue.
1169 */
1170 IFQ_POLL(&ifp->if_snd, m0);
1171 if (m0 == NULL)
1172 break;
1173 #ifndef DP83820
1174 m = NULL;
1175 #endif
1176
1177 dmamap = txs->txs_dmamap;
1178
1179 #ifdef DP83820
1180 /*
1181 * Load the DMA map. If this fails, the packet either
1182 * didn't fit in the allotted number of segments, or we
1183 * were short on resources. For the too-many-segments
1184 * case, we simply report an error and drop the packet,
1185 * since we can't sanely copy a jumbo packet to a single
1186 * buffer.
1187 */
1188 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
1189 BUS_DMA_WRITE|BUS_DMA_NOWAIT);
1190 if (error) {
1191 if (error == EFBIG) {
1192 printf("%s: Tx packet consumes too many "
1193 "DMA segments, dropping...\n",
1194 sc->sc_dev.dv_xname);
1195 IFQ_DEQUEUE(&ifp->if_snd, m0);
1196 m_freem(m0);
1197 continue;
1198 }
1199 /*
1200 * Short on resources, just stop for now.
1201 */
1202 break;
1203 }
1204 #else /* DP83820 */
1205 /*
1206 * Load the DMA map. If this fails, the packet either
   1207 		 * didn't fit in the allotted number of segments, or we
1208 * were short on resources. In this case, we'll copy
1209 * and try again.
1210 */
1211 if (bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
1212 BUS_DMA_WRITE|BUS_DMA_NOWAIT) != 0) {
1213 MGETHDR(m, M_DONTWAIT, MT_DATA);
1214 if (m == NULL) {
1215 printf("%s: unable to allocate Tx mbuf\n",
1216 sc->sc_dev.dv_xname);
1217 break;
1218 }
1219 if (m0->m_pkthdr.len > MHLEN) {
1220 MCLGET(m, M_DONTWAIT);
1221 if ((m->m_flags & M_EXT) == 0) {
1222 printf("%s: unable to allocate Tx "
1223 "cluster\n", sc->sc_dev.dv_xname);
1224 m_freem(m);
1225 break;
1226 }
1227 }
1228 m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, caddr_t));
1229 m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
1230 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
1231 m, BUS_DMA_WRITE|BUS_DMA_NOWAIT);
1232 if (error) {
1233 printf("%s: unable to load Tx buffer, "
1234 "error = %d\n", sc->sc_dev.dv_xname, error);
1235 break;
1236 }
1237 }
1238 #endif /* DP83820 */
1239
1240 /*
1241 * Ensure we have enough descriptors free to describe
1242 * the packet. Note, we always reserve one descriptor
1243 * at the end of the ring as a termination point, to
1244 * prevent wrap-around.
1245 */
1246 if (dmamap->dm_nsegs > (sc->sc_txfree - 1)) {
1247 /*
1248 * Not enough free descriptors to transmit this
1249 * packet. We haven't committed anything yet,
1250 * so just unload the DMA map, put the packet
1251 * back on the queue, and punt. Notify the upper
   1252 			 * layer that there are no more slots left.
1253 *
1254 * XXX We could allocate an mbuf and copy, but
1255 * XXX is it worth it?
1256 */
1257 ifp->if_flags |= IFF_OACTIVE;
1258 bus_dmamap_unload(sc->sc_dmat, dmamap);
1259 #ifndef DP83820
1260 if (m != NULL)
1261 m_freem(m);
1262 #endif
1263 SIP_EVCNT_INCR(&sc->sc_ev_txdstall);
1264 break;
1265 }
1266
1267 IFQ_DEQUEUE(&ifp->if_snd, m0);
1268 #ifndef DP83820
1269 if (m != NULL) {
1270 m_freem(m0);
1271 m0 = m;
1272 }
1273 #endif
1274
1275 /*
1276 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
1277 */
1278
1279 /* Sync the DMA map. */
1280 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
1281 BUS_DMASYNC_PREWRITE);
1282
1283 /*
1284 * Initialize the transmit descriptors.
1285 */
1286 for (nexttx = lasttx = sc->sc_txnext, seg = 0;
1287 seg < dmamap->dm_nsegs;
1288 seg++, nexttx = SIP_NEXTTX(nexttx)) {
1289 /*
1290 * If this is the first descriptor we're
1291 * enqueueing, don't set the OWN bit just
1292 * yet. That could cause a race condition.
1293 * We'll do it below.
1294 */
1295 sc->sc_txdescs[nexttx].sipd_bufptr =
1296 htole32(dmamap->dm_segs[seg].ds_addr);
1297 sc->sc_txdescs[nexttx].sipd_cmdsts =
1298 htole32((nexttx == sc->sc_txnext ? 0 : CMDSTS_OWN) |
1299 CMDSTS_MORE | dmamap->dm_segs[seg].ds_len);
1300 #ifdef DP83820
1301 sc->sc_txdescs[nexttx].sipd_extsts = 0;
1302 #endif /* DP83820 */
1303 lasttx = nexttx;
1304 }
1305
1306 /* Clear the MORE bit on the last segment. */
1307 sc->sc_txdescs[lasttx].sipd_cmdsts &= htole32(~CMDSTS_MORE);
1308
1309 /*
   1310 		 * Force an interrupt on this packet if we've queued too many
   1311 		 * descriptors (2/3 of the Tx queue) since the last one.
1312 */
1313 if (++sc->sc_txwin >= (SIP_TXQUEUELEN * 2 / 3)) {
1314 SIP_EVCNT_INCR(&sc->sc_ev_txforceintr);
1315 sc->sc_txdescs[lasttx].sipd_cmdsts |=
1316 htole32(CMDSTS_INTR);
1317 sc->sc_txwin = 0;
1318 }
1319
1320 #ifdef DP83820
1321 /*
1322 * If VLANs are enabled and the packet has a VLAN tag, set
1323 * up the descriptor to encapsulate the packet for us.
1324 *
1325 * This apparently has to be on the last descriptor of
1326 * the packet.
1327 */
1328 if (sc->sc_ethercom.ec_nvlans != 0 &&
1329 (mtag = m_tag_find(m0, PACKET_TAG_VLAN, NULL)) != NULL) {
1330 sc->sc_txdescs[lasttx].sipd_extsts |=
1331 htole32(EXTSTS_VPKT |
1332 (*(u_int *)(mtag + 1) & EXTSTS_VTCI));
1333 }
1334
1335 /*
1336 * If the upper-layer has requested IPv4/TCPv4/UDPv4
1337 * checksumming, set up the descriptor to do this work
1338 * for us.
1339 *
1340 * This apparently has to be on the first descriptor of
1341 * the packet.
1342 *
1343 * Byte-swap constants so the compiler can optimize.
1344 */
1345 extsts = 0;
1346 if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
1347 KDASSERT(ifp->if_capenable & IFCAP_CSUM_IPv4);
1348 SIP_EVCNT_INCR(&sc->sc_ev_txipsum);
1349 extsts |= htole32(EXTSTS_IPPKT);
1350 }
1351 if (m0->m_pkthdr.csum_flags & M_CSUM_TCPv4) {
1352 KDASSERT(ifp->if_capenable & IFCAP_CSUM_TCPv4);
1353 SIP_EVCNT_INCR(&sc->sc_ev_txtcpsum);
1354 extsts |= htole32(EXTSTS_TCPPKT);
1355 } else if (m0->m_pkthdr.csum_flags & M_CSUM_UDPv4) {
1356 KDASSERT(ifp->if_capenable & IFCAP_CSUM_UDPv4);
1357 SIP_EVCNT_INCR(&sc->sc_ev_txudpsum);
1358 extsts |= htole32(EXTSTS_UDPPKT);
1359 }
1360 sc->sc_txdescs[sc->sc_txnext].sipd_extsts |= extsts;
1361 #endif /* DP83820 */
1362
1363 /* Sync the descriptors we're using. */
1364 SIP_CDTXSYNC(sc, sc->sc_txnext, dmamap->dm_nsegs,
1365 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1366
1367 /*
   1368 		 * The entire packet is set up.  Give the first descriptor
1369 * to the chip now.
1370 */
1371 sc->sc_txdescs[sc->sc_txnext].sipd_cmdsts |=
1372 htole32(CMDSTS_OWN);
1373 SIP_CDTXSYNC(sc, sc->sc_txnext, 1,
1374 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
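		/*
		 * Note the two-phase hand-off: every descriptor except the
		 * first was given CMDSTS_OWN as it was built above; only now,
		 * after the whole chain has been written and synced, does the
		 * first descriptor get CMDSTS_OWN, so the chip never sees a
		 * partially constructed packet.
		 */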
1375
1376 /*
1377 * Store a pointer to the packet so we can free it later,
1378 * and remember what txdirty will be once the packet is
1379 * done.
1380 */
1381 txs->txs_mbuf = m0;
1382 txs->txs_firstdesc = sc->sc_txnext;
1383 txs->txs_lastdesc = lasttx;
1384
1385 /* Advance the tx pointer. */
1386 sc->sc_txfree -= dmamap->dm_nsegs;
1387 sc->sc_txnext = nexttx;
1388
1389 SIMPLEQ_REMOVE_HEAD(&sc->sc_txfreeq, txs_q);
1390 SIMPLEQ_INSERT_TAIL(&sc->sc_txdirtyq, txs, txs_q);
1391
1392 #if NBPFILTER > 0
1393 /*
1394 * Pass the packet to any BPF listeners.
1395 */
1396 if (ifp->if_bpf)
1397 bpf_mtap(ifp->if_bpf, m0);
1398 #endif /* NBPFILTER > 0 */
1399 }
1400
1401 if (txs == NULL || sc->sc_txfree == 0) {
1402 /* No more slots left; notify upper layer. */
1403 ifp->if_flags |= IFF_OACTIVE;
1404 }
1405
1406 if (sc->sc_txfree != ofree) {
1407 /*
1408 * Start the transmit process. Note, the manual says
1409 * that if there are no pending transmissions in the
1410 * chip's internal queue (indicated by TXE being clear),
1411 * then the driver software must set the TXDP to the
1412 * first descriptor to be transmitted. However, if we
   1413 		 * do this, it causes serious performance degradation on
   1414 		 * the DP83820 under load; not setting TXDP doesn't seem
   1415 		 * to adversely affect the SiS 900 or DP83815.
1416 *
1417 * Well, I guess it wouldn't be the first time a manual
1418 * has lied -- and they could be speaking of the NULL-
1419 * terminated descriptor list case, rather than OWN-
1420 * terminated rings.
1421 */
1422 #if 0
1423 if ((bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_CR) &
1424 CR_TXE) == 0) {
1425 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_TXDP,
1426 SIP_CDTXADDR(sc, firsttx));
1427 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_CR, CR_TXE);
1428 }
1429 #else
1430 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_CR, CR_TXE);
1431 #endif
1432
1433 /* Set a watchdog timer in case the chip flakes out. */
1434 #ifdef DP83820
1435 /* Gigabit autonegotiation takes 5 seconds. */
1436 ifp->if_timer = 10;
1437 #else
1438 ifp->if_timer = 5;
1439 #endif
1440 }
1441 }
1442
1443 /*
1444 * sip_watchdog: [ifnet interface function]
1445 *
1446 * Watchdog timer handler.
1447 */
1448 void
1449 SIP_DECL(watchdog)(struct ifnet *ifp)
1450 {
1451 struct sip_softc *sc = ifp->if_softc;
1452
1453 /*
1454 * The chip seems to ignore the CMDSTS_INTR bit sometimes!
   1455 	 * If we get a timeout, try to sweep up transmit descriptors.
1456 * If we manage to sweep them all up, ignore the lack of
1457 * interrupt.
1458 */
1459 SIP_DECL(txintr)(sc);
1460
1461 if (sc->sc_txfree != SIP_NTXDESC) {
1462 printf("%s: device timeout\n", sc->sc_dev.dv_xname);
1463 ifp->if_oerrors++;
1464
1465 /* Reset the interface. */
1466 (void) SIP_DECL(init)(ifp);
1467 } else if (ifp->if_flags & IFF_DEBUG)
1468 printf("%s: recovered from device timeout\n",
1469 sc->sc_dev.dv_xname);
1470
1471 /* Try to get more packets going. */
1472 SIP_DECL(start)(ifp);
1473 }
1474
1475 /*
1476 * sip_ioctl: [ifnet interface function]
1477 *
1478 * Handle control requests from the operator.
1479 */
1480 int
1481 SIP_DECL(ioctl)(struct ifnet *ifp, u_long cmd, caddr_t data)
1482 {
1483 struct sip_softc *sc = ifp->if_softc;
1484 struct ifreq *ifr = (struct ifreq *)data;
1485 int s, error;
1486
1487 s = splnet();
1488
1489 switch (cmd) {
1490 case SIOCSIFMEDIA:
1491 case SIOCGIFMEDIA:
1492 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
1493 break;
1494
1495 default:
1496 error = ether_ioctl(ifp, cmd, data);
1497 if (error == ENETRESET) {
1498 /*
1499 * Multicast list has changed; set the hardware filter
1500 * accordingly.
1501 */
1502 (*sc->sc_model->sip_variant->sipv_set_filter)(sc);
1503 error = 0;
1504 }
1505 break;
1506 }
1507
1508 /* Try to get more packets going. */
1509 SIP_DECL(start)(ifp);
1510
1511 splx(s);
1512 return (error);
1513 }
1514
1515 /*
1516 * sip_intr:
1517 *
1518 * Interrupt service routine.
1519 */
1520 int
1521 SIP_DECL(intr)(void *arg)
1522 {
1523 struct sip_softc *sc = arg;
1524 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1525 u_int32_t isr;
1526 int handled = 0;
1527
1528 /* Disable interrupts. */
1529 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_IER, 0);
1530
1531 for (;;) {
1532 /* Reading clears interrupt. */
1533 isr = bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_ISR);
1534 if ((isr & sc->sc_imr) == 0)
1535 break;
1536
1537 #if NRND > 0
1538 if (RND_ENABLED(&sc->rnd_source))
1539 rnd_add_uint32(&sc->rnd_source, isr);
1540 #endif
1541
1542 handled = 1;
1543
1544 if (isr & (ISR_RXORN|ISR_RXIDLE|ISR_RXDESC)) {
1545 SIP_EVCNT_INCR(&sc->sc_ev_rxintr);
1546
1547 /* Grab any new packets. */
1548 SIP_DECL(rxintr)(sc);
1549
1550 if (isr & ISR_RXORN) {
1551 printf("%s: receive FIFO overrun\n",
1552 sc->sc_dev.dv_xname);
1553
1554 /* XXX adjust rx_drain_thresh? */
1555 }
1556
1557 if (isr & ISR_RXIDLE) {
1558 printf("%s: receive ring overrun\n",
1559 sc->sc_dev.dv_xname);
1560
1561 /* Get the receive process going again. */
1562 bus_space_write_4(sc->sc_st, sc->sc_sh,
1563 SIP_RXDP, SIP_CDRXADDR(sc, sc->sc_rxptr));
1564 bus_space_write_4(sc->sc_st, sc->sc_sh,
1565 SIP_CR, CR_RXE);
1566 }
1567 }
1568
1569 if (isr & (ISR_TXURN|ISR_TXDESC|ISR_TXIDLE)) {
1570 #ifdef SIP_EVENT_COUNTERS
1571 if (isr & ISR_TXDESC)
1572 SIP_EVCNT_INCR(&sc->sc_ev_txdintr);
1573 else if (isr & ISR_TXIDLE)
1574 SIP_EVCNT_INCR(&sc->sc_ev_txiintr);
1575 #endif
1576
1577 /* Sweep up transmit descriptors. */
1578 SIP_DECL(txintr)(sc);
1579
1580 if (isr & ISR_TXURN) {
1581 u_int32_t thresh;
1582
1583 printf("%s: transmit FIFO underrun",
1584 sc->sc_dev.dv_xname);
1585
1586 thresh = sc->sc_tx_drain_thresh + 1;
1587 if (thresh <= TXCFG_DRTH &&
1588 (thresh * 32) <= (SIP_TXFIFO_SIZE -
1589 (sc->sc_tx_fill_thresh * 32))) {
1590 printf("; increasing Tx drain "
1591 "threshold to %u bytes\n",
1592 thresh * 32);
1593 sc->sc_tx_drain_thresh = thresh;
1594 (void) SIP_DECL(init)(ifp);
1595 } else {
1596 (void) SIP_DECL(init)(ifp);
1597 printf("\n");
1598 }
1599 }
1600 }
1601
1602 #if !defined(DP83820)
1603 if (sc->sc_imr & (ISR_PAUSE_END|ISR_PAUSE_ST)) {
1604 if (isr & ISR_PAUSE_ST) {
1605 sc->sc_flags |= SIPF_PAUSED;
1606 ifp->if_flags |= IFF_OACTIVE;
1607 }
1608 if (isr & ISR_PAUSE_END) {
1609 sc->sc_flags &= ~SIPF_PAUSED;
1610 ifp->if_flags &= ~IFF_OACTIVE;
1611 }
1612 }
1613 #endif /* ! DP83820 */
1614
1615 if (isr & ISR_HIBERR) {
1616 int want_init = 0;
1617
1618 SIP_EVCNT_INCR(&sc->sc_ev_hiberr);
1619
1620 #define PRINTERR(bit, str) \
1621 do { \
1622 if ((isr & (bit)) != 0) { \
1623 if ((ifp->if_flags & IFF_DEBUG) != 0) \
1624 printf("%s: %s\n", \
1625 sc->sc_dev.dv_xname, str); \
1626 want_init = 1; \
1627 } \
1628 } while (/*CONSTCOND*/0)
1629
1630 PRINTERR(ISR_DPERR, "parity error");
1631 PRINTERR(ISR_SSERR, "system error");
1632 PRINTERR(ISR_RMABT, "master abort");
1633 PRINTERR(ISR_RTABT, "target abort");
1634 PRINTERR(ISR_RXSOVR, "receive status FIFO overrun");
1635 /*
1636 * Ignore:
1637 * Tx reset complete
1638 * Rx reset complete
1639 */
1640 if (want_init)
1641 (void) SIP_DECL(init)(ifp);
1642 #undef PRINTERR
1643 }
1644 }
1645
1646 /* Re-enable interrupts. */
1647 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_IER, IER_IE);
1648
1649 /* Try to get more packets going. */
1650 SIP_DECL(start)(ifp);
1651
1652 return (handled);
1653 }
1654
1655 /*
1656 * sip_txintr:
1657 *
1658 * Helper; handle transmit interrupts.
1659 */
1660 void
1661 SIP_DECL(txintr)(struct sip_softc *sc)
1662 {
1663 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1664 struct sip_txsoft *txs;
1665 u_int32_t cmdsts;
1666
1667 #ifndef DP83820
1668 if ((sc->sc_flags & SIPF_PAUSED) == 0)
1669 ifp->if_flags &= ~IFF_OACTIVE;
1670 #endif
1671
1672 /*
1673 * Go through our Tx list and free mbufs for those
1674 * frames which have been transmitted.
1675 */
1676 while ((txs = SIMPLEQ_FIRST(&sc->sc_txdirtyq)) != NULL) {
1677 SIP_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_dmamap->dm_nsegs,
1678 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1679
1680 cmdsts = le32toh(sc->sc_txdescs[txs->txs_lastdesc].sipd_cmdsts);
1681 if (cmdsts & CMDSTS_OWN)
1682 break;
1683
1684 SIMPLEQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q);
1685
1686 sc->sc_txfree += txs->txs_dmamap->dm_nsegs;
1687
1688 bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
1689 0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1690 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
1691 m_freem(txs->txs_mbuf);
1692 txs->txs_mbuf = NULL;
1693
1694 SIMPLEQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
1695
1696 /*
1697 * Check for errors and collisions.
1698 */
1699 if (cmdsts &
1700 (CMDSTS_Tx_TXA|CMDSTS_Tx_TFU|CMDSTS_Tx_ED|CMDSTS_Tx_EC)) {
1701 ifp->if_oerrors++;
1702 if (cmdsts & CMDSTS_Tx_EC)
1703 ifp->if_collisions += 16;
1704 if (ifp->if_flags & IFF_DEBUG) {
1705 if (cmdsts & CMDSTS_Tx_ED)
1706 printf("%s: excessive deferral\n",
1707 sc->sc_dev.dv_xname);
1708 if (cmdsts & CMDSTS_Tx_EC)
1709 printf("%s: excessive collisions\n",
1710 sc->sc_dev.dv_xname);
1711 }
1712 } else {
1713 /* Packet was transmitted successfully. */
1714 ifp->if_opackets++;
1715 ifp->if_collisions += CMDSTS_COLLISIONS(cmdsts);
1716 }
1717 }
1718
1719 /*
1720 * If there are no more pending transmissions, cancel the watchdog
1721 * timer.
1722 */
1723 if (txs == NULL) {
1724 ifp->if_timer = 0;
1725 sc->sc_txwin = 0;
1726 }
1727 }
1728
1729 #if defined(DP83820)
1730 /*
1731 * sip_rxintr:
1732 *
1733 * Helper; handle receive interrupts.
1734 */
1735 void
1736 SIP_DECL(rxintr)(struct sip_softc *sc)
1737 {
1738 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1739 struct sip_rxsoft *rxs;
1740 struct mbuf *m, *tailm;
1741 u_int32_t cmdsts, extsts;
1742 int i, len, frame_len;
1743
1744 for (i = sc->sc_rxptr;; i = SIP_NEXTRX(i)) {
1745 rxs = &sc->sc_rxsoft[i];
1746
1747 SIP_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1748
1749 cmdsts = le32toh(sc->sc_rxdescs[i].sipd_cmdsts);
1750 extsts = le32toh(sc->sc_rxdescs[i].sipd_extsts);
1751
1752 /*
1753 * NOTE: OWN is set if owned by _consumer_. We're the
1754 * consumer of the receive ring, so if the bit is clear,
1755 * we have processed all of the packets.
1756 */
1757 if ((cmdsts & CMDSTS_OWN) == 0) {
1758 /*
1759 * We have processed all of the receive buffers.
1760 */
1761 break;
1762 }
1763
1764 if (__predict_false(sc->sc_rxdiscard)) {
1765 SIP_INIT_RXDESC(sc, i);
1766 if ((cmdsts & CMDSTS_MORE) == 0) {
1767 /* Reset our state. */
1768 sc->sc_rxdiscard = 0;
1769 }
1770 continue;
1771 }
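		/*
		 * The sc_rxdiscard flag above implements a simple discard
		 * state: once a buffer in a multi-buffer packet can't be
		 * replaced (see the add_rxbuf failure path below), the
		 * remaining fragments of that packet are recycled here until
		 * the descriptor without CMDSTS_MORE ends the chain.
		 */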
1772
1773 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
1774 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
1775
1776 m = rxs->rxs_mbuf;
1777
1778 /*
1779 * Add a new receive buffer to the ring.
1780 */
1781 if (SIP_DECL(add_rxbuf)(sc, i) != 0) {
1782 /*
1783 * Failed, throw away what we've done so
1784 * far, and discard the rest of the packet.
1785 */
1786 ifp->if_ierrors++;
1787 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
1788 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
1789 SIP_INIT_RXDESC(sc, i);
1790 if (cmdsts & CMDSTS_MORE)
1791 sc->sc_rxdiscard = 1;
1792 if (sc->sc_rxhead != NULL)
1793 m_freem(sc->sc_rxhead);
1794 SIP_RXCHAIN_RESET(sc);
1795 continue;
1796 }
1797
1798 SIP_RXCHAIN_LINK(sc, m);
1799
1800 /*
1801 * If this is not the end of the packet, keep
1802 * looking.
1803 */
1804 if (cmdsts & CMDSTS_MORE) {
1805 sc->sc_rxlen += m->m_len;
1806 continue;
1807 }
1808
1809 /*
1810 * Okay, we have the entire packet now...
1811 */
1812 *sc->sc_rxtailp = NULL;
1813 m = sc->sc_rxhead;
1814 tailm = sc->sc_rxtail;
1815 frame_len = sc->sc_rxlen;
1816
1817 SIP_RXCHAIN_RESET(sc);
1818
1819 /*
1820 * If an error occurred, update stats and drop the packet.
1821 */
1822 if (cmdsts & (CMDSTS_Rx_RXA|CMDSTS_Rx_RUNT|
1823 CMDSTS_Rx_ISE|CMDSTS_Rx_CRCE|CMDSTS_Rx_FAE)) {
1824 ifp->if_ierrors++;
1825 if ((cmdsts & CMDSTS_Rx_RXA) != 0 &&
1826 (cmdsts & CMDSTS_Rx_RXO) == 0) {
1827 /* Receive overrun handled elsewhere. */
1828 printf("%s: receive descriptor error\n",
1829 sc->sc_dev.dv_xname);
1830 }
1831 #define PRINTERR(bit, str) \
1832 if ((ifp->if_flags & IFF_DEBUG) != 0 && \
1833 (cmdsts & (bit)) != 0) \
1834 printf("%s: %s\n", sc->sc_dev.dv_xname, str)
1835 PRINTERR(CMDSTS_Rx_RUNT, "runt packet");
1836 PRINTERR(CMDSTS_Rx_ISE, "invalid symbol error");
1837 PRINTERR(CMDSTS_Rx_CRCE, "CRC error");
1838 PRINTERR(CMDSTS_Rx_FAE, "frame alignment error");
1839 #undef PRINTERR
1840 m_freem(m);
1841 continue;
1842 }
1843
1844 /*
1845 * No errors.
1846 *
1847 * Note, the DP83820 includes the CRC with
1848 * every packet.
1849 */
1850 len = CMDSTS_SIZE(cmdsts);
1851 frame_len += len;
1852 tailm->m_len = len;
1853
1854 /*
1855 * If the packet is small enough to fit in a
1856 * single header mbuf, allocate one and copy
1857 * the data into it. This greatly reduces
1858 * memory consumption when we receive lots
1859 * of small packets.
1860 */
1861 if (SIP_DECL(copy_small) != 0 && len <= (MHLEN - 2)) {
1862 struct mbuf *nm;
1863 MGETHDR(nm, M_DONTWAIT, MT_DATA);
1864 if (nm == NULL) {
1865 ifp->if_ierrors++;
1866 m_freem(m);
1867 continue;
1868 }
1869 nm->m_data += 2;
1870 nm->m_pkthdr.len = nm->m_len = len;
1871 m_copydata(m, 0, len, mtod(nm, caddr_t));
1872 m_freem(m);
1873 m = nm;
1874 }
1875 #ifndef __NO_STRICT_ALIGNMENT
1876 else {
1877 /*
1878 * The DP83820's receive buffers must be 4-byte
1879 * aligned. But this means that the data after
1880 * the Ethernet header is misaligned. To compensate,
1881 * we have artificially shortened the buffer size
1882 * in the descriptor, and we do an overlapping copy
1883 * of the data two bytes further in (in the first
1884 * buffer of the chain only).
1885 */
1886 memmove(mtod(m, caddr_t) + 2, mtod(m, caddr_t),
1887 m->m_len);
1888 m->m_data += 2;
1889 }
1890 #endif /* ! __NO_STRICT_ALIGNMENT */
1891
1892 /*
1893 * If VLANs are enabled, VLAN packets have been unwrapped
1894 * for us. Associate the tag with the packet.
1895 */
1896 if (sc->sc_ethercom.ec_nvlans != 0 &&
1897 (extsts & EXTSTS_VPKT) != 0) {
1898 struct m_tag *vtag;
1899
1900 vtag = m_tag_get(PACKET_TAG_VLAN, sizeof(u_int),
1901 M_NOWAIT);
1902 if (vtag == NULL) {
1903 ifp->if_ierrors++;
1904 printf("%s: unable to allocate VLAN tag\n",
1905 sc->sc_dev.dv_xname);
1906 m_freem(m);
1907 continue;
1908 }
1909
1910 *(u_int *)(vtag + 1) = ntohs(extsts & EXTSTS_VTCI);
1911 }
1912
1913 /*
1914 * Set the incoming checksum information for the
1915 * packet.
1916 */
1917 if ((extsts & EXTSTS_IPPKT) != 0) {
1918 SIP_EVCNT_INCR(&sc->sc_ev_rxipsum);
1919 m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
1920 if (extsts & EXTSTS_Rx_IPERR)
1921 m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
1922 if (extsts & EXTSTS_TCPPKT) {
1923 SIP_EVCNT_INCR(&sc->sc_ev_rxtcpsum);
1924 m->m_pkthdr.csum_flags |= M_CSUM_TCPv4;
1925 if (extsts & EXTSTS_Rx_TCPERR)
1926 m->m_pkthdr.csum_flags |=
1927 M_CSUM_TCP_UDP_BAD;
1928 } else if (extsts & EXTSTS_UDPPKT) {
1929 SIP_EVCNT_INCR(&sc->sc_ev_rxudpsum);
1930 m->m_pkthdr.csum_flags |= M_CSUM_UDPv4;
1931 if (extsts & EXTSTS_Rx_UDPERR)
1932 m->m_pkthdr.csum_flags |=
1933 M_CSUM_TCP_UDP_BAD;
1934 }
1935 }
1936
1937 ifp->if_ipackets++;
1938 m->m_flags |= M_HASFCS;
1939 m->m_pkthdr.rcvif = ifp;
1940 m->m_pkthdr.len = frame_len;
1941
1942 #if NBPFILTER > 0
1943 /*
1944 * Pass this up to any BPF listeners, but only
1945 	 * pass it up the stack if it's for us.
1946 */
1947 if (ifp->if_bpf)
1948 bpf_mtap(ifp->if_bpf, m);
1949 #endif /* NBPFILTER > 0 */
1950
1951 /* Pass it on. */
1952 (*ifp->if_input)(ifp, m);
1953 }
1954
1955 /* Update the receive pointer. */
1956 sc->sc_rxptr = i;
1957 }
1958 #else /* ! DP83820 */
1959 /*
1960 * sip_rxintr:
1961 *
1962 * Helper; handle receive interrupts.
1963 */
1964 void
1965 SIP_DECL(rxintr)(struct sip_softc *sc)
1966 {
1967 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1968 struct sip_rxsoft *rxs;
1969 struct mbuf *m;
1970 u_int32_t cmdsts;
1971 int i, len;
1972
1973 for (i = sc->sc_rxptr;; i = SIP_NEXTRX(i)) {
1974 rxs = &sc->sc_rxsoft[i];
1975
1976 SIP_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1977
1978 cmdsts = le32toh(sc->sc_rxdescs[i].sipd_cmdsts);
1979
1980 /*
1981 * NOTE: OWN is set if owned by _consumer_. We're the
1982 * consumer of the receive ring, so if the bit is clear,
1983 * we have processed all of the packets.
1984 */
1985 if ((cmdsts & CMDSTS_OWN) == 0) {
1986 /*
1987 * We have processed all of the receive buffers.
1988 */
1989 break;
1990 }
1991
1992 /*
1993 * If any collisions were seen on the wire, count one.
1994 */
1995 if (cmdsts & CMDSTS_Rx_COL)
1996 ifp->if_collisions++;
1997
1998 /*
1999 * If an error occurred, update stats, clear the status
2000 * word, and leave the packet buffer in place. It will
2001 * simply be reused the next time the ring comes around.
2002 */
2003 if (cmdsts & (CMDSTS_Rx_RXA|CMDSTS_Rx_RUNT|
2004 CMDSTS_Rx_ISE|CMDSTS_Rx_CRCE|CMDSTS_Rx_FAE)) {
2005 ifp->if_ierrors++;
2006 if ((cmdsts & CMDSTS_Rx_RXA) != 0 &&
2007 (cmdsts & CMDSTS_Rx_RXO) == 0) {
2008 /* Receive overrun handled elsewhere. */
2009 printf("%s: receive descriptor error\n",
2010 sc->sc_dev.dv_xname);
2011 }
2012 #define PRINTERR(bit, str) \
2013 if ((ifp->if_flags & IFF_DEBUG) != 0 && \
2014 (cmdsts & (bit)) != 0) \
2015 printf("%s: %s\n", sc->sc_dev.dv_xname, str)
2016 PRINTERR(CMDSTS_Rx_RUNT, "runt packet");
2017 PRINTERR(CMDSTS_Rx_ISE, "invalid symbol error");
2018 PRINTERR(CMDSTS_Rx_CRCE, "CRC error");
2019 PRINTERR(CMDSTS_Rx_FAE, "frame alignment error");
2020 #undef PRINTERR
2021 SIP_INIT_RXDESC(sc, i);
2022 continue;
2023 }
2024
2025 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
2026 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
2027
2028 /*
2029 * No errors; receive the packet. Note, the SiS 900
2030 * includes the CRC with every packet.
2031 */
2032 len = CMDSTS_SIZE(cmdsts);
2033
2034 #ifdef __NO_STRICT_ALIGNMENT
2035 /*
2036 * If the packet is small enough to fit in a
2037 * single header mbuf, allocate one and copy
2038 * the data into it. This greatly reduces
2039 * memory consumption when we receive lots
2040 * of small packets.
2041 *
2042 * Otherwise, we add a new buffer to the receive
2043 * chain. If this fails, we drop the packet and
2044 * recycle the old buffer.
2045 */
2046 if (SIP_DECL(copy_small) != 0 && len <= MHLEN) {
2047 MGETHDR(m, M_DONTWAIT, MT_DATA);
2048 if (m == NULL)
2049 goto dropit;
2050 memcpy(mtod(m, caddr_t),
2051 mtod(rxs->rxs_mbuf, caddr_t), len);
2052 SIP_INIT_RXDESC(sc, i);
2053 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
2054 rxs->rxs_dmamap->dm_mapsize,
2055 BUS_DMASYNC_PREREAD);
2056 } else {
2057 m = rxs->rxs_mbuf;
2058 if (SIP_DECL(add_rxbuf)(sc, i) != 0) {
2059 dropit:
2060 ifp->if_ierrors++;
2061 SIP_INIT_RXDESC(sc, i);
2062 bus_dmamap_sync(sc->sc_dmat,
2063 rxs->rxs_dmamap, 0,
2064 rxs->rxs_dmamap->dm_mapsize,
2065 BUS_DMASYNC_PREREAD);
2066 continue;
2067 }
2068 }
2069 #else
2070 /*
2071 * The SiS 900's receive buffers must be 4-byte aligned.
2072 * But this means that the data after the Ethernet header
2073 * is misaligned. We must allocate a new buffer and
2074 * copy the data, shifted forward 2 bytes.
2075 */
2076 MGETHDR(m, M_DONTWAIT, MT_DATA);
2077 if (m == NULL) {
2078 dropit:
2079 ifp->if_ierrors++;
2080 SIP_INIT_RXDESC(sc, i);
2081 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
2082 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
2083 continue;
2084 }
2085 if (len > (MHLEN - 2)) {
2086 MCLGET(m, M_DONTWAIT);
2087 if ((m->m_flags & M_EXT) == 0) {
2088 m_freem(m);
2089 goto dropit;
2090 }
2091 }
2092 m->m_data += 2;
2093
2094 /*
2095 * Note that we use clusters for incoming frames, so the
2096 * buffer is virtually contiguous.
2097 */
2098 memcpy(mtod(m, caddr_t), mtod(rxs->rxs_mbuf, caddr_t), len);
2099
2100 /* Allow the receive descriptor to continue using its mbuf. */
2101 SIP_INIT_RXDESC(sc, i);
2102 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
2103 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
2104 #endif /* __NO_STRICT_ALIGNMENT */
2105
2106 ifp->if_ipackets++;
2107 m->m_flags |= M_HASFCS;
2108 m->m_pkthdr.rcvif = ifp;
2109 m->m_pkthdr.len = m->m_len = len;
2110
2111 #if NBPFILTER > 0
2112 /*
2113 * Pass this up to any BPF listeners, but only
2114 	 * pass it up the stack if it's for us.
2115 */
2116 if (ifp->if_bpf)
2117 bpf_mtap(ifp->if_bpf, m);
2118 #endif /* NBPFILTER > 0 */
2119
2120 /* Pass it on. */
2121 (*ifp->if_input)(ifp, m);
2122 }
2123
2124 /* Update the receive pointer. */
2125 sc->sc_rxptr = i;
2126 }
2127 #endif /* DP83820 */
2128
2129 /*
2130 * sip_tick:
2131 *
2132 * One second timer, used to tick the MII.
2133 */
2134 void
2135 SIP_DECL(tick)(void *arg)
2136 {
2137 struct sip_softc *sc = arg;
2138 int s;
2139
2140 s = splnet();
2141 mii_tick(&sc->sc_mii);
2142 splx(s);
2143
2144 callout_reset(&sc->sc_tick_ch, hz, SIP_DECL(tick), sc);
2145 }
2146
2147 /*
2148 * sip_reset:
2149 *
2150 * Perform a soft reset on the SiS 900.
2151 */
2152 void
2153 SIP_DECL(reset)(struct sip_softc *sc)
2154 {
2155 bus_space_tag_t st = sc->sc_st;
2156 bus_space_handle_t sh = sc->sc_sh;
2157 int i;
2158
2159 bus_space_write_4(st, sh, SIP_IER, 0);
2160 bus_space_write_4(st, sh, SIP_IMR, 0);
2161 bus_space_write_4(st, sh, SIP_RFCR, 0);
2162 bus_space_write_4(st, sh, SIP_CR, CR_RST);
2163
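	/*
	 * Wait for the self-clearing reset bit; the loop below polls
	 * CR_RST for up to SIP_TIMEOUT iterations, 2us apart.
	 */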
2164 for (i = 0; i < SIP_TIMEOUT; i++) {
2165 if ((bus_space_read_4(st, sh, SIP_CR) & CR_RST) == 0)
2166 break;
2167 delay(2);
2168 }
2169
2170 if (i == SIP_TIMEOUT)
2171 printf("%s: reset failed to complete\n", sc->sc_dev.dv_xname);
2172
2173 delay(1000);
2174
2175 #ifdef DP83820
2176 /*
2177 * Set the general purpose I/O bits. Do it here in case we
2178 * need to have GPIO set up to talk to the media interface.
2179 */
2180 bus_space_write_4(st, sh, SIP_GPIOR, sc->sc_gpior);
2181 delay(1000);
2182 #endif /* DP83820 */
2183 }
2184
2185 /*
2186 * sip_init: [ ifnet interface function ]
2187 *
2188 * Initialize the interface. Must be called at splnet().
2189 */
2190 int
2191 SIP_DECL(init)(struct ifnet *ifp)
2192 {
2193 struct sip_softc *sc = ifp->if_softc;
2194 bus_space_tag_t st = sc->sc_st;
2195 bus_space_handle_t sh = sc->sc_sh;
2196 struct sip_txsoft *txs;
2197 struct sip_rxsoft *rxs;
2198 struct sip_desc *sipd;
2199 #if defined(DP83820)
2200 u_int32_t reg;
2201 #endif
2202 int i, error = 0;
2203
2204 /*
2205 * Cancel any pending I/O.
2206 */
2207 SIP_DECL(stop)(ifp, 0);
2208
2209 /*
2210 * Reset the chip to a known state.
2211 */
2212 SIP_DECL(reset)(sc);
2213
2214 #if !defined(DP83820)
2215 if (SIP_CHIP_MODEL(sc, PCI_VENDOR_NS, PCI_PRODUCT_NS_DP83815)) {
2216 /*
2217 * DP83815 manual, page 78:
2218 * 4.4 Recommended Registers Configuration
2219 * For optimum performance of the DP83815, version noted
2220 * as DP83815CVNG (SRR = 203h), the listed register
2221 * modifications must be followed in sequence...
2222 *
2223 * It's not clear if this should be 302h or 203h because that
2224 * chip name is listed as SRR 302h in the description of the
2225 * SRR register. However, my revision 302h DP83815 on the
2226 * Netgear FA311 purchased in 02/2001 needs these settings
2227 * to avoid tons of errors in AcceptPerfectMatch (non-
2228 * IFF_PROMISC) mode. I do not know if other revisions need
2229 * this set or not. [briggs -- 09 March 2001]
2230 *
2231 * Note that only the low-order 12 bits of 0xe4 are documented
2232 * and that this sets reserved bits in that register.
2233 */
2234 bus_space_write_4(st, sh, 0x00cc, 0x0001);
2235
2236 bus_space_write_4(st, sh, 0x00e4, 0x189C);
2237 bus_space_write_4(st, sh, 0x00fc, 0x0000);
2238 bus_space_write_4(st, sh, 0x00f4, 0x5040);
2239 bus_space_write_4(st, sh, 0x00f8, 0x008c);
2240
2241 bus_space_write_4(st, sh, 0x00cc, 0x0000);
2242 }
2243 #endif /* ! DP83820 */
2244
2245 /*
2246 * Initialize the transmit descriptor ring.
2247 */
2248 for (i = 0; i < SIP_NTXDESC; i++) {
2249 sipd = &sc->sc_txdescs[i];
2250 memset(sipd, 0, sizeof(struct sip_desc));
2251 sipd->sipd_link = htole32(SIP_CDTXADDR(sc, SIP_NEXTTX(i)));
2252 }
2253 SIP_CDTXSYNC(sc, 0, SIP_NTXDESC,
2254 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
2255 sc->sc_txfree = SIP_NTXDESC;
2256 sc->sc_txnext = 0;
2257 sc->sc_txwin = 0;
2258
2259 /*
2260 * Initialize the transmit job descriptors.
2261 */
2262 SIMPLEQ_INIT(&sc->sc_txfreeq);
2263 SIMPLEQ_INIT(&sc->sc_txdirtyq);
2264 for (i = 0; i < SIP_TXQUEUELEN; i++) {
2265 txs = &sc->sc_txsoft[i];
2266 txs->txs_mbuf = NULL;
2267 SIMPLEQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
2268 }
2269
2270 /*
2271 * Initialize the receive descriptor and receive job
2272 * descriptor rings.
2273 */
2274 for (i = 0; i < SIP_NRXDESC; i++) {
2275 rxs = &sc->sc_rxsoft[i];
2276 if (rxs->rxs_mbuf == NULL) {
2277 if ((error = SIP_DECL(add_rxbuf)(sc, i)) != 0) {
2278 printf("%s: unable to allocate or map rx "
2279 "buffer %d, error = %d\n",
2280 sc->sc_dev.dv_xname, i, error);
2281 /*
2282 * XXX Should attempt to run with fewer receive
2283 * XXX buffers instead of just failing.
2284 */
2285 SIP_DECL(rxdrain)(sc);
2286 goto out;
2287 }
2288 } else
2289 SIP_INIT_RXDESC(sc, i);
2290 }
2291 sc->sc_rxptr = 0;
2292 #ifdef DP83820
2293 sc->sc_rxdiscard = 0;
2294 SIP_RXCHAIN_RESET(sc);
2295 #endif /* DP83820 */
2296
2297 /*
2298 * Set the configuration register; it's already initialized
2299 * in sip_attach().
2300 */
2301 bus_space_write_4(st, sh, SIP_CFG, sc->sc_cfg);
2302
2303 /*
2304 * Initialize the prototype TXCFG register.
2305 */
2306 #if defined(DP83820)
2307 sc->sc_txcfg = TXCFG_MXDMA_512;
2308 sc->sc_rxcfg = RXCFG_MXDMA_512;
2309 #else
2310 if ((SIP_SIS900_REV(sc, SIS_REV_635) ||
2311 SIP_SIS900_REV(sc, SIS_REV_960) ||
2312 SIP_SIS900_REV(sc, SIS_REV_900B)) &&
2313 (bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_CFG) & CFG_EDBMASTEN)) {
2314 sc->sc_txcfg = TXCFG_MXDMA_64;
2315 sc->sc_rxcfg = RXCFG_MXDMA_64;
2316 } else {
2317 sc->sc_txcfg = TXCFG_MXDMA_512;
2318 sc->sc_rxcfg = RXCFG_MXDMA_512;
2319 }
2320 #endif /* DP83820 */
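	/*
	 * ATP is assumed to be automatic transmit padding; the fill and
	 * drain values program the transmit FIFO thresholds (units are
	 * chip-defined; see the DP83815/SiS 900 datasheets).
	 */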
2321
2322 sc->sc_txcfg |= TXCFG_ATP |
2323 (sc->sc_tx_fill_thresh << TXCFG_FLTH_SHIFT) |
2324 sc->sc_tx_drain_thresh;
2325 bus_space_write_4(st, sh, SIP_TXCFG, sc->sc_txcfg);
2326
2327 /*
2328 * Initialize the receive drain threshold if we have never
2329 * done so.
2330 */
2331 if (sc->sc_rx_drain_thresh == 0) {
2332 /*
2333 * XXX This value should be tuned. This is set to the
2334 * maximum of 248 bytes, and we may be able to improve
2335 * performance by decreasing it (although we should never
2336 * set this value lower than 2; 14 bytes are required to
2337 * filter the packet).
2338 */
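		/*
		 * (The DRTH field is believed to count in 8-byte units,
		 * which is how the all-ones field value corresponds to
		 * the 248 bytes mentioned above.)
		 */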
2339 sc->sc_rx_drain_thresh = RXCFG_DRTH >> RXCFG_DRTH_SHIFT;
2340 }
2341
2342 /*
2343 * Initialize the prototype RXCFG register.
2344 */
2345 sc->sc_rxcfg |= (sc->sc_rx_drain_thresh << RXCFG_DRTH_SHIFT);
2346 #ifdef DP83820
2347 /*
2348 * Accept long packets (including FCS) so we can handle
2349 * 802.1q-tagged frames and jumbo frames properly.
2350 */
2351 if (ifp->if_mtu > ETHERMTU ||
2352 (sc->sc_ethercom.ec_capenable & ETHERCAP_VLAN_MTU))
2353 sc->sc_rxcfg |= RXCFG_ALP;
2354
2355 /*
2356 * Checksum offloading is disabled if the user selects an MTU
2357 	 * larger than 8109. (FreeBSD says 8152, but there is empirical
2358 * evidence that >8109 does not work on some boards, such as the
2359 * Planex GN-1000TE).
2360 */
2361 if (ifp->if_mtu > 8109 &&
2362 (ifp->if_capenable &
2363 (IFCAP_CSUM_IPv4|IFCAP_CSUM_TCPv4|IFCAP_CSUM_UDPv4))) {
2364 printf("%s: Checksum offloading does not work if MTU > 8109 - "
2365 "disabled.\n", sc->sc_dev.dv_xname);
2366 ifp->if_capenable &= ~(IFCAP_CSUM_IPv4|IFCAP_CSUM_TCPv4|
2367 IFCAP_CSUM_UDPv4);
2368 ifp->if_csum_flags_tx = 0;
2369 ifp->if_csum_flags_rx = 0;
2370 }
2371 #else
2372 /*
2373 * Accept packets >1518 bytes (including FCS) so we can handle
2374 * 802.1q-tagged frames properly.
2375 */
2376 if (sc->sc_ethercom.ec_capenable & ETHERCAP_VLAN_MTU)
2377 sc->sc_rxcfg |= RXCFG_ALP;
2378 #endif
2379 bus_space_write_4(st, sh, SIP_RXCFG, sc->sc_rxcfg);
2380
2381 #ifdef DP83820
2382 /*
2383 * Initialize the VLAN/IP receive control register.
2384 * We enable checksum computation on all incoming
2385 * packets, and do not reject packets w/ bad checksums.
2386 */
2387 reg = 0;
2388 if (ifp->if_capenable &
2389 (IFCAP_CSUM_IPv4|IFCAP_CSUM_TCPv4|IFCAP_CSUM_UDPv4))
2390 reg |= VRCR_IPEN;
2391 if (sc->sc_ethercom.ec_nvlans != 0)
2392 reg |= VRCR_VTDEN|VRCR_VTREN;
2393 bus_space_write_4(st, sh, SIP_VRCR, reg);
2394
2395 /*
2396 * Initialize the VLAN/IP transmit control register.
2397 * We enable outgoing checksum computation on a
2398 * per-packet basis.
2399 */
2400 reg = 0;
2401 if (ifp->if_capenable &
2402 (IFCAP_CSUM_IPv4|IFCAP_CSUM_TCPv4|IFCAP_CSUM_UDPv4))
2403 reg |= VTCR_PPCHK;
2404 if (sc->sc_ethercom.ec_nvlans != 0)
2405 reg |= VTCR_VPPTI;
2406 bus_space_write_4(st, sh, SIP_VTCR, reg);
2407
2408 /*
2409 * If we're using VLANs, initialize the VLAN data register.
2410 * To understand why we bswap the VLAN Ethertype, see section
2411 * 4.2.36 of the DP83820 manual.
2412 */
2413 if (sc->sc_ethercom.ec_nvlans != 0)
2414 bus_space_write_4(st, sh, SIP_VDR, bswap16(ETHERTYPE_VLAN));
2415 #endif /* DP83820 */
2416
2417 /*
2418 * Give the transmit and receive rings to the chip.
2419 */
2420 bus_space_write_4(st, sh, SIP_TXDP, SIP_CDTXADDR(sc, sc->sc_txnext));
2421 bus_space_write_4(st, sh, SIP_RXDP, SIP_CDRXADDR(sc, sc->sc_rxptr));
2422
2423 /*
2424 * Initialize the interrupt mask.
2425 */
2426 sc->sc_imr = ISR_DPERR|ISR_SSERR|ISR_RMABT|ISR_RTABT|ISR_RXSOVR|
2427 ISR_TXURN|ISR_TXDESC|ISR_TXIDLE|ISR_RXORN|ISR_RXIDLE|ISR_RXDESC;
2428 bus_space_write_4(st, sh, SIP_IMR, sc->sc_imr);
2429
2430 /* Set up the receive filter. */
2431 (*sc->sc_model->sip_variant->sipv_set_filter)(sc);
2432
2433 /*
2434 * Set the current media. Do this after initializing the prototype
2435 * IMR, since sip_mii_statchg() modifies the IMR for 802.3x flow
2436 * control.
2437 */
2438 mii_mediachg(&sc->sc_mii);
2439
2440 #ifdef DP83820
2441 /*
2442 * Set the interrupt hold-off timer to 100us.
2443 */
2444 bus_space_write_4(st, sh, SIP_IHR, 0x01);
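	/*
	 * (The IHR is assumed to count in 100us units, so 0x01 yields
	 * the 100us hold-off noted above.)
	 */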
2445 #endif
2446
2447 /*
2448 * Enable interrupts.
2449 */
2450 bus_space_write_4(st, sh, SIP_IER, IER_IE);
2451
2452 /*
2453 * Start the transmit and receive processes.
2454 */
2455 bus_space_write_4(st, sh, SIP_CR, CR_RXE | CR_TXE);
2456
2457 /*
2458 * Start the one second MII clock.
2459 */
2460 callout_reset(&sc->sc_tick_ch, hz, SIP_DECL(tick), sc);
2461
2462 /*
2463 * ...all done!
2464 */
2465 ifp->if_flags |= IFF_RUNNING;
2466 ifp->if_flags &= ~IFF_OACTIVE;
2467
2468 out:
2469 if (error)
2470 printf("%s: interface not running\n", sc->sc_dev.dv_xname);
2471 return (error);
2472 }
2473
2474 /*
2475  * sip_rxdrain:
2476 *
2477 * Drain the receive queue.
2478 */
2479 void
2480 SIP_DECL(rxdrain)(struct sip_softc *sc)
2481 {
2482 struct sip_rxsoft *rxs;
2483 int i;
2484
2485 for (i = 0; i < SIP_NRXDESC; i++) {
2486 rxs = &sc->sc_rxsoft[i];
2487 if (rxs->rxs_mbuf != NULL) {
2488 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
2489 m_freem(rxs->rxs_mbuf);
2490 rxs->rxs_mbuf = NULL;
2491 }
2492 }
2493 }
2494
2495 /*
2496 * sip_stop: [ ifnet interface function ]
2497 *
2498 * Stop transmission on the interface.
2499 */
2500 void
2501 SIP_DECL(stop)(struct ifnet *ifp, int disable)
2502 {
2503 struct sip_softc *sc = ifp->if_softc;
2504 bus_space_tag_t st = sc->sc_st;
2505 bus_space_handle_t sh = sc->sc_sh;
2506 struct sip_txsoft *txs;
2507 u_int32_t cmdsts = 0; /* DEBUG */
2508
2509 /*
2510 * Stop the one second clock.
2511 */
2512 callout_stop(&sc->sc_tick_ch);
2513
2514 /* Down the MII. */
2515 mii_down(&sc->sc_mii);
2516
2517 /*
2518 * Disable interrupts.
2519 */
2520 bus_space_write_4(st, sh, SIP_IER, 0);
2521
2522 /*
2523 * Stop receiver and transmitter.
2524 */
2525 bus_space_write_4(st, sh, SIP_CR, CR_RXD | CR_TXD);
2526
2527 /*
2528 * Release any queued transmit buffers.
2529 */
2530 while ((txs = SIMPLEQ_FIRST(&sc->sc_txdirtyq)) != NULL) {
2531 if ((ifp->if_flags & IFF_DEBUG) != 0 &&
2532 SIMPLEQ_NEXT(txs, txs_q) == NULL &&
2533 (le32toh(sc->sc_txdescs[txs->txs_lastdesc].sipd_cmdsts) &
2534 CMDSTS_INTR) == 0)
2535 printf("%s: sip_stop: last descriptor does not "
2536 "have INTR bit set\n", sc->sc_dev.dv_xname);
2537 SIMPLEQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q);
2538 #ifdef DIAGNOSTIC
2539 if (txs->txs_mbuf == NULL) {
2540 printf("%s: dirty txsoft with no mbuf chain\n",
2541 sc->sc_dev.dv_xname);
2542 panic("sip_stop");
2543 }
2544 #endif
2545 cmdsts |= /* DEBUG */
2546 le32toh(sc->sc_txdescs[txs->txs_lastdesc].sipd_cmdsts);
2547 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
2548 m_freem(txs->txs_mbuf);
2549 txs->txs_mbuf = NULL;
2550 SIMPLEQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
2551 }
2552
2553 if (disable)
2554 SIP_DECL(rxdrain)(sc);
2555
2556 /*
2557 * Mark the interface down and cancel the watchdog timer.
2558 */
2559 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
2560 ifp->if_timer = 0;
2561
2562 if ((ifp->if_flags & IFF_DEBUG) != 0 &&
2563 (cmdsts & CMDSTS_INTR) == 0 && sc->sc_txfree != SIP_NTXDESC)
2564 printf("%s: sip_stop: no INTR bits set in dirty tx "
2565 "descriptors\n", sc->sc_dev.dv_xname);
2566 }
2567
2568 /*
2569 * sip_read_eeprom:
2570 *
2571 * Read data from the serial EEPROM.
2572 */
2573 void
2574 SIP_DECL(read_eeprom)(struct sip_softc *sc, int word, int wordcnt,
2575 u_int16_t *data)
2576 {
2577 bus_space_tag_t st = sc->sc_st;
2578 bus_space_handle_t sh = sc->sc_sh;
2579 u_int16_t reg;
2580 int i, x;
2581
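	/*
	 * Each iteration bit-bangs one word out of the serial EEPROM
	 * (assumed to be a 93C46-style part): assert chip select, clock
	 * out the 3-bit READ opcode and a 6-bit word address, then clock
	 * in 16 data bits, MSB first.
	 */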
2582 for (i = 0; i < wordcnt; i++) {
2583 /* Send CHIP SELECT. */
2584 reg = EROMAR_EECS;
2585 bus_space_write_4(st, sh, SIP_EROMAR, reg);
2586
2587 /* Shift in the READ opcode. */
2588 for (x = 3; x > 0; x--) {
2589 if (SIP_EEPROM_OPC_READ & (1 << (x - 1)))
2590 reg |= EROMAR_EEDI;
2591 else
2592 reg &= ~EROMAR_EEDI;
2593 bus_space_write_4(st, sh, SIP_EROMAR, reg);
2594 bus_space_write_4(st, sh, SIP_EROMAR,
2595 reg | EROMAR_EESK);
2596 delay(4);
2597 bus_space_write_4(st, sh, SIP_EROMAR, reg);
2598 delay(4);
2599 }
2600
2601 /* Shift in address. */
2602 for (x = 6; x > 0; x--) {
2603 if ((word + i) & (1 << (x - 1)))
2604 reg |= EROMAR_EEDI;
2605 else
2606 reg &= ~EROMAR_EEDI;
2607 bus_space_write_4(st, sh, SIP_EROMAR, reg);
2608 bus_space_write_4(st, sh, SIP_EROMAR,
2609 reg | EROMAR_EESK);
2610 delay(4);
2611 bus_space_write_4(st, sh, SIP_EROMAR, reg);
2612 delay(4);
2613 }
2614
2615 /* Shift out data. */
2616 reg = EROMAR_EECS;
2617 data[i] = 0;
2618 for (x = 16; x > 0; x--) {
2619 bus_space_write_4(st, sh, SIP_EROMAR,
2620 reg | EROMAR_EESK);
2621 delay(4);
2622 if (bus_space_read_4(st, sh, SIP_EROMAR) & EROMAR_EEDO)
2623 data[i] |= (1 << (x - 1));
2624 bus_space_write_4(st, sh, SIP_EROMAR, reg);
2625 delay(4);
2626 }
2627
2628 /* Clear CHIP SELECT. */
2629 bus_space_write_4(st, sh, SIP_EROMAR, 0);
2630 delay(4);
2631 }
2632 }
2633
2634 /*
2635 * sip_add_rxbuf:
2636 *
2637 * Add a receive buffer to the indicated descriptor.
2638 */
2639 int
2640 SIP_DECL(add_rxbuf)(struct sip_softc *sc, int idx)
2641 {
2642 struct sip_rxsoft *rxs = &sc->sc_rxsoft[idx];
2643 struct mbuf *m;
2644 int error;
2645
2646 MGETHDR(m, M_DONTWAIT, MT_DATA);
2647 if (m == NULL)
2648 return (ENOBUFS);
2649
2650 MCLGET(m, M_DONTWAIT);
2651 if ((m->m_flags & M_EXT) == 0) {
2652 m_freem(m);
2653 return (ENOBUFS);
2654 }
2655
2656 #if defined(DP83820)
2657 m->m_len = SIP_RXBUF_LEN;
2658 #endif /* DP83820 */
2659
2660 if (rxs->rxs_mbuf != NULL)
2661 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
2662
2663 rxs->rxs_mbuf = m;
2664
2665 error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap,
2666 m->m_ext.ext_buf, m->m_ext.ext_size, NULL,
2667 BUS_DMA_READ|BUS_DMA_NOWAIT);
2668 if (error) {
2669 printf("%s: can't load rx DMA map %d, error = %d\n",
2670 sc->sc_dev.dv_xname, idx, error);
2671 panic("sip_add_rxbuf"); /* XXX */
2672 }
2673
2674 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
2675 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
2676
2677 SIP_INIT_RXDESC(sc, idx);
2678
2679 return (0);
2680 }
2681
2682 #if !defined(DP83820)
2683 /*
2684 * sip_sis900_set_filter:
2685 *
2686 * Set up the receive filter.
2687 */
2688 void
2689 SIP_DECL(sis900_set_filter)(struct sip_softc *sc)
2690 {
2691 bus_space_tag_t st = sc->sc_st;
2692 bus_space_handle_t sh = sc->sc_sh;
2693 struct ethercom *ec = &sc->sc_ethercom;
2694 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2695 struct ether_multi *enm;
2696 u_int8_t *cp;
2697 struct ether_multistep step;
2698 u_int32_t crc, mchash[16];
2699
2700 /*
2701 * Initialize the prototype RFCR.
2702 */
2703 sc->sc_rfcr = RFCR_RFEN;
2704 if (ifp->if_flags & IFF_BROADCAST)
2705 sc->sc_rfcr |= RFCR_AAB;
2706 if (ifp->if_flags & IFF_PROMISC) {
2707 sc->sc_rfcr |= RFCR_AAP;
2708 goto allmulti;
2709 }
2710
2711 /*
2712 * Set up the multicast address filter by passing all multicast
2713 	 * addresses through a CRC generator, and then using the high-order
2714 	 * 7 bits (8 on the 635/960/900B revisions) as an index into the
2715 	 * 128-bit (or 256-bit) multicast hash table (only the lower 16 bits
2716 	 * of each 32-bit multicast hash register are valid). The high-order
2717 	 * bits select the register, while the rest select the bit within it.
2718 */
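	/*
	 * For example, on a 635/960/900B, a CRC of 0x9abcdef0 yields
	 * the 8-bit hash 0x9a (154): filter word 9, bit 10.
	 */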
2719
2720 memset(mchash, 0, sizeof(mchash));
2721
2722 ETHER_FIRST_MULTI(step, ec, enm);
2723 while (enm != NULL) {
2724 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
2725 /*
2726 * We must listen to a range of multicast addresses.
2727 * For now, just accept all multicasts, rather than
2728 * trying to set only those filter bits needed to match
2729 * the range. (At this time, the only use of address
2730 * ranges is for IP multicast routing, for which the
2731 * range is big enough to require all bits set.)
2732 */
2733 goto allmulti;
2734 }
2735
2736 crc = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN);
2737
2738 if (SIP_SIS900_REV(sc, SIS_REV_635) ||
2739 SIP_SIS900_REV(sc, SIS_REV_960) ||
2740 SIP_SIS900_REV(sc, SIS_REV_900B)) {
2741 /* Just want the 8 most significant bits. */
2742 crc >>= 24;
2743 } else {
2744 /* Just want the 7 most significant bits. */
2745 crc >>= 25;
2746 }
2747
2748 /* Set the corresponding bit in the hash table. */
2749 mchash[crc >> 4] |= 1 << (crc & 0xf);
2750
2751 ETHER_NEXT_MULTI(step, enm);
2752 }
2753
2754 ifp->if_flags &= ~IFF_ALLMULTI;
2755 goto setit;
2756
2757 allmulti:
2758 ifp->if_flags |= IFF_ALLMULTI;
2759 sc->sc_rfcr |= RFCR_AAM;
2760
2761 setit:
2762 #define FILTER_EMIT(addr, data) \
2763 bus_space_write_4(st, sh, SIP_RFCR, (addr)); \
2764 delay(1); \
2765 bus_space_write_4(st, sh, SIP_RFDR, (data)); \
2766 delay(1)
2767
2768 /*
2769 * Disable receive filter, and program the node address.
2770 */
2771 cp = LLADDR(ifp->if_sadl);
2772 FILTER_EMIT(RFCR_RFADDR_NODE0, (cp[1] << 8) | cp[0]);
2773 FILTER_EMIT(RFCR_RFADDR_NODE2, (cp[3] << 8) | cp[2]);
2774 FILTER_EMIT(RFCR_RFADDR_NODE4, (cp[5] << 8) | cp[4]);
2775
2776 if ((ifp->if_flags & IFF_ALLMULTI) == 0) {
2777 /*
2778 * Program the multicast hash table.
2779 */
2780 FILTER_EMIT(RFCR_RFADDR_MC0, mchash[0]);
2781 FILTER_EMIT(RFCR_RFADDR_MC1, mchash[1]);
2782 FILTER_EMIT(RFCR_RFADDR_MC2, mchash[2]);
2783 FILTER_EMIT(RFCR_RFADDR_MC3, mchash[3]);
2784 FILTER_EMIT(RFCR_RFADDR_MC4, mchash[4]);
2785 FILTER_EMIT(RFCR_RFADDR_MC5, mchash[5]);
2786 FILTER_EMIT(RFCR_RFADDR_MC6, mchash[6]);
2787 FILTER_EMIT(RFCR_RFADDR_MC7, mchash[7]);
2788 if (SIP_SIS900_REV(sc, SIS_REV_635) ||
2789 SIP_SIS900_REV(sc, SIS_REV_960) ||
2790 SIP_SIS900_REV(sc, SIS_REV_900B)) {
2791 FILTER_EMIT(RFCR_RFADDR_MC8, mchash[8]);
2792 FILTER_EMIT(RFCR_RFADDR_MC9, mchash[9]);
2793 FILTER_EMIT(RFCR_RFADDR_MC10, mchash[10]);
2794 FILTER_EMIT(RFCR_RFADDR_MC11, mchash[11]);
2795 FILTER_EMIT(RFCR_RFADDR_MC12, mchash[12]);
2796 FILTER_EMIT(RFCR_RFADDR_MC13, mchash[13]);
2797 FILTER_EMIT(RFCR_RFADDR_MC14, mchash[14]);
2798 FILTER_EMIT(RFCR_RFADDR_MC15, mchash[15]);
2799 }
2800 }
2801 #undef FILTER_EMIT
2802
2803 /*
2804 * Re-enable the receiver filter.
2805 */
2806 bus_space_write_4(st, sh, SIP_RFCR, sc->sc_rfcr);
2807 }
2808 #endif /* ! DP83820 */
2809
2810 /*
2811 * sip_dp83815_set_filter:
2812 *
2813 * Set up the receive filter.
2814 */
2815 void
2816 SIP_DECL(dp83815_set_filter)(struct sip_softc *sc)
2817 {
2818 bus_space_tag_t st = sc->sc_st;
2819 bus_space_handle_t sh = sc->sc_sh;
2820 struct ethercom *ec = &sc->sc_ethercom;
2821 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2822 struct ether_multi *enm;
2823 u_int8_t *cp;
2824 struct ether_multistep step;
2825 u_int32_t crc, hash, slot, bit;
2826 #ifdef DP83820
2827 #define MCHASH_NWORDS 128
2828 #else
2829 #define MCHASH_NWORDS 32
2830 #endif /* DP83820 */
2831 u_int16_t mchash[MCHASH_NWORDS];
2832 int i;
2833
2834 /*
2835 * Initialize the prototype RFCR.
2836 * Enable the receive filter, and accept on
2837 * Perfect (destination address) Match
2838 * If IFF_BROADCAST, also accept all broadcast packets.
2839 * If IFF_PROMISC, accept all unicast packets (and later, set
2840 * IFF_ALLMULTI and accept all multicast, too).
2841 */
2842 sc->sc_rfcr = RFCR_RFEN | RFCR_APM;
2843 if (ifp->if_flags & IFF_BROADCAST)
2844 sc->sc_rfcr |= RFCR_AAB;
2845 if (ifp->if_flags & IFF_PROMISC) {
2846 sc->sc_rfcr |= RFCR_AAP;
2847 goto allmulti;
2848 }
2849
2850 #ifdef DP83820
2851 /*
2852 * Set up the DP83820 multicast address filter by passing all multicast
2853 * addresses through a CRC generator, and then using the high-order
2854 * 11 bits as an index into the 2048 bit multicast hash table. The
2855 * high-order 7 bits select the slot, while the low-order 4 bits
2856 	 * select the bit within the slot. Note that only the low 16 bits
2857 * of each filter word are used, and there are 128 filter words.
2858 */
2859 #else
2860 /*
2861 * Set up the DP83815 multicast address filter by passing all multicast
2862 * addresses through a CRC generator, and then using the high-order
2863 * 9 bits as an index into the 512 bit multicast hash table. The
2864 * high-order 5 bits select the slot, while the low-order 4 bits
2865 	 * select the bit within the slot. Note that only the low 16 bits
2866 * of each filter word are used, and there are 32 filter words.
2867 */
2868 #endif /* DP83820 */
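	/*
	 * For example, on the DP83815 a CRC of 0x91800000 yields the
	 * 9-bit hash 0x123: slot 0x12, bit 3 of the filter memory.
	 */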
2869
2870 memset(mchash, 0, sizeof(mchash));
2871
2872 ifp->if_flags &= ~IFF_ALLMULTI;
2873 ETHER_FIRST_MULTI(step, ec, enm);
2874 if (enm == NULL)
2875 goto setit;
2876 while (enm != NULL) {
2877 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
2878 /*
2879 * We must listen to a range of multicast addresses.
2880 * For now, just accept all multicasts, rather than
2881 * trying to set only those filter bits needed to match
2882 * the range. (At this time, the only use of address
2883 * ranges is for IP multicast routing, for which the
2884 * range is big enough to require all bits set.)
2885 */
2886 goto allmulti;
2887 }
2888
2889 crc = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN);
2890
2891 #ifdef DP83820
2892 /* Just want the 11 most significant bits. */
2893 hash = crc >> 21;
2894 #else
2895 /* Just want the 9 most significant bits. */
2896 hash = crc >> 23;
2897 #endif /* DP83820 */
2898
2899 slot = hash >> 4;
2900 bit = hash & 0xf;
2901
2902 /* Set the corresponding bit in the hash table. */
2903 mchash[slot] |= 1 << bit;
2904
2905 ETHER_NEXT_MULTI(step, enm);
2906 }
2907 sc->sc_rfcr |= RFCR_MHEN;
2908 goto setit;
2909
2910 allmulti:
2911 ifp->if_flags |= IFF_ALLMULTI;
2912 sc->sc_rfcr |= RFCR_AAM;
2913
2914 setit:
2915 #define FILTER_EMIT(addr, data) \
2916 bus_space_write_4(st, sh, SIP_RFCR, (addr)); \
2917 delay(1); \
2918 bus_space_write_4(st, sh, SIP_RFDR, (data)); \
2919 delay(1)
2920
2921 /*
2922 * Disable receive filter, and program the node address.
2923 */
2924 cp = LLADDR(ifp->if_sadl);
2925 FILTER_EMIT(RFCR_NS_RFADDR_PMATCH0, (cp[1] << 8) | cp[0]);
2926 FILTER_EMIT(RFCR_NS_RFADDR_PMATCH2, (cp[3] << 8) | cp[2]);
2927 FILTER_EMIT(RFCR_NS_RFADDR_PMATCH4, (cp[5] << 8) | cp[4]);
2928
2929 if ((ifp->if_flags & IFF_ALLMULTI) == 0) {
2930 /*
2931 * Program the multicast hash table.
2932 */
2933 for (i = 0; i < MCHASH_NWORDS; i++) {
2934 FILTER_EMIT(RFCR_NS_RFADDR_FILTMEM + (i * 2),
2935 mchash[i]);
2936 }
2937 }
2938 #undef FILTER_EMIT
2939 #undef MCHASH_NWORDS
2940
2941 /*
2942 * Re-enable the receiver filter.
2943 */
2944 bus_space_write_4(st, sh, SIP_RFCR, sc->sc_rfcr);
2945 }
2946
2947 #if defined(DP83820)
2948 /*
2949 * sip_dp83820_mii_readreg: [mii interface function]
2950 *
2951 * Read a PHY register on the MII of the DP83820.
2952 */
2953 int
2954 SIP_DECL(dp83820_mii_readreg)(struct device *self, int phy, int reg)
2955 {
2956 struct sip_softc *sc = (void *) self;
2957
2958 if (sc->sc_cfg & CFG_TBI_EN) {
2959 bus_addr_t tbireg;
2960 int rv;
2961
2962 if (phy != 0)
2963 return (0);
2964
2965 switch (reg) {
2966 case MII_BMCR: tbireg = SIP_TBICR; break;
2967 case MII_BMSR: tbireg = SIP_TBISR; break;
2968 case MII_ANAR: tbireg = SIP_TANAR; break;
2969 case MII_ANLPAR: tbireg = SIP_TANLPAR; break;
2970 case MII_ANER: tbireg = SIP_TANER; break;
2971 case MII_EXTSR:
2972 /*
2973 * Don't even bother reading the TESR register.
2974 * The manual documents that the device has
2975 * 1000baseX full/half capability, but the
2976 			 * register itself seems to read back 0 on some
2977 * boards. Just hard-code the result.
2978 */
2979 return (EXTSR_1000XFDX|EXTSR_1000XHDX);
2980
2981 default:
2982 return (0);
2983 }
2984
2985 rv = bus_space_read_4(sc->sc_st, sc->sc_sh, tbireg) & 0xffff;
2986 if (tbireg == SIP_TBISR) {
2987 /* LINK and ACOMP are switched! */
2988 int val = rv;
2989
2990 rv = 0;
2991 if (val & TBISR_MR_LINK_STATUS)
2992 rv |= BMSR_LINK;
2993 if (val & TBISR_MR_AN_COMPLETE)
2994 rv |= BMSR_ACOMP;
2995
2996 /*
2997 * The manual claims this register reads back 0
2998 * on hard and soft reset. But we want to let
2999 * the gentbi driver know that we support auto-
3000 * negotiation, so hard-code this bit in the
3001 * result.
3002 */
3003 rv |= BMSR_ANEG | BMSR_EXTSTAT;
3004 }
3005
3006 return (rv);
3007 }
3008
3009 return (mii_bitbang_readreg(self, &SIP_DECL(mii_bitbang_ops),
3010 phy, reg));
3011 }
3012
3013 /*
3014 * sip_dp83820_mii_writereg: [mii interface function]
3015 *
3016 * Write a PHY register on the MII of the DP83820.
3017 */
3018 void
3019 SIP_DECL(dp83820_mii_writereg)(struct device *self, int phy, int reg, int val)
3020 {
3021 struct sip_softc *sc = (void *) self;
3022
3023 if (sc->sc_cfg & CFG_TBI_EN) {
3024 bus_addr_t tbireg;
3025
3026 if (phy != 0)
3027 return;
3028
3029 switch (reg) {
3030 case MII_BMCR: tbireg = SIP_TBICR; break;
3031 case MII_ANAR: tbireg = SIP_TANAR; break;
3032 case MII_ANLPAR: tbireg = SIP_TANLPAR; break;
3033 default:
3034 return;
3035 }
3036
3037 bus_space_write_4(sc->sc_st, sc->sc_sh, tbireg, val);
3038 return;
3039 }
3040
3041 mii_bitbang_writereg(self, &SIP_DECL(mii_bitbang_ops),
3042 phy, reg, val);
3043 }
3044
3045 /*
3046 * sip_dp83820_mii_statchg: [mii interface function]
3047 *
3048 * Callback from MII layer when media changes.
3049 */
3050 void
3051 SIP_DECL(dp83820_mii_statchg)(struct device *self)
3052 {
3053 struct sip_softc *sc = (struct sip_softc *) self;
3054 u_int32_t cfg;
3055
3056 /*
3057 * Update TXCFG for full-duplex operation.
3058 */
3059 if ((sc->sc_mii.mii_media_active & IFM_FDX) != 0)
3060 sc->sc_txcfg |= (TXCFG_CSI | TXCFG_HBI);
3061 else
3062 sc->sc_txcfg &= ~(TXCFG_CSI | TXCFG_HBI);
3063
3064 /*
3065 * Update RXCFG for full-duplex or loopback.
3066 */
3067 if ((sc->sc_mii.mii_media_active & IFM_FDX) != 0 ||
3068 IFM_SUBTYPE(sc->sc_mii.mii_media_active) == IFM_LOOP)
3069 sc->sc_rxcfg |= RXCFG_ATX;
3070 else
3071 sc->sc_rxcfg &= ~RXCFG_ATX;
3072
3073 /*
3074 * Update CFG for MII/GMII.
3075 */
3076 if (sc->sc_ethercom.ec_if.if_baudrate == IF_Mbps(1000))
3077 cfg = sc->sc_cfg | CFG_MODE_1000;
3078 else
3079 cfg = sc->sc_cfg;
3080
3081 /*
3082 * XXX 802.3x flow control.
3083 */
3084
3085 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_CFG, cfg);
3086 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_TXCFG, sc->sc_txcfg);
3087 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_RXCFG, sc->sc_rxcfg);
3088 }
3089 #endif /* DP83820 */
3090
3091 /*
3092 * sip_mii_bitbang_read: [mii bit-bang interface function]
3093 *
3094 * Read the MII serial port for the MII bit-bang module.
3095 */
3096 u_int32_t
3097 SIP_DECL(mii_bitbang_read)(struct device *self)
3098 {
3099 struct sip_softc *sc = (void *) self;
3100
3101 return (bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_EROMAR));
3102 }
3103
3104 /*
3105  * sip_mii_bitbang_write: [mii bit-bang interface function]
3106 *
3107 * Write the MII serial port for the MII bit-bang module.
3108 */
3109 void
3110 SIP_DECL(mii_bitbang_write)(struct device *self, u_int32_t val)
3111 {
3112 struct sip_softc *sc = (void *) self;
3113
3114 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_EROMAR, val);
3115 }
3116
3117 #ifndef DP83820
3118 /*
3119 * sip_sis900_mii_readreg: [mii interface function]
3120 *
3121 * Read a PHY register on the MII.
3122 */
3123 int
3124 SIP_DECL(sis900_mii_readreg)(struct device *self, int phy, int reg)
3125 {
3126 struct sip_softc *sc = (struct sip_softc *) self;
3127 u_int32_t enphy;
3128
3129 /*
3130 * The PHY of recent SiS chipsets is accessed through bitbang
3131 * operations.
3132 */
3133 if (sc->sc_model->sip_product == PCI_PRODUCT_SIS_900 &&
3134 sc->sc_rev >= SIS_REV_635)
3135 return (mii_bitbang_readreg(self, &SIP_DECL(mii_bitbang_ops),
3136 phy, reg));
3137
3138 /*
3139 * The SiS 900 has only an internal PHY on the MII. Only allow
3140 * MII address 0.
3141 */
3142 if (sc->sc_model->sip_product == PCI_PRODUCT_SIS_900 && phy != 0)
3143 return (0);
3144
3145 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_ENPHY,
3146 (phy << ENPHY_PHYADDR_SHIFT) | (reg << ENPHY_REGADDR_SHIFT) |
3147 ENPHY_RWCMD | ENPHY_ACCESS);
3148 do {
3149 enphy = bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_ENPHY);
3150 } while (enphy & ENPHY_ACCESS);
3151 return ((enphy & ENPHY_PHYDATA) >> ENPHY_DATA_SHIFT);
3152 }
3153
3154 /*
3155 * sip_sis900_mii_writereg: [mii interface function]
3156 *
3157 * Write a PHY register on the MII.
3158 */
3159 void
3160 SIP_DECL(sis900_mii_writereg)(struct device *self, int phy, int reg, int val)
3161 {
3162 struct sip_softc *sc = (struct sip_softc *) self;
3163 u_int32_t enphy;
3164
3165 if (sc->sc_model->sip_product == PCI_PRODUCT_SIS_900 &&
3166 sc->sc_rev >= SIS_REV_635) {
3167 mii_bitbang_writereg(self, &SIP_DECL(mii_bitbang_ops),
3168 phy, reg, val);
3169 return;
3170 }
3171
3172 /*
3173 * The SiS 900 has only an internal PHY on the MII. Only allow
3174 * MII address 0.
3175 */
3176 if (sc->sc_model->sip_product == PCI_PRODUCT_SIS_900 && phy != 0)
3177 return;
3178
3179 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_ENPHY,
3180 (val << ENPHY_DATA_SHIFT) | (phy << ENPHY_PHYADDR_SHIFT) |
3181 (reg << ENPHY_REGADDR_SHIFT) | ENPHY_ACCESS);
3182 do {
3183 enphy = bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_ENPHY);
3184 } while (enphy & ENPHY_ACCESS);
3185 }
3186
3187 /*
3188 * sip_sis900_mii_statchg: [mii interface function]
3189 *
3190 * Callback from MII layer when media changes.
3191 */
3192 void
3193 SIP_DECL(sis900_mii_statchg)(struct device *self)
3194 {
3195 struct sip_softc *sc = (struct sip_softc *) self;
3196 u_int32_t flowctl;
3197
3198 /*
3199 * Update TXCFG for full-duplex operation.
3200 */
3201 if ((sc->sc_mii.mii_media_active & IFM_FDX) != 0)
3202 sc->sc_txcfg |= (TXCFG_CSI | TXCFG_HBI);
3203 else
3204 sc->sc_txcfg &= ~(TXCFG_CSI | TXCFG_HBI);
3205
3206 /*
3207 * Update RXCFG for full-duplex or loopback.
3208 */
3209 if ((sc->sc_mii.mii_media_active & IFM_FDX) != 0 ||
3210 IFM_SUBTYPE(sc->sc_mii.mii_media_active) == IFM_LOOP)
3211 sc->sc_rxcfg |= RXCFG_ATX;
3212 else
3213 sc->sc_rxcfg &= ~RXCFG_ATX;
3214
3215 /*
3216 * Update IMR for use of 802.3x flow control.
3217 */
3218 if ((sc->sc_mii.mii_media_active & IFM_FLOW) != 0) {
3219 sc->sc_imr |= (ISR_PAUSE_END|ISR_PAUSE_ST);
3220 flowctl = FLOWCTL_FLOWEN;
3221 } else {
3222 sc->sc_imr &= ~(ISR_PAUSE_END|ISR_PAUSE_ST);
3223 flowctl = 0;
3224 }
3225
3226 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_TXCFG, sc->sc_txcfg);
3227 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_RXCFG, sc->sc_rxcfg);
3228 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_IMR, sc->sc_imr);
3229 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_FLOWCTL, flowctl);
3230 }
3231
3232 /*
3233 * sip_dp83815_mii_readreg: [mii interface function]
3234 *
3235 * Read a PHY register on the MII.
3236 */
3237 int
3238 SIP_DECL(dp83815_mii_readreg)(struct device *self, int phy, int reg)
3239 {
3240 struct sip_softc *sc = (struct sip_softc *) self;
3241 u_int32_t val;
3242
3243 /*
3244 * The DP83815 only has an internal PHY. Only allow
3245 * MII address 0.
3246 */
3247 if (phy != 0)
3248 return (0);
3249
3250 /*
3251 * Apparently, after a reset, the DP83815 can take a while
3252 * to respond. During this recovery period, the BMSR returns
3253 * a value of 0. Catch this -- it's not supposed to happen
3254 * (the BMSR has some hardcoded-to-1 bits), and wait for the
3255 * PHY to come back to life.
3256 *
3257 * This works out because the BMSR is the first register
3258 * read during the PHY probe process.
3259 */
3260 do {
3261 val = bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_NS_PHY(reg));
3262 } while (reg == MII_BMSR && val == 0);
3263
3264 return (val & 0xffff);
3265 }
3266
3267 /*
3268 * sip_dp83815_mii_writereg: [mii interface function]
3269 *
3270 * Write a PHY register to the MII.
3271 */
3272 void
3273 SIP_DECL(dp83815_mii_writereg)(struct device *self, int phy, int reg, int val)
3274 {
3275 struct sip_softc *sc = (struct sip_softc *) self;
3276
3277 /*
3278 * The DP83815 only has an internal PHY. Only allow
3279 * MII address 0.
3280 */
3281 if (phy != 0)
3282 return;
3283
3284 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_NS_PHY(reg), val);
3285 }
3286
3287 /*
3288 * sip_dp83815_mii_statchg: [mii interface function]
3289 *
3290 * Callback from MII layer when media changes.
3291 */
3292 void
3293 SIP_DECL(dp83815_mii_statchg)(struct device *self)
3294 {
3295 struct sip_softc *sc = (struct sip_softc *) self;
3296
3297 /*
3298 * Update TXCFG for full-duplex operation.
3299 */
3300 if ((sc->sc_mii.mii_media_active & IFM_FDX) != 0)
3301 sc->sc_txcfg |= (TXCFG_CSI | TXCFG_HBI);
3302 else
3303 sc->sc_txcfg &= ~(TXCFG_CSI | TXCFG_HBI);
3304
3305 /*
3306 * Update RXCFG for full-duplex or loopback.
3307 */
3308 if ((sc->sc_mii.mii_media_active & IFM_FDX) != 0 ||
3309 IFM_SUBTYPE(sc->sc_mii.mii_media_active) == IFM_LOOP)
3310 sc->sc_rxcfg |= RXCFG_ATX;
3311 else
3312 sc->sc_rxcfg &= ~RXCFG_ATX;
3313
3314 /*
3315 * XXX 802.3x flow control.
3316 */
3317
3318 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_TXCFG, sc->sc_txcfg);
3319 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_RXCFG, sc->sc_rxcfg);
3320
3321 /*
3322 * Some DP83815s experience problems when used with short
3323 * (< 30m/100ft) Ethernet cables in 100BaseTX mode. This
3324 * sequence adjusts the DSP's signal attenuation to fix the
3325 * problem.
3326 */
3327 if (IFM_SUBTYPE(sc->sc_mii.mii_media_active) == IFM_100_TX) {
3328 uint32_t reg;
3329
3330 bus_space_write_4(sc->sc_st, sc->sc_sh, 0x00cc, 0x0001);
3331
3332 reg = bus_space_read_4(sc->sc_st, sc->sc_sh, 0x00f4);
3333 reg &= 0x0fff;
3334 bus_space_write_4(sc->sc_st, sc->sc_sh, 0x00f4, reg | 0x1000);
3335 delay(100);
3336 reg = bus_space_read_4(sc->sc_st, sc->sc_sh, 0x00fc);
3337 reg &= 0x00ff;
3338 if ((reg & 0x0080) == 0 || (reg >= 0x00d8)) {
3339 bus_space_write_4(sc->sc_st, sc->sc_sh, 0x00fc,
3340 0x00e8);
3341 reg = bus_space_read_4(sc->sc_st, sc->sc_sh, 0x00f4);
3342 bus_space_write_4(sc->sc_st, sc->sc_sh, 0x00f4,
3343 reg | 0x20);
3344 }
3345
3346 bus_space_write_4(sc->sc_st, sc->sc_sh, 0x00cc, 0);
3347 }
3348 }
3349 #endif /* ! DP83820 */
3350
3351 #if defined(DP83820)
3352 void
3353 SIP_DECL(dp83820_read_macaddr)(struct sip_softc *sc,
3354 const struct pci_attach_args *pa, u_int8_t *enaddr)
3355 {
3356 u_int16_t eeprom_data[SIP_DP83820_EEPROM_LENGTH / 2];
3357 u_int8_t cksum, *e, match;
3358 int i;
3359
3360 /*
3361 * EEPROM data format for the DP83820 can be found in
3362 * the DP83820 manual, section 4.2.4.
3363 */
3364
3365 SIP_DECL(read_eeprom)(sc, 0,
3366 sizeof(eeprom_data) / sizeof(eeprom_data[0]), eeprom_data);
3367
3368 match = eeprom_data[SIP_DP83820_EEPROM_CHECKSUM / 2] >> 8;
3369 match = ~(match - 1);
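	/*
	 * ~(match - 1) is the two's-complement negation of the stored
	 * checksum byte, i.e. the value the 0x55-seeded byte sum below
	 * should come out to.
	 */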
3370
3371 cksum = 0x55;
3372 e = (u_int8_t *) eeprom_data;
3373 for (i = 0; i < SIP_DP83820_EEPROM_CHECKSUM; i++)
3374 cksum += *e++;
3375
3376 if (cksum != match)
3377 		printf("%s: Checksum (%x) mismatch (%x)\n",
3378 sc->sc_dev.dv_xname, cksum, match);
3379
3380 enaddr[0] = eeprom_data[SIP_DP83820_EEPROM_PMATCH2 / 2] & 0xff;
3381 enaddr[1] = eeprom_data[SIP_DP83820_EEPROM_PMATCH2 / 2] >> 8;
3382 enaddr[2] = eeprom_data[SIP_DP83820_EEPROM_PMATCH1 / 2] & 0xff;
3383 enaddr[3] = eeprom_data[SIP_DP83820_EEPROM_PMATCH1 / 2] >> 8;
3384 enaddr[4] = eeprom_data[SIP_DP83820_EEPROM_PMATCH0 / 2] & 0xff;
3385 enaddr[5] = eeprom_data[SIP_DP83820_EEPROM_PMATCH0 / 2] >> 8;
3386 }
3387 #else /* ! DP83820 */
3388 static void
3389 SIP_DECL(sis900_eeprom_delay)(struct sip_softc *sc)
3390 {
3391 int i;
3392
3393 /*
3394 * FreeBSD goes from (300/33)+1 [10] to 0. There must be
3395 * a reason, but I don't know it.
3396 */
3397 for (i = 0; i < 10; i++)
3398 bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_CR);
3399 }
3400
3401 void
3402 SIP_DECL(sis900_read_macaddr)(struct sip_softc *sc,
3403 const struct pci_attach_args *pa, u_int8_t *enaddr)
3404 {
3405 u_int16_t myea[ETHER_ADDR_LEN / 2];
3406
3407 switch (sc->sc_rev) {
3408 case SIS_REV_630S:
3409 case SIS_REV_630E:
3410 case SIS_REV_630EA1:
3411 case SIS_REV_630ET:
3412 case SIS_REV_635:
3413 /*
3414 * The MAC address for the on-board Ethernet of
3415 * the SiS 630 chipset is in the NVRAM. Kick
3416 * the chip into re-loading it from NVRAM, and
3417 * read the MAC address out of the filter registers.
3418 */
3419 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_CR, CR_RLD);
3420
3421 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_RFCR,
3422 RFCR_RFADDR_NODE0);
3423 myea[0] = bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_RFDR) &
3424 0xffff;
3425
3426 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_RFCR,
3427 RFCR_RFADDR_NODE2);
3428 myea[1] = bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_RFDR) &
3429 0xffff;
3430
3431 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_RFCR,
3432 RFCR_RFADDR_NODE4);
3433 myea[2] = bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_RFDR) &
3434 0xffff;
3435 break;
3436
3437 case SIS_REV_960:
3438 {
3439 #define SIS_SET_EROMAR(x,y) bus_space_write_4(x->sc_st, x->sc_sh, SIP_EROMAR, \
3440 bus_space_read_4(x->sc_st, x->sc_sh, SIP_EROMAR) | (y))
3441
3442 #define SIS_CLR_EROMAR(x,y) bus_space_write_4(x->sc_st, x->sc_sh, SIP_EROMAR, \
3443 bus_space_read_4(x->sc_st, x->sc_sh, SIP_EROMAR) & ~(y))
3444
3445 int waittime, i;
3446
3447 		/* Allow the LAN side to read the EEPROM. It is shared
3448 		 * between a 1394 controller and the NIC, and each time
3449 		 * we access it we must first request it (EROMAR_REQ).
3450 */
3451 SIS_SET_EROMAR(sc, EROMAR_REQ);
3452
3453 for (waittime = 0; waittime < 1000; waittime++) { /* 1 ms max */
3454 /* Force EEPROM to idle state. */
3455
3456 /*
3457 * XXX-cube This is ugly. I'll look for docs about it.
3458 */
3459 SIS_SET_EROMAR(sc, EROMAR_EECS);
3460 SIP_DECL(sis900_eeprom_delay)(sc);
3461 for (i = 0; i <= 25; i++) { /* Yes, 26 times. */
3462 SIS_SET_EROMAR(sc, EROMAR_EESK);
3463 SIP_DECL(sis900_eeprom_delay)(sc);
3464 SIS_CLR_EROMAR(sc, EROMAR_EESK);
3465 SIP_DECL(sis900_eeprom_delay)(sc);
3466 }
3467 SIS_CLR_EROMAR(sc, EROMAR_EECS);
3468 SIP_DECL(sis900_eeprom_delay)(sc);
3469 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_EROMAR, 0);
3470
3471 if (bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_EROMAR) & EROMAR_GNT) {
3472 SIP_DECL(read_eeprom)(sc, SIP_EEPROM_ETHERNET_ID0 >> 1,
3473 sizeof(myea) / sizeof(myea[0]), myea);
3474 break;
3475 }
3476 DELAY(1);
3477 }
3478
3479 /*
3480 		 * Set the EEPROM clock (EROMAR_EESK) high, so another
3481 		 * master can operate on the i2c bus.
3482 */
3483 SIS_SET_EROMAR(sc, EROMAR_EESK);
3484
3485 /* Refuse EEPROM access by LAN */
3486 SIS_SET_EROMAR(sc, EROMAR_DONE);
3487 } break;
3488
3489 default:
3490 SIP_DECL(read_eeprom)(sc, SIP_EEPROM_ETHERNET_ID0 >> 1,
3491 sizeof(myea) / sizeof(myea[0]), myea);
3492 }
3493
3494 enaddr[0] = myea[0] & 0xff;
3495 enaddr[1] = myea[0] >> 8;
3496 enaddr[2] = myea[1] & 0xff;
3497 enaddr[3] = myea[1] >> 8;
3498 enaddr[4] = myea[2] & 0xff;
3499 enaddr[5] = myea[2] >> 8;
3500 }
3501
3502 /* Table and macro to bit-reverse an octet. */
3503 static const u_int8_t bbr4[] = {0,8,4,12,2,10,6,14,1,9,5,13,3,11,7,15};
3504 #define bbr(v) ((bbr4[(v)&0xf] << 4) | bbr4[((v)>>4) & 0xf])
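/* For example, bbr(0x01) == 0x80 and bbr(0xa5) == 0xa5. */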
3505
3506 void
3507 SIP_DECL(dp83815_read_macaddr)(struct sip_softc *sc,
3508 const struct pci_attach_args *pa, u_int8_t *enaddr)
3509 {
3510 u_int16_t eeprom_data[SIP_DP83815_EEPROM_LENGTH / 2], *ea;
3511 u_int8_t cksum, *e, match;
3512 int i;
3513
3514 SIP_DECL(read_eeprom)(sc, 0, sizeof(eeprom_data) /
3515 sizeof(eeprom_data[0]), eeprom_data);
3516
3517 match = eeprom_data[SIP_DP83815_EEPROM_CHECKSUM/2] >> 8;
3518 match = ~(match - 1);
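	/*
	 * ~(match - 1) negates the stored checksum byte; the 0x55-seeded
	 * byte sum computed below should equal it.
	 */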
3519
3520 cksum = 0x55;
3521 e = (u_int8_t *) eeprom_data;
3522 	for (i = 0; i < SIP_DP83815_EEPROM_CHECKSUM; i++) {
3523 cksum += *e++;
3524 }
3525 if (cksum != match) {
3526 		printf("%s: Checksum (%x) mismatch (%x)\n",
3527 sc->sc_dev.dv_xname, cksum, match);
3528 }
3529
3530 /*
3531 * Unrolled because it makes slightly more sense this way.
3532 * The DP83815 stores the MAC address in bit 0 of word 6
3533 * through bit 15 of word 8.
3534 */
3535 ea = &eeprom_data[6];
3536 enaddr[0] = ((*ea & 0x1) << 7);
3537 ea++;
3538 enaddr[0] |= ((*ea & 0xFE00) >> 9);
3539 enaddr[1] = ((*ea & 0x1FE) >> 1);
3540 enaddr[2] = ((*ea & 0x1) << 7);
3541 ea++;
3542 enaddr[2] |= ((*ea & 0xFE00) >> 9);
3543 enaddr[3] = ((*ea & 0x1FE) >> 1);
3544 enaddr[4] = ((*ea & 0x1) << 7);
3545 ea++;
3546 enaddr[4] |= ((*ea & 0xFE00) >> 9);
3547 enaddr[5] = ((*ea & 0x1FE) >> 1);
3548
3549 /*
3550 * In case that's not weird enough, we also need to reverse
3551 * the bits in each byte. This all actually makes more sense
3552 * if you think about the EEPROM storage as an array of bits
3553 * being shifted into bytes, but that's not how we're looking
3554 * at it here...
3555 */
3556 	for (i = 0; i < 6; i++)
3557 enaddr[i] = bbr(enaddr[i]);
3558 }
3559 #endif /* DP83820 */
3560
3561 /*
3562 * sip_mediastatus: [ifmedia interface function]
3563 *
3564 * Get the current interface media status.
3565 */
3566 void
3567 SIP_DECL(mediastatus)(struct ifnet *ifp, struct ifmediareq *ifmr)
3568 {
3569 struct sip_softc *sc = ifp->if_softc;
3570
3571 mii_pollstat(&sc->sc_mii);
3572 ifmr->ifm_status = sc->sc_mii.mii_media_status;
3573 ifmr->ifm_active = sc->sc_mii.mii_media_active;
3574 }
3575
3576 /*
3577 * sip_mediachange: [ifmedia interface function]
3578 *
3579 * Set hardware to newly-selected media.
3580 */
3581 int
3582 SIP_DECL(mediachange)(struct ifnet *ifp)
3583 {
3584 struct sip_softc *sc = ifp->if_softc;
3585
3586 if (ifp->if_flags & IFF_UP)
3587 mii_mediachg(&sc->sc_mii);
3588 return (0);
3589 }
3590